From e8740685ceb3ad8637532e7ddffb84ea55d4fc27 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Thu, 6 Feb 2014 14:13:03 -0800 Subject: Remove linux specific calls Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- archive/archive.go | 25 +++++++++++++++---------- archive/start_unsupported.go | 21 +++++++++++++++++++++ archive/stat_darwin.go | 17 ----------------- archive/stat_linux.go | 7 +++++++ 4 files changed, 43 insertions(+), 27 deletions(-) create mode 100644 archive/start_unsupported.go delete mode 100644 archive/stat_darwin.go diff --git a/archive/archive.go b/archive/archive.go index b1400c2210..3a1c111ea2 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -5,6 +5,7 @@ import ( "bytes" "compress/bzip2" "compress/gzip" + "errors" "fmt" "github.com/dotcloud/docker/utils" "io" @@ -17,14 +18,18 @@ import ( "syscall" ) -type Archive io.Reader - -type Compression int +type ( + Archive io.Reader + Compression int + TarOptions struct { + Includes []string + Compression Compression + } +) -type TarOptions struct { - Includes []string - Compression Compression -} +var ( + ErrNotImplemented = errors.New("Function not implemented") +) const ( Uncompressed Compression = iota @@ -236,14 +241,14 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader *tar.Reader) return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) } - if err := syscall.Lchown(path, hdr.Uid, hdr.Gid); err != nil { + if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { return err } // There is no LChmod, so ignore mode for symlink. Also, this // must happen after chown, as that can modify the file mode if hdr.Typeflag != tar.TypeSymlink { - if err := syscall.Chmod(path, uint32(hdr.Mode&07777)); err != nil { + if err := os.Chmod(path, os.FileMode(hdr.Mode&07777)); err != nil { return err } } @@ -251,7 +256,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader *tar.Reader) ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and if hdr.Typeflag != tar.TypeSymlink { - if err := syscall.UtimesNano(path, ts); err != nil { + if err := UtimesNano(path, ts); err != nil { return err } } else { diff --git a/archive/start_unsupported.go b/archive/start_unsupported.go new file mode 100644 index 0000000000..834eda8c65 --- /dev/null +++ b/archive/start_unsupported.go @@ -0,0 +1,21 @@ +// +build !linux !amd64 + +package archive + +import "syscall" + +func getLastAccess(stat *syscall.Stat_t) syscall.Timespec { + return stat.Atimespec +} + +func getLastModification(stat *syscall.Stat_t) syscall.Timespec { + return stat.Mtimespec +} + +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotImplemented +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotImplemented +} diff --git a/archive/stat_darwin.go b/archive/stat_darwin.go deleted file mode 100644 index 32203299dd..0000000000 --- a/archive/stat_darwin.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !linux !amd64 - -package archive - -import "syscall" - -func getLastAccess(stat *syscall.Stat_t) syscall.Timespec { - return stat.Atimespec -} - -func getLastModification(stat *syscall.Stat_t) syscall.Timespec { - return stat.Mtimespec -} - -func LUtimesNano(path string, ts []syscall.Timespec) error { - return nil -} diff --git a/archive/stat_linux.go b/archive/stat_linux.go index 2f7a520ccd..f87a99c55a 100644 --- a/archive/stat_linux.go +++ b/archive/stat_linux.go @@ 
-30,3 +30,10 @@ func LUtimesNano(path string, ts []syscall.Timespec) error { return nil } + +func UtimesNano(path string, ts []syscall.Timespec) error { + if err := syscall.UtimesNano(path, ts); err != nil { + return err + } + return nil +} -- cgit v1.2.1 From 547ac421995860f99d1d0803c3c1e7ea51dc681e Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Fri, 7 Feb 2014 00:44:14 +0000 Subject: Add Freebsd client support Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- pkg/term/termios_bsd.go | 67 +++++++++++++++++++++++++++++++++++++++++++++++++ utils/signal_freebsd.go | 42 +++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) create mode 100644 pkg/term/termios_bsd.go create mode 100644 utils/signal_freebsd.go diff --git a/pkg/term/termios_bsd.go b/pkg/term/termios_bsd.go new file mode 100644 index 0000000000..9acf9dfe15 --- /dev/null +++ b/pkg/term/termios_bsd.go @@ -0,0 +1,67 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA + + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + // C.makeraw() + // return &oldState, nil + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/utils/signal_freebsd.go b/utils/signal_freebsd.go new file mode 100644 index 0000000000..65a700e894 --- /dev/null +++ b/utils/signal_freebsd.go @@ -0,0 +1,42 @@ +package utils + +import ( + "os" + "os/signal" + "syscall" +) + +func CatchAll(sigc chan os.Signal) { + signal.Notify(sigc, + syscall.SIGABRT, + syscall.SIGALRM, + syscall.SIGBUS, + syscall.SIGCHLD, + syscall.SIGCONT, + syscall.SIGFPE, + syscall.SIGHUP, + syscall.SIGILL, + syscall.SIGINT, + syscall.SIGIO, + syscall.SIGIOT, + syscall.SIGKILL, + syscall.SIGPIPE, + syscall.SIGPROF, + syscall.SIGQUIT, + syscall.SIGSEGV, + syscall.SIGSTOP, + syscall.SIGSYS, + syscall.SIGTERM, + syscall.SIGTRAP, + syscall.SIGTSTP, + syscall.SIGTTIN, + syscall.SIGTTOU, + syscall.SIGURG, + syscall.SIGUSR1, + syscall.SIGUSR2, + syscall.SIGVTALRM, + syscall.SIGWINCH, + syscall.SIGXCPU, + syscall.SIGXFSZ, + ) +} -- cgit v1.2.1 From 3b969aad4af430372d5cac1e700f75dba8a38a13 Mon Sep 17 00:00:00 2001 From: Wes Morgan Date: Thu, 30 Jan 2014 12:00:18 -0700 Subject: merge existing config when committing Fixes #1141 Docker-DCO-1.1-Signed-off-by: Wes Morgan (github: cap10morgan) --- docs/sources/reference/commandline/cli.rst | 61 ++++++++++++++++++++++-------- integration/server_test.go | 57 ++++++++++++++++++++++++++++ server.go | 11 ++++-- 3 files changed, 110 insertions(+), 19 deletions(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 564ea8a034..1f83859c5c 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -83,7 +83,7 @@ Commands -v, --version=false: Print version information and quit --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available -The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the +The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the daemon and client. To run the daemon you provide the ``-d`` flag. To force Docker to use devicemapper as the storage driver, use ``docker -d -s devicemapper``. @@ -93,10 +93,10 @@ To set the DNS server for all Docker containers, use ``docker -d -dns 8.8.8.8``. To run the daemon with debug output, use ``docker -d -D``. The docker client will also honor the ``DOCKER_HOST`` environment variable to set -the ``-H`` flag for the client. +the ``-H`` flag for the client. 
:: - + docker -H tcp://0.0.0.0:4243 ps # or export DOCKER_HOST="tcp://0.0.0.0:4243" @@ -130,7 +130,7 @@ You can find examples of using systemd socket activation with docker and systemd You can detach from the container again (and leave it running) with ``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of -the Docker client when it quits. When you detach from the container's +the Docker client when it quits. When you detach from the container's process the exit code will be returned to the client. To stop a container, use ``docker stop``. @@ -292,7 +292,7 @@ by using the ``git://`` schema. -m, --message="": Commit message -a, --author="": Author (eg. "John Hannibal Smith " - --run="": Configuration to be applied when the image is launched with `docker run`. + --run="": Configuration changes to be applied when the image is launched with `docker run`. (ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}') .. _cli_commit_examples: @@ -304,14 +304,14 @@ Commit an existing container $ sudo docker ps ID IMAGE COMMAND CREATED STATUS PORTS - c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours - 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours + 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours $ docker commit c3f279d17e0a SvenDowideit/testimage:version3 f5283438590d $ docker images | head REPOSITORY TAG ID CREATED VIRTUAL SIZE SvenDowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB - + Change the command that a container runs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -333,11 +333,40 @@ run ``ls /etc``. apt host.conf lsb-base rc2.d ... +Merged configs example +...................... + +Say you have a Dockerfile like so: + +.. code-block:: bash + + ENV MYVAR foobar + RUN apt-get install openssh + EXPOSE 22 + CMD ["/usr/sbin/sshd -D"] + ... + +If you run that, make some changes, and then commit, Docker will merge the environment variable and exposed port configuration settings with any that you specify in the -run= option. This is a change from Docker 0.8.0 and prior where no attempt was made to preserve any existing configuration on commit. + +.. code-block:: bash + + $ docker build -t me/foo . + $ docker run -t -i me/foo /bin/bash + foo-container$ [make changes in the container] + foo-container$ exit + $ docker commit -run='{"Cmd": ["ls"]}' [container-id] me/bar + ... + +The me/bar image will now have port 22 exposed, MYVAR env var set to 'foobar', and its default command will be ["ls"]. + +Note that this is currently a shallow merge. So, for example, if you had specified a new port spec in the -run= config above, that would have clobbered the 'EXPOSE 22' setting from the parent container. + Full -run example ................. The ``--run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID`` -or ``config`` when running ``docker inspect IMAGEID``. +or ``config`` when running ``docker inspect IMAGEID``. Existing configuration key-values that are +not overridden in the JSON hash will be merged in. (Multiline is okay within a single quote ``'``) @@ -653,7 +682,7 @@ Displaying image hierarchy Usage: docker import URL|- [REPOSITORY[:TAG]] - Create an empty filesystem image and import the contents of the tarball + Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. 
At this time, the URL must start with ``http`` and point to a single @@ -918,7 +947,7 @@ Running ``docker ps`` showing 2 linked containers. $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp + 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db fd2645e2e2b5 busybox:latest top 10 days ago Ghost insane_ptolemy @@ -1022,7 +1051,7 @@ containers will not be deleted. Remove one or more images -f, --force=false: Force - + Removing tagged images ~~~~~~~~~~~~~~~~~~~~~~ @@ -1101,8 +1130,8 @@ Once the container is stopped it still exists and can be started back up. See ` The ``docker run`` command can be used in combination with ``docker commit`` to :ref:`change the command that a container runs `. -See :ref:`port_redirection` for more detailed information about the ``--expose``, -``-p``, ``-P`` and ``--link`` parameters, and :ref:`working_with_links_names` for +See :ref:`port_redirection` for more detailed information about the ``--expose``, +``-p``, ``-P`` and ``--link`` parameters, and :ref:`working_with_links_names` for specific examples using ``--link``. Known Issues (run -volumes-from) @@ -1182,8 +1211,8 @@ starting your container. $ sudo docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v ./static-docker:/usr/bin/docker busybox sh -By bind-mounting the docker unix socket and statically linked docker binary -(such as that provided by https://get.docker.io), you give the container +By bind-mounting the docker unix socket and statically linked docker binary +(such as that provided by https://get.docker.io), you give the container the full access to create and manipulate the host's docker daemon. .. 
code-block:: bash diff --git a/integration/server_test.go b/integration/server_test.go index 1247e8d2d8..915119f65c 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -219,6 +219,63 @@ func TestCommit(t *testing.T) { } } +func TestMergeConfigOnCommit(t *testing.T) { + eng := NewTestEngine(t) + runtime := mkRuntimeFromEngine(eng, t) + defer runtime.Nuke() + + container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t) + defer runtime.Destroy(container1) + + config, _, _, err := runconfig.Parse([]string{container1.ID, "cat /tmp/foo"}, nil) + if err != nil { + t.Error(err) + } + + job := eng.Job("commit", container1.ID) + job.Setenv("repo", "testrepo") + job.Setenv("tag", "testtag") + job.SetenvJson("config", config) + var newId string + job.Stdout.AddString(&newId) + if err := job.Run(); err != nil { + t.Error(err) + } + + container2, _, _ := mkContainer(runtime, []string{newId}, t) + defer runtime.Destroy(container2) + + job = eng.Job("inspect", container1.Name, "container") + baseContainer, _ := job.Stdout.AddEnv() + if err := job.Run(); err != nil { + t.Error(err) + } + + job = eng.Job("inspect", container2.Name, "container") + commitContainer, _ := job.Stdout.AddEnv() + if err := job.Run(); err != nil { + t.Error(err) + } + + baseConfig := baseContainer.GetSubEnv("Config") + commitConfig := commitContainer.GetSubEnv("Config") + + if commitConfig.Get("Env") != baseConfig.Get("Env") { + t.Fatalf("Env config in committed container should be %v, was %v", + baseConfig.Get("Env"), commitConfig.Get("Env")) + } + + if baseConfig.Get("Cmd") != "[\"echo test \\u003e /tmp/foo\"]" { + t.Fatalf("Cmd in base container should be [\"echo test \\u003e /tmp/foo\"], was %s", + baseConfig.Get("Cmd")) + } + + if commitConfig.Get("Cmd") != "[\"cat /tmp/foo\"]" { + t.Fatalf("Cmd in committed container should be [\"cat /tmp/foo\"], was %s", + commitConfig.Get("Cmd")) + } +} + func TestRestartKillWait(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) diff --git a/server.go b/server.go index e6176e18bd..a0bfd286c5 100644 --- a/server.go +++ b/server.go @@ -1043,12 +1043,17 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { if container == nil { return job.Errorf("No such container: %s", name) } - var config runconfig.Config - if err := job.GetenvJson("config", &config); err != nil { + var config = container.Config + var newConfig runconfig.Config + if err := job.GetenvJson("config", &newConfig); err != nil { return job.Error(err) } - img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &config) + if err := runconfig.Merge(&newConfig, config); err != nil { + return job.Error(err) + } + + img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig) if err != nil { return job.Error(err) } -- cgit v1.2.1 From b39d02b611f1cc0af283f417b73bf0d36f26277a Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Mon, 3 Mar 2014 21:53:57 -0700 Subject: Support hairpin NAT without going through docker server Hairpin NAT is currently done by passing through the docker server. If two containers on the same box try to access each other through exposed ports and using the host IP the current iptables rules will not match the DNAT and thus the traffic goes to 'docker -d' This change drops the restriction that DNAT traffic must not originate from docker0. 
It should be safe to drop this restriction because the DOCKER chain is already gated by jumps that check for the destination address to be a local address. Docker-DCO-1.1-Signed-off-by: Darren Shepherd (github: ibuildthecloud) --- pkg/iptables/iptables.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index 4cdd67ef7c..1f25952bd9 100644 --- a/pkg/iptables/iptables.go +++ b/pkg/iptables/iptables.go @@ -66,7 +66,6 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str "-p", proto, "-d", daddr, "--dport", strconv.Itoa(port), - "!", "-i", c.Bridge, "-j", "DNAT", "--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil { return err -- cgit v1.2.1 From 2aeccdd3bb98760ab10e834c6c134bb76f664910 Mon Sep 17 00:00:00 2001 From: Alexey Kotlyarov Date: Tue, 4 Mar 2014 14:45:14 +1100 Subject: Create directories for tar files with relaxed permissions Docker-DCO-1.1-Signed-off-by: Alexey Kotlyarov (github: koterpillar) --- archive/archive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/archive/archive.go b/archive/archive.go index 72bd31a281..c15a9d153b 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -403,7 +403,7 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error { parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = os.MkdirAll(parentPath, 600) + err = os.MkdirAll(parentPath, 0777) if err != nil { return err } -- cgit v1.2.1 From 28a545d294cac3b2e1f4266f5099bd2c5ddb342f Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Fri, 14 Feb 2014 16:56:21 +1000 Subject: Show some ENV / local updated baseimage tricks that use an apt-cacher-ng proxy to make debian based installations instant Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/examples/apt-cacher-ng.Dockerfile | 15 ++++ docs/sources/examples/apt-cacher-ng.rst | 104 +++++++++++++++++++++++++ docs/sources/examples/index.rst | 1 + 3 files changed, 120 insertions(+) create mode 100644 docs/sources/examples/apt-cacher-ng.Dockerfile create mode 100644 docs/sources/examples/apt-cacher-ng.rst diff --git a/docs/sources/examples/apt-cacher-ng.Dockerfile b/docs/sources/examples/apt-cacher-ng.Dockerfile new file mode 100644 index 0000000000..fcc326815d --- /dev/null +++ b/docs/sources/examples/apt-cacher-ng.Dockerfile @@ -0,0 +1,15 @@ +# +# BUILD docker build -t apt-cacher . +# RUN docker run -d -p 3142:3142 -name apt-cacher-run apt-cacher +# +# and then you can run containers with: +# docker run -t -i -rm -e http_proxy http://dockerhost:3142/ debian bash +# +FROM ubuntu +MAINTAINER SvenDowideit@docker.com + +VOLUME ["/var/cache/apt-cacher-ng"] +RUN apt-get update ; apt-get install -yq apt-cacher-ng + +EXPOSE 3142 +CMD chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/* diff --git a/docs/sources/examples/apt-cacher-ng.rst b/docs/sources/examples/apt-cacher-ng.rst new file mode 100644 index 0000000000..f828e4e1e7 --- /dev/null +++ b/docs/sources/examples/apt-cacher-ng.rst @@ -0,0 +1,104 @@ +:title: Running an apt-cacher-ng service +:description: Installing and running an apt-cacher-ng service +:keywords: docker, example, package installation, networking, debian, ubuntu + +.. _running_apt-cacher-ng_service: + +Apt-Cacher-ng Service +===================== + +.. 
include:: example_header.inc + + +When you have multiple Docker servers, or build unrelated Docker containers +which can't make use of the Docker build cache, it can be useful to have a +caching proxy for your packages. This container makes the second download of +any package almost instant. + +Use the following Dockerfile: + +.. literalinclude:: apt-cacher-ng.Dockerfile + +To build the image using: + +.. code-block:: bash + + $ sudo docker build -rm -t eg_apt_cacher_ng . + +Then run it, mapping the exposed port to one on the host + +.. code-block:: bash + + $ sudo docker run -d -p 3142:3142 -name test_apt_cacher_ng eg_apt_cacher_ng + +To see the logfiles that are 'tailed' in the default command, you can use: + +.. code-block:: bash + + $ sudo docker logs -f test_apt_cacher_ng + +To get your Debian based containers to use the proxy, you can do one of 3 things + +1. Set and environment variable: ``http_proxy=http://dockerhost:3142/`` +2. Add an apt Proxy setting ``echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/conf.d/01proxy`` +3. Change your sources.list entries to start with ``http://dockerhost:3142/`` + +Option 1 will work for running a container, so is good for testing, but will +break any non-apt http requests, like ``curl``, ``wget`` and more.: + +.. code-block:: bash + + $ sudo docker run -rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash + +Or you can inject the settings safely into your apt configuration in a local +version of a common base: + +.. code-block:: bash + + FROM ubuntu + + RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy + + RUN apt-get update ; apt-get install vim git + + # docker build -t my_ubuntu . + +Option 3 is the least portable, but there will be times when you might need to +do it - and you can do it from your Dockerfile too. + +Apt-cacher-ng has some tools that allow you to manage the repository, and they +can be used by leveraging the ``VOLUME``, and the image we built to run the +service: + +.. code-block:: bash + + $ sudo docker run -rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash + + $$ /usr/lib/apt-cacher-ng/distkill.pl + Scanning /var/cache/apt-cacher-ng, please wait... + Found distributions: + bla, taggedcount: 0 + 1. precise-security (36 index files) + 2. wheezy (25 index files) + 3. precise-updates (36 index files) + 4. precise (36 index files) + 5. wheezy-updates (18 index files) + + Found architectures: + 6. amd64 (36 index files) + 7. i386 (24 index files) + + WARNING: The removal action may wipe out whole directories containing + index files. Select d to see detailed list. + + (Number nn: tag distribution or architecture nn; 0: exit; d: show details; r: remove tagged; q: quit): q + + +Finally, clean up after your test by stopping and removing the container, and +then removing the image. + +.. code-block:: bash + + $ sudo docker stop test_apt_cacher_ng + $ sudo docker rm test_apt_cacher_ng + $ sudo docker rmi eg_apt_cacher_ng diff --git a/docs/sources/examples/index.rst b/docs/sources/examples/index.rst index cf9ed9340a..383d0760f7 100644 --- a/docs/sources/examples/index.rst +++ b/docs/sources/examples/index.rst @@ -26,3 +26,4 @@ to more substantial services like those which you might find in production. 
using_supervisord cfengine_process_management python_web_app + apt-cacher-ng -- cgit v1.2.1 From cadd94f44c6f6e276a5b028a2935b5e352408d3b Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 18 Feb 2014 12:55:14 +1000 Subject: implement pharvey's suggestions Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/examples/apt-cacher-ng.rst | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/sources/examples/apt-cacher-ng.rst b/docs/sources/examples/apt-cacher-ng.rst index f828e4e1e7..0fb55720f2 100644 --- a/docs/sources/examples/apt-cacher-ng.rst +++ b/docs/sources/examples/apt-cacher-ng.rst @@ -37,20 +37,13 @@ To see the logfiles that are 'tailed' in the default command, you can use: $ sudo docker logs -f test_apt_cacher_ng -To get your Debian based containers to use the proxy, you can do one of 3 things +To get your Debian based containers to use the proxy, you can do one of three things -1. Set and environment variable: ``http_proxy=http://dockerhost:3142/`` -2. Add an apt Proxy setting ``echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/conf.d/01proxy`` +1. Add an apt Proxy setting ``echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/conf.d/01proxy`` +2. Set and environment variable: ``http_proxy=http://dockerhost:3142/`` 3. Change your sources.list entries to start with ``http://dockerhost:3142/`` -Option 1 will work for running a container, so is good for testing, but will -break any non-apt http requests, like ``curl``, ``wget`` and more.: - -.. code-block:: bash - - $ sudo docker run -rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash - -Or you can inject the settings safely into your apt configuration in a local +**Option 1** injects the settings safely into your apt configuration in a local version of a common base: .. code-block:: bash @@ -63,7 +56,14 @@ version of a common base: # docker build -t my_ubuntu . -Option 3 is the least portable, but there will be times when you might need to +**Option 2** is good for testing, but will +break other HTTP clients which obey ``http_proxy``, such as ``curl``, ``wget`` and others: + +.. code-block:: bash + + $ sudo docker run -rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash + +**Option 3** is the least portable, but there will be times when you might need to do it - and you can do it from your Dockerfile too. Apt-cacher-ng has some tools that allow you to manage the repository, and they -- cgit v1.2.1 From 1cdd775f5d95c4da2895da85b00ffa2917bbf9b0 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Thu, 6 Mar 2014 15:12:09 +0100 Subject: DeviceMapper: Succeed immediately when removing non-existant devices We've seen situations where removal of "ID-init" failed during container deletion (EBUSY), after removal of "ID" has succeeded. This caused the container delete operation to fail, and on the next delete attempt the removal of "ID" failed immediately with "does not exist". Ideally we should not fail the ID-init removal, but its also non-ideal to allow a state where the container is half-removed and we cannot make progress deleting the container. So, we silently ignore not-exist errors on device removal. 
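Concretely, the guard added at the top of the driver's Remove (a condensed sketch of the diff below; the remainder of the function is elided and unchanged) is:

    func (d *Driver) Remove(id string) error {
    	if !d.DeviceSet.HasDevice(id) {
    		// The device is already gone (e.g. after an earlier partial
    		// removal), so treat this as a successful no-op and let
    		// container deletion make progress.
    		return nil
    	}
    	// ... unmount, wait and delete as in the existing code ...
    }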
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- graphdriver/devmapper/driver.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/graphdriver/devmapper/driver.go b/graphdriver/devmapper/driver.go index 4d414f9a75..8c5a19eea0 100644 --- a/graphdriver/devmapper/driver.go +++ b/graphdriver/devmapper/driver.go @@ -90,6 +90,13 @@ func (d *Driver) Create(id, parent string) error { } func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + // Sink the float from create in case no Get() call was made if err := d.DeviceSet.UnmountDevice(id, UnmountSink); err != nil { return err -- cgit v1.2.1 From 3e8a02a9399618917194b37435f5eed9ff86fe2f Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Thu, 6 Mar 2014 18:14:56 +0100 Subject: devmapper: Add per-device lock We currently use a global lock to protect global data (like the Devices map) as well as device data itself and access to (non-threadsafe) libdevmapper. This commit also adds a per-device lock, which will allow per-device operations to temporarily release the global lock while e.g. waiting. The per-device lock will make sure that nothing else accesses that device while we're operating on it. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- graphdriver/devmapper/deviceset.go | 39 +++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/graphdriver/devmapper/deviceset.go b/graphdriver/devmapper/deviceset.go index 303e363e92..f1a0e47d03 100644 --- a/graphdriver/devmapper/deviceset.go +++ b/graphdriver/devmapper/deviceset.go @@ -39,6 +39,13 @@ type DevInfo struct { // first get (since we need to mount to set up the device // a bit first). floating bool `json:"-"` + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. 
+ lock sync.Mutex `json:"-"` } type MetaData struct { @@ -47,7 +54,7 @@ type MetaData struct { type DeviceSet struct { MetaData - sync.Mutex + sync.Mutex // Protects Devices map and serializes calls into libdevmapper root string devicePrefix string TransactionId uint64 @@ -569,6 +576,9 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { return fmt.Errorf("Error adding device for '%s': can't find device for parent '%s'", hash, baseHash) } + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + deviceId := devices.allocateDeviceId() if err := devices.createSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { @@ -636,6 +646,14 @@ func (devices *DeviceSet) DeleteDevice(hash string) error { devices.Lock() defer devices.Unlock() + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("Unknown device %s", hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + return devices.deleteDevice(hash) } @@ -773,20 +791,26 @@ func (devices *DeviceSet) Shutdown() error { defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) for _, info := range devices.Devices { + info.lock.Lock() if info.mountCount > 0 { if err := sysUnmount(info.mountPath, 0); err != nil { utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err) } } + info.lock.Unlock() } for _, d := range devices.Devices { + d.lock.Lock() + if err := devices.waitClose(d.Hash); err != nil { utils.Errorf("Warning: error waiting for device %s to unmount: %s\n", d.Hash, err) } if err := devices.deactivateDevice(d.Hash); err != nil { utils.Debugf("Shutdown deactivate %s , error: %s\n", d.Hash, err) } + + d.lock.Unlock() } if err := devices.deactivatePool(); err != nil { @@ -805,6 +829,9 @@ func (devices *DeviceSet) MountDevice(hash, path string) error { return fmt.Errorf("Unknown device %s", hash) } + info.lock.Lock() + defer info.lock.Unlock() + if info.mountCount > 0 { if path != info.mountPath { return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path) @@ -851,6 +878,9 @@ func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { return fmt.Errorf("UnmountDevice: no such device %s\n", hash) } + info.lock.Lock() + defer info.lock.Unlock() + if mode == UnmountFloat { if info.floating { return fmt.Errorf("UnmountDevice: can't float floating reference %s\n", hash) @@ -920,6 +950,10 @@ func (devices *DeviceSet) HasActivatedDevice(hash string) bool { if info == nil { return false } + + info.lock.Lock() + defer info.lock.Unlock() + devinfo, _ := getInfo(info.Name()) return devinfo != nil && devinfo.Exists != 0 } @@ -974,6 +1008,9 @@ func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { return nil, fmt.Errorf("No device %s", hash) } + info.lock.Lock() + defer info.lock.Unlock() + status := &DevStatus{ DeviceId: info.DeviceId, Size: info.Size, -- cgit v1.2.1 From 81f148be566ab2b17810ad4be61a5d8beac8330f Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Thu, 6 Mar 2014 18:25:43 +0100 Subject: devmapper: Increase sleep times and unlock while sleeping We've seen some cases in the wild where waiting for unmount/deactivate of devmapper devices taking a long time (several seconds). So, we increase the sleeps to 10 seconds before we timeout. For instance: https://github.com/dotcloud/docker/issues/4389 But, in order to not keep other processes blocked we unlock the global dm lock while waiting to allow other devices to continue working. 
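Condensed from the diff below, each wait loop (removeDeviceAndWait and waitRemove follow the same shape) now releases the global lock around the sleep:

    for i := 0; i < 1000; i++ {
    	// attempt the removal / re-check the device state here ...
    	// Give up the global devmapper lock while sleeping so other
    	// devices can continue; the per-device lock added in the
    	// previous commit still protects this device in the meantime.
    	devices.Unlock()
    	time.Sleep(10 * time.Millisecond)
    	devices.Lock()
    }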
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- graphdriver/devmapper/deviceset.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/graphdriver/devmapper/deviceset.go b/graphdriver/devmapper/deviceset.go index f1a0e47d03..f6b26655a3 100644 --- a/graphdriver/devmapper/deviceset.go +++ b/graphdriver/devmapper/deviceset.go @@ -701,7 +701,7 @@ func (devices *DeviceSet) deactivateDevice(hash string) error { func (devices *DeviceSet) removeDeviceAndWait(devname string) error { var err error - for i := 0; i < 10; i++ { + for i := 0; i < 1000; i++ { devices.sawBusy = false err = removeDevice(devname) if err == nil { @@ -713,7 +713,9 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error { // If we see EBUSY it may be a transient error, // sleep a bit a retry a few times. - time.Sleep(5 * time.Millisecond) + devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() } if err != nil { return err @@ -746,7 +748,9 @@ func (devices *DeviceSet) waitRemove(devname string) error { break } - time.Sleep(1 * time.Millisecond) + devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() } if i == 1000 { return fmt.Errorf("Timeout while waiting for device %s to be removed", devname) -- cgit v1.2.1 From 188dea9e0e8b968b28a288192c357343dcc5834c Mon Sep 17 00:00:00 2001 From: Walter Leibbrandt Date: Thu, 6 Mar 2014 23:33:08 +0200 Subject: Fixed installmirrors ref Nested inline markup is not (yet) possible: http://stackoverflow.com/a/9645684 --- docs/sources/installation/ubuntulinux.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index 416d56765e..c459f33d3c 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -64,7 +64,7 @@ Installation an earlier version, you will need to follow them again. Docker is available as a Debian package, which makes installation -easy. **See the :ref:`installmirrors` section below if you are not in +easy. **See the** :ref:`installmirrors` **section below if you are not in the United States.** Other sources of the Debian packages may be faster for you to install. -- cgit v1.2.1 From 3729ece2ea1c4aad286b7535a7c137045a9da107 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 7 Mar 2014 02:20:59 +0000 Subject: improve alpha sort in mflag Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- pkg/mflag/flag.go | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go index 7125c030ed..8b3d61e816 100644 --- a/pkg/mflag/flag.go +++ b/pkg/mflag/flag.go @@ -286,9 +286,24 @@ type Flag struct { DefValue string // default value (as text); for usage message } +type flagSlice []string + +func (p flagSlice) Len() int { return len(p) } +func (p flagSlice) Less(i, j int) bool { + pi, pj := p[i], p[j] + if pi[0] == '-' { + pi = pi[1:] + } + if pj[0] == '-' { + pj = pj[1:] + } + return pi < pj +} +func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + // sortFlags returns the flags as a slice in lexicographical sorted order. 
func sortFlags(flags map[string]*Flag) []*Flag { - var list sort.StringSlice + var list flagSlice for _, f := range flags { fName := strings.TrimPrefix(f.Names[0], "#") if len(f.Names) == 1 { @@ -307,7 +322,7 @@ func sortFlags(flags map[string]*Flag) []*Flag { list = append(list, fName) } } - list.Sort() + sort.Sort(list) result := make([]*Flag, len(list)) for i, name := range list { result[i] = flags[name] -- cgit v1.2.1 From aceb10b1e51d1d9016b25fa5275e5a4f02772c57 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 6 Mar 2014 22:26:47 -0700 Subject: Resync the DCO text with upstream at http://developercertificate.org/ ``` Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. ``` Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- CONTRIBUTING.md | 63 ++++++++++++++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 25 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c03c5d0d9c..0e8b98122f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -126,33 +126,46 @@ For more details see [MAINTAINERS.md](hack/MAINTAINERS.md) The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you -can certify the below: +can certify the below (from +[developercertificate.org](http://developercertificate.org/)): ``` -Docker Developer Certificate of Origin 1.1 - -By making a contribution to the Docker Project ("Project"), I represent and -warrant that: - -a. The contribution was created in whole or in part by me and I have the right -to submit the contribution on my own behalf or on behalf of a third party who -has authorized me to submit this contribution to the Project; or - -b. The contribution is based upon previous work that, to the best of my -knowledge, is covered under an appropriate open source license and I have the -right and authorization to submit that work with modifications, whether -created in whole or in part by me, under the same open source license (unless -I am permitted to submit under a different license) that I have identified in -the contribution; or - -c. The contribution was provided directly to me by some other person who -represented and warranted (a) or (b) and I have not modified it. - -d. I understand and agree that this Project and the contribution are publicly -known and that a record of the contribution (including all personal -information I submit with it, including my sign-off record) is maintained -indefinitely and may be redistributed consistent with this Project or the open -source license(s) involved. +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. ``` then you just add a line to every git commit message: -- cgit v1.2.1 From 0a819380c52cbfcc9d674475913267a33b249e00 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 6 Mar 2014 23:16:26 -0700 Subject: Clarify how to update the docs branch in the RELEASE-CHECKLIST with concrete instructions Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/RELEASE-CHECKLIST.md | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/hack/RELEASE-CHECKLIST.md b/hack/RELEASE-CHECKLIST.md index 84a0ff70e1..d6c91a5e43 100644 --- a/hack/RELEASE-CHECKLIST.md +++ b/hack/RELEASE-CHECKLIST.md @@ -175,18 +175,22 @@ release is uploaded to get.docker.io! ### 10. Go to github to merge the `bump_$VERSION` branch into release -Don't delete the leftover branch just yet, as we will need it for the next step. +Don't forget to push that pretty blue button to delete the leftover +branch afterwards! -### 11. Go to github to merge the `bump_$VERSION` branch into docs +### 11. Update the docs branch -Merging the pull request to the docs branch will automatically -update the documentation on the "latest" revision of the docs. You -should see the updated docs 5-10 minutes after the merge. The docs -will appear on http://docs.docker.io/. For more information about -documentation releases, see `docs/README.md`. +```bash +git checkout docs +git fetch +git reset --hard origin/release +git push -f origin docs +``` -Don't forget to push that pretty blue button to delete the leftover -branch afterwards! +Updating the docs branch will automatically update the documentation on the +"latest" revision of the docs. You should see the updated docs 5-10 minutes +after the merge. The docs will appear on http://docs.docker.io/. For more +information about documentation releases, see `docs/README.md`. ### 12. 
Create a new pull request to merge release back into master -- cgit v1.2.1 From 661cf32e4f996385ec7791e93ea8edea80fd2e37 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 6 Mar 2014 23:26:18 -0700 Subject: Note within the RELEASE-CHECKLIST that "origin" is assumed to be upstream Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/RELEASE-CHECKLIST.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/hack/RELEASE-CHECKLIST.md b/hack/RELEASE-CHECKLIST.md index 84a0ff70e1..6e23489a42 100644 --- a/hack/RELEASE-CHECKLIST.md +++ b/hack/RELEASE-CHECKLIST.md @@ -6,6 +6,21 @@ So you're in charge of a Docker release? Cool. Here's what to do. If your experience deviates from this document, please document the changes to keep it up-to-date. +It is important to note that this document assumes that the git remote in your +repository that corresponds to "https://github.com/dotcloud/docker" is named +"origin". If yours is not (for example, if you've chosen to name it "upstream" +or something similar instead), be sure to adjust the listed snippets for your +local environment accordingly. If you are not sure what your upstream remote is +named, use a command like `git remote -v` to find out. + +If you don't have an upstream remote, you can add one easily using something +like: + +```bash +git remote add origin https://github.com/dotcloud/docker.git +git remote add YOURUSER git@github.com:YOURUSER/docker.git +``` + ### 1. Pull from master and create a release branch ```bash -- cgit v1.2.1 From d61938d2b879fe0ccbf46b4a7ca8dd2eb537eee9 Mon Sep 17 00:00:00 2001 From: Tom Fotherby Date: Fri, 7 Mar 2014 16:41:11 +0000 Subject: Correct Docker run --host param to --hostname --- docs/sources/reference/commandline/cli.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 2e49cd5ca5..2404e29b29 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1096,7 +1096,7 @@ image is removed. 
--cidfile="": Write the container ID to the file -d, --detach=false: Detached mode: Run container in the background, print new container id -e, --env=[]: Set environment variables - -h, --host="": Container host name + -h, --hostname="": Container host name -i, --interactive=false: Keep stdin open even if not attached --privileged=false: Give extended privileges to this container -m, --memory="": Memory limit (format: , where unit = b, k, m or g) -- cgit v1.2.1 From bc086a9cd61b2d15fbef9db3cb53c7f3650fda48 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 7 Mar 2014 20:07:17 +0000 Subject: fix string in docker images Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- server.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/server.go b/server.go index 024b8aa3c4..91c58f62e2 100644 --- a/server.go +++ b/server.go @@ -1011,7 +1011,11 @@ func (srv *Server) Containers(job *engine.Job) engine.Status { out.Set("Id", container.ID) out.SetList("Names", names[container.ID]) out.Set("Image", srv.runtime.repositories.ImageName(container.Image)) - out.Set("Command", fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " "))) + if len(container.Args) > 0 { + out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, strings.Join(container.Args, " "))) + } else { + out.Set("Command", fmt.Sprintf("\"%s\"", container.Path)) + } out.SetInt64("Created", container.Created.Unix()) out.Set("Status", container.State.String()) str, err := container.NetworkSettings.PortMappingAPI().ToListString() -- cgit v1.2.1 From 7da37fec13a0097284ffbbe05514de477cd98677 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 7 Mar 2014 23:39:03 +0000 Subject: handle capital Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- pkg/mflag/flag.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go index 8b3d61e816..f16f641341 100644 --- a/pkg/mflag/flag.go +++ b/pkg/mflag/flag.go @@ -290,7 +290,7 @@ type flagSlice []string func (p flagSlice) Len() int { return len(p) } func (p flagSlice) Less(i, j int) bool { - pi, pj := p[i], p[j] + pi, pj := strings.ToLower(p[i]), strings.ToLower(p[j]) if pi[0] == '-' { pi = pi[1:] } -- cgit v1.2.1 From d04f4d836c6b2a9266350a1b0e284949dcc6510a Mon Sep 17 00:00:00 2001 From: unclejack Date: Sat, 8 Mar 2014 17:36:18 +0200 Subject: upgrade packages after debootstrap This makes mkimage-debootstrap upgrade packages after retrieving updated lists of packages. Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- contrib/mkimage-debootstrap.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh index bf89600973..33ba7b07cb 100755 --- a/contrib/mkimage-debootstrap.sh +++ b/contrib/mkimage-debootstrap.sh @@ -219,6 +219,7 @@ if [ -z "$strictDebootstrap" ]; then # make sure our packages lists are as up to date as we can get them sudo chroot . apt-get update + sudo chroot . apt-get dist-upgrade -y fi if [ "$justTar" ]; then -- cgit v1.2.1 From 59acb8c83d3f21ae82c540774653ccd0a46f1a1f Mon Sep 17 00:00:00 2001 From: Scott Collier Date: Sat, 8 Mar 2014 16:32:00 -0600 Subject: Adding the new options to the `docker ps` documentation. 
URL of documentation page is: http://docs.docker.io/en/latest/reference/commandline/cli/#ps Docker-DCO-1.1-Signed-off-by: Scott Collier (github: scollier) --- docs/sources/reference/commandline/cli.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 2e49cd5ca5..5b43e45eb4 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -933,8 +933,14 @@ new output from the container's stdout and stderr. List containers -a, --all=false: Show all containers. Only running containers are shown by default. + --before-id="": Show only container created before Id, include non-running ones. + -l, --latest=false: Show only the latest created container, include non-running ones. + -n=-1: Show n last created containers, include non-running ones. --no-trunc=false: Don't truncate output -q, --quiet=false: Only display numeric IDs + -s, --size=false: Display sizes, not to be used with -q + --since-id="": Show only containers created since Id, include non-running ones. + Running ``docker ps`` showing 2 linked containers. -- cgit v1.2.1 From c000cb64712349141596318dea2a8de2462c8f81 Mon Sep 17 00:00:00 2001 From: Johannes 'fish' Ziemke Date: Thu, 27 Feb 2014 13:47:59 +0100 Subject: Add authenticated TLS support for API Docker-DCO-1.1-Signed-off-by: Johannes 'fish' Ziemke (github: discordianfish) --- api/client.go | 22 ++-- api/server.go | 39 +++++-- contrib/host-integration/manager.go | 2 +- docker/docker.go | 70 ++++++++++++- docs/sources/examples/https.rst | 126 +++++++++++++++++++++++ docs/sources/examples/index.rst | 1 + docs/sources/reference/commandline/cli.rst | 5 + integration/commands_test.go | 44 ++++---- integration/fixtures/https/ca.pem | 23 +++++ integration/fixtures/https/client-cert.pem | 73 +++++++++++++ integration/fixtures/https/client-key.pem | 16 +++ integration/fixtures/https/client-rogue-cert.pem | 73 +++++++++++++ integration/fixtures/https/client-rogue-key.pem | 16 +++ integration/fixtures/https/server-cert.pem | 76 ++++++++++++++ integration/fixtures/https/server-key.pem | 16 +++ integration/fixtures/https/server-rogue-cert.pem | 76 ++++++++++++++ integration/fixtures/https/server-rogue-key.pem | 16 +++ integration/https_test.go | 82 +++++++++++++++ integration/runtime_test.go | 86 +++++++++++++--- 19 files changed, 812 insertions(+), 50 deletions(-) create mode 100644 docs/sources/examples/https.rst create mode 100644 integration/fixtures/https/ca.pem create mode 100644 integration/fixtures/https/client-cert.pem create mode 100644 integration/fixtures/https/client-key.pem create mode 100644 integration/fixtures/https/client-rogue-cert.pem create mode 100644 integration/fixtures/https/client-rogue-key.pem create mode 100644 integration/fixtures/https/server-cert.pem create mode 100644 integration/fixtures/https/server-key.pem create mode 100644 integration/fixtures/https/server-rogue-cert.pem create mode 100644 integration/fixtures/https/server-rogue-key.pem create mode 100644 integration/https_test.go diff --git a/api/client.go b/api/client.go index 338a5b0de1..b8ac0f94c7 100644 --- a/api/client.go +++ b/api/client.go @@ -3,6 +3,7 @@ package api import ( "bufio" "bytes" + "crypto/tls" "encoding/base64" "encoding/json" "errors" @@ -57,8 +58,8 @@ func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) { return method.Interface().(func(...string) error), true } -func ParseCommands(proto, addr string, args ...string) error 
{ - cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, proto, addr) +func ParseCommands(proto, addr string, tlsConfig *tls.Config, args ...string) error { + cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, proto, addr, tlsConfig) if len(args) > 0 { method, exists := cli.getMethod(args[0]) @@ -2026,6 +2027,13 @@ func (cli *DockerCli) CmdLoad(args ...string) error { return nil } +func (cli *DockerCli) dial() (net.Conn, error) { + if cli.tlsConfig != nil && cli.proto != "unix" { + return tls.Dial(cli.proto, cli.addr, cli.tlsConfig) + } + return net.Dial(cli.proto, cli.addr) +} + func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { params := bytes.NewBuffer(nil) if data != nil { @@ -2078,7 +2086,7 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b } else if method == "POST" { req.Header.Set("Content-Type", "plain/text") } - dial, err := net.Dial(cli.proto, cli.addr) + dial, err := cli.dial() if err != nil { if strings.Contains(err.Error(), "connection refused") { return nil, -1, ErrConnectionRefused @@ -2140,7 +2148,7 @@ func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, h } } - dial, err := net.Dial(cli.proto, cli.addr) + dial, err := cli.dial() if err != nil { if strings.Contains(err.Error(), "connection refused") { return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") @@ -2196,7 +2204,7 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea req.Header.Set("Content-Type", "plain/text") req.Host = cli.addr - dial, err := net.Dial(cli.proto, cli.addr) + dial, err := cli.dial() if err != nil { if strings.Contains(err.Error(), "connection refused") { return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") @@ -2388,7 +2396,7 @@ func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, err return body, statusCode, nil } -func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli { +func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsConfig *tls.Config) *DockerCli { var ( isTerminal = false terminalFd uintptr @@ -2412,6 +2420,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *Doc err: err, isTerminal: isTerminal, terminalFd: terminalFd, + tlsConfig: tlsConfig, } } @@ -2424,4 +2433,5 @@ type DockerCli struct { err io.Writer isTerminal bool terminalFd uintptr + tlsConfig *tls.Config } diff --git a/api/server.go b/api/server.go index 8fc6b4f68b..d12381a70a 100644 --- a/api/server.go +++ b/api/server.go @@ -4,6 +4,8 @@ import ( "bufio" "bytes" "code.google.com/p/go.net/websocket" + "crypto/tls" + "crypto/x509" "encoding/base64" "encoding/json" "expvar" @@ -1129,9 +1131,8 @@ func changeGroup(addr string, nameOrGid string) error { // ListenAndServe sets up the required http.Server and gets it listening for // each addr passed in and does protocol specific checking. 
-func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors bool, dockerVersion string, socketGroup string) error { - r, err := createRouter(eng, logging, enableCors, dockerVersion) - +func ListenAndServe(proto, addr string, job *engine.Job) error { + r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) if err != nil { return err } @@ -1151,17 +1152,43 @@ func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors return err } + if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) { + tlsCert := job.Getenv("TlsCert") + tlsKey := job.Getenv("TlsKey") + cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) + if err != nil { + return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", + tlsCert, tlsKey, err) + } + tlsConfig := &tls.Config{ + NextProtos: []string{"http/1.1"}, + Certificates: []tls.Certificate{cert}, + } + if job.GetenvBool("TlsVerify") { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(job.Getenv("TlsCa")) + if err != nil { + return fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + l = tls.NewListener(l, tlsConfig) + } + // Basic error and sanity checking switch proto { case "tcp": - if !strings.HasPrefix(addr, "127.0.0.1") { + if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") { log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") } case "unix": if err := os.Chmod(addr, 0660); err != nil { return err } - + socketGroup := job.Getenv("SocketGroup") if socketGroup != "" { if err := changeGroup(addr, socketGroup); err != nil { if socketGroup == "docker" { @@ -1197,7 +1224,7 @@ func ServeApi(job *engine.Job) engine.Status { protoAddrParts := strings.SplitN(protoAddr, "://", 2) go func() { log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1]) - chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"), job.Getenv("SocketGroup")) + chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job) }() } diff --git a/contrib/host-integration/manager.go b/contrib/host-integration/manager.go index 6742ee4d7c..2798a5d06f 100644 --- a/contrib/host-integration/manager.go +++ b/contrib/host-integration/manager.go @@ -70,7 +70,7 @@ func main() { bufErr := bytes.NewBuffer(nil) // Instanciate the Docker CLI - cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock") + cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock", false, nil) // Retrieve the container info if err := cli.CmdInspect(flag.Arg(0)); err != nil { // As of docker v0.6.3, CmdInspect always returns nil diff --git a/docker/docker.go b/docker/docker.go index 2aa10dbe54..0c017eca9a 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -1,7 +1,10 @@ package main import ( + "crypto/tls" + "crypto/x509" "fmt" + "io/ioutil" "log" "os" "strings" @@ -16,6 +19,16 @@ import ( "github.com/dotcloud/docker/utils" ) +const ( + defaultCaFile = "ca.pem" + defaultKeyFile = "key.pem" + defaultCertFile = "cert.pem" +) + +var ( + dockerConfDir = os.Getenv("HOME") + "/.docker/" +) + func main() { if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") { // Running in init mode @@ -43,6 +56,11 
@@ func main() { flExecDriver = flag.String([]string{"e", "-exec-driver"}, "native", "Force the docker runtime to use a specific exec driver") flHosts = opts.NewListOpts(api.ValidateHost) flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available") + flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags") + flTlsVerify = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)") + flCa = flag.String([]string{"-tlscacert"}, dockerConfDir+defaultCaFile, "Trust only remotes providing a certificate signed by the CA given here") + flCert = flag.String([]string{"-tlscert"}, dockerConfDir+defaultCertFile, "Path to TLS certificate file") + flKey = flag.String([]string{"-tlskey"}, dockerConfDir+defaultKeyFile, "Path to TLS key file") ) flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers") flag.Var(&flHosts, []string{"H", "-host"}, "tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified") @@ -73,6 +91,7 @@ func main() { if *flDebug { os.Setenv("DEBUG", "1") } + if *flDaemon { if flag.NArg() != 0 { flag.Usage() @@ -140,6 +159,12 @@ func main() { job.SetenvBool("EnableCors", *flEnableCors) job.Setenv("Version", dockerversion.VERSION) job.Setenv("SocketGroup", *flSocketGroup) + + job.SetenvBool("Tls", *flTls) + job.SetenvBool("TlsVerify", *flTlsVerify) + job.Setenv("TlsCa", *flCa) + job.Setenv("TlsCert", *flCert) + job.Setenv("TlsKey", *flKey) if err := job.Run(); err != nil { log.Fatal(err) } @@ -148,14 +173,53 @@ func main() { log.Fatal("Please specify only one -H") } protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2) - if err := api.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil { - if sterr, ok := err.(*utils.StatusError); ok { + + var ( + errc error + tlsConfig tls.Config + ) + tlsConfig.InsecureSkipVerify = true + + // If we should verify the server, we need to load a trusted ca + if *flTlsVerify { + *flTls = true + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(*flCa) + if err != nil { + log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err) + } + certPool.AppendCertsFromPEM(file) + tlsConfig.RootCAs = certPool + tlsConfig.InsecureSkipVerify = false + } + + // If tls is enabled, try to load and send client certificates + if *flTls || *flTlsVerify { + _, errCert := os.Stat(*flCert) + _, errKey := os.Stat(*flKey) + if errCert == nil && errKey == nil { + *flTls = true + cert, err := tls.LoadX509KeyPair(*flCert, *flKey) + if err != nil { + log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + } + + if *flTls || *flTlsVerify { + errc = api.ParseCommands(protoAddrParts[0], protoAddrParts[1], &tlsConfig, flag.Args()...) + } else { + errc = api.ParseCommands(protoAddrParts[0], protoAddrParts[1], nil, flag.Args()...) 
+ } + if errc != nil { + if sterr, ok := errc.(*utils.StatusError); ok { if sterr.Status != "" { log.Println(sterr.Status) } os.Exit(sterr.StatusCode) } - log.Fatal(err) + log.Fatal(errc) } } }
diff --git a/docs/sources/examples/https.rst b/docs/sources/examples/https.rst new file mode 100644 index 0000000000..7a221ed951 --- /dev/null +++ b/docs/sources/examples/https.rst @@ -0,0 +1,126 @@ +:title: Docker HTTPS Setup +:description: How to set up docker with https +:keywords: docker, example, https, daemon + +.. _running_docker_https: + +Running Docker with https +========================= + +By default, Docker runs via a non-networked Unix socket. It can also optionally +communicate using an HTTP socket. + +If you need Docker reachable via the network in a safe manner, you can enable +TLS by specifying the `tlsverify` flag and pointing Docker's `tlscacert` flag to a +trusted CA certificate. + +In daemon mode, it will only allow connections from clients authenticated by a +certificate signed by that CA. In client mode, it will only connect to servers +with a certificate signed by that CA. + +.. warning:: + + Using TLS and managing a CA is an advanced topic. Please make yourself familiar + with OpenSSL, x509 and TLS before using it in production. +
+Create a CA, server and client keys with OpenSSL +------------------------------------------------ + +First, initialize the CA serial file and generate CA private and public keys: + +.. code-block:: bash + + $ echo 01 > ca.srl + $ openssl genrsa -des3 -out ca-key.pem + $ openssl req -new -x509 -days 365 -key ca-key.pem -out ca.pem +
+Now that we have a CA, you can create a server key and certificate signing request. +Make sure that `"Common Name (e.g. server FQDN or YOUR name)"` matches the hostname you will use +to connect to Docker or just use '*' for a certificate valid for any hostname: + +.. code-block:: bash + + $ openssl genrsa -des3 -out server-key.pem + $ openssl req -new -key server-key.pem -out server.csr +
+Next we're going to sign the server request with our CA: + +.. code-block:: bash + + $ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \ + -out server-cert.pem +
+For client authentication, create a client key and certificate signing request: + +.. code-block:: bash + + $ openssl genrsa -des3 -out client-key.pem + $ openssl req -new -key client-key.pem -out client.csr + +
+To make the key suitable for client authentication, create an extensions config file: + +.. code-block:: bash + + $ echo extendedKeyUsage = clientAuth > extfile.cnf +
+Now sign the client request: + +.. code-block:: bash + + $ openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem \ + -out client-cert.pem -extfile extfile.cnf +
+Finally, you need to remove the passphrase from the client and server keys: + +.. code-block:: bash + + $ openssl rsa -in server-key.pem -out server-key.pem + $ openssl rsa -in client-key.pem -out client-key.pem +
+Now you can make the Docker daemon only accept connections from clients providing +a certificate trusted by our CA: + +.. code-block:: bash + + $ sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \ + -H=0.0.0.0:4243 +
+To be able to connect to Docker and validate its certificate, you now need to provide your client key, +certificate and trusted CA: + +.. code-block:: bash + + $ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \ + -H=dns-name-of-docker-host:4243 +
+.. warning:: + + As shown in the example above, you don't have to run the ``docker`` + client with ``sudo`` or the ``docker`` group when you use + certificate authentication. That means anyone with the keys can + give any instructions to your Docker daemon, giving them root + access to the machine hosting the daemon. Guard these keys as you + would a root password! +
+Other modes +----------- +If you don't want to have complete two-way authentication, you can run Docker in +various other modes by mixing the flags. + +Daemon modes +~~~~~~~~~~~~ +- tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients +- tls, tlscert, tlskey: Do not authenticate clients + +Client modes +~~~~~~~~~~~~ +- tls: Authenticate server based on public/default CA pool +- tlsverify, tlscacert: Authenticate server based on given CA +- tls, tlscert, tlskey: Authenticate with client certificate, do not authenticate + server based on given CA +- tlsverify, tlscacert, tlscert, tlskey: Authenticate with client certificate, + authenticate server based on given CA + +The client will send its client certificate if found, so you just need to drop +your keys into `~/.docker/` as `ca.pem`, `cert.pem` and `key.pem`.
diff --git a/docs/sources/examples/index.rst b/docs/sources/examples/index.rst index cf9ed9340a..6dcc40a1c7 100644 --- a/docs/sources/examples/index.rst +++ b/docs/sources/examples/index.rst @@ -26,3 +26,4 @@ to more substantial services like those which you might find in production. using_supervisord cfengine_process_management python_web_app + https
diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index c7ce421d88..8b4bafc2bd 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -86,6 +86,11 @@ Commands -s, --storage-driver="": Force the docker runtime to use a specific storage driver -e, --exec-driver="native": Force the docker runtime to use a specific exec driver -v, --version=false: Print version information and quit + --tls=false: Use TLS; implied by tls-verify flags + --tlscacert="~/.docker/ca.pem": Trust only remotes providing a certificate signed by the CA given here + --tlscert="~/.docker/cert.pem": Path to TLS certificate file + --tlskey="~/.docker/key.pem": Path to TLS key file + --tlsverify=false: Use TLS and verify the remote (daemon: verify client, client: verify daemon) --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available The Docker daemon is the persistent process that manages containers.
Docker uses the same binary for both the diff --git a/integration/commands_test.go b/integration/commands_test.go index 9f7a41384c..838295af4f 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -120,7 +120,7 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error func TestRunHostname(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -165,7 +165,7 @@ func TestRunHostname(t *testing.T) { func TestRunWorkdir(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -210,7 +210,7 @@ func TestRunWorkdir(t *testing.T) { func TestRunWorkdirExists(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -255,7 +255,7 @@ func TestRunExit(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -308,7 +308,7 @@ func TestRunDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -354,7 +354,7 @@ func TestRunDisconnectTty(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -406,7 +406,7 @@ func TestRunAttachStdin(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -470,7 +470,7 @@ func TestRunDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -517,7 +517,7 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer 
cleanup(globalEngine, t) ch := make(chan struct{}) @@ -550,7 +550,7 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) ch = make(chan struct{}) go func() { @@ -598,7 +598,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) // Discard the CmdRun output @@ -616,7 +616,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) ch := make(chan struct{}) go func() { @@ -663,7 +663,7 @@ func TestAttachDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) go func() { @@ -732,7 +732,7 @@ func TestAttachDisconnect(t *testing.T) { func TestRunAutoRemove(t *testing.T) { t.Skip("Fixme. Skipping test for now, race condition") stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -768,7 +768,7 @@ func TestRunAutoRemove(t *testing.T) { func TestCmdLogs(t *testing.T) { t.Skip("Test not impemented") - cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil { @@ -786,7 +786,7 @@ func TestCmdLogs(t *testing.T) { // Expected behaviour: error out when attempting to bind mount non-existing source paths func TestRunErrorBindNonExistingSource(t *testing.T) { - cli := api.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -806,7 +806,7 @@ func TestRunErrorBindNonExistingSource(t *testing.T) { func TestImagesViz(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) image := buildTestImages(t, globalEngine) @@ -856,7 +856,7 @@ func TestImagesViz(t *testing.T) { func TestImagesTree(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) image := 
buildTestImages(t, globalEngine) @@ -939,7 +939,7 @@ func TestRunCidFile(t *testing.T) { } tmpCidFile := path.Join(tmpDir, "cid") - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -989,7 +989,7 @@ func TestContainerOrphaning(t *testing.T) { defer os.RemoveAll(tmpDir) // setup a CLI and server - cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) srv := mkServerFromEngine(globalEngine, t) @@ -1049,8 +1049,8 @@ func TestCmdKill(t *testing.T) { var ( stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr) - cli2 = api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli2 = api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) ) defer cleanup(globalEngine, t) diff --git a/integration/fixtures/https/ca.pem b/integration/fixtures/https/ca.pem new file mode 100644 index 0000000000..6825d6d1bd --- /dev/null +++ b/integration/fixtures/https/ca.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD +VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG +A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI +Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls +QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx +CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv +MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD +VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW +EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn +0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp +AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5 +sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV +HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi +zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE +ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt +Zxtf5lL6KSO9Y+EFwM+rju6hm5hW +-----END CERTIFICATE----- diff --git a/integration/fixtures/https/client-cert.pem b/integration/fixtures/https/client-cert.pem new file mode 100644 index 0000000000..c05ed47c2c --- /dev/null +++ b/integration/fixtures/https/client-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 14:17:54 2013 GMT + Not After : Dec 2 14:17:54 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, 
CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8: + 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc: + f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea: + b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70: + 81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25: + 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c: + aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa: + 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0: + 7e:4e:78:7d:0a:9e:8f:42:43 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40: + 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa: + 1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4: + af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab: + 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31: + f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f: + 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e: + 4a:c4 +-----BEGIN CERTIFICATE----- +MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp +ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0 +LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0 +peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB +Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73 +cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ +YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV +HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09 +q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD +QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x +ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI +Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq +hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN +AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+ +kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1 +aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ= +-----END CERTIFICATE----- diff --git a/integration/fixtures/https/client-key.pem b/integration/fixtures/https/client-key.pem new file mode 100644 index 0000000000..b5c15f8dc7 --- /dev/null +++ b/integration/fixtures/https/client-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU +9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw 
+gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+ +93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh +xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3 +FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN +OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC +4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU +SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe +iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy +v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl +qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw +qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5 +ksDFuNxAzbhl +-----END PRIVATE KEY----- diff --git a/integration/fixtures/https/client-rogue-cert.pem b/integration/fixtures/https/client-rogue-cert.pem new file mode 100644 index 0000000000..21ae4bd579 --- /dev/null +++ b/integration/fixtures/https/client-rogue-cert.pem @@ -0,0 +1,73 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 24 17:54:59 2014 GMT + Not After : Feb 22 17:54:59 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:e8:e2:2c:b8:d4:db:89:50:4f:47:1e:68:db:f7: + e4:cc:47:41:63:75:03:37:50:7a:a8:4d:27:36:d5: + 15:01:08:b6:cf:56:f7:56:6d:3d:f9:e2:8d:1a:5d: + bf:a0:24:5e:07:55:8e:d0:dc:f1:fa:19:87:1d:d6: + b6:58:82:2e:ba:69:6d:e9:d9:c8:16:0d:1d:59:7f: + f4:8e:58:10:01:3d:21:14:16:3c:ec:cd:8c:b7:0e: + e6:7b:77:b4:f9:90:a5:17:01:bb:84:c6:b2:12:87: + 70:eb:9f:6d:4f:d0:68:8b:96:c0:e7:0b:51:b4:9d: + 1d:7b:6c:7b:be:89:6b:88:8b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + Easy-RSA Generated Certificate + X509v3 Subject Key Identifier: + 9E:F8:49:D0:A2:76:30:5C:AB:2B:8A:B5:8D:C6:45:1F:A7:F8:CF:85 + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha1WithRSAEncryption + 48:76:c0:18:fa:0a:ee:4e:1a:ec:02:9d:d4:83:ca:94:54:a1: + 3f:51:2f:3e:4b:95:c3:42:9b:71:a0:4b:d9:af:47:23:b9:1c: + fb:85:ba:76:e2:09:cb:65:bb:d2:7d:44:3d:4b:67:ba:80:83: + be:a8:ed:c4:b9:ea:1a:1b:c7:59:3b:d9:5c:0d:46:d8:c9:92: + cb:10:c5:f2:1a:38:a4:aa:07:2c:e3:84:16:79:c7:95:09:e3: + 01:d2:15:a2:77:0b:8b:bf:94:04:e9:7f:c0:cd:e6:2e:64:cd: + 1e:a3:32:ec:11:cc:62:ce:c7:4e:cd:ad:48:5c:b1:b8:e9:76: + b3:f9 +-----BEGIN CERTIFICATE----- +MIIEDTCCA3agAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyNDE3NTQ1OVoXDTI0MDIyMjE3NTQ1OVowgaAxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMQ8wDQYDVQQDEwZjbGllbnQx 
+ETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9t +YWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDo4iy41NuJUE9HHmjb9+TM +R0FjdQM3UHqoTSc21RUBCLbPVvdWbT354o0aXb+gJF4HVY7Q3PH6GYcd1rZYgi66 +aW3p2cgWDR1Zf/SOWBABPSEUFjzszYy3DuZ7d7T5kKUXAbuExrISh3Drn21P0GiL +lsDnC1G0nR17bHu+iWuIiwIDAQABo4IBVTCCAVEwCQYDVR0TBAIwADAtBglghkgB +hvhCAQ0EIBYeRWFzeS1SU0EgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQW +BBSe+EnQonYwXKsrirWNxkUfp/jPhTCB0wYDVR0jBIHLMIHIgBTcpfF2207Nju+x +I1YdkoCZdDvqb6GBpKSBoTCBnjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUw +EwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2aWwgSW5jMREwDwYDVQQL +EwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1l +MR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWluggkA5yEeGEEbloMwEwYD +VR0lBAwwCgYIKwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBBQUAA4GB +AEh2wBj6Cu5OGuwCndSDypRUoT9RLz5LlcNCm3GgS9mvRyO5HPuFunbiCctlu9J9 +RD1LZ7qAg76o7cS56hobx1k72VwNRtjJkssQxfIaOKSqByzjhBZ5x5UJ4wHSFaJ3 +C4u/lATpf8DN5i5kzR6jMuwRzGLOx07NrUhcsbjpdrP5 +-----END CERTIFICATE----- diff --git a/integration/fixtures/https/client-rogue-key.pem b/integration/fixtures/https/client-rogue-key.pem new file mode 100644 index 0000000000..53c122ab70 --- /dev/null +++ b/integration/fixtures/https/client-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOjiLLjU24lQT0ce +aNv35MxHQWN1AzdQeqhNJzbVFQEIts9W91ZtPfnijRpdv6AkXgdVjtDc8foZhx3W +tliCLrppbenZyBYNHVl/9I5YEAE9IRQWPOzNjLcO5nt3tPmQpRcBu4TGshKHcOuf +bU/QaIuWwOcLUbSdHXtse76Ja4iLAgMBAAECgYADs+TmI2xCKKa6CL++D5jxrohZ +nnionnz0xBVFh+nHlG3jqgxQsXf0yydXLfpn/2wHTdLxezHVuiYt0UYg7iD0CglW ++IjcgMebzyjLeYqYOE5llPlMvhp2HoEMYJNb+7bRrZ1WCITbu+Su0w1cgA7Cs+Ej +VlfvGzN+qqnDThRUYQJBAPY0sMWZJKly8QhUmUvmcXdPczzSOf6Mm7gc5LR6wzxd +vW7syuqk50qjqVqFpN81vCV7GoDxRUWbTM9ftf7JGFkCQQDyJc/1RMygE2o+enU1 +6UBxJyclXITEYtDn8aoEpLNc7RakP1WoPUKjZOnjkcoKcIkFNkSPeCfQujrb5f3F +MkuDAkByAI/hzzmkpK5rFxEsjfX4Mve/L/DepyjrpaVY1IdWimlO1aJX6CeY7hNa +8QsYt/74s/nfvtg+lNyKIV1aLq9xAkB+WSSNgfyTeg3x08vc+Xxajmdqoz/TiQwg +OoTQL3A3iK5LvZBgXLasszcnOycFE3srcQmNItEDpGiZ3QPxJTEpAkEA45EE9NMJ +SA7EGWSFlbz4f4u4oBeiDiJRJbGGfAyVxZlpCWUjPpg9+swsWoFEOjnGYaChAMk5 +nrOdMf15T6QF7Q== +-----END PRIVATE KEY----- diff --git a/integration/fixtures/https/server-cert.pem b/integration/fixtures/https/server-cert.pem new file mode 100644 index 0000000000..08abfd1a3b --- /dev/null +++ b/integration/fixtures/https/server-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 4 (0x4) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Dec 4 15:01:20 2013 GMT + Not After : Dec 2 15:01:20 2023 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74: + e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae: + 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d: + 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32: + e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6: + 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09: + 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0: + c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76: + a8:05:32:1e:f9:95:09:14:75 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 
Subject Key Identifier: + 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06 + X509v3 Authority Key Identifier: + keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB + DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:FD:AB:EC:6A:84:27:04:A7 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b: + ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f: + 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4: + df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76: + c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3: + 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c: + 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c: + 15:42 +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv +cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l +MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv +bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE +ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER +MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h +aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b +LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3 +cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch +M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG ++EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl +cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw +gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ +BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw +EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL +zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn +mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX +dDBV9m4gmmweCbQMFUI= +-----END CERTIFICATE----- diff --git a/integration/fixtures/https/server-key.pem b/integration/fixtures/https/server-key.pem new file mode 100644 index 0000000000..c269320ef0 --- /dev/null +++ b/integration/fixtures/https/server-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx +0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y +4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+ +lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ +wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+ +wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS +IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5 +4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP +WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq ++0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv +HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj ++tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc 
+BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW +5nCwDu5ZTP+khltg +-----END PRIVATE KEY----- diff --git a/integration/fixtures/https/server-rogue-cert.pem b/integration/fixtures/https/server-rogue-cert.pem new file mode 100644 index 0000000000..28feba6656 --- /dev/null +++ b/integration/fixtures/https/server-rogue-cert.pem @@ -0,0 +1,76 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 3 (0x3) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain + Validity + Not Before: Feb 28 18:49:31 2014 GMT + Not After : Feb 26 18:49:31 2024 GMT + Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=localhost/name=changeme/emailAddress=mail@host.domain + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:d1:08:58:24:60:a1:69:65:4b:76:46:8f:88:75: + 7c:49:3a:d8:03:cc:5b:58:c5:d1:bb:e5:f9:54:b9: + 75:65:df:7e:bb:fb:54:d4:b2:e9:6f:58:a2:a4:84: + 43:94:77:24:81:38:36:36:f0:66:65:26:e5:5b:2a: + 14:1c:a9:ae:57:7f:75:00:23:14:4b:61:58:e4:82: + aa:15:97:94:bd:50:35:0d:5d:18:18:ed:10:6a:bb: + d3:64:5a:eb:36:98:5b:58:a7:fe:67:48:c1:6c:3f: + 51:2f:02:65:96:54:77:9b:34:f9:a7:d2:63:54:6a: + 9e:02:5c:be:65:98:a4:b4:b5 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + Easy-RSA Generated Server Certificate + X509v3 Subject Key Identifier: + 1F:E0:57:CA:CB:76:C9:C4:86:B9:EA:69:17:C0:F3:51:CE:95:40:EC + X509v3 Authority Key Identifier: + keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F + DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain + serial:E7:21:1E:18:41:1B:96:83 + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha1WithRSAEncryption + 04:93:0e:28:01:94:18:f0:8c:7c:d3:0c:ad:e9:b7:46:b1:30: + 65:ed:68:7c:8c:91:cd:1a:86:66:87:4a:4f:c0:97:bc:f7:85: + 4b:38:79:31:b2:65:88:b1:76:16:9e:80:93:38:f4:b9:eb:65: + 00:6d:bb:89:e0:a1:bf:95:5e:80:13:8e:01:73:d3:f1:08:73: + 85:a5:33:75:0b:42:8a:a3:07:09:35:ef:d7:c6:58:eb:60:a3: + 06:89:a0:53:99:e2:aa:41:90:e0:1a:d2:12:4b:48:7d:c3:9c: + ad:bd:0e:5e:5f:f7:09:0c:5d:7c:86:24:dd:92:d5:b3:14:06: + c7:9f +-----BEGIN CERTIFICATE----- +MIIEKjCCA5OgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx +CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2 +aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP +BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu +MB4XDTE0MDIyODE4NDkzMVoXDTI0MDIyNjE4NDkzMVowgaMxCzAJBgNVBAYTAlVT +MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMRIwEAYDVQQDEwlsb2NhbGhv +c3QxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3Qu +ZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRCFgkYKFpZUt2Ro+I +dXxJOtgDzFtYxdG75flUuXVl3367+1TUsulvWKKkhEOUdySBODY28GZlJuVbKhQc +qa5Xf3UAIxRLYVjkgqoVl5S9UDUNXRgY7RBqu9NkWus2mFtYp/5nSMFsP1EvAmWW +VHebNPmn0mNUap4CXL5lmKS0tQIDAQABo4IBbzCCAWswCQYDVR0TBAIwADARBglg +hkgBhvhCAQEEBAMCBkAwNAYJYIZIAYb4QgENBCcWJUVhc3ktUlNBIEdlbmVyYXRl +ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFB/gV8rLdsnEhrnqaRfA81HO +lUDsMIHTBgNVHSMEgcswgciAFNyl8XbbTs2O77EjVh2SgJl0O+pvoYGkpIGhMIGe +MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNj 
+bzERMA8GA1UEChMIRXZpbCBJbmMxETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD +EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h +aWxAaG9zdC5kb21haW6CCQDnIR4YQRuWgzATBgNVHSUEDDAKBggrBgEFBQcDATAL +BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEABJMOKAGUGPCMfNMMrem3RrEw +Ze1ofIyRzRqGZodKT8CXvPeFSzh5MbJliLF2Fp6Akzj0uetlAG27ieChv5VegBOO +AXPT8QhzhaUzdQtCiqMHCTXv18ZY62CjBomgU5niqkGQ4BrSEktIfcOcrb0OXl/3 +CQxdfIYk3ZLVsxQGx58= +-----END CERTIFICATE----- diff --git a/integration/fixtures/https/server-rogue-key.pem b/integration/fixtures/https/server-rogue-key.pem new file mode 100644 index 0000000000..10f7c65001 --- /dev/null +++ b/integration/fixtures/https/server-rogue-key.pem @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBANEIWCRgoWllS3ZG +j4h1fEk62APMW1jF0bvl+VS5dWXffrv7VNSy6W9YoqSEQ5R3JIE4NjbwZmUm5Vsq +FByprld/dQAjFEthWOSCqhWXlL1QNQ1dGBjtEGq702Ra6zaYW1in/mdIwWw/US8C +ZZZUd5s0+afSY1RqngJcvmWYpLS1AgMBAAECgYAJXh9dGfuB1qlIFqduDR3RxlJR +8UGSu+LHUeoXkuwg8aAjWoMVuSLe+5DmYIsKx0AajmNXmPRtyg1zRXJ7SltmubJ8 +6qQVDsRk6biMdkpkl6a9Gk2av40psD9/VPGxagEoop7IKYhf3AeKPvPiwVB2qFrl +1aYMZm0aMR55pgRajQJBAOk8IsJDf0beooDZXVdv/oe4hcbM9fxO8Cn3qzoGImqD +37LL+PCzDP7AEV3fk43SsZDeSk+LDX+h0o9nPyhzHasCQQDlb3aDgcQY9NaGLUWO +moOCB3148eBVcAwCocu+OSkf7sbQdvXxgThBOrZl11wwRIMQqh99c2yeUwj+tELl +3VcfAkBZTiNpCvtDIaBLge9RuZpWUXs3wec2cutWxnSTxSGMc25GQf/R+l0xdk2w +ChmvpktDUzpU9sN2aXn8WuY+EMX9AkEApbLpUbKPUELLB958RLA819TW/lkZXjrs +wZ3eSoR3ufM1rOqtVvyvBxUDE+wETWu9iHSFB5Ir2PA5J9JCGkbPmwJAFI1ndfBj +iuyU93nFX0p+JE2wVHKx4dMzKCearNKiJh/lGDtUq3REGgamTNUnG8RAITUbxFs+ +Z1hrIq8xYl2LOQ== +-----END PRIVATE KEY----- diff --git a/integration/https_test.go b/integration/https_test.go new file mode 100644 index 0000000000..a1c855e1a9 --- /dev/null +++ b/integration/https_test.go @@ -0,0 +1,82 @@ +package docker + +import ( + "crypto/tls" + "crypto/x509" + "github.com/dotcloud/docker/api" + "io/ioutil" + "testing" + "time" +) + +const ( + errBadCertificate = "remote error: bad certificate" + errCaUnknown = "x509: certificate signed by unknown authority" +) + +func getTlsConfig(certFile, keyFile string, t *testing.T) *tls.Config { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile("fixtures/https/ca.pem") + if err != nil { + t.Fatal(err) + } + certPool.AppendCertsFromPEM(file) + + cert, err := tls.LoadX509KeyPair("fixtures/https/"+certFile, "fixtures/https/"+keyFile) + if err != nil { + t.Fatalf("Couldn't load X509 key pair: %s", err) + } + tlsConfig := &tls.Config{ + RootCAs: certPool, + Certificates: []tls.Certificate{cert}, + } + return tlsConfig +} + +// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint +func TestHttpsInfo(t *testing.T) { + cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, + testDaemonHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t)) + + setTimeout(t, "Reading command output time out", 10*time.Second, func() { + if err := cli.CmdInfo(); err != nil { + t.Fatal(err) + } + }) +} + +// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint +// by using a rogue client certificate and checks that it fails with the expected error. 
+func TestHttpsInfoRogueCert(t *testing.T) { + cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, + testDaemonHttpsAddr, getTlsConfig("client-rogue-cert.pem", "client-rogue-key.pem", t)) + + setTimeout(t, "Reading command output time out", 10*time.Second, func() { + err := cli.CmdInfo() + if err == nil { + t.Fatal("Expected error but got nil") + } + if err.Error() != errBadCertificate { + t.Fatalf("Expected error: %s, got instead: %s", errBadCertificate, err) + } + }) +} + +// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint +// which provides a rogue server certificate and checks that it fails with the expected error +func TestHttpsInfoRogueServerCert(t *testing.T) { + cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, + testDaemonRogueHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t)) + + setTimeout(t, "Reading command output time out", 10*time.Second, func() { + err := cli.CmdInfo() + if err == nil { + t.Fatal("Expected error but got nil") + } + + if err.Error() != errCaUnknown { + t.Fatalf("Expected error: %s, got instead: %s", errBadCertificate, err) + } + + }) +} diff --git a/integration/runtime_test.go b/integration/runtime_test.go index 6003c89b51..07d0fc285b 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -24,21 +24,26 @@ import ( ) const ( - unitTestImageName = "docker-test-image" - unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0 - unitTestImageIDShort = "83599e29c455" - unitTestNetworkBridge = "testdockbr0" - unitTestStoreBase = "/var/lib/docker/unit-tests" - testDaemonAddr = "127.0.0.1:4270" - testDaemonProto = "tcp" + unitTestImageName = "docker-test-image" + unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0 + unitTestImageIDShort = "83599e29c455" + unitTestNetworkBridge = "testdockbr0" + unitTestStoreBase = "/var/lib/docker/unit-tests" + testDaemonAddr = "127.0.0.1:4270" + testDaemonProto = "tcp" + testDaemonHttpsProto = "tcp" + testDaemonHttpsAddr = "localhost:4271" + testDaemonRogueHttpsAddr = "localhost:4272" ) var ( // FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted. 
- globalRuntime *docker.Runtime - globalEngine *engine.Engine - startFds int - startGoroutines int + globalRuntime *docker.Runtime + globalEngine *engine.Engine + globalHttpsEngine *engine.Engine + globalRogueHttpsEngine *engine.Engine + startFds int + startGoroutines int ) // FIXME: nuke() is deprecated by Runtime.Nuke() @@ -117,8 +122,10 @@ func init() { // (no tests are run directly in the base) setupBaseImage() - // Create the "global runtime" with a long-running daemon for integration tests + // Create the "global runtime" with a long-running daemons for integration tests spawnGlobalDaemon() + spawnLegitHttpsDaemon() + spawnRogueHttpsDaemon() startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine() } @@ -170,6 +177,61 @@ func spawnGlobalDaemon() { } } +func spawnLegitHttpsDaemon() { + if globalHttpsEngine != nil { + return + } + globalHttpsEngine = spawnHttpsDaemon(testDaemonHttpsAddr, "fixtures/https/ca.pem", + "fixtures/https/server-cert.pem", "fixtures/https/server-key.pem") +} + +func spawnRogueHttpsDaemon() { + if globalRogueHttpsEngine != nil { + return + } + globalRogueHttpsEngine = spawnHttpsDaemon(testDaemonRogueHttpsAddr, "fixtures/https/ca.pem", + "fixtures/https/server-rogue-cert.pem", "fixtures/https/server-rogue-key.pem") +} + +func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine { + t := log.New(os.Stderr, "", 0) + root, err := newTestDirectory(unitTestStoreBase) + if err != nil { + t.Fatal(err) + } + // FIXME: here we don't use NewTestEngine because it calls initserver with Autorestart=false, + // and we want to set it to true. + + eng := newTestEngine(t, true, root) + + // Spawn a Daemon + go func() { + utils.Debugf("Spawning https daemon for integration tests") + listenURL := &url.URL{ + Scheme: testDaemonHttpsProto, + Host: addr, + } + job := eng.Job("serveapi", listenURL.String()) + job.SetenvBool("Logging", true) + job.SetenvBool("Tls", true) + job.SetenvBool("TlsVerify", true) + job.Setenv("TlsCa", cacert) + job.Setenv("TlsCert", cert) + job.Setenv("TlsKey", key) + if err := job.Run(); err != nil { + log.Fatalf("Unable to spawn the test daemon: %s", err) + } + }() + + // Give some time to ListenAndServer to actually start + time.Sleep(time.Second) + + if err := eng.Job("acceptconnections").Run(); err != nil { + log.Fatalf("Unable to accept connections for test api: %s", err) + } + return eng +} + // FIXME: test that ImagePull(json=true) send correct json output func GetTestImage(runtime *docker.Runtime) *docker.Image { -- cgit v1.2.1 From 694c8e7dfca16fabf63e6fdcdce4151c8440a7a3 Mon Sep 17 00:00:00 2001 From: Scott Collier Date: Sat, 8 Mar 2014 17:23:06 -0600 Subject: Adding options to `docker restart` documentation URL of page is: http://docs.docker.io/en/latest/reference/commandline/cli/#restart Docker-DCO-1.1-Signed-off-by: Scott Collier (github: scollier) --- docs/sources/reference/commandline/cli.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 5b43e45eb4..6fe9b3dfea 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -991,6 +991,8 @@ The last container is marked as a ``Ghost`` container. It is a container that wa Restart a running container + -t, --time=10: Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10 + .. 
_cli_rm: ``rm`` -- cgit v1.2.1 From df9b99aca0b8a65da866aa5696b9f45df3b92e50 Mon Sep 17 00:00:00 2001 From: Fabio Falci Date: Sun, 9 Mar 2014 01:49:36 +0000 Subject: Remove manual http cookie management Since docker uses cookiejar it doesn't need to manage cookies manually anymore. Managing cookie was duplicating it. Docker-DCO-1.1-Signed-off-by: Fabio Falci (github: fabiofalci) --- registry/registry.go | 32 +++++++++----------------------- 1 file changed, 9 insertions(+), 23 deletions(-) diff --git a/registry/registry.go b/registry/registry.go index 543dcea383..cc2e985c31 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -149,20 +149,6 @@ func ExpandAndVerifyRegistryUrl(hostname string) (string, error) { return endpoint, nil } -func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) { - for _, cookie := range c.Jar.Cookies(req.URL) { - req.AddCookie(cookie) - } - res, err := c.Do(req) - if err != nil { - return nil, err - } - if len(res.Cookies()) > 0 { - c.Jar.SetCookies(req.URL, res.Cookies()) - } - return res, err -} - func setTokenAuth(req *http.Request, token []string) { if req.Header.Get("Authorization") == "" { // Don't override req.Header.Set("Authorization", "Token "+strings.Join(token, ",")) @@ -177,7 +163,7 @@ func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]s return nil, err } setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) + res, err := r.client.Do(req) if err != nil { return nil, err } @@ -212,7 +198,7 @@ func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) boo return false } setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) + res, err := r.client.Do(req) if err != nil { utils.Errorf("Error in LookupRemoteImage %s", err) return false @@ -229,7 +215,7 @@ func (r *Registry) GetRemoteImageJSON(imgID, registry string, token []string) ([ return nil, -1, fmt.Errorf("Failed to download json: %s", err) } setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) + res, err := r.client.Do(req) if err != nil { return nil, -1, fmt.Errorf("Failed to download json: %s", err) } @@ -256,7 +242,7 @@ func (r *Registry) GetRemoteImageLayer(imgID, registry string, token []string) ( return nil, fmt.Errorf("Error while getting from the server: %s\n", err) } setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) + res, err := r.client.Do(req) if err != nil { return nil, err } @@ -282,7 +268,7 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [ return nil, err } setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) + res, err := r.client.Do(req) if err != nil { return nil, err } @@ -388,7 +374,7 @@ func (r *Registry) PushImageChecksumRegistry(imgData *ImgData, registry string, req.Header.Set("X-Docker-Checksum", imgData.Checksum) req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) - res, err := doWithCookies(r.client, req) + res, err := r.client.Do(req) if err != nil { return fmt.Errorf("Failed to upload metadata: %s", err) } @@ -424,7 +410,7 @@ func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regis req.Header.Add("Content-type", "application/json") setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) + res, err := r.client.Do(req) if err != nil { return fmt.Errorf("Failed to upload metadata: %s", err) } @@ -460,7 +446,7 @@ func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registr req.ContentLength = -1 req.TransferEncoding = 
[]string{"chunked"} setTokenAuth(req, token) - res, err := doWithCookies(r.client, req) + res, err := r.client.Do(req) if err != nil { return "", "", fmt.Errorf("Failed to upload layer: %s", err) } @@ -497,7 +483,7 @@ func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token req.Header.Add("Content-type", "application/json") setTokenAuth(req, token) req.ContentLength = int64(len(revision)) - res, err := doWithCookies(r.client, req) + res, err := r.client.Do(req) if err != nil { return err } -- cgit v1.2.1 From b24be254fa042e62e1a08af9105ac92da13a6336 Mon Sep 17 00:00:00 2001 From: Rovanion Luckey Date: Fri, 7 Mar 2014 15:51:52 +0100 Subject: All caps variables in normal bash should be avoided not to accidentally collide with environment variables. Docker-DCO-1.1-Signed-off-by: Rovanion Luckey (github: Rovanion) --- docs/sources/examples/hello_world.rst | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/sources/examples/hello_world.rst b/docs/sources/examples/hello_world.rst index 63362e7d7b..b8538debb9 100644 --- a/docs/sources/examples/hello_world.rst +++ b/docs/sources/examples/hello_world.rst @@ -52,8 +52,8 @@ This command will run a simple ``echo`` command, that will echo ``hello world`` **Explanation:** -- **"sudo"** execute the following commands as user *root* -- **"docker run"** run a command in a new container +- **"sudo"** execute the following commands as user *root* +- **"docker run"** run a command in a new container - **"busybox"** is the image we are running the command in. - **"/bin/echo"** is the command we want to run in the container - **"hello world"** is the input for the echo command @@ -67,9 +67,9 @@ See the example in action .. raw:: html @@ -92,7 +92,7 @@ we stop it. .. code-block:: bash - CONTAINER_ID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done") + container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done") We are going to run a simple hello world daemon in a new container made from the ``ubuntu`` image. @@ -104,22 +104,22 @@ made from the ``ubuntu`` image. - **"while true; do echo hello world; sleep 1; done"** is the mini script we want to run, that will just print hello world once a second until we stop it. -- **$CONTAINER_ID** the output of the run command will return a +- **$container_id** the output of the run command will return a container id, we can use in future commands to see what is going on with this process. .. code-block:: bash - sudo docker logs $CONTAINER_ID + sudo docker logs $container_id Check the logs make sure it is working correctly. - **"docker logs**" This will return the logs for a container -- **$CONTAINER_ID** The Id of the container we want the logs for. +- **$container_id** The Id of the container we want the logs for. .. code-block:: bash - sudo docker attach -sig-proxy=false $CONTAINER_ID + sudo docker attach -sig-proxy=false $container_id Attach to the container to see the results in real-time. @@ -127,7 +127,7 @@ Attach to the container to see the results in real-time. process to see what is going on. - **"-sig-proxy=false"** Do not forward signals to the container; allows us to exit the attachment using Control-C without stopping the container. -- **$CONTAINER_ID** The Id of the container we want to attach too. +- **$container_id** The Id of the container we want to attach too. Exit from the container attachment by pressing Control-C. 
@@ -141,12 +141,12 @@ Check the process list to make sure it is running. .. code-block:: bash - sudo docker stop $CONTAINER_ID + sudo docker stop $container_id Stop the container, since we don't need it anymore. - **"docker stop"** This stops a container -- **$CONTAINER_ID** The Id of the container we want to stop. +- **$container_id** The Id of the container we want to stop. .. code-block:: bash @@ -162,9 +162,9 @@ See the example in action .. raw:: html -- cgit v1.2.1 From 12bd83182dff24a800eee6e7e93beeab30b480b7 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 7 Mar 2014 15:22:23 -0800 Subject: Move daemon config into sub pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- config.go | 67 -------------------------------------------------- daemonconfig/config.go | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++ runtime.go | 7 +++--- server.go | 5 ++-- 4 files changed, 74 insertions(+), 72 deletions(-) delete mode 100644 config.go create mode 100644 daemonconfig/config.go diff --git a/config.go b/config.go deleted file mode 100644 index 19aad9ed4a..0000000000 --- a/config.go +++ /dev/null @@ -1,67 +0,0 @@ -package docker - -import ( - "net" - - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/networkdriver" -) - -const ( - defaultNetworkMtu = 1500 - DisableNetworkBridge = "none" -) - -// FIXME: separate runtime configuration from http api configuration -type DaemonConfig struct { - Pidfile string - Root string - AutoRestart bool - Dns []string - EnableIptables bool - EnableIpForward bool - DefaultIp net.IP - BridgeIface string - BridgeIP string - InterContainerCommunication bool - GraphDriver string - ExecDriver string - Mtu int - DisableNetwork bool -} - -// ConfigFromJob creates and returns a new DaemonConfig object -// by parsing the contents of a job's environment. 
-func DaemonConfigFromJob(job *engine.Job) *DaemonConfig { - config := &DaemonConfig{ - Pidfile: job.Getenv("Pidfile"), - Root: job.Getenv("Root"), - AutoRestart: job.GetenvBool("AutoRestart"), - EnableIptables: job.GetenvBool("EnableIptables"), - EnableIpForward: job.GetenvBool("EnableIpForward"), - BridgeIP: job.Getenv("BridgeIP"), - BridgeIface: job.Getenv("BridgeIface"), - DefaultIp: net.ParseIP(job.Getenv("DefaultIp")), - InterContainerCommunication: job.GetenvBool("InterContainerCommunication"), - GraphDriver: job.Getenv("GraphDriver"), - ExecDriver: job.Getenv("ExecDriver"), - } - if dns := job.GetenvList("Dns"); dns != nil { - config.Dns = dns - } - if mtu := job.GetenvInt("Mtu"); mtu != 0 { - config.Mtu = mtu - } else { - config.Mtu = GetDefaultNetworkMtu() - } - config.DisableNetwork = config.BridgeIface == DisableNetworkBridge - - return config -} - -func GetDefaultNetworkMtu() int { - if iface, err := networkdriver.GetDefaultRouteIface(); err == nil { - return iface.MTU - } - return defaultNetworkMtu -} diff --git a/daemonconfig/config.go b/daemonconfig/config.go new file mode 100644 index 0000000000..0aee7e78ba --- /dev/null +++ b/daemonconfig/config.go @@ -0,0 +1,67 @@ +package daemonconfig + +import ( + "net" + + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/networkdriver" +) + +const ( + defaultNetworkMtu = 1500 + DisableNetworkBridge = "none" +) + +// FIXME: separate runtime configuration from http api configuration +type Config struct { + Pidfile string + Root string + AutoRestart bool + Dns []string + EnableIptables bool + EnableIpForward bool + DefaultIp net.IP + BridgeIface string + BridgeIP string + InterContainerCommunication bool + GraphDriver string + ExecDriver string + Mtu int + DisableNetwork bool +} + +// ConfigFromJob creates and returns a new DaemonConfig object +// by parsing the contents of a job's environment. 
+func ConfigFromJob(job *engine.Job) *Config { + config := &Config{ + Pidfile: job.Getenv("Pidfile"), + Root: job.Getenv("Root"), + AutoRestart: job.GetenvBool("AutoRestart"), + EnableIptables: job.GetenvBool("EnableIptables"), + EnableIpForward: job.GetenvBool("EnableIpForward"), + BridgeIP: job.Getenv("BridgeIP"), + BridgeIface: job.Getenv("BridgeIface"), + DefaultIp: net.ParseIP(job.Getenv("DefaultIp")), + InterContainerCommunication: job.GetenvBool("InterContainerCommunication"), + GraphDriver: job.Getenv("GraphDriver"), + ExecDriver: job.Getenv("ExecDriver"), + } + if dns := job.GetenvList("Dns"); dns != nil { + config.Dns = dns + } + if mtu := job.GetenvInt("Mtu"); mtu != 0 { + config.Mtu = mtu + } else { + config.Mtu = GetDefaultNetworkMtu() + } + config.DisableNetwork = config.BridgeIface == DisableNetworkBridge + + return config +} + +func GetDefaultNetworkMtu() int { + if iface, err := networkdriver.GetDefaultRouteIface(); err == nil { + return iface.MTU + } + return defaultNetworkMtu +} diff --git a/runtime.go b/runtime.go index 84f11e87b2..d1aeef4f97 100644 --- a/runtime.go +++ b/runtime.go @@ -4,6 +4,7 @@ import ( "container/list" "fmt" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/daemonconfig" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/execdriver" @@ -53,7 +54,7 @@ type Runtime struct { volumes *Graph srv *Server eng *engine.Engine - config *DaemonConfig + config *daemonconfig.Config containerGraph *graphdb.Database driver graphdriver.Driver execDriver execdriver.Driver @@ -624,7 +625,7 @@ func (runtime *Runtime) RegisterLink(parent, child *Container, alias string) err } // FIXME: harmonize with NewGraph() -func NewRuntime(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) { +func NewRuntime(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) { runtime, err := NewRuntimeFromDirectory(config, eng) if err != nil { return nil, err @@ -632,7 +633,7 @@ func NewRuntime(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) { return runtime, nil } -func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) { +func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) { // Set the default driver graphdriver.DefaultDriver = config.GraphDriver diff --git a/server.go b/server.go index d824d78d7a..70ee7f241b 100644 --- a/server.go +++ b/server.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/auth" + "github.com/dotcloud/docker/daemonconfig" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/graphdb" @@ -34,7 +35,7 @@ import ( // The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup. 
func InitServer(job *engine.Job) engine.Status { job.Logf("Creating server") - srv, err := NewServer(job.Eng, DaemonConfigFromJob(job)) + srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job)) if err != nil { return job.Error(err) } @@ -2318,7 +2319,7 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { return job.Errorf("No such container: %s", name) } -func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) { +func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) { runtime, err := NewRuntime(config, eng) if err != nil { return nil, err -- cgit v1.2.1 From 82a5439835b0bff3ab3dfb169415948dae504d56 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 7 Mar 2014 17:36:47 -0800 Subject: Move image into sub pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- container.go | 3 +- graph.go | 59 ++++---- image.go | 318 ------------------------------------------ image/graph.go | 11 ++ image/image.go | 292 ++++++++++++++++++++++++++++++++++++++ integration/api_test.go | 3 +- integration/buildfile_test.go | 3 +- integration/commands_test.go | 3 +- integration/graph_test.go | 39 +++--- integration/runtime_test.go | 3 +- runtime.go | 5 +- server.go | 29 ++-- tags.go | 5 +- tags_unit_test.go | 3 +- utils/utils.go | 29 ++++ 15 files changed, 415 insertions(+), 390 deletions(-) delete mode 100644 image.go create mode 100644 image/graph.go create mode 100644 image/image.go diff --git a/container.go b/container.go index 50332f27de..9c1a28c98a 100644 --- a/container.go +++ b/container.go @@ -8,6 +8,7 @@ import ( "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/links" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/runconfig" @@ -992,7 +993,7 @@ func (container *Container) Changes() ([]archive.Change, error) { return container.runtime.Changes(container) } -func (container *Container) GetImage() (*Image, error) { +func (container *Container) GetImage() (*image.Image, error) { if container.runtime == nil { return nil, fmt.Errorf("Can't get image of unregistered container") } diff --git a/graph.go b/graph.go index 43af2c278a..d164760d4c 100644 --- a/graph.go +++ b/graph.go @@ -5,6 +5,7 @@ import ( "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" @@ -79,20 +80,20 @@ func (graph *Graph) Exists(id string) bool { } // Get returns the image with the given id, or an error if the image doesn't exist. 
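
For illustration only: graph.Get in this patch treats a negative Size as "not computed yet". LoadImage sets Size to -1 when the layersize file is missing, and Get then computes the size once and persists it with SaveSize so later loads are cheap. A tiny sketch of that lazy-cache idea, with hypothetical names:

package main

import "fmt"

type entry struct {
	size int64 // -1 means "unknown, compute on first use"
}

func sizeOf(e *entry, compute func() int64) int64 {
	if e.size < 0 {
		e.size = compute() // compute once, then cache
	}
	return e.size
}

func main() {
	e := &entry{size: -1}
	calls := 0
	compute := func() int64 { calls++; return 42 }
	fmt.Println(sizeOf(e, compute), sizeOf(e, compute), calls) // 42 42 1
}
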
-func (graph *Graph) Get(name string) (*Image, error) { +func (graph *Graph) Get(name string) (*image.Image, error) { id, err := graph.idIndex.Get(name) if err != nil { return nil, err } // FIXME: return nil when the image doesn't exist, instead of an error - img, err := LoadImage(graph.imageRoot(id)) + img, err := image.LoadImage(graph.ImageRoot(id)) if err != nil { return nil, err } if img.ID != id { return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID) } - img.graph = graph + img.SetGraph(graph) if img.Size < 0 { rootfs, err := graph.driver.Get(img.ID) @@ -119,7 +120,7 @@ func (graph *Graph) Get(name string) (*Image, error) { } img.Size = size - if err := img.SaveSize(graph.imageRoot(id)); err != nil { + if err := img.SaveSize(graph.ImageRoot(id)); err != nil { return nil, err } } @@ -127,9 +128,9 @@ func (graph *Graph) Get(name string) (*Image, error) { } // Create creates a new image and registers it in the graph. -func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container, comment, author string, config *runconfig.Config) (*Image, error) { - img := &Image{ - ID: GenerateID(), +func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container, comment, author string, config *runconfig.Config) (*image.Image, error) { + img := &image.Image{ + ID: utils.GenerateRandomID(), Comment: comment, Created: time.Now().UTC(), DockerVersion: dockerversion.VERSION, @@ -151,7 +152,7 @@ func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container // Register imports a pre-existing image into the graph. // FIXME: pass img as first argument -func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, img *Image) (err error) { +func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, img *image.Image) (err error) { defer func() { // If any error occurs, remove the new dir from the driver. // Don't check for errors since the dir might not have been created. @@ -160,7 +161,7 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, i graph.driver.Remove(img.ID) } }() - if err := ValidateID(img.ID); err != nil { + if err := utils.ValidateID(img.ID); err != nil { return err } // (This is a convenience to save time. Race conditions are taken care of by os.Rename) @@ -171,7 +172,7 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, i // Ensure that the image root does not exist on the filesystem // when it is not registered in the graph. 
// This is common when you switch from one graph driver to another - if err := os.RemoveAll(graph.imageRoot(img.ID)); err != nil && !os.IsNotExist(err) { + if err := os.RemoveAll(graph.ImageRoot(img.ID)); err != nil && !os.IsNotExist(err) { return err } @@ -197,12 +198,12 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, i return fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) } defer graph.driver.Put(img.ID) - img.graph = graph - if err := StoreImage(img, jsonData, layerData, tmp, rootfs); err != nil { + img.SetGraph(graph) + if err := image.StoreImage(img, jsonData, layerData, tmp, rootfs); err != nil { return err } // Commit - if err := os.Rename(tmp, graph.imageRoot(img.ID)); err != nil { + if err := os.Rename(tmp, graph.ImageRoot(img.ID)); err != nil { return err } graph.idIndex.Add(img.ID) @@ -233,7 +234,7 @@ func (graph *Graph) TempLayerArchive(id string, compression archive.Compression, // Mktemp creates a temporary sub-directory inside the graph's filesystem. func (graph *Graph) Mktemp(id string) (string, error) { - dir := path.Join(graph.Root, "_tmp", GenerateID()) + dir := path.Join(graph.Root, "_tmp", utils.GenerateRandomID()) if err := os.MkdirAll(dir, 0700); err != nil { return "", err } @@ -320,7 +321,7 @@ func (graph *Graph) Delete(name string) error { return err } graph.idIndex.Delete(id) - err = os.Rename(graph.imageRoot(id), tmp) + err = os.Rename(graph.ImageRoot(id), tmp) if err != nil { return err } @@ -331,9 +332,9 @@ func (graph *Graph) Delete(name string) error { } // Map returns a list of all images in the graph, addressable by ID. -func (graph *Graph) Map() (map[string]*Image, error) { - images := make(map[string]*Image) - err := graph.walkAll(func(image *Image) { +func (graph *Graph) Map() (map[string]*image.Image, error) { + images := make(map[string]*image.Image) + err := graph.walkAll(func(image *image.Image) { images[image.ID] = image }) if err != nil { @@ -344,7 +345,7 @@ func (graph *Graph) Map() (map[string]*Image, error) { // walkAll iterates over each image in the graph, and passes it to a handler. // The walking order is undetermined. -func (graph *Graph) walkAll(handler func(*Image)) error { +func (graph *Graph) walkAll(handler func(*image.Image)) error { files, err := ioutil.ReadDir(graph.Root) if err != nil { return err @@ -364,17 +365,17 @@ func (graph *Graph) walkAll(handler func(*Image)) error { // If an image of id ID has 3 children images, then the value for key ID // will be a list of 3 images. // If an image has no children, it will not have an entry in the table. -func (graph *Graph) ByParent() (map[string][]*Image, error) { - byParent := make(map[string][]*Image) - err := graph.walkAll(func(image *Image) { - parent, err := graph.Get(image.Parent) +func (graph *Graph) ByParent() (map[string][]*image.Image, error) { + byParent := make(map[string][]*image.Image) + err := graph.walkAll(func(img *image.Image) { + parent, err := graph.Get(img.Parent) if err != nil { return } if children, exists := byParent[parent.ID]; exists { - byParent[parent.ID] = append(children, image) + byParent[parent.ID] = append(children, img) } else { - byParent[parent.ID] = []*Image{image} + byParent[parent.ID] = []*image.Image{img} } }) return byParent, err @@ -382,13 +383,13 @@ func (graph *Graph) ByParent() (map[string][]*Image, error) { // Heads returns all heads in the graph, keyed by id. // A head is an image which is not the parent of another image in the graph. 
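
A side note, not part of the patch: ByParent and Heads in this file invert the parent relationship. ByParent groups images under their parent's ID, and a head is any image that never shows up as someone's parent. The same idea reduced to plain string IDs, as a rough sketch:

package main

import "fmt"

// heads returns the IDs that are not the parent of any other ID.
func heads(parentOf map[string]string) map[string]bool {
	isParent := make(map[string]bool)
	for _, parent := range parentOf {
		if parent != "" {
			isParent[parent] = true
		}
	}
	result := make(map[string]bool)
	for id := range parentOf {
		if !isParent[id] {
			result[id] = true
		}
	}
	return result
}

func main() {
	// base <- middle <- leaf
	chain := map[string]string{"base": "", "middle": "base", "leaf": "middle"}
	fmt.Println(heads(chain)) // map[leaf:true]
}
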
-func (graph *Graph) Heads() (map[string]*Image, error) { - heads := make(map[string]*Image) +func (graph *Graph) Heads() (map[string]*image.Image, error) { + heads := make(map[string]*image.Image) byParent, err := graph.ByParent() if err != nil { return nil, err } - err = graph.walkAll(func(image *Image) { + err = graph.walkAll(func(image *image.Image) { // If it's not in the byParent lookup table, then // it's not a parent -> so it's a head! if _, exists := byParent[image.ID]; !exists { @@ -398,7 +399,7 @@ func (graph *Graph) Heads() (map[string]*Image, error) { return heads, err } -func (graph *Graph) imageRoot(id string) string { +func (graph *Graph) ImageRoot(id string) string { return path.Join(graph.Root, id) } diff --git a/image.go b/image.go deleted file mode 100644 index fa5b65787c..0000000000 --- a/image.go +++ /dev/null @@ -1,318 +0,0 @@ -package docker - -import ( - "crypto/rand" - "encoding/hex" - "encoding/json" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "os" - "path" - "strconv" - "strings" - "time" -) - -type Image struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig runconfig.Config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *runconfig.Config `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - OS string `json:"os,omitempty"` - graph *Graph - Size int64 -} - -func LoadImage(root string) (*Image, error) { - // Load the json data - jsonData, err := ioutil.ReadFile(jsonPath(root)) - if err != nil { - return nil, err - } - img := &Image{} - - if err := json.Unmarshal(jsonData, img); err != nil { - return nil, err - } - if err := ValidateID(img.ID); err != nil { - return nil, err - } - - if buf, err := ioutil.ReadFile(path.Join(root, "layersize")); err != nil { - if !os.IsNotExist(err) { - return nil, err - } - // If the layersize file does not exist then set the size to a negative number - // because a layer size of 0 (zero) is valid - img.Size = -1 - } else { - size, err := strconv.Atoi(string(buf)) - if err != nil { - return nil, err - } - img.Size = int64(size) - } - - return img, nil -} - -func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, root, layer string) error { - // Store the layer - var ( - size int64 - err error - driver = img.graph.driver - ) - if err := os.MkdirAll(layer, 0755); err != nil { - return err - } - - // If layerData is not nil, unpack it into the new layer - if layerData != nil { - if differ, ok := driver.(graphdriver.Differ); ok { - if err := differ.ApplyDiff(img.ID, layerData); err != nil { - return err - } - - if size, err = differ.DiffSize(img.ID); err != nil { - return err - } - } else { - start := time.Now().UTC() - utils.Debugf("Start untar layer") - if err := archive.ApplyLayer(layer, layerData); err != nil { - return err - } - utils.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) - - if img.Parent == "" { - if size, err = utils.TreeSize(layer); err != nil { - return err - } - } else { - parent, err := driver.Get(img.Parent) - if err != nil { - return err - } - defer driver.Put(img.Parent) - changes, err := archive.ChangesDirs(layer, parent) - if err != nil { - 
return err - } - size = archive.ChangesSize(layer, changes) - } - } - } - - img.Size = size - if err := img.SaveSize(root); err != nil { - return err - } - - // If raw json is provided, then use it - if jsonData != nil { - if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { - return err - } - } else { - if jsonData, err = json.Marshal(img); err != nil { - return err - } - if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { - return err - } - } - return nil -} - -// SaveSize stores the current `size` value of `img` in the directory `root`. -func (img *Image) SaveSize(root string) error { - if err := ioutil.WriteFile(path.Join(root, "layersize"), []byte(strconv.Itoa(int(img.Size))), 0600); err != nil { - return fmt.Errorf("Error storing image size in %s/layersize: %s", root, err) - } - return nil -} - -func jsonPath(root string) string { - return path.Join(root, "json") -} - -// TarLayer returns a tar archive of the image's filesystem layer. -func (img *Image) TarLayer() (arch archive.Archive, err error) { - if img.graph == nil { - return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID) - } - driver := img.graph.driver - if differ, ok := driver.(graphdriver.Differ); ok { - return differ.Diff(img.ID) - } - - imgFs, err := driver.Get(img.ID) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - driver.Put(img.ID) - } - }() - - if img.Parent == "" { - archive, err := archive.Tar(imgFs, archive.Uncompressed) - if err != nil { - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(img.ID) - return err - }), nil - } - - parentFs, err := driver.Get(img.Parent) - if err != nil { - return nil, err - } - defer driver.Put(img.Parent) - changes, err := archive.ChangesDirs(imgFs, parentFs) - if err != nil { - return nil, err - } - archive, err := archive.ExportChanges(imgFs, changes) - if err != nil { - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(img.ID) - return err - }), nil -} - -func ValidateID(id string) error { - if id == "" { - return fmt.Errorf("Image id can't be empty") - } - if strings.Contains(id, ":") { - return fmt.Errorf("Invalid character in image id: ':'") - } - return nil -} - -func GenerateID() string { - for { - id := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, id); err != nil { - panic(err) // This shouldn't happen - } - value := hex.EncodeToString(id) - // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numberic and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.Atoi(utils.TruncateID(value)); err == nil { - continue - } - return value - } -} - -// Image includes convenience proxy functions to its graph -// These functions will return an error if the image is not registered -// (ie. 
if image.graph == nil) -func (img *Image) History() ([]*Image, error) { - var parents []*Image - if err := img.WalkHistory( - func(img *Image) error { - parents = append(parents, img) - return nil - }, - ); err != nil { - return nil, err - } - return parents, nil -} - -func (img *Image) WalkHistory(handler func(*Image) error) (err error) { - currentImg := img - for currentImg != nil { - if handler != nil { - if err := handler(currentImg); err != nil { - return err - } - } - currentImg, err = currentImg.GetParent() - if err != nil { - return fmt.Errorf("Error while getting parent image: %v", err) - } - } - return nil -} - -func (img *Image) GetParent() (*Image, error) { - if img.Parent == "" { - return nil, nil - } - if img.graph == nil { - return nil, fmt.Errorf("Can't lookup parent of unregistered image") - } - return img.graph.Get(img.Parent) -} - -func (img *Image) root() (string, error) { - if img.graph == nil { - return "", fmt.Errorf("Can't lookup root of unregistered image") - } - return img.graph.imageRoot(img.ID), nil -} - -func (img *Image) getParentsSize(size int64) int64 { - parentImage, err := img.GetParent() - if err != nil || parentImage == nil { - return size - } - size += parentImage.Size - return parentImage.getParentsSize(size) -} - -// Depth returns the number of parents for a -// current image -func (img *Image) Depth() (int, error) { - var ( - count = 0 - parent = img - err error - ) - - for parent != nil { - count++ - parent, err = parent.GetParent() - if err != nil { - return -1, err - } - } - return count, nil -} - -// Build an Image object from raw json data -func NewImgJSON(src []byte) (*Image, error) { - ret := &Image{} - - utils.Debugf("Json string: {%s}", src) - // FIXME: Is there a cleaner way to "purify" the input json? 
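
As an aside for illustration: GenerateID, replaced later in this series by utils.GenerateRandomID, keeps drawing 256-bit hex IDs until the truncated short form is not purely numeric, since an all-digit short ID misbehaves when used as a hostname (issue #3869). A self-contained sketch of that loop; the 12-character short length is an assumption for this example:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"strconv"
)

func generateRandomID() string {
	for {
		b := make([]byte, 32)
		if _, err := rand.Read(b); err != nil {
			panic(err) // the system RNG should not fail
		}
		id := hex.EncodeToString(b)
		// Reject IDs whose short form parses as an integer (all digits).
		if _, err := strconv.Atoi(id[:12]); err == nil {
			continue
		}
		return id
	}
}

func main() {
	fmt.Println(generateRandomID())
}
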
- if err := json.Unmarshal(src, ret); err != nil { - return nil, err - } - return ret, nil -} diff --git a/image/graph.go b/image/graph.go new file mode 100644 index 0000000000..857c09edd9 --- /dev/null +++ b/image/graph.go @@ -0,0 +1,11 @@ +package image + +import ( + "github.com/dotcloud/docker/graphdriver" +) + +type Graph interface { + Get(id string) (*Image, error) + ImageRoot(id string) string + Driver() graphdriver.Driver +} diff --git a/image/image.go b/image/image.go new file mode 100644 index 0000000000..e091879049 --- /dev/null +++ b/image/image.go @@ -0,0 +1,292 @@ +package image + +import ( + "encoding/json" + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "os" + "path" + "strconv" + "time" +) + +type Image struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig runconfig.Config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *runconfig.Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + OS string `json:"os,omitempty"` + Size int64 + + graph Graph +} + +func LoadImage(root string) (*Image, error) { + // Load the json data + jsonData, err := ioutil.ReadFile(jsonPath(root)) + if err != nil { + return nil, err + } + img := &Image{} + + if err := json.Unmarshal(jsonData, img); err != nil { + return nil, err + } + if err := utils.ValidateID(img.ID); err != nil { + return nil, err + } + + if buf, err := ioutil.ReadFile(path.Join(root, "layersize")); err != nil { + if !os.IsNotExist(err) { + return nil, err + } + // If the layersize file does not exist then set the size to a negative number + // because a layer size of 0 (zero) is valid + img.Size = -1 + } else { + size, err := strconv.Atoi(string(buf)) + if err != nil { + return nil, err + } + img.Size = int64(size) + } + + return img, nil +} + +func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, root, layer string) error { + // Store the layer + var ( + size int64 + err error + driver = img.graph.Driver() + ) + if err := os.MkdirAll(layer, 0755); err != nil { + return err + } + + // If layerData is not nil, unpack it into the new layer + if layerData != nil { + if differ, ok := driver.(graphdriver.Differ); ok { + if err := differ.ApplyDiff(img.ID, layerData); err != nil { + return err + } + + if size, err = differ.DiffSize(img.ID); err != nil { + return err + } + } else { + start := time.Now().UTC() + utils.Debugf("Start untar layer") + if err := archive.ApplyLayer(layer, layerData); err != nil { + return err + } + utils.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + if img.Parent == "" { + if size, err = utils.TreeSize(layer); err != nil { + return err + } + } else { + parent, err := driver.Get(img.Parent) + if err != nil { + return err + } + defer driver.Put(img.Parent) + changes, err := archive.ChangesDirs(layer, parent) + if err != nil { + return err + } + size = archive.ChangesSize(layer, changes) + } + } + } + + img.Size = size + if err := img.SaveSize(root); err != nil { + return err + } + + // If raw json is provided, then use it + if jsonData != nil { + if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { + return err + } + } else 
{ + if jsonData, err = json.Marshal(img); err != nil { + return err + } + if err := ioutil.WriteFile(jsonPath(root), jsonData, 0600); err != nil { + return err + } + } + return nil +} + +func (img *Image) SetGraph(graph Graph) { + img.graph = graph +} + +// SaveSize stores the current `size` value of `img` in the directory `root`. +func (img *Image) SaveSize(root string) error { + if err := ioutil.WriteFile(path.Join(root, "layersize"), []byte(strconv.Itoa(int(img.Size))), 0600); err != nil { + return fmt.Errorf("Error storing image size in %s/layersize: %s", root, err) + } + return nil +} + +func jsonPath(root string) string { + return path.Join(root, "json") +} + +// TarLayer returns a tar archive of the image's filesystem layer. +func (img *Image) TarLayer() (arch archive.Archive, err error) { + if img.graph == nil { + return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID) + } + driver := img.graph.Driver() + if differ, ok := driver.(graphdriver.Differ); ok { + return differ.Diff(img.ID) + } + + imgFs, err := driver.Get(img.ID) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + driver.Put(img.ID) + } + }() + + if img.Parent == "" { + archive, err := archive.Tar(imgFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(img.ID) + return err + }), nil + } + + parentFs, err := driver.Get(img.Parent) + if err != nil { + return nil, err + } + defer driver.Put(img.Parent) + changes, err := archive.ChangesDirs(imgFs, parentFs) + if err != nil { + return nil, err + } + archive, err := archive.ExportChanges(imgFs, changes) + if err != nil { + return nil, err + } + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(img.ID) + return err + }), nil +} + +// Image includes convenience proxy functions to its graph +// These functions will return an error if the image is not registered +// (ie. 
if image.graph == nil) +func (img *Image) History() ([]*Image, error) { + var parents []*Image + if err := img.WalkHistory( + func(img *Image) error { + parents = append(parents, img) + return nil + }, + ); err != nil { + return nil, err + } + return parents, nil +} + +func (img *Image) WalkHistory(handler func(*Image) error) (err error) { + currentImg := img + for currentImg != nil { + if handler != nil { + if err := handler(currentImg); err != nil { + return err + } + } + currentImg, err = currentImg.GetParent() + if err != nil { + return fmt.Errorf("Error while getting parent image: %v", err) + } + } + return nil +} + +func (img *Image) GetParent() (*Image, error) { + if img.Parent == "" { + return nil, nil + } + if img.graph == nil { + return nil, fmt.Errorf("Can't lookup parent of unregistered image") + } + return img.graph.Get(img.Parent) +} + +func (img *Image) root() (string, error) { + if img.graph == nil { + return "", fmt.Errorf("Can't lookup root of unregistered image") + } + return img.graph.ImageRoot(img.ID), nil +} + +func (img *Image) GetParentsSize(size int64) int64 { + parentImage, err := img.GetParent() + if err != nil || parentImage == nil { + return size + } + size += parentImage.Size + return parentImage.GetParentsSize(size) +} + +// Depth returns the number of parents for a +// current image +func (img *Image) Depth() (int, error) { + var ( + count = 0 + parent = img + err error + ) + + for parent != nil { + count++ + parent, err = parent.GetParent() + if err != nil { + return -1, err + } + } + return count, nil +} + +// Build an Image object from raw json data +func NewImgJSON(src []byte) (*Image, error) { + ret := &Image{} + + utils.Debugf("Json string: {%s}", src) + // FIXME: Is there a cleaner way to "purify" the input json? 
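
Illustrative aside: GetParentsSize, exported here so that server.go can report VirtualSize as image.GetParentsSize(0)+image.Size, recursively sums the sizes of an image's ancestors. The same accumulation over a toy layer type, as a sketch:

package main

import "fmt"

type layer struct {
	size   int64
	parent *layer
}

// parentsSize adds up the sizes of all ancestors, mirroring GetParentsSize.
func parentsSize(l *layer, acc int64) int64 {
	if l.parent == nil {
		return acc
	}
	return parentsSize(l.parent, acc+l.parent.size)
}

func main() {
	base := &layer{size: 100}
	middle := &layer{size: 20, parent: base}
	top := &layer{size: 5, parent: middle}
	// "VirtualSize" of top: ancestors (120) plus its own layer (5).
	fmt.Println(parentsSize(top, 0) + top.size) // 125
}
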
+ if err := json.Unmarshal(src, ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/integration/api_test.go b/integration/api_test.go index cb92d89858..c050b4934d 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -9,6 +9,7 @@ import ( "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" @@ -287,7 +288,7 @@ func TestGetImagesByName(t *testing.T) { } assertHttpNotError(r, t) - img := &docker.Image{} + img := &image.Image{} if err := json.Unmarshal(r.Body.Bytes(), img); err != nil { t.Fatal(err) } diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index efab9707ec..e5084d4355 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -5,6 +5,7 @@ import ( "github.com/dotcloud/docker" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/utils" "io/ioutil" "net" @@ -350,7 +351,7 @@ func TestBuild(t *testing.T) { } } -func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) (*docker.Image, error) { +func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) (*image.Image, error) { if eng == nil { eng = NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) diff --git a/integration/commands_test.go b/integration/commands_test.go index 9f7a41384c..6d3ac86347 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -6,6 +6,7 @@ import ( "github.com/dotcloud/docker" "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/pkg/term" "github.com/dotcloud/docker/utils" "io" @@ -902,7 +903,7 @@ func TestImagesTree(t *testing.T) { }) } -func buildTestImages(t *testing.T, eng *engine.Engine) *docker.Image { +func buildTestImages(t *testing.T, eng *engine.Engine) *image.Image { var testBuilder = testContextTemplate{ ` diff --git a/integration/graph_test.go b/integration/graph_test.go index ff1c0d9361..4fd612b5ac 100644 --- a/integration/graph_test.go +++ b/integration/graph_test.go @@ -6,6 +6,7 @@ import ( "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -67,8 +68,8 @@ func TestInterruptedRegister(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data - image := &docker.Image{ - ID: docker.GenerateID(), + image := &image.Image{ + ID: utils.GenerateRandomID(), Comment: "testing", Created: time.Now(), } @@ -96,18 +97,18 @@ func TestGraphCreate(t *testing.T) { if err != nil { t.Fatal(err) } - image, err := graph.Create(archive, nil, "Testing", "", nil) + img, err := graph.Create(archive, nil, "Testing", "", nil) if err != nil { t.Fatal(err) } - if err := docker.ValidateID(image.ID); err != nil { + if err := utils.ValidateID(img.ID); err != nil { t.Fatal(err) } - if image.Comment != "Testing" { - t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", image.Comment) + if img.Comment != "Testing" { + t.Fatalf("Wrong comment: should be '%s', not '%s'", 
"Testing", img.Comment) } - if image.DockerVersion != dockerversion.VERSION { - t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, image.DockerVersion) + if img.DockerVersion != dockerversion.VERSION { + t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, img.DockerVersion) } images, err := graph.Map() if err != nil { @@ -115,8 +116,8 @@ func TestGraphCreate(t *testing.T) { } else if l := len(images); l != 1 { t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l) } - if images[image.ID] == nil { - t.Fatalf("Could not find image with id %s", image.ID) + if images[img.ID] == nil { + t.Fatalf("Could not find image with id %s", img.ID) } } @@ -127,8 +128,8 @@ func TestRegister(t *testing.T) { if err != nil { t.Fatal(err) } - image := &docker.Image{ - ID: docker.GenerateID(), + image := &image.Image{ + ID: utils.GenerateRandomID(), Comment: "testing", Created: time.Now(), } @@ -164,7 +165,7 @@ func TestDeletePrefix(t *testing.T) { assertNImages(graph, t, 0) } -func createTestImage(graph *docker.Graph, t *testing.T) *docker.Image { +func createTestImage(graph *docker.Graph, t *testing.T) *image.Image { archive, err := fakeTar() if err != nil { t.Fatal(err) @@ -243,20 +244,20 @@ func TestByParent(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) - parentImage := &docker.Image{ - ID: docker.GenerateID(), + parentImage := &image.Image{ + ID: utils.GenerateRandomID(), Comment: "parent", Created: time.Now(), Parent: "", } - childImage1 := &docker.Image{ - ID: docker.GenerateID(), + childImage1 := &image.Image{ + ID: utils.GenerateRandomID(), Comment: "child1", Created: time.Now(), Parent: parentImage.ID, } - childImage2 := &docker.Image{ - ID: docker.GenerateID(), + childImage2 := &image.Image{ + ID: utils.GenerateRandomID(), Comment: "child2", Created: time.Now(), Parent: parentImage.ID, diff --git a/integration/runtime_test.go b/integration/runtime_test.go index 1e912c1bb4..a79f84365a 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/sysinit" @@ -172,7 +173,7 @@ func spawnGlobalDaemon() { // FIXME: test that ImagePull(json=true) send correct json output -func GetTestImage(runtime *docker.Runtime) *docker.Image { +func GetTestImage(runtime *docker.Runtime) *image.Image { imgs, err := runtime.Graph().Map() if err != nil { log.Fatalf("Unable to get the test image: %s", err) diff --git a/runtime.go b/runtime.go index d1aeef4f97..81bc9cbded 100644 --- a/runtime.go +++ b/runtime.go @@ -15,6 +15,7 @@ import ( _ "github.com/dotcloud/docker/graphdriver/btrfs" _ "github.com/dotcloud/docker/graphdriver/devmapper" _ "github.com/dotcloud/docker/graphdriver/vfs" + "github.com/dotcloud/docker/image" _ "github.com/dotcloud/docker/networkdriver/lxc" "github.com/dotcloud/docker/networkdriver/portallocator" "github.com/dotcloud/docker/pkg/graphdb" @@ -396,7 +397,7 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe } // Generate id - id := GenerateID() + id := utils.GenerateRandomID() if name == "" { name, err = generateRandomName(runtime) @@ -539,7 +540,7 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe // Commit creates a new filesystem image from the current state of a container. 
// The image can optionally be tagged into a repository -func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*Image, error) { +func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) { // FIXME: freeze the container before copying it to avoid data corruption? // FIXME: this shouldn't be in commands. if err := container.Mount(); err != nil { diff --git a/server.go b/server.go index 70ee7f241b..37402ee502 100644 --- a/server.go +++ b/server.go @@ -8,6 +8,7 @@ import ( "github.com/dotcloud/docker/daemonconfig" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" @@ -362,8 +363,8 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { return engine.StatusOK } -func (srv *Server) exportImage(image *Image, tempdir string) error { - for i := image; i != nil; { +func (srv *Server) exportImage(img *image.Image, tempdir string) error { + for i := img; i != nil; { // temporary directory tmpImageDir := path.Join(tempdir, i.ID) if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil { @@ -580,7 +581,7 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error { utils.Debugf("Error reading embedded tar", err) return err } - img, err := NewImgJSON(imageJson) + img, err := image.NewImgJSON(imageJson) if err != nil { utils.Debugf("Error unmarshalling json", err) return err @@ -690,7 +691,7 @@ func (srv *Server) ImagesViz(job *engine.Job) engine.Status { job.Stdout.Write([]byte("digraph docker {\n")) var ( - parentImage *Image + parentImage *image.Image err error ) for _, image := range images { @@ -722,7 +723,7 @@ func (srv *Server) ImagesViz(job *engine.Job) engine.Status { func (srv *Server) Images(job *engine.Job) engine.Status { var ( - allImages map[string]*Image + allImages map[string]*image.Image err error ) if job.GetenvBool("all") { @@ -757,7 +758,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status { out.Set("Id", image.ID) out.SetInt64("Created", image.Created.Unix()) out.SetInt64("Size", image.Size) - out.SetInt64("VirtualSize", image.getParentsSize(0)+image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) lookup[id] = out } @@ -778,7 +779,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status { out.Set("Id", image.ID) out.SetInt64("Created", image.Created.Unix()) out.SetInt64("Size", image.Size) - out.SetInt64("VirtualSize", image.getParentsSize(0)+image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) outs.Add(out) } } @@ -838,7 +839,7 @@ func (srv *Server) ImageHistory(job *engine.Job) engine.Status { return job.Errorf("Usage: %s IMAGE", job.Name) } name := job.Args[0] - image, err := srv.runtime.repositories.LookupImage(name) + foundImage, err := srv.runtime.repositories.LookupImage(name) if err != nil { return job.Error(err) } @@ -855,7 +856,7 @@ func (srv *Server) ImageHistory(job *engine.Job) engine.Status { } outs := engine.NewTable("Created", 0) - err = image.WalkHistory(func(img *Image) error { + err = foundImage.WalkHistory(func(img *image.Image) error { out := &engine.Env{} out.Set("Id", img.ID) out.SetInt64("Created", img.Created.Unix()) @@ -1098,7 +1099,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin // FIXME: Keep going in case 
of error? return err } - img, err := NewImgJSON(imgJSON) + img, err := image.NewImgJSON(imgJSON) if err != nil { out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) return fmt.Errorf("Failed to parse json: %s", err) @@ -1946,7 +1947,7 @@ func (srv *Server) canDeleteImage(imgID string) error { return err } - if err := parent.WalkHistory(func(p *Image) error { + if err := parent.WalkHistory(func(p *image.Image) error { if imgID == p.ID { return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it", utils.TruncateID(imgID), utils.TruncateID(container.ID)) } @@ -1958,7 +1959,7 @@ func (srv *Server) canDeleteImage(imgID string) error { return nil } -func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*Image, error) { +func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { // Retrieve all images images, err := srv.runtime.graph.Map() @@ -1976,7 +1977,7 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*Imag } // Loop on the children of the given image and check the config - var match *Image + var match *image.Image for elem := range imageMap[imgID] { img, err := srv.runtime.graph.Get(elem) if err != nil { @@ -2242,7 +2243,7 @@ func (srv *Server) ContainerInspect(name string) (*Container, error) { return nil, fmt.Errorf("No such container: %s", name) } -func (srv *Server) ImageInspect(name string) (*Image, error) { +func (srv *Server) ImageInspect(name string) (*image.Image, error) { if image, err := srv.runtime.repositories.LookupImage(name); err == nil && image != nil { return image, nil } diff --git a/tags.go b/tags.go index 92c32b1ff5..27e19cd671 100644 --- a/tags.go +++ b/tags.go @@ -3,6 +3,7 @@ package docker import ( "encoding/json" "fmt" + "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/utils" "io/ioutil" "os" @@ -65,7 +66,7 @@ func (store *TagStore) Reload() error { return nil } -func (store *TagStore) LookupImage(name string) (*Image, error) { +func (store *TagStore) LookupImage(name string) (*image.Image, error) { // FIXME: standardize on returning nil when the image doesn't exist, and err for everything else // (so we can pass all errors here) repos, tag := utils.ParseRepositoryTag(name) @@ -195,7 +196,7 @@ func (store *TagStore) Get(repoName string) (Repository, error) { return nil, nil } -func (store *TagStore) GetImage(repoName, tagOrID string) (*Image, error) { +func (store *TagStore) GetImage(repoName, tagOrID string) (*image.Image, error) { repo, err := store.Get(repoName) if err != nil { return nil, err diff --git a/tags_unit_test.go b/tags_unit_test.go index b6236280a8..8ee913f527 100644 --- a/tags_unit_test.go +++ b/tags_unit_test.go @@ -2,6 +2,7 @@ package docker import ( "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/utils" "os" "path" @@ -30,7 +31,7 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { if err != nil { t.Fatal(err) } - img := &Image{ID: testImageID} + img := &image.Image{ID: testImageID} // FIXME: this fails on Darwin with: // tags_unit_test.go:36: mkdir /var/folders/7g/b3ydb5gx4t94ndr_cljffbt80000gq/T/docker-test569b-tRunner-075013689/vfs/dir/foo/etc/postgres: permission denied if err := graph.Register(nil, archive, img); err != nil { diff --git a/utils/utils.go b/utils/utils.go index 07b8f6a3d0..e4cb04f39c 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -2,6 +2,7 @@ package utils import ( "bytes" + "crypto/rand" 
"crypto/sha1" "crypto/sha256" "encoding/hex" @@ -493,6 +494,34 @@ func TruncateID(id string) string { return id[:shortLen] } +// GenerateRandomID returns an unique id +func GenerateRandomID() string { + for { + id := make([]byte, 32) + if _, err := io.ReadFull(rand.Reader, id); err != nil { + panic(err) // This shouldn't happen + } + value := hex.EncodeToString(id) + // if we try to parse the truncated for as an int and we don't have + // an error then the value is all numberic and causes issues when + // used as a hostname. ref #3869 + if _, err := strconv.Atoi(TruncateID(value)); err == nil { + continue + } + return value + } +} + +func ValidateID(id string) error { + if id == "" { + return fmt.Errorf("Id can't be empty") + } + if strings.Contains(id, ":") { + return fmt.Errorf("Invalid character in id: ':'") + } + return nil +} + // Code c/c from io.Copy() modified to handle escape sequence func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { buf := make([]byte, 32*1024) -- cgit v1.2.1 From 01b6b2be73a6f40e0179e0217385eea6b41100a5 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 7 Mar 2014 18:04:38 -0800 Subject: Move graph and tags to graph sub pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- graph.go | 408 ---------------------------------------------- graph/graph.go | 408 ++++++++++++++++++++++++++++++++++++++++++++++ graph/tags.go | 235 ++++++++++++++++++++++++++ graph/tags_unit_test.go | 110 +++++++++++++ integration/graph_test.go | 24 +-- runtime.go | 30 ++-- server.go | 11 +- tags.go | 235 -------------------------- tags_unit_test.go | 89 ---------- utils_test.go | 24 --- volumes.go | 4 +- 11 files changed, 793 insertions(+), 785 deletions(-) delete mode 100644 graph.go create mode 100644 graph/graph.go create mode 100644 graph/tags.go create mode 100644 graph/tags_unit_test.go delete mode 100644 tags.go delete mode 100644 tags_unit_test.go delete mode 100644 utils_test.go diff --git a/graph.go b/graph.go deleted file mode 100644 index d164760d4c..0000000000 --- a/graph.go +++ /dev/null @@ -1,408 +0,0 @@ -package docker - -import ( - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "syscall" - "time" -) - -// A Graph is a store for versioned filesystem images and the relationship between them. -type Graph struct { - Root string - idIndex *utils.TruncIndex - driver graphdriver.Driver -} - -// NewGraph instantiates a new graph at the given root path in the filesystem. -// `root` will be created if it doesn't exist. 
-func NewGraph(root string, driver graphdriver.Driver) (*Graph, error) { - abspath, err := filepath.Abs(root) - if err != nil { - return nil, err - } - // Create the root directory if it doesn't exists - if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) { - return nil, err - } - - graph := &Graph{ - Root: abspath, - idIndex: utils.NewTruncIndex(), - driver: driver, - } - if err := graph.restore(); err != nil { - return nil, err - } - return graph, nil -} - -func (graph *Graph) restore() error { - dir, err := ioutil.ReadDir(graph.Root) - if err != nil { - return err - } - for _, v := range dir { - id := v.Name() - if graph.driver.Exists(id) { - graph.idIndex.Add(id) - } - } - utils.Debugf("Restored %d elements", len(dir)) - return nil -} - -// FIXME: Implement error subclass instead of looking at the error text -// Note: This is the way golang implements os.IsNotExists on Plan9 -func (graph *Graph) IsNotExist(err error) bool { - return err != nil && (strings.Contains(err.Error(), "does not exist") || strings.Contains(err.Error(), "No such")) -} - -// Exists returns true if an image is registered at the given id. -// If the image doesn't exist or if an error is encountered, false is returned. -func (graph *Graph) Exists(id string) bool { - if _, err := graph.Get(id); err != nil { - return false - } - return true -} - -// Get returns the image with the given id, or an error if the image doesn't exist. -func (graph *Graph) Get(name string) (*image.Image, error) { - id, err := graph.idIndex.Get(name) - if err != nil { - return nil, err - } - // FIXME: return nil when the image doesn't exist, instead of an error - img, err := image.LoadImage(graph.ImageRoot(id)) - if err != nil { - return nil, err - } - if img.ID != id { - return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID) - } - img.SetGraph(graph) - - if img.Size < 0 { - rootfs, err := graph.driver.Get(img.ID) - if err != nil { - return nil, fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) - } - defer graph.driver.Put(img.ID) - - var size int64 - if img.Parent == "" { - if size, err = utils.TreeSize(rootfs); err != nil { - return nil, err - } - } else { - parentFs, err := graph.driver.Get(img.Parent) - if err != nil { - return nil, err - } - changes, err := archive.ChangesDirs(rootfs, parentFs) - if err != nil { - return nil, err - } - size = archive.ChangesSize(rootfs, changes) - } - - img.Size = size - if err := img.SaveSize(graph.ImageRoot(id)); err != nil { - return nil, err - } - } - return img, nil -} - -// Create creates a new image and registers it in the graph. -func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container, comment, author string, config *runconfig.Config) (*image.Image, error) { - img := &image.Image{ - ID: utils.GenerateRandomID(), - Comment: comment, - Created: time.Now().UTC(), - DockerVersion: dockerversion.VERSION, - Author: author, - Config: config, - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - } - if container != nil { - img.Parent = container.Image - img.Container = container.ID - img.ContainerConfig = *container.Config - } - if err := graph.Register(nil, layerData, img); err != nil { - return nil, err - } - return img, nil -} - -// Register imports a pre-existing image into the graph. 
-// FIXME: pass img as first argument -func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, img *image.Image) (err error) { - defer func() { - // If any error occurs, remove the new dir from the driver. - // Don't check for errors since the dir might not have been created. - // FIXME: this leaves a possible race condition. - if err != nil { - graph.driver.Remove(img.ID) - } - }() - if err := utils.ValidateID(img.ID); err != nil { - return err - } - // (This is a convenience to save time. Race conditions are taken care of by os.Rename) - if graph.Exists(img.ID) { - return fmt.Errorf("Image %s already exists", img.ID) - } - - // Ensure that the image root does not exist on the filesystem - // when it is not registered in the graph. - // This is common when you switch from one graph driver to another - if err := os.RemoveAll(graph.ImageRoot(img.ID)); err != nil && !os.IsNotExist(err) { - return err - } - - // If the driver has this ID but the graph doesn't, remove it from the driver to start fresh. - // (the graph is the source of truth). - // Ignore errors, since we don't know if the driver correctly returns ErrNotExist. - // (FIXME: make that mandatory for drivers). - graph.driver.Remove(img.ID) - - tmp, err := graph.Mktemp("") - defer os.RemoveAll(tmp) - if err != nil { - return fmt.Errorf("Mktemp failed: %s", err) - } - - // Create root filesystem in the driver - if err := graph.driver.Create(img.ID, img.Parent); err != nil { - return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err) - } - // Mount the root filesystem so we can apply the diff/layer - rootfs, err := graph.driver.Get(img.ID) - if err != nil { - return fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) - } - defer graph.driver.Put(img.ID) - img.SetGraph(graph) - if err := image.StoreImage(img, jsonData, layerData, tmp, rootfs); err != nil { - return err - } - // Commit - if err := os.Rename(tmp, graph.ImageRoot(img.ID)); err != nil { - return err - } - graph.idIndex.Add(img.ID) - return nil -} - -// TempLayerArchive creates a temporary archive of the given image's filesystem layer. -// The archive is stored on disk and will be automatically deleted as soon as has been read. -// If output is not nil, a human-readable progress bar will be written to it. -// FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives? -func (graph *Graph) TempLayerArchive(id string, compression archive.Compression, sf *utils.StreamFormatter, output io.Writer) (*archive.TempArchive, error) { - image, err := graph.Get(id) - if err != nil { - return nil, err - } - tmp, err := graph.Mktemp("") - if err != nil { - return nil, err - } - a, err := image.TarLayer() - if err != nil { - return nil, err - } - progress := utils.ProgressReader(a, 0, output, sf, false, utils.TruncateID(id), "Buffering to disk") - defer progress.Close() - return archive.NewTempArchive(progress, tmp) -} - -// Mktemp creates a temporary sub-directory inside the graph's filesystem. -func (graph *Graph) Mktemp(id string) (string, error) { - dir := path.Join(graph.Root, "_tmp", utils.GenerateRandomID()) - if err := os.MkdirAll(dir, 0700); err != nil { - return "", err - } - return dir, nil -} - -// setupInitLayer populates a directory with mountpoints suitable -// for bind-mounting dockerinit into the container. The mountpoint is simply an -// empty file at /.dockerinit -// -// This extra layer is used by all containers as the top-most ro layer. 
It protects -// the container from unwanted side-effects on the rw layer. -func setupInitLayer(initLayer string) error { - for pth, typ := range map[string]string{ - "/dev/pts": "dir", - "/dev/shm": "dir", - "/proc": "dir", - "/sys": "dir", - "/.dockerinit": "file", - "/.dockerenv": "file", - "/etc/resolv.conf": "file", - "/etc/hosts": "file", - "/etc/hostname": "file", - "/dev/console": "file", - // "var/run": "dir", - // "var/lock": "dir", - } { - parts := strings.Split(pth, "/") - prev := "/" - for _, p := range parts[1:] { - prev = path.Join(prev, p) - syscall.Unlink(path.Join(initLayer, prev)) - } - - if _, err := os.Stat(path.Join(initLayer, pth)); err != nil { - if os.IsNotExist(err) { - switch typ { - case "dir": - if err := os.MkdirAll(path.Join(initLayer, pth), 0755); err != nil { - return err - } - case "file": - if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil { - return err - } - f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } else { - return err - } - } - } - - // Layer is ready to use, if it wasn't before. - return nil -} - -// Check if given error is "not empty". -// Note: this is the way golang does it internally with os.IsNotExists. -func isNotEmpty(err error) bool { - switch pe := err.(type) { - case nil: - return false - case *os.PathError: - err = pe.Err - case *os.LinkError: - err = pe.Err - } - return strings.Contains(err.Error(), " not empty") -} - -// Delete atomically removes an image from the graph. -func (graph *Graph) Delete(name string) error { - id, err := graph.idIndex.Get(name) - if err != nil { - return err - } - tmp, err := graph.Mktemp("") - if err != nil { - return err - } - graph.idIndex.Delete(id) - err = os.Rename(graph.ImageRoot(id), tmp) - if err != nil { - return err - } - // Remove rootfs data from the driver - graph.driver.Remove(id) - // Remove the trashed image directory - return os.RemoveAll(tmp) -} - -// Map returns a list of all images in the graph, addressable by ID. -func (graph *Graph) Map() (map[string]*image.Image, error) { - images := make(map[string]*image.Image) - err := graph.walkAll(func(image *image.Image) { - images[image.ID] = image - }) - if err != nil { - return nil, err - } - return images, nil -} - -// walkAll iterates over each image in the graph, and passes it to a handler. -// The walking order is undetermined. -func (graph *Graph) walkAll(handler func(*image.Image)) error { - files, err := ioutil.ReadDir(graph.Root) - if err != nil { - return err - } - for _, st := range files { - if img, err := graph.Get(st.Name()); err != nil { - // Skip image - continue - } else if handler != nil { - handler(img) - } - } - return nil -} - -// ByParent returns a lookup table of images by their parent. -// If an image of id ID has 3 children images, then the value for key ID -// will be a list of 3 images. -// If an image has no children, it will not have an entry in the table. -func (graph *Graph) ByParent() (map[string][]*image.Image, error) { - byParent := make(map[string][]*image.Image) - err := graph.walkAll(func(img *image.Image) { - parent, err := graph.Get(img.Parent) - if err != nil { - return - } - if children, exists := byParent[parent.ID]; exists { - byParent[parent.ID] = append(children, img) - } else { - byParent[parent.ID] = []*image.Image{img} - } - }) - return byParent, err -} - -// Heads returns all heads in the graph, keyed by id. -// A head is an image which is not the parent of another image in the graph. 
-func (graph *Graph) Heads() (map[string]*image.Image, error) { - heads := make(map[string]*image.Image) - byParent, err := graph.ByParent() - if err != nil { - return nil, err - } - err = graph.walkAll(func(image *image.Image) { - // If it's not in the byParent lookup table, then - // it's not a parent -> so it's a head! - if _, exists := byParent[image.ID]; !exists { - heads[image.ID] = image - } - }) - return heads, err -} - -func (graph *Graph) ImageRoot(id string) string { - return path.Join(graph.Root, id) -} - -func (graph *Graph) Driver() graphdriver.Driver { - return graph.driver -} diff --git a/graph/graph.go b/graph/graph.go new file mode 100644 index 0000000000..01659b549f --- /dev/null +++ b/graph/graph.go @@ -0,0 +1,408 @@ +package graph + +import ( + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/dockerversion" + "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/utils" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "syscall" + "time" +) + +// A Graph is a store for versioned filesystem images and the relationship between them. +type Graph struct { + Root string + idIndex *utils.TruncIndex + driver graphdriver.Driver +} + +// NewGraph instantiates a new graph at the given root path in the filesystem. +// `root` will be created if it doesn't exist. +func NewGraph(root string, driver graphdriver.Driver) (*Graph, error) { + abspath, err := filepath.Abs(root) + if err != nil { + return nil, err + } + // Create the root directory if it doesn't exists + if err := os.MkdirAll(root, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + graph := &Graph{ + Root: abspath, + idIndex: utils.NewTruncIndex(), + driver: driver, + } + if err := graph.restore(); err != nil { + return nil, err + } + return graph, nil +} + +func (graph *Graph) restore() error { + dir, err := ioutil.ReadDir(graph.Root) + if err != nil { + return err + } + for _, v := range dir { + id := v.Name() + if graph.driver.Exists(id) { + graph.idIndex.Add(id) + } + } + utils.Debugf("Restored %d elements", len(dir)) + return nil +} + +// FIXME: Implement error subclass instead of looking at the error text +// Note: This is the way golang implements os.IsNotExists on Plan9 +func (graph *Graph) IsNotExist(err error) bool { + return err != nil && (strings.Contains(err.Error(), "does not exist") || strings.Contains(err.Error(), "No such")) +} + +// Exists returns true if an image is registered at the given id. +// If the image doesn't exist or if an error is encountered, false is returned. +func (graph *Graph) Exists(id string) bool { + if _, err := graph.Get(id); err != nil { + return false + } + return true +} + +// Get returns the image with the given id, or an error if the image doesn't exist. 
+func (graph *Graph) Get(name string) (*image.Image, error) { + id, err := graph.idIndex.Get(name) + if err != nil { + return nil, err + } + // FIXME: return nil when the image doesn't exist, instead of an error + img, err := image.LoadImage(graph.ImageRoot(id)) + if err != nil { + return nil, err + } + if img.ID != id { + return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID) + } + img.SetGraph(graph) + + if img.Size < 0 { + rootfs, err := graph.driver.Get(img.ID) + if err != nil { + return nil, fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) + } + defer graph.driver.Put(img.ID) + + var size int64 + if img.Parent == "" { + if size, err = utils.TreeSize(rootfs); err != nil { + return nil, err + } + } else { + parentFs, err := graph.driver.Get(img.Parent) + if err != nil { + return nil, err + } + changes, err := archive.ChangesDirs(rootfs, parentFs) + if err != nil { + return nil, err + } + size = archive.ChangesSize(rootfs, changes) + } + + img.Size = size + if err := img.SaveSize(graph.ImageRoot(id)); err != nil { + return nil, err + } + } + return img, nil +} + +// Create creates a new image and registers it in the graph. +func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) { + img := &image.Image{ + ID: utils.GenerateRandomID(), + Comment: comment, + Created: time.Now().UTC(), + DockerVersion: dockerversion.VERSION, + Author: author, + Config: config, + Architecture: runtime.GOARCH, + OS: runtime.GOOS, + } + if containerID != "" { + img.Parent = containerImage + img.Container = containerID + img.ContainerConfig = *containerConfig + } + if err := graph.Register(nil, layerData, img); err != nil { + return nil, err + } + return img, nil +} + +// Register imports a pre-existing image into the graph. +// FIXME: pass img as first argument +func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, img *image.Image) (err error) { + defer func() { + // If any error occurs, remove the new dir from the driver. + // Don't check for errors since the dir might not have been created. + // FIXME: this leaves a possible race condition. + if err != nil { + graph.driver.Remove(img.ID) + } + }() + if err := utils.ValidateID(img.ID); err != nil { + return err + } + // (This is a convenience to save time. Race conditions are taken care of by os.Rename) + if graph.Exists(img.ID) { + return fmt.Errorf("Image %s already exists", img.ID) + } + + // Ensure that the image root does not exist on the filesystem + // when it is not registered in the graph. + // This is common when you switch from one graph driver to another + if err := os.RemoveAll(graph.ImageRoot(img.ID)); err != nil && !os.IsNotExist(err) { + return err + } + + // If the driver has this ID but the graph doesn't, remove it from the driver to start fresh. + // (the graph is the source of truth). + // Ignore errors, since we don't know if the driver correctly returns ErrNotExist. + // (FIXME: make that mandatory for drivers). 
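+	// From here on Register proceeds in order: clear any stale driver state for this id,
+	// create and mount a fresh rootfs, write the image through image.StoreImage into a
+	// temporary directory, then atomically rename that directory into place and index the id.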
+ graph.driver.Remove(img.ID) + + tmp, err := graph.Mktemp("") + defer os.RemoveAll(tmp) + if err != nil { + return fmt.Errorf("Mktemp failed: %s", err) + } + + // Create root filesystem in the driver + if err := graph.driver.Create(img.ID, img.Parent); err != nil { + return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err) + } + // Mount the root filesystem so we can apply the diff/layer + rootfs, err := graph.driver.Get(img.ID) + if err != nil { + return fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err) + } + defer graph.driver.Put(img.ID) + img.SetGraph(graph) + if err := image.StoreImage(img, jsonData, layerData, tmp, rootfs); err != nil { + return err + } + // Commit + if err := os.Rename(tmp, graph.ImageRoot(img.ID)); err != nil { + return err + } + graph.idIndex.Add(img.ID) + return nil +} + +// TempLayerArchive creates a temporary archive of the given image's filesystem layer. +// The archive is stored on disk and will be automatically deleted as soon as has been read. +// If output is not nil, a human-readable progress bar will be written to it. +// FIXME: does this belong in Graph? How about MktempFile, let the caller use it for archives? +func (graph *Graph) TempLayerArchive(id string, compression archive.Compression, sf *utils.StreamFormatter, output io.Writer) (*archive.TempArchive, error) { + image, err := graph.Get(id) + if err != nil { + return nil, err + } + tmp, err := graph.Mktemp("") + if err != nil { + return nil, err + } + a, err := image.TarLayer() + if err != nil { + return nil, err + } + progress := utils.ProgressReader(a, 0, output, sf, false, utils.TruncateID(id), "Buffering to disk") + defer progress.Close() + return archive.NewTempArchive(progress, tmp) +} + +// Mktemp creates a temporary sub-directory inside the graph's filesystem. +func (graph *Graph) Mktemp(id string) (string, error) { + dir := path.Join(graph.Root, "_tmp", utils.GenerateRandomID()) + if err := os.MkdirAll(dir, 0700); err != nil { + return "", err + } + return dir, nil +} + +// setupInitLayer populates a directory with mountpoints suitable +// for bind-mounting dockerinit into the container. The mountpoint is simply an +// empty file at /.dockerinit +// +// This extra layer is used by all containers as the top-most ro layer. It protects +// the container from unwanted side-effects on the rw layer. +func SetupInitLayer(initLayer string) error { + for pth, typ := range map[string]string{ + "/dev/pts": "dir", + "/dev/shm": "dir", + "/proc": "dir", + "/sys": "dir", + "/.dockerinit": "file", + "/.dockerenv": "file", + "/etc/resolv.conf": "file", + "/etc/hosts": "file", + "/etc/hostname": "file", + "/dev/console": "file", + // "var/run": "dir", + // "var/lock": "dir", + } { + parts := strings.Split(pth, "/") + prev := "/" + for _, p := range parts[1:] { + prev = path.Join(prev, p) + syscall.Unlink(path.Join(initLayer, prev)) + } + + if _, err := os.Stat(path.Join(initLayer, pth)); err != nil { + if os.IsNotExist(err) { + switch typ { + case "dir": + if err := os.MkdirAll(path.Join(initLayer, pth), 0755); err != nil { + return err + } + case "file": + if err := os.MkdirAll(path.Join(initLayer, path.Dir(pth)), 0755); err != nil { + return err + } + f, err := os.OpenFile(path.Join(initLayer, pth), os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } else { + return err + } + } + } + + // Layer is ready to use, if it wasn't before. + return nil +} + +// Check if given error is "not empty". 
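+// It unwraps *os.PathError and *os.LinkError and then matches on the error text.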
+// Note: this is the way golang does it internally with os.IsNotExists. +func isNotEmpty(err error) bool { + switch pe := err.(type) { + case nil: + return false + case *os.PathError: + err = pe.Err + case *os.LinkError: + err = pe.Err + } + return strings.Contains(err.Error(), " not empty") +} + +// Delete atomically removes an image from the graph. +func (graph *Graph) Delete(name string) error { + id, err := graph.idIndex.Get(name) + if err != nil { + return err + } + tmp, err := graph.Mktemp("") + if err != nil { + return err + } + graph.idIndex.Delete(id) + err = os.Rename(graph.ImageRoot(id), tmp) + if err != nil { + return err + } + // Remove rootfs data from the driver + graph.driver.Remove(id) + // Remove the trashed image directory + return os.RemoveAll(tmp) +} + +// Map returns a list of all images in the graph, addressable by ID. +func (graph *Graph) Map() (map[string]*image.Image, error) { + images := make(map[string]*image.Image) + err := graph.walkAll(func(image *image.Image) { + images[image.ID] = image + }) + if err != nil { + return nil, err + } + return images, nil +} + +// walkAll iterates over each image in the graph, and passes it to a handler. +// The walking order is undetermined. +func (graph *Graph) walkAll(handler func(*image.Image)) error { + files, err := ioutil.ReadDir(graph.Root) + if err != nil { + return err + } + for _, st := range files { + if img, err := graph.Get(st.Name()); err != nil { + // Skip image + continue + } else if handler != nil { + handler(img) + } + } + return nil +} + +// ByParent returns a lookup table of images by their parent. +// If an image of id ID has 3 children images, then the value for key ID +// will be a list of 3 images. +// If an image has no children, it will not have an entry in the table. +func (graph *Graph) ByParent() (map[string][]*image.Image, error) { + byParent := make(map[string][]*image.Image) + err := graph.walkAll(func(img *image.Image) { + parent, err := graph.Get(img.Parent) + if err != nil { + return + } + if children, exists := byParent[parent.ID]; exists { + byParent[parent.ID] = append(children, img) + } else { + byParent[parent.ID] = []*image.Image{img} + } + }) + return byParent, err +} + +// Heads returns all heads in the graph, keyed by id. +// A head is an image which is not the parent of another image in the graph. +func (graph *Graph) Heads() (map[string]*image.Image, error) { + heads := make(map[string]*image.Image) + byParent, err := graph.ByParent() + if err != nil { + return nil, err + } + err = graph.walkAll(func(image *image.Image) { + // If it's not in the byParent lookup table, then + // it's not a parent -> so it's a head! 
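+		// (i.e. no other image in the graph declares it as its Parent)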
+ if _, exists := byParent[image.ID]; !exists { + heads[image.ID] = image + } + }) + return heads, err +} + +func (graph *Graph) ImageRoot(id string) string { + return path.Join(graph.Root, id) +} + +func (graph *Graph) Driver() graphdriver.Driver { + return graph.driver +} diff --git a/graph/tags.go b/graph/tags.go new file mode 100644 index 0000000000..524e1a1f9d --- /dev/null +++ b/graph/tags.go @@ -0,0 +1,235 @@ +package graph + +import ( + "encoding/json" + "fmt" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" +) + +const DEFAULTTAG = "latest" + +type TagStore struct { + path string + graph *Graph + Repositories map[string]Repository +} + +type Repository map[string]string + +func NewTagStore(path string, graph *Graph) (*TagStore, error) { + abspath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + store := &TagStore{ + path: abspath, + graph: graph, + Repositories: make(map[string]Repository), + } + // Load the json file if it exists, otherwise create it. + if err := store.Reload(); os.IsNotExist(err) { + if err := store.Save(); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return store, nil +} + +func (store *TagStore) Save() error { + // Store the json ball + jsonData, err := json.Marshal(store) + if err != nil { + return err + } + if err := ioutil.WriteFile(store.path, jsonData, 0600); err != nil { + return err + } + return nil +} + +func (store *TagStore) Reload() error { + jsonData, err := ioutil.ReadFile(store.path) + if err != nil { + return err + } + if err := json.Unmarshal(jsonData, store); err != nil { + return err + } + return nil +} + +func (store *TagStore) LookupImage(name string) (*image.Image, error) { + // FIXME: standardize on returning nil when the image doesn't exist, and err for everything else + // (so we can pass all errors here) + repos, tag := utils.ParseRepositoryTag(name) + if tag == "" { + tag = DEFAULTTAG + } + img, err := store.GetImage(repos, tag) + if err != nil { + return nil, err + } else if img == nil { + if img, err = store.graph.Get(name); err != nil { + return nil, err + } + } + return img, nil +} + +// Return a reverse-lookup table of all the names which refer to each image +// Eg. 
{"43b5f19b10584": {"base:latest", "base:v1"}} +func (store *TagStore) ByID() map[string][]string { + byID := make(map[string][]string) + for repoName, repository := range store.Repositories { + for tag, id := range repository { + name := repoName + ":" + tag + if _, exists := byID[id]; !exists { + byID[id] = []string{name} + } else { + byID[id] = append(byID[id], name) + sort.Strings(byID[id]) + } + } + } + return byID +} + +func (store *TagStore) ImageName(id string) string { + if names, exists := store.ByID()[id]; exists && len(names) > 0 { + return names[0] + } + return utils.TruncateID(id) +} + +func (store *TagStore) DeleteAll(id string) error { + names, exists := store.ByID()[id] + if !exists || len(names) == 0 { + return nil + } + for _, name := range names { + if strings.Contains(name, ":") { + nameParts := strings.Split(name, ":") + if _, err := store.Delete(nameParts[0], nameParts[1]); err != nil { + return err + } + } else { + if _, err := store.Delete(name, ""); err != nil { + return err + } + } + } + return nil +} + +func (store *TagStore) Delete(repoName, tag string) (bool, error) { + deleted := false + if err := store.Reload(); err != nil { + return false, err + } + if r, exists := store.Repositories[repoName]; exists { + if tag != "" { + if _, exists2 := r[tag]; exists2 { + delete(r, tag) + if len(r) == 0 { + delete(store.Repositories, repoName) + } + deleted = true + } else { + return false, fmt.Errorf("No such tag: %s:%s", repoName, tag) + } + } else { + delete(store.Repositories, repoName) + deleted = true + } + } else { + fmt.Errorf("No such repository: %s", repoName) + } + return deleted, store.Save() +} + +func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { + img, err := store.LookupImage(imageName) + if err != nil { + return err + } + if tag == "" { + tag = DEFAULTTAG + } + if err := validateRepoName(repoName); err != nil { + return err + } + if err := validateTagName(tag); err != nil { + return err + } + if err := store.Reload(); err != nil { + return err + } + var repo Repository + if r, exists := store.Repositories[repoName]; exists { + repo = r + } else { + repo = make(map[string]string) + if old, exists := store.Repositories[repoName]; exists && !force { + return fmt.Errorf("Conflict: Tag %s:%s is already set to %s", repoName, tag, old) + } + store.Repositories[repoName] = repo + } + repo[tag] = img.ID + return store.Save() +} + +func (store *TagStore) Get(repoName string) (Repository, error) { + if err := store.Reload(); err != nil { + return nil, err + } + if r, exists := store.Repositories[repoName]; exists { + return r, nil + } + return nil, nil +} + +func (store *TagStore) GetImage(repoName, tagOrID string) (*image.Image, error) { + repo, err := store.Get(repoName) + if err != nil { + return nil, err + } else if repo == nil { + return nil, nil + } + if revision, exists := repo[tagOrID]; exists { + return store.graph.Get(revision) + } + // If no matching tag is found, search through images for a matching image id + for _, revision := range repo { + if strings.HasPrefix(revision, tagOrID) { + return store.graph.Get(revision) + } + } + return nil, nil +} + +// Validate the name of a repository +func validateRepoName(name string) error { + if name == "" { + return fmt.Errorf("Repository name can't be empty") + } + return nil +} + +// Validate the name of a tag +func validateTagName(name string) error { + if name == "" { + return fmt.Errorf("Tag name can't be empty") + } + if strings.Contains(name, "/") || strings.Contains(name, ":") 
{ + return fmt.Errorf("Illegal tag name: %s", name) + } + return nil +} diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go new file mode 100644 index 0000000000..153f94db3d --- /dev/null +++ b/graph/tags_unit_test.go @@ -0,0 +1,110 @@ +package graph + +import ( + "bytes" + "github.com/dotcloud/docker/graphdriver" + _ "github.com/dotcloud/docker/graphdriver/vfs" // import the vfs driver so it is used in the tests + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "io" + "os" + "path" + "testing" +) + +const ( + testImageName = "myapp" + testImageID = "foo" +) + +func fakeTar() (io.Reader, error) { + content := []byte("Hello world!\n") + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { + hdr := new(tar.Header) + hdr.Size = int64(len(content)) + hdr.Name = name + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + tw.Write([]byte(content)) + } + tw.Close() + return buf, nil +} + +func mkTestTagStore(root string, t *testing.T) *TagStore { + driver, err := graphdriver.New(root) + if err != nil { + t.Fatal(err) + } + graph, err := NewGraph(root, driver) + if err != nil { + t.Fatal(err) + } + store, err := NewTagStore(path.Join(root, "tags"), graph) + if err != nil { + t.Fatal(err) + } + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img := &image.Image{ID: testImageID} + // FIXME: this fails on Darwin with: + // tags_unit_test.go:36: mkdir /var/folders/7g/b3ydb5gx4t94ndr_cljffbt80000gq/T/docker-test569b-tRunner-075013689/vfs/dir/foo/etc/postgres: permission denied + if err := graph.Register(nil, archive, img); err != nil { + t.Fatal(err) + } + if err := store.Set(testImageName, "", testImageID, false); err != nil { + t.Fatal(err) + } + return store +} + +func TestLookupImage(t *testing.T) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + store := mkTestTagStore(tmp, t) + defer store.graph.driver.Cleanup() + + if img, err := store.LookupImage(testImageName); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + if img, err := store.LookupImage(testImageName + ":" + DEFAULTTAG); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + "fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage("fail:fail"); err == nil { + t.Errorf("Expected error, none found") + } else if img != nil { + t.Errorf("Expected 0 image, 1 found") + } + + if img, err := store.LookupImage(testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } + + if img, err := store.LookupImage(testImageName + ":" + testImageID); err != nil { + t.Fatal(err) + } else if img == nil { + t.Errorf("Expected 1 image, none found") + } +} diff --git a/integration/graph_test.go b/integration/graph_test.go index 4fd612b5ac..e575a252f3 100644 --- a/integration/graph_test.go +++ b/integration/graph_test.go @@ -2,9 +2,9 @@ package docker import ( "errors" - "github.com/dotcloud/docker" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/dockerversion" + "github.com/dotcloud/docker/graph" 
"github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/utils" @@ -25,7 +25,7 @@ func TestMount(t *testing.T) { if err != nil { t.Fatal(err) } - image, err := graph.Create(archive, nil, "Testing", "", nil) + image, err := graph.Create(archive, "", "", "Testing", "", nil, nil) if err != nil { t.Fatal(err) } @@ -97,7 +97,7 @@ func TestGraphCreate(t *testing.T) { if err != nil { t.Fatal(err) } - img, err := graph.Create(archive, nil, "Testing", "", nil) + img, err := graph.Create(archive, "", "", "Testing", "", nil, nil) if err != nil { t.Fatal(err) } @@ -165,12 +165,12 @@ func TestDeletePrefix(t *testing.T) { assertNImages(graph, t, 0) } -func createTestImage(graph *docker.Graph, t *testing.T) *image.Image { +func createTestImage(graph *graph.Graph, t *testing.T) *image.Image { archive, err := fakeTar() if err != nil { t.Fatal(err) } - img, err := graph.Create(archive, nil, "Test image", "", nil) + img, err := graph.Create(archive, "", "", "Test image", "", nil, nil) if err != nil { t.Fatal(err) } @@ -185,7 +185,7 @@ func TestDelete(t *testing.T) { t.Fatal(err) } assertNImages(graph, t, 0) - img, err := graph.Create(archive, nil, "Bla bla", "", nil) + img, err := graph.Create(archive, "", "", "Bla bla", "", nil, nil) if err != nil { t.Fatal(err) } @@ -200,7 +200,7 @@ func TestDelete(t *testing.T) { t.Fatal(err) } // Test 2 create (same name) / 1 delete - img1, err := graph.Create(archive, nil, "Testing", "", nil) + img1, err := graph.Create(archive, "", "", "Testing", "", nil, nil) if err != nil { t.Fatal(err) } @@ -208,7 +208,7 @@ func TestDelete(t *testing.T) { if err != nil { t.Fatal(err) } - if _, err = graph.Create(archive, nil, "Testing", "", nil); err != nil { + if _, err = graph.Create(archive, "", "", "Testing", "", nil, nil); err != nil { t.Fatal(err) } assertNImages(graph, t, 2) @@ -280,7 +280,7 @@ func TestByParent(t *testing.T) { * HELPER FUNCTIONS */ -func assertNImages(graph *docker.Graph, t *testing.T, n int) { +func assertNImages(graph *graph.Graph, t *testing.T, n int) { if images, err := graph.Map(); err != nil { t.Fatal(err) } else if actualN := len(images); actualN != n { @@ -288,7 +288,7 @@ func assertNImages(graph *docker.Graph, t *testing.T, n int) { } } -func tempGraph(t *testing.T) (*docker.Graph, graphdriver.Driver) { +func tempGraph(t *testing.T) (*graph.Graph, graphdriver.Driver) { tmp, err := ioutil.TempDir("", "docker-graph-") if err != nil { t.Fatal(err) @@ -297,14 +297,14 @@ func tempGraph(t *testing.T) (*docker.Graph, graphdriver.Driver) { if err != nil { t.Fatal(err) } - graph, err := docker.NewGraph(tmp, driver) + graph, err := graph.NewGraph(tmp, driver) if err != nil { t.Fatal(err) } return graph, driver } -func nukeGraph(graph *docker.Graph) { +func nukeGraph(graph *graph.Graph) { graph.Driver().Cleanup() os.RemoveAll(graph.Root) } diff --git a/runtime.go b/runtime.go index 81bc9cbded..2608701b9b 100644 --- a/runtime.go +++ b/runtime.go @@ -10,6 +10,7 @@ import ( "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/execdriver/lxc" "github.com/dotcloud/docker/execdriver/native" + "github.com/dotcloud/docker/graph" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/graphdriver/aufs" _ "github.com/dotcloud/docker/graphdriver/btrfs" @@ -48,11 +49,11 @@ type Runtime struct { repository string sysInitPath string containers *list.List - graph *Graph - repositories *TagStore + graph *graph.Graph + repositories *graph.TagStore idIndex *utils.TruncIndex sysInfo *sysinfo.SysInfo - 
volumes *Graph + volumes *graph.Graph srv *Server eng *engine.Engine config *daemonconfig.Config @@ -486,7 +487,7 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe } defer runtime.driver.Put(initID) - if err := setupInitLayer(initPath); err != nil { + if err := graph.SetupInitLayer(initPath); err != nil { return nil, nil, err } @@ -555,7 +556,16 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a defer rwTar.Close() // Create a new image from the container's base layers + a new layer from container changes - img, err := runtime.graph.Create(rwTar, container, comment, author, config) + var ( + containerID, containerImage string + containerConfig *runconfig.Config + ) + if container != nil { + containerID = container.ID + containerImage = container.Image + containerConfig = container.Config + } + img, err := runtime.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config) if err != nil { return nil, err } @@ -654,13 +664,13 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (* if ad, ok := driver.(*aufs.Driver); ok { utils.Debugf("Migrating existing containers") - if err := ad.Migrate(config.Root, setupInitLayer); err != nil { + if err := ad.Migrate(config.Root, graph.SetupInitLayer); err != nil { return nil, err } } utils.Debugf("Creating images graph") - g, err := NewGraph(path.Join(config.Root, "graph"), driver) + g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver) if err != nil { return nil, err } @@ -672,12 +682,12 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (* return nil, err } utils.Debugf("Creating volumes graph") - volumes, err := NewGraph(path.Join(config.Root, "volumes"), volumesDriver) + volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver) if err != nil { return nil, err } utils.Debugf("Creating repository list") - repositories, err := NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g) + repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g) if err != nil { return nil, fmt.Errorf("Couldn't create Tag store: %s", err) } @@ -878,7 +888,7 @@ func (runtime *Runtime) Nuke() error { // which need direct access to runtime.graph. // Once the tests switch to using engine and jobs, this method // can go away. 
-func (runtime *Runtime) Graph() *Graph { +func (runtime *Runtime) Graph() *graph.Graph { return runtime.graph } diff --git a/server.go b/server.go index 37402ee502..5c28b262dc 100644 --- a/server.go +++ b/server.go @@ -8,6 +8,7 @@ import ( "github.com/dotcloud/docker/daemonconfig" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/graph" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/registry" @@ -334,7 +335,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { } // write repositories - rootRepoMap := map[string]Repository{} + rootRepoMap := map[string]graph.Repository{} rootRepoMap[name] = rootRepo rootRepoJson, _ := json.Marshal(rootRepoMap) @@ -547,7 +548,7 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status { repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) if err == nil { - repositories := map[string]Repository{} + repositories := map[string]graph.Repository{} if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { return job.Error(err) } @@ -1617,7 +1618,7 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status { defer progressReader.Close() archive = progressReader } - img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil) + img, err := srv.runtime.graph.Create(archive, "", "", "Imported from "+src, "", nil, nil) if err != nil { return job.Error(err) } @@ -1664,7 +1665,7 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { if srv.runtime.graph.IsNotExist(err) { _, tag := utils.ParseRepositoryTag(config.Image) if tag == "" { - tag = DEFAULTTAG + tag = graph.DEFAULTTAG } return job.Errorf("No such image: %s (tag: %s)", config.Image, tag) } @@ -1837,7 +1838,7 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo repoName, tag = utils.ParseRepositoryTag(name) if tag == "" { - tag = DEFAULTTAG + tag = graph.DEFAULTTAG } img, err := srv.runtime.repositories.LookupImage(name) diff --git a/tags.go b/tags.go deleted file mode 100644 index 27e19cd671..0000000000 --- a/tags.go +++ /dev/null @@ -1,235 +0,0 @@ -package docker - -import ( - "encoding/json" - "fmt" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" -) - -const DEFAULTTAG = "latest" - -type TagStore struct { - path string - graph *Graph - Repositories map[string]Repository -} - -type Repository map[string]string - -func NewTagStore(path string, graph *Graph) (*TagStore, error) { - abspath, err := filepath.Abs(path) - if err != nil { - return nil, err - } - store := &TagStore{ - path: abspath, - graph: graph, - Repositories: make(map[string]Repository), - } - // Load the json file if it exists, otherwise create it. 
- if err := store.Reload(); os.IsNotExist(err) { - if err := store.Save(); err != nil { - return nil, err - } - } else if err != nil { - return nil, err - } - return store, nil -} - -func (store *TagStore) Save() error { - // Store the json ball - jsonData, err := json.Marshal(store) - if err != nil { - return err - } - if err := ioutil.WriteFile(store.path, jsonData, 0600); err != nil { - return err - } - return nil -} - -func (store *TagStore) Reload() error { - jsonData, err := ioutil.ReadFile(store.path) - if err != nil { - return err - } - if err := json.Unmarshal(jsonData, store); err != nil { - return err - } - return nil -} - -func (store *TagStore) LookupImage(name string) (*image.Image, error) { - // FIXME: standardize on returning nil when the image doesn't exist, and err for everything else - // (so we can pass all errors here) - repos, tag := utils.ParseRepositoryTag(name) - if tag == "" { - tag = DEFAULTTAG - } - img, err := store.GetImage(repos, tag) - if err != nil { - return nil, err - } else if img == nil { - if img, err = store.graph.Get(name); err != nil { - return nil, err - } - } - return img, nil -} - -// Return a reverse-lookup table of all the names which refer to each image -// Eg. {"43b5f19b10584": {"base:latest", "base:v1"}} -func (store *TagStore) ByID() map[string][]string { - byID := make(map[string][]string) - for repoName, repository := range store.Repositories { - for tag, id := range repository { - name := repoName + ":" + tag - if _, exists := byID[id]; !exists { - byID[id] = []string{name} - } else { - byID[id] = append(byID[id], name) - sort.Strings(byID[id]) - } - } - } - return byID -} - -func (store *TagStore) ImageName(id string) string { - if names, exists := store.ByID()[id]; exists && len(names) > 0 { - return names[0] - } - return utils.TruncateID(id) -} - -func (store *TagStore) DeleteAll(id string) error { - names, exists := store.ByID()[id] - if !exists || len(names) == 0 { - return nil - } - for _, name := range names { - if strings.Contains(name, ":") { - nameParts := strings.Split(name, ":") - if _, err := store.Delete(nameParts[0], nameParts[1]); err != nil { - return err - } - } else { - if _, err := store.Delete(name, ""); err != nil { - return err - } - } - } - return nil -} - -func (store *TagStore) Delete(repoName, tag string) (bool, error) { - deleted := false - if err := store.Reload(); err != nil { - return false, err - } - if r, exists := store.Repositories[repoName]; exists { - if tag != "" { - if _, exists2 := r[tag]; exists2 { - delete(r, tag) - if len(r) == 0 { - delete(store.Repositories, repoName) - } - deleted = true - } else { - return false, fmt.Errorf("No such tag: %s:%s", repoName, tag) - } - } else { - delete(store.Repositories, repoName) - deleted = true - } - } else { - fmt.Errorf("No such repository: %s", repoName) - } - return deleted, store.Save() -} - -func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { - img, err := store.LookupImage(imageName) - if err != nil { - return err - } - if tag == "" { - tag = DEFAULTTAG - } - if err := validateRepoName(repoName); err != nil { - return err - } - if err := validateTagName(tag); err != nil { - return err - } - if err := store.Reload(); err != nil { - return err - } - var repo Repository - if r, exists := store.Repositories[repoName]; exists { - repo = r - } else { - repo = make(map[string]string) - if old, exists := store.Repositories[repoName]; exists && !force { - return fmt.Errorf("Conflict: Tag %s:%s is already set to %s", repoName, tag, 
old) - } - store.Repositories[repoName] = repo - } - repo[tag] = img.ID - return store.Save() -} - -func (store *TagStore) Get(repoName string) (Repository, error) { - if err := store.Reload(); err != nil { - return nil, err - } - if r, exists := store.Repositories[repoName]; exists { - return r, nil - } - return nil, nil -} - -func (store *TagStore) GetImage(repoName, tagOrID string) (*image.Image, error) { - repo, err := store.Get(repoName) - if err != nil { - return nil, err - } else if repo == nil { - return nil, nil - } - if revision, exists := repo[tagOrID]; exists { - return store.graph.Get(revision) - } - // If no matching tag is found, search through images for a matching image id - for _, revision := range repo { - if strings.HasPrefix(revision, tagOrID) { - return store.graph.Get(revision) - } - } - return nil, nil -} - -// Validate the name of a repository -func validateRepoName(name string) error { - if name == "" { - return fmt.Errorf("Repository name can't be empty") - } - return nil -} - -// Validate the name of a tag -func validateTagName(name string) error { - if name == "" { - return fmt.Errorf("Tag name can't be empty") - } - if strings.Contains(name, "/") || strings.Contains(name, ":") { - return fmt.Errorf("Illegal tag name: %s", name) - } - return nil -} diff --git a/tags_unit_test.go b/tags_unit_test.go deleted file mode 100644 index 8ee913f527..0000000000 --- a/tags_unit_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/utils" - "os" - "path" - "testing" -) - -const ( - testImageName = "myapp" - testImageID = "foo" -) - -func mkTestTagStore(root string, t *testing.T) *TagStore { - driver, err := graphdriver.New(root) - if err != nil { - t.Fatal(err) - } - graph, err := NewGraph(root, driver) - if err != nil { - t.Fatal(err) - } - store, err := NewTagStore(path.Join(root, "tags"), graph) - if err != nil { - t.Fatal(err) - } - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - img := &image.Image{ID: testImageID} - // FIXME: this fails on Darwin with: - // tags_unit_test.go:36: mkdir /var/folders/7g/b3ydb5gx4t94ndr_cljffbt80000gq/T/docker-test569b-tRunner-075013689/vfs/dir/foo/etc/postgres: permission denied - if err := graph.Register(nil, archive, img); err != nil { - t.Fatal(err) - } - if err := store.Set(testImageName, "", testImageID, false); err != nil { - t.Fatal(err) - } - return store -} - -func TestLookupImage(t *testing.T) { - tmp, err := utils.TestDirectory("") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - store := mkTestTagStore(tmp, t) - defer store.graph.driver.Cleanup() - - if img, err := store.LookupImage(testImageName); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } - if img, err := store.LookupImage(testImageName + ":" + DEFAULTTAG); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } - - if img, err := store.LookupImage(testImageName + ":" + "fail"); err == nil { - t.Errorf("Expected error, none found") - } else if img != nil { - t.Errorf("Expected 0 image, 1 found") - } - - if img, err := store.LookupImage("fail:fail"); err == nil { - t.Errorf("Expected error, none found") - } else if img != nil { - t.Errorf("Expected 0 image, 1 found") - } - - if img, err := store.LookupImage(testImageID); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } - 
- if img, err := store.LookupImage(testImageName + ":" + testImageID); err != nil { - t.Fatal(err) - } else if img == nil { - t.Errorf("Expected 1 image, none found") - } -} diff --git a/utils_test.go b/utils_test.go deleted file mode 100644 index 31fa12b6ad..0000000000 --- a/utils_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package docker - -import ( - "bytes" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - "io" -) - -func fakeTar() (io.Reader, error) { - content := []byte("Hello world!\n") - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} { - hdr := new(tar.Header) - hdr.Size = int64(len(content)) - hdr.Name = name - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - tw.Write([]byte(content)) - } - tw.Close() - return buf, nil -} diff --git a/volumes.go b/volumes.go index 9f76e3698b..8684ff4e59 100644 --- a/volumes.go +++ b/volumes.go @@ -216,7 +216,7 @@ func createVolumes(container *Container) error { return err } - volumesDriver := container.runtime.volumes.driver + volumesDriver := container.runtime.volumes.Driver() // Create the requested volumes if they don't exist for volPath := range container.Config.Volumes { volPath = filepath.Clean(volPath) @@ -246,7 +246,7 @@ func createVolumes(container *Container) error { // Do not pass a container as the parameter for the volume creation. // The graph driver using the container's information ( Image ) to // create the parent. - c, err := container.runtime.volumes.Create(nil, nil, "", "", nil) + c, err := container.runtime.volumes.Create(nil, "", "", "", "", nil, nil) if err != nil { return err } -- cgit v1.2.1 From 36c3614fdde079fad178390f651945fba884668a Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 7 Mar 2014 18:42:29 -0800 Subject: Move runtime and container into sub pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- buildfile.go | 17 +- container.go | 1143 --------------------------------------- container_unit_test.go | 145 ----- integration/api_test.go | 4 +- integration/commands_test.go | 10 +- integration/runtime_test.go | 14 +- integration/utils_test.go | 15 +- runtime.go | 918 ------------------------------- runtime/container.go | 1161 ++++++++++++++++++++++++++++++++++++++++ runtime/container_unit_test.go | 145 +++++ runtime/runtime.go | 953 +++++++++++++++++++++++++++++++++ runtime/server.go | 9 + runtime/sorter.go | 25 + runtime/state.go | 81 +++ runtime/utils.go | 44 ++ runtime/volumes.go | 332 ++++++++++++ server.go | 181 +++---- sorter.go | 25 - state.go | 81 --- utils.go | 40 -- volumes.go | 332 ------------ 21 files changed, 2870 insertions(+), 2805 deletions(-) delete mode 100644 container.go delete mode 100644 container_unit_test.go delete mode 100644 runtime.go create mode 100644 runtime/container.go create mode 100644 runtime/container_unit_test.go create mode 100644 runtime/runtime.go create mode 100644 runtime/server.go create mode 100644 runtime/sorter.go create mode 100644 runtime/state.go create mode 100644 runtime/utils.go create mode 100644 runtime/volumes.go delete mode 100644 sorter.go delete mode 100644 state.go delete mode 100644 volumes.go diff --git a/buildfile.go b/buildfile.go index 6fae6a24a5..160db4d434 100644 --- a/buildfile.go +++ b/buildfile.go @@ -10,6 +10,7 @@ import ( "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" + 
"github.com/dotcloud/docker/runtime" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -34,7 +35,7 @@ type BuildFile interface { } type buildFile struct { - runtime *Runtime + runtime *runtime.Runtime srv *Server image string @@ -74,9 +75,9 @@ func (b *buildFile) clearTmp(containers map[string]struct{}) { } func (b *buildFile) CmdFrom(name string) error { - image, err := b.runtime.repositories.LookupImage(name) + image, err := b.runtime.Repositories().LookupImage(name) if err != nil { - if b.runtime.graph.IsNotExist(err) { + if b.runtime.Graph().IsNotExist(err) { remote, tag := utils.ParseRepositoryTag(name) pullRegistryAuth := b.authConfig if len(b.configFile.Configs) > 0 { @@ -96,7 +97,7 @@ func (b *buildFile) CmdFrom(name string) error { if err := job.Run(); err != nil { return err } - image, err = b.runtime.repositories.LookupImage(name) + image, err = b.runtime.Repositories().LookupImage(name) if err != nil { return err } @@ -110,7 +111,7 @@ func (b *buildFile) CmdFrom(name string) error { b.config = image.Config } if b.config.Env == nil || len(b.config.Env) == 0 { - b.config.Env = append(b.config.Env, "HOME=/", "PATH="+defaultPathEnv) + b.config.Env = append(b.config.Env, "HOME=/", "PATH="+runtime.DefaultPathEnv) } // Process ONBUILD triggers if they exist if nTriggers := len(b.config.OnBuild); nTriggers != 0 { @@ -371,7 +372,7 @@ func (b *buildFile) checkPathForAddition(orig string) error { return nil } -func (b *buildFile) addContext(container *Container, orig, dest string, remote bool) error { +func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error { var ( origPath = path.Join(b.contextPath, orig) destPath = path.Join(container.BasefsPath(), dest) @@ -604,7 +605,7 @@ func (sf *StderrFormater) Write(buf []byte) (int, error) { return len(buf), err } -func (b *buildFile) create() (*Container, error) { +func (b *buildFile) create() (*runtime.Container, error) { if b.image == "" { return nil, fmt.Errorf("Please provide a source image with `from` prior to run") } @@ -625,7 +626,7 @@ func (b *buildFile) create() (*Container, error) { return c, nil } -func (b *buildFile) run(c *Container) error { +func (b *buildFile) run(c *runtime.Container) error { var errCh chan error if b.verbose { diff --git a/container.go b/container.go deleted file mode 100644 index 9c1a28c98a..0000000000 --- a/container.go +++ /dev/null @@ -1,1143 +0,0 @@ -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/links" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "log" - "os" - "path" - "strings" - "sync" - "syscall" - "time" -) - -const defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -var ( - ErrNotATTY = errors.New("The PTY is not a file") - ErrNoTTY = errors.New("No PTY found") - ErrContainerStart = errors.New("The container failed to start. Unknown error") - ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.") -) - -type Container struct { - sync.Mutex - root string // Path to the "home" of the container, including metadata. 
- basefs string // Path to the graphdriver mountpoint - - ID string - - Created time.Time - - Path string - Args []string - - Config *runconfig.Config - State State - Image string - - NetworkSettings *NetworkSettings - - ResolvConfPath string - HostnamePath string - HostsPath string - Name string - Driver string - ExecDriver string - - command *execdriver.Command - stdout *utils.WriteBroadcaster - stderr *utils.WriteBroadcaster - stdin io.ReadCloser - stdinPipe io.WriteCloser - - runtime *Runtime - - waitLock chan struct{} - Volumes map[string]string - // Store rw/ro in a separate structure to preserve reverse-compatibility on-disk. - // Easier than migrating older container configs :) - VolumesRW map[string]bool - hostConfig *runconfig.HostConfig - - activeLinks map[string]*links.Link -} - -// FIXME: move deprecated port stuff to nat to clean up the core. -type PortMapping map[string]string // Deprecated - -type NetworkSettings struct { - IPAddress string - IPPrefixLen int - Gateway string - Bridge string - PortMapping map[string]PortMapping // Deprecated - Ports nat.PortMap -} - -func (settings *NetworkSettings) PortMappingAPI() *engine.Table { - var outs = engine.NewTable("", 0) - for port, bindings := range settings.Ports { - p, _ := nat.ParsePort(port.Port()) - if len(bindings) == 0 { - out := &engine.Env{} - out.SetInt("PublicPort", p) - out.Set("Type", port.Proto()) - outs.Add(out) - continue - } - for _, binding := range bindings { - out := &engine.Env{} - h, _ := nat.ParsePort(binding.HostPort) - out.SetInt("PrivatePort", p) - out.SetInt("PublicPort", h) - out.Set("Type", port.Proto()) - out.Set("IP", binding.HostIp) - outs.Add(out) - } - } - return outs -} - -// Inject the io.Reader at the given path. Note: do not close the reader -func (container *Container) Inject(file io.Reader, pth string) error { - if err := container.Mount(); err != nil { - return fmt.Errorf("inject: error mounting container %s: %s", container.ID, err) - } - defer container.Unmount() - - // Return error if path exists - destPath := path.Join(container.basefs, pth) - if _, err := os.Stat(destPath); err == nil { - // Since err is nil, the path could be stat'd and it exists - return fmt.Errorf("%s exists", pth) - } else if !os.IsNotExist(err) { - // Expect err might be that the file doesn't exist, so - // if it's some other error, return that. 
- - return err - } - - // Make sure the directory exists - if err := os.MkdirAll(path.Join(container.basefs, path.Dir(pth)), 0755); err != nil { - return err - } - - dest, err := os.Create(destPath) - if err != nil { - return err - } - defer dest.Close() - - if _, err := io.Copy(dest, file); err != nil { - return err - } - return nil -} - -func (container *Container) When() time.Time { - return container.Created -} - -func (container *Container) FromDisk() error { - data, err := ioutil.ReadFile(container.jsonPath()) - if err != nil { - return err - } - // Load container settings - // udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it - if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { - return err - } - return container.readHostConfig() -} - -func (container *Container) ToDisk() (err error) { - data, err := json.Marshal(container) - if err != nil { - return - } - err = ioutil.WriteFile(container.jsonPath(), data, 0666) - if err != nil { - return - } - return container.writeHostConfig() -} - -func (container *Container) readHostConfig() error { - container.hostConfig = &runconfig.HostConfig{} - // If the hostconfig file does not exist, do not read it. - // (We still have to initialize container.hostConfig, - // but that's OK, since we just did that above.) - _, err := os.Stat(container.hostConfigPath()) - if os.IsNotExist(err) { - return nil - } - data, err := ioutil.ReadFile(container.hostConfigPath()) - if err != nil { - return err - } - return json.Unmarshal(data, container.hostConfig) -} - -func (container *Container) writeHostConfig() (err error) { - data, err := json.Marshal(container.hostConfig) - if err != nil { - return - } - return ioutil.WriteFile(container.hostConfigPath(), data, 0666) -} - -func (container *Container) generateEnvConfig(env []string) error { - data, err := json.Marshal(env) - if err != nil { - return err - } - p, err := container.EnvConfigPath() - if err != nil { - return err - } - ioutil.WriteFile(p, data, 0600) - return nil -} - -func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { - var cStdout, cStderr io.ReadCloser - - var nJobs int - errors := make(chan error, 3) - if stdin != nil && container.Config.OpenStdin { - nJobs += 1 - if cStdin, err := container.StdinPipe(); err != nil { - errors <- err - } else { - go func() { - utils.Debugf("attach: stdin: begin") - defer utils.Debugf("attach: stdin: end") - // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr - if container.Config.StdinOnce && !container.Config.Tty { - defer cStdin.Close() - } else { - defer func() { - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - }() - } - if container.Config.Tty { - _, err = utils.CopyEscapable(cStdin, stdin) - } else { - _, err = io.Copy(cStdin, stdin) - } - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - utils.Errorf("attach: stdin: %s", err) - } - errors <- err - }() - } - } - if stdout != nil { - nJobs += 1 - if p, err := container.StdoutPipe(); err != nil { - errors <- err - } else { - cStdout = p - go func() { - utils.Debugf("attach: stdout: begin") - defer utils.Debugf("attach: stdout: end") - // If we are in StdinOnce mode, then close stdin - if container.Config.StdinOnce && stdin != nil { - defer stdin.Close() - } - if stdinCloser != nil { - defer stdinCloser.Close() - } - _, err := io.Copy(stdout, 
cStdout) - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - utils.Errorf("attach: stdout: %s", err) - } - errors <- err - }() - } - } else { - go func() { - if stdinCloser != nil { - defer stdinCloser.Close() - } - if cStdout, err := container.StdoutPipe(); err != nil { - utils.Errorf("attach: stdout pipe: %s", err) - } else { - io.Copy(&utils.NopWriter{}, cStdout) - } - }() - } - if stderr != nil { - nJobs += 1 - if p, err := container.StderrPipe(); err != nil { - errors <- err - } else { - cStderr = p - go func() { - utils.Debugf("attach: stderr: begin") - defer utils.Debugf("attach: stderr: end") - // If we are in StdinOnce mode, then close stdin - if container.Config.StdinOnce && stdin != nil { - defer stdin.Close() - } - if stdinCloser != nil { - defer stdinCloser.Close() - } - _, err := io.Copy(stderr, cStderr) - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - utils.Errorf("attach: stderr: %s", err) - } - errors <- err - }() - } - } else { - go func() { - if stdinCloser != nil { - defer stdinCloser.Close() - } - - if cStderr, err := container.StderrPipe(); err != nil { - utils.Errorf("attach: stdout pipe: %s", err) - } else { - io.Copy(&utils.NopWriter{}, cStderr) - } - }() - } - - return utils.Go(func() error { - defer func() { - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - }() - - // FIXME: how to clean up the stdin goroutine without the unwanted side effect - // of closing the passed stdin? Add an intermediary io.Pipe? - for i := 0; i < nJobs; i += 1 { - utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs) - if err := <-errors; err != nil { - utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err) - return err - } - utils.Debugf("attach: job %d completed successfully", i+1) - } - utils.Debugf("attach: all jobs completed successfully") - return nil - }) -} - -func populateCommand(c *Container) { - var ( - en *execdriver.Network - driverConfig []string - ) - - if !c.Config.NetworkDisabled { - network := c.NetworkSettings - en = &execdriver.Network{ - Gateway: network.Gateway, - Bridge: network.Bridge, - IPAddress: network.IPAddress, - IPPrefixLen: network.IPPrefixLen, - Mtu: c.runtime.config.Mtu, - } - } - - if lxcConf := c.hostConfig.LxcConf; lxcConf != nil { - for _, pair := range lxcConf { - driverConfig = append(driverConfig, fmt.Sprintf("%s = %s", pair.Key, pair.Value)) - } - } - resources := &execdriver.Resources{ - Memory: c.Config.Memory, - MemorySwap: c.Config.MemorySwap, - CpuShares: c.Config.CpuShares, - } - c.command = &execdriver.Command{ - ID: c.ID, - Privileged: c.hostConfig.Privileged, - Rootfs: c.RootfsPath(), - InitPath: "/.dockerinit", - Entrypoint: c.Path, - Arguments: c.Args, - WorkingDir: c.Config.WorkingDir, - Network: en, - Tty: c.Config.Tty, - User: c.Config.User, - Config: driverConfig, - Resources: resources, - } - c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} -} - -func (container *Container) Start() (err error) { - container.Lock() - defer container.Unlock() - - if container.State.IsRunning() { - return fmt.Errorf("The container %s is already running.", container.ID) - } - - defer func() { - if err != nil { - container.cleanup() - } - }() - - if err := container.Mount(); err != nil { - return err - } - - if container.runtime.config.DisableNetwork { - container.Config.NetworkDisabled = true - container.buildHostnameAndHostsFiles("127.0.1.1") - } else { - if err := container.allocateNetwork(); err != nil { - return err - } - 
container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress) - } - - // Make sure the config is compatible with the current kernel - if container.Config.Memory > 0 && !container.runtime.sysInfo.MemoryLimit { - log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n") - container.Config.Memory = 0 - } - if container.Config.Memory > 0 && !container.runtime.sysInfo.SwapLimit { - log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n") - container.Config.MemorySwap = -1 - } - - if container.runtime.sysInfo.IPv4ForwardingDisabled { - log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work") - } - - if err := prepareVolumesForContainer(container); err != nil { - return err - } - - // Setup environment - env := []string{ - "HOME=/", - "PATH=" + defaultPathEnv, - "HOSTNAME=" + container.Config.Hostname, - } - - if container.Config.Tty { - env = append(env, "TERM=xterm") - } - - // Init any links between the parent and children - runtime := container.runtime - - children, err := runtime.Children(container.Name) - if err != nil { - return err - } - - if len(children) > 0 { - container.activeLinks = make(map[string]*links.Link, len(children)) - - // If we encounter an error make sure that we rollback any network - // config and ip table changes - rollback := func() { - for _, link := range container.activeLinks { - link.Disable() - } - container.activeLinks = nil - } - - for linkAlias, child := range children { - if !child.State.IsRunning() { - return fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) - } - - link, err := links.NewLink( - container.NetworkSettings.IPAddress, - child.NetworkSettings.IPAddress, - linkAlias, - child.Config.Env, - child.Config.ExposedPorts, - runtime.eng) - - if err != nil { - rollback() - return err - } - - container.activeLinks[link.Alias()] = link - if err := link.Enable(); err != nil { - rollback() - return err - } - - for _, envVar := range link.ToEnv() { - env = append(env, envVar) - } - } - } - - // because the env on the container can override certain default values - // we need to replace the 'env' keys where they match and append anything - // else. - env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) - if err := container.generateEnvConfig(env); err != nil { - return err - } - - if container.Config.WorkingDir != "" { - container.Config.WorkingDir = path.Clean(container.Config.WorkingDir) - if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil { - return nil - } - } - - envPath, err := container.EnvConfigPath() - if err != nil { - return err - } - - if err := mountVolumesForContainer(container, envPath); err != nil { - return err - } - - populateCommand(container) - container.command.Env = env - - // Setup logging of stdout and stderr to disk - if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil { - return err - } - if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil { - return err - } - container.waitLock = make(chan struct{}) - - callbackLock := make(chan struct{}) - callback := func(command *execdriver.Command) { - container.State.SetRunning(command.Pid()) - if command.Tty { - // The callback is called after the process Start() - // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlace - // which we close here. 
- if c, ok := command.Stdout.(io.Closer); ok { - c.Close() - } - } - if err := container.ToDisk(); err != nil { - utils.Debugf("%s", err) - } - close(callbackLock) - } - - // We use a callback here instead of a goroutine and an chan for - // syncronization purposes - cErr := utils.Go(func() error { return container.monitor(callback) }) - - // Start should not return until the process is actually running - select { - case <-callbackLock: - case err := <-cErr: - return err - } - return nil -} - -func (container *Container) Run() error { - if err := container.Start(); err != nil { - return err - } - container.Wait() - return nil -} - -func (container *Container) Output() (output []byte, err error) { - pipe, err := container.StdoutPipe() - if err != nil { - return nil, err - } - defer pipe.Close() - if err := container.Start(); err != nil { - return nil, err - } - output, err = ioutil.ReadAll(pipe) - container.Wait() - return output, err -} - -// Container.StdinPipe returns a WriteCloser which can be used to feed data -// to the standard input of the container's active process. -// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser -// which can be used to retrieve the standard output (and error) generated -// by the container's active process. The output (and error) are actually -// copied and delivered to all StdoutPipe and StderrPipe consumers, using -// a kind of "broadcaster". - -func (container *Container) StdinPipe() (io.WriteCloser, error) { - return container.stdinPipe, nil -} - -func (container *Container) StdoutPipe() (io.ReadCloser, error) { - reader, writer := io.Pipe() - container.stdout.AddWriter(writer, "") - return utils.NewBufReader(reader), nil -} - -func (container *Container) StderrPipe() (io.ReadCloser, error) { - reader, writer := io.Pipe() - container.stderr.AddWriter(writer, "") - return utils.NewBufReader(reader), nil -} - -func (container *Container) buildHostnameAndHostsFiles(IP string) { - container.HostnamePath = path.Join(container.root, "hostname") - ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) - - hostsContent := []byte(` -127.0.0.1 localhost -::1 localhost ip6-localhost ip6-loopback -fe00::0 ip6-localnet -ff00::0 ip6-mcastprefix -ff02::1 ip6-allnodes -ff02::2 ip6-allrouters -`) - - container.HostsPath = path.Join(container.root, "hosts") - - if container.Config.Domainname != "" { - hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...) - } else if !container.Config.NetworkDisabled { - hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...) 
- } - - ioutil.WriteFile(container.HostsPath, hostsContent, 0644) -} - -func (container *Container) allocateNetwork() error { - if container.Config.NetworkDisabled { - return nil - } - - var ( - env *engine.Env - err error - eng = container.runtime.eng - ) - - if container.State.IsGhost() { - if container.runtime.config.DisableNetwork { - env = &engine.Env{} - } else { - currentIP := container.NetworkSettings.IPAddress - - job := eng.Job("allocate_interface", container.ID) - if currentIP != "" { - job.Setenv("RequestIP", currentIP) - } - - env, err = job.Stdout.AddEnv() - if err != nil { - return err - } - - if err := job.Run(); err != nil { - return err - } - } - } else { - job := eng.Job("allocate_interface", container.ID) - env, err = job.Stdout.AddEnv() - if err != nil { - return err - } - if err := job.Run(); err != nil { - return err - } - } - - if container.Config.PortSpecs != nil { - utils.Debugf("Migrating port mappings for container: %s", strings.Join(container.Config.PortSpecs, ", ")) - if err := migratePortMappings(container.Config, container.hostConfig); err != nil { - return err - } - container.Config.PortSpecs = nil - if err := container.writeHostConfig(); err != nil { - return err - } - } - - var ( - portSpecs = make(nat.PortSet) - bindings = make(nat.PortMap) - ) - - if !container.State.IsGhost() { - if container.Config.ExposedPorts != nil { - portSpecs = container.Config.ExposedPorts - } - if container.hostConfig.PortBindings != nil { - bindings = container.hostConfig.PortBindings - } - } else { - if container.NetworkSettings.Ports != nil { - for port, binding := range container.NetworkSettings.Ports { - portSpecs[port] = struct{}{} - bindings[port] = binding - } - } - } - - container.NetworkSettings.PortMapping = nil - - for port := range portSpecs { - binding := bindings[port] - if container.hostConfig.PublishAllPorts && len(binding) == 0 { - binding = append(binding, nat.PortBinding{}) - } - - for i := 0; i < len(binding); i++ { - b := binding[i] - - portJob := eng.Job("allocate_port", container.ID) - portJob.Setenv("HostIP", b.HostIp) - portJob.Setenv("HostPort", b.HostPort) - portJob.Setenv("Proto", port.Proto()) - portJob.Setenv("ContainerPort", port.Port()) - - portEnv, err := portJob.Stdout.AddEnv() - if err != nil { - return err - } - if err := portJob.Run(); err != nil { - eng.Job("release_interface", container.ID).Run() - return err - } - b.HostIp = portEnv.Get("HostIP") - b.HostPort = portEnv.Get("HostPort") - - binding[i] = b - } - bindings[port] = binding - } - container.writeHostConfig() - - container.NetworkSettings.Ports = bindings - - container.NetworkSettings.Bridge = env.Get("Bridge") - container.NetworkSettings.IPAddress = env.Get("IP") - container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen") - container.NetworkSettings.Gateway = env.Get("Gateway") - - return nil -} - -func (container *Container) releaseNetwork() { - if container.Config.NetworkDisabled { - return - } - eng := container.runtime.eng - - eng.Job("release_interface", container.ID).Run() - container.NetworkSettings = &NetworkSettings{} -} - -func (container *Container) monitor(callback execdriver.StartCallback) error { - var ( - err error - exitCode int - ) - - pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin) - exitCode, err = container.runtime.Run(container, pipes, callback) - if err != nil { - utils.Errorf("Error running container: %s", err) - } - - if container.runtime.srv.IsRunning() { - 
container.State.SetStopped(exitCode) - - // FIXME: there is a race condition here which causes this to fail during the unit tests. - // If another goroutine was waiting for Wait() to return before removing the container's root - // from the filesystem... At this point it may already have done so. - // This is because State.setStopped() has already been called, and has caused Wait() - // to return. - // FIXME: why are we serializing running state to disk in the first place? - //log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err) - if err := container.ToDisk(); err != nil { - utils.Errorf("Error dumping container state to disk: %s\n", err) - } - } - - // Cleanup - container.cleanup() - - // Re-create a brand new stdin pipe once the container exited - if container.Config.OpenStdin { - container.stdin, container.stdinPipe = io.Pipe() - } - - if container.runtime != nil && container.runtime.srv != nil { - container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image)) - } - - close(container.waitLock) - - return err -} - -func (container *Container) cleanup() { - container.releaseNetwork() - - // Disable all active links - if container.activeLinks != nil { - for _, link := range container.activeLinks { - link.Disable() - } - } - if container.Config.OpenStdin { - if err := container.stdin.Close(); err != nil { - utils.Errorf("%s: Error close stdin: %s", container.ID, err) - } - } - if err := container.stdout.CloseWriters(); err != nil { - utils.Errorf("%s: Error close stdout: %s", container.ID, err) - } - if err := container.stderr.CloseWriters(); err != nil { - utils.Errorf("%s: Error close stderr: %s", container.ID, err) - } - if container.command != nil && container.command.Terminal != nil { - if err := container.command.Terminal.Close(); err != nil { - utils.Errorf("%s: Error closing terminal: %s", container.ID, err) - } - } - - unmountVolumesForContainer(container) - - if err := container.Unmount(); err != nil { - log.Printf("%v: Failed to umount filesystem: %v", container.ID, err) - } -} - -func (container *Container) kill(sig int) error { - container.Lock() - defer container.Unlock() - - if !container.State.IsRunning() { - return nil - } - return container.runtime.Kill(container, sig) -} - -func (container *Container) Kill() error { - if !container.State.IsRunning() { - return nil - } - - // 1. Send SIGKILL - if err := container.kill(9); err != nil { - return err - } - - // 2. Wait for the process to die, in last resort, try to kill the process directly - if err := container.WaitTimeout(10 * time.Second); err != nil { - if container.command == nil { - return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", utils.TruncateID(container.ID)) - } - log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", "SIGKILL", utils.TruncateID(container.ID)) - if err := container.runtime.Kill(container, 9); err != nil { - return err - } - } - - container.Wait() - return nil -} - -func (container *Container) Stop(seconds int) error { - if !container.State.IsRunning() { - return nil - } - - // 1. Send a SIGTERM - if err := container.kill(15); err != nil { - utils.Debugf("Error sending kill SIGTERM: %s", err) - log.Print("Failed to send SIGTERM to the process, force killing") - if err := container.kill(9); err != nil { - return err - } - } - - // 2. 
Wait for the process to exit on its own - if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil { - log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) - // 3. If it doesn't, then send SIGKILL - if err := container.Kill(); err != nil { - return err - } - } - return nil -} - -func (container *Container) Restart(seconds int) error { - // Avoid unnecessarily unmounting and then directly mounting - // the container when the container stops and then starts - // again - if err := container.Mount(); err == nil { - defer container.Unmount() - } - - if err := container.Stop(seconds); err != nil { - return err - } - return container.Start() -} - -// Wait blocks until the container stops running, then returns its exit code. -func (container *Container) Wait() int { - <-container.waitLock - return container.State.GetExitCode() -} - -func (container *Container) Resize(h, w int) error { - return container.command.Terminal.Resize(h, w) -} - -func (container *Container) ExportRw() (archive.Archive, error) { - if err := container.Mount(); err != nil { - return nil, err - } - if container.runtime == nil { - return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID) - } - archive, err := container.runtime.Diff(container) - if err != nil { - container.Unmount() - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.Unmount() - return err - }), nil -} - -func (container *Container) Export() (archive.Archive, error) { - if err := container.Mount(); err != nil { - return nil, err - } - - archive, err := archive.Tar(container.basefs, archive.Uncompressed) - if err != nil { - container.Unmount() - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.Unmount() - return err - }), nil -} - -func (container *Container) WaitTimeout(timeout time.Duration) error { - done := make(chan bool) - go func() { - container.Wait() - done <- true - }() - - select { - case <-time.After(timeout): - return fmt.Errorf("Timed Out") - case <-done: - return nil - } -} - -func (container *Container) Mount() error { - return container.runtime.Mount(container) -} - -func (container *Container) Changes() ([]archive.Change, error) { - return container.runtime.Changes(container) -} - -func (container *Container) GetImage() (*image.Image, error) { - if container.runtime == nil { - return nil, fmt.Errorf("Can't get image of unregistered container") - } - return container.runtime.graph.Get(container.Image) -} - -func (container *Container) Unmount() error { - return container.runtime.Unmount(container) -} - -func (container *Container) logPath(name string) string { - return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name)) -} - -func (container *Container) ReadLog(name string) (io.Reader, error) { - return os.Open(container.logPath(name)) -} - -func (container *Container) hostConfigPath() string { - return path.Join(container.root, "hostconfig.json") -} - -func (container *Container) jsonPath() string { - return path.Join(container.root, "config.json") -} - -func (container *Container) EnvConfigPath() (string, error) { - p := path.Join(container.root, "config.env") - if _, err := os.Stat(p); err != nil { - if os.IsNotExist(err) { - f, err := os.Create(p) - if err != nil { - return "", err - } - f.Close() - } else { - return "", err - } - } - return p, nil -} - -// This method 
must be exported to be used from the lxc template -// This directory is only usable when the container is running -func (container *Container) RootfsPath() string { - return path.Join(container.root, "root") -} - -// This is the stand-alone version of the root fs, without any additional mounts. -// This directory is usable whenever the container is mounted (and not unmounted) -func (container *Container) BasefsPath() string { - return container.basefs -} - -func validateID(id string) error { - if id == "" { - return fmt.Errorf("Invalid empty id") - } - return nil -} - -// GetSize, return real size, virtual size -func (container *Container) GetSize() (int64, int64) { - var ( - sizeRw, sizeRootfs int64 - err error - driver = container.runtime.driver - ) - - if err := container.Mount(); err != nil { - utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err) - return sizeRw, sizeRootfs - } - defer container.Unmount() - - if differ, ok := container.runtime.driver.(graphdriver.Differ); ok { - sizeRw, err = differ.DiffSize(container.ID) - if err != nil { - utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err) - // FIXME: GetSize should return an error. Not changing it now in case - // there is a side-effect. - sizeRw = -1 - } - } else { - changes, _ := container.Changes() - if changes != nil { - sizeRw = archive.ChangesSize(container.basefs, changes) - } else { - sizeRw = -1 - } - } - - if _, err = os.Stat(container.basefs); err != nil { - if sizeRootfs, err = utils.TreeSize(container.basefs); err != nil { - sizeRootfs = -1 - } - } - return sizeRw, sizeRootfs -} - -func (container *Container) Copy(resource string) (io.ReadCloser, error) { - if err := container.Mount(); err != nil { - return nil, err - } - var filter []string - basePath := path.Join(container.basefs, resource) - stat, err := os.Stat(basePath) - if err != nil { - container.Unmount() - return nil, err - } - if !stat.IsDir() { - d, f := path.Split(basePath) - basePath = d - filter = []string{f} - } else { - filter = []string{path.Base(basePath)} - basePath = path.Dir(basePath) - } - - archive, err := archive.TarFilter(basePath, &archive.TarOptions{ - Compression: archive.Uncompressed, - Includes: filter, - }) - if err != nil { - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.Unmount() - return err - }), nil -} - -// Returns true if the container exposes a certain port -func (container *Container) Exposes(p nat.Port) bool { - _, exists := container.Config.ExposedPorts[p] - return exists -} - -func (container *Container) GetPtyMaster() (*os.File, error) { - ttyConsole, ok := container.command.Terminal.(execdriver.TtyTerminal) - if !ok { - return nil, ErrNoTTY - } - return ttyConsole.Master(), nil -} diff --git a/container_unit_test.go b/container_unit_test.go deleted file mode 100644 index 3877b7f0da..0000000000 --- a/container_unit_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/nat" - "testing" -) - -func TestParseNetworkOptsPrivateOnly(t *testing.T) { - ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - 
t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if s.HostIp != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsPublic(t *testing.T) { - ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:8080:80"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "tcp" { - t.Logf("Expected tcp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "80" { - t.Logf("Expected 80 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "8080" { - t.Logf("Expected 8080 got %s", s.HostPort) - t.Fail() - } - if s.HostIp != "192.168.1.100" { - t.Fail() - } - } -} - -func TestParseNetworkOptsUdp(t *testing.T) { - ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) - if err != nil { - t.Fatal(err) - } - if len(ports) != 1 { - t.Logf("Expected 1 got %d", len(ports)) - t.FailNow() - } - if len(bindings) != 1 { - t.Logf("Expected 1 got %d", len(bindings)) - t.FailNow() - } - for k := range ports { - if k.Proto() != "udp" { - t.Logf("Expected udp got %s", k.Proto()) - t.Fail() - } - if k.Port() != "6000" { - t.Logf("Expected 6000 got %s", k.Port()) - t.Fail() - } - b, exists := bindings[k] - if !exists { - t.Log("Binding does not exist") - t.FailNow() - } - if len(b) != 1 { - t.Logf("Expected 1 got %d", len(b)) - t.FailNow() - } - s := b[0] - if s.HostPort != "" { - t.Logf("Expected \"\" got %s", s.HostPort) - t.Fail() - } - if s.HostIp != "192.168.1.100" { - t.Fail() - } - } -} - -func TestGetFullName(t *testing.T) { - name, err := getFullName("testing") - if err != nil { - t.Fatal(err) - } - if name != "/testing" { - t.Fatalf("Expected /testing got %s", name) - } - if _, err := getFullName(""); err == nil { - t.Fatal("Error should not be nil") - } -} diff --git a/integration/api_test.go b/integration/api_test.go index c050b4934d..bac4efea53 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -5,12 +5,12 @@ import ( "bytes" "encoding/json" "fmt" - "github.com/dotcloud/docker" "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/runtime" "github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" @@ -600,7 +600,7 @@ func TestGetContainersByName(t *testing.T) { t.Fatal(err) } assertHttpNotError(r, t) - outContainer := &docker.Container{} + outContainer := &runtime.Container{} if err := json.Unmarshal(r.Body.Bytes(), outContainer); err != nil { t.Fatal(err) } diff --git a/integration/commands_test.go b/integration/commands_test.go index 6d3ac86347..46f623bedf 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -3,11 +3,11 @@ package docker import ( "bufio" "fmt" - "github.com/dotcloud/docker" 
"github.com/dotcloud/docker/api" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/runtime" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -36,7 +36,7 @@ func closeWrap(args ...io.Closer) error { return nil } -func setRaw(t *testing.T, c *docker.Container) *term.State { +func setRaw(t *testing.T, c *runtime.Container) *term.State { pty, err := c.GetPtyMaster() if err != nil { t.Fatal(err) @@ -48,7 +48,7 @@ func setRaw(t *testing.T, c *docker.Container) *term.State { return state } -func unsetRaw(t *testing.T, c *docker.Container, state *term.State) { +func unsetRaw(t *testing.T, c *runtime.Container, state *term.State) { pty, err := c.GetPtyMaster() if err != nil { t.Fatal(err) @@ -56,8 +56,8 @@ func unsetRaw(t *testing.T, c *docker.Container, state *term.State) { term.RestoreTerminal(pty.Fd(), state) } -func waitContainerStart(t *testing.T, timeout time.Duration) *docker.Container { - var container *docker.Container +func waitContainerStart(t *testing.T, timeout time.Duration) *runtime.Container { + var container *runtime.Container setTimeout(t, "Waiting for the container to be started timed out", timeout, func() { for { diff --git a/integration/runtime_test.go b/integration/runtime_test.go index a79f84365a..dd478c289e 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -3,11 +3,11 @@ package docker import ( "bytes" "fmt" - "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/runtime" "github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/utils" "io" @@ -16,7 +16,7 @@ import ( "net/url" "os" "path/filepath" - "runtime" + goruntime "runtime" "strconv" "strings" "syscall" @@ -36,14 +36,14 @@ const ( var ( // FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted. 
- globalRuntime *docker.Runtime + globalRuntime *runtime.Runtime globalEngine *engine.Engine startFds int startGoroutines int ) // FIXME: nuke() is deprecated by Runtime.Nuke() -func nuke(runtime *docker.Runtime) error { +func nuke(runtime *runtime.Runtime) error { return runtime.Nuke() } @@ -120,7 +120,7 @@ func init() { // Create the "global runtime" with a long-running daemon for integration tests spawnGlobalDaemon() - startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine() + startFds, startGoroutines = utils.GetTotalUsedFds(), goruntime.NumGoroutine() } func setupBaseImage() { @@ -173,7 +173,7 @@ func spawnGlobalDaemon() { // FIXME: test that ImagePull(json=true) send correct json output -func GetTestImage(runtime *docker.Runtime) *image.Image { +func GetTestImage(runtime *runtime.Runtime) *image.Image { imgs, err := runtime.Graph().Map() if err != nil { log.Fatalf("Unable to get the test image: %s", err) @@ -357,7 +357,7 @@ func TestGet(t *testing.T) { } -func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) { +func startEchoServerContainer(t *testing.T, proto string) (*runtime.Runtime, *runtime.Container, string) { var ( err error id string diff --git a/integration/utils_test.go b/integration/utils_test.go index 05d73df52a..88f2cc49c3 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -18,6 +18,7 @@ import ( "github.com/dotcloud/docker/builtins" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/runtime" "github.com/dotcloud/docker/utils" ) @@ -27,7 +28,7 @@ import ( // Create a temporary runtime suitable for unit testing. // Call t.Fatal() at the first error. -func mkRuntime(f utils.Fataler) *docker.Runtime { +func mkRuntime(f utils.Fataler) *runtime.Runtime { eng := newTestEngine(f, false, "") return mkRuntimeFromEngine(eng, f) // FIXME: @@ -139,7 +140,7 @@ func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) { } } -func getContainer(eng *engine.Engine, id string, t utils.Fataler) *docker.Container { +func getContainer(eng *engine.Engine, id string, t utils.Fataler) *runtime.Container { runtime := mkRuntimeFromEngine(eng, t) c := runtime.Get(id) if c == nil { @@ -160,14 +161,14 @@ func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server { return srv } -func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime { +func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *runtime.Runtime { iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime") if iRuntime == nil { panic("Legacy runtime field not set in engine") } - runtime, ok := iRuntime.(*docker.Runtime) + runtime, ok := iRuntime.(*runtime.Runtime) if !ok { - panic("Legacy runtime field in engine does not cast to *docker.Runtime") + panic("Legacy runtime field in engine does not cast to *runtime.Runtime") } return runtime } @@ -249,7 +250,7 @@ func readFile(src string, t *testing.T) (content string) { // dynamically replaced by the current test image. // The caller is responsible for destroying the container. // Call t.Fatal() at the first error. 
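Because these tests now import github.com/dotcloud/docker/runtime under the bare name runtime, the standard library package of the same name has to be imported under an alias, which is what the goruntime "runtime" change above does. A standalone illustration of that alias; the call is the one the test init() records as startGoroutines:

    package main

    import (
        "fmt"
        goruntime "runtime" // frees the bare name "runtime" for a local package
    )

    func main() {
        fmt.Println("goroutines at start:", goruntime.NumGoroutine())
    }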
-func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *runconfig.HostConfig, error) { +func mkContainer(r *runtime.Runtime, args []string, t *testing.T) (*runtime.Container, *runconfig.HostConfig, error) { config, hc, _, err := runconfig.Parse(args, nil) defer func() { if err != nil && t != nil { @@ -280,7 +281,7 @@ func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Contai // and return its standard output as a string. // The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image. // If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally. -func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testing.T) (output string, err error) { +func runContainer(eng *engine.Engine, r *runtime.Runtime, args []string, t *testing.T) (output string, err error) { defer func() { if err != nil && t != nil { t.Fatal(err) diff --git a/runtime.go b/runtime.go deleted file mode 100644 index 2608701b9b..0000000000 --- a/runtime.go +++ /dev/null @@ -1,918 +0,0 @@ -package docker - -import ( - "container/list" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemonconfig" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/execdriver/lxc" - "github.com/dotcloud/docker/execdriver/native" - "github.com/dotcloud/docker/graph" - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/graphdriver/aufs" - _ "github.com/dotcloud/docker/graphdriver/btrfs" - _ "github.com/dotcloud/docker/graphdriver/devmapper" - _ "github.com/dotcloud/docker/graphdriver/vfs" - "github.com/dotcloud/docker/image" - _ "github.com/dotcloud/docker/networkdriver/lxc" - "github.com/dotcloud/docker/networkdriver/portallocator" - "github.com/dotcloud/docker/pkg/graphdb" - "github.com/dotcloud/docker/pkg/sysinfo" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "os" - "path" - "regexp" - "sort" - "strings" - "sync" - "time" -) - -// Set the max depth to the aufs default that most -// kernels are compiled with -// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk -const MaxImageDepth = 127 - -var ( - defaultDns = []string{"8.8.8.8", "8.8.4.4"} - validContainerNameChars = `[a-zA-Z0-9_.-]` - validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) -) - -type Runtime struct { - repository string - sysInitPath string - containers *list.List - graph *graph.Graph - repositories *graph.TagStore - idIndex *utils.TruncIndex - sysInfo *sysinfo.SysInfo - volumes *graph.Graph - srv *Server - eng *engine.Engine - config *daemonconfig.Config - containerGraph *graphdb.Database - driver graphdriver.Driver - execDriver execdriver.Driver -} - -// List returns an array of all containers registered in the runtime. -func (runtime *Runtime) List() []*Container { - containers := new(History) - for e := runtime.containers.Front(); e != nil; e = e.Next() { - containers.Add(e.Value.(*Container)) - } - return *containers -} - -func (runtime *Runtime) getContainerElement(id string) *list.Element { - for e := runtime.containers.Front(); e != nil; e = e.Next() { - container := e.Value.(*Container) - if container.ID == id { - return e - } - } - return nil -} - -// Get looks for a container by the specified ID or name, and returns it. 
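The deleted Runtime keeps its containers in a container/list.List, so List() and getContainerElement above walk the list and type-assert each stored value. The same pattern in isolation, with a stand-in element type:

    package main

    import (
        "container/list"
        "fmt"
    )

    type container struct{ ID string }

    func main() {
        l := list.New()
        l.PushBack(&container{ID: "aaa"})
        l.PushBack(&container{ID: "bbb"})

        // Walk front to back and type-assert each element, as Runtime.List does.
        for e := l.Front(); e != nil; e = e.Next() {
            c := e.Value.(*container)
            fmt.Println(c.ID)
        }
    }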
-// If the container is not found, or if an error occurs, nil is returned. -func (runtime *Runtime) Get(name string) *Container { - if c, _ := runtime.GetByName(name); c != nil { - return c - } - - id, err := runtime.idIndex.Get(name) - if err != nil { - return nil - } - - e := runtime.getContainerElement(id) - if e == nil { - return nil - } - return e.Value.(*Container) -} - -// Exists returns a true if a container of the specified ID or name exists, -// false otherwise. -func (runtime *Runtime) Exists(id string) bool { - return runtime.Get(id) != nil -} - -func (runtime *Runtime) containerRoot(id string) string { - return path.Join(runtime.repository, id) -} - -// Load reads the contents of a container from disk -// This is typically done at startup. -func (runtime *Runtime) load(id string) (*Container, error) { - container := &Container{root: runtime.containerRoot(id)} - if err := container.FromDisk(); err != nil { - return nil, err - } - if container.ID != id { - return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) - } - if container.State.IsRunning() { - container.State.SetGhost(true) - } - return container, nil -} - -// Register makes a container object usable by the runtime as -func (runtime *Runtime) Register(container *Container) error { - if container.runtime != nil || runtime.Exists(container.ID) { - return fmt.Errorf("Container is already loaded") - } - if err := validateID(container.ID); err != nil { - return err - } - if err := runtime.ensureName(container); err != nil { - return err - } - - container.runtime = runtime - - // Attach to stdout and stderr - container.stderr = utils.NewWriteBroadcaster() - container.stdout = utils.NewWriteBroadcaster() - // Attach to stdin - if container.Config.OpenStdin { - container.stdin, container.stdinPipe = io.Pipe() - } else { - container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin - } - // done - runtime.containers.PushBack(container) - runtime.idIndex.Add(container.ID) - - // FIXME: if the container is supposed to be running but is not, auto restart it? 
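When OpenStdin is false, Register wires stdinPipe to utils.NopWriteCloser(ioutil.Discard) so that writes to the container's stdin are silently dropped. That helper is not shown in this patch; a sketch of an equivalent, assuming it simply adds a no-op Close to an io.Writer:

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
    )

    // nopWriteCloser adds a no-op Close to any io.Writer (assumed shape of
    // utils.NopWriteCloser; the real helper may differ).
    type nopWriteCloser struct{ io.Writer }

    func (nopWriteCloser) Close() error { return nil }

    func main() {
        var stdinPipe io.WriteCloser = nopWriteCloser{ioutil.Discard}
        n, err := stdinPipe.Write([]byte("ignored input\n")) // silently dropped
        fmt.Println(n, err, stdinPipe.Close())
    }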
- // if so, then we need to restart monitor and init a new lock - // If the container is supposed to be running, make sure of it - if container.State.IsRunning() { - if container.State.IsGhost() { - utils.Debugf("killing ghost %s", container.ID) - - existingPid := container.State.Pid - container.State.SetGhost(false) - container.State.SetStopped(0) - - if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") { - lxc.KillLxc(container.ID, 9) - } else { - command := &execdriver.Command{ - ID: container.ID, - } - command.Process = &os.Process{Pid: existingPid} - runtime.execDriver.Kill(command, 9) - } - // ensure that the filesystem is also unmounted - unmountVolumesForContainer(container) - if err := container.Unmount(); err != nil { - utils.Debugf("ghost unmount error %s", err) - } - } - - info := runtime.execDriver.Info(container.ID) - if !info.IsRunning() { - utils.Debugf("Container %s was supposed to be running but is not.", container.ID) - if runtime.config.AutoRestart { - utils.Debugf("Restarting") - unmountVolumesForContainer(container) - if err := container.Unmount(); err != nil { - utils.Debugf("restart unmount error %s", err) - } - - container.State.SetGhost(false) - container.State.SetStopped(0) - if err := container.Start(); err != nil { - return err - } - } else { - utils.Debugf("Marking as stopped") - container.State.SetStopped(-127) - if err := container.ToDisk(); err != nil { - return err - } - } - } - } else { - // When the container is not running, we still initialize the waitLock - // chan and close it. Receiving on nil chan blocks whereas receiving on a - // closed chan does not. In this case we do not want to block. - container.waitLock = make(chan struct{}) - close(container.waitLock) - } - return nil -} - -func (runtime *Runtime) ensureName(container *Container) error { - if container.Name == "" { - name, err := generateRandomName(runtime) - if err != nil { - name = utils.TruncateID(container.ID) - } - container.Name = name - - if err := container.ToDisk(); err != nil { - utils.Debugf("Error saving container name %s", err) - } - if !runtime.containerGraph.Exists(name) { - if _, err := runtime.containerGraph.Set(name, container.ID); err != nil { - utils.Debugf("Setting default id - %s", err) - } - } - } - return nil -} - -func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error { - log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600) - if err != nil { - return err - } - src.AddWriter(log, stream) - return nil -} - -// Destroy unregisters a container from the runtime and cleanly removes its contents from the filesystem. 
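For containers that are not running, Register still creates waitLock and immediately closes it, exactly because of the channel property the comment above spells out: a receive on a closed channel returns at once, while a receive on a nil channel blocks forever. A standalone demonstration of that property:

    package main

    import (
        "fmt"
        "time"
    )

    func wait(ch chan struct{}) string {
        select {
        case <-ch:
            return "returned immediately"
        case <-time.After(100 * time.Millisecond):
            return "still blocked after 100ms"
        }
    }

    func main() {
        closed := make(chan struct{})
        close(closed) // receiving on a closed channel never blocks

        var nilChan chan struct{} // receiving on a nil channel blocks forever

        fmt.Println("closed chan:", wait(closed))
        fmt.Println("nil chan:  ", wait(nilChan))
    }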
-func (runtime *Runtime) Destroy(container *Container) error { - if container == nil { - return fmt.Errorf("The given container is ") - } - - element := runtime.getContainerElement(container.ID) - if element == nil { - return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID) - } - - if err := container.Stop(3); err != nil { - return err - } - - if err := runtime.driver.Remove(container.ID); err != nil { - return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", runtime.driver, container.ID, err) - } - - initID := fmt.Sprintf("%s-init", container.ID) - if err := runtime.driver.Remove(initID); err != nil { - return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", runtime.driver, initID, err) - } - - if _, err := runtime.containerGraph.Purge(container.ID); err != nil { - utils.Debugf("Unable to remove container from link graph: %s", err) - } - - // Deregister the container before removing its directory, to avoid race conditions - runtime.idIndex.Delete(container.ID) - runtime.containers.Remove(element) - if err := os.RemoveAll(container.root); err != nil { - return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) - } - return nil -} - -func (runtime *Runtime) restore() error { - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { - fmt.Printf("Loading containers: ") - } - dir, err := ioutil.ReadDir(runtime.repository) - if err != nil { - return err - } - containers := make(map[string]*Container) - currentDriver := runtime.driver.String() - - for _, v := range dir { - id := v.Name() - container, err := runtime.load(id) - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { - fmt.Print(".") - } - if err != nil { - utils.Errorf("Failed to load container %v: %v", id, err) - continue - } - - // Ignore the container if it does not support the current driver being used by the graph - if container.Driver == "" && currentDriver == "aufs" || container.Driver == currentDriver { - utils.Debugf("Loaded container %v", container.ID) - containers[container.ID] = container - } else { - utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) - } - } - - register := func(container *Container) { - if err := runtime.Register(container); err != nil { - utils.Debugf("Failed to register container %s: %s", container.ID, err) - } - } - - if entities := runtime.containerGraph.List("/", -1); entities != nil { - for _, p := range entities.Paths() { - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { - fmt.Print(".") - } - e := entities[p] - if container, ok := containers[e.ID()]; ok { - register(container) - delete(containers, e.ID()) - } - } - } - - // Any containers that are left over do not exist in the graph - for _, container := range containers { - // Try to set the default name for a container if it exists prior to links - container.Name, err = generateRandomName(runtime) - if err != nil { - container.Name = utils.TruncateID(container.ID) - } - - if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil { - utils.Debugf("Setting default id - %s", err) - } - register(container) - } - - if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { - fmt.Printf(": done.\n") - } - - return nil -} - -// Create creates a new container from the given configuration with a given name. 
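Create, in the next hunk, rejects names that do not match validContainerNamePattern: an optional leading slash followed by one or more characters from [a-zA-Z0-9_.-]. The check in isolation, using the same pattern the deleted runtime.go defines:

    package main

    import (
        "fmt"
        "regexp"
    )

    var (
        validContainerNameChars   = `[a-zA-Z0-9_.-]`
        validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
    )

    func main() {
        for _, name := range []string{"/web-1", "db_2", "bad name!", ""} {
            fmt.Printf("%-12q valid=%v\n", name, validContainerNamePattern.MatchString(name))
        }
    }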
-func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) { - // Lookup image - img, err := runtime.repositories.LookupImage(config.Image) - if err != nil { - return nil, nil, err - } - - // We add 2 layers to the depth because the container's rw and - // init layer add to the restriction - depth, err := img.Depth() - if err != nil { - return nil, nil, err - } - - if depth+2 >= MaxImageDepth { - return nil, nil, fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) - } - - checkDeprecatedExpose := func(config *runconfig.Config) bool { - if config != nil { - if config.PortSpecs != nil { - for _, p := range config.PortSpecs { - if strings.Contains(p, ":") { - return true - } - } - } - } - return false - } - - warnings := []string{} - if checkDeprecatedExpose(img.Config) || checkDeprecatedExpose(config) { - warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.") - } - - if img.Config != nil { - if err := runconfig.Merge(config, img.Config); err != nil { - return nil, nil, err - } - } - - if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { - return nil, nil, fmt.Errorf("No command specified") - } - - // Generate id - id := utils.GenerateRandomID() - - if name == "" { - name, err = generateRandomName(runtime) - if err != nil { - name = utils.TruncateID(id) - } - } else { - if !validContainerNamePattern.MatchString(name) { - return nil, nil, fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) - } - } - - if name[0] != '/' { - name = "/" + name - } - - // Set the enitity in the graph using the default name specified - if _, err := runtime.containerGraph.Set(name, id); err != nil { - if !graphdb.IsNonUniqueNameError(err) { - return nil, nil, err - } - - conflictingContainer, err := runtime.GetByName(name) - if err != nil { - if strings.Contains(err.Error(), "Could not find entity") { - return nil, nil, err - } - - // Remove name and continue starting the container - if err := runtime.containerGraph.Delete(name); err != nil { - return nil, nil, err - } - } else { - nameAsKnownByUser := strings.TrimPrefix(name, "/") - return nil, nil, fmt.Errorf( - "Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser, - utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser) - } - } - - // Generate default hostname - // FIXME: the lxc template no longer needs to set a default hostname - if config.Hostname == "" { - config.Hostname = id[:12] - } - - var args []string - var entrypoint string - - if len(config.Entrypoint) != 0 { - entrypoint = config.Entrypoint[0] - args = append(config.Entrypoint[1:], config.Cmd...) - } else { - entrypoint = config.Cmd[0] - args = config.Cmd[1:] - } - - container := &Container{ - // FIXME: we should generate the ID here instead of receiving it as an argument - ID: id, - Created: time.Now().UTC(), - Path: entrypoint, - Args: args, //FIXME: de-duplicate from config - Config: config, - hostConfig: &runconfig.HostConfig{}, - Image: img.ID, // Always use the resolved image id - NetworkSettings: &NetworkSettings{}, - Name: name, - Driver: runtime.driver.String(), - ExecDriver: runtime.execDriver.Name(), - } - container.root = runtime.containerRoot(container.ID) - // Step 1: create the container directory. - // This doubles as a barrier to avoid race conditions. 
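The "doubles as a barrier" remark works because os.Mkdir, unlike os.MkdirAll, fails when the directory already exists, so two concurrent Create calls for the same ID cannot both get past step 1. A minimal demonstration (the directory name is just a placeholder):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        root := filepath.Join(os.TempDir(), "container-abc123") // placeholder ID
        defer os.RemoveAll(root)

        // The first creator wins...
        fmt.Println("first :", os.Mkdir(root, 0700))
        // ...any later attempt gets an "exists" error and can back off.
        err := os.Mkdir(root, 0700)
        fmt.Println("second:", err, "IsExist =", os.IsExist(err))
    }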
- if err := os.Mkdir(container.root, 0700); err != nil { - return nil, nil, err - } - - initID := fmt.Sprintf("%s-init", container.ID) - if err := runtime.driver.Create(initID, img.ID); err != nil { - return nil, nil, err - } - initPath, err := runtime.driver.Get(initID) - if err != nil { - return nil, nil, err - } - defer runtime.driver.Put(initID) - - if err := graph.SetupInitLayer(initPath); err != nil { - return nil, nil, err - } - - if err := runtime.driver.Create(container.ID, initID); err != nil { - return nil, nil, err - } - resolvConf, err := utils.GetResolvConf() - if err != nil { - return nil, nil, err - } - - if len(config.Dns) == 0 && len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { - //"WARNING: Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns - runtime.config.Dns = defaultDns - } - - // If custom dns exists, then create a resolv.conf for the container - if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 { - var dns []string - if len(config.Dns) > 0 { - dns = config.Dns - } else { - dns = runtime.config.Dns - } - container.ResolvConfPath = path.Join(container.root, "resolv.conf") - f, err := os.Create(container.ResolvConfPath) - if err != nil { - return nil, nil, err - } - defer f.Close() - for _, dns := range dns { - if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil { - return nil, nil, err - } - } - } else { - container.ResolvConfPath = "/etc/resolv.conf" - } - - // Step 2: save the container json - if err := container.ToDisk(); err != nil { - return nil, nil, err - } - - // Step 3: register the container - if err := runtime.Register(container); err != nil { - return nil, nil, err - } - return container, warnings, nil -} - -// Commit creates a new filesystem image from the current state of a container. -// The image can optionally be tagged into a repository -func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) { - // FIXME: freeze the container before copying it to avoid data corruption? - // FIXME: this shouldn't be in commands. 
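When custom DNS servers are configured, Create writes a per-container resolv.conf consisting of one nameserver line per address, as the hunk above shows. The same file-writing step on its own, using the patch's default servers and a placeholder path:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "path/filepath"
    )

    // writeResolvConf writes one "nameserver" line per address, mirroring the
    // resolv.conf step in Create (sketch; error handling kept minimal).
    func writeResolvConf(path string, dns []string) error {
        f, err := os.Create(path)
        if err != nil {
            return err
        }
        defer f.Close()
        for _, d := range dns {
            if _, err := f.Write([]byte("nameserver " + d + "\n")); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        p := filepath.Join(os.TempDir(), "resolv.conf") // placeholder location
        if err := writeResolvConf(p, []string{"8.8.8.8", "8.8.4.4"}); err != nil {
            fmt.Println("error:", err)
            return
        }
        data, _ := ioutil.ReadFile(p)
        fmt.Print(string(data))
    }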
- if err := container.Mount(); err != nil { - return nil, err - } - defer container.Unmount() - - rwTar, err := container.ExportRw() - if err != nil { - return nil, err - } - defer rwTar.Close() - - // Create a new image from the container's base layers + a new layer from container changes - var ( - containerID, containerImage string - containerConfig *runconfig.Config - ) - if container != nil { - containerID = container.ID - containerImage = container.Image - containerConfig = container.Config - } - img, err := runtime.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config) - if err != nil { - return nil, err - } - // Register the image if needed - if repository != "" { - if err := runtime.repositories.Set(repository, tag, img.ID, true); err != nil { - return img, err - } - } - return img, nil -} - -func getFullName(name string) (string, error) { - if name == "" { - return "", fmt.Errorf("Container name cannot be empty") - } - if name[0] != '/' { - name = "/" + name - } - return name, nil -} - -func (runtime *Runtime) GetByName(name string) (*Container, error) { - fullName, err := getFullName(name) - if err != nil { - return nil, err - } - entity := runtime.containerGraph.Get(fullName) - if entity == nil { - return nil, fmt.Errorf("Could not find entity for %s", name) - } - e := runtime.getContainerElement(entity.ID()) - if e == nil { - return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID()) - } - return e.Value.(*Container), nil -} - -func (runtime *Runtime) Children(name string) (map[string]*Container, error) { - name, err := getFullName(name) - if err != nil { - return nil, err - } - children := make(map[string]*Container) - - err = runtime.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { - c := runtime.Get(e.ID()) - if c == nil { - return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p) - } - children[p] = c - return nil - }, 0) - - if err != nil { - return nil, err - } - return children, nil -} - -func (runtime *Runtime) RegisterLink(parent, child *Container, alias string) error { - fullName := path.Join(parent.Name, alias) - if !runtime.containerGraph.Exists(fullName) { - _, err := runtime.containerGraph.Set(fullName, child.ID) - return err - } - return nil -} - -// FIXME: harmonize with NewGraph() -func NewRuntime(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) { - runtime, err := NewRuntimeFromDirectory(config, eng) - if err != nil { - return nil, err - } - return runtime, nil -} - -func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) { - - // Set the default driver - graphdriver.DefaultDriver = config.GraphDriver - - // Load storage driver - driver, err := graphdriver.New(config.Root) - if err != nil { - return nil, err - } - utils.Debugf("Using graph driver %s", driver) - - runtimeRepo := path.Join(config.Root, "containers") - - if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) { - return nil, err - } - - if ad, ok := driver.(*aufs.Driver); ok { - utils.Debugf("Migrating existing containers") - if err := ad.Migrate(config.Root, graph.SetupInitLayer); err != nil { - return nil, err - } - } - - utils.Debugf("Creating images graph") - g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver) - if err != nil { - return nil, err - } - - // We don't want to use a complex driver like aufs or devmapper - // for volumes, just a plain filesystem - volumesDriver, err := graphdriver.GetDriver("vfs", 
config.Root) - if err != nil { - return nil, err - } - utils.Debugf("Creating volumes graph") - volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver) - if err != nil { - return nil, err - } - utils.Debugf("Creating repository list") - repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g) - if err != nil { - return nil, fmt.Errorf("Couldn't create Tag store: %s", err) - } - - if !config.DisableNetwork { - job := eng.Job("init_networkdriver") - - job.SetenvBool("EnableIptables", config.EnableIptables) - job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication) - job.SetenvBool("EnableIpForward", config.EnableIpForward) - job.Setenv("BridgeIface", config.BridgeIface) - job.Setenv("BridgeIP", config.BridgeIP) - job.Setenv("DefaultBindingIP", config.DefaultIp.String()) - - if err := job.Run(); err != nil { - return nil, err - } - } - - graphdbPath := path.Join(config.Root, "linkgraph.db") - graph, err := graphdb.NewSqliteConn(graphdbPath) - if err != nil { - return nil, err - } - - localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) - sysInitPath := utils.DockerInitPath(localCopy) - if sysInitPath == "" { - return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.io/en/latest/contributing/devenvironment for official build instructions.") - } - - if sysInitPath != localCopy { - // When we find a suitable dockerinit binary (even if it's our local binary), we copy it into config.Root at localCopy for future use (so that the original can go away without that being a problem, for example during a package upgrade). - if err := os.Mkdir(path.Dir(localCopy), 0700); err != nil && !os.IsExist(err) { - return nil, err - } - if _, err := utils.CopyFile(sysInitPath, localCopy); err != nil { - return nil, err - } - if err := os.Chmod(localCopy, 0700); err != nil { - return nil, err - } - sysInitPath = localCopy - } - - var ( - ed execdriver.Driver - sysInfo = sysinfo.New(false) - ) - - switch config.ExecDriver { - case "lxc": - // we want to five the lxc driver the full docker root because it needs - // to access and write config and template files in /var/lib/docker/containers/* - // to be backwards compatible - ed, err = lxc.NewDriver(config.Root, sysInfo.AppArmor) - case "native": - ed, err = native.NewDriver(path.Join(config.Root, "execdriver", "native")) - default: - return nil, fmt.Errorf("unknown exec driver %s", config.ExecDriver) - } - if err != nil { - return nil, err - } - - runtime := &Runtime{ - repository: runtimeRepo, - containers: list.New(), - graph: g, - repositories: repositories, - idIndex: utils.NewTruncIndex(), - sysInfo: sysInfo, - volumes: volumes, - config: config, - containerGraph: graph, - driver: driver, - sysInitPath: sysInitPath, - execDriver: ed, - eng: eng, - } - - if err := runtime.restore(); err != nil { - return nil, err - } - return runtime, nil -} - -func (runtime *Runtime) Close() error { - errorsStrings := []string{} - if err := portallocator.ReleaseAll(); err != nil { - utils.Errorf("portallocator.ReleaseAll(): %s", err) - errorsStrings = append(errorsStrings, err.Error()) - } - if err := runtime.driver.Cleanup(); err != nil { - utils.Errorf("runtime.driver.Cleanup(): %s", err.Error()) - errorsStrings = append(errorsStrings, err.Error()) - } - if err := runtime.containerGraph.Close(); err != nil { - utils.Errorf("runtime.containerGraph.Close(): %s", err.Error()) - 
errorsStrings = append(errorsStrings, err.Error()) - } - if len(errorsStrings) > 0 { - return fmt.Errorf("%s", strings.Join(errorsStrings, ", ")) - } - return nil -} - -func (runtime *Runtime) Mount(container *Container) error { - dir, err := runtime.driver.Get(container.ID) - if err != nil { - return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, runtime.driver, err) - } - if container.basefs == "" { - container.basefs = dir - } else if container.basefs != dir { - return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", - runtime.driver, container.ID, container.basefs, dir) - } - return nil -} - -func (runtime *Runtime) Unmount(container *Container) error { - runtime.driver.Put(container.ID) - return nil -} - -func (runtime *Runtime) Changes(container *Container) ([]archive.Change, error) { - if differ, ok := runtime.driver.(graphdriver.Differ); ok { - return differ.Changes(container.ID) - } - cDir, err := runtime.driver.Get(container.ID) - if err != nil { - return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) - } - defer runtime.driver.Put(container.ID) - initDir, err := runtime.driver.Get(container.ID + "-init") - if err != nil { - return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) - } - defer runtime.driver.Put(container.ID + "-init") - return archive.ChangesDirs(cDir, initDir) -} - -func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) { - if differ, ok := runtime.driver.(graphdriver.Differ); ok { - return differ.Diff(container.ID) - } - - changes, err := runtime.Changes(container) - if err != nil { - return nil, err - } - - cDir, err := runtime.driver.Get(container.ID) - if err != nil { - return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) - } - - archive, err := archive.ExportChanges(cDir, changes) - if err != nil { - return nil, err - } - return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - runtime.driver.Put(container.ID) - return err - }), nil -} - -func (runtime *Runtime) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - return runtime.execDriver.Run(c.command, pipes, startCallback) -} - -func (runtime *Runtime) Kill(c *Container, sig int) error { - return runtime.execDriver.Kill(c.command, sig) -} - -// Nuke kills all containers then removes all content -// from the content root, including images, volumes and -// container filesystems. -// Again: this will remove your entire docker runtime! -func (runtime *Runtime) Nuke() error { - var wg sync.WaitGroup - for _, container := range runtime.List() { - wg.Add(1) - go func(c *Container) { - c.Kill() - wg.Done() - }(container) - } - wg.Wait() - runtime.Close() - - return os.RemoveAll(runtime.config.Root) -} - -// FIXME: this is a convenience function for integration tests -// which need direct access to runtime.graph. -// Once the tests switch to using engine and jobs, this method -// can go away. -func (runtime *Runtime) Graph() *graph.Graph { - return runtime.graph -} - -// History is a convenience type for storing a list of containers, -// ordered by creation date. 
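History, in the next hunk, implements sort.Interface, and its Less asks whether element j was created before element i, so sorting puts the newest container first. The same idea on plain data:

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    type item struct {
        ID      string
        Created time.Time
    }

    // byNewest sorts newest-first, mirroring History.Less which compares
    // containers[j].When() against containers[i].When().
    type byNewest []item

    func (h byNewest) Len() int           { return len(h) }
    func (h byNewest) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
    func (h byNewest) Less(i, j int) bool { return h[j].Created.Before(h[i].Created) }

    func main() {
        now := time.Now()
        items := byNewest{
            {"old", now.Add(-2 * time.Hour)},
            {"new", now},
            {"mid", now.Add(-1 * time.Hour)},
        }
        sort.Sort(items)
        for _, it := range items {
            fmt.Println(it.ID) // new, mid, old
        }
    }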
-type History []*Container - -func (history *History) Len() int { - return len(*history) -} - -func (history *History) Less(i, j int) bool { - containers := *history - return containers[j].When().Before(containers[i].When()) -} - -func (history *History) Swap(i, j int) { - containers := *history - tmp := containers[i] - containers[i] = containers[j] - containers[j] = tmp -} - -func (history *History) Add(container *Container) { - *history = append(*history, container) - sort.Sort(history) -} diff --git a/runtime/container.go b/runtime/container.go new file mode 100644 index 0000000000..813147e508 --- /dev/null +++ b/runtime/container.go @@ -0,0 +1,1161 @@ +package runtime + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/links" + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/utils" + "io" + "io/ioutil" + "log" + "os" + "path" + "strings" + "sync" + "syscall" + "time" +) + +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +var ( + ErrNotATTY = errors.New("The PTY is not a file") + ErrNoTTY = errors.New("No PTY found") + ErrContainerStart = errors.New("The container failed to start. Unknown error") + ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.") +) + +type Container struct { + sync.Mutex + root string // Path to the "home" of the container, including metadata. + basefs string // Path to the graphdriver mountpoint + + ID string + + Created time.Time + + Path string + Args []string + + Config *runconfig.Config + State State + Image string + + NetworkSettings *NetworkSettings + + ResolvConfPath string + HostnamePath string + HostsPath string + Name string + Driver string + ExecDriver string + + command *execdriver.Command + stdout *utils.WriteBroadcaster + stderr *utils.WriteBroadcaster + stdin io.ReadCloser + stdinPipe io.WriteCloser + + runtime *Runtime + + waitLock chan struct{} + Volumes map[string]string + // Store rw/ro in a separate structure to preserve reverse-compatibility on-disk. + // Easier than migrating older container configs :) + VolumesRW map[string]bool + hostConfig *runconfig.HostConfig + + activeLinks map[string]*links.Link +} + +// FIXME: move deprecated port stuff to nat to clean up the core. +type PortMapping map[string]string // Deprecated + +type NetworkSettings struct { + IPAddress string + IPPrefixLen int + Gateway string + Bridge string + PortMapping map[string]PortMapping // Deprecated + Ports nat.PortMap +} + +func (settings *NetworkSettings) PortMappingAPI() *engine.Table { + var outs = engine.NewTable("", 0) + for port, bindings := range settings.Ports { + p, _ := nat.ParsePort(port.Port()) + if len(bindings) == 0 { + out := &engine.Env{} + out.SetInt("PublicPort", p) + out.Set("Type", port.Proto()) + outs.Add(out) + continue + } + for _, binding := range bindings { + out := &engine.Env{} + h, _ := nat.ParsePort(binding.HostPort) + out.SetInt("PrivatePort", p) + out.SetInt("PublicPort", h) + out.Set("Type", port.Proto()) + out.Set("IP", binding.HostIp) + outs.Add(out) + } + } + return outs +} + +// Inject the io.Reader at the given path. 
Note: do not close the reader +func (container *Container) Inject(file io.Reader, pth string) error { + if err := container.Mount(); err != nil { + return fmt.Errorf("inject: error mounting container %s: %s", container.ID, err) + } + defer container.Unmount() + + // Return error if path exists + destPath := path.Join(container.basefs, pth) + if _, err := os.Stat(destPath); err == nil { + // Since err is nil, the path could be stat'd and it exists + return fmt.Errorf("%s exists", pth) + } else if !os.IsNotExist(err) { + // Expect err might be that the file doesn't exist, so + // if it's some other error, return that. + + return err + } + + // Make sure the directory exists + if err := os.MkdirAll(path.Join(container.basefs, path.Dir(pth)), 0755); err != nil { + return err + } + + dest, err := os.Create(destPath) + if err != nil { + return err + } + defer dest.Close() + + if _, err := io.Copy(dest, file); err != nil { + return err + } + return nil +} + +func (container *Container) When() time.Time { + return container.Created +} + +func (container *Container) FromDisk() error { + data, err := ioutil.ReadFile(container.jsonPath()) + if err != nil { + return err + } + // Load container settings + // udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it + if err := json.Unmarshal(data, container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") { + return err + } + return container.readHostConfig() +} + +func (container *Container) ToDisk() (err error) { + data, err := json.Marshal(container) + if err != nil { + return + } + err = ioutil.WriteFile(container.jsonPath(), data, 0666) + if err != nil { + return + } + return container.WriteHostConfig() +} + +func (container *Container) readHostConfig() error { + container.hostConfig = &runconfig.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.hostConfig, + // but that's OK, since we just did that above.) 
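FromDisk, ToDisk and readHostConfig persist the container as JSON and treat a missing hostconfig.json as an empty config rather than an error. That load-or-default pattern in isolation, with a stand-in config type and a placeholder file name:

    package main

    import (
        "encoding/json"
        "fmt"
        "io/ioutil"
        "os"
        "path/filepath"
    )

    type hostConfig struct {
        Privileged bool
    }

    // readHostConfig returns a zero-value config when the file does not exist,
    // and only fails on real read or decode errors (sketch).
    func readHostConfig(path string) (*hostConfig, error) {
        hc := &hostConfig{}
        if _, err := os.Stat(path); os.IsNotExist(err) {
            return hc, nil
        }
        data, err := ioutil.ReadFile(path)
        if err != nil {
            return nil, err
        }
        return hc, json.Unmarshal(data, hc)
    }

    func main() {
        p := filepath.Join(os.TempDir(), "hostconfig.json") // placeholder
        os.Remove(p)
        hc, err := readHostConfig(p)
        fmt.Printf("%+v %v\n", hc, err) // &{Privileged:false} <nil>
    }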
+ _, err := os.Stat(container.hostConfigPath()) + if os.IsNotExist(err) { + return nil + } + data, err := ioutil.ReadFile(container.hostConfigPath()) + if err != nil { + return err + } + return json.Unmarshal(data, container.hostConfig) +} + +func (container *Container) WriteHostConfig() (err error) { + data, err := json.Marshal(container.hostConfig) + if err != nil { + return + } + return ioutil.WriteFile(container.hostConfigPath(), data, 0666) +} + +func (container *Container) generateEnvConfig(env []string) error { + data, err := json.Marshal(env) + if err != nil { + return err + } + p, err := container.EnvConfigPath() + if err != nil { + return err + } + ioutil.WriteFile(p, data, 0600) + return nil +} + +func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, stdout io.Writer, stderr io.Writer) chan error { + var cStdout, cStderr io.ReadCloser + + var nJobs int + errors := make(chan error, 3) + if stdin != nil && container.Config.OpenStdin { + nJobs += 1 + if cStdin, err := container.StdinPipe(); err != nil { + errors <- err + } else { + go func() { + utils.Debugf("attach: stdin: begin") + defer utils.Debugf("attach: stdin: end") + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if container.Config.StdinOnce && !container.Config.Tty { + defer cStdin.Close() + } else { + defer func() { + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + }() + } + if container.Config.Tty { + _, err = utils.CopyEscapable(cStdin, stdin) + } else { + _, err = io.Copy(cStdin, stdin) + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + utils.Errorf("attach: stdin: %s", err) + } + errors <- err + }() + } + } + if stdout != nil { + nJobs += 1 + if p, err := container.StdoutPipe(); err != nil { + errors <- err + } else { + cStdout = p + go func() { + utils.Debugf("attach: stdout: begin") + defer utils.Debugf("attach: stdout: end") + // If we are in StdinOnce mode, then close stdin + if container.Config.StdinOnce && stdin != nil { + defer stdin.Close() + } + if stdinCloser != nil { + defer stdinCloser.Close() + } + _, err := io.Copy(stdout, cStdout) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + utils.Errorf("attach: stdout: %s", err) + } + errors <- err + }() + } + } else { + go func() { + if stdinCloser != nil { + defer stdinCloser.Close() + } + if cStdout, err := container.StdoutPipe(); err != nil { + utils.Errorf("attach: stdout pipe: %s", err) + } else { + io.Copy(&utils.NopWriter{}, cStdout) + } + }() + } + if stderr != nil { + nJobs += 1 + if p, err := container.StderrPipe(); err != nil { + errors <- err + } else { + cStderr = p + go func() { + utils.Debugf("attach: stderr: begin") + defer utils.Debugf("attach: stderr: end") + // If we are in StdinOnce mode, then close stdin + if container.Config.StdinOnce && stdin != nil { + defer stdin.Close() + } + if stdinCloser != nil { + defer stdinCloser.Close() + } + _, err := io.Copy(stderr, cStderr) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + utils.Errorf("attach: stderr: %s", err) + } + errors <- err + }() + } + } else { + go func() { + if stdinCloser != nil { + defer stdinCloser.Close() + } + + if cStderr, err := container.StderrPipe(); err != nil { + utils.Errorf("attach: stdout pipe: %s", err) + } else { + io.Copy(&utils.NopWriter{}, cStderr) + } + }() + } + + return utils.Go(func() error { + defer func() { + if cStdout != nil { + cStdout.Close() + } + if cStderr != nil { + cStderr.Close() + } + }() + + 
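Attach starts up to three copy goroutines and funnels their results through a buffered error channel; the collector just below waits for nJobs results and aborts on the first non-nil error. The same fan-in skeleton with the copies replaced by placeholder work:

    package main

    import (
        "errors"
        "fmt"
    )

    // runJobs collects results from len(jobs) workers over a buffered channel
    // and stops at the first failure, mirroring the shape of Attach's collector.
    func runJobs(jobs []func() error) error {
        errs := make(chan error, len(jobs))
        for _, job := range jobs {
            go func(job func() error) { errs <- job() }(job)
        }
        for i := 0; i < len(jobs); i++ {
            if err := <-errs; err != nil {
                return err // abort on the first error; later results drain into the buffer
            }
        }
        return nil
    }

    func main() {
        ok := func() error { return nil }
        bad := func() error { return errors.New("copy failed") }
        fmt.Println(runJobs([]func() error{ok, ok}))
        fmt.Println(runJobs([]func() error{ok, bad, ok}))
    }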
// FIXME: how to clean up the stdin goroutine without the unwanted side effect + // of closing the passed stdin? Add an intermediary io.Pipe? + for i := 0; i < nJobs; i += 1 { + utils.Debugf("attach: waiting for job %d/%d", i+1, nJobs) + if err := <-errors; err != nil { + utils.Errorf("attach: job %d returned error %s, aborting all jobs", i+1, err) + return err + } + utils.Debugf("attach: job %d completed successfully", i+1) + } + utils.Debugf("attach: all jobs completed successfully") + return nil + }) +} + +func populateCommand(c *Container) { + var ( + en *execdriver.Network + driverConfig []string + ) + + if !c.Config.NetworkDisabled { + network := c.NetworkSettings + en = &execdriver.Network{ + Gateway: network.Gateway, + Bridge: network.Bridge, + IPAddress: network.IPAddress, + IPPrefixLen: network.IPPrefixLen, + Mtu: c.runtime.config.Mtu, + } + } + + if lxcConf := c.hostConfig.LxcConf; lxcConf != nil { + for _, pair := range lxcConf { + driverConfig = append(driverConfig, fmt.Sprintf("%s = %s", pair.Key, pair.Value)) + } + } + resources := &execdriver.Resources{ + Memory: c.Config.Memory, + MemorySwap: c.Config.MemorySwap, + CpuShares: c.Config.CpuShares, + } + c.command = &execdriver.Command{ + ID: c.ID, + Privileged: c.hostConfig.Privileged, + Rootfs: c.RootfsPath(), + InitPath: "/.dockerinit", + Entrypoint: c.Path, + Arguments: c.Args, + WorkingDir: c.Config.WorkingDir, + Network: en, + Tty: c.Config.Tty, + User: c.Config.User, + Config: driverConfig, + Resources: resources, + } + c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} +} + +func (container *Container) Start() (err error) { + container.Lock() + defer container.Unlock() + + if container.State.IsRunning() { + return fmt.Errorf("The container %s is already running.", container.ID) + } + + defer func() { + if err != nil { + container.cleanup() + } + }() + + if err := container.Mount(); err != nil { + return err + } + + if container.runtime.config.DisableNetwork { + container.Config.NetworkDisabled = true + container.buildHostnameAndHostsFiles("127.0.1.1") + } else { + if err := container.allocateNetwork(); err != nil { + return err + } + container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress) + } + + // Make sure the config is compatible with the current kernel + if container.Config.Memory > 0 && !container.runtime.sysInfo.MemoryLimit { + log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n") + container.Config.Memory = 0 + } + if container.Config.Memory > 0 && !container.runtime.sysInfo.SwapLimit { + log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n") + container.Config.MemorySwap = -1 + } + + if container.runtime.sysInfo.IPv4ForwardingDisabled { + log.Printf("WARNING: IPv4 forwarding is disabled. 
Networking will not work") + } + + if err := prepareVolumesForContainer(container); err != nil { + return err + } + + // Setup environment + env := []string{ + "HOME=/", + "PATH=" + DefaultPathEnv, + "HOSTNAME=" + container.Config.Hostname, + } + + if container.Config.Tty { + env = append(env, "TERM=xterm") + } + + // Init any links between the parent and children + runtime := container.runtime + + children, err := runtime.Children(container.Name) + if err != nil { + return err + } + + if len(children) > 0 { + container.activeLinks = make(map[string]*links.Link, len(children)) + + // If we encounter an error make sure that we rollback any network + // config and ip table changes + rollback := func() { + for _, link := range container.activeLinks { + link.Disable() + } + container.activeLinks = nil + } + + for linkAlias, child := range children { + if !child.State.IsRunning() { + return fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) + } + + link, err := links.NewLink( + container.NetworkSettings.IPAddress, + child.NetworkSettings.IPAddress, + linkAlias, + child.Config.Env, + child.Config.ExposedPorts, + runtime.eng) + + if err != nil { + rollback() + return err + } + + container.activeLinks[link.Alias()] = link + if err := link.Enable(); err != nil { + rollback() + return err + } + + for _, envVar := range link.ToEnv() { + env = append(env, envVar) + } + } + } + + // because the env on the container can override certain default values + // we need to replace the 'env' keys where they match and append anything + // else. + env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) + if err := container.generateEnvConfig(env); err != nil { + return err + } + + if container.Config.WorkingDir != "" { + container.Config.WorkingDir = path.Clean(container.Config.WorkingDir) + if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil { + return nil + } + } + + envPath, err := container.EnvConfigPath() + if err != nil { + return err + } + + if err := mountVolumesForContainer(container, envPath); err != nil { + return err + } + + populateCommand(container) + container.command.Env = env + + // Setup logging of stdout and stderr to disk + if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil { + return err + } + if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil { + return err + } + container.waitLock = make(chan struct{}) + + callbackLock := make(chan struct{}) + callback := func(command *execdriver.Command) { + container.State.SetRunning(command.Pid()) + if command.Tty { + // The callback is called after the process Start() + // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlace + // which we close here. 
+			if c, ok := command.Stdout.(io.Closer); ok {
+				c.Close()
+			}
+		}
+		if err := container.ToDisk(); err != nil {
+			utils.Debugf("%s", err)
+		}
+		close(callbackLock)
+	}
+
+	// We use a callback here instead of a goroutine and a chan for
+	// synchronization purposes
+	cErr := utils.Go(func() error { return container.monitor(callback) })
+
+	// Start should not return until the process is actually running
+	select {
+	case <-callbackLock:
+	case err := <-cErr:
+		return err
+	}
+	return nil
+}
+
+func (container *Container) Run() error {
+	if err := container.Start(); err != nil {
+		return err
+	}
+	container.Wait()
+	return nil
+}
+
+func (container *Container) Output() (output []byte, err error) {
+	pipe, err := container.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	defer pipe.Close()
+	if err := container.Start(); err != nil {
+		return nil, err
+	}
+	output, err = ioutil.ReadAll(pipe)
+	container.Wait()
+	return output, err
+}
+
+// Container.StdinPipe returns a WriteCloser which can be used to feed data
+// to the standard input of the container's active process.
+// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
+// which can be used to retrieve the standard output (and error) generated
+// by the container's active process. The output (and error) are actually
+// copied and delivered to all StdoutPipe and StderrPipe consumers, using
+// a kind of "broadcaster".
+
+func (container *Container) StdinPipe() (io.WriteCloser, error) {
+	return container.stdinPipe, nil
+}
+
+func (container *Container) StdoutPipe() (io.ReadCloser, error) {
+	reader, writer := io.Pipe()
+	container.stdout.AddWriter(writer, "")
+	return utils.NewBufReader(reader), nil
+}
+
+func (container *Container) StderrPipe() (io.ReadCloser, error) {
+	reader, writer := io.Pipe()
+	container.stderr.AddWriter(writer, "")
+	return utils.NewBufReader(reader), nil
+}
+
+func (container *Container) buildHostnameAndHostsFiles(IP string) {
+	container.HostnamePath = path.Join(container.root, "hostname")
+	ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644)
+
+	hostsContent := []byte(`
+127.0.0.1 localhost
+::1 localhost ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
+`)
+
+	container.HostsPath = path.Join(container.root, "hosts")
+
+	if container.Config.Domainname != "" {
+		hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
+	} else if !container.Config.NetworkDisabled {
+		hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...)
+ } + + ioutil.WriteFile(container.HostsPath, hostsContent, 0644) +} + +func (container *Container) allocateNetwork() error { + if container.Config.NetworkDisabled { + return nil + } + + var ( + env *engine.Env + err error + eng = container.runtime.eng + ) + + if container.State.IsGhost() { + if container.runtime.config.DisableNetwork { + env = &engine.Env{} + } else { + currentIP := container.NetworkSettings.IPAddress + + job := eng.Job("allocate_interface", container.ID) + if currentIP != "" { + job.Setenv("RequestIP", currentIP) + } + + env, err = job.Stdout.AddEnv() + if err != nil { + return err + } + + if err := job.Run(); err != nil { + return err + } + } + } else { + job := eng.Job("allocate_interface", container.ID) + env, err = job.Stdout.AddEnv() + if err != nil { + return err + } + if err := job.Run(); err != nil { + return err + } + } + + if container.Config.PortSpecs != nil { + utils.Debugf("Migrating port mappings for container: %s", strings.Join(container.Config.PortSpecs, ", ")) + if err := migratePortMappings(container.Config, container.hostConfig); err != nil { + return err + } + container.Config.PortSpecs = nil + if err := container.WriteHostConfig(); err != nil { + return err + } + } + + var ( + portSpecs = make(nat.PortSet) + bindings = make(nat.PortMap) + ) + + if !container.State.IsGhost() { + if container.Config.ExposedPorts != nil { + portSpecs = container.Config.ExposedPorts + } + if container.hostConfig.PortBindings != nil { + bindings = container.hostConfig.PortBindings + } + } else { + if container.NetworkSettings.Ports != nil { + for port, binding := range container.NetworkSettings.Ports { + portSpecs[port] = struct{}{} + bindings[port] = binding + } + } + } + + container.NetworkSettings.PortMapping = nil + + for port := range portSpecs { + binding := bindings[port] + if container.hostConfig.PublishAllPorts && len(binding) == 0 { + binding = append(binding, nat.PortBinding{}) + } + + for i := 0; i < len(binding); i++ { + b := binding[i] + + portJob := eng.Job("allocate_port", container.ID) + portJob.Setenv("HostIP", b.HostIp) + portJob.Setenv("HostPort", b.HostPort) + portJob.Setenv("Proto", port.Proto()) + portJob.Setenv("ContainerPort", port.Port()) + + portEnv, err := portJob.Stdout.AddEnv() + if err != nil { + return err + } + if err := portJob.Run(); err != nil { + eng.Job("release_interface", container.ID).Run() + return err + } + b.HostIp = portEnv.Get("HostIP") + b.HostPort = portEnv.Get("HostPort") + + binding[i] = b + } + bindings[port] = binding + } + container.WriteHostConfig() + + container.NetworkSettings.Ports = bindings + + container.NetworkSettings.Bridge = env.Get("Bridge") + container.NetworkSettings.IPAddress = env.Get("IP") + container.NetworkSettings.IPPrefixLen = env.GetInt("IPPrefixLen") + container.NetworkSettings.Gateway = env.Get("Gateway") + + return nil +} + +func (container *Container) releaseNetwork() { + if container.Config.NetworkDisabled { + return + } + eng := container.runtime.eng + + eng.Job("release_interface", container.ID).Run() + container.NetworkSettings = &NetworkSettings{} +} + +func (container *Container) monitor(callback execdriver.StartCallback) error { + var ( + err error + exitCode int + ) + + pipes := execdriver.NewPipes(container.stdin, container.stdout, container.stderr, container.Config.OpenStdin) + exitCode, err = container.runtime.Run(container, pipes, callback) + if err != nil { + utils.Errorf("Error running container: %s", err) + } + + if container.runtime.srv.IsRunning() { + 
container.State.SetStopped(exitCode)
+
+		// FIXME: there is a race condition here which causes this to fail during the unit tests.
+		// If another goroutine was waiting for Wait() to return before removing the container's root
+		// from the filesystem... At this point it may already have done so.
+		// This is because State.setStopped() has already been called, and has caused Wait()
+		// to return.
+		// FIXME: why are we serializing running state to disk in the first place?
+		//log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
+		if err := container.ToDisk(); err != nil {
+			utils.Errorf("Error dumping container state to disk: %s\n", err)
+		}
+	}
+
+	// Cleanup
+	container.cleanup()
+
+	// Re-create a brand new stdin pipe once the container exited
+	if container.Config.OpenStdin {
+		container.stdin, container.stdinPipe = io.Pipe()
+	}
+
+	if container.runtime != nil && container.runtime.srv != nil {
+		container.runtime.srv.LogEvent("die", container.ID, container.runtime.repositories.ImageName(container.Image))
+	}
+
+	close(container.waitLock)
+
+	return err
+}
+
+func (container *Container) cleanup() {
+	container.releaseNetwork()
+
+	// Disable all active links
+	if container.activeLinks != nil {
+		for _, link := range container.activeLinks {
+			link.Disable()
+		}
+	}
+	if container.Config.OpenStdin {
+		if err := container.stdin.Close(); err != nil {
+			utils.Errorf("%s: Error closing stdin: %s", container.ID, err)
+		}
+	}
+	if err := container.stdout.CloseWriters(); err != nil {
+		utils.Errorf("%s: Error closing stdout: %s", container.ID, err)
+	}
+	if err := container.stderr.CloseWriters(); err != nil {
+		utils.Errorf("%s: Error closing stderr: %s", container.ID, err)
+	}
+	if container.command != nil && container.command.Terminal != nil {
+		if err := container.command.Terminal.Close(); err != nil {
+			utils.Errorf("%s: Error closing terminal: %s", container.ID, err)
+		}
+	}
+
+	unmountVolumesForContainer(container)
+
+	if err := container.Unmount(); err != nil {
+		log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
+	}
+}
+
+func (container *Container) KillSig(sig int) error {
+	container.Lock()
+	defer container.Unlock()
+
+	if !container.State.IsRunning() {
+		return nil
+	}
+	return container.runtime.Kill(container, sig)
+}
+
+func (container *Container) Kill() error {
+	if !container.State.IsRunning() {
+		return nil
+	}
+
+	// 1. Send SIGKILL
+	if err := container.KillSig(9); err != nil {
+		return err
+	}
+
+	// 2. Wait for the process to die; as a last resort, try to kill the process directly
+	if err := container.WaitTimeout(10 * time.Second); err != nil {
+		if container.command == nil {
+			return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", utils.TruncateID(container.ID))
+		}
+		log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", utils.TruncateID(container.ID), "SIGKILL")
+		if err := container.runtime.Kill(container, 9); err != nil {
+			return err
+		}
+	}
+
+	container.Wait()
+	return nil
+}
+
+func (container *Container) Stop(seconds int) error {
+	if !container.State.IsRunning() {
+		return nil
+	}
+
+	// 1. Send a SIGTERM
+	if err := container.KillSig(15); err != nil {
+		utils.Debugf("Error sending kill SIGTERM: %s", err)
+		log.Print("Failed to send SIGTERM to the process, force killing")
+		if err := container.KillSig(9); err != nil {
+			return err
+		}
+	}
+
+	// 2. 
Wait for the process to exit on its own + if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil { + log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) + // 3. If it doesn't, then send SIGKILL + if err := container.Kill(); err != nil { + return err + } + } + return nil +} + +func (container *Container) Restart(seconds int) error { + // Avoid unnecessarily unmounting and then directly mounting + // the container when the container stops and then starts + // again + if err := container.Mount(); err == nil { + defer container.Unmount() + } + + if err := container.Stop(seconds); err != nil { + return err + } + return container.Start() +} + +// Wait blocks until the container stops running, then returns its exit code. +func (container *Container) Wait() int { + <-container.waitLock + return container.State.GetExitCode() +} + +func (container *Container) Resize(h, w int) error { + return container.command.Terminal.Resize(h, w) +} + +func (container *Container) ExportRw() (archive.Archive, error) { + if err := container.Mount(); err != nil { + return nil, err + } + if container.runtime == nil { + return nil, fmt.Errorf("Can't load storage driver for unregistered container %s", container.ID) + } + archive, err := container.runtime.Diff(container) + if err != nil { + container.Unmount() + return nil, err + } + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), nil +} + +func (container *Container) Export() (archive.Archive, error) { + if err := container.Mount(); err != nil { + return nil, err + } + + archive, err := archive.Tar(container.basefs, archive.Uncompressed) + if err != nil { + container.Unmount() + return nil, err + } + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), nil +} + +func (container *Container) WaitTimeout(timeout time.Duration) error { + done := make(chan bool) + go func() { + container.Wait() + done <- true + }() + + select { + case <-time.After(timeout): + return fmt.Errorf("Timed Out") + case <-done: + return nil + } +} + +func (container *Container) Mount() error { + return container.runtime.Mount(container) +} + +func (container *Container) Changes() ([]archive.Change, error) { + return container.runtime.Changes(container) +} + +func (container *Container) GetImage() (*image.Image, error) { + if container.runtime == nil { + return nil, fmt.Errorf("Can't get image of unregistered container") + } + return container.runtime.graph.Get(container.Image) +} + +func (container *Container) Unmount() error { + return container.runtime.Unmount(container) +} + +func (container *Container) logPath(name string) string { + return path.Join(container.root, fmt.Sprintf("%s-%s.log", container.ID, name)) +} + +func (container *Container) ReadLog(name string) (io.Reader, error) { + return os.Open(container.logPath(name)) +} + +func (container *Container) hostConfigPath() string { + return path.Join(container.root, "hostconfig.json") +} + +func (container *Container) jsonPath() string { + return path.Join(container.root, "config.json") +} + +func (container *Container) EnvConfigPath() (string, error) { + p := path.Join(container.root, "config.env") + if _, err := os.Stat(p); err != nil { + if os.IsNotExist(err) { + f, err := os.Create(p) + if err != nil { + return "", err + } + f.Close() + } else { + return "", err + } + } + return p, nil +} + +// This method 
must be exported to be used from the lxc template +// This directory is only usable when the container is running +func (container *Container) RootfsPath() string { + return path.Join(container.root, "root") +} + +// This is the stand-alone version of the root fs, without any additional mounts. +// This directory is usable whenever the container is mounted (and not unmounted) +func (container *Container) BasefsPath() string { + return container.basefs +} + +func validateID(id string) error { + if id == "" { + return fmt.Errorf("Invalid empty id") + } + return nil +} + +// GetSize, return real size, virtual size +func (container *Container) GetSize() (int64, int64) { + var ( + sizeRw, sizeRootfs int64 + err error + driver = container.runtime.driver + ) + + if err := container.Mount(); err != nil { + utils.Errorf("Warning: failed to compute size of container rootfs %s: %s", container.ID, err) + return sizeRw, sizeRootfs + } + defer container.Unmount() + + if differ, ok := container.runtime.driver.(graphdriver.Differ); ok { + sizeRw, err = differ.DiffSize(container.ID) + if err != nil { + utils.Errorf("Warning: driver %s couldn't return diff size of container %s: %s", driver, container.ID, err) + // FIXME: GetSize should return an error. Not changing it now in case + // there is a side-effect. + sizeRw = -1 + } + } else { + changes, _ := container.Changes() + if changes != nil { + sizeRw = archive.ChangesSize(container.basefs, changes) + } else { + sizeRw = -1 + } + } + + if _, err = os.Stat(container.basefs); err != nil { + if sizeRootfs, err = utils.TreeSize(container.basefs); err != nil { + sizeRootfs = -1 + } + } + return sizeRw, sizeRootfs +} + +func (container *Container) Copy(resource string) (io.ReadCloser, error) { + if err := container.Mount(); err != nil { + return nil, err + } + var filter []string + basePath := path.Join(container.basefs, resource) + stat, err := os.Stat(basePath) + if err != nil { + container.Unmount() + return nil, err + } + if !stat.IsDir() { + d, f := path.Split(basePath) + basePath = d + filter = []string{f} + } else { + filter = []string{path.Base(basePath)} + basePath = path.Dir(basePath) + } + + archive, err := archive.TarFilter(basePath, &archive.TarOptions{ + Compression: archive.Uncompressed, + Includes: filter, + }) + if err != nil { + return nil, err + } + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + container.Unmount() + return err + }), nil +} + +// Returns true if the container exposes a certain port +func (container *Container) Exposes(p nat.Port) bool { + _, exists := container.Config.ExposedPorts[p] + return exists +} + +func (container *Container) GetPtyMaster() (*os.File, error) { + ttyConsole, ok := container.command.Terminal.(execdriver.TtyTerminal) + if !ok { + return nil, ErrNoTTY + } + return ttyConsole.Master(), nil +} + +func (container *Container) HostConfig() *runconfig.HostConfig { + return container.hostConfig +} + +func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) { + container.hostConfig = hostConfig +} + +func (container *Container) DisableLink(name string) { + if container.activeLinks != nil { + if link, exists := container.activeLinks[name]; exists { + link.Disable() + } else { + utils.Debugf("Could not find active link for %s", name) + } + } +} diff --git a/runtime/container_unit_test.go b/runtime/container_unit_test.go new file mode 100644 index 0000000000..fba036ca50 --- /dev/null +++ b/runtime/container_unit_test.go @@ -0,0 +1,145 @@ +package runtime + 
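+// These tests exercise nat.ParsePortSpecs and GetFullContainerName directly
+// and do not require a running daemon.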
+import ( + "github.com/dotcloud/docker/nat" + "testing" +) + +func TestParseNetworkOptsPrivateOnly(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublic(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100:8080:80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "8080" { + t.Logf("Expected 8080 got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsUdp(t *testing.T) { + ports, bindings, err := nat.ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "udp" { + t.Logf("Expected udp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "6000" { + t.Logf("Expected 6000 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIp != "192.168.1.100" { + t.Fail() + } + } +} + +func TestGetFullName(t *testing.T) { + name, err := GetFullContainerName("testing") + if err != nil { + t.Fatal(err) + } + if name != "/testing" { + t.Fatalf("Expected /testing got %s", name) + } + if _, err := GetFullContainerName(""); err == nil { + t.Fatal("Error should not be nil") + } +} diff --git a/runtime/runtime.go b/runtime/runtime.go new file mode 100644 index 0000000000..c11c309ad8 --- /dev/null +++ b/runtime/runtime.go @@ -0,0 +1,953 @@ +package runtime + +import ( + "container/list" + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/daemonconfig" + "github.com/dotcloud/docker/dockerversion" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/execdriver/lxc" + "github.com/dotcloud/docker/execdriver/native" + "github.com/dotcloud/docker/graph" + "github.com/dotcloud/docker/graphdriver" + 
"github.com/dotcloud/docker/graphdriver/aufs" + _ "github.com/dotcloud/docker/graphdriver/btrfs" + _ "github.com/dotcloud/docker/graphdriver/devmapper" + _ "github.com/dotcloud/docker/graphdriver/vfs" + "github.com/dotcloud/docker/image" + _ "github.com/dotcloud/docker/networkdriver/lxc" + "github.com/dotcloud/docker/networkdriver/portallocator" + "github.com/dotcloud/docker/pkg/graphdb" + "github.com/dotcloud/docker/pkg/sysinfo" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/utils" + "io" + "io/ioutil" + "os" + "path" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Set the max depth to the aufs default that most +// kernels are compiled with +// For more information see: http://sourceforge.net/p/aufs/aufs3-standalone/ci/aufs3.12/tree/config.mk +const MaxImageDepth = 127 + +var ( + DefaultDns = []string{"8.8.8.8", "8.8.4.4"} + validContainerNameChars = `[a-zA-Z0-9_.-]` + validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`) +) + +type Runtime struct { + repository string + sysInitPath string + containers *list.List + graph *graph.Graph + repositories *graph.TagStore + idIndex *utils.TruncIndex + sysInfo *sysinfo.SysInfo + volumes *graph.Graph + srv Server + eng *engine.Engine + config *daemonconfig.Config + containerGraph *graphdb.Database + driver graphdriver.Driver + execDriver execdriver.Driver +} + +// List returns an array of all containers registered in the runtime. +func (runtime *Runtime) List() []*Container { + containers := new(History) + for e := runtime.containers.Front(); e != nil; e = e.Next() { + containers.Add(e.Value.(*Container)) + } + return *containers +} + +func (runtime *Runtime) getContainerElement(id string) *list.Element { + for e := runtime.containers.Front(); e != nil; e = e.Next() { + container := e.Value.(*Container) + if container.ID == id { + return e + } + } + return nil +} + +// Get looks for a container by the specified ID or name, and returns it. +// If the container is not found, or if an error occurs, nil is returned. +func (runtime *Runtime) Get(name string) *Container { + if c, _ := runtime.GetByName(name); c != nil { + return c + } + + id, err := runtime.idIndex.Get(name) + if err != nil { + return nil + } + + e := runtime.getContainerElement(id) + if e == nil { + return nil + } + return e.Value.(*Container) +} + +// Exists returns a true if a container of the specified ID or name exists, +// false otherwise. +func (runtime *Runtime) Exists(id string) bool { + return runtime.Get(id) != nil +} + +func (runtime *Runtime) containerRoot(id string) string { + return path.Join(runtime.repository, id) +} + +// Load reads the contents of a container from disk +// This is typically done at startup. 
+func (runtime *Runtime) load(id string) (*Container, error) {
+	container := &Container{root: runtime.containerRoot(id)}
+	if err := container.FromDisk(); err != nil {
+		return nil, err
+	}
+	if container.ID != id {
+		return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
+	}
+	if container.State.IsRunning() {
+		container.State.SetGhost(true)
+	}
+	return container, nil
+}
+
+// Register makes a container object usable by the runtime as <container.ID>
+func (runtime *Runtime) Register(container *Container) error {
+	if container.runtime != nil || runtime.Exists(container.ID) {
+		return fmt.Errorf("Container is already loaded")
+	}
+	if err := validateID(container.ID); err != nil {
+		return err
+	}
+	if err := runtime.ensureName(container); err != nil {
+		return err
+	}
+
+	container.runtime = runtime
+
+	// Attach to stdout and stderr
+	container.stderr = utils.NewWriteBroadcaster()
+	container.stdout = utils.NewWriteBroadcaster()
+	// Attach to stdin
+	if container.Config.OpenStdin {
+		container.stdin, container.stdinPipe = io.Pipe()
+	} else {
+		container.stdinPipe = utils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
+	}
+	// done
+	runtime.containers.PushBack(container)
+	runtime.idIndex.Add(container.ID)
+
+	// FIXME: if the container is supposed to be running but is not, auto restart it?
+	// if so, then we need to restart monitor and init a new lock
+	// If the container is supposed to be running, make sure of it
+	if container.State.IsRunning() {
+		if container.State.IsGhost() {
+			utils.Debugf("killing ghost %s", container.ID)
+
+			existingPid := container.State.Pid
+			container.State.SetGhost(false)
+			container.State.SetStopped(0)
+
+			if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
+				lxc.KillLxc(container.ID, 9)
+			} else {
+				command := &execdriver.Command{
+					ID: container.ID,
+				}
+				command.Process = &os.Process{Pid: existingPid}
+				runtime.execDriver.Kill(command, 9)
+			}
+			// ensure that the filesystem is also unmounted
+			unmountVolumesForContainer(container)
+			if err := container.Unmount(); err != nil {
+				utils.Debugf("ghost unmount error %s", err)
+			}
+		}
+
+		info := runtime.execDriver.Info(container.ID)
+		if !info.IsRunning() {
+			utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
+			if runtime.config.AutoRestart {
+				utils.Debugf("Restarting")
+				unmountVolumesForContainer(container)
+				if err := container.Unmount(); err != nil {
+					utils.Debugf("restart unmount error %s", err)
+				}
+
+				container.State.SetGhost(false)
+				container.State.SetStopped(0)
+				if err := container.Start(); err != nil {
+					return err
+				}
+			} else {
+				utils.Debugf("Marking as stopped")
+				container.State.SetStopped(-127)
+				if err := container.ToDisk(); err != nil {
+					return err
+				}
+			}
+		}
+	} else {
+		// When the container is not running, we still initialize the waitLock
+		// chan and close it. Receiving on nil chan blocks whereas receiving on a
+		// closed chan does not. In this case we do not want to block. 
+		container.waitLock = make(chan struct{})
+		close(container.waitLock)
+	}
+	return nil
+}
+
+func (runtime *Runtime) ensureName(container *Container) error {
+	if container.Name == "" {
+		name, err := generateRandomName(runtime)
+		if err != nil {
+			name = utils.TruncateID(container.ID)
+		}
+		container.Name = name
+
+		if err := container.ToDisk(); err != nil {
+			utils.Debugf("Error saving container name %s", err)
+		}
+		if !runtime.containerGraph.Exists(name) {
+			if _, err := runtime.containerGraph.Set(name, container.ID); err != nil {
+				utils.Debugf("Setting default id - %s", err)
+			}
+		}
+	}
+	return nil
+}
+
+func (runtime *Runtime) LogToDisk(src *utils.WriteBroadcaster, dst, stream string) error {
+	log, err := os.OpenFile(dst, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
+	if err != nil {
+		return err
+	}
+	src.AddWriter(log, stream)
+	return nil
+}
+
+// Destroy unregisters a container from the runtime and cleanly removes its contents from the filesystem.
+func (runtime *Runtime) Destroy(container *Container) error {
+	if container == nil {
+		return fmt.Errorf("The given container is <nil>")
+	}
+
+	element := runtime.getContainerElement(container.ID)
+	if element == nil {
+		return fmt.Errorf("Container %v not found - maybe it was already destroyed?", container.ID)
+	}
+
+	if err := container.Stop(3); err != nil {
+		return err
+	}
+
+	if err := runtime.driver.Remove(container.ID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", runtime.driver, container.ID, err)
+	}
+
+	initID := fmt.Sprintf("%s-init", container.ID)
+	if err := runtime.driver.Remove(initID); err != nil {
+		return fmt.Errorf("Driver %s failed to remove init filesystem %s: %s", runtime.driver, initID, err)
+	}
+
+	if _, err := runtime.containerGraph.Purge(container.ID); err != nil {
+		utils.Debugf("Unable to remove container from link graph: %s", err)
+	}
+
+	// Deregister the container before removing its directory, to avoid race conditions
+	runtime.idIndex.Delete(container.ID)
+	runtime.containers.Remove(element)
+	if err := os.RemoveAll(container.root); err != nil {
+		return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err)
+	}
+	return nil
+}
+
+func (runtime *Runtime) restore() error {
+	if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
+		fmt.Printf("Loading containers: ")
+	}
+	dir, err := ioutil.ReadDir(runtime.repository)
+	if err != nil {
+		return err
+	}
+	containers := make(map[string]*Container)
+	currentDriver := runtime.driver.String()
+
+	for _, v := range dir {
+		id := v.Name()
+		container, err := runtime.load(id)
+		if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
+			fmt.Print(".")
+		}
+		if err != nil {
+			utils.Errorf("Failed to load container %v: %v", id, err)
+			continue
+		}
+
+		// Ignore the container if it does not support the current driver being used by the graph
+		if container.Driver == "" && currentDriver == "aufs" || container.Driver == currentDriver {
+			utils.Debugf("Loaded container %v", container.ID)
+			containers[container.ID] = container
+		} else {
+			utils.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
+		}
+	}
+
+	register := func(container *Container) {
+		if err := runtime.Register(container); err != nil {
+			utils.Debugf("Failed to register container %s: %s", container.ID, err)
+		}
+	}
+
+	if entities := runtime.containerGraph.List("/", -1); entities != nil {
+		for _, p := range entities.Paths() {
+			if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" {
+				fmt.Print(".")
+			}
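+			// Containers present in the link graph are registered first, in the
+			// order returned by entities.Paths(); leftovers are handled below.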
+ e := entities[p] + if container, ok := containers[e.ID()]; ok { + register(container) + delete(containers, e.ID()) + } + } + } + + // Any containers that are left over do not exist in the graph + for _, container := range containers { + // Try to set the default name for a container if it exists prior to links + container.Name, err = generateRandomName(runtime) + if err != nil { + container.Name = utils.TruncateID(container.ID) + } + + if _, err := runtime.containerGraph.Set(container.Name, container.ID); err != nil { + utils.Debugf("Setting default id - %s", err) + } + register(container) + } + + if os.Getenv("DEBUG") == "" && os.Getenv("TEST") == "" { + fmt.Printf(": done.\n") + } + + return nil +} + +// Create creates a new container from the given configuration with a given name. +func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) { + // Lookup image + img, err := runtime.repositories.LookupImage(config.Image) + if err != nil { + return nil, nil, err + } + + // We add 2 layers to the depth because the container's rw and + // init layer add to the restriction + depth, err := img.Depth() + if err != nil { + return nil, nil, err + } + + if depth+2 >= MaxImageDepth { + return nil, nil, fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) + } + + checkDeprecatedExpose := func(config *runconfig.Config) bool { + if config != nil { + if config.PortSpecs != nil { + for _, p := range config.PortSpecs { + if strings.Contains(p, ":") { + return true + } + } + } + } + return false + } + + warnings := []string{} + if checkDeprecatedExpose(img.Config) || checkDeprecatedExpose(config) { + warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.") + } + + if img.Config != nil { + if err := runconfig.Merge(config, img.Config); err != nil { + return nil, nil, err + } + } + + if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + return nil, nil, fmt.Errorf("No command specified") + } + + // Generate id + id := utils.GenerateRandomID() + + if name == "" { + name, err = generateRandomName(runtime) + if err != nil { + name = utils.TruncateID(id) + } + } else { + if !validContainerNamePattern.MatchString(name) { + return nil, nil, fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + } + } + + if name[0] != '/' { + name = "/" + name + } + + // Set the enitity in the graph using the default name specified + if _, err := runtime.containerGraph.Set(name, id); err != nil { + if !graphdb.IsNonUniqueNameError(err) { + return nil, nil, err + } + + conflictingContainer, err := runtime.GetByName(name) + if err != nil { + if strings.Contains(err.Error(), "Could not find entity") { + return nil, nil, err + } + + // Remove name and continue starting the container + if err := runtime.containerGraph.Delete(name); err != nil { + return nil, nil, err + } + } else { + nameAsKnownByUser := strings.TrimPrefix(name, "/") + return nil, nil, fmt.Errorf( + "Conflict, The name %s is already assigned to %s. 
You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser, + utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser) + } + } + + // Generate default hostname + // FIXME: the lxc template no longer needs to set a default hostname + if config.Hostname == "" { + config.Hostname = id[:12] + } + + var args []string + var entrypoint string + + if len(config.Entrypoint) != 0 { + entrypoint = config.Entrypoint[0] + args = append(config.Entrypoint[1:], config.Cmd...) + } else { + entrypoint = config.Cmd[0] + args = config.Cmd[1:] + } + + container := &Container{ + // FIXME: we should generate the ID here instead of receiving it as an argument + ID: id, + Created: time.Now().UTC(), + Path: entrypoint, + Args: args, //FIXME: de-duplicate from config + Config: config, + hostConfig: &runconfig.HostConfig{}, + Image: img.ID, // Always use the resolved image id + NetworkSettings: &NetworkSettings{}, + Name: name, + Driver: runtime.driver.String(), + ExecDriver: runtime.execDriver.Name(), + } + container.root = runtime.containerRoot(container.ID) + // Step 1: create the container directory. + // This doubles as a barrier to avoid race conditions. + if err := os.Mkdir(container.root, 0700); err != nil { + return nil, nil, err + } + + initID := fmt.Sprintf("%s-init", container.ID) + if err := runtime.driver.Create(initID, img.ID); err != nil { + return nil, nil, err + } + initPath, err := runtime.driver.Get(initID) + if err != nil { + return nil, nil, err + } + defer runtime.driver.Put(initID) + + if err := graph.SetupInitLayer(initPath); err != nil { + return nil, nil, err + } + + if err := runtime.driver.Create(container.ID, initID); err != nil { + return nil, nil, err + } + resolvConf, err := utils.GetResolvConf() + if err != nil { + return nil, nil, err + } + + if len(config.Dns) == 0 && len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { + runtime.config.Dns = DefaultDns + } + + // If custom dns exists, then create a resolv.conf for the container + if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 { + var dns []string + if len(config.Dns) > 0 { + dns = config.Dns + } else { + dns = runtime.config.Dns + } + container.ResolvConfPath = path.Join(container.root, "resolv.conf") + f, err := os.Create(container.ResolvConfPath) + if err != nil { + return nil, nil, err + } + defer f.Close() + for _, dns := range dns { + if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil { + return nil, nil, err + } + } + } else { + container.ResolvConfPath = "/etc/resolv.conf" + } + + // Step 2: save the container json + if err := container.ToDisk(); err != nil { + return nil, nil, err + } + + // Step 3: register the container + if err := runtime.Register(container); err != nil { + return nil, nil, err + } + return container, warnings, nil +} + +// Commit creates a new filesystem image from the current state of a container. +// The image can optionally be tagged into a repository +func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) { + // FIXME: freeze the container before copying it to avoid data corruption? + // FIXME: this shouldn't be in commands. 
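+	// Mount the container so that its rw layer can be exported as a tar stream below.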
+ if err := container.Mount(); err != nil { + return nil, err + } + defer container.Unmount() + + rwTar, err := container.ExportRw() + if err != nil { + return nil, err + } + defer rwTar.Close() + + // Create a new image from the container's base layers + a new layer from container changes + var ( + containerID, containerImage string + containerConfig *runconfig.Config + ) + if container != nil { + containerID = container.ID + containerImage = container.Image + containerConfig = container.Config + } + img, err := runtime.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config) + if err != nil { + return nil, err + } + // Register the image if needed + if repository != "" { + if err := runtime.repositories.Set(repository, tag, img.ID, true); err != nil { + return img, err + } + } + return img, nil +} + +func GetFullContainerName(name string) (string, error) { + if name == "" { + return "", fmt.Errorf("Container name cannot be empty") + } + if name[0] != '/' { + name = "/" + name + } + return name, nil +} + +func (runtime *Runtime) GetByName(name string) (*Container, error) { + fullName, err := GetFullContainerName(name) + if err != nil { + return nil, err + } + entity := runtime.containerGraph.Get(fullName) + if entity == nil { + return nil, fmt.Errorf("Could not find entity for %s", name) + } + e := runtime.getContainerElement(entity.ID()) + if e == nil { + return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID()) + } + return e.Value.(*Container), nil +} + +func (runtime *Runtime) Children(name string) (map[string]*Container, error) { + name, err := GetFullContainerName(name) + if err != nil { + return nil, err + } + children := make(map[string]*Container) + + err = runtime.containerGraph.Walk(name, func(p string, e *graphdb.Entity) error { + c := runtime.Get(e.ID()) + if c == nil { + return fmt.Errorf("Could not get container for name %s and id %s", e.ID(), p) + } + children[p] = c + return nil + }, 0) + + if err != nil { + return nil, err + } + return children, nil +} + +func (runtime *Runtime) RegisterLink(parent, child *Container, alias string) error { + fullName := path.Join(parent.Name, alias) + if !runtime.containerGraph.Exists(fullName) { + _, err := runtime.containerGraph.Set(fullName, child.ID) + return err + } + return nil +} + +// FIXME: harmonize with NewGraph() +func NewRuntime(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) { + runtime, err := NewRuntimeFromDirectory(config, eng) + if err != nil { + return nil, err + } + return runtime, nil +} + +func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) { + + // Set the default driver + graphdriver.DefaultDriver = config.GraphDriver + + // Load storage driver + driver, err := graphdriver.New(config.Root) + if err != nil { + return nil, err + } + utils.Debugf("Using graph driver %s", driver) + + runtimeRepo := path.Join(config.Root, "containers") + + if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + if ad, ok := driver.(*aufs.Driver); ok { + utils.Debugf("Migrating existing containers") + if err := ad.Migrate(config.Root, graph.SetupInitLayer); err != nil { + return nil, err + } + } + + utils.Debugf("Creating images graph") + g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver) + if err != nil { + return nil, err + } + + // We don't want to use a complex driver like aufs or devmapper + // for volumes, just a plain filesystem + volumesDriver, err := 
graphdriver.GetDriver("vfs", config.Root) + if err != nil { + return nil, err + } + utils.Debugf("Creating volumes graph") + volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver) + if err != nil { + return nil, err + } + utils.Debugf("Creating repository list") + repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store: %s", err) + } + + if !config.DisableNetwork { + job := eng.Job("init_networkdriver") + + job.SetenvBool("EnableIptables", config.EnableIptables) + job.SetenvBool("InterContainerCommunication", config.InterContainerCommunication) + job.SetenvBool("EnableIpForward", config.EnableIpForward) + job.Setenv("BridgeIface", config.BridgeIface) + job.Setenv("BridgeIP", config.BridgeIP) + job.Setenv("DefaultBindingIP", config.DefaultIp.String()) + + if err := job.Run(); err != nil { + return nil, err + } + } + + graphdbPath := path.Join(config.Root, "linkgraph.db") + graph, err := graphdb.NewSqliteConn(graphdbPath) + if err != nil { + return nil, err + } + + localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) + sysInitPath := utils.DockerInitPath(localCopy) + if sysInitPath == "" { + return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.io/en/latest/contributing/devenvironment for official build instructions.") + } + + if sysInitPath != localCopy { + // When we find a suitable dockerinit binary (even if it's our local binary), we copy it into config.Root at localCopy for future use (so that the original can go away without that being a problem, for example during a package upgrade). + if err := os.Mkdir(path.Dir(localCopy), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + if _, err := utils.CopyFile(sysInitPath, localCopy); err != nil { + return nil, err + } + if err := os.Chmod(localCopy, 0700); err != nil { + return nil, err + } + sysInitPath = localCopy + } + + var ( + ed execdriver.Driver + sysInfo = sysinfo.New(false) + ) + + switch config.ExecDriver { + case "lxc": + // we want to five the lxc driver the full docker root because it needs + // to access and write config and template files in /var/lib/docker/containers/* + // to be backwards compatible + ed, err = lxc.NewDriver(config.Root, sysInfo.AppArmor) + case "native": + ed, err = native.NewDriver(path.Join(config.Root, "execdriver", "native")) + default: + return nil, fmt.Errorf("unknown exec driver %s", config.ExecDriver) + } + if err != nil { + return nil, err + } + + runtime := &Runtime{ + repository: runtimeRepo, + containers: list.New(), + graph: g, + repositories: repositories, + idIndex: utils.NewTruncIndex(), + sysInfo: sysInfo, + volumes: volumes, + config: config, + containerGraph: graph, + driver: driver, + sysInitPath: sysInitPath, + execDriver: ed, + eng: eng, + } + + if err := runtime.restore(); err != nil { + return nil, err + } + return runtime, nil +} + +func (runtime *Runtime) Close() error { + errorsStrings := []string{} + if err := portallocator.ReleaseAll(); err != nil { + utils.Errorf("portallocator.ReleaseAll(): %s", err) + errorsStrings = append(errorsStrings, err.Error()) + } + if err := runtime.driver.Cleanup(); err != nil { + utils.Errorf("runtime.driver.Cleanup(): %s", err.Error()) + errorsStrings = append(errorsStrings, err.Error()) + } + if err := runtime.containerGraph.Close(); err != nil { + 
utils.Errorf("runtime.containerGraph.Close(): %s", err.Error()) + errorsStrings = append(errorsStrings, err.Error()) + } + if len(errorsStrings) > 0 { + return fmt.Errorf("%s", strings.Join(errorsStrings, ", ")) + } + return nil +} + +func (runtime *Runtime) Mount(container *Container) error { + dir, err := runtime.driver.Get(container.ID) + if err != nil { + return fmt.Errorf("Error getting container %s from driver %s: %s", container.ID, runtime.driver, err) + } + if container.basefs == "" { + container.basefs = dir + } else if container.basefs != dir { + return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + runtime.driver, container.ID, container.basefs, dir) + } + return nil +} + +func (runtime *Runtime) Unmount(container *Container) error { + runtime.driver.Put(container.ID) + return nil +} + +func (runtime *Runtime) Changes(container *Container) ([]archive.Change, error) { + if differ, ok := runtime.driver.(graphdriver.Differ); ok { + return differ.Changes(container.ID) + } + cDir, err := runtime.driver.Get(container.ID) + if err != nil { + return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) + } + defer runtime.driver.Put(container.ID) + initDir, err := runtime.driver.Get(container.ID + "-init") + if err != nil { + return nil, fmt.Errorf("Error getting container init rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) + } + defer runtime.driver.Put(container.ID + "-init") + return archive.ChangesDirs(cDir, initDir) +} + +func (runtime *Runtime) Diff(container *Container) (archive.Archive, error) { + if differ, ok := runtime.driver.(graphdriver.Differ); ok { + return differ.Diff(container.ID) + } + + changes, err := runtime.Changes(container) + if err != nil { + return nil, err + } + + cDir, err := runtime.driver.Get(container.ID) + if err != nil { + return nil, fmt.Errorf("Error getting container rootfs %s from driver %s: %s", container.ID, container.runtime.driver, err) + } + + archive, err := archive.ExportChanges(cDir, changes) + if err != nil { + return nil, err + } + return utils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + runtime.driver.Put(container.ID) + return err + }), nil +} + +func (runtime *Runtime) Run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + return runtime.execDriver.Run(c.command, pipes, startCallback) +} + +func (runtime *Runtime) Kill(c *Container, sig int) error { + return runtime.execDriver.Kill(c.command, sig) +} + +// Nuke kills all containers then removes all content +// from the content root, including images, volumes and +// container filesystems. +// Again: this will remove your entire docker runtime! +func (runtime *Runtime) Nuke() error { + var wg sync.WaitGroup + for _, container := range runtime.List() { + wg.Add(1) + go func(c *Container) { + c.Kill() + wg.Done() + }(container) + } + wg.Wait() + runtime.Close() + + return os.RemoveAll(runtime.config.Root) +} + +// FIXME: this is a convenience function for integration tests +// which need direct access to runtime.graph. +// Once the tests switch to using engine and jobs, this method +// can go away. 
+func (runtime *Runtime) Graph() *graph.Graph { + return runtime.graph +} + +func (runtime *Runtime) Repositories() *graph.TagStore { + return runtime.repositories +} + +func (runtime *Runtime) Config() *daemonconfig.Config { + return runtime.config +} + +func (runtime *Runtime) SystemConfig() *sysinfo.SysInfo { + return runtime.sysInfo +} + +func (runtime *Runtime) SystemInitPath() string { + return runtime.sysInitPath +} + +func (runtime *Runtime) GraphDriver() graphdriver.Driver { + return runtime.driver +} + +func (runtime *Runtime) ExecutionDriver() execdriver.Driver { + return runtime.execDriver +} + +func (runtime *Runtime) Volumes() *graph.Graph { + return runtime.volumes +} + +func (runtime *Runtime) ContainerGraph() *graphdb.Database { + return runtime.containerGraph +} + +func (runtime *Runtime) SetServer(server Server) { + runtime.srv = server +} + +// History is a convenience type for storing a list of containers, +// ordered by creation date. +type History []*Container + +func (history *History) Len() int { + return len(*history) +} + +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].When().Before(containers[i].When()) +} + +func (history *History) Swap(i, j int) { + containers := *history + tmp := containers[i] + containers[i] = containers[j] + containers[j] = tmp +} + +func (history *History) Add(container *Container) { + *history = append(*history, container) + sort.Sort(history) +} diff --git a/runtime/server.go b/runtime/server.go new file mode 100644 index 0000000000..0d2b71dea7 --- /dev/null +++ b/runtime/server.go @@ -0,0 +1,9 @@ +package runtime + +import ( + "github.com/dotcloud/docker/utils" +) + +type Server interface { + LogEvent(action, id, from string) *utils.JSONMessage +} diff --git a/runtime/sorter.go b/runtime/sorter.go new file mode 100644 index 0000000000..c5af772dae --- /dev/null +++ b/runtime/sorter.go @@ -0,0 +1,25 @@ +package runtime + +import "sort" + +type containerSorter struct { + containers []*Container + by func(i, j *Container) bool +} + +func (s *containerSorter) Len() int { + return len(s.containers) +} + +func (s *containerSorter) Swap(i, j int) { + s.containers[i], s.containers[j] = s.containers[j], s.containers[i] +} + +func (s *containerSorter) Less(i, j int) bool { + return s.by(s.containers[i], s.containers[j]) +} + +func sortContainers(containers []*Container, predicate func(i, j *Container) bool) { + s := &containerSorter{containers, predicate} + sort.Sort(s) +} diff --git a/runtime/state.go b/runtime/state.go new file mode 100644 index 0000000000..cce6912b46 --- /dev/null +++ b/runtime/state.go @@ -0,0 +1,81 @@ +package runtime + +import ( + "fmt" + "github.com/dotcloud/docker/utils" + "sync" + "time" +) + +type State struct { + sync.RWMutex + Running bool + Pid int + ExitCode int + StartedAt time.Time + FinishedAt time.Time + Ghost bool +} + +// String returns a human-readable description of the state +func (s *State) String() string { + s.RLock() + defer s.RUnlock() + + if s.Running { + if s.Ghost { + return fmt.Sprintf("Ghost") + } + return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) + } + return fmt.Sprintf("Exit %d", s.ExitCode) +} + +func (s *State) IsRunning() bool { + s.RLock() + defer s.RUnlock() + + return s.Running +} + +func (s *State) IsGhost() bool { + s.RLock() + defer s.RUnlock() + + return s.Ghost +} + +func (s *State) GetExitCode() int { + s.RLock() + defer s.RUnlock() + + return s.ExitCode +} + +func (s *State) SetGhost(val bool) { + 
s.Lock() + defer s.Unlock() + + s.Ghost = val +} + +func (s *State) SetRunning(pid int) { + s.Lock() + defer s.Unlock() + + s.Running = true + s.Ghost = false + s.ExitCode = 0 + s.Pid = pid + s.StartedAt = time.Now().UTC() +} + +func (s *State) SetStopped(exitCode int) { + s.Lock() + defer s.Unlock() + + s.Running = false + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.ExitCode = exitCode +} diff --git a/runtime/utils.go b/runtime/utils.go new file mode 100644 index 0000000000..b343b5b10e --- /dev/null +++ b/runtime/utils.go @@ -0,0 +1,44 @@ +package runtime + +import ( + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/pkg/namesgenerator" + "github.com/dotcloud/docker/runconfig" +) + +func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error { + if config.PortSpecs != nil { + ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs) + if err != nil { + return err + } + config.PortSpecs = nil + if len(bindings) > 0 { + if hostConfig == nil { + hostConfig = &runconfig.HostConfig{} + } + hostConfig.PortBindings = bindings + } + + if config.ExposedPorts == nil { + config.ExposedPorts = make(nat.PortSet, len(ports)) + } + for k, v := range ports { + config.ExposedPorts[k] = v + } + } + return nil +} + +type checker struct { + runtime *Runtime +} + +func (c *checker) Exists(name string) bool { + return c.runtime.containerGraph.Exists("/" + name) +} + +// Generate a random and unique name +func generateRandomName(runtime *Runtime) (string, error) { + return namesgenerator.GenerateRandomName(&checker{runtime}) +} diff --git a/runtime/volumes.go b/runtime/volumes.go new file mode 100644 index 0000000000..1a548eca47 --- /dev/null +++ b/runtime/volumes.go @@ -0,0 +1,332 @@ +package runtime + +import ( + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/pkg/mount" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "syscall" +) + +type BindMap struct { + SrcPath string + DstPath string + Mode string +} + +func prepareVolumesForContainer(container *Container) error { + if container.Volumes == nil || len(container.Volumes) == 0 { + container.Volumes = make(map[string]string) + container.VolumesRW = make(map[string]bool) + if err := applyVolumesFrom(container); err != nil { + return err + } + } + + if err := createVolumes(container); err != nil { + return err + } + return nil +} + +func mountVolumesForContainer(container *Container, envPath string) error { + // Setup the root fs as a bind mount of the base fs + var ( + root = container.RootfsPath() + runtime = container.runtime + ) + if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) { + return nil + } + + // Create a bind mount of the base fs as a place where we can add mounts + // without affecting the ability to access the base fs + if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil { + return err + } + + // Make sure the root fs is private so the mounts here don't propagate to basefs + if err := mount.ForceMount(root, root, "none", "private"); err != nil { + return err + } + + // Mount docker specific files into the containers root fs + if err := mount.Mount(runtime.sysInitPath, filepath.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil { + return err + } + if err := mount.Mount(envPath, filepath.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil { + return err + } + if err := mount.Mount(container.ResolvConfPath, filepath.Join(root, "/etc/resolv.conf"), "none", 
"bind,ro"); err != nil { + return err + } + + if container.HostnamePath != "" && container.HostsPath != "" { + if err := mount.Mount(container.HostnamePath, filepath.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil { + return err + } + if err := mount.Mount(container.HostsPath, filepath.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil { + return err + } + } + + // Mount user specified volumes + for r, v := range container.Volumes { + mountAs := "ro" + if container.VolumesRW[r] { + mountAs = "rw" + } + + r = filepath.Join(root, r) + if p, err := utils.FollowSymlinkInScope(r, root); err != nil { + return err + } else { + r = p + } + + if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil { + return err + } + } + return nil +} + +func unmountVolumesForContainer(container *Container) { + var ( + root = container.RootfsPath() + mounts = []string{ + root, + filepath.Join(root, "/.dockerinit"), + filepath.Join(root, "/.dockerenv"), + filepath.Join(root, "/etc/resolv.conf"), + } + ) + + if container.HostnamePath != "" && container.HostsPath != "" { + mounts = append(mounts, filepath.Join(root, "/etc/hostname"), filepath.Join(root, "/etc/hosts")) + } + + for r := range container.Volumes { + mounts = append(mounts, filepath.Join(root, r)) + } + + for i := len(mounts) - 1; i >= 0; i-- { + if lastError := mount.Unmount(mounts[i]); lastError != nil { + log.Printf("Failed to umount %v: %v", mounts[i], lastError) + } + } +} + +func applyVolumesFrom(container *Container) error { + if container.Config.VolumesFrom != "" { + for _, containerSpec := range strings.Split(container.Config.VolumesFrom, ",") { + var ( + mountRW = true + specParts = strings.SplitN(containerSpec, ":", 2) + ) + + switch len(specParts) { + case 0: + return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom) + case 2: + switch specParts[1] { + case "ro": + mountRW = false + case "rw": // mountRW is already true + default: + return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec) + } + } + + c := container.runtime.Get(specParts[0]) + if c == nil { + return fmt.Errorf("Container %s not found. 
Impossible to mount its volumes", container.ID) + } + + for volPath, id := range c.Volumes { + if _, exists := container.Volumes[volPath]; exists { + continue + } + if err := os.MkdirAll(filepath.Join(container.basefs, volPath), 0755); err != nil { + return err + } + container.Volumes[volPath] = id + if isRW, exists := c.VolumesRW[volPath]; exists { + container.VolumesRW[volPath] = isRW && mountRW + } + } + + } + } + return nil +} + +func getBindMap(container *Container) (map[string]BindMap, error) { + var ( + // Create the requested bind mounts + binds = make(map[string]BindMap) + // Define illegal container destinations + illegalDsts = []string{"/", "."} + ) + + for _, bind := range container.hostConfig.Binds { + // FIXME: factorize bind parsing in parseBind + var ( + src, dst, mode string + arr = strings.Split(bind, ":") + ) + + if len(arr) == 2 { + src = arr[0] + dst = arr[1] + mode = "rw" + } else if len(arr) == 3 { + src = arr[0] + dst = arr[1] + mode = arr[2] + } else { + return nil, fmt.Errorf("Invalid bind specification: %s", bind) + } + + // Bail if trying to mount to an illegal destination + for _, illegal := range illegalDsts { + if dst == illegal { + return nil, fmt.Errorf("Illegal bind destination: %s", dst) + } + } + + bindMap := BindMap{ + SrcPath: src, + DstPath: dst, + Mode: mode, + } + binds[filepath.Clean(dst)] = bindMap + } + return binds, nil +} + +func createVolumes(container *Container) error { + binds, err := getBindMap(container) + if err != nil { + return err + } + + volumesDriver := container.runtime.volumes.Driver() + // Create the requested volumes if they don't exist + for volPath := range container.Config.Volumes { + volPath = filepath.Clean(volPath) + volIsDir := true + // Skip existing volumes + if _, exists := container.Volumes[volPath]; exists { + continue + } + var srcPath string + var isBindMount bool + srcRW := false + // If an external bind is defined for this volume, use that as a source + if bindMap, exists := binds[volPath]; exists { + isBindMount = true + srcPath = bindMap.SrcPath + if strings.ToLower(bindMap.Mode) == "rw" { + srcRW = true + } + if stat, err := os.Stat(bindMap.SrcPath); err != nil { + return err + } else { + volIsDir = stat.IsDir() + } + // Otherwise create an directory in $ROOT/volumes/ and use that + } else { + + // Do not pass a container as the parameter for the volume creation. + // The graph driver using the container's information ( Image ) to + // create the parent. 
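
A note on getBindMap a few lines up: it carries the comment "FIXME: factorize bind parsing in parseBind", and its "src:dst" / "src:dst:mode" handling is self-contained enough to pull out. A minimal sketch of such a helper follows; the name comes from that FIXME, but the signature and the "rw" default are assumptions of this sketch, not code from the patch.

package runtime

import (
	"fmt"
	"strings"
)

// parseBind splits a bind specification of the form "src:dst" or
// "src:dst:mode" into its parts, defaulting the mode to "rw".
func parseBind(spec string) (src, dst, mode string, err error) {
	parts := strings.Split(spec, ":")
	switch len(parts) {
	case 2:
		return parts[0], parts[1], "rw", nil
	case 3:
		return parts[0], parts[1], parts[2], nil
	default:
		return "", "", "", fmt.Errorf("Invalid bind specification: %s", spec)
	}
}

With a helper like that, getBindMap would only need to reject illegal destinations and fill in the BindMap struct.
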
+ c, err := container.runtime.volumes.Create(nil, "", "", "", "", nil, nil) + if err != nil { + return err + } + srcPath, err = volumesDriver.Get(c.ID) + if err != nil { + return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) + } + srcRW = true // RW by default + } + + if p, err := filepath.EvalSymlinks(srcPath); err != nil { + return err + } else { + srcPath = p + } + + container.Volumes[volPath] = srcPath + container.VolumesRW[volPath] = srcRW + + // Create the mountpoint + volPath = filepath.Join(container.basefs, volPath) + rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.basefs) + if err != nil { + return err + } + + if _, err := os.Stat(rootVolPath); err != nil { + if os.IsNotExist(err) { + if volIsDir { + if err := os.MkdirAll(rootVolPath, 0755); err != nil { + return err + } + } else { + if err := os.MkdirAll(filepath.Dir(rootVolPath), 0755); err != nil { + return err + } + if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil { + return err + } else { + f.Close() + } + } + } + } + + // Do not copy or change permissions if we are mounting from the host + if srcRW && !isBindMount { + volList, err := ioutil.ReadDir(rootVolPath) + if err != nil { + return err + } + if len(volList) > 0 { + srcList, err := ioutil.ReadDir(srcPath) + if err != nil { + return err + } + if len(srcList) == 0 { + // If the source volume is empty copy files from the root into the volume + if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil { + return err + } + + var stat syscall.Stat_t + if err := syscall.Stat(rootVolPath, &stat); err != nil { + return err + } + var srcStat syscall.Stat_t + if err := syscall.Stat(srcPath, &srcStat); err != nil { + return err + } + // Change the source volume's ownership if it differs from the root + // files that were just copied + if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { + if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + } + } + } + } + } + return nil +} diff --git a/server.go b/server.go index 5c28b262dc..85d56afdb6 100644 --- a/server.go +++ b/server.go @@ -13,6 +13,7 @@ import ( "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/runtime" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -24,7 +25,7 @@ import ( "os/signal" "path" "path/filepath" - "runtime" + goruntime "runtime" "strconv" "strings" "sync" @@ -41,9 +42,9 @@ func InitServer(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } - if srv.runtime.config.Pidfile != "" { + if srv.runtime.Config().Pidfile != "" { job.Logf("Creating pidfile") - if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil { + if err := utils.CreatePidFile(srv.runtime.Config().Pidfile); err != nil { // FIXME: do we need fatal here instead of returning a job error? 
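
One detail in the server.go import hunk above deserves a remark: because the file now imports github.com/dotcloud/docker/runtime, the standard library "runtime" package is re-imported under the alias goruntime, leaving the bare identifier runtime free to refer to Docker's own package for the rest of the file. Reduced to a sketch (assuming only the List method that appears elsewhere in this patch), the pattern looks like this:

package docker

import (
	"fmt"

	goruntime "runtime" // stdlib runtime, aliased so the name "runtime" stays free

	"github.com/dotcloud/docker/runtime"
)

// report prints one figure from each of the two packages that would
// otherwise collide on the name "runtime".
func report(r *runtime.Runtime) {
	fmt.Println("goroutines:", goruntime.NumGoroutine()) // standard library
	fmt.Println("containers:", len(r.List()))            // Docker runtime package
}

This is also why a later hunk in this commit changes runtime.NumGoroutine() to goruntime.NumGoroutine(), while references such as runtime.DefaultDns and runtime.NewRuntime resolve against the new package.
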
log.Fatal(err) } @@ -54,7 +55,7 @@ func InitServer(job *engine.Job) engine.Status { go func() { sig := <-c log.Printf("Received signal '%v', exiting\n", sig) - utils.RemovePidFile(srv.runtime.config.Pidfile) + utils.RemovePidFile(srv.runtime.Config().Pidfile) srv.Close() os.Exit(0) }() @@ -181,10 +182,10 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { if err := container.Kill(); err != nil { return job.Errorf("Cannot kill container %s: %s", name, err) } - srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image)) + srv.LogEvent("kill", container.ID, srv.runtime.Repositories().ImageName(container.Image)) } else { // Otherwise, just send the requested signal - if err := container.kill(int(sig)); err != nil { + if err := container.KillSig(int(sig)); err != nil { return job.Errorf("Cannot kill container %s: %s", name, err) } // FIXME: Add event for signals @@ -293,7 +294,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status { return job.Errorf("%s: %s", name, err) } // FIXME: factor job-specific LogEvent to engine.Job.Run() - srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image)) + srv.LogEvent("export", container.ID, srv.runtime.Repositories().ImageName(container.Image)) return engine.StatusOK } return job.Errorf("No such container: %s", name) @@ -318,7 +319,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status { utils.Debugf("Serializing %s", name) - rootRepo, err := srv.runtime.repositories.Get(name) + rootRepo, err := srv.runtime.Repositories().Get(name) if err != nil { return job.Error(err) } @@ -494,7 +495,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status { return job.Error(err) } if repoName != "" { - srv.runtime.repositories.Set(repoName, tag, id, false) + srv.runtime.Repositories().Set(repoName, tag, id, false) } return engine.StatusOK } @@ -555,7 +556,7 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status { for imageName, tagMap := range repositories { for tag, address := range tagMap { - if err := srv.runtime.repositories.Set(imageName, tag, address, true); err != nil { + if err := srv.runtime.Repositories().Set(imageName, tag, address, true); err != nil { return job.Error(err) } } @@ -588,13 +589,13 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error { return err } if img.Parent != "" { - if !srv.runtime.graph.Exists(img.Parent) { + if !srv.runtime.Graph().Exists(img.Parent) { if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil { return err } } } - if err := srv.runtime.graph.Register(imageJson, layer, img); err != nil { + if err := srv.runtime.Graph().Register(imageJson, layer, img); err != nil { return err } } @@ -650,7 +651,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status { sf := utils.NewStreamFormatter(job.GetenvBool("json")) out := utils.NewWriteFlusher(job.Stdout) - img, err := srv.runtime.repositories.LookupImage(name) + img, err := srv.runtime.Repositories().LookupImage(name) if err != nil { return job.Error(err) } @@ -661,7 +662,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status { } defer file.Body.Close() - config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.sysInfo) + config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.SystemConfig()) if err != nil { return job.Error(err) } @@ -685,7 +686,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status { } func (srv *Server) 
ImagesViz(job *engine.Job) engine.Status { - images, _ := srv.runtime.graph.Map() + images, _ := srv.runtime.Graph().Map() if images == nil { return engine.StatusOK } @@ -709,7 +710,7 @@ func (srv *Server) ImagesViz(job *engine.Job) engine.Status { reporefs := make(map[string][]string) - for name, repository := range srv.runtime.repositories.Repositories { + for name, repository := range srv.runtime.Repositories().Repositories { for tag, id := range repository { reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag)) } @@ -728,22 +729,22 @@ func (srv *Server) Images(job *engine.Job) engine.Status { err error ) if job.GetenvBool("all") { - allImages, err = srv.runtime.graph.Map() + allImages, err = srv.runtime.Graph().Map() } else { - allImages, err = srv.runtime.graph.Heads() + allImages, err = srv.runtime.Graph().Heads() } if err != nil { return job.Error(err) } lookup := make(map[string]*engine.Env) - for name, repository := range srv.runtime.repositories.Repositories { + for name, repository := range srv.runtime.Repositories().Repositories { if job.Getenv("filter") != "" { if match, _ := path.Match(job.Getenv("filter"), name); !match { continue } } for tag, id := range repository { - image, err := srv.runtime.graph.Get(id) + image, err := srv.runtime.Graph().Get(id) if err != nil { log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err) continue @@ -793,7 +794,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status { } func (srv *Server) DockerInfo(job *engine.Job) engine.Status { - images, _ := srv.runtime.graph.Map() + images, _ := srv.runtime.Graph().Map() var imgcount int if images == nil { imgcount = 0 @@ -809,21 +810,21 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { initPath := utils.DockerInitPath("") if initPath == "" { // if that fails, we'll just return the path from the runtime - initPath = srv.runtime.sysInitPath + initPath = srv.runtime.SystemInitPath() } v := &engine.Env{} v.SetInt("Containers", len(srv.runtime.List())) v.SetInt("Images", imgcount) - v.Set("Driver", srv.runtime.driver.String()) - v.SetJson("DriverStatus", srv.runtime.driver.Status()) - v.SetBool("MemoryLimit", srv.runtime.sysInfo.MemoryLimit) - v.SetBool("SwapLimit", srv.runtime.sysInfo.SwapLimit) - v.SetBool("IPv4Forwarding", !srv.runtime.sysInfo.IPv4ForwardingDisabled) + v.Set("Driver", srv.runtime.GraphDriver().String()) + v.SetJson("DriverStatus", srv.runtime.GraphDriver().Status()) + v.SetBool("MemoryLimit", srv.runtime.SystemConfig().MemoryLimit) + v.SetBool("SwapLimit", srv.runtime.SystemConfig().SwapLimit) + v.SetBool("IPv4Forwarding", !srv.runtime.SystemConfig().IPv4ForwardingDisabled) v.SetBool("Debug", os.Getenv("DEBUG") != "") v.SetInt("NFd", utils.GetTotalUsedFds()) - v.SetInt("NGoroutines", runtime.NumGoroutine()) - v.Set("ExecutionDriver", srv.runtime.execDriver.Name()) + v.SetInt("NGoroutines", goruntime.NumGoroutine()) + v.Set("ExecutionDriver", srv.runtime.ExecutionDriver().Name()) v.SetInt("NEventsListener", len(srv.listeners)) v.Set("KernelVersion", kernelVersion) v.Set("IndexServerAddress", auth.IndexServerAddress()) @@ -840,13 +841,13 @@ func (srv *Server) ImageHistory(job *engine.Job) engine.Status { return job.Errorf("Usage: %s IMAGE", job.Name) } name := job.Args[0] - foundImage, err := srv.runtime.repositories.LookupImage(name) + foundImage, err := srv.runtime.Repositories().LookupImage(name) if err != nil { return job.Error(err) } lookupMap := make(map[string][]string) - for name, 
repository := range srv.runtime.repositories.Repositories { + for name, repository := range srv.runtime.Repositories().Repositories { for tag, id := range repository { // If the ID already has a reverse lookup, do not update it unless for "latest" if _, exists := lookupMap[id]; !exists { @@ -891,7 +892,7 @@ func (srv *Server) ContainerTop(job *engine.Job) engine.Status { if !container.State.IsRunning() { return job.Errorf("Container %s is not running", name) } - pids, err := srv.runtime.execDriver.GetPidsForContainer(container.ID) + pids, err := srv.runtime.ExecutionDriver().GetPidsForContainer(container.ID) if err != nil { return job.Error(err) } @@ -984,7 +985,7 @@ func (srv *Server) Containers(job *engine.Job) engine.Status { outs := engine.NewTable("Created", 0) names := map[string][]string{} - srv.runtime.containerGraph.Walk("/", func(p string, e *graphdb.Entity) error { + srv.runtime.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { names[e.ID()] = append(names[e.ID()], p) return nil }, -1) @@ -1009,7 +1010,7 @@ func (srv *Server) Containers(job *engine.Job) engine.Status { out := &engine.Env{} out.Set("Id", container.ID) out.SetList("Names", names[container.ID]) - out.Set("Image", srv.runtime.repositories.ImageName(container.Image)) + out.Set("Image", srv.runtime.Repositories().ImageName(container.Image)) if len(container.Args) > 0 { out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, strings.Join(container.Args, " "))) } else { @@ -1067,7 +1068,7 @@ func (srv *Server) ImageTag(job *engine.Job) engine.Status { if len(job.Args) == 3 { tag = job.Args[2] } - if err := srv.runtime.repositories.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { + if err := srv.runtime.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { return job.Error(err) } return engine.StatusOK @@ -1092,7 +1093,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin } defer srv.poolRemove("pull", "layer:"+id) - if !srv.runtime.graph.Exists(id) { + if !srv.runtime.Graph().Exists(id) { out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil)) imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token) if err != nil { @@ -1114,7 +1115,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin return err } defer layer.Close() - if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil { + if err := srv.runtime.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil { out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) return err } @@ -1249,11 +1250,11 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName if askedTag != "" && tag != askedTag { continue } - if err := srv.runtime.repositories.Set(localName, tag, id, true); err != nil { + if err := srv.runtime.Repositories().Set(localName, tag, id, true); err != nil { return err } } - if err := srv.runtime.repositories.Save(); err != nil { + if err := srv.runtime.Repositories().Save(); err != nil { return err } @@ -1374,7 +1375,7 @@ func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[stri tagsByImage[id] = append(tagsByImage[id], tag) - for img, err := srv.runtime.graph.Get(id); img != nil; img, err = img.GetParent() { + for img, err := 
srv.runtime.Graph().Get(id); img != nil; img, err = img.GetParent() { if err != nil { return nil, nil, err } @@ -1481,7 +1482,7 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) { out = utils.NewWriteFlusher(out) - jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json")) + jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.Graph().Root, imgID, "json")) if err != nil { return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) } @@ -1500,7 +1501,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, return "", err } - layerData, err := srv.runtime.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out) + layerData, err := srv.runtime.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out) if err != nil { return "", fmt.Errorf("Failed to generate layer archive: %s", err) } @@ -1552,17 +1553,17 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status { return job.Error(err) } - img, err := srv.runtime.graph.Get(localName) + img, err := srv.runtime.Graph().Get(localName) r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) if err2 != nil { return job.Error(err2) } if err != nil { - reposLen := len(srv.runtime.repositories.Repositories[localName]) + reposLen := len(srv.runtime.Repositories().Repositories[localName]) job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen)) // If it fails, try to get the repository - if localRepo, exists := srv.runtime.repositories.Repositories[localName]; exists { + if localRepo, exists := srv.runtime.Repositories().Repositories[localName]; exists { if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil { return job.Error(err) } @@ -1618,13 +1619,13 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status { defer progressReader.Close() archive = progressReader } - img, err := srv.runtime.graph.Create(archive, "", "", "Imported from "+src, "", nil, nil) + img, err := srv.runtime.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil) if err != nil { return job.Error(err) } // Optionally register the image at REPO/TAG if repo != "" { - if err := srv.runtime.repositories.Set(repo, tag, img.ID, true); err != nil { + if err := srv.runtime.Repositories().Set(repo, tag, img.ID, true); err != nil { return job.Error(err) } } @@ -1643,11 +1644,11 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { if config.Memory != 0 && config.Memory < 524288 { return job.Errorf("Minimum memory limit allowed is 512k") } - if config.Memory > 0 && !srv.runtime.sysInfo.MemoryLimit { + if config.Memory > 0 && !srv.runtime.SystemConfig().MemoryLimit { job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n") config.Memory = 0 } - if config.Memory > 0 && !srv.runtime.sysInfo.SwapLimit { + if config.Memory > 0 && !srv.runtime.SystemConfig().SwapLimit { job.Errorf("Your kernel does not support swap limit capabilities. 
Limitation discarded.\n") config.MemorySwap = -1 } @@ -1655,14 +1656,14 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { if err != nil { return job.Error(err) } - if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { - job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", defaultDns) - config.Dns = defaultDns + if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.Config().Dns) == 0 && utils.CheckLocalDns(resolvConf) { + job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", runtime.DefaultDns) + config.Dns = runtime.DefaultDns } container, buildWarnings, err := srv.runtime.Create(config, name) if err != nil { - if srv.runtime.graph.IsNotExist(err) { + if srv.runtime.Graph().IsNotExist(err) { _, tag := utils.ParseRepositoryTag(config.Image) if tag == "" { tag = graph.DEFAULTTAG @@ -1671,10 +1672,10 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { } return job.Error(err) } - if !container.Config.NetworkDisabled && srv.runtime.sysInfo.IPv4ForwardingDisabled { + if !container.Config.NetworkDisabled && srv.runtime.SystemConfig().IPv4ForwardingDisabled { job.Errorf("IPv4 forwarding is disabled.\n") } - srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image)) + srv.LogEvent("create", container.ID, srv.runtime.Repositories().ImageName(container.Image)) // FIXME: this is necessary because runtime.Create might return a nil container // with a non-nil error. This should not happen! Once it's fixed we // can remove this workaround. @@ -1702,7 +1703,7 @@ func (srv *Server) ContainerRestart(job *engine.Job) engine.Status { if err := container.Restart(int(t)); err != nil { return job.Errorf("Cannot restart container %s: %s\n", name, err) } - srv.LogEvent("restart", container.ID, srv.runtime.repositories.ImageName(container.Image)) + srv.LogEvent("restart", container.ID, srv.runtime.Repositories().ImageName(container.Image)) } else { return job.Errorf("No such container: %s\n", name) } @@ -1724,7 +1725,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { if container == nil { return job.Errorf("No such link: %s", name) } - name, err := getFullName(name) + name, err := runtime.GetFullContainerName(name) if err != nil { job.Error(err) } @@ -1732,21 +1733,17 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { if parent == "/" { return job.Errorf("Conflict, cannot remove the default name of the container") } - pe := srv.runtime.containerGraph.Get(parent) + pe := srv.runtime.ContainerGraph().Get(parent) if pe == nil { return job.Errorf("Cannot get parent %s for name %s", parent, name) } parentContainer := srv.runtime.Get(pe.ID()) - if parentContainer != nil && parentContainer.activeLinks != nil { - if link, exists := parentContainer.activeLinks[n]; exists { - link.Disable() - } else { - utils.Debugf("Could not find active link for %s", name) - } + if parentContainer != nil { + parentContainer.DisableLink(n) } - if err := srv.runtime.containerGraph.Delete(name); err != nil { + if err := srv.runtime.ContainerGraph().Delete(name); err != nil { return job.Error(err) } return engine.StatusOK @@ -1765,13 +1762,13 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { if err := srv.runtime.Destroy(container); err != nil { return 
job.Errorf("Cannot destroy container %s: %s", name, err) } - srv.LogEvent("destroy", container.ID, srv.runtime.repositories.ImageName(container.Image)) + srv.LogEvent("destroy", container.ID, srv.runtime.Repositories().ImageName(container.Image)) if removeVolume { var ( volumes = make(map[string]struct{}) binds = make(map[string]struct{}) - usedVolumes = make(map[string]*Container) + usedVolumes = make(map[string]*runtime.Container) ) // the volume id is always the base of the path @@ -1780,7 +1777,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { } // populate bind map so that they can be skipped and not removed - for _, bind := range container.hostConfig.Binds { + for _, bind := range container.HostConfig().Binds { source := strings.Split(bind, ":")[0] // TODO: refactor all volume stuff, all of it // this is very important that we eval the link @@ -1819,7 +1816,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID) continue } - if err := srv.runtime.volumes.Delete(volumeId); err != nil { + if err := srv.runtime.Volumes().Delete(volumeId); err != nil { return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err) } } @@ -1841,9 +1838,9 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo tag = graph.DEFAULTTAG } - img, err := srv.runtime.repositories.LookupImage(name) + img, err := srv.runtime.Repositories().LookupImage(name) if err != nil { - if r, _ := srv.runtime.repositories.Get(repoName); r != nil { + if r, _ := srv.runtime.Repositories().Get(repoName); r != nil { return fmt.Errorf("No such image: %s:%s", repoName, tag) } return fmt.Errorf("No such image: %s", name) @@ -1854,14 +1851,14 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo tag = "" } - byParents, err := srv.runtime.graph.ByParent() + byParents, err := srv.runtime.Graph().ByParent() if err != nil { return err } //If delete by id, see if the id belong only to one repository if repoName == "" { - for _, repoAndTag := range srv.runtime.repositories.ByID()[img.ID] { + for _, repoAndTag := range srv.runtime.Repositories().ByID()[img.ID] { parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag) if repoName == "" || repoName == parsedRepo { repoName = parsedRepo @@ -1884,7 +1881,7 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo //Untag the current image for _, tag := range tags { - tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag) + tagDeleted, err := srv.runtime.Repositories().Delete(repoName, tag) if err != nil { return err } @@ -1895,16 +1892,16 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo srv.LogEvent("untag", img.ID, "") } } - tags = srv.runtime.repositories.ByID()[img.ID] + tags = srv.runtime.Repositories().ByID()[img.ID] if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { if len(byParents[img.ID]) == 0 { if err := srv.canDeleteImage(img.ID); err != nil { return err } - if err := srv.runtime.repositories.DeleteAll(img.ID); err != nil { + if err := srv.runtime.Repositories().DeleteAll(img.ID); err != nil { return err } - if err := srv.runtime.graph.Delete(img.ID); err != nil { + if err := srv.runtime.Graph().Delete(img.ID); err != nil { return err } out := &engine.Env{} @@ -1943,7 +1940,7 @@ func (srv *Server) ImageDelete(job *engine.Job) engine.Status { func (srv *Server) canDeleteImage(imgID 
string) error { for _, container := range srv.runtime.List() { - parent, err := srv.runtime.repositories.LookupImage(container.Image) + parent, err := srv.runtime.Repositories().LookupImage(container.Image) if err != nil { return err } @@ -1963,7 +1960,7 @@ func (srv *Server) canDeleteImage(imgID string) error { func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { // Retrieve all images - images, err := srv.runtime.graph.Map() + images, err := srv.runtime.Graph().Map() if err != nil { return nil, err } @@ -1980,7 +1977,7 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag // Loop on the children of the given image and check the config var match *image.Image for elem := range imageMap[imgID] { - img, err := srv.runtime.graph.Get(elem) + img, err := srv.runtime.Graph().Get(elem) if err != nil { return nil, err } @@ -1993,7 +1990,7 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*imag return match, nil } -func (srv *Server) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { +func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runconfig.HostConfig) error { runtime := srv.runtime if hostConfig != nil && hostConfig.Links != nil { @@ -2017,7 +2014,7 @@ func (srv *Server) RegisterLinks(container *Container, hostConfig *runconfig.Hos // After we load all the links into the runtime // set them to nil on the hostconfig hostConfig.Links = nil - if err := container.writeHostConfig(); err != nil { + if err := container.WriteHostConfig(); err != nil { return err } } @@ -2065,13 +2062,13 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { if err := srv.RegisterLinks(container, hostConfig); err != nil { return job.Error(err) } - container.hostConfig = hostConfig + container.SetHostConfig(hostConfig) container.ToDisk() } if err := container.Start(); err != nil { return job.Errorf("Cannot start container %s: %s", name, err) } - srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image)) + srv.LogEvent("start", container.ID, runtime.Repositories().ImageName(container.Image)) return engine.StatusOK } @@ -2091,7 +2088,7 @@ func (srv *Server) ContainerStop(job *engine.Job) engine.Status { if err := container.Stop(int(t)); err != nil { return job.Errorf("Cannot stop container %s: %s\n", name, err) } - srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image)) + srv.LogEvent("stop", container.ID, srv.runtime.Repositories().ImageName(container.Image)) } else { return job.Errorf("No such container: %s\n", name) } @@ -2237,7 +2234,7 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { return engine.StatusOK } -func (srv *Server) ContainerInspect(name string) (*Container, error) { +func (srv *Server) ContainerInspect(name string) (*runtime.Container, error) { if container := srv.runtime.Get(name); container != nil { return container, nil } @@ -2245,7 +2242,7 @@ func (srv *Server) ContainerInspect(name string) (*Container, error) { } func (srv *Server) ImageInspect(name string) (*image.Image, error) { - if image, err := srv.runtime.repositories.LookupImage(name); err == nil && image != nil { + if image, err := srv.runtime.Repositories().LookupImage(name); err == nil && image != nil { return image, nil } return nil, fmt.Errorf("No such image: %s", name) @@ -2280,9 +2277,9 @@ func (srv *Server) JobInspect(job *engine.Job) engine.Status { return job.Error(errContainer) } object = 
&struct { - *Container + *runtime.Container HostConfig *runconfig.HostConfig - }{container, container.hostConfig} + }{container, container.HostConfig()} default: return job.Errorf("Unknown kind: %s", kind) } @@ -2322,7 +2319,7 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { } func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) { - runtime, err := NewRuntime(config, eng) + runtime, err := runtime.NewRuntime(config, eng) if err != nil { return nil, err } @@ -2335,7 +2332,7 @@ func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) listeners: make(map[string]chan utils.JSONMessage), running: true, } - runtime.srv = srv + runtime.SetServer(srv) return srv, nil } @@ -2403,7 +2400,7 @@ func (srv *Server) Close() error { type Server struct { sync.RWMutex - runtime *Runtime + runtime *runtime.Runtime pullingPool map[string]chan struct{} pushingPool map[string]chan struct{} events []utils.JSONMessage diff --git a/sorter.go b/sorter.go deleted file mode 100644 index b49ac58c24..0000000000 --- a/sorter.go +++ /dev/null @@ -1,25 +0,0 @@ -package docker - -import "sort" - -type containerSorter struct { - containers []*Container - by func(i, j *Container) bool -} - -func (s *containerSorter) Len() int { - return len(s.containers) -} - -func (s *containerSorter) Swap(i, j int) { - s.containers[i], s.containers[j] = s.containers[j], s.containers[i] -} - -func (s *containerSorter) Less(i, j int) bool { - return s.by(s.containers[i], s.containers[j]) -} - -func sortContainers(containers []*Container, predicate func(i, j *Container) bool) { - s := &containerSorter{containers, predicate} - sort.Sort(s) -} diff --git a/state.go b/state.go deleted file mode 100644 index 1dc92af204..0000000000 --- a/state.go +++ /dev/null @@ -1,81 +0,0 @@ -package docker - -import ( - "fmt" - "github.com/dotcloud/docker/utils" - "sync" - "time" -) - -type State struct { - sync.RWMutex - Running bool - Pid int - ExitCode int - StartedAt time.Time - FinishedAt time.Time - Ghost bool -} - -// String returns a human-readable description of the state -func (s *State) String() string { - s.RLock() - defer s.RUnlock() - - if s.Running { - if s.Ghost { - return fmt.Sprintf("Ghost") - } - return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) - } - return fmt.Sprintf("Exit %d", s.ExitCode) -} - -func (s *State) IsRunning() bool { - s.RLock() - defer s.RUnlock() - - return s.Running -} - -func (s *State) IsGhost() bool { - s.RLock() - defer s.RUnlock() - - return s.Ghost -} - -func (s *State) GetExitCode() int { - s.RLock() - defer s.RUnlock() - - return s.ExitCode -} - -func (s *State) SetGhost(val bool) { - s.Lock() - defer s.Unlock() - - s.Ghost = val -} - -func (s *State) SetRunning(pid int) { - s.Lock() - defer s.Unlock() - - s.Running = true - s.Ghost = false - s.ExitCode = 0 - s.Pid = pid - s.StartedAt = time.Now().UTC() -} - -func (s *State) SetStopped(exitCode int) { - s.Lock() - defer s.Unlock() - - s.Running = false - s.Pid = 0 - s.FinishedAt = time.Now().UTC() - s.ExitCode = exitCode -} diff --git a/utils.go b/utils.go index ef666b0de1..0fda006860 100644 --- a/utils.go +++ b/utils.go @@ -2,9 +2,6 @@ package docker import ( "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/pkg/namesgenerator" - "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" ) @@ -12,45 +9,8 @@ type Change struct { archive.Change } -func migratePortMappings(config *runconfig.Config, 
hostConfig *runconfig.HostConfig) error { - if config.PortSpecs != nil { - ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs) - if err != nil { - return err - } - config.PortSpecs = nil - if len(bindings) > 0 { - if hostConfig == nil { - hostConfig = &runconfig.HostConfig{} - } - hostConfig.PortBindings = bindings - } - - if config.ExposedPorts == nil { - config.ExposedPorts = make(nat.PortSet, len(ports)) - } - for k, v := range ports { - config.ExposedPorts[k] = v - } - } - return nil -} - // Links come in the format of // name:alias func parseLink(rawLink string) (map[string]string, error) { return utils.PartParser("name:alias", rawLink) } - -type checker struct { - runtime *Runtime -} - -func (c *checker) Exists(name string) bool { - return c.runtime.containerGraph.Exists("/" + name) -} - -// Generate a random and unique name -func generateRandomName(runtime *Runtime) (string, error) { - return namesgenerator.GenerateRandomName(&checker{runtime}) -} diff --git a/volumes.go b/volumes.go deleted file mode 100644 index 8684ff4e59..0000000000 --- a/volumes.go +++ /dev/null @@ -1,332 +0,0 @@ -package docker - -import ( - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/pkg/mount" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" - "syscall" -) - -type BindMap struct { - SrcPath string - DstPath string - Mode string -} - -func prepareVolumesForContainer(container *Container) error { - if container.Volumes == nil || len(container.Volumes) == 0 { - container.Volumes = make(map[string]string) - container.VolumesRW = make(map[string]bool) - if err := applyVolumesFrom(container); err != nil { - return err - } - } - - if err := createVolumes(container); err != nil { - return err - } - return nil -} - -func mountVolumesForContainer(container *Container, envPath string) error { - // Setup the root fs as a bind mount of the base fs - var ( - root = container.RootfsPath() - runtime = container.runtime - ) - if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) { - return nil - } - - // Create a bind mount of the base fs as a place where we can add mounts - // without affecting the ability to access the base fs - if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil { - return err - } - - // Make sure the root fs is private so the mounts here don't propagate to basefs - if err := mount.ForceMount(root, root, "none", "private"); err != nil { - return err - } - - // Mount docker specific files into the containers root fs - if err := mount.Mount(runtime.sysInitPath, filepath.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil { - return err - } - if err := mount.Mount(envPath, filepath.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil { - return err - } - if err := mount.Mount(container.ResolvConfPath, filepath.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil { - return err - } - - if container.HostnamePath != "" && container.HostsPath != "" { - if err := mount.Mount(container.HostnamePath, filepath.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil { - return err - } - if err := mount.Mount(container.HostsPath, filepath.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil { - return err - } - } - - // Mount user specified volumes - for r, v := range container.Volumes { - mountAs := "ro" - if container.VolumesRW[r] { - mountAs = "rw" - } - - r = filepath.Join(root, r) - if p, err := utils.FollowSymlinkInScope(r, root); err != nil { - return err - } 
else { - r = p - } - - if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil { - return err - } - } - return nil -} - -func unmountVolumesForContainer(container *Container) { - var ( - root = container.RootfsPath() - mounts = []string{ - root, - filepath.Join(root, "/.dockerinit"), - filepath.Join(root, "/.dockerenv"), - filepath.Join(root, "/etc/resolv.conf"), - } - ) - - if container.HostnamePath != "" && container.HostsPath != "" { - mounts = append(mounts, filepath.Join(root, "/etc/hostname"), filepath.Join(root, "/etc/hosts")) - } - - for r := range container.Volumes { - mounts = append(mounts, filepath.Join(root, r)) - } - - for i := len(mounts) - 1; i >= 0; i-- { - if lastError := mount.Unmount(mounts[i]); lastError != nil { - log.Printf("Failed to umount %v: %v", mounts[i], lastError) - } - } -} - -func applyVolumesFrom(container *Container) error { - if container.Config.VolumesFrom != "" { - for _, containerSpec := range strings.Split(container.Config.VolumesFrom, ",") { - var ( - mountRW = true - specParts = strings.SplitN(containerSpec, ":", 2) - ) - - switch len(specParts) { - case 0: - return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom) - case 2: - switch specParts[1] { - case "ro": - mountRW = false - case "rw": // mountRW is already true - default: - return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec) - } - } - - c := container.runtime.Get(specParts[0]) - if c == nil { - return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID) - } - - for volPath, id := range c.Volumes { - if _, exists := container.Volumes[volPath]; exists { - continue - } - if err := os.MkdirAll(filepath.Join(container.basefs, volPath), 0755); err != nil { - return err - } - container.Volumes[volPath] = id - if isRW, exists := c.VolumesRW[volPath]; exists { - container.VolumesRW[volPath] = isRW && mountRW - } - } - - } - } - return nil -} - -func getBindMap(container *Container) (map[string]BindMap, error) { - var ( - // Create the requested bind mounts - binds = make(map[string]BindMap) - // Define illegal container destinations - illegalDsts = []string{"/", "."} - ) - - for _, bind := range container.hostConfig.Binds { - // FIXME: factorize bind parsing in parseBind - var ( - src, dst, mode string - arr = strings.Split(bind, ":") - ) - - if len(arr) == 2 { - src = arr[0] - dst = arr[1] - mode = "rw" - } else if len(arr) == 3 { - src = arr[0] - dst = arr[1] - mode = arr[2] - } else { - return nil, fmt.Errorf("Invalid bind specification: %s", bind) - } - - // Bail if trying to mount to an illegal destination - for _, illegal := range illegalDsts { - if dst == illegal { - return nil, fmt.Errorf("Illegal bind destination: %s", dst) - } - } - - bindMap := BindMap{ - SrcPath: src, - DstPath: dst, - Mode: mode, - } - binds[filepath.Clean(dst)] = bindMap - } - return binds, nil -} - -func createVolumes(container *Container) error { - binds, err := getBindMap(container) - if err != nil { - return err - } - - volumesDriver := container.runtime.volumes.Driver() - // Create the requested volumes if they don't exist - for volPath := range container.Config.Volumes { - volPath = filepath.Clean(volPath) - volIsDir := true - // Skip existing volumes - if _, exists := container.Volumes[volPath]; exists { - continue - } - var srcPath string - var isBindMount bool - srcRW := false - // If an external bind is defined for this volume, use that as a source - if bindMap, exists := binds[volPath]; exists { 
- isBindMount = true - srcPath = bindMap.SrcPath - if strings.ToLower(bindMap.Mode) == "rw" { - srcRW = true - } - if stat, err := os.Stat(bindMap.SrcPath); err != nil { - return err - } else { - volIsDir = stat.IsDir() - } - // Otherwise create an directory in $ROOT/volumes/ and use that - } else { - - // Do not pass a container as the parameter for the volume creation. - // The graph driver using the container's information ( Image ) to - // create the parent. - c, err := container.runtime.volumes.Create(nil, "", "", "", "", nil, nil) - if err != nil { - return err - } - srcPath, err = volumesDriver.Get(c.ID) - if err != nil { - return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err) - } - srcRW = true // RW by default - } - - if p, err := filepath.EvalSymlinks(srcPath); err != nil { - return err - } else { - srcPath = p - } - - container.Volumes[volPath] = srcPath - container.VolumesRW[volPath] = srcRW - - // Create the mountpoint - volPath = filepath.Join(container.basefs, volPath) - rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.basefs) - if err != nil { - return err - } - - if _, err := os.Stat(rootVolPath); err != nil { - if os.IsNotExist(err) { - if volIsDir { - if err := os.MkdirAll(rootVolPath, 0755); err != nil { - return err - } - } else { - if err := os.MkdirAll(filepath.Dir(rootVolPath), 0755); err != nil { - return err - } - if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil { - return err - } else { - f.Close() - } - } - } - } - - // Do not copy or change permissions if we are mounting from the host - if srcRW && !isBindMount { - volList, err := ioutil.ReadDir(rootVolPath) - if err != nil { - return err - } - if len(volList) > 0 { - srcList, err := ioutil.ReadDir(srcPath) - if err != nil { - return err - } - if len(srcList) == 0 { - // If the source volume is empty copy files from the root into the volume - if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil { - return err - } - - var stat syscall.Stat_t - if err := syscall.Stat(rootVolPath, &stat); err != nil { - return err - } - var srcStat syscall.Stat_t - if err := syscall.Stat(srcPath, &srcStat); err != nil { - return err - } - // Change the source volume's ownership if it differs from the root - // files that were just copied - if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid { - if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil { - return err - } - } - } - } - } - } - return nil -} -- cgit v1.2.1 From 47edf3e8bf759270c243711eaed1faa1d4d61f35 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Sun, 9 Mar 2014 23:16:15 -0700 Subject: Add IsRunning to server interface Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/runtime/server.go b/runtime/server.go index 0d2b71dea7..a74c4d1200 100644 --- a/runtime/server.go +++ b/runtime/server.go @@ -6,4 +6,5 @@ import ( type Server interface { LogEvent(action, id, from string) *utils.JSONMessage + IsRunning() bool // returns true if the server is currently in operation } -- cgit v1.2.1 From 50082f792be22b97dcc79afe666289ecefbc3864 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Sun, 9 Mar 2014 23:16:42 -0600 Subject: Fix a few packaging bugs, including and especially a temporary patch to our upstart script to mount cgroups properly Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- contrib/init/sysvinit-debian/docker | 114 ++++++++++++++++++++++++ 
contrib/init/sysvinit-debian/docker.default | 13 +++ contrib/init/sysvinit/docker | 96 -------------------- contrib/init/upstart/docker.conf | 13 ++- hack/make/ubuntu | 131 +++++++++++++++------------- 5 files changed, 207 insertions(+), 160 deletions(-) create mode 100755 contrib/init/sysvinit-debian/docker create mode 100644 contrib/init/sysvinit-debian/docker.default delete mode 100755 contrib/init/sysvinit/docker diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker new file mode 100755 index 0000000000..510683a459 --- /dev/null +++ b/contrib/init/sysvinit-debian/docker @@ -0,0 +1,114 @@ +#!/bin/sh + +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $syslog $remote_fs +# Required-Stop: $syslog $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Create lightweight, portable, self-sufficient containers. +# Description: +# Docker is an open-source project to easily create lightweight, portable, +# self-sufficient containers from any application. The same container that a +# developer builds and tests on a laptop can run at scale, in production, on +# VMs, bare metal, OpenStack clusters, public clouds and more. +### END INIT INFO + +export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin + +BASE=$(basename $0) + +# modify these in /etc/default/$BASE (/etc/default/docker) +DOCKER=/usr/bin/$BASE +DOCKER_PIDFILE=/var/run/$BASE.pid +DOCKER_OPTS= +DOCKER_DESC="Docker" + +# Get lsb functions +. /lib/lsb/init-functions + +if [ -f /etc/default/$BASE ]; then + . /etc/default/$BASE +fi + +# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it) +if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then + log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" + exit 1 +fi + +# Check docker is present +if [ ! -x $DOCKER ]; then + log_failure_msg "$DOCKER not present or not executable" + exit 1 +fi + +fail_unless_root() { + if [ "$(id -u)" != '0' ]; then + log_failure_msg "$DOCKER_DESC must be run as root" + exit 1 + fi +} + +case "$1" in + start) + fail_unless_root + + if ! grep -q cgroup /proc/mounts; then + # rough approximation of cgroupfs-mount + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + for sys in $(cut -d' ' -f1 /proc/cgroups); do + mkdir -p /sys/fs/cgroup/$sys + if ! mount -n -t cgroup -o $sys cgroup /sys/fs/cgroup/$sys 2>/dev/null; then + rmdir /sys/fs/cgroup/$sys 2>/dev/null || true + fi + done + fi + + touch /var/log/docker.log + chgrp docker /var/log/docker.log + + log_begin_msg "Starting $DOCKER_DESC: $BASE" + start-stop-daemon --start --background \ + --no-close \ + --exec "$DOCKER" \ + --pidfile "$DOCKER_PIDFILE" \ + -- \ + -d -p "$DOCKER_PIDFILE" \ + $DOCKER_OPTS \ + > /var/log/docker.log 2>&1 + log_end_msg $? + ;; + + stop) + fail_unless_root + log_begin_msg "Stopping $DOCKER_DESC: $BASE" + start-stop-daemon --stop --pidfile "$DOCKER_PIDFILE" + log_end_msg $? 
+ ;; + + restart) + fail_unless_root + docker_pid=`cat "$DOCKER_PIDFILE" 2>/dev/null` + [ -n "$docker_pid" ] \ + && ps -p $docker_pid > /dev/null 2>&1 \ + && $0 stop + $0 start + ;; + + force-reload) + fail_unless_root + $0 restart + ;; + + status) + status_of_proc -p "$DOCKER_PIDFILE" "$DOCKER" docker + ;; + + *) + echo "Usage: $0 {start|stop|restart|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/contrib/init/sysvinit-debian/docker.default b/contrib/init/sysvinit-debian/docker.default new file mode 100644 index 0000000000..d5110b5e2f --- /dev/null +++ b/contrib/init/sysvinit-debian/docker.default @@ -0,0 +1,13 @@ +# Docker Upstart and SysVinit configuration file + +# Customize location of Docker binary (especially for development testing). +#DOCKER="/usr/local/bin/docker" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="-dns 8.8.8.8 -dns 8.8.4.4" + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export TMPDIR="/mnt/bigdrive/docker-tmp" diff --git a/contrib/init/sysvinit/docker b/contrib/init/sysvinit/docker deleted file mode 100755 index 2d79c4d4c0..0000000000 --- a/contrib/init/sysvinit/docker +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/sh - -### BEGIN INIT INFO -# Provides: docker -# Required-Start: $syslog $remote_fs -# Required-Stop: $syslog $remote_fs -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Create lightweight, portable, self-sufficient containers. -# Description: -# Docker is an open-source project to easily create lightweight, portable, -# self-sufficient containers from any application. The same container that a -# developer builds and tests on a laptop can run at scale, in production, on -# VMs, bare metal, OpenStack clusters, public clouds and more. -### END INIT INFO - -BASE=$(basename $0) - -DOCKER=/usr/bin/$BASE -DOCKER_PIDFILE=/var/run/$BASE.pid -DOCKER_OPTS= - -PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin - -# Get lsb functions -. /lib/lsb/init-functions - -if [ -f /etc/default/$BASE ]; then - . /etc/default/$BASE -fi - -# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it) -if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | /bin/grep -q upstart; then - log_failure_msg "Docker is managed via upstart, try using service $BASE $1" - exit 1 -fi - -# Check docker is present -if [ ! -x $DOCKER ]; then - log_failure_msg "$DOCKER not present or not executable" - exit 1 -fi - -fail_unless_root() { - if [ "$(id -u)" != '0' ]; then - log_failure_msg "Docker must be run as root" - exit 1 - fi -} - -case "$1" in - start) - fail_unless_root - log_begin_msg "Starting Docker: $BASE" - mount | grep cgroup >/dev/null || mount -t cgroup none /sys/fs/cgroup 2>/dev/null - start-stop-daemon --start --background \ - --exec "$DOCKER" \ - --pidfile "$DOCKER_PIDFILE" \ - -- -d -p "$DOCKER_PIDFILE" \ - $DOCKER_OPTS - log_end_msg $? - ;; - - stop) - fail_unless_root - log_begin_msg "Stopping Docker: $BASE" - start-stop-daemon --stop \ - --pidfile "$DOCKER_PIDFILE" - log_end_msg $? 
- ;; - - restart) - fail_unless_root - docker_pid=`cat "$DOCKER_PIDFILE" 2>/dev/null` - [ -n "$docker_pid" ] \ - && ps -p $docker_pid > /dev/null 2>&1 \ - && $0 stop - $0 start - ;; - - force-reload) - fail_unless_root - $0 restart - ;; - - status) - status_of_proc -p "$DOCKER_PIDFILE" "$DOCKER" docker - ;; - - *) - echo "Usage: $0 {start|stop|restart|status}" - exit 1 - ;; -esac - -exit 0 diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf index ee8a447c27..e2cc4536e1 100644 --- a/contrib/init/upstart/docker.conf +++ b/contrib/init/upstart/docker.conf @@ -1,15 +1,26 @@ description "Docker daemon" -start on filesystem and started lxc-net +start on filesystem stop on runlevel [!2345] respawn script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) DOCKER=/usr/bin/$UPSTART_JOB DOCKER_OPTS= if [ -f /etc/default/$UPSTART_JOB ]; then . /etc/default/$UPSTART_JOB fi + if ! grep -q cgroup /proc/mounts; then + # rough approximation of cgroupfs-mount + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + for sys in $(cut -d' ' -f1 /proc/cgroups); do + mkdir -p /sys/fs/cgroup/$sys + if ! mount -n -t cgroup -o $sys cgroup /sys/fs/cgroup/$sys 2>/dev/null; then + rmdir /sys/fs/cgroup/$sys 2>/dev/null || true + fi + done + fi "$DOCKER" -d $DOCKER_OPTS end script diff --git a/hack/make/ubuntu b/hack/make/ubuntu index 602d4ac1ad..ebc12f27ec 100644 --- a/hack/make/ubuntu +++ b/hack/make/ubuntu @@ -29,42 +29,36 @@ bundle_ubuntu() { cp contrib/udev/80-docker.rules $DIR/etc/udev/rules.d/ # Include our init scripts - mkdir -p $DIR/etc - cp -R contrib/init/upstart $DIR/etc/init - cp -R contrib/init/sysvinit $DIR/etc/init.d - mkdir -p $DIR/lib/systemd - cp -R contrib/init/systemd $DIR/lib/systemd/system - + mkdir -p $DIR/etc/init + cp contrib/init/upstart/docker.conf $DIR/etc/init/ + mkdir -p $DIR/etc/init.d + cp contrib/init/sysvinit-debian/docker $DIR/etc/init.d/ mkdir -p $DIR/etc/default - cat > $DIR/etc/default/docker <<'EOF' -# Docker Upstart and SysVinit configuration file - -# Customize location of Docker binary (especially for development testing). -#DOCKER="/usr/local/bin/docker" - -# Use DOCKER_OPTS to modify the daemon startup options. -#DOCKER_OPTS="-dns 8.8.8.8" - -# If you need Docker to use an HTTP proxy, it can also be specified here. -#export http_proxy=http://127.0.0.1:3128/ -EOF + cp contrib/init/sysvinit-debian/docker.default $DIR/etc/default/docker + mkdir -p $DIR/lib/systemd/system + cp contrib/init/systemd/docker.service $DIR/lib/systemd/system/ # Copy the binary # This will fail if the binary bundle hasn't been built mkdir -p $DIR/usr/bin - # Copy the binary - # This will fail if the binary bundle hasn't been built cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker # Generate postinst/prerm/postrm scripts - cat > /tmp/postinst <<'EOF' + cat > $DEST/postinst <<'EOF' #!/bin/sh set -e set -u -getent group docker > /dev/null || groupadd --system docker || true +if [ "$1" = 'configure' ] && [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi +fi -update-rc.d docker defaults > /dev/null || true +if ! 
{ [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then + # we only need to do this if upstart isn't in charge + update-rc.d docker defaults > /dev/null || true +fi if [ -n "$2" ]; then _dh_action=restart else @@ -74,7 +68,7 @@ service docker $_dh_action 2>/dev/null || true #DEBHELPER# EOF - cat > /tmp/prerm <<'EOF' + cat > $DEST/prerm <<'EOF' #!/bin/sh set -e set -u @@ -83,7 +77,7 @@ service docker stop 2>/dev/null || true #DEBHELPER# EOF - cat > /tmp/postrm <<'EOF' + cat > $DEST/postrm <<'EOF' #!/bin/sh set -e set -u @@ -101,50 +95,61 @@ fi #DEBHELPER# EOF # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way - chmod +x /tmp/postinst /tmp/prerm + chmod +x $DEST/postinst $DEST/prerm $DEST/postrm ( + # switch directories so we create *.deb in the right folder cd $DEST + + # create lxc-docker-VERSION package fpm -s dir -C $DIR \ - --name lxc-docker-$VERSION --version $PKGVERSION \ - --after-install /tmp/postinst \ - --before-remove /tmp/prerm \ - --after-remove /tmp/postrm \ - --architecture "$PACKAGE_ARCHITECTURE" \ - --prefix / \ - --depends iptables \ - --deb-recommends aufs-tools \ - --deb-recommends ca-certificates \ - --deb-recommends git \ - --deb-recommends xz-utils \ - --description "$PACKAGE_DESCRIPTION" \ - --maintainer "$PACKAGE_MAINTAINER" \ - --conflicts docker \ - --conflicts docker.io \ - --conflicts lxc-docker-virtual-package \ - --provides lxc-docker \ - --provides lxc-docker-virtual-package \ - --replaces lxc-docker \ - --replaces lxc-docker-virtual-package \ - --url "$PACKAGE_URL" \ - --license "$PACKAGE_LICENSE" \ - --config-files /etc/udev/rules.d/80-docker.rules \ - --config-files /etc/init/docker.conf \ - --config-files /etc/init.d/docker \ - --config-files /etc/default/docker \ - --deb-compression gz \ - -t deb . + --name lxc-docker-$VERSION --version $PKGVERSION \ + --after-install $DEST/postinst \ + --before-remove $DEST/prerm \ + --after-remove $DEST/postrm \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --prefix / \ + --depends iptables \ + --deb-recommends aufs-tools \ + --deb-recommends ca-certificates \ + --deb-recommends git \ + --deb-recommends xz-utils \ + --deb-suggests cgroup-lite \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --conflicts docker \ + --conflicts docker.io \ + --conflicts lxc-docker-virtual-package \ + --provides lxc-docker \ + --provides lxc-docker-virtual-package \ + --replaces lxc-docker \ + --replaces lxc-docker-virtual-package \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --config-files /etc/udev/rules.d/80-docker.rules \ + --config-files /etc/init/docker.conf \ + --config-files /etc/init.d/docker \ + --config-files /etc/default/docker \ + --deb-compression gz \ + -t deb . 
+ # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available + + # create empty lxc-docker wrapper package fpm -s empty \ - --name lxc-docker --version $PKGVERSION \ - --architecture "$PACKAGE_ARCHITECTURE" \ - --depends lxc-docker-$VERSION \ - --description "$PACKAGE_DESCRIPTION" \ - --maintainer "$PACKAGE_MAINTAINER" \ - --url "$PACKAGE_URL" \ - --license "$PACKAGE_LICENSE" \ - --deb-compression gz \ - -t deb + --name lxc-docker --version $PKGVERSION \ + --architecture "$PACKAGE_ARCHITECTURE" \ + --depends lxc-docker-$VERSION \ + --description "$PACKAGE_DESCRIPTION" \ + --maintainer "$PACKAGE_MAINTAINER" \ + --url "$PACKAGE_URL" \ + --license "$PACKAGE_LICENSE" \ + --deb-compression gz \ + -t deb ) + + # clean up after ourselves so we have a clean output directory + rm $DEST/postinst $DEST/prerm $DEST/postrm + rm -r $DIR } bundle_ubuntu -- cgit v1.2.1 From 3e4d0857bf560296801e68b1b218654954c952e9 Mon Sep 17 00:00:00 2001 From: Andy Rothfusz Date: Mon, 10 Mar 2014 12:48:07 -0700 Subject: Removing myself from doc maintainers due to other obligations eliminating time to maintain docs. Docker-DCO-1.1-Signed-off-by: Andy Rothfusz (github: metalivedev) --- docs/MAINTAINERS | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/MAINTAINERS b/docs/MAINTAINERS index e816670419..52505fab00 100644 --- a/docs/MAINTAINERS +++ b/docs/MAINTAINERS @@ -1,3 +1,2 @@ -Andy Rothfusz (@metalivedev) James Turnbull (@jamtur01) Sven Dowideit (@SvenDowideit) -- cgit v1.2.1 From 0ebf5d0ab3a3999a7e2efe619ef194ff3817e3a3 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 10 Mar 2014 20:24:15 +0000 Subject: move a unit test file Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- commands_unit_test.go | 160 ----------------------------------------------- runconfig/config_test.go | 154 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 154 insertions(+), 160 deletions(-) delete mode 100644 commands_unit_test.go diff --git a/commands_unit_test.go b/commands_unit_test.go deleted file mode 100644 index 60d8d60398..0000000000 --- a/commands_unit_test.go +++ /dev/null @@ -1,160 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/runconfig" - "strings" - "testing" -) - -func parse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig, error) { - config, hostConfig, _, err := runconfig.Parse(strings.Split(args+" ubuntu bash", " "), nil) - return config, hostConfig, err -} - -func mustParse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig) { - config, hostConfig, err := parse(t, args) - if err != nil { - t.Fatal(err) - } - return config, hostConfig -} - -func TestParseRunLinks(t *testing.T) { - if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { - t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) - } - - if _, _, err := parse(t, "-link a"); err == nil { - t.Fatalf("Error parsing links. 
`-link a` should be an error but is not") - } - if _, _, err := parse(t, "-link"); err == nil { - t.Fatalf("Error parsing links. `-link` should be an error but is not") - } -} - -func TestParseRunAttach(t *testing.T) { - if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - - if _, _, err := parse(t, "-a"); err == nil { - t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid"); err == nil { - t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdin -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-d -rm"); err == nil { - t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not") - } -} - -func TestParseRunVolumes(t *testing.T) { - if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/tmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) - } - - if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/tmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes) - } else if _, exists := config.Volumes["/var"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. 
Received %v", config.Volumes) - } - - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) - } - - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) - } else if _, exists := config.Volumes["/containerVar"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) - } - - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) - } else if _, exists := config.Volumes["/containerVar"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) - } - - if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { - t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes["/containerTmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) - } else if _, exists := config.Volumes["/containerVar"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) - } - - if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds) - } else if len(config.Volumes) != 0 { - t.Fatalf("Error parsing volume flags, without volume, no volume should be present. 
Received %v", config.Volumes) - } - - if _, _, err := parse(t, "-v /"); err == nil { - t.Fatalf("Expected error, but got none") - } - - if _, _, err := parse(t, "-v /:/"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't") - } - if _, _, err := parse(t, "-v"); err == nil { - t.Fatalf("Error parsing volume flags, `-v` should fail but didn't") - } - if _, _, err := parse(t, "-v /tmp:"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't") - } - if _, _, err := parse(t, "-v /tmp:ro"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't") - } - if _, _, err := parse(t, "-v /tmp::"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't") - } - if _, _, err := parse(t, "-v :"); err == nil { - t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't") - } - if _, _, err := parse(t, "-v ::"); err == nil { - t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't") - } - if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil { - t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't") - } -} diff --git a/runconfig/config_test.go b/runconfig/config_test.go index 3ef31491fc..40d53fa2f4 100644 --- a/runconfig/config_test.go +++ b/runconfig/config_test.go @@ -2,9 +2,163 @@ package runconfig import ( "github.com/dotcloud/docker/nat" + "strings" "testing" ) +func parse(t *testing.T, args string) (*Config, *HostConfig, error) { + config, hostConfig, _, err := Parse(strings.Split(args+" ubuntu bash", " "), nil) + return config, hostConfig, err +} + +func mustParse(t *testing.T, args string) (*Config, *HostConfig) { + config, hostConfig, err := parse(t, args) + if err != nil { + t.Fatal(err) + } + return config, hostConfig +} + +func TestParseRunLinks(t *testing.T) { + if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) + } + if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { + t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) + } + + if _, _, err := parse(t, "-link a"); err == nil { + t.Fatalf("Error parsing links. `-link a` should be an error but is not") + } + if _, _, err := parse(t, "-link"); err == nil { + t.Fatalf("Error parsing links. `-link` should be an error but is not") + } +} + +func TestParseRunAttach(t *testing.T) { + if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { + t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) + } + + if _, _, err := parse(t, "-a"); err == nil { + t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid"); err == nil { + t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdin -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stdout -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") + } + if _, _, err := parse(t, "-a stderr -d"); err == nil { + t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") + } + if _, _, err := parse(t, "-d -rm"); err == nil { + t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not") + } +} + +func TestParseRunVolumes(t *testing.T) { + if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/tmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/tmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes) + } else if _, exists := config.Volumes["/var"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/containerTmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. 
Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/containerTmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) + } else if _, exists := config.Volumes["/containerVar"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/containerTmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) + } else if _, exists := config.Volumes["/containerVar"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" { + t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds) + } else if _, exists := config.Volumes["/containerTmp"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes) + } else if _, exists := config.Volumes["/containerVar"]; !exists { + t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes) + } + + if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil { + t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds) + } else if len(config.Volumes) != 0 { + t.Fatalf("Error parsing volume flags, without volume, no volume should be present. 
Received %v", config.Volumes) + } + + if _, _, err := parse(t, "-v /"); err == nil { + t.Fatalf("Expected error, but got none") + } + + if _, _, err := parse(t, "-v /:/"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't") + } + if _, _, err := parse(t, "-v"); err == nil { + t.Fatalf("Error parsing volume flags, `-v` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:ro"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't") + } + if _, _, err := parse(t, "-v :"); err == nil { + t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't") + } + if _, _, err := parse(t, "-v ::"); err == nil { + t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't") + } + if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil { + t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't") + } +} + func TestCompare(t *testing.T) { volumes1 := make(map[string]struct{}) volumes1["/test1"] = struct{}{} -- cgit v1.2.1 From d648708d02134c3dc6788ad21325224d849b3b8f Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 10 Mar 2014 21:06:27 +0000 Subject: remove utils.go Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- pkg/opts/opts.go | 12 +----------- server.go | 2 +- utils.go | 16 ---------------- 3 files changed, 2 insertions(+), 28 deletions(-) delete mode 100644 utils.go diff --git a/pkg/opts/opts.go b/pkg/opts/opts.go index a1b8752bad..4f5897c796 100644 --- a/pkg/opts/opts.go +++ b/pkg/opts/opts.go @@ -92,22 +92,12 @@ func ValidateAttach(val string) (string, error) { } func ValidateLink(val string) (string, error) { - if _, err := parseLink(val); err != nil { + if _, err := utils.PartParser("name:alias", val); err != nil { return val, err } return val, nil } -// FIXME: this is a duplicate of docker.utils.parseLink. -// it can't be moved to a separate links/ package because -// links depends on Container which is defined in the core. 
-// -// Links come in the format of -// name:alias -func parseLink(rawLink string) (map[string]string, error) { - return utils.PartParser("name:alias", rawLink) -} - func ValidatePath(val string) (string, error) { var containerPath string diff --git a/server.go b/server.go index 85d56afdb6..52f5f14c0a 100644 --- a/server.go +++ b/server.go @@ -1995,7 +1995,7 @@ func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runco if hostConfig != nil && hostConfig.Links != nil { for _, l := range hostConfig.Links { - parts, err := parseLink(l) + parts, err := utils.PartParser("name:alias", l) if err != nil { return err } diff --git a/utils.go b/utils.go deleted file mode 100644 index 0fda006860..0000000000 --- a/utils.go +++ /dev/null @@ -1,16 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/utils" -) - -type Change struct { - archive.Change -} - -// Links come in the format of -// name:alias -func parseLink(rawLink string) (map[string]string, error) { - return utils.PartParser("name:alias", rawLink) -} -- cgit v1.2.1 From d7646f934a268bf071a16439880ba2bba608426e Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 10 Mar 2014 14:08:26 -0700 Subject: Change version to v0.9.0 Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index ac39a106c4..c70836ca5c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.9.0 +0.9.0-dev -- cgit v1.2.1 From fde5f573d39020476c08ed25fac0a6306f7b18cc Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 10 Mar 2014 21:10:23 +0000 Subject: move opts out of pkg because it's related to docker Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- docker/docker.go | 2 +- opts/opts.go | 138 ++++++++++++++++++++++++++++++++++++++++++++++++++ opts/opts_test.go | 24 +++++++++ pkg/opts/opts.go | 138 -------------------------------------------------- pkg/opts/opts_test.go | 24 --------- runconfig/parse.go | 2 +- 6 files changed, 164 insertions(+), 164 deletions(-) create mode 100644 opts/opts.go create mode 100644 opts/opts_test.go delete mode 100644 pkg/opts/opts.go delete mode 100644 pkg/opts/opts_test.go diff --git a/docker/docker.go b/docker/docker.go index 2aa10dbe54..b783c6da02 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -10,8 +10,8 @@ import ( "github.com/dotcloud/docker/builtins" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/opts" flag "github.com/dotcloud/docker/pkg/mflag" - "github.com/dotcloud/docker/pkg/opts" "github.com/dotcloud/docker/sysinit" "github.com/dotcloud/docker/utils" ) diff --git a/opts/opts.go b/opts/opts.go new file mode 100644 index 0000000000..4f5897c796 --- /dev/null +++ b/opts/opts.go @@ -0,0 +1,138 @@ +package opts + +import ( + "fmt" + "github.com/dotcloud/docker/utils" + "os" + "path/filepath" + "regexp" + "strings" +) + +// ListOpts type +type ListOpts struct { + values []string + validator ValidatorFctType +} + +func NewListOpts(validator ValidatorFctType) ListOpts { + return ListOpts{ + validator: validator, + } +} + +func (opts *ListOpts) String() string { + return fmt.Sprintf("%v", []string(opts.values)) +} + +// Set validates if needed the input value and add it to the +// internal slice. 
+func (opts *ListOpts) Set(value string) error { + if opts.validator != nil { + v, err := opts.validator(value) + if err != nil { + return err + } + value = v + } + opts.values = append(opts.values, value) + return nil +} + +// Delete remove the given element from the slice. +func (opts *ListOpts) Delete(key string) { + for i, k := range opts.values { + if k == key { + opts.values = append(opts.values[:i], opts.values[i+1:]...) + return + } + } +} + +// GetMap returns the content of values in a map in order to avoid +// duplicates. +// FIXME: can we remove this? +func (opts *ListOpts) GetMap() map[string]struct{} { + ret := make(map[string]struct{}) + for _, k := range opts.values { + ret[k] = struct{}{} + } + return ret +} + +// GetAll returns the values' slice. +// FIXME: Can we remove this? +func (opts *ListOpts) GetAll() []string { + return opts.values +} + +// Get checks the existence of the given key. +func (opts *ListOpts) Get(key string) bool { + for _, k := range opts.values { + if k == key { + return true + } + } + return false +} + +// Len returns the amount of element in the slice. +func (opts *ListOpts) Len() int { + return len(opts.values) +} + +// Validators +type ValidatorFctType func(val string) (string, error) + +func ValidateAttach(val string) (string, error) { + if val != "stdin" && val != "stdout" && val != "stderr" { + return val, fmt.Errorf("Unsupported stream name: %s", val) + } + return val, nil +} + +func ValidateLink(val string) (string, error) { + if _, err := utils.PartParser("name:alias", val); err != nil { + return val, err + } + return val, nil +} + +func ValidatePath(val string) (string, error) { + var containerPath string + + if strings.Count(val, ":") > 2 { + return val, fmt.Errorf("bad format for volumes: %s", val) + } + + splited := strings.SplitN(val, ":", 2) + if len(splited) == 1 { + containerPath = splited[0] + val = filepath.Clean(splited[0]) + } else { + containerPath = splited[1] + val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1])) + } + + if !filepath.IsAbs(containerPath) { + return val, fmt.Errorf("%s is not an absolute path", containerPath) + } + return val, nil +} + +func ValidateEnv(val string) (string, error) { + arr := strings.Split(val, "=") + if len(arr) > 1 { + return val, nil + } + return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil +} + +func ValidateIp4Address(val string) (string, error) { + re := regexp.MustCompile(`^(([0-9]+\.){3}([0-9]+))\s*$`) + var ns = re.FindSubmatch([]byte(val)) + if len(ns) > 0 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not an ip4 address", val) +} diff --git a/opts/opts_test.go b/opts/opts_test.go new file mode 100644 index 0000000000..a5c1fac9ca --- /dev/null +++ b/opts/opts_test.go @@ -0,0 +1,24 @@ +package opts + +import ( + "testing" +) + +func TestValidateIP4(t *testing.T) { + if ret, err := ValidateIp4Address(`1.2.3.4`); err != nil || ret == "" { + t.Fatalf("ValidateIp4Address(`1.2.3.4`) got %s %s", ret, err) + } + + if ret, err := ValidateIp4Address(`127.0.0.1`); err != nil || ret == "" { + t.Fatalf("ValidateIp4Address(`127.0.0.1`) got %s %s", ret, err) + } + + if ret, err := ValidateIp4Address(`127`); err == nil || ret != "" { + t.Fatalf("ValidateIp4Address(`127`) got %s %s", ret, err) + } + + if ret, err := ValidateIp4Address(`random invalid string`); err == nil || ret != "" { + t.Fatalf("ValidateIp4Address(`random invalid string`) got %s %s", ret, err) + } + +} diff --git a/pkg/opts/opts.go b/pkg/opts/opts.go deleted file mode 100644 index 
4f5897c796..0000000000 --- a/pkg/opts/opts.go +++ /dev/null @@ -1,138 +0,0 @@ -package opts - -import ( - "fmt" - "github.com/dotcloud/docker/utils" - "os" - "path/filepath" - "regexp" - "strings" -) - -// ListOpts type -type ListOpts struct { - values []string - validator ValidatorFctType -} - -func NewListOpts(validator ValidatorFctType) ListOpts { - return ListOpts{ - validator: validator, - } -} - -func (opts *ListOpts) String() string { - return fmt.Sprintf("%v", []string(opts.values)) -} - -// Set validates if needed the input value and add it to the -// internal slice. -func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - opts.values = append(opts.values, value) - return nil -} - -// Delete remove the given element from the slice. -func (opts *ListOpts) Delete(key string) { - for i, k := range opts.values { - if k == key { - opts.values = append(opts.values[:i], opts.values[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid -// duplicates. -// FIXME: can we remove this? -func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values' slice. -// FIXME: Can we remove this? -func (opts *ListOpts) GetAll() []string { - return opts.values -} - -// Get checks the existence of the given key. -func (opts *ListOpts) Get(key string) bool { - for _, k := range opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the amount of element in the slice. -func (opts *ListOpts) Len() int { - return len(opts.values) -} - -// Validators -type ValidatorFctType func(val string) (string, error) - -func ValidateAttach(val string) (string, error) { - if val != "stdin" && val != "stdout" && val != "stderr" { - return val, fmt.Errorf("Unsupported stream name: %s", val) - } - return val, nil -} - -func ValidateLink(val string) (string, error) { - if _, err := utils.PartParser("name:alias", val); err != nil { - return val, err - } - return val, nil -} - -func ValidatePath(val string) (string, error) { - var containerPath string - - if strings.Count(val, ":") > 2 { - return val, fmt.Errorf("bad format for volumes: %s", val) - } - - splited := strings.SplitN(val, ":", 2) - if len(splited) == 1 { - containerPath = splited[0] - val = filepath.Clean(splited[0]) - } else { - containerPath = splited[1] - val = fmt.Sprintf("%s:%s", splited[0], filepath.Clean(splited[1])) - } - - if !filepath.IsAbs(containerPath) { - return val, fmt.Errorf("%s is not an absolute path", containerPath) - } - return val, nil -} - -func ValidateEnv(val string) (string, error) { - arr := strings.Split(val, "=") - if len(arr) > 1 { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - -func ValidateIp4Address(val string) (string, error) { - re := regexp.MustCompile(`^(([0-9]+\.){3}([0-9]+))\s*$`) - var ns = re.FindSubmatch([]byte(val)) - if len(ns) > 0 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not an ip4 address", val) -} diff --git a/pkg/opts/opts_test.go b/pkg/opts/opts_test.go deleted file mode 100644 index a5c1fac9ca..0000000000 --- a/pkg/opts/opts_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package opts - -import ( - "testing" -) - -func TestValidateIP4(t *testing.T) { - if ret, err := ValidateIp4Address(`1.2.3.4`); err != nil || ret == "" { - 
t.Fatalf("ValidateIp4Address(`1.2.3.4`) got %s %s", ret, err) - } - - if ret, err := ValidateIp4Address(`127.0.0.1`); err != nil || ret == "" { - t.Fatalf("ValidateIp4Address(`127.0.0.1`) got %s %s", ret, err) - } - - if ret, err := ValidateIp4Address(`127`); err == nil || ret != "" { - t.Fatalf("ValidateIp4Address(`127`) got %s %s", ret, err) - } - - if ret, err := ValidateIp4Address(`random invalid string`); err == nil || ret != "" { - t.Fatalf("ValidateIp4Address(`random invalid string`) got %s %s", ret, err) - } - -} diff --git a/runconfig/parse.go b/runconfig/parse.go index fb08c068b2..d481da8d3b 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -3,8 +3,8 @@ package runconfig import ( "fmt" "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/opts" flag "github.com/dotcloud/docker/pkg/mflag" - "github.com/dotcloud/docker/pkg/opts" "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/utils" "io/ioutil" -- cgit v1.2.1 From 6f70ed3a742162c4cf374a2c2bbd094eed3b043b Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 10 Mar 2014 21:23:29 +0000 Subject: remove useless lock move job to server and remove version.go Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- builtins/builtins.go | 1 - server.go | 34 +++++++++++++++++++++++++--------- version.go | 32 -------------------------------- 3 files changed, 25 insertions(+), 42 deletions(-) delete mode 100644 version.go diff --git a/builtins/builtins.go b/builtins/builtins.go index 5b146cd20f..ba3f41b1ca 100644 --- a/builtins/builtins.go +++ b/builtins/builtins.go @@ -36,5 +36,4 @@ func remote(eng *engine.Engine) { func daemon(eng *engine.Engine) { eng.Register("initserver", docker.InitServer) eng.Register("init_networkdriver", lxc.InitDriver) - eng.Register("version", docker.GetVersion) } diff --git a/server.go b/server.go index 52f5f14c0a..1619a16e52 100644 --- a/server.go +++ b/server.go @@ -85,6 +85,7 @@ func InitServer(job *engine.Job) engine.Status { "search": srv.ImagesSearch, "changes": srv.ContainerChanges, "top": srv.ContainerTop, + "version": srv.DockerVersion, "load": srv.ImageLoad, "build": srv.Build, "pull": srv.ImagePull, @@ -836,6 +837,22 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { return engine.StatusOK } +func (srv *Server) DockerVersion(job *engine.Job) engine.Status { + v := &engine.Env{} + v.Set("Version", dockerversion.VERSION) + v.Set("GitCommit", dockerversion.GITCOMMIT) + v.Set("GoVersion", goruntime.Version()) + v.Set("Os", goruntime.GOOS) + v.Set("Arch", goruntime.GOARCH) + if kernelVersion, err := utils.GetKernelVersion(); err == nil { + v.Set("KernelVersion", kernelVersion.String()) + } + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + func (srv *Server) ImageHistory(job *engine.Job) engine.Status { if n := len(job.Args); n != 1 { return job.Errorf("Usage: %s IMAGE", job.Name) @@ -2337,16 +2354,15 @@ func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) } func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory { - srv.Lock() - defer srv.Unlock() - v := dockerVersion() httpVersion := make([]utils.VersionInfo, 0, 4) - httpVersion = append(httpVersion, &simpleVersionInfo{"docker", v.Get("Version")}) - httpVersion = append(httpVersion, &simpleVersionInfo{"go", v.Get("GoVersion")}) - httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", v.Get("GitCommit")}) - httpVersion = append(httpVersion, 
&simpleVersionInfo{"kernel", v.Get("KernelVersion")}) - httpVersion = append(httpVersion, &simpleVersionInfo{"os", v.Get("Os")}) - httpVersion = append(httpVersion, &simpleVersionInfo{"arch", v.Get("Arch")}) + httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION}) + httpVersion = append(httpVersion, &simpleVersionInfo{"go", goruntime.Version()}) + httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT}) + if kernelVersion, err := utils.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()}) + } + httpVersion = append(httpVersion, &simpleVersionInfo{"os", goruntime.GOOS}) + httpVersion = append(httpVersion, &simpleVersionInfo{"arch", goruntime.GOARCH}) ud := utils.NewHTTPUserAgentDecorator(httpVersion...) md := &utils.HTTPMetaHeadersDecorator{ Headers: metaHeaders, diff --git a/version.go b/version.go deleted file mode 100644 index d88def9619..0000000000 --- a/version.go +++ /dev/null @@ -1,32 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/utils" - "runtime" -) - -func GetVersion(job *engine.Job) engine.Status { - if _, err := dockerVersion().WriteTo(job.Stdout); err != nil { - job.Errorf("%s", err) - return engine.StatusErr - } - return engine.StatusOK -} - -// dockerVersion returns detailed version information in the form of a queriable -// environment. -func dockerVersion() *engine.Env { - v := &engine.Env{} - v.Set("Version", dockerversion.VERSION) - v.Set("GitCommit", dockerversion.GITCOMMIT) - v.Set("GoVersion", runtime.Version()) - v.Set("Os", runtime.GOOS) - v.Set("Arch", runtime.GOARCH) - // FIXME:utils.GetKernelVersion should only be needed here - if kernelVersion, err := utils.GetKernelVersion(); err == nil { - v.Set("KernelVersion", kernelVersion.String()) - } - return v -} -- cgit v1.2.1 From 802407a099705a91017fbaa1b6820f145f580d86 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 10 Mar 2014 13:21:30 -0700 Subject: Update vendor for kr/pty Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- vendor/src/github.com/kr/pty/doc.go | 5 +++ vendor/src/github.com/kr/pty/pty_freebsd.go | 53 +++++++++++++++++++++++++ vendor/src/github.com/kr/pty/pty_unsupported.go | 27 +++++++++++++ 3 files changed, 85 insertions(+) create mode 100644 vendor/src/github.com/kr/pty/pty_freebsd.go create mode 100644 vendor/src/github.com/kr/pty/pty_unsupported.go diff --git a/vendor/src/github.com/kr/pty/doc.go b/vendor/src/github.com/kr/pty/doc.go index 491c060b28..190cfbea92 100644 --- a/vendor/src/github.com/kr/pty/doc.go +++ b/vendor/src/github.com/kr/pty/doc.go @@ -2,9 +2,14 @@ package pty import ( + "errors" "os" ) +// ErrUnsupported is returned if a function is not +// available on the current platform. +var ErrUnsupported = errors.New("unsupported") + // Opens a pty and its corresponding tty. 
func Open() (pty, tty *os.File, err error) { return open() diff --git a/vendor/src/github.com/kr/pty/pty_freebsd.go b/vendor/src/github.com/kr/pty/pty_freebsd.go new file mode 100644 index 0000000000..13b64d722e --- /dev/null +++ b/vendor/src/github.com/kr/pty/pty_freebsd.go @@ -0,0 +1,53 @@ +package pty + +import ( + "os" + "strconv" + "syscall" + "unsafe" +) + +const ( + sys_TIOCGPTN = 0x4004740F + sys_TIOCSPTLCK = 0x40045431 +) + +func open() (pty, tty *os.File, err error) { + p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func ptsname(f *os.File) (string, error) { + var n int + err := ioctl(f.Fd(), sys_TIOCGPTN, &n) + if err != nil { + return "", err + } + return "/dev/pts/" + strconv.Itoa(n), nil +} + +func ioctl(fd uintptr, cmd uintptr, data *int) error { + _, _, e := syscall.Syscall( + syscall.SYS_IOCTL, + fd, + cmd, + uintptr(unsafe.Pointer(data)), + ) + if e != 0 { + return syscall.ENOTTY + } + return nil +} diff --git a/vendor/src/github.com/kr/pty/pty_unsupported.go b/vendor/src/github.com/kr/pty/pty_unsupported.go new file mode 100644 index 0000000000..d4958b3583 --- /dev/null +++ b/vendor/src/github.com/kr/pty/pty_unsupported.go @@ -0,0 +1,27 @@ +// +build !linux,!darwin,!freebsd + +package pty + +import ( + "os" +) + +func open() (pty, tty *os.File, err error) { + return nil, nil, ErrUnsupported +} + +func ptsname(f *os.File) (string, error) { + return "", ErrUnsupported +} + +func grantpt(f *os.File) error { + return ErrUnsupported +} + +func unlockpt(f *os.File) error { + return ErrUnsupported +} + +func ioctl(fd, cmd, ptr uintptr) error { + return ErrUnsupported +} -- cgit v1.2.1 From 6ccfb7fb9af207a9999c60e57d1c9486ca949a5e Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 10 Mar 2014 13:25:00 -0700 Subject: Update bsd specs Docker-DCO-1.1-Signed-off-by: Guillaume J. 
Charmes (github: creack) --- Dockerfile | 2 +- archive/start_unsupported.go | 21 -------------- archive/stat_unsupported.go | 21 ++++++++++++++ pkg/term/termios_bsd.go | 67 -------------------------------------------- pkg/term/termios_freebsd.go | 67 ++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 89 insertions(+), 89 deletions(-) delete mode 100644 archive/start_unsupported.go create mode 100644 archive/stat_unsupported.go delete mode 100644 pkg/term/termios_bsd.go create mode 100644 pkg/term/termios_freebsd.go diff --git a/Dockerfile b/Dockerfile index 9929a10f3c..7fad3d56a1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -68,7 +68,7 @@ ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 # Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS linux/386 linux/arm darwin/amd64 darwin/386 +ENV DOCKER_CROSSPLATFORMS linux/386 linux/arm darwin/amd64 darwin/386 freebsd/amd64 freebsd/386 freebsd/arm # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' diff --git a/archive/start_unsupported.go b/archive/start_unsupported.go deleted file mode 100644 index 834eda8c65..0000000000 --- a/archive/start_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !linux !amd64 - -package archive - -import "syscall" - -func getLastAccess(stat *syscall.Stat_t) syscall.Timespec { - return stat.Atimespec -} - -func getLastModification(stat *syscall.Stat_t) syscall.Timespec { - return stat.Mtimespec -} - -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotImplemented -} - -func UtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotImplemented -} diff --git a/archive/stat_unsupported.go b/archive/stat_unsupported.go new file mode 100644 index 0000000000..004fa0f0a4 --- /dev/null +++ b/archive/stat_unsupported.go @@ -0,0 +1,21 @@ +// +build !linux !amd64 + +package archive + +import "syscall" + +func getLastAccess(stat *syscall.Stat_t) syscall.Timespec { + return syscall.Timespec{} +} + +func getLastModification(stat *syscall.Stat_t) syscall.Timespec { + return syscall.Timespec{} +} + +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotImplemented +} + +func UtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotImplemented +} diff --git a/pkg/term/termios_bsd.go b/pkg/term/termios_bsd.go deleted file mode 100644 index 9acf9dfe15..0000000000 --- a/pkg/term/termios_bsd.go +++ /dev/null @@ -1,67 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA - - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - // C.makeraw() - // return &oldState, nil - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/pkg/term/termios_freebsd.go b/pkg/term/termios_freebsd.go new file mode 100644 index 0000000000..9acf9dfe15 --- /dev/null +++ b/pkg/term/termios_freebsd.go @@ -0,0 +1,67 @@ +package term + +import ( + "syscall" + "unsafe" +) + +const ( + getTermios = syscall.TIOCGETA + setTermios = syscall.TIOCSETA + + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN +) + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]byte + Ispeed uint32 + Ospeed uint32 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + // C.makeraw() + // return &oldState, nil + + newState := oldState.termios + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} -- cgit v1.2.1 From 3c25302a0b9fae2c3fff9262b2ae9fa5f6f04db7 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 10 Mar 2014 15:34:38 -0700 Subject: Update vendor.sh with new kr/pty revision Docker-DCO-1.1-Signed-off-by: Guillaume J. 
Charmes (github: creack) --- hack/vendor.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/vendor.sh b/hack/vendor.sh index 184cb750a5..ac996dde12 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -39,7 +39,7 @@ clone() { echo done } -clone git github.com/kr/pty 3b1f6487b +clone git github.com/kr/pty 98c7b80083 clone git github.com/gorilla/context 708054d61e5 -- cgit v1.2.1 From 03211ecce07ab64f5263232e1aa3c6248530c5b4 Mon Sep 17 00:00:00 2001 From: srid Date: Mon, 10 Mar 2014 16:50:29 -0700 Subject: nsinit: prefix errors with their source Docker-DCO-1.1-Signed-off-by: Sridhar Ratnakumar (github: srid) --- pkg/libcontainer/nsinit/nsinit/main.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go index 61921c59a3..916be6624e 100644 --- a/pkg/libcontainer/nsinit/nsinit/main.go +++ b/pkg/libcontainer/nsinit/nsinit/main.go @@ -33,11 +33,11 @@ func main() { } container, err := loadContainer() if err != nil { - log.Fatal(err) + log.Fatalf("Unable to load container: %s", err) } ns, err := newNsInit() if err != nil { - log.Fatal(err) + log.Fatalf("Unable to initialize nsinit: %s", err) } switch flag.Arg(0) { @@ -46,7 +46,7 @@ func main() { nspid, err := readPid() if err != nil { if !os.IsNotExist(err) { - log.Fatal(err) + log.Fatalf("Unable to read pid: %s", err) } } if nspid > 0 { @@ -56,7 +56,7 @@ func main() { exitCode, err = ns.Exec(container, term, flag.Args()[1:]) } if err != nil { - log.Fatal(err) + log.Fatalf("Failed to exec: %s", err) } os.Exit(exitCode) case "init": // this is executed inside of the namespace to setup the container @@ -69,10 +69,10 @@ func main() { } syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(pipeFd)) if err != nil { - log.Fatal(err) + log.Fatalf("Unable to create sync pipe: %s", err) } if err := ns.Init(container, cwd, console, syncPipe, flag.Args()[1:]); err != nil { - log.Fatal(err) + log.Fatalf("Unable to initialize for container: %s", err) } default: log.Fatalf("command not supported for nsinit %s", flag.Arg(0)) -- cgit v1.2.1 From 8d88ea0c15b7ce7fd2b0b695c498a7ffa0f2bc87 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 10 Mar 2014 17:16:58 -0700 Subject: Merge auth package within registry Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- api/client.go | 27 +++-- api/server.go | 22 ++-- auth/MAINTAINERS | 3 - auth/auth.go | 290 ---------------------------------------------- auth/auth_test.go | 149 ------------------------ buildfile.go | 7 +- integration/auth_test.go | 12 +- registry/auth.go | 290 ++++++++++++++++++++++++++++++++++++++++++++++ registry/auth_test.go | 149 ++++++++++++++++++++++++ registry/registry.go | 19 ++- registry/registry_test.go | 5 +- server.go | 23 ++-- 12 files changed, 494 insertions(+), 502 deletions(-) delete mode 100644 auth/MAINTAINERS delete mode 100644 auth/auth.go delete mode 100644 auth/auth_test.go create mode 100644 registry/auth.go create mode 100644 registry/auth_test.go diff --git a/api/client.go b/api/client.go index 10075ae613..59ff17fe44 100644 --- a/api/client.go +++ b/api/client.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" @@ -229,7 +228,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { // 'docker login': login / register a user to registry service. 
func (cli *DockerCli) CmdLogin(args ...string) error { - cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+auth.IndexServerAddress()+"\" is the default.") + cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.") var username, password, email string @@ -240,7 +239,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error { if err != nil { return nil } - serverAddress := auth.IndexServerAddress() + serverAddress := registry.IndexServerAddress() if len(cmd.Args()) > 0 { serverAddress = cmd.Arg(0) } @@ -266,7 +265,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error { cli.LoadConfigFile() authconfig, ok := cli.configFile.Configs[serverAddress] if !ok { - authconfig = auth.AuthConfig{} + authconfig = registry.AuthConfig{} } if username == "" { @@ -311,7 +310,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error { stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false) if statusCode == 401 { delete(cli.configFile.Configs, serverAddress) - auth.SaveConfig(cli.configFile) + registry.SaveConfig(cli.configFile) return err } if err != nil { @@ -320,10 +319,10 @@ func (cli *DockerCli) CmdLogin(args ...string) error { var out2 engine.Env err = out2.Decode(stream) if err != nil { - cli.configFile, _ = auth.LoadConfig(os.Getenv("HOME")) + cli.configFile, _ = registry.LoadConfig(os.Getenv("HOME")) return err } - auth.SaveConfig(cli.configFile) + registry.SaveConfig(cli.configFile) if out2.Get("Status") != "" { fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) } @@ -1008,7 +1007,7 @@ func (cli *DockerCli) CmdPush(args ...string) error { // Custom repositories can have different rules, and we must also // allow pushing by image ID. 
if len(strings.SplitN(name, "/", 2)) == 1 { - username := cli.configFile.Configs[auth.IndexServerAddress()].Username + username := cli.configFile.Configs[registry.IndexServerAddress()].Username if username == "" { username = "" } @@ -1016,7 +1015,7 @@ func (cli *DockerCli) CmdPush(args ...string) error { } v := url.Values{} - push := func(authConfig auth.AuthConfig) error { + push := func(authConfig registry.AuthConfig) error { buf, err := json.Marshal(authConfig) if err != nil { return err @@ -1075,7 +1074,7 @@ func (cli *DockerCli) CmdPull(args ...string) error { v.Set("fromImage", remote) v.Set("tag", *tag) - pull := func(authConfig auth.AuthConfig) error { + pull := func(authConfig registry.AuthConfig) error { buf, err := json.Marshal(authConfig) if err != nil { return err @@ -2058,8 +2057,8 @@ func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo b if passAuthInfo { cli.LoadConfigFile() // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(auth.IndexServerAddress()) - getHeaders := func(authConfig auth.AuthConfig) (map[string][]string, error) { + authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress()) + getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) { buf, err := json.Marshal(authConfig) if err != nil { return nil, err @@ -2340,7 +2339,7 @@ func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet } func (cli *DockerCli) LoadConfigFile() (err error) { - cli.configFile, err = auth.LoadConfig(os.Getenv("HOME")) + cli.configFile, err = registry.LoadConfig(os.Getenv("HOME")) if err != nil { fmt.Fprintf(cli.err, "WARNING: %s\n", err) } @@ -2422,7 +2421,7 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *Doc type DockerCli struct { proto string addr string - configFile *auth.ConfigFile + configFile *registry.ConfigFile in io.ReadCloser out io.Writer err io.Writer diff --git a/api/server.go b/api/server.go index 6fafe60f9f..048c989540 100644 --- a/api/server.go +++ b/api/server.go @@ -8,12 +8,12 @@ import ( "encoding/json" "expvar" "fmt" - "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/listenbuffer" "github.com/dotcloud/docker/pkg/systemd" "github.com/dotcloud/docker/pkg/user" "github.com/dotcloud/docker/pkg/version" + "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/utils" "github.com/gorilla/mux" "io" @@ -381,13 +381,13 @@ func postImagesCreate(eng *engine.Engine, version version.Version, w http.Respon job *engine.Job ) authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := &auth.AuthConfig{} + authConfig := ®istry.AuthConfig{} if authEncoded != "" { authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty - authConfig = &auth.AuthConfig{} + authConfig = ®istry.AuthConfig{} } } if image != "" { //pull @@ -429,7 +429,7 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons } var ( authEncoded = r.Header.Get("X-Registry-Auth") - authConfig = &auth.AuthConfig{} + authConfig = ®istry.AuthConfig{} metaHeaders = map[string][]string{} ) @@ -438,7 +438,7 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons if err := json.NewDecoder(authJson).Decode(authConfig); 
err != nil { // for a search it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty - authConfig = &auth.AuthConfig{} + authConfig = ®istry.AuthConfig{} } } for k, v := range r.Header { @@ -494,7 +494,7 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response if err := parseForm(r); err != nil { return err } - authConfig := &auth.AuthConfig{} + authConfig := ®istry.AuthConfig{} authEncoded := r.Header.Get("X-Registry-Auth") if authEncoded != "" { @@ -502,7 +502,7 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // to increase compatibility to existing api it is defaulting to be empty - authConfig = &auth.AuthConfig{} + authConfig = ®istry.AuthConfig{} } } else { // the old format is supported for compatibility if there was no authConfig header @@ -823,9 +823,9 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite } var ( authEncoded = r.Header.Get("X-Registry-Auth") - authConfig = &auth.AuthConfig{} + authConfig = ®istry.AuthConfig{} configFileEncoded = r.Header.Get("X-Registry-Config") - configFile = &auth.ConfigFile{} + configFile = ®istry.ConfigFile{} job = eng.Job("build") ) @@ -838,7 +838,7 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty - authConfig = &auth.AuthConfig{} + authConfig = ®istry.AuthConfig{} } } @@ -847,7 +847,7 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty - configFile = &auth.ConfigFile{} + configFile = ®istry.ConfigFile{} } } diff --git a/auth/MAINTAINERS b/auth/MAINTAINERS deleted file mode 100644 index bf3984f5f9..0000000000 --- a/auth/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Sam Alba (@samalba) -Joffrey Fuhrer (@shin-) -Ken Cochrane (@kencochrane) diff --git a/auth/auth.go b/auth/auth.go deleted file mode 100644 index 4417dd0f7a..0000000000 --- a/auth/auth.go +++ /dev/null @@ -1,290 +0,0 @@ -package auth - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "net/http" - "os" - "path" - "strings" -) - -// Where we store the config file -const CONFIGFILE = ".dockercfg" - -// Only used for user auth + account creation -const INDEXSERVER = "https://index.docker.io/v1/" - -//const INDEXSERVER = "https://indexstaging-docker.dotcloud.com/v1/" - -var ( - ErrConfigFileMissing = errors.New("The Auth config file is missing") -) - -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth"` - Email string `json:"email"` - ServerAddress string `json:"serveraddress,omitempty"` -} - -type ConfigFile struct { - Configs map[string]AuthConfig `json:"configs,omitempty"` - rootPath string -} - -func IndexServerAddress() string { - return INDEXSERVER -} - -// create a base64 encoded auth string to store in config -func encodeAuth(authConfig *AuthConfig) 
string { - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decode the auth string -func decodeAuth(authStr string) (string, string, error) { - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", fmt.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", fmt.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} - -// load up the auth config information and return values -// FIXME: use the internal golang config parser -func LoadConfig(rootPath string) (*ConfigFile, error) { - configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath} - confFile := path.Join(rootPath, CONFIGFILE) - if _, err := os.Stat(confFile); err != nil { - return &configFile, nil //missing file is not an error - } - b, err := ioutil.ReadFile(confFile) - if err != nil { - return &configFile, err - } - - if err := json.Unmarshal(b, &configFile.Configs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return &configFile, fmt.Errorf("The Auth config file is empty") - } - authConfig := AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return &configFile, fmt.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return &configFile, err - } - origEmail := strings.Split(arr[1], " = ") - if len(origEmail) != 2 { - return &configFile, fmt.Errorf("Invalid Auth config file") - } - authConfig.Email = origEmail[1] - authConfig.ServerAddress = IndexServerAddress() - configFile.Configs[IndexServerAddress()] = authConfig - } else { - for k, authConfig := range configFile.Configs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return &configFile, err - } - authConfig.Auth = "" - configFile.Configs[k] = authConfig - authConfig.ServerAddress = k - } - } - return &configFile, nil -} - -// save the auth config -func SaveConfig(configFile *ConfigFile) error { - confFile := path.Join(configFile.rootPath, CONFIGFILE) - if len(configFile.Configs) == 0 { - os.Remove(confFile) - return nil - } - - configs := make(map[string]AuthConfig, len(configFile.Configs)) - for k, authConfig := range configFile.Configs { - authCopy := authConfig - - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - configs[k] = authCopy - } - - b, err := json.Marshal(configs) - if err != nil { - return err - } - err = ioutil.WriteFile(confFile, b, 0600) - if err != nil { - return err - } - return nil -} - -// try to register/login to the registry server -func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, error) { - var ( - status string - reqBody []byte - err error - client = &http.Client{} - reqStatusCode = 0 - serverAddress = authConfig.ServerAddress - ) - - if serverAddress == "" { - serverAddress = IndexServerAddress() - } - - loginAgainstOfficialIndex := serverAddress == IndexServerAddress() - - // to avoid sending the server address to 
the server it should be removed before being marshalled - authCopy := *authConfig - authCopy.ServerAddress = "" - - jsonBody, err := json.Marshal(authCopy) - if err != nil { - return "", fmt.Errorf("Config Error: %s", err) - } - - // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. - b := strings.NewReader(string(jsonBody)) - req1, err := http.Post(serverAddress+"users/", "application/json; charset=utf-8", b) - if err != nil { - return "", fmt.Errorf("Server Error: %s", err) - } - reqStatusCode = req1.StatusCode - defer req1.Body.Close() - reqBody, err = ioutil.ReadAll(req1.Body) - if err != nil { - return "", fmt.Errorf("Server Error: [%#v] %s", reqStatusCode, err) - } - - if reqStatusCode == 201 { - if loginAgainstOfficialIndex { - status = "Account created. Please use the confirmation link we sent" + - " to your e-mail to activate it." - } else { - status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it." - } - } else if reqStatusCode == 400 { - if string(reqBody) == "\"Username or email already exists\"" { - req, err := factory.NewRequest("GET", serverAddress+"users/", nil) - req.SetBasicAuth(authConfig.Username, authConfig.Password) - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode == 200 { - status = "Login Succeeded" - } else if resp.StatusCode == 401 { - return "", fmt.Errorf("Wrong login/password, please try again") - } else if resp.StatusCode == 403 { - if loginAgainstOfficialIndex { - return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") - } - return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) - } else { - return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) - } - } else { - return "", fmt.Errorf("Registration: %s", reqBody) - } - } else if reqStatusCode == 401 { - // This case would happen with private registries where /v1/users is - // protected, so people can use `docker login` as an auth check. 
- req, err := factory.NewRequest("GET", serverAddress+"users/", nil) - req.SetBasicAuth(authConfig.Username, authConfig.Password) - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode == 200 { - status = "Login Succeeded" - } else if resp.StatusCode == 401 { - return "", fmt.Errorf("Wrong login/password, please try again") - } else { - return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, - resp.StatusCode, resp.Header) - } - } else { - return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody) - } - return status, nil -} - -// this method matches a auth configuration to a server address or a url -func (config *ConfigFile) ResolveAuthConfig(hostname string) AuthConfig { - if hostname == IndexServerAddress() || len(hostname) == 0 { - // default to the index server - return config.Configs[IndexServerAddress()] - } - - // First try the happy case - if c, found := config.Configs[hostname]; found { - return c - } - - convertToHostname := func(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.Replace(url, "http://", "", 1) - } else if strings.HasPrefix(url, "https://") { - stripped = strings.Replace(url, "https://", "", 1) - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] - } - - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - normalizedHostename := convertToHostname(hostname) - for registry, config := range config.Configs { - if registryHostname := convertToHostname(registry); registryHostname == normalizedHostename { - return config - } - } - - // When all else fails, return an empty auth config - return AuthConfig{} -} diff --git a/auth/auth_test.go b/auth/auth_test.go deleted file mode 100644 index 2335072609..0000000000 --- a/auth/auth_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package auth - -import ( - "io/ioutil" - "os" - "testing" -) - -func TestEncodeAuth(t *testing.T) { - newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} - authStr := encodeAuth(newAuthConfig) - decAuthConfig := &AuthConfig{} - var err error - decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) - if err != nil { - t.Fatal(err) - } - if newAuthConfig.Username != decAuthConfig.Username { - t.Fatal("Encode Username doesn't match decoded Username") - } - if newAuthConfig.Password != decAuthConfig.Password { - t.Fatal("Encode Password doesn't match decoded Password") - } - if authStr != "a2VuOnRlc3Q=" { - t.Fatal("AuthString encoding isn't correct.") - } -} - -func setupTempConfigFile() (*ConfigFile, error) { - root, err := ioutil.TempDir("", "docker-test-auth") - if err != nil { - return nil, err - } - configFile := &ConfigFile{ - rootPath: root, - Configs: make(map[string]AuthConfig), - } - - for _, registry := range []string{"testIndex", IndexServerAddress()} { - configFile.Configs[registry] = AuthConfig{ - Username: "docker-user", - Password: "docker-pass", - Email: "docker@docker.io", - } - } - - return configFile, nil -} - -func TestSameAuthDataPostSave(t *testing.T) { - configFile, err := setupTempConfigFile() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(configFile.rootPath) - - err = SaveConfig(configFile) - if err != nil { - t.Fatal(err) - } - - authConfig := configFile.Configs["testIndex"] - if authConfig.Username != 
"docker-user" { - t.Fail() - } - if authConfig.Password != "docker-pass" { - t.Fail() - } - if authConfig.Email != "docker@docker.io" { - t.Fail() - } - if authConfig.Auth != "" { - t.Fail() - } -} - -func TestResolveAuthConfigIndexServer(t *testing.T) { - configFile, err := setupTempConfigFile() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(configFile.rootPath) - - for _, registry := range []string{"", IndexServerAddress()} { - resolved := configFile.ResolveAuthConfig(registry) - if resolved != configFile.Configs[IndexServerAddress()] { - t.Fail() - } - } -} - -func TestResolveAuthConfigFullURL(t *testing.T) { - configFile, err := setupTempConfigFile() - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(configFile.rootPath) - - registryAuth := AuthConfig{ - Username: "foo-user", - Password: "foo-pass", - Email: "foo@example.com", - } - localAuth := AuthConfig{ - Username: "bar-user", - Password: "bar-pass", - Email: "bar@example.com", - } - configFile.Configs["https://registry.example.com/v1/"] = registryAuth - configFile.Configs["http://localhost:8000/v1/"] = localAuth - configFile.Configs["registry.com"] = registryAuth - - validRegistries := map[string][]string{ - "https://registry.example.com/v1/": { - "https://registry.example.com/v1/", - "http://registry.example.com/v1/", - "registry.example.com", - "registry.example.com/v1/", - }, - "http://localhost:8000/v1/": { - "https://localhost:8000/v1/", - "http://localhost:8000/v1/", - "localhost:8000", - "localhost:8000/v1/", - }, - "registry.com": { - "https://registry.com/v1/", - "http://registry.com/v1/", - "registry.com", - "registry.com/v1/", - }, - } - - for configKey, registries := range validRegistries { - for _, registry := range registries { - var ( - configured AuthConfig - ok bool - ) - resolved := configFile.ResolveAuthConfig(registry) - if configured, ok = configFile.Configs[configKey]; !ok { - t.Fail() - } - if resolved.Email != configured.Email { - t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) - } - } - } -} diff --git a/buildfile.go b/buildfile.go index 160db4d434..959b085685 100644 --- a/buildfile.go +++ b/buildfile.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/runtime" @@ -49,8 +48,8 @@ type buildFile struct { utilizeCache bool rm bool - authConfig *auth.AuthConfig - configFile *auth.ConfigFile + authConfig *registry.AuthConfig + configFile *registry.ConfigFile tmpContainers map[string]struct{} tmpImages map[string]struct{} @@ -793,7 +792,7 @@ func (b *buildFile) BuildStep(name, expression string) error { return nil } -func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig, authConfigFile *auth.ConfigFile) BuildFile { +func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile { return &buildFile{ runtime: srv.runtime, srv: srv, diff --git a/integration/auth_test.go b/integration/auth_test.go index c5bdabace2..1d9d450573 100644 --- a/integration/auth_test.go +++ b/integration/auth_test.go @@ -4,7 +4,7 @@ import ( "crypto/rand" "encoding/hex" "fmt" - "github.com/dotcloud/docker/auth" + "github.com/dotcloud/docker/registry" "os" "strings" "testing" @@ 
-18,13 +18,13 @@ import ( func TestLogin(t *testing.T) { os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") defer os.Setenv("DOCKER_INDEX_URL", "") - authConfig := &auth.AuthConfig{ + authConfig := ®istry.AuthConfig{ Username: "unittester", Password: "surlautrerivejetattendrai", Email: "noise+unittester@docker.com", ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/", } - status, err := auth.Login(authConfig, nil) + status, err := registry.Login(authConfig, nil) if err != nil { t.Fatal(err) } @@ -41,13 +41,13 @@ func TestCreateAccount(t *testing.T) { } token := hex.EncodeToString(tokenBuffer)[:12] username := "ut" + token - authConfig := &auth.AuthConfig{ + authConfig := ®istry.AuthConfig{ Username: username, Password: "test42", Email: fmt.Sprintf("docker-ut+%s@example.com", token), ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/", } - status, err := auth.Login(authConfig, nil) + status, err := registry.Login(authConfig, nil) if err != nil { t.Fatal(err) } @@ -59,7 +59,7 @@ func TestCreateAccount(t *testing.T) { t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status) } - status, err = auth.Login(authConfig, nil) + status, err = registry.Login(authConfig, nil) if err == nil { t.Fatalf("Expected error but found nil instead") } diff --git a/registry/auth.go b/registry/auth.go new file mode 100644 index 0000000000..4fdd51fda4 --- /dev/null +++ b/registry/auth.go @@ -0,0 +1,290 @@ +package registry + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "net/http" + "os" + "path" + "strings" +) + +// Where we store the config file +const CONFIGFILE = ".dockercfg" + +// Only used for user auth + account creation +const INDEXSERVER = "https://index.docker.io/v1/" + +//const INDEXSERVER = "https://indexstaging-docker.dotcloud.com/v1/" + +var ( + ErrConfigFileMissing = errors.New("The Auth config file is missing") +) + +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth"` + Email string `json:"email"` + ServerAddress string `json:"serveraddress,omitempty"` +} + +type ConfigFile struct { + Configs map[string]AuthConfig `json:"configs,omitempty"` + rootPath string +} + +func IndexServerAddress() string { + return INDEXSERVER +} + +// create a base64 encoded auth string to store in config +func encodeAuth(authConfig *AuthConfig) string { + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decode the auth string +func decodeAuth(authStr string) (string, string, error) { + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", fmt.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", fmt.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} + +// load up the auth config information and return values +// FIXME: use the internal golang config parser +func LoadConfig(rootPath string) (*ConfigFile, error) { + configFile := ConfigFile{Configs: 
make(map[string]AuthConfig), rootPath: rootPath} + confFile := path.Join(rootPath, CONFIGFILE) + if _, err := os.Stat(confFile); err != nil { + return &configFile, nil //missing file is not an error + } + b, err := ioutil.ReadFile(confFile) + if err != nil { + return &configFile, err + } + + if err := json.Unmarshal(b, &configFile.Configs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return &configFile, fmt.Errorf("The Auth config file is empty") + } + authConfig := AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return &configFile, fmt.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) + if err != nil { + return &configFile, err + } + origEmail := strings.Split(arr[1], " = ") + if len(origEmail) != 2 { + return &configFile, fmt.Errorf("Invalid Auth config file") + } + authConfig.Email = origEmail[1] + authConfig.ServerAddress = IndexServerAddress() + configFile.Configs[IndexServerAddress()] = authConfig + } else { + for k, authConfig := range configFile.Configs { + authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) + if err != nil { + return &configFile, err + } + authConfig.Auth = "" + configFile.Configs[k] = authConfig + authConfig.ServerAddress = k + } + } + return &configFile, nil +} + +// save the auth config +func SaveConfig(configFile *ConfigFile) error { + confFile := path.Join(configFile.rootPath, CONFIGFILE) + if len(configFile.Configs) == 0 { + os.Remove(confFile) + return nil + } + + configs := make(map[string]AuthConfig, len(configFile.Configs)) + for k, authConfig := range configFile.Configs { + authCopy := authConfig + + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + configs[k] = authCopy + } + + b, err := json.Marshal(configs) + if err != nil { + return err + } + err = ioutil.WriteFile(confFile, b, 0600) + if err != nil { + return err + } + return nil +} + +// try to register/login to the registry server +func Login(authConfig *AuthConfig, factory *utils.HTTPRequestFactory) (string, error) { + var ( + status string + reqBody []byte + err error + client = &http.Client{} + reqStatusCode = 0 + serverAddress = authConfig.ServerAddress + ) + + if serverAddress == "" { + serverAddress = IndexServerAddress() + } + + loginAgainstOfficialIndex := serverAddress == IndexServerAddress() + + // to avoid sending the server address to the server it should be removed before being marshalled + authCopy := *authConfig + authCopy.ServerAddress = "" + + jsonBody, err := json.Marshal(authCopy) + if err != nil { + return "", fmt.Errorf("Config Error: %s", err) + } + + // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. + b := strings.NewReader(string(jsonBody)) + req1, err := http.Post(serverAddress+"users/", "application/json; charset=utf-8", b) + if err != nil { + return "", fmt.Errorf("Server Error: %s", err) + } + reqStatusCode = req1.StatusCode + defer req1.Body.Close() + reqBody, err = ioutil.ReadAll(req1.Body) + if err != nil { + return "", fmt.Errorf("Server Error: [%#v] %s", reqStatusCode, err) + } + + if reqStatusCode == 201 { + if loginAgainstOfficialIndex { + status = "Account created. Please use the confirmation link we sent" + + " to your e-mail to activate it." + } else { + status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions how to activate it." 
+ } + } else if reqStatusCode == 400 { + if string(reqBody) == "\"Username or email already exists\"" { + req, err := factory.NewRequest("GET", serverAddress+"users/", nil) + req.SetBasicAuth(authConfig.Username, authConfig.Password) + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode == 200 { + status = "Login Succeeded" + } else if resp.StatusCode == 401 { + return "", fmt.Errorf("Wrong login/password, please try again") + } else if resp.StatusCode == 403 { + if loginAgainstOfficialIndex { + return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.") + } + return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) + } else { + return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header) + } + } else { + return "", fmt.Errorf("Registration: %s", reqBody) + } + } else if reqStatusCode == 401 { + // This case would happen with private registries where /v1/users is + // protected, so people can use `docker login` as an auth check. + req, err := factory.NewRequest("GET", serverAddress+"users/", nil) + req.SetBasicAuth(authConfig.Username, authConfig.Password) + resp, err := client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + if resp.StatusCode == 200 { + status = "Login Succeeded" + } else if resp.StatusCode == 401 { + return "", fmt.Errorf("Wrong login/password, please try again") + } else { + return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, + resp.StatusCode, resp.Header) + } + } else { + return "", fmt.Errorf("Unexpected status code [%d] : %s", reqStatusCode, reqBody) + } + return status, nil +} + +// this method matches a auth configuration to a server address or a url +func (config *ConfigFile) ResolveAuthConfig(hostname string) AuthConfig { + if hostname == IndexServerAddress() || len(hostname) == 0 { + // default to the index server + return config.Configs[IndexServerAddress()] + } + + // First try the happy case + if c, found := config.Configs[hostname]; found { + return c + } + + convertToHostname := func(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.Replace(url, "http://", "", 1) + } else if strings.HasPrefix(url, "https://") { + stripped = strings.Replace(url, "https://", "", 1) + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] + } + + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + normalizedHostename := convertToHostname(hostname) + for registry, config := range config.Configs { + if registryHostname := convertToHostname(registry); registryHostname == normalizedHostename { + return config + } + } + + // When all else fails, return an empty auth config + return AuthConfig{} +} diff --git a/registry/auth_test.go b/registry/auth_test.go new file mode 100644 index 0000000000..3cb1a9ac4b --- /dev/null +++ b/registry/auth_test.go @@ -0,0 +1,149 @@ +package registry + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestEncodeAuth(t *testing.T) { + newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} + authStr := encodeAuth(newAuthConfig) + decAuthConfig := 
&AuthConfig{} + var err error + decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) + if err != nil { + t.Fatal(err) + } + if newAuthConfig.Username != decAuthConfig.Username { + t.Fatal("Encode Username doesn't match decoded Username") + } + if newAuthConfig.Password != decAuthConfig.Password { + t.Fatal("Encode Password doesn't match decoded Password") + } + if authStr != "a2VuOnRlc3Q=" { + t.Fatal("AuthString encoding isn't correct.") + } +} + +func setupTempConfigFile() (*ConfigFile, error) { + root, err := ioutil.TempDir("", "docker-test-auth") + if err != nil { + return nil, err + } + configFile := &ConfigFile{ + rootPath: root, + Configs: make(map[string]AuthConfig), + } + + for _, registry := range []string{"testIndex", IndexServerAddress()} { + configFile.Configs[registry] = AuthConfig{ + Username: "docker-user", + Password: "docker-pass", + Email: "docker@docker.io", + } + } + + return configFile, nil +} + +func TestSameAuthDataPostSave(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + err = SaveConfig(configFile) + if err != nil { + t.Fatal(err) + } + + authConfig := configFile.Configs["testIndex"] + if authConfig.Username != "docker-user" { + t.Fail() + } + if authConfig.Password != "docker-pass" { + t.Fail() + } + if authConfig.Email != "docker@docker.io" { + t.Fail() + } + if authConfig.Auth != "" { + t.Fail() + } +} + +func TestResolveAuthConfigIndexServer(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + for _, registry := range []string{"", IndexServerAddress()} { + resolved := configFile.ResolveAuthConfig(registry) + if resolved != configFile.Configs[IndexServerAddress()] { + t.Fail() + } + } +} + +func TestResolveAuthConfigFullURL(t *testing.T) { + configFile, err := setupTempConfigFile() + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(configFile.rootPath) + + registryAuth := AuthConfig{ + Username: "foo-user", + Password: "foo-pass", + Email: "foo@example.com", + } + localAuth := AuthConfig{ + Username: "bar-user", + Password: "bar-pass", + Email: "bar@example.com", + } + configFile.Configs["https://registry.example.com/v1/"] = registryAuth + configFile.Configs["http://localhost:8000/v1/"] = localAuth + configFile.Configs["registry.com"] = registryAuth + + validRegistries := map[string][]string{ + "https://registry.example.com/v1/": { + "https://registry.example.com/v1/", + "http://registry.example.com/v1/", + "registry.example.com", + "registry.example.com/v1/", + }, + "http://localhost:8000/v1/": { + "https://localhost:8000/v1/", + "http://localhost:8000/v1/", + "localhost:8000", + "localhost:8000/v1/", + }, + "registry.com": { + "https://registry.com/v1/", + "http://registry.com/v1/", + "registry.com", + "registry.com/v1/", + }, + } + + for configKey, registries := range validRegistries { + for _, registry := range registries { + var ( + configured AuthConfig + ok bool + ) + resolved := configFile.ResolveAuthConfig(registry) + if configured, ok = configFile.Configs[configKey]; !ok { + t.Fail() + } + if resolved.Email != configured.Email { + t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) + } + } + } +} diff --git a/registry/registry.go b/registry/registry.go index cc2e985c31..dbf5d539ff 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - 
"github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -27,7 +26,7 @@ var ( ) func pingRegistryEndpoint(endpoint string) (bool, error) { - if endpoint == auth.IndexServerAddress() { + if endpoint == IndexServerAddress() { // Skip the check, we now this one is valid // (and we never want to fallback to http in case of error) return false, nil @@ -103,7 +102,7 @@ func ResolveRepositoryName(reposName string) (string, string, error) { nameParts[0] != "localhost" { // This is a Docker Index repos (ex: samalba/hipache or ubuntu) err := validateRepositoryName(reposName) - return auth.IndexServerAddress(), reposName, err + return IndexServerAddress(), reposName, err } if len(nameParts) < 2 { // There is a dot in repos name (and no registry address) @@ -601,7 +600,7 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat func (r *Registry) SearchRepositories(term string) (*SearchResults, error) { utils.Debugf("Index server: %s", r.indexEndpoint) - u := auth.IndexServerAddress() + "search?q=" + url.QueryEscape(term) + u := IndexServerAddress() + "search?q=" + url.QueryEscape(term) req, err := r.reqFactory.NewRequest("GET", u, nil) if err != nil { return nil, err @@ -627,12 +626,12 @@ func (r *Registry) SearchRepositories(term string) (*SearchResults, error) { return result, err } -func (r *Registry) GetAuthConfig(withPasswd bool) *auth.AuthConfig { +func (r *Registry) GetAuthConfig(withPasswd bool) *AuthConfig { password := "" if withPasswd { password = r.authConfig.Password } - return &auth.AuthConfig{ + return &AuthConfig{ Username: r.authConfig.Username, Password: password, Email: r.authConfig.Email, @@ -668,12 +667,12 @@ type ImgData struct { type Registry struct { client *http.Client - authConfig *auth.AuthConfig + authConfig *AuthConfig reqFactory *utils.HTTPRequestFactory indexEndpoint string } -func NewRegistry(authConfig *auth.AuthConfig, factory *utils.HTTPRequestFactory, indexEndpoint string) (r *Registry, err error) { +func NewRegistry(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, indexEndpoint string) (r *Registry, err error) { httpTransport := &http.Transport{ DisableKeepAlives: true, Proxy: http.ProxyFromEnvironment, @@ -693,13 +692,13 @@ func NewRegistry(authConfig *auth.AuthConfig, factory *utils.HTTPRequestFactory, // If we're working with a standalone private registry over HTTPS, send Basic Auth headers // alongside our requests. - if indexEndpoint != auth.IndexServerAddress() && strings.HasPrefix(indexEndpoint, "https://") { + if indexEndpoint != IndexServerAddress() && strings.HasPrefix(indexEndpoint, "https://") { standalone, err := pingRegistryEndpoint(indexEndpoint) if err != nil { return nil, err } if standalone { - utils.Debugf("Endpoint %s is eligible for private registry auth. Enabling decorator.", indexEndpoint) + utils.Debugf("Endpoint %s is eligible for private registry registry. 
Enabling decorator.", indexEndpoint) dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password) factory.AddDecorator(dec) } diff --git a/registry/registry_test.go b/registry/registry_test.go index 82a27a166f..f21814c791 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -1,7 +1,6 @@ package registry import ( - "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/utils" "strings" "testing" @@ -14,7 +13,7 @@ var ( ) func spawnTestRegistry(t *testing.T) *Registry { - authConfig := &auth.AuthConfig{} + authConfig := &AuthConfig{} r, err := NewRegistry(authConfig, utils.NewHTTPRequestFactory(), makeURL("/v1/")) if err != nil { t.Fatal(err) @@ -137,7 +136,7 @@ func TestResolveRepositoryName(t *testing.T) { if err != nil { t.Fatal(err) } - assertEqual(t, ep, auth.IndexServerAddress(), "Expected endpoint to be index server address") + assertEqual(t, ep, IndexServerAddress(), "Expected endpoint to be index server address") assertEqual(t, repo, "fooo/bar", "Expected resolved repo to be foo/bar") u := makeURL("")[7:] diff --git a/server.go b/server.go index 52f5f14c0a..0cf78eefa3 100644 --- a/server.go +++ b/server.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/auth" "github.com/dotcloud/docker/daemonconfig" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" @@ -199,19 +198,19 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { func (srv *Server) Auth(job *engine.Job) engine.Status { var ( err error - authConfig = &auth.AuthConfig{} + authConfig = ®istry.AuthConfig{} ) job.GetenvJson("authConfig", authConfig) // TODO: this is only done here because auth and registry need to be merged into one pkg - if addr := authConfig.ServerAddress; addr != "" && addr != auth.IndexServerAddress() { + if addr := authConfig.ServerAddress; addr != "" && addr != registry.IndexServerAddress() { addr, err = registry.ExpandAndVerifyRegistryUrl(addr) if err != nil { return job.Error(err) } authConfig.ServerAddress = addr } - status, err := auth.Login(authConfig, srv.HTTPRequestFactory(nil)) + status, err := registry.Login(authConfig, srv.HTTPRequestFactory(nil)) if err != nil { return job.Error(err) } @@ -431,8 +430,8 @@ func (srv *Server) Build(job *engine.Job) engine.Status { suppressOutput = job.GetenvBool("q") noCache = job.GetenvBool("nocache") rm = job.GetenvBool("rm") - authConfig = &auth.AuthConfig{} - configFile = &auth.ConfigFile{} + authConfig = ®istry.AuthConfig{} + configFile = ®istry.ConfigFile{} tag string context io.ReadCloser ) @@ -611,12 +610,12 @@ func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { var ( term = job.Args[0] metaHeaders = map[string][]string{} - authConfig = &auth.AuthConfig{} + authConfig = ®istry.AuthConfig{} ) job.GetenvJson("authConfig", authConfig) job.GetenvJson("metaHeaders", metaHeaders) - r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), auth.IndexServerAddress()) + r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), registry.IndexServerAddress()) if err != nil { return job.Error(err) } @@ -827,7 +826,7 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { v.Set("ExecutionDriver", srv.runtime.ExecutionDriver().Name()) v.SetInt("NEventsListener", len(srv.listeners)) v.Set("KernelVersion", kernelVersion) - v.Set("IndexServerAddress", auth.IndexServerAddress()) + v.Set("IndexServerAddress", registry.IndexServerAddress()) v.Set("InitSha1", 
dockerversion.INITSHA1) v.Set("InitPath", initPath) if _, err := v.WriteTo(job.Stdout); err != nil { @@ -1312,7 +1311,7 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status { localName = job.Args[0] tag string sf = utils.NewStreamFormatter(job.GetenvBool("json")) - authConfig = &auth.AuthConfig{} + authConfig = ®istry.AuthConfig{} metaHeaders map[string][]string ) if len(job.Args) > 1 { @@ -1350,7 +1349,7 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status { return job.Error(err) } - if endpoint == auth.IndexServerAddress() { + if endpoint == registry.IndexServerAddress() { // If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar" localName = remoteName } @@ -1531,7 +1530,7 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status { var ( localName = job.Args[0] sf = utils.NewStreamFormatter(job.GetenvBool("json")) - authConfig = &auth.AuthConfig{} + authConfig = ®istry.AuthConfig{} metaHeaders map[string][]string ) -- cgit v1.2.1 From c5632622391921160687f3e0155bdfe3d3cfc07d Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 10 Mar 2014 13:38:17 -0700 Subject: Move signal to pkg Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- api/client.go | 13 ++++--- pkg/signal/signal.go | 11 ++++++ pkg/signal/signal_darwin.go | 44 ++++++++++++++++++++++ pkg/signal/signal_freebsd.go | 42 +++++++++++++++++++++ pkg/signal/signal_linux.go | 87 ++++++++++++++++++++++++++++++++++++++++++++ utils/signal.go | 11 ------ utils/signal_darwin.go | 44 ---------------------- utils/signal_linux.go | 47 ------------------------ 8 files changed, 191 insertions(+), 108 deletions(-) create mode 100644 pkg/signal/signal.go create mode 100644 pkg/signal/signal_darwin.go create mode 100644 pkg/signal/signal_freebsd.go create mode 100644 pkg/signal/signal_linux.go delete mode 100644 utils/signal.go delete mode 100644 utils/signal_darwin.go delete mode 100644 utils/signal_linux.go diff --git a/api/client.go b/api/client.go index 10075ae613..5e110d49f5 100644 --- a/api/client.go +++ b/api/client.go @@ -13,6 +13,7 @@ import ( "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" flag "github.com/dotcloud/docker/pkg/mflag" + "github.com/dotcloud/docker/pkg/signal" "github.com/dotcloud/docker/pkg/term" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" @@ -24,7 +25,7 @@ import ( "net/http/httputil" "net/url" "os" - "os/signal" + gosignal "os/signal" "path" "reflect" "regexp" @@ -533,7 +534,7 @@ func (cli *DockerCli) CmdRestart(args ...string) error { func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { sigc := make(chan os.Signal, 1) - utils.CatchAll(sigc) + signal.CatchAll(sigc) go func() { for s := range sigc { if s == syscall.SIGCHLD { @@ -581,7 +582,7 @@ func (cli *DockerCli) CmdStart(args ...string) error { if !container.Config.Tty { sigc := cli.forwardAllSignals(cmd.Arg(0)) - defer utils.StopCatch(sigc) + defer signal.StopCatch(sigc) } var in io.ReadCloser @@ -1614,7 +1615,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error { if *proxy && !container.Config.Tty { sigc := cli.forwardAllSignals(cmd.Arg(0)) - defer utils.StopCatch(sigc) + defer signal.StopCatch(sigc) } if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil { @@ -1818,7 +1819,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { if sigProxy { sigc := cli.forwardAllSignals(runResult.Get("Id")) - defer utils.StopCatch(sigc) + defer 
signal.StopCatch(sigc) } var ( @@ -2320,7 +2321,7 @@ func (cli *DockerCli) monitorTtySize(id string) error { cli.resizeTty(id) sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, syscall.SIGWINCH) + gosignal.Notify(sigchan, syscall.SIGWINCH) go func() { for _ = range sigchan { cli.resizeTty(id) diff --git a/pkg/signal/signal.go b/pkg/signal/signal.go new file mode 100644 index 0000000000..6f9874bd01 --- /dev/null +++ b/pkg/signal/signal.go @@ -0,0 +1,11 @@ +package signal + +import ( + "os" + "os/signal" +) + +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} diff --git a/pkg/signal/signal_darwin.go b/pkg/signal/signal_darwin.go new file mode 100644 index 0000000000..22a60ae18d --- /dev/null +++ b/pkg/signal/signal_darwin.go @@ -0,0 +1,44 @@ +package signal + +import ( + "os" + "os/signal" + "syscall" +) + +func CatchAll(sigc chan os.Signal) { + signal.Notify(sigc, + syscall.SIGABRT, + syscall.SIGALRM, + syscall.SIGBUS, + syscall.SIGCHLD, + syscall.SIGCONT, + syscall.SIGEMT, + syscall.SIGFPE, + syscall.SIGHUP, + syscall.SIGILL, + syscall.SIGINFO, + syscall.SIGINT, + syscall.SIGIO, + syscall.SIGIOT, + syscall.SIGKILL, + syscall.SIGPIPE, + syscall.SIGPROF, + syscall.SIGQUIT, + syscall.SIGSEGV, + syscall.SIGSTOP, + syscall.SIGSYS, + syscall.SIGTERM, + syscall.SIGTRAP, + syscall.SIGTSTP, + syscall.SIGTTIN, + syscall.SIGTTOU, + syscall.SIGURG, + syscall.SIGUSR1, + syscall.SIGUSR2, + syscall.SIGVTALRM, + syscall.SIGWINCH, + syscall.SIGXCPU, + syscall.SIGXFSZ, + ) +} diff --git a/pkg/signal/signal_freebsd.go b/pkg/signal/signal_freebsd.go new file mode 100644 index 0000000000..d27782217f --- /dev/null +++ b/pkg/signal/signal_freebsd.go @@ -0,0 +1,42 @@ +package signal + +import ( + "os" + "os/signal" + "syscall" +) + +func CatchAll(sigc chan os.Signal) { + signal.Notify(sigc, + syscall.SIGABRT, + syscall.SIGALRM, + syscall.SIGBUS, + syscall.SIGCHLD, + syscall.SIGCONT, + syscall.SIGFPE, + syscall.SIGHUP, + syscall.SIGILL, + syscall.SIGINT, + syscall.SIGIO, + syscall.SIGIOT, + syscall.SIGKILL, + syscall.SIGPIPE, + syscall.SIGPROF, + syscall.SIGQUIT, + syscall.SIGSEGV, + syscall.SIGSTOP, + syscall.SIGSYS, + syscall.SIGTERM, + syscall.SIGTRAP, + syscall.SIGTSTP, + syscall.SIGTTIN, + syscall.SIGTTOU, + syscall.SIGURG, + syscall.SIGUSR1, + syscall.SIGUSR2, + syscall.SIGVTALRM, + syscall.SIGWINCH, + syscall.SIGXCPU, + syscall.SIGXFSZ, + ) +} diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go new file mode 100644 index 0000000000..b6b25d518b --- /dev/null +++ b/pkg/signal/signal_linux.go @@ -0,0 +1,87 @@ +package signal + +import ( + "os" + "os/signal" + "syscall" +) + +var signalMap = map[string]syscall.Signal{} + +/* + syscall.SIGABRT, + syscall.SIGALRM, + syscall.SIGBUS, + syscall.SIGCHLD, + syscall.SIGCLD, + syscall.SIGCONT, + syscall.SIGFPE, + syscall.SIGHUP, + syscall.SIGILL, + syscall.SIGINT, + syscall.SIGIO, + syscall.SIGIOT, + syscall.SIGKILL, + syscall.SIGPIPE, + syscall.SIGPOLL, + syscall.SIGPROF, + syscall.SIGPWR, + syscall.SIGQUIT, + syscall.SIGSEGV, + syscall.SIGSTKFLT, + syscall.SIGSTOP, + syscall.SIGSYS, + syscall.SIGTERM, + syscall.SIGTRAP, + syscall.SIGTSTP, + syscall.SIGTTIN, + syscall.SIGTTOU, + syscall.SIGUNUSED, + syscall.SIGURG, + syscall.SIGUSR1, + syscall.SIGUSR2, + syscall.SIGVTALRM, + syscall.SIGWINCH, + syscall.SIGXCPU, + syscall.SIGXFSZ, +*/ + +func CatchAll(sigc chan os.Signal) { + signal.Notify(sigc, + syscall.SIGABRT, + syscall.SIGALRM, + syscall.SIGBUS, + syscall.SIGCHLD, + syscall.SIGCLD, + syscall.SIGCONT, + 
syscall.SIGFPE, + syscall.SIGHUP, + syscall.SIGILL, + syscall.SIGINT, + syscall.SIGIO, + syscall.SIGIOT, + syscall.SIGKILL, + syscall.SIGPIPE, + syscall.SIGPOLL, + syscall.SIGPROF, + syscall.SIGPWR, + syscall.SIGQUIT, + syscall.SIGSEGV, + syscall.SIGSTKFLT, + syscall.SIGSTOP, + syscall.SIGSYS, + syscall.SIGTERM, + syscall.SIGTRAP, + syscall.SIGTSTP, + syscall.SIGTTIN, + syscall.SIGTTOU, + syscall.SIGUNUSED, + syscall.SIGURG, + syscall.SIGUSR1, + syscall.SIGUSR2, + syscall.SIGVTALRM, + syscall.SIGWINCH, + syscall.SIGXCPU, + syscall.SIGXFSZ, + ) +} diff --git a/utils/signal.go b/utils/signal.go deleted file mode 100644 index 0cac7d113f..0000000000 --- a/utils/signal.go +++ /dev/null @@ -1,11 +0,0 @@ -package utils - -import ( - "os" - "os/signal" -) - -func StopCatch(sigc chan os.Signal) { - signal.Stop(sigc) - close(sigc) -} diff --git a/utils/signal_darwin.go b/utils/signal_darwin.go deleted file mode 100644 index 28730db8e5..0000000000 --- a/utils/signal_darwin.go +++ /dev/null @@ -1,44 +0,0 @@ -package utils - -import ( - "os" - "os/signal" - "syscall" -) - -func CatchAll(sigc chan os.Signal) { - signal.Notify(sigc, - syscall.SIGABRT, - syscall.SIGALRM, - syscall.SIGBUS, - syscall.SIGCHLD, - syscall.SIGCONT, - syscall.SIGEMT, - syscall.SIGFPE, - syscall.SIGHUP, - syscall.SIGILL, - syscall.SIGINFO, - syscall.SIGINT, - syscall.SIGIO, - syscall.SIGIOT, - syscall.SIGKILL, - syscall.SIGPIPE, - syscall.SIGPROF, - syscall.SIGQUIT, - syscall.SIGSEGV, - syscall.SIGSTOP, - syscall.SIGSYS, - syscall.SIGTERM, - syscall.SIGTRAP, - syscall.SIGTSTP, - syscall.SIGTTIN, - syscall.SIGTTOU, - syscall.SIGURG, - syscall.SIGUSR1, - syscall.SIGUSR2, - syscall.SIGVTALRM, - syscall.SIGWINCH, - syscall.SIGXCPU, - syscall.SIGXFSZ, - ) -} diff --git a/utils/signal_linux.go b/utils/signal_linux.go deleted file mode 100644 index 26cfd56967..0000000000 --- a/utils/signal_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -package utils - -import ( - "os" - "os/signal" - "syscall" -) - -func CatchAll(sigc chan os.Signal) { - signal.Notify(sigc, - syscall.SIGABRT, - syscall.SIGALRM, - syscall.SIGBUS, - syscall.SIGCHLD, - syscall.SIGCLD, - syscall.SIGCONT, - syscall.SIGFPE, - syscall.SIGHUP, - syscall.SIGILL, - syscall.SIGINT, - syscall.SIGIO, - syscall.SIGIOT, - syscall.SIGKILL, - syscall.SIGPIPE, - syscall.SIGPOLL, - syscall.SIGPROF, - syscall.SIGPWR, - syscall.SIGQUIT, - syscall.SIGSEGV, - syscall.SIGSTKFLT, - syscall.SIGSTOP, - syscall.SIGSYS, - syscall.SIGTERM, - syscall.SIGTRAP, - syscall.SIGTSTP, - syscall.SIGTTIN, - syscall.SIGTTOU, - syscall.SIGUNUSED, - syscall.SIGURG, - syscall.SIGUSR1, - syscall.SIGUSR2, - syscall.SIGVTALRM, - syscall.SIGWINCH, - syscall.SIGXCPU, - syscall.SIGXFSZ, - ) -} -- cgit v1.2.1 From 10dc16dcd3aa82be256e5072a25dcf18af8e3844 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 10 Mar 2014 13:50:16 -0700 Subject: Create portable signalMap Docker-DCO-1.1-Signed-off-by: Guillaume J. 
Charmes (github: creack) --- pkg/signal/signal.go | 8 +++ pkg/signal/signal_darwin.go | 70 +++++++++++------------ pkg/signal/signal_freebsd.go | 68 ++++++++++++----------- pkg/signal/signal_linux.go | 116 ++++++++++++--------------------------- pkg/signal/signal_unsupported.go | 9 +++ 5 files changed, 121 insertions(+), 150 deletions(-) create mode 100644 pkg/signal/signal_unsupported.go diff --git a/pkg/signal/signal.go b/pkg/signal/signal.go index 6f9874bd01..a673222628 100644 --- a/pkg/signal/signal.go +++ b/pkg/signal/signal.go @@ -5,6 +5,14 @@ import ( "os/signal" ) +func CatchAll(sigc chan os.Signal) { + handledSigs := []os.Signal{} + for _, s := range signalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + func StopCatch(sigc chan os.Signal) { signal.Stop(sigc) close(sigc) diff --git a/pkg/signal/signal_darwin.go b/pkg/signal/signal_darwin.go index 22a60ae18d..b290a8b53d 100644 --- a/pkg/signal/signal_darwin.go +++ b/pkg/signal/signal_darwin.go @@ -1,44 +1,40 @@ package signal import ( - "os" - "os/signal" "syscall" ) -func CatchAll(sigc chan os.Signal) { - signal.Notify(sigc, - syscall.SIGABRT, - syscall.SIGALRM, - syscall.SIGBUS, - syscall.SIGCHLD, - syscall.SIGCONT, - syscall.SIGEMT, - syscall.SIGFPE, - syscall.SIGHUP, - syscall.SIGILL, - syscall.SIGINFO, - syscall.SIGINT, - syscall.SIGIO, - syscall.SIGIOT, - syscall.SIGKILL, - syscall.SIGPIPE, - syscall.SIGPROF, - syscall.SIGQUIT, - syscall.SIGSEGV, - syscall.SIGSTOP, - syscall.SIGSYS, - syscall.SIGTERM, - syscall.SIGTRAP, - syscall.SIGTSTP, - syscall.SIGTTIN, - syscall.SIGTTOU, - syscall.SIGURG, - syscall.SIGUSR1, - syscall.SIGUSR2, - syscall.SIGVTALRM, - syscall.SIGWINCH, - syscall.SIGXCPU, - syscall.SIGXFSZ, - ) +var signalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUG": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INFO": syscall.SIGINFO, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, } diff --git a/pkg/signal/signal_freebsd.go b/pkg/signal/signal_freebsd.go index d27782217f..b7e3ff4f7c 100644 --- a/pkg/signal/signal_freebsd.go +++ b/pkg/signal/signal_freebsd.go @@ -6,37 +6,39 @@ import ( "syscall" ) -func CatchAll(sigc chan os.Signal) { - signal.Notify(sigc, - syscall.SIGABRT, - syscall.SIGALRM, - syscall.SIGBUS, - syscall.SIGCHLD, - syscall.SIGCONT, - syscall.SIGFPE, - syscall.SIGHUP, - syscall.SIGILL, - syscall.SIGINT, - syscall.SIGIO, - syscall.SIGIOT, - syscall.SIGKILL, - syscall.SIGPIPE, - syscall.SIGPROF, - syscall.SIGQUIT, - syscall.SIGSEGV, - syscall.SIGSTOP, - syscall.SIGSYS, - syscall.SIGTERM, - syscall.SIGTRAP, - syscall.SIGTSTP, - syscall.SIGTTIN, - syscall.SIGTTOU, - syscall.SIGURG, - syscall.SIGUSR1, - syscall.SIGUSR2, - syscall.SIGVTALRM, - syscall.SIGWINCH, - syscall.SIGXCPU, - syscall.SIGXFSZ, - ) +var signalMap = map[string]syscall.Signal{ + "ABRT": 
syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUF": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INFO": syscall.SIGINFO, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "LWP": syscall.SIGLWP, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "THR": syscall.SIGTHR, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, } diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go index b6b25d518b..cd8cb83e42 100644 --- a/pkg/signal/signal_linux.go +++ b/pkg/signal/signal_linux.go @@ -1,87 +1,43 @@ package signal import ( - "os" - "os/signal" "syscall" ) -var signalMap = map[string]syscall.Signal{} - -/* - syscall.SIGABRT, - syscall.SIGALRM, - syscall.SIGBUS, - syscall.SIGCHLD, - syscall.SIGCLD, - syscall.SIGCONT, - syscall.SIGFPE, - syscall.SIGHUP, - syscall.SIGILL, - syscall.SIGINT, - syscall.SIGIO, - syscall.SIGIOT, - syscall.SIGKILL, - syscall.SIGPIPE, - syscall.SIGPOLL, - syscall.SIGPROF, - syscall.SIGPWR, - syscall.SIGQUIT, - syscall.SIGSEGV, - syscall.SIGSTKFLT, - syscall.SIGSTOP, - syscall.SIGSYS, - syscall.SIGTERM, - syscall.SIGTRAP, - syscall.SIGTSTP, - syscall.SIGTTIN, - syscall.SIGTTOU, - syscall.SIGUNUSED, - syscall.SIGURG, - syscall.SIGUSR1, - syscall.SIGUSR2, - syscall.SIGVTALRM, - syscall.SIGWINCH, - syscall.SIGXCPU, - syscall.SIGXFSZ, -*/ - -func CatchAll(sigc chan os.Signal) { - signal.Notify(sigc, - syscall.SIGABRT, - syscall.SIGALRM, - syscall.SIGBUS, - syscall.SIGCHLD, - syscall.SIGCLD, - syscall.SIGCONT, - syscall.SIGFPE, - syscall.SIGHUP, - syscall.SIGILL, - syscall.SIGINT, - syscall.SIGIO, - syscall.SIGIOT, - syscall.SIGKILL, - syscall.SIGPIPE, - syscall.SIGPOLL, - syscall.SIGPROF, - syscall.SIGPWR, - syscall.SIGQUIT, - syscall.SIGSEGV, - syscall.SIGSTKFLT, - syscall.SIGSTOP, - syscall.SIGSYS, - syscall.SIGTERM, - syscall.SIGTRAP, - syscall.SIGTSTP, - syscall.SIGTTIN, - syscall.SIGTTOU, - syscall.SIGUNUSED, - syscall.SIGURG, - syscall.SIGUSR1, - syscall.SIGUSR2, - syscall.SIGVTALRM, - syscall.SIGWINCH, - syscall.SIGXCPU, - syscall.SIGXFSZ, - ) +var signalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUS": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CLD": syscall.SIGCLD, + "CONT": syscall.SIGCONT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "POLL": syscall.SIGPOLL, + "PROF": syscall.SIGPROF, + "PWR": syscall.SIGPWR, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STKFLT": syscall.SIGSTKFLT, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "UNUSED": syscall.SIGUNUSED, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, + "VTALRM": syscall.SIGVTALRM, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": 
syscall.SIGXFSZ, } diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go new file mode 100644 index 0000000000..2c49a0b0f6 --- /dev/null +++ b/pkg/signal/signal_unsupported.go @@ -0,0 +1,9 @@ +// +build !linux,!darwin,!freebsd + +package signal + +import ( + "syscall" +) + +var signalMap = map[string]syscall.Signal{} -- cgit v1.2.1 From 157f24ca77a38f7c5c2b22322a2a353d5098a21e Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 10 Mar 2014 14:22:27 -0700 Subject: Make docker use the signal pkg with strings Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- api/client.go | 12 +++++++- pkg/signal/signal.go | 2 +- pkg/signal/signal_darwin.go | 64 ++++++++++++++++++++-------------------- pkg/signal/signal_freebsd.go | 2 +- pkg/signal/signal_linux.go | 2 +- pkg/signal/signal_unsupported.go | 2 +- server.go | 63 ++++++++++++--------------------------- 7 files changed, 66 insertions(+), 81 deletions(-) diff --git a/api/client.go b/api/client.go index 5e110d49f5..35ce5c6969 100644 --- a/api/client.go +++ b/api/client.go @@ -540,7 +540,17 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { if s == syscall.SIGCHLD { continue } - if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%d", cid, s), nil, false)); err != nil { + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + utils.Errorf("Unsupported signal: %d. Discarding.", s) + } + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { utils.Debugf("Error sending signal: %s", err) } } diff --git a/pkg/signal/signal.go b/pkg/signal/signal.go index a673222628..63337542d7 100644 --- a/pkg/signal/signal.go +++ b/pkg/signal/signal.go @@ -7,7 +7,7 @@ import ( func CatchAll(sigc chan os.Signal) { handledSigs := []os.Signal{} - for _, s := range signalMap { + for _, s := range SignalMap { handledSigs = append(handledSigs, s) } signal.Notify(sigc, handledSigs...) 
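Taken together, the two hunks above replace hard-coded signal lists with a per-platform name-to-signal map: CatchAll now registers whatever the map contains, and the client reverse-looks-up the caught signal so it can be forwarded by name rather than by number. A rough, self-contained sketch of that pattern follows; SignalMap here is a trimmed stand-in and nameOf is an illustrative helper, not the exact code in pkg/signal or api/client.go.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// Trimmed stand-in for the per-platform SignalMap defined in pkg/signal.
var SignalMap = map[string]syscall.Signal{
	"HUP":   syscall.SIGHUP,
	"INT":   syscall.SIGINT,
	"TERM":  syscall.SIGTERM,
	"WINCH": syscall.SIGWINCH,
}

// CatchAll mirrors pkg/signal.CatchAll: register every signal the map knows about.
func CatchAll(sigc chan os.Signal) {
	handledSigs := []os.Signal{}
	for _, s := range SignalMap {
		handledSigs = append(handledSigs, s)
	}
	signal.Notify(sigc, handledSigs...)
}

// nameOf is the reverse lookup the client performs before hitting
// /containers/{id}/kill?signal=NAME; it returns "" for unmapped signals.
func nameOf(s os.Signal) string {
	for name, sig := range SignalMap {
		if sig == s {
			return name
		}
	}
	return ""
}

func main() {
	sigc := make(chan os.Signal, 1)
	CatchAll(sigc)
	s := <-sigc
	fmt.Printf("caught %v, forwarding as %q\n", s, nameOf(s))
	signal.Stop(sigc)
}

Sending names instead of raw numbers keeps the wire format portable: the daemon resolves the string against its own platform's map (and still accepts a small numeric value), so client and server no longer need to agree on signal numbers.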
diff --git a/pkg/signal/signal_darwin.go b/pkg/signal/signal_darwin.go index b290a8b53d..fcd3a8f2c9 100644 --- a/pkg/signal/signal_darwin.go +++ b/pkg/signal/signal_darwin.go @@ -4,37 +4,37 @@ import ( "syscall" ) -var signalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUG": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INFO": syscall.SIGINFO, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, +var SignalMap = map[string]syscall.Signal{ + "ABRT": syscall.SIGABRT, + "ALRM": syscall.SIGALRM, + "BUG": syscall.SIGBUS, + "CHLD": syscall.SIGCHLD, + "CONT": syscall.SIGCONT, + "EMT": syscall.SIGEMT, + "FPE": syscall.SIGFPE, + "HUP": syscall.SIGHUP, + "ILL": syscall.SIGILL, + "INFO": syscall.SIGINFO, + "INT": syscall.SIGINT, + "IO": syscall.SIGIO, + "IOT": syscall.SIGIOT, + "KILL": syscall.SIGKILL, + "PIPE": syscall.SIGPIPE, + "PROF": syscall.SIGPROF, + "QUIT": syscall.SIGQUIT, + "SEGV": syscall.SIGSEGV, + "STOP": syscall.SIGSTOP, + "SYS": syscall.SIGSYS, + "TERM": syscall.SIGTERM, + "TRAP": syscall.SIGTRAP, + "TSTP": syscall.SIGTSTP, + "TTIN": syscall.SIGTTIN, + "TTOU": syscall.SIGTTOU, + "URG": syscall.SIGURG, + "USR1": syscall.SIGUSR1, + "USR2": syscall.SIGUSR2, "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, + "WINCH": syscall.SIGWINCH, + "XCPU": syscall.SIGXCPU, + "XFSZ": syscall.SIGXFSZ, } diff --git a/pkg/signal/signal_freebsd.go b/pkg/signal/signal_freebsd.go index b7e3ff4f7c..da042d7e72 100644 --- a/pkg/signal/signal_freebsd.go +++ b/pkg/signal/signal_freebsd.go @@ -6,7 +6,7 @@ import ( "syscall" ) -var signalMap = map[string]syscall.Signal{ +var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.SIGABRT, "ALRM": syscall.SIGALRM, "BUF": syscall.SIGBUS, diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go index cd8cb83e42..a62f79d4af 100644 --- a/pkg/signal/signal_linux.go +++ b/pkg/signal/signal_linux.go @@ -4,7 +4,7 @@ import ( "syscall" ) -var signalMap = map[string]syscall.Signal{ +var SignalMap = map[string]syscall.Signal{ "ABRT": syscall.SIGABRT, "ALRM": syscall.SIGALRM, "BUS": syscall.SIGBUS, diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go index 2c49a0b0f6..99f9465970 100644 --- a/pkg/signal/signal_unsupported.go +++ b/pkg/signal/signal_unsupported.go @@ -6,4 +6,4 @@ import ( "syscall" ) -var signalMap = map[string]syscall.Signal{} +var SignalMap = map[string]syscall.Signal{} diff --git a/server.go b/server.go index d824d78d7a..610b3ccfba 100644 --- a/server.go +++ b/server.go @@ -8,6 +8,7 @@ import ( "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/pkg/graphdb" + "github.com/dotcloud/docker/pkg/signal" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" @@ -18,7 +19,7 @@ import ( "net/url" "os" "os/exec" - "os/signal" + gosignal "os/signal" "path" "path/filepath" 
"runtime" @@ -47,7 +48,7 @@ func InitServer(job *engine.Job) engine.Status { } job.Logf("Setting up signal traps") c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) + gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) go func() { sig := <-c log.Printf("Received signal '%v', exiting\n", sig) @@ -122,56 +123,30 @@ func (v *simpleVersionInfo) Version() string { // for the container to exit. // If a signal is given, then just send it to the container and return. func (srv *Server) ContainerKill(job *engine.Job) engine.Status { - signalMap := map[string]syscall.Signal{ - "HUP": syscall.SIGHUP, - "INT": syscall.SIGINT, - "QUIT": syscall.SIGQUIT, - "ILL": syscall.SIGILL, - "TRAP": syscall.SIGTRAP, - "ABRT": syscall.SIGABRT, - "BUS": syscall.SIGBUS, - "FPE": syscall.SIGFPE, - "KILL": syscall.SIGKILL, - "USR1": syscall.SIGUSR1, - "SEGV": syscall.SIGSEGV, - "USR2": syscall.SIGUSR2, - "PIPE": syscall.SIGPIPE, - "ALRM": syscall.SIGALRM, - "TERM": syscall.SIGTERM, - //"STKFLT": syscall.SIGSTKFLT, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "STOP": syscall.SIGSTOP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, - "VTALRM": syscall.SIGVTALRM, - "PROF": syscall.SIGPROF, - "WINCH": syscall.SIGWINCH, - "IO": syscall.SIGIO, - //"PWR": syscall.SIGPWR, - "SYS": syscall.SIGSYS, - } - if n := len(job.Args); n < 1 || n > 2 { return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) } - name := job.Args[0] - var sig uint64 + var ( + name = job.Args[0] + sig uint64 + err error + ) + + // If we have a signal, look at it. Otherwise, do nothing if len(job.Args) == 2 && job.Args[1] != "" { - sig = uint64(signalMap[job.Args[1]]) - if sig == 0 { - var err error - // The largest legal signal is 31, so let's parse on 5 bits - sig, err = strconv.ParseUint(job.Args[1], 10, 5) - if err != nil { + // Check if we passed the singal as a number: + // The largest legal signal is 31, so let's parse on 5 bits + sig, err = strconv.ParseUint(job.Args[1], 10, 5) + if err != nil { + // The signal is not a number, treat it as a string + sig = uint64(signal.SignalMap[job.Args[1]]) + if sig == 0 { return job.Errorf("Invalid signal: %s", job.Args[1]) } + } } + if container := srv.runtime.Get(name); container != nil { // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { -- cgit v1.2.1 From 8301fc8e56503d5a0ea2316a0778faf4cf5f5f1e Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Wed, 26 Feb 2014 23:20:58 +0000 Subject: move git clone from daemon to client Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) add a little doc --- api/client.go | 26 ++++++++++++++++++++++---- docs/sources/reference/commandline/cli.rst | 16 ++++++++++------ utils/utils.go | 2 +- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/api/client.go b/api/client.go index 7f8921d9e1..6dabeac835 100644 --- a/api/client.go +++ b/api/client.go @@ -24,6 +24,7 @@ import ( "net/http/httputil" "net/url" "os" + "os/exec" gosignal "os/signal" "path" "reflect" @@ -168,17 +169,34 @@ func (cli *DockerCli) CmdBuild(args ...string) error { return err } context, err = archive.Generate("Dockerfile", string(dockerfile)) - } else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) { + } else if utils.IsURL(cmd.Arg(0)) && !utils.IsGIT(cmd.Arg(0)) { isRemote = true } else { - if _, err := 
os.Stat(cmd.Arg(0)); err != nil { + root := cmd.Arg(0) + if utils.IsGIT(root) { + remoteURL := cmd.Arg(0) + if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) { + remoteURL = "https://" + remoteURL + } + + root, err = ioutil.TempDir("", "docker-build-git") + if err != nil { + return err + } + defer os.RemoveAll(root) + + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { + return fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + } + if _, err := os.Stat(root); err != nil { return err } - filename := path.Join(cmd.Arg(0), "Dockerfile") + filename := path.Join(root, "Dockerfile") if _, err = os.Stat(filename); os.IsNotExist(err) { return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) } - context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed) + context, err = archive.Tar(root, archive.Uncompressed) } var body io.Reader // Setup an upload progress bar diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 2404e29b29..6fe2e2943e 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -202,12 +202,16 @@ Examples: --no-cache: Do not use the cache when building the image. --rm=true: Remove intermediate containers after a successful build -The files at ``PATH`` or ``URL`` are called the "context" of the build. The -build process may refer to any of the files in the context, for example when -using an :ref:`ADD ` instruction. When a single ``Dockerfile`` -is given as ``URL``, then no context is set. When a Git repository is set as -``URL``, then the repository is used as the context. Git repositories are -cloned with their submodules (`git clone --recursive`). +The files at ``PATH`` or ``URL`` are called the "context" of the build. +The build process may refer to any of the files in the context, for example when +using an :ref:`ADD ` instruction. +When a single ``Dockerfile`` is given as ``URL``, then no context is set. + +When a Git repository is set as ``URL``, then the repository is used as the context. +The Git repository is cloned with its submodules (`git clone --recursive`). +A fresh git clone occurs in a temporary directory on your local host, and then this +is sent to the Docker daemon as the context. +This way, your local user credentials and vpn's etc can be used to access private repositories .. _cli_build_examples: diff --git a/utils/utils.go b/utils/utils.go index e4cb04f39c..57a8200a7c 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -714,7 +714,7 @@ func IsURL(str string) bool { } func IsGIT(str string) bool { - return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") + return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "git@github.com:") || (strings.HasSuffix(str, ".git") && IsURL(str)) } // GetResolvConf opens and read the content of /etc/resolv.conf. 
-- cgit v1.2.1 From 0d6275b298ebb9161c2f55d4b4ac0f87603a11cd Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Wed, 5 Mar 2014 01:54:08 +0000 Subject: if client has no git, use server Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/client.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/api/client.go b/api/client.go index 6dabeac835..4854c19013 100644 --- a/api/client.go +++ b/api/client.go @@ -161,6 +161,8 @@ func (cli *DockerCli) CmdBuild(args ...string) error { err error ) + _, err = exec.LookPath("git") + hasGit := err == nil if cmd.Arg(0) == "-" { // As a special case, 'docker build -' will build from an empty context with the // contents of stdin as a Dockerfile @@ -169,7 +171,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { return err } context, err = archive.Generate("Dockerfile", string(dockerfile)) - } else if utils.IsURL(cmd.Arg(0)) && !utils.IsGIT(cmd.Arg(0)) { + } else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) { isRemote = true } else { root := cmd.Arg(0) -- cgit v1.2.1 From 18ea183ea0abd18cde0d17e57fc0fa9b19cfab08 Mon Sep 17 00:00:00 2001 From: Dolph Mathews Date: Mon, 10 Mar 2014 21:51:19 -0500 Subject: spelling correction s/singal/signal/ Docker-DCO-1.1-Signed-off-by: Dolph Mathews (github: dolph) --- server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.go b/server.go index f557c02b64..48bb6f9805 100644 --- a/server.go +++ b/server.go @@ -138,7 +138,7 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { // If we have a signal, look at it. Otherwise, do nothing if len(job.Args) == 2 && job.Args[1] != "" { - // Check if we passed the singal as a number: + // Check if we passed the signal as a number: // The largest legal signal is 31, so let's parse on 5 bits sig, err = strconv.ParseUint(job.Args[1], 10, 5) if err != nil { -- cgit v1.2.1 From 915d967f556bc7bb3faea34db8a06ea64fd5de92 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 10 Mar 2014 20:26:45 -0700 Subject: Update email + add self to pkg/signal Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- MAINTAINERS | 2 +- contrib/host-integration/Dockerfile.dev | 2 +- contrib/host-integration/Dockerfile.min | 2 +- docs/sources/reference/builder.rst | 2 +- execdriver/MAINTAINERS | 2 +- pkg/libcontainer/MAINTAINERS | 2 +- pkg/netlink/MAINTAINERS | 2 +- pkg/signal/MAINTAINERS | 2 ++ pkg/term/MAINTAINERS | 2 +- 9 files changed, 10 insertions(+), 8 deletions(-) create mode 100644 pkg/signal/MAINTAINERS diff --git a/MAINTAINERS b/MAINTAINERS index 49d14ba0bd..d1f4d15491 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1,5 +1,5 @@ Solomon Hykes (@shykes) -Guillaume Charmes (@creack) +Guillaume J. Charmes (@creack) Victor Vieux (@vieux) Michael Crosby (@crosbymichael) .travis.yml: Tianon Gravi (@tianon) diff --git a/contrib/host-integration/Dockerfile.dev b/contrib/host-integration/Dockerfile.dev index 161416e750..800216532f 100644 --- a/contrib/host-integration/Dockerfile.dev +++ b/contrib/host-integration/Dockerfile.dev @@ -6,7 +6,7 @@ # FROM ubuntu:12.10 -MAINTAINER Guillaume J. Charmes +MAINTAINER Guillaume J. Charmes RUN apt-get update && apt-get install -y wget git mercurial diff --git a/contrib/host-integration/Dockerfile.min b/contrib/host-integration/Dockerfile.min index 1a7b3a9d82..60bb89b986 100644 --- a/contrib/host-integration/Dockerfile.min +++ b/contrib/host-integration/Dockerfile.min @@ -1,4 +1,4 @@ FROM busybox -MAINTAINER Guillaume J. 
Charmes +MAINTAINER Guillaume J. Charmes ADD manager /usr/bin/ ENTRYPOINT ["/usr/bin/manager"] diff --git a/docs/sources/reference/builder.rst b/docs/sources/reference/builder.rst index 9f7a816801..3c48939c82 100644 --- a/docs/sources/reference/builder.rst +++ b/docs/sources/reference/builder.rst @@ -481,7 +481,7 @@ For example you might add something like this: # VERSION 0.0.1 FROM ubuntu - MAINTAINER Guillaume J. Charmes + MAINTAINER Guillaume J. Charmes # make sure the package repository is up to date RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list diff --git a/execdriver/MAINTAINERS b/execdriver/MAINTAINERS index e53d933d47..1cb551364d 100644 --- a/execdriver/MAINTAINERS +++ b/execdriver/MAINTAINERS @@ -1,2 +1,2 @@ Michael Crosby (@crosbymichael) -Guillaume Charmes (@creack) +Guillaume J. Charmes (@creack) diff --git a/pkg/libcontainer/MAINTAINERS b/pkg/libcontainer/MAINTAINERS index e53d933d47..1cb551364d 100644 --- a/pkg/libcontainer/MAINTAINERS +++ b/pkg/libcontainer/MAINTAINERS @@ -1,2 +1,2 @@ Michael Crosby (@crosbymichael) -Guillaume Charmes (@creack) +Guillaume J. Charmes (@creack) diff --git a/pkg/netlink/MAINTAINERS b/pkg/netlink/MAINTAINERS index e53d933d47..1cb551364d 100644 --- a/pkg/netlink/MAINTAINERS +++ b/pkg/netlink/MAINTAINERS @@ -1,2 +1,2 @@ Michael Crosby (@crosbymichael) -Guillaume Charmes (@creack) +Guillaume J. Charmes (@creack) diff --git a/pkg/signal/MAINTAINERS b/pkg/signal/MAINTAINERS new file mode 100644 index 0000000000..3300331598 --- /dev/null +++ b/pkg/signal/MAINTAINERS @@ -0,0 +1,2 @@ +Guillaume J. Charmes (@creack) + diff --git a/pkg/term/MAINTAINERS b/pkg/term/MAINTAINERS index 48d4d91b2a..15b8ac3729 100644 --- a/pkg/term/MAINTAINERS +++ b/pkg/term/MAINTAINERS @@ -1,2 +1,2 @@ -Guillaume Charmes (@creack) +Guillaume J. Charmes (@creack) Solomon Hykes (@shykes) -- cgit v1.2.1 From b21f8872cc684c95a2e30cec9f7c744a78a819f8 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Tue, 11 Mar 2014 01:21:59 -0600 Subject: Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- contrib/init/sysvinit-debian/docker | 34 ++++++++++++++++++++++++---------- contrib/init/upstart/docker.conf | 33 +++++++++++++++++++++++---------- 2 files changed, 47 insertions(+), 20 deletions(-) diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker index 510683a459..62e8f5f0a5 100755 --- a/contrib/init/sysvinit-debian/docker +++ b/contrib/init/sysvinit-debian/docker @@ -50,20 +50,34 @@ fail_unless_root() { fi } +cgroupfs_mount() { + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + return + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +} + case "$1" in start) fail_unless_root - if ! grep -q cgroup /proc/mounts; then - # rough approximation of cgroupfs-mount - mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup - for sys in $(cut -d' ' -f1 /proc/cgroups); do - mkdir -p /sys/fs/cgroup/$sys - if ! 
mount -n -t cgroup -o $sys cgroup /sys/fs/cgroup/$sys 2>/dev/null; then - rmdir /sys/fs/cgroup/$sys 2>/dev/null || true - fi - done - fi + cgroupfs_mount touch /var/log/docker.log chgrp docker /var/log/docker.log diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf index e2cc4536e1..047f21c092 100644 --- a/contrib/init/upstart/docker.conf +++ b/contrib/init/upstart/docker.conf @@ -5,6 +5,29 @@ stop on runlevel [!2345] respawn +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + exit 0 + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + script # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) DOCKER=/usr/bin/$UPSTART_JOB @@ -12,15 +35,5 @@ script if [ -f /etc/default/$UPSTART_JOB ]; then . /etc/default/$UPSTART_JOB fi - if ! grep -q cgroup /proc/mounts; then - # rough approximation of cgroupfs-mount - mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup - for sys in $(cut -d' ' -f1 /proc/cgroups); do - mkdir -p /sys/fs/cgroup/$sys - if ! mount -n -t cgroup -o $sys cgroup /sys/fs/cgroup/$sys 2>/dev/null; then - rmdir /sys/fs/cgroup/$sys 2>/dev/null || true - fi - done - fi "$DOCKER" -d $DOCKER_OPTS end script -- cgit v1.2.1 From 76dc670f413de64361a8bb3efa3381331e796b21 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Tue, 11 Mar 2014 01:40:31 -0600 Subject: Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- contrib/init/sysvinit-debian/docker | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker index 62e8f5f0a5..67f0d2807f 100755 --- a/contrib/init/sysvinit-debian/docker +++ b/contrib/init/sysvinit-debian/docker @@ -21,6 +21,7 @@ BASE=$(basename $0) # modify these in /etc/default/$BASE (/etc/default/docker) DOCKER=/usr/bin/$BASE DOCKER_PIDFILE=/var/run/$BASE.pid +DOCKER_LOGFILE=/var/log/$BASE.log DOCKER_OPTS= DOCKER_DESC="Docker" @@ -79,8 +80,8 @@ case "$1" in cgroupfs_mount - touch /var/log/docker.log - chgrp docker /var/log/docker.log + touch "$DOCKER_LOGFILE" + chgrp docker "$DOCKER_LOGFILE" log_begin_msg "Starting $DOCKER_DESC: $BASE" start-stop-daemon --start --background \ @@ -90,7 +91,7 @@ case "$1" in -- \ -d -p "$DOCKER_PIDFILE" \ $DOCKER_OPTS \ - > /var/log/docker.log 2>&1 + >> "$DOCKER_LOGFILE" 2>&1 log_end_msg $? 
;; -- cgit v1.2.1 From b348ee0fd0e4f8d2cb453a08f2b0d174550a339d Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 11 Mar 2014 20:28:26 +1000 Subject: add Net::Docker CPAN module Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/reference/api/remote_api_client_libraries.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/reference/api/remote_api_client_libraries.rst b/docs/sources/reference/api/remote_api_client_libraries.rst index 9bab343bf5..e58c7ced39 100644 --- a/docs/sources/reference/api/remote_api_client_libraries.rst +++ b/docs/sources/reference/api/remote_api_client_libraries.rst @@ -45,3 +45,5 @@ and we will add the libraries here. +----------------------+----------------+--------------------------------------------+----------+ | PHP | Docker-PHP | http://stage1.github.io/docker-php/ | Active | +----------------------+----------------+--------------------------------------------+----------+ +| Perl | Net::Docker | https://metacpan.org/pod/Net::Docker | Active | ++----------------------+----------------+--------------------------------------------+----------+ -- cgit v1.2.1 From b2cd89056f5c49746ee668946ce4e1771f3ce368 Mon Sep 17 00:00:00 2001 From: Kato Kazuyoshi Date: Tue, 11 Mar 2014 22:45:47 +0900 Subject: Like signal_linux.go, we don't have import os and os/signal Docker-DCO-1.1-Signed-off-by: Kato Kazuyoshi (github: kzys) --- pkg/signal/signal_freebsd.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/signal/signal_freebsd.go b/pkg/signal/signal_freebsd.go index da042d7e72..102e918486 100644 --- a/pkg/signal/signal_freebsd.go +++ b/pkg/signal/signal_freebsd.go @@ -1,8 +1,6 @@ package signal import ( - "os" - "os/signal" "syscall" ) -- cgit v1.2.1 From 07c35b41a5a93f31111d47afcb1e6d2926b492a0 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Wed, 5 Mar 2014 10:40:55 +0100 Subject: Move execdriver construction into execdriver/execdrivers This can't be in execdriver (dependency loop) but should not be hardcoded inside runtime.go either. So we put it in a subpackage. 
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- execdriver/execdrivers/execdrivers.go | 23 +++++++++++++++++++++++ runtime/runtime.go | 20 +++----------------- 2 files changed, 26 insertions(+), 17 deletions(-) create mode 100644 execdriver/execdrivers/execdrivers.go diff --git a/execdriver/execdrivers/execdrivers.go b/execdriver/execdrivers/execdrivers.go new file mode 100644 index 0000000000..95b2fc634d --- /dev/null +++ b/execdriver/execdrivers/execdrivers.go @@ -0,0 +1,23 @@ +package execdrivers + +import ( + "fmt" + "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/execdriver/lxc" + "github.com/dotcloud/docker/execdriver/native" + "github.com/dotcloud/docker/pkg/sysinfo" + "path" +) + +func NewDriver(name, root string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { + switch name { + case "lxc": + // we want to five the lxc driver the full docker root because it needs + // to access and write config and template files in /var/lib/docker/containers/* + // to be backwards compatible + return lxc.NewDriver(root, sysInfo.AppArmor) + case "native": + return native.NewDriver(path.Join(root, "execdriver", "native")) + } + return nil, fmt.Errorf("unknown exec driver %s", name) +} diff --git a/runtime/runtime.go b/runtime/runtime.go index c11c309ad8..72245a4555 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -8,8 +8,8 @@ import ( "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/execdriver/execdrivers" "github.com/dotcloud/docker/execdriver/lxc" - "github.com/dotcloud/docker/execdriver/native" "github.com/dotcloud/docker/graph" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/graphdriver/aufs" @@ -732,22 +732,8 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (* sysInitPath = localCopy } - var ( - ed execdriver.Driver - sysInfo = sysinfo.New(false) - ) - - switch config.ExecDriver { - case "lxc": - // we want to five the lxc driver the full docker root because it needs - // to access and write config and template files in /var/lib/docker/containers/* - // to be backwards compatible - ed, err = lxc.NewDriver(config.Root, sysInfo.AppArmor) - case "native": - ed, err = native.NewDriver(path.Join(config.Root, "execdriver", "native")) - default: - return nil, fmt.Errorf("unknown exec driver %s", config.ExecDriver) - } + sysInfo := sysinfo.New(false) + ed, err := execdrivers.NewDriver(config.ExecDriver, config.Root, sysInfo) if err != nil { return nil, err } -- cgit v1.2.1 From 15e52ccaadea996b409e2f62bcbdb1a088619e35 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 5 Mar 2014 20:45:18 -0800 Subject: Add deprecation warning for -t on pull Closes #3410 Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- api/client.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/client.go b/api/client.go index 338a5b0de1..4d3b49d96e 100644 --- a/api/client.go +++ b/api/client.go @@ -1041,8 +1041,8 @@ func (cli *DockerCli) CmdPush(args ...string) error { } func (cli *DockerCli) CmdPull(args ...string) error { - cmd := cli.Subcmd("pull", "NAME", "Pull an image or a repository from the registry") - tag := cmd.String([]string{"t", "-tag"}, "", "Download tagged image in repository") + cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry") + tag := cmd.String([]string{"#t", "#-tag"}, "", "Download tagged image in 
repository") if err := cmd.Parse(args); err != nil { return nil } -- cgit v1.2.1 From 721562f29685ebf3f3698113cf0ce8000c02e606 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Tue, 11 Mar 2014 11:39:28 -0700 Subject: Remove goroutine leak upon error Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- execdriver/lxc/driver.go | 3 +++ server.go | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/execdriver/lxc/driver.go b/execdriver/lxc/driver.go index 765a52ee43..b398cb1a37 100644 --- a/execdriver/lxc/driver.go +++ b/execdriver/lxc/driver.go @@ -168,6 +168,9 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba // Poll lxc for RUNNING status pid, err := d.waitForStart(c, waitLock) if err != nil { + if c.Process != nil { + c.Process.Kill() + } return -1, err } c.ContainerPid = pid diff --git a/server.go b/server.go index 48bb6f9805..d6a4036faf 100644 --- a/server.go +++ b/server.go @@ -2384,7 +2384,13 @@ func (srv *Server) IsRunning() bool { } func (srv *Server) Close() error { + if srv == nil { + return nil + } srv.SetRunning(false) + if srv.runtime == nil { + return nil + } return srv.runtime.Close() } -- cgit v1.2.1 From fd0737df2c8ec8f0a4b4d8f20b2ad6e4c96adbd3 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Tue, 11 Mar 2014 12:08:32 -0700 Subject: Update parseLxcInfo to comply with new lxc1.0 format Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- execdriver/lxc/info.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/execdriver/lxc/info.go b/execdriver/lxc/info.go index 3b2ea0d07f..27b4c58604 100644 --- a/execdriver/lxc/info.go +++ b/execdriver/lxc/info.go @@ -36,7 +36,7 @@ func parseLxcInfo(raw string) (*lxcInfo, error) { if len(parts) < 2 { continue } - switch strings.TrimSpace(parts[0]) { + switch strings.ToLower(strings.TrimSpace(parts[0])) { case "state": info.Running = strings.TrimSpace(parts[1]) == "RUNNING" case "pid": -- cgit v1.2.1 From 95e5910ab23e3ef7b0154f64e1c6ae01bc647ab3 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Tue, 11 Mar 2014 17:10:05 -0700 Subject: Fix attach exit on darwin Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- api/client.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/api/client.go b/api/client.go index 599d82b39a..3dc64e6d55 100644 --- a/api/client.go +++ b/api/client.go @@ -28,7 +28,7 @@ import ( "path" "reflect" "regexp" - "runtime" + goruntime "runtime" "strconv" "strings" "syscall" @@ -367,7 +367,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error { if dockerversion.VERSION != "" { fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) } - fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version()) + fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version()) if dockerversion.GITCOMMIT != "" { fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) } @@ -2249,7 +2249,12 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea if setRawTerminal && cli.isTerminal { term.RestoreTerminal(cli.terminalFd, oldState) } - in.Close() + // For some reason this Close call blocks on darwin.. + // As the client exists right after, simply discard the close + // until we find a better solution. 
+ if goruntime.GOOS != "darwin" { + in.Close() + } } }() -- cgit v1.2.1 From baa70e975186bb0ee8a4b16b9374cb6e794f8975 Mon Sep 17 00:00:00 2001 From: Scott Collier Date: Sat, 8 Mar 2014 16:32:00 -0600 Subject: Adding the new options to the `docker ps` documentation. URL of documentation page is: http://docs.docker.io/en/latest/reference/commandline/cli/#ps Docker-DCO-1.1-Signed-off-by: Scott Collier (github: scollier) Adding the new options to the `docker ps` documentation. URL of documentation page is: http://docs.docker.io/en/latest/reference/commandline/cli/#ps Docker-DCO-1.1-Signed-off-by: Scott Collier (github: scollier) Adding CLI options to the commandline reference documentation. URLs of pages are: http://docs.docker.io/en/latest/reference/commandline/cli/ Docker-DCO-1.1-Signed-off-by: Scott Collier (github: scollier) changing indention Docker-DCO-1.1-Signed-off-by: Scott Collier (github: scollier) --- docs/sources/reference/commandline/cli.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 6fe9b3dfea..07789cf4bd 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -80,7 +80,9 @@ Commands -g, --graph="/var/lib/docker": Path to use as the root of the docker runtime --icc=true: Enable inter-container communication --ip="0.0.0.0": Default IP address to use when binding container ports + --ip-forward=true: Disable enabling of net.ipv4.ip_forward --iptables=true: Disable docker's addition of iptables rules + --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if not default route is available -p, --pidfile="/var/run/docker.pid": Path to use for daemon PID file -r, --restart=true: Restart previously running containers -s, --storage-driver="": Force the docker runtime to use a specific storage driver @@ -967,6 +969,8 @@ The last container is marked as a ``Ghost`` container. It is a container that wa Pull an image or a repository from the registry + -t, --tag="": Download tagged image in repository + .. _cli_push: @@ -1005,6 +1009,7 @@ The last container is marked as a ``Ghost`` container. 
It is a container that wa Remove one or more containers -l, --link="": Remove the link instead of the actual container -f, --force=false: Force removal of running container + -v, --volumes=false: Remove the volumes associated to the container Known Issues (rm) ~~~~~~~~~~~~~~~~~ -- cgit v1.2.1 From f0eb227548427f6fc829f2b270ad83d22bd90c69 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Wed, 12 Mar 2014 00:51:46 +0000 Subject: improve deprecation message Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- pkg/mflag/example/example.go | 3 ++- pkg/mflag/flag.go | 15 +++++++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/pkg/mflag/example/example.go b/pkg/mflag/example/example.go index ed940e8d70..ce9dd30e4c 100644 --- a/pkg/mflag/example/example.go +++ b/pkg/mflag/example/example.go @@ -13,7 +13,8 @@ var ( func init() { flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp") - flag.BoolVar(&b, []string{"b"}, false, "a simple bool") + flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool") + flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool") flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool") flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer") flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go index f16f641341..fc732d23a0 100644 --- a/pkg/mflag/flag.go +++ b/pkg/mflag/flag.go @@ -820,9 +820,20 @@ func (f *FlagSet) parseOne() (bool, string, error) { f.actual = make(map[string]*Flag) } f.actual[name] = flag - for _, n := range flag.Names { + for i, n := range flag.Names { if n == fmt.Sprintf("#%s", name) { - fmt.Fprintf(f.out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) + replacement := "" + for j := i; j < len(flag.Names); j++ { + if flag.Names[j][0] != '#' { + replacement = flag.Names[j] + break + } + } + if replacement != "" { + fmt.Fprintf(f.out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) + } else { + fmt.Fprintf(f.out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) + } } } return true, "", nil -- cgit v1.2.1 From 7178b285a334ae202c9b6022a4917fd51733f1d1 Mon Sep 17 00:00:00 2001 From: Scott Collier Date: Sat, 8 Mar 2014 18:07:19 -0600 Subject: Adding CLI options to the commandline reference documentation. Fixing bad DCO sig URLs of pages are: http://docs.docker.io/en/latest/reference/commandline/cli/ Docker-DCO-1.1-Signed-off-by: Scott Collier (github: scollier) --- docs/sources/reference/commandline/cli.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 07789cf4bd..b00d5c0b95 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -969,7 +969,9 @@ The last container is marked as a ``Ghost`` container. It is a container that wa Pull an image or a repository from the registry +<<<<<<< HEAD -t, --tag="": Download tagged image in repository +>>>>>>> b47d9c5... Adding CLI options to the commandline reference documentation. .. 
_cli_push: -- cgit v1.2.1 From fbf74eb079f6a96e006e703cb36c434206757fe6 Mon Sep 17 00:00:00 2001 From: Scott Collier Date: Tue, 11 Mar 2014 20:00:16 -0500 Subject: Removing HEAD tag from last commit Docker-DCO-1.1-Signed-off-by: Scott Collier (github: scollier) --- docs/sources/reference/commandline/cli.rst | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index b00d5c0b95..07789cf4bd 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -969,9 +969,7 @@ The last container is marked as a ``Ghost`` container. It is a container that wa Pull an image or a repository from the registry -<<<<<<< HEAD -t, --tag="": Download tagged image in repository ->>>>>>> b47d9c5... Adding CLI options to the commandline reference documentation. .. _cli_push: -- cgit v1.2.1 From b7ae9984fb541e8b53d09016e78ff352ce310c25 Mon Sep 17 00:00:00 2001 From: Ken ICHIKAWA Date: Wed, 12 Mar 2014 13:44:01 +0900 Subject: Update daemon docs and help messages for --iptables and --ip-forward Fix docs and help messages of --iptables and --ip-forward to describe the true case behaviour Docker-DCO-1.1-Signed-off-by: Ken ICHIKAWA (github: ichik1) --- docker/docker.go | 4 ++-- docs/sources/reference/commandline/cli.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/docker.go b/docker/docker.go index b783c6da02..cc4d40f3ac 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -35,8 +35,8 @@ func main() { flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group") flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") flDns = opts.NewListOpts(opts.ValidateIp4Address) - flEnableIptables = flag.Bool([]string{"#iptables", "-iptables"}, true, "Disable docker's addition of iptables rules") - flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Disable enabling of net.ipv4.ip_forward") + flEnableIptables = flag.Bool([]string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules") + flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward") flDefaultIp = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports") flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication") flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver") diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index da4472ed85..692f63f447 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -80,8 +80,8 @@ Commands -g, --graph="/var/lib/docker": Path to use as the root of the docker runtime --icc=true: Enable inter-container communication --ip="0.0.0.0": Default IP address to use when binding container ports - --ip-forward=true: Disable enabling of net.ipv4.ip_forward - --iptables=true: Disable docker's addition of iptables rules + --ip-forward=true: Enable net.ipv4.ip_forward + --iptables=true: Enable Docker's addition of iptables rules --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if 
not default route is available -p, --pidfile="/var/run/docker.pid": Path to use for daemon PID file -r, --restart=true: Restart previously running containers -- cgit v1.2.1 From e1c48fa56007e1db028f7f83bfbf79c3d05feccd Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Wed, 12 Mar 2014 01:17:38 -0600 Subject: Clean up the "go test" output from "make test" to be much more readable/scannable Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/make.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/make.sh b/hack/make.sh index 63edca4d4c..d7f8aaeed2 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -125,7 +125,7 @@ go_test_dir() { testcover=( -cover -coverprofile "$coverprofile" $coverpkg ) fi ( - set -x + echo '+ go test' $TESTFLAGS "github.com/dotcloud/docker${dir#.}" cd "$dir" go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS ) -- cgit v1.2.1 From 99b6364790e59a70d57949792fa31014637c93ee Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Wed, 12 Mar 2014 01:18:12 -0600 Subject: Exclude more "definitely not unit tested Go source code" directories from hack/make/test Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/make.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/hack/make.sh b/hack/make.sh index d7f8aaeed2..994da8d9ad 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -136,7 +136,15 @@ go_test_dir() { # output, one per line. find_dirs() { find -not \( \ - \( -wholename './vendor' -o -wholename './integration' -o -wholename './contrib' -o -wholename './pkg/mflag/example' \) \ + \( \ + -wholename './vendor' \ + -o -wholename './integration' \ + -o -wholename './contrib' \ + -o -wholename './pkg/mflag/example' \ + -o -wholename './.git' \ + -o -wholename './bundles' \ + -o -wholename './docs' \ + \) \ -prune \ \) -name "$1" -print0 | xargs -0n1 dirname | sort -u } -- cgit v1.2.1 From 8bf63d532622e22070b92ee71dcb972ace4b9b40 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 12 Mar 2014 22:02:24 +1000 Subject: fixes suggested by James Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/examples/apt-cacher-ng.Dockerfile | 8 ++++---- docs/sources/examples/apt-cacher-ng.rst | 12 +++++------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/docs/sources/examples/apt-cacher-ng.Dockerfile b/docs/sources/examples/apt-cacher-ng.Dockerfile index fcc326815d..a189d28d86 100644 --- a/docs/sources/examples/apt-cacher-ng.Dockerfile +++ b/docs/sources/examples/apt-cacher-ng.Dockerfile @@ -1,9 +1,9 @@ # -# BUILD docker build -t apt-cacher . -# RUN docker run -d -p 3142:3142 -name apt-cacher-run apt-cacher +# Build: docker build -t apt-cacher . 
+# Run: docker run -d -p 3142:3142 -name apt-cacher-run apt-cacher # # and then you can run containers with: -# docker run -t -i -rm -e http_proxy http://dockerhost:3142/ debian bash +# docker run -t -i -rm -e http_proxy http://dockerhost:3142/ debian bash # FROM ubuntu MAINTAINER SvenDowideit@docker.com @@ -11,5 +11,5 @@ MAINTAINER SvenDowideit@docker.com VOLUME ["/var/cache/apt-cacher-ng"] RUN apt-get update ; apt-get install -yq apt-cacher-ng -EXPOSE 3142 +EXPOSE 3142 CMD chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/* diff --git a/docs/sources/examples/apt-cacher-ng.rst b/docs/sources/examples/apt-cacher-ng.rst index 0fb55720f2..ed22c33d05 100644 --- a/docs/sources/examples/apt-cacher-ng.rst +++ b/docs/sources/examples/apt-cacher-ng.rst @@ -37,11 +37,11 @@ To see the logfiles that are 'tailed' in the default command, you can use: $ sudo docker logs -f test_apt_cacher_ng -To get your Debian based containers to use the proxy, you can do one of three things +To get your Debian-based containers to use the proxy, you can do one of three things 1. Add an apt Proxy setting ``echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/conf.d/01proxy`` -2. Set and environment variable: ``http_proxy=http://dockerhost:3142/`` -3. Change your sources.list entries to start with ``http://dockerhost:3142/`` +2. Set an environment variable: ``http_proxy=http://dockerhost:3142/`` +3. Change your ``sources.list`` entries to start with ``http://dockerhost:3142/`` **Option 1** injects the settings safely into your apt configuration in a local version of a common base: @@ -49,9 +49,7 @@ version of a common base: .. code-block:: bash FROM ubuntu - RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy - RUN apt-get update ; apt-get install vim git # docker build -t my_ubuntu . @@ -64,10 +62,10 @@ break other HTTP clients which obey ``http_proxy``, such as ``curl``, ``wget`` a $ sudo docker run -rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash **Option 3** is the least portable, but there will be times when you might need to -do it - and you can do it from your Dockerfile too. +do it and you can do it from your ``Dockerfile`` too. Apt-cacher-ng has some tools that allow you to manage the repository, and they -can be used by leveraging the ``VOLUME``, and the image we built to run the +can be used by leveraging the ``VOLUME`` instruction, and the image we built to run the service: .. code-block:: bash -- cgit v1.2.1 From 2cfcf42d50b469abfb0e13245726371d445b76e4 Mon Sep 17 00:00:00 2001 From: unclejack Date: Fri, 28 Feb 2014 18:42:20 +0200 Subject: retry to retrieve metadata on failure during pull This makes Docker retry to retrieve the JSON metadata for the layers. Docker will make 5 attempts to retrieve the metadata before failing and it will increase the delay between attempts after each failed attempt. 
Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- server.go | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/server.go b/server.go index d6a4036faf..4876f31bae 100644 --- a/server.go +++ b/server.go @@ -1086,16 +1086,32 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin if !srv.runtime.Graph().Exists(id) { out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil)) - imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token) - if err != nil { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) - // FIXME: Keep going in case of error? - return err - } - img, err := image.NewImgJSON(imgJSON) - if err != nil { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) - return fmt.Errorf("Failed to parse json: %s", err) + var ( + imgJSON []byte + imgSize int + err error + img *image.Image + ) + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) + if err != nil && j == retries { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + img, err = image.NewImgJSON(imgJSON) + if err != nil && j == retries { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return fmt.Errorf("Failed to parse json: %s", err) + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else { + break + } } // Get the layer -- cgit v1.2.1 From 2a5e1abaa93b0081446420cae9d6d7d3892b6d75 Mon Sep 17 00:00:00 2001 From: David Gageot Date: Wed, 12 Mar 2014 15:42:10 +0100 Subject: Update port_redirection.rst Fix flags --- docs/sources/use/port_redirection.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/use/port_redirection.rst b/docs/sources/use/port_redirection.rst index 38d6b98841..ba244adadb 100644 --- a/docs/sources/use/port_redirection.rst +++ b/docs/sources/use/port_redirection.rst @@ -128,7 +128,7 @@ The ``client`` then links to the ``server``: .. code-block:: bash # Link - docker run -name client -link server:linked-server + docker run --name client --link server:linked-server ``client`` locally refers to ``server`` as ``linked-server``. The following environment variables, among others, are available on -- cgit v1.2.1 From a56d1b93a1a16ac482b5e30773664f2538949c53 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 12 Mar 2014 17:58:53 +0200 Subject: don't leave empty cidFile behind This makes `--cidfile` clean up empty container ID files. These are left behind when creating the container fails. 
Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- api/client.go | 16 +++++++++++++++- integration/commands_test.go | 31 ++++++++++++++++++++++++++++++- 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/api/client.go b/api/client.go index 599d82b39a..adaf2dfd4e 100644 --- a/api/client.go +++ b/api/client.go @@ -1763,7 +1763,21 @@ func (cli *DockerCli) CmdRun(args ...string) error { if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil { return fmt.Errorf("Failed to create the container ID file: %s", err) } - defer containerIDFile.Close() + defer func() { + containerIDFile.Close() + var ( + cidFileInfo os.FileInfo + err error + ) + if cidFileInfo, err = os.Stat(hostConfig.ContainerIDFile); err != nil { + return + } + if cidFileInfo.Size() == 0 { + if err := os.Remove(hostConfig.ContainerIDFile); err != nil { + fmt.Printf("failed to remove CID file '%s': %s \n", hostConfig.ContainerIDFile, err) + } + } + }() } containerValues := url.Values{} diff --git a/integration/commands_test.go b/integration/commands_test.go index 46f623bedf..d226cd7133 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -931,7 +931,7 @@ run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ] // #2098 - Docker cidFiles only contain short version of the containerId //sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" // TestRunCidFile tests that run --cidfile returns the longid -func TestRunCidFile(t *testing.T) { +func TestRunCidFileCheckIDLength(t *testing.T) { stdout, stdoutPipe := io.Pipe() tmpDir, err := ioutil.TempDir("", "TestRunCidFile") @@ -980,6 +980,35 @@ func TestRunCidFile(t *testing.T) { } +// Ensure that CIDFile gets deleted if it's empty +// Perform this test by making `docker run` fail +func TestRunCidFileCleanupIfEmpty(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "TestRunCidFile") + if err != nil { + t.Fatal(err) + } + tmpCidFile := path.Join(tmpDir, "cid") + + cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr) + defer cleanup(globalEngine, t) + + c := make(chan struct{}) + go func() { + defer close(c) + if err := cli.CmdRun("--cidfile", tmpCidFile, unitTestImageID); err == nil { + t.Fatal("running without a command should haveve failed") + } + if _, err := os.Stat(tmpCidFile); err == nil { + t.Fatalf("empty CIDFile '%s' should've been deleted", tmpCidFile) + } + }() + defer os.RemoveAll(tmpDir) + + setTimeout(t, "CmdRun timed out", 5*time.Second, func() { + <-c + }) +} + func TestContainerOrphaning(t *testing.T) { // setup a temporary directory -- cgit v1.2.1 From 6a325f1c7a243689ecf01f257ac7afb95fea7ec2 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Wed, 12 Mar 2014 11:13:24 -0700 Subject: Fix issue when /etc/apparmor.d does not exists Docker-DCO-1.1-Signed-off-by: Guillaume J. 
Charmes (github: creack) --- pkg/libcontainer/apparmor/setup.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/libcontainer/apparmor/setup.go b/pkg/libcontainer/apparmor/setup.go index e07759cc64..4e1c95143a 100644 --- a/pkg/libcontainer/apparmor/setup.go +++ b/pkg/libcontainer/apparmor/setup.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "os" "os/exec" + "path" ) const DefaultProfilePath = "/etc/apparmor.d/docker" @@ -85,6 +86,11 @@ func InstallDefaultProfile() error { return nil } + // Make sure /etc/apparmor.d exists + if err := os.MkdirAll(path.Dir(DefaultProfilePath), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(DefaultProfilePath, []byte(DefaultProfile), 0644); err != nil { return err } -- cgit v1.2.1 From 471aa870f57e66128b08ec28e908fb1faacdfd40 Mon Sep 17 00:00:00 2001 From: Ken ICHIKAWA Date: Wed, 12 Mar 2014 14:52:39 +0900 Subject: Remove duplicated description of --mtu commit baa70e975186bb0ee8a4b16b9374cb6e794f8975 duplicates the description of --mtu. This patch removes the duplicated description. Docker-DCO-1.1-Signed-off-by: Ken ICHIKAWA (github: ichik1) --- docs/sources/reference/commandline/cli.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 0e3f30ede2..d146c18a15 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -82,7 +82,6 @@ Commands --ip="0.0.0.0": Default IP address to use when binding container ports --ip-forward=true: Enable net.ipv4.ip_forward --iptables=true: Enable Docker's addition of iptables rules - --mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if not default route is available -p, --pidfile="/var/run/docker.pid": Path to use for daemon PID file -r, --restart=true: Restart previously running containers -s, --storage-driver="": Force the docker runtime to use a specific storage driver -- cgit v1.2.1 From 841fcad0ba03ff71a27ad2892fab3fdc83d071d8 Mon Sep 17 00:00:00 2001 From: Ken ICHIKAWA Date: Thu, 13 Mar 2014 15:19:42 +0900 Subject: Add missing options -t and -v to images subcommand doc Docker-DCO-1.1-Signed-off-by: Ken ICHIKAWA (github: ichik1) --- docs/sources/reference/commandline/cli.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index d146c18a15..e65bd930ae 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -608,8 +608,8 @@ To see how the ``docker:latest`` image was built: -a, --all=false: Show all images (by default filter out the intermediate images used to build) --no-trunc=false: Don't truncate output -q, --quiet=false: Only show numeric IDs - --tree=false: Output graph in tree format - --viz=false: Output graph in graphviz format + -t, --tree=false: Output graph in tree format + -v, --viz=false: Output graph in graphviz format Listing the most recently created images ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- cgit v1.2.1 From 73596b00e053fedbf42f7abb87728e7176e5a95c Mon Sep 17 00:00:00 2001 From: Phillip Alexander Date: Wed, 12 Mar 2014 20:01:27 -0700 Subject: Fix boilerplate text in Apache license This commit updates the Apache license boilerplate with actual information. The Apache license appendix (designed to be removed before publication) states: ``` APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!)... ``` Additionally, the copyright year was not included. Copyright notices must reflect the current year. This commit updates the listed year to 2014. see: http://www.copyright.gov/circs/circ01.pdf for more info Docker-DCO-1.1-Signed-off-by: Phillip Alexander (github: phillipalexander) --- LICENSE | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/LICENSE b/LICENSE index d645695673..27448585ad 100644 --- a/LICENSE +++ b/LICENSE @@ -176,18 +176,7 @@ END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] + Copyright 2014 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. -- cgit v1.2.1 From 9a0d7fe0182da541cc99eab9a4930616792e95c3 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 13 Mar 2014 17:40:34 +0000 Subject: use mock for search Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- registry/registry.go | 2 +- registry/registry_mock_test.go | 7 ++++++- registry/registry_test.go | 6 ++++-- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/registry/registry.go b/registry/registry.go index dbf5d539ff..346132bcc5 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -600,7 +600,7 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat func (r *Registry) SearchRepositories(term string) (*SearchResults, error) { utils.Debugf("Index server: %s", r.indexEndpoint) - u := IndexServerAddress() + "search?q=" + url.QueryEscape(term) + u := r.indexEndpoint + "search?q=" + url.QueryEscape(term) req, err := r.reqFactory.NewRequest("GET", u, nil) if err != nil { return nil, err diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go index 6eb94b63cc..dd5da6bd50 100644 --- a/registry/registry_mock_test.go +++ b/registry/registry_mock_test.go @@ -321,7 +321,12 @@ func handlerAuth(w http.ResponseWriter, r *http.Request) { } func handlerSearch(w http.ResponseWriter, r *http.Request) { - writeResponse(w, "{}", 200) + result := &SearchResults{ + Query: "fakequery", + NumResults: 1, + Results: []SearchResult{{Name: "fakeimage", StarCount: 42}}, + } + writeResponse(w, result, 200) } func TestPing(t *testing.T) { diff --git a/registry/registry_test.go b/registry/registry_test.go index f21814c791..ebfb99b4c3 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -186,14 +186,16 @@ func TestPushImageJSONIndex(t *testing.T) { func TestSearchRepositories(t *testing.T) { r := spawnTestRegistry(t) - results, err := r.SearchRepositories("supercalifragilisticepsialidocious") + results, err := r.SearchRepositories("fakequery") if err != nil { t.Fatal(err) } if results == nil { t.Fatal("Expected non-nil SearchResults object") } - 
assertEqual(t, results.NumResults, 0, "Expected 0 search results") + assertEqual(t, results.NumResults, 1, "Expected 1 search results") + assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") + assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' a ot hae 42 stars") } func TestValidRepositoryName(t *testing.T) { -- cgit v1.2.1 From 44fe8cbbd174b5d85d4a063ed270f6b9d2279b70 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 13 Mar 2014 11:46:02 -0600 Subject: Update to double-dash everywhere These were found using `git grep -nE '[^-a-zA-Z0-9<>]-[a-zA-Z0-9]{2}'` (fair warning: _many_ false positives there). Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- Dockerfile | 6 +- api/client.go | 2 +- contrib/completion/fish/docker.fish | 4 +- contrib/completion/zsh/_docker | 2 +- contrib/desktop-integration/data/Dockerfile | 6 +- contrib/desktop-integration/iceweasel/Dockerfile | 8 +-- contrib/init/sysvinit-debian/docker.default | 2 +- contrib/mkseccomp.pl | 2 +- docs/sources/examples/apt-cacher-ng.Dockerfile | 4 +- docs/sources/examples/apt-cacher-ng.rst | 8 +-- docs/sources/examples/hello_world.rst | 4 +- docs/sources/examples/postgresql_service.rst | 10 +-- docs/sources/examples/python_web_app.rst | 2 +- docs/sources/examples/running_redis_service.rst | 2 +- docs/sources/examples/running_ssh_service.rst | 4 +- docs/sources/installation/ubuntulinux.rst | 4 +- docs/sources/reference/api/docker_remote_api.rst | 2 +- .../reference/api/docker_remote_api_v1.10.rst | 4 +- .../reference/api/docker_remote_api_v1.2.rst | 4 +- .../reference/api/docker_remote_api_v1.3.rst | 4 +- .../reference/api/docker_remote_api_v1.4.rst | 4 +- .../reference/api/docker_remote_api_v1.5.rst | 4 +- .../reference/api/docker_remote_api_v1.6.rst | 4 +- .../reference/api/docker_remote_api_v1.7.rst | 4 +- .../reference/api/docker_remote_api_v1.8.rst | 4 +- .../reference/api/docker_remote_api_v1.9.rst | 4 +- docs/sources/reference/builder.rst | 4 +- docs/sources/reference/commandline/cli.rst | 42 +++++------ docs/sources/reference/run.rst | 82 +++++++++++----------- docs/sources/use/ambassador_pattern_linking.rst | 28 ++++---- docs/sources/use/basics.rst | 2 +- docs/sources/use/networking.rst | 4 +- docs/sources/use/port_redirection.rst | 4 +- docs/sources/use/working_with_links_names.rst | 16 ++--- docs/sources/use/working_with_volumes.rst | 18 ++--- docs/sources/use/workingwithrepository.rst | 2 +- hack/RELEASE-CHECKLIST.md | 4 +- hack/dind | 4 +- hack/infrastructure/docker-ci/Dockerfile | 4 +- hack/infrastructure/docker-ci/README.rst | 4 +- .../docker-ci/dockertest/nightlyrelease | 2 +- hack/infrastructure/docker-ci/dockertest/project | 2 +- .../docker-ci/testbuilder/Dockerfile | 4 +- .../docker-ci/testbuilder/docker-registry.sh | 6 +- .../infrastructure/docker-ci/testbuilder/docker.sh | 8 +-- hack/release.sh | 2 +- integration/commands_test.go | 2 +- integration/container_test.go | 4 +- integration/server_test.go | 2 +- runconfig/config_test.go | 16 ++--- runconfig/parse.go | 4 +- 51 files changed, 189 insertions(+), 189 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7fad3d56a1..42438e3946 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,13 +6,13 @@ # docker build -t docker . 
# # # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -i -t docker bash +# docker run -v `pwd`:/go/src/github.com/dotcloud/docker --privileged -i -t docker bash # # # Run the test suite: -# docker run -privileged docker hack/make.sh test +# docker run --privileged docker hack/make.sh test # # # Publish a release: -# docker run -privileged \ +# docker run --privileged \ # -e AWS_S3_BUCKET=baz \ # -e AWS_ACCESS_KEY=foo \ # -e AWS_SECRET_KEY=bar \ diff --git a/api/client.go b/api/client.go index 6049a892c1..3c605d4af9 100644 --- a/api/client.go +++ b/api/client.go @@ -1409,7 +1409,7 @@ func (cli *DockerCli) CmdCommit(args ...string) error { cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes") flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith \"") - flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`) + flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: --run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`) if err := cmd.Parse(args); err != nil { return nil } diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index 2629533aac..b0c5f38a96 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -79,7 +79,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d ' complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (eg. "John Hannibal Smith "' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: -run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. 
(ex: --run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" # cp @@ -202,7 +202,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expo complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep stdin open even if not attached' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container (name:alias)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: , where unit = b, k, m or g)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s n -l networking -d 'Enable networking for this container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker index 8b50bac01b..a379fd40f8 100755 --- a/contrib/completion/zsh/_docker +++ b/contrib/completion/zsh/_docker @@ -174,7 +174,7 @@ __docker_subcommand () { (ps) _arguments '-a[Show all containers. Only running containers are shown by default]' \ '-h[Show help]' \ - '-before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \ + '--before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \ '-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)' ;; (tag) diff --git a/contrib/desktop-integration/data/Dockerfile b/contrib/desktop-integration/data/Dockerfile index a9843a52ad..76846af912 100644 --- a/contrib/desktop-integration/data/Dockerfile +++ b/contrib/desktop-integration/data/Dockerfile @@ -9,13 +9,13 @@ # wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile # # # Build data image -# docker build -t data -rm . +# docker build -t data . # # # Create a data container. (eg: iceweasel-data) -# docker run -name iceweasel-data data true +# docker run --name iceweasel-data data true # # # List data from it -# docker run -volumes-from iceweasel-data busybox ls -al /data +# docker run --volumes-from iceweasel-data busybox ls -al /data docker-version 0.6.5 diff --git a/contrib/desktop-integration/iceweasel/Dockerfile b/contrib/desktop-integration/iceweasel/Dockerfile index 721cc6d2cf..f9f58c9ca5 100644 --- a/contrib/desktop-integration/iceweasel/Dockerfile +++ b/contrib/desktop-integration/iceweasel/Dockerfile @@ -10,16 +10,16 @@ # wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile # # # Build iceweasel image -# docker build -t iceweasel -rm . +# docker build -t iceweasel . # # # Run stateful data-on-host iceweasel. 
For ephemeral, remove -v /data/iceweasel:/data # docker run -v /data/iceweasel:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ -# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ +# -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ # -e DISPLAY=unix$DISPLAY iceweasel # # # To run stateful dockerized data containers -# docker run -volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \ -# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ +# docker run --volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \ +# -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \ # -e DISPLAY=unix$DISPLAY iceweasel docker-version 0.6.5 diff --git a/contrib/init/sysvinit-debian/docker.default b/contrib/init/sysvinit-debian/docker.default index d5110b5e2f..14e660175b 100644 --- a/contrib/init/sysvinit-debian/docker.default +++ b/contrib/init/sysvinit-debian/docker.default @@ -4,7 +4,7 @@ #DOCKER="/usr/local/bin/docker" # Use DOCKER_OPTS to modify the daemon startup options. -#DOCKER_OPTS="-dns 8.8.8.8 -dns 8.8.4.4" +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" # If you need Docker to use an HTTP proxy, it can also be specified here. #export http_proxy="http://127.0.0.1:3128/" diff --git a/contrib/mkseccomp.pl b/contrib/mkseccomp.pl index 5c583cc3d3..28d0645af0 100755 --- a/contrib/mkseccomp.pl +++ b/contrib/mkseccomp.pl @@ -10,7 +10,7 @@ # can configure the list of syscalls. When run, this script produces output # which, when stored in a file, can be passed to docker as follows: # -# docker run -lxc-conf="lxc.seccomp=$file" +# docker run --lxc-conf="lxc.seccomp=$file" # # The included sample file shows how to cut about a quarter of all syscalls, # which affecting most applications. diff --git a/docs/sources/examples/apt-cacher-ng.Dockerfile b/docs/sources/examples/apt-cacher-ng.Dockerfile index a189d28d86..3b7862bb58 100644 --- a/docs/sources/examples/apt-cacher-ng.Dockerfile +++ b/docs/sources/examples/apt-cacher-ng.Dockerfile @@ -1,9 +1,9 @@ # # Build: docker build -t apt-cacher . -# Run: docker run -d -p 3142:3142 -name apt-cacher-run apt-cacher +# Run: docker run -d -p 3142:3142 --name apt-cacher-run apt-cacher # # and then you can run containers with: -# docker run -t -i -rm -e http_proxy http://dockerhost:3142/ debian bash +# docker run -t -i --rm -e http_proxy http://dockerhost:3142/ debian bash # FROM ubuntu MAINTAINER SvenDowideit@docker.com diff --git a/docs/sources/examples/apt-cacher-ng.rst b/docs/sources/examples/apt-cacher-ng.rst index ed22c33d05..dd844d4ef1 100644 --- a/docs/sources/examples/apt-cacher-ng.rst +++ b/docs/sources/examples/apt-cacher-ng.rst @@ -23,13 +23,13 @@ To build the image using: .. code-block:: bash - $ sudo docker build -rm -t eg_apt_cacher_ng . + $ sudo docker build -t eg_apt_cacher_ng . Then run it, mapping the exposed port to one on the host .. code-block:: bash - $ sudo docker run -d -p 3142:3142 -name test_apt_cacher_ng eg_apt_cacher_ng + $ sudo docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng To see the logfiles that are 'tailed' in the default command, you can use: @@ -59,7 +59,7 @@ break other HTTP clients which obey ``http_proxy``, such as ``curl``, ``wget`` a .. 
code-block:: bash - $ sudo docker run -rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash + $ sudo docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash **Option 3** is the least portable, but there will be times when you might need to do it and you can do it from your ``Dockerfile`` too. @@ -70,7 +70,7 @@ service: .. code-block:: bash - $ sudo docker run -rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash + $ sudo docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash $$ /usr/lib/apt-cacher-ng/distkill.pl Scanning /var/cache/apt-cacher-ng, please wait... diff --git a/docs/sources/examples/hello_world.rst b/docs/sources/examples/hello_world.rst index b8538debb9..507056da85 100644 --- a/docs/sources/examples/hello_world.rst +++ b/docs/sources/examples/hello_world.rst @@ -119,13 +119,13 @@ Check the logs make sure it is working correctly. .. code-block:: bash - sudo docker attach -sig-proxy=false $container_id + sudo docker attach --sig-proxy=false $container_id Attach to the container to see the results in real-time. - **"docker attach**" This will allow us to attach to a background process to see what is going on. -- **"-sig-proxy=false"** Do not forward signals to the container; allows +- **"--sig-proxy=false"** Do not forward signals to the container; allows us to exit the attachment using Control-C without stopping the container. - **$container_id** The Id of the container we want to attach too. diff --git a/docs/sources/examples/postgresql_service.rst b/docs/sources/examples/postgresql_service.rst index 5a2323471b..66b0fd7aa5 100644 --- a/docs/sources/examples/postgresql_service.rst +++ b/docs/sources/examples/postgresql_service.rst @@ -37,24 +37,24 @@ And run the PostgreSQL server container (in the foreground): .. code-block:: bash - $ sudo docker run -rm -P -name pg_test eg_postgresql + $ sudo docker run --rm -P -name pg_test eg_postgresql There are 2 ways to connect to the PostgreSQL server. We can use :ref:`working_with_links_names`, or we can access it from our host (or the network). -.. note:: The ``-rm`` removes the container and its image when the container +.. note:: The ``--rm`` removes the container and its image when the container exists successfully. Using container linking ^^^^^^^^^^^^^^^^^^^^^^^ Containers can be linked to another container's ports directly using -``-link remote_name:local_alias`` in the client's ``docker run``. This will +``--link remote_name:local_alias`` in the client's ``docker run``. This will set a number of environment variables that can then be used to connect: .. code-block:: bash - $ sudo docker run -rm -t -i -link pg_test:pg eg_postgresql bash + $ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password @@ -104,7 +104,7 @@ configuration and data: .. code-block:: bash - docker run -rm --volumes-from pg_test -t -i busybox sh + docker run --rm --volumes-from pg_test -t -i busybox sh / # ls bin etc lib linuxrc mnt proc run sys usr diff --git a/docs/sources/examples/python_web_app.rst b/docs/sources/examples/python_web_app.rst index 5b8e3f6b4b..33c038f9ab 100644 --- a/docs/sources/examples/python_web_app.rst +++ b/docs/sources/examples/python_web_app.rst @@ -51,7 +51,7 @@ try things out, and then exit when you're done. .. 
code-block:: bash - $ sudo docker run -i -t -name pybuilder_run shykes/pybuilder bash + $ sudo docker run -i -t --name pybuilder_run shykes/pybuilder bash $$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz $$ /usr/local/bin/buildapp $URL diff --git a/docs/sources/examples/running_redis_service.rst b/docs/sources/examples/running_redis_service.rst index c9424867a4..50f1471f17 100644 --- a/docs/sources/examples/running_redis_service.rst +++ b/docs/sources/examples/running_redis_service.rst @@ -49,7 +49,7 @@ use a container link to provide access to our Redis database. Create your web application container ------------------------------------- -Next we can create a container for our application. We're going to use the ``-link`` +Next we can create a container for our application. We're going to use the ``--link`` flag to create a link to the ``redis`` container we've just created with an alias of ``db``. This will create a secure tunnel to the ``redis`` container and expose the Redis instance running inside that container to only this container. diff --git a/docs/sources/examples/running_ssh_service.rst b/docs/sources/examples/running_ssh_service.rst index d27799bee7..4161275019 100644 --- a/docs/sources/examples/running_ssh_service.rst +++ b/docs/sources/examples/running_ssh_service.rst @@ -19,14 +19,14 @@ Build the image using: .. code-block:: bash - $ sudo docker build -rm -t eg_sshd . + $ sudo docker build -t eg_sshd . Then run it. You can then use ``docker port`` to find out what host port the container's port 22 is mapped to: .. code-block:: bash - $ sudo docker run -d -P -name test_sshd eg_sshd + $ sudo docker run -d -P --name test_sshd eg_sshd $ sudo docker port test_sshd 22 0.0.0.0:49154 diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index c459f33d3c..6e79fb8cbc 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -309,9 +309,9 @@ daemon for the containers: sudo nano /etc/default/docker --- # Add: - DOCKER_OPTS="-dns 8.8.8.8" + DOCKER_OPTS="--dns 8.8.8.8" # 8.8.8.8 could be replaced with a local DNS server, such as 192.168.1.1 - # multiple DNS servers can be specified: -dns 8.8.8.8 -dns 192.168.1.1 + # multiple DNS servers can be specified: --dns 8.8.8.8 --dns 192.168.1.1 The Docker daemon has to be restarted: diff --git a/docs/sources/reference/api/docker_remote_api.rst b/docs/sources/reference/api/docker_remote_api.rst index e1071bf085..93558fa974 100644 --- a/docs/sources/reference/api/docker_remote_api.rst +++ b/docs/sources/reference/api/docker_remote_api.rst @@ -203,7 +203,7 @@ What's new .. http:get:: /images/viz - This URI no longer exists. The ``images -viz`` output is now generated in + This URI no longer exists. The ``images --viz`` output is now generated in the client, using the ``/images/json`` data. v1.6 diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.rst b/docs/sources/reference/api/docker_remote_api_v1.10.rst index ed63525e7e..20af253f0e 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.10.rst @@ -1276,8 +1276,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a 3.3 CORS Requests ----------------- -To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode. +To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. 
.. code-block:: bash - docker -d -H="192.168.1.9:4243" -api-enable-cors + docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.rst b/docs/sources/reference/api/docker_remote_api_v1.2.rst index 1ae2db696f..80f76a3de9 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.2.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.2.rst @@ -1045,7 +1045,7 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a 3.3 CORS Requests ----------------- -To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode. +To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - docker -d -H="tcp://192.168.1.9:4243" -api-enable-cors + docker -d -H="tcp://192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.rst b/docs/sources/reference/api/docker_remote_api_v1.3.rst index cb4c54642d..2b17a37a4d 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.3.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.3.rst @@ -1124,7 +1124,7 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a 3.3 CORS Requests ----------------- -To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode. +To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. - docker -d -H="192.168.1.9:4243" -api-enable-cors + docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.rst b/docs/sources/reference/api/docker_remote_api_v1.4.rst index 39c8839653..ff5aaa7a74 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.4.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.4.rst @@ -1168,9 +1168,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a 3.3 CORS Requests ----------------- -To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode. +To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. .. code-block:: bash - docker -d -H="192.168.1.9:4243" -api-enable-cors + docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.rst b/docs/sources/reference/api/docker_remote_api_v1.5.rst index 0cdbaf747a..d4440e4423 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.5.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.5.rst @@ -1137,8 +1137,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a 3.3 CORS Requests ----------------- -To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode. +To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. .. 
code-block:: bash - docker -d -H="192.168.1.9:4243" -api-enable-cors + docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.rst b/docs/sources/reference/api/docker_remote_api_v1.6.rst index a9ddfb2c13..cfc37084b8 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.6.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.6.rst @@ -1274,9 +1274,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a 3.3 CORS Requests ----------------- -To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode. +To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. .. code-block:: bash - docker -d -H="192.168.1.9:4243" -api-enable-cors + docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.rst b/docs/sources/reference/api/docker_remote_api_v1.7.rst index cacd7ab6f7..1bafaddfc5 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.7.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.7.rst @@ -1254,9 +1254,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a 3.3 CORS Requests ----------------- -To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode. +To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. .. code-block:: bash - docker -d -H="192.168.1.9:4243" -api-enable-cors + docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.rst b/docs/sources/reference/api/docker_remote_api_v1.8.rst index b752f2f8a4..16492dde76 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.8.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.8.rst @@ -1287,8 +1287,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a 3.3 CORS Requests ----------------- -To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode. +To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. .. code-block:: bash - docker -d -H="192.168.1.9:4243" -api-enable-cors + docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.rst b/docs/sources/reference/api/docker_remote_api_v1.9.rst index 9430ff370d..27812457bb 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.9.rst @@ -1288,8 +1288,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a 3.3 CORS Requests ----------------- -To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode. +To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. .. code-block:: bash - docker -d -H="192.168.1.9:4243" -api-enable-cors + docker -d -H="192.168.1.9:4243" --api-enable-cors diff --git a/docs/sources/reference/builder.rst b/docs/sources/reference/builder.rst index 3c48939c82..0d8d750a04 100644 --- a/docs/sources/reference/builder.rst +++ b/docs/sources/reference/builder.rst @@ -193,7 +193,7 @@ well. 
When used in the shell or exec formats, the ``CMD`` instruction sets the command to be executed when running the image. This is -functionally equivalent to running ``docker commit -run '{"Cmd": +functionally equivalent to running ``docker commit --run '{"Cmd": }'`` outside the builder. If you use the *shell* form of the CMD, then the ```` will @@ -235,7 +235,7 @@ override the default specified in CMD. ``EXPOSE [...]`` The ``EXPOSE`` instruction exposes ports for use within links. This is -functionally equivalent to running ``docker commit -run '{"PortSpecs": +functionally equivalent to running ``docker commit --run '{"PortSpecs": ["", ""]}'`` outside the builder. Refer to :ref:`port_redirection` for detailed information. diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index e65bd930ae..83f05947c2 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -52,7 +52,7 @@ Sometimes this can use a more complex value string, as for ``-v``:: Strings and Integers ~~~~~~~~~~~~~~~~~~~~ -Options like ``-name=""`` expect a string, and they can only be +Options like ``--name=""`` expect a string, and they can only be specified once. Options like ``-c=0`` expect an integer, and they can only be specified once. @@ -94,7 +94,7 @@ daemon and client. To run the daemon you provide the ``-d`` flag. To force Docker to use devicemapper as the storage driver, use ``docker -d -s devicemapper``. -To set the DNS server for all Docker containers, use ``docker -d -dns 8.8.8.8``. +To set the DNS server for all Docker containers, use ``docker -d --dns 8.8.8.8``. To run the daemon with debug output, use ``docker -d -D``. @@ -305,7 +305,7 @@ by using the ``git://`` schema. -m, --message="": Commit message -a, --author="": Author (eg. "John Hannibal Smith " --run="": Configuration changes to be applied when the image is launched with `docker run`. - (ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}') + (ex: --run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}') .. _cli_commit_examples: @@ -335,9 +335,9 @@ run ``ls /etc``. .. code-block:: bash - $ docker run -t -name test ubuntu ls + $ docker run -t --name test ubuntu ls bin boot dev etc home lib lib64 media mnt opt proc root run sbin selinux srv sys tmp usr var - $ docker commit -run='{"Cmd": ["ls","/etc"]}' test test2 + $ docker commit --run='{"Cmd": ["ls","/etc"]}' test test2 933d16de9e70005304c1717b5c6f2f39d6fd50752834c6f34a155c70790011eb $ docker run -t test2 adduser.conf gshadow login.defs rc0.d @@ -358,7 +358,7 @@ Say you have a Dockerfile like so: CMD ["/usr/sbin/sshd -D"] ... -If you run that, make some changes, and then commit, Docker will merge the environment variable and exposed port configuration settings with any that you specify in the -run= option. This is a change from Docker 0.8.0 and prior where no attempt was made to preserve any existing configuration on commit. +If you run that, make some changes, and then commit, Docker will merge the environment variable and exposed port configuration settings with any that you specify in the --run= option. This is a change from Docker 0.8.0 and prior where no attempt was made to preserve any existing configuration on commit. .. 
code-block:: bash @@ -366,14 +366,14 @@ If you run that, make some changes, and then commit, Docker will merge the envir $ docker run -t -i me/foo /bin/bash foo-container$ [make changes in the container] foo-container$ exit - $ docker commit -run='{"Cmd": ["ls"]}' [container-id] me/bar + $ docker commit --run='{"Cmd": ["ls"]}' [container-id] me/bar ... The me/bar image will now have port 22 exposed, MYVAR env var set to 'foobar', and its default command will be ["ls"]. -Note that this is currently a shallow merge. So, for example, if you had specified a new port spec in the -run= config above, that would have clobbered the 'EXPOSE 22' setting from the parent container. +Note that this is currently a shallow merge. So, for example, if you had specified a new port spec in the --run= config above, that would have clobbered the 'EXPOSE 22' setting from the parent container. -Full -run example +Full --run example ................. The ``--run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID`` @@ -384,7 +384,7 @@ not overridden in the JSON hash will be merged in. .. code-block:: bash - $ sudo docker commit -run=' + $ sudo docker commit --run=' { "Entrypoint" : null, "Privileged" : false, @@ -516,16 +516,16 @@ Show events in the past from a specified time .. code-block:: bash - $ sudo docker events -since 1378216169 + $ sudo docker events --since 1378216169 [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop - $ sudo docker events -since '2013-09-03' + $ sudo docker events --since '2013-09-03' [2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop - $ sudo docker events -since '2013-09-03 15:49:29 +0200 CEST' + $ sudo docker events --since '2013-09-03 15:49:29 +0200 CEST' [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die [2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop @@ -829,7 +829,7 @@ text output: .. code-block:: bash - $ sudo docker inspect -format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID + $ sudo docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID Find a Specific Port Mapping ............................ @@ -844,7 +844,7 @@ we ask for the ``HostPort`` field to get the public address. .. code-block:: bash - $ sudo docker inspect -format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID + $ sudo docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID Get config .......... @@ -856,7 +856,7 @@ to convert config object into JSON .. code-block:: bash - $ sudo docker inspect -format='{{json .config}}' $INSTANCE_ID + $ sudo docker inspect --format='{{json .config}}' $INSTANCE_ID .. _cli_kill: @@ -1151,7 +1151,7 @@ image is removed. 
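The ``--format`` strings above are ordinary Go text/template expressions that the client evaluates against the inspected object, with ``json`` available as a helper function. A minimal sketch of that mechanism, using a made-up struct in place of the real inspect data (the struct shape and the ``json`` helper here are illustrative only, not Docker's client code):

```go
package main

import (
	"encoding/json"
	"os"
	"text/template"
)

// portBinding and inspectData are stand-ins for the data `docker inspect`
// returns; the real structure is much richer.
type portBinding struct {
	HostIp   string
	HostPort string
}

type inspectData struct {
	ID              string
	NetworkSettings struct {
		Ports map[string][]portBinding
	}
}

func main() {
	data := inspectData{ID: "4241164edf6f"}
	data.NetworkSettings.Ports = map[string][]portBinding{
		"8787/tcp": {{HostIp: "0.0.0.0", HostPort: "49154"}},
	}

	// A "json" helper comparable to the one used by the `{{json ...}}` example above.
	funcs := template.FuncMap{"json": func(v interface{}) (string, error) {
		b, err := json.Marshal(v)
		return string(b), err
	}}

	format := `{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}} {{json .NetworkSettings.Ports}}`
	tmpl := template.Must(template.New("format").Funcs(funcs).Parse(format))
	tmpl.Execute(os.Stdout, data) // prints the host port, then the ports map as JSON
}
```

The same approach applies to any of the ``--format`` examples: whatever the template selects from the object is printed instead of the full JSON dump.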
--volumes-from="": Mount all volumes from the given container(s) --entrypoint="": Overwrite the default entrypoint set by the image -w, --workdir="": Working directory inside the container - --lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + --lxc-conf=[]: Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) --expose=[]: Expose a port from the container without publishing it to your host --link="": Add link to another container (name:alias) @@ -1171,7 +1171,7 @@ See :ref:`port_redirection` for more detailed information about the ``--expose`` ``-p``, ``-P`` and ``--link`` parameters, and :ref:`working_with_links_names` for specific examples using ``--link``. -Known Issues (run -volumes-from) +Known Issues (run --volumes-from) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * :issue:`2702`: "lxc-start: Permission denied - failed to mount" @@ -1199,7 +1199,7 @@ error. Docker will close this file when ``docker run`` exits. This will *not* work, because by default, most potentially dangerous kernel capabilities are dropped; including ``cap_sys_admin`` (which is -required to mount filesystems). However, the ``-privileged`` flag will +required to mount filesystems). However, the ``--privileged`` flag will allow it to run: .. code-block:: bash @@ -1211,7 +1211,7 @@ allow it to run: none 1.9G 0 1.9G 0% /mnt -The ``-privileged`` flag gives *all* capabilities to the container, +The ``--privileged`` flag gives *all* capabilities to the container, and it also lifts all the limitations enforced by the ``device`` cgroup controller. In other words, the container can then do almost everything that the host can do. This flag exists to allow special @@ -1313,7 +1313,7 @@ This example shows 5 containers that might be set up to test a web application c 2. Start a pre-prepared ``riakserver`` image, give the container name ``riak`` and expose port ``8098`` to any containers that link to it; 3. Start the ``appserver`` image, restricting its memory usage to 100MB, setting two environment variables ``DEVELOPMENT`` and ``BRANCH`` and bind-mounting the current directory (``$(pwd)``) in the container in read-only mode as ``/app/bin``; 4. Start the ``webserver``, mapping port ``443`` in the container to port ``1443`` on the Docker server, setting the DNS server to ``dns.dev.org``, creating a volume to put the log files into (so we can access it from another container), then importing the files from the volume exposed by the ``static`` container, and linking to all exposed ports from ``riak`` and ``app``. Lastly, we set the hostname to ``web.sven.dev.org`` so its consistent with the pre-generated SSL certificate; -5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``-rm`` option means that when the container exits, the container's layer is removed. +5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``--rm`` option means that when the container exits, the container's layer is removed. .. 
_cli_save: diff --git a/docs/sources/reference/run.rst b/docs/sources/reference/run.rst index d8de280671..8637ac3071 100644 --- a/docs/sources/reference/run.rst +++ b/docs/sources/reference/run.rst @@ -80,7 +80,7 @@ through network connections or shared volumes because the container is no longer listening to the commandline where you executed ``docker run``. You can reattach to a detached container with ``docker`` :ref:`cli_attach`. If you choose to run a container in the detached -mode, then you cannot use the ``-rm`` option. +mode, then you cannot use the ``--rm`` option. Foreground .......... @@ -92,10 +92,10 @@ error. It can even pretend to be a TTY (this is what most commandline executables expect) and pass along signals. All of that is configurable:: - -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr`` - -t=false : Allocate a pseudo-tty - -sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) - -i=false : Keep STDIN open even if not attached + -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr`` + -t=false : Allocate a pseudo-tty + --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) + -i=false : Keep STDIN open even if not attached If you do not specify ``-a`` then Docker will `attach everything (stdin,stdout,stderr) @@ -112,7 +112,7 @@ as well as persistent standard input (``stdin``), so you'll use ``-i Container Identification ------------------------ -Name (-name) +Name (--name) ............ The operator can identify a container in three ways: @@ -122,7 +122,7 @@ The operator can identify a container in three ways: * Name ("evil_ptolemy") The UUID identifiers come from the Docker daemon, and if you do not -assign a name to the container with ``-name`` then the daemon will +assign a name to the container with ``--name`` then the daemon will also generate a random string name too. The name can become a handy way to add meaning to a container since you can use this name when defining :ref:`links ` (or any other place @@ -137,7 +137,7 @@ container ID out to a file of your choosing. This is similar to how some programs might write out their process ID to a file (you've seen them as PID files):: - -cidfile="": Write the container ID to the file + --cidfile="": Write the container ID to the file Network Settings ---------------- @@ -145,7 +145,7 @@ Network Settings :: -n=true : Enable networking for this container - -dns=[] : Set custom dns servers for the container + --dns=[] : Set custom dns servers for the container By default, all containers have networking enabled and they can make any outgoing connections. The operator can completely disable @@ -154,9 +154,9 @@ networking. In cases like this, you would perform I/O through files or STDIN/STDOUT only. Your container will use the same DNS servers as the host by default, -but you can override this with ``-dns``. +but you can override this with ``--dns``. -Clean Up (-rm) +Clean Up (--rm) -------------- By default a container's file system persists even after the container @@ -165,9 +165,9 @@ final state) and you retain all your data by default. But if you are running short-term **foreground** processes, these container file systems can really pile up. 
If instead you'd like Docker to **automatically clean up the container and remove the file system when -the container exits**, you can add the ``-rm`` flag:: +the container exits**, you can add the ``--rm`` flag:: - -rm=false: Automatically remove the container when it exits (incompatible with -d) + --rm=false: Automatically remove the container when it exits (incompatible with -d) Runtime Constraints on CPU and Memory @@ -193,8 +193,8 @@ Runtime Privilege and LXC Configuration :: - -privileged=false: Give extended privileges to this container - -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + --privileged=false: Give extended privileges to this container + --lxc-conf=[]: Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" By default, Docker containers are "unprivileged" and cannot, for example, run a Docker daemon inside a Docker container. This is @@ -203,16 +203,16 @@ but a "privileged" container is given access to all devices (see lxc-template.go_ and documentation on `cgroups devices `_). -When the operator executes ``docker run -privileged``, Docker will +When the operator executes ``docker run --privileged``, Docker will enable to access to all devices on the host as well as set some configuration in AppArmor to allow the container nearly all the same access to the host as processes running outside containers on the -host. Additional information about running with ``-privileged`` is +host. Additional information about running with ``--privileged`` is available on the `Docker Blog `_. An operator can also specify LXC options using one or more -``-lxc-conf`` parameters. These can be new parameters or override +``--lxc-conf`` parameters. These can be new parameters or override existing parameters from the lxc-template.go_. Note that in the future, a given host's Docker daemon may not use LXC, so this is an implementation-specific configuration meant for operators already @@ -260,7 +260,7 @@ ENTRYPOINT (Default Command to Execute at Runtime :: - -entrypoint="": Overwrite the default entrypoint set by the image + --entrypoint="": Overwrite the default entrypoint set by the image The ENTRYPOINT of an image is similar to a ``COMMAND`` because it specifies what executable to run when the container starts, but it is @@ -274,12 +274,12 @@ runtime by using a string to specify the new ``ENTRYPOINT``. Here is an example of how to run a shell in a container that has been set up to automatically run something else (like ``/usr/bin/redis-server``):: - docker run -i -t -entrypoint /bin/bash example/redis + docker run -i -t --entrypoint /bin/bash example/redis or two examples of how to pass more parameters to that ENTRYPOINT:: - docker run -i -t -entrypoint /bin/bash example/redis -c ls -l - docker run -i -t -entrypoint /usr/bin/redis-cli example/redis --help + docker run -i -t --entrypoint /bin/bash example/redis -c ls -l + docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help EXPOSE (Incoming Ports) @@ -290,16 +290,16 @@ providing the ``EXPOSE`` instruction to give a hint to the operator about what incoming ports might provide services. 
The following options work with or override the ``Dockerfile``'s exposed defaults:: - -expose=[]: Expose a port from the container + --expose=[]: Expose a port from the container without publishing it to your host - -P=false : Publish all exposed ports to the host interfaces - -p=[] : Publish a container's port to the host (format: - ip:hostPort:containerPort | ip::containerPort | - hostPort:containerPort) - (use 'docker port' to see the actual mapping) - -link="" : Add link to another container (name:alias) - -As mentioned previously, ``EXPOSE`` (and ``-expose``) make a port + -P=false : Publish all exposed ports to the host interfaces + -p=[] : Publish a container's port to the host (format: + ip:hostPort:containerPort | ip::containerPort | + hostPort:containerPort) + (use 'docker port' to see the actual mapping) + --link="" : Add link to another container (name:alias) + +As mentioned previously, ``EXPOSE`` (and ``--expose``) make a port available **in** a container for incoming connections. The port number on the inside of the container (where the service listens) does not need to be the same number as the port exposed on the outside of the @@ -308,16 +308,16 @@ have an HTTP service listening on port 80 (and so you ``EXPOSE 80`` in the ``Dockerfile``), but outside the container the port might be 42800. To help a new client container reach the server container's internal -port operator ``-expose``'d by the operator or ``EXPOSE``'d by the +port operator ``--expose``'d by the operator or ``EXPOSE``'d by the developer, the operator has three choices: start the server container -with ``-P`` or ``-p,`` or start the client container with ``-link``. +with ``-P`` or ``-p,`` or start the client container with ``--link``. If the operator uses ``-P`` or ``-p`` then Docker will make the exposed port accessible on the host and the ports will be available to any client that can reach the host. To find the map between the host ports and the exposed ports, use ``docker port``) -If the operator uses ``-link`` when starting the new client container, +If the operator uses ``--link`` when starting the new client container, then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use. @@ -329,7 +329,7 @@ The operator can **set any environment variable** in the container by using one or more ``-e`` flags, even overriding those already defined by the developer with a Dockefile ``ENV``:: - $ docker run -e "deep=purple" -rm ubuntu /bin/bash -c export + $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export declare -x HOME="/" declare -x HOSTNAME="85bc26a0e200" declare -x OLDPWD @@ -341,13 +341,13 @@ developer with a Dockefile ``ENV``:: Similarly the operator can set the **hostname** with ``-h``. -``-link name:alias`` also sets environment variables, using the +``--link name:alias`` also sets environment variables, using the *alias* string to define environment variables within the container that give the IP and PORT information for connecting to the service container. Let's imagine we have a container running Redis:: # Start the service container, named redis-name - $ docker run -d -name redis-name dockerfiles/redis + $ docker run -d --name redis-name dockerfiles/redis 4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3 # The redis-name container exposed port 6379 @@ -361,12 +361,12 @@ container. 
Let's imagine we have a container running Redis:: Yet we can get information about the Redis container's exposed ports -with ``-link``. Choose an alias that will form a valid environment +with ``--link``. Choose an alias that will form a valid environment variable! :: - $ docker run -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c export + $ docker run --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c export declare -x HOME="/" declare -x HOSTNAME="acda7f7b1cdc" declare -x OLDPWD @@ -383,7 +383,7 @@ variable! And we can use that information to connect from another container as a client:: - $ docker run -i -t -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT' + $ docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT' 172.17.0.32:6379> VOLUME (Shared Filesystems) @@ -393,7 +393,7 @@ VOLUME (Shared Filesystems) -v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro]. If "container-dir" is missing, then docker creates a new volume. - -volumes-from="": Mount all volumes from the given container(s) + --volumes-from="": Mount all volumes from the given container(s) The volumes commands are complex enough to have their own documentation in section :ref:`volume_def`. A developer can define one diff --git a/docs/sources/use/ambassador_pattern_linking.rst b/docs/sources/use/ambassador_pattern_linking.rst index e7cdbd7c96..bbd5816768 100644 --- a/docs/sources/use/ambassador_pattern_linking.rst +++ b/docs/sources/use/ambassador_pattern_linking.rst @@ -43,26 +43,26 @@ Start actual redis server on one Docker host .. code-block:: bash - big-server $ docker run -d -name redis crosbymichael/redis + big-server $ docker run -d --name redis crosbymichael/redis Then add an ambassador linked to the redis server, mapping a port to the outside world .. code-block:: bash - big-server $ docker run -d -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador + big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador On the other host, you can set up another ambassador setting environment variables for each remote port we want to proxy to the ``big-server`` .. code-block:: bash - client-server $ docker run -d -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador + client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador Then on the ``client-server`` host, you can use a redis client container to talk to the remote redis server, just by linking to the local redis ambassador. .. code-block:: bash - client-server $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli + client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli redis 172.17.0.160:6379> ping PONG @@ -79,19 +79,19 @@ On the docker host (192.168.1.52) that redis will run on: .. 
code-block:: bash # start actual redis server - $ docker run -d -name redis crosbymichael/redis + $ docker run -d --name redis crosbymichael/redis # get a redis-cli container for connection testing $ docker pull relateiq/redis-cli # test the redis server by talking to it directly - $ docker run -t -i -rm -link redis:redis relateiq/redis-cli + $ docker run -t -i --rm --link redis:redis relateiq/redis-cli redis 172.17.0.136:6379> ping PONG ^D # add redis ambassador - $ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh + $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh in the redis_ambassador container, you can see the linked redis containers's env @@ -119,7 +119,7 @@ This environment is used by the ambassador socat script to expose redis to the w $ docker rm redis_ambassador $ sudo ./contrib/mkimage-unittest.sh - $ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 docker-ut sh + $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379 @@ -127,7 +127,7 @@ then ping the redis server via the ambassador .. code-block::bash - $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli + $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli redis 172.17.0.160:6379> ping PONG @@ -136,7 +136,7 @@ Now goto a different server .. code-block:: bash $ sudo ./contrib/mkimage-unittest.sh - $ docker run -t -i -expose 6379 -name redis_ambassador docker-ut sh + $ docker run -t -i --expose 6379 --name redis_ambassador docker-ut sh $ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379 @@ -145,7 +145,7 @@ and get the redis-cli image so we can talk over the ambassador bridge .. code-block:: bash $ docker pull relateiq/redis-cli - $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli + $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli redis 172.17.0.160:6379> ping PONG @@ -157,7 +157,7 @@ When you start the container, it uses a small ``sed`` script to parse out the (p link environment variables to set up the port forwarding. On the remote host, you need to set the variable using the ``-e`` command line option. -``-expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the +``--expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379``. @@ -171,9 +171,9 @@ local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379 # docker build -t SvenDowideit/ambassador . # docker tag SvenDowideit/ambassador ambassador # then to run it (on the host that has the real backend on it) - # docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador + # docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 ambassador # on the remote host, you can set up another ambassador - # docker run -t -i -name redis_ambassador -expose 6379 sh + # docker run -t -i --name redis_ambassador --expose 6379 sh FROM docker-ut MAINTAINER SvenDowideit@home.org.au diff --git a/docs/sources/use/basics.rst b/docs/sources/use/basics.rst index 24c22bba39..447366f55a 100644 --- a/docs/sources/use/basics.rst +++ b/docs/sources/use/basics.rst @@ -39,7 +39,7 @@ Repository to a local image cache. 12 character hash ``539c0211cd76: Download complete`` which is the short form of the image ID. 
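The ambassador shown above is nothing more than a TCP relay configured from the link environment variables (``socat`` plus a ``sed`` one-liner). Purely as a sketch of the same idea, and assuming the ``tcp://host:port`` form of ``REDIS_PORT_6379_TCP`` used in these docs, an equivalent relay in Go looks roughly like this (not the actual ambassador image):

```go
package main

import (
	"io"
	"log"
	"net"
	"os"
	"strings"
)

func main() {
	// e.g. REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379, as injected by --link or -e above.
	backend := strings.TrimPrefix(os.Getenv("REDIS_PORT_6379_TCP"), "tcp://")

	ln, err := net.Listen("tcp", ":6379")
	if err != nil {
		log.Fatal(err)
	}
	for {
		client, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(client net.Conn) {
			defer client.Close()
			server, err := net.Dial("tcp", backend)
			if err != nil {
				log.Println(err)
				return
			}
			defer server.Close()
			go io.Copy(server, client) // copy client -> backend
			io.Copy(client, server)    // copy backend -> client, return when backend closes
		}(client)
	}
}
```

Either way, the client container only ever talks to the ambassador's published port; pointing at a different backend is just a matter of restarting the ambassador with a different environment variable.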
These short image IDs are the first 12 characters of the full image ID - which can be found using ``docker - inspect`` or ``docker images -notrunc=true`` + inspect`` or ``docker images --no-trunc=true`` Running an interactive shell ---------------------------- diff --git a/docs/sources/use/networking.rst b/docs/sources/use/networking.rst index c00c608550..59c63ed674 100644 --- a/docs/sources/use/networking.rst +++ b/docs/sources/use/networking.rst @@ -121,8 +121,8 @@ Container intercommunication The value of the Docker daemon's ``icc`` parameter determines whether containers can communicate with each other over the bridge network. -- The default, ``-icc=true`` allows containers to communicate with each other. -- ``-icc=false`` means containers are isolated from each other. +- The default, ``--icc=true`` allows containers to communicate with each other. +- ``--icc=false`` means containers are isolated from each other. Docker uses ``iptables`` under the hood to either accept or drop communication between containers. diff --git a/docs/sources/use/port_redirection.rst b/docs/sources/use/port_redirection.rst index ba244adadb..cf5c2100a9 100644 --- a/docs/sources/use/port_redirection.rst +++ b/docs/sources/use/port_redirection.rst @@ -114,14 +114,14 @@ exposure, is possible because ``client`` is started after ``server`` has been started. Here is a full example. On ``server``, the port of interest is -exposed. The exposure is done either through the ``-expose`` parameter +exposed. The exposure is done either through the ``--expose`` parameter to the ``docker run`` command, or the ``EXPOSE`` build command in a Dockerfile: .. code-block:: bash # Expose port 80 - docker run -expose 80 --name server + docker run --expose 80 --name server The ``client`` then links to the ``server``: diff --git a/docs/sources/use/working_with_links_names.rst b/docs/sources/use/working_with_links_names.rst index 1b0e9f6914..dc370c01c9 100644 --- a/docs/sources/use/working_with_links_names.rst +++ b/docs/sources/use/working_with_links_names.rst @@ -19,14 +19,14 @@ Container Naming .. versionadded:: v0.6.5 -You can now name your container by using the ``-name`` flag. If no +You can now name your container by using the ``--name`` flag. If no name is provided, Docker will automatically generate a name. You can see this name using the ``docker ps`` command. .. code-block:: bash - # format is "sudo docker run -name " - $ sudo docker run -name test ubuntu /bin/bash + # format is "sudo docker run --name " + $ sudo docker run --name test ubuntu /bin/bash # the flag "-a" Show all containers. Only running containers are shown by default. $ sudo docker ps -a @@ -41,9 +41,9 @@ Links: service discovery for docker .. versionadded:: v0.6.5 Links allow containers to discover and securely communicate with each -other by using the flag ``-link name:alias``. Inter-container +other by using the flag ``--link name:alias``. Inter-container communication can be disabled with the daemon flag -``-icc=false``. With this flag set to ``false``, Container A cannot +``--icc=false``. With this flag set to ``false``, Container A cannot access Container B unless explicitly allowed via a link. This is a huge win for securing your containers. When two containers are linked together Docker creates a parent child relationship between the @@ -63,7 +63,7 @@ based on that image and run it as a daemon. .. 
code-block:: bash - $ sudo docker run -d -name redis crosbymichael/redis + $ sudo docker run -d --name redis crosbymichael/redis We can issue all the commands that you would expect using the name ``redis``; start, stop, attach, using the name for our container. The @@ -77,9 +77,9 @@ we need to establish a link. .. code-block:: bash - $ sudo docker run -t -i -link redis:db -name webapp ubuntu bash + $ sudo docker run -t -i --link redis:db --name webapp ubuntu bash -When you specified ``-link redis:db`` you are telling Docker to link +When you specified ``--link redis:db`` you are telling Docker to link the container named ``redis`` into this new container with the alias ``db``. Environment variables are prefixed with the alias so that the parent container can access network and environment information from diff --git a/docs/sources/use/working_with_volumes.rst b/docs/sources/use/working_with_volumes.rst index 755be009e3..02f4e71b13 100644 --- a/docs/sources/use/working_with_volumes.rst +++ b/docs/sources/use/working_with_volumes.rst @@ -42,14 +42,14 @@ two new volumes:: This command will create the new container with two new volumes that exits instantly (``true`` is pretty much the smallest, simplest program that you can run). Once created you can mount its volumes in any other -container using the ``-volumes-from`` option; irrespective of whether the +container using the ``--volumes-from`` option; irrespective of whether the container is running or not. Or, you can use the VOLUME instruction in a Dockerfile to add one or more new volumes to any container created from that image:: # BUILD-USING: docker build -t data . - # RUN-USING: docker run -name DATA data + # RUN-USING: docker run --name DATA data FROM busybox VOLUME ["/var/volume1", "/var/volume2"] CMD ["/bin/true"] @@ -63,19 +63,19 @@ Data Volume Container, and then to mount the data from it. Create a named container with volumes to share (``/var/volume1`` and ``/var/volume2``):: - $ docker run -v /var/volume1 -v /var/volume2 -name DATA busybox true + $ docker run -v /var/volume1 -v /var/volume2 --name DATA busybox true Then mount those data volumes into your application containers:: - $ docker run -t -i -rm -volumes-from DATA -name client1 ubuntu bash + $ docker run -t -i --rm --volumes-from DATA --name client1 ubuntu bash -You can use multiple ``-volumes-from`` parameters to bring together multiple +You can use multiple ``--volumes-from`` parameters to bring together multiple data volumes from multiple containers. Interestingly, you can mount the volumes that came from the ``DATA`` container in yet another container via the ``client1`` middleman container:: - $ docker run -t -i -rm -volumes-from client1 -name client2 ubuntu bash + $ docker run -t -i --rm --volumes-from client1 --name client2 ubuntu bash This allows you to abstract the actual data source from users of that data, similar to :ref:`ambassador_pattern_linking `. @@ -131,7 +131,7 @@ data-container's volume. 
For example:: $ sudo docker run -rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data -* ``-rm`` - remove the container when it exits +* ``--rm`` - remove the container when it exits * ``--volumes-from DATA`` - attach to the volumes shared by the ``DATA`` container * ``-v $(pwd):/backup`` - bind mount the current directory into the container; to write the tar file to * ``busybox`` - a small simpler image - good for quick maintenance @@ -142,11 +142,11 @@ Then to restore to the same container, or another that you've made elsewhere:: # create a new data container $ sudo docker run -v /data -name DATA2 busybox true # untar the backup files into the new container's data volume - $ sudo docker run -rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar + $ sudo docker run --rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar data/ data/sven.txt # compare to the original container - $ sudo docker run -rm --volumes-from DATA -v `pwd`:/backup busybox ls /data + $ sudo docker run --rm --volumes-from DATA -v `pwd`:/backup busybox ls /data sven.txt diff --git a/docs/sources/use/workingwithrepository.rst b/docs/sources/use/workingwithrepository.rst index cbde932cde..c126361f8c 100644 --- a/docs/sources/use/workingwithrepository.rst +++ b/docs/sources/use/workingwithrepository.rst @@ -74,7 +74,7 @@ name or description: Search the docker index for images - -notrunc=false: Don't truncate output + --no-trunc=false: Don't truncate output $ sudo docker search centos Found 25 results matching your query ("centos") NAME DESCRIPTION diff --git a/hack/RELEASE-CHECKLIST.md b/hack/RELEASE-CHECKLIST.md index 6ef5d9cf58..2920e52917 100644 --- a/hack/RELEASE-CHECKLIST.md +++ b/hack/RELEASE-CHECKLIST.md @@ -139,7 +139,7 @@ docker run \ -e AWS_ACCESS_KEY \ -e AWS_SECRET_KEY \ -e GPG_PASSPHRASE \ - -i -t -privileged \ + -i -t --privileged \ docker \ hack/release.sh ``` @@ -173,7 +173,7 @@ docker run \ -e AWS_ACCESS_KEY \ -e AWS_SECRET_KEY \ -e GPG_PASSPHRASE \ - -i -t -privileged \ + -i -t --privileged \ docker \ hack/release.sh ``` diff --git a/hack/dind b/hack/dind index eff656b0e0..94147f5324 100755 --- a/hack/dind +++ b/hack/dind @@ -5,7 +5,7 @@ # See the blog post: http://blog.docker.io/2013/09/docker-can-now-run-within-docker/ # # This script should be executed inside a docker container in privilieged mode -# ('docker run -privileged', introduced in docker 0.6). +# ('docker run --privileged', introduced in docker 0.6). # Usage: dind CMD [ARG...] @@ -17,7 +17,7 @@ CGROUP=/sys/fs/cgroup mountpoint -q $CGROUP || mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || { - echo "Could not make a tmpfs mount. Did you use -privileged?" + echo "Could not make a tmpfs mount. Did you use --privileged?" exit 1 } diff --git a/hack/infrastructure/docker-ci/Dockerfile b/hack/infrastructure/docker-ci/Dockerfile index fd795f4d45..789c794f54 100644 --- a/hack/infrastructure/docker-ci/Dockerfile +++ b/hack/infrastructure/docker-ci/Dockerfile @@ -1,8 +1,8 @@ # DOCKER-VERSION: 0.7.6 # AUTHOR: Daniel Mizyrycki # DESCRIPTION: docker-ci continuous integration service -# TO_BUILD: docker build -rm -t docker-ci/docker-ci . -# TO_RUN: docker run -rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \ +# TO_BUILD: docker build -t docker-ci/docker-ci . 
+# TO_RUN: docker run --rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \ # -v /data/docker-ci:/data/docker-ci docker-ci/docker-ci from ubuntu:12.04 diff --git a/hack/infrastructure/docker-ci/README.rst b/hack/infrastructure/docker-ci/README.rst index 3e429ffdd5..07c1ffcec0 100644 --- a/hack/infrastructure/docker-ci/README.rst +++ b/hack/infrastructure/docker-ci/README.rst @@ -57,8 +57,8 @@ Production deployment export EMAIL_RCP=[EMAIL_FOR_BUILD_ERRORS] # Build docker-ci and testbuilder docker images - docker -H $DOCKER_PROD build -rm -t docker-ci/docker-ci . - (cd testbuilder; docker -H $DOCKER_PROD build -rm -t docker-ci/testbuilder .) + docker -H $DOCKER_PROD build -t docker-ci/docker-ci . + (cd testbuilder; docker -H $DOCKER_PROD build --rm -t docker-ci/testbuilder .) # Run docker-ci container ( assuming no previous container running ) (cd dcr/prod; dcr docker-ci.yml start) diff --git a/hack/infrastructure/docker-ci/dockertest/nightlyrelease b/hack/infrastructure/docker-ci/dockertest/nightlyrelease index 475b088065..cface6c125 100755 --- a/hack/infrastructure/docker-ci/dockertest/nightlyrelease +++ b/hack/infrastructure/docker-ci/dockertest/nightlyrelease @@ -6,7 +6,7 @@ else AWS_S3_BUCKET='get-staging.docker.io' fi -docker run -rm -privileged -v /run:/var/socket \ +docker run --rm --privileged -v /run:/var/socket \ -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY \ -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE \ -e DOCKER_RELEASE=1 -e DEPLOYMENT=$DEPLOYMENT docker-ci/testbuilder docker diff --git a/hack/infrastructure/docker-ci/dockertest/project b/hack/infrastructure/docker-ci/dockertest/project index 160f2d5d59..8131ab533a 100755 --- a/hack/infrastructure/docker-ci/dockertest/project +++ b/hack/infrastructure/docker-ci/dockertest/project @@ -3,6 +3,6 @@ set -x PROJECT_NAME=$(basename $0) -docker run -rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \ +docker run --rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \ -v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3 diff --git a/hack/infrastructure/docker-ci/testbuilder/Dockerfile b/hack/infrastructure/docker-ci/testbuilder/Dockerfile index a008da6843..8fa9b4c797 100644 --- a/hack/infrastructure/docker-ci/testbuilder/Dockerfile +++ b/hack/infrastructure/docker-ci/testbuilder/Dockerfile @@ -1,5 +1,5 @@ -# TO_BUILD: docker build -rm -no-cache -t docker-ci/testbuilder . -# TO_RUN: docker run -rm -u sysadmin \ +# TO_BUILD: docker build --no-cache -t docker-ci/testbuilder . +# TO_RUN: docker run --rm -u sysadmin \ # -v /run:/var/socket docker-ci/testbuilder docker-registry # diff --git a/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh b/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh index 72087462ad..a73704c50b 100755 --- a/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh +++ b/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh @@ -5,8 +5,8 @@ PROJECT_PATH=$1 # Build the docker project cd /data/$PROJECT_PATH -sg docker -c "docker build -q -rm -t registry ." -cd test; sg docker -c "docker build -q -rm -t docker-registry-test ." +sg docker -c "docker build -q -t registry ." +cd test; sg docker -c "docker build -q -t docker-registry-test ." 
# Run the tests -sg docker -c "docker run -rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test" +sg docker -c "docker run --rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test" diff --git a/hack/infrastructure/docker-ci/testbuilder/docker.sh b/hack/infrastructure/docker-ci/testbuilder/docker.sh index b365dd7eaf..c8f3c18eb9 100755 --- a/hack/infrastructure/docker-ci/testbuilder/docker.sh +++ b/hack/infrastructure/docker-ci/testbuilder/docker.sh @@ -5,14 +5,14 @@ PROJECT_PATH=$1 # Build the docker project cd /data/$PROJECT_PATH -sg docker -c "docker build -q -rm -t docker ." +sg docker -c "docker build -q -t docker ." if [ "$DOCKER_RELEASE" == "1" ]; then # Do nightly release - echo sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh" + echo sg docker -c "docker run --rm --privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh" set +x - sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh" + sg docker -c "docker run --rm --privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh" else # Run the tests - sg docker -c "docker run -rm -privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh" + sg docker -c "docker run --rm --privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh" fi diff --git a/hack/release.sh b/hack/release.sh index 50913dd395..c380d2239a 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -31,7 +31,7 @@ docker run -e AWS_S3_BUCKET=get-staging.docker.io \ -e AWS_ACCESS_KEY=AKI1234... \ -e AWS_SECRET_KEY=sEs4mE... \ -e GPG_PASSPHRASE=m0resEs4mE... 
\ - -i -t -privileged \ + -i -t --privileged \ docker ./hack/release.sh EOF exit 1 diff --git a/integration/commands_test.go b/integration/commands_test.go index d226cd7133..dba15842c7 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -739,7 +739,7 @@ func TestRunAutoRemove(t *testing.T) { c := make(chan struct{}) go func() { defer close(c) - if err := cli.CmdRun("-rm", unitTestImageID, "hostname"); err != nil { + if err := cli.CmdRun("--rm", unitTestImageID, "hostname"); err != nil { t.Fatal(err) } }() diff --git a/integration/container_test.go b/integration/container_test.go index 4efb95a2a1..c32a8bcff7 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -1580,7 +1580,7 @@ func TestPrivilegedCanMknod(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() - if output, err := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { + if output, err := runContainer(eng, runtime, []string{"--privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { t.Fatalf("Could not mknod into privileged container %s %v", output, err) } } @@ -1589,7 +1589,7 @@ func TestPrivilegedCanMount(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() - if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" { + if output, _ := runContainer(eng, runtime, []string{"--privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" { t.Fatal("Could not mount into privileged container") } } diff --git a/integration/server_test.go b/integration/server_test.go index e9781777e1..54ee9a77a9 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -203,7 +203,7 @@ func TestCreateRmRunning(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() - config, hostConfig, _, err := runconfig.Parse([]string{"-name", "foo", unitTestImageID, "sleep 300"}, nil) + config, hostConfig, _, err := runconfig.Parse([]string{"--name", "foo", unitTestImageID, "sleep 300"}, nil) if err != nil { t.Fatal(err) } diff --git a/runconfig/config_test.go b/runconfig/config_test.go index 40d53fa2f4..46e4691b93 100644 --- a/runconfig/config_test.go +++ b/runconfig/config_test.go @@ -20,21 +20,21 @@ func mustParse(t *testing.T, args string) (*Config, *HostConfig) { } func TestParseRunLinks(t *testing.T) { - if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { + if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) } - if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { + if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) } if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) } - if _, _, err := parse(t, "-link a"); err == nil { - t.Fatalf("Error parsing links. 
`-link a` should be an error but is not") + if _, _, err := parse(t, "--link a"); err == nil { + t.Fatalf("Error parsing links. `--link a` should be an error but is not") } - if _, _, err := parse(t, "-link"); err == nil { - t.Fatalf("Error parsing links. `-link` should be an error but is not") + if _, _, err := parse(t, "--link"); err == nil { + t.Fatalf("Error parsing links. `--link` should be an error but is not") } } @@ -73,8 +73,8 @@ func TestParseRunAttach(t *testing.T) { if _, _, err := parse(t, "-a stderr -d"); err == nil { t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") } - if _, _, err := parse(t, "-d -rm"); err == nil { - t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not") + if _, _, err := parse(t, "-d --rm"); err == nil { + t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") } } diff --git a/runconfig/parse.go b/runconfig/parse.go index d481da8d3b..2138f4e68c 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -15,7 +15,7 @@ import ( var ( ErrInvalidWorikingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.") ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") - ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: -rm and -d") + ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") ) //FIXME Only used in tests @@ -74,7 +74,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") - cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") if err := cmd.Parse(args); err != nil { return nil, nil, cmd, err -- cgit v1.2.1 From 029aac96396f5a9d76adf5e4675d27321273dfbd Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Thu, 13 Mar 2014 11:11:02 -0700 Subject: Use BSD raw mode on darwin. Fixes nano, tmux and others Docker-DCO-1.1-Signed-off-by: Guillaume J. 
Charmes (github: creack) --- pkg/term/termios_darwin.go | 39 +++++++++++++++++++++++++-------------- pkg/term/termios_freebsd.go | 2 -- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/pkg/term/termios_darwin.go b/pkg/term/termios_darwin.go index 24e79de4b2..11cd70d10b 100644 --- a/pkg/term/termios_darwin.go +++ b/pkg/term/termios_darwin.go @@ -9,16 +9,24 @@ const ( getTermios = syscall.TIOCGETA setTermios = syscall.TIOCSETA - ECHO = 0x00000008 - ONLCR = 0x2 - ISTRIP = 0x20 - INLCR = 0x40 - ISIG = 0x80 - IGNCR = 0x80 - ICANON = 0x100 - ICRNL = 0x100 - IXOFF = 0x400 - IXON = 0x200 + IGNBRK = syscall.IGNBRK + PARMRK = syscall.PARMRK + INLCR = syscall.INLCR + IGNCR = syscall.IGNCR + ECHONL = syscall.ECHONL + CSIZE = syscall.CSIZE + ICRNL = syscall.ICRNL + ISTRIP = syscall.ISTRIP + PARENB = syscall.PARENB + ECHO = syscall.ECHO + ICANON = syscall.ICANON + ISIG = syscall.ISIG + IXON = syscall.IXON + BRKINT = syscall.BRKINT + INPCK = syscall.INPCK + OPOST = syscall.OPOST + CS8 = syscall.CS8 + IEXTEN = syscall.IEXTEN ) type Termios struct { @@ -41,10 +49,13 @@ func MakeRaw(fd uintptr) (*State, error) { } newState := oldState.termios - newState.Iflag &^= (ISTRIP | INLCR | IGNCR | IXON | IXOFF) - newState.Iflag |= ICRNL - newState.Oflag |= ONLCR - newState.Lflag &^= (ECHO | ICANON | ISIG) + newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) + newState.Oflag &^= OPOST + newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) + newState.Cflag &^= (CSIZE | PARENB) + newState.Cflag |= CS8 + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { return nil, err diff --git a/pkg/term/termios_freebsd.go b/pkg/term/termios_freebsd.go index 9acf9dfe15..ed3659572c 100644 --- a/pkg/term/termios_freebsd.go +++ b/pkg/term/termios_freebsd.go @@ -47,8 +47,6 @@ func MakeRaw(fd uintptr) (*State, error) { if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { return nil, err } - // C.makeraw() - // return &oldState, nil newState := oldState.termios newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) -- cgit v1.2.1 From 636959e20ae368e470a5c2420aae4528245b2cf6 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Mon, 3 Mar 2014 19:19:00 +0100 Subject: Move .dockerenv parsing to lxc driver This is not needed for e.g. 
the native driver Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- execdriver/lxc/driver.go | 4 ++++ execdriver/lxc/init.go | 30 ++++++++++++++++++++++++++++++ sysinit/sysinit.go | 30 ------------------------------ 3 files changed, 34 insertions(+), 30 deletions(-) diff --git a/execdriver/lxc/driver.go b/execdriver/lxc/driver.go index b398cb1a37..9abec8ac3f 100644 --- a/execdriver/lxc/driver.go +++ b/execdriver/lxc/driver.go @@ -21,6 +21,10 @@ const DriverName = "lxc" func init() { execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { + if err := setupEnv(args); err != nil { + return err + } + if err := setupHostname(args); err != nil { return err } diff --git a/execdriver/lxc/init.go b/execdriver/lxc/init.go index e138915212..0f134088a3 100644 --- a/execdriver/lxc/init.go +++ b/execdriver/lxc/init.go @@ -1,17 +1,47 @@ package lxc import ( + "encoding/json" "fmt" "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/pkg/netlink" "github.com/dotcloud/docker/pkg/user" "github.com/syndtr/gocapability/capability" + "io/ioutil" "net" "os" "strings" "syscall" ) +// Clear environment pollution introduced by lxc-start +func setupEnv(args *execdriver.InitArgs) error { + // Get env + var env []string + content, err := ioutil.ReadFile(".dockerenv") + if err != nil { + return fmt.Errorf("Unable to load environment variables: %v", err) + } + if err := json.Unmarshal(content, &env); err != nil { + return fmt.Errorf("Unable to unmarshal environment variables: %v", err) + } + // Propagate the plugin-specific container env variable + env = append(env, "container="+os.Getenv("container")) + + args.Env = env + + os.Clearenv() + for _, kv := range args.Env { + parts := strings.SplitN(kv, "=", 2) + if len(parts) == 1 { + parts = append(parts, "") + } + os.Setenv(parts[0], parts[1]) + } + + return nil +} + func setupHostname(args *execdriver.InitArgs) error { hostname := getEnv(args, "HOSTNAME") if hostname == "" { diff --git a/sysinit/sysinit.go b/sysinit/sysinit.go index c84c05982c..56508b105d 100644 --- a/sysinit/sysinit.go +++ b/sysinit/sysinit.go @@ -1,33 +1,16 @@ package sysinit import ( - "encoding/json" "flag" "fmt" "github.com/dotcloud/docker/execdriver" _ "github.com/dotcloud/docker/execdriver/lxc" _ "github.com/dotcloud/docker/execdriver/native" - "io/ioutil" "log" "os" - "strings" ) -// Clear environment pollution introduced by lxc-start -func setupEnv(args *execdriver.InitArgs) { - os.Clearenv() - for _, kv := range args.Env { - parts := strings.SplitN(kv, "=", 2) - if len(parts) == 1 { - parts = append(parts, "") - } - os.Setenv(parts[0], parts[1]) - } -} - func executeProgram(args *execdriver.InitArgs) error { - setupEnv(args) - dockerInitFct, err := execdriver.GetInitFunc(args.Driver) if err != nil { panic(err) @@ -59,25 +42,12 @@ func SysInit() { ) flag.Parse() - // Get env - var env []string - content, err := ioutil.ReadFile(".dockerenv") - if err != nil { - log.Fatalf("Unable to load environment variables: %v", err) - } - if err := json.Unmarshal(content, &env); err != nil { - log.Fatalf("Unable to unmarshal environment variables: %v", err) - } - // Propagate the plugin-specific container env variable - env = append(env, "container="+os.Getenv("container")) - args := &execdriver.InitArgs{ User: *user, Gateway: *gateway, Ip: *ip, WorkDir: *workDir, Privileged: *privileged, - Env: env, Args: flag.Args(), Mtu: *mtu, Driver: *driver, -- cgit v1.2.1 From 6c266c4b42eeabe2d433a994753d86637fe52a0b Mon Sep 17 00:00:00 2001 From: 
Alexander Larsson Date: Mon, 3 Mar 2014 16:15:29 +0100 Subject: Move all bind-mounts in the container inside the namespace This moves the bind mounts like /.dockerinit, /etc/hostname, volumes, etc into the container namespace, by setting them up using lxc. This is useful to avoid littering the global namespace with a lot of mounts that are internal to each container and are not generally needed on the outside. In particular, it seems that having a lot of mounts is problematic wrt scaling to a lot of containers on systems where the root filesystem is mounted --rshared. Note that the "private" option is only supported by the native driver, as lxc doesn't support setting this. This is not a huge problem, but it does mean that some mounts are unnecessarily shared inside the container if you're using the lxc driver. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- execdriver/driver.go | 8 +++++ execdriver/execdrivers/execdrivers.go | 4 +-- execdriver/lxc/lxc_template.go | 8 +++++ execdriver/native/default_template.go | 4 +++ execdriver/native/driver.go | 10 +++--- pkg/libcontainer/container.go | 10 ++++++ pkg/libcontainer/nsinit/init.go | 2 +- pkg/libcontainer/nsinit/mount.go | 20 ++++++++++- runtime/container.go | 6 ++-- runtime/runtime.go | 2 +- runtime/volumes.go | 66 ++++++++--------------------------- 11 files changed, 77 insertions(+), 63 deletions(-) diff --git a/execdriver/driver.go b/execdriver/driver.go index ec8f48f52d..ff37b6bc5b 100644 --- a/execdriver/driver.go +++ b/execdriver/driver.go @@ -97,6 +97,13 @@ type Resources struct { CpuShares int64 `json:"cpu_shares"` } +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` + Private bool `json:"private"` +} + // Process wrapps an os/exec.Cmd to add more metadata type Command struct { exec.Cmd `json:"-"` @@ -114,6 +121,7 @@ type Command struct { Network *Network `json:"network"` // if network is nil then networking is disabled Config []string `json:"config"` // generic values that specific drivers can consume Resources *Resources `json:"resources"` + Mounts []Mount `json:"mounts"` Terminal Terminal `json:"-"` // standard or tty terminal Console string `json:"-"` // dev/console path diff --git a/execdriver/execdrivers/execdrivers.go b/execdriver/execdrivers/execdrivers.go index 95b2fc634d..7486d649c1 100644 --- a/execdriver/execdrivers/execdrivers.go +++ b/execdriver/execdrivers/execdrivers.go @@ -9,7 +9,7 @@ import ( "path" ) -func NewDriver(name, root string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { +func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { switch name { case "lxc": // we want to five the lxc driver the full docker root because it needs @@ -17,7 +17,7 @@ func NewDriver(name, root string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, // to be backwards compatible return lxc.NewDriver(root, sysInfo.AppArmor) case "native": - return native.NewDriver(path.Join(root, "execdriver", "native")) + return native.NewDriver(path.Join(root, "execdriver", "native"), initPath) } return nil, fmt.Errorf("unknown exec driver %s", name) } diff --git a/execdriver/lxc/lxc_template.go b/execdriver/lxc/lxc_template.go index 1181396a18..84cd4e442e 100644 --- a/execdriver/lxc/lxc_template.go +++ b/execdriver/lxc/lxc_template.go @@ -88,6 +88,14 @@ lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bi lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts 
devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0 lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0 +{{range $value := .Mounts}} +{{if $value.Writable}} +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,rw 0 0 +{{else}} +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,ro 0 0 +{{end}} +{{end}} + {{if .Privileged}} {{if .AppArmor}} lxc.aa_profile = unconfined diff --git a/execdriver/native/default_template.go b/execdriver/native/default_template.go index 6e7d597b7b..2798f3b084 100644 --- a/execdriver/native/default_template.go +++ b/execdriver/native/default_template.go @@ -48,6 +48,10 @@ func createContainer(c *execdriver.Command) *libcontainer.Container { // check to see if we are running in ramdisk to disable pivot root container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" + for _, m := range c.Mounts { + container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private}) + } + return container } diff --git a/execdriver/native/driver.go b/execdriver/native/driver.go index 452e802523..f6c7242620 100644 --- a/execdriver/native/driver.go +++ b/execdriver/native/driver.go @@ -55,10 +55,11 @@ func init() { } type driver struct { - root string + root string + initPath string } -func NewDriver(root string) (*driver, error) { +func NewDriver(root, initPath string) (*driver, error) { if err := os.MkdirAll(root, 0700); err != nil { return nil, err } @@ -66,7 +67,8 @@ func NewDriver(root string) (*driver, error) { return nil, err } return &driver{ - root: root, + root: root, + initPath: initPath, }, nil } @@ -210,7 +212,7 @@ func (d *dockerCommandFactory) Create(container *libcontainer.Container, console // we need to join the rootfs because nsinit will setup the rootfs and chroot initPath := filepath.Join(d.c.Rootfs, d.c.InitPath) - d.c.Path = initPath + d.c.Path = d.driver.initPath d.c.Args = append([]string{ initPath, "-driver", DriverName, diff --git a/pkg/libcontainer/container.go b/pkg/libcontainer/container.go index a777da58a4..14b4b65db7 100644 --- a/pkg/libcontainer/container.go +++ b/pkg/libcontainer/container.go @@ -23,6 +23,7 @@ type Container struct { Networks []*Network `json:"networks,omitempty"` // nil for host's network stack Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux) + Mounts []Mount `json:"mounts,omitempty"` } // Network defines configuration for a container's networking stack @@ -36,3 +37,12 @@ type Network struct { Gateway string `json:"gateway,omitempty"` Mtu int `json:"mtu,omitempty"` } + +// Bind mounts from the host system to the container +// +type Mount struct { + Source string `json:"source"` // Source path, in the host namespace + Destination string `json:"destination"` // Destination path, in the container + Writable bool `json:"writable"` + Private bool `json:"private"` +} diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 336fc1eaaf..5d47b95057 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -51,7 +51,7 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol if err := system.ParentDeathSignal(); err != nil { return fmt.Errorf("parent death signal %s", err) } - if err := setupNewMountNamespace(rootfs, console, 
container.ReadonlyFs, container.NoPivotRoot); err != nil { + if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot); err != nil { return fmt.Errorf("setup mount namespace %s", err) } if err := setupNetwork(container, context); err != nil { diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go index 83577cfa8c..562ae25a59 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/nsinit/mount.go @@ -4,6 +4,7 @@ package nsinit import ( "fmt" + "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/system" "io/ioutil" "os" @@ -19,7 +20,7 @@ const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NOD // // There is no need to unmount the new mounts because as soon as the mount namespace // is no longer in use, the mounts will be removed automatically -func setupNewMountNamespace(rootfs, console string, readonly, noPivotRoot bool) error { +func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, console string, readonly, noPivotRoot bool) error { flag := syscall.MS_PRIVATE if noPivotRoot { flag = syscall.MS_SLAVE @@ -38,6 +39,23 @@ func setupNewMountNamespace(rootfs, console string, readonly, noPivotRoot bool) if err := mountSystem(rootfs); err != nil { return fmt.Errorf("mount system %s", err) } + + for _, m := range bindMounts { + flags := syscall.MS_BIND | syscall.MS_REC + if !m.Writable { + flags = flags | syscall.MS_RDONLY + } + dest := filepath.Join(rootfs, m.Destination) + if err := system.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil { + return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err) + } + if m.Private { + if err := system.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil { + return fmt.Errorf("mounting %s private %s", dest, err) + } + } + } + if err := copyDevNodes(rootfs); err != nil { return fmt.Errorf("copy dev nodes %s", err) } diff --git a/runtime/container.go b/runtime/container.go index 813147e508..2a98149f27 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -529,13 +529,13 @@ func (container *Container) Start() (err error) { return err } + populateCommand(container) + container.command.Env = env + if err := mountVolumesForContainer(container, envPath); err != nil { return err } - populateCommand(container) - container.command.Env = env - // Setup logging of stdout and stderr to disk if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil { return err diff --git a/runtime/runtime.go b/runtime/runtime.go index 72245a4555..28e7bbd1e4 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -733,7 +733,7 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (* } sysInfo := sysinfo.New(false) - ed, err := execdrivers.NewDriver(config.ExecDriver, config.Root, sysInfo) + ed, err := execdrivers.NewDriver(config.ExecDriver, config.Root, sysInitPath, sysInfo) if err != nil { return nil, err } diff --git a/runtime/volumes.go b/runtime/volumes.go index 1a548eca47..81a305f72c 100644 --- a/runtime/volumes.go +++ b/runtime/volumes.go @@ -3,6 +3,7 @@ package runtime import ( "fmt" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/execdriver" "github.com/dotcloud/docker/pkg/mount" "github.com/dotcloud/docker/utils" "io/ioutil" @@ -55,70 +56,33 @@ func mountVolumesForContainer(container *Container, envPath string) error { return err } - // Mount docker specific files into the 
containers root fs - if err := mount.Mount(runtime.sysInitPath, filepath.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil { - return err - } - if err := mount.Mount(envPath, filepath.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil { - return err - } - if err := mount.Mount(container.ResolvConfPath, filepath.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil { - return err + mounts := []execdriver.Mount{ + {runtime.sysInitPath, "/.dockerinit", false, true}, + {envPath, "/.dockerenv", false, true}, + {container.ResolvConfPath, "/etc/resolv.conf", false, true}, } if container.HostnamePath != "" && container.HostsPath != "" { - if err := mount.Mount(container.HostnamePath, filepath.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil { - return err - } - if err := mount.Mount(container.HostsPath, filepath.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil { - return err - } + mounts = append(mounts, execdriver.Mount{container.HostnamePath, "/etc/hostname", false, true}) + mounts = append(mounts, execdriver.Mount{container.HostsPath, "/etc/hosts", false, true}) } // Mount user specified volumes + // Note, these are not private because you may want propagation of (un)mounts from host + // volumes. For instance if you use -v /usr:/usr and the host later mounts /usr/share you + // want this new mount in the container for r, v := range container.Volumes { - mountAs := "ro" - if container.VolumesRW[r] { - mountAs = "rw" - } + mounts = append(mounts, execdriver.Mount{v, r, container.VolumesRW[r], false}) + } - r = filepath.Join(root, r) - if p, err := utils.FollowSymlinkInScope(r, root); err != nil { - return err - } else { - r = p - } + container.command.Mounts = mounts - if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil { - return err - } - } return nil } func unmountVolumesForContainer(container *Container) { - var ( - root = container.RootfsPath() - mounts = []string{ - root, - filepath.Join(root, "/.dockerinit"), - filepath.Join(root, "/.dockerenv"), - filepath.Join(root, "/etc/resolv.conf"), - } - ) - - if container.HostnamePath != "" && container.HostsPath != "" { - mounts = append(mounts, filepath.Join(root, "/etc/hostname"), filepath.Join(root, "/etc/hosts")) - } - - for r := range container.Volumes { - mounts = append(mounts, filepath.Join(root, r)) - } - - for i := len(mounts) - 1; i >= 0; i-- { - if lastError := mount.Unmount(mounts[i]); lastError != nil { - log.Printf("Failed to umount %v: %v", mounts[i], lastError) - } + if err := mount.Unmount(container.RootfsPath()); err != nil { + log.Printf("Failed to umount container: %v", err) } } -- cgit v1.2.1 From bf1b27dfcc6c4e049706d7d104e1abc5c330815d Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 4 Mar 2014 10:16:09 +0100 Subject: Don't use separate bind mount for container Since we're not not mounting anything but the base filesystem outside the container we no longer need the separate bind mount at /var/lib/docker/container/$id/root in order to see the base filesystem without extra mounts. So, we drop this and mount (again) the container root directly at the real basefs mountpoint. 
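The mount handling that the last two patches converge on is worth seeing in one piece: each bind mount is described declaratively (source, destination, writable, private) and applied inside the container's mount namespace rather than on the host. The following is a minimal, hypothetical sketch of that technique on Linux; the struct mirrors the execdriver.Mount fields from the hunks above, while the package name, helper name, and error messages are illustrative assumptions, not code from the patches.

```go
// Sketch only (Linux): apply one declarative bind mount under a container rootfs.
package mounts

import (
	"fmt"
	"path/filepath"
	"syscall"
)

// Mount mirrors the fields of the execdriver.Mount type introduced above.
type Mount struct {
	Source      string // path in the host namespace
	Destination string // path inside the container
	Writable    bool
	Private     bool
}

// setupBindMount recursively bind-mounts m.Source onto rootfs/m.Destination,
// requesting read-only access when the mount is not writable and switching
// propagation to private when events should not leak back to the host.
func setupBindMount(rootfs string, m Mount) error {
	flags := syscall.MS_BIND | syscall.MS_REC
	if !m.Writable {
		flags |= syscall.MS_RDONLY
	}
	dest := filepath.Join(rootfs, m.Destination)
	if err := syscall.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil {
		return fmt.Errorf("mounting %s into %s: %v", m.Source, dest, err)
	}
	if m.Private {
		// Change propagation to private so later (un)mounts under dest stay inside the namespace.
		if err := syscall.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil {
			return fmt.Errorf("making %s private: %v", dest, err)
		}
	}
	return nil
}
```

As the note added to the volumes hunk above explains, user-specified volumes are deliberately left non-private so that a mount made later on the host underneath a bound directory (for instance /usr/share under a -v /usr:/usr volume) still propagates into the container.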
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- buildfile.go | 2 +- integration/utils_test.go | 2 +- runtime/container.go | 10 +--------- runtime/runtime.go | 2 -- runtime/volumes.go | 32 ++------------------------------ 5 files changed, 5 insertions(+), 43 deletions(-) diff --git a/buildfile.go b/buildfile.go index 959b085685..e4b3d28e9c 100644 --- a/buildfile.go +++ b/buildfile.go @@ -374,7 +374,7 @@ func (b *buildFile) checkPathForAddition(orig string) error { func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error { var ( origPath = path.Join(b.contextPath, orig) - destPath = path.Join(container.BasefsPath(), dest) + destPath = path.Join(container.RootfsPath(), dest) ) // Preserve the trailing '/' if strings.HasSuffix(dest, "/") { diff --git a/integration/utils_test.go b/integration/utils_test.go index 88f2cc49c3..53b4674df7 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -71,7 +71,7 @@ func containerFileExists(eng *engine.Engine, id, dir string, t utils.Fataler) bo t.Fatal(err) } defer c.Unmount() - if _, err := os.Stat(path.Join(c.BasefsPath(), dir)); err != nil { + if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil { if os.IsNotExist(err) { return false } diff --git a/runtime/container.go b/runtime/container.go index 2a98149f27..ee545db201 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -532,7 +532,7 @@ func (container *Container) Start() (err error) { populateCommand(container) container.command.Env = env - if err := mountVolumesForContainer(container, envPath); err != nil { + if err := setupMountsForContainer(container, envPath); err != nil { return err } @@ -843,8 +843,6 @@ func (container *Container) cleanup() { } } - unmountVolumesForContainer(container) - if err := container.Unmount(); err != nil { log.Printf("%v: Failed to umount filesystem: %v", container.ID, err) } @@ -1039,12 +1037,6 @@ func (container *Container) EnvConfigPath() (string, error) { // This method must be exported to be used from the lxc template // This directory is only usable when the container is running func (container *Container) RootfsPath() string { - return path.Join(container.root, "root") -} - -// This is the stand-alone version of the root fs, without any additional mounts. 
-// This directory is usable whenever the container is mounted (and not unmounted) -func (container *Container) BasefsPath() string { return container.basefs } diff --git a/runtime/runtime.go b/runtime/runtime.go index 28e7bbd1e4..b364d1d270 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -174,7 +174,6 @@ func (runtime *Runtime) Register(container *Container) error { runtime.execDriver.Kill(command, 9) } // ensure that the filesystem is also unmounted - unmountVolumesForContainer(container) if err := container.Unmount(); err != nil { utils.Debugf("ghost unmount error %s", err) } @@ -185,7 +184,6 @@ func (runtime *Runtime) Register(container *Container) error { utils.Debugf("Container %s was supposed to be running but is not.", container.ID) if runtime.config.AutoRestart { utils.Debugf("Restarting") - unmountVolumesForContainer(container) if err := container.Unmount(); err != nil { utils.Debugf("restart unmount error %s", err) } diff --git a/runtime/volumes.go b/runtime/volumes.go index 81a305f72c..9cb66aae44 100644 --- a/runtime/volumes.go +++ b/runtime/volumes.go @@ -4,10 +4,8 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/pkg/mount" "github.com/dotcloud/docker/utils" "io/ioutil" - "log" "os" "path/filepath" "strings" @@ -35,29 +33,9 @@ func prepareVolumesForContainer(container *Container) error { return nil } -func mountVolumesForContainer(container *Container, envPath string) error { - // Setup the root fs as a bind mount of the base fs - var ( - root = container.RootfsPath() - runtime = container.runtime - ) - if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) { - return nil - } - - // Create a bind mount of the base fs as a place where we can add mounts - // without affecting the ability to access the base fs - if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil { - return err - } - - // Make sure the root fs is private so the mounts here don't propagate to basefs - if err := mount.ForceMount(root, root, "none", "private"); err != nil { - return err - } - +func setupMountsForContainer(container *Container, envPath string) error { mounts := []execdriver.Mount{ - {runtime.sysInitPath, "/.dockerinit", false, true}, + {container.runtime.sysInitPath, "/.dockerinit", false, true}, {envPath, "/.dockerenv", false, true}, {container.ResolvConfPath, "/etc/resolv.conf", false, true}, } @@ -80,12 +58,6 @@ func mountVolumesForContainer(container *Container, envPath string) error { return nil } -func unmountVolumesForContainer(container *Container) { - if err := mount.Unmount(container.RootfsPath()); err != nil { - log.Printf("Failed to umount container: %v", err) - } -} - func applyVolumesFrom(container *Container) error { if container.Config.VolumesFrom != "" { for _, containerSpec := range strings.Split(container.Config.VolumesFrom, ",") { -- cgit v1.2.1 From 6fc83eefd9e8d78044a51250d2ad185513fddd27 Mon Sep 17 00:00:00 2001 From: Charlie Lewis Date: Thu, 13 Mar 2014 11:15:52 -0700 Subject: add a breakathon for testing Docker-DCO-1.1-Signed-off-by: Charlie Lewis (github: cglewis) --- hack/RELEASE-CHECKLIST.md | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/hack/RELEASE-CHECKLIST.md b/hack/RELEASE-CHECKLIST.md index 6ef5d9cf58..1eb1646f25 100644 --- a/hack/RELEASE-CHECKLIST.md +++ b/hack/RELEASE-CHECKLIST.md @@ -178,7 +178,23 @@ docker run \ hack/release.sh ``` -### 9. Apply tag +### 9. 
Breakathon + +Spend several days along with the community explicitly investing time and +resources to try and break Docker in every possible way, documenting any +findings pertinent to the release. This time should be spent testing and +finding ways in which the release might have caused various features or upgrade +environments to have issues, not coding. During this time, the release is in +code freeze, and any additional code changes will be pushed out to the next +release. + +It should include various levels of breaking Docker, beyond just using Docker +by the book. + +Any issues found may still remain issues for this release, but they should be +documented and give appropriate warnings. + +### 10. Apply tag ```bash git tag -a $VERSION -m $VERSION bump_$VERSION @@ -188,12 +204,12 @@ git push origin $VERSION It's very important that we don't make the tag until after the official release is uploaded to get.docker.io! -### 10. Go to github to merge the `bump_$VERSION` branch into release +### 11. Go to github to merge the `bump_$VERSION` branch into release Don't forget to push that pretty blue button to delete the leftover branch afterwards! -### 11. Update the docs branch +### 12. Update the docs branch ```bash git checkout docs @@ -207,7 +223,7 @@ Updating the docs branch will automatically update the documentation on the after the merge. The docs will appear on http://docs.docker.io/. For more information about documentation releases, see `docs/README.md`. -### 12. Create a new pull request to merge release back into master +### 13. Create a new pull request to merge release back into master ```bash git checkout master @@ -225,7 +241,7 @@ echo "https://github.com/dotcloud/docker/compare/master...merge_release_$VERSION Again, get two maintainers to validate, then merge, then push that pretty blue button to delete your branch. -### 13. Rejoice and Evangelize! +### 14. Rejoice and Evangelize! Congratulations! You're done. -- cgit v1.2.1 From ab26c16b32420011b0aee3de1a3bce5a0afd6f4d Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Thu, 13 Mar 2014 13:58:09 -0700 Subject: Fix EXPOSE cache miss issue Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- buildfile.go | 21 ++++++++++++++++++--- runconfig/merge.go | 1 + runtime/runtime.go | 1 - server.go | 1 - 4 files changed, 19 insertions(+), 5 deletions(-) diff --git a/buildfile.go b/buildfile.go index 959b085685..dc9039f8d1 100644 --- a/buildfile.go +++ b/buildfile.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/registry" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/runtime" @@ -304,8 +305,22 @@ func (b *buildFile) CmdEntrypoint(args string) error { } func (b *buildFile) CmdExpose(args string) error { - ports := strings.Split(args, " ") - b.config.PortSpecs = append(ports, b.config.PortSpecs...) 
+ portsTab := strings.Split(args, " ") + + if b.config.ExposedPorts == nil { + b.config.ExposedPorts = make(nat.PortSet) + } + ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...)) + if err != nil { + return err + } + for port := range ports { + if _, exists := b.config.ExposedPorts[port]; !exists { + b.config.ExposedPorts[port] = struct{}{} + } + } + b.config.PortSpecs = nil + return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) } @@ -686,12 +701,12 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error { b.tmpContainers[container.ID] = struct{}{} fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) id = container.ID + if err := container.Mount(); err != nil { return err } defer container.Unmount() } - container := b.runtime.Get(id) if container == nil { return fmt.Errorf("An error occured while creating the container") diff --git a/runconfig/merge.go b/runconfig/merge.go index a8d677baa8..3b91aa2af0 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -64,6 +64,7 @@ func Merge(userConf, imageConf *Config) error { } } } + if !userConf.Tty { userConf.Tty = imageConf.Tty } diff --git a/runtime/runtime.go b/runtime/runtime.go index 72245a4555..e54acd15d3 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -542,7 +542,6 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe // The image can optionally be tagged into a repository func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) { // FIXME: freeze the container before copying it to avoid data corruption? - // FIXME: this shouldn't be in commands. if err := container.Mount(); err != nil { return nil, err } diff --git a/server.go b/server.go index d4d6a39158..75fa633e8f 100644 --- a/server.go +++ b/server.go @@ -1970,7 +1970,6 @@ func (srv *Server) canDeleteImage(imgID string) error { } func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { - // Retrieve all images images, err := srv.runtime.Graph().Map() if err != nil { -- cgit v1.2.1 From 747275d30c4d4eb25ca798394cc04db00912adb2 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 13 Mar 2014 14:31:09 -0700 Subject: Always symlink /dev/ptmx for libcontainer Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/mount.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go index 83577cfa8c..072188ecd8 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/nsinit/mount.go @@ -46,10 +46,8 @@ func setupNewMountNamespace(rootfs, console string, readonly, noPivotRoot bool) if err := setupDev(rootfs); err != nil { return err } - if console != "" { - if err := setupPtmx(rootfs, console); err != nil { - return err - } + if err := setupPtmx(rootfs, console); err != nil { + return err } if err := system.Chdir(rootfs); err != nil { return fmt.Errorf("chdir into %s %s", rootfs, err) @@ -245,8 +243,10 @@ func setupPtmx(rootfs, console string) error { if err := os.Symlink("pts/ptmx", ptmx); err != nil { return fmt.Errorf("symlink dev ptmx %s", err) } - if err := setupConsole(rootfs, console); err != nil { - return err + if console != "" { + if err := setupConsole(rootfs, console); err != nil { + return err + } } return nil } -- cgit v1.2.1 From 3fa99b35b05d9159d6f7f4c7465dec747da2c4e1 Mon 
Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 12 Mar 2014 18:04:14 -0700 Subject: Don't kill by pid for other drivers Closes #4575 Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/runtime.go | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/runtime/runtime.go b/runtime/runtime.go index b364d1d270..16117b9788 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -160,22 +160,16 @@ func (runtime *Runtime) Register(container *Container) error { if container.State.IsGhost() { utils.Debugf("killing ghost %s", container.ID) - existingPid := container.State.Pid container.State.SetGhost(false) container.State.SetStopped(0) + // We only have to handle this for lxc because the other drivers will ensure that + // no ghost processes are left when docker dies if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") { lxc.KillLxc(container.ID, 9) - } else { - command := &execdriver.Command{ - ID: container.ID, + if err := container.Unmount(); err != nil { + utils.Debugf("ghost unmount error %s", err) } - command.Process = &os.Process{Pid: existingPid} - runtime.execDriver.Kill(command, 9) - } - // ensure that the filesystem is also unmounted - if err := container.Unmount(); err != nil { - utils.Debugf("ghost unmount error %s", err) } } -- cgit v1.2.1 From cbd2a30cd6185d1469f82f8b6693d6158c93d54a Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 13 Mar 2014 15:18:08 -0700 Subject: Update libcontainer readme and todo list Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/README.md | 119 +++++++++++++++++++++++++++------------------ pkg/libcontainer/TODO.md | 8 +-- 2 files changed, 72 insertions(+), 55 deletions(-) diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md index d6e4dedd63..2c85111b97 100644 --- a/pkg/libcontainer/README.md +++ b/pkg/libcontainer/README.md @@ -16,54 +16,77 @@ process are specified in this file. 
The configuration is used for each process Sample `container.json` file: ```json { - "hostname": "koye", - "tty": true, - "environment": [ - "HOME=/", - "PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin", - "container=docker", - "TERM=xterm-256color" - ], - "namespaces": [ - "NEWIPC", - "NEWNS", - "NEWPID", - "NEWUTS", - "NEWNET" - ], - "capabilities": [ - "SETPCAP", - "SYS_MODULE", - "SYS_RAWIO", - "SYS_PACCT", - "SYS_ADMIN", - "SYS_NICE", - "SYS_RESOURCE", - "SYS_TIME", - "SYS_TTY_CONFIG", - "MKNOD", - "AUDIT_WRITE", - "AUDIT_CONTROL", - "MAC_OVERRIDE", - "MAC_ADMIN", - "NET_ADMIN" - ], - "networks": [{ - "type": "veth", - "context": { - "bridge": "docker0", - "prefix": "dock" - }, - "address": "172.17.0.100/16", - "gateway": "172.17.42.1", - "mtu": 1500 - } - ], - "cgroups": { - "name": "docker-koye", - "parent": "docker", - "memory": 5248000 - } + "hostname" : "koye", + "networks" : [ + { + "gateway" : "172.17.42.1", + "context" : { + "bridge" : "docker0", + "prefix" : "veth" + }, + "address" : "172.17.0.2/16", + "type" : "veth", + "mtu" : 1500 + } + ], + "cgroups" : { + "parent" : "docker", + "name" : "11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620" + }, + "tty" : true, + "environment" : [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + "HOSTNAME=11bb30683fb0", + "TERM=xterm" + ], + "capabilities" : [ + "SETPCAP", + "SYS_MODULE", + "SYS_RAWIO", + "SYS_PACCT", + "SYS_ADMIN", + "SYS_NICE", + "SYS_RESOURCE", + "SYS_TIME", + "SYS_TTY_CONFIG", + "MKNOD", + "AUDIT_WRITE", + "AUDIT_CONTROL", + "MAC_OVERRIDE", + "MAC_ADMIN", + "NET_ADMIN" + ], + "context" : { + "apparmor_profile" : "docker-default" + }, + "mounts" : [ + { + "source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/resolv.conf", + "writable" : false, + "destination" : "/etc/resolv.conf", + "private" : true + }, + { + "source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/hostname", + "writable" : false, + "destination" : "/etc/hostname", + "private" : true + }, + { + "source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/hosts", + "writable" : false, + "destination" : "/etc/hosts", + "private" : true + } + ], + "namespaces" : [ + "NEWNS", + "NEWUTS", + "NEWIPC", + "NEWPID", + "NEWNET" + ] } ``` diff --git a/pkg/libcontainer/TODO.md b/pkg/libcontainer/TODO.md index f18c0b4c51..87224db85d 100644 --- a/pkg/libcontainer/TODO.md +++ b/pkg/libcontainer/TODO.md @@ -1,17 +1,11 @@ #### goals * small and simple - line count is not everything but less code is better -* clean lines between what we do in the pkg * provide primitives for working with namespaces not cater to every option * extend via configuration not by features - host networking, no networking, veth network can be accomplished via adjusting the container.json, nothing to do with code #### tasks -* proper tty for a new process in an existing container -* use exec or raw syscalls for new process in existing container -* setup proper user in namespace if specified -* implement hook or clean interface for cgroups +* reexec or raw syscalls for new process in existing container * example configs for different setups (host networking, boot init) * improve pkg documentation with comments * testing - this is hard in a low level pkg but we could do some, maybe -* pivot root * selinux -* apparmor -- cgit v1.2.1 From 03f0ec35ae31420dd6a56883535056087b1a75dd Mon Sep 17 00:00:00 2001 From: 
Victor Vieux Date: Thu, 13 Mar 2014 22:26:42 +0000 Subject: as you could have multiple messages per line with streams, don't \r Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- utils/jsonmessage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/jsonmessage.go b/utils/jsonmessage.go index 9050dda746..f84cc42c78 100644 --- a/utils/jsonmessage.go +++ b/utils/jsonmessage.go @@ -85,7 +85,7 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { return jm.Error } var endl string - if isTerminal { + if isTerminal && jm.Stream == "" { // [2K = erase entire current line fmt.Fprintf(out, "%c[2K\r", 27) endl = "\r" -- cgit v1.2.1 From 6411ee6d24d256e15909ea68b845b354dc51c4ed Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Thu, 13 Mar 2014 16:19:28 -0700 Subject: Have the exec driver and kernel version in non-debug mode in `docker info` Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- api/client.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/api/client.go b/api/client.go index 6049a892c1..e6a74a3b53 100644 --- a/api/client.go +++ b/api/client.go @@ -432,7 +432,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error { fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) - fmt.Fprintf(cli.out, "Driver: %s\n", remoteInfo.Get("Driver")) + fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) var driverStatus [][2]string if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { return err @@ -440,14 +440,15 @@ func (cli *DockerCli) CmdInfo(args ...string) error { for _, pair := range driverStatus { fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) } + fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) + fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) + if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) - fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) - fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) -- cgit v1.2.1 From 7b89af2a08b65fac064603cb3b5eb8e091e2c076 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Thu, 13 Mar 2014 14:03:03 -0700 Subject: Add unit test for expose cache Docker-DCO-1.1-Signed-off-by: Guillaume J. 
Charmes (github: creack) --- integration/buildfile_test.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index e5084d4355..f4ed61aaff 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -6,6 +6,7 @@ import ( "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/utils" "io/ioutil" "net" @@ -492,7 +493,7 @@ func TestBuildExpose(t *testing.T) { t.Fatal(err) } - if img.Config.PortSpecs[0] != "4243" { + if _, exists := img.Config.ExposedPorts[nat.NewPort("tcp", "4243")]; !exists { t.Fail() } } @@ -594,6 +595,17 @@ func TestBuildImageWithCache(t *testing.T) { checkCacheBehavior(t, template, true) } +func TestBuildExposeWithCache(t *testing.T) { + template := testContextTemplate{` + from {IMAGE} + maintainer dockerio + expose 80 + run echo hello + `, + nil, nil} + checkCacheBehavior(t, template, true) +} + func TestBuildImageWithoutCache(t *testing.T) { template := testContextTemplate{` from {IMAGE} @@ -877,7 +889,7 @@ func TestBuildInheritance(t *testing.T) { } // from parent - if img.Config.PortSpecs[0] != "4243" { + if _, exists := img.Config.ExposedPorts[nat.NewPort("tcp", "4243")]; !exists { t.Fail() } } -- cgit v1.2.1 From c349c9d14a1c4bf04f35f3f5c62b0bb92614bc81 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 14 Mar 2014 00:47:13 +0000 Subject: create the cli obj before calling parseCommand Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/client.go | 4 +--- docker/docker.go | 3 ++- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/api/client.go b/api/client.go index 858c2bcf25..715f58ab06 100644 --- a/api/client.go +++ b/api/client.go @@ -57,9 +57,7 @@ func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) { return method.Interface().(func(...string) error), true } -func ParseCommands(proto, addr string, args ...string) error { - cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, proto, addr) - +func (cli *DockerCli) ParseCommands(args ...string) error { if len(args) > 0 { method, exists := cli.getMethod(args[0]) if !exists { diff --git a/docker/docker.go b/docker/docker.go index cc4d40f3ac..749857a640 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -148,7 +148,8 @@ func main() { log.Fatal("Please specify only one -H") } protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2) - if err := api.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil { + cli := api.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1]) + if err := cli.ParseCommands(flag.Args()...); err != nil { if sterr, ok := err.(*utils.StatusError); ok { if sterr.Status != "" { log.Println(sterr.Status) -- cgit v1.2.1 From f6efcf20943e656ca977f1fd0ae5197f5757dff4 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 13 Mar 2014 22:35:09 -0600 Subject: Fix sphinx header underline warnings I introduced... 
Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- docs/sources/reference/commandline/cli.rst | 4 ++-- docs/sources/reference/run.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 83f05947c2..2371ed1b5f 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -374,7 +374,7 @@ The me/bar image will now have port 22 exposed, MYVAR env var set to 'foobar', a Note that this is currently a shallow merge. So, for example, if you had specified a new port spec in the --run= config above, that would have clobbered the 'EXPOSE 22' setting from the parent container. Full --run example -................. +.................. The ``--run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID`` or ``config`` when running ``docker inspect IMAGEID``. Existing configuration key-values that are @@ -1172,7 +1172,7 @@ See :ref:`port_redirection` for more detailed information about the ``--expose`` specific examples using ``--link``. Known Issues (run --volumes-from) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * :issue:`2702`: "lxc-start: Permission denied - failed to mount" could indicate a permissions problem with AppArmor. Please see the diff --git a/docs/sources/reference/run.rst b/docs/sources/reference/run.rst index 8637ac3071..0b4f7eebf4 100644 --- a/docs/sources/reference/run.rst +++ b/docs/sources/reference/run.rst @@ -113,7 +113,7 @@ Container Identification ------------------------ Name (--name) -............ +............. The operator can identify a container in three ways: @@ -157,7 +157,7 @@ Your container will use the same DNS servers as the host by default, but you can override this with ``--dns``. Clean Up (--rm) --------------- +--------------- By default a container's file system persists even after the container exits. 
This makes debugging a lot easier (since you can inspect the -- cgit v1.2.1 From ae47f709ca6a6c29b769191ceabb10e59a0408b1 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 13 Mar 2014 22:35:31 -0600 Subject: Make sphinx warnings fatal in Travis Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 8a43d9a462..b8e4d43fcc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -25,6 +25,6 @@ before_script: script: - hack/travis/dco.py - hack/travis/gofmt.py - - make -sC docs SPHINXOPTS=-q docs man + - make -sC docs SPHINXOPTS=-qW docs man # vim:set sw=2 ts=2: -- cgit v1.2.1 From 5239aa1f11c32f3befc25fb2fa8a0ecf75ec4bf6 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 11 Mar 2014 10:40:06 -0700 Subject: Move server and buildfile into server pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- buildfile.go | 827 --------------- builtins/builtins.go | 4 +- server.go | 2426 -------------------------------------------- server/buildfile.go | 799 +++++++++++++++ server/server.go | 2426 ++++++++++++++++++++++++++++++++++++++++++++ server/server_unit_test.go | 99 ++ server_unit_test.go | 99 -- utils/streamformatter.go | 29 + 8 files changed, 3355 insertions(+), 3354 deletions(-) delete mode 100644 buildfile.go delete mode 100644 server.go create mode 100644 server/buildfile.go create mode 100644 server/server.go create mode 100644 server/server_unit_test.go delete mode 100644 server_unit_test.go diff --git a/buildfile.go b/buildfile.go deleted file mode 100644 index da72be60fb..0000000000 --- a/buildfile.go +++ /dev/null @@ -1,827 +0,0 @@ -package docker - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/runtime" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "net/url" - "os" - "path" - "path/filepath" - "reflect" - "regexp" - "sort" - "strings" -) - -var ( - ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") -) - -type BuildFile interface { - Build(io.Reader) (string, error) - CmdFrom(string) error - CmdRun(string) error -} - -type buildFile struct { - runtime *runtime.Runtime - srv *Server - - image string - maintainer string - config *runconfig.Config - - contextPath string - context *utils.TarSum - - verbose bool - utilizeCache bool - rm bool - - authConfig *registry.AuthConfig - configFile *registry.ConfigFile - - tmpContainers map[string]struct{} - tmpImages map[string]struct{} - - outStream io.Writer - errStream io.Writer - - // Deprecated, original writer used for ImagePull. To be removed. 
- outOld io.Writer - sf *utils.StreamFormatter -} - -func (b *buildFile) clearTmp(containers map[string]struct{}) { - for c := range containers { - tmp := b.runtime.Get(c) - if err := b.runtime.Destroy(tmp); err != nil { - fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) - } else { - fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c)) - } - } -} - -func (b *buildFile) CmdFrom(name string) error { - image, err := b.runtime.Repositories().LookupImage(name) - if err != nil { - if b.runtime.Graph().IsNotExist(err) { - remote, tag := utils.ParseRepositoryTag(name) - pullRegistryAuth := b.authConfig - if len(b.configFile.Configs) > 0 { - // The request came with a full auth config file, we prefer to use that - endpoint, _, err := registry.ResolveRepositoryName(remote) - if err != nil { - return err - } - resolvedAuth := b.configFile.ResolveAuthConfig(endpoint) - pullRegistryAuth = &resolvedAuth - } - job := b.srv.Eng.Job("pull", remote, tag) - job.SetenvBool("json", b.sf.Json()) - job.SetenvBool("parallel", true) - job.SetenvJson("authConfig", pullRegistryAuth) - job.Stdout.Add(b.outOld) - if err := job.Run(); err != nil { - return err - } - image, err = b.runtime.Repositories().LookupImage(name) - if err != nil { - return err - } - } else { - return err - } - } - b.image = image.ID - b.config = &runconfig.Config{} - if image.Config != nil { - b.config = image.Config - } - if b.config.Env == nil || len(b.config.Env) == 0 { - b.config.Env = append(b.config.Env, "HOME=/", "PATH="+runtime.DefaultPathEnv) - } - // Process ONBUILD triggers if they exist - if nTriggers := len(b.config.OnBuild); nTriggers != 0 { - fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers) - } - for n, step := range b.config.OnBuild { - splitStep := strings.Split(step, " ") - stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " ")) - switch stepInstruction { - case "ONBUILD": - return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step) - case "MAINTAINER", "FROM": - return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step) - } - if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil { - return err - } - } - b.config.OnBuild = []string{} - return nil -} - -// The ONBUILD command declares a build instruction to be executed in any future build -// using the current image as a base. -func (b *buildFile) CmdOnbuild(trigger string) error { - splitTrigger := strings.Split(trigger, " ") - triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " ")) - switch triggerInstruction { - case "ONBUILD": - return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) - } - b.config.OnBuild = append(b.config.OnBuild, trigger) - return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger)) -} - -func (b *buildFile) CmdMaintainer(name string) error { - b.maintainer = name - return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name)) -} - -// probeCache checks to see if image-caching is enabled (`b.utilizeCache`) -// and if so attempts to look up the current `b.image` and `b.config` pair -// in the current server `b.srv`. If an image is found, probeCache returns -// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there -// is any error, it returns `(false, err)`. 
-func (b *buildFile) probeCache() (bool, error) { - if b.utilizeCache { - if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil { - return false, err - } else if cache != nil { - fmt.Fprintf(b.outStream, " ---> Using cache\n") - utils.Debugf("[BUILDER] Use cached version") - b.image = cache.ID - return true, nil - } else { - utils.Debugf("[BUILDER] Cache miss") - } - } - return false, nil -} - -func (b *buildFile) CmdRun(args string) error { - if b.image == "" { - return fmt.Errorf("Please provide a source image with `from` prior to run") - } - config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil) - if err != nil { - return err - } - - cmd := b.config.Cmd - b.config.Cmd = nil - runconfig.Merge(b.config, config) - - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - - utils.Debugf("Command to be executed: %v", b.config.Cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - c, err := b.create() - if err != nil { - return err - } - // Ensure that we keep the container mounted until the commit - // to avoid unmounting and then mounting directly again - c.Mount() - defer c.Unmount() - - err = b.run(c) - if err != nil { - return err - } - if err := b.commit(c.ID, cmd, "run"); err != nil { - return err - } - - return nil -} - -func (b *buildFile) FindEnvKey(key string) int { - for k, envVar := range b.config.Env { - envParts := strings.SplitN(envVar, "=", 2) - if key == envParts[0] { - return k - } - } - return -1 -} - -func (b *buildFile) ReplaceEnvMatches(value string) (string, error) { - exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)") - if err != nil { - return value, err - } - matches := exp.FindAllString(value, -1) - for _, match := range matches { - match = match[strings.Index(match, "$"):] - matchKey := strings.Trim(match, "${}") - - for _, envVar := range b.config.Env { - envParts := strings.SplitN(envVar, "=", 2) - envKey := envParts[0] - envValue := envParts[1] - - if envKey == matchKey { - value = strings.Replace(value, match, envValue, -1) - break - } - } - } - return value, nil -} - -func (b *buildFile) CmdEnv(args string) error { - tmp := strings.SplitN(args, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid ENV format") - } - key := strings.Trim(tmp[0], " \t") - value := strings.Trim(tmp[1], " \t") - - envKey := b.FindEnvKey(key) - replacedValue, err := b.ReplaceEnvMatches(value) - if err != nil { - return err - } - replacedVar := fmt.Sprintf("%s=%s", key, replacedValue) - - if envKey >= 0 { - b.config.Env[envKey] = replacedVar - } else { - b.config.Env = append(b.config.Env, replacedVar) - } - return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar)) -} - -func (b *buildFile) buildCmdFromJson(args string) []string { - var cmd []string - if err := json.Unmarshal([]byte(args), &cmd); err != nil { - utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err) - cmd = []string{"/bin/sh", "-c", args} - } - return cmd -} - -func (b *buildFile) CmdCmd(args string) error { - cmd := b.buildCmdFromJson(args) - b.config.Cmd = cmd - if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { - return err - } - return nil -} - -func (b *buildFile) CmdEntrypoint(args string) error { - entrypoint := b.buildCmdFromJson(args) - b.config.Entrypoint = entrypoint - if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil { - return err - } - return nil -} - -func (b *buildFile) 
CmdExpose(args string) error { - portsTab := strings.Split(args, " ") - - if b.config.ExposedPorts == nil { - b.config.ExposedPorts = make(nat.PortSet) - } - ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...)) - if err != nil { - return err - } - for port := range ports { - if _, exists := b.config.ExposedPorts[port]; !exists { - b.config.ExposedPorts[port] = struct{}{} - } - } - b.config.PortSpecs = nil - - return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) -} - -func (b *buildFile) CmdUser(args string) error { - b.config.User = args - return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args)) -} - -func (b *buildFile) CmdInsert(args string) error { - return fmt.Errorf("INSERT has been deprecated. Please use ADD instead") -} - -func (b *buildFile) CmdCopy(args string) error { - return fmt.Errorf("COPY has been deprecated. Please use ADD instead") -} - -func (b *buildFile) CmdWorkdir(workdir string) error { - b.config.WorkingDir = workdir - return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) -} - -func (b *buildFile) CmdVolume(args string) error { - if args == "" { - return fmt.Errorf("Volume cannot be empty") - } - - var volume []string - if err := json.Unmarshal([]byte(args), &volume); err != nil { - volume = []string{args} - } - if b.config.Volumes == nil { - b.config.Volumes = map[string]struct{}{} - } - for _, v := range volume { - b.config.Volumes[v] = struct{}{} - } - if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil { - return err - } - return nil -} - -func (b *buildFile) checkPathForAddition(orig string) error { - origPath := path.Join(b.contextPath, orig) - if p, err := filepath.EvalSymlinks(origPath); err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err - } else { - origPath = p - } - if !strings.HasPrefix(origPath, b.contextPath) { - return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) - } - _, err := os.Stat(origPath) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err - } - return nil -} - -func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error { - var ( - origPath = path.Join(b.contextPath, orig) - destPath = path.Join(container.RootfsPath(), dest) - ) - // Preserve the trailing '/' - if strings.HasSuffix(dest, "/") { - destPath = destPath + "/" - } - fi, err := os.Stat(origPath) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("%s: no such file or directory", orig) - } - return err - } - - if fi.IsDir() { - if err := archive.CopyWithTar(origPath, destPath); err != nil { - return err - } - return nil - } - - // First try to unpack the source as an archive - // to support the untar feature we need to clean up the path a little bit - // because tar is very forgiving. First we need to strip off the archive's - // filename from the path but this is only added if it does not end in / . 
- tarDest := destPath - if strings.HasSuffix(tarDest, "/") { - tarDest = filepath.Dir(destPath) - } - - // If we are adding a remote file, do not try to untar it - if !remote { - // try to successfully untar the orig - if err := archive.UntarPath(origPath, tarDest); err == nil { - return nil - } - utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err) - } - - // If that fails, just copy it as a regular file - // but do not use all the magic path handling for the tar path - if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { - return err - } - if err := archive.CopyWithTar(origPath, destPath); err != nil { - return err - } - return nil -} - -func (b *buildFile) CmdAdd(args string) error { - if b.context == nil { - return fmt.Errorf("No context given. Impossible to use ADD") - } - tmp := strings.SplitN(args, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid ADD format") - } - - orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t")) - if err != nil { - return err - } - - dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t")) - if err != nil { - return err - } - - cmd := b.config.Cmd - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)} - b.config.Image = b.image - - var ( - origPath = orig - destPath = dest - remoteHash string - isRemote bool - ) - - if utils.IsURL(orig) { - isRemote = true - resp, err := utils.Download(orig) - if err != nil { - return err - } - tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") - if err != nil { - return err - } - tmpFileName := path.Join(tmpDirName, "tmp") - tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if err != nil { - return err - } - defer os.RemoveAll(tmpDirName) - if _, err = io.Copy(tmpFile, resp.Body); err != nil { - tmpFile.Close() - return err - } - origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) - tmpFile.Close() - - // Process the checksum - r, err := archive.Tar(tmpFileName, archive.Uncompressed) - if err != nil { - return err - } - tarSum := utils.TarSum{Reader: r, DisableCompression: true} - remoteHash = tarSum.Sum(nil) - r.Close() - - // If the destination is a directory, figure out the filename. 
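// Editor's note: a worked example of the filename derivation below, added for
// clarity; the URL and destination are hypothetical. Given
//	ADD http://example.com/pkg/app.tar.gz /usr/local/
// orig parses to u.Path == "/pkg/app.tar.gz", the last path segment gives
// filename == "app.tar.gz", and destPath becomes "/usr/local/app.tar.gz".
// A bare host URL such as http://example.com/ leaves no usable segment, which
// is why the code returns "cannot determine filename from url" in that case.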
- if strings.HasSuffix(dest, "/") { - u, err := url.Parse(orig) - if err != nil { - return err - } - path := u.Path - if strings.HasSuffix(path, "/") { - path = path[:len(path)-1] - } - parts := strings.Split(path, "/") - filename := parts[len(parts)-1] - if filename == "" { - return fmt.Errorf("cannot determine filename from url: %s", u) - } - destPath = dest + filename - } - } - - if err := b.checkPathForAddition(origPath); err != nil { - return err - } - - // Hash path and check the cache - if b.utilizeCache { - var ( - hash string - sums = b.context.GetSums() - ) - - if remoteHash != "" { - hash = remoteHash - } else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil { - return err - } else if fi.IsDir() { - var subfiles []string - for file, sum := range sums { - absFile := path.Join(b.contextPath, file) - absOrigPath := path.Join(b.contextPath, origPath) - if strings.HasPrefix(absFile, absOrigPath) { - subfiles = append(subfiles, sum) - } - } - sort.Strings(subfiles) - hasher := sha256.New() - hasher.Write([]byte(strings.Join(subfiles, ","))) - hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) - } else { - if origPath[0] == '/' && len(origPath) > 1 { - origPath = origPath[1:] - } - origPath = strings.TrimPrefix(origPath, "./") - if h, ok := sums[origPath]; ok { - hash = "file:" + h - } - } - b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)} - hit, err := b.probeCache() - if err != nil { - return err - } - // If we do not have a hash, never use the cache - if hit && hash != "" { - return nil - } - } - - // Create the container and start it - container, _, err := b.runtime.Create(b.config, "") - if err != nil { - return err - } - b.tmpContainers[container.ID] = struct{}{} - - if err := container.Mount(); err != nil { - return err - } - defer container.Unmount() - - if err := b.addContext(container, origPath, destPath, isRemote); err != nil { - return err - } - - if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil { - return err - } - b.config.Cmd = cmd - return nil -} - -type StdoutFormater struct { - io.Writer - *utils.StreamFormatter -} - -func (sf *StdoutFormater) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} - -type StderrFormater struct { - io.Writer - *utils.StreamFormatter -} - -func (sf *StderrFormater) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} - -func (b *buildFile) create() (*runtime.Container, error) { - if b.image == "" { - return nil, fmt.Errorf("Please provide a source image with `from` prior to run") - } - b.config.Image = b.image - - // Create the container and start it - c, _, err := b.runtime.Create(b.config, "") - if err != nil { - return nil, err - } - b.tmpContainers[c.ID] = struct{}{} - fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) - - // override the entry point that may have been picked up from the base image - c.Path = b.config.Cmd[0] - c.Args = b.config.Cmd[1:] - - return c, nil -} - -func (b *buildFile) run(c *runtime.Container) error { - var errCh chan error - - if b.verbose { - errCh = utils.Go(func() error { - return <-c.Attach(nil, nil, 
b.outStream, b.errStream) - }) - } - - //start the container - if err := c.Start(); err != nil { - return err - } - - if errCh != nil { - if err := <-errCh; err != nil { - return err - } - } - - // Wait for it to finish - if ret := c.Wait(); ret != 0 { - err := &utils.JSONError{ - Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret), - Code: ret, - } - return err - } - - return nil -} - -// Commit the container with the autorun command -func (b *buildFile) commit(id string, autoCmd []string, comment string) error { - if b.image == "" { - return fmt.Errorf("Please provide a source image with `from` prior to commit") - } - b.config.Image = b.image - if id == "" { - cmd := b.config.Cmd - b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} - defer func(cmd []string) { b.config.Cmd = cmd }(cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - container, warnings, err := b.runtime.Create(b.config, "") - if err != nil { - return err - } - for _, warning := range warnings { - fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning) - } - b.tmpContainers[container.ID] = struct{}{} - fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) - id = container.ID - - if err := container.Mount(); err != nil { - return err - } - defer container.Unmount() - } - container := b.runtime.Get(id) - if container == nil { - return fmt.Errorf("An error occured while creating the container") - } - - // Note: Actually copy the struct - autoConfig := *b.config - autoConfig.Cmd = autoCmd - // Commit the container - image, err := b.runtime.Commit(container, "", "", "", b.maintainer, &autoConfig) - if err != nil { - return err - } - b.tmpImages[image.ID] = struct{}{} - b.image = image.ID - return nil -} - -// Long lines can be split with a backslash -var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`) - -func (b *buildFile) Build(context io.Reader) (string, error) { - tmpdirPath, err := ioutil.TempDir("", "docker-build") - if err != nil { - return "", err - } - - decompressedStream, err := archive.DecompressStream(context) - if err != nil { - return "", err - } - - b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true} - if err := archive.Untar(b.context, tmpdirPath, nil); err != nil { - return "", err - } - defer os.RemoveAll(tmpdirPath) - - b.contextPath = tmpdirPath - filename := path.Join(tmpdirPath, "Dockerfile") - if _, err := os.Stat(filename); os.IsNotExist(err) { - return "", fmt.Errorf("Can't build a directory with no Dockerfile") - } - fileBytes, err := ioutil.ReadFile(filename) - if err != nil { - return "", err - } - if len(fileBytes) == 0 { - return "", ErrDockerfileEmpty - } - dockerfile := string(fileBytes) - dockerfile = lineContinuation.ReplaceAllString(dockerfile, "") - stepN := 0 - for _, line := range strings.Split(dockerfile, "\n") { - line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n") - // Skip comments and empty line - if len(line) == 0 || line[0] == '#' { - continue - } - if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil { - return "", err - } - stepN += 1 - - } - if b.image != "" { - fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image)) - if b.rm { - b.clearTmp(b.tmpContainers) - } - return b.image, nil - } - return "", fmt.Errorf("No image was generated. 
This may be because the Dockerfile does not, like, do anything.\n") -} - -// BuildStep parses a single build step from `instruction` and executes it in the current context. -func (b *buildFile) BuildStep(name, expression string) error { - fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression) - tmp := strings.SplitN(expression, " ", 2) - if len(tmp) != 2 { - return fmt.Errorf("Invalid Dockerfile format") - } - instruction := strings.ToLower(strings.Trim(tmp[0], " ")) - arguments := strings.Trim(tmp[1], " ") - - method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:])) - if !exists { - fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction)) - return nil - } - - ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface() - if ret != nil { - return ret.(error) - } - - fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image)) - return nil -} - -func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile { - return &buildFile{ - runtime: srv.runtime, - srv: srv, - config: &runconfig.Config{}, - outStream: outStream, - errStream: errStream, - tmpContainers: make(map[string]struct{}), - tmpImages: make(map[string]struct{}), - verbose: verbose, - utilizeCache: utilizeCache, - rm: rm, - sf: sf, - authConfig: auth, - configFile: authConfigFile, - outOld: outOld, - } -} diff --git a/builtins/builtins.go b/builtins/builtins.go index ba3f41b1ca..eb4a0be874 100644 --- a/builtins/builtins.go +++ b/builtins/builtins.go @@ -3,9 +3,9 @@ package builtins import ( "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker" "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/networkdriver/lxc" + "github.com/dotcloud/docker/server" ) func Register(eng *engine.Engine) { @@ -34,6 +34,6 @@ func remote(eng *engine.Engine) { // These components should be broken off into plugins of their own. // func daemon(eng *engine.Engine) { - eng.Register("initserver", docker.InitServer) + eng.Register("initserver", server.InitServer) eng.Register("init_networkdriver", lxc.InitDriver) } diff --git a/server.go b/server.go deleted file mode 100644 index 75fa633e8f..0000000000 --- a/server.go +++ /dev/null @@ -1,2426 +0,0 @@ -package docker - -import ( - "encoding/json" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/daemonconfig" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/graph" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/pkg/graphdb" - "github.com/dotcloud/docker/pkg/signal" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/runtime" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "log" - "net/http" - "net/url" - "os" - "os/exec" - gosignal "os/signal" - "path" - "path/filepath" - goruntime "runtime" - "strconv" - "strings" - "sync" - "syscall" - "time" -) - -// jobInitApi runs the remote api server `srv` as a daemon, -// Only one api server can run at the same time - this is enforced by a pidfile. -// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup. 
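// Editor's note: a minimal sketch, not part of the original patch, of how the
// handlers that InitServer registers below are reached. API endpoints create a
// named job on the same engine, attach output writers, and run it; the job
// name must match a key in the registration map (for example "info"). The
// eng variable, os/log imports and error handling here are illustrative only.
job := eng.Job("info")
job.Stdout.Add(os.Stdout)
if err := job.Run(); err != nil {
	log.Fatalf("info job failed: %s", err)
}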
-func InitServer(job *engine.Job) engine.Status { - job.Logf("Creating server") - srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job)) - if err != nil { - return job.Error(err) - } - if srv.runtime.Config().Pidfile != "" { - job.Logf("Creating pidfile") - if err := utils.CreatePidFile(srv.runtime.Config().Pidfile); err != nil { - // FIXME: do we need fatal here instead of returning a job error? - log.Fatal(err) - } - } - job.Logf("Setting up signal traps") - c := make(chan os.Signal, 1) - gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) - go func() { - sig := <-c - log.Printf("Received signal '%v', exiting\n", sig) - utils.RemovePidFile(srv.runtime.Config().Pidfile) - srv.Close() - os.Exit(0) - }() - job.Eng.Hack_SetGlobalVar("httpapi.server", srv) - job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime) - - for name, handler := range map[string]engine.Handler{ - "export": srv.ContainerExport, - "create": srv.ContainerCreate, - "stop": srv.ContainerStop, - "restart": srv.ContainerRestart, - "start": srv.ContainerStart, - "kill": srv.ContainerKill, - "wait": srv.ContainerWait, - "tag": srv.ImageTag, - "resize": srv.ContainerResize, - "commit": srv.ContainerCommit, - "info": srv.DockerInfo, - "container_delete": srv.ContainerDestroy, - "image_export": srv.ImageExport, - "images": srv.Images, - "history": srv.ImageHistory, - "viz": srv.ImagesViz, - "container_copy": srv.ContainerCopy, - "insert": srv.ImageInsert, - "attach": srv.ContainerAttach, - "search": srv.ImagesSearch, - "changes": srv.ContainerChanges, - "top": srv.ContainerTop, - "version": srv.DockerVersion, - "load": srv.ImageLoad, - "build": srv.Build, - "pull": srv.ImagePull, - "import": srv.ImageImport, - "image_delete": srv.ImageDelete, - "inspect": srv.JobInspect, - "events": srv.Events, - "push": srv.ImagePush, - "containers": srv.Containers, - "auth": srv.Auth, - } { - if err := job.Eng.Register(name, handler); err != nil { - return job.Error(err) - } - } - return engine.StatusOK -} - -// simpleVersionInfo is a simple implementation of -// the interface VersionInfo, which is used -// to provide version information for some product, -// component, etc. It stores the product name and the version -// in string and returns them on calls to Name() and Version(). -type simpleVersionInfo struct { - name string - version string -} - -func (v *simpleVersionInfo) Name() string { - return v.name -} - -func (v *simpleVersionInfo) Version() string { - return v.version -} - -// ContainerKill send signal to the container -// If no signal is given (sig 0), then Kill with SIGKILL and wait -// for the container to exit. -// If a signal is given, then just send it to the container and return. -func (srv *Server) ContainerKill(job *engine.Job) engine.Status { - if n := len(job.Args); n < 1 || n > 2 { - return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) - } - var ( - name = job.Args[0] - sig uint64 - err error - ) - - // If we have a signal, look at it. 
Otherwise, do nothing - if len(job.Args) == 2 && job.Args[1] != "" { - // Check if we passed the signal as a number: - // The largest legal signal is 31, so let's parse on 5 bits - sig, err = strconv.ParseUint(job.Args[1], 10, 5) - if err != nil { - // The signal is not a number, treat it as a string - sig = uint64(signal.SignalMap[job.Args[1]]) - if sig == 0 { - return job.Errorf("Invalid signal: %s", job.Args[1]) - } - - } - } - - if container := srv.runtime.Get(name); container != nil { - // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) - if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { - if err := container.Kill(); err != nil { - return job.Errorf("Cannot kill container %s: %s", name, err) - } - srv.LogEvent("kill", container.ID, srv.runtime.Repositories().ImageName(container.Image)) - } else { - // Otherwise, just send the requested signal - if err := container.KillSig(int(sig)); err != nil { - return job.Errorf("Cannot kill container %s: %s", name, err) - } - // FIXME: Add event for signals - } - } else { - return job.Errorf("No such container: %s", name) - } - return engine.StatusOK -} - -func (srv *Server) Auth(job *engine.Job) engine.Status { - var ( - err error - authConfig = ®istry.AuthConfig{} - ) - - job.GetenvJson("authConfig", authConfig) - // TODO: this is only done here because auth and registry need to be merged into one pkg - if addr := authConfig.ServerAddress; addr != "" && addr != registry.IndexServerAddress() { - addr, err = registry.ExpandAndVerifyRegistryUrl(addr) - if err != nil { - return job.Error(err) - } - authConfig.ServerAddress = addr - } - status, err := registry.Login(authConfig, srv.HTTPRequestFactory(nil)) - if err != nil { - return job.Error(err) - } - job.Printf("%s\n", status) - return engine.StatusOK -} - -func (srv *Server) Events(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s FROM", job.Name) - } - - var ( - from = job.Args[0] - since = job.GetenvInt64("since") - ) - sendEvent := func(event *utils.JSONMessage) error { - b, err := json.Marshal(event) - if err != nil { - return fmt.Errorf("JSON error") - } - _, err = job.Stdout.Write(b) - if err != nil { - // On error, evict the listener - utils.Errorf("%s", err) - srv.Lock() - delete(srv.listeners, from) - srv.Unlock() - return err - } - return nil - } - - listener := make(chan utils.JSONMessage) - srv.Lock() - srv.listeners[from] = listener - srv.Unlock() - job.Stdout.Write(nil) // flush - if since != 0 { - // If since, send previous events that happened after the timestamp - for _, event := range srv.GetEvents() { - if event.Time >= since { - err := sendEvent(&event) - if err != nil && err.Error() == "JSON error" { - continue - } - if err != nil { - job.Error(err) - return engine.StatusErr - } - } - } - } - for event := range listener { - err := sendEvent(&event) - if err != nil && err.Error() == "JSON error" { - continue - } - if err != nil { - return job.Error(err) - } - } - return engine.StatusOK -} - -func (srv *Server) ContainerExport(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s container_id", job.Name) - } - name := job.Args[0] - if container := srv.runtime.Get(name); container != nil { - data, err := container.Export() - if err != nil { - return job.Errorf("%s: %s", name, err) - } - defer data.Close() - - // Stream the entire contents of the container (basically a volatile snapshot) - if _, err := io.Copy(job.Stdout, data); err != nil { - return job.Errorf("%s: %s", 
name, err) - } - // FIXME: factor job-specific LogEvent to engine.Job.Run() - srv.LogEvent("export", container.ID, srv.runtime.Repositories().ImageName(container.Image)) - return engine.StatusOK - } - return job.Errorf("No such container: %s", name) -} - -// ImageExport exports all images with the given tag. All versions -// containing the same tag are exported. The resulting output is an -// uncompressed tar ball. -// name is the set of tags to export. -// out is the writer where the images are written to. -func (srv *Server) ImageExport(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s CONTAINER\n", job.Name) - } - name := job.Args[0] - // get image json - tempdir, err := ioutil.TempDir("", "docker-export-") - if err != nil { - return job.Error(err) - } - defer os.RemoveAll(tempdir) - - utils.Debugf("Serializing %s", name) - - rootRepo, err := srv.runtime.Repositories().Get(name) - if err != nil { - return job.Error(err) - } - if rootRepo != nil { - for _, id := range rootRepo { - image, err := srv.ImageInspect(id) - if err != nil { - return job.Error(err) - } - - if err := srv.exportImage(image, tempdir); err != nil { - return job.Error(err) - } - } - - // write repositories - rootRepoMap := map[string]graph.Repository{} - rootRepoMap[name] = rootRepo - rootRepoJson, _ := json.Marshal(rootRepoMap) - - if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.ModeAppend); err != nil { - return job.Error(err) - } - } else { - image, err := srv.ImageInspect(name) - if err != nil { - return job.Error(err) - } - if err := srv.exportImage(image, tempdir); err != nil { - return job.Error(err) - } - } - - fs, err := archive.Tar(tempdir, archive.Uncompressed) - if err != nil { - return job.Error(err) - } - defer fs.Close() - - if _, err := io.Copy(job.Stdout, fs); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) exportImage(img *image.Image, tempdir string) error { - for i := img; i != nil; { - // temporary directory - tmpImageDir := path.Join(tempdir, i.ID) - if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil { - if os.IsExist(err) { - return nil - } - return err - } - - var version = "1.0" - var versionBuf = []byte(version) - - if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.ModeAppend); err != nil { - return err - } - - // serialize json - b, err := json.Marshal(i) - if err != nil { - return err - } - if err := ioutil.WriteFile(path.Join(tmpImageDir, "json"), b, os.ModeAppend); err != nil { - return err - } - - // serialize filesystem - fs, err := i.TarLayer() - if err != nil { - return err - } - defer fs.Close() - - fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) - if err != nil { - return err - } - if _, err = io.Copy(fsTar, fs); err != nil { - return err - } - fsTar.Close() - - // find parent - if i.Parent != "" { - i, err = srv.ImageInspect(i.Parent) - if err != nil { - return err - } - } else { - i = nil - } - } - return nil -} - -func (srv *Server) Build(job *engine.Job) engine.Status { - if len(job.Args) != 0 { - return job.Errorf("Usage: %s\n", job.Name) - } - var ( - remoteURL = job.Getenv("remote") - repoName = job.Getenv("t") - suppressOutput = job.GetenvBool("q") - noCache = job.GetenvBool("nocache") - rm = job.GetenvBool("rm") - authConfig = ®istry.AuthConfig{} - configFile = ®istry.ConfigFile{} - tag string - context io.ReadCloser - ) - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("configFile", configFile) - repoName, 
tag = utils.ParseRepositoryTag(repoName) - - if remoteURL == "" { - context = ioutil.NopCloser(job.Stdin) - } else if utils.IsGIT(remoteURL) { - if !strings.HasPrefix(remoteURL, "git://") { - remoteURL = "https://" + remoteURL - } - root, err := ioutil.TempDir("", "docker-build-git") - if err != nil { - return job.Error(err) - } - defer os.RemoveAll(root) - - if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { - return job.Errorf("Error trying to use git: %s (%s)", err, output) - } - - c, err := archive.Tar(root, archive.Uncompressed) - if err != nil { - return job.Error(err) - } - context = c - } else if utils.IsURL(remoteURL) { - f, err := utils.Download(remoteURL) - if err != nil { - return job.Error(err) - } - defer f.Body.Close() - dockerFile, err := ioutil.ReadAll(f.Body) - if err != nil { - return job.Error(err) - } - c, err := archive.Generate("Dockerfile", string(dockerFile)) - if err != nil { - return job.Error(err) - } - context = c - } - defer context.Close() - - sf := utils.NewStreamFormatter(job.GetenvBool("json")) - b := NewBuildFile(srv, - &StdoutFormater{ - Writer: job.Stdout, - StreamFormatter: sf, - }, - &StderrFormater{ - Writer: job.Stdout, - StreamFormatter: sf, - }, - !suppressOutput, !noCache, rm, job.Stdout, sf, authConfig, configFile) - id, err := b.Build(context) - if err != nil { - return job.Error(err) - } - if repoName != "" { - srv.runtime.Repositories().Set(repoName, tag, id, false) - } - return engine.StatusOK -} - -// Loads a set of images into the repository. This is the complementary of ImageExport. -// The input stream is an uncompressed tar ball containing images and metadata. -func (srv *Server) ImageLoad(job *engine.Job) engine.Status { - tmpImageDir, err := ioutil.TempDir("", "docker-import-") - if err != nil { - return job.Error(err) - } - defer os.RemoveAll(tmpImageDir) - - var ( - repoTarFile = path.Join(tmpImageDir, "repo.tar") - repoDir = path.Join(tmpImageDir, "repo") - ) - - tarFile, err := os.Create(repoTarFile) - if err != nil { - return job.Error(err) - } - if _, err := io.Copy(tarFile, job.Stdin); err != nil { - return job.Error(err) - } - tarFile.Close() - - repoFile, err := os.Open(repoTarFile) - if err != nil { - return job.Error(err) - } - if err := os.Mkdir(repoDir, os.ModeDir); err != nil { - return job.Error(err) - } - if err := archive.Untar(repoFile, repoDir, nil); err != nil { - return job.Error(err) - } - - dirs, err := ioutil.ReadDir(repoDir) - if err != nil { - return job.Error(err) - } - - for _, d := range dirs { - if d.IsDir() { - if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil { - return job.Error(err) - } - } - } - - repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) - if err == nil { - repositories := map[string]graph.Repository{} - if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { - return job.Error(err) - } - - for imageName, tagMap := range repositories { - for tag, address := range tagMap { - if err := srv.runtime.Repositories().Set(imageName, tag, address, true); err != nil { - return job.Error(err) - } - } - } - } else if !os.IsNotExist(err) { - return job.Error(err) - } - - return engine.StatusOK -} - -func (srv *Server) recursiveLoad(address, tmpImageDir string) error { - if _, err := srv.ImageInspect(address); err != nil { - utils.Debugf("Loading %s", address) - - imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) - if err != nil { - 
utils.Debugf("Error reading json", err) - return err - } - - layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar")) - if err != nil { - utils.Debugf("Error reading embedded tar", err) - return err - } - img, err := image.NewImgJSON(imageJson) - if err != nil { - utils.Debugf("Error unmarshalling json", err) - return err - } - if img.Parent != "" { - if !srv.runtime.Graph().Exists(img.Parent) { - if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil { - return err - } - } - } - if err := srv.runtime.Graph().Register(imageJson, layer, img); err != nil { - return err - } - } - utils.Debugf("Completed processing %s", address) - - return nil -} - -func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 { - return job.Errorf("Usage: %s TERM", job.Name) - } - var ( - term = job.Args[0] - metaHeaders = map[string][]string{} - authConfig = ®istry.AuthConfig{} - ) - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("metaHeaders", metaHeaders) - - r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), registry.IndexServerAddress()) - if err != nil { - return job.Error(err) - } - results, err := r.SearchRepositories(term) - if err != nil { - return job.Error(err) - } - outs := engine.NewTable("star_count", 0) - for _, result := range results.Results { - out := &engine.Env{} - out.Import(result) - outs.Add(out) - } - outs.ReverseSort() - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) ImageInsert(job *engine.Job) engine.Status { - if len(job.Args) != 3 { - return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name) - } - - var ( - name = job.Args[0] - url = job.Args[1] - path = job.Args[2] - ) - - sf := utils.NewStreamFormatter(job.GetenvBool("json")) - - out := utils.NewWriteFlusher(job.Stdout) - img, err := srv.runtime.Repositories().LookupImage(name) - if err != nil { - return job.Error(err) - } - - file, err := utils.Download(url) - if err != nil { - return job.Error(err) - } - defer file.Body.Close() - - config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.SystemConfig()) - if err != nil { - return job.Error(err) - } - - c, _, err := srv.runtime.Create(config, "") - if err != nil { - return job.Error(err) - } - - if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, utils.TruncateID(img.ID), "Downloading"), path); err != nil { - return job.Error(err) - } - // FIXME: Handle custom repo, tag comment, author - img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil) - if err != nil { - out.Write(sf.FormatError(err)) - return engine.StatusErr - } - out.Write(sf.FormatStatus("", img.ID)) - return engine.StatusOK -} - -func (srv *Server) ImagesViz(job *engine.Job) engine.Status { - images, _ := srv.runtime.Graph().Map() - if images == nil { - return engine.StatusOK - } - job.Stdout.Write([]byte("digraph docker {\n")) - - var ( - parentImage *image.Image - err error - ) - for _, image := range images { - parentImage, err = image.GetParent() - if err != nil { - return job.Errorf("Error while getting parent image: %v", err) - } - if parentImage != nil { - job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) - } else { - job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n")) - } - } - - reporefs := make(map[string][]string) - - for name, repository := range 
srv.runtime.Repositories().Repositories { - for tag, id := range repository { - reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag)) - } - } - - for id, repos := range reporefs { - job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) - } - job.Stdout.Write([]byte(" base [style=invisible]\n}\n")) - return engine.StatusOK -} - -func (srv *Server) Images(job *engine.Job) engine.Status { - var ( - allImages map[string]*image.Image - err error - ) - if job.GetenvBool("all") { - allImages, err = srv.runtime.Graph().Map() - } else { - allImages, err = srv.runtime.Graph().Heads() - } - if err != nil { - return job.Error(err) - } - lookup := make(map[string]*engine.Env) - for name, repository := range srv.runtime.Repositories().Repositories { - if job.Getenv("filter") != "" { - if match, _ := path.Match(job.Getenv("filter"), name); !match { - continue - } - } - for tag, id := range repository { - image, err := srv.runtime.Graph().Get(id) - if err != nil { - log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err) - continue - } - - if out, exists := lookup[id]; exists { - out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag))) - } else { - out := &engine.Env{} - delete(allImages, id) - out.Set("ParentId", image.Parent) - out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)}) - out.Set("Id", image.ID) - out.SetInt64("Created", image.Created.Unix()) - out.SetInt64("Size", image.Size) - out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) - lookup[id] = out - } - - } - } - - outs := engine.NewTable("Created", len(lookup)) - for _, value := range lookup { - outs.Add(value) - } - - // Display images which aren't part of a repository/tag - if job.Getenv("filter") == "" { - for _, image := range allImages { - out := &engine.Env{} - out.Set("ParentId", image.Parent) - out.SetList("RepoTags", []string{":"}) - out.Set("Id", image.ID) - out.SetInt64("Created", image.Created.Unix()) - out.SetInt64("Size", image.Size) - out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) - outs.Add(out) - } - } - - outs.ReverseSort() - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) DockerInfo(job *engine.Job) engine.Status { - images, _ := srv.runtime.Graph().Map() - var imgcount int - if images == nil { - imgcount = 0 - } else { - imgcount = len(images) - } - kernelVersion := "" - if kv, err := utils.GetKernelVersion(); err == nil { - kernelVersion = kv.String() - } - - // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) - initPath := utils.DockerInitPath("") - if initPath == "" { - // if that fails, we'll just return the path from the runtime - initPath = srv.runtime.SystemInitPath() - } - - v := &engine.Env{} - v.SetInt("Containers", len(srv.runtime.List())) - v.SetInt("Images", imgcount) - v.Set("Driver", srv.runtime.GraphDriver().String()) - v.SetJson("DriverStatus", srv.runtime.GraphDriver().Status()) - v.SetBool("MemoryLimit", srv.runtime.SystemConfig().MemoryLimit) - v.SetBool("SwapLimit", srv.runtime.SystemConfig().SwapLimit) - v.SetBool("IPv4Forwarding", !srv.runtime.SystemConfig().IPv4ForwardingDisabled) - v.SetBool("Debug", 
os.Getenv("DEBUG") != "") - v.SetInt("NFd", utils.GetTotalUsedFds()) - v.SetInt("NGoroutines", goruntime.NumGoroutine()) - v.Set("ExecutionDriver", srv.runtime.ExecutionDriver().Name()) - v.SetInt("NEventsListener", len(srv.listeners)) - v.Set("KernelVersion", kernelVersion) - v.Set("IndexServerAddress", registry.IndexServerAddress()) - v.Set("InitSha1", dockerversion.INITSHA1) - v.Set("InitPath", initPath) - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) DockerVersion(job *engine.Job) engine.Status { - v := &engine.Env{} - v.Set("Version", dockerversion.VERSION) - v.Set("GitCommit", dockerversion.GITCOMMIT) - v.Set("GoVersion", goruntime.Version()) - v.Set("Os", goruntime.GOOS) - v.Set("Arch", goruntime.GOARCH) - if kernelVersion, err := utils.GetKernelVersion(); err == nil { - v.Set("KernelVersion", kernelVersion.String()) - } - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) ImageHistory(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 { - return job.Errorf("Usage: %s IMAGE", job.Name) - } - name := job.Args[0] - foundImage, err := srv.runtime.Repositories().LookupImage(name) - if err != nil { - return job.Error(err) - } - - lookupMap := make(map[string][]string) - for name, repository := range srv.runtime.Repositories().Repositories { - for tag, id := range repository { - // If the ID already has a reverse lookup, do not update it unless for "latest" - if _, exists := lookupMap[id]; !exists { - lookupMap[id] = []string{} - } - lookupMap[id] = append(lookupMap[id], name+":"+tag) - } - } - - outs := engine.NewTable("Created", 0) - err = foundImage.WalkHistory(func(img *image.Image) error { - out := &engine.Env{} - out.Set("Id", img.ID) - out.SetInt64("Created", img.Created.Unix()) - out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " ")) - out.SetList("Tags", lookupMap[img.ID]) - out.SetInt64("Size", img.Size) - outs.Add(out) - return nil - }) - outs.ReverseSort() - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) ContainerTop(job *engine.Job) engine.Status { - if len(job.Args) != 1 && len(job.Args) != 2 { - return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER [PS_ARGS]\n", job.Name) - } - var ( - name = job.Args[0] - psArgs = "-ef" - ) - - if len(job.Args) == 2 && job.Args[1] != "" { - psArgs = job.Args[1] - } - - if container := srv.runtime.Get(name); container != nil { - if !container.State.IsRunning() { - return job.Errorf("Container %s is not running", name) - } - pids, err := srv.runtime.ExecutionDriver().GetPidsForContainer(container.ID) - if err != nil { - return job.Error(err) - } - output, err := exec.Command("ps", psArgs).Output() - if err != nil { - return job.Errorf("Error running ps: %s", err) - } - - lines := strings.Split(string(output), "\n") - header := strings.Fields(lines[0]) - out := &engine.Env{} - out.SetList("Titles", header) - - pidIndex := -1 - for i, name := range header { - if name == "PID" { - pidIndex = i - } - } - if pidIndex == -1 { - return job.Errorf("Couldn't find PID field in ps output") - } - - processes := [][]string{} - for _, line := range lines[1:] { - if len(line) == 0 { - continue - } - fields := strings.Fields(line) - p, err := strconv.Atoi(fields[pidIndex]) - if err != nil { - return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) - } - - for _, pid := range pids { - if pid == p { - // Make sure number of fields equals number of header titles - // merging "overhanging" fields - process := fields[:len(header)-1] - process = append(process, strings.Join(fields[len(header)-1:], " ")) - processes = append(processes, process) - } - } - } - out.SetJson("Processes", processes) - out.WriteTo(job.Stdout) - return engine.StatusOK - - } - return job.Errorf("No such container: %s", name) -} - -func (srv *Server) ContainerChanges(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 { - return job.Errorf("Usage: %s CONTAINER", job.Name) - } - name := job.Args[0] - if container := srv.runtime.Get(name); container != nil { - outs := engine.NewTable("", 0) - changes, err := container.Changes() - if err != nil { - return job.Error(err) - } - for _, change := range changes { - out := &engine.Env{} - if err := out.Import(change); err != nil { - return job.Error(err) - } - outs.Add(out) - } - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - } else { - return job.Errorf("No such container: %s", name) - } - return engine.StatusOK -} - -func (srv *Server) Containers(job *engine.Job) engine.Status { - var ( - foundBefore bool - displayed int - all = job.GetenvBool("all") - since = job.Getenv("since") - before = job.Getenv("before") - n = job.GetenvInt("limit") - size = job.GetenvBool("size") - ) - outs := engine.NewTable("Created", 0) - - names := map[string][]string{} - srv.runtime.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { - names[e.ID()] = append(names[e.ID()], p) - return nil - }, -1) - - for _, container := range srv.runtime.List() { - if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" { - continue - } - if before != "" && !foundBefore { - if container.ID == before || utils.TruncateID(container.ID) == before { - foundBefore = true - } - continue - } - if n > 0 && displayed == n { - break - } - if container.ID == since || utils.TruncateID(container.ID) == since { - break - } - displayed++ - out := &engine.Env{} - out.Set("Id", container.ID) - out.SetList("Names", names[container.ID]) - out.Set("Image", srv.runtime.Repositories().ImageName(container.Image)) - if len(container.Args) > 0 { - out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, strings.Join(container.Args, " "))) - } 
else { - out.Set("Command", fmt.Sprintf("\"%s\"", container.Path)) - } - out.SetInt64("Created", container.Created.Unix()) - out.Set("Status", container.State.String()) - str, err := container.NetworkSettings.PortMappingAPI().ToListString() - if err != nil { - return job.Error(err) - } - out.Set("Ports", str) - if size { - sizeRw, sizeRootFs := container.GetSize() - out.SetInt64("SizeRw", sizeRw) - out.SetInt64("SizeRootFs", sizeRootFs) - } - outs.Add(out) - } - outs.ReverseSort() - if _, err := outs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) - } - name := job.Args[0] - - container := srv.runtime.Get(name) - if container == nil { - return job.Errorf("No such container: %s", name) - } - var config = container.Config - var newConfig runconfig.Config - if err := job.GetenvJson("config", &newConfig); err != nil { - return job.Error(err) - } - - if err := runconfig.Merge(&newConfig, config); err != nil { - return job.Error(err) - } - - img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig) - if err != nil { - return job.Error(err) - } - job.Printf("%s\n", img.ID) - return engine.StatusOK -} - -func (srv *Server) ImageTag(job *engine.Job) engine.Status { - if len(job.Args) != 2 && len(job.Args) != 3 { - return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) - } - var tag string - if len(job.Args) == 3 { - tag = job.Args[2] - } - if err := srv.runtime.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error { - history, err := r.GetRemoteHistory(imgID, endpoint, token) - if err != nil { - return err - } - out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil)) - // FIXME: Try to stream the images? 
- // FIXME: Launch the getRemoteImage() in goroutines - - for i := len(history) - 1; i >= 0; i-- { - id := history[i] - - // ensure no two downloads of the same layer happen at the same time - if c, err := srv.poolAdd("pull", "layer:"+id); err != nil { - utils.Errorf("Image (id: %s) pull is already running, skipping: %v", id, err) - <-c - } - defer srv.poolRemove("pull", "layer:"+id) - - if !srv.runtime.Graph().Exists(id) { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil)) - var ( - imgJSON []byte - imgSize int - err error - img *image.Image - ) - retries := 5 - for j := 1; j <= retries; j++ { - imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) - if err != nil && j == retries { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) - return err - } else if err != nil { - time.Sleep(time.Duration(j) * 500 * time.Millisecond) - continue - } - img, err = image.NewImgJSON(imgJSON) - if err != nil && j == retries { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) - return fmt.Errorf("Failed to parse json: %s", err) - } else if err != nil { - time.Sleep(time.Duration(j) * 500 * time.Millisecond) - continue - } else { - break - } - } - - // Get the layer - out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling fs layer", nil)) - layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token) - if err != nil { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) - return err - } - defer layer.Close() - if err := srv.runtime.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil { - out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) - return err - } - } - out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil)) - - } - return nil -} - -func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error { - out.Write(sf.FormatStatus("", "Pulling repository %s", localName)) - - repoData, err := r.GetRepositoryData(remoteName) - if err != nil { - return err - } - - utils.Debugf("Retrieving the tag list") - tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens) - if err != nil { - utils.Errorf("%v", err) - return err - } - - for tag, id := range tagsList { - repoData.ImgList[id] = ®istry.ImgData{ - ID: id, - Tag: tag, - Checksum: "", - } - } - - utils.Debugf("Registering tags") - // If no tag has been specified, pull them all - if askedTag == "" { - for tag, id := range tagsList { - repoData.ImgList[id].Tag = tag - } - } else { - // Otherwise, check that the tag exists and use only that one - id, exists := tagsList[askedTag] - if !exists { - return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName) - } - repoData.ImgList[id].Tag = askedTag - } - - errors := make(chan error) - for _, image := range repoData.ImgList { - downloadImage := func(img *registry.ImgData) { - if askedTag != "" && img.Tag != askedTag { - utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID) - if parallel { - errors <- nil - } - return - } - - if img.Tag == "" { - utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) - if parallel { - errors <- nil - } - return - } - - // ensure no two downloads of the same image happen at the same 
time - if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil { - if c != nil { - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) - <-c - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) - } else { - utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) - } - if parallel { - errors <- nil - } - return - } - defer srv.poolRemove("pull", "img:"+img.ID) - - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil)) - success := false - var lastErr error - for _, ep := range repoData.Endpoints { - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil)) - if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { - // Its not ideal that only the last error is returned, it would be better to concatenate the errors. - // As the error is also given to the output stream the user will see the error. - lastErr = err - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil)) - continue - } - success = true - break - } - if !success { - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, %s", img.Tag, localName, lastErr), nil)) - if parallel { - errors <- fmt.Errorf("Could not find repository on any of the indexed registries.") - return - } - } - out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) - - if parallel { - errors <- nil - } - } - - if parallel { - go downloadImage(image) - } else { - downloadImage(image) - } - } - if parallel { - var lastError error - for i := 0; i < len(repoData.ImgList); i++ { - if err := <-errors; err != nil { - lastError = err - } - } - if lastError != nil { - return lastError - } - - } - for tag, id := range tagsList { - if askedTag != "" && tag != askedTag { - continue - } - if err := srv.runtime.Repositories().Set(localName, tag, id, true); err != nil { - return err - } - } - if err := srv.runtime.Repositories().Save(); err != nil { - return err - } - - return nil -} - -func (srv *Server) poolAdd(kind, key string) (chan struct{}, error) { - srv.Lock() - defer srv.Unlock() - - if c, exists := srv.pullingPool[key]; exists { - return c, fmt.Errorf("pull %s is already in progress", key) - } - if c, exists := srv.pushingPool[key]; exists { - return c, fmt.Errorf("push %s is already in progress", key) - } - - c := make(chan struct{}) - switch kind { - case "pull": - srv.pullingPool[key] = c - case "push": - srv.pushingPool[key] = c - default: - return nil, fmt.Errorf("Unknown pool type") - } - return c, nil -} - -func (srv *Server) poolRemove(kind, key string) error { - srv.Lock() - defer srv.Unlock() - switch kind { - case "pull": - if c, exists := srv.pullingPool[key]; exists { - close(c) - delete(srv.pullingPool, key) - } - case "push": - if c, exists := srv.pushingPool[key]; exists { - close(c) - delete(srv.pushingPool, key) - } - default: - return fmt.Errorf("Unknown pool type") - } - return nil -} - -func (srv *Server) ImagePull(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 && n != 2 { - return job.Errorf("Usage: %s IMAGE [TAG]", job.Name) - } - var ( - localName = job.Args[0] - tag string - sf = utils.NewStreamFormatter(job.GetenvBool("json")) - authConfig = 
®istry.AuthConfig{} - metaHeaders map[string][]string - ) - if len(job.Args) > 1 { - tag = job.Args[1] - } - - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("metaHeaders", metaHeaders) - - c, err := srv.poolAdd("pull", localName+":"+tag) - if err != nil { - if c != nil { - // Another pull of the same repository is already taking place; just wait for it to finish - job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName)) - <-c - return engine.StatusOK - } - return job.Error(err) - } - defer srv.poolRemove("pull", localName+":"+tag) - - // Resolve the Repository name from fqn to endpoint + name - hostname, remoteName, err := registry.ResolveRepositoryName(localName) - if err != nil { - return job.Error(err) - } - - endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname) - if err != nil { - return job.Error(err) - } - - r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) - if err != nil { - return job.Error(err) - } - - if endpoint == registry.IndexServerAddress() { - // If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar" - localName = remoteName - } - - if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil { - return job.Error(err) - } - - return engine.StatusOK -} - -// Retrieve the all the images to be uploaded in the correct order -func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[string][]string, error) { - var ( - imageList []string - imagesSeen map[string]bool = make(map[string]bool) - tagsByImage map[string][]string = make(map[string][]string) - ) - - for tag, id := range localRepo { - var imageListForThisTag []string - - tagsByImage[id] = append(tagsByImage[id], tag) - - for img, err := srv.runtime.Graph().Get(id); img != nil; img, err = img.GetParent() { - if err != nil { - return nil, nil, err - } - - if imagesSeen[img.ID] { - // This image is already on the list, we can ignore it and all its parents - break - } - - imagesSeen[img.ID] = true - imageListForThisTag = append(imageListForThisTag, img.ID) - } - - // reverse the image list for this tag (so the "most"-parent image is first) - for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 { - imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i] - } - - // append to main image list - imageList = append(imageList, imageListForThisTag...) 
- } - - utils.Debugf("Image list: %v", imageList) - utils.Debugf("Tags by image: %v", tagsByImage) - - return imageList, tagsByImage, nil -} - -func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, sf *utils.StreamFormatter) error { - out = utils.NewWriteFlusher(out) - utils.Debugf("Local repo: %s", localRepo) - imgList, tagsByImage, err := srv.getImageList(localRepo) - if err != nil { - return err - } - - out.Write(sf.FormatStatus("", "Sending image list")) - - var repoData *registry.RepositoryData - var imageIndex []*registry.ImgData - - for _, imgId := range imgList { - if tags, exists := tagsByImage[imgId]; exists { - // If an image has tags you must add an entry in the image index - // for each tag - for _, tag := range tags { - imageIndex = append(imageIndex, ®istry.ImgData{ - ID: imgId, - Tag: tag, - }) - } - } else { - // If the image does not have a tag it still needs to be sent to the - // registry with an empty tag so that it is accociated with the repository - imageIndex = append(imageIndex, ®istry.ImgData{ - ID: imgId, - Tag: "", - }) - - } - } - - utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo) - for _, data := range imageIndex { - utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag) - } - - // Register all the images in a repository with the registry - // If an image is not in this list it will not be associated with the repository - repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil) - if err != nil { - return err - } - - for _, ep := range repoData.Endpoints { - out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo))) - - for _, imgId := range imgList { - if r.LookupRemoteImage(imgId, ep, repoData.Tokens) { - out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId))) - } else { - if _, err := srv.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil { - // FIXME: Continue on error? 
- return err - } - } - - for _, tag := range tagsByImage[imgId] { - out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag)) - - if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil { - return err - } - } - } - } - - if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil { - return err - } - - return nil -} - -func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) { - out = utils.NewWriteFlusher(out) - jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.Graph().Root, imgID, "json")) - if err != nil { - return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err) - } - out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil)) - - imgData := ®istry.ImgData{ - ID: imgID, - } - - // Send the json - if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil { - if err == registry.ErrAlreadyExists { - out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil)) - return "", nil - } - return "", err - } - - layerData, err := srv.runtime.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out) - if err != nil { - return "", fmt.Errorf("Failed to generate layer archive: %s", err) - } - defer os.RemoveAll(layerData.Name()) - - // Send the layer - checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) - if err != nil { - return "", err - } - imgData.Checksum = checksum - imgData.ChecksumPayload = checksumPayload - // Send the checksum - if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil { - return "", err - } - - out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil)) - return imgData.Checksum, nil -} - -// FIXME: Allow to interrupt current push when new push of same image is done. 
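Both the pull path above and the push path below serialize work per key through poolAdd/poolRemove: the first caller for a given key creates a channel and performs the transfer, while any later caller receives that same channel back together with an error and simply waits for it to be closed. A minimal, self-contained sketch of that single-flight idea follows; every name in it is illustrative and not taken from this code.

package main

import (
	"fmt"
	"sync"
	"time"
)

// pool mirrors the pullingPool/pushingPool idea: one channel per in-flight
// key, closed by the worker that owns it once the transfer finishes.
type pool struct {
	mu       sync.Mutex
	inflight map[string]chan struct{}
}

// acquire returns (done, true) for the first caller of a key; later callers
// get the existing channel and false, meaning someone else is already on it.
func (p *pool) acquire(key string) (chan struct{}, bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, exists := p.inflight[key]; exists {
		return c, false
	}
	c := make(chan struct{})
	p.inflight[key] = c
	return c, true
}

// release closes the channel, waking every waiter, and forgets the key.
func (p *pool) release(key string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, exists := p.inflight[key]; exists {
		close(c)
		delete(p.inflight, key)
	}
}

func main() {
	p := &pool{inflight: make(map[string]chan struct{})}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			c, first := p.acquire("pull:img:abc123")
			if !first {
				<-c // another goroutine already owns this pull; just wait
				fmt.Printf("worker %d waited for the existing pull\n", n)
				return
			}
			defer p.release("pull:img:abc123")
			time.Sleep(10 * time.Millisecond) // stand-in for the real transfer
			fmt.Printf("worker %d performed the pull\n", n)
		}(i)
	}
	wg.Wait()
}

Run as written, exactly one goroutine reports performing the pull; the other two only wait on the shared channel.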
-func (srv *Server) ImagePush(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 { - return job.Errorf("Usage: %s IMAGE", job.Name) - } - var ( - localName = job.Args[0] - sf = utils.NewStreamFormatter(job.GetenvBool("json")) - authConfig = ®istry.AuthConfig{} - metaHeaders map[string][]string - ) - - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("metaHeaders", metaHeaders) - if _, err := srv.poolAdd("push", localName); err != nil { - return job.Error(err) - } - defer srv.poolRemove("push", localName) - - // Resolve the Repository name from fqn to endpoint + name - hostname, remoteName, err := registry.ResolveRepositoryName(localName) - if err != nil { - return job.Error(err) - } - - endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname) - if err != nil { - return job.Error(err) - } - - img, err := srv.runtime.Graph().Get(localName) - r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint) - if err2 != nil { - return job.Error(err2) - } - - if err != nil { - reposLen := len(srv.runtime.Repositories().Repositories[localName]) - job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen)) - // If it fails, try to get the repository - if localRepo, exists := srv.runtime.Repositories().Repositories[localName]; exists { - if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil { - return job.Error(err) - } - return engine.StatusOK - } - return job.Error(err) - } - - var token []string - job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName)) - if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) ImageImport(job *engine.Job) engine.Status { - if n := len(job.Args); n != 2 && n != 3 { - return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name) - } - var ( - src = job.Args[0] - repo = job.Args[1] - tag string - sf = utils.NewStreamFormatter(job.GetenvBool("json")) - archive archive.ArchiveReader - resp *http.Response - ) - if len(job.Args) > 2 { - tag = job.Args[2] - } - - if src == "-" { - archive = job.Stdin - } else { - u, err := url.Parse(src) - if err != nil { - return job.Error(err) - } - if u.Scheme == "" { - u.Scheme = "http" - u.Host = src - u.Path = "" - } - job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u)) - // Download with curl (pretty progress bar) - // If curl is not available, fallback to http.Get() - resp, err = utils.Download(u.String()) - if err != nil { - return job.Error(err) - } - progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing") - defer progressReader.Close() - archive = progressReader - } - img, err := srv.runtime.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil) - if err != nil { - return job.Error(err) - } - // Optionally register the image at REPO/TAG - if repo != "" { - if err := srv.runtime.Repositories().Set(repo, tag, img.ID, true); err != nil { - return job.Error(err) - } - } - job.Stdout.Write(sf.FormatStatus("", img.ID)) - return engine.StatusOK -} - -func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { - var name string - if len(job.Args) == 1 { - name = job.Args[0] - } else if len(job.Args) > 1 { - return job.Errorf("Usage: %s", job.Name) - } - config := runconfig.ContainerConfigFromJob(job) - if config.Memory != 0 && config.Memory < 524288 { - return 
job.Errorf("Minimum memory limit allowed is 512k") - } - if config.Memory > 0 && !srv.runtime.SystemConfig().MemoryLimit { - job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n") - config.Memory = 0 - } - if config.Memory > 0 && !srv.runtime.SystemConfig().SwapLimit { - job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n") - config.MemorySwap = -1 - } - resolvConf, err := utils.GetResolvConf() - if err != nil { - return job.Error(err) - } - if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.Config().Dns) == 0 && utils.CheckLocalDns(resolvConf) { - job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", runtime.DefaultDns) - config.Dns = runtime.DefaultDns - } - - container, buildWarnings, err := srv.runtime.Create(config, name) - if err != nil { - if srv.runtime.Graph().IsNotExist(err) { - _, tag := utils.ParseRepositoryTag(config.Image) - if tag == "" { - tag = graph.DEFAULTTAG - } - return job.Errorf("No such image: %s (tag: %s)", config.Image, tag) - } - return job.Error(err) - } - if !container.Config.NetworkDisabled && srv.runtime.SystemConfig().IPv4ForwardingDisabled { - job.Errorf("IPv4 forwarding is disabled.\n") - } - srv.LogEvent("create", container.ID, srv.runtime.Repositories().ImageName(container.Image)) - // FIXME: this is necessary because runtime.Create might return a nil container - // with a non-nil error. This should not happen! Once it's fixed we - // can remove this workaround. - if container != nil { - job.Printf("%s\n", container.ID) - } - for _, warning := range buildWarnings { - job.Errorf("%s\n", warning) - } - return engine.StatusOK -} - -func (srv *Server) ContainerRestart(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s CONTAINER\n", job.Name) - } - var ( - name = job.Args[0] - t = 10 - ) - if job.EnvExists("t") { - t = job.GetenvInt("t") - } - if container := srv.runtime.Get(name); container != nil { - if err := container.Restart(int(t)); err != nil { - return job.Errorf("Cannot restart container %s: %s\n", name, err) - } - srv.LogEvent("restart", container.ID, srv.runtime.Repositories().ImageName(container.Image)) - } else { - return job.Errorf("No such container: %s\n", name) - } - return engine.StatusOK -} - -func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name) - } - name := job.Args[0] - removeVolume := job.GetenvBool("removeVolume") - removeLink := job.GetenvBool("removeLink") - forceRemove := job.GetenvBool("forceRemove") - - container := srv.runtime.Get(name) - - if removeLink { - if container == nil { - return job.Errorf("No such link: %s", name) - } - name, err := runtime.GetFullContainerName(name) - if err != nil { - job.Error(err) - } - parent, n := path.Split(name) - if parent == "/" { - return job.Errorf("Conflict, cannot remove the default name of the container") - } - pe := srv.runtime.ContainerGraph().Get(parent) - if pe == nil { - return job.Errorf("Cannot get parent %s for name %s", parent, name) - } - parentContainer := srv.runtime.Get(pe.ID()) - - if parentContainer != nil { - parentContainer.DisableLink(n) - } - - if err := srv.runtime.ContainerGraph().Delete(name); err != nil { - return job.Error(err) - } - return engine.StatusOK - } - - if container != nil { - if container.State.IsRunning() { - if forceRemove { - if err := container.Stop(5); err != nil { - return job.Errorf("Could not stop running container, cannot remove - %v", err) - } - } else { - return job.Errorf("Impossible to remove a running container, please stop it first or use -f") - } - } - if err := srv.runtime.Destroy(container); err != nil { - return job.Errorf("Cannot destroy container %s: %s", name, err) - } - srv.LogEvent("destroy", container.ID, srv.runtime.Repositories().ImageName(container.Image)) - - if removeVolume { - var ( - volumes = make(map[string]struct{}) - binds = make(map[string]struct{}) - usedVolumes = make(map[string]*runtime.Container) - ) - - // the volume id is always the base of the path - getVolumeId := func(p string) string { - return filepath.Base(strings.TrimSuffix(p, "/layer")) - } - - // populate bind map so that they can be skipped and not removed - for _, bind := range container.HostConfig().Binds { - source := strings.Split(bind, ":")[0] - // TODO: refactor all volume stuff, all of it - // this is very important that we eval the link - // or comparing the keys to container.Volumes will not work - p, err := filepath.EvalSymlinks(source) - if err != nil { - return job.Error(err) - } - source = p - binds[source] = struct{}{} - } - - // Store all the deleted containers volumes - for _, volumeId := range container.Volumes { - // Skip the volumes mounted from external - // bind mounts here will will be evaluated for a symlink - if _, exists := binds[volumeId]; exists { - continue - } - - volumeId = getVolumeId(volumeId) - volumes[volumeId] = struct{}{} - } - - // Retrieve all volumes from all remaining containers - for _, container := range srv.runtime.List() { - for _, containerVolumeId := range container.Volumes { - containerVolumeId = getVolumeId(containerVolumeId) - usedVolumes[containerVolumeId] = container - } - } - - for volumeId := range volumes { - // If the requested volu - if c, exists := usedVolumes[volumeId]; exists { - log.Printf("The volume %s is used by the container %s. Impossible to remove it. 
Skipping.\n", volumeId, c.ID) - continue - } - if err := srv.runtime.Volumes().Delete(volumeId); err != nil { - return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err) - } - } - } - } else { - return job.Errorf("No such container: %s", name) - } - return engine.StatusOK -} - -func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force bool) error { - var ( - repoName, tag string - tags = []string{} - ) - - repoName, tag = utils.ParseRepositoryTag(name) - if tag == "" { - tag = graph.DEFAULTTAG - } - - img, err := srv.runtime.Repositories().LookupImage(name) - if err != nil { - if r, _ := srv.runtime.Repositories().Get(repoName); r != nil { - return fmt.Errorf("No such image: %s:%s", repoName, tag) - } - return fmt.Errorf("No such image: %s", name) - } - - if strings.Contains(img.ID, name) { - repoName = "" - tag = "" - } - - byParents, err := srv.runtime.Graph().ByParent() - if err != nil { - return err - } - - //If delete by id, see if the id belong only to one repository - if repoName == "" { - for _, repoAndTag := range srv.runtime.Repositories().ByID()[img.ID] { - parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag) - if repoName == "" || repoName == parsedRepo { - repoName = parsedRepo - if parsedTag != "" { - tags = append(tags, parsedTag) - } - } else if repoName != parsedRepo && !force { - // the id belongs to multiple repos, like base:latest and user:test, - // in that case return conflict - return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name) - } - } - } else { - tags = append(tags, tag) - } - - if !first && len(tags) > 0 { - return nil - } - - //Untag the current image - for _, tag := range tags { - tagDeleted, err := srv.runtime.Repositories().Delete(repoName, tag) - if err != nil { - return err - } - if tagDeleted { - out := &engine.Env{} - out.Set("Untagged", repoName+":"+tag) - imgs.Add(out) - srv.LogEvent("untag", img.ID, "") - } - } - tags = srv.runtime.Repositories().ByID()[img.ID] - if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { - if len(byParents[img.ID]) == 0 { - if err := srv.canDeleteImage(img.ID); err != nil { - return err - } - if err := srv.runtime.Repositories().DeleteAll(img.ID); err != nil { - return err - } - if err := srv.runtime.Graph().Delete(img.ID); err != nil { - return err - } - out := &engine.Env{} - out.Set("Deleted", img.ID) - imgs.Add(out) - srv.LogEvent("delete", img.ID, "") - if img.Parent != "" { - err := srv.DeleteImage(img.Parent, imgs, false, force) - if first { - return err - } - - } - - } - } - return nil -} - -func (srv *Server) ImageDelete(job *engine.Job) engine.Status { - if n := len(job.Args); n != 1 { - return job.Errorf("Usage: %s IMAGE", job.Name) - } - imgs := engine.NewTable("", 0) - if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force")); err != nil { - return job.Error(err) - } - if len(imgs.Data) == 0 { - return job.Errorf("Conflict, %s wasn't deleted", job.Args[0]) - } - if _, err := imgs.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK -} - -func (srv *Server) canDeleteImage(imgID string) error { - for _, container := range srv.runtime.List() { - parent, err := srv.runtime.Repositories().LookupImage(container.Image) - if err != nil { - return err - } - - if err := parent.WalkHistory(func(p *image.Image) error { - if imgID == p.ID { - return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it", utils.TruncateID(imgID), 
utils.TruncateID(container.ID)) - } - return nil - }); err != nil { - return err - } - } - return nil -} - -func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { - // Retrieve all images - images, err := srv.runtime.Graph().Map() - if err != nil { - return nil, err - } - - // Store the tree in a map of map (map[parentId][childId]) - imageMap := make(map[string]map[string]struct{}) - for _, img := range images { - if _, exists := imageMap[img.Parent]; !exists { - imageMap[img.Parent] = make(map[string]struct{}) - } - imageMap[img.Parent][img.ID] = struct{}{} - } - - // Loop on the children of the given image and check the config - var match *image.Image - for elem := range imageMap[imgID] { - img, err := srv.runtime.Graph().Get(elem) - if err != nil { - return nil, err - } - if runconfig.Compare(&img.ContainerConfig, config) { - if match == nil || match.Created.Before(img.Created) { - match = img - } - } - } - return match, nil -} - -func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runconfig.HostConfig) error { - runtime := srv.runtime - - if hostConfig != nil && hostConfig.Links != nil { - for _, l := range hostConfig.Links { - parts, err := utils.PartParser("name:alias", l) - if err != nil { - return err - } - child, err := srv.runtime.GetByName(parts["name"]) - if err != nil { - return err - } - if child == nil { - return fmt.Errorf("Could not get container for %s", parts["name"]) - } - if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil { - return err - } - } - - // After we load all the links into the runtime - // set them to nil on the hostconfig - hostConfig.Links = nil - if err := container.WriteHostConfig(); err != nil { - return err - } - } - return nil -} - -func (srv *Server) ContainerStart(job *engine.Job) engine.Status { - if len(job.Args) < 1 { - return job.Errorf("Usage: %s container_id", job.Name) - } - name := job.Args[0] - runtime := srv.runtime - container := runtime.Get(name) - - if container == nil { - return job.Errorf("No such container: %s", name) - } - // If no environment was set, then no hostconfig was passed. - if len(job.Environ()) > 0 { - hostConfig := runconfig.ContainerHostConfigFromJob(job) - // Validate the HostConfig binds. Make sure that: - // 1) the source of a bind mount isn't / - // The bind mount "/:/foo" isn't allowed. - // 2) Check that the source exists - // The source to be bind mounted must exist. 
- for _, bind := range hostConfig.Binds { - splitBind := strings.Split(bind, ":") - source := splitBind[0] - - // refuse to bind mount "/" to the container - if source == "/" { - return job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind) - } - - // ensure the source exists on the host - _, err := os.Stat(source) - if err != nil && os.IsNotExist(err) { - err = os.MkdirAll(source, 0755) - if err != nil { - return job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error()) - } - } - } - // Register any links from the host config before starting the container - if err := srv.RegisterLinks(container, hostConfig); err != nil { - return job.Error(err) - } - container.SetHostConfig(hostConfig) - container.ToDisk() - } - if err := container.Start(); err != nil { - return job.Errorf("Cannot start container %s: %s", name, err) - } - srv.LogEvent("start", container.ID, runtime.Repositories().ImageName(container.Image)) - - return engine.StatusOK -} - -func (srv *Server) ContainerStop(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s CONTAINER\n", job.Name) - } - var ( - name = job.Args[0] - t = 10 - ) - if job.EnvExists("t") { - t = job.GetenvInt("t") - } - if container := srv.runtime.Get(name); container != nil { - if err := container.Stop(int(t)); err != nil { - return job.Errorf("Cannot stop container %s: %s\n", name, err) - } - srv.LogEvent("stop", container.ID, srv.runtime.Repositories().ImageName(container.Image)) - } else { - return job.Errorf("No such container: %s\n", name) - } - return engine.StatusOK -} - -func (srv *Server) ContainerWait(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s", job.Name) - } - name := job.Args[0] - if container := srv.runtime.Get(name); container != nil { - status := container.Wait() - job.Printf("%d\n", status) - return engine.StatusOK - } - return job.Errorf("%s: no such container: %s", job.Name, name) -} - -func (srv *Server) ContainerResize(job *engine.Job) engine.Status { - if len(job.Args) != 3 { - return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) - } - name := job.Args[0] - height, err := strconv.Atoi(job.Args[1]) - if err != nil { - return job.Error(err) - } - width, err := strconv.Atoi(job.Args[2]) - if err != nil { - return job.Error(err) - } - if container := srv.runtime.Get(name); container != nil { - if err := container.Resize(height, width); err != nil { - return job.Error(err) - } - return engine.StatusOK - } - return job.Errorf("No such container: %s", name) -} - -func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { - if len(job.Args) != 1 { - return job.Errorf("Usage: %s CONTAINER\n", job.Name) - } - - var ( - name = job.Args[0] - logs = job.GetenvBool("logs") - stream = job.GetenvBool("stream") - stdin = job.GetenvBool("stdin") - stdout = job.GetenvBool("stdout") - stderr = job.GetenvBool("stderr") - ) - - container := srv.runtime.Get(name) - if container == nil { - return job.Errorf("No such container: %s", name) - } - - //logs - if logs { - cLog, err := container.ReadLog("json") - if err != nil && os.IsNotExist(err) { - // Legacy logs - utils.Debugf("Old logs format") - if stdout { - cLog, err := container.ReadLog("stdout") - if err != nil { - utils.Errorf("Error reading logs (stdout): %s", err) - } else if _, err := io.Copy(job.Stdout, cLog); err != nil { - utils.Errorf("Error streaming logs (stdout): %s", err) - } - } - if stderr { - cLog, err := container.ReadLog("stderr") - if err != nil { - utils.Errorf("Error reading logs (stderr): %s", err) - } else if _, err := io.Copy(job.Stderr, cLog); err != nil { - utils.Errorf("Error streaming logs (stderr): %s", err) - } - } - } else if err != nil { - utils.Errorf("Error reading logs (json): %s", err) - } else { - dec := json.NewDecoder(cLog) - for { - l := &utils.JSONLog{} - - if err := dec.Decode(l); err == io.EOF { - break - } else if err != nil { - utils.Errorf("Error streaming logs: %s", err) - break - } - if l.Stream == "stdout" && stdout { - fmt.Fprintf(job.Stdout, "%s", l.Log) - } - if l.Stream == "stderr" && stderr { - fmt.Fprintf(job.Stderr, "%s", l.Log) - } - } - } - } - - //stream - if stream { - if container.State.IsGhost() { - return job.Errorf("Impossible to attach to a ghost container") - } - - var ( - cStdin io.ReadCloser - cStdout, cStderr io.Writer - cStdinCloser io.Closer - ) - - if stdin { - r, w := io.Pipe() - go func() { - defer w.Close() - defer utils.Debugf("Closing buffered stdin pipe") - io.Copy(w, job.Stdin) - }() - cStdin = r - cStdinCloser = job.Stdin - } - if stdout { - cStdout = job.Stdout - } - if stderr { - cStderr = job.Stderr - } - - <-container.Attach(cStdin, cStdinCloser, cStdout, cStderr) - - // If we are in stdinonce mode, wait for the process to end - // otherwise, simply return - if container.Config.StdinOnce && !container.Config.Tty { - container.Wait() - } - } - return engine.StatusOK -} - -func (srv *Server) ContainerInspect(name string) (*runtime.Container, error) { - if container := srv.runtime.Get(name); container != nil { - return container, nil - } - return nil, fmt.Errorf("No such container: %s", name) -} - -func (srv *Server) ImageInspect(name string) (*image.Image, error) { - if image, err := srv.runtime.Repositories().LookupImage(name); err == nil && image != nil { - return image, nil - } - return nil, fmt.Errorf("No such image: %s", name) -} - -func (srv *Server) JobInspect(job *engine.Job) engine.Status { - // TODO: deprecate KIND/conflict - if n := len(job.Args); n != 2 { - return job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name) - } - var ( - name = 
job.Args[0] - kind = job.Args[1] - object interface{} - conflict = job.GetenvBool("conflict") //should the job detect conflict between containers and images - image, errImage = srv.ImageInspect(name) - container, errContainer = srv.ContainerInspect(name) - ) - - if conflict && image != nil && container != nil { - return job.Errorf("Conflict between containers and images") - } - - switch kind { - case "image": - if errImage != nil { - return job.Error(errImage) - } - object = image - case "container": - if errContainer != nil { - return job.Error(errContainer) - } - object = &struct { - *runtime.Container - HostConfig *runconfig.HostConfig - }{container, container.HostConfig()} - default: - return job.Errorf("Unknown kind: %s", kind) - } - - b, err := json.Marshal(object) - if err != nil { - return job.Error(err) - } - job.Stdout.Write(b) - return engine.StatusOK -} - -func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { - if len(job.Args) != 2 { - return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) - } - - var ( - name = job.Args[0] - resource = job.Args[1] - ) - - if container := srv.runtime.Get(name); container != nil { - - data, err := container.Copy(resource) - if err != nil { - return job.Error(err) - } - defer data.Close() - - if _, err := io.Copy(job.Stdout, data); err != nil { - return job.Error(err) - } - return engine.StatusOK - } - return job.Errorf("No such container: %s", name) -} - -func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) { - runtime, err := runtime.NewRuntime(config, eng) - if err != nil { - return nil, err - } - srv := &Server{ - Eng: eng, - runtime: runtime, - pullingPool: make(map[string]chan struct{}), - pushingPool: make(map[string]chan struct{}), - events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events - listeners: make(map[string]chan utils.JSONMessage), - running: true, - } - runtime.SetServer(srv) - return srv, nil -} - -func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory { - httpVersion := make([]utils.VersionInfo, 0, 4) - httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION}) - httpVersion = append(httpVersion, &simpleVersionInfo{"go", goruntime.Version()}) - httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT}) - if kernelVersion, err := utils.GetKernelVersion(); err == nil { - httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()}) - } - httpVersion = append(httpVersion, &simpleVersionInfo{"os", goruntime.GOOS}) - httpVersion = append(httpVersion, &simpleVersionInfo{"arch", goruntime.GOARCH}) - ud := utils.NewHTTPUserAgentDecorator(httpVersion...) 
- md := &utils.HTTPMetaHeadersDecorator{ - Headers: metaHeaders, - } - factory := utils.NewHTTPRequestFactory(ud, md) - return factory -} - -func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage { - now := time.Now().UTC().Unix() - jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now} - srv.AddEvent(jm) - for _, c := range srv.listeners { - select { // non blocking channel - case c <- jm: - default: - } - } - return &jm -} - -func (srv *Server) AddEvent(jm utils.JSONMessage) { - srv.Lock() - defer srv.Unlock() - srv.events = append(srv.events, jm) -} - -func (srv *Server) GetEvents() []utils.JSONMessage { - srv.RLock() - defer srv.RUnlock() - return srv.events -} - -func (srv *Server) SetRunning(status bool) { - srv.Lock() - defer srv.Unlock() - - srv.running = status -} - -func (srv *Server) IsRunning() bool { - srv.RLock() - defer srv.RUnlock() - return srv.running -} - -func (srv *Server) Close() error { - if srv == nil { - return nil - } - srv.SetRunning(false) - if srv.runtime == nil { - return nil - } - return srv.runtime.Close() -} - -type Server struct { - sync.RWMutex - runtime *runtime.Runtime - pullingPool map[string]chan struct{} - pushingPool map[string]chan struct{} - events []utils.JSONMessage - listeners map[string]chan utils.JSONMessage - Eng *engine.Engine - running bool -} diff --git a/server/buildfile.go b/server/buildfile.go new file mode 100644 index 0000000000..af6702cc1d --- /dev/null +++ b/server/buildfile.go @@ -0,0 +1,799 @@ +package server + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/registry" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/runtime" + "github.com/dotcloud/docker/utils" + "io" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" +) + +var ( + ErrDockerfileEmpty = errors.New("Dockerfile cannot be empty") +) + +type BuildFile interface { + Build(io.Reader) (string, error) + CmdFrom(string) error + CmdRun(string) error +} + +type buildFile struct { + runtime *runtime.Runtime + srv *Server + + image string + maintainer string + config *runconfig.Config + + contextPath string + context *utils.TarSum + + verbose bool + utilizeCache bool + rm bool + + authConfig *registry.AuthConfig + configFile *registry.ConfigFile + + tmpContainers map[string]struct{} + tmpImages map[string]struct{} + + outStream io.Writer + errStream io.Writer + + // Deprecated, original writer used for ImagePull. To be removed. 
+ outOld io.Writer + sf *utils.StreamFormatter +} + +func (b *buildFile) clearTmp(containers map[string]struct{}) { + for c := range containers { + tmp := b.runtime.Get(c) + if err := b.runtime.Destroy(tmp); err != nil { + fmt.Fprintf(b.outStream, "Error removing intermediate container %s: %s\n", utils.TruncateID(c), err.Error()) + } else { + fmt.Fprintf(b.outStream, "Removing intermediate container %s\n", utils.TruncateID(c)) + } + } +} + +func (b *buildFile) CmdFrom(name string) error { + image, err := b.runtime.Repositories().LookupImage(name) + if err != nil { + if b.runtime.Graph().IsNotExist(err) { + remote, tag := utils.ParseRepositoryTag(name) + pullRegistryAuth := b.authConfig + if len(b.configFile.Configs) > 0 { + // The request came with a full auth config file, we prefer to use that + endpoint, _, err := registry.ResolveRepositoryName(remote) + if err != nil { + return err + } + resolvedAuth := b.configFile.ResolveAuthConfig(endpoint) + pullRegistryAuth = &resolvedAuth + } + job := b.srv.Eng.Job("pull", remote, tag) + job.SetenvBool("json", b.sf.Json()) + job.SetenvBool("parallel", true) + job.SetenvJson("authConfig", pullRegistryAuth) + job.Stdout.Add(b.outOld) + if err := job.Run(); err != nil { + return err + } + image, err = b.runtime.Repositories().LookupImage(name) + if err != nil { + return err + } + } else { + return err + } + } + b.image = image.ID + b.config = &runconfig.Config{} + if image.Config != nil { + b.config = image.Config + } + if b.config.Env == nil || len(b.config.Env) == 0 { + b.config.Env = append(b.config.Env, "HOME=/", "PATH="+runtime.DefaultPathEnv) + } + // Process ONBUILD triggers if they exist + if nTriggers := len(b.config.OnBuild); nTriggers != 0 { + fmt.Fprintf(b.errStream, "# Executing %d build triggers\n", nTriggers) + } + for n, step := range b.config.OnBuild { + splitStep := strings.Split(step, " ") + stepInstruction := strings.ToUpper(strings.Trim(splitStep[0], " ")) + switch stepInstruction { + case "ONBUILD": + return fmt.Errorf("Source image contains forbidden chained `ONBUILD ONBUILD` trigger: %s", step) + case "MAINTAINER", "FROM": + return fmt.Errorf("Source image contains forbidden %s trigger: %s", stepInstruction, step) + } + if err := b.BuildStep(fmt.Sprintf("onbuild-%d", n), step); err != nil { + return err + } + } + b.config.OnBuild = []string{} + return nil +} + +// The ONBUILD command declares a build instruction to be executed in any future build +// using the current image as a base. +func (b *buildFile) CmdOnbuild(trigger string) error { + splitTrigger := strings.Split(trigger, " ") + triggerInstruction := strings.ToUpper(strings.Trim(splitTrigger[0], " ")) + switch triggerInstruction { + case "ONBUILD": + return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + b.config.OnBuild = append(b.config.OnBuild, trigger) + return b.commit("", b.config.Cmd, fmt.Sprintf("ONBUILD %s", trigger)) +} + +func (b *buildFile) CmdMaintainer(name string) error { + b.maintainer = name + return b.commit("", b.config.Cmd, fmt.Sprintf("MAINTAINER %s", name)) +} + +// probeCache checks to see if image-caching is enabled (`b.utilizeCache`) +// and if so attempts to look up the current `b.image` and `b.config` pair +// in the current server `b.srv`. If an image is found, probeCache returns +// `(true, nil)`. If no image is found, it returns `(false, nil)`. If there +// is any error, it returns `(false, err)`. 
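Callers of probeCache all follow the same shape: temporarily rewrite b.config.Cmd to the exact command the step would record, probe, and skip the container run entirely on a hit. A rough sketch of that calling pattern, using a hypothetical step name and relying on the surrounding buildFile type and its probeCache/commit methods:

func (b *buildFile) cmdExampleNop(comment string) error {
	// Hypothetical instruction, shown only to illustrate how probeCache is consumed.
	cmd := b.config.Cmd
	b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment}
	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)

	hit, err := b.probeCache()
	if err != nil {
		return err
	}
	if hit {
		return nil // an identical image/config pair already exists; reuse it
	}
	// On a miss, fall through to the usual create/run/commit path.
	return b.commit("", b.config.Cmd, comment)
}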
+func (b *buildFile) probeCache() (bool, error) { + if b.utilizeCache { + if cache, err := b.srv.ImageGetCached(b.image, b.config); err != nil { + return false, err + } else if cache != nil { + fmt.Fprintf(b.outStream, " ---> Using cache\n") + utils.Debugf("[BUILDER] Use cached version") + b.image = cache.ID + return true, nil + } else { + utils.Debugf("[BUILDER] Cache miss") + } + } + return false, nil +} + +func (b *buildFile) CmdRun(args string) error { + if b.image == "" { + return fmt.Errorf("Please provide a source image with `from` prior to run") + } + config, _, _, err := runconfig.Parse(append([]string{b.image}, b.buildCmdFromJson(args)...), nil) + if err != nil { + return err + } + + cmd := b.config.Cmd + b.config.Cmd = nil + runconfig.Merge(b.config, config) + + defer func(cmd []string) { b.config.Cmd = cmd }(cmd) + + utils.Debugf("Command to be executed: %v", b.config.Cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + c, err := b.create() + if err != nil { + return err + } + // Ensure that we keep the container mounted until the commit + // to avoid unmounting and then mounting directly again + c.Mount() + defer c.Unmount() + + err = b.run(c) + if err != nil { + return err + } + if err := b.commit(c.ID, cmd, "run"); err != nil { + return err + } + + return nil +} + +func (b *buildFile) FindEnvKey(key string) int { + for k, envVar := range b.config.Env { + envParts := strings.SplitN(envVar, "=", 2) + if key == envParts[0] { + return k + } + } + return -1 +} + +func (b *buildFile) ReplaceEnvMatches(value string) (string, error) { + exp, err := regexp.Compile("(\\\\\\\\+|[^\\\\]|\\b|\\A)\\$({?)([[:alnum:]_]+)(}?)") + if err != nil { + return value, err + } + matches := exp.FindAllString(value, -1) + for _, match := range matches { + match = match[strings.Index(match, "$"):] + matchKey := strings.Trim(match, "${}") + + for _, envVar := range b.config.Env { + envParts := strings.SplitN(envVar, "=", 2) + envKey := envParts[0] + envValue := envParts[1] + + if envKey == matchKey { + value = strings.Replace(value, match, envValue, -1) + break + } + } + } + return value, nil +} + +func (b *buildFile) CmdEnv(args string) error { + tmp := strings.SplitN(args, " ", 2) + if len(tmp) != 2 { + return fmt.Errorf("Invalid ENV format") + } + key := strings.Trim(tmp[0], " \t") + value := strings.Trim(tmp[1], " \t") + + envKey := b.FindEnvKey(key) + replacedValue, err := b.ReplaceEnvMatches(value) + if err != nil { + return err + } + replacedVar := fmt.Sprintf("%s=%s", key, replacedValue) + + if envKey >= 0 { + b.config.Env[envKey] = replacedVar + } else { + b.config.Env = append(b.config.Env, replacedVar) + } + return b.commit("", b.config.Cmd, fmt.Sprintf("ENV %s", replacedVar)) +} + +func (b *buildFile) buildCmdFromJson(args string) []string { + var cmd []string + if err := json.Unmarshal([]byte(args), &cmd); err != nil { + utils.Debugf("Error unmarshalling: %s, setting to /bin/sh -c", err) + cmd = []string{"/bin/sh", "-c", args} + } + return cmd +} + +func (b *buildFile) CmdCmd(args string) error { + cmd := b.buildCmdFromJson(args) + b.config.Cmd = cmd + if err := b.commit("", b.config.Cmd, fmt.Sprintf("CMD %v", cmd)); err != nil { + return err + } + return nil +} + +func (b *buildFile) CmdEntrypoint(args string) error { + entrypoint := b.buildCmdFromJson(args) + b.config.Entrypoint = entrypoint + if err := b.commit("", b.config.Cmd, fmt.Sprintf("ENTRYPOINT %v", entrypoint)); err != nil { + return err + } + return nil +} + +func (b *buildFile) 
CmdExpose(args string) error { + portsTab := strings.Split(args, " ") + + if b.config.ExposedPorts == nil { + b.config.ExposedPorts = make(nat.PortSet) + } + ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...)) + if err != nil { + return err + } + for port := range ports { + if _, exists := b.config.ExposedPorts[port]; !exists { + b.config.ExposedPorts[port] = struct{}{} + } + } + b.config.PortSpecs = nil + + return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports)) +} + +func (b *buildFile) CmdUser(args string) error { + b.config.User = args + return b.commit("", b.config.Cmd, fmt.Sprintf("USER %v", args)) +} + +func (b *buildFile) CmdInsert(args string) error { + return fmt.Errorf("INSERT has been deprecated. Please use ADD instead") +} + +func (b *buildFile) CmdCopy(args string) error { + return fmt.Errorf("COPY has been deprecated. Please use ADD instead") +} + +func (b *buildFile) CmdWorkdir(workdir string) error { + b.config.WorkingDir = workdir + return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) +} + +func (b *buildFile) CmdVolume(args string) error { + if args == "" { + return fmt.Errorf("Volume cannot be empty") + } + + var volume []string + if err := json.Unmarshal([]byte(args), &volume); err != nil { + volume = []string{args} + } + if b.config.Volumes == nil { + b.config.Volumes = map[string]struct{}{} + } + for _, v := range volume { + b.config.Volumes[v] = struct{}{} + } + if err := b.commit("", b.config.Cmd, fmt.Sprintf("VOLUME %s", args)); err != nil { + return err + } + return nil +} + +func (b *buildFile) checkPathForAddition(orig string) error { + origPath := path.Join(b.contextPath, orig) + if p, err := filepath.EvalSymlinks(origPath); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } else { + origPath = p + } + if !strings.HasPrefix(origPath, b.contextPath) { + return fmt.Errorf("Forbidden path outside the build context: %s (%s)", orig, origPath) + } + _, err := os.Stat(origPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + return nil +} + +func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error { + var ( + origPath = path.Join(b.contextPath, orig) + destPath = path.Join(container.RootfsPath(), dest) + ) + // Preserve the trailing '/' + if strings.HasSuffix(dest, "/") { + destPath = destPath + "/" + } + fi, err := os.Stat(origPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("%s: no such file or directory", orig) + } + return err + } + + if fi.IsDir() { + if err := archive.CopyWithTar(origPath, destPath); err != nil { + return err + } + return nil + } + + // First try to unpack the source as an archive + // to support the untar feature we need to clean up the path a little bit + // because tar is very forgiving. First we need to strip off the archive's + // filename from the path but this is only added if it does not end in / . 
+ tarDest := destPath + if strings.HasSuffix(tarDest, "/") { + tarDest = filepath.Dir(destPath) + } + + // If we are adding a remote file, do not try to untar it + if !remote { + // try to successfully untar the orig + if err := archive.UntarPath(origPath, tarDest); err == nil { + return nil + } + utils.Debugf("Couldn't untar %s to %s: %s", origPath, destPath, err) + } + + // If that fails, just copy it as a regular file + // but do not use all the magic path handling for the tar path + if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { + return err + } + if err := archive.CopyWithTar(origPath, destPath); err != nil { + return err + } + return nil +} + +func (b *buildFile) CmdAdd(args string) error { + if b.context == nil { + return fmt.Errorf("No context given. Impossible to use ADD") + } + tmp := strings.SplitN(args, " ", 2) + if len(tmp) != 2 { + return fmt.Errorf("Invalid ADD format") + } + + orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t")) + if err != nil { + return err + } + + dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t")) + if err != nil { + return err + } + + cmd := b.config.Cmd + b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)} + b.config.Image = b.image + + var ( + origPath = orig + destPath = dest + remoteHash string + isRemote bool + ) + + if utils.IsURL(orig) { + isRemote = true + resp, err := utils.Download(orig) + if err != nil { + return err + } + tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") + if err != nil { + return err + } + tmpFileName := path.Join(tmpDirName, "tmp") + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return err + } + defer os.RemoveAll(tmpDirName) + if _, err = io.Copy(tmpFile, resp.Body); err != nil { + tmpFile.Close() + return err + } + origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) + tmpFile.Close() + + // Process the checksum + r, err := archive.Tar(tmpFileName, archive.Uncompressed) + if err != nil { + return err + } + tarSum := utils.TarSum{Reader: r, DisableCompression: true} + remoteHash = tarSum.Sum(nil) + r.Close() + + // If the destination is a directory, figure out the filename. 
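The rule applied inline just below is: when the ADD destination ends in "/", the filename is taken from the last path segment of the source URL, so ADD http://example.com/a/pkg.tar.gz /usr/src/ lands at /usr/src/pkg.tar.gz. A hypothetical helper expressing only that rule (it assumes the fmt, net/url, path and strings imports already present in this file):

func destFromURL(rawURL, dest string) (string, error) {
	if !strings.HasSuffix(dest, "/") {
		return dest, nil // destination names the target file explicitly
	}
	u, err := url.Parse(rawURL)
	if err != nil {
		return "", err
	}
	filename := path.Base(strings.TrimSuffix(u.Path, "/"))
	if filename == "" || filename == "." || filename == "/" {
		return "", fmt.Errorf("cannot determine filename from url: %s", u)
	}
	return dest + filename, nil
}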
+ if strings.HasSuffix(dest, "/") { + u, err := url.Parse(orig) + if err != nil { + return err + } + path := u.Path + if strings.HasSuffix(path, "/") { + path = path[:len(path)-1] + } + parts := strings.Split(path, "/") + filename := parts[len(parts)-1] + if filename == "" { + return fmt.Errorf("cannot determine filename from url: %s", u) + } + destPath = dest + filename + } + } + + if err := b.checkPathForAddition(origPath); err != nil { + return err + } + + // Hash path and check the cache + if b.utilizeCache { + var ( + hash string + sums = b.context.GetSums() + ) + + if remoteHash != "" { + hash = remoteHash + } else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil { + return err + } else if fi.IsDir() { + var subfiles []string + for file, sum := range sums { + absFile := path.Join(b.contextPath, file) + absOrigPath := path.Join(b.contextPath, origPath) + if strings.HasPrefix(absFile, absOrigPath) { + subfiles = append(subfiles, sum) + } + } + sort.Strings(subfiles) + hasher := sha256.New() + hasher.Write([]byte(strings.Join(subfiles, ","))) + hash = "dir:" + hex.EncodeToString(hasher.Sum(nil)) + } else { + if origPath[0] == '/' && len(origPath) > 1 { + origPath = origPath[1:] + } + origPath = strings.TrimPrefix(origPath, "./") + if h, ok := sums[origPath]; ok { + hash = "file:" + h + } + } + b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)} + hit, err := b.probeCache() + if err != nil { + return err + } + // If we do not have a hash, never use the cache + if hit && hash != "" { + return nil + } + } + + // Create the container and start it + container, _, err := b.runtime.Create(b.config, "") + if err != nil { + return err + } + b.tmpContainers[container.ID] = struct{}{} + + if err := container.Mount(); err != nil { + return err + } + defer container.Unmount() + + if err := b.addContext(container, origPath, destPath, isRemote); err != nil { + return err + } + + if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil { + return err + } + b.config.Cmd = cmd + return nil +} + +func (b *buildFile) create() (*runtime.Container, error) { + if b.image == "" { + return nil, fmt.Errorf("Please provide a source image with `from` prior to run") + } + b.config.Image = b.image + + // Create the container and start it + c, _, err := b.runtime.Create(b.config, "") + if err != nil { + return nil, err + } + b.tmpContainers[c.ID] = struct{}{} + fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(c.ID)) + + // override the entry point that may have been picked up from the base image + c.Path = b.config.Cmd[0] + c.Args = b.config.Cmd[1:] + + return c, nil +} + +func (b *buildFile) run(c *runtime.Container) error { + var errCh chan error + + if b.verbose { + errCh = utils.Go(func() error { + return <-c.Attach(nil, nil, b.outStream, b.errStream) + }) + } + + //start the container + if err := c.Start(); err != nil { + return err + } + + if errCh != nil { + if err := <-errCh; err != nil { + return err + } + } + + // Wait for it to finish + if ret := c.Wait(); ret != 0 { + err := &utils.JSONError{ + Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret), + Code: ret, + } + return err + } + + return nil +} + +// Commit the container with the autorun command +func (b *buildFile) commit(id string, autoCmd []string, comment string) error { + if b.image == "" { + return fmt.Errorf("Please provide a source image with `from` prior to commit") + } + b.config.Image = b.image + if 
id == "" { + cmd := b.config.Cmd + b.config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} + defer func(cmd []string) { b.config.Cmd = cmd }(cmd) + + hit, err := b.probeCache() + if err != nil { + return err + } + if hit { + return nil + } + + container, warnings, err := b.runtime.Create(b.config, "") + if err != nil { + return err + } + for _, warning := range warnings { + fmt.Fprintf(b.outStream, " ---> [Warning] %s\n", warning) + } + b.tmpContainers[container.ID] = struct{}{} + fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID)) + id = container.ID + + if err := container.Mount(); err != nil { + return err + } + defer container.Unmount() + } + container := b.runtime.Get(id) + if container == nil { + return fmt.Errorf("An error occured while creating the container") + } + + // Note: Actually copy the struct + autoConfig := *b.config + autoConfig.Cmd = autoCmd + // Commit the container + image, err := b.runtime.Commit(container, "", "", "", b.maintainer, &autoConfig) + if err != nil { + return err + } + b.tmpImages[image.ID] = struct{}{} + b.image = image.ID + return nil +} + +// Long lines can be split with a backslash +var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`) + +func (b *buildFile) Build(context io.Reader) (string, error) { + tmpdirPath, err := ioutil.TempDir("", "docker-build") + if err != nil { + return "", err + } + + decompressedStream, err := archive.DecompressStream(context) + if err != nil { + return "", err + } + + b.context = &utils.TarSum{Reader: decompressedStream, DisableCompression: true} + if err := archive.Untar(b.context, tmpdirPath, nil); err != nil { + return "", err + } + defer os.RemoveAll(tmpdirPath) + + b.contextPath = tmpdirPath + filename := path.Join(tmpdirPath, "Dockerfile") + if _, err := os.Stat(filename); os.IsNotExist(err) { + return "", fmt.Errorf("Can't build a directory with no Dockerfile") + } + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return "", err + } + if len(fileBytes) == 0 { + return "", ErrDockerfileEmpty + } + dockerfile := string(fileBytes) + dockerfile = lineContinuation.ReplaceAllString(dockerfile, "") + stepN := 0 + for _, line := range strings.Split(dockerfile, "\n") { + line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n") + // Skip comments and empty line + if len(line) == 0 || line[0] == '#' { + continue + } + if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil { + return "", err + } + stepN += 1 + + } + if b.image != "" { + fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image)) + if b.rm { + b.clearTmp(b.tmpContainers) + } + return b.image, nil + } + return "", fmt.Errorf("No image was generated. This may be because the Dockerfile does not, like, do anything.\n") +} + +// BuildStep parses a single build step from `instruction` and executes it in the current context. 
+func (b *buildFile) BuildStep(name, expression string) error { + fmt.Fprintf(b.outStream, "Step %s : %s\n", name, expression) + tmp := strings.SplitN(expression, " ", 2) + if len(tmp) != 2 { + return fmt.Errorf("Invalid Dockerfile format") + } + instruction := strings.ToLower(strings.Trim(tmp[0], " ")) + arguments := strings.Trim(tmp[1], " ") + + method, exists := reflect.TypeOf(b).MethodByName("Cmd" + strings.ToUpper(instruction[:1]) + strings.ToLower(instruction[1:])) + if !exists { + fmt.Fprintf(b.errStream, "# Skipping unknown instruction %s\n", strings.ToUpper(instruction)) + return nil + } + + ret := method.Func.Call([]reflect.Value{reflect.ValueOf(b), reflect.ValueOf(arguments)})[0].Interface() + if ret != nil { + return ret.(error) + } + + fmt.Fprintf(b.outStream, " ---> %s\n", utils.TruncateID(b.image)) + return nil +} + +func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile { + return &buildFile{ + runtime: srv.runtime, + srv: srv, + config: &runconfig.Config{}, + outStream: outStream, + errStream: errStream, + tmpContainers: make(map[string]struct{}), + tmpImages: make(map[string]struct{}), + verbose: verbose, + utilizeCache: utilizeCache, + rm: rm, + sf: sf, + authConfig: auth, + configFile: authConfigFile, + outOld: outOld, + } +} diff --git a/server/server.go b/server/server.go new file mode 100644 index 0000000000..eb9a3a396b --- /dev/null +++ b/server/server.go @@ -0,0 +1,2426 @@ +package server + +import ( + "encoding/json" + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/daemonconfig" + "github.com/dotcloud/docker/dockerversion" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/graph" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/pkg/graphdb" + "github.com/dotcloud/docker/pkg/signal" + "github.com/dotcloud/docker/registry" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/runtime" + "github.com/dotcloud/docker/utils" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "os/exec" + gosignal "os/signal" + "path" + "path/filepath" + goruntime "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" +) + +// jobInitApi runs the remote api server `srv` as a daemon, +// Only one api server can run at the same time - this is enforced by a pidfile. +// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup. +func InitServer(job *engine.Job) engine.Status { + job.Logf("Creating server") + srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job)) + if err != nil { + return job.Error(err) + } + if srv.runtime.Config().Pidfile != "" { + job.Logf("Creating pidfile") + if err := utils.CreatePidFile(srv.runtime.Config().Pidfile); err != nil { + // FIXME: do we need fatal here instead of returning a job error? 
+ log.Fatal(err) + } + } + job.Logf("Setting up signal traps") + c := make(chan os.Signal, 1) + gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) + go func() { + sig := <-c + log.Printf("Received signal '%v', exiting\n", sig) + utils.RemovePidFile(srv.runtime.Config().Pidfile) + srv.Close() + os.Exit(0) + }() + job.Eng.Hack_SetGlobalVar("httpapi.server", srv) + job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime) + + for name, handler := range map[string]engine.Handler{ + "export": srv.ContainerExport, + "create": srv.ContainerCreate, + "stop": srv.ContainerStop, + "restart": srv.ContainerRestart, + "start": srv.ContainerStart, + "kill": srv.ContainerKill, + "wait": srv.ContainerWait, + "tag": srv.ImageTag, + "resize": srv.ContainerResize, + "commit": srv.ContainerCommit, + "info": srv.DockerInfo, + "container_delete": srv.ContainerDestroy, + "image_export": srv.ImageExport, + "images": srv.Images, + "history": srv.ImageHistory, + "viz": srv.ImagesViz, + "container_copy": srv.ContainerCopy, + "insert": srv.ImageInsert, + "attach": srv.ContainerAttach, + "search": srv.ImagesSearch, + "changes": srv.ContainerChanges, + "top": srv.ContainerTop, + "version": srv.DockerVersion, + "load": srv.ImageLoad, + "build": srv.Build, + "pull": srv.ImagePull, + "import": srv.ImageImport, + "image_delete": srv.ImageDelete, + "inspect": srv.JobInspect, + "events": srv.Events, + "push": srv.ImagePush, + "containers": srv.Containers, + "auth": srv.Auth, + } { + if err := job.Eng.Register(name, handler); err != nil { + return job.Error(err) + } + } + return engine.StatusOK +} + +// simpleVersionInfo is a simple implementation of +// the interface VersionInfo, which is used +// to provide version information for some product, +// component, etc. It stores the product name and the version +// in string and returns them on calls to Name() and Version(). +type simpleVersionInfo struct { + name string + version string +} + +func (v *simpleVersionInfo) Name() string { + return v.name +} + +func (v *simpleVersionInfo) Version() string { + return v.version +} + +// ContainerKill send signal to the container +// If no signal is given (sig 0), then Kill with SIGKILL and wait +// for the container to exit. +// If a signal is given, then just send it to the container and return. +func (srv *Server) ContainerKill(job *engine.Job) engine.Status { + if n := len(job.Args); n < 1 || n > 2 { + return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) + } + var ( + name = job.Args[0] + sig uint64 + err error + ) + + // If we have a signal, look at it. 
Otherwise, do nothing + if len(job.Args) == 2 && job.Args[1] != "" { + // Check if we passed the signal as a number: + // The largest legal signal is 31, so let's parse on 5 bits + sig, err = strconv.ParseUint(job.Args[1], 10, 5) + if err != nil { + // The signal is not a number, treat it as a string + sig = uint64(signal.SignalMap[job.Args[1]]) + if sig == 0 { + return job.Errorf("Invalid signal: %s", job.Args[1]) + } + + } + } + + if container := srv.runtime.Get(name); container != nil { + // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) + if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { + if err := container.Kill(); err != nil { + return job.Errorf("Cannot kill container %s: %s", name, err) + } + srv.LogEvent("kill", container.ID, srv.runtime.Repositories().ImageName(container.Image)) + } else { + // Otherwise, just send the requested signal + if err := container.KillSig(int(sig)); err != nil { + return job.Errorf("Cannot kill container %s: %s", name, err) + } + // FIXME: Add event for signals + } + } else { + return job.Errorf("No such container: %s", name) + } + return engine.StatusOK +} + +func (srv *Server) Auth(job *engine.Job) engine.Status { + var ( + err error + authConfig = ®istry.AuthConfig{} + ) + + job.GetenvJson("authConfig", authConfig) + // TODO: this is only done here because auth and registry need to be merged into one pkg + if addr := authConfig.ServerAddress; addr != "" && addr != registry.IndexServerAddress() { + addr, err = registry.ExpandAndVerifyRegistryUrl(addr) + if err != nil { + return job.Error(err) + } + authConfig.ServerAddress = addr + } + status, err := registry.Login(authConfig, srv.HTTPRequestFactory(nil)) + if err != nil { + return job.Error(err) + } + job.Printf("%s\n", status) + return engine.StatusOK +} + +func (srv *Server) Events(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s FROM", job.Name) + } + + var ( + from = job.Args[0] + since = job.GetenvInt64("since") + ) + sendEvent := func(event *utils.JSONMessage) error { + b, err := json.Marshal(event) + if err != nil { + return fmt.Errorf("JSON error") + } + _, err = job.Stdout.Write(b) + if err != nil { + // On error, evict the listener + utils.Errorf("%s", err) + srv.Lock() + delete(srv.listeners, from) + srv.Unlock() + return err + } + return nil + } + + listener := make(chan utils.JSONMessage) + srv.Lock() + srv.listeners[from] = listener + srv.Unlock() + job.Stdout.Write(nil) // flush + if since != 0 { + // If since, send previous events that happened after the timestamp + for _, event := range srv.GetEvents() { + if event.Time >= since { + err := sendEvent(&event) + if err != nil && err.Error() == "JSON error" { + continue + } + if err != nil { + job.Error(err) + return engine.StatusErr + } + } + } + } + for event := range listener { + err := sendEvent(&event) + if err != nil && err.Error() == "JSON error" { + continue + } + if err != nil { + return job.Error(err) + } + } + return engine.StatusOK +} + +func (srv *Server) ContainerExport(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s container_id", job.Name) + } + name := job.Args[0] + if container := srv.runtime.Get(name); container != nil { + data, err := container.Export() + if err != nil { + return job.Errorf("%s: %s", name, err) + } + defer data.Close() + + // Stream the entire contents of the container (basically a volatile snapshot) + if _, err := io.Copy(job.Stdout, data); err != nil { + return job.Errorf("%s: %s", 
name, err) + } + // FIXME: factor job-specific LogEvent to engine.Job.Run() + srv.LogEvent("export", container.ID, srv.runtime.Repositories().ImageName(container.Image)) + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} + +// ImageExport exports all images with the given tag. All versions +// containing the same tag are exported. The resulting output is an +// uncompressed tar ball. +// name is the set of tags to export. +// out is the writer where the images are written to. +func (srv *Server) ImageExport(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + name := job.Args[0] + // get image json + tempdir, err := ioutil.TempDir("", "docker-export-") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(tempdir) + + utils.Debugf("Serializing %s", name) + + rootRepo, err := srv.runtime.Repositories().Get(name) + if err != nil { + return job.Error(err) + } + if rootRepo != nil { + for _, id := range rootRepo { + image, err := srv.ImageInspect(id) + if err != nil { + return job.Error(err) + } + + if err := srv.exportImage(image, tempdir); err != nil { + return job.Error(err) + } + } + + // write repositories + rootRepoMap := map[string]graph.Repository{} + rootRepoMap[name] = rootRepo + rootRepoJson, _ := json.Marshal(rootRepoMap) + + if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.ModeAppend); err != nil { + return job.Error(err) + } + } else { + image, err := srv.ImageInspect(name) + if err != nil { + return job.Error(err) + } + if err := srv.exportImage(image, tempdir); err != nil { + return job.Error(err) + } + } + + fs, err := archive.Tar(tempdir, archive.Uncompressed) + if err != nil { + return job.Error(err) + } + defer fs.Close() + + if _, err := io.Copy(job.Stdout, fs); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +func (srv *Server) exportImage(img *image.Image, tempdir string) error { + for i := img; i != nil; { + // temporary directory + tmpImageDir := path.Join(tempdir, i.ID) + if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil { + if os.IsExist(err) { + return nil + } + return err + } + + var version = "1.0" + var versionBuf = []byte(version) + + if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.ModeAppend); err != nil { + return err + } + + // serialize json + b, err := json.Marshal(i) + if err != nil { + return err + } + if err := ioutil.WriteFile(path.Join(tmpImageDir, "json"), b, os.ModeAppend); err != nil { + return err + } + + // serialize filesystem + fs, err := i.TarLayer() + if err != nil { + return err + } + defer fs.Close() + + fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar")) + if err != nil { + return err + } + if _, err = io.Copy(fsTar, fs); err != nil { + return err + } + fsTar.Close() + + // find parent + if i.Parent != "" { + i, err = srv.ImageInspect(i.Parent) + if err != nil { + return err + } + } else { + i = nil + } + } + return nil +} + +func (srv *Server) Build(job *engine.Job) engine.Status { + if len(job.Args) != 0 { + return job.Errorf("Usage: %s\n", job.Name) + } + var ( + remoteURL = job.Getenv("remote") + repoName = job.Getenv("t") + suppressOutput = job.GetenvBool("q") + noCache = job.GetenvBool("nocache") + rm = job.GetenvBool("rm") + authConfig = ®istry.AuthConfig{} + configFile = ®istry.ConfigFile{} + tag string + context io.ReadCloser + ) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("configFile", configFile) + repoName, 
tag = utils.ParseRepositoryTag(repoName) + + if remoteURL == "" { + context = ioutil.NopCloser(job.Stdin) + } else if utils.IsGIT(remoteURL) { + if !strings.HasPrefix(remoteURL, "git://") { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(root) + + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { + return job.Errorf("Error trying to use git: %s (%s)", err, output) + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return job.Error(err) + } + context = c + } else if utils.IsURL(remoteURL) { + f, err := utils.Download(remoteURL) + if err != nil { + return job.Error(err) + } + defer f.Body.Close() + dockerFile, err := ioutil.ReadAll(f.Body) + if err != nil { + return job.Error(err) + } + c, err := archive.Generate("Dockerfile", string(dockerFile)) + if err != nil { + return job.Error(err) + } + context = c + } + defer context.Close() + + sf := utils.NewStreamFormatter(job.GetenvBool("json")) + b := NewBuildFile(srv, + &utils.StdoutFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + &utils.StderrFormater{ + Writer: job.Stdout, + StreamFormatter: sf, + }, + !suppressOutput, !noCache, rm, job.Stdout, sf, authConfig, configFile) + id, err := b.Build(context) + if err != nil { + return job.Error(err) + } + if repoName != "" { + srv.runtime.Repositories().Set(repoName, tag, id, false) + } + return engine.StatusOK +} + +// Loads a set of images into the repository. This is the complementary of ImageExport. +// The input stream is an uncompressed tar ball containing images and metadata. +func (srv *Server) ImageLoad(job *engine.Job) engine.Status { + tmpImageDir, err := ioutil.TempDir("", "docker-import-") + if err != nil { + return job.Error(err) + } + defer os.RemoveAll(tmpImageDir) + + var ( + repoTarFile = path.Join(tmpImageDir, "repo.tar") + repoDir = path.Join(tmpImageDir, "repo") + ) + + tarFile, err := os.Create(repoTarFile) + if err != nil { + return job.Error(err) + } + if _, err := io.Copy(tarFile, job.Stdin); err != nil { + return job.Error(err) + } + tarFile.Close() + + repoFile, err := os.Open(repoTarFile) + if err != nil { + return job.Error(err) + } + if err := os.Mkdir(repoDir, os.ModeDir); err != nil { + return job.Error(err) + } + if err := archive.Untar(repoFile, repoDir, nil); err != nil { + return job.Error(err) + } + + dirs, err := ioutil.ReadDir(repoDir) + if err != nil { + return job.Error(err) + } + + for _, d := range dirs { + if d.IsDir() { + if err := srv.recursiveLoad(d.Name(), tmpImageDir); err != nil { + return job.Error(err) + } + } + } + + repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) + if err == nil { + repositories := map[string]graph.Repository{} + if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { + return job.Error(err) + } + + for imageName, tagMap := range repositories { + for tag, address := range tagMap { + if err := srv.runtime.Repositories().Set(imageName, tag, address, true); err != nil { + return job.Error(err) + } + } + } + } else if !os.IsNotExist(err) { + return job.Error(err) + } + + return engine.StatusOK +} + +func (srv *Server) recursiveLoad(address, tmpImageDir string) error { + if _, err := srv.ImageInspect(address); err != nil { + utils.Debugf("Loading %s", address) + + imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) + if err != nil { + 
utils.Debugf("Error reading json", err) + return err + } + + layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar")) + if err != nil { + utils.Debugf("Error reading embedded tar", err) + return err + } + img, err := image.NewImgJSON(imageJson) + if err != nil { + utils.Debugf("Error unmarshalling json", err) + return err + } + if img.Parent != "" { + if !srv.runtime.Graph().Exists(img.Parent) { + if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil { + return err + } + } + } + if err := srv.runtime.Graph().Register(imageJson, layer, img); err != nil { + return err + } + } + utils.Debugf("Completed processing %s", address) + + return nil +} + +func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s TERM", job.Name) + } + var ( + term = job.Args[0] + metaHeaders = map[string][]string{} + authConfig = ®istry.AuthConfig{} + ) + job.GetenvJson("authConfig", authConfig) + job.GetenvJson("metaHeaders", metaHeaders) + + r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), registry.IndexServerAddress()) + if err != nil { + return job.Error(err) + } + results, err := r.SearchRepositories(term) + if err != nil { + return job.Error(err) + } + outs := engine.NewTable("star_count", 0) + for _, result := range results.Results { + out := &engine.Env{} + out.Import(result) + outs.Add(out) + } + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +func (srv *Server) ImageInsert(job *engine.Job) engine.Status { + if len(job.Args) != 3 { + return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name) + } + + var ( + name = job.Args[0] + url = job.Args[1] + path = job.Args[2] + ) + + sf := utils.NewStreamFormatter(job.GetenvBool("json")) + + out := utils.NewWriteFlusher(job.Stdout) + img, err := srv.runtime.Repositories().LookupImage(name) + if err != nil { + return job.Error(err) + } + + file, err := utils.Download(url) + if err != nil { + return job.Error(err) + } + defer file.Body.Close() + + config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.SystemConfig()) + if err != nil { + return job.Error(err) + } + + c, _, err := srv.runtime.Create(config, "") + if err != nil { + return job.Error(err) + } + + if err := c.Inject(utils.ProgressReader(file.Body, int(file.ContentLength), out, sf, false, utils.TruncateID(img.ID), "Downloading"), path); err != nil { + return job.Error(err) + } + // FIXME: Handle custom repo, tag comment, author + img, err = srv.runtime.Commit(c, "", "", img.Comment, img.Author, nil) + if err != nil { + out.Write(sf.FormatError(err)) + return engine.StatusErr + } + out.Write(sf.FormatStatus("", img.ID)) + return engine.StatusOK +} + +func (srv *Server) ImagesViz(job *engine.Job) engine.Status { + images, _ := srv.runtime.Graph().Map() + if images == nil { + return engine.StatusOK + } + job.Stdout.Write([]byte("digraph docker {\n")) + + var ( + parentImage *image.Image + err error + ) + for _, image := range images { + parentImage, err = image.GetParent() + if err != nil { + return job.Errorf("Error while getting parent image: %v", err) + } + if parentImage != nil { + job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) + } else { + job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n")) + } + } + + reporefs := make(map[string][]string) + + for name, repository := range 
srv.runtime.Repositories().Repositories { + for tag, id := range repository { + reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag)) + } + } + + for id, repos := range reporefs { + job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) + } + job.Stdout.Write([]byte(" base [style=invisible]\n}\n")) + return engine.StatusOK +} + +func (srv *Server) Images(job *engine.Job) engine.Status { + var ( + allImages map[string]*image.Image + err error + ) + if job.GetenvBool("all") { + allImages, err = srv.runtime.Graph().Map() + } else { + allImages, err = srv.runtime.Graph().Heads() + } + if err != nil { + return job.Error(err) + } + lookup := make(map[string]*engine.Env) + for name, repository := range srv.runtime.Repositories().Repositories { + if job.Getenv("filter") != "" { + if match, _ := path.Match(job.Getenv("filter"), name); !match { + continue + } + } + for tag, id := range repository { + image, err := srv.runtime.Graph().Get(id) + if err != nil { + log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err) + continue + } + + if out, exists := lookup[id]; exists { + out.SetList("RepoTags", append(out.GetList("RepoTags"), fmt.Sprintf("%s:%s", name, tag))) + } else { + out := &engine.Env{} + delete(allImages, id) + out.Set("ParentId", image.Parent) + out.SetList("RepoTags", []string{fmt.Sprintf("%s:%s", name, tag)}) + out.Set("Id", image.ID) + out.SetInt64("Created", image.Created.Unix()) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + lookup[id] = out + } + + } + } + + outs := engine.NewTable("Created", len(lookup)) + for _, value := range lookup { + outs.Add(value) + } + + // Display images which aren't part of a repository/tag + if job.Getenv("filter") == "" { + for _, image := range allImages { + out := &engine.Env{} + out.Set("ParentId", image.Parent) + out.SetList("RepoTags", []string{":"}) + out.Set("Id", image.ID) + out.SetInt64("Created", image.Created.Unix()) + out.SetInt64("Size", image.Size) + out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) + outs.Add(out) + } + } + + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +func (srv *Server) DockerInfo(job *engine.Job) engine.Status { + images, _ := srv.runtime.Graph().Map() + var imgcount int + if images == nil { + imgcount = 0 + } else { + imgcount = len(images) + } + kernelVersion := "" + if kv, err := utils.GetKernelVersion(); err == nil { + kernelVersion = kv.String() + } + + // if we still have the original dockerinit binary from before we copied it locally, let's return the path to that, since that's more intuitive (the copied path is trivial to derive by hand given VERSION) + initPath := utils.DockerInitPath("") + if initPath == "" { + // if that fails, we'll just return the path from the runtime + initPath = srv.runtime.SystemInitPath() + } + + v := &engine.Env{} + v.SetInt("Containers", len(srv.runtime.List())) + v.SetInt("Images", imgcount) + v.Set("Driver", srv.runtime.GraphDriver().String()) + v.SetJson("DriverStatus", srv.runtime.GraphDriver().Status()) + v.SetBool("MemoryLimit", srv.runtime.SystemConfig().MemoryLimit) + v.SetBool("SwapLimit", srv.runtime.SystemConfig().SwapLimit) + v.SetBool("IPv4Forwarding", !srv.runtime.SystemConfig().IPv4ForwardingDisabled) + v.SetBool("Debug", 
os.Getenv("DEBUG") != "") + v.SetInt("NFd", utils.GetTotalUsedFds()) + v.SetInt("NGoroutines", goruntime.NumGoroutine()) + v.Set("ExecutionDriver", srv.runtime.ExecutionDriver().Name()) + v.SetInt("NEventsListener", len(srv.listeners)) + v.Set("KernelVersion", kernelVersion) + v.Set("IndexServerAddress", registry.IndexServerAddress()) + v.Set("InitSha1", dockerversion.INITSHA1) + v.Set("InitPath", initPath) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +func (srv *Server) DockerVersion(job *engine.Job) engine.Status { + v := &engine.Env{} + v.Set("Version", dockerversion.VERSION) + v.Set("GitCommit", dockerversion.GITCOMMIT) + v.Set("GoVersion", goruntime.Version()) + v.Set("Os", goruntime.GOOS) + v.Set("Arch", goruntime.GOARCH) + if kernelVersion, err := utils.GetKernelVersion(); err == nil { + v.Set("KernelVersion", kernelVersion.String()) + } + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +func (srv *Server) ImageHistory(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + name := job.Args[0] + foundImage, err := srv.runtime.Repositories().LookupImage(name) + if err != nil { + return job.Error(err) + } + + lookupMap := make(map[string][]string) + for name, repository := range srv.runtime.Repositories().Repositories { + for tag, id := range repository { + // If the ID already has a reverse lookup, do not update it unless for "latest" + if _, exists := lookupMap[id]; !exists { + lookupMap[id] = []string{} + } + lookupMap[id] = append(lookupMap[id], name+":"+tag) + } + } + + outs := engine.NewTable("Created", 0) + err = foundImage.WalkHistory(func(img *image.Image) error { + out := &engine.Env{} + out.Set("Id", img.ID) + out.SetInt64("Created", img.Created.Unix()) + out.Set("CreatedBy", strings.Join(img.ContainerConfig.Cmd, " ")) + out.SetList("Tags", lookupMap[img.ID]) + out.SetInt64("Size", img.Size) + outs.Add(out) + return nil + }) + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +func (srv *Server) ContainerTop(job *engine.Job) engine.Status { + if len(job.Args) != 1 && len(job.Args) != 2 { + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER [PS_ARGS]\n", job.Name) + } + var ( + name = job.Args[0] + psArgs = "-ef" + ) + + if len(job.Args) == 2 && job.Args[1] != "" { + psArgs = job.Args[1] + } + + if container := srv.runtime.Get(name); container != nil { + if !container.State.IsRunning() { + return job.Errorf("Container %s is not running", name) + } + pids, err := srv.runtime.ExecutionDriver().GetPidsForContainer(container.ID) + if err != nil { + return job.Error(err) + } + output, err := exec.Command("ps", psArgs).Output() + if err != nil { + return job.Errorf("Error running ps: %s", err) + } + + lines := strings.Split(string(output), "\n") + header := strings.Fields(lines[0]) + out := &engine.Env{} + out.SetList("Titles", header) + + pidIndex := -1 + for i, name := range header { + if name == "PID" { + pidIndex = i + } + } + if pidIndex == -1 { + return job.Errorf("Couldn't find PID field in ps output") + } + + processes := [][]string{} + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := strings.Fields(line) + p, err := strconv.Atoi(fields[pidIndex]) + if err != nil { + return job.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + } + + for _, pid := range pids { + if pid == p { + // Make sure number of fields equals number of header titles + // merging "overhanging" fields + process := fields[:len(header)-1] + process = append(process, strings.Join(fields[len(header)-1:], " ")) + processes = append(processes, process) + } + } + } + out.SetJson("Processes", processes) + out.WriteTo(job.Stdout) + return engine.StatusOK + + } + return job.Errorf("No such container: %s", name) +} + +func (srv *Server) ContainerChanges(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s CONTAINER", job.Name) + } + name := job.Args[0] + if container := srv.runtime.Get(name); container != nil { + outs := engine.NewTable("", 0) + changes, err := container.Changes() + if err != nil { + return job.Error(err) + } + for _, change := range changes { + out := &engine.Env{} + if err := out.Import(change); err != nil { + return job.Error(err) + } + outs.Add(out) + } + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + } else { + return job.Errorf("No such container: %s", name) + } + return engine.StatusOK +} + +func (srv *Server) Containers(job *engine.Job) engine.Status { + var ( + foundBefore bool + displayed int + all = job.GetenvBool("all") + since = job.Getenv("since") + before = job.Getenv("before") + n = job.GetenvInt("limit") + size = job.GetenvBool("size") + ) + outs := engine.NewTable("Created", 0) + + names := map[string][]string{} + srv.runtime.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error { + names[e.ID()] = append(names[e.ID()], p) + return nil + }, -1) + + for _, container := range srv.runtime.List() { + if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" { + continue + } + if before != "" && !foundBefore { + if container.ID == before || utils.TruncateID(container.ID) == before { + foundBefore = true + } + continue + } + if n > 0 && displayed == n { + break + } + if container.ID == since || utils.TruncateID(container.ID) == since { + break + } + displayed++ + out := &engine.Env{} + out.Set("Id", container.ID) + out.SetList("Names", names[container.ID]) + out.Set("Image", srv.runtime.Repositories().ImageName(container.Image)) + if len(container.Args) > 0 { + out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, strings.Join(container.Args, " "))) + } 
else { + out.Set("Command", fmt.Sprintf("\"%s\"", container.Path)) + } + out.SetInt64("Created", container.Created.Unix()) + out.Set("Status", container.State.String()) + str, err := container.NetworkSettings.PortMappingAPI().ToListString() + if err != nil { + return job.Error(err) + } + out.Set("Ports", str) + if size { + sizeRw, sizeRootFs := container.GetSize() + out.SetInt64("SizeRw", sizeRw) + out.SetInt64("SizeRootFs", sizeRootFs) + } + outs.Add(out) + } + outs.ReverseSort() + if _, err := outs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +func (srv *Server) ContainerCommit(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Not enough arguments. Usage: %s CONTAINER\n", job.Name) + } + name := job.Args[0] + + container := srv.runtime.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + var config = container.Config + var newConfig runconfig.Config + if err := job.GetenvJson("config", &newConfig); err != nil { + return job.Error(err) + } + + if err := runconfig.Merge(&newConfig, config); err != nil { + return job.Error(err) + } + + img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig) + if err != nil { + return job.Error(err) + } + job.Printf("%s\n", img.ID) + return engine.StatusOK +} + +func (srv *Server) ImageTag(job *engine.Job) engine.Status { + if len(job.Args) != 2 && len(job.Args) != 3 { + return job.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) + } + var tag string + if len(job.Args) == 3 { + tag = job.Args[2] + } + if err := srv.runtime.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) error { + history, err := r.GetRemoteHistory(imgID, endpoint, token) + if err != nil { + return err + } + out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil)) + // FIXME: Try to stream the images? 
+ // FIXME: Launch the getRemoteImage() in goroutines + + for i := len(history) - 1; i >= 0; i-- { + id := history[i] + + // ensure no two downloads of the same layer happen at the same time + if c, err := srv.poolAdd("pull", "layer:"+id); err != nil { + utils.Errorf("Image (id: %s) pull is already running, skipping: %v", id, err) + <-c + } + defer srv.poolRemove("pull", "layer:"+id) + + if !srv.runtime.Graph().Exists(id) { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil)) + var ( + imgJSON []byte + imgSize int + err error + img *image.Image + ) + retries := 5 + for j := 1; j <= retries; j++ { + imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token) + if err != nil && j == retries { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return err + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } + img, err = image.NewImgJSON(imgJSON) + if err != nil && j == retries { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return fmt.Errorf("Failed to parse json: %s", err) + } else if err != nil { + time.Sleep(time.Duration(j) * 500 * time.Millisecond) + continue + } else { + break + } + } + + // Get the layer + out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling fs layer", nil)) + layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token) + if err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil)) + return err + } + defer layer.Close() + if err := srv.runtime.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil { + out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil)) + return err + } + } + out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil)) + + } + return nil +} + +func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool) error { + out.Write(sf.FormatStatus("", "Pulling repository %s", localName)) + + repoData, err := r.GetRepositoryData(remoteName) + if err != nil { + return err + } + + utils.Debugf("Retrieving the tag list") + tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens) + if err != nil { + utils.Errorf("%v", err) + return err + } + + for tag, id := range tagsList { + repoData.ImgList[id] = ®istry.ImgData{ + ID: id, + Tag: tag, + Checksum: "", + } + } + + utils.Debugf("Registering tags") + // If no tag has been specified, pull them all + if askedTag == "" { + for tag, id := range tagsList { + repoData.ImgList[id].Tag = tag + } + } else { + // Otherwise, check that the tag exists and use only that one + id, exists := tagsList[askedTag] + if !exists { + return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName) + } + repoData.ImgList[id].Tag = askedTag + } + + errors := make(chan error) + for _, image := range repoData.ImgList { + downloadImage := func(img *registry.ImgData) { + if askedTag != "" && img.Tag != askedTag { + utils.Debugf("(%s) does not match %s (id: %s), skipping", img.Tag, askedTag, img.ID) + if parallel { + errors <- nil + } + return + } + + if img.Tag == "" { + utils.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) + if parallel { + errors <- nil + } + return + } + + // ensure no two downloads of the same image happen at the same 
time + if c, err := srv.poolAdd("pull", "img:"+img.ID); err != nil { + if c != nil { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil)) + <-c + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + } else { + utils.Errorf("Image (id: %s) pull is already running, skipping: %v", img.ID, err) + } + if parallel { + errors <- nil + } + return + } + defer srv.poolRemove("pull", "img:"+img.ID) + + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, localName), nil)) + success := false + var lastErr error + for _, ep := range repoData.Endpoints { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil)) + if err := srv.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil { + // Its not ideal that only the last error is returned, it would be better to concatenate the errors. + // As the error is also given to the output stream the user will see the error. + lastErr = err + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil)) + continue + } + success = true + break + } + if !success { + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, %s", img.Tag, localName, lastErr), nil)) + if parallel { + errors <- fmt.Errorf("Could not find repository on any of the indexed registries.") + return + } + } + out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil)) + + if parallel { + errors <- nil + } + } + + if parallel { + go downloadImage(image) + } else { + downloadImage(image) + } + } + if parallel { + var lastError error + for i := 0; i < len(repoData.ImgList); i++ { + if err := <-errors; err != nil { + lastError = err + } + } + if lastError != nil { + return lastError + } + + } + for tag, id := range tagsList { + if askedTag != "" && tag != askedTag { + continue + } + if err := srv.runtime.Repositories().Set(localName, tag, id, true); err != nil { + return err + } + } + if err := srv.runtime.Repositories().Save(); err != nil { + return err + } + + return nil +} + +func (srv *Server) poolAdd(kind, key string) (chan struct{}, error) { + srv.Lock() + defer srv.Unlock() + + if c, exists := srv.pullingPool[key]; exists { + return c, fmt.Errorf("pull %s is already in progress", key) + } + if c, exists := srv.pushingPool[key]; exists { + return c, fmt.Errorf("push %s is already in progress", key) + } + + c := make(chan struct{}) + switch kind { + case "pull": + srv.pullingPool[key] = c + case "push": + srv.pushingPool[key] = c + default: + return nil, fmt.Errorf("Unknown pool type") + } + return c, nil +} + +func (srv *Server) poolRemove(kind, key string) error { + srv.Lock() + defer srv.Unlock() + switch kind { + case "pull": + if c, exists := srv.pullingPool[key]; exists { + close(c) + delete(srv.pullingPool, key) + } + case "push": + if c, exists := srv.pushingPool[key]; exists { + close(c) + delete(srv.pushingPool, key) + } + default: + return fmt.Errorf("Unknown pool type") + } + return nil +} + +func (srv *Server) ImagePull(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 && n != 2 { + return job.Errorf("Usage: %s IMAGE [TAG]", job.Name) + } + var ( + localName = job.Args[0] + tag string + sf = utils.NewStreamFormatter(job.GetenvBool("json")) + authConfig = 
&registry.AuthConfig{}
+		metaHeaders map[string][]string
+	)
+	if len(job.Args) > 1 {
+		tag = job.Args[1]
+	}
+
+	job.GetenvJson("authConfig", authConfig)
+	job.GetenvJson("metaHeaders", metaHeaders)
+
+	c, err := srv.poolAdd("pull", localName+":"+tag)
+	if err != nil {
+		if c != nil {
+			// Another pull of the same repository is already taking place; just wait for it to finish
+			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", localName))
+			<-c
+			return engine.StatusOK
+		}
+		return job.Error(err)
+	}
+	defer srv.poolRemove("pull", localName+":"+tag)
+
+	// Resolve the Repository name from fqn to endpoint + name
+	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	if endpoint == registry.IndexServerAddress() {
+		// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
+		localName = remoteName
+	}
+
+	if err = srv.pullRepository(r, job.Stdout, localName, remoteName, tag, sf, job.GetenvBool("parallel")); err != nil {
+		return job.Error(err)
+	}
+
+	return engine.StatusOK
+}
+
+// Retrieve all the images to be uploaded in the correct order
+func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[string][]string, error) {
+	var (
+		imageList   []string
+		imagesSeen  map[string]bool     = make(map[string]bool)
+		tagsByImage map[string][]string = make(map[string][]string)
+	)
+
+	for tag, id := range localRepo {
+		var imageListForThisTag []string
+
+		tagsByImage[id] = append(tagsByImage[id], tag)
+
+		for img, err := srv.runtime.Graph().Get(id); img != nil; img, err = img.GetParent() {
+			if err != nil {
+				return nil, nil, err
+			}
+
+			if imagesSeen[img.ID] {
+				// This image is already on the list, we can ignore it and all its parents
+				break
+			}
+
+			imagesSeen[img.ID] = true
+			imageListForThisTag = append(imageListForThisTag, img.ID)
+		}
+
+		// reverse the image list for this tag (so the "most"-parent image is first)
+		for i, j := 0, len(imageListForThisTag)-1; i < j; i, j = i+1, j-1 {
+			imageListForThisTag[i], imageListForThisTag[j] = imageListForThisTag[j], imageListForThisTag[i]
+		}
+
+		// append to main image list
+		imageList = append(imageList, imageListForThisTag...)
+	}
+
+	utils.Debugf("Image list: %v", imageList)
+	utils.Debugf("Tags by image: %v", tagsByImage)
+
+	return imageList, tagsByImage, nil
+}
+
+func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, sf *utils.StreamFormatter) error {
+	out = utils.NewWriteFlusher(out)
+	utils.Debugf("Local repo: %s", localRepo)
+	imgList, tagsByImage, err := srv.getImageList(localRepo)
+	if err != nil {
+		return err
+	}
+
+	out.Write(sf.FormatStatus("", "Sending image list"))
+
+	var repoData *registry.RepositoryData
+	var imageIndex []*registry.ImgData
+
+	for _, imgId := range imgList {
+		if tags, exists := tagsByImage[imgId]; exists {
+			// If an image has tags you must add an entry in the image index
+			// for each tag
+			for _, tag := range tags {
+				imageIndex = append(imageIndex, &registry.ImgData{
+					ID:  imgId,
+					Tag: tag,
+				})
+			}
+		} else {
+			// If the image does not have a tag it still needs to be sent to the
+			// registry with an empty tag so that it is associated with the repository
+			imageIndex = append(imageIndex, &registry.ImgData{
+				ID:  imgId,
+				Tag: "",
+			})
+
+		}
+	}
+
+	utils.Debugf("Preparing to push %s with the following images and tags\n", localRepo)
+	for _, data := range imageIndex {
+		utils.Debugf("Pushing ID: %s with Tag: %s\n", data.ID, data.Tag)
+	}
+
+	// Register all the images in a repository with the registry
+	// If an image is not in this list it will not be associated with the repository
+	repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil)
+	if err != nil {
+		return err
+	}
+
+	for _, ep := range repoData.Endpoints {
+		out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo)))
+
+		for _, imgId := range imgList {
+			if r.LookupRemoteImage(imgId, ep, repoData.Tokens) {
+				out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgId)))
+			} else {
+				if _, err := srv.pushImage(r, out, remoteName, imgId, ep, repoData.Tokens, sf); err != nil {
+					// FIXME: Continue on error?
+					return err
+				}
+			}
+
+			for _, tag := range tagsByImage[imgId] {
+				out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgId), ep+"repositories/"+remoteName+"/tags/"+tag))
+
+				if err := r.PushRegistryTag(remoteName, imgId, tag, ep, repoData.Tokens); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
+	out = utils.NewWriteFlusher(out)
+	jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.Graph().Root, imgID, "json"))
+	if err != nil {
+		return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
+	}
+	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pushing", nil))
+
+	imgData := &registry.ImgData{
+		ID: imgID,
+	}
+
+	// Send the json
+	if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
+		if err == registry.ErrAlreadyExists {
+			out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
+			return "", nil
+		}
+		return "", err
+	}
+
+	layerData, err := srv.runtime.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
+	if err != nil {
+		return "", fmt.Errorf("Failed to generate layer archive: %s", err)
+	}
+	defer os.RemoveAll(layerData.Name())
+
+	// Send the layer
+	checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
+	if err != nil {
+		return "", err
+	}
+	imgData.Checksum = checksum
+	imgData.ChecksumPayload = checksumPayload
+	// Send the checksum
+	if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
+		return "", err
+	}
+
+	out.Write(sf.FormatProgress(utils.TruncateID(imgData.ID), "Image successfully pushed", nil))
+	return imgData.Checksum, nil
+}
+
+// FIXME: Allow to interrupt current push when new push of same image is done.
+func (srv *Server) ImagePush(job *engine.Job) engine.Status {
+	if n := len(job.Args); n != 1 {
+		return job.Errorf("Usage: %s IMAGE", job.Name)
+	}
+	var (
+		localName   = job.Args[0]
+		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
+		authConfig  = &registry.AuthConfig{}
+		metaHeaders map[string][]string
+	)
+
+	job.GetenvJson("authConfig", authConfig)
+	job.GetenvJson("metaHeaders", metaHeaders)
+	if _, err := srv.poolAdd("push", localName); err != nil {
+		return job.Error(err)
+	}
+	defer srv.poolRemove("push", localName)
+
+	// Resolve the Repository name from fqn to endpoint + name
+	hostname, remoteName, err := registry.ResolveRepositoryName(localName)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	endpoint, err := registry.ExpandAndVerifyRegistryUrl(hostname)
+	if err != nil {
+		return job.Error(err)
+	}
+
+	img, err := srv.runtime.Graph().Get(localName)
+	r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
+	if err2 != nil {
+		return job.Error(err2)
+	}
+
+	if err != nil {
+		reposLen := len(srv.runtime.Repositories().Repositories[localName])
+		job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
+		// If it fails, try to get the repository
+		if localRepo, exists := srv.runtime.Repositories().Repositories[localName]; exists {
+			if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil {
+				return job.Error(err)
+			}
+			return engine.StatusOK
+		}
+		return job.Error(err)
+	}
+
+	var token []string
+	job.Stdout.Write(sf.FormatStatus("", "The push refers to an image: [%s]", localName))
+	if _, err := srv.pushImage(r, job.Stdout, remoteName, img.ID, endpoint, token, sf); err != nil {
+		return job.Error(err)
+	}
+	return engine.StatusOK
+}
+
+func (srv *Server) ImageImport(job *engine.Job) engine.Status {
+	if n := len(job.Args); n != 2 && n != 3 {
+		return job.Errorf("Usage: %s SRC REPO [TAG]", job.Name)
+	}
+	var (
+		src     = job.Args[0]
+		repo    = job.Args[1]
+		tag     string
+		sf      = utils.NewStreamFormatter(job.GetenvBool("json"))
+		archive archive.ArchiveReader
+		resp    *http.Response
+	)
+	if len(job.Args) > 2 {
+		tag = job.Args[2]
+	}
+
+	if src == "-" {
+		archive = job.Stdin
+	} else {
+		u, err := url.Parse(src)
+		if err != nil {
+			return job.Error(err)
+		}
+		if u.Scheme == "" {
+			u.Scheme = "http"
+			u.Host = src
+			u.Path = ""
+		}
+		job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u))
+		// Download with curl (pretty progress bar)
+		// If curl is not available, fall back to http.Get()
+		resp, err = utils.Download(u.String())
+		if err != nil {
+			return job.Error(err)
+		}
+		progressReader := utils.ProgressReader(resp.Body, int(resp.ContentLength), job.Stdout, sf, true, "", "Importing")
+		defer progressReader.Close()
+		archive = progressReader
+	}
+	img, err := srv.runtime.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
+	if err != nil {
+		return job.Error(err)
+	}
+	// Optionally register the image at REPO/TAG
+	if repo != "" {
+		if err := srv.runtime.Repositories().Set(repo, tag, img.ID, true); err != nil {
+			return job.Error(err)
+		}
+	}
+	job.Stdout.Write(sf.FormatStatus("", img.ID))
+	return engine.StatusOK
+}
+
+func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
+	var name string
+	if len(job.Args) == 1 {
+		name = job.Args[0]
+	} else if len(job.Args) > 1 {
+		return job.Errorf("Usage: %s", job.Name)
+	}
+	config := runconfig.ContainerConfigFromJob(job)
+	if config.Memory != 0 && config.Memory < 524288 {
+		return
job.Errorf("Minimum memory limit allowed is 512k") + } + if config.Memory > 0 && !srv.runtime.SystemConfig().MemoryLimit { + job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n") + config.Memory = 0 + } + if config.Memory > 0 && !srv.runtime.SystemConfig().SwapLimit { + job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n") + config.MemorySwap = -1 + } + resolvConf, err := utils.GetResolvConf() + if err != nil { + return job.Error(err) + } + if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.Config().Dns) == 0 && utils.CheckLocalDns(resolvConf) { + job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", runtime.DefaultDns) + config.Dns = runtime.DefaultDns + } + + container, buildWarnings, err := srv.runtime.Create(config, name) + if err != nil { + if srv.runtime.Graph().IsNotExist(err) { + _, tag := utils.ParseRepositoryTag(config.Image) + if tag == "" { + tag = graph.DEFAULTTAG + } + return job.Errorf("No such image: %s (tag: %s)", config.Image, tag) + } + return job.Error(err) + } + if !container.Config.NetworkDisabled && srv.runtime.SystemConfig().IPv4ForwardingDisabled { + job.Errorf("IPv4 forwarding is disabled.\n") + } + srv.LogEvent("create", container.ID, srv.runtime.Repositories().ImageName(container.Image)) + // FIXME: this is necessary because runtime.Create might return a nil container + // with a non-nil error. This should not happen! Once it's fixed we + // can remove this workaround. + if container != nil { + job.Printf("%s\n", container.ID) + } + for _, warning := range buildWarnings { + job.Errorf("%s\n", warning) + } + return engine.StatusOK +} + +func (srv *Server) ContainerRestart(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") + } + if container := srv.runtime.Get(name); container != nil { + if err := container.Restart(int(t)); err != nil { + return job.Errorf("Cannot restart container %s: %s\n", name, err) + } + srv.LogEvent("restart", container.ID, srv.runtime.Repositories().ImageName(container.Image)) + } else { + return job.Errorf("No such container: %s\n", name) + } + return engine.StatusOK +} + +func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name)
+	}
+	name := job.Args[0]
+	removeVolume := job.GetenvBool("removeVolume")
+	removeLink := job.GetenvBool("removeLink")
+	forceRemove := job.GetenvBool("forceRemove")
+
+	container := srv.runtime.Get(name)
+
+	if removeLink {
+		if container == nil {
+			return job.Errorf("No such link: %s", name)
+		}
+		name, err := runtime.GetFullContainerName(name)
+		if err != nil {
+			job.Error(err)
+		}
+		parent, n := path.Split(name)
+		if parent == "/" {
+			return job.Errorf("Conflict, cannot remove the default name of the container")
+		}
+		pe := srv.runtime.ContainerGraph().Get(parent)
+		if pe == nil {
+			return job.Errorf("Cannot get parent %s for name %s", parent, name)
+		}
+		parentContainer := srv.runtime.Get(pe.ID())
+
+		if parentContainer != nil {
+			parentContainer.DisableLink(n)
+		}
+
+		if err := srv.runtime.ContainerGraph().Delete(name); err != nil {
+			return job.Error(err)
+		}
+		return engine.StatusOK
+	}
+
+	if container != nil {
+		if container.State.IsRunning() {
+			if forceRemove {
+				if err := container.Stop(5); err != nil {
+					return job.Errorf("Could not stop running container, cannot remove - %v", err)
+				}
+			} else {
+				return job.Errorf("Impossible to remove a running container, please stop it first or use -f")
+			}
+		}
+		if err := srv.runtime.Destroy(container); err != nil {
+			return job.Errorf("Cannot destroy container %s: %s", name, err)
+		}
+		srv.LogEvent("destroy", container.ID, srv.runtime.Repositories().ImageName(container.Image))
+
+		if removeVolume {
+			var (
+				volumes     = make(map[string]struct{})
+				binds       = make(map[string]struct{})
+				usedVolumes = make(map[string]*runtime.Container)
+			)
+
+			// the volume id is always the base of the path
+			getVolumeId := func(p string) string {
+				return filepath.Base(strings.TrimSuffix(p, "/layer"))
+			}
+
+			// populate bind map so that they can be skipped and not removed
+			for _, bind := range container.HostConfig().Binds {
+				source := strings.Split(bind, ":")[0]
+				// TODO: refactor all volume stuff, all of it
+				// it is very important that we eval the link
+				// or comparing the keys to container.Volumes will not work
+				p, err := filepath.EvalSymlinks(source)
+				if err != nil {
+					return job.Error(err)
+				}
+				source = p
+				binds[source] = struct{}{}
+			}
+
+			// Store all the deleted containers' volumes
+			for _, volumeId := range container.Volumes {
+				// Skip the volumes mounted from external
+				// bind mounts here will be evaluated for a symlink
+				if _, exists := binds[volumeId]; exists {
+					continue
+				}
+
+				volumeId = getVolumeId(volumeId)
+				volumes[volumeId] = struct{}{}
+			}
+
+			// Retrieve all volumes from all remaining containers
+			for _, container := range srv.runtime.List() {
+				for _, containerVolumeId := range container.Volumes {
+					containerVolumeId = getVolumeId(containerVolumeId)
+					usedVolumes[containerVolumeId] = container
+				}
+			}
+
+			for volumeId := range volumes {
+				// If the requested volume is still used by another container, skip it
+				if c, exists := usedVolumes[volumeId]; exists {
+					log.Printf("The volume %s is used by the container %s. Impossible to remove it.
Skipping.\n", volumeId, c.ID) + continue + } + if err := srv.runtime.Volumes().Delete(volumeId); err != nil { + return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err) + } + } + } + } else { + return job.Errorf("No such container: %s", name) + } + return engine.StatusOK +} + +func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force bool) error { + var ( + repoName, tag string + tags = []string{} + ) + + repoName, tag = utils.ParseRepositoryTag(name) + if tag == "" { + tag = graph.DEFAULTTAG + } + + img, err := srv.runtime.Repositories().LookupImage(name) + if err != nil { + if r, _ := srv.runtime.Repositories().Get(repoName); r != nil { + return fmt.Errorf("No such image: %s:%s", repoName, tag) + } + return fmt.Errorf("No such image: %s", name) + } + + if strings.Contains(img.ID, name) { + repoName = "" + tag = "" + } + + byParents, err := srv.runtime.Graph().ByParent() + if err != nil { + return err + } + + //If delete by id, see if the id belong only to one repository + if repoName == "" { + for _, repoAndTag := range srv.runtime.Repositories().ByID()[img.ID] { + parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag) + if repoName == "" || repoName == parsedRepo { + repoName = parsedRepo + if parsedTag != "" { + tags = append(tags, parsedTag) + } + } else if repoName != parsedRepo && !force { + // the id belongs to multiple repos, like base:latest and user:test, + // in that case return conflict + return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name) + } + } + } else { + tags = append(tags, tag) + } + + if !first && len(tags) > 0 { + return nil + } + + //Untag the current image + for _, tag := range tags { + tagDeleted, err := srv.runtime.Repositories().Delete(repoName, tag) + if err != nil { + return err + } + if tagDeleted { + out := &engine.Env{} + out.Set("Untagged", repoName+":"+tag) + imgs.Add(out) + srv.LogEvent("untag", img.ID, "") + } + } + tags = srv.runtime.Repositories().ByID()[img.ID] + if (len(tags) <= 1 && repoName == "") || len(tags) == 0 { + if len(byParents[img.ID]) == 0 { + if err := srv.canDeleteImage(img.ID); err != nil { + return err + } + if err := srv.runtime.Repositories().DeleteAll(img.ID); err != nil { + return err + } + if err := srv.runtime.Graph().Delete(img.ID); err != nil { + return err + } + out := &engine.Env{} + out.Set("Deleted", img.ID) + imgs.Add(out) + srv.LogEvent("delete", img.ID, "") + if img.Parent != "" { + err := srv.DeleteImage(img.Parent, imgs, false, force) + if first { + return err + } + + } + + } + } + return nil +} + +func (srv *Server) ImageDelete(job *engine.Job) engine.Status { + if n := len(job.Args); n != 1 { + return job.Errorf("Usage: %s IMAGE", job.Name) + } + imgs := engine.NewTable("", 0) + if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force")); err != nil { + return job.Error(err) + } + if len(imgs.Data) == 0 { + return job.Errorf("Conflict, %s wasn't deleted", job.Args[0]) + } + if _, err := imgs.WriteListTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK +} + +func (srv *Server) canDeleteImage(imgID string) error { + for _, container := range srv.runtime.List() { + parent, err := srv.runtime.Repositories().LookupImage(container.Image) + if err != nil { + return err + } + + if err := parent.WalkHistory(func(p *image.Image) error { + if imgID == p.ID { + return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it", utils.TruncateID(imgID), 
utils.TruncateID(container.ID)) + } + return nil + }); err != nil { + return err + } + } + return nil +} + +func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) { + // Retrieve all images + images, err := srv.runtime.Graph().Map() + if err != nil { + return nil, err + } + + // Store the tree in a map of map (map[parentId][childId]) + imageMap := make(map[string]map[string]struct{}) + for _, img := range images { + if _, exists := imageMap[img.Parent]; !exists { + imageMap[img.Parent] = make(map[string]struct{}) + } + imageMap[img.Parent][img.ID] = struct{}{} + } + + // Loop on the children of the given image and check the config + var match *image.Image + for elem := range imageMap[imgID] { + img, err := srv.runtime.Graph().Get(elem) + if err != nil { + return nil, err + } + if runconfig.Compare(&img.ContainerConfig, config) { + if match == nil || match.Created.Before(img.Created) { + match = img + } + } + } + return match, nil +} + +func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runconfig.HostConfig) error { + runtime := srv.runtime + + if hostConfig != nil && hostConfig.Links != nil { + for _, l := range hostConfig.Links { + parts, err := utils.PartParser("name:alias", l) + if err != nil { + return err + } + child, err := srv.runtime.GetByName(parts["name"]) + if err != nil { + return err + } + if child == nil { + return fmt.Errorf("Could not get container for %s", parts["name"]) + } + if err := runtime.RegisterLink(container, child, parts["alias"]); err != nil { + return err + } + } + + // After we load all the links into the runtime + // set them to nil on the hostconfig + hostConfig.Links = nil + if err := container.WriteHostConfig(); err != nil { + return err + } + } + return nil +} + +func (srv *Server) ContainerStart(job *engine.Job) engine.Status { + if len(job.Args) < 1 { + return job.Errorf("Usage: %s container_id", job.Name) + } + name := job.Args[0] + runtime := srv.runtime + container := runtime.Get(name) + + if container == nil { + return job.Errorf("No such container: %s", name) + } + // If no environment was set, then no hostconfig was passed. + if len(job.Environ()) > 0 { + hostConfig := runconfig.ContainerHostConfigFromJob(job) + // Validate the HostConfig binds. Make sure that: + // 1) the source of a bind mount isn't / + // The bind mount "/:/foo" isn't allowed. + // 2) Check that the source exists + // The source to be bind mounted must exist. 
+ for _, bind := range hostConfig.Binds { + splitBind := strings.Split(bind, ":") + source := splitBind[0] + + // refuse to bind mount "/" to the container + if source == "/" { + return job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind) + } + + // ensure the source exists on the host + _, err := os.Stat(source) + if err != nil && os.IsNotExist(err) { + err = os.MkdirAll(source, 0755) + if err != nil { + return job.Errorf("Could not create local directory '%s' for bind mount: %s!", source, err.Error()) + } + } + } + // Register any links from the host config before starting the container + if err := srv.RegisterLinks(container, hostConfig); err != nil { + return job.Error(err) + } + container.SetHostConfig(hostConfig) + container.ToDisk() + } + if err := container.Start(); err != nil { + return job.Errorf("Cannot start container %s: %s", name, err) + } + srv.LogEvent("start", container.ID, runtime.Repositories().ImageName(container.Image)) + + return engine.StatusOK +} + +func (srv *Server) ContainerStop(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + var ( + name = job.Args[0] + t = 10 + ) + if job.EnvExists("t") { + t = job.GetenvInt("t") + } + if container := srv.runtime.Get(name); container != nil { + if err := container.Stop(int(t)); err != nil { + return job.Errorf("Cannot stop container %s: %s\n", name, err) + } + srv.LogEvent("stop", container.ID, srv.runtime.Repositories().ImageName(container.Image)) + } else { + return job.Errorf("No such container: %s\n", name) + } + return engine.StatusOK +} + +func (srv *Server) ContainerWait(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s", job.Name) + } + name := job.Args[0] + if container := srv.runtime.Get(name); container != nil { + status := container.Wait() + job.Printf("%d\n", status) + return engine.StatusOK + } + return job.Errorf("%s: no such container: %s", job.Name, name) +} + +func (srv *Server) ContainerResize(job *engine.Job) engine.Status { + if len(job.Args) != 3 { + return job.Errorf("Not enough arguments. 
Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) + } + name := job.Args[0] + height, err := strconv.Atoi(job.Args[1]) + if err != nil { + return job.Error(err) + } + width, err := strconv.Atoi(job.Args[2]) + if err != nil { + return job.Error(err) + } + if container := srv.runtime.Get(name); container != nil { + if err := container.Resize(height, width); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} + +func (srv *Server) ContainerAttach(job *engine.Job) engine.Status { + if len(job.Args) != 1 { + return job.Errorf("Usage: %s CONTAINER\n", job.Name) + } + + var ( + name = job.Args[0] + logs = job.GetenvBool("logs") + stream = job.GetenvBool("stream") + stdin = job.GetenvBool("stdin") + stdout = job.GetenvBool("stdout") + stderr = job.GetenvBool("stderr") + ) + + container := srv.runtime.Get(name) + if container == nil { + return job.Errorf("No such container: %s", name) + } + + //logs + if logs { + cLog, err := container.ReadLog("json") + if err != nil && os.IsNotExist(err) { + // Legacy logs + utils.Debugf("Old logs format") + if stdout { + cLog, err := container.ReadLog("stdout") + if err != nil { + utils.Errorf("Error reading logs (stdout): %s", err) + } else if _, err := io.Copy(job.Stdout, cLog); err != nil { + utils.Errorf("Error streaming logs (stdout): %s", err) + } + } + if stderr { + cLog, err := container.ReadLog("stderr") + if err != nil { + utils.Errorf("Error reading logs (stderr): %s", err) + } else if _, err := io.Copy(job.Stderr, cLog); err != nil { + utils.Errorf("Error streaming logs (stderr): %s", err) + } + } + } else if err != nil { + utils.Errorf("Error reading logs (json): %s", err) + } else { + dec := json.NewDecoder(cLog) + for { + l := &utils.JSONLog{} + + if err := dec.Decode(l); err == io.EOF { + break + } else if err != nil { + utils.Errorf("Error streaming logs: %s", err) + break + } + if l.Stream == "stdout" && stdout { + fmt.Fprintf(job.Stdout, "%s", l.Log) + } + if l.Stream == "stderr" && stderr { + fmt.Fprintf(job.Stderr, "%s", l.Log) + } + } + } + } + + //stream + if stream { + if container.State.IsGhost() { + return job.Errorf("Impossible to attach to a ghost container") + } + + var ( + cStdin io.ReadCloser + cStdout, cStderr io.Writer + cStdinCloser io.Closer + ) + + if stdin { + r, w := io.Pipe() + go func() { + defer w.Close() + defer utils.Debugf("Closing buffered stdin pipe") + io.Copy(w, job.Stdin) + }() + cStdin = r + cStdinCloser = job.Stdin + } + if stdout { + cStdout = job.Stdout + } + if stderr { + cStderr = job.Stderr + } + + <-container.Attach(cStdin, cStdinCloser, cStdout, cStderr) + + // If we are in stdinonce mode, wait for the process to end + // otherwise, simply return + if container.Config.StdinOnce && !container.Config.Tty { + container.Wait() + } + } + return engine.StatusOK +} + +func (srv *Server) ContainerInspect(name string) (*runtime.Container, error) { + if container := srv.runtime.Get(name); container != nil { + return container, nil + } + return nil, fmt.Errorf("No such container: %s", name) +} + +func (srv *Server) ImageInspect(name string) (*image.Image, error) { + if image, err := srv.runtime.Repositories().LookupImage(name); err == nil && image != nil { + return image, nil + } + return nil, fmt.Errorf("No such image: %s", name) +} + +func (srv *Server) JobInspect(job *engine.Job) engine.Status { + // TODO: deprecate KIND/conflict + if n := len(job.Args); n != 2 { + return job.Errorf("Usage: %s CONTAINER|IMAGE KIND", job.Name) + } + var ( + name = 
job.Args[0] + kind = job.Args[1] + object interface{} + conflict = job.GetenvBool("conflict") //should the job detect conflict between containers and images + image, errImage = srv.ImageInspect(name) + container, errContainer = srv.ContainerInspect(name) + ) + + if conflict && image != nil && container != nil { + return job.Errorf("Conflict between containers and images") + } + + switch kind { + case "image": + if errImage != nil { + return job.Error(errImage) + } + object = image + case "container": + if errContainer != nil { + return job.Error(errContainer) + } + object = &struct { + *runtime.Container + HostConfig *runconfig.HostConfig + }{container, container.HostConfig()} + default: + return job.Errorf("Unknown kind: %s", kind) + } + + b, err := json.Marshal(object) + if err != nil { + return job.Error(err) + } + job.Stdout.Write(b) + return engine.StatusOK +} + +func (srv *Server) ContainerCopy(job *engine.Job) engine.Status { + if len(job.Args) != 2 { + return job.Errorf("Usage: %s CONTAINER RESOURCE\n", job.Name) + } + + var ( + name = job.Args[0] + resource = job.Args[1] + ) + + if container := srv.runtime.Get(name); container != nil { + + data, err := container.Copy(resource) + if err != nil { + return job.Error(err) + } + defer data.Close() + + if _, err := io.Copy(job.Stdout, data); err != nil { + return job.Error(err) + } + return engine.StatusOK + } + return job.Errorf("No such container: %s", name) +} + +func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) { + runtime, err := runtime.NewRuntime(config, eng) + if err != nil { + return nil, err + } + srv := &Server{ + Eng: eng, + runtime: runtime, + pullingPool: make(map[string]chan struct{}), + pushingPool: make(map[string]chan struct{}), + events: make([]utils.JSONMessage, 0, 64), //only keeps the 64 last events + listeners: make(map[string]chan utils.JSONMessage), + running: true, + } + runtime.SetServer(srv) + return srv, nil +} + +func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory { + httpVersion := make([]utils.VersionInfo, 0, 4) + httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION}) + httpVersion = append(httpVersion, &simpleVersionInfo{"go", goruntime.Version()}) + httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT}) + if kernelVersion, err := utils.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()}) + } + httpVersion = append(httpVersion, &simpleVersionInfo{"os", goruntime.GOOS}) + httpVersion = append(httpVersion, &simpleVersionInfo{"arch", goruntime.GOARCH}) + ud := utils.NewHTTPUserAgentDecorator(httpVersion...) 
+ md := &utils.HTTPMetaHeadersDecorator{ + Headers: metaHeaders, + } + factory := utils.NewHTTPRequestFactory(ud, md) + return factory +} + +func (srv *Server) LogEvent(action, id, from string) *utils.JSONMessage { + now := time.Now().UTC().Unix() + jm := utils.JSONMessage{Status: action, ID: id, From: from, Time: now} + srv.AddEvent(jm) + for _, c := range srv.listeners { + select { // non blocking channel + case c <- jm: + default: + } + } + return &jm +} + +func (srv *Server) AddEvent(jm utils.JSONMessage) { + srv.Lock() + defer srv.Unlock() + srv.events = append(srv.events, jm) +} + +func (srv *Server) GetEvents() []utils.JSONMessage { + srv.RLock() + defer srv.RUnlock() + return srv.events +} + +func (srv *Server) SetRunning(status bool) { + srv.Lock() + defer srv.Unlock() + + srv.running = status +} + +func (srv *Server) IsRunning() bool { + srv.RLock() + defer srv.RUnlock() + return srv.running +} + +func (srv *Server) Close() error { + if srv == nil { + return nil + } + srv.SetRunning(false) + if srv.runtime == nil { + return nil + } + return srv.runtime.Close() +} + +type Server struct { + sync.RWMutex + runtime *runtime.Runtime + pullingPool map[string]chan struct{} + pushingPool map[string]chan struct{} + events []utils.JSONMessage + listeners map[string]chan utils.JSONMessage + Eng *engine.Engine + running bool +} diff --git a/server/server_unit_test.go b/server/server_unit_test.go new file mode 100644 index 0000000000..b471c5c581 --- /dev/null +++ b/server/server_unit_test.go @@ -0,0 +1,99 @@ +package server + +import ( + "github.com/dotcloud/docker/utils" + "testing" + "time" +) + +func TestPools(t *testing.T) { + srv := &Server{ + pullingPool: make(map[string]chan struct{}), + pushingPool: make(map[string]chan struct{}), + } + + if _, err := srv.poolAdd("pull", "test1"); err != nil { + t.Fatal(err) + } + if _, err := srv.poolAdd("pull", "test2"); err != nil { + t.Fatal(err) + } + if _, err := srv.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + if _, err := srv.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { + t.Fatalf("Expected `pull test1 is already in progress`") + } + if _, err := srv.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } + if err := srv.poolRemove("pull", "test2"); err != nil { + t.Fatal(err) + } + if err := srv.poolRemove("pull", "test2"); err != nil { + t.Fatal(err) + } + if err := srv.poolRemove("pull", "test1"); err != nil { + t.Fatal(err) + } + if err := srv.poolRemove("push", "test1"); err != nil { + t.Fatal(err) + } + if err := srv.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { + t.Fatalf("Expected `Unknown pool type`") + } +} + +func TestLogEvent(t *testing.T) { + srv := &Server{ + events: make([]utils.JSONMessage, 0, 64), + listeners: make(map[string]chan utils.JSONMessage), + } + + srv.LogEvent("fakeaction", "fakeid", "fakeimage") + + listener := make(chan utils.JSONMessage) + srv.Lock() + srv.listeners["test"] = listener + srv.Unlock() + + srv.LogEvent("fakeaction2", "fakeid", "fakeimage") + + numEvents := len(srv.GetEvents()) + if numEvents != 2 { + t.Fatalf("Expected 2 events, found %d", numEvents) + } + go func() { + time.Sleep(200 * time.Millisecond) + srv.LogEvent("fakeaction3", "fakeid", "fakeimage") + time.Sleep(200 * time.Millisecond) + srv.LogEvent("fakeaction4", "fakeid", 
"fakeimage") + }() + + setTimeout(t, "Listening for events timed out", 2*time.Second, func() { + for i := 2; i < 4; i++ { + event := <-listener + if event != srv.GetEvents()[i] { + t.Fatalf("Event received it different than expected") + } + } + }) +} + +// FIXME: this is duplicated from integration/commands_test.go +func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { + c := make(chan bool) + + // Make sure we are not too long + go func() { + time.Sleep(d) + c <- true + }() + go func() { + f() + c <- false + }() + if <-c && msg != "" { + t.Fatal(msg) + } +} diff --git a/server_unit_test.go b/server_unit_test.go deleted file mode 100644 index 6a90ca5892..0000000000 --- a/server_unit_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package docker - -import ( - "github.com/dotcloud/docker/utils" - "testing" - "time" -) - -func TestPools(t *testing.T) { - srv := &Server{ - pullingPool: make(map[string]chan struct{}), - pushingPool: make(map[string]chan struct{}), - } - - if _, err := srv.poolAdd("pull", "test1"); err != nil { - t.Fatal(err) - } - if _, err := srv.poolAdd("pull", "test2"); err != nil { - t.Fatal(err) - } - if _, err := srv.poolAdd("push", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { - t.Fatalf("Expected `pull test1 is already in progress`") - } - if _, err := srv.poolAdd("pull", "test1"); err == nil || err.Error() != "pull test1 is already in progress" { - t.Fatalf("Expected `pull test1 is already in progress`") - } - if _, err := srv.poolAdd("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { - t.Fatalf("Expected `Unknown pool type`") - } - if err := srv.poolRemove("pull", "test2"); err != nil { - t.Fatal(err) - } - if err := srv.poolRemove("pull", "test2"); err != nil { - t.Fatal(err) - } - if err := srv.poolRemove("pull", "test1"); err != nil { - t.Fatal(err) - } - if err := srv.poolRemove("push", "test1"); err != nil { - t.Fatal(err) - } - if err := srv.poolRemove("wait", "test3"); err == nil || err.Error() != "Unknown pool type" { - t.Fatalf("Expected `Unknown pool type`") - } -} - -func TestLogEvent(t *testing.T) { - srv := &Server{ - events: make([]utils.JSONMessage, 0, 64), - listeners: make(map[string]chan utils.JSONMessage), - } - - srv.LogEvent("fakeaction", "fakeid", "fakeimage") - - listener := make(chan utils.JSONMessage) - srv.Lock() - srv.listeners["test"] = listener - srv.Unlock() - - srv.LogEvent("fakeaction2", "fakeid", "fakeimage") - - numEvents := len(srv.GetEvents()) - if numEvents != 2 { - t.Fatalf("Expected 2 events, found %d", numEvents) - } - go func() { - time.Sleep(200 * time.Millisecond) - srv.LogEvent("fakeaction3", "fakeid", "fakeimage") - time.Sleep(200 * time.Millisecond) - srv.LogEvent("fakeaction4", "fakeid", "fakeimage") - }() - - setTimeout(t, "Listening for events timed out", 2*time.Second, func() { - for i := 2; i < 4; i++ { - event := <-listener - if event != srv.GetEvents()[i] { - t.Fatalf("Event received it different than expected") - } - } - }) -} - -// FIXME: this is duplicated from integration/commands_test.go -func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { - c := make(chan bool) - - // Make sure we are not too long - go func() { - time.Sleep(d) - c <- true - }() - go func() { - f() - c <- false - }() - if <-c && msg != "" { - t.Fatal(msg) - } -} diff --git a/utils/streamformatter.go b/utils/streamformatter.go index 8876fa5cb7..d2758d3ca6 100644 --- a/utils/streamformatter.go +++ b/utils/streamformatter.go @@ -3,6 +3,7 @@ package utils import ( 
"encoding/json" "fmt" + "io" ) type StreamFormatter struct { @@ -90,3 +91,31 @@ func (sf *StreamFormatter) Used() bool { func (sf *StreamFormatter) Json() bool { return sf.json } + +type StdoutFormater struct { + io.Writer + *StreamFormatter +} + +func (sf *StdoutFormater) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +type StderrFormater struct { + io.Writer + *StreamFormatter +} + +func (sf *StderrFormater) Write(buf []byte) (int, error) { + formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") + n, err := sf.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} -- cgit v1.2.1 From 8cf0b80a7843633018b66a35d9a55f30814a56b6 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 11 Mar 2014 10:44:23 -0700 Subject: Update integration tests for server pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- integration/buildfile_test.go | 12 ++++++------ integration/server_test.go | 4 ++-- integration/utils_test.go | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index f4ed61aaff..7f6e69ece3 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -2,11 +2,11 @@ package docker import ( "fmt" - "github.com/dotcloud/docker" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/server" "github.com/dotcloud/docker/utils" "io/ioutil" "net" @@ -384,7 +384,7 @@ func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, u } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) + buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) id, err := buildfile.Build(context.Archive(dockerfile, t)) if err != nil { return nil, err @@ -799,7 +799,7 @@ func TestForbiddenContextPath(t *testing.T) { } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) + buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) _, err = buildfile.Build(context.Archive(dockerfile, t)) if err == nil { @@ -845,7 +845,7 @@ func TestBuildADDFileNotFound(t *testing.T) { } dockerfile := constructDockerfile(context.dockerfile, ip, port) - buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) + buildfile := server.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil) _, err = buildfile.Build(context.Archive(dockerfile, t)) if err == nil { @@ -917,8 +917,8 @@ func TestBuildFails(t *testing.T) { func TestBuildFailsDockerfileEmpty(t *testing.T) { _, err := 
buildImage(testContextTemplate{``, nil, nil}, t, nil, true) - if err != docker.ErrDockerfileEmpty { - t.Fatal("Expected: %v, got: %v", docker.ErrDockerfileEmpty, err) + if err != server.ErrDockerfileEmpty { + t.Fatal("Expected: %v, got: %v", server.ErrDockerfileEmpty, err) } } diff --git a/integration/server_test.go b/integration/server_test.go index 54ee9a77a9..49bd15e36f 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -1,9 +1,9 @@ package docker import ( - "github.com/dotcloud/docker" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/server" "strings" "testing" "time" @@ -739,7 +739,7 @@ func TestListContainers(t *testing.T) { } } -func assertContainerList(srv *docker.Server, all bool, limit int, since, before string, expected []string) bool { +func assertContainerList(srv *server.Server, all bool, limit int, since, before string, expected []string) bool { job := srv.Eng.Job("containers") job.SetenvBool("all", all) job.SetenvInt("limit", limit) diff --git a/integration/utils_test.go b/integration/utils_test.go index 53b4674df7..8ad6ccb123 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -14,11 +14,11 @@ import ( "testing" "time" - "github.com/dotcloud/docker" "github.com/dotcloud/docker/builtins" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/runtime" + "github.com/dotcloud/docker/server" "github.com/dotcloud/docker/utils" ) @@ -149,14 +149,14 @@ func getContainer(eng *engine.Engine, id string, t utils.Fataler) *runtime.Conta return c } -func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server { +func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *server.Server { iSrv := eng.Hack_GetGlobalVar("httpapi.server") if iSrv == nil { panic("Legacy server field not set in engine") } - srv, ok := iSrv.(*docker.Server) + srv, ok := iSrv.(*server.Server) if !ok { - panic("Legacy server field in engine does not cast to *docker.Server") + panic("Legacy server field in engine does not cast to *server.Server") } return srv } -- cgit v1.2.1 From 7294392c729de4c5884eb967f192b34a1d8857a7 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 13 Mar 2014 10:35:16 -0700 Subject: Add initial logging to libcontainer Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- execdriver/native/driver.go | 9 ++++--- pkg/libcontainer/nsinit/command.go | 3 ++- pkg/libcontainer/nsinit/exec.go | 16 ++++++++++-- pkg/libcontainer/nsinit/execin.go | 3 +++ pkg/libcontainer/nsinit/init.go | 11 ++++++-- pkg/libcontainer/nsinit/nsinit.go | 5 +++- pkg/libcontainer/nsinit/nsinit/main.go | 48 +++++++++++++++++++++++++--------- 7 files changed, 74 insertions(+), 21 deletions(-) diff --git a/execdriver/native/driver.go b/execdriver/native/driver.go index f6c7242620..989f2ff376 100644 --- a/execdriver/native/driver.go +++ b/execdriver/native/driver.go @@ -10,6 +10,7 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer/nsinit" "github.com/dotcloud/docker/pkg/system" "io/ioutil" + "log" "os" "os/exec" "path/filepath" @@ -27,7 +28,8 @@ func init() { execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { var ( container *libcontainer.Container - ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}) + logger = log.New(ioutil.Discard, "[nsinit] ", log.LstdFlags) + ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}, 
logger) ) f, err := os.Open(filepath.Join(args.Root, "container.json")) if err != nil { @@ -85,8 +87,9 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba c: c, dsw: &nsinit.DefaultStateWriter{filepath.Join(d.root, c.ID)}, } - ns = nsinit.NewNsInit(factory, stateWriter) - args = append([]string{c.Entrypoint}, c.Arguments...) + logger = log.New(ioutil.Discard, "[nsinit] ", log.LstdFlags) + ns = nsinit.NewNsInit(factory, stateWriter, logger) + args = append([]string{c.Entrypoint}, c.Arguments...) ) if err := d.createContainerRoot(c.ID); err != nil { return -1, err diff --git a/pkg/libcontainer/nsinit/command.go b/pkg/libcontainer/nsinit/command.go index 5546065b6d..1d7c591ee5 100644 --- a/pkg/libcontainer/nsinit/command.go +++ b/pkg/libcontainer/nsinit/command.go @@ -1,6 +1,7 @@ package nsinit import ( + "fmt" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/system" "os" @@ -25,7 +26,7 @@ func (c *DefaultCommandFactory) Create(container *libcontainer.Container, consol // get our binary name from arg0 so we can always reexec ourself command := exec.Command(os.Args[0], append([]string{ "-console", console, - "-pipe", "3", + "-pipe", fmt.Sprint(pipe.Fd()), "-root", c.Root, "init"}, args...)...) diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 4963f126e9..074492ae31 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -28,6 +28,7 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ } if container.Tty { + ns.logger.Println("creating master and console") master, console, err = system.CreateMasterAndConsole() if err != nil { return -1, err @@ -36,31 +37,40 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ } command := ns.commandFactory.Create(container, console, syncPipe.child, args) + ns.logger.Println("attach terminal to command") if err := term.Attach(command); err != nil { return -1, err } defer term.Close() + ns.logger.Println("starting command") if err := command.Start(); err != nil { return -1, err } + ns.logger.Printf("writting pid %d to file\n", command.Process.Pid) if err := ns.stateWriter.WritePid(command.Process.Pid); err != nil { command.Process.Kill() return -1, err } - defer ns.stateWriter.DeletePid() + defer func() { + ns.logger.Println("removing pid file") + ns.stateWriter.DeletePid() + }() // Do this before syncing with child so that no children // can escape the cgroup + ns.logger.Println("setting cgroups") if err := ns.SetupCgroups(container, command.Process.Pid); err != nil { command.Process.Kill() return -1, err } + ns.logger.Println("setting up network") if err := ns.InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil { command.Process.Kill() return -1, err } + ns.logger.Println("closing sync pipe with child") // Sync with child syncPipe.Close() @@ -69,7 +79,9 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ return -1, err } } - return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil + status := command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + ns.logger.Printf("process exited with status %d\n", status) + return status, err } func (ns *linuxNs) SetupCgroups(container *libcontainer.Container, nspid int) error { diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go index 488fe0e248..39df4761a0 100644 --- a/pkg/libcontainer/nsinit/execin.go +++ 
b/pkg/libcontainer/nsinit/execin.go @@ -14,6 +14,7 @@ import ( // ExecIn uses an existing pid and joins the pid's namespaces with the new command. func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) { + ns.logger.Println("unshare namespaces") for _, ns := range container.Namespaces { if err := system.Unshare(ns.Value); err != nil { return -1, err @@ -33,6 +34,7 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s // foreach namespace fd, use setns to join an existing container's namespaces for _, fd := range fds { if fd > 0 { + ns.logger.Printf("setns on %d\n", fd) if err := system.Setns(fd, 0); err != nil { closeFds() return -1, fmt.Errorf("setns %s", err) @@ -44,6 +46,7 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s // if the container has a new pid and mount namespace we need to // remount proc and sys to pick up the changes if container.Namespaces.Contains("NEWNS") && container.Namespaces.Contains("NEWPID") { + ns.logger.Println("forking to remount /proc and /sys") pid, err := system.Fork() if err != nil { return -1, err diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 5d47b95057..6b05905133 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -29,9 +29,11 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol syncPipe.Close() return err } + ns.logger.Println("received context from parent") syncPipe.Close() if console != "" { + ns.logger.Printf("setting up %s as console\n", console) slave, err := system.OpenTerminal(console, syscall.O_RDWR) if err != nil { return fmt.Errorf("open terminal %s", err) @@ -51,6 +53,7 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol if err := system.ParentDeathSignal(); err != nil { return fmt.Errorf("parent death signal %s", err) } + ns.logger.Println("setup mount namespace") if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot); err != nil { return fmt.Errorf("setup mount namespace %s", err) } @@ -64,9 +67,13 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol return fmt.Errorf("finalize namespace %s", err) } - if err := apparmor.ApplyProfile(os.Getpid(), container.Context["apparmor_profile"]); err != nil { - return err + if profile := container.Context["apparmor_profile"]; profile != "" { + ns.logger.Printf("setting apparmor prifile %s\n", profile) + if err := apparmor.ApplyProfile(os.Getpid(), profile); err != nil { + return err + } } + ns.logger.Printf("execing %s\n", args[0]) return system.Execv(args[0], args[0:], container.Env) } diff --git a/pkg/libcontainer/nsinit/nsinit.go b/pkg/libcontainer/nsinit/nsinit.go index f09a130aa2..c308692af6 100644 --- a/pkg/libcontainer/nsinit/nsinit.go +++ b/pkg/libcontainer/nsinit/nsinit.go @@ -2,6 +2,7 @@ package nsinit import ( "github.com/dotcloud/docker/pkg/libcontainer" + "log" ) // NsInit is an interface with the public facing methods to provide high level @@ -16,11 +17,13 @@ type linuxNs struct { root string commandFactory CommandFactory stateWriter StateWriter + logger *log.Logger } -func NewNsInit(command CommandFactory, state StateWriter) NsInit { +func NewNsInit(command CommandFactory, state StateWriter, logger *log.Logger) NsInit { return &linuxNs{ commandFactory: command, stateWriter: state, + logger: logger, } } diff --git a/pkg/libcontainer/nsinit/nsinit/main.go 
b/pkg/libcontainer/nsinit/nsinit/main.go index 916be6624e..df32d0b49e 100644 --- a/pkg/libcontainer/nsinit/nsinit/main.go +++ b/pkg/libcontainer/nsinit/nsinit/main.go @@ -5,6 +5,7 @@ import ( "flag" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/nsinit" + "io" "io/ioutil" "log" "os" @@ -13,14 +14,15 @@ import ( ) var ( - root, console string - pipeFd int + root, console, logs string + pipeFd int ) func registerFlags() { flag.StringVar(&console, "console", "", "console (pty slave) path") flag.IntVar(&pipeFd, "pipe", 0, "sync pipe fd") flag.StringVar(&root, "root", ".", "root for storing configuration data") + flag.StringVar(&logs, "log", "none", "set stderr or a filepath to enable logging") flag.Parse() } @@ -35,7 +37,12 @@ func main() { if err != nil { log.Fatalf("Unable to load container: %s", err) } - ns, err := newNsInit() + l, err := getLogger("[exec] ") + if err != nil { + log.Fatal(err) + } + + ns, err := newNsInit(l) if err != nil { log.Fatalf("Unable to initialize nsinit: %s", err) } @@ -46,7 +53,7 @@ func main() { nspid, err := readPid() if err != nil { if !os.IsNotExist(err) { - log.Fatalf("Unable to read pid: %s", err) + l.Fatalf("Unable to read pid: %s", err) } } if nspid > 0 { @@ -56,26 +63,26 @@ func main() { exitCode, err = ns.Exec(container, term, flag.Args()[1:]) } if err != nil { - log.Fatalf("Failed to exec: %s", err) + l.Fatalf("Failed to exec: %s", err) } os.Exit(exitCode) case "init": // this is executed inside of the namespace to setup the container cwd, err := os.Getwd() if err != nil { - log.Fatal(err) + l.Fatal(err) } if flag.NArg() < 2 { - log.Fatalf("wrong number of argments %d", flag.NArg()) + l.Fatalf("wrong number of argments %d", flag.NArg()) } syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(pipeFd)) if err != nil { - log.Fatalf("Unable to create sync pipe: %s", err) + l.Fatalf("Unable to create sync pipe: %s", err) } if err := ns.Init(container, cwd, console, syncPipe, flag.Args()[1:]); err != nil { - log.Fatalf("Unable to initialize for container: %s", err) + l.Fatalf("Unable to initialize for container: %s", err) } default: - log.Fatalf("command not supported for nsinit %s", flag.Arg(0)) + l.Fatalf("command not supported for nsinit %s", flag.Arg(0)) } } @@ -105,6 +112,23 @@ func readPid() (int, error) { return pid, nil } -func newNsInit() (nsinit.NsInit, error) { - return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}, &nsinit.DefaultStateWriter{root}), nil +func newNsInit(l *log.Logger) (nsinit.NsInit, error) { + return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}, &nsinit.DefaultStateWriter{root}, l), nil +} + +func getLogger(prefix string) (*log.Logger, error) { + var w io.Writer + switch logs { + case "", "none": + w = ioutil.Discard + case "stderr": + w = os.Stderr + default: // we have a filepath + f, err := os.OpenFile(logs, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755) + if err != nil { + return nil, err + } + w = f + } + return log.New(w, prefix, log.LstdFlags), nil } -- cgit v1.2.1 From 0e863a584a6edfa1c3ec383c586b646663b66bc7 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 13 Mar 2014 10:43:15 -0700 Subject: Add stderr log ouput if in debug Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- execdriver/native/driver.go | 20 +++++++++++++++----- pkg/libcontainer/nsinit/command.go | 3 +-- pkg/libcontainer/nsinit/exec.go | 1 + pkg/libcontainer/nsinit/init.go | 3 ++- 4 files changed, 19 insertions(+), 8 deletions(-) diff --git a/execdriver/native/driver.go 
b/execdriver/native/driver.go index 989f2ff376..9b49fd156f 100644 --- a/execdriver/native/driver.go +++ b/execdriver/native/driver.go @@ -9,6 +9,7 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer/apparmor" "github.com/dotcloud/docker/pkg/libcontainer/nsinit" "github.com/dotcloud/docker/pkg/system" + "io" "io/ioutil" "log" "os" @@ -28,8 +29,7 @@ func init() { execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { var ( container *libcontainer.Container - logger = log.New(ioutil.Discard, "[nsinit] ", log.LstdFlags) - ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}, logger) + ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}, createLogger("")) ) f, err := os.Open(filepath.Join(args.Root, "container.json")) if err != nil { @@ -87,9 +87,8 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba c: c, dsw: &nsinit.DefaultStateWriter{filepath.Join(d.root, c.ID)}, } - logger = log.New(ioutil.Discard, "[nsinit] ", log.LstdFlags) - ns = nsinit.NewNsInit(factory, stateWriter, logger) - args = append([]string{c.Entrypoint}, c.Arguments...) + ns = nsinit.NewNsInit(factory, stateWriter, createLogger(os.Getenv("DEBUG"))) + args = append([]string{c.Entrypoint}, c.Arguments...) ) if err := d.createContainerRoot(c.ID); err != nil { return -1, err @@ -254,3 +253,14 @@ func (d *dockerStateWriter) WritePid(pid int) error { func (d *dockerStateWriter) DeletePid() error { return d.dsw.DeletePid() } + +func createLogger(debug string) *log.Logger { + var w io.Writer + // if we are in debug mode set the logger to stderr + if debug != "" { + w = os.Stderr + } else { + w = ioutil.Discard + } + return log.New(w, "[libcontainer] ", log.LstdFlags) +} diff --git a/pkg/libcontainer/nsinit/command.go b/pkg/libcontainer/nsinit/command.go index 1d7c591ee5..5546065b6d 100644 --- a/pkg/libcontainer/nsinit/command.go +++ b/pkg/libcontainer/nsinit/command.go @@ -1,7 +1,6 @@ package nsinit import ( - "fmt" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/system" "os" @@ -26,7 +25,7 @@ func (c *DefaultCommandFactory) Create(container *libcontainer.Container, consol // get our binary name from arg0 so we can always reexec ourself command := exec.Command(os.Args[0], append([]string{ "-console", console, - "-pipe", fmt.Sprint(pipe.Fd()), + "-pipe", "3", "-root", c.Root, "init"}, args...)...) 
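
A minimal, self-contained sketch of the debug-gated logger pattern used in this change (assuming only the Go standard library; the createLogger name simply mirrors the helper added to driver.go above, and the DEBUG variable is the same one the driver reads):

package main

import (
	"io"
	"io/ioutil"
	"log"
	"os"
)

// createLogger returns a logger that writes to stderr when debug is
// non-empty (for example when the DEBUG environment variable is set)
// and silently discards all output otherwise.
func createLogger(debug string) *log.Logger {
	var w io.Writer = ioutil.Discard
	if debug != "" {
		w = os.Stderr
	}
	return log.New(w, "[libcontainer] ", log.LstdFlags)
}

func main() {
	l := createLogger(os.Getenv("DEBUG"))
	l.Println("this line is only visible when DEBUG is set")
}

Keeping the decision in one small constructor means callers never branch on the debug setting themselves; they always hold a usable *log.Logger, which is the same design choice the driver takes above.
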
diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 074492ae31..61286cc13c 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -26,6 +26,7 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ if err != nil { return -1, err } + ns.logger.Printf("created sync pipe parent fd %d child fd %d\n", syncPipe.parent.Fd(), syncPipe.child.Fd()) if container.Tty { ns.logger.Println("creating master and console") diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 6b05905133..e165de3a8f 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -24,6 +24,7 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol } // We always read this as it is a way to sync with the parent as well + ns.logger.Printf("reading from sync pipe fd %d\n", syncPipe.child.Fd()) context, err := syncPipe.ReadFromParent() if err != nil { syncPipe.Close() @@ -68,7 +69,7 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol } if profile := container.Context["apparmor_profile"]; profile != "" { - ns.logger.Printf("setting apparmor prifile %s\n", profile) + ns.logger.Printf("setting apparmor profile %s\n", profile) if err := apparmor.ApplyProfile(os.Getpid(), profile); err != nil { return err } -- cgit v1.2.1 From a41f6d936754f66d1786fa5b840278443da8d93c Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 14 Mar 2014 17:35:41 +0000 Subject: update godoc and add MAINTAINERS for mflags Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- pkg/mflag/MAINTAINERS | 1 + pkg/mflag/flag.go | 14 +++++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 pkg/mflag/MAINTAINERS diff --git a/pkg/mflag/MAINTAINERS b/pkg/mflag/MAINTAINERS new file mode 100644 index 0000000000..ceeb0cfd18 --- /dev/null +++ b/pkg/mflag/MAINTAINERS @@ -0,0 +1 @@ +Victor Vieux (@vieux) diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go index fc732d23a0..ed6fad3b46 100644 --- a/pkg/mflag/flag.go +++ b/pkg/mflag/flag.go @@ -10,7 +10,7 @@ Define flags using flag.String(), Bool(), Int(), etc. This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. - import "flag" + import "flag /github.com/dotcloud/docker/pkg/mflag" var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") If you like, you can bind the flag to a variable using the Var() functions. var flagvar int @@ -23,6 +23,18 @@ flag.Var(&flagVal, []string{"name"}, "help message for flagname") For such flags, the default value is just the initial value of the variable. + You can also add "deprecated" flags, they are still usable, bur are not shown + in the usage and will display a warning when you try to use them: + var ip = flag.Int([]string{"f", "#flagname", "-flagname"}, 1234, "help message for flagname") + this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` and + var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") + will display: `Warning: '-t' is deprecated, it will be removed soon. 
See usage.` + + You can also group one letter flags, bif you declare + var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") + var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow") + you will be able to use the -vs or -sv + After all flags are defined, call flag.Parse() to parse the command line into the defined flags. -- cgit v1.2.1 From 123ebf905367f1da0d9480153d08912d58b721fc Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 14 Mar 2014 18:16:14 +0000 Subject: update TestCreateRmRunning Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- integration/server_test.go | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/integration/server_test.go b/integration/server_test.go index 54ee9a77a9..d5abff264e 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -210,8 +210,15 @@ func TestCreateRmRunning(t *testing.T) { id := createTestContainer(eng, config, t) - job := eng.Job("containers") - job.SetenvBool("all", true) + job := eng.Job("start", id) + if err := job.ImportEnv(hostConfig); err != nil { + t.Fatal(err) + } + if err := job.Run(); err != nil { + t.Fatal(err) + } + + job = eng.Job("containers") outs, err := job.Stdout.AddListTable() if err != nil { t.Fatal(err) @@ -224,19 +231,24 @@ func TestCreateRmRunning(t *testing.T) { t.Errorf("Expected 1 container, %v found", len(outs.Data)) } - job = eng.Job("start", id) - if err := job.ImportEnv(hostConfig); err != nil { + // Test cannot remove running container + job = eng.Job("container_delete", id) + job.SetenvBool("forceRemove", false) + if err := job.Run(); err == nil { + t.Fatal("Expected container delete to fail") + } + + job = eng.Job("containers") + outs, err = job.Stdout.AddListTable() + if err != nil { t.Fatal(err) } if err := job.Run(); err != nil { t.Fatal(err) } - // Test cannot remove running container - job = eng.Job("container_delete", id) - job.SetenvBool("forceRemove", false) - if err := job.Run(); err == nil { - t.Fatal("Expected container delete to fail") + if len(outs.Data) != 1 { + t.Errorf("Expected 1 container, %v found", len(outs.Data)) } // Test can force removal of running container -- cgit v1.2.1 From 50b12708e35c300d93e8f8e8d21f79101fc185d3 Mon Sep 17 00:00:00 2001 From: Andy Rothfusz Date: Fri, 14 Mar 2014 14:37:09 -0700 Subject: Updating base svg to remove references to aufs and lxc. Exported new png's from layers. 
Docker-DCO-1.1-Signed-off-by: Andy Rothfusz (github: metalivedev) --- .../terms/images/docker-filesystems-busyboxrw.png | Bin 121141 -> 113106 bytes .../terms/images/docker-filesystems-debian.png | Bin 77822 -> 64585 bytes .../terms/images/docker-filesystems-debianrw.png | Bin 94218 -> 80992 bytes .../terms/images/docker-filesystems-generic.png | Bin 78384 -> 67894 bytes .../terms/images/docker-filesystems-multilayer.png | Bin 127744 -> 104199 bytes .../terms/images/docker-filesystems-multiroot.png | Bin 72247 -> 63920 bytes docs/sources/terms/images/docker-filesystems.svg | 73 ++------------------- 7 files changed, 6 insertions(+), 67 deletions(-) diff --git a/docs/sources/terms/images/docker-filesystems-busyboxrw.png b/docs/sources/terms/images/docker-filesystems-busyboxrw.png index ad41c940e4..9ff8487b89 100644 Binary files a/docs/sources/terms/images/docker-filesystems-busyboxrw.png and b/docs/sources/terms/images/docker-filesystems-busyboxrw.png differ diff --git a/docs/sources/terms/images/docker-filesystems-debian.png b/docs/sources/terms/images/docker-filesystems-debian.png index 823a215d3e..61e5ddb2e3 100644 Binary files a/docs/sources/terms/images/docker-filesystems-debian.png and b/docs/sources/terms/images/docker-filesystems-debian.png differ diff --git a/docs/sources/terms/images/docker-filesystems-debianrw.png b/docs/sources/terms/images/docker-filesystems-debianrw.png index 97c69a9944..cacba4947b 100644 Binary files a/docs/sources/terms/images/docker-filesystems-debianrw.png and b/docs/sources/terms/images/docker-filesystems-debianrw.png differ diff --git a/docs/sources/terms/images/docker-filesystems-generic.png b/docs/sources/terms/images/docker-filesystems-generic.png index fb734b75c6..ae54b72e88 100644 Binary files a/docs/sources/terms/images/docker-filesystems-generic.png and b/docs/sources/terms/images/docker-filesystems-generic.png differ diff --git a/docs/sources/terms/images/docker-filesystems-multilayer.png b/docs/sources/terms/images/docker-filesystems-multilayer.png index 0b3ae19c2c..a4260004e7 100644 Binary files a/docs/sources/terms/images/docker-filesystems-multilayer.png and b/docs/sources/terms/images/docker-filesystems-multilayer.png differ diff --git a/docs/sources/terms/images/docker-filesystems-multiroot.png b/docs/sources/terms/images/docker-filesystems-multiroot.png index 5e864273f3..65b61d94f1 100644 Binary files a/docs/sources/terms/images/docker-filesystems-multiroot.png and b/docs/sources/terms/images/docker-filesystems-multiroot.png differ diff --git a/docs/sources/terms/images/docker-filesystems.svg b/docs/sources/terms/images/docker-filesystems.svg index d41aff2522..dc4dc8e687 100644 --- a/docs/sources/terms/images/docker-filesystems.svg +++ b/docs/sources/terms/images/docker-filesystems.svg @@ -11,7 +11,7 @@ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" inkscape:export-ydpi="90" inkscape:export-xdpi="90" - inkscape:export-filename="/Users/arothfusz/src/metalivedev/docker/docs/sources/terms/images/docker-filesystems-multiroot.png" + inkscape:export-filename="/Users/arothfusz/src/metalivedev/dockerclone/docs/sources/terms/images/docker-filesystems-busyboxrw.png" sodipodi:docname="docker-filesystems.svg" width="800" height="600" @@ -26,7 +26,7 @@ inkscape:pageopacity="0.0" inkscape:pageshadow="2" inkscape:zoom="0.82666667" - inkscape:cx="236.08871" + inkscape:cx="346.87978" inkscape:cy="300" inkscape:document-units="px" inkscape:current-layer="layer2" @@ -149,7 +149,7 @@ image/svg+xml - + @@ -294,67 +294,6 @@ d="m 
514.91047,422.62215 c 0,0 -1.06434,42.27288 -1.06434,42.27288 0,0 4.45362,-2.8241 4.45362,-2.8241 0.2761,-0.17507 0.46813,-0.15759 0.57629,0.0523 0.10868,0.18619 0.15712,0.50328 0.14534,0.95133 -0.0112,0.42443 -0.0782,0.81493 -0.20113,1.17164 -0.12299,0.35687 -0.32235,0.62363 -0.59831,0.80035 0,0 -10.15763,6.50487 -10.15763,6.50487 -0.27917,0.17878 -0.476,0.16246 -0.5903,-0.0494 -0.11437,-0.21191 -0.16642,-0.53506 -0.15609,-0.96944 0.0109,-0.45857 0.0801,-0.85922 0.20776,-1.20182 0.12814,-0.36656 0.33197,-0.63844 0.61129,-0.81556 0,0 4.56188,-2.89274 4.56188,-2.89274 0,0 0.97884,-39.26779 0.97884,-39.26779 0,0 -3.35907,1.85407 -3.35907,1.85407 -0.27977,0.15447 -0.48159,0.1208 -0.60529,-0.10124 -0.11445,-0.22726 -0.16609,-0.57399 -0.15489,-1.04015 0.0106,-0.44163 0.0802,-0.843 0.20889,-1.204 0.12859,-0.36073 0.33761,-0.62003 0.62686,-0.77784 0,0 4.51628,-2.46343 4.51628,-2.46343" inkscape:connector-curvature="0" /> - - - - - - - - - - - - - - - - + + + referencesparentimage + -- cgit v1.2.1 From 39037a91f85a4a072e5aa7e585d8c2f6b211df8a Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 14 Mar 2014 11:42:01 -0700 Subject: Send sigterm to child instead of sigkill Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/init.go | 4 +++- pkg/system/calls_linux.go | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 5d47b95057..c702c79018 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -48,7 +48,9 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol return fmt.Errorf("setctty %s", err) } } - if err := system.ParentDeathSignal(); err != nil { + // this is our best effort to let the process know that the parent has died and that it + // should it should act on it how it sees fit + if err := system.ParentDeathSignal(uintptr(syscall.SIGTERM)); err != nil { return fmt.Errorf("parent death signal %s", err) } if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot); err != nil { diff --git a/pkg/system/calls_linux.go b/pkg/system/calls_linux.go index bf667c535b..43c00ed554 100644 --- a/pkg/system/calls_linux.go +++ b/pkg/system/calls_linux.go @@ -115,8 +115,8 @@ func Mknod(path string, mode uint32, dev int) error { return syscall.Mknod(path, mode, dev) } -func ParentDeathSignal() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, uintptr(syscall.SIGKILL), 0); err != 0 { +func ParentDeathSignal(sig uintptr) error { + if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 { return err } return nil -- cgit v1.2.1 From 2ba0861ad359477ad81346a81f1bac09cb5e2eb2 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Fri, 14 Mar 2014 17:20:22 -0600 Subject: Add Sam's Go "dockerclient" to the list of Client Libraries Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- docs/sources/reference/api/remote_api_client_libraries.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/reference/api/remote_api_client_libraries.rst b/docs/sources/reference/api/remote_api_client_libraries.rst index e58c7ced39..f74dd416bc 100644 --- a/docs/sources/reference/api/remote_api_client_libraries.rst +++ b/docs/sources/reference/api/remote_api_client_libraries.rst @@ -41,6 +41,8 @@ and we will add the libraries here. 
+----------------------+----------------+--------------------------------------------+----------+ | Go | go-dockerclient| https://github.com/fsouza/go-dockerclient | Active | +----------------------+----------------+--------------------------------------------+----------+ +| Go | dockerclient | https://github.com/samalba/dockerclient | Active | ++----------------------+----------------+--------------------------------------------+----------+ | PHP | Alvine | http://pear.alvine.io/ (alpha) | Active | +----------------------+----------------+--------------------------------------------+----------+ | PHP | Docker-PHP | http://stage1.github.io/docker-php/ | Active | -- cgit v1.2.1 From 5583774e29911bbd42181e8db2ece08761677cf3 Mon Sep 17 00:00:00 2001 From: Paul Nasrat Date: Sat, 15 Mar 2014 11:34:49 -0400 Subject: Fix spelling of benchmark test Docker-DCO-1.1-Signed-off-by: Paul Nasrat (github: pnasrat) --- integration/container_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/container_test.go b/integration/container_test.go index c32a8bcff7..010883a709 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -1109,7 +1109,7 @@ func TestEntrypointNoCmd(t *testing.T) { } } -func BenchmarkRunSequencial(b *testing.B) { +func BenchmarkRunSequential(b *testing.B) { runtime := mkRuntime(b) defer nuke(runtime) for i := 0; i < b.N; i++ { -- cgit v1.2.1 From 853c5e258fc9a3d8420e62aaed4817179073610a Mon Sep 17 00:00:00 2001 From: zqh Date: Sun, 16 Mar 2014 01:07:22 +0800 Subject: Update nodejs_web_app.rst the address of epel rpm has change to http://dl.fedoraproject.... --- docs/sources/examples/nodejs_web_app.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/examples/nodejs_web_app.rst b/docs/sources/examples/nodejs_web_app.rst index 68c073da7b..a9e9b1c5e3 100644 --- a/docs/sources/examples/nodejs_web_app.rst +++ b/docs/sources/examples/nodejs_web_app.rst @@ -91,7 +91,7 @@ To install the right package for CentOS, we’ll use the instructions from the .. 
code-block:: bash # Enable EPEL for Node.js - RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm + RUN rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm # Install Node.js and npm RUN yum install -y npm -- cgit v1.2.1 From 054b85a7b25e46935c0d91f544aac69dc3497468 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Sat, 15 Mar 2014 14:00:35 -0600 Subject: Add proper support for relative WORKDIR instructions Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- integration/buildfile_test.go | 17 +++++++++++++++++ server/buildfile.go | 9 ++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index 7f6e69ece3..9c986d74c2 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -441,6 +441,23 @@ func TestBuildUser(t *testing.T) { } } +func TestBuildRelativeWorkdir(t *testing.T) { + img, err := buildImage(testContextTemplate{` + FROM {IMAGE} + RUN [ "$PWD" = '/' ] + WORKDIR /test1 + RUN [ "$PWD" = '/test1' ] + WORKDIR test2 + RUN [ "$PWD" = '/test1/test2' ] + `, nil, nil}, t, nil, true) + if err != nil { + t.Fatal(err) + } + if img.Config.WorkingDir != "/test1/test2" { + t.Fail() + } +} + func TestBuildEnv(t *testing.T) { img, err := buildImage(testContextTemplate{` from {IMAGE} diff --git a/server/buildfile.go b/server/buildfile.go index af6702cc1d..5d5fda4d8e 100644 --- a/server/buildfile.go +++ b/server/buildfile.go @@ -338,7 +338,14 @@ func (b *buildFile) CmdCopy(args string) error { } func (b *buildFile) CmdWorkdir(workdir string) error { - b.config.WorkingDir = workdir + if workdir[0] == '/' { + b.config.WorkingDir = workdir + } else { + if b.config.WorkingDir == "" { + b.config.WorkingDir = "/" + } + b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir) + } return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir)) } -- cgit v1.2.1 From 65051f4215e493928a211c411f775ee1cc7a763f Mon Sep 17 00:00:00 2001 From: Vladimir Rutsky Date: Sun, 16 Mar 2014 18:35:13 +0400 Subject: Fix external link on security of containers Docker-DCO-1.1-Signed-off-by: Vladimir Rutsky (github: rutsky) --- docs/sources/articles/security.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/articles/security.rst b/docs/sources/articles/security.rst index 3dc5780e85..e738e9a847 100644 --- a/docs/sources/articles/security.rst +++ b/docs/sources/articles/security.rst @@ -7,7 +7,7 @@ Docker Security =============== - *Adapted from* `Containers & Docker: How Secure are They? `_ + *Adapted from* `Containers & Docker: How Secure are They? `_ There are three major areas to consider when reviewing Docker security: @@ -261,7 +261,7 @@ with Docker, since everything is provided by the kernel anyway. For more context and especially for comparisons with VMs and other container systems, please also see the `original blog post -`_. +`_. .. _blogsecurity: http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/ -- cgit v1.2.1 From e32965dbb13973f61ba1c0496c8136cc8c9273a2 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Sun, 16 Mar 2014 16:28:13 +0000 Subject: In `docker ps`, sort by port instead of unsorted. 
Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- api/common.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/api/common.go b/api/common.go index 10e7ddb4ae..5e5d2c5767 100644 --- a/api/common.go +++ b/api/common.go @@ -25,6 +25,8 @@ func ValidateHost(val string) (string, error) { //TODO remove, used on < 1.5 in getContainersJSON func displayablePorts(ports *engine.Table) string { result := []string{} + ports.SetKey("PublicPort") + ports.Sort() for _, port := range ports.Data { if port.Get("IP") == "" { result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type"))) -- cgit v1.2.1 From 9e69a042c50a3706c847addd68469dfe3eb698a0 Mon Sep 17 00:00:00 2001 From: Erik Hollensbe Date: Sun, 16 Mar 2014 17:48:46 +0000 Subject: Fix `docker cp` trying to untar files that do not exist. Docker-DCO-1.1-Signed-off-by: Erik Hollensbe (github: erikh) --- api/server.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/api/server.go b/api/server.go index 048c989540..2a5dacd9ea 100644 --- a/api/server.go +++ b/api/server.go @@ -894,6 +894,9 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp if copyData.Get("Resource") == "" { return fmt.Errorf("Path cannot be empty") } + + origResource := copyData.Get("Resource") + if copyData.Get("Resource")[0] == '/' { copyData.Set("Resource", copyData.Get("Resource")[1:]) } @@ -904,6 +907,8 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp utils.Errorf("%s", err.Error()) if strings.Contains(err.Error(), "No such container") { w.WriteHeader(http.StatusNotFound) + } else if strings.Contains(err.Error(), "no such file or directory") { + return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) } } return nil -- cgit v1.2.1 From 681d1d2f61b7c76da6612a5d24d41d5bd98e4df5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Stormacq?= Date: Mon, 17 Mar 2014 15:36:46 +0100 Subject: Include instruction for AMI 2014.03 Include instructions to install Docker from Amazon's Software Repository on new AMI 2014.03 (Release Candidate) Docker-DCO-1.1-Signed-off-by: Sebastien Stormacq (github: sebsto) --- docs/sources/installation/amazon.rst | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docs/sources/installation/amazon.rst b/docs/sources/installation/amazon.rst index b5465e25f8..b062a15e1e 100644 --- a/docs/sources/installation/amazon.rst +++ b/docs/sources/installation/amazon.rst @@ -9,6 +9,7 @@ Amazon EC2 There are several ways to install Docker on AWS EC2: +* :ref:`amazonquickstart_new` or * :ref:`amazonquickstart` or * :ref:`amazonstandard` @@ -61,6 +62,37 @@ for every Docker command. Once you've got Docker installed, you're ready to try it out -- head on over to the :doc:`../use/basics` or :doc:`../examples/index` section. +.. _amazonquickstart_new: + +Amazon QuickStart (Release Candidate - March 2014) +-------------------------------------------------- + +Amazon just published new Docker-ready AMIs (2014.03 Release Candidate). Docker packages +can now be installed from Amazon's provided Software Repository. + +1. **Choose an image:** + + * Launch the `Create Instance Wizard + `_ menu + on your AWS Console. 
+ + * Click the ``Community AMI`` menu option on the left side + + * Search for '2014.03' and select one of the Amazon provided AMI, for example ``amzn-ami-pv-2014.03.rc-0.x86_64-ebs`` + + * For testing you can use the default (possibly free) + ``t1.micro`` instance (more info on `pricing + `_). + + * Click the ``Next: Configure Instance Details`` button at the bottom right. + +2. After a few more standard choices where defaults are probably ok, your Amazon + Linux instance should be running! + +3. SSH to your instance to install Docker : ``ssh -i ec2-user@`` + +4. Once connected to the instance, type ``sudo yum install -y docker ; sudo service docker start`` to install and start Docker + .. _amazonstandard: Standard Ubuntu Installation -- cgit v1.2.1 From 90b283c39a36f34ac97f9eb0d66da3a0a9992caa Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 17 Mar 2014 17:56:21 +0000 Subject: fix content-type detection in docker cp Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/server.go b/api/server.go index 048c989540..5c7b0b7a05 100644 --- a/api/server.go +++ b/api/server.go @@ -883,7 +883,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp var copyData engine.Env - if contentType := r.Header.Get("Content-Type"); contentType == "application/json" { + if contentType := r.Header.Get("Content-Type"); MatchesContentType(contentType, "application/json") { if err := copyData.Decode(r.Body); err != nil { return err } -- cgit v1.2.1 From 128381e0f0372f10f88a847087aa91a972770c4b Mon Sep 17 00:00:00 2001 From: Brandon Philips Date: Mon, 17 Mar 2014 10:16:34 -0700 Subject: refactor(libcontainer): rename to CapabilitiesMask The Capabilities field on libcontainer is actually used as a mask. Rename the field so that this is more clear. 
Docker-DCO-1.1-Signed-off-by: Brandon Philips (github: philips) --- execdriver/native/default_template.go | 4 ++-- pkg/libcontainer/README.md | 2 +- pkg/libcontainer/capabilities/capabilities.go | 8 ++++---- pkg/libcontainer/container.go | 26 +++++++++++++------------- pkg/libcontainer/container.json | 2 +- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/execdriver/native/default_template.go b/execdriver/native/default_template.go index 2798f3b084..5351911427 100644 --- a/execdriver/native/default_template.go +++ b/execdriver/native/default_template.go @@ -36,7 +36,7 @@ func createContainer(c *execdriver.Command) *libcontainer.Container { container.Cgroups.Name = c.ID if c.Privileged { - container.Capabilities = nil + container.CapabilitiesMask = nil container.Cgroups.DeviceAccess = true container.Context["apparmor_profile"] = "unconfined" } @@ -59,7 +59,7 @@ func createContainer(c *execdriver.Command) *libcontainer.Container { // the libcontainer configuration file func getDefaultTemplate() *libcontainer.Container { return &libcontainer.Container{ - Capabilities: libcontainer.Capabilities{ + CapabilitiesMask: libcontainer.Capabilities{ libcontainer.GetCapability("SETPCAP"), libcontainer.GetCapability("SYS_MODULE"), libcontainer.GetCapability("SYS_RAWIO"), diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md index 2c85111b97..e967f6d76d 100644 --- a/pkg/libcontainer/README.md +++ b/pkg/libcontainer/README.md @@ -40,7 +40,7 @@ Sample `container.json` file: "HOSTNAME=11bb30683fb0", "TERM=xterm" ], - "capabilities" : [ + "capabilities_mask" : [ "SETPCAP", "SYS_MODULE", "SYS_RAWIO", diff --git a/pkg/libcontainer/capabilities/capabilities.go b/pkg/libcontainer/capabilities/capabilities.go index 3c6d752496..fbf73538e0 100644 --- a/pkg/libcontainer/capabilities/capabilities.go +++ b/pkg/libcontainer/capabilities/capabilities.go @@ -9,7 +9,7 @@ import ( // DropCapabilities drops capabilities for the current process based // on the container's configuration. 
func DropCapabilities(container *libcontainer.Container) error { - if drop := getCapabilities(container); len(drop) > 0 { + if drop := getCapabilitiesMask(container); len(drop) > 0 { c, err := capability.NewPid(os.Getpid()) if err != nil { return err @@ -23,10 +23,10 @@ func DropCapabilities(container *libcontainer.Container) error { return nil } -// getCapabilities returns the specific cap values for the libcontainer types -func getCapabilities(container *libcontainer.Container) []capability.Cap { +// getCapabilitiesMask returns the specific cap mask values for the libcontainer types +func getCapabilitiesMask(container *libcontainer.Container) []capability.Cap { drop := []capability.Cap{} - for _, c := range container.Capabilities { + for _, c := range container.CapabilitiesMask { drop = append(drop, c.Value) } return drop diff --git a/pkg/libcontainer/container.go b/pkg/libcontainer/container.go index 14b4b65db7..c7cac35428 100644 --- a/pkg/libcontainer/container.go +++ b/pkg/libcontainer/container.go @@ -11,19 +11,19 @@ type Context map[string]string // Container defines configuration options for how a // container is setup inside a directory and how a process should be executed type Container struct { - Hostname string `json:"hostname,omitempty"` // hostname - ReadonlyFs bool `json:"readonly_fs,omitempty"` // set the containers rootfs as readonly - NoPivotRoot bool `json:"no_pivot_root,omitempty"` // this can be enabled if you are running in ramdisk - User string `json:"user,omitempty"` // user to execute the process as - WorkingDir string `json:"working_dir,omitempty"` // current working directory - Env []string `json:"environment,omitempty"` // environment to set - Tty bool `json:"tty,omitempty"` // setup a proper tty or not - Namespaces Namespaces `json:"namespaces,omitempty"` // namespaces to apply - Capabilities Capabilities `json:"capabilities,omitempty"` // capabilities to drop - Networks []*Network `json:"networks,omitempty"` // nil for host's network stack - Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups - Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux) - Mounts []Mount `json:"mounts,omitempty"` + Hostname string `json:"hostname,omitempty"` // hostname + ReadonlyFs bool `json:"readonly_fs,omitempty"` // set the containers rootfs as readonly + NoPivotRoot bool `json:"no_pivot_root,omitempty"` // this can be enabled if you are running in ramdisk + User string `json:"user,omitempty"` // user to execute the process as + WorkingDir string `json:"working_dir,omitempty"` // current working directory + Env []string `json:"environment,omitempty"` // environment to set + Tty bool `json:"tty,omitempty"` // setup a proper tty or not + Namespaces Namespaces `json:"namespaces,omitempty"` // namespaces to apply + CapabilitiesMask Capabilities `json:"capabilities_mask,omitempty"` // capabilities to drop + Networks []*Network `json:"networks,omitempty"` // nil for host's network stack + Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups + Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux) + Mounts []Mount `json:"mounts,omitempty"` } // Network defines configuration for a container's networking stack diff --git a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json index 83e407467c..f045315a41 100644 --- a/pkg/libcontainer/container.json +++ b/pkg/libcontainer/container.json @@ -14,7 +14,7 @@ "NEWUTS", "NEWNET" ], - "capabilities": [ + 
"capabilities_mask": [ "SETPCAP", "SYS_MODULE", "SYS_RAWIO", -- cgit v1.2.1 From ad7e7d612390d09d3a54fd82dda9687deb3b0cbe Mon Sep 17 00:00:00 2001 From: Brandon Philips Date: Mon, 17 Mar 2014 11:07:29 -0700 Subject: chore(libcontainer): small grammar fix in types_test Someone probably got really used to typing er on the end of contain :) Docker-DCO-1.1-Signed-off-by: Brandon Philips (github: philips) --- pkg/libcontainer/types_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/libcontainer/types_test.go b/pkg/libcontainer/types_test.go index 52b85a4db9..9735937b76 100644 --- a/pkg/libcontainer/types_test.go +++ b/pkg/libcontainer/types_test.go @@ -30,6 +30,6 @@ func TestCapabilitiesContains(t *testing.T) { t.Fatal("capabilities should not contain SYS_ADMIN") } if !caps.Contains("MKNOD") { - t.Fatal("capabilities should container MKNOD but does not") + t.Fatal("capabilities should contain MKNOD but does not") } } -- cgit v1.2.1 From a62c7215c6c4675a4f99b63871d89198b211c260 Mon Sep 17 00:00:00 2001 From: Ralph Bean Date: Sat, 15 Mar 2014 14:48:32 -0400 Subject: Update fedora.rst MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It looks like ``wmdocker`` does not have an update for Fedora 20: ``` ~❯ pkgwat releases wmdocker Starting new HTTPS connection (1): apps.fedoraproject.org +---------------+----------------+-----------------+ | release | stable_version | testing_version | +---------------+----------------+-----------------+ | Rawhide | 1.5-12.fc21 | None | | Fedora 20 | None | None | | Fedora 19 | None | None | | Fedora EPEL 7 | None | None | | Fedora EPEL 6 | None | None | | Fedora EPEL 5 | None | None | +---------------+----------------+-----------------+ ``` I'm not sure if the owner awjb is intending to create an F20 update or not, but either way -- these docs are incorrect as currently written. Docker-DCO-1.1-Signed-off-by: Ralph Bean (github: ralphbean) --- docs/sources/installation/fedora.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sources/installation/fedora.rst b/docs/sources/installation/fedora.rst index 7e0aee78fd..3b95f04f7f 100644 --- a/docs/sources/installation/fedora.rst +++ b/docs/sources/installation/fedora.rst @@ -23,15 +23,15 @@ The ``docker-io`` package provides Docker on Fedora. If you have the (unrelated) ``docker`` package installed already, it will conflict with ``docker-io``. There's a `bug report`_ filed for it. -To proceed with ``docker-io`` installation on Fedora 19, please remove -``docker`` first. +To proceed with ``docker-io`` installation on Fedora 19 or Fedora 20, please +remove ``docker`` first. .. code-block:: bash sudo yum -y remove docker -For Fedora 20 and later, the ``wmdocker`` package will provide the same -functionality as ``docker`` and will also not conflict with ``docker-io``. +For Fedora 21 and later, the ``wmdocker`` package will provide the same +functionality as the old ``docker`` and will also not conflict with ``docker-io``. .. 
code-block:: bash -- cgit v1.2.1 From 5921b186d17b172f205f3b0b6bda1f3a4e650d3f Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 17 Mar 2014 18:36:15 +0000 Subject: display command display in docker ps Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- runtime/container.go | 12 ++++++++++++ server/server.go | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/runtime/container.go b/runtime/container.go index ee545db201..72ee104d8b 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -402,6 +402,18 @@ func populateCommand(c *Container) { c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} } +func (container *Container) ArgsAsString() string { + var args []string + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + return strings.Join(args, " ") +} + func (container *Container) Start() (err error) { container.Lock() defer container.Unlock() diff --git a/server/server.go b/server/server.go index eb9a3a396b..69b65ce4a5 100644 --- a/server/server.go +++ b/server/server.go @@ -1003,7 +1003,7 @@ func (srv *Server) Containers(job *engine.Job) engine.Status { out.SetList("Names", names[container.ID]) out.Set("Image", srv.runtime.Repositories().ImageName(container.Image)) if len(container.Args) > 0 { - out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, strings.Join(container.Args, " "))) + out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, container.ArgsAsString())) } else { out.Set("Command", fmt.Sprintf("\"%s\"", container.Path)) } -- cgit v1.2.1 From 2230c9b9a735d731cc2fee4137633eb98b9da9d5 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 14 Mar 2014 14:03:23 -0700 Subject: Move networking drivers into runtime top level pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- builtins/builtins.go | 2 +- daemonconfig/config.go | 2 +- networkdriver/ipallocator/allocator.go | 159 ------- networkdriver/ipallocator/allocator_test.go | 241 ----------- networkdriver/lxc/driver.go | 482 --------------------- networkdriver/network.go | 10 - networkdriver/network_test.go | 190 -------- networkdriver/portallocator/portallocator.go | 165 ------- networkdriver/portallocator/portallocator_test.go | 184 -------- networkdriver/portmapper/mapper.go | 131 ------ networkdriver/portmapper/mapper_test.go | 107 ----- networkdriver/utils.go | 118 ----- runtime/networkdriver/ipallocator/allocator.go | 159 +++++++ .../networkdriver/ipallocator/allocator_test.go | 241 +++++++++++ runtime/networkdriver/lxc/driver.go | 482 +++++++++++++++++++++ runtime/networkdriver/network.go | 10 + runtime/networkdriver/network_test.go | 190 ++++++++ .../networkdriver/portallocator/portallocator.go | 165 +++++++ .../portallocator/portallocator_test.go | 184 ++++++++ runtime/networkdriver/portmapper/mapper.go | 131 ++++++ runtime/networkdriver/portmapper/mapper_test.go | 107 +++++ runtime/networkdriver/utils.go | 118 +++++ runtime/runtime.go | 4 +- 23 files changed, 1791 insertions(+), 1791 deletions(-) delete mode 100644 networkdriver/ipallocator/allocator.go delete mode 100644 networkdriver/ipallocator/allocator_test.go delete mode 100644 networkdriver/lxc/driver.go delete mode 100644 networkdriver/network.go delete mode 100644 networkdriver/network_test.go delete mode 100644 networkdriver/portallocator/portallocator.go delete mode 100644 networkdriver/portallocator/portallocator_test.go delete mode 100644 
networkdriver/portmapper/mapper.go delete mode 100644 networkdriver/portmapper/mapper_test.go delete mode 100644 networkdriver/utils.go create mode 100644 runtime/networkdriver/ipallocator/allocator.go create mode 100644 runtime/networkdriver/ipallocator/allocator_test.go create mode 100644 runtime/networkdriver/lxc/driver.go create mode 100644 runtime/networkdriver/network.go create mode 100644 runtime/networkdriver/network_test.go create mode 100644 runtime/networkdriver/portallocator/portallocator.go create mode 100644 runtime/networkdriver/portallocator/portallocator_test.go create mode 100644 runtime/networkdriver/portmapper/mapper.go create mode 100644 runtime/networkdriver/portmapper/mapper_test.go create mode 100644 runtime/networkdriver/utils.go diff --git a/builtins/builtins.go b/builtins/builtins.go index eb4a0be874..86f3973c62 100644 --- a/builtins/builtins.go +++ b/builtins/builtins.go @@ -4,7 +4,7 @@ import ( "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/networkdriver/lxc" + "github.com/dotcloud/docker/runtime/networkdriver/lxc" "github.com/dotcloud/docker/server" ) diff --git a/daemonconfig/config.go b/daemonconfig/config.go index 0aee7e78ba..b26d3eec3a 100644 --- a/daemonconfig/config.go +++ b/daemonconfig/config.go @@ -4,7 +4,7 @@ import ( "net" "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/networkdriver" + "github.com/dotcloud/docker/runtime/networkdriver" ) const ( diff --git a/networkdriver/ipallocator/allocator.go b/networkdriver/ipallocator/allocator.go deleted file mode 100644 index 1c5a7b4cc2..0000000000 --- a/networkdriver/ipallocator/allocator.go +++ /dev/null @@ -1,159 +0,0 @@ -package ipallocator - -import ( - "encoding/binary" - "errors" - "github.com/dotcloud/docker/networkdriver" - "github.com/dotcloud/docker/pkg/collections" - "net" - "sync" -) - -type networkSet map[string]*collections.OrderedIntSet - -var ( - ErrNoAvailableIPs = errors.New("no available ip addresses on network") - ErrIPAlreadyAllocated = errors.New("ip already allocated") -) - -var ( - lock = sync.Mutex{} - allocatedIPs = networkSet{} - availableIPS = networkSet{} -) - -// RequestIP requests an available ip from the given network. It -// will return the next available ip if the ip provided is nil. If the -// ip provided is not nil it will validate that the provided ip is available -// for use or return an error -func RequestIP(address *net.IPNet, ip *net.IP) (*net.IP, error) { - lock.Lock() - defer lock.Unlock() - - checkAddress(address) - - if ip == nil { - next, err := getNextIp(address) - if err != nil { - return nil, err - } - return next, nil - } - - if err := registerIP(address, ip); err != nil { - return nil, err - } - return ip, nil -} - -// ReleaseIP adds the provided ip back into the pool of -// available ips to be returned for use. -func ReleaseIP(address *net.IPNet, ip *net.IP) error { - lock.Lock() - defer lock.Unlock() - - checkAddress(address) - - var ( - existing = allocatedIPs[address.String()] - available = availableIPS[address.String()] - pos = getPosition(address, ip) - ) - - existing.Remove(int(pos)) - available.Push(int(pos)) - - return nil -} - -// convert the ip into the position in the subnet. Only -// position are saved in the set -func getPosition(address *net.IPNet, ip *net.IP) int32 { - var ( - first, _ = networkdriver.NetworkRange(address) - base = ipToInt(&first) - i = ipToInt(ip) - ) - return i - base -} - -// return an available ip if one is currently available. 
If not, -// return the next available ip for the nextwork -func getNextIp(address *net.IPNet) (*net.IP, error) { - var ( - ownIP = ipToInt(&address.IP) - available = availableIPS[address.String()] - allocated = allocatedIPs[address.String()] - first, _ = networkdriver.NetworkRange(address) - base = ipToInt(&first) - size = int(networkdriver.NetworkSize(address.Mask)) - max = int32(size - 2) // size -1 for the broadcast address, -1 for the gateway address - pos = int32(available.Pop()) - ) - - // We pop and push the position not the ip - if pos != 0 { - ip := intToIP(int32(base + pos)) - allocated.Push(int(pos)) - - return ip, nil - } - - var ( - firstNetIP = address.IP.To4().Mask(address.Mask) - firstAsInt = ipToInt(&firstNetIP) + 1 - ) - - pos = int32(allocated.PullBack()) - for i := int32(0); i < max; i++ { - pos = pos%max + 1 - next := int32(base + pos) - - if next == ownIP || next == firstAsInt { - continue - } - - if !allocated.Exists(int(pos)) { - ip := intToIP(next) - allocated.Push(int(pos)) - return ip, nil - } - } - return nil, ErrNoAvailableIPs -} - -func registerIP(address *net.IPNet, ip *net.IP) error { - var ( - existing = allocatedIPs[address.String()] - available = availableIPS[address.String()] - pos = getPosition(address, ip) - ) - - if existing.Exists(int(pos)) { - return ErrIPAlreadyAllocated - } - available.Remove(int(pos)) - - return nil -} - -// Converts a 4 bytes IP into a 32 bit integer -func ipToInt(ip *net.IP) int32 { - return int32(binary.BigEndian.Uint32(ip.To4())) -} - -// Converts 32 bit integer into a 4 bytes IP address -func intToIP(n int32) *net.IP { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, uint32(n)) - ip := net.IP(b) - return &ip -} - -func checkAddress(address *net.IPNet) { - key := address.String() - if _, exists := allocatedIPs[key]; !exists { - allocatedIPs[key] = collections.NewOrderedIntSet() - availableIPS[key] = collections.NewOrderedIntSet() - } -} diff --git a/networkdriver/ipallocator/allocator_test.go b/networkdriver/ipallocator/allocator_test.go deleted file mode 100644 index 5e9fcfc983..0000000000 --- a/networkdriver/ipallocator/allocator_test.go +++ /dev/null @@ -1,241 +0,0 @@ -package ipallocator - -import ( - "fmt" - "net" - "testing" -) - -func reset() { - allocatedIPs = networkSet{} - availableIPS = networkSet{} -} - -func TestRequestNewIps(t *testing.T) { - defer reset() - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - - for i := 2; i < 10; i++ { - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - if expected := fmt.Sprintf("192.168.0.%d", i); ip.String() != expected { - t.Fatalf("Expected ip %s got %s", expected, ip.String()) - } - } -} - -func TestReleaseIp(t *testing.T) { - defer reset() - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - if err := ReleaseIP(network, ip); err != nil { - t.Fatal(err) - } -} - -func TestGetReleasedIp(t *testing.T) { - defer reset() - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - value := ip.String() - if err := ReleaseIP(network, ip); err != nil { - t.Fatal(err) - } - - ip, err = RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - if ip.String() != value { - t.Fatalf("Expected to receive same ip %s got %s", value, ip.String()) - } -} - -func 
TestRequesetSpecificIp(t *testing.T) { - defer reset() - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - - ip := net.ParseIP("192.168.1.5") - - if _, err := RequestIP(network, &ip); err != nil { - t.Fatal(err) - } -} - -func TestConversion(t *testing.T) { - ip := net.ParseIP("127.0.0.1") - i := ipToInt(&ip) - if i == 0 { - t.Fatal("converted to zero") - } - conv := intToIP(i) - if !ip.Equal(*conv) { - t.Error(conv.String()) - } -} - -func TestIPAllocator(t *testing.T) { - expectedIPs := []net.IP{ - 0: net.IPv4(127, 0, 0, 2), - 1: net.IPv4(127, 0, 0, 3), - 2: net.IPv4(127, 0, 0, 4), - 3: net.IPv4(127, 0, 0, 5), - 4: net.IPv4(127, 0, 0, 6), - } - - gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") - network := &net.IPNet{IP: gwIP, Mask: n.Mask} - // Pool after initialisation (f = free, u = used) - // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) - // ↑ - - // Check that we get 5 IPs, from 127.0.0.2–127.0.0.6, in that - // order. - for i := 0; i < 5; i++ { - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - assertIPEquals(t, &expectedIPs[i], ip) - } - // Before loop begin - // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 0 - // 2(u) - 3(f) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 1 - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 2 - // 2(u) - 3(u) - 4(u) - 5(f) - 6(f) - // ↑ - - // After i = 3 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(f) - // ↑ - - // After i = 4 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) - // ↑ - - // Check that there are no more IPs - ip, err := RequestIP(network, nil) - if err == nil { - t.Fatalf("There shouldn't be any IP addresses at this point, got %s\n", ip) - } - - // Release some IPs in non-sequential order - if err := ReleaseIP(network, &expectedIPs[3]); err != nil { - t.Fatal(err) - } - // 2(u) - 3(u) - 4(u) - 5(f) - 6(u) - // ↑ - - if err := ReleaseIP(network, &expectedIPs[2]); err != nil { - t.Fatal(err) - } - // 2(u) - 3(u) - 4(f) - 5(f) - 6(u) - // ↑ - - if err := ReleaseIP(network, &expectedIPs[4]); err != nil { - t.Fatal(err) - } - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ - - // Make sure that IPs are reused in sequential order, starting - // with the first released IP - newIPs := make([]*net.IP, 3) - for i := 0; i < 3; i++ { - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - - newIPs[i] = ip - } - // Before loop begin - // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) - // ↑ - - // After i = 0 - // 2(u) - 3(u) - 4(f) - 5(u) - 6(f) - // ↑ - - // After i = 1 - // 2(u) - 3(u) - 4(f) - 5(u) - 6(u) - // ↑ - - // After i = 2 - // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) - // ↑ - - // Reordered these because the new set will always return the - // lowest ips first and not in the order that they were released - assertIPEquals(t, &expectedIPs[2], newIPs[0]) - assertIPEquals(t, &expectedIPs[3], newIPs[1]) - assertIPEquals(t, &expectedIPs[4], newIPs[2]) - - _, err = RequestIP(network, nil) - if err == nil { - t.Fatal("There shouldn't be any IP addresses at this point") - } -} - -func TestAllocateFirstIP(t *testing.T) { - defer reset() - network := &net.IPNet{ - IP: []byte{192, 168, 0, 0}, - Mask: []byte{255, 255, 255, 0}, - } - - firstIP := network.IP.To4().Mask(network.Mask) - first := ipToInt(&firstIP) + 1 - - ip, err := RequestIP(network, nil) - if err != nil { - t.Fatal(err) - } - allocated := ipToInt(ip) - - if allocated == first { - t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated) - } -} - -func assertIPEquals(t *testing.T, ip1, ip2 *net.IP) { - if 
!ip1.Equal(*ip2) { - t.Fatalf("Expected IP %s, got %s", ip1, ip2) - } -} diff --git a/networkdriver/lxc/driver.go b/networkdriver/lxc/driver.go deleted file mode 100644 index 6185c42752..0000000000 --- a/networkdriver/lxc/driver.go +++ /dev/null @@ -1,482 +0,0 @@ -package lxc - -import ( - "fmt" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/networkdriver" - "github.com/dotcloud/docker/networkdriver/ipallocator" - "github.com/dotcloud/docker/networkdriver/portallocator" - "github.com/dotcloud/docker/networkdriver/portmapper" - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/netlink" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "log" - "net" - "strings" - "syscall" - "unsafe" -) - -const ( - DefaultNetworkBridge = "docker0" - siocBRADDBR = 0x89a0 -) - -// Network interface represents the networking stack of a container -type networkInterface struct { - IP net.IP - PortMappings []net.Addr // there are mappings to the host interfaces -} - -var ( - addrs = []string{ - // Here we don't follow the convention of using the 1st IP of the range for the gateway. - // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges. - // In theory this shouldn't matter - in practice there's bound to be a few scripts relying - // on the internal addressing or other stupid things like that. - // The shouldn't, but hey, let's not break them unless we really have to. - "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23 - "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive - "10.1.42.1/16", - "10.42.42.1/16", - "172.16.42.1/24", - "172.16.43.1/24", - "172.16.44.1/24", - "10.0.42.1/24", - "10.0.43.1/24", - "192.168.42.1/24", - "192.168.43.1/24", - "192.168.44.1/24", - } - - bridgeIface string - bridgeNetwork *net.IPNet - - defaultBindingIP = net.ParseIP("0.0.0.0") - currentInterfaces = make(map[string]*networkInterface) -) - -func InitDriver(job *engine.Job) engine.Status { - var ( - network *net.IPNet - enableIPTables = job.GetenvBool("EnableIptables") - icc = job.GetenvBool("InterContainerCommunication") - ipForward = job.GetenvBool("EnableIpForward") - bridgeIP = job.Getenv("BridgeIP") - ) - - if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" { - defaultBindingIP = net.ParseIP(defaultIP) - } - - bridgeIface = job.Getenv("BridgeIface") - if bridgeIface == "" { - bridgeIface = DefaultNetworkBridge - } - - addr, err := networkdriver.GetIfaceAddr(bridgeIface) - if err != nil { - // If the iface is not found, try to create it - job.Logf("creating new bridge for %s", bridgeIface) - if err := createBridge(bridgeIP); err != nil { - job.Error(err) - return engine.StatusErr - } - - job.Logf("getting iface addr") - addr, err = networkdriver.GetIfaceAddr(bridgeIface) - if err != nil { - job.Error(err) - return engine.StatusErr - } - network = addr.(*net.IPNet) - } else { - network = addr.(*net.IPNet) - } - - // Configure iptables for link support - if enableIPTables { - if err := setupIPTables(addr, icc); err != nil { - job.Error(err) - return engine.StatusErr - } - } - - if ipForward { - // Enable IPv4 forwarding - if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil { - job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err) - } - } - - // We can always try removing the iptables - if err := iptables.RemoveExistingChain("DOCKER"); err != nil { - job.Error(err) - return engine.StatusErr - } - - if enableIPTables { - 
chain, err := iptables.NewChain("DOCKER", bridgeIface) - if err != nil { - job.Error(err) - return engine.StatusErr - } - portmapper.SetIptablesChain(chain) - } - - bridgeNetwork = network - - // https://github.com/dotcloud/docker/issues/2768 - job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP) - - for name, f := range map[string]engine.Handler{ - "allocate_interface": Allocate, - "release_interface": Release, - "allocate_port": AllocatePort, - "link": LinkContainers, - } { - if err := job.Eng.Register(name, f); err != nil { - job.Error(err) - return engine.StatusErr - } - } - return engine.StatusOK -} - -func setupIPTables(addr net.Addr, icc bool) error { - // Enable NAT - natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"} - - if !iptables.Exists(natArgs...) { - if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { - return fmt.Errorf("Unable to enable network bridge NAT: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error iptables postrouting: %s", output) - } - } - - var ( - args = []string{"FORWARD", "-i", bridgeIface, "-o", bridgeIface, "-j"} - acceptArgs = append(args, "ACCEPT") - dropArgs = append(args, "DROP") - ) - - if !icc { - iptables.Raw(append([]string{"-D"}, acceptArgs...)...) - - if !iptables.Exists(dropArgs...) { - utils.Debugf("Disable inter-container communication") - if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { - return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error disabling intercontainer communication: %s", output) - } - } - } else { - iptables.Raw(append([]string{"-D"}, dropArgs...)...) - - if !iptables.Exists(acceptArgs...) { - utils.Debugf("Enable inter-container communication") - if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil { - return fmt.Errorf("Unable to allow intercontainer communication: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error enabling intercontainer communication: %s", output) - } - } - } - - // Accept all non-intercontainer outgoing packets - outgoingArgs := []string{"FORWARD", "-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"} - if !iptables.Exists(outgoingArgs...) { - if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { - return fmt.Errorf("Unable to allow outgoing packets: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error iptables allow outgoing: %s", output) - } - } - - // Accept incoming packets for existing connections - existingArgs := []string{"FORWARD", "-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} - - if !iptables.Exists(existingArgs...) { - if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { - return fmt.Errorf("Unable to allow incoming packets: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error iptables allow incoming: %s", output) - } - } - return nil -} - -// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, -// and attempts to configure it with an address which doesn't conflict with any other interface on the host. -// If it can't find an address which doesn't conflict, it will return an error. 
-func createBridge(bridgeIP string) error { - nameservers := []string{} - resolvConf, _ := utils.GetResolvConf() - // we don't check for an error here, because we don't really care - // if we can't read /etc/resolv.conf. So instead we skip the append - // if resolvConf is nil. It either doesn't exist, or we can't read it - // for some reason. - if resolvConf != nil { - nameservers = append(nameservers, utils.GetNameserversAsCIDR(resolvConf)...) - } - - var ifaceAddr string - if len(bridgeIP) != 0 { - _, _, err := net.ParseCIDR(bridgeIP) - if err != nil { - return err - } - ifaceAddr = bridgeIP - } else { - for _, addr := range addrs { - _, dockerNetwork, err := net.ParseCIDR(addr) - if err != nil { - return err - } - if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil { - if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil { - ifaceAddr = addr - break - } else { - utils.Debugf("%s %s", addr, err) - } - } - } - } - - if ifaceAddr == "" { - return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface) - } - utils.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) - - if err := createBridgeIface(bridgeIface); err != nil { - return err - } - - iface, err := net.InterfaceByName(bridgeIface) - if err != nil { - return err - } - - ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr) - if err != nil { - return err - } - - if netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { - return fmt.Errorf("Unable to add private network: %s", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to start network bridge: %s", err) - } - return nil -} - -// Create the actual bridge device. This is more backward-compatible than -// netlink.NetworkLinkAdd and works on RHEL 6. 
-func createBridgeIface(name string) error { - s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP) - if err != nil { - utils.Debugf("Bridge socket creation failed IPv6 probably not enabled: %v", err) - s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP) - if err != nil { - return fmt.Errorf("Error creating bridge creation socket: %s", err) - } - } - defer syscall.Close(s) - - nameBytePtr, err := syscall.BytePtrFromString(name) - if err != nil { - return fmt.Errorf("Error converting bridge name %s to byte array: %s", name, err) - } - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), siocBRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { - return fmt.Errorf("Error creating bridge: %s", err) - } - return nil -} - -// Allocate a network interface -func Allocate(job *engine.Job) engine.Status { - var ( - ip *net.IP - err error - id = job.Args[0] - requestedIP = net.ParseIP(job.Getenv("RequestedIP")) - ) - - if requestedIP != nil { - ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP) - } else { - ip, err = ipallocator.RequestIP(bridgeNetwork, nil) - } - if err != nil { - job.Error(err) - return engine.StatusErr - } - - out := engine.Env{} - out.Set("IP", ip.String()) - out.Set("Mask", bridgeNetwork.Mask.String()) - out.Set("Gateway", bridgeNetwork.IP.String()) - out.Set("Bridge", bridgeIface) - - size, _ := bridgeNetwork.Mask.Size() - out.SetInt("IPPrefixLen", size) - - currentInterfaces[id] = &networkInterface{ - IP: *ip, - } - - out.WriteTo(job.Stdout) - - return engine.StatusOK -} - -// release an interface for a select ip -func Release(job *engine.Job) engine.Status { - var ( - id = job.Args[0] - containerInterface = currentInterfaces[id] - ip net.IP - port int - proto string - ) - - if containerInterface == nil { - return job.Errorf("No network information to release for %s", id) - } - - for _, nat := range containerInterface.PortMappings { - if err := portmapper.Unmap(nat); err != nil { - log.Printf("Unable to unmap port %s: %s", nat, err) - } - - // this is host mappings - switch a := nat.(type) { - case *net.TCPAddr: - proto = "tcp" - ip = a.IP - port = a.Port - case *net.UDPAddr: - proto = "udp" - ip = a.IP - port = a.Port - } - - if err := portallocator.ReleasePort(ip, proto, port); err != nil { - log.Printf("Unable to release port %s", nat) - } - } - - if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil { - log.Printf("Unable to release ip %s\n", err) - } - return engine.StatusOK -} - -// Allocate an external port and map it to the interface -func AllocatePort(job *engine.Job) engine.Status { - var ( - err error - - ip = defaultBindingIP - id = job.Args[0] - hostIP = job.Getenv("HostIP") - hostPort = job.GetenvInt("HostPort") - containerPort = job.GetenvInt("ContainerPort") - proto = job.Getenv("Proto") - network = currentInterfaces[id] - ) - - if hostIP != "" { - ip = net.ParseIP(hostIP) - } - - // host ip, proto, and host port - hostPort, err = portallocator.RequestPort(ip, proto, hostPort) - if err != nil { - job.Error(err) - return engine.StatusErr - } - - var ( - container net.Addr - host net.Addr - ) - - if proto == "tcp" { - host = &net.TCPAddr{IP: ip, Port: hostPort} - container = &net.TCPAddr{IP: network.IP, Port: containerPort} - } else { - host = &net.UDPAddr{IP: ip, Port: hostPort} - container = &net.UDPAddr{IP: network.IP, Port: containerPort} - } - - if err := portmapper.Map(container, ip, hostPort); err != nil { - portallocator.ReleasePort(ip, 
proto, hostPort) - - job.Error(err) - return engine.StatusErr - } - network.PortMappings = append(network.PortMappings, host) - - out := engine.Env{} - out.Set("HostIP", ip.String()) - out.SetInt("HostPort", hostPort) - - if _, err := out.WriteTo(job.Stdout); err != nil { - job.Error(err) - return engine.StatusErr - } - return engine.StatusOK -} - -func LinkContainers(job *engine.Job) engine.Status { - var ( - action = job.Args[0] - childIP = job.Getenv("ChildIP") - parentIP = job.Getenv("ParentIP") - ignoreErrors = job.GetenvBool("IgnoreErrors") - ports = job.GetenvList("Ports") - ) - split := func(p string) (string, string) { - parts := strings.Split(p, "/") - return parts[0], parts[1] - } - - for _, p := range ports { - port, proto := split(p) - if output, err := iptables.Raw(action, "FORWARD", - "-i", bridgeIface, "-o", bridgeIface, - "-p", proto, - "-s", parentIP, - "--dport", port, - "-d", childIP, - "-j", "ACCEPT"); !ignoreErrors && err != nil { - job.Error(err) - return engine.StatusErr - } else if len(output) != 0 { - job.Errorf("Error toggle iptables forward: %s", output) - return engine.StatusErr - } - - if output, err := iptables.Raw(action, "FORWARD", - "-i", bridgeIface, "-o", bridgeIface, - "-p", proto, - "-s", childIP, - "--sport", port, - "-d", parentIP, - "-j", "ACCEPT"); !ignoreErrors && err != nil { - job.Error(err) - return engine.StatusErr - } else if len(output) != 0 { - job.Errorf("Error toggle iptables forward: %s", output) - return engine.StatusErr - } - } - return engine.StatusOK -} diff --git a/networkdriver/network.go b/networkdriver/network.go deleted file mode 100644 index 8dda789d2f..0000000000 --- a/networkdriver/network.go +++ /dev/null @@ -1,10 +0,0 @@ -package networkdriver - -import ( - "errors" -) - -var ( - ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver") - ErrNetworkOverlaps = errors.New("requested network overlaps with existing network") -) diff --git a/networkdriver/network_test.go b/networkdriver/network_test.go deleted file mode 100644 index 6224c2dffb..0000000000 --- a/networkdriver/network_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package networkdriver - -import ( - "github.com/dotcloud/docker/pkg/netlink" - "net" - "testing" -) - -func TestNonOverlapingNameservers(t *testing.T) { - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - nameservers := []string{ - "127.0.0.1/32", - } - - if err := CheckNameserverOverlaps(nameservers, network); err != nil { - t.Fatal(err) - } -} - -func TestOverlapingNameservers(t *testing.T) { - network := &net.IPNet{ - IP: []byte{192, 168, 0, 1}, - Mask: []byte{255, 255, 255, 0}, - } - nameservers := []string{ - "192.168.0.1/32", - } - - if err := CheckNameserverOverlaps(nameservers, network); err == nil { - t.Fatalf("Expected error %s got %s", ErrNetworkOverlapsWithNameservers, err) - } -} - -func TestCheckRouteOverlaps(t *testing.T) { - orig := networkGetRoutesFct - defer func() { - networkGetRoutesFct = orig - }() - networkGetRoutesFct = func() ([]netlink.Route, error) { - routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"} - - routes := []netlink.Route{} - for _, addr := range routesData { - _, netX, _ := net.ParseCIDR(addr) - routes = append(routes, netlink.Route{IPNet: netX}) - } - return routes, nil - } - - _, netX, _ := net.ParseCIDR("172.16.0.1/24") - if err := CheckRouteOverlaps(netX); err != nil { - t.Fatal(err) - } - - _, netX, _ = net.ParseCIDR("10.0.2.0/24") - if 
err := CheckRouteOverlaps(netX); err == nil { - t.Fatalf("10.0.2.0/24 and 10.0.2.0 should overlap but it doesn't") - } -} - -func TestCheckNameserverOverlaps(t *testing.T) { - nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"} - - _, netX, _ := net.ParseCIDR("10.0.2.3/32") - - if err := CheckNameserverOverlaps(nameservers, netX); err == nil { - t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX) - } - - _, netX, _ = net.ParseCIDR("192.168.102.2/32") - - if err := CheckNameserverOverlaps(nameservers, netX); err != nil { - t.Fatalf("%s should not overlap %v but it does", netX, nameservers) - } -} - -func AssertOverlap(CIDRx string, CIDRy string, t *testing.T) { - _, netX, _ := net.ParseCIDR(CIDRx) - _, netY, _ := net.ParseCIDR(CIDRy) - if !NetworkOverlaps(netX, netY) { - t.Errorf("%v and %v should overlap", netX, netY) - } -} - -func AssertNoOverlap(CIDRx string, CIDRy string, t *testing.T) { - _, netX, _ := net.ParseCIDR(CIDRx) - _, netY, _ := net.ParseCIDR(CIDRy) - if NetworkOverlaps(netX, netY) { - t.Errorf("%v and %v should not overlap", netX, netY) - } -} - -func TestNetworkOverlaps(t *testing.T) { - //netY starts at same IP and ends within netX - AssertOverlap("172.16.0.1/24", "172.16.0.1/25", t) - //netY starts within netX and ends at same IP - AssertOverlap("172.16.0.1/24", "172.16.0.128/25", t) - //netY starts and ends within netX - AssertOverlap("172.16.0.1/24", "172.16.0.64/25", t) - //netY starts at same IP and ends outside of netX - AssertOverlap("172.16.0.1/24", "172.16.0.1/23", t) - //netY starts before and ends at same IP of netX - AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) - //netY starts before and ends outside of netX - AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t) - //netY starts and ends before netX - AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t) - //netX starts and ends before netY - AssertNoOverlap("172.16.1.1/25", "172.16.2.1/24", t) -} - -func TestNetworkRange(t *testing.T) { - // Simple class C test - _, network, _ := net.ParseCIDR("192.168.0.1/24") - first, last := NetworkRange(network) - if !first.Equal(net.ParseIP("192.168.0.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("192.168.0.255")) { - t.Error(last.String()) - } - if size := NetworkSize(network.Mask); size != 256 { - t.Error(size) - } - - // Class A test - _, network, _ = net.ParseCIDR("10.0.0.1/8") - first, last = NetworkRange(network) - if !first.Equal(net.ParseIP("10.0.0.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.255.255.255")) { - t.Error(last.String()) - } - if size := NetworkSize(network.Mask); size != 16777216 { - t.Error(size) - } - - // Class A, random IP address - _, network, _ = net.ParseCIDR("10.1.2.3/8") - first, last = NetworkRange(network) - if !first.Equal(net.ParseIP("10.0.0.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.255.255.255")) { - t.Error(last.String()) - } - - // 32bit mask - _, network, _ = net.ParseCIDR("10.1.2.3/32") - first, last = NetworkRange(network) - if !first.Equal(net.ParseIP("10.1.2.3")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.1.2.3")) { - t.Error(last.String()) - } - if size := NetworkSize(network.Mask); size != 1 { - t.Error(size) - } - - // 31bit mask - _, network, _ = net.ParseCIDR("10.1.2.3/31") - first, last = NetworkRange(network) - if !first.Equal(net.ParseIP("10.1.2.2")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.1.2.3")) { - t.Error(last.String()) - } - if size := NetworkSize(network.Mask); size != 2 { - 
t.Error(size) - } - - // 26bit mask - _, network, _ = net.ParseCIDR("10.1.2.3/26") - first, last = NetworkRange(network) - if !first.Equal(net.ParseIP("10.1.2.0")) { - t.Error(first.String()) - } - if !last.Equal(net.ParseIP("10.1.2.63")) { - t.Error(last.String()) - } - if size := NetworkSize(network.Mask); size != 64 { - t.Error(size) - } -} diff --git a/networkdriver/portallocator/portallocator.go b/networkdriver/portallocator/portallocator.go deleted file mode 100644 index 71cac82703..0000000000 --- a/networkdriver/portallocator/portallocator.go +++ /dev/null @@ -1,165 +0,0 @@ -package portallocator - -import ( - "errors" - "github.com/dotcloud/docker/pkg/collections" - "net" - "sync" -) - -const ( - BeginPortRange = 49153 - EndPortRange = 65535 -) - -type ( - portMappings map[string]*collections.OrderedIntSet - ipMapping map[string]portMappings -) - -var ( - ErrPortAlreadyAllocated = errors.New("port has already been allocated") - ErrPortExceedsRange = errors.New("port exceeds upper range") - ErrUnknownProtocol = errors.New("unknown protocol") -) - -var ( - currentDynamicPort = map[string]int{ - "tcp": BeginPortRange - 1, - "udp": BeginPortRange - 1, - } - defaultIP = net.ParseIP("0.0.0.0") - defaultAllocatedPorts = portMappings{} - otherAllocatedPorts = ipMapping{} - lock = sync.Mutex{} -) - -func init() { - defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() - defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() -} - -// RequestPort returns an available port if the port is 0 -// If the provided port is not 0 then it will be checked if -// it is available for allocation -func RequestPort(ip net.IP, proto string, port int) (int, error) { - lock.Lock() - defer lock.Unlock() - - if err := validateProtocol(proto); err != nil { - return 0, err - } - - // If the user requested a specific port to be allocated - if port > 0 { - if err := registerSetPort(ip, proto, port); err != nil { - return 0, err - } - return port, nil - } - return registerDynamicPort(ip, proto) -} - -// ReleasePort will return the provided port back into the -// pool for reuse -func ReleasePort(ip net.IP, proto string, port int) error { - lock.Lock() - defer lock.Unlock() - - if err := validateProtocol(proto); err != nil { - return err - } - - allocated := defaultAllocatedPorts[proto] - allocated.Remove(port) - - if !equalsDefault(ip) { - registerIP(ip) - - // Remove the port for the specific ip address - allocated = otherAllocatedPorts[ip.String()][proto] - allocated.Remove(port) - } - return nil -} - -func ReleaseAll() error { - lock.Lock() - defer lock.Unlock() - - currentDynamicPort["tcp"] = BeginPortRange - 1 - currentDynamicPort["udp"] = BeginPortRange - 1 - - defaultAllocatedPorts = portMappings{} - defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() - defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() - - otherAllocatedPorts = ipMapping{} - - return nil -} - -func registerDynamicPort(ip net.IP, proto string) (int, error) { - allocated := defaultAllocatedPorts[proto] - - port := nextPort(proto) - if port > EndPortRange { - return 0, ErrPortExceedsRange - } - - if !equalsDefault(ip) { - registerIP(ip) - - ipAllocated := otherAllocatedPorts[ip.String()][proto] - ipAllocated.Push(port) - } else { - allocated.Push(port) - } - return port, nil -} - -func registerSetPort(ip net.IP, proto string, port int) error { - allocated := defaultAllocatedPorts[proto] - if allocated.Exists(port) { - return ErrPortAlreadyAllocated - } - - if !equalsDefault(ip) { - registerIP(ip) - - 
ipAllocated := otherAllocatedPorts[ip.String()][proto] - if ipAllocated.Exists(port) { - return ErrPortAlreadyAllocated - } - ipAllocated.Push(port) - } else { - allocated.Push(port) - } - return nil -} - -func equalsDefault(ip net.IP) bool { - return ip == nil || ip.Equal(defaultIP) -} - -func nextPort(proto string) int { - c := currentDynamicPort[proto] + 1 - currentDynamicPort[proto] = c - return c -} - -func registerIP(ip net.IP) { - if _, exists := otherAllocatedPorts[ip.String()]; !exists { - otherAllocatedPorts[ip.String()] = portMappings{ - "tcp": collections.NewOrderedIntSet(), - "udp": collections.NewOrderedIntSet(), - } - } -} - -func validateProtocol(proto string) error { - if _, exists := defaultAllocatedPorts[proto]; !exists { - return ErrUnknownProtocol - } - return nil -} diff --git a/networkdriver/portallocator/portallocator_test.go b/networkdriver/portallocator/portallocator_test.go deleted file mode 100644 index 603bd03bd7..0000000000 --- a/networkdriver/portallocator/portallocator_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package portallocator - -import ( - "net" - "testing" -) - -func reset() { - ReleaseAll() -} - -func TestRequestNewPort(t *testing.T) { - defer reset() - - port, err := RequestPort(defaultIP, "tcp", 0) - if err != nil { - t.Fatal(err) - } - - if expected := BeginPortRange; port != expected { - t.Fatalf("Expected port %d got %d", expected, port) - } -} - -func TestRequestSpecificPort(t *testing.T) { - defer reset() - - port, err := RequestPort(defaultIP, "tcp", 5000) - if err != nil { - t.Fatal(err) - } - if port != 5000 { - t.Fatalf("Expected port 5000 got %d", port) - } -} - -func TestReleasePort(t *testing.T) { - defer reset() - - port, err := RequestPort(defaultIP, "tcp", 5000) - if err != nil { - t.Fatal(err) - } - if port != 5000 { - t.Fatalf("Expected port 5000 got %d", port) - } - - if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { - t.Fatal(err) - } -} - -func TestReuseReleasedPort(t *testing.T) { - defer reset() - - port, err := RequestPort(defaultIP, "tcp", 5000) - if err != nil { - t.Fatal(err) - } - if port != 5000 { - t.Fatalf("Expected port 5000 got %d", port) - } - - if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { - t.Fatal(err) - } - - port, err = RequestPort(defaultIP, "tcp", 5000) - if err != nil { - t.Fatal(err) - } -} - -func TestReleaseUnreadledPort(t *testing.T) { - defer reset() - - port, err := RequestPort(defaultIP, "tcp", 5000) - if err != nil { - t.Fatal(err) - } - if port != 5000 { - t.Fatalf("Expected port 5000 got %d", port) - } - - port, err = RequestPort(defaultIP, "tcp", 5000) - if err != ErrPortAlreadyAllocated { - t.Fatalf("Expected error %s got %s", ErrPortAlreadyAllocated, err) - } -} - -func TestUnknowProtocol(t *testing.T) { - defer reset() - - if _, err := RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol { - t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err) - } -} - -func TestAllocateAllPorts(t *testing.T) { - defer reset() - - for i := 0; i <= EndPortRange-BeginPortRange; i++ { - port, err := RequestPort(defaultIP, "tcp", 0) - if err != nil { - t.Fatal(err) - } - - if expected := BeginPortRange + i; port != expected { - t.Fatalf("Expected port %d got %d", expected, port) - } - } - - if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrPortExceedsRange { - t.Fatalf("Expected error %s got %s", ErrPortExceedsRange, err) - } - - _, err := RequestPort(defaultIP, "udp", 0) - if err != nil { - t.Fatal(err) - } -} - -func BenchmarkAllocatePorts(b *testing.B) { - defer 
reset() - - for i := 0; i < b.N; i++ { - for i := 0; i <= EndPortRange-BeginPortRange; i++ { - port, err := RequestPort(defaultIP, "tcp", 0) - if err != nil { - b.Fatal(err) - } - - if expected := BeginPortRange + i; port != expected { - b.Fatalf("Expected port %d got %d", expected, port) - } - } - reset() - } -} - -func TestPortAllocation(t *testing.T) { - defer reset() - - ip := net.ParseIP("192.168.0.1") - ip2 := net.ParseIP("192.168.0.2") - if port, err := RequestPort(ip, "tcp", 80); err != nil { - t.Fatal(err) - } else if port != 80 { - t.Fatalf("Acquire(80) should return 80, not %d", port) - } - port, err := RequestPort(ip, "tcp", 0) - if err != nil { - t.Fatal(err) - } - if port <= 0 { - t.Fatalf("Acquire(0) should return a non-zero port") - } - - if _, err := RequestPort(ip, "tcp", port); err == nil { - t.Fatalf("Acquiring a port already in use should return an error") - } - - if newPort, err := RequestPort(ip, "tcp", 0); err != nil { - t.Fatal(err) - } else if newPort == port { - t.Fatalf("Acquire(0) allocated the same port twice: %d", port) - } - - if _, err := RequestPort(ip, "tcp", 80); err == nil { - t.Fatalf("Acquiring a port already in use should return an error") - } - if _, err := RequestPort(ip2, "tcp", 80); err != nil { - t.Fatalf("It should be possible to allocate the same port on a different interface") - } - if _, err := RequestPort(ip2, "tcp", 80); err == nil { - t.Fatalf("Acquiring a port already in use should return an error") - } - if err := ReleasePort(ip, "tcp", 80); err != nil { - t.Fatal(err) - } - if _, err := RequestPort(ip, "tcp", 80); err != nil { - t.Fatal(err) - } -} diff --git a/networkdriver/portmapper/mapper.go b/networkdriver/portmapper/mapper.go deleted file mode 100644 index e29959a245..0000000000 --- a/networkdriver/portmapper/mapper.go +++ /dev/null @@ -1,131 +0,0 @@ -package portmapper - -import ( - "errors" - "fmt" - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/proxy" - "net" - "sync" -) - -type mapping struct { - proto string - userlandProxy proxy.Proxy - host net.Addr - container net.Addr -} - -var ( - chain *iptables.Chain - lock sync.Mutex - - // udp:ip:port - currentMappings = make(map[string]*mapping) - newProxy = proxy.NewProxy -) - -var ( - ErrUnknownBackendAddressType = errors.New("unknown container address type not supported") - ErrPortMappedForIP = errors.New("port is already mapped to ip") - ErrPortNotMapped = errors.New("port is not mapped") -) - -func SetIptablesChain(c *iptables.Chain) { - chain = c -} - -func Map(container net.Addr, hostIP net.IP, hostPort int) error { - lock.Lock() - defer lock.Unlock() - - var m *mapping - switch container.(type) { - case *net.TCPAddr: - m = &mapping{ - proto: "tcp", - host: &net.TCPAddr{IP: hostIP, Port: hostPort}, - container: container, - } - case *net.UDPAddr: - m = &mapping{ - proto: "udp", - host: &net.UDPAddr{IP: hostIP, Port: hostPort}, - container: container, - } - default: - return ErrUnknownBackendAddressType - } - - key := getKey(m.host) - if _, exists := currentMappings[key]; exists { - return ErrPortMappedForIP - } - - containerIP, containerPort := getIPAndPort(m.container) - if err := forward(iptables.Add, m.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { - return err - } - - p, err := newProxy(m.host, m.container) - if err != nil { - // need to undo the iptables rules before we reutrn - forward(iptables.Delete, m.proto, hostIP, hostPort, containerIP.String(), containerPort) - return err - } - - m.userlandProxy = p - 
currentMappings[key] = m - - go p.Run() - - return nil -} - -func Unmap(host net.Addr) error { - lock.Lock() - defer lock.Unlock() - - key := getKey(host) - data, exists := currentMappings[key] - if !exists { - return ErrPortNotMapped - } - - data.userlandProxy.Close() - delete(currentMappings, key) - - containerIP, containerPort := getIPAndPort(data.container) - hostIP, hostPort := getIPAndPort(data.host) - if err := forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { - return err - } - return nil -} - -func getKey(a net.Addr) string { - switch t := a.(type) { - case *net.TCPAddr: - return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp") - case *net.UDPAddr: - return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp") - } - return "" -} - -func getIPAndPort(a net.Addr) (net.IP, int) { - switch t := a.(type) { - case *net.TCPAddr: - return t.IP, t.Port - case *net.UDPAddr: - return t.IP, t.Port - } - return nil, 0 -} - -func forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error { - if chain == nil { - return nil - } - return chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort) -} diff --git a/networkdriver/portmapper/mapper_test.go b/networkdriver/portmapper/mapper_test.go deleted file mode 100644 index 4c09f3c651..0000000000 --- a/networkdriver/portmapper/mapper_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package portmapper - -import ( - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/proxy" - "net" - "testing" -) - -func init() { - // override this func to mock out the proxy server - newProxy = proxy.NewStubProxy -} - -func reset() { - chain = nil - currentMappings = make(map[string]*mapping) -} - -func TestSetIptablesChain(t *testing.T) { - defer reset() - - c := &iptables.Chain{ - Name: "TEST", - Bridge: "192.168.1.1", - } - - if chain != nil { - t.Fatal("chain should be nil at init") - } - - SetIptablesChain(c) - if chain == nil { - t.Fatal("chain should not be nil after set") - } -} - -func TestMapPorts(t *testing.T) { - dstIp1 := net.ParseIP("192.168.0.1") - dstIp2 := net.ParseIP("192.168.0.2") - dstAddr1 := &net.TCPAddr{IP: dstIp1, Port: 80} - dstAddr2 := &net.TCPAddr{IP: dstIp2, Port: 80} - - srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} - srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")} - - if err := Map(srcAddr1, dstIp1, 80); err != nil { - t.Fatalf("Failed to allocate port: %s", err) - } - - if Map(srcAddr1, dstIp1, 80) == nil { - t.Fatalf("Port is in use - mapping should have failed") - } - - if Map(srcAddr2, dstIp1, 80) == nil { - t.Fatalf("Port is in use - mapping should have failed") - } - - if err := Map(srcAddr2, dstIp2, 80); err != nil { - t.Fatalf("Failed to allocate port: %s", err) - } - - if Unmap(dstAddr1) != nil { - t.Fatalf("Failed to release port") - } - - if Unmap(dstAddr2) != nil { - t.Fatalf("Failed to release port") - } - - if Unmap(dstAddr2) == nil { - t.Fatalf("Port already released, but no error reported") - } -} - -func TestGetUDPKey(t *testing.T) { - addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} - - key := getKey(addr) - - if expected := "192.168.1.5:53/udp"; key != expected { - t.Fatalf("expected key %s got %s", expected, key) - } -} - -func TestGetTCPKey(t *testing.T) { - addr := &net.TCPAddr{IP: net.ParseIP("192.168.1.5"), Port: 80} - - key := getKey(addr) - - if expected := "192.168.1.5:80/tcp"; key != expected { - 
t.Fatalf("expected key %s got %s", expected, key) - } -} - -func TestGetUDPIPAndPort(t *testing.T) { - addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} - - ip, port := getIPAndPort(addr) - if expected := "192.168.1.5"; ip.String() != expected { - t.Fatalf("expected ip %s got %s", expected, ip) - } - - if ep := 53; port != ep { - t.Fatalf("expected port %d got %d", ep, port) - } -} diff --git a/networkdriver/utils.go b/networkdriver/utils.go deleted file mode 100644 index 0a4ef70c95..0000000000 --- a/networkdriver/utils.go +++ /dev/null @@ -1,118 +0,0 @@ -package networkdriver - -import ( - "encoding/binary" - "errors" - "fmt" - "net" - - "github.com/dotcloud/docker/pkg/netlink" -) - -var ( - networkGetRoutesFct = netlink.NetworkGetRoutes - ErrNoDefaultRoute = errors.New("no default route") -) - -func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error { - if len(nameservers) > 0 { - for _, ns := range nameservers { - _, nsNetwork, err := net.ParseCIDR(ns) - if err != nil { - return err - } - if NetworkOverlaps(toCheck, nsNetwork) { - return ErrNetworkOverlapsWithNameservers - } - } - } - return nil -} - -func CheckRouteOverlaps(toCheck *net.IPNet) error { - networks, err := networkGetRoutesFct() - if err != nil { - return err - } - - for _, network := range networks { - if network.IPNet != nil && NetworkOverlaps(toCheck, network.IPNet) { - return ErrNetworkOverlaps - } - } - return nil -} - -// Detects overlap between one IPNet and another -func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { - if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) { - return true - } - if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) { - return true - } - return false -} - -// Calculates the first and last IP addresses in an IPNet -func NetworkRange(network *net.IPNet) (net.IP, net.IP) { - var ( - netIP = network.IP.To4() - firstIP = netIP.Mask(network.Mask) - lastIP = net.IPv4(0, 0, 0, 0).To4() - ) - - for i := 0; i < len(lastIP); i++ { - lastIP[i] = netIP[i] | ^network.Mask[i] - } - return firstIP, lastIP -} - -// Given a netmask, calculates the number of available hosts -func NetworkSize(mask net.IPMask) int32 { - m := net.IPv4Mask(0, 0, 0, 0) - for i := 0; i < net.IPv4len; i++ { - m[i] = ^mask[i] - } - return int32(binary.BigEndian.Uint32(m)) + 1 -} - -// Return the IPv4 address of a network interface -func GetIfaceAddr(name string) (net.Addr, error) { - iface, err := net.InterfaceByName(name) - if err != nil { - return nil, err - } - addrs, err := iface.Addrs() - if err != nil { - return nil, err - } - var addrs4 []net.Addr - for _, addr := range addrs { - ip := (addr.(*net.IPNet)).IP - if ip4 := ip.To4(); len(ip4) == net.IPv4len { - addrs4 = append(addrs4, addr) - } - } - switch { - case len(addrs4) == 0: - return nil, fmt.Errorf("Interface %v has no IP addresses", name) - case len(addrs4) > 1: - fmt.Printf("Interface %v has more than 1 IPv4 address. 
Defaulting to using %v\n", - name, (addrs4[0].(*net.IPNet)).IP) - } - return addrs4[0], nil -} - -func GetDefaultRouteIface() (*net.Interface, error) { - rs, err := networkGetRoutesFct() - if err != nil { - return nil, fmt.Errorf("unable to get routes: %v", err) - } - for _, r := range rs { - if r.Default { - return r.Iface, nil - } - } - return nil, ErrNoDefaultRoute -} diff --git a/runtime/networkdriver/ipallocator/allocator.go b/runtime/networkdriver/ipallocator/allocator.go new file mode 100644 index 0000000000..2950e37003 --- /dev/null +++ b/runtime/networkdriver/ipallocator/allocator.go @@ -0,0 +1,159 @@ +package ipallocator + +import ( + "encoding/binary" + "errors" + "github.com/dotcloud/docker/runtime/networkdriver" + "github.com/dotcloud/docker/pkg/collections" + "net" + "sync" +) + +type networkSet map[string]*collections.OrderedIntSet + +var ( + ErrNoAvailableIPs = errors.New("no available ip addresses on network") + ErrIPAlreadyAllocated = errors.New("ip already allocated") +) + +var ( + lock = sync.Mutex{} + allocatedIPs = networkSet{} + availableIPS = networkSet{} +) + +// RequestIP requests an available ip from the given network. It +// will return the next available ip if the ip provided is nil. If the +// ip provided is not nil it will validate that the provided ip is available +// for use or return an error +func RequestIP(address *net.IPNet, ip *net.IP) (*net.IP, error) { + lock.Lock() + defer lock.Unlock() + + checkAddress(address) + + if ip == nil { + next, err := getNextIp(address) + if err != nil { + return nil, err + } + return next, nil + } + + if err := registerIP(address, ip); err != nil { + return nil, err + } + return ip, nil +} + +// ReleaseIP adds the provided ip back into the pool of +// available ips to be returned for use. +func ReleaseIP(address *net.IPNet, ip *net.IP) error { + lock.Lock() + defer lock.Unlock() + + checkAddress(address) + + var ( + existing = allocatedIPs[address.String()] + available = availableIPS[address.String()] + pos = getPosition(address, ip) + ) + + existing.Remove(int(pos)) + available.Push(int(pos)) + + return nil +} + +// convert the ip into the position in the subnet. Only +// position are saved in the set +func getPosition(address *net.IPNet, ip *net.IP) int32 { + var ( + first, _ = networkdriver.NetworkRange(address) + base = ipToInt(&first) + i = ipToInt(ip) + ) + return i - base +} + +// return an available ip if one is currently available. 
If not, +// return the next available ip for the nextwork +func getNextIp(address *net.IPNet) (*net.IP, error) { + var ( + ownIP = ipToInt(&address.IP) + available = availableIPS[address.String()] + allocated = allocatedIPs[address.String()] + first, _ = networkdriver.NetworkRange(address) + base = ipToInt(&first) + size = int(networkdriver.NetworkSize(address.Mask)) + max = int32(size - 2) // size -1 for the broadcast address, -1 for the gateway address + pos = int32(available.Pop()) + ) + + // We pop and push the position not the ip + if pos != 0 { + ip := intToIP(int32(base + pos)) + allocated.Push(int(pos)) + + return ip, nil + } + + var ( + firstNetIP = address.IP.To4().Mask(address.Mask) + firstAsInt = ipToInt(&firstNetIP) + 1 + ) + + pos = int32(allocated.PullBack()) + for i := int32(0); i < max; i++ { + pos = pos%max + 1 + next := int32(base + pos) + + if next == ownIP || next == firstAsInt { + continue + } + + if !allocated.Exists(int(pos)) { + ip := intToIP(next) + allocated.Push(int(pos)) + return ip, nil + } + } + return nil, ErrNoAvailableIPs +} + +func registerIP(address *net.IPNet, ip *net.IP) error { + var ( + existing = allocatedIPs[address.String()] + available = availableIPS[address.String()] + pos = getPosition(address, ip) + ) + + if existing.Exists(int(pos)) { + return ErrIPAlreadyAllocated + } + available.Remove(int(pos)) + + return nil +} + +// Converts a 4 bytes IP into a 32 bit integer +func ipToInt(ip *net.IP) int32 { + return int32(binary.BigEndian.Uint32(ip.To4())) +} + +// Converts 32 bit integer into a 4 bytes IP address +func intToIP(n int32) *net.IP { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(n)) + ip := net.IP(b) + return &ip +} + +func checkAddress(address *net.IPNet) { + key := address.String() + if _, exists := allocatedIPs[key]; !exists { + allocatedIPs[key] = collections.NewOrderedIntSet() + availableIPS[key] = collections.NewOrderedIntSet() + } +} diff --git a/runtime/networkdriver/ipallocator/allocator_test.go b/runtime/networkdriver/ipallocator/allocator_test.go new file mode 100644 index 0000000000..5e9fcfc983 --- /dev/null +++ b/runtime/networkdriver/ipallocator/allocator_test.go @@ -0,0 +1,241 @@ +package ipallocator + +import ( + "fmt" + "net" + "testing" +) + +func reset() { + allocatedIPs = networkSet{} + availableIPS = networkSet{} +} + +func TestRequestNewIps(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + for i := 2; i < 10; i++ { + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if expected := fmt.Sprintf("192.168.0.%d", i); ip.String() != expected { + t.Fatalf("Expected ip %s got %s", expected, ip.String()) + } + } +} + +func TestReleaseIp(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } +} + +func TestGetReleasedIp(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + value := ip.String() + if err := ReleaseIP(network, ip); err != nil { + t.Fatal(err) + } + + ip, err = RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + if ip.String() != value { + t.Fatalf("Expected to receive same ip %s got %s", value, ip.String()) + } +} + +func 
TestRequesetSpecificIp(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + + ip := net.ParseIP("192.168.1.5") + + if _, err := RequestIP(network, &ip); err != nil { + t.Fatal(err) + } +} + +func TestConversion(t *testing.T) { + ip := net.ParseIP("127.0.0.1") + i := ipToInt(&ip) + if i == 0 { + t.Fatal("converted to zero") + } + conv := intToIP(i) + if !ip.Equal(*conv) { + t.Error(conv.String()) + } +} + +func TestIPAllocator(t *testing.T) { + expectedIPs := []net.IP{ + 0: net.IPv4(127, 0, 0, 2), + 1: net.IPv4(127, 0, 0, 3), + 2: net.IPv4(127, 0, 0, 4), + 3: net.IPv4(127, 0, 0, 5), + 4: net.IPv4(127, 0, 0, 6), + } + + gwIP, n, _ := net.ParseCIDR("127.0.0.1/29") + network := &net.IPNet{IP: gwIP, Mask: n.Mask} + // Pool after initialisation (f = free, u = used) + // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // Check that we get 5 IPs, from 127.0.0.2–127.0.0.6, in that + // order. + for i := 0; i < 5; i++ { + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + assertIPEquals(t, &expectedIPs[i], ip) + } + // Before loop begin + // 2(f) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 0 + // 2(u) - 3(f) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 1 + // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 2 + // 2(u) - 3(u) - 4(u) - 5(f) - 6(f) + // ↑ + + // After i = 3 + // 2(u) - 3(u) - 4(u) - 5(u) - 6(f) + // ↑ + + // After i = 4 + // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) + // ↑ + + // Check that there are no more IPs + ip, err := RequestIP(network, nil) + if err == nil { + t.Fatalf("There shouldn't be any IP addresses at this point, got %s\n", ip) + } + + // Release some IPs in non-sequential order + if err := ReleaseIP(network, &expectedIPs[3]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(u) - 5(f) - 6(u) + // ↑ + + if err := ReleaseIP(network, &expectedIPs[2]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(f) - 5(f) - 6(u) + // ↑ + + if err := ReleaseIP(network, &expectedIPs[4]); err != nil { + t.Fatal(err) + } + // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) + // ↑ + + // Make sure that IPs are reused in sequential order, starting + // with the first released IP + newIPs := make([]*net.IP, 3) + for i := 0; i < 3; i++ { + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + + newIPs[i] = ip + } + // Before loop begin + // 2(u) - 3(u) - 4(f) - 5(f) - 6(f) + // ↑ + + // After i = 0 + // 2(u) - 3(u) - 4(f) - 5(u) - 6(f) + // ↑ + + // After i = 1 + // 2(u) - 3(u) - 4(f) - 5(u) - 6(u) + // ↑ + + // After i = 2 + // 2(u) - 3(u) - 4(u) - 5(u) - 6(u) + // ↑ + + // Reordered these because the new set will always return the + // lowest ips first and not in the order that they were released + assertIPEquals(t, &expectedIPs[2], newIPs[0]) + assertIPEquals(t, &expectedIPs[3], newIPs[1]) + assertIPEquals(t, &expectedIPs[4], newIPs[2]) + + _, err = RequestIP(network, nil) + if err == nil { + t.Fatal("There shouldn't be any IP addresses at this point") + } +} + +func TestAllocateFirstIP(t *testing.T) { + defer reset() + network := &net.IPNet{ + IP: []byte{192, 168, 0, 0}, + Mask: []byte{255, 255, 255, 0}, + } + + firstIP := network.IP.To4().Mask(network.Mask) + first := ipToInt(&firstIP) + 1 + + ip, err := RequestIP(network, nil) + if err != nil { + t.Fatal(err) + } + allocated := ipToInt(ip) + + if allocated == first { + t.Fatalf("allocated ip should not equal first ip: %d == %d", first, allocated) + } +} + +func assertIPEquals(t *testing.T, ip1, ip2 *net.IP) { + if 
!ip1.Equal(*ip2) { + t.Fatalf("Expected IP %s, got %s", ip1, ip2) + } +} diff --git a/runtime/networkdriver/lxc/driver.go b/runtime/networkdriver/lxc/driver.go new file mode 100644 index 0000000000..746bcfb5b0 --- /dev/null +++ b/runtime/networkdriver/lxc/driver.go @@ -0,0 +1,482 @@ +package lxc + +import ( + "fmt" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runtime/networkdriver" + "github.com/dotcloud/docker/runtime/networkdriver/ipallocator" + "github.com/dotcloud/docker/runtime/networkdriver/portallocator" + "github.com/dotcloud/docker/runtime/networkdriver/portmapper" + "github.com/dotcloud/docker/pkg/iptables" + "github.com/dotcloud/docker/pkg/netlink" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "log" + "net" + "strings" + "syscall" + "unsafe" +) + +const ( + DefaultNetworkBridge = "docker0" + siocBRADDBR = 0x89a0 +) + +// Network interface represents the networking stack of a container +type networkInterface struct { + IP net.IP + PortMappings []net.Addr // there are mappings to the host interfaces +} + +var ( + addrs = []string{ + // Here we don't follow the convention of using the 1st IP of the range for the gateway. + // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges. + // In theory this shouldn't matter - in practice there's bound to be a few scripts relying + // on the internal addressing or other stupid things like that. + // The shouldn't, but hey, let's not break them unless we really have to. + "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23 + "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive + "10.1.42.1/16", + "10.42.42.1/16", + "172.16.42.1/24", + "172.16.43.1/24", + "172.16.44.1/24", + "10.0.42.1/24", + "10.0.43.1/24", + "192.168.42.1/24", + "192.168.43.1/24", + "192.168.44.1/24", + } + + bridgeIface string + bridgeNetwork *net.IPNet + + defaultBindingIP = net.ParseIP("0.0.0.0") + currentInterfaces = make(map[string]*networkInterface) +) + +func InitDriver(job *engine.Job) engine.Status { + var ( + network *net.IPNet + enableIPTables = job.GetenvBool("EnableIptables") + icc = job.GetenvBool("InterContainerCommunication") + ipForward = job.GetenvBool("EnableIpForward") + bridgeIP = job.Getenv("BridgeIP") + ) + + if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" { + defaultBindingIP = net.ParseIP(defaultIP) + } + + bridgeIface = job.Getenv("BridgeIface") + if bridgeIface == "" { + bridgeIface = DefaultNetworkBridge + } + + addr, err := networkdriver.GetIfaceAddr(bridgeIface) + if err != nil { + // If the iface is not found, try to create it + job.Logf("creating new bridge for %s", bridgeIface) + if err := createBridge(bridgeIP); err != nil { + job.Error(err) + return engine.StatusErr + } + + job.Logf("getting iface addr") + addr, err = networkdriver.GetIfaceAddr(bridgeIface) + if err != nil { + job.Error(err) + return engine.StatusErr + } + network = addr.(*net.IPNet) + } else { + network = addr.(*net.IPNet) + } + + // Configure iptables for link support + if enableIPTables { + if err := setupIPTables(addr, icc); err != nil { + job.Error(err) + return engine.StatusErr + } + } + + if ipForward { + // Enable IPv4 forwarding + if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil { + job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err) + } + } + + // We can always try removing the iptables + if err := iptables.RemoveExistingChain("DOCKER"); err != nil { + job.Error(err) + 
return engine.StatusErr + } + + if enableIPTables { + chain, err := iptables.NewChain("DOCKER", bridgeIface) + if err != nil { + job.Error(err) + return engine.StatusErr + } + portmapper.SetIptablesChain(chain) + } + + bridgeNetwork = network + + // https://github.com/dotcloud/docker/issues/2768 + job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP) + + for name, f := range map[string]engine.Handler{ + "allocate_interface": Allocate, + "release_interface": Release, + "allocate_port": AllocatePort, + "link": LinkContainers, + } { + if err := job.Eng.Register(name, f); err != nil { + job.Error(err) + return engine.StatusErr + } + } + return engine.StatusOK +} + +func setupIPTables(addr net.Addr, icc bool) error { + // Enable NAT + natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"} + + if !iptables.Exists(natArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { + return fmt.Errorf("Unable to enable network bridge NAT: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables postrouting: %s", output) + } + } + + var ( + args = []string{"FORWARD", "-i", bridgeIface, "-o", bridgeIface, "-j"} + acceptArgs = append(args, "ACCEPT") + dropArgs = append(args, "DROP") + ) + + if !icc { + iptables.Raw(append([]string{"-D"}, acceptArgs...)...) + + if !iptables.Exists(dropArgs...) { + utils.Debugf("Disable inter-container communication") + if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { + return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error disabling intercontainer communication: %s", output) + } + } + } else { + iptables.Raw(append([]string{"-D"}, dropArgs...)...) + + if !iptables.Exists(acceptArgs...) { + utils.Debugf("Enable inter-container communication") + if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil { + return fmt.Errorf("Unable to allow intercontainer communication: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error enabling intercontainer communication: %s", output) + } + } + } + + // Accept all non-intercontainer outgoing packets + outgoingArgs := []string{"FORWARD", "-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"} + if !iptables.Exists(outgoingArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { + return fmt.Errorf("Unable to allow outgoing packets: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables allow outgoing: %s", output) + } + } + + // Accept incoming packets for existing connections + existingArgs := []string{"FORWARD", "-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} + + if !iptables.Exists(existingArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { + return fmt.Errorf("Unable to allow incoming packets: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables allow incoming: %s", output) + } + } + return nil +} + +// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, +// and attempts to configure it with an address which doesn't conflict with any other interface on the host. +// If it can't find an address which doesn't conflict, it will return an error. 
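+// In outline (summary comment, derived from the code below): an explicit
+// BridgeIP, when supplied, is used as-is; otherwise the candidate ranges in
+// `addrs` are tried in order and the first one that overlaps neither the
+// host's nameservers nor an existing route is picked. The bridge device is
+// then created, the chosen address is assigned via netlink, and the
+// interface is brought up.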
+func createBridge(bridgeIP string) error {
+	nameservers := []string{}
+	resolvConf, _ := utils.GetResolvConf()
+	// we don't check for an error here, because we don't really care
+	// if we can't read /etc/resolv.conf. So instead we skip the append
+	// if resolvConf is nil. It either doesn't exist, or we can't read it
+	// for some reason.
+	if resolvConf != nil {
+		nameservers = append(nameservers, utils.GetNameserversAsCIDR(resolvConf)...)
+	}
+
+	var ifaceAddr string
+	if len(bridgeIP) != 0 {
+		_, _, err := net.ParseCIDR(bridgeIP)
+		if err != nil {
+			return err
+		}
+		ifaceAddr = bridgeIP
+	} else {
+		for _, addr := range addrs {
+			_, dockerNetwork, err := net.ParseCIDR(addr)
+			if err != nil {
+				return err
+			}
+			if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil {
+				if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil {
+					ifaceAddr = addr
+					break
+				} else {
+					utils.Debugf("%s %s", addr, err)
+				}
+			}
+		}
+	}
+
+	if ifaceAddr == "" {
+		return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface)
+	}
+	utils.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr)
+
+	if err := createBridgeIface(bridgeIface); err != nil {
+		return err
+	}
+
+	iface, err := net.InterfaceByName(bridgeIface)
+	if err != nil {
+		return err
+	}
+
+	ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr)
+	if err != nil {
+		return err
+	}
+
+	if err := netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil {
+		return fmt.Errorf("Unable to add private network: %s", err)
+	}
+	if err := netlink.NetworkLinkUp(iface); err != nil {
+		return fmt.Errorf("Unable to start network bridge: %s", err)
+	}
+	return nil
+}
+
+// Create the actual bridge device. This is more backward-compatible than
+// netlink.NetworkLinkAdd and works on RHEL 6.
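+// In outline (summary comment, derived from the code below): it opens an
+// AF_INET6 stream socket, falling back to AF_INET when IPv6 is not enabled,
+// and issues the SIOCBRADDBR ioctl (siocBRADDBR = 0x89a0) with the bridge
+// name, which is what `brctl addbr` does under the hood.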
+func createBridgeIface(name string) error { + s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP) + if err != nil { + utils.Debugf("Bridge socket creation failed IPv6 probably not enabled: %v", err) + s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP) + if err != nil { + return fmt.Errorf("Error creating bridge creation socket: %s", err) + } + } + defer syscall.Close(s) + + nameBytePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return fmt.Errorf("Error converting bridge name %s to byte array: %s", name, err) + } + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), siocBRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { + return fmt.Errorf("Error creating bridge: %s", err) + } + return nil +} + +// Allocate a network interface +func Allocate(job *engine.Job) engine.Status { + var ( + ip *net.IP + err error + id = job.Args[0] + requestedIP = net.ParseIP(job.Getenv("RequestedIP")) + ) + + if requestedIP != nil { + ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP) + } else { + ip, err = ipallocator.RequestIP(bridgeNetwork, nil) + } + if err != nil { + job.Error(err) + return engine.StatusErr + } + + out := engine.Env{} + out.Set("IP", ip.String()) + out.Set("Mask", bridgeNetwork.Mask.String()) + out.Set("Gateway", bridgeNetwork.IP.String()) + out.Set("Bridge", bridgeIface) + + size, _ := bridgeNetwork.Mask.Size() + out.SetInt("IPPrefixLen", size) + + currentInterfaces[id] = &networkInterface{ + IP: *ip, + } + + out.WriteTo(job.Stdout) + + return engine.StatusOK +} + +// release an interface for a select ip +func Release(job *engine.Job) engine.Status { + var ( + id = job.Args[0] + containerInterface = currentInterfaces[id] + ip net.IP + port int + proto string + ) + + if containerInterface == nil { + return job.Errorf("No network information to release for %s", id) + } + + for _, nat := range containerInterface.PortMappings { + if err := portmapper.Unmap(nat); err != nil { + log.Printf("Unable to unmap port %s: %s", nat, err) + } + + // this is host mappings + switch a := nat.(type) { + case *net.TCPAddr: + proto = "tcp" + ip = a.IP + port = a.Port + case *net.UDPAddr: + proto = "udp" + ip = a.IP + port = a.Port + } + + if err := portallocator.ReleasePort(ip, proto, port); err != nil { + log.Printf("Unable to release port %s", nat) + } + } + + if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil { + log.Printf("Unable to release ip %s\n", err) + } + return engine.StatusOK +} + +// Allocate an external port and map it to the interface +func AllocatePort(job *engine.Job) engine.Status { + var ( + err error + + ip = defaultBindingIP + id = job.Args[0] + hostIP = job.Getenv("HostIP") + hostPort = job.GetenvInt("HostPort") + containerPort = job.GetenvInt("ContainerPort") + proto = job.Getenv("Proto") + network = currentInterfaces[id] + ) + + if hostIP != "" { + ip = net.ParseIP(hostIP) + } + + // host ip, proto, and host port + hostPort, err = portallocator.RequestPort(ip, proto, hostPort) + if err != nil { + job.Error(err) + return engine.StatusErr + } + + var ( + container net.Addr + host net.Addr + ) + + if proto == "tcp" { + host = &net.TCPAddr{IP: ip, Port: hostPort} + container = &net.TCPAddr{IP: network.IP, Port: containerPort} + } else { + host = &net.UDPAddr{IP: ip, Port: hostPort} + container = &net.UDPAddr{IP: network.IP, Port: containerPort} + } + + if err := portmapper.Map(container, ip, hostPort); err != nil { + portallocator.ReleasePort(ip, 
proto, hostPort) + + job.Error(err) + return engine.StatusErr + } + network.PortMappings = append(network.PortMappings, host) + + out := engine.Env{} + out.Set("HostIP", ip.String()) + out.SetInt("HostPort", hostPort) + + if _, err := out.WriteTo(job.Stdout); err != nil { + job.Error(err) + return engine.StatusErr + } + return engine.StatusOK +} + +func LinkContainers(job *engine.Job) engine.Status { + var ( + action = job.Args[0] + childIP = job.Getenv("ChildIP") + parentIP = job.Getenv("ParentIP") + ignoreErrors = job.GetenvBool("IgnoreErrors") + ports = job.GetenvList("Ports") + ) + split := func(p string) (string, string) { + parts := strings.Split(p, "/") + return parts[0], parts[1] + } + + for _, p := range ports { + port, proto := split(p) + if output, err := iptables.Raw(action, "FORWARD", + "-i", bridgeIface, "-o", bridgeIface, + "-p", proto, + "-s", parentIP, + "--dport", port, + "-d", childIP, + "-j", "ACCEPT"); !ignoreErrors && err != nil { + job.Error(err) + return engine.StatusErr + } else if len(output) != 0 { + job.Errorf("Error toggle iptables forward: %s", output) + return engine.StatusErr + } + + if output, err := iptables.Raw(action, "FORWARD", + "-i", bridgeIface, "-o", bridgeIface, + "-p", proto, + "-s", childIP, + "--sport", port, + "-d", parentIP, + "-j", "ACCEPT"); !ignoreErrors && err != nil { + job.Error(err) + return engine.StatusErr + } else if len(output) != 0 { + job.Errorf("Error toggle iptables forward: %s", output) + return engine.StatusErr + } + } + return engine.StatusOK +} diff --git a/runtime/networkdriver/network.go b/runtime/networkdriver/network.go new file mode 100644 index 0000000000..8dda789d2f --- /dev/null +++ b/runtime/networkdriver/network.go @@ -0,0 +1,10 @@ +package networkdriver + +import ( + "errors" +) + +var ( + ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver") + ErrNetworkOverlaps = errors.New("requested network overlaps with existing network") +) diff --git a/runtime/networkdriver/network_test.go b/runtime/networkdriver/network_test.go new file mode 100644 index 0000000000..6224c2dffb --- /dev/null +++ b/runtime/networkdriver/network_test.go @@ -0,0 +1,190 @@ +package networkdriver + +import ( + "github.com/dotcloud/docker/pkg/netlink" + "net" + "testing" +) + +func TestNonOverlapingNameservers(t *testing.T) { + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + nameservers := []string{ + "127.0.0.1/32", + } + + if err := CheckNameserverOverlaps(nameservers, network); err != nil { + t.Fatal(err) + } +} + +func TestOverlapingNameservers(t *testing.T) { + network := &net.IPNet{ + IP: []byte{192, 168, 0, 1}, + Mask: []byte{255, 255, 255, 0}, + } + nameservers := []string{ + "192.168.0.1/32", + } + + if err := CheckNameserverOverlaps(nameservers, network); err == nil { + t.Fatalf("Expected error %s got %s", ErrNetworkOverlapsWithNameservers, err) + } +} + +func TestCheckRouteOverlaps(t *testing.T) { + orig := networkGetRoutesFct + defer func() { + networkGetRoutesFct = orig + }() + networkGetRoutesFct = func() ([]netlink.Route, error) { + routesData := []string{"10.0.2.0/32", "10.0.3.0/24", "10.0.42.0/24", "172.16.42.0/24", "192.168.142.0/24"} + + routes := []netlink.Route{} + for _, addr := range routesData { + _, netX, _ := net.ParseCIDR(addr) + routes = append(routes, netlink.Route{IPNet: netX}) + } + return routes, nil + } + + _, netX, _ := net.ParseCIDR("172.16.0.1/24") + if err := CheckRouteOverlaps(netX); err != nil { + t.Fatal(err) + } + + _, 
netX, _ = net.ParseCIDR("10.0.2.0/24") + if err := CheckRouteOverlaps(netX); err == nil { + t.Fatalf("10.0.2.0/24 and 10.0.2.0 should overlap but it doesn't") + } +} + +func TestCheckNameserverOverlaps(t *testing.T) { + nameservers := []string{"10.0.2.3/32", "192.168.102.1/32"} + + _, netX, _ := net.ParseCIDR("10.0.2.3/32") + + if err := CheckNameserverOverlaps(nameservers, netX); err == nil { + t.Fatalf("%s should overlap 10.0.2.3/32 but doesn't", netX) + } + + _, netX, _ = net.ParseCIDR("192.168.102.2/32") + + if err := CheckNameserverOverlaps(nameservers, netX); err != nil { + t.Fatalf("%s should not overlap %v but it does", netX, nameservers) + } +} + +func AssertOverlap(CIDRx string, CIDRy string, t *testing.T) { + _, netX, _ := net.ParseCIDR(CIDRx) + _, netY, _ := net.ParseCIDR(CIDRy) + if !NetworkOverlaps(netX, netY) { + t.Errorf("%v and %v should overlap", netX, netY) + } +} + +func AssertNoOverlap(CIDRx string, CIDRy string, t *testing.T) { + _, netX, _ := net.ParseCIDR(CIDRx) + _, netY, _ := net.ParseCIDR(CIDRy) + if NetworkOverlaps(netX, netY) { + t.Errorf("%v and %v should not overlap", netX, netY) + } +} + +func TestNetworkOverlaps(t *testing.T) { + //netY starts at same IP and ends within netX + AssertOverlap("172.16.0.1/24", "172.16.0.1/25", t) + //netY starts within netX and ends at same IP + AssertOverlap("172.16.0.1/24", "172.16.0.128/25", t) + //netY starts and ends within netX + AssertOverlap("172.16.0.1/24", "172.16.0.64/25", t) + //netY starts at same IP and ends outside of netX + AssertOverlap("172.16.0.1/24", "172.16.0.1/23", t) + //netY starts before and ends at same IP of netX + AssertOverlap("172.16.1.1/24", "172.16.0.1/23", t) + //netY starts before and ends outside of netX + AssertOverlap("172.16.1.1/24", "172.16.0.1/22", t) + //netY starts and ends before netX + AssertNoOverlap("172.16.1.1/25", "172.16.0.1/24", t) + //netX starts and ends before netY + AssertNoOverlap("172.16.1.1/25", "172.16.2.1/24", t) +} + +func TestNetworkRange(t *testing.T) { + // Simple class C test + _, network, _ := net.ParseCIDR("192.168.0.1/24") + first, last := NetworkRange(network) + if !first.Equal(net.ParseIP("192.168.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("192.168.0.255")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 256 { + t.Error(size) + } + + // Class A test + _, network, _ = net.ParseCIDR("10.0.0.1/8") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.0.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.255.255.255")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 16777216 { + t.Error(size) + } + + // Class A, random IP address + _, network, _ = net.ParseCIDR("10.1.2.3/8") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.0.0.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.255.255.255")) { + t.Error(last.String()) + } + + // 32bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/32") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.3")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.3")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 1 { + t.Error(size) + } + + // 31bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/31") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.2")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.3")) { + t.Error(last.String()) + } + if size := 
NetworkSize(network.Mask); size != 2 { + t.Error(size) + } + + // 26bit mask + _, network, _ = net.ParseCIDR("10.1.2.3/26") + first, last = NetworkRange(network) + if !first.Equal(net.ParseIP("10.1.2.0")) { + t.Error(first.String()) + } + if !last.Equal(net.ParseIP("10.1.2.63")) { + t.Error(last.String()) + } + if size := NetworkSize(network.Mask); size != 64 { + t.Error(size) + } +} diff --git a/runtime/networkdriver/portallocator/portallocator.go b/runtime/networkdriver/portallocator/portallocator.go new file mode 100644 index 0000000000..71cac82703 --- /dev/null +++ b/runtime/networkdriver/portallocator/portallocator.go @@ -0,0 +1,165 @@ +package portallocator + +import ( + "errors" + "github.com/dotcloud/docker/pkg/collections" + "net" + "sync" +) + +const ( + BeginPortRange = 49153 + EndPortRange = 65535 +) + +type ( + portMappings map[string]*collections.OrderedIntSet + ipMapping map[string]portMappings +) + +var ( + ErrPortAlreadyAllocated = errors.New("port has already been allocated") + ErrPortExceedsRange = errors.New("port exceeds upper range") + ErrUnknownProtocol = errors.New("unknown protocol") +) + +var ( + currentDynamicPort = map[string]int{ + "tcp": BeginPortRange - 1, + "udp": BeginPortRange - 1, + } + defaultIP = net.ParseIP("0.0.0.0") + defaultAllocatedPorts = portMappings{} + otherAllocatedPorts = ipMapping{} + lock = sync.Mutex{} +) + +func init() { + defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() + defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() +} + +// RequestPort returns an available port if the port is 0 +// If the provided port is not 0 then it will be checked if +// it is available for allocation +func RequestPort(ip net.IP, proto string, port int) (int, error) { + lock.Lock() + defer lock.Unlock() + + if err := validateProtocol(proto); err != nil { + return 0, err + } + + // If the user requested a specific port to be allocated + if port > 0 { + if err := registerSetPort(ip, proto, port); err != nil { + return 0, err + } + return port, nil + } + return registerDynamicPort(ip, proto) +} + +// ReleasePort will return the provided port back into the +// pool for reuse +func ReleasePort(ip net.IP, proto string, port int) error { + lock.Lock() + defer lock.Unlock() + + if err := validateProtocol(proto); err != nil { + return err + } + + allocated := defaultAllocatedPorts[proto] + allocated.Remove(port) + + if !equalsDefault(ip) { + registerIP(ip) + + // Remove the port for the specific ip address + allocated = otherAllocatedPorts[ip.String()][proto] + allocated.Remove(port) + } + return nil +} + +func ReleaseAll() error { + lock.Lock() + defer lock.Unlock() + + currentDynamicPort["tcp"] = BeginPortRange - 1 + currentDynamicPort["udp"] = BeginPortRange - 1 + + defaultAllocatedPorts = portMappings{} + defaultAllocatedPorts["tcp"] = collections.NewOrderedIntSet() + defaultAllocatedPorts["udp"] = collections.NewOrderedIntSet() + + otherAllocatedPorts = ipMapping{} + + return nil +} + +func registerDynamicPort(ip net.IP, proto string) (int, error) { + allocated := defaultAllocatedPorts[proto] + + port := nextPort(proto) + if port > EndPortRange { + return 0, ErrPortExceedsRange + } + + if !equalsDefault(ip) { + registerIP(ip) + + ipAllocated := otherAllocatedPorts[ip.String()][proto] + ipAllocated.Push(port) + } else { + allocated.Push(port) + } + return port, nil +} + +func registerSetPort(ip net.IP, proto string, port int) error { + allocated := defaultAllocatedPorts[proto] + if allocated.Exists(port) { + return ErrPortAlreadyAllocated + 
} + + if !equalsDefault(ip) { + registerIP(ip) + + ipAllocated := otherAllocatedPorts[ip.String()][proto] + if ipAllocated.Exists(port) { + return ErrPortAlreadyAllocated + } + ipAllocated.Push(port) + } else { + allocated.Push(port) + } + return nil +} + +func equalsDefault(ip net.IP) bool { + return ip == nil || ip.Equal(defaultIP) +} + +func nextPort(proto string) int { + c := currentDynamicPort[proto] + 1 + currentDynamicPort[proto] = c + return c +} + +func registerIP(ip net.IP) { + if _, exists := otherAllocatedPorts[ip.String()]; !exists { + otherAllocatedPorts[ip.String()] = portMappings{ + "tcp": collections.NewOrderedIntSet(), + "udp": collections.NewOrderedIntSet(), + } + } +} + +func validateProtocol(proto string) error { + if _, exists := defaultAllocatedPorts[proto]; !exists { + return ErrUnknownProtocol + } + return nil +} diff --git a/runtime/networkdriver/portallocator/portallocator_test.go b/runtime/networkdriver/portallocator/portallocator_test.go new file mode 100644 index 0000000000..603bd03bd7 --- /dev/null +++ b/runtime/networkdriver/portallocator/portallocator_test.go @@ -0,0 +1,184 @@ +package portallocator + +import ( + "net" + "testing" +) + +func reset() { + ReleaseAll() +} + +func TestRequestNewPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 0) + if err != nil { + t.Fatal(err) + } + + if expected := BeginPortRange; port != expected { + t.Fatalf("Expected port %d got %d", expected, port) + } +} + +func TestRequestSpecificPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } +} + +func TestReleasePort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } + + if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { + t.Fatal(err) + } +} + +func TestReuseReleasedPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } + + if err := ReleasePort(defaultIP, "tcp", 5000); err != nil { + t.Fatal(err) + } + + port, err = RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } +} + +func TestReleaseUnreadledPort(t *testing.T) { + defer reset() + + port, err := RequestPort(defaultIP, "tcp", 5000) + if err != nil { + t.Fatal(err) + } + if port != 5000 { + t.Fatalf("Expected port 5000 got %d", port) + } + + port, err = RequestPort(defaultIP, "tcp", 5000) + if err != ErrPortAlreadyAllocated { + t.Fatalf("Expected error %s got %s", ErrPortAlreadyAllocated, err) + } +} + +func TestUnknowProtocol(t *testing.T) { + defer reset() + + if _, err := RequestPort(defaultIP, "tcpp", 0); err != ErrUnknownProtocol { + t.Fatalf("Expected error %s got %s", ErrUnknownProtocol, err) + } +} + +func TestAllocateAllPorts(t *testing.T) { + defer reset() + + for i := 0; i <= EndPortRange-BeginPortRange; i++ { + port, err := RequestPort(defaultIP, "tcp", 0) + if err != nil { + t.Fatal(err) + } + + if expected := BeginPortRange + i; port != expected { + t.Fatalf("Expected port %d got %d", expected, port) + } + } + + if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrPortExceedsRange { + t.Fatalf("Expected error %s got %s", ErrPortExceedsRange, err) + } + + _, err := RequestPort(defaultIP, "udp", 0) + if err != nil { + 
t.Fatal(err) + } +} + +func BenchmarkAllocatePorts(b *testing.B) { + defer reset() + + for i := 0; i < b.N; i++ { + for i := 0; i <= EndPortRange-BeginPortRange; i++ { + port, err := RequestPort(defaultIP, "tcp", 0) + if err != nil { + b.Fatal(err) + } + + if expected := BeginPortRange + i; port != expected { + b.Fatalf("Expected port %d got %d", expected, port) + } + } + reset() + } +} + +func TestPortAllocation(t *testing.T) { + defer reset() + + ip := net.ParseIP("192.168.0.1") + ip2 := net.ParseIP("192.168.0.2") + if port, err := RequestPort(ip, "tcp", 80); err != nil { + t.Fatal(err) + } else if port != 80 { + t.Fatalf("Acquire(80) should return 80, not %d", port) + } + port, err := RequestPort(ip, "tcp", 0) + if err != nil { + t.Fatal(err) + } + if port <= 0 { + t.Fatalf("Acquire(0) should return a non-zero port") + } + + if _, err := RequestPort(ip, "tcp", port); err == nil { + t.Fatalf("Acquiring a port already in use should return an error") + } + + if newPort, err := RequestPort(ip, "tcp", 0); err != nil { + t.Fatal(err) + } else if newPort == port { + t.Fatalf("Acquire(0) allocated the same port twice: %d", port) + } + + if _, err := RequestPort(ip, "tcp", 80); err == nil { + t.Fatalf("Acquiring a port already in use should return an error") + } + if _, err := RequestPort(ip2, "tcp", 80); err != nil { + t.Fatalf("It should be possible to allocate the same port on a different interface") + } + if _, err := RequestPort(ip2, "tcp", 80); err == nil { + t.Fatalf("Acquiring a port already in use should return an error") + } + if err := ReleasePort(ip, "tcp", 80); err != nil { + t.Fatal(err) + } + if _, err := RequestPort(ip, "tcp", 80); err != nil { + t.Fatal(err) + } +} diff --git a/runtime/networkdriver/portmapper/mapper.go b/runtime/networkdriver/portmapper/mapper.go new file mode 100644 index 0000000000..e29959a245 --- /dev/null +++ b/runtime/networkdriver/portmapper/mapper.go @@ -0,0 +1,131 @@ +package portmapper + +import ( + "errors" + "fmt" + "github.com/dotcloud/docker/pkg/iptables" + "github.com/dotcloud/docker/pkg/proxy" + "net" + "sync" +) + +type mapping struct { + proto string + userlandProxy proxy.Proxy + host net.Addr + container net.Addr +} + +var ( + chain *iptables.Chain + lock sync.Mutex + + // udp:ip:port + currentMappings = make(map[string]*mapping) + newProxy = proxy.NewProxy +) + +var ( + ErrUnknownBackendAddressType = errors.New("unknown container address type not supported") + ErrPortMappedForIP = errors.New("port is already mapped to ip") + ErrPortNotMapped = errors.New("port is not mapped") +) + +func SetIptablesChain(c *iptables.Chain) { + chain = c +} + +func Map(container net.Addr, hostIP net.IP, hostPort int) error { + lock.Lock() + defer lock.Unlock() + + var m *mapping + switch container.(type) { + case *net.TCPAddr: + m = &mapping{ + proto: "tcp", + host: &net.TCPAddr{IP: hostIP, Port: hostPort}, + container: container, + } + case *net.UDPAddr: + m = &mapping{ + proto: "udp", + host: &net.UDPAddr{IP: hostIP, Port: hostPort}, + container: container, + } + default: + return ErrUnknownBackendAddressType + } + + key := getKey(m.host) + if _, exists := currentMappings[key]; exists { + return ErrPortMappedForIP + } + + containerIP, containerPort := getIPAndPort(m.container) + if err := forward(iptables.Add, m.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { + return err + } + + p, err := newProxy(m.host, m.container) + if err != nil { + // need to undo the iptables rules before we reutrn + forward(iptables.Delete, m.proto, 
hostIP, hostPort, containerIP.String(), containerPort) + return err + } + + m.userlandProxy = p + currentMappings[key] = m + + go p.Run() + + return nil +} + +func Unmap(host net.Addr) error { + lock.Lock() + defer lock.Unlock() + + key := getKey(host) + data, exists := currentMappings[key] + if !exists { + return ErrPortNotMapped + } + + data.userlandProxy.Close() + delete(currentMappings, key) + + containerIP, containerPort := getIPAndPort(data.container) + hostIP, hostPort := getIPAndPort(data.host) + if err := forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { + return err + } + return nil +} + +func getKey(a net.Addr) string { + switch t := a.(type) { + case *net.TCPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp") + case *net.UDPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp") + } + return "" +} + +func getIPAndPort(a net.Addr) (net.IP, int) { + switch t := a.(type) { + case *net.TCPAddr: + return t.IP, t.Port + case *net.UDPAddr: + return t.IP, t.Port + } + return nil, 0 +} + +func forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error { + if chain == nil { + return nil + } + return chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort) +} diff --git a/runtime/networkdriver/portmapper/mapper_test.go b/runtime/networkdriver/portmapper/mapper_test.go new file mode 100644 index 0000000000..4c09f3c651 --- /dev/null +++ b/runtime/networkdriver/portmapper/mapper_test.go @@ -0,0 +1,107 @@ +package portmapper + +import ( + "github.com/dotcloud/docker/pkg/iptables" + "github.com/dotcloud/docker/pkg/proxy" + "net" + "testing" +) + +func init() { + // override this func to mock out the proxy server + newProxy = proxy.NewStubProxy +} + +func reset() { + chain = nil + currentMappings = make(map[string]*mapping) +} + +func TestSetIptablesChain(t *testing.T) { + defer reset() + + c := &iptables.Chain{ + Name: "TEST", + Bridge: "192.168.1.1", + } + + if chain != nil { + t.Fatal("chain should be nil at init") + } + + SetIptablesChain(c) + if chain == nil { + t.Fatal("chain should not be nil after set") + } +} + +func TestMapPorts(t *testing.T) { + dstIp1 := net.ParseIP("192.168.0.1") + dstIp2 := net.ParseIP("192.168.0.2") + dstAddr1 := &net.TCPAddr{IP: dstIp1, Port: 80} + dstAddr2 := &net.TCPAddr{IP: dstIp2, Port: 80} + + srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")} + srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")} + + if err := Map(srcAddr1, dstIp1, 80); err != nil { + t.Fatalf("Failed to allocate port: %s", err) + } + + if Map(srcAddr1, dstIp1, 80) == nil { + t.Fatalf("Port is in use - mapping should have failed") + } + + if Map(srcAddr2, dstIp1, 80) == nil { + t.Fatalf("Port is in use - mapping should have failed") + } + + if err := Map(srcAddr2, dstIp2, 80); err != nil { + t.Fatalf("Failed to allocate port: %s", err) + } + + if Unmap(dstAddr1) != nil { + t.Fatalf("Failed to release port") + } + + if Unmap(dstAddr2) != nil { + t.Fatalf("Failed to release port") + } + + if Unmap(dstAddr2) == nil { + t.Fatalf("Port already released, but no error reported") + } +} + +func TestGetUDPKey(t *testing.T) { + addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} + + key := getKey(addr) + + if expected := "192.168.1.5:53/udp"; key != expected { + t.Fatalf("expected key %s got %s", expected, key) + } +} + +func TestGetTCPKey(t *testing.T) { + addr := &net.TCPAddr{IP: 
net.ParseIP("192.168.1.5"), Port: 80} + + key := getKey(addr) + + if expected := "192.168.1.5:80/tcp"; key != expected { + t.Fatalf("expected key %s got %s", expected, key) + } +} + +func TestGetUDPIPAndPort(t *testing.T) { + addr := &net.UDPAddr{IP: net.ParseIP("192.168.1.5"), Port: 53} + + ip, port := getIPAndPort(addr) + if expected := "192.168.1.5"; ip.String() != expected { + t.Fatalf("expected ip %s got %s", expected, ip) + } + + if ep := 53; port != ep { + t.Fatalf("expected port %d got %d", ep, port) + } +} diff --git a/runtime/networkdriver/utils.go b/runtime/networkdriver/utils.go new file mode 100644 index 0000000000..0a4ef70c95 --- /dev/null +++ b/runtime/networkdriver/utils.go @@ -0,0 +1,118 @@ +package networkdriver + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/dotcloud/docker/pkg/netlink" +) + +var ( + networkGetRoutesFct = netlink.NetworkGetRoutes + ErrNoDefaultRoute = errors.New("no default route") +) + +func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error { + if len(nameservers) > 0 { + for _, ns := range nameservers { + _, nsNetwork, err := net.ParseCIDR(ns) + if err != nil { + return err + } + if NetworkOverlaps(toCheck, nsNetwork) { + return ErrNetworkOverlapsWithNameservers + } + } + } + return nil +} + +func CheckRouteOverlaps(toCheck *net.IPNet) error { + networks, err := networkGetRoutesFct() + if err != nil { + return err + } + + for _, network := range networks { + if network.IPNet != nil && NetworkOverlaps(toCheck, network.IPNet) { + return ErrNetworkOverlaps + } + } + return nil +} + +// Detects overlap between one IPNet and another +func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { + if firstIP, _ := NetworkRange(netX); netY.Contains(firstIP) { + return true + } + if firstIP, _ := NetworkRange(netY); netX.Contains(firstIP) { + return true + } + return false +} + +// Calculates the first and last IP addresses in an IPNet +func NetworkRange(network *net.IPNet) (net.IP, net.IP) { + var ( + netIP = network.IP.To4() + firstIP = netIP.Mask(network.Mask) + lastIP = net.IPv4(0, 0, 0, 0).To4() + ) + + for i := 0; i < len(lastIP); i++ { + lastIP[i] = netIP[i] | ^network.Mask[i] + } + return firstIP, lastIP +} + +// Given a netmask, calculates the number of available hosts +func NetworkSize(mask net.IPMask) int32 { + m := net.IPv4Mask(0, 0, 0, 0) + for i := 0; i < net.IPv4len; i++ { + m[i] = ^mask[i] + } + return int32(binary.BigEndian.Uint32(m)) + 1 +} + +// Return the IPv4 address of a network interface +func GetIfaceAddr(name string) (net.Addr, error) { + iface, err := net.InterfaceByName(name) + if err != nil { + return nil, err + } + addrs, err := iface.Addrs() + if err != nil { + return nil, err + } + var addrs4 []net.Addr + for _, addr := range addrs { + ip := (addr.(*net.IPNet)).IP + if ip4 := ip.To4(); len(ip4) == net.IPv4len { + addrs4 = append(addrs4, addr) + } + } + switch { + case len(addrs4) == 0: + return nil, fmt.Errorf("Interface %v has no IP addresses", name) + case len(addrs4) > 1: + fmt.Printf("Interface %v has more than 1 IPv4 address. 
Defaulting to using %v\n", + name, (addrs4[0].(*net.IPNet)).IP) + } + return addrs4[0], nil +} + +func GetDefaultRouteIface() (*net.Interface, error) { + rs, err := networkGetRoutesFct() + if err != nil { + return nil, fmt.Errorf("unable to get routes: %v", err) + } + for _, r := range rs { + if r.Default { + return r.Iface, nil + } + } + return nil, ErrNoDefaultRoute +} diff --git a/runtime/runtime.go b/runtime/runtime.go index 32584cbf6e..092b5a8130 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -17,8 +17,8 @@ import ( _ "github.com/dotcloud/docker/graphdriver/devmapper" _ "github.com/dotcloud/docker/graphdriver/vfs" "github.com/dotcloud/docker/image" - _ "github.com/dotcloud/docker/networkdriver/lxc" - "github.com/dotcloud/docker/networkdriver/portallocator" + _ "github.com/dotcloud/docker/runtime/networkdriver/lxc" + "github.com/dotcloud/docker/runtime/networkdriver/portallocator" "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/runconfig" -- cgit v1.2.1 From af385151ceedde097eda8a5195b431e8076cf76b Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 14 Mar 2014 14:07:32 -0700 Subject: Move execdrivers into runtime top level pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- execdriver/MAINTAINERS | 2 - execdriver/driver.go | 135 -------- execdriver/execdrivers/execdrivers.go | 23 -- execdriver/lxc/driver.go | 396 ----------------------- execdriver/lxc/info.go | 50 --- execdriver/lxc/info_test.go | 36 --- execdriver/lxc/init.go | 176 ---------- execdriver/lxc/lxc_init_linux.go | 11 - execdriver/lxc/lxc_init_unsupported.go | 7 - execdriver/lxc/lxc_template.go | 155 --------- execdriver/lxc/lxc_template_unit_test.go | 125 ------- execdriver/native/default_template.go | 94 ------ execdriver/native/driver.go | 266 --------------- execdriver/native/info.go | 21 -- execdriver/native/term.go | 42 --- execdriver/pipes.go | 23 -- execdriver/termconsole.go | 126 -------- runtime/container.go | 2 +- runtime/execdriver/MAINTAINERS | 2 + runtime/execdriver/driver.go | 135 ++++++++ runtime/execdriver/execdrivers/execdrivers.go | 23 ++ runtime/execdriver/lxc/driver.go | 396 +++++++++++++++++++++++ runtime/execdriver/lxc/info.go | 50 +++ runtime/execdriver/lxc/info_test.go | 36 +++ runtime/execdriver/lxc/init.go | 176 ++++++++++ runtime/execdriver/lxc/lxc_init_linux.go | 11 + runtime/execdriver/lxc/lxc_init_unsupported.go | 7 + runtime/execdriver/lxc/lxc_template.go | 155 +++++++++ runtime/execdriver/lxc/lxc_template_unit_test.go | 125 +++++++ runtime/execdriver/native/default_template.go | 94 ++++++ runtime/execdriver/native/driver.go | 266 +++++++++++++++ runtime/execdriver/native/info.go | 21 ++ runtime/execdriver/native/term.go | 42 +++ runtime/execdriver/pipes.go | 23 ++ runtime/execdriver/termconsole.go | 126 ++++++++ runtime/runtime.go | 6 +- runtime/volumes.go | 2 +- sysinit/sysinit.go | 6 +- 38 files changed, 1696 insertions(+), 1696 deletions(-) delete mode 100644 execdriver/MAINTAINERS delete mode 100644 execdriver/driver.go delete mode 100644 execdriver/execdrivers/execdrivers.go delete mode 100644 execdriver/lxc/driver.go delete mode 100644 execdriver/lxc/info.go delete mode 100644 execdriver/lxc/info_test.go delete mode 100644 execdriver/lxc/init.go delete mode 100644 execdriver/lxc/lxc_init_linux.go delete mode 100644 execdriver/lxc/lxc_init_unsupported.go delete mode 100644 execdriver/lxc/lxc_template.go delete mode 100644 execdriver/lxc/lxc_template_unit_test.go delete mode 100644 
execdriver/native/default_template.go delete mode 100644 execdriver/native/driver.go delete mode 100644 execdriver/native/info.go delete mode 100644 execdriver/native/term.go delete mode 100644 execdriver/pipes.go delete mode 100644 execdriver/termconsole.go create mode 100644 runtime/execdriver/MAINTAINERS create mode 100644 runtime/execdriver/driver.go create mode 100644 runtime/execdriver/execdrivers/execdrivers.go create mode 100644 runtime/execdriver/lxc/driver.go create mode 100644 runtime/execdriver/lxc/info.go create mode 100644 runtime/execdriver/lxc/info_test.go create mode 100644 runtime/execdriver/lxc/init.go create mode 100644 runtime/execdriver/lxc/lxc_init_linux.go create mode 100644 runtime/execdriver/lxc/lxc_init_unsupported.go create mode 100644 runtime/execdriver/lxc/lxc_template.go create mode 100644 runtime/execdriver/lxc/lxc_template_unit_test.go create mode 100644 runtime/execdriver/native/default_template.go create mode 100644 runtime/execdriver/native/driver.go create mode 100644 runtime/execdriver/native/info.go create mode 100644 runtime/execdriver/native/term.go create mode 100644 runtime/execdriver/pipes.go create mode 100644 runtime/execdriver/termconsole.go diff --git a/execdriver/MAINTAINERS b/execdriver/MAINTAINERS deleted file mode 100644 index 1cb551364d..0000000000 --- a/execdriver/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Michael Crosby (@crosbymichael) -Guillaume J. Charmes (@creack) diff --git a/execdriver/driver.go b/execdriver/driver.go deleted file mode 100644 index ff37b6bc5b..0000000000 --- a/execdriver/driver.go +++ /dev/null @@ -1,135 +0,0 @@ -package execdriver - -import ( - "errors" - "io" - "os" - "os/exec" -) - -var ( - ErrNotRunning = errors.New("Process could not be started") - ErrWaitTimeoutReached = errors.New("Wait timeout reached") - ErrDriverAlreadyRegistered = errors.New("A driver already registered this docker init function") - ErrDriverNotFound = errors.New("The requested docker init has not been found") -) - -var dockerInitFcts map[string]InitFunc - -type ( - StartCallback func(*Command) - InitFunc func(i *InitArgs) error -) - -func RegisterInitFunc(name string, fct InitFunc) error { - if dockerInitFcts == nil { - dockerInitFcts = make(map[string]InitFunc) - } - if _, ok := dockerInitFcts[name]; ok { - return ErrDriverAlreadyRegistered - } - dockerInitFcts[name] = fct - return nil -} - -func GetInitFunc(name string) (InitFunc, error) { - fct, ok := dockerInitFcts[name] - if !ok { - return nil, ErrDriverNotFound - } - return fct, nil -} - -// Args provided to the init function for a driver -type InitArgs struct { - User string - Gateway string - Ip string - WorkDir string - Privileged bool - Env []string - Args []string - Mtu int - Driver string - Console string - Pipe int - Root string -} - -// Driver specific information based on -// processes registered with the driver -type Info interface { - IsRunning() bool -} - -// Terminal in an interface for drivers to implement -// if they want to support Close and Resize calls from -// the core -type Terminal interface { - io.Closer - Resize(height, width int) error -} - -type TtyTerminal interface { - Master() *os.File -} - -type Driver interface { - Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code - Kill(c *Command, sig int) error - Name() string // Driver name - Info(id string) Info // "temporary" hack (until we move state from core to plugins) - GetPidsForContainer(id 
string) ([]int, error) // Returns a list of pids for the given container. -} - -// Network settings of the container -type Network struct { - Gateway string `json:"gateway"` - IPAddress string `json:"ip"` - Bridge string `json:"bridge"` - IPPrefixLen int `json:"ip_prefix_len"` - Mtu int `json:"mtu"` -} - -type Resources struct { - Memory int64 `json:"memory"` - MemorySwap int64 `json:"memory_swap"` - CpuShares int64 `json:"cpu_shares"` -} - -type Mount struct { - Source string `json:"source"` - Destination string `json:"destination"` - Writable bool `json:"writable"` - Private bool `json:"private"` -} - -// Process wrapps an os/exec.Cmd to add more metadata -type Command struct { - exec.Cmd `json:"-"` - - ID string `json:"id"` - Privileged bool `json:"privileged"` - User string `json:"user"` - Rootfs string `json:"rootfs"` // root fs of the container - InitPath string `json:"initpath"` // dockerinit - Entrypoint string `json:"entrypoint"` - Arguments []string `json:"arguments"` - WorkingDir string `json:"working_dir"` - ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver - Tty bool `json:"tty"` - Network *Network `json:"network"` // if network is nil then networking is disabled - Config []string `json:"config"` // generic values that specific drivers can consume - Resources *Resources `json:"resources"` - Mounts []Mount `json:"mounts"` - - Terminal Terminal `json:"-"` // standard or tty terminal - Console string `json:"-"` // dev/console path - ContainerPid int `json:"container_pid"` // the pid for the process inside a container -} - -// Return the pid of the process -// If the process is nil -1 will be returned -func (c *Command) Pid() int { - return c.ContainerPid -} diff --git a/execdriver/execdrivers/execdrivers.go b/execdriver/execdrivers/execdrivers.go deleted file mode 100644 index 7486d649c1..0000000000 --- a/execdriver/execdrivers/execdrivers.go +++ /dev/null @@ -1,23 +0,0 @@ -package execdrivers - -import ( - "fmt" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/execdriver/lxc" - "github.com/dotcloud/docker/execdriver/native" - "github.com/dotcloud/docker/pkg/sysinfo" - "path" -) - -func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { - switch name { - case "lxc": - // we want to five the lxc driver the full docker root because it needs - // to access and write config and template files in /var/lib/docker/containers/* - // to be backwards compatible - return lxc.NewDriver(root, sysInfo.AppArmor) - case "native": - return native.NewDriver(path.Join(root, "execdriver", "native"), initPath) - } - return nil, fmt.Errorf("unknown exec driver %s", name) -} diff --git a/execdriver/lxc/driver.go b/execdriver/lxc/driver.go deleted file mode 100644 index 9abec8ac3f..0000000000 --- a/execdriver/lxc/driver.go +++ /dev/null @@ -1,396 +0,0 @@ -package lxc - -import ( - "fmt" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/pkg/cgroups" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "log" - "os" - "os/exec" - "path" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" -) - -const DriverName = "lxc" - -func init() { - execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { - if err := setupEnv(args); err != nil { - return err - } - - if err := setupHostname(args); err != nil { - return err - } - - if err := setupNetworking(args); err != nil { - return err - } - - if err := 
setupCapabilities(args); err != nil { - return err - } - - if err := setupWorkingDirectory(args); err != nil { - return err - } - - if err := changeUser(args); err != nil { - return err - } - - path, err := exec.LookPath(args.Args[0]) - if err != nil { - log.Printf("Unable to locate %v", args.Args[0]) - os.Exit(127) - } - if err := syscall.Exec(path, args.Args, os.Environ()); err != nil { - return fmt.Errorf("dockerinit unable to execute %s - %s", path, err) - } - panic("Unreachable") - }) -} - -type driver struct { - root string // root path for the driver to use - apparmor bool - sharedRoot bool -} - -func NewDriver(root string, apparmor bool) (*driver, error) { - // setup unconfined symlink - if err := linkLxcStart(root); err != nil { - return nil, err - } - return &driver{ - apparmor: apparmor, - root: root, - sharedRoot: rootIsShared(), - }, nil -} - -func (d *driver) Name() string { - version := d.version() - return fmt.Sprintf("%s-%s", DriverName, version) -} - -func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - if err := execdriver.SetTerminal(c, pipes); err != nil { - return -1, err - } - configPath, err := d.generateLXCConfig(c) - if err != nil { - return -1, err - } - params := []string{ - "lxc-start", - "-n", c.ID, - "-f", configPath, - "--", - c.InitPath, - "-driver", - DriverName, - } - - if c.Network != nil { - params = append(params, - "-g", c.Network.Gateway, - "-i", fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen), - "-mtu", strconv.Itoa(c.Network.Mtu), - ) - } - - if c.User != "" { - params = append(params, "-u", c.User) - } - - if c.Privileged { - if d.apparmor { - params[0] = path.Join(d.root, "lxc-start-unconfined") - - } - params = append(params, "-privileged") - } - - if c.WorkingDir != "" { - params = append(params, "-w", c.WorkingDir) - } - - params = append(params, "--", c.Entrypoint) - params = append(params, c.Arguments...) - - if d.sharedRoot { - // lxc-start really needs / to be non-shared, or all kinds of stuff break - // when lxc-start unmount things and those unmounts propagate to the main - // mount namespace. - // What we really want is to clone into a new namespace and then - // mount / MS_REC|MS_SLAVE, but since we can't really clone or fork - // without exec in go we have to do this horrible shell hack... - shellString := - "mount --make-rslave /; exec " + - utils.ShellQuoteArguments(params) - - params = []string{ - "unshare", "-m", "--", "/bin/sh", "-c", shellString, - } - } - - var ( - name = params[0] - arg = params[1:] - ) - aname, err := exec.LookPath(name) - if err != nil { - aname = name - } - c.Path = aname - c.Args = append([]string{name}, arg...) 
- - if err := c.Start(); err != nil { - return -1, err - } - - var ( - waitErr error - waitLock = make(chan struct{}) - ) - go func() { - if err := c.Wait(); err != nil { - if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0 - waitErr = err - } - } - close(waitLock) - }() - - // Poll lxc for RUNNING status - pid, err := d.waitForStart(c, waitLock) - if err != nil { - if c.Process != nil { - c.Process.Kill() - } - return -1, err - } - c.ContainerPid = pid - - if startCallback != nil { - startCallback(c) - } - - <-waitLock - - return getExitCode(c), waitErr -} - -/// Return the exit code of the process -// if the process has not exited -1 will be returned -func getExitCode(c *execdriver.Command) int { - if c.ProcessState == nil { - return -1 - } - return c.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() -} - -func (d *driver) Kill(c *execdriver.Command, sig int) error { - return KillLxc(c.ID, sig) -} - -func (d *driver) version() string { - var ( - version string - output []byte - err error - ) - if _, errPath := exec.LookPath("lxc-version"); errPath == nil { - output, err = exec.Command("lxc-version").CombinedOutput() - } else { - output, err = exec.Command("lxc-start", "--version").CombinedOutput() - } - if err == nil { - version = strings.TrimSpace(string(output)) - if parts := strings.SplitN(version, ":", 2); len(parts) == 2 { - version = strings.TrimSpace(parts[1]) - } - } - return version -} - -func KillLxc(id string, sig int) error { - var ( - err error - output []byte - ) - _, err = exec.LookPath("lxc-kill") - if err == nil { - output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput() - } else { - output, err = exec.Command("lxc-stop", "-k", "-n", id, strconv.Itoa(sig)).CombinedOutput() - } - if err != nil { - return fmt.Errorf("Err: %s Output: %s", err, output) - } - return nil -} - -// wait for the process to start and return the pid for the process -func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (int, error) { - var ( - err error - output []byte - ) - // We wait for the container to be fully running. - // Timeout after 5 seconds. In case of broken pipe, just retry. 
- // Note: The container can run and finish correctly before - // the end of this loop - for now := time.Now(); time.Since(now) < 5*time.Second; { - select { - case <-waitLock: - // If the process dies while waiting for it, just return - return -1, nil - default: - } - - output, err = d.getInfo(c.ID) - if err != nil { - output, err = d.getInfo(c.ID) - if err != nil { - return -1, err - } - } - info, err := parseLxcInfo(string(output)) - if err != nil { - return -1, err - } - if info.Running { - return info.Pid, nil - } - time.Sleep(50 * time.Millisecond) - } - return -1, execdriver.ErrNotRunning -} - -func (d *driver) getInfo(id string) ([]byte, error) { - return exec.Command("lxc-info", "-n", id).CombinedOutput() -} - -type info struct { - ID string - driver *driver -} - -func (i *info) IsRunning() bool { - var running bool - - output, err := i.driver.getInfo(i.ID) - if err != nil { - utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) - return false - } - if strings.Contains(string(output), "RUNNING") { - running = true - } - return running -} - -func (d *driver) Info(id string) execdriver.Info { - return &info{ - ID: id, - driver: d, - } -} - -func (d *driver) GetPidsForContainer(id string) ([]int, error) { - pids := []int{} - - // cpu is chosen because it is the only non optional subsystem in cgroups - subsystem := "cpu" - cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) - if err != nil { - return pids, err - } - - cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) - if err != nil { - return pids, err - } - - filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") - if _, err := os.Stat(filename); os.IsNotExist(err) { - // With more recent lxc versions use, cgroup will be in lxc/ - filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks") - } - - output, err := ioutil.ReadFile(filename) - if err != nil { - return pids, err - } - for _, p := range strings.Split(string(output), "\n") { - if len(p) == 0 { - continue - } - pid, err := strconv.Atoi(p) - if err != nil { - return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) - } - pids = append(pids, pid) - } - return pids, nil -} - -func linkLxcStart(root string) error { - sourcePath, err := exec.LookPath("lxc-start") - if err != nil { - return err - } - targetPath := path.Join(root, "lxc-start-unconfined") - - if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) { - return err - } else if err == nil { - if err := os.Remove(targetPath); err != nil { - return err - } - } - return os.Symlink(sourcePath, targetPath) -} - -// TODO: This can be moved to the mountinfo reader in the mount pkg -func rootIsShared() bool { - if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil { - for _, line := range strings.Split(string(data), "\n") { - cols := strings.Split(line, " ") - if len(cols) >= 6 && cols[4] == "/" { - return strings.HasPrefix(cols[6], "shared") - } - } - } - - // No idea, probably safe to assume so - return true -} - -func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { - root := path.Join(d.root, "containers", c.ID, "config.lxc") - fo, err := os.Create(root) - if err != nil { - return "", err - } - defer fo.Close() - - if err := LxcTemplateCompiled.Execute(fo, struct { - *execdriver.Command - AppArmor bool - }{ - Command: c, - AppArmor: d.apparmor, - }); err != nil { - return "", err - } - return root, nil -} diff --git a/execdriver/lxc/info.go b/execdriver/lxc/info.go deleted file mode 100644 index 27b4c58604..0000000000 
--- a/execdriver/lxc/info.go +++ /dev/null @@ -1,50 +0,0 @@ -package lxc - -import ( - "bufio" - "errors" - "strconv" - "strings" -) - -var ( - ErrCannotParse = errors.New("cannot parse raw input") -) - -type lxcInfo struct { - Running bool - Pid int -} - -func parseLxcInfo(raw string) (*lxcInfo, error) { - if raw == "" { - return nil, ErrCannotParse - } - var ( - err error - s = bufio.NewScanner(strings.NewReader(raw)) - info = &lxcInfo{} - ) - for s.Scan() { - text := s.Text() - - if s.Err() != nil { - return nil, s.Err() - } - - parts := strings.Split(text, ":") - if len(parts) < 2 { - continue - } - switch strings.ToLower(strings.TrimSpace(parts[0])) { - case "state": - info.Running = strings.TrimSpace(parts[1]) == "RUNNING" - case "pid": - info.Pid, err = strconv.Atoi(strings.TrimSpace(parts[1])) - if err != nil { - return nil, err - } - } - } - return info, nil -} diff --git a/execdriver/lxc/info_test.go b/execdriver/lxc/info_test.go deleted file mode 100644 index edafc02511..0000000000 --- a/execdriver/lxc/info_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package lxc - -import ( - "testing" -) - -func TestParseRunningInfo(t *testing.T) { - raw := ` - state: RUNNING - pid: 50` - - info, err := parseLxcInfo(raw) - if err != nil { - t.Fatal(err) - } - if !info.Running { - t.Fatal("info should return a running state") - } - if info.Pid != 50 { - t.Fatalf("info should have pid 50 got %d", info.Pid) - } -} - -func TestEmptyInfo(t *testing.T) { - _, err := parseLxcInfo("") - if err == nil { - t.Fatal("error should not be nil") - } -} - -func TestBadInfo(t *testing.T) { - _, err := parseLxcInfo("state") - if err != nil { - t.Fatal(err) - } -} diff --git a/execdriver/lxc/init.go b/execdriver/lxc/init.go deleted file mode 100644 index 0f134088a3..0000000000 --- a/execdriver/lxc/init.go +++ /dev/null @@ -1,176 +0,0 @@ -package lxc - -import ( - "encoding/json" - "fmt" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/pkg/netlink" - "github.com/dotcloud/docker/pkg/user" - "github.com/syndtr/gocapability/capability" - "io/ioutil" - "net" - "os" - "strings" - "syscall" -) - -// Clear environment pollution introduced by lxc-start -func setupEnv(args *execdriver.InitArgs) error { - // Get env - var env []string - content, err := ioutil.ReadFile(".dockerenv") - if err != nil { - return fmt.Errorf("Unable to load environment variables: %v", err) - } - if err := json.Unmarshal(content, &env); err != nil { - return fmt.Errorf("Unable to unmarshal environment variables: %v", err) - } - // Propagate the plugin-specific container env variable - env = append(env, "container="+os.Getenv("container")) - - args.Env = env - - os.Clearenv() - for _, kv := range args.Env { - parts := strings.SplitN(kv, "=", 2) - if len(parts) == 1 { - parts = append(parts, "") - } - os.Setenv(parts[0], parts[1]) - } - - return nil -} - -func setupHostname(args *execdriver.InitArgs) error { - hostname := getEnv(args, "HOSTNAME") - if hostname == "" { - return nil - } - return setHostname(hostname) -} - -// Setup networking -func setupNetworking(args *execdriver.InitArgs) error { - if args.Ip != "" { - // eth0 - iface, err := net.InterfaceByName("eth0") - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - ip, ipNet, err := net.ParseCIDR(args.Ip) - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := 
netlink.NetworkSetMTU(iface, args.Mtu); err != nil { - return fmt.Errorf("Unable to set MTU: %v", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - - // loopback - iface, err = net.InterfaceByName("lo") - if err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - } - if args.Gateway != "" { - gw := net.ParseIP(args.Gateway) - if gw == nil { - return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway) - } - - if err := netlink.AddDefaultGw(gw); err != nil { - return fmt.Errorf("Unable to set up networking: %v", err) - } - } - - return nil -} - -// Setup working directory -func setupWorkingDirectory(args *execdriver.InitArgs) error { - if args.WorkDir == "" { - return nil - } - if err := syscall.Chdir(args.WorkDir); err != nil { - return fmt.Errorf("Unable to change dir to %v: %v", args.WorkDir, err) - } - return nil -} - -// Takes care of dropping privileges to the desired user -func changeUser(args *execdriver.InitArgs) error { - uid, gid, suppGids, err := user.GetUserGroupSupplementary( - args.User, - syscall.Getuid(), syscall.Getgid(), - ) - if err != nil { - return err - } - - if err := syscall.Setgroups(suppGids); err != nil { - return fmt.Errorf("Setgroups failed: %v", err) - } - if err := syscall.Setgid(gid); err != nil { - return fmt.Errorf("Setgid failed: %v", err) - } - if err := syscall.Setuid(uid); err != nil { - return fmt.Errorf("Setuid failed: %v", err) - } - - return nil -} - -func setupCapabilities(args *execdriver.InitArgs) error { - if args.Privileged { - return nil - } - - drop := []capability.Cap{ - capability.CAP_SETPCAP, - capability.CAP_SYS_MODULE, - capability.CAP_SYS_RAWIO, - capability.CAP_SYS_PACCT, - capability.CAP_SYS_ADMIN, - capability.CAP_SYS_NICE, - capability.CAP_SYS_RESOURCE, - capability.CAP_SYS_TIME, - capability.CAP_SYS_TTY_CONFIG, - capability.CAP_MKNOD, - capability.CAP_AUDIT_WRITE, - capability.CAP_AUDIT_CONTROL, - capability.CAP_MAC_OVERRIDE, - capability.CAP_MAC_ADMIN, - capability.CAP_NET_ADMIN, - } - - c, err := capability.NewPid(os.Getpid()) - if err != nil { - return err - } - - c.Unset(capability.CAPS|capability.BOUNDS, drop...) 
- - if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil { - return err - } - return nil -} - -func getEnv(args *execdriver.InitArgs, key string) string { - for _, kv := range args.Env { - parts := strings.SplitN(kv, "=", 2) - if parts[0] == key && len(parts) == 2 { - return parts[1] - } - } - return "" -} diff --git a/execdriver/lxc/lxc_init_linux.go b/execdriver/lxc/lxc_init_linux.go deleted file mode 100644 index 7288f5877b..0000000000 --- a/execdriver/lxc/lxc_init_linux.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build amd64 - -package lxc - -import ( - "syscall" -) - -func setHostname(hostname string) error { - return syscall.Sethostname([]byte(hostname)) -} diff --git a/execdriver/lxc/lxc_init_unsupported.go b/execdriver/lxc/lxc_init_unsupported.go deleted file mode 100644 index d68cb91a1e..0000000000 --- a/execdriver/lxc/lxc_init_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux !amd64 - -package lxc - -func setHostname(hostname string) error { - panic("Not supported on darwin") -} diff --git a/execdriver/lxc/lxc_template.go b/execdriver/lxc/lxc_template.go deleted file mode 100644 index 84cd4e442e..0000000000 --- a/execdriver/lxc/lxc_template.go +++ /dev/null @@ -1,155 +0,0 @@ -package lxc - -import ( - "github.com/dotcloud/docker/execdriver" - "strings" - "text/template" -) - -const LxcTemplate = ` -{{if .Network}} -# network configuration -lxc.network.type = veth -lxc.network.link = {{.Network.Bridge}} -lxc.network.name = eth0 -lxc.network.mtu = {{.Network.Mtu}} -{{else}} -# network is disabled (-n=false) -lxc.network.type = empty -lxc.network.flags = up -{{end}} - -# root filesystem -{{$ROOTFS := .Rootfs}} -lxc.rootfs = {{$ROOTFS}} - -# use a dedicated pts for the container (and limit the number of pseudo terminal -# available) -lxc.pts = 1024 - -# disable the main console -lxc.console = none - -# no controlling tty at all -lxc.tty = 1 - -{{if .Privileged}} -lxc.cgroup.devices.allow = a -{{else}} -# no implicit access to devices -lxc.cgroup.devices.deny = a - -# /dev/null and zero -lxc.cgroup.devices.allow = c 1:3 rwm -lxc.cgroup.devices.allow = c 1:5 rwm - -# consoles -lxc.cgroup.devices.allow = c 5:1 rwm -lxc.cgroup.devices.allow = c 5:0 rwm -lxc.cgroup.devices.allow = c 4:0 rwm -lxc.cgroup.devices.allow = c 4:1 rwm - -# /dev/urandom,/dev/random -lxc.cgroup.devices.allow = c 1:9 rwm -lxc.cgroup.devices.allow = c 1:8 rwm - -# /dev/pts/ - pts namespaces are "coming soon" -lxc.cgroup.devices.allow = c 136:* rwm -lxc.cgroup.devices.allow = c 5:2 rwm - -# tuntap -lxc.cgroup.devices.allow = c 10:200 rwm - -# fuse -#lxc.cgroup.devices.allow = c 10:229 rwm - -# rtc -#lxc.cgroup.devices.allow = c 254:0 rwm -{{end}} - -# standard mount point -# Use mnt.putold as per https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/986385 -lxc.pivotdir = lxc_putold - -# NOTICE: These mounts must be applied within the namespace - -# WARNING: procfs is a known attack vector and should probably be disabled -# if your userspace allows it. eg. see http://blog.zx2c4.com/749 -lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0 - -# WARNING: sysfs is a known attack vector and should probably be disabled -# if your userspace allows it. eg. 
see http://bit.ly/T9CkqJ -lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 - -{{if .Tty}} -lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 -{{end}} - -lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0 -lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0 - -{{range $value := .Mounts}} -{{if $value.Writable}} -lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,rw 0 0 -{{else}} -lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,ro 0 0 -{{end}} -{{end}} - -{{if .Privileged}} -{{if .AppArmor}} -lxc.aa_profile = unconfined -{{else}} -#lxc.aa_profile = unconfined -{{end}} -{{end}} - -# limits -{{if .Resources}} -{{if .Resources.Memory}} -lxc.cgroup.memory.limit_in_bytes = {{.Resources.Memory}} -lxc.cgroup.memory.soft_limit_in_bytes = {{.Resources.Memory}} -{{with $memSwap := getMemorySwap .Resources}} -lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}} -{{end}} -{{end}} -{{if .Resources.CpuShares}} -lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} -{{end}} -{{end}} - -{{if .Config}} -{{range $value := .Config}} -{{$value}} -{{end}} -{{end}} -` - -var LxcTemplateCompiled *template.Template - -// Escape spaces in strings according to the fstab documentation, which is the -// format for "lxc.mount.entry" lines in lxc.conf. See also "man 5 fstab". -func escapeFstabSpaces(field string) string { - return strings.Replace(field, " ", "\\040", -1) -} - -func getMemorySwap(v *execdriver.Resources) int64 { - // By default, MemorySwap is set to twice the size of RAM. - // If you want to omit MemorySwap, set it to `-1'. 
- if v.MemorySwap < 0 { - return 0 - } - return v.Memory * 2 -} - -func init() { - var err error - funcMap := template.FuncMap{ - "getMemorySwap": getMemorySwap, - "escapeFstabSpaces": escapeFstabSpaces, - } - LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) - if err != nil { - panic(err) - } -} diff --git a/execdriver/lxc/lxc_template_unit_test.go b/execdriver/lxc/lxc_template_unit_test.go deleted file mode 100644 index 99d6e636f5..0000000000 --- a/execdriver/lxc/lxc_template_unit_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package lxc - -import ( - "bufio" - "fmt" - "github.com/dotcloud/docker/execdriver" - "io/ioutil" - "math/rand" - "os" - "path" - "strings" - "testing" - "time" -) - -func TestLXCConfig(t *testing.T) { - root, err := ioutil.TempDir("", "TestLXCConfig") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - - os.MkdirAll(path.Join(root, "containers", "1"), 0777) - - // Memory is allocated randomly for testing - rand.Seed(time.Now().UTC().UnixNano()) - var ( - memMin = 33554432 - memMax = 536870912 - mem = memMin + rand.Intn(memMax-memMin) - cpuMin = 100 - cpuMax = 10000 - cpu = cpuMin + rand.Intn(cpuMax-cpuMin) - ) - - driver, err := NewDriver(root, false) - if err != nil { - t.Fatal(err) - } - command := &execdriver.Command{ - ID: "1", - Resources: &execdriver.Resources{ - Memory: int64(mem), - CpuShares: int64(cpu), - }, - } - p, err := driver.generateLXCConfig(command) - if err != nil { - t.Fatal(err) - } - grepFile(t, p, - fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) - - grepFile(t, p, - fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) -} - -func TestCustomLxcConfig(t *testing.T) { - root, err := ioutil.TempDir("", "TestCustomLxcConfig") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - - os.MkdirAll(path.Join(root, "containers", "1"), 0777) - - driver, err := NewDriver(root, false) - if err != nil { - t.Fatal(err) - } - command := &execdriver.Command{ - ID: "1", - Privileged: false, - Config: []string{ - "lxc.utsname = docker", - "lxc.cgroup.cpuset.cpus = 0,1", - }, - } - - p, err := driver.generateLXCConfig(command) - if err != nil { - t.Fatal(err) - } - - grepFile(t, p, "lxc.utsname = docker") - grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") -} - -func grepFile(t *testing.T, path string, pattern string) { - f, err := os.Open(path) - if err != nil { - t.Fatal(err) - } - defer f.Close() - r := bufio.NewReader(f) - var ( - line string - ) - err = nil - for err == nil { - line, err = r.ReadString('\n') - if strings.Contains(line, pattern) == true { - return - } - } - t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) -} - -func TestEscapeFstabSpaces(t *testing.T) { - var testInputs = map[string]string{ - " ": "\\040", - "": "", - "/double space": "/double\\040\\040space", - "/some long test string": "/some\\040long\\040test\\040string", - "/var/lib/docker": "/var/lib/docker", - " leading": "\\040leading", - "trailing ": "trailing\\040", - } - for in, exp := range testInputs { - if out := escapeFstabSpaces(in); exp != out { - t.Logf("Expected %s got %s", exp, out) - t.Fail() - } - } -} diff --git a/execdriver/native/default_template.go b/execdriver/native/default_template.go deleted file mode 100644 index 5351911427..0000000000 --- a/execdriver/native/default_template.go +++ /dev/null @@ -1,94 +0,0 @@ -package native - -import ( - "fmt" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/pkg/cgroups" - 
"github.com/dotcloud/docker/pkg/libcontainer" - "os" -) - -// createContainer populates and configures the container type with the -// data provided by the execdriver.Command -func createContainer(c *execdriver.Command) *libcontainer.Container { - container := getDefaultTemplate() - - container.Hostname = getEnv("HOSTNAME", c.Env) - container.Tty = c.Tty - container.User = c.User - container.WorkingDir = c.WorkingDir - container.Env = c.Env - - if c.Network != nil { - container.Networks = []*libcontainer.Network{ - { - Mtu: c.Network.Mtu, - Address: fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen), - Gateway: c.Network.Gateway, - Type: "veth", - Context: libcontainer.Context{ - "prefix": "veth", - "bridge": c.Network.Bridge, - }, - }, - } - } - - container.Cgroups.Name = c.ID - if c.Privileged { - container.CapabilitiesMask = nil - container.Cgroups.DeviceAccess = true - container.Context["apparmor_profile"] = "unconfined" - } - if c.Resources != nil { - container.Cgroups.CpuShares = c.Resources.CpuShares - container.Cgroups.Memory = c.Resources.Memory - container.Cgroups.MemorySwap = c.Resources.MemorySwap - } - // check to see if we are running in ramdisk to disable pivot root - container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" - - for _, m := range c.Mounts { - container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private}) - } - - return container -} - -// getDefaultTemplate returns the docker default for -// the libcontainer configuration file -func getDefaultTemplate() *libcontainer.Container { - return &libcontainer.Container{ - CapabilitiesMask: libcontainer.Capabilities{ - libcontainer.GetCapability("SETPCAP"), - libcontainer.GetCapability("SYS_MODULE"), - libcontainer.GetCapability("SYS_RAWIO"), - libcontainer.GetCapability("SYS_PACCT"), - libcontainer.GetCapability("SYS_ADMIN"), - libcontainer.GetCapability("SYS_NICE"), - libcontainer.GetCapability("SYS_RESOURCE"), - libcontainer.GetCapability("SYS_TIME"), - libcontainer.GetCapability("SYS_TTY_CONFIG"), - libcontainer.GetCapability("MKNOD"), - libcontainer.GetCapability("AUDIT_WRITE"), - libcontainer.GetCapability("AUDIT_CONTROL"), - libcontainer.GetCapability("MAC_OVERRIDE"), - libcontainer.GetCapability("MAC_ADMIN"), - libcontainer.GetCapability("NET_ADMIN"), - }, - Namespaces: libcontainer.Namespaces{ - libcontainer.GetNamespace("NEWNS"), - libcontainer.GetNamespace("NEWUTS"), - libcontainer.GetNamespace("NEWIPC"), - libcontainer.GetNamespace("NEWPID"), - libcontainer.GetNamespace("NEWNET"), - }, - Cgroups: &cgroups.Cgroup{ - Parent: "docker", - DeviceAccess: false, - }, - Context: libcontainer.Context{ - "apparmor_profile": "docker-default", - }, - } -} diff --git a/execdriver/native/driver.go b/execdriver/native/driver.go deleted file mode 100644 index 9b49fd156f..0000000000 --- a/execdriver/native/driver.go +++ /dev/null @@ -1,266 +0,0 @@ -package native - -import ( - "encoding/json" - "fmt" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/pkg/cgroups" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/apparmor" - "github.com/dotcloud/docker/pkg/libcontainer/nsinit" - "github.com/dotcloud/docker/pkg/system" - "io" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "syscall" -) - -const ( - DriverName = "native" - Version = "0.1" -) - -func init() { - execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { - var ( - container 
*libcontainer.Container - ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}, createLogger("")) - ) - f, err := os.Open(filepath.Join(args.Root, "container.json")) - if err != nil { - return err - } - if err := json.NewDecoder(f).Decode(&container); err != nil { - f.Close() - return err - } - f.Close() - - cwd, err := os.Getwd() - if err != nil { - return err - } - syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(args.Pipe)) - if err != nil { - return err - } - if err := ns.Init(container, cwd, args.Console, syncPipe, args.Args); err != nil { - return err - } - return nil - }) -} - -type driver struct { - root string - initPath string -} - -func NewDriver(root, initPath string) (*driver, error) { - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - if err := apparmor.InstallDefaultProfile(); err != nil { - return nil, err - } - return &driver{ - root: root, - initPath: initPath, - }, nil -} - -func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - if err := d.validateCommand(c); err != nil { - return -1, err - } - var ( - term nsinit.Terminal - container = createContainer(c) - factory = &dockerCommandFactory{c: c, driver: d} - stateWriter = &dockerStateWriter{ - callback: startCallback, - c: c, - dsw: &nsinit.DefaultStateWriter{filepath.Join(d.root, c.ID)}, - } - ns = nsinit.NewNsInit(factory, stateWriter, createLogger(os.Getenv("DEBUG"))) - args = append([]string{c.Entrypoint}, c.Arguments...) - ) - if err := d.createContainerRoot(c.ID); err != nil { - return -1, err - } - defer d.removeContainerRoot(c.ID) - - if c.Tty { - term = &dockerTtyTerm{ - pipes: pipes, - } - } else { - term = &dockerStdTerm{ - pipes: pipes, - } - } - c.Terminal = term - if err := d.writeContainerFile(container, c.ID); err != nil { - return -1, err - } - return ns.Exec(container, term, args) -} - -func (d *driver) Kill(p *execdriver.Command, sig int) error { - err := syscall.Kill(p.Process.Pid, syscall.Signal(sig)) - d.removeContainerRoot(p.ID) - return err -} - -func (d *driver) Info(id string) execdriver.Info { - return &info{ - ID: id, - driver: d, - } -} - -func (d *driver) Name() string { - return fmt.Sprintf("%s-%s", DriverName, Version) -} - -// TODO: this can be improved with our driver -// there has to be a better way to do this -func (d *driver) GetPidsForContainer(id string) ([]int, error) { - pids := []int{} - - subsystem := "devices" - cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) - if err != nil { - return pids, err - } - cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) - if err != nil { - return pids, err - } - - filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") - if _, err := os.Stat(filename); os.IsNotExist(err) { - filename = filepath.Join(cgroupRoot, cgroupDir, "docker", id, "tasks") - } - - output, err := ioutil.ReadFile(filename) - if err != nil { - return pids, err - } - for _, p := range strings.Split(string(output), "\n") { - if len(p) == 0 { - continue - } - pid, err := strconv.Atoi(p) - if err != nil { - return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) - } - pids = append(pids, pid) - } - return pids, nil -} - -func (d *driver) writeContainerFile(container *libcontainer.Container, id string) error { - data, err := json.Marshal(container) - if err != nil { - return err - } - return ioutil.WriteFile(filepath.Join(d.root, id, "container.json"), data, 0655) -} - -func (d *driver) createContainerRoot(id string) error { - 
return os.MkdirAll(filepath.Join(d.root, id), 0655) -} - -func (d *driver) removeContainerRoot(id string) error { - return os.RemoveAll(filepath.Join(d.root, id)) -} - -func (d *driver) validateCommand(c *execdriver.Command) error { - // we need to check the Config of the command to make sure that we - // do not have any of the lxc-conf variables - for _, conf := range c.Config { - if strings.Contains(conf, "lxc") { - return fmt.Errorf("%s is not supported by the native driver", conf) - } - } - return nil -} - -func getEnv(key string, env []string) string { - for _, pair := range env { - parts := strings.Split(pair, "=") - if parts[0] == key { - return parts[1] - } - } - return "" -} - -type dockerCommandFactory struct { - c *execdriver.Command - driver *driver -} - -// createCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces -// defined on the container's configuration and use the current binary as the init with the -// args provided -func (d *dockerCommandFactory) Create(container *libcontainer.Container, console string, syncFile *os.File, args []string) *exec.Cmd { - // we need to join the rootfs because nsinit will setup the rootfs and chroot - initPath := filepath.Join(d.c.Rootfs, d.c.InitPath) - - d.c.Path = d.driver.initPath - d.c.Args = append([]string{ - initPath, - "-driver", DriverName, - "-console", console, - "-pipe", "3", - "-root", filepath.Join(d.driver.root, d.c.ID), - "--", - }, args...) - - // set this to nil so that when we set the clone flags anything else is reset - d.c.SysProcAttr = nil - system.SetCloneFlags(&d.c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces))) - d.c.ExtraFiles = []*os.File{syncFile} - - d.c.Env = container.Env - d.c.Dir = d.c.Rootfs - - return &d.c.Cmd -} - -type dockerStateWriter struct { - dsw nsinit.StateWriter - c *execdriver.Command - callback execdriver.StartCallback -} - -func (d *dockerStateWriter) WritePid(pid int) error { - d.c.ContainerPid = pid - err := d.dsw.WritePid(pid) - if d.callback != nil { - d.callback(d.c) - } - return err -} - -func (d *dockerStateWriter) DeletePid() error { - return d.dsw.DeletePid() -} - -func createLogger(debug string) *log.Logger { - var w io.Writer - // if we are in debug mode set the logger to stderr - if debug != "" { - w = os.Stderr - } else { - w = ioutil.Discard - } - return log.New(w, "[libcontainer] ", log.LstdFlags) -} diff --git a/execdriver/native/info.go b/execdriver/native/info.go deleted file mode 100644 index aef2f85c6b..0000000000 --- a/execdriver/native/info.go +++ /dev/null @@ -1,21 +0,0 @@ -package native - -import ( - "os" - "path/filepath" -) - -type info struct { - ID string - driver *driver -} - -// IsRunning is determined by looking for the -// pid file for a container. If the file exists then the -// container is currently running -func (i *info) IsRunning() bool { - if _, err := os.Stat(filepath.Join(i.driver.root, i.ID, "pid")); err == nil { - return true - } - return false -} diff --git a/execdriver/native/term.go b/execdriver/native/term.go deleted file mode 100644 index ec69820f75..0000000000 --- a/execdriver/native/term.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - These types are wrappers around the libcontainer Terminal interface so that - we can resuse the docker implementations where possible. 
-*/ -package native - -import ( - "github.com/dotcloud/docker/execdriver" - "io" - "os" - "os/exec" -) - -type dockerStdTerm struct { - execdriver.StdConsole - pipes *execdriver.Pipes -} - -func (d *dockerStdTerm) Attach(cmd *exec.Cmd) error { - return d.AttachPipes(cmd, d.pipes) -} - -func (d *dockerStdTerm) SetMaster(master *os.File) { - // do nothing -} - -type dockerTtyTerm struct { - execdriver.TtyConsole - pipes *execdriver.Pipes -} - -func (t *dockerTtyTerm) Attach(cmd *exec.Cmd) error { - go io.Copy(t.pipes.Stdout, t.MasterPty) - if t.pipes.Stdin != nil { - go io.Copy(t.MasterPty, t.pipes.Stdin) - } - return nil -} - -func (t *dockerTtyTerm) SetMaster(master *os.File) { - t.MasterPty = master -} diff --git a/execdriver/pipes.go b/execdriver/pipes.go deleted file mode 100644 index 158219f0c5..0000000000 --- a/execdriver/pipes.go +++ /dev/null @@ -1,23 +0,0 @@ -package execdriver - -import ( - "io" -) - -// Pipes is a wrapper around a containers output for -// stdin, stdout, stderr -type Pipes struct { - Stdin io.ReadCloser - Stdout, Stderr io.Writer -} - -func NewPipes(stdin io.ReadCloser, stdout, stderr io.Writer, useStdin bool) *Pipes { - p := &Pipes{ - Stdout: stdout, - Stderr: stderr, - } - if useStdin { - p.Stdin = stdin - } - return p -} diff --git a/execdriver/termconsole.go b/execdriver/termconsole.go deleted file mode 100644 index af6b88d3d1..0000000000 --- a/execdriver/termconsole.go +++ /dev/null @@ -1,126 +0,0 @@ -package execdriver - -import ( - "github.com/dotcloud/docker/pkg/term" - "github.com/kr/pty" - "io" - "os" - "os/exec" -) - -func SetTerminal(command *Command, pipes *Pipes) error { - var ( - term Terminal - err error - ) - if command.Tty { - term, err = NewTtyConsole(command, pipes) - } else { - term, err = NewStdConsole(command, pipes) - } - if err != nil { - return err - } - command.Terminal = term - return nil -} - -type TtyConsole struct { - MasterPty *os.File - SlavePty *os.File -} - -func NewTtyConsole(command *Command, pipes *Pipes) (*TtyConsole, error) { - ptyMaster, ptySlave, err := pty.Open() - if err != nil { - return nil, err - } - tty := &TtyConsole{ - MasterPty: ptyMaster, - SlavePty: ptySlave, - } - if err := tty.AttachPipes(&command.Cmd, pipes); err != nil { - tty.Close() - return nil, err - } - command.Console = tty.SlavePty.Name() - return tty, nil -} - -func (t *TtyConsole) Master() *os.File { - return t.MasterPty -} - -func (t *TtyConsole) Resize(h, w int) error { - return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) -} - -func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { - command.Stdout = t.SlavePty - command.Stderr = t.SlavePty - - go func() { - if wb, ok := pipes.Stdout.(interface { - CloseWriters() error - }); ok { - defer wb.CloseWriters() - } - io.Copy(pipes.Stdout, t.MasterPty) - }() - - if pipes.Stdin != nil { - command.Stdin = t.SlavePty - command.SysProcAttr.Setctty = true - - go func() { - defer pipes.Stdin.Close() - io.Copy(t.MasterPty, pipes.Stdin) - }() - } - return nil -} - -func (t *TtyConsole) Close() error { - t.SlavePty.Close() - return t.MasterPty.Close() -} - -type StdConsole struct { -} - -func NewStdConsole(command *Command, pipes *Pipes) (*StdConsole, error) { - std := &StdConsole{} - - if err := std.AttachPipes(&command.Cmd, pipes); err != nil { - return nil, err - } - return std, nil -} - -func (s *StdConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { - command.Stdout = pipes.Stdout - command.Stderr = pipes.Stderr - - if pipes.Stdin != nil 
{ - stdin, err := command.StdinPipe() - if err != nil { - return err - } - - go func() { - defer stdin.Close() - io.Copy(stdin, pipes.Stdin) - }() - } - return nil -} - -func (s *StdConsole) Resize(h, w int) error { - // we do not need to reside a non tty - return nil -} - -func (s *StdConsole) Close() error { - // nothing to close here - return nil -} diff --git a/runtime/container.go b/runtime/container.go index ee545db201..f4de40a16a 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -6,7 +6,7 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/links" diff --git a/runtime/execdriver/MAINTAINERS b/runtime/execdriver/MAINTAINERS new file mode 100644 index 0000000000..1cb551364d --- /dev/null +++ b/runtime/execdriver/MAINTAINERS @@ -0,0 +1,2 @@ +Michael Crosby (@crosbymichael) +Guillaume J. Charmes (@creack) diff --git a/runtime/execdriver/driver.go b/runtime/execdriver/driver.go new file mode 100644 index 0000000000..ff37b6bc5b --- /dev/null +++ b/runtime/execdriver/driver.go @@ -0,0 +1,135 @@ +package execdriver + +import ( + "errors" + "io" + "os" + "os/exec" +) + +var ( + ErrNotRunning = errors.New("Process could not be started") + ErrWaitTimeoutReached = errors.New("Wait timeout reached") + ErrDriverAlreadyRegistered = errors.New("A driver already registered this docker init function") + ErrDriverNotFound = errors.New("The requested docker init has not been found") +) + +var dockerInitFcts map[string]InitFunc + +type ( + StartCallback func(*Command) + InitFunc func(i *InitArgs) error +) + +func RegisterInitFunc(name string, fct InitFunc) error { + if dockerInitFcts == nil { + dockerInitFcts = make(map[string]InitFunc) + } + if _, ok := dockerInitFcts[name]; ok { + return ErrDriverAlreadyRegistered + } + dockerInitFcts[name] = fct + return nil +} + +func GetInitFunc(name string) (InitFunc, error) { + fct, ok := dockerInitFcts[name] + if !ok { + return nil, ErrDriverNotFound + } + return fct, nil +} + +// Args provided to the init function for a driver +type InitArgs struct { + User string + Gateway string + Ip string + WorkDir string + Privileged bool + Env []string + Args []string + Mtu int + Driver string + Console string + Pipe int + Root string +} + +// Driver specific information based on +// processes registered with the driver +type Info interface { + IsRunning() bool +} + +// Terminal in an interface for drivers to implement +// if they want to support Close and Resize calls from +// the core +type Terminal interface { + io.Closer + Resize(height, width int) error +} + +type TtyTerminal interface { + Master() *os.File +} + +type Driver interface { + Run(c *Command, pipes *Pipes, startCallback StartCallback) (int, error) // Run executes the process and blocks until the process exits and returns the exit code + Kill(c *Command, sig int) error + Name() string // Driver name + Info(id string) Info // "temporary" hack (until we move state from core to plugins) + GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container. 
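// Aside: both drivers in this patch satisfy GetPidsForContainer by reading the
// cgroup "tasks" file for the container (the lxc driver under the "cpu"
// subsystem, the native driver under "devices"). Below is a standalone sketch
// of that lookup; the path used in main is illustrative only, the real drivers
// derive it from the cgroup mountpoint and the daemon's own cgroup directory.
package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// pidsFromTasksFile parses the newline-separated pid list in a cgroup tasks file.
func pidsFromTasksFile(path string) ([]int, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var pids []int
	for _, line := range strings.Split(string(data), "\n") {
		if line == "" {
			continue
		}
		pid, err := strconv.Atoi(line)
		if err != nil {
			return nil, fmt.Errorf("invalid pid %q: %v", line, err)
		}
		pids = append(pids, pid)
	}
	return pids, nil
}

func main() {
	// Illustrative path only; see GetPidsForContainer in the drivers below.
	pids, err := pidsFromTasksFile("/sys/fs/cgroup/cpu/docker/<container-id>/tasks")
	fmt.Println(pids, err)
}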
+} + +// Network settings of the container +type Network struct { + Gateway string `json:"gateway"` + IPAddress string `json:"ip"` + Bridge string `json:"bridge"` + IPPrefixLen int `json:"ip_prefix_len"` + Mtu int `json:"mtu"` +} + +type Resources struct { + Memory int64 `json:"memory"` + MemorySwap int64 `json:"memory_swap"` + CpuShares int64 `json:"cpu_shares"` +} + +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` + Private bool `json:"private"` +} + +// Process wrapps an os/exec.Cmd to add more metadata +type Command struct { + exec.Cmd `json:"-"` + + ID string `json:"id"` + Privileged bool `json:"privileged"` + User string `json:"user"` + Rootfs string `json:"rootfs"` // root fs of the container + InitPath string `json:"initpath"` // dockerinit + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + WorkingDir string `json:"working_dir"` + ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver + Tty bool `json:"tty"` + Network *Network `json:"network"` // if network is nil then networking is disabled + Config []string `json:"config"` // generic values that specific drivers can consume + Resources *Resources `json:"resources"` + Mounts []Mount `json:"mounts"` + + Terminal Terminal `json:"-"` // standard or tty terminal + Console string `json:"-"` // dev/console path + ContainerPid int `json:"container_pid"` // the pid for the process inside a container +} + +// Return the pid of the process +// If the process is nil -1 will be returned +func (c *Command) Pid() int { + return c.ContainerPid +} diff --git a/runtime/execdriver/execdrivers/execdrivers.go b/runtime/execdriver/execdrivers/execdrivers.go new file mode 100644 index 0000000000..29fa5b44f9 --- /dev/null +++ b/runtime/execdriver/execdrivers/execdrivers.go @@ -0,0 +1,23 @@ +package execdrivers + +import ( + "fmt" + "github.com/dotcloud/docker/runtime/execdriver" + "github.com/dotcloud/docker/runtime/execdriver/lxc" + "github.com/dotcloud/docker/runtime/execdriver/native" + "github.com/dotcloud/docker/pkg/sysinfo" + "path" +) + +func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { + switch name { + case "lxc": + // we want to five the lxc driver the full docker root because it needs + // to access and write config and template files in /var/lib/docker/containers/* + // to be backwards compatible + return lxc.NewDriver(root, sysInfo.AppArmor) + case "native": + return native.NewDriver(path.Join(root, "execdriver", "native"), initPath) + } + return nil, fmt.Errorf("unknown exec driver %s", name) +} diff --git a/runtime/execdriver/lxc/driver.go b/runtime/execdriver/lxc/driver.go new file mode 100644 index 0000000000..fa2ecf9d77 --- /dev/null +++ b/runtime/execdriver/lxc/driver.go @@ -0,0 +1,396 @@ +package lxc + +import ( + "fmt" + "github.com/dotcloud/docker/runtime/execdriver" + "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" +) + +const DriverName = "lxc" + +func init() { + execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { + if err := setupEnv(args); err != nil { + return err + } + + if err := setupHostname(args); err != nil { + return err + } + + if err := setupNetworking(args); err != nil { + return err + } + + if err := setupCapabilities(args); 
err != nil { + return err + } + + if err := setupWorkingDirectory(args); err != nil { + return err + } + + if err := changeUser(args); err != nil { + return err + } + + path, err := exec.LookPath(args.Args[0]) + if err != nil { + log.Printf("Unable to locate %v", args.Args[0]) + os.Exit(127) + } + if err := syscall.Exec(path, args.Args, os.Environ()); err != nil { + return fmt.Errorf("dockerinit unable to execute %s - %s", path, err) + } + panic("Unreachable") + }) +} + +type driver struct { + root string // root path for the driver to use + apparmor bool + sharedRoot bool +} + +func NewDriver(root string, apparmor bool) (*driver, error) { + // setup unconfined symlink + if err := linkLxcStart(root); err != nil { + return nil, err + } + return &driver{ + apparmor: apparmor, + root: root, + sharedRoot: rootIsShared(), + }, nil +} + +func (d *driver) Name() string { + version := d.version() + return fmt.Sprintf("%s-%s", DriverName, version) +} + +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + if err := execdriver.SetTerminal(c, pipes); err != nil { + return -1, err + } + configPath, err := d.generateLXCConfig(c) + if err != nil { + return -1, err + } + params := []string{ + "lxc-start", + "-n", c.ID, + "-f", configPath, + "--", + c.InitPath, + "-driver", + DriverName, + } + + if c.Network != nil { + params = append(params, + "-g", c.Network.Gateway, + "-i", fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen), + "-mtu", strconv.Itoa(c.Network.Mtu), + ) + } + + if c.User != "" { + params = append(params, "-u", c.User) + } + + if c.Privileged { + if d.apparmor { + params[0] = path.Join(d.root, "lxc-start-unconfined") + + } + params = append(params, "-privileged") + } + + if c.WorkingDir != "" { + params = append(params, "-w", c.WorkingDir) + } + + params = append(params, "--", c.Entrypoint) + params = append(params, c.Arguments...) + + if d.sharedRoot { + // lxc-start really needs / to be non-shared, or all kinds of stuff break + // when lxc-start unmount things and those unmounts propagate to the main + // mount namespace. + // What we really want is to clone into a new namespace and then + // mount / MS_REC|MS_SLAVE, but since we can't really clone or fork + // without exec in go we have to do this horrible shell hack... + shellString := + "mount --make-rslave /; exec " + + utils.ShellQuoteArguments(params) + + params = []string{ + "unshare", "-m", "--", "/bin/sh", "-c", shellString, + } + } + + var ( + name = params[0] + arg = params[1:] + ) + aname, err := exec.LookPath(name) + if err != nil { + aname = name + } + c.Path = aname + c.Args = append([]string{name}, arg...) 
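// Illustration: when the daemon's root filesystem has shared mount propagation,
// the argv assembled above is not run directly; it is re-quoted into a shell
// string and wrapped in "unshare -m" so that lxc-start's unmounts cannot
// propagate back into the host mount namespace. The sketch below shows what the
// resulting command line looks like. shellQuote is a simplified stand-in for
// utils.ShellQuoteArguments, and the lxc-start arguments are a trimmed,
// hypothetical set.
package main

import (
	"fmt"
	"strings"
)

// shellQuote single-quotes each argument for /bin/sh (simplified stand-in).
func shellQuote(args []string) string {
	quoted := make([]string, len(args))
	for i, a := range args {
		quoted[i] = "'" + strings.Replace(a, "'", `'\''`, -1) + "'"
	}
	return strings.Join(quoted, " ")
}

func main() {
	params := []string{"lxc-start", "-n", "abc123", "-f", "/var/lib/docker/containers/abc123/config.lxc", "--", "/.dockerinit"}
	shellString := "mount --make-rslave /; exec " + shellQuote(params)
	wrapped := []string{"unshare", "-m", "--", "/bin/sh", "-c", shellString}
	fmt.Println(strings.Join(wrapped, " "))
}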
+ + if err := c.Start(); err != nil { + return -1, err + } + + var ( + waitErr error + waitLock = make(chan struct{}) + ) + go func() { + if err := c.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0 + waitErr = err + } + } + close(waitLock) + }() + + // Poll lxc for RUNNING status + pid, err := d.waitForStart(c, waitLock) + if err != nil { + if c.Process != nil { + c.Process.Kill() + } + return -1, err + } + c.ContainerPid = pid + + if startCallback != nil { + startCallback(c) + } + + <-waitLock + + return getExitCode(c), waitErr +} + +/// Return the exit code of the process +// if the process has not exited -1 will be returned +func getExitCode(c *execdriver.Command) int { + if c.ProcessState == nil { + return -1 + } + return c.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (d *driver) Kill(c *execdriver.Command, sig int) error { + return KillLxc(c.ID, sig) +} + +func (d *driver) version() string { + var ( + version string + output []byte + err error + ) + if _, errPath := exec.LookPath("lxc-version"); errPath == nil { + output, err = exec.Command("lxc-version").CombinedOutput() + } else { + output, err = exec.Command("lxc-start", "--version").CombinedOutput() + } + if err == nil { + version = strings.TrimSpace(string(output)) + if parts := strings.SplitN(version, ":", 2); len(parts) == 2 { + version = strings.TrimSpace(parts[1]) + } + } + return version +} + +func KillLxc(id string, sig int) error { + var ( + err error + output []byte + ) + _, err = exec.LookPath("lxc-kill") + if err == nil { + output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput() + } else { + output, err = exec.Command("lxc-stop", "-k", "-n", id, strconv.Itoa(sig)).CombinedOutput() + } + if err != nil { + return fmt.Errorf("Err: %s Output: %s", err, output) + } + return nil +} + +// wait for the process to start and return the pid for the process +func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (int, error) { + var ( + err error + output []byte + ) + // We wait for the container to be fully running. + // Timeout after 5 seconds. In case of broken pipe, just retry. 
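// (Summary of the loop below: it returns the container pid as soon as lxc-info
// reports RUNNING; returns -1 with no error if waitLock closes first, i.e. the
// command already ran and exited; returns any probe or parse error as-is; and
// otherwise gives up after roughly five seconds with execdriver.ErrNotRunning.)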
+ // Note: The container can run and finish correctly before + // the end of this loop + for now := time.Now(); time.Since(now) < 5*time.Second; { + select { + case <-waitLock: + // If the process dies while waiting for it, just return + return -1, nil + default: + } + + output, err = d.getInfo(c.ID) + if err != nil { + output, err = d.getInfo(c.ID) + if err != nil { + return -1, err + } + } + info, err := parseLxcInfo(string(output)) + if err != nil { + return -1, err + } + if info.Running { + return info.Pid, nil + } + time.Sleep(50 * time.Millisecond) + } + return -1, execdriver.ErrNotRunning +} + +func (d *driver) getInfo(id string) ([]byte, error) { + return exec.Command("lxc-info", "-n", id).CombinedOutput() +} + +type info struct { + ID string + driver *driver +} + +func (i *info) IsRunning() bool { + var running bool + + output, err := i.driver.getInfo(i.ID) + if err != nil { + utils.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output) + return false + } + if strings.Contains(string(output), "RUNNING") { + running = true + } + return running +} + +func (d *driver) Info(id string) execdriver.Info { + return &info{ + ID: id, + driver: d, + } +} + +func (d *driver) GetPidsForContainer(id string) ([]int, error) { + pids := []int{} + + // cpu is chosen because it is the only non optional subsystem in cgroups + subsystem := "cpu" + cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) + if err != nil { + return pids, err + } + + cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) + if err != nil { + return pids, err + } + + filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") + if _, err := os.Stat(filename); os.IsNotExist(err) { + // With more recent lxc versions use, cgroup will be in lxc/ + filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks") + } + + output, err := ioutil.ReadFile(filename) + if err != nil { + return pids, err + } + for _, p := range strings.Split(string(output), "\n") { + if len(p) == 0 { + continue + } + pid, err := strconv.Atoi(p) + if err != nil { + return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) + } + pids = append(pids, pid) + } + return pids, nil +} + +func linkLxcStart(root string) error { + sourcePath, err := exec.LookPath("lxc-start") + if err != nil { + return err + } + targetPath := path.Join(root, "lxc-start-unconfined") + + if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) { + return err + } else if err == nil { + if err := os.Remove(targetPath); err != nil { + return err + } + } + return os.Symlink(sourcePath, targetPath) +} + +// TODO: This can be moved to the mountinfo reader in the mount pkg +func rootIsShared() bool { + if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil { + for _, line := range strings.Split(string(data), "\n") { + cols := strings.Split(line, " ") + if len(cols) >= 6 && cols[4] == "/" { + return strings.HasPrefix(cols[6], "shared") + } + } + } + + // No idea, probably safe to assume so + return true +} + +func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { + root := path.Join(d.root, "containers", c.ID, "config.lxc") + fo, err := os.Create(root) + if err != nil { + return "", err + } + defer fo.Close() + + if err := LxcTemplateCompiled.Execute(fo, struct { + *execdriver.Command + AppArmor bool + }{ + Command: c, + AppArmor: d.apparmor, + }); err != nil { + return "", err + } + return root, nil +} diff --git a/runtime/execdriver/lxc/info.go b/runtime/execdriver/lxc/info.go new file mode 100644 index 
0000000000..27b4c58604 --- /dev/null +++ b/runtime/execdriver/lxc/info.go @@ -0,0 +1,50 @@ +package lxc + +import ( + "bufio" + "errors" + "strconv" + "strings" +) + +var ( + ErrCannotParse = errors.New("cannot parse raw input") +) + +type lxcInfo struct { + Running bool + Pid int +} + +func parseLxcInfo(raw string) (*lxcInfo, error) { + if raw == "" { + return nil, ErrCannotParse + } + var ( + err error + s = bufio.NewScanner(strings.NewReader(raw)) + info = &lxcInfo{} + ) + for s.Scan() { + text := s.Text() + + if s.Err() != nil { + return nil, s.Err() + } + + parts := strings.Split(text, ":") + if len(parts) < 2 { + continue + } + switch strings.ToLower(strings.TrimSpace(parts[0])) { + case "state": + info.Running = strings.TrimSpace(parts[1]) == "RUNNING" + case "pid": + info.Pid, err = strconv.Atoi(strings.TrimSpace(parts[1])) + if err != nil { + return nil, err + } + } + } + return info, nil +} diff --git a/runtime/execdriver/lxc/info_test.go b/runtime/execdriver/lxc/info_test.go new file mode 100644 index 0000000000..edafc02511 --- /dev/null +++ b/runtime/execdriver/lxc/info_test.go @@ -0,0 +1,36 @@ +package lxc + +import ( + "testing" +) + +func TestParseRunningInfo(t *testing.T) { + raw := ` + state: RUNNING + pid: 50` + + info, err := parseLxcInfo(raw) + if err != nil { + t.Fatal(err) + } + if !info.Running { + t.Fatal("info should return a running state") + } + if info.Pid != 50 { + t.Fatalf("info should have pid 50 got %d", info.Pid) + } +} + +func TestEmptyInfo(t *testing.T) { + _, err := parseLxcInfo("") + if err == nil { + t.Fatal("error should not be nil") + } +} + +func TestBadInfo(t *testing.T) { + _, err := parseLxcInfo("state") + if err != nil { + t.Fatal(err) + } +} diff --git a/runtime/execdriver/lxc/init.go b/runtime/execdriver/lxc/init.go new file mode 100644 index 0000000000..946c8c930f --- /dev/null +++ b/runtime/execdriver/lxc/init.go @@ -0,0 +1,176 @@ +package lxc + +import ( + "encoding/json" + "fmt" + "github.com/dotcloud/docker/runtime/execdriver" + "github.com/dotcloud/docker/pkg/netlink" + "github.com/dotcloud/docker/pkg/user" + "github.com/syndtr/gocapability/capability" + "io/ioutil" + "net" + "os" + "strings" + "syscall" +) + +// Clear environment pollution introduced by lxc-start +func setupEnv(args *execdriver.InitArgs) error { + // Get env + var env []string + content, err := ioutil.ReadFile(".dockerenv") + if err != nil { + return fmt.Errorf("Unable to load environment variables: %v", err) + } + if err := json.Unmarshal(content, &env); err != nil { + return fmt.Errorf("Unable to unmarshal environment variables: %v", err) + } + // Propagate the plugin-specific container env variable + env = append(env, "container="+os.Getenv("container")) + + args.Env = env + + os.Clearenv() + for _, kv := range args.Env { + parts := strings.SplitN(kv, "=", 2) + if len(parts) == 1 { + parts = append(parts, "") + } + os.Setenv(parts[0], parts[1]) + } + + return nil +} + +func setupHostname(args *execdriver.InitArgs) error { + hostname := getEnv(args, "HOSTNAME") + if hostname == "" { + return nil + } + return setHostname(hostname) +} + +// Setup networking +func setupNetworking(args *execdriver.InitArgs) error { + if args.Ip != "" { + // eth0 + iface, err := net.InterfaceByName("eth0") + if err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + ip, ipNet, err := net.ParseCIDR(args.Ip) + if err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkLinkAddIp(iface, ip, ipNet); err != nil { + return 
fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkSetMTU(iface, args.Mtu); err != nil { + return fmt.Errorf("Unable to set MTU: %v", err) + } + if err := netlink.NetworkLinkUp(iface); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + + // loopback + iface, err = net.InterfaceByName("lo") + if err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + if err := netlink.NetworkLinkUp(iface); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + } + if args.Gateway != "" { + gw := net.ParseIP(args.Gateway) + if gw == nil { + return fmt.Errorf("Unable to set up networking, %s is not a valid gateway IP", args.Gateway) + } + + if err := netlink.AddDefaultGw(gw); err != nil { + return fmt.Errorf("Unable to set up networking: %v", err) + } + } + + return nil +} + +// Setup working directory +func setupWorkingDirectory(args *execdriver.InitArgs) error { + if args.WorkDir == "" { + return nil + } + if err := syscall.Chdir(args.WorkDir); err != nil { + return fmt.Errorf("Unable to change dir to %v: %v", args.WorkDir, err) + } + return nil +} + +// Takes care of dropping privileges to the desired user +func changeUser(args *execdriver.InitArgs) error { + uid, gid, suppGids, err := user.GetUserGroupSupplementary( + args.User, + syscall.Getuid(), syscall.Getgid(), + ) + if err != nil { + return err + } + + if err := syscall.Setgroups(suppGids); err != nil { + return fmt.Errorf("Setgroups failed: %v", err) + } + if err := syscall.Setgid(gid); err != nil { + return fmt.Errorf("Setgid failed: %v", err) + } + if err := syscall.Setuid(uid); err != nil { + return fmt.Errorf("Setuid failed: %v", err) + } + + return nil +} + +func setupCapabilities(args *execdriver.InitArgs) error { + if args.Privileged { + return nil + } + + drop := []capability.Cap{ + capability.CAP_SETPCAP, + capability.CAP_SYS_MODULE, + capability.CAP_SYS_RAWIO, + capability.CAP_SYS_PACCT, + capability.CAP_SYS_ADMIN, + capability.CAP_SYS_NICE, + capability.CAP_SYS_RESOURCE, + capability.CAP_SYS_TIME, + capability.CAP_SYS_TTY_CONFIG, + capability.CAP_MKNOD, + capability.CAP_AUDIT_WRITE, + capability.CAP_AUDIT_CONTROL, + capability.CAP_MAC_OVERRIDE, + capability.CAP_MAC_ADMIN, + capability.CAP_NET_ADMIN, + } + + c, err := capability.NewPid(os.Getpid()) + if err != nil { + return err + } + + c.Unset(capability.CAPS|capability.BOUNDS, drop...) 
+ + if err := c.Apply(capability.CAPS | capability.BOUNDS); err != nil { + return err + } + return nil +} + +func getEnv(args *execdriver.InitArgs, key string) string { + for _, kv := range args.Env { + parts := strings.SplitN(kv, "=", 2) + if parts[0] == key && len(parts) == 2 { + return parts[1] + } + } + return "" +} diff --git a/runtime/execdriver/lxc/lxc_init_linux.go b/runtime/execdriver/lxc/lxc_init_linux.go new file mode 100644 index 0000000000..7288f5877b --- /dev/null +++ b/runtime/execdriver/lxc/lxc_init_linux.go @@ -0,0 +1,11 @@ +// +build amd64 + +package lxc + +import ( + "syscall" +) + +func setHostname(hostname string) error { + return syscall.Sethostname([]byte(hostname)) +} diff --git a/runtime/execdriver/lxc/lxc_init_unsupported.go b/runtime/execdriver/lxc/lxc_init_unsupported.go new file mode 100644 index 0000000000..d68cb91a1e --- /dev/null +++ b/runtime/execdriver/lxc/lxc_init_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux !amd64 + +package lxc + +func setHostname(hostname string) error { + panic("Not supported on darwin") +} diff --git a/runtime/execdriver/lxc/lxc_template.go b/runtime/execdriver/lxc/lxc_template.go new file mode 100644 index 0000000000..db55287522 --- /dev/null +++ b/runtime/execdriver/lxc/lxc_template.go @@ -0,0 +1,155 @@ +package lxc + +import ( + "github.com/dotcloud/docker/runtime/execdriver" + "strings" + "text/template" +) + +const LxcTemplate = ` +{{if .Network}} +# network configuration +lxc.network.type = veth +lxc.network.link = {{.Network.Bridge}} +lxc.network.name = eth0 +lxc.network.mtu = {{.Network.Mtu}} +{{else}} +# network is disabled (-n=false) +lxc.network.type = empty +lxc.network.flags = up +{{end}} + +# root filesystem +{{$ROOTFS := .Rootfs}} +lxc.rootfs = {{$ROOTFS}} + +# use a dedicated pts for the container (and limit the number of pseudo terminal +# available) +lxc.pts = 1024 + +# disable the main console +lxc.console = none + +# no controlling tty at all +lxc.tty = 1 + +{{if .Privileged}} +lxc.cgroup.devices.allow = a +{{else}} +# no implicit access to devices +lxc.cgroup.devices.deny = a + +# /dev/null and zero +lxc.cgroup.devices.allow = c 1:3 rwm +lxc.cgroup.devices.allow = c 1:5 rwm + +# consoles +lxc.cgroup.devices.allow = c 5:1 rwm +lxc.cgroup.devices.allow = c 5:0 rwm +lxc.cgroup.devices.allow = c 4:0 rwm +lxc.cgroup.devices.allow = c 4:1 rwm + +# /dev/urandom,/dev/random +lxc.cgroup.devices.allow = c 1:9 rwm +lxc.cgroup.devices.allow = c 1:8 rwm + +# /dev/pts/ - pts namespaces are "coming soon" +lxc.cgroup.devices.allow = c 136:* rwm +lxc.cgroup.devices.allow = c 5:2 rwm + +# tuntap +lxc.cgroup.devices.allow = c 10:200 rwm + +# fuse +#lxc.cgroup.devices.allow = c 10:229 rwm + +# rtc +#lxc.cgroup.devices.allow = c 254:0 rwm +{{end}} + +# standard mount point +# Use mnt.putold as per https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/986385 +lxc.pivotdir = lxc_putold + +# NOTICE: These mounts must be applied within the namespace + +# WARNING: procfs is a known attack vector and should probably be disabled +# if your userspace allows it. eg. see http://blog.zx2c4.com/749 +lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0 + +# WARNING: sysfs is a known attack vector and should probably be disabled +# if your userspace allows it. eg. 
see http://bit.ly/T9CkqJ +lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0 + +{{if .Tty}} +lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 +{{end}} + +lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0 +lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0 + +{{range $value := .Mounts}} +{{if $value.Writable}} +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,rw 0 0 +{{else}} +lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,ro 0 0 +{{end}} +{{end}} + +{{if .Privileged}} +{{if .AppArmor}} +lxc.aa_profile = unconfined +{{else}} +#lxc.aa_profile = unconfined +{{end}} +{{end}} + +# limits +{{if .Resources}} +{{if .Resources.Memory}} +lxc.cgroup.memory.limit_in_bytes = {{.Resources.Memory}} +lxc.cgroup.memory.soft_limit_in_bytes = {{.Resources.Memory}} +{{with $memSwap := getMemorySwap .Resources}} +lxc.cgroup.memory.memsw.limit_in_bytes = {{$memSwap}} +{{end}} +{{end}} +{{if .Resources.CpuShares}} +lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} +{{end}} +{{end}} + +{{if .Config}} +{{range $value := .Config}} +{{$value}} +{{end}} +{{end}} +` + +var LxcTemplateCompiled *template.Template + +// Escape spaces in strings according to the fstab documentation, which is the +// format for "lxc.mount.entry" lines in lxc.conf. See also "man 5 fstab". +func escapeFstabSpaces(field string) string { + return strings.Replace(field, " ", "\\040", -1) +} + +func getMemorySwap(v *execdriver.Resources) int64 { + // By default, MemorySwap is set to twice the size of RAM. + // If you want to omit MemorySwap, set it to `-1'. 
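// Worked example (illustrative numbers): with Memory = 536870912 (512 MB) and
// MemorySwap left at 0, this returns 1073741824, so the template above emits
// lxc.cgroup.memory.memsw.limit_in_bytes = 1073741824. With MemorySwap = -1 it
// returns 0 and the {{with}} block skips the memsw line entirely. Note that,
// as written, any positive MemorySwap value is also ignored in favor of
// Memory * 2.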
+ if v.MemorySwap < 0 { + return 0 + } + return v.Memory * 2 +} + +func init() { + var err error + funcMap := template.FuncMap{ + "getMemorySwap": getMemorySwap, + "escapeFstabSpaces": escapeFstabSpaces, + } + LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) + if err != nil { + panic(err) + } +} diff --git a/runtime/execdriver/lxc/lxc_template_unit_test.go b/runtime/execdriver/lxc/lxc_template_unit_test.go new file mode 100644 index 0000000000..ae66371836 --- /dev/null +++ b/runtime/execdriver/lxc/lxc_template_unit_test.go @@ -0,0 +1,125 @@ +package lxc + +import ( + "bufio" + "fmt" + "github.com/dotcloud/docker/runtime/execdriver" + "io/ioutil" + "math/rand" + "os" + "path" + "strings" + "testing" + "time" +) + +func TestLXCConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestLXCConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + // Memory is allocated randomly for testing + rand.Seed(time.Now().UTC().UnixNano()) + var ( + memMin = 33554432 + memMax = 536870912 + mem = memMin + rand.Intn(memMax-memMin) + cpuMin = 100 + cpuMax = 10000 + cpu = cpuMin + rand.Intn(cpuMax-cpuMin) + ) + + driver, err := NewDriver(root, false) + if err != nil { + t.Fatal(err) + } + command := &execdriver.Command{ + ID: "1", + Resources: &execdriver.Resources{ + Memory: int64(mem), + CpuShares: int64(cpu), + }, + } + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + grepFile(t, p, + fmt.Sprintf("lxc.cgroup.memory.limit_in_bytes = %d", mem)) + + grepFile(t, p, + fmt.Sprintf("lxc.cgroup.memory.memsw.limit_in_bytes = %d", mem*2)) +} + +func TestCustomLxcConfig(t *testing.T) { + root, err := ioutil.TempDir("", "TestCustomLxcConfig") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(root) + + os.MkdirAll(path.Join(root, "containers", "1"), 0777) + + driver, err := NewDriver(root, false) + if err != nil { + t.Fatal(err) + } + command := &execdriver.Command{ + ID: "1", + Privileged: false, + Config: []string{ + "lxc.utsname = docker", + "lxc.cgroup.cpuset.cpus = 0,1", + }, + } + + p, err := driver.generateLXCConfig(command) + if err != nil { + t.Fatal(err) + } + + grepFile(t, p, "lxc.utsname = docker") + grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1") +} + +func grepFile(t *testing.T, path string, pattern string) { + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := bufio.NewReader(f) + var ( + line string + ) + err = nil + for err == nil { + line, err = r.ReadString('\n') + if strings.Contains(line, pattern) == true { + return + } + } + t.Fatalf("grepFile: pattern \"%s\" not found in \"%s\"", pattern, path) +} + +func TestEscapeFstabSpaces(t *testing.T) { + var testInputs = map[string]string{ + " ": "\\040", + "": "", + "/double space": "/double\\040\\040space", + "/some long test string": "/some\\040long\\040test\\040string", + "/var/lib/docker": "/var/lib/docker", + " leading": "\\040leading", + "trailing ": "trailing\\040", + } + for in, exp := range testInputs { + if out := escapeFstabSpaces(in); exp != out { + t.Logf("Expected %s got %s", exp, out) + t.Fail() + } + } +} diff --git a/runtime/execdriver/native/default_template.go b/runtime/execdriver/native/default_template.go new file mode 100644 index 0000000000..0c382059e9 --- /dev/null +++ b/runtime/execdriver/native/default_template.go @@ -0,0 +1,94 @@ +package native + +import ( + "fmt" + "github.com/dotcloud/docker/runtime/execdriver" + 
"github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer" + "os" +) + +// createContainer populates and configures the container type with the +// data provided by the execdriver.Command +func createContainer(c *execdriver.Command) *libcontainer.Container { + container := getDefaultTemplate() + + container.Hostname = getEnv("HOSTNAME", c.Env) + container.Tty = c.Tty + container.User = c.User + container.WorkingDir = c.WorkingDir + container.Env = c.Env + + if c.Network != nil { + container.Networks = []*libcontainer.Network{ + { + Mtu: c.Network.Mtu, + Address: fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen), + Gateway: c.Network.Gateway, + Type: "veth", + Context: libcontainer.Context{ + "prefix": "veth", + "bridge": c.Network.Bridge, + }, + }, + } + } + + container.Cgroups.Name = c.ID + if c.Privileged { + container.CapabilitiesMask = nil + container.Cgroups.DeviceAccess = true + container.Context["apparmor_profile"] = "unconfined" + } + if c.Resources != nil { + container.Cgroups.CpuShares = c.Resources.CpuShares + container.Cgroups.Memory = c.Resources.Memory + container.Cgroups.MemorySwap = c.Resources.MemorySwap + } + // check to see if we are running in ramdisk to disable pivot root + container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" + + for _, m := range c.Mounts { + container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private}) + } + + return container +} + +// getDefaultTemplate returns the docker default for +// the libcontainer configuration file +func getDefaultTemplate() *libcontainer.Container { + return &libcontainer.Container{ + CapabilitiesMask: libcontainer.Capabilities{ + libcontainer.GetCapability("SETPCAP"), + libcontainer.GetCapability("SYS_MODULE"), + libcontainer.GetCapability("SYS_RAWIO"), + libcontainer.GetCapability("SYS_PACCT"), + libcontainer.GetCapability("SYS_ADMIN"), + libcontainer.GetCapability("SYS_NICE"), + libcontainer.GetCapability("SYS_RESOURCE"), + libcontainer.GetCapability("SYS_TIME"), + libcontainer.GetCapability("SYS_TTY_CONFIG"), + libcontainer.GetCapability("MKNOD"), + libcontainer.GetCapability("AUDIT_WRITE"), + libcontainer.GetCapability("AUDIT_CONTROL"), + libcontainer.GetCapability("MAC_OVERRIDE"), + libcontainer.GetCapability("MAC_ADMIN"), + libcontainer.GetCapability("NET_ADMIN"), + }, + Namespaces: libcontainer.Namespaces{ + libcontainer.GetNamespace("NEWNS"), + libcontainer.GetNamespace("NEWUTS"), + libcontainer.GetNamespace("NEWIPC"), + libcontainer.GetNamespace("NEWPID"), + libcontainer.GetNamespace("NEWNET"), + }, + Cgroups: &cgroups.Cgroup{ + Parent: "docker", + DeviceAccess: false, + }, + Context: libcontainer.Context{ + "apparmor_profile": "docker-default", + }, + } +} diff --git a/runtime/execdriver/native/driver.go b/runtime/execdriver/native/driver.go new file mode 100644 index 0000000000..ff6c541cf9 --- /dev/null +++ b/runtime/execdriver/native/driver.go @@ -0,0 +1,266 @@ +package native + +import ( + "encoding/json" + "fmt" + "github.com/dotcloud/docker/runtime/execdriver" + "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/apparmor" + "github.com/dotcloud/docker/pkg/libcontainer/nsinit" + "github.com/dotcloud/docker/pkg/system" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "syscall" +) + +const ( + DriverName = "native" + Version = "0.1" +) + +func init() { + 
execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error { + var ( + container *libcontainer.Container + ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}, createLogger("")) + ) + f, err := os.Open(filepath.Join(args.Root, "container.json")) + if err != nil { + return err + } + if err := json.NewDecoder(f).Decode(&container); err != nil { + f.Close() + return err + } + f.Close() + + cwd, err := os.Getwd() + if err != nil { + return err + } + syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(args.Pipe)) + if err != nil { + return err + } + if err := ns.Init(container, cwd, args.Console, syncPipe, args.Args); err != nil { + return err + } + return nil + }) +} + +type driver struct { + root string + initPath string +} + +func NewDriver(root, initPath string) (*driver, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + if err := apparmor.InstallDefaultProfile(); err != nil { + return nil, err + } + return &driver{ + root: root, + initPath: initPath, + }, nil +} + +func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + if err := d.validateCommand(c); err != nil { + return -1, err + } + var ( + term nsinit.Terminal + container = createContainer(c) + factory = &dockerCommandFactory{c: c, driver: d} + stateWriter = &dockerStateWriter{ + callback: startCallback, + c: c, + dsw: &nsinit.DefaultStateWriter{filepath.Join(d.root, c.ID)}, + } + ns = nsinit.NewNsInit(factory, stateWriter, createLogger(os.Getenv("DEBUG"))) + args = append([]string{c.Entrypoint}, c.Arguments...) + ) + if err := d.createContainerRoot(c.ID); err != nil { + return -1, err + } + defer d.removeContainerRoot(c.ID) + + if c.Tty { + term = &dockerTtyTerm{ + pipes: pipes, + } + } else { + term = &dockerStdTerm{ + pipes: pipes, + } + } + c.Terminal = term + if err := d.writeContainerFile(container, c.ID); err != nil { + return -1, err + } + return ns.Exec(container, term, args) +} + +func (d *driver) Kill(p *execdriver.Command, sig int) error { + err := syscall.Kill(p.Process.Pid, syscall.Signal(sig)) + d.removeContainerRoot(p.ID) + return err +} + +func (d *driver) Info(id string) execdriver.Info { + return &info{ + ID: id, + driver: d, + } +} + +func (d *driver) Name() string { + return fmt.Sprintf("%s-%s", DriverName, Version) +} + +// TODO: this can be improved with our driver +// there has to be a better way to do this +func (d *driver) GetPidsForContainer(id string) ([]int, error) { + pids := []int{} + + subsystem := "devices" + cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem) + if err != nil { + return pids, err + } + cgroupDir, err := cgroups.GetThisCgroupDir(subsystem) + if err != nil { + return pids, err + } + + filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks") + if _, err := os.Stat(filename); os.IsNotExist(err) { + filename = filepath.Join(cgroupRoot, cgroupDir, "docker", id, "tasks") + } + + output, err := ioutil.ReadFile(filename) + if err != nil { + return pids, err + } + for _, p := range strings.Split(string(output), "\n") { + if len(p) == 0 { + continue + } + pid, err := strconv.Atoi(p) + if err != nil { + return pids, fmt.Errorf("Invalid pid '%s': %s", p, err) + } + pids = append(pids, pid) + } + return pids, nil +} + +func (d *driver) writeContainerFile(container *libcontainer.Container, id string) error { + data, err := json.Marshal(container) + if err != nil { + return err + } + return ioutil.WriteFile(filepath.Join(d.root, 
id, "container.json"), data, 0655) +} + +func (d *driver) createContainerRoot(id string) error { + return os.MkdirAll(filepath.Join(d.root, id), 0655) +} + +func (d *driver) removeContainerRoot(id string) error { + return os.RemoveAll(filepath.Join(d.root, id)) +} + +func (d *driver) validateCommand(c *execdriver.Command) error { + // we need to check the Config of the command to make sure that we + // do not have any of the lxc-conf variables + for _, conf := range c.Config { + if strings.Contains(conf, "lxc") { + return fmt.Errorf("%s is not supported by the native driver", conf) + } + } + return nil +} + +func getEnv(key string, env []string) string { + for _, pair := range env { + parts := strings.Split(pair, "=") + if parts[0] == key { + return parts[1] + } + } + return "" +} + +type dockerCommandFactory struct { + c *execdriver.Command + driver *driver +} + +// createCommand will return an exec.Cmd with the Cloneflags set to the proper namespaces +// defined on the container's configuration and use the current binary as the init with the +// args provided +func (d *dockerCommandFactory) Create(container *libcontainer.Container, console string, syncFile *os.File, args []string) *exec.Cmd { + // we need to join the rootfs because nsinit will setup the rootfs and chroot + initPath := filepath.Join(d.c.Rootfs, d.c.InitPath) + + d.c.Path = d.driver.initPath + d.c.Args = append([]string{ + initPath, + "-driver", DriverName, + "-console", console, + "-pipe", "3", + "-root", filepath.Join(d.driver.root, d.c.ID), + "--", + }, args...) + + // set this to nil so that when we set the clone flags anything else is reset + d.c.SysProcAttr = nil + system.SetCloneFlags(&d.c.Cmd, uintptr(nsinit.GetNamespaceFlags(container.Namespaces))) + d.c.ExtraFiles = []*os.File{syncFile} + + d.c.Env = container.Env + d.c.Dir = d.c.Rootfs + + return &d.c.Cmd +} + +type dockerStateWriter struct { + dsw nsinit.StateWriter + c *execdriver.Command + callback execdriver.StartCallback +} + +func (d *dockerStateWriter) WritePid(pid int) error { + d.c.ContainerPid = pid + err := d.dsw.WritePid(pid) + if d.callback != nil { + d.callback(d.c) + } + return err +} + +func (d *dockerStateWriter) DeletePid() error { + return d.dsw.DeletePid() +} + +func createLogger(debug string) *log.Logger { + var w io.Writer + // if we are in debug mode set the logger to stderr + if debug != "" { + w = os.Stderr + } else { + w = ioutil.Discard + } + return log.New(w, "[libcontainer] ", log.LstdFlags) +} diff --git a/runtime/execdriver/native/info.go b/runtime/execdriver/native/info.go new file mode 100644 index 0000000000..aef2f85c6b --- /dev/null +++ b/runtime/execdriver/native/info.go @@ -0,0 +1,21 @@ +package native + +import ( + "os" + "path/filepath" +) + +type info struct { + ID string + driver *driver +} + +// IsRunning is determined by looking for the +// pid file for a container. If the file exists then the +// container is currently running +func (i *info) IsRunning() bool { + if _, err := os.Stat(filepath.Join(i.driver.root, i.ID, "pid")); err == nil { + return true + } + return false +} diff --git a/runtime/execdriver/native/term.go b/runtime/execdriver/native/term.go new file mode 100644 index 0000000000..0d5298d388 --- /dev/null +++ b/runtime/execdriver/native/term.go @@ -0,0 +1,42 @@ +/* + These types are wrappers around the libcontainer Terminal interface so that + we can resuse the docker implementations where possible. 
+*/ +package native + +import ( + "github.com/dotcloud/docker/runtime/execdriver" + "io" + "os" + "os/exec" +) + +type dockerStdTerm struct { + execdriver.StdConsole + pipes *execdriver.Pipes +} + +func (d *dockerStdTerm) Attach(cmd *exec.Cmd) error { + return d.AttachPipes(cmd, d.pipes) +} + +func (d *dockerStdTerm) SetMaster(master *os.File) { + // do nothing +} + +type dockerTtyTerm struct { + execdriver.TtyConsole + pipes *execdriver.Pipes +} + +func (t *dockerTtyTerm) Attach(cmd *exec.Cmd) error { + go io.Copy(t.pipes.Stdout, t.MasterPty) + if t.pipes.Stdin != nil { + go io.Copy(t.MasterPty, t.pipes.Stdin) + } + return nil +} + +func (t *dockerTtyTerm) SetMaster(master *os.File) { + t.MasterPty = master +} diff --git a/runtime/execdriver/pipes.go b/runtime/execdriver/pipes.go new file mode 100644 index 0000000000..158219f0c5 --- /dev/null +++ b/runtime/execdriver/pipes.go @@ -0,0 +1,23 @@ +package execdriver + +import ( + "io" +) + +// Pipes is a wrapper around a containers output for +// stdin, stdout, stderr +type Pipes struct { + Stdin io.ReadCloser + Stdout, Stderr io.Writer +} + +func NewPipes(stdin io.ReadCloser, stdout, stderr io.Writer, useStdin bool) *Pipes { + p := &Pipes{ + Stdout: stdout, + Stderr: stderr, + } + if useStdin { + p.Stdin = stdin + } + return p +} diff --git a/runtime/execdriver/termconsole.go b/runtime/execdriver/termconsole.go new file mode 100644 index 0000000000..af6b88d3d1 --- /dev/null +++ b/runtime/execdriver/termconsole.go @@ -0,0 +1,126 @@ +package execdriver + +import ( + "github.com/dotcloud/docker/pkg/term" + "github.com/kr/pty" + "io" + "os" + "os/exec" +) + +func SetTerminal(command *Command, pipes *Pipes) error { + var ( + term Terminal + err error + ) + if command.Tty { + term, err = NewTtyConsole(command, pipes) + } else { + term, err = NewStdConsole(command, pipes) + } + if err != nil { + return err + } + command.Terminal = term + return nil +} + +type TtyConsole struct { + MasterPty *os.File + SlavePty *os.File +} + +func NewTtyConsole(command *Command, pipes *Pipes) (*TtyConsole, error) { + ptyMaster, ptySlave, err := pty.Open() + if err != nil { + return nil, err + } + tty := &TtyConsole{ + MasterPty: ptyMaster, + SlavePty: ptySlave, + } + if err := tty.AttachPipes(&command.Cmd, pipes); err != nil { + tty.Close() + return nil, err + } + command.Console = tty.SlavePty.Name() + return tty, nil +} + +func (t *TtyConsole) Master() *os.File { + return t.MasterPty +} + +func (t *TtyConsole) Resize(h, w int) error { + return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) +} + +func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { + command.Stdout = t.SlavePty + command.Stderr = t.SlavePty + + go func() { + if wb, ok := pipes.Stdout.(interface { + CloseWriters() error + }); ok { + defer wb.CloseWriters() + } + io.Copy(pipes.Stdout, t.MasterPty) + }() + + if pipes.Stdin != nil { + command.Stdin = t.SlavePty + command.SysProcAttr.Setctty = true + + go func() { + defer pipes.Stdin.Close() + io.Copy(t.MasterPty, pipes.Stdin) + }() + } + return nil +} + +func (t *TtyConsole) Close() error { + t.SlavePty.Close() + return t.MasterPty.Close() +} + +type StdConsole struct { +} + +func NewStdConsole(command *Command, pipes *Pipes) (*StdConsole, error) { + std := &StdConsole{} + + if err := std.AttachPipes(&command.Cmd, pipes); err != nil { + return nil, err + } + return std, nil +} + +func (s *StdConsole) AttachPipes(command *exec.Cmd, pipes *Pipes) error { + command.Stdout = pipes.Stdout + 
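	// Stdout and stderr can be handed to the child directly; stdin (handled
	// below) goes through an os/exec StdinPipe plus a copy goroutine so the
	// caller can close its end independently of the running process.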
command.Stderr = pipes.Stderr + + if pipes.Stdin != nil { + stdin, err := command.StdinPipe() + if err != nil { + return err + } + + go func() { + defer stdin.Close() + io.Copy(stdin, pipes.Stdin) + }() + } + return nil +} + +func (s *StdConsole) Resize(h, w int) error { + // we do not need to reside a non tty + return nil +} + +func (s *StdConsole) Close() error { + // nothing to close here + return nil +} diff --git a/runtime/runtime.go b/runtime/runtime.go index 092b5a8130..f75a4df048 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -7,9 +7,9 @@ import ( "github.com/dotcloud/docker/daemonconfig" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/execdriver" - "github.com/dotcloud/docker/execdriver/execdrivers" - "github.com/dotcloud/docker/execdriver/lxc" + "github.com/dotcloud/docker/runtime/execdriver" + "github.com/dotcloud/docker/runtime/execdriver/execdrivers" + "github.com/dotcloud/docker/runtime/execdriver/lxc" "github.com/dotcloud/docker/graph" "github.com/dotcloud/docker/graphdriver" "github.com/dotcloud/docker/graphdriver/aufs" diff --git a/runtime/volumes.go b/runtime/volumes.go index 9cb66aae44..1bbb14a369 100644 --- a/runtime/volumes.go +++ b/runtime/volumes.go @@ -3,7 +3,7 @@ package runtime import ( "fmt" "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/execdriver" + "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/utils" "io/ioutil" "os" diff --git a/sysinit/sysinit.go b/sysinit/sysinit.go index 56508b105d..50c858296f 100644 --- a/sysinit/sysinit.go +++ b/sysinit/sysinit.go @@ -3,9 +3,9 @@ package sysinit import ( "flag" "fmt" - "github.com/dotcloud/docker/execdriver" - _ "github.com/dotcloud/docker/execdriver/lxc" - _ "github.com/dotcloud/docker/execdriver/native" + "github.com/dotcloud/docker/runtime/execdriver" + _ "github.com/dotcloud/docker/runtime/execdriver/lxc" + _ "github.com/dotcloud/docker/runtime/execdriver/native" "log" "os" ) -- cgit v1.2.1 From 96c4816cef592a98a235010924bb2417c8451079 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 14 Mar 2014 14:11:43 -0700 Subject: Move graphdrivers into runtime top level pkg Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- contrib/docker-device-tool/device_tool.go | 2 +- graph/graph.go | 2 +- graph/tags_unit_test.go | 4 +- graphdriver/aufs/aufs.go | 401 -------- graphdriver/aufs/aufs_test.go | 697 ------------- graphdriver/aufs/dirs.go | 46 - graphdriver/aufs/migrate.go | 194 ---- graphdriver/aufs/mount.go | 17 - graphdriver/aufs/mount_linux.go | 11 - graphdriver/aufs/mount_unsupported.go | 11 - graphdriver/btrfs/btrfs.go | 213 ---- graphdriver/btrfs/dummy_unsupported.go | 3 - graphdriver/devmapper/attach_loopback.go | 126 --- graphdriver/devmapper/deviceset.go | 1088 -------------------- graphdriver/devmapper/devmapper.go | 595 ----------- graphdriver/devmapper/devmapper_doc.go | 106 -- graphdriver/devmapper/devmapper_log.go | 15 - graphdriver/devmapper/devmapper_test.go | 287 ------ graphdriver/devmapper/devmapper_wrapper.go | 229 ---- graphdriver/devmapper/driver.go | 143 --- graphdriver/devmapper/driver_test.go | 886 ---------------- graphdriver/devmapper/ioctl.go | 71 -- graphdriver/devmapper/mount.go | 27 - graphdriver/devmapper/sys.go | 57 - graphdriver/driver.go | 93 -- graphdriver/vfs/driver.go | 95 -- image/graph.go | 2 +- image/image.go | 2 +- integration/graph_test.go | 2 +- runtime/container.go | 2 +- runtime/graphdriver/aufs/aufs.go | 401 ++++++++ 
runtime/graphdriver/aufs/aufs_test.go | 697 +++++++++++++ runtime/graphdriver/aufs/dirs.go | 46 + runtime/graphdriver/aufs/migrate.go | 194 ++++ runtime/graphdriver/aufs/mount.go | 17 + runtime/graphdriver/aufs/mount_linux.go | 11 + runtime/graphdriver/aufs/mount_unsupported.go | 11 + runtime/graphdriver/btrfs/btrfs.go | 213 ++++ runtime/graphdriver/btrfs/dummy_unsupported.go | 3 + runtime/graphdriver/devmapper/attach_loopback.go | 126 +++ runtime/graphdriver/devmapper/deviceset.go | 1088 ++++++++++++++++++++ runtime/graphdriver/devmapper/devmapper.go | 595 +++++++++++ runtime/graphdriver/devmapper/devmapper_doc.go | 106 ++ runtime/graphdriver/devmapper/devmapper_log.go | 15 + runtime/graphdriver/devmapper/devmapper_test.go | 287 ++++++ runtime/graphdriver/devmapper/devmapper_wrapper.go | 229 ++++ runtime/graphdriver/devmapper/driver.go | 143 +++ runtime/graphdriver/devmapper/driver_test.go | 886 ++++++++++++++++ runtime/graphdriver/devmapper/ioctl.go | 71 ++ runtime/graphdriver/devmapper/mount.go | 27 + runtime/graphdriver/devmapper/sys.go | 57 + runtime/graphdriver/driver.go | 93 ++ runtime/graphdriver/vfs/driver.go | 95 ++ runtime/runtime.go | 10 +- 54 files changed, 5424 insertions(+), 5424 deletions(-) delete mode 100644 graphdriver/aufs/aufs.go delete mode 100644 graphdriver/aufs/aufs_test.go delete mode 100644 graphdriver/aufs/dirs.go delete mode 100644 graphdriver/aufs/migrate.go delete mode 100644 graphdriver/aufs/mount.go delete mode 100644 graphdriver/aufs/mount_linux.go delete mode 100644 graphdriver/aufs/mount_unsupported.go delete mode 100644 graphdriver/btrfs/btrfs.go delete mode 100644 graphdriver/btrfs/dummy_unsupported.go delete mode 100644 graphdriver/devmapper/attach_loopback.go delete mode 100644 graphdriver/devmapper/deviceset.go delete mode 100644 graphdriver/devmapper/devmapper.go delete mode 100644 graphdriver/devmapper/devmapper_doc.go delete mode 100644 graphdriver/devmapper/devmapper_log.go delete mode 100644 graphdriver/devmapper/devmapper_test.go delete mode 100644 graphdriver/devmapper/devmapper_wrapper.go delete mode 100644 graphdriver/devmapper/driver.go delete mode 100644 graphdriver/devmapper/driver_test.go delete mode 100644 graphdriver/devmapper/ioctl.go delete mode 100644 graphdriver/devmapper/mount.go delete mode 100644 graphdriver/devmapper/sys.go delete mode 100644 graphdriver/driver.go delete mode 100644 graphdriver/vfs/driver.go create mode 100644 runtime/graphdriver/aufs/aufs.go create mode 100644 runtime/graphdriver/aufs/aufs_test.go create mode 100644 runtime/graphdriver/aufs/dirs.go create mode 100644 runtime/graphdriver/aufs/migrate.go create mode 100644 runtime/graphdriver/aufs/mount.go create mode 100644 runtime/graphdriver/aufs/mount_linux.go create mode 100644 runtime/graphdriver/aufs/mount_unsupported.go create mode 100644 runtime/graphdriver/btrfs/btrfs.go create mode 100644 runtime/graphdriver/btrfs/dummy_unsupported.go create mode 100644 runtime/graphdriver/devmapper/attach_loopback.go create mode 100644 runtime/graphdriver/devmapper/deviceset.go create mode 100644 runtime/graphdriver/devmapper/devmapper.go create mode 100644 runtime/graphdriver/devmapper/devmapper_doc.go create mode 100644 runtime/graphdriver/devmapper/devmapper_log.go create mode 100644 runtime/graphdriver/devmapper/devmapper_test.go create mode 100644 runtime/graphdriver/devmapper/devmapper_wrapper.go create mode 100644 runtime/graphdriver/devmapper/driver.go create mode 100644 runtime/graphdriver/devmapper/driver_test.go create mode 100644 
runtime/graphdriver/devmapper/ioctl.go create mode 100644 runtime/graphdriver/devmapper/mount.go create mode 100644 runtime/graphdriver/devmapper/sys.go create mode 100644 runtime/graphdriver/driver.go create mode 100644 runtime/graphdriver/vfs/driver.go diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go index 4d1ee0cea5..12c762a7f3 100644 --- a/contrib/docker-device-tool/device_tool.go +++ b/contrib/docker-device-tool/device_tool.go @@ -3,7 +3,7 @@ package main import ( "flag" "fmt" - "github.com/dotcloud/docker/graphdriver/devmapper" + "github.com/dotcloud/docker/runtime/graphdriver/devmapper" "os" "path" "sort" diff --git a/graph/graph.go b/graph/graph.go index 01659b549f..f71b8a003e 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go index 153f94db3d..cae5c2916e 100644 --- a/graph/tags_unit_test.go +++ b/graph/tags_unit_test.go @@ -2,8 +2,8 @@ package graph import ( "bytes" - "github.com/dotcloud/docker/graphdriver" - _ "github.com/dotcloud/docker/graphdriver/vfs" // import the vfs driver so it is used in the tests + "github.com/dotcloud/docker/runtime/graphdriver" + _ "github.com/dotcloud/docker/runtime/graphdriver/vfs" // import the vfs driver so it is used in the tests "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" diff --git a/graphdriver/aufs/aufs.go b/graphdriver/aufs/aufs.go deleted file mode 100644 index a15cf6b273..0000000000 --- a/graphdriver/aufs/aufs.go +++ /dev/null @@ -1,401 +0,0 @@ -/* - -aufs driver directory structure - -. -├── layers // Metadata of layers -│   ├── 1 -│   ├── 2 -│   └── 3 -├── diffs // Content of the layer -│   ├── 1 // Contains layers that need to be mounted for the id -│   ├── 2 -│   └── 3 -└── mnt // Mount points for the rw layers to be mounted - ├── 1 - ├── 2 - └── 3 - -*/ - -package aufs - -import ( - "bufio" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/graphdriver" - mountpk "github.com/dotcloud/docker/pkg/mount" - "github.com/dotcloud/docker/utils" - "os" - "os/exec" - "path" - "strings" - "sync" -) - -var ( - ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") -) - -func init() { - graphdriver.Register("aufs", Init) -} - -type Driver struct { - root string - sync.Mutex // Protects concurrent modification to active - active map[string]int -} - -// New returns a new AUFS driver. -// An error is returned if AUFS is not supported. 
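// Editor's note: Init first checks for aufs support (a best-effort
// "modprobe aufs" followed by scanning /proc/filesystems), then creates the
// mnt, diff and layers directories under the driver root; if the root already
// exists it is reused as-is.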
-func Init(root string) (graphdriver.Driver, error) { - // Try to load the aufs kernel module - if err := supportsAufs(); err != nil { - return nil, err - } - paths := []string{ - "mnt", - "diff", - "layers", - } - - a := &Driver{ - root: root, - active: make(map[string]int), - } - - // Create the root aufs driver dir and return - // if it already exists - // If not populate the dir structure - if err := os.MkdirAll(root, 0755); err != nil { - if os.IsExist(err) { - return a, nil - } - return nil, err - } - - for _, p := range paths { - if err := os.MkdirAll(path.Join(root, p), 0755); err != nil { - return nil, err - } - } - return a, nil -} - -// Return a nil error if the kernel supports aufs -// We cannot modprobe because inside dind modprobe fails -// to run -func supportsAufs() error { - // We can try to modprobe aufs first before looking at - // proc/filesystems for when aufs is supported - exec.Command("modprobe", "aufs").Run() - - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.Contains(s.Text(), "aufs") { - return nil - } - } - return ErrAufsNotSupported -} - -func (a Driver) rootPath() string { - return a.root -} - -func (Driver) String() string { - return "aufs" -} - -func (a Driver) Status() [][2]string { - ids, _ := loadIds(path.Join(a.rootPath(), "layers")) - return [][2]string{ - {"Root Dir", a.rootPath()}, - {"Dirs", fmt.Sprintf("%d", len(ids))}, - } -} - -// Exists returns true if the given id is registered with -// this driver -func (a Driver) Exists(id string) bool { - if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { - return false - } - return true -} - -// Three folders are created for each id -// mnt, layers, and diff -func (a *Driver) Create(id, parent string) error { - if err := a.createDirsFor(id); err != nil { - return err - } - // Write the layers metadata - f, err := os.Create(path.Join(a.rootPath(), "layers", id)) - if err != nil { - return err - } - defer f.Close() - - if parent != "" { - ids, err := getParentIds(a.rootPath(), parent) - if err != nil { - return err - } - - if _, err := fmt.Fprintln(f, parent); err != nil { - return err - } - for _, i := range ids { - if _, err := fmt.Fprintln(f, i); err != nil { - return err - } - } - } - return nil -} - -func (a *Driver) createDirsFor(id string) error { - paths := []string{ - "mnt", - "diff", - } - - for _, p := range paths { - if err := os.MkdirAll(path.Join(a.rootPath(), p, id), 0755); err != nil { - return err - } - } - return nil -} - -// Unmount and remove the dir information -func (a *Driver) Remove(id string) error { - // Protect the a.active from concurrent access - a.Lock() - defer a.Unlock() - - if a.active[id] != 0 { - utils.Errorf("Warning: removing active id %s\n", id) - } - - // Make sure the dir is umounted first - if err := a.unmount(id); err != nil { - return err - } - tmpDirs := []string{ - "mnt", - "diff", - } - - // Atomically remove each directory in turn by first moving it out of the - // way (so that docker doesn't find it anymore) before doing removal of - // the whole tree. 
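	// Editor's note: renaming to "<id>-removing" first is what keeps removal
	// safe against concurrent lookups; once the directory is out of the paths
	// docker scans, the deferred os.RemoveAll can take as long as it needs.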
- for _, p := range tmpDirs { - - realPath := path.Join(a.rootPath(), p, id) - tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) - if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { - return err - } - defer os.RemoveAll(tmpPath) - } - - // Remove the layers file for the id - if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// Return the rootfs path for the id -// This will mount the dir at it's given path -func (a *Driver) Get(id string) (string, error) { - ids, err := getParentIds(a.rootPath(), id) - if err != nil { - if !os.IsNotExist(err) { - return "", err - } - ids = []string{} - } - - // Protect the a.active from concurrent access - a.Lock() - defer a.Unlock() - - count := a.active[id] - - // If a dir does not have a parent ( no layers )do not try to mount - // just return the diff path to the data - out := path.Join(a.rootPath(), "diff", id) - if len(ids) > 0 { - out = path.Join(a.rootPath(), "mnt", id) - - if count == 0 { - if err := a.mount(id); err != nil { - return "", err - } - } - } - - a.active[id] = count + 1 - - return out, nil -} - -func (a *Driver) Put(id string) { - // Protect the a.active from concurrent access - a.Lock() - defer a.Unlock() - - if count := a.active[id]; count > 1 { - a.active[id] = count - 1 - } else { - ids, _ := getParentIds(a.rootPath(), id) - // We only mounted if there are any parents - if ids != nil && len(ids) > 0 { - a.unmount(id) - } - delete(a.active, id) - } -} - -// Returns an archive of the contents for the id -func (a *Driver) Diff(id string) (archive.Archive, error) { - return archive.TarFilter(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - Compression: archive.Uncompressed, - }) -} - -func (a *Driver) ApplyDiff(id string, diff archive.ArchiveReader) error { - return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) -} - -// Returns the size of the contents for the id -func (a *Driver) DiffSize(id string) (int64, error) { - return utils.TreeSize(path.Join(a.rootPath(), "diff", id)) -} - -func (a *Driver) Changes(id string) ([]archive.Change, error) { - layers, err := a.getParentLayerPaths(id) - if err != nil { - return nil, err - } - return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) -} - -func (a *Driver) getParentLayerPaths(id string) ([]string, error) { - parentIds, err := getParentIds(a.rootPath(), id) - if err != nil { - return nil, err - } - if len(parentIds) == 0 { - return nil, fmt.Errorf("Dir %s does not have any parent layers", id) - } - layers := make([]string, len(parentIds)) - - // Get the diff paths for all the parent ids - for i, p := range parentIds { - layers[i] = path.Join(a.rootPath(), "diff", p) - } - return layers, nil -} - -func (a *Driver) mount(id string) error { - // If the id is mounted or we get an error return - if mounted, err := a.mounted(id); err != nil || mounted { - return err - } - - var ( - target = path.Join(a.rootPath(), "mnt", id) - rw = path.Join(a.rootPath(), "diff", id) - ) - - layers, err := a.getParentLayerPaths(id) - if err != nil { - return err - } - - if err := a.aufsMount(layers, rw, target); err != nil { - return err - } - return nil -} - -func (a *Driver) unmount(id string) error { - if mounted, err := a.mounted(id); err != nil || !mounted { - return err - } - target := path.Join(a.rootPath(), "mnt", id) - return Unmount(target) -} - -func (a *Driver) mounted(id string) (bool, error) { - target := path.Join(a.rootPath(), 
"mnt", id) - return mountpk.Mounted(target) -} - -// During cleanup aufs needs to unmount all mountpoints -func (a *Driver) Cleanup() error { - ids, err := loadIds(path.Join(a.rootPath(), "layers")) - if err != nil { - return err - } - for _, id := range ids { - if err := a.unmount(id); err != nil { - utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err) - } - } - return nil -} - -func (a *Driver) aufsMount(ro []string, rw, target string) (err error) { - defer func() { - if err != nil { - Unmount(target) - } - }() - - if err = a.tryMount(ro, rw, target); err != nil { - if err = a.mountRw(rw, target); err != nil { - return - } - - for _, layer := range ro { - branch := fmt.Sprintf("append:%s=ro+wh", layer) - if err = mount("none", target, "aufs", MsRemount, branch); err != nil { - return - } - } - } - return -} - -// Try to mount using the aufs fast path, if this fails then -// append ro layers. -func (a *Driver) tryMount(ro []string, rw, target string) (err error) { - var ( - rwBranch = fmt.Sprintf("%s=rw", rw) - roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:")) - ) - return mount("none", target, "aufs", 0, fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches)) -} - -func (a *Driver) mountRw(rw, target string) error { - return mount("none", target, "aufs", 0, fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw)) -} - -func rollbackMount(target string, err error) { - if err != nil { - Unmount(target) - } -} diff --git a/graphdriver/aufs/aufs_test.go b/graphdriver/aufs/aufs_test.go deleted file mode 100644 index 6002bec5a1..0000000000 --- a/graphdriver/aufs/aufs_test.go +++ /dev/null @@ -1,697 +0,0 @@ -package aufs - -import ( - "crypto/sha256" - "encoding/hex" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/graphdriver" - "io/ioutil" - "os" - "path" - "testing" -) - -var ( - tmp = path.Join(os.TempDir(), "aufs-tests", "aufs") -) - -func testInit(dir string, t *testing.T) graphdriver.Driver { - d, err := Init(dir) - if err != nil { - if err == ErrAufsNotSupported { - t.Skip(err) - } else { - t.Fatal(err) - } - } - return d -} - -func newDriver(t *testing.T) *Driver { - if err := os.MkdirAll(tmp, 0755); err != nil { - t.Fatal(err) - } - - d := testInit(tmp, t) - return d.(*Driver) -} - -func TestNewDriver(t *testing.T) { - if err := os.MkdirAll(tmp, 0755); err != nil { - t.Fatal(err) - } - - d := testInit(tmp, t) - defer os.RemoveAll(tmp) - if d == nil { - t.Fatalf("Driver should not be nil") - } -} - -func TestAufsString(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if d.String() != "aufs" { - t.Fatalf("Expected aufs got %s", d.String()) - } -} - -func TestCreateDirStructure(t *testing.T) { - newDriver(t) - defer os.RemoveAll(tmp) - - paths := []string{ - "mnt", - "layers", - "diff", - } - - for _, p := range paths { - if _, err := os.Stat(path.Join(tmp, p)); err != nil { - t.Fatal(err) - } - } -} - -// We should be able to create two drivers with the same dir structure -func TestNewDriverFromExistingDir(t *testing.T) { - if err := os.MkdirAll(tmp, 0755); err != nil { - t.Fatal(err) - } - - testInit(tmp, t) - testInit(tmp, t) - os.RemoveAll(tmp) -} - -func TestCreateNewDir(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } -} - -func TestCreateNewDirStructure(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - paths := []string{ - "mnt", - "diff", - 
"layers", - } - - for _, p := range paths { - if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { - t.Fatal(err) - } - } -} - -func TestRemoveImage(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - if err := d.Remove("1"); err != nil { - t.Fatal(err) - } - - paths := []string{ - "mnt", - "diff", - "layers", - } - - for _, p := range paths { - if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { - t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) - } - } -} - -func TestGetWithoutParent(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - expected := path.Join(tmp, "diff", "1") - if diffPath != expected { - t.Fatalf("Expected path %s got %s", expected, diffPath) - } -} - -func TestCleanupWithNoDirs(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } -} - -func TestCleanupWithDir(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } -} - -func TestMountedFalseResponse(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - response, err := d.mounted("1") - if err != nil { - t.Fatal(err) - } - - if response != false { - t.Fatalf("Response if dir id 1 is mounted should be false") - } -} - -func TestMountedTrueReponse(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - _, err := d.Get("2") - if err != nil { - t.Fatal(err) - } - - response, err := d.mounted("2") - if err != nil { - t.Fatal(err) - } - - if response != true { - t.Fatalf("Response if dir id 2 is mounted should be true") - } -} - -func TestMountWithParent(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - defer func() { - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - }() - - mntPath, err := d.Get("2") - if err != nil { - t.Fatal(err) - } - if mntPath == "" { - t.Fatal("mntPath should not be empty string") - } - - expected := path.Join(tmp, "mnt", "2") - if mntPath != expected { - t.Fatalf("Expected %s got %s", expected, mntPath) - } -} - -func TestRemoveMountedDir(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - defer func() { - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - }() - - mntPath, err := d.Get("2") - if err != nil { - t.Fatal(err) - } - if mntPath == "" { - t.Fatal("mntPath should not be empty string") - } - - mounted, err := d.mounted("2") - if err != nil { - t.Fatal(err) - } - - if !mounted { - t.Fatalf("Dir id 2 should be mounted") - } - - if err := d.Remove("2"); err != nil { - t.Fatal(err) - } -} - -func TestCreateWithInvalidParent(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", "docker"); err == nil { - t.Fatalf("Error should not be nil with parent does not exist") - } -} - 
-func TestGetDiff(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - - // Add a file to the diff path with a fixed size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - f.Close() - - a, err := d.Diff("1") - if err != nil { - t.Fatal(err) - } - if a == nil { - t.Fatalf("Archive should not be nil") - } -} - -func TestChanges(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - defer func() { - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - }() - - mntPoint, err := d.Get("2") - if err != nil { - t.Fatal(err) - } - - // Create a file to save in the mountpoint - f, err := os.Create(path.Join(mntPoint, "test.txt")) - if err != nil { - t.Fatal(err) - } - - if _, err := f.WriteString("testline"); err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - - changes, err := d.Changes("2") - if err != nil { - t.Fatal(err) - } - if len(changes) != 1 { - t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) - } - change := changes[0] - - expectedPath := "/test.txt" - if change.Path != expectedPath { - t.Fatalf("Expected path %s got %s", expectedPath, change.Path) - } - - if change.Kind != archive.ChangeAdd { - t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) - } - - if err := d.Create("3", "2"); err != nil { - t.Fatal(err) - } - mntPoint, err = d.Get("3") - if err != nil { - t.Fatal(err) - } - - // Create a file to save in the mountpoint - f, err = os.Create(path.Join(mntPoint, "test2.txt")) - if err != nil { - t.Fatal(err) - } - - if _, err := f.WriteString("testline"); err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - - changes, err = d.Changes("3") - if err != nil { - t.Fatal(err) - } - - if len(changes) != 1 { - t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) - } - change = changes[0] - - expectedPath = "/test2.txt" - if change.Path != expectedPath { - t.Fatalf("Expected path %s got %s", expectedPath, change.Path) - } - - if change.Kind != archive.ChangeAdd { - t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) - } -} - -func TestDiffSize(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - - // Add a file to the diff path with a fixed size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - s, err := f.Stat() - if err != nil { - t.Fatal(err) - } - size = s.Size() - if err := f.Close(); err != nil { - t.Fatal(err) - } - - diffSize, err := d.DiffSize("1") - if err != nil { - t.Fatal(err) - } - if diffSize != size { - t.Fatalf("Expected size to be %d got %d", size, diffSize) - } -} - -func TestChildDiffSize(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - - // Add a file to the diff path with a fixed 
size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - s, err := f.Stat() - if err != nil { - t.Fatal(err) - } - size = s.Size() - if err := f.Close(); err != nil { - t.Fatal(err) - } - - diffSize, err := d.DiffSize("1") - if err != nil { - t.Fatal(err) - } - if diffSize != size { - t.Fatalf("Expected size to be %d got %d", size, diffSize) - } - - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - diffSize, err = d.DiffSize("2") - if err != nil { - t.Fatal(err) - } - // The diff size for the child should be zero - if diffSize != 0 { - t.Fatalf("Expected size to be %d got %d", 0, diffSize) - } -} - -func TestExists(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - if d.Exists("none") { - t.Fatal("id name should not exist in the driver") - } - - if !d.Exists("1") { - t.Fatal("id 1 should exist in the driver") - } -} - -func TestStatus(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - status := d.Status() - if status == nil || len(status) == 0 { - t.Fatal("Status should not be nil or empty") - } - rootDir := status[0] - dirs := status[1] - if rootDir[0] != "Root Dir" { - t.Fatalf("Expected Root Dir got %s", rootDir[0]) - } - if rootDir[1] != d.rootPath() { - t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) - } - if dirs[0] != "Dirs" { - t.Fatalf("Expected Dirs got %s", dirs[0]) - } - if dirs[1] != "1" { - t.Fatalf("Expected 1 got %s", dirs[1]) - } -} - -func TestApplyDiff(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - - // Add a file to the diff path with a fixed size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - f.Close() - - diff, err := d.Diff("1") - if err != nil { - t.Fatal(err) - } - - if err := d.Create("2", ""); err != nil { - t.Fatal(err) - } - if err := d.Create("3", "2"); err != nil { - t.Fatal(err) - } - - if err := d.ApplyDiff("3", diff); err != nil { - t.Fatal(err) - } - - // Ensure that the file is in the mount point for id 3 - - mountPoint, err := d.Get("3") - if err != nil { - t.Fatal(err) - } - if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { - t.Fatal(err) - } -} - -func hash(c string) string { - h := sha256.New() - fmt.Fprint(h, c) - return hex.EncodeToString(h.Sum(nil)) -} - -func TestMountMoreThan42Layers(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - var last string - var expected int - - for i := 1; i < 127; i++ { - expected++ - var ( - parent = fmt.Sprintf("%d", i-1) - current = fmt.Sprintf("%d", i) - ) - - if parent == "0" { - parent = "" - } else { - parent = hash(parent) - } - current = hash(current) - - if err := d.Create(current, parent); err != nil { - t.Logf("Current layer %d", i) - t.Fatal(err) - } - point, err := d.Get(current) - if err != nil { - t.Logf("Current layer %d", i) - t.Fatal(err) - } - f, err := os.Create(path.Join(point, current)) - if err != nil { - t.Logf("Current layer %d", i) - t.Fatal(err) - } - f.Close() - - if i%10 == 0 { - if err 
:= os.Remove(path.Join(point, parent)); err != nil { - t.Logf("Current layer %d", i) - t.Fatal(err) - } - expected-- - } - last = current - } - - // Perform the actual mount for the top most image - point, err := d.Get(last) - if err != nil { - t.Fatal(err) - } - files, err := ioutil.ReadDir(point) - if err != nil { - t.Fatal(err) - } - if len(files) != expected { - t.Fatalf("Expected %d got %d", expected, len(files)) - } -} diff --git a/graphdriver/aufs/dirs.go b/graphdriver/aufs/dirs.go deleted file mode 100644 index fb9b81edd2..0000000000 --- a/graphdriver/aufs/dirs.go +++ /dev/null @@ -1,46 +0,0 @@ -package aufs - -import ( - "bufio" - "io/ioutil" - "os" - "path" -) - -// Return all the directories -func loadIds(root string) ([]string, error) { - dirs, err := ioutil.ReadDir(root) - if err != nil { - return nil, err - } - out := []string{} - for _, d := range dirs { - if !d.IsDir() { - out = append(out, d.Name()) - } - } - return out, nil -} - -// Read the layers file for the current id and return all the -// layers represented by new lines in the file -// -// If there are no lines in the file then the id has no parent -// and an empty slice is returned. -func getParentIds(root, id string) ([]string, error) { - f, err := os.Open(path.Join(root, "layers", id)) - if err != nil { - return nil, err - } - defer f.Close() - - out := []string{} - s := bufio.NewScanner(f) - - for s.Scan() { - if t := s.Text(); t != "" { - out = append(out, s.Text()) - } - } - return out, s.Err() -} diff --git a/graphdriver/aufs/migrate.go b/graphdriver/aufs/migrate.go deleted file mode 100644 index 6018342d6c..0000000000 --- a/graphdriver/aufs/migrate.go +++ /dev/null @@ -1,194 +0,0 @@ -package aufs - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path" -) - -type metadata struct { - ID string `json:"id"` - ParentID string `json:"parent,omitempty"` - Image string `json:"Image,omitempty"` - - parent *metadata -} - -func pathExists(pth string) bool { - if _, err := os.Stat(pth); err != nil { - return false - } - return true -} - -// Migrate existing images and containers from docker < 0.7.x -// -// The format pre 0.7 is for docker to store the metadata and filesystem -// content in the same directory. For the migration to work we need to move Image layer -// data from /var/lib/docker/graph//layers to the diff of the registered id. -// -// Next we need to migrate the container's rw layer to diff of the driver. After the -// contents are migrated we need to register the image and container ids with the -// driver. -// -// For the migration we try to move the folder containing the layer files, if that -// fails because the data is currently mounted we will fallback to creating a -// symlink. 
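// Editor's note: concretely, migrateImages relocates graph/<id>/layer into the
// driver's diff/<id> directory and migrateContainers does the same for
// containers/<id>/rw, falling back to a symlink (tryRelocate) when the source
// cannot simply be renamed, e.g. because it is still mounted.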
-func (a *Driver) Migrate(pth string, setupInit func(p string) error) error { - if pathExists(path.Join(pth, "graph")) { - if err := a.migrateRepositories(pth); err != nil { - return err - } - if err := a.migrateImages(path.Join(pth, "graph")); err != nil { - return err - } - return a.migrateContainers(path.Join(pth, "containers"), setupInit) - } - return nil -} - -func (a *Driver) migrateRepositories(pth string) error { - name := path.Join(pth, "repositories") - if err := os.Rename(name, name+"-aufs"); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) error { - fis, err := ioutil.ReadDir(pth) - if err != nil { - return err - } - - for _, fi := range fis { - if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "rw")) { - if err := tryRelocate(path.Join(pth, id, "rw"), path.Join(a.rootPath(), "diff", id)); err != nil { - return err - } - - if !a.Exists(id) { - - metadata, err := loadMetadata(path.Join(pth, id, "config.json")) - if err != nil { - return err - } - - initID := fmt.Sprintf("%s-init", id) - if err := a.Create(initID, metadata.Image); err != nil { - return err - } - - initPath, err := a.Get(initID) - if err != nil { - return err - } - // setup init layer - if err := setupInit(initPath); err != nil { - return err - } - - if err := a.Create(id, initID); err != nil { - return err - } - } - } - } - return nil -} - -func (a *Driver) migrateImages(pth string) error { - fis, err := ioutil.ReadDir(pth) - if err != nil { - return err - } - var ( - m = make(map[string]*metadata) - current *metadata - exists bool - ) - - for _, fi := range fis { - if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "layer")) { - if current, exists = m[id]; !exists { - current, err = loadMetadata(path.Join(pth, id, "json")) - if err != nil { - return err - } - m[id] = current - } - } - } - - for _, v := range m { - v.parent = m[v.ParentID] - } - - migrated := make(map[string]bool) - for _, v := range m { - if err := a.migrateImage(v, pth, migrated); err != nil { - return err - } - } - return nil -} - -func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool) error { - if !migrated[m.ID] { - if m.parent != nil { - a.migrateImage(m.parent, pth, migrated) - } - if err := tryRelocate(path.Join(pth, m.ID, "layer"), path.Join(a.rootPath(), "diff", m.ID)); err != nil { - return err - } - if !a.Exists(m.ID) { - if err := a.Create(m.ID, m.ParentID); err != nil { - return err - } - } - migrated[m.ID] = true - } - return nil -} - -// tryRelocate will try to rename the old path to the new pack and if -// the operation fails, it will fallback to a symlink -func tryRelocate(oldPath, newPath string) error { - s, err := os.Lstat(newPath) - if err != nil && !os.IsNotExist(err) { - return err - } - // If the destination is a symlink then we already tried to relocate once before - // and it failed so we delete it and try to remove - if s != nil && s.Mode()&os.ModeSymlink == os.ModeSymlink { - if err := os.RemoveAll(newPath); err != nil { - return err - } - } - if err := os.Rename(oldPath, newPath); err != nil { - if sErr := os.Symlink(oldPath, newPath); sErr != nil { - return fmt.Errorf("Unable to relocate %s to %s: Rename err %s Symlink err %s", oldPath, newPath, err, sErr) - } - } - return nil -} - -func loadMetadata(pth string) (*metadata, error) { - f, err := os.Open(pth) - if err != nil { - return nil, err - } - defer f.Close() - - var ( - out = &metadata{} - dec = 
json.NewDecoder(f) - ) - - if err := dec.Decode(out); err != nil { - return nil, err - } - return out, nil -} diff --git a/graphdriver/aufs/mount.go b/graphdriver/aufs/mount.go deleted file mode 100644 index 1f1d98f809..0000000000 --- a/graphdriver/aufs/mount.go +++ /dev/null @@ -1,17 +0,0 @@ -package aufs - -import ( - "github.com/dotcloud/docker/utils" - "os/exec" - "syscall" -) - -func Unmount(target string) error { - if err := exec.Command("auplink", target, "flush").Run(); err != nil { - utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err) - } - if err := syscall.Unmount(target, 0); err != nil { - return err - } - return nil -} diff --git a/graphdriver/aufs/mount_linux.go b/graphdriver/aufs/mount_linux.go deleted file mode 100644 index 6082d9f240..0000000000 --- a/graphdriver/aufs/mount_linux.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build amd64 - -package aufs - -import "syscall" - -const MsRemount = syscall.MS_REMOUNT - -func mount(source string, target string, fstype string, flags uintptr, data string) error { - return syscall.Mount(source, target, fstype, flags, data) -} diff --git a/graphdriver/aufs/mount_unsupported.go b/graphdriver/aufs/mount_unsupported.go deleted file mode 100644 index 2735624112..0000000000 --- a/graphdriver/aufs/mount_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux !amd64 - -package aufs - -import "errors" - -const MsRemount = 0 - -func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { - return errors.New("mount is not implemented on darwin") -} diff --git a/graphdriver/btrfs/btrfs.go b/graphdriver/btrfs/btrfs.go deleted file mode 100644 index 592e058458..0000000000 --- a/graphdriver/btrfs/btrfs.go +++ /dev/null @@ -1,213 +0,0 @@ -// +build linux,amd64 - -package btrfs - -/* -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "github.com/dotcloud/docker/graphdriver" - "os" - "path" - "syscall" - "unsafe" -) - -func init() { - graphdriver.Register("btrfs", Init) -} - -func Init(home string) (graphdriver.Driver, error) { - rootdir := path.Dir(home) - - var buf syscall.Statfs_t - if err := syscall.Statfs(rootdir, &buf); err != nil { - return nil, err - } - - if buf.Type != 0x9123683E { - return nil, fmt.Errorf("%s is not a btrfs filesystem", rootdir) - } - - return &Driver{ - home: home, - }, nil -} - -type Driver struct { - home string -} - -func (d *Driver) String() string { - return "btrfs" -} - -func (d *Driver) Status() [][2]string { - return nil -} - -func (d *Driver) Cleanup() error { - return nil -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("Can't open dir") - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -func subvolCreate(path, name string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_vol_args - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) - } - return nil -} - -func subvolSnapshot(src, dest, name string) error { - srcDir, err := openDir(src) 
- if err != nil { - return err - } - defer closeDir(srcDir) - - destDir, err := openDir(dest) - if err != nil { - return err - } - defer closeDir(destDir) - - var args C.struct_btrfs_ioctl_vol_args_v2 - args.fd = C.__s64(getDirFd(srcDir)) - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) - } - return nil -} - -func subvolDelete(path, name string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_vol_args - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to destroy btrfs snapshot: %v", errno.Error()) - } - return nil -} - -func (d *Driver) subvolumesDir() string { - return path.Join(d.home, "subvolumes") -} - -func (d *Driver) subvolumesDirId(id string) string { - return path.Join(d.subvolumesDir(), id) -} - -func (d *Driver) Create(id string, parent string) error { - subvolumes := path.Join(d.home, "subvolumes") - if err := os.MkdirAll(subvolumes, 0700); err != nil { - return err - } - if parent == "" { - if err := subvolCreate(subvolumes, id); err != nil { - return err - } - } else { - parentDir, err := d.Get(parent) - if err != nil { - return err - } - if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { - return err - } - } - return nil -} - -func (d *Driver) Remove(id string) error { - dir := d.subvolumesDirId(id) - if _, err := os.Stat(dir); err != nil { - return err - } - if err := subvolDelete(d.subvolumesDir(), id); err != nil { - return err - } - return os.RemoveAll(dir) -} - -func (d *Driver) Get(id string) (string, error) { - dir := d.subvolumesDirId(id) - st, err := os.Stat(dir) - if err != nil { - return "", err - } - - if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) - } - - return dir, nil -} - -func (d *Driver) Put(id string) { - // Get() creates no runtime resources (like e.g. mounts) - // so this doesn't need to do anything. 
-} - -func (d *Driver) Exists(id string) bool { - dir := d.subvolumesDirId(id) - _, err := os.Stat(dir) - return err == nil -} diff --git a/graphdriver/btrfs/dummy_unsupported.go b/graphdriver/btrfs/dummy_unsupported.go deleted file mode 100644 index 6c44615763..0000000000 --- a/graphdriver/btrfs/dummy_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux !amd64 - -package btrfs diff --git a/graphdriver/devmapper/attach_loopback.go b/graphdriver/devmapper/attach_loopback.go deleted file mode 100644 index 23339076e8..0000000000 --- a/graphdriver/devmapper/attach_loopback.go +++ /dev/null @@ -1,126 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "fmt" - "github.com/dotcloud/docker/utils" -) - -func stringToLoopName(src string) [LoNameSize]uint8 { - var dst [LoNameSize]uint8 - copy(dst[:], src[:]) - return dst -} - -func getNextFreeLoopbackIndex() (int, error) { - f, err := osOpenFile("/dev/loop-control", osORdOnly, 0644) - if err != nil { - return 0, err - } - defer f.Close() - - index, err := ioctlLoopCtlGetFree(f.Fd()) - if index < 0 { - index = 0 - } - return index, err -} - -func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) { - // Start looking for a free /dev/loop - for { - target := fmt.Sprintf("/dev/loop%d", index) - index++ - - fi, err := osStat(target) - if err != nil { - if osIsNotExist(err) { - utils.Errorf("There are no more loopback device available.") - } - return nil, ErrAttachLoopbackDevice - } - - if fi.Mode()&osModeDevice != osModeDevice { - utils.Errorf("Loopback device %s is not a block device.", target) - continue - } - - // OpenFile adds O_CLOEXEC - loopFile, err = osOpenFile(target, osORdWr, 0644) - if err != nil { - utils.Errorf("Error openning loopback device: %s", err) - return nil, ErrAttachLoopbackDevice - } - - // Try to attach to the loop file - if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { - loopFile.Close() - - // If the error is EBUSY, then try the next loopback - if err != sysEBusy { - utils.Errorf("Cannot set up loopback device %s: %s", target, err) - return nil, ErrAttachLoopbackDevice - } - - // Otherwise, we keep going with the loop - continue - } - // In case of success, we finished. Break the loop. - break - } - - // This can't happen, but let's be sure - if loopFile == nil { - utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) - return nil, ErrAttachLoopbackDevice - } - - return loopFile, nil -} - -// attachLoopDevice attaches the given sparse file to the next -// available loopback device. It returns an opened *osFile. -func attachLoopDevice(sparseName string) (loop *osFile, err error) { - - // Try to retrieve the next available loopback device via syscall. - // If it fails, we discard error and start loopking for a - // loopback from index 0. 
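getNextFreeLoopbackIndex asks /dev/loop-control for the first unused loop index before the scan-and-retry loop above starts. A self-contained sketch of just that query, using the same LOOP_CTL_GET_FREE value (0x4C82) that this patch defines as a fallback in devmapper_wrapper.go; error handling is trimmed:

package main

import (
	"fmt"
	"os"
	"syscall"
)

// loopCtlGetFree matches the LOOP_CTL_GET_FREE ioctl number.
const loopCtlGetFree = 0x4C82

// nextFreeLoop returns the index N of the first free /dev/loopN device.
func nextFreeLoop() (int, error) {
	f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	// The ioctl's return value is the free index itself.
	index, _, errno := syscall.Syscall(syscall.SYS_IOCTL, f.Fd(), loopCtlGetFree, 0)
	if errno != 0 {
		return 0, errno
	}
	return int(index), nil
}

func main() {
	if idx, err := nextFreeLoop(); err == nil {
		fmt.Printf("/dev/loop%d is free\n", idx)
	} else {
		fmt.Fprintln(os.Stderr, err)
	}
}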
- startIndex, err := getNextFreeLoopbackIndex() - if err != nil { - utils.Debugf("Error retrieving the next available loopback: %s", err) - } - - // OpenFile adds O_CLOEXEC - sparseFile, err := osOpenFile(sparseName, osORdWr, 0644) - if err != nil { - utils.Errorf("Error openning sparse file %s: %s", sparseName, err) - return nil, ErrAttachLoopbackDevice - } - defer sparseFile.Close() - - loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) - if err != nil { - return nil, err - } - - // Set the status of the loopback device - loopInfo := &LoopInfo64{ - loFileName: stringToLoopName(loopFile.Name()), - loOffset: 0, - loFlags: LoFlagsAutoClear, - } - - if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { - utils.Errorf("Cannot set up loopback device info: %s", err) - - // If the call failed, then free the loopback device - if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { - utils.Errorf("Error while cleaning up the loopback device") - } - loopFile.Close() - return nil, ErrAttachLoopbackDevice - } - - return loopFile, nil -} diff --git a/graphdriver/devmapper/deviceset.go b/graphdriver/devmapper/deviceset.go deleted file mode 100644 index f6b26655a3..0000000000 --- a/graphdriver/devmapper/deviceset.go +++ /dev/null @@ -1,1088 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "encoding/json" - "errors" - "fmt" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "time" -) - -var ( - DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 - DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 - DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 -) - -type DevInfo struct { - Hash string `json:"-"` - DeviceId int `json:"device_id"` - Size uint64 `json:"size"` - TransactionId uint64 `json:"transaction_id"` - Initialized bool `json:"initialized"` - devices *DeviceSet `json:"-"` - - mountCount int `json:"-"` - mountPath string `json:"-"` - // A floating mount means one reference is not owned and - // will be stolen by the next mount. This allows us to - // avoid unmounting directly after creation before the - // first get (since we need to mount to set up the device - // a bit first). - floating bool `json:"-"` - - // The global DeviceSet lock guarantees that we serialize all - // the calls to libdevmapper (which is not threadsafe), but we - // sometimes release that lock while sleeping. In that case - // this per-device lock is still held, protecting against - // other accesses to the device that we're doing the wait on. 
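The DevInfo comment above describes a two-level locking scheme: the DeviceSet mutex serializes calls into libdevmapper but may be released while sleeping, while the per-device mutex stays held for the whole operation. A stripped-down sketch of that shape, with names invented for illustration:

package main

import (
	"fmt"
	"sync"
)

type device struct {
	lock   sync.Mutex // per-device lock, held across waits
	mounts int
}

type set struct {
	sync.Mutex // set-wide lock protecting the map and libdevmapper calls
	devices    map[string]*device
}

// withDevice looks the device up under the set lock, then holds only the
// per-device lock while fn runs, mirroring the pattern described above.
func (s *set) withDevice(hash string, fn func(*device)) {
	s.Lock()
	d := s.devices[hash]
	s.Unlock()
	if d == nil {
		return
	}
	d.lock.Lock()
	defer d.lock.Unlock()
	fn(d)
}

func main() {
	s := &set{devices: map[string]*device{"base": {}}}
	s.withDevice("base", func(d *device) { d.mounts++ })
	fmt.Println(s.devices["base"].mounts)
}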
- lock sync.Mutex `json:"-"` -} - -type MetaData struct { - Devices map[string]*DevInfo `json:devices` -} - -type DeviceSet struct { - MetaData - sync.Mutex // Protects Devices map and serializes calls into libdevmapper - root string - devicePrefix string - TransactionId uint64 - NewTransactionId uint64 - nextFreeDevice int - sawBusy bool -} - -type DiskUsage struct { - Used uint64 - Total uint64 -} - -type Status struct { - PoolName string - DataLoopback string - MetadataLoopback string - Data DiskUsage - Metadata DiskUsage - SectorSize uint64 -} - -type DevStatus struct { - DeviceId int - Size uint64 - TransactionId uint64 - SizeInSectors uint64 - MappedSectors uint64 - HighestMappedSector uint64 -} - -type UnmountMode int - -const ( - UnmountRegular UnmountMode = iota - UnmountFloat - UnmountSink -) - -func getDevName(name string) string { - return "/dev/mapper/" + name -} - -func (info *DevInfo) Name() string { - hash := info.Hash - if hash == "" { - hash = "base" - } - return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) -} - -func (info *DevInfo) DevName() string { - return getDevName(info.Name()) -} - -func (devices *DeviceSet) loopbackDir() string { - return path.Join(devices.root, "devicemapper") -} - -func (devices *DeviceSet) jsonFile() string { - return path.Join(devices.loopbackDir(), "json") -} - -func (devices *DeviceSet) getPoolName() string { - return devices.devicePrefix + "-pool" -} - -func (devices *DeviceSet) getPoolDevName() string { - return getDevName(devices.getPoolName()) -} - -func (devices *DeviceSet) hasImage(name string) bool { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - _, err := osStat(filename) - return err == nil -} - -// ensureImage creates a sparse file of bytes at the path -// /devicemapper/. -// If the file already exists, it does nothing. -// Either way it returns the full path. 
-func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - if err := osMkdirAll(dirname, 0700); err != nil && !osIsExist(err) { - return "", err - } - - if _, err := osStat(filename); err != nil { - if !osIsNotExist(err) { - return "", err - } - utils.Debugf("Creating loopback file %s for device-manage use", filename) - file, err := osOpenFile(filename, osORdWr|osOCreate, 0600) - if err != nil { - return "", err - } - defer file.Close() - - if err = file.Truncate(size); err != nil { - return "", err - } - } - return filename, nil -} - -func (devices *DeviceSet) allocateDeviceId() int { - // TODO: Add smarter reuse of deleted devices - id := devices.nextFreeDevice - devices.nextFreeDevice = devices.nextFreeDevice + 1 - return id -} - -func (devices *DeviceSet) allocateTransactionId() uint64 { - devices.NewTransactionId = devices.NewTransactionId + 1 - return devices.NewTransactionId -} - -func (devices *DeviceSet) saveMetadata() error { - jsonData, err := json.Marshal(devices.MetaData) - if err != nil { - return fmt.Errorf("Error encoding metadata to json: %s", err) - } - tmpFile, err := ioutil.TempFile(filepath.Dir(devices.jsonFile()), ".json") - if err != nil { - return fmt.Errorf("Error creating metadata file: %s", err) - } - - n, err := tmpFile.Write(jsonData) - if err != nil { - return fmt.Errorf("Error writing metadata to %s: %s", tmpFile.Name(), err) - } - if n < len(jsonData) { - return io.ErrShortWrite - } - if err := tmpFile.Sync(); err != nil { - return fmt.Errorf("Error syncing metadata file %s: %s", tmpFile.Name(), err) - } - if err := tmpFile.Close(); err != nil { - return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) - } - if err := osRename(tmpFile.Name(), devices.jsonFile()); err != nil { - return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err) - } - - if devices.NewTransactionId != devices.TransactionId { - if err = setTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { - return fmt.Errorf("Error setting devmapper transition ID: %s", err) - } - devices.TransactionId = devices.NewTransactionId - } - return nil -} - -func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) { - utils.Debugf("registerDevice(%v, %v)", id, hash) - info := &DevInfo{ - Hash: hash, - DeviceId: id, - Size: size, - TransactionId: devices.allocateTransactionId(), - Initialized: false, - devices: devices, - } - - devices.Devices[hash] = info - if err := devices.saveMetadata(); err != nil { - // Try to remove unused device - delete(devices.Devices, hash) - return nil, err - } - - return info, nil -} - -func (devices *DeviceSet) activateDeviceIfNeeded(hash string) error { - utils.Debugf("activateDeviceIfNeeded(%v)", hash) - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) - } - - if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { - return nil - } - - return activateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) -} - -func (devices *DeviceSet) createFilesystem(info *DevInfo) error { - devname := info.DevName() - - err := execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0,lazy_journal_init=0", devname) - if err != nil { - err = execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0", devname) - } - if err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - 
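saveMetadata persists the device map with a write-temp-then-rename sequence so a crash can never leave a half-written JSON file behind. The same pattern in isolation; the path and payload below are placeholders:

package main

import (
	"encoding/json"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// writeJSONAtomically marshals v, writes it to a temp file in the same
// directory as path, fsyncs and closes it, then renames it over path so
// readers only ever see a complete file.
func writeJSONAtomically(path string, v interface{}) error {
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	tmp, err := ioutil.TempFile(filepath.Dir(path), ".json")
	if err != nil {
		return err
	}
	n, err := tmp.Write(data)
	if err != nil {
		return err
	}
	if n < len(data) {
		return io.ErrShortWrite
	}
	if err := tmp.Sync(); err != nil {
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	_ = writeJSONAtomically("/tmp/devices.json", map[string]int{"base": 1})
}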
return nil -} - -func (devices *DeviceSet) loadMetaData() error { - utils.Debugf("loadMetadata()") - defer utils.Debugf("loadMetadata END") - _, _, _, params, err := getStatus(devices.getPoolName()) - if err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - - if _, err := fmt.Sscanf(params, "%d", &devices.TransactionId); err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - devices.NewTransactionId = devices.TransactionId - - jsonData, err := ioutil.ReadFile(devices.jsonFile()) - if err != nil && !osIsNotExist(err) { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - - devices.MetaData.Devices = make(map[string]*DevInfo) - if jsonData != nil { - if err := json.Unmarshal(jsonData, &devices.MetaData); err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - } - - for hash, d := range devices.Devices { - d.Hash = hash - d.devices = devices - - if d.DeviceId >= devices.nextFreeDevice { - devices.nextFreeDevice = d.DeviceId + 1 - } - - // If the transaction id is larger than the actual one we lost the device due to some crash - if d.TransactionId > devices.TransactionId { - utils.Debugf("Removing lost device %s with id %d", hash, d.TransactionId) - delete(devices.Devices, hash) - } - } - return nil -} - -func (devices *DeviceSet) setupBaseImage() error { - oldInfo := devices.Devices[""] - if oldInfo != nil && oldInfo.Initialized { - return nil - } - - if oldInfo != nil && !oldInfo.Initialized { - utils.Debugf("Removing uninitialized base image") - if err := devices.deleteDevice(""); err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - } - - utils.Debugf("Initializing base device-manager snapshot") - - id := devices.allocateDeviceId() - - // Create initial device - if err := createDevice(devices.getPoolDevName(), id); err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - - utils.Debugf("Registering base device (id %v) with FS size %v", id, DefaultBaseFsSize) - info, err := devices.registerDevice(id, "", DefaultBaseFsSize) - if err != nil { - _ = deleteDevice(devices.getPoolDevName(), id) - utils.Debugf("\n--->Err: %s\n", err) - return err - } - - utils.Debugf("Creating filesystem on base device-manager snapshot") - - if err = devices.activateDeviceIfNeeded(""); err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - - if err := devices.createFilesystem(info); err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - - info.Initialized = true - if err = devices.saveMetadata(); err != nil { - info.Initialized = false - utils.Debugf("\n--->Err: %s\n", err) - return err - } - - return nil -} - -func setCloseOnExec(name string) { - if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { - for _, i := range fileInfos { - link, _ := osReadlink(filepath.Join("/proc/self/fd", i.Name())) - if link == name { - fd, err := strconv.Atoi(i.Name()) - if err == nil { - sysCloseOnExec(fd) - } - } - } - } -} - -func (devices *DeviceSet) log(level int, file string, line int, dmError int, message string) { - if level >= 7 { - return // Ignore _LOG_DEBUG - } - - if strings.Contains(message, "busy") { - devices.sawBusy = true - } - - utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -func (devices *DeviceSet) ResizePool(size int64) error { - dirname := devices.loopbackDir() - 
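setCloseOnExec works around libdevmapper opening its control device without O_CLOEXEC by walking /proc/self/fd and flagging any descriptor whose symlink resolves to the given name. A standalone version of that walk (Linux-only; errors are ignored exactly as in the original):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"syscall"
)

// markCloseOnExec sets FD_CLOEXEC on every open descriptor that points at
// target, so child processes do not inherit it.
func markCloseOnExec(target string) {
	entries, _ := ioutil.ReadDir("/proc/self/fd")
	for _, e := range entries {
		link, _ := os.Readlink(filepath.Join("/proc/self/fd", e.Name()))
		if link != target {
			continue
		}
		if fd, err := strconv.Atoi(e.Name()); err == nil {
			syscall.CloseOnExec(fd)
		}
	}
}

func main() {
	markCloseOnExec("/dev/mapper/control")
	fmt.Println("done")
}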
datafilename := path.Join(dirname, "data") - metadatafilename := path.Join(dirname, "metadata") - - datafile, err := osOpenFile(datafilename, osORdWr, 0) - if datafile == nil { - return err - } - defer datafile.Close() - - fi, err := datafile.Stat() - if fi == nil { - return err - } - - if fi.Size() > size { - return fmt.Errorf("Can't shrink file") - } - - dataloopback := FindLoopDeviceFor(datafile) - if dataloopback == nil { - return fmt.Errorf("Unable to find loopback mount for: %s", datafilename) - } - defer dataloopback.Close() - - metadatafile, err := osOpenFile(metadatafilename, osORdWr, 0) - if metadatafile == nil { - return err - } - defer metadatafile.Close() - - metadataloopback := FindLoopDeviceFor(metadatafile) - if metadataloopback == nil { - return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename) - } - defer metadataloopback.Close() - - // Grow loopback file - if err := datafile.Truncate(size); err != nil { - return fmt.Errorf("Unable to grow loopback file: %s", err) - } - - // Reload size for loopback device - if err := LoopbackSetCapacity(dataloopback); err != nil { - return fmt.Errorf("Unable to update loopback capacity: %s", err) - } - - // Suspend the pool - if err := suspendDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("Unable to suspend pool: %s", err) - } - - // Reload with the new block sizes - if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback); err != nil { - return fmt.Errorf("Unable to reload pool: %s", err) - } - - // Resume the pool - if err := resumeDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("Unable to resume pool: %s", err) - } - - return nil -} - -func (devices *DeviceSet) initDevmapper(doInit bool) error { - logInit(devices) - - // Make sure the sparse images exist in /devicemapper/data and - // /devicemapper/metadata - - hasData := devices.hasImage("data") - hasMetadata := devices.hasImage("metadata") - - if !doInit && !hasData { - return errors.New("Loopback data file not found") - } - - if !doInit && !hasMetadata { - return errors.New("Loopback metadata file not found") - } - - createdLoopback := !hasData || !hasMetadata - data, err := devices.ensureImage("data", DefaultDataLoopbackSize) - if err != nil { - utils.Debugf("Error device ensureImage (data): %s\n", err) - return err - } - metadata, err := devices.ensureImage("metadata", DefaultMetaDataLoopbackSize) - if err != nil { - utils.Debugf("Error device ensureImage (metadata): %s\n", err) - return err - } - - // Set the device prefix from the device id and inode of the docker root dir - - st, err := osStat(devices.root) - if err != nil { - return fmt.Errorf("Error looking up dir %s: %s", devices.root, err) - } - sysSt := toSysStatT(st.Sys()) - // "reg-" stands for "regular file". - // In the future we might use "dev-" for "device file", etc. - // docker-maj,min[-inode] stands for: - // - Managed by docker - // - The target of this device is at major and minor - // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. 
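The comment above defines the device naming rule: docker-major:minor-inode, derived from a stat of the docker root directory using the same major/minor bit layout as the helpers earlier in this file. A small sketch that computes such a prefix; the path and sample output are illustrative:

package main

import (
	"fmt"
	"os"
	"syscall"
)

// devicePrefixFor builds the docker-<major>:<minor>-<inode> prefix for the
// filesystem object backing root (linux/amd64 Stat_t assumed).
func devicePrefixFor(root string) (string, error) {
	fi, err := os.Stat(root)
	if err != nil {
		return "", err
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return "", fmt.Errorf("unexpected stat type for %s", root)
	}
	dev := uint64(st.Dev)
	major := (dev >> 8) & 0xfff
	minor := (dev & 0xff) | ((dev >> 12) & 0xfff00)
	return fmt.Sprintf("docker-%d:%d-%d", major, minor, st.Ino), nil
}

func main() {
	if prefix, err := devicePrefixFor("/var/lib/docker"); err == nil {
		fmt.Println(prefix) // e.g. docker-8:1-1050623
	}
}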
- devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) - utils.Debugf("Generated prefix: %s", devices.devicePrefix) - - // Check for the existence of the device -pool - utils.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) - info, err := getInfo(devices.getPoolName()) - if info == nil { - utils.Debugf("Error device getInfo: %s", err) - return err - } - - // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files - // that are not Close-on-exec, and lxc-start will die if it inherits any unexpected files, - // so we add this badhack to make sure it closes itself - setCloseOnExec("/dev/mapper/control") - - // If the pool doesn't exist, create it - if info.Exists == 0 { - utils.Debugf("Pool doesn't exist. Creating it.") - - dataFile, err := attachLoopDevice(data) - if err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - defer dataFile.Close() - - metadataFile, err := attachLoopDevice(metadata) - if err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - defer metadataFile.Close() - - if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - } - - // If we didn't just create the data or metadata image, we need to - // load the metadata from the existing file. - if !createdLoopback { - if err = devices.loadMetaData(); err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - } - - // Setup the base image - if doInit { - if err := devices.setupBaseImage(); err != nil { - utils.Debugf("Error device setupBaseImage: %s\n", err) - return err - } - } - - return nil -} - -func (devices *DeviceSet) AddDevice(hash, baseHash string) error { - devices.Lock() - defer devices.Unlock() - - if devices.Devices[hash] != nil { - return fmt.Errorf("hash %s already exists", hash) - } - - baseInfo := devices.Devices[baseHash] - if baseInfo == nil { - return fmt.Errorf("Error adding device for '%s': can't find device for parent '%s'", hash, baseHash) - } - - baseInfo.lock.Lock() - defer baseInfo.lock.Unlock() - - deviceId := devices.allocateDeviceId() - - if err := devices.createSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { - utils.Debugf("Error creating snap device: %s\n", err) - return err - } - - if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { - deleteDevice(devices.getPoolDevName(), deviceId) - utils.Debugf("Error registering device: %s\n", err) - return err - } - return nil -} - -func (devices *DeviceSet) deleteDevice(hash string) error { - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("hash %s doesn't exists", hash) - } - - // This is a workaround for the kernel not discarding block so - // on the thin pool when we remove a thinp device, so we do it - // manually - if err := devices.activateDeviceIfNeeded(hash); err == nil { - if err := BlockDeviceDiscard(info.DevName()); err != nil { - utils.Debugf("Error discarding block on device: %s (ignoring)\n", err) - } - } - - devinfo, _ := getInfo(info.Name()) - if devinfo != nil && devinfo.Exists != 0 { - if err := devices.removeDeviceAndWait(info.Name()); err != nil { - utils.Debugf("Error removing device: %s\n", err) - return err - } - } - - if info.Initialized { - info.Initialized = false - if err := devices.saveMetadata(); err != nil { - utils.Debugf("Error saving meta data: %s\n", err) - return err - } - } - - if err := 
deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { - utils.Debugf("Error deleting device: %s\n", err) - return err - } - - devices.allocateTransactionId() - delete(devices.Devices, info.Hash) - - if err := devices.saveMetadata(); err != nil { - devices.Devices[info.Hash] = info - utils.Debugf("Error saving meta data: %s\n", err) - return err - } - - return nil -} - -func (devices *DeviceSet) DeleteDevice(hash string) error { - devices.Lock() - defer devices.Unlock() - - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) - } - - info.lock.Lock() - defer info.lock.Unlock() - - return devices.deleteDevice(hash) -} - -func (devices *DeviceSet) deactivatePool() error { - utils.Debugf("[devmapper] deactivatePool()") - defer utils.Debugf("[devmapper] deactivatePool END") - devname := devices.getPoolDevName() - devinfo, err := getInfo(devname) - if err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - if devinfo.Exists != 0 { - return removeDevice(devname) - } - - return nil -} - -func (devices *DeviceSet) deactivateDevice(hash string) error { - utils.Debugf("[devmapper] deactivateDevice(%s)", hash) - defer utils.Debugf("[devmapper] deactivateDevice END") - - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) - } - devinfo, err := getInfo(info.Name()) - if err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - if devinfo.Exists != 0 { - if err := devices.removeDeviceAndWait(info.Name()); err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - } - - return nil -} - -// Issues the underlying dm remove operation and then waits -// for it to finish. -func (devices *DeviceSet) removeDeviceAndWait(devname string) error { - var err error - - for i := 0; i < 1000; i++ { - devices.sawBusy = false - err = removeDevice(devname) - if err == nil { - break - } - if !devices.sawBusy { - return err - } - - // If we see EBUSY it may be a transient error, - // sleep a bit a retry a few times. - devices.Unlock() - time.Sleep(10 * time.Millisecond) - devices.Lock() - } - if err != nil { - return err - } - - if err := devices.waitRemove(devname); err != nil { - return err - } - return nil -} - -// waitRemove blocks until either: -// a) the device registered at - is removed, -// or b) the 1 second timeout expires. -func (devices *DeviceSet) waitRemove(devname string) error { - utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) - defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) - i := 0 - for ; i < 1000; i += 1 { - devinfo, err := getInfo(devname) - if err != nil { - // If there is an error we assume the device doesn't exist. - // The error might actually be something else, but we can't differentiate. - return nil - } - if i%100 == 0 { - utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists) - } - if devinfo.Exists == 0 { - break - } - - devices.Unlock() - time.Sleep(10 * time.Millisecond) - devices.Lock() - } - if i == 1000 { - return fmt.Errorf("Timeout while waiting for device %s to be removed", devname) - } - return nil -} - -// waitClose blocks until either: -// a) the device registered at - is closed, -// or b) the 1 second timeout expires. 
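removeDeviceAndWait, waitRemove and waitClose all share the same wait shape: retry up to 1000 times with a short sleep between attempts (10ms or 1ms in the originals) and give up with a timeout error otherwise. Factored out as a sketch, with the helper name invented for illustration:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil calls done until it reports true, an error occurs, or 1000
// attempts have been made with a short sleep between them.
func pollUntil(desc string, done func() (bool, error)) error {
	for i := 0; i < 1000; i++ {
		ok, err := done()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		time.Sleep(10 * time.Millisecond)
	}
	return errors.New("timeout while waiting for " + desc)
}

func main() {
	start := time.Now()
	err := pollUntil("demo condition", func() (bool, error) {
		return time.Since(start) > 50*time.Millisecond, nil
	})
	fmt.Println(err) // nil once the condition is met
}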
-func (devices *DeviceSet) waitClose(hash string) error { - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) - } - i := 0 - for ; i < 1000; i += 1 { - devinfo, err := getInfo(info.Name()) - if err != nil { - return err - } - if i%100 == 0 { - utils.Debugf("Waiting for unmount of %s: opencount=%d", hash, devinfo.OpenCount) - } - if devinfo.OpenCount == 0 { - break - } - time.Sleep(1 * time.Millisecond) - } - if i == 1000 { - return fmt.Errorf("Timeout while waiting for device %s to close", hash) - } - return nil -} - -func (devices *DeviceSet) Shutdown() error { - devices.Lock() - defer devices.Unlock() - - utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix) - utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) - defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) - - for _, info := range devices.Devices { - info.lock.Lock() - if info.mountCount > 0 { - if err := sysUnmount(info.mountPath, 0); err != nil { - utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err) - } - } - info.lock.Unlock() - } - - for _, d := range devices.Devices { - d.lock.Lock() - - if err := devices.waitClose(d.Hash); err != nil { - utils.Errorf("Warning: error waiting for device %s to unmount: %s\n", d.Hash, err) - } - if err := devices.deactivateDevice(d.Hash); err != nil { - utils.Debugf("Shutdown deactivate %s , error: %s\n", d.Hash, err) - } - - d.lock.Unlock() - } - - if err := devices.deactivatePool(); err != nil { - utils.Debugf("Shutdown deactivate pool , error: %s\n", err) - } - - return nil -} - -func (devices *DeviceSet) MountDevice(hash, path string) error { - devices.Lock() - defer devices.Unlock() - - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) - } - - info.lock.Lock() - defer info.lock.Unlock() - - if info.mountCount > 0 { - if path != info.mountPath { - return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path) - } - - if info.floating { - // Steal floating ref - info.floating = false - } else { - info.mountCount++ - } - return nil - } - - if err := devices.activateDeviceIfNeeded(hash); err != nil { - return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) - } - - var flags uintptr = sysMsMgcVal - - err := sysMount(info.DevName(), path, "ext4", flags, "discard") - if err != nil && err == sysEInval { - err = sysMount(info.DevName(), path, "ext4", flags, "") - } - if err != nil { - return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) - } - - info.mountCount = 1 - info.mountPath = path - info.floating = false - - return devices.setInitialized(hash) -} - -func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { - utils.Debugf("[devmapper] UnmountDevice(hash=%s, mode=%d)", hash, mode) - defer utils.Debugf("[devmapper] UnmountDevice END") - devices.Lock() - defer devices.Unlock() - - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("UnmountDevice: no such device %s\n", hash) - } - - info.lock.Lock() - defer info.lock.Unlock() - - if mode == UnmountFloat { - if info.floating { - return fmt.Errorf("UnmountDevice: can't float floating reference %s\n", hash) - } - - // Leave this reference floating - info.floating = true - return nil - } - - if mode == UnmountSink { - if !info.floating { - // Someone already sunk this - return nil - } - // Otherwise, treat this as a regular unmount - } - - if info.mountCount == 0 { 
- return fmt.Errorf("UnmountDevice: device not-mounted id %s\n", hash) - } - - info.mountCount-- - if info.mountCount > 0 { - return nil - } - - utils.Debugf("[devmapper] Unmount(%s)", info.mountPath) - if err := sysUnmount(info.mountPath, 0); err != nil { - utils.Debugf("\n--->Err: %s\n", err) - return err - } - utils.Debugf("[devmapper] Unmount done") - // Wait for the unmount to be effective, - // by watching the value of Info.OpenCount for the device - if err := devices.waitClose(hash); err != nil { - return err - } - - devices.deactivateDevice(hash) - - info.mountPath = "" - - return nil -} - -func (devices *DeviceSet) HasDevice(hash string) bool { - devices.Lock() - defer devices.Unlock() - - return devices.Devices[hash] != nil -} - -func (devices *DeviceSet) HasInitializedDevice(hash string) bool { - devices.Lock() - defer devices.Unlock() - - info := devices.Devices[hash] - return info != nil && info.Initialized -} - -func (devices *DeviceSet) HasActivatedDevice(hash string) bool { - devices.Lock() - defer devices.Unlock() - - info := devices.Devices[hash] - if info == nil { - return false - } - - info.lock.Lock() - defer info.lock.Unlock() - - devinfo, _ := getInfo(info.Name()) - return devinfo != nil && devinfo.Exists != 0 -} - -func (devices *DeviceSet) setInitialized(hash string) error { - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) - } - - info.Initialized = true - if err := devices.saveMetadata(); err != nil { - info.Initialized = false - utils.Debugf("\n--->Err: %s\n", err) - return err - } - - return nil -} - -func (devices *DeviceSet) List() []string { - devices.Lock() - defer devices.Unlock() - - ids := make([]string, len(devices.Devices)) - i := 0 - for k := range devices.Devices { - ids[i] = k - i++ - } - return ids -} - -func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { - var params string - _, sizeInSectors, _, params, err = getStatus(devName) - if err != nil { - return - } - if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { - return - } - return -} - -func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { - devices.Lock() - defer devices.Unlock() - - info := devices.Devices[hash] - if info == nil { - return nil, fmt.Errorf("No device %s", hash) - } - - info.lock.Lock() - defer info.lock.Unlock() - - status := &DevStatus{ - DeviceId: info.DeviceId, - Size: info.Size, - TransactionId: info.TransactionId, - } - - if err := devices.activateDeviceIfNeeded(hash); err != nil { - return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) - } - - if sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()); err != nil { - return nil, err - } else { - status.SizeInSectors = sizeInSectors - status.MappedSectors = mappedSectors - status.HighestMappedSector = highestMappedSector - } - - return status, nil -} - -func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { - var params string - if _, totalSizeInSectors, _, params, err = getStatus(devices.getPoolName()); err == nil { - _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionId, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) - } - return -} - -func (devices *DeviceSet) Status() *Status { - devices.Lock() - defer devices.Unlock() - - status := &Status{} - - status.PoolName = 
devices.getPoolName() - status.DataLoopback = path.Join(devices.loopbackDir(), "data") - status.MetadataLoopback = path.Join(devices.loopbackDir(), "metadata") - - totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() - if err == nil { - // Convert from blocks to bytes - blockSizeInSectors := totalSizeInSectors / dataTotal - - status.Data.Used = dataUsed * blockSizeInSectors * 512 - status.Data.Total = dataTotal * blockSizeInSectors * 512 - - // metadata blocks are always 4k - status.Metadata.Used = metadataUsed * 4096 - status.Metadata.Total = metadataTotal * 4096 - - status.SectorSize = blockSizeInSectors * 512 - } - - return status -} - -func NewDeviceSet(root string, doInit bool) (*DeviceSet, error) { - SetDevDir("/dev") - - devices := &DeviceSet{ - root: root, - MetaData: MetaData{Devices: make(map[string]*DevInfo)}, - } - - if err := devices.initDevmapper(doInit); err != nil { - return nil, err - } - - return devices, nil -} diff --git a/graphdriver/devmapper/devmapper.go b/graphdriver/devmapper/devmapper.go deleted file mode 100644 index 7317118dcf..0000000000 --- a/graphdriver/devmapper/devmapper.go +++ /dev/null @@ -1,595 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "errors" - "fmt" - "github.com/dotcloud/docker/utils" - "runtime" - "syscall" -) - -type DevmapperLogger interface { - log(level int, file string, line int, dmError int, message string) -} - -const ( - DeviceCreate TaskType = iota - DeviceReload - DeviceRemove - DeviceRemoveAll - DeviceSuspend - DeviceResume - DeviceInfo - DeviceDeps - DeviceRename - DeviceVersion - DeviceStatus - DeviceTable - DeviceWaitevent - DeviceList - DeviceClear - DeviceMknodes - DeviceListVersions - DeviceTargetMsg - DeviceSetGeometry -) - -const ( - AddNodeOnResume AddNodeType = iota - AddNodeOnCreate -) - -var ( - ErrTaskRun = errors.New("dm_task_run failed") - ErrTaskSetName = errors.New("dm_task_set_name failed") - ErrTaskSetMessage = errors.New("dm_task_set_message failed") - ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") - ErrTaskSetRo = errors.New("dm_task_set_ro failed") - ErrTaskAddTarget = errors.New("dm_task_add_target failed") - ErrTaskSetSector = errors.New("dm_task_set_sector failed") - ErrTaskGetInfo = errors.New("dm_task_get_info failed") - ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") - ErrNilCookie = errors.New("cookie ptr can't be nil") - ErrAttachLoopbackDevice = errors.New("loopback mounting failed") - ErrGetBlockSize = errors.New("Can't get block size") - ErrUdevWait = errors.New("wait on udev cookie failed") - ErrSetDevDir = errors.New("dm_set_dev_dir failed") - ErrGetLibraryVersion = errors.New("dm_get_library_version failed") - ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") - ErrRunRemoveDevice = errors.New("running removeDevice failed") - ErrInvalidAddNode = errors.New("Invalide AddNoce type") - ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") - ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") -) - -type ( - Task struct { - unmanaged *CDmTask - } - Info struct { - Exists int - Suspended int - LiveTable int - InactiveTable int - OpenCount int32 - EventNr uint32 - Major uint32 - Minor uint32 - ReadOnly int - TargetCount int32 - } - TaskType int - AddNodeType int -) - -func (t *Task) destroy() { - if t != nil { - DmTaskDestroy(t.unmanaged) - runtime.SetFinalizer(t, nil) - } -} - -func TaskCreate(tasktype TaskType) *Task { - Ctask := 
DmTaskCreate(int(tasktype)) - if Ctask == nil { - return nil - } - task := &Task{unmanaged: Ctask} - runtime.SetFinalizer(task, (*Task).destroy) - return task -} - -func (t *Task) Run() error { - if res := DmTaskRun(t.unmanaged); res != 1 { - return ErrTaskRun - } - return nil -} - -func (t *Task) SetName(name string) error { - if res := DmTaskSetName(t.unmanaged, name); res != 1 { - return ErrTaskSetName - } - return nil -} - -func (t *Task) SetMessage(message string) error { - if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { - return ErrTaskSetMessage - } - return nil -} - -func (t *Task) SetSector(sector uint64) error { - if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { - return ErrTaskSetSector - } - return nil -} - -func (t *Task) SetCookie(cookie *uint, flags uint16) error { - if cookie == nil { - return ErrNilCookie - } - if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { - return ErrTaskSetCookie - } - return nil -} - -func (t *Task) SetAddNode(addNode AddNodeType) error { - if addNode != AddNodeOnResume && addNode != AddNodeOnCreate { - return ErrInvalidAddNode - } - if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { - return ErrTaskSetAddNode - } - return nil -} - -func (t *Task) SetRo() error { - if res := DmTaskSetRo(t.unmanaged); res != 1 { - return ErrTaskSetRo - } - return nil -} - -func (t *Task) AddTarget(start, size uint64, ttype, params string) error { - if res := DmTaskAddTarget(t.unmanaged, start, size, - ttype, params); res != 1 { - return ErrTaskAddTarget - } - return nil -} - -func (t *Task) GetInfo() (*Info, error) { - info := &Info{} - if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { - return nil, ErrTaskGetInfo - } - return info, nil -} - -func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, - length uint64, targetType string, params string) { - - return DmGetNextTarget(t.unmanaged, next, &start, &length, - &targetType, ¶ms), - start, length, targetType, params -} - -func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) { - loopInfo, err := ioctlLoopGetStatus64(file.Fd()) - if err != nil { - utils.Errorf("Error get loopback backing file: %s\n", err) - return 0, 0, ErrGetLoopbackBackingFile - } - return loopInfo.loDevice, loopInfo.loInode, nil -} - -func LoopbackSetCapacity(file *osFile) error { - if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { - utils.Errorf("Error loopbackSetCapacity: %s", err) - return ErrLoopbackSetCapacity - } - return nil -} - -func FindLoopDeviceFor(file *osFile) *osFile { - stat, err := file.Stat() - if err != nil { - return nil - } - targetInode := stat.Sys().(*sysStatT).Ino - targetDevice := stat.Sys().(*sysStatT).Dev - - for i := 0; true; i++ { - path := fmt.Sprintf("/dev/loop%d", i) - - file, err := osOpenFile(path, osORdWr, 0) - if err != nil { - if osIsNotExist(err) { - return nil - } - - // Ignore all errors until the first not-exist - // we want to continue looking for the file - continue - } - - dev, inode, err := getLoopbackBackingFile(file) - if err == nil && dev == targetDevice && inode == targetInode { - return file - } - file.Close() - } - - return nil -} - -func UdevWait(cookie uint) error { - if res := DmUdevWait(cookie); res != 1 { - utils.Debugf("Failed to wait on udev cookie %d", cookie) - return ErrUdevWait - } - return nil -} - -func LogInitVerbose(level int) { - DmLogInitVerbose(level) -} - -var dmLogger DevmapperLogger = nil - -func logInit(logger DevmapperLogger) { - dmLogger = logger - LogWithErrnoInit() -} - -func 
SetDevDir(dir string) error { - if res := DmSetDevDir(dir); res != 1 { - utils.Debugf("Error dm_set_dev_dir") - return ErrSetDevDir - } - return nil -} - -func GetLibraryVersion() (string, error) { - var version string - if res := DmGetLibraryVersion(&version); res != 1 { - return "", ErrGetLibraryVersion - } - return version, nil -} - -// Useful helper for cleanup -func RemoveDevice(name string) error { - task := TaskCreate(DeviceRemove) - if task == nil { - return ErrCreateRemoveTask - } - if err := task.SetName(name); err != nil { - utils.Debugf("Can't set task name %s", name) - return err - } - if err := task.Run(); err != nil { - return ErrRunRemoveDevice - } - return nil -} - -func GetBlockDeviceSize(file *osFile) (uint64, error) { - size, err := ioctlBlkGetSize64(file.Fd()) - if err != nil { - utils.Errorf("Error getblockdevicesize: %s", err) - return 0, ErrGetBlockSize - } - return uint64(size), nil -} - -func BlockDeviceDiscard(path string) error { - file, err := osOpenFile(path, osORdWr, 0) - if err != nil { - return err - } - defer file.Close() - - size, err := GetBlockDeviceSize(file) - if err != nil { - return err - } - - if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { - return err - } - - // Without this sometimes the remove of the device that happens after - // discard fails with EBUSY. - syscall.Sync() - - return nil -} - -// This is the programmatic example of "dmsetup create" -func createPool(poolName string, dataFile, metadataFile *osFile) error { - task, err := createTask(DeviceCreate, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("Can't get data size") - } - - params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing" - if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("Can't add target") - } - - var cookie uint = 0 - if err := task.SetCookie(&cookie, 0); err != nil { - return fmt.Errorf("Can't set cookie") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate (createPool)") - } - - UdevWait(cookie) - - return nil -} - -func reloadPool(poolName string, dataFile, metadataFile *osFile) error { - task, err := createTask(DeviceReload, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("Can't get data size") - } - - params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768" - if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("Can't add target") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate") - } - - return nil -} - -func createTask(t TaskType, name string) (*Task, error) { - task := TaskCreate(t) - if task == nil { - return nil, fmt.Errorf("Can't create task of type %d", int(t)) - } - if err := task.SetName(name); err != nil { - return nil, fmt.Errorf("Can't set task name %s", name) - } - return task, nil -} - -func getInfo(name string) (*Info, error) { - task, err := createTask(DeviceInfo, name) - if task == nil { - return nil, err - } - if err := task.Run(); err != nil { - return nil, err - } - return task.GetInfo() -} - -func getStatus(name string) (uint64, uint64, string, string, error) { - task, err := createTask(DeviceStatus, name) - if task == nil { - utils.Debugf("getStatus: Error createTask: %s", err) - return 0, 0, "", "", err - } - if err := task.Run(); err != nil { - 
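createPool builds a dm thin-pool target whose params are "metadata-dev data-dev data-block-size low-water-mark [features]" and loads it over the full length of the data device. To make that table format visible, this sketch only prints the equivalent dmsetup invocation; the device paths, pool name and size are placeholders:

package main

import "fmt"

func main() {
	// 100 GiB data file expressed in 512-byte sectors, matching the
	// default loopback size used by the device set.
	const sizeInSectors = 209715200

	// Same parameter order as createPool: metadata device first, then data,
	// then a 64 KiB block size (128 sectors), a low-water mark of 32768
	// blocks and the skip_block_zeroing feature.
	metadataDev, dataDev := "/dev/loop1", "/dev/loop0"
	table := fmt.Sprintf("0 %d thin-pool %s %s 128 32768 1 skip_block_zeroing",
		sizeInSectors, metadataDev, dataDev)

	fmt.Printf("dmsetup create docker-8:1-1050623-pool --table %q\n", table)
}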
utils.Debugf("getStatus: Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.GetInfo() - if err != nil { - utils.Debugf("getStatus: Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - utils.Debugf("getStatus: Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) - } - - _, start, length, targetType, params := task.GetNextTarget(0) - return start, length, targetType, params, nil -} - -func setTransactionId(poolName string, oldId uint64, newId uint64) error { - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector") - } - - if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil { - return fmt.Errorf("Can't set message") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running setTransactionId") - } - return nil -} - -func suspendDevice(name string) error { - task, err := createTask(DeviceSuspend, name) - if task == nil { - return err - } - if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceSuspend: %s", err) - } - return nil -} - -func resumeDevice(name string) error { - task, err := createTask(DeviceResume, name) - if task == nil { - return err - } - - var cookie uint = 0 - if err := task.SetCookie(&cookie, 0); err != nil { - return fmt.Errorf("Can't set cookie") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceResume") - } - - UdevWait(cookie) - - return nil -} - -func createDevice(poolName string, deviceId int) error { - utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, deviceId) - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector") - } - - if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil { - return fmt.Errorf("Can't set message") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running createDevice") - } - return nil -} - -func deleteDevice(poolName string, deviceId int) error { - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.SetSector(0); err != nil { - return fmt.Errorf("Can't set sector") - } - - if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil { - return fmt.Errorf("Can't set message") - } - - if err := task.Run(); err != nil { - return fmt.Errorf("Error running deleteDevice") - } - return nil -} - -func removeDevice(name string) error { - utils.Debugf("[devmapper] removeDevice START") - defer utils.Debugf("[devmapper] removeDevice END") - task, err := createTask(DeviceRemove, name) - if task == nil { - return err - } - if err = task.Run(); err != nil { - return fmt.Errorf("Error running removeDevice") - } - return nil -} - -func activateDevice(poolName string, name string, deviceId int, size uint64) error { - task, err := createTask(DeviceCreate, name) - if task == nil { - return err - } - - params := fmt.Sprintf("%s %d", poolName, deviceId) - if err := task.AddTarget(0, size/512, "thin", params); err != nil { - return fmt.Errorf("Can't add target") - } - if err := task.SetAddNode(AddNodeOnCreate); err != nil { - return fmt.Errorf("Can't add node") - } - - var cookie uint = 0 - if err := task.SetCookie(&cookie, 0); err != nil { - return fmt.Errorf("Can't set cookie") - } - 
- if err := task.Run(); err != nil { - return fmt.Errorf("Error running DeviceCreate (activateDevice)") - } - - UdevWait(cookie) - - return nil -} - -func (devices *DeviceSet) createSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) error { - devinfo, _ := getInfo(baseName) - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err := suspendDevice(baseName); err != nil { - return err - } - } - - task, err := createTask(DeviceTargetMsg, poolName) - if task == nil { - if doSuspend { - resumeDevice(baseName) - } - return err - } - - if err := task.SetSector(0); err != nil { - if doSuspend { - resumeDevice(baseName) - } - return fmt.Errorf("Can't set sector") - } - - if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil { - if doSuspend { - resumeDevice(baseName) - } - return fmt.Errorf("Can't set message") - } - - if err := task.Run(); err != nil { - if doSuspend { - resumeDevice(baseName) - } - return fmt.Errorf("Error running DeviceCreate (createSnapDevice)") - } - - if doSuspend { - if err := resumeDevice(baseName); err != nil { - return err - } - } - - return nil -} diff --git a/graphdriver/devmapper/devmapper_doc.go b/graphdriver/devmapper/devmapper_doc.go deleted file mode 100644 index c1c3e3891b..0000000000 --- a/graphdriver/devmapper/devmapper_doc.go +++ /dev/null @@ -1,106 +0,0 @@ -package devmapper - -// Definition of struct dm_task and sub structures (from lvm2) -// -// struct dm_ioctl { -// /* -// * The version number is made up of three parts: -// * major - no backward or forward compatibility, -// * minor - only backwards compatible, -// * patch - both backwards and forwards compatible. -// * -// * All clients of the ioctl interface should fill in the -// * version number of the interface that they were -// * compiled with. -// * -// * All recognised ioctl commands (ie. those that don't -// * return -ENOTTY) fill out this field, even if the -// * command failed. -// */ -// uint32_t version[3]; /* in/out */ -// uint32_t data_size; /* total size of data passed in -// * including this struct */ - -// uint32_t data_start; /* offset to start of data -// * relative to start of this struct */ - -// uint32_t target_count; /* in/out */ -// int32_t open_count; /* out */ -// uint32_t flags; /* in/out */ - -// /* -// * event_nr holds either the event number (input and output) or the -// * udev cookie value (input only). -// * The DM_DEV_WAIT ioctl takes an event number as input. -// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls -// * use the field as a cookie to return in the DM_COOKIE -// * variable with the uevents they issue. -// * For output, the ioctls return the event number, not the cookie. 
-// */ -// uint32_t event_nr; /* in/out */ -// uint32_t padding; - -// uint64_t dev; /* in/out */ - -// char name[DM_NAME_LEN]; /* device name */ -// char uuid[DM_UUID_LEN]; /* unique identifier for -// * the block device */ -// char data[7]; /* padding or data */ -// }; - -// struct target { -// uint64_t start; -// uint64_t length; -// char *type; -// char *params; - -// struct target *next; -// }; - -// typedef enum { -// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ -// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ -// } dm_add_node_t; - -// struct dm_task { -// int type; -// char *dev_name; -// char *mangled_dev_name; - -// struct target *head, *tail; - -// int read_only; -// uint32_t event_nr; -// int major; -// int minor; -// int allow_default_major_fallback; -// uid_t uid; -// gid_t gid; -// mode_t mode; -// uint32_t read_ahead; -// uint32_t read_ahead_flags; -// union { -// struct dm_ioctl *v4; -// } dmi; -// char *newname; -// char *message; -// char *geometry; -// uint64_t sector; -// int no_flush; -// int no_open_count; -// int skip_lockfs; -// int query_inactive_table; -// int suppress_identical_reload; -// dm_add_node_t add_node; -// uint64_t existing_table_size; -// int cookie_set; -// int new_uuid; -// int secure_data; -// int retry_remove; -// int enable_checks; -// int expected_errno; - -// char *uuid; -// char *mangled_uuid; -// }; -// diff --git a/graphdriver/devmapper/devmapper_log.go b/graphdriver/devmapper/devmapper_log.go deleted file mode 100644 index 18dde7cca5..0000000000 --- a/graphdriver/devmapper/devmapper_log.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import "C" - -// Due to the way cgo works this has to be in a separate file, as devmapper.go has -// definitions in the cgo block, which is incompatible with using "//export" - -//export DevmapperLogCallback -func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) { - if dmLogger != nil { - dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), C.GoString(message)) - } -} diff --git a/graphdriver/devmapper/devmapper_test.go b/graphdriver/devmapper/devmapper_test.go deleted file mode 100644 index 3ffa163ceb..0000000000 --- a/graphdriver/devmapper/devmapper_test.go +++ /dev/null @@ -1,287 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "testing" -) - -func TestTaskCreate(t *testing.T) { - t.Skip("FIXME: not a unit test") - // Test success - taskCreate(t, DeviceInfo) - - // Test Failure - DmTaskCreate = dmTaskCreateFail - defer func() { DmTaskCreate = dmTaskCreateFct }() - if task := TaskCreate(-1); task != nil { - t.Fatalf("An error should have occured while creating an invalid task.") - } -} - -func TestTaskRun(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - // Perform the RUN - if err := task.Run(); err != nil { - t.Fatal(err) - } - // Make sure we don't have error with GetInfo - if _, err := task.GetInfo(); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskRun = dmTaskRunFail - defer func() { DmTaskRun = dmTaskRunFct }() - - task = taskCreate(t, DeviceInfo) - // Perform the RUN - if err := task.Run(); err != ErrTaskRun { - t.Fatalf("An error should have occured while running task.") - } - // Make sure GetInfo also fails - if _, err := task.GetInfo(); err != ErrTaskGetInfo { - t.Fatalf("GetInfo should fail if task.Run() failed.") - } -} - -func TestTaskSetName(t 
*testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetName("test"); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetName = dmTaskSetNameFail - defer func() { DmTaskSetName = dmTaskSetNameFct }() - - if err := task.SetName("test"); err != ErrTaskSetName { - t.Fatalf("An error should have occured while runnign SetName.") - } -} - -func TestTaskSetMessage(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetMessage("test"); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetMessage = dmTaskSetMessageFail - defer func() { DmTaskSetMessage = dmTaskSetMessageFct }() - - if err := task.SetMessage("test"); err != ErrTaskSetMessage { - t.Fatalf("An error should have occured while runnign SetMessage.") - } -} - -func TestTaskSetSector(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetSector(128); err != nil { - t.Fatal(err) - } - - DmTaskSetSector = dmTaskSetSectorFail - defer func() { DmTaskSetSector = dmTaskSetSectorFct }() - - // Test failure - if err := task.SetSector(0); err != ErrTaskSetSector { - t.Fatalf("An error should have occured while running SetSector.") - } -} - -func TestTaskSetCookie(t *testing.T) { - t.Skip("FIXME: not a unit test") - var ( - cookie uint = 0 - task = taskCreate(t, DeviceInfo) - ) - - // Test success - if err := task.SetCookie(&cookie, 0); err != nil { - t.Fatal(err) - } - - // Test failure - if err := task.SetCookie(nil, 0); err != ErrNilCookie { - t.Fatalf("An error should have occured while running SetCookie with nil cookie.") - } - - DmTaskSetCookie = dmTaskSetCookieFail - defer func() { DmTaskSetCookie = dmTaskSetCookieFct }() - - if err := task.SetCookie(&cookie, 0); err != ErrTaskSetCookie { - t.Fatalf("An error should have occured while running SetCookie.") - } -} - -func TestTaskSetAddNode(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetAddNode(0); err != nil { - t.Fatal(err) - } - - // Test failure - if err := task.SetAddNode(-1); err != ErrInvalidAddNode { - t.Fatalf("An error should have occured running SetAddNode with wrong node.") - } - - DmTaskSetAddNode = dmTaskSetAddNodeFail - defer func() { DmTaskSetAddNode = dmTaskSetAddNodeFct }() - - if err := task.SetAddNode(0); err != ErrTaskSetAddNode { - t.Fatalf("An error should have occured running SetAddNode.") - } -} - -func TestTaskSetRo(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.SetRo(); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskSetRo = dmTaskSetRoFail - defer func() { DmTaskSetRo = dmTaskSetRoFct }() - - if err := task.SetRo(); err != ErrTaskSetRo { - t.Fatalf("An error should have occured running SetRo.") - } -} - -func TestTaskAddTarget(t *testing.T) { - t.Skip("FIXME: not a unit test") - task := taskCreate(t, DeviceInfo) - - // Test success - if err := task.AddTarget(0, 128, "thinp", ""); err != nil { - t.Fatal(err) - } - - // Test failure - DmTaskAddTarget = dmTaskAddTargetFail - defer func() { DmTaskAddTarget = dmTaskAddTargetFct }() - - if err := task.AddTarget(0, 128, "thinp", ""); err != ErrTaskAddTarget { - t.Fatalf("An error should have occured running AddTarget.") - } -} - -// func TestTaskGetInfo(t *testing.T) { -// task := taskCreate(t, DeviceInfo) - -// // Test success 
-// if _, err := task.GetInfo(); err != nil { -// t.Fatal(err) -// } - -// // Test failure -// DmTaskGetInfo = dmTaskGetInfoFail -// defer func() { DmTaskGetInfo = dmTaskGetInfoFct }() - -// if _, err := task.GetInfo(); err != ErrTaskGetInfo { -// t.Fatalf("An error should have occured running GetInfo.") -// } -// } - -// func TestTaskGetNextTarget(t *testing.T) { -// task := taskCreate(t, DeviceInfo) - -// if next, _, _, _, _ := task.GetNextTarget(0); next == 0 { -// t.Fatalf("The next target should not be 0.") -// } -// } - -/// Utils -func taskCreate(t *testing.T, taskType TaskType) *Task { - task := TaskCreate(taskType) - if task == nil { - t.Fatalf("Error creating task") - } - return task -} - -/// Failure function replacement -func dmTaskCreateFail(t int) *CDmTask { - return nil -} - -func dmTaskRunFail(task *CDmTask) int { - return -1 -} - -func dmTaskSetNameFail(task *CDmTask, name string) int { - return -1 -} - -func dmTaskSetMessageFail(task *CDmTask, message string) int { - return -1 -} - -func dmTaskSetSectorFail(task *CDmTask, sector uint64) int { - return -1 -} - -func dmTaskSetCookieFail(task *CDmTask, cookie *uint, flags uint16) int { - return -1 -} - -func dmTaskSetAddNodeFail(task *CDmTask, addNode AddNodeType) int { - return -1 -} - -func dmTaskSetRoFail(task *CDmTask) int { - return -1 -} - -func dmTaskAddTargetFail(task *CDmTask, - start, size uint64, ttype, params string) int { - return -1 -} - -func dmTaskGetInfoFail(task *CDmTask, info *Info) int { - return -1 -} - -func dmGetNextTargetFail(task *CDmTask, next uintptr, start, length *uint64, - target, params *string) uintptr { - return 0 -} - -func dmAttachLoopDeviceFail(filename string, fd *int) string { - return "" -} - -func sysGetBlockSizeFail(fd uintptr, size *uint64) sysErrno { - return 1 -} - -func dmUdevWaitFail(cookie uint) int { - return -1 -} - -func dmSetDevDirFail(dir string) int { - return -1 -} - -func dmGetLibraryVersionFail(version *string) int { - return -1 -} diff --git a/graphdriver/devmapper/devmapper_wrapper.go b/graphdriver/devmapper/devmapper_wrapper.go deleted file mode 100644 index bf558affc8..0000000000 --- a/graphdriver/devmapper/devmapper_wrapper.go +++ /dev/null @@ -1,229 +0,0 @@ -// +build linux,amd64 - -package devmapper - -/* -#cgo LDFLAGS: -L. -ldevmapper -#include -#include // FIXME: present only for defines, maybe we can remove it? -#include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? - -#ifndef LOOP_CTL_GET_FREE - #define LOOP_CTL_GET_FREE 0x4C82 -#endif - -#ifndef LO_FLAGS_PARTSCAN - #define LO_FLAGS_PARTSCAN 8 -#endif - -// FIXME: Can't we find a way to do the logging in pure Go? -extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); - -static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) 
-{ - char buffer[256]; - va_list ap; - - va_start(ap, f); - vsnprintf(buffer, 256, f, ap); - va_end(ap); - - DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); -} - -static void log_with_errno_init() -{ - dm_log_with_errno_init(log_cb); -} -*/ -import "C" - -import ( - "unsafe" -) - -type ( - CDmTask C.struct_dm_task - - CLoopInfo64 C.struct_loop_info64 - LoopInfo64 struct { - loDevice uint64 /* ioctl r/o */ - loInode uint64 /* ioctl r/o */ - loRdevice uint64 /* ioctl r/o */ - loOffset uint64 - loSizelimit uint64 /* bytes, 0 == max available */ - loNumber uint32 /* ioctl r/o */ - loEncrypt_type uint32 - loEncrypt_key_size uint32 /* ioctl w/o */ - loFlags uint32 /* ioctl r/o */ - loFileName [LoNameSize]uint8 - loCryptName [LoNameSize]uint8 - loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ - loInit [2]uint64 - } -) - -// IOCTL consts -const ( - BlkGetSize64 = C.BLKGETSIZE64 - BlkDiscard = C.BLKDISCARD - - LoopSetFd = C.LOOP_SET_FD - LoopCtlGetFree = C.LOOP_CTL_GET_FREE - LoopGetStatus64 = C.LOOP_GET_STATUS64 - LoopSetStatus64 = C.LOOP_SET_STATUS64 - LoopClrFd = C.LOOP_CLR_FD - LoopSetCapacity = C.LOOP_SET_CAPACITY -) - -const ( - LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR - LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY - LoFlagsPartScan = C.LO_FLAGS_PARTSCAN - LoKeySize = C.LO_KEY_SIZE - LoNameSize = C.LO_NAME_SIZE -) - -var ( - DmGetLibraryVersion = dmGetLibraryVersionFct - DmGetNextTarget = dmGetNextTargetFct - DmLogInitVerbose = dmLogInitVerboseFct - DmSetDevDir = dmSetDevDirFct - DmTaskAddTarget = dmTaskAddTargetFct - DmTaskCreate = dmTaskCreateFct - DmTaskDestroy = dmTaskDestroyFct - DmTaskGetInfo = dmTaskGetInfoFct - DmTaskRun = dmTaskRunFct - DmTaskSetAddNode = dmTaskSetAddNodeFct - DmTaskSetCookie = dmTaskSetCookieFct - DmTaskSetMessage = dmTaskSetMessageFct - DmTaskSetName = dmTaskSetNameFct - DmTaskSetRo = dmTaskSetRoFct - DmTaskSetSector = dmTaskSetSectorFct - DmUdevWait = dmUdevWaitFct - LogWithErrnoInit = logWithErrnoInitFct -) - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func dmTaskDestroyFct(task *CDmTask) { - C.dm_task_destroy((*C.struct_dm_task)(task)) -} - -func dmTaskCreateFct(taskType int) *CDmTask { - return (*CDmTask)(C.dm_task_create(C.int(taskType))) -} - -func dmTaskRunFct(task *CDmTask) int { - ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) - return int(ret) -} - -func dmTaskSetNameFct(task *CDmTask, name string) int { - Cname := C.CString(name) - defer free(Cname) - - return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) -} - -func dmTaskSetMessageFct(task *CDmTask, message string) int { - Cmessage := C.CString(message) - defer free(Cmessage) - - return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) -} - -func dmTaskSetSectorFct(task *CDmTask, sector uint64) int { - return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) -} - -func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int { - cCookie := C.uint32_t(*cookie) - defer func() { - *cookie = uint(cCookie) - }() - return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) -} - -func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int { - return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) -} - -func dmTaskSetRoFct(task *CDmTask) int { - return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) -} - -func dmTaskAddTargetFct(task *CDmTask, - start, size uint64, ttype, params string) int { - - Cttype := C.CString(ttype) - defer free(Cttype) - - 
Cparams := C.CString(params) - defer free(Cparams) - - return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) -} - -func dmTaskGetInfoFct(task *CDmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} - -func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { - var ( - Cstart, Clength C.uint64_t - CtargetType, Cparams *C.char - ) - defer func() { - *start = uint64(Cstart) - *length = uint64(Clength) - *target = C.GoString(CtargetType) - *params = C.GoString(Cparams) - }() - - nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams) - return uintptr(nextp) -} - -func dmUdevWaitFct(cookie uint) int { - return int(C.dm_udev_wait(C.uint32_t(cookie))) -} - -func dmLogInitVerboseFct(level int) { - C.dm_log_init_verbose(C.int(level)) -} - -func logWithErrnoInitFct() { - C.log_with_errno_init() -} - -func dmSetDevDirFct(dir string) int { - Cdir := C.CString(dir) - defer free(Cdir) - - return int(C.dm_set_dev_dir(Cdir)) -} - -func dmGetLibraryVersionFct(version *string) int { - buffer := C.CString(string(make([]byte, 128))) - defer free(buffer) - defer func() { - *version = C.GoString(buffer) - }() - return int(C.dm_get_library_version(buffer, 128)) -} diff --git a/graphdriver/devmapper/driver.go b/graphdriver/devmapper/driver.go deleted file mode 100644 index 8c5a19eea0..0000000000 --- a/graphdriver/devmapper/driver.go +++ /dev/null @@ -1,143 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "fmt" - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "os" - "path" -) - -func init() { - graphdriver.Register("devicemapper", Init) -} - -// Placeholder interfaces, to be replaced -// at integration. - -// End of placeholder interfaces. 
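The deleted wrapper above binds every libdevmapper entry point to a package-level function variable (DmTaskCreate = dmTaskCreateFct, DmTaskRun = dmTaskRunFct, and so on) instead of calling the cgo functions directly; the unit tests elsewhere in this patch lean on that seam to substitute stubs that fail, panic, or record calls. A minimal sketch of the same pattern, using hypothetical names rather than the real wrapper code and nothing beyond the standard library:

package seam

import "fmt"

// Production binding: callers always go through the variable, never the
// underlying function, so a test can reassign it.
var dmTaskRun = dmTaskRunReal

// dmTaskRunReal stands in for the cgo-backed implementation.
func dmTaskRunReal(name string) int {
	return 1 // pretend the task ran successfully
}

// createDevice is the code under test; it only ever sees the seam.
func createDevice(name string) error {
	if dmTaskRun(name) != 1 {
		return fmt.Errorf("running task for %s failed", name)
	}
	return nil
}

A test can then force the failure path and restore the original binding with defer:

	old := dmTaskRun
	dmTaskRun = func(string) int { return -1 }
	defer func() { dmTaskRun = old }()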
- -type Driver struct { - *DeviceSet - home string -} - -var Init = func(home string) (graphdriver.Driver, error) { - deviceSet, err := NewDeviceSet(home, true) - if err != nil { - return nil, err - } - d := &Driver{ - DeviceSet: deviceSet, - home: home, - } - return d, nil -} - -func (d *Driver) String() string { - return "devicemapper" -} - -func (d *Driver) Status() [][2]string { - s := d.DeviceSet.Status() - - status := [][2]string{ - {"Pool Name", s.PoolName}, - {"Data file", s.DataLoopback}, - {"Metadata file", s.MetadataLoopback}, - {"Data Space Used", fmt.Sprintf("%.1f Mb", float64(s.Data.Used)/(1024*1024))}, - {"Data Space Total", fmt.Sprintf("%.1f Mb", float64(s.Data.Total)/(1024*1024))}, - {"Metadata Space Used", fmt.Sprintf("%.1f Mb", float64(s.Metadata.Used)/(1024*1024))}, - {"Metadata Space Total", fmt.Sprintf("%.1f Mb", float64(s.Metadata.Total)/(1024*1024))}, - } - return status -} - -func (d *Driver) Cleanup() error { - return d.DeviceSet.Shutdown() -} - -func (d *Driver) Create(id, parent string) error { - if err := d.DeviceSet.AddDevice(id, parent); err != nil { - return err - } - - mp := path.Join(d.home, "mnt", id) - if err := d.mount(id, mp); err != nil { - return err - } - - if err := osMkdirAll(path.Join(mp, "rootfs"), 0755); err != nil && !osIsExist(err) { - return err - } - - // Create an "id" file with the container/image id in it to help reconscruct this in case - // of later problems - if err := ioutil.WriteFile(path.Join(mp, "id"), []byte(id), 0600); err != nil { - return err - } - - // We float this reference so that the next Get call can - // steal it, so we don't have to unmount - if err := d.DeviceSet.UnmountDevice(id, UnmountFloat); err != nil { - return err - } - - return nil -} - -func (d *Driver) Remove(id string) error { - if !d.DeviceSet.HasDevice(id) { - // Consider removing a non-existing device a no-op - // This is useful to be able to progress on container removal - // if the underlying device has gone away due to earlier errors - return nil - } - - // Sink the float from create in case no Get() call was made - if err := d.DeviceSet.UnmountDevice(id, UnmountSink); err != nil { - return err - } - // This assumes the device has been properly Get/Put:ed and thus is unmounted - if err := d.DeviceSet.DeleteDevice(id); err != nil { - return err - } - - mp := path.Join(d.home, "mnt", id) - if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { - return err - } - - return nil -} - -func (d *Driver) Get(id string) (string, error) { - mp := path.Join(d.home, "mnt", id) - if err := d.mount(id, mp); err != nil { - return "", err - } - - return path.Join(mp, "rootfs"), nil -} - -func (d *Driver) Put(id string) { - if err := d.DeviceSet.UnmountDevice(id, UnmountRegular); err != nil { - utils.Errorf("Warning: error unmounting device %s: %s\n", id, err) - } -} - -func (d *Driver) mount(id, mountPoint string) error { - // Create the target directories if they don't exist - if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) { - return err - } - // Mount the device - return d.DeviceSet.MountDevice(id, mountPoint) -} - -func (d *Driver) Exists(id string) bool { - return d.Devices[id] != nil -} diff --git a/graphdriver/devmapper/driver_test.go b/graphdriver/devmapper/driver_test.go deleted file mode 100644 index 68699f208e..0000000000 --- a/graphdriver/devmapper/driver_test.go +++ /dev/null @@ -1,886 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "fmt" - "github.com/dotcloud/docker/graphdriver" - "io/ioutil" - "path" - 
"runtime" - "strings" - "syscall" - "testing" -) - -func init() { - // Reduce the size the the base fs and loopback for the tests - DefaultDataLoopbackSize = 300 * 1024 * 1024 - DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 - DefaultBaseFsSize = 300 * 1024 * 1024 -} - -// denyAllDevmapper mocks all calls to libdevmapper in the unit tests, and denies them by default -func denyAllDevmapper() { - // Hijack all calls to libdevmapper with default panics. - // Authorized calls are selectively hijacked in each tests. - DmTaskCreate = func(t int) *CDmTask { - panic("DmTaskCreate: this method should not be called here") - } - DmTaskRun = func(task *CDmTask) int { - panic("DmTaskRun: this method should not be called here") - } - DmTaskSetName = func(task *CDmTask, name string) int { - panic("DmTaskSetName: this method should not be called here") - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - panic("DmTaskSetMessage: this method should not be called here") - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - panic("DmTaskSetSector: this method should not be called here") - } - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - panic("DmTaskSetCookie: this method should not be called here") - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - panic("DmTaskSetAddNode: this method should not be called here") - } - DmTaskSetRo = func(task *CDmTask) int { - panic("DmTaskSetRo: this method should not be called here") - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - panic("DmTaskAddTarget: this method should not be called here") - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - panic("DmTaskGetInfo: this method should not be called here") - } - DmGetNextTarget = func(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { - panic("DmGetNextTarget: this method should not be called here") - } - DmUdevWait = func(cookie uint) int { - panic("DmUdevWait: this method should not be called here") - } - DmSetDevDir = func(dir string) int { - panic("DmSetDevDir: this method should not be called here") - } - DmGetLibraryVersion = func(version *string) int { - panic("DmGetLibraryVersion: this method should not be called here") - } - DmLogInitVerbose = func(level int) { - panic("DmLogInitVerbose: this method should not be called here") - } - DmTaskDestroy = func(task *CDmTask) { - panic("DmTaskDestroy: this method should not be called here") - } - LogWithErrnoInit = func() { - panic("LogWithErrnoInit: this method should not be called here") - } -} - -func denyAllSyscall() { - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - panic("sysMount: this method should not be called here") - } - sysUnmount = func(target string, flags int) (err error) { - panic("sysUnmount: this method should not be called here") - } - sysCloseOnExec = func(fd int) { - panic("sysCloseOnExec: this method should not be called here") - } - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - panic("sysSyscall: this method should not be called here") - } - // Not a syscall, but forbidding it here anyway - Mounted = func(mnt string) (bool, error) { - panic("devmapper.Mounted: this method should not be called here") - } - // osOpenFile = os.OpenFile - // osNewFile = os.NewFile - // osCreate = os.Create - // osStat = os.Stat - // osIsNotExist = os.IsNotExist - // osIsExist = os.IsExist - // osMkdirAll = os.MkdirAll - 
// osRemoveAll = os.RemoveAll - // osRename = os.Rename - // osReadlink = os.Readlink - - // execRun = func(name string, args ...string) error { - // return exec.Command(name, args...).Run() - // } -} - -func mkTestDirectory(t *testing.T) string { - dir, err := ioutil.TempDir("", "docker-test-devmapper-") - if err != nil { - t.Fatal(err) - } - return dir -} - -func newDriver(t *testing.T) *Driver { - home := mkTestDirectory(t) - d, err := Init(home) - if err != nil { - t.Fatal(err) - } - return d.(*Driver) -} - -func cleanup(d *Driver) { - d.Cleanup() - osRemoveAll(d.home) -} - -type Set map[string]bool - -func (r Set) Assert(t *testing.T, names ...string) { - for _, key := range names { - required := true - if strings.HasPrefix(key, "?") { - key = key[1:] - required = false - } - if _, exists := r[key]; !exists && required { - t.Fatalf("Key not set: %s", key) - } - delete(r, key) - } - if len(r) != 0 { - t.Fatalf("Unexpected keys: %v", r) - } -} - -func TestInit(t *testing.T) { - var ( - calls = make(Set) - taskMessages = make(Set) - taskTypes = make(Set) - home = mkTestDirectory(t) - ) - defer osRemoveAll(home) - - func() { - denyAllDevmapper() - DmSetDevDir = func(dir string) int { - calls["DmSetDevDir"] = true - expectedDir := "/dev" - if dir != expectedDir { - t.Fatalf("Wrong libdevmapper call\nExpected: DmSetDevDir(%v)\nReceived: DmSetDevDir(%v)\n", expectedDir, dir) - } - return 0 - } - LogWithErrnoInit = func() { - calls["DmLogWithErrnoInit"] = true - } - var task1 CDmTask - DmTaskCreate = func(taskType int) *CDmTask { - calls["DmTaskCreate"] = true - taskTypes[fmt.Sprintf("%d", taskType)] = true - return &task1 - } - DmTaskSetName = func(task *CDmTask, name string) int { - calls["DmTaskSetName"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", expectedTask, task) - } - // FIXME: use Set.AssertRegexp() - if !strings.HasPrefix(name, "docker-") && !strings.HasPrefix(name, "/dev/mapper/docker-") || - !strings.HasSuffix(name, "-pool") && !strings.HasSuffix(name, "-base") { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", "docker-...-pool", name) - } - return 1 - } - DmTaskRun = func(task *CDmTask) int { - calls["DmTaskRun"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskRun(%v)\nReceived: DmTaskRun(%v)\n", expectedTask, task) - } - return 1 - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - calls["DmTaskGetInfo"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskGetInfo(%v)\nReceived: DmTaskGetInfo(%v)\n", expectedTask, task) - } - // This will crash if info is not dereferenceable - info.Exists = 0 - return 1 - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - calls["DmTaskSetSector"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task) - } - if expectedSector := uint64(0); sector != expectedSector { - t.Fatalf("Wrong libdevmapper call to DmTaskSetSector\nExpected: %v\nReceived: %v\n", expectedSector, sector) - } - return 1 - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - calls["DmTaskSetMessage"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: 
DmTaskSetSector(%v)\n", expectedTask, task) - } - taskMessages[message] = true - return 1 - } - DmTaskDestroy = func(task *CDmTask) { - calls["DmTaskDestroy"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) - } - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - calls["DmTaskSetTarget"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) - } - if start != 0 { - t.Fatalf("Wrong start: %d != %d", start, 0) - } - if ttype != "thin" && ttype != "thin-pool" { - t.Fatalf("Wrong ttype: %s", ttype) - } - // Quick smoke test - if params == "" { - t.Fatalf("Params should not be empty") - } - return 1 - } - fakeCookie := uint(4321) - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - calls["DmTaskSetCookie"] = true - expectedTask := &task1 - if task != expectedTask { - t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) - } - if flags != 0 { - t.Fatalf("Cookie flags should be 0 (not %x)", flags) - } - *cookie = fakeCookie - return 1 - } - DmUdevWait = func(cookie uint) int { - calls["DmUdevWait"] = true - if cookie != fakeCookie { - t.Fatalf("Wrong cookie: %d != %d", cookie, fakeCookie) - } - return 1 - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - if addNode != AddNodeOnCreate { - t.Fatalf("Wrong AddNoteType: %v (expected %v)", addNode, AddNodeOnCreate) - } - calls["DmTaskSetAddNode"] = true - return 1 - } - execRun = func(name string, args ...string) error { - calls["execRun"] = true - if name != "mkfs.ext4" { - t.Fatalf("Expected %s to be executed, not %s", "mkfs.ext4", name) - } - return nil - } - driver, err := Init(home) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := driver.Cleanup(); err != nil { - t.Fatal(err) - } - }() - }() - // Put all tests in a function to make sure the garbage collection will - // occur. 
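One detail worth noting before the driver tests that follow: the mount mocks in TestDriverCreate and TestDriverRemove insist that flags equal the magic value uintptr(3236757504). That number is simply 0xC0ED0000, the historical Linux mount magic exposed as syscall.MS_MGC_VAL (aliased as sysMsMgcVal in the deleted sys.go), with no other mount flags or'd in. A quick, self-contained check of that equivalence on linux/amd64:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// 0xC0ED0000 == 3236757504: the MS_MGC_VAL mount magic and nothing else.
	fmt.Println(uintptr(syscall.MS_MGC_VAL) == uintptr(3236757504))
}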
- - // Call GC to cleanup runtime.Finalizers - runtime.GC() - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "DmTaskDestroy", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - ) - taskTypes.Assert(t, "0", "6", "17") - taskMessages.Assert(t, "create_thin 0", "set_transaction_id 0 1") -} - -func fakeInit() func(home string) (graphdriver.Driver, error) { - oldInit := Init - Init = func(home string) (graphdriver.Driver, error) { - return &Driver{ - home: home, - }, nil - } - return oldInit -} - -func restoreInit(init func(home string) (graphdriver.Driver, error)) { - Init = init -} - -func mockAllDevmapper(calls Set) { - DmSetDevDir = func(dir string) int { - calls["DmSetDevDir"] = true - return 0 - } - LogWithErrnoInit = func() { - calls["DmLogWithErrnoInit"] = true - } - DmTaskCreate = func(taskType int) *CDmTask { - calls["DmTaskCreate"] = true - return &CDmTask{} - } - DmTaskSetName = func(task *CDmTask, name string) int { - calls["DmTaskSetName"] = true - return 1 - } - DmTaskRun = func(task *CDmTask) int { - calls["DmTaskRun"] = true - return 1 - } - DmTaskGetInfo = func(task *CDmTask, info *Info) int { - calls["DmTaskGetInfo"] = true - return 1 - } - DmTaskSetSector = func(task *CDmTask, sector uint64) int { - calls["DmTaskSetSector"] = true - return 1 - } - DmTaskSetMessage = func(task *CDmTask, message string) int { - calls["DmTaskSetMessage"] = true - return 1 - } - DmTaskDestroy = func(task *CDmTask) { - calls["DmTaskDestroy"] = true - } - DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { - calls["DmTaskSetTarget"] = true - return 1 - } - DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { - calls["DmTaskSetCookie"] = true - return 1 - } - DmUdevWait = func(cookie uint) int { - calls["DmUdevWait"] = true - return 1 - } - DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { - calls["DmTaskSetAddNode"] = true - return 1 - } - execRun = func(name string, args ...string) error { - calls["execRun"] = true - return nil - } -} - -func TestDriverName(t *testing.T) { - denyAllDevmapper() - defer denyAllDevmapper() - - oldInit := fakeInit() - defer restoreInit(oldInit) - - d := newDriver(t) - if d.String() != "devicemapper" { - t.Fatalf("Expected driver name to be devicemapper got %s", d.String()) - } -} - -func TestDriverCreate(t *testing.T) { - denyAllDevmapper() - denyAllSyscall() - defer denyAllSyscall() - defer denyAllDevmapper() - - calls := make(Set) - mockAllDevmapper(calls) - - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - calls["sysMount"] = true - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) - } - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFstype := "ext4"; fstype != expectedFstype { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) - } - if expectedFlags := uintptr(3236757504); flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: 
Mount(%v)\n", expectedFlags, flags) - } - return nil - } - - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - if !strings.HasPrefix(mnt, "/tmp/docker-test-devmapper-") || !strings.HasSuffix(mnt, "/mnt/1") { - t.Fatalf("Wrong mounted call\nExpected: Mounted(%v)\nReceived: Mounted(%v)\n", "/tmp/docker-test-devmapper-.../mnt/1", mnt) - } - return false, nil - } - - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - calls["sysSyscall"] = true - if trap != sysSysIoctl { - t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap) - } - switch a2 { - case LoopSetFd: - calls["ioctl.loopsetfd"] = true - case LoopCtlGetFree: - calls["ioctl.loopctlgetfree"] = true - case LoopGetStatus64: - calls["ioctl.loopgetstatus"] = true - case LoopSetStatus64: - calls["ioctl.loopsetstatus"] = true - case LoopClrFd: - calls["ioctl.loopclrfd"] = true - case LoopSetCapacity: - calls["ioctl.loopsetcapacity"] = true - case BlkGetSize64: - calls["ioctl.blkgetsize"] = true - default: - t.Fatalf("Unexpected IOCTL. Received %d", a2) - } - return 0, 0, 0 - } - - func() { - d := newDriver(t) - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - "sysSyscall", - "ioctl.blkgetsize", - "ioctl.loopsetfd", - "ioctl.loopsetstatus", - "?ioctl.loopctlgetfree", - ) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - calls.Assert(t, - "DmTaskCreate", - "DmTaskGetInfo", - "sysMount", - "DmTaskRun", - "DmTaskSetTarget", - "DmTaskSetSector", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetName", - "DmTaskSetMessage", - "DmTaskSetAddNode", - ) - - }() - - runtime.GC() - - calls.Assert(t, - "DmTaskDestroy", - ) -} - -func TestDriverRemove(t *testing.T) { - denyAllDevmapper() - denyAllSyscall() - defer denyAllSyscall() - defer denyAllDevmapper() - - calls := make(Set) - mockAllDevmapper(calls) - - sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { - calls["sysMount"] = true - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) - } - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFstype := "ext4"; fstype != expectedFstype { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) - } - if expectedFlags := uintptr(3236757504); flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) - } - return nil - } - sysUnmount = func(target string, flags int) (err error) { - calls["sysUnmount"] = true - // FIXME: compare the exact source and target strings (inodes + devname) - if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) - } - if expectedFlags := 0; flags != expectedFlags { - t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) - } - return nil 
- } - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - return false, nil - } - - sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - calls["sysSyscall"] = true - if trap != sysSysIoctl { - t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap) - } - switch a2 { - case LoopSetFd: - calls["ioctl.loopsetfd"] = true - case LoopCtlGetFree: - calls["ioctl.loopctlgetfree"] = true - case LoopGetStatus64: - calls["ioctl.loopgetstatus"] = true - case LoopSetStatus64: - calls["ioctl.loopsetstatus"] = true - case LoopClrFd: - calls["ioctl.loopclrfd"] = true - case LoopSetCapacity: - calls["ioctl.loopsetcapacity"] = true - case BlkGetSize64: - calls["ioctl.blkgetsize"] = true - default: - t.Fatalf("Unexpected IOCTL. Received %d", a2) - } - return 0, 0, 0 - } - - func() { - d := newDriver(t) - - calls.Assert(t, - "DmSetDevDir", - "DmLogWithErrnoInit", - "DmTaskSetName", - "DmTaskRun", - "DmTaskGetInfo", - "execRun", - "DmTaskCreate", - "DmTaskSetTarget", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetSector", - "DmTaskSetMessage", - "DmTaskSetAddNode", - "sysSyscall", - "ioctl.blkgetsize", - "ioctl.loopsetfd", - "ioctl.loopsetstatus", - "?ioctl.loopctlgetfree", - ) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - calls.Assert(t, - "DmTaskCreate", - "DmTaskGetInfo", - "sysMount", - "DmTaskRun", - "DmTaskSetTarget", - "DmTaskSetSector", - "DmTaskSetCookie", - "DmUdevWait", - "DmTaskSetName", - "DmTaskSetMessage", - "DmTaskSetAddNode", - ) - - Mounted = func(mnt string) (bool, error) { - calls["Mounted"] = true - return true, nil - } - - if err := d.Remove("1"); err != nil { - t.Fatal(err) - } - - calls.Assert(t, - "DmTaskRun", - "DmTaskSetSector", - "DmTaskSetName", - "DmTaskSetMessage", - "DmTaskCreate", - "DmTaskGetInfo", - "DmTaskSetCookie", - "DmTaskSetTarget", - "DmTaskSetAddNode", - "DmUdevWait", - "sysUnmount", - ) - }() - runtime.GC() - - calls.Assert(t, - "DmTaskDestroy", - ) -} - -func TestCleanup(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skip("Unimplemented") - d := newDriver(t) - defer osRemoveAll(d.home) - - mountPoints := make([]string, 2) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - // Mount the id - p, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - mountPoints[0] = p - - if err := d.Create("2", "1"); err != nil { - t.Fatal(err) - } - - p, err = d.Get("2") - if err != nil { - t.Fatal(err) - } - mountPoints[1] = p - - // Ensure that all the mount points are currently mounted - for _, p := range mountPoints { - if mounted, err := Mounted(p); err != nil { - t.Fatal(err) - } else if !mounted { - t.Fatalf("Expected %s to be mounted", p) - } - } - - // Ensure that devices are active - for _, p := range []string{"1", "2"} { - if !d.HasActivatedDevice(p) { - t.Fatalf("Expected %s to have an active device", p) - } - } - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - - // Ensure that all the mount points are no longer mounted - for _, p := range mountPoints { - if mounted, err := Mounted(p); err != nil { - t.Fatal(err) - } else if mounted { - t.Fatalf("Expected %s to not be mounted", p) - } - } - - // Ensure that devices are no longer activated - for _, p := range []string{"1", "2"} { - if d.HasActivatedDevice(p) { - t.Fatalf("Expected %s not be an active device", p) - } - } -} - -func TestNotMounted(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skip("Not implemented") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err 
!= nil { - t.Fatal(err) - } - - mounted, err := Mounted(path.Join(d.home, "mnt", "1")) - if err != nil { - t.Fatal(err) - } - if mounted { - t.Fatal("Id 1 should not be mounted") - } -} - -func TestMounted(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } - - mounted, err := Mounted(path.Join(d.home, "mnt", "1")) - if err != nil { - t.Fatal(err) - } - if !mounted { - t.Fatal("Id 1 should be mounted") - } -} - -func TestInitCleanedDriver(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - - driver, err := Init(d.home) - if err != nil { - t.Fatal(err) - } - d = driver.(*Driver) - defer cleanup(d) - - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } -} - -func TestMountMountedDriver(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - // Perform get on same id to ensure that it will - // not be mounted twice - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } -} - -func TestGetReturnsValidDevice(t *testing.T) { - t.Skip("FIXME: not a unit test") - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - if !d.HasDevice("1") { - t.Fatalf("Expected id 1 to be in device set") - } - - if _, err := d.Get("1"); err != nil { - t.Fatal(err) - } - - if !d.HasActivatedDevice("1") { - t.Fatalf("Expected id 1 to be activated") - } - - if !d.HasInitializedDevice("1") { - t.Fatalf("Expected id 1 to be initialized") - } -} - -func TestDriverGetSize(t *testing.T) { - t.Skip("FIXME: not a unit test") - t.Skipf("Size is currently not implemented") - - d := newDriver(t) - defer cleanup(d) - - if err := d.Create("1", ""); err != nil { - t.Fatal(err) - } - - mountPoint, err := d.Get("1") - if err != nil { - t.Fatal(err) - } - - size := int64(1024) - - f, err := osCreate(path.Join(mountPoint, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - f.Close() - - // diffSize, err := d.DiffSize("1") - // if err != nil { - // t.Fatal(err) - // } - // if diffSize != size { - // t.Fatalf("Expected size %d got %d", size, diffSize) - // } -} - -func assertMap(t *testing.T, m map[string]bool, keys ...string) { - for _, key := range keys { - if _, exists := m[key]; !exists { - t.Fatalf("Key not set: %s", key) - } - delete(m, key) - } - if len(m) != 0 { - t.Fatalf("Unexpected keys: %v", m) - } -} diff --git a/graphdriver/devmapper/ioctl.go b/graphdriver/devmapper/ioctl.go deleted file mode 100644 index 30bafff943..0000000000 --- a/graphdriver/devmapper/ioctl.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "unsafe" -) - -func ioctlLoopCtlGetFree(fd uintptr) (int, error) { - index, _, err := sysSyscall(sysSysIoctl, fd, LoopCtlGetFree, 0) - if err != 0 { - return 0, err - } - return int(index), nil -} - -func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetFd, sparseFd); err != 0 { - return err - } - return nil -} - -func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *LoopInfo64) 
error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { - return err - } - return nil -} - -func ioctlLoopClrFd(loopFd uintptr) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopClrFd, 0); err != 0 { - return err - } - return nil -} - -func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) { - loopInfo := &LoopInfo64{} - - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { - return nil, err - } - return loopInfo, nil -} - -func ioctlLoopSetCapacity(loopFd uintptr, value int) error { - if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { - return err - } - return nil -} - -func ioctlBlkGetSize64(fd uintptr) (int64, error) { - var size int64 - if _, _, err := sysSyscall(sysSysIoctl, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { - return 0, err - } - return size, nil -} - -func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { - var r [2]uint64 - r[0] = offset - r[1] = length - - if _, _, err := sysSyscall(sysSysIoctl, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { - return err - } - return nil -} diff --git a/graphdriver/devmapper/mount.go b/graphdriver/devmapper/mount.go deleted file mode 100644 index 4f19109bf8..0000000000 --- a/graphdriver/devmapper/mount.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "path/filepath" -) - -// FIXME: this is copy-pasted from the aufs driver. -// It should be moved into the core. - -var Mounted = func(mountpoint string) (bool, error) { - mntpoint, err := osStat(mountpoint) - if err != nil { - if osIsNotExist(err) { - return false, nil - } - return false, err - } - parent, err := osStat(filepath.Join(mountpoint, "..")) - if err != nil { - return false, err - } - mntpointSt := toSysStatT(mntpoint.Sys()) - parentSt := toSysStatT(parent.Sys()) - return mntpointSt.Dev != parentSt.Dev, nil -} diff --git a/graphdriver/devmapper/sys.go b/graphdriver/devmapper/sys.go deleted file mode 100644 index 5a9ab4d74b..0000000000 --- a/graphdriver/devmapper/sys.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build linux,amd64 - -package devmapper - -import ( - "os" - "os/exec" - "syscall" -) - -type ( - sysStatT syscall.Stat_t - sysErrno syscall.Errno - - osFile struct{ *os.File } -) - -var ( - sysMount = syscall.Mount - sysUnmount = syscall.Unmount - sysCloseOnExec = syscall.CloseOnExec - sysSyscall = syscall.Syscall - - osOpenFile = func(name string, flag int, perm os.FileMode) (*osFile, error) { - f, err := os.OpenFile(name, flag, perm) - return &osFile{File: f}, err - } - osOpen = func(name string) (*osFile, error) { f, err := os.Open(name); return &osFile{File: f}, err } - osNewFile = os.NewFile - osCreate = os.Create - osStat = os.Stat - osIsNotExist = os.IsNotExist - osIsExist = os.IsExist - osMkdirAll = os.MkdirAll - osRemoveAll = os.RemoveAll - osRename = os.Rename - osReadlink = os.Readlink - - execRun = func(name string, args ...string) error { return exec.Command(name, args...).Run() } -) - -const ( - sysMsMgcVal = syscall.MS_MGC_VAL - sysMsRdOnly = syscall.MS_RDONLY - sysEInval = syscall.EINVAL - sysSysIoctl = syscall.SYS_IOCTL - sysEBusy = syscall.EBUSY - - osORdOnly = os.O_RDONLY - osORdWr = os.O_RDWR - osOCreate = os.O_CREATE - osModeDevice = os.ModeDevice -) - -func toSysStatT(i interface{}) *sysStatT { - return (*sysStatT)(i.(*syscall.Stat_t)) -} diff --git a/graphdriver/driver.go 
b/graphdriver/driver.go deleted file mode 100644 index 89fd03a624..0000000000 --- a/graphdriver/driver.go +++ /dev/null @@ -1,93 +0,0 @@ -package graphdriver - -import ( - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/utils" - "os" - "path" -) - -type InitFunc func(root string) (Driver, error) - -type Driver interface { - String() string - - Create(id, parent string) error - Remove(id string) error - - Get(id string) (dir string, err error) - Put(id string) - Exists(id string) bool - - Status() [][2]string - - Cleanup() error -} - -type Differ interface { - Diff(id string) (archive.Archive, error) - Changes(id string) ([]archive.Change, error) - ApplyDiff(id string, diff archive.ArchiveReader) error - DiffSize(id string) (bytes int64, err error) -} - -var ( - DefaultDriver string - // All registred drivers - drivers map[string]InitFunc - // Slice of drivers that should be used in an order - priority = []string{ - "aufs", - "devicemapper", - "vfs", - // experimental, has to be enabled manually for now - "btrfs", - } -) - -func init() { - drivers = make(map[string]InitFunc) -} - -func Register(name string, initFunc InitFunc) error { - if _, exists := drivers[name]; exists { - return fmt.Errorf("Name already registered %s", name) - } - drivers[name] = initFunc - - return nil -} - -func GetDriver(name, home string) (Driver, error) { - if initFunc, exists := drivers[name]; exists { - return initFunc(path.Join(home, name)) - } - return nil, fmt.Errorf("No such driver: %s", name) -} - -func New(root string) (driver Driver, err error) { - for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} { - if name != "" { - return GetDriver(name, root) - } - } - - // Check for priority drivers first - for _, name := range priority { - if driver, err = GetDriver(name, root); err != nil { - utils.Debugf("Error loading driver %s: %s", name, err) - continue - } - return driver, nil - } - - // Check all registered drivers if no priority driver is found - for _, initFunc := range drivers { - if driver, err = initFunc(root); err != nil { - continue - } - return driver, nil - } - return nil, err -} diff --git a/graphdriver/vfs/driver.go b/graphdriver/vfs/driver.go deleted file mode 100644 index 21da63878a..0000000000 --- a/graphdriver/vfs/driver.go +++ /dev/null @@ -1,95 +0,0 @@ -package vfs - -import ( - "fmt" - "github.com/dotcloud/docker/graphdriver" - "os" - "os/exec" - "path" -) - -func init() { - graphdriver.Register("vfs", Init) -} - -func Init(home string) (graphdriver.Driver, error) { - d := &Driver{ - home: home, - } - return d, nil -} - -type Driver struct { - home string -} - -func (d *Driver) String() string { - return "vfs" -} - -func (d *Driver) Status() [][2]string { - return nil -} - -func (d *Driver) Cleanup() error { - return nil -} - -func copyDir(src, dst string) error { - if output, err := exec.Command("cp", "-aT", "--reflink=auto", src, dst).CombinedOutput(); err != nil { - return fmt.Errorf("Error VFS copying directory: %s (%s)", err, output) - } - return nil -} - -func (d *Driver) Create(id string, parent string) error { - dir := d.dir(id) - if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { - return err - } - if err := os.Mkdir(dir, 0700); err != nil { - return err - } - if parent == "" { - return nil - } - parentDir, err := d.Get(parent) - if err != nil { - return fmt.Errorf("%s: %s", parent, err) - } - if err := copyDir(parentDir, dir); err != nil { - return err - } - return nil -} - -func (d *Driver) dir(id string) string { - return 
path.Join(d.home, "dir", path.Base(id)) -} - -func (d *Driver) Remove(id string) error { - if _, err := os.Stat(d.dir(id)); err != nil { - return err - } - return os.RemoveAll(d.dir(id)) -} - -func (d *Driver) Get(id string) (string, error) { - dir := d.dir(id) - if st, err := os.Stat(dir); err != nil { - return "", err - } else if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) - } - return dir, nil -} - -func (d *Driver) Put(id string) { - // The vfs driver has no runtime resources (e.g. mounts) - // to clean up, so we don't need anything here -} - -func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) - return err == nil -} diff --git a/image/graph.go b/image/graph.go index 857c09edd9..dd0136b00e 100644 --- a/image/graph.go +++ b/image/graph.go @@ -1,7 +1,7 @@ package image import ( - "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/runtime/graphdriver" ) type Graph interface { diff --git a/image/image.go b/image/image.go index e091879049..b2ddb03b0b 100644 --- a/image/image.go +++ b/image/image.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io/ioutil" diff --git a/integration/graph_test.go b/integration/graph_test.go index e575a252f3..ea9ddc7ae9 100644 --- a/integration/graph_test.go +++ b/integration/graph_test.go @@ -5,7 +5,7 @@ import ( "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/graph" - "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/utils" "io" diff --git a/runtime/container.go b/runtime/container.go index f4de40a16a..3c7aa22751 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -7,7 +7,7 @@ import ( "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/runtime/execdriver" - "github.com/dotcloud/docker/graphdriver" + "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/links" "github.com/dotcloud/docker/nat" diff --git a/runtime/graphdriver/aufs/aufs.go b/runtime/graphdriver/aufs/aufs.go new file mode 100644 index 0000000000..83a6579bc6 --- /dev/null +++ b/runtime/graphdriver/aufs/aufs.go @@ -0,0 +1,401 @@ +/* + +aufs driver directory structure + +. +├── layers // Metadata of layers +│   ├── 1 +│   ├── 2 +│   └── 3 +├── diffs // Content of the layer +│   ├── 1 // Contains layers that need to be mounted for the id +│   ├── 2 +│   └── 3 +└── mnt // Mount points for the rw layers to be mounted + ├── 1 + ├── 2 + └── 3 + +*/ + +package aufs + +import ( + "bufio" + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/runtime/graphdriver" + mountpk "github.com/dotcloud/docker/pkg/mount" + "github.com/dotcloud/docker/utils" + "os" + "os/exec" + "path" + "strings" + "sync" +) + +var ( + ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") +) + +func init() { + graphdriver.Register("aufs", Init) +} + +type Driver struct { + root string + sync.Mutex // Protects concurrent modification to active + active map[string]int +} + +// New returns a new AUFS driver. +// An error is returned if AUFS is not supported. 
+func Init(root string) (graphdriver.Driver, error) { + // Try to load the aufs kernel module + if err := supportsAufs(); err != nil { + return nil, err + } + paths := []string{ + "mnt", + "diff", + "layers", + } + + a := &Driver{ + root: root, + active: make(map[string]int), + } + + // Create the root aufs driver dir and return + // if it already exists + // If not populate the dir structure + if err := os.MkdirAll(root, 0755); err != nil { + if os.IsExist(err) { + return a, nil + } + return nil, err + } + + for _, p := range paths { + if err := os.MkdirAll(path.Join(root, p), 0755); err != nil { + return nil, err + } + } + return a, nil +} + +// Return a nil error if the kernel supports aufs +// We cannot modprobe because inside dind modprobe fails +// to run +func supportsAufs() error { + // We can try to modprobe aufs first before looking at + // proc/filesystems for when aufs is supported + exec.Command("modprobe", "aufs").Run() + + f, err := os.Open("/proc/filesystems") + if err != nil { + return err + } + defer f.Close() + + s := bufio.NewScanner(f) + for s.Scan() { + if strings.Contains(s.Text(), "aufs") { + return nil + } + } + return ErrAufsNotSupported +} + +func (a Driver) rootPath() string { + return a.root +} + +func (Driver) String() string { + return "aufs" +} + +func (a Driver) Status() [][2]string { + ids, _ := loadIds(path.Join(a.rootPath(), "layers")) + return [][2]string{ + {"Root Dir", a.rootPath()}, + {"Dirs", fmt.Sprintf("%d", len(ids))}, + } +} + +// Exists returns true if the given id is registered with +// this driver +func (a Driver) Exists(id string) bool { + if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + return false + } + return true +} + +// Three folders are created for each id +// mnt, layers, and diff +func (a *Driver) Create(id, parent string) error { + if err := a.createDirsFor(id); err != nil { + return err + } + // Write the layers metadata + f, err := os.Create(path.Join(a.rootPath(), "layers", id)) + if err != nil { + return err + } + defer f.Close() + + if parent != "" { + ids, err := getParentIds(a.rootPath(), parent) + if err != nil { + return err + } + + if _, err := fmt.Fprintln(f, parent); err != nil { + return err + } + for _, i := range ids { + if _, err := fmt.Fprintln(f, i); err != nil { + return err + } + } + } + return nil +} + +func (a *Driver) createDirsFor(id string) error { + paths := []string{ + "mnt", + "diff", + } + + for _, p := range paths { + if err := os.MkdirAll(path.Join(a.rootPath(), p, id), 0755); err != nil { + return err + } + } + return nil +} + +// Unmount and remove the dir information +func (a *Driver) Remove(id string) error { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + if a.active[id] != 0 { + utils.Errorf("Warning: removing active id %s\n", id) + } + + // Make sure the dir is umounted first + if err := a.unmount(id); err != nil { + return err + } + tmpDirs := []string{ + "mnt", + "diff", + } + + // Atomically remove each directory in turn by first moving it out of the + // way (so that docker doesn't find it anymore) before doing removal of + // the whole tree. 
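The Create method above records each layer's full ancestry in a plain-text metadata file under "layers": the direct parent id on the first line, followed by every id the parent itself listed, so the whole branch chain is recoverable from a single file read. The getParentIds helper it calls is not part of this hunk; a minimal reader along those lines, assuming that one-id-per-line layout (a hypothetical sketch, not the real helper), could look like:

package layers

import (
	"bufio"
	"os"
	"path"
)

// readParentIds returns the ancestor ids recorded for a layer, nearest
// parent first, by reading the layer's metadata file line by line.
func readParentIds(root, id string) ([]string, error) {
	f, err := os.Open(path.Join(root, "layers", id))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var ids []string
	s := bufio.NewScanner(f)
	for s.Scan() {
		if line := s.Text(); line != "" {
			ids = append(ids, line)
		}
	}
	return ids, s.Err()
}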
+ for _, p := range tmpDirs { + + realPath := path.Join(a.rootPath(), p, id) + tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) + if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { + return err + } + defer os.RemoveAll(tmpPath) + } + + // Remove the layers file for the id + if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// Return the rootfs path for the id +// This will mount the dir at it's given path +func (a *Driver) Get(id string) (string, error) { + ids, err := getParentIds(a.rootPath(), id) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + ids = []string{} + } + + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + count := a.active[id] + + // If a dir does not have a parent ( no layers )do not try to mount + // just return the diff path to the data + out := path.Join(a.rootPath(), "diff", id) + if len(ids) > 0 { + out = path.Join(a.rootPath(), "mnt", id) + + if count == 0 { + if err := a.mount(id); err != nil { + return "", err + } + } + } + + a.active[id] = count + 1 + + return out, nil +} + +func (a *Driver) Put(id string) { + // Protect the a.active from concurrent access + a.Lock() + defer a.Unlock() + + if count := a.active[id]; count > 1 { + a.active[id] = count - 1 + } else { + ids, _ := getParentIds(a.rootPath(), id) + // We only mounted if there are any parents + if ids != nil && len(ids) > 0 { + a.unmount(id) + } + delete(a.active, id) + } +} + +// Returns an archive of the contents for the id +func (a *Driver) Diff(id string) (archive.Archive, error) { + return archive.TarFilter(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ + Compression: archive.Uncompressed, + }) +} + +func (a *Driver) ApplyDiff(id string, diff archive.ArchiveReader) error { + return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil) +} + +// Returns the size of the contents for the id +func (a *Driver) DiffSize(id string) (int64, error) { + return utils.TreeSize(path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) Changes(id string) ([]archive.Change, error) { + layers, err := a.getParentLayerPaths(id) + if err != nil { + return nil, err + } + return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) +} + +func (a *Driver) getParentLayerPaths(id string) ([]string, error) { + parentIds, err := getParentIds(a.rootPath(), id) + if err != nil { + return nil, err + } + if len(parentIds) == 0 { + return nil, fmt.Errorf("Dir %s does not have any parent layers", id) + } + layers := make([]string, len(parentIds)) + + // Get the diff paths for all the parent ids + for i, p := range parentIds { + layers[i] = path.Join(a.rootPath(), "diff", p) + } + return layers, nil +} + +func (a *Driver) mount(id string) error { + // If the id is mounted or we get an error return + if mounted, err := a.mounted(id); err != nil || mounted { + return err + } + + var ( + target = path.Join(a.rootPath(), "mnt", id) + rw = path.Join(a.rootPath(), "diff", id) + ) + + layers, err := a.getParentLayerPaths(id) + if err != nil { + return err + } + + if err := a.aufsMount(layers, rw, target); err != nil { + return err + } + return nil +} + +func (a *Driver) unmount(id string) error { + if mounted, err := a.mounted(id); err != nil || !mounted { + return err + } + target := path.Join(a.rootPath(), "mnt", id) + return Unmount(target) +} + +func (a *Driver) mounted(id string) (bool, error) { + target := path.Join(a.rootPath(), 
"mnt", id) + return mountpk.Mounted(target) +} + +// During cleanup aufs needs to unmount all mountpoints +func (a *Driver) Cleanup() error { + ids, err := loadIds(path.Join(a.rootPath(), "layers")) + if err != nil { + return err + } + for _, id := range ids { + if err := a.unmount(id); err != nil { + utils.Errorf("Unmounting %s: %s", utils.TruncateID(id), err) + } + } + return nil +} + +func (a *Driver) aufsMount(ro []string, rw, target string) (err error) { + defer func() { + if err != nil { + Unmount(target) + } + }() + + if err = a.tryMount(ro, rw, target); err != nil { + if err = a.mountRw(rw, target); err != nil { + return + } + + for _, layer := range ro { + branch := fmt.Sprintf("append:%s=ro+wh", layer) + if err = mount("none", target, "aufs", MsRemount, branch); err != nil { + return + } + } + } + return +} + +// Try to mount using the aufs fast path, if this fails then +// append ro layers. +func (a *Driver) tryMount(ro []string, rw, target string) (err error) { + var ( + rwBranch = fmt.Sprintf("%s=rw", rw) + roBranches = fmt.Sprintf("%s=ro+wh:", strings.Join(ro, "=ro+wh:")) + ) + return mount("none", target, "aufs", 0, fmt.Sprintf("br:%v:%v,xino=/dev/shm/aufs.xino", rwBranch, roBranches)) +} + +func (a *Driver) mountRw(rw, target string) error { + return mount("none", target, "aufs", 0, fmt.Sprintf("br:%s,xino=/dev/shm/aufs.xino", rw)) +} + +func rollbackMount(target string, err error) { + if err != nil { + Unmount(target) + } +} diff --git a/runtime/graphdriver/aufs/aufs_test.go b/runtime/graphdriver/aufs/aufs_test.go new file mode 100644 index 0000000000..cb417c3b26 --- /dev/null +++ b/runtime/graphdriver/aufs/aufs_test.go @@ -0,0 +1,697 @@ +package aufs + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/runtime/graphdriver" + "io/ioutil" + "os" + "path" + "testing" +) + +var ( + tmp = path.Join(os.TempDir(), "aufs-tests", "aufs") +) + +func testInit(dir string, t *testing.T) graphdriver.Driver { + d, err := Init(dir) + if err != nil { + if err == ErrAufsNotSupported { + t.Skip(err) + } else { + t.Fatal(err) + } + } + return d +} + +func newDriver(t *testing.T) *Driver { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + return d.(*Driver) +} + +func TestNewDriver(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + d := testInit(tmp, t) + defer os.RemoveAll(tmp) + if d == nil { + t.Fatalf("Driver should not be nil") + } +} + +func TestAufsString(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if d.String() != "aufs" { + t.Fatalf("Expected aufs got %s", d.String()) + } +} + +func TestCreateDirStructure(t *testing.T) { + newDriver(t) + defer os.RemoveAll(tmp) + + paths := []string{ + "mnt", + "layers", + "diff", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p)); err != nil { + t.Fatal(err) + } + } +} + +// We should be able to create two drivers with the same dir structure +func TestNewDriverFromExistingDir(t *testing.T) { + if err := os.MkdirAll(tmp, 0755); err != nil { + t.Fatal(err) + } + + testInit(tmp, t) + testInit(tmp, t) + os.RemoveAll(tmp) +} + +func TestCreateNewDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } +} + +func TestCreateNewDirStructure(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + paths := []string{ 
+ "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { + t.Fatal(err) + } + } +} + +func TestRemoveImage(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if err := d.Remove("1"); err != nil { + t.Fatal(err) + } + + paths := []string{ + "mnt", + "diff", + "layers", + } + + for _, p := range paths { + if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { + t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) + } + } +} + +func TestGetWithoutParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1") + if err != nil { + t.Fatal(err) + } + expected := path.Join(tmp, "diff", "1") + if diffPath != expected { + t.Fatalf("Expected path %s got %s", expected, diffPath) + } +} + +func TestCleanupWithNoDirs(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestCleanupWithDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } +} + +func TestMountedFalseResponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + response, err := d.mounted("1") + if err != nil { + t.Fatal(err) + } + + if response != false { + t.Fatalf("Response if dir id 1 is mounted should be false") + } +} + +func TestMountedTrueReponse(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + _, err := d.Get("2") + if err != nil { + t.Fatal(err) + } + + response, err := d.mounted("2") + if err != nil { + t.Fatal(err) + } + + if response != true { + t.Fatalf("Response if dir id 2 is mounted should be true") + } +} + +func TestMountWithParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + expected := path.Join(tmp, "mnt", "2") + if mntPath != expected { + t.Fatalf("Expected %s got %s", expected, mntPath) + } +} + +func TestRemoveMountedDir(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPath, err := d.Get("2") + if err != nil { + t.Fatal(err) + } + if mntPath == "" { + t.Fatal("mntPath should not be empty string") + } + + mounted, err := d.mounted("2") + if err != nil { + t.Fatal(err) + } + + if !mounted { + t.Fatalf("Dir id 2 should be mounted") + } + + if err := d.Remove("2"); err != nil { + t.Fatal(err) + } +} + +func TestCreateWithInvalidParent(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", "docker"); err == nil { + t.Fatalf("Error should not be nil with parent does 
not exist") + } +} + +func TestGetDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + a, err := d.Diff("1") + if err != nil { + t.Fatal(err) + } + if a == nil { + t.Fatalf("Archive should not be nil") + } +} + +func TestChanges(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + defer func() { + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + }() + + mntPoint, err := d.Get("2") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err := os.Create(path.Join(mntPoint, "test.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err := d.Changes("2") + if err != nil { + t.Fatal(err) + } + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change := changes[0] + + expectedPath := "/test.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } + + if err := d.Create("3", "2"); err != nil { + t.Fatal(err) + } + mntPoint, err = d.Get("3") + if err != nil { + t.Fatal(err) + } + + // Create a file to save in the mountpoint + f, err = os.Create(path.Join(mntPoint, "test2.txt")) + if err != nil { + t.Fatal(err) + } + + if _, err := f.WriteString("testline"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + + changes, err = d.Changes("3") + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) + } + change = changes[0] + + expectedPath = "/test2.txt" + if change.Path != expectedPath { + t.Fatalf("Expected path %s got %s", expectedPath, change.Path) + } + + if change.Kind != archive.ChangeAdd { + t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) + } +} + +func TestDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } +} + +func TestChildDiffSize(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff 
path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + s, err := f.Stat() + if err != nil { + t.Fatal(err) + } + size = s.Size() + if err := f.Close(); err != nil { + t.Fatal(err) + } + + diffSize, err := d.DiffSize("1") + if err != nil { + t.Fatal(err) + } + if diffSize != size { + t.Fatalf("Expected size to be %d got %d", size, diffSize) + } + + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + diffSize, err = d.DiffSize("2") + if err != nil { + t.Fatal(err) + } + // The diff size for the child should be zero + if diffSize != 0 { + t.Fatalf("Expected size to be %d got %d", 0, diffSize) + } +} + +func TestExists(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if d.Exists("none") { + t.Fatal("id name should not exist in the driver") + } + + if !d.Exists("1") { + t.Fatal("id 1 should exist in the driver") + } +} + +func TestStatus(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + status := d.Status() + if status == nil || len(status) == 0 { + t.Fatal("Status should not be nil or empty") + } + rootDir := status[0] + dirs := status[1] + if rootDir[0] != "Root Dir" { + t.Fatalf("Expected Root Dir got %s", rootDir[0]) + } + if rootDir[1] != d.rootPath() { + t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) + } + if dirs[0] != "Dirs" { + t.Fatalf("Expected Dirs got %s", dirs[0]) + } + if dirs[1] != "1" { + t.Fatalf("Expected 1 got %s", dirs[1]) + } +} + +func TestApplyDiff(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + diffPath, err := d.Get("1") + if err != nil { + t.Fatal(err) + } + + // Add a file to the diff path with a fixed size + size := int64(1024) + + f, err := os.Create(path.Join(diffPath, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + diff, err := d.Diff("1") + if err != nil { + t.Fatal(err) + } + + if err := d.Create("2", ""); err != nil { + t.Fatal(err) + } + if err := d.Create("3", "2"); err != nil { + t.Fatal(err) + } + + if err := d.ApplyDiff("3", diff); err != nil { + t.Fatal(err) + } + + // Ensure that the file is in the mount point for id 3 + + mountPoint, err := d.Get("3") + if err != nil { + t.Fatal(err) + } + if _, err := os.Stat(path.Join(mountPoint, "test_file")); err != nil { + t.Fatal(err) + } +} + +func hash(c string) string { + h := sha256.New() + fmt.Fprint(h, c) + return hex.EncodeToString(h.Sum(nil)) +} + +func TestMountMoreThan42Layers(t *testing.T) { + d := newDriver(t) + defer os.RemoveAll(tmp) + defer d.Cleanup() + var last string + var expected int + + for i := 1; i < 127; i++ { + expected++ + var ( + parent = fmt.Sprintf("%d", i-1) + current = fmt.Sprintf("%d", i) + ) + + if parent == "0" { + parent = "" + } else { + parent = hash(parent) + } + current = hash(current) + + if err := d.Create(current, parent); err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + point, err := d.Get(current) + if err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + f, err := os.Create(path.Join(point, current)) + if err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + f.Close() + + if i%10 
== 0 { + if err := os.Remove(path.Join(point, parent)); err != nil { + t.Logf("Current layer %d", i) + t.Fatal(err) + } + expected-- + } + last = current + } + + // Perform the actual mount for the top most image + point, err := d.Get(last) + if err != nil { + t.Fatal(err) + } + files, err := ioutil.ReadDir(point) + if err != nil { + t.Fatal(err) + } + if len(files) != expected { + t.Fatalf("Expected %d got %d", expected, len(files)) + } +} diff --git a/runtime/graphdriver/aufs/dirs.go b/runtime/graphdriver/aufs/dirs.go new file mode 100644 index 0000000000..fb9b81edd2 --- /dev/null +++ b/runtime/graphdriver/aufs/dirs.go @@ -0,0 +1,46 @@ +package aufs + +import ( + "bufio" + "io/ioutil" + "os" + "path" +) + +// Return all the directories +func loadIds(root string) ([]string, error) { + dirs, err := ioutil.ReadDir(root) + if err != nil { + return nil, err + } + out := []string{} + for _, d := range dirs { + if !d.IsDir() { + out = append(out, d.Name()) + } + } + return out, nil +} + +// Read the layers file for the current id and return all the +// layers represented by new lines in the file +// +// If there are no lines in the file then the id has no parent +// and an empty slice is returned. +func getParentIds(root, id string) ([]string, error) { + f, err := os.Open(path.Join(root, "layers", id)) + if err != nil { + return nil, err + } + defer f.Close() + + out := []string{} + s := bufio.NewScanner(f) + + for s.Scan() { + if t := s.Text(); t != "" { + out = append(out, s.Text()) + } + } + return out, s.Err() +} diff --git a/runtime/graphdriver/aufs/migrate.go b/runtime/graphdriver/aufs/migrate.go new file mode 100644 index 0000000000..6018342d6c --- /dev/null +++ b/runtime/graphdriver/aufs/migrate.go @@ -0,0 +1,194 @@ +package aufs + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" +) + +type metadata struct { + ID string `json:"id"` + ParentID string `json:"parent,omitempty"` + Image string `json:"Image,omitempty"` + + parent *metadata +} + +func pathExists(pth string) bool { + if _, err := os.Stat(pth); err != nil { + return false + } + return true +} + +// Migrate existing images and containers from docker < 0.7.x +// +// The format pre 0.7 is for docker to store the metadata and filesystem +// content in the same directory. For the migration to work we need to move Image layer +// data from /var/lib/docker/graph//layers to the diff of the registered id. +// +// Next we need to migrate the container's rw layer to diff of the driver. After the +// contents are migrated we need to register the image and container ids with the +// driver. +// +// For the migration we try to move the folder containing the layer files, if that +// fails because the data is currently mounted we will fallback to creating a +// symlink. 
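+//
+// A typical caller would run this once while initializing the driver, e.g.
+// a.Migrate("/var/lib/docker", setupInit), where setupInit is the callback
+// used to populate each container's "-init" layer before the container id
+// itself is registered.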
+func (a *Driver) Migrate(pth string, setupInit func(p string) error) error { + if pathExists(path.Join(pth, "graph")) { + if err := a.migrateRepositories(pth); err != nil { + return err + } + if err := a.migrateImages(path.Join(pth, "graph")); err != nil { + return err + } + return a.migrateContainers(path.Join(pth, "containers"), setupInit) + } + return nil +} + +func (a *Driver) migrateRepositories(pth string) error { + name := path.Join(pth, "repositories") + if err := os.Rename(name, name+"-aufs"); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) error { + fis, err := ioutil.ReadDir(pth) + if err != nil { + return err + } + + for _, fi := range fis { + if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "rw")) { + if err := tryRelocate(path.Join(pth, id, "rw"), path.Join(a.rootPath(), "diff", id)); err != nil { + return err + } + + if !a.Exists(id) { + + metadata, err := loadMetadata(path.Join(pth, id, "config.json")) + if err != nil { + return err + } + + initID := fmt.Sprintf("%s-init", id) + if err := a.Create(initID, metadata.Image); err != nil { + return err + } + + initPath, err := a.Get(initID) + if err != nil { + return err + } + // setup init layer + if err := setupInit(initPath); err != nil { + return err + } + + if err := a.Create(id, initID); err != nil { + return err + } + } + } + } + return nil +} + +func (a *Driver) migrateImages(pth string) error { + fis, err := ioutil.ReadDir(pth) + if err != nil { + return err + } + var ( + m = make(map[string]*metadata) + current *metadata + exists bool + ) + + for _, fi := range fis { + if id := fi.Name(); fi.IsDir() && pathExists(path.Join(pth, id, "layer")) { + if current, exists = m[id]; !exists { + current, err = loadMetadata(path.Join(pth, id, "json")) + if err != nil { + return err + } + m[id] = current + } + } + } + + for _, v := range m { + v.parent = m[v.ParentID] + } + + migrated := make(map[string]bool) + for _, v := range m { + if err := a.migrateImage(v, pth, migrated); err != nil { + return err + } + } + return nil +} + +func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool) error { + if !migrated[m.ID] { + if m.parent != nil { + a.migrateImage(m.parent, pth, migrated) + } + if err := tryRelocate(path.Join(pth, m.ID, "layer"), path.Join(a.rootPath(), "diff", m.ID)); err != nil { + return err + } + if !a.Exists(m.ID) { + if err := a.Create(m.ID, m.ParentID); err != nil { + return err + } + } + migrated[m.ID] = true + } + return nil +} + +// tryRelocate will try to rename the old path to the new pack and if +// the operation fails, it will fallback to a symlink +func tryRelocate(oldPath, newPath string) error { + s, err := os.Lstat(newPath) + if err != nil && !os.IsNotExist(err) { + return err + } + // If the destination is a symlink then we already tried to relocate once before + // and it failed so we delete it and try to remove + if s != nil && s.Mode()&os.ModeSymlink == os.ModeSymlink { + if err := os.RemoveAll(newPath); err != nil { + return err + } + } + if err := os.Rename(oldPath, newPath); err != nil { + if sErr := os.Symlink(oldPath, newPath); sErr != nil { + return fmt.Errorf("Unable to relocate %s to %s: Rename err %s Symlink err %s", oldPath, newPath, err, sErr) + } + } + return nil +} + +func loadMetadata(pth string) (*metadata, error) { + f, err := os.Open(pth) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + out = &metadata{} + dec = 
json.NewDecoder(f) + ) + + if err := dec.Decode(out); err != nil { + return nil, err + } + return out, nil +} diff --git a/runtime/graphdriver/aufs/mount.go b/runtime/graphdriver/aufs/mount.go new file mode 100644 index 0000000000..1f1d98f809 --- /dev/null +++ b/runtime/graphdriver/aufs/mount.go @@ -0,0 +1,17 @@ +package aufs + +import ( + "github.com/dotcloud/docker/utils" + "os/exec" + "syscall" +) + +func Unmount(target string) error { + if err := exec.Command("auplink", target, "flush").Run(); err != nil { + utils.Errorf("[warning]: couldn't run auplink before unmount: %s", err) + } + if err := syscall.Unmount(target, 0); err != nil { + return err + } + return nil +} diff --git a/runtime/graphdriver/aufs/mount_linux.go b/runtime/graphdriver/aufs/mount_linux.go new file mode 100644 index 0000000000..6082d9f240 --- /dev/null +++ b/runtime/graphdriver/aufs/mount_linux.go @@ -0,0 +1,11 @@ +// +build amd64 + +package aufs + +import "syscall" + +const MsRemount = syscall.MS_REMOUNT + +func mount(source string, target string, fstype string, flags uintptr, data string) error { + return syscall.Mount(source, target, fstype, flags, data) +} diff --git a/runtime/graphdriver/aufs/mount_unsupported.go b/runtime/graphdriver/aufs/mount_unsupported.go new file mode 100644 index 0000000000..2735624112 --- /dev/null +++ b/runtime/graphdriver/aufs/mount_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux !amd64 + +package aufs + +import "errors" + +const MsRemount = 0 + +func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + return errors.New("mount is not implemented on darwin") +} diff --git a/runtime/graphdriver/btrfs/btrfs.go b/runtime/graphdriver/btrfs/btrfs.go new file mode 100644 index 0000000000..b0530be92b --- /dev/null +++ b/runtime/graphdriver/btrfs/btrfs.go @@ -0,0 +1,213 @@ +// +build linux,amd64 + +package btrfs + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "github.com/dotcloud/docker/runtime/graphdriver" + "os" + "path" + "syscall" + "unsafe" +) + +func init() { + graphdriver.Register("btrfs", Init) +} + +func Init(home string) (graphdriver.Driver, error) { + rootdir := path.Dir(home) + + var buf syscall.Statfs_t + if err := syscall.Statfs(rootdir, &buf); err != nil { + return nil, err + } + + if buf.Type != 0x9123683E { + return nil, fmt.Errorf("%s is not a btrfs filesystem", rootdir) + } + + return &Driver{ + home: home, + }, nil +} + +type Driver struct { + home string +} + +func (d *Driver) String() string { + return "btrfs" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return nil +} + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func openDir(path string) (*C.DIR, error) { + Cpath := C.CString(path) + defer free(Cpath) + + dir := C.opendir(Cpath) + if dir == nil { + return nil, fmt.Errorf("Can't open dir") + } + return dir, nil +} + +func closeDir(dir *C.DIR) { + if dir != nil { + C.closedir(dir) + } +} + +func getDirFd(dir *C.DIR) uintptr { + return uintptr(C.dirfd(dir)) +} + +func subvolCreate(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) + } + return nil 
+} + +func subvolSnapshot(src, dest, name string) error { + srcDir, err := openDir(src) + if err != nil { + return err + } + defer closeDir(srcDir) + + destDir, err := openDir(dest) + if err != nil { + return err + } + defer closeDir(destDir) + + var args C.struct_btrfs_ioctl_vol_args_v2 + args.fd = C.__s64(getDirFd(srcDir)) + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func subvolDelete(path, name string) error { + dir, err := openDir(path) + if err != nil { + return err + } + defer closeDir(dir) + + var args C.struct_btrfs_ioctl_vol_args + for i, c := range []byte(name) { + args.name[i] = C.char(c) + } + + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, + uintptr(unsafe.Pointer(&args))) + if errno != 0 { + return fmt.Errorf("Failed to destroy btrfs snapshot: %v", errno.Error()) + } + return nil +} + +func (d *Driver) subvolumesDir() string { + return path.Join(d.home, "subvolumes") +} + +func (d *Driver) subvolumesDirId(id string) string { + return path.Join(d.subvolumesDir(), id) +} + +func (d *Driver) Create(id string, parent string) error { + subvolumes := path.Join(d.home, "subvolumes") + if err := os.MkdirAll(subvolumes, 0700); err != nil { + return err + } + if parent == "" { + if err := subvolCreate(subvolumes, id); err != nil { + return err + } + } else { + parentDir, err := d.Get(parent) + if err != nil { + return err + } + if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { + return err + } + } + return nil +} + +func (d *Driver) Remove(id string) error { + dir := d.subvolumesDirId(id) + if _, err := os.Stat(dir); err != nil { + return err + } + if err := subvolDelete(d.subvolumesDir(), id); err != nil { + return err + } + return os.RemoveAll(dir) +} + +func (d *Driver) Get(id string) (string, error) { + dir := d.subvolumesDirId(id) + st, err := os.Stat(dir) + if err != nil { + return "", err + } + + if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + + return dir, nil +} + +func (d *Driver) Put(id string) { + // Get() creates no runtime resources (like e.g. mounts) + // so this doesn't need to do anything. 
+}
+
+func (d *Driver) Exists(id string) bool {
+	dir := d.subvolumesDirId(id)
+	_, err := os.Stat(dir)
+	return err == nil
+}
diff --git a/runtime/graphdriver/btrfs/dummy_unsupported.go b/runtime/graphdriver/btrfs/dummy_unsupported.go
new file mode 100644
index 0000000000..6c44615763
--- /dev/null
+++ b/runtime/graphdriver/btrfs/dummy_unsupported.go
@@ -0,0 +1,3 @@
+// +build !linux !amd64
+
+package btrfs
diff --git a/runtime/graphdriver/devmapper/attach_loopback.go b/runtime/graphdriver/devmapper/attach_loopback.go
new file mode 100644
index 0000000000..23339076e8
--- /dev/null
+++ b/runtime/graphdriver/devmapper/attach_loopback.go
@@ -0,0 +1,126 @@
+// +build linux,amd64
+
+package devmapper
+
+import (
+	"fmt"
+	"github.com/dotcloud/docker/utils"
+)
+
+func stringToLoopName(src string) [LoNameSize]uint8 {
+	var dst [LoNameSize]uint8
+	copy(dst[:], src[:])
+	return dst
+}
+
+func getNextFreeLoopbackIndex() (int, error) {
+	f, err := osOpenFile("/dev/loop-control", osORdOnly, 0644)
+	if err != nil {
+		return 0, err
+	}
+	defer f.Close()
+
+	index, err := ioctlLoopCtlGetFree(f.Fd())
+	if index < 0 {
+		index = 0
+	}
+	return index, err
+}
+
+func openNextAvailableLoopback(index int, sparseFile *osFile) (loopFile *osFile, err error) {
+	// Start looking for a free /dev/loop
+	for {
+		target := fmt.Sprintf("/dev/loop%d", index)
+		index++
+
+		fi, err := osStat(target)
+		if err != nil {
+			if osIsNotExist(err) {
+				utils.Errorf("There are no more loopback devices available.")
+			}
+			return nil, ErrAttachLoopbackDevice
+		}
+
+		if fi.Mode()&osModeDevice != osModeDevice {
+			utils.Errorf("Loopback device %s is not a block device.", target)
+			continue
+		}
+
+		// OpenFile adds O_CLOEXEC
+		loopFile, err = osOpenFile(target, osORdWr, 0644)
+		if err != nil {
+			utils.Errorf("Error opening loopback device: %s", err)
+			return nil, ErrAttachLoopbackDevice
+		}
+
+		// Try to attach to the loop file
+		if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
+			loopFile.Close()
+
+			// If the error is EBUSY, then try the next loopback
+			if err != sysEBusy {
+				utils.Errorf("Cannot set up loopback device %s: %s", target, err)
+				return nil, ErrAttachLoopbackDevice
+			}
+
+			// Otherwise, we keep going with the loop
+			continue
+		}
+		// In case of success, we finished. Break the loop.
+		break
+	}
+
+	// This can't happen, but let's be sure
+	if loopFile == nil {
+		utils.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
+		return nil, ErrAttachLoopbackDevice
+	}
+
+	return loopFile, nil
+}
+
+// attachLoopDevice attaches the given sparse file to the next
+// available loopback device. It returns an opened *osFile.
+func attachLoopDevice(sparseName string) (loop *osFile, err error) {
+
+	// Try to retrieve the next available loopback device via syscall.
+	// If it fails, we discard the error and start looking for a
+	// loopback from index 0.
+ startIndex, err := getNextFreeLoopbackIndex() + if err != nil { + utils.Debugf("Error retrieving the next available loopback: %s", err) + } + + // OpenFile adds O_CLOEXEC + sparseFile, err := osOpenFile(sparseName, osORdWr, 0644) + if err != nil { + utils.Errorf("Error openning sparse file %s: %s", sparseName, err) + return nil, ErrAttachLoopbackDevice + } + defer sparseFile.Close() + + loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) + if err != nil { + return nil, err + } + + // Set the status of the loopback device + loopInfo := &LoopInfo64{ + loFileName: stringToLoopName(loopFile.Name()), + loOffset: 0, + loFlags: LoFlagsAutoClear, + } + + if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { + utils.Errorf("Cannot set up loopback device info: %s", err) + + // If the call failed, then free the loopback device + if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { + utils.Errorf("Error while cleaning up the loopback device") + } + loopFile.Close() + return nil, ErrAttachLoopbackDevice + } + + return loopFile, nil +} diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go new file mode 100644 index 0000000000..f6b26655a3 --- /dev/null +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -0,0 +1,1088 @@ +// +build linux,amd64 + +package devmapper + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/dotcloud/docker/utils" + "io" + "io/ioutil" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "time" +) + +var ( + DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 + DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 + DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 +) + +type DevInfo struct { + Hash string `json:"-"` + DeviceId int `json:"device_id"` + Size uint64 `json:"size"` + TransactionId uint64 `json:"transaction_id"` + Initialized bool `json:"initialized"` + devices *DeviceSet `json:"-"` + + mountCount int `json:"-"` + mountPath string `json:"-"` + // A floating mount means one reference is not owned and + // will be stolen by the next mount. This allows us to + // avoid unmounting directly after creation before the + // first get (since we need to mount to set up the device + // a bit first). + floating bool `json:"-"` + + // The global DeviceSet lock guarantees that we serialize all + // the calls to libdevmapper (which is not threadsafe), but we + // sometimes release that lock while sleeping. In that case + // this per-device lock is still held, protecting against + // other accesses to the device that we're doing the wait on. 
+ lock sync.Mutex `json:"-"` +} + +type MetaData struct { + Devices map[string]*DevInfo `json:devices` +} + +type DeviceSet struct { + MetaData + sync.Mutex // Protects Devices map and serializes calls into libdevmapper + root string + devicePrefix string + TransactionId uint64 + NewTransactionId uint64 + nextFreeDevice int + sawBusy bool +} + +type DiskUsage struct { + Used uint64 + Total uint64 +} + +type Status struct { + PoolName string + DataLoopback string + MetadataLoopback string + Data DiskUsage + Metadata DiskUsage + SectorSize uint64 +} + +type DevStatus struct { + DeviceId int + Size uint64 + TransactionId uint64 + SizeInSectors uint64 + MappedSectors uint64 + HighestMappedSector uint64 +} + +type UnmountMode int + +const ( + UnmountRegular UnmountMode = iota + UnmountFloat + UnmountSink +) + +func getDevName(name string) string { + return "/dev/mapper/" + name +} + +func (info *DevInfo) Name() string { + hash := info.Hash + if hash == "" { + hash = "base" + } + return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) +} + +func (info *DevInfo) DevName() string { + return getDevName(info.Name()) +} + +func (devices *DeviceSet) loopbackDir() string { + return path.Join(devices.root, "devicemapper") +} + +func (devices *DeviceSet) jsonFile() string { + return path.Join(devices.loopbackDir(), "json") +} + +func (devices *DeviceSet) getPoolName() string { + return devices.devicePrefix + "-pool" +} + +func (devices *DeviceSet) getPoolDevName() string { + return getDevName(devices.getPoolName()) +} + +func (devices *DeviceSet) hasImage(name string) bool { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + _, err := osStat(filename) + return err == nil +} + +// ensureImage creates a sparse file of bytes at the path +// /devicemapper/. +// If the file already exists, it does nothing. +// Either way it returns the full path. 
+func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { + dirname := devices.loopbackDir() + filename := path.Join(dirname, name) + + if err := osMkdirAll(dirname, 0700); err != nil && !osIsExist(err) { + return "", err + } + + if _, err := osStat(filename); err != nil { + if !osIsNotExist(err) { + return "", err + } + utils.Debugf("Creating loopback file %s for device-manage use", filename) + file, err := osOpenFile(filename, osORdWr|osOCreate, 0600) + if err != nil { + return "", err + } + defer file.Close() + + if err = file.Truncate(size); err != nil { + return "", err + } + } + return filename, nil +} + +func (devices *DeviceSet) allocateDeviceId() int { + // TODO: Add smarter reuse of deleted devices + id := devices.nextFreeDevice + devices.nextFreeDevice = devices.nextFreeDevice + 1 + return id +} + +func (devices *DeviceSet) allocateTransactionId() uint64 { + devices.NewTransactionId = devices.NewTransactionId + 1 + return devices.NewTransactionId +} + +func (devices *DeviceSet) saveMetadata() error { + jsonData, err := json.Marshal(devices.MetaData) + if err != nil { + return fmt.Errorf("Error encoding metadata to json: %s", err) + } + tmpFile, err := ioutil.TempFile(filepath.Dir(devices.jsonFile()), ".json") + if err != nil { + return fmt.Errorf("Error creating metadata file: %s", err) + } + + n, err := tmpFile.Write(jsonData) + if err != nil { + return fmt.Errorf("Error writing metadata to %s: %s", tmpFile.Name(), err) + } + if n < len(jsonData) { + return io.ErrShortWrite + } + if err := tmpFile.Sync(); err != nil { + return fmt.Errorf("Error syncing metadata file %s: %s", tmpFile.Name(), err) + } + if err := tmpFile.Close(); err != nil { + return fmt.Errorf("Error closing metadata file %s: %s", tmpFile.Name(), err) + } + if err := osRename(tmpFile.Name(), devices.jsonFile()); err != nil { + return fmt.Errorf("Error committing metadata file %s: %s", tmpFile.Name(), err) + } + + if devices.NewTransactionId != devices.TransactionId { + if err = setTransactionId(devices.getPoolDevName(), devices.TransactionId, devices.NewTransactionId); err != nil { + return fmt.Errorf("Error setting devmapper transition ID: %s", err) + } + devices.TransactionId = devices.NewTransactionId + } + return nil +} + +func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) { + utils.Debugf("registerDevice(%v, %v)", id, hash) + info := &DevInfo{ + Hash: hash, + DeviceId: id, + Size: size, + TransactionId: devices.allocateTransactionId(), + Initialized: false, + devices: devices, + } + + devices.Devices[hash] = info + if err := devices.saveMetadata(); err != nil { + // Try to remove unused device + delete(devices.Devices, hash) + return nil, err + } + + return info, nil +} + +func (devices *DeviceSet) activateDeviceIfNeeded(hash string) error { + utils.Debugf("activateDeviceIfNeeded(%v)", hash) + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("Unknown device %s", hash) + } + + if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { + return nil + } + + return activateDevice(devices.getPoolDevName(), info.Name(), info.DeviceId, info.Size) +} + +func (devices *DeviceSet) createFilesystem(info *DevInfo) error { + devname := info.DevName() + + err := execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0,lazy_journal_init=0", devname) + if err != nil { + err = execRun("mkfs.ext4", "-E", "discard,lazy_itable_init=0", devname) + } + if err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + 
return nil +} + +func (devices *DeviceSet) loadMetaData() error { + utils.Debugf("loadMetadata()") + defer utils.Debugf("loadMetadata END") + _, _, _, params, err := getStatus(devices.getPoolName()) + if err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + + if _, err := fmt.Sscanf(params, "%d", &devices.TransactionId); err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + devices.NewTransactionId = devices.TransactionId + + jsonData, err := ioutil.ReadFile(devices.jsonFile()) + if err != nil && !osIsNotExist(err) { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + + devices.MetaData.Devices = make(map[string]*DevInfo) + if jsonData != nil { + if err := json.Unmarshal(jsonData, &devices.MetaData); err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + } + + for hash, d := range devices.Devices { + d.Hash = hash + d.devices = devices + + if d.DeviceId >= devices.nextFreeDevice { + devices.nextFreeDevice = d.DeviceId + 1 + } + + // If the transaction id is larger than the actual one we lost the device due to some crash + if d.TransactionId > devices.TransactionId { + utils.Debugf("Removing lost device %s with id %d", hash, d.TransactionId) + delete(devices.Devices, hash) + } + } + return nil +} + +func (devices *DeviceSet) setupBaseImage() error { + oldInfo := devices.Devices[""] + if oldInfo != nil && oldInfo.Initialized { + return nil + } + + if oldInfo != nil && !oldInfo.Initialized { + utils.Debugf("Removing uninitialized base image") + if err := devices.deleteDevice(""); err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + } + + utils.Debugf("Initializing base device-manager snapshot") + + id := devices.allocateDeviceId() + + // Create initial device + if err := createDevice(devices.getPoolDevName(), id); err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + + utils.Debugf("Registering base device (id %v) with FS size %v", id, DefaultBaseFsSize) + info, err := devices.registerDevice(id, "", DefaultBaseFsSize) + if err != nil { + _ = deleteDevice(devices.getPoolDevName(), id) + utils.Debugf("\n--->Err: %s\n", err) + return err + } + + utils.Debugf("Creating filesystem on base device-manager snapshot") + + if err = devices.activateDeviceIfNeeded(""); err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + + if err := devices.createFilesystem(info); err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + + info.Initialized = true + if err = devices.saveMetadata(); err != nil { + info.Initialized = false + utils.Debugf("\n--->Err: %s\n", err) + return err + } + + return nil +} + +func setCloseOnExec(name string) { + if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { + for _, i := range fileInfos { + link, _ := osReadlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + sysCloseOnExec(fd) + } + } + } + } +} + +func (devices *DeviceSet) log(level int, file string, line int, dmError int, message string) { + if level >= 7 { + return // Ignore _LOG_DEBUG + } + + if strings.Contains(message, "busy") { + devices.sawBusy = true + } + + utils.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} + +func (devices *DeviceSet) ResizePool(size int64) error { + dirname := devices.loopbackDir() + 
datafilename := path.Join(dirname, "data") + metadatafilename := path.Join(dirname, "metadata") + + datafile, err := osOpenFile(datafilename, osORdWr, 0) + if datafile == nil { + return err + } + defer datafile.Close() + + fi, err := datafile.Stat() + if fi == nil { + return err + } + + if fi.Size() > size { + return fmt.Errorf("Can't shrink file") + } + + dataloopback := FindLoopDeviceFor(datafile) + if dataloopback == nil { + return fmt.Errorf("Unable to find loopback mount for: %s", datafilename) + } + defer dataloopback.Close() + + metadatafile, err := osOpenFile(metadatafilename, osORdWr, 0) + if metadatafile == nil { + return err + } + defer metadatafile.Close() + + metadataloopback := FindLoopDeviceFor(metadatafile) + if metadataloopback == nil { + return fmt.Errorf("Unable to find loopback mount for: %s", metadatafilename) + } + defer metadataloopback.Close() + + // Grow loopback file + if err := datafile.Truncate(size); err != nil { + return fmt.Errorf("Unable to grow loopback file: %s", err) + } + + // Reload size for loopback device + if err := LoopbackSetCapacity(dataloopback); err != nil { + return fmt.Errorf("Unable to update loopback capacity: %s", err) + } + + // Suspend the pool + if err := suspendDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("Unable to suspend pool: %s", err) + } + + // Reload with the new block sizes + if err := reloadPool(devices.getPoolName(), dataloopback, metadataloopback); err != nil { + return fmt.Errorf("Unable to reload pool: %s", err) + } + + // Resume the pool + if err := resumeDevice(devices.getPoolName()); err != nil { + return fmt.Errorf("Unable to resume pool: %s", err) + } + + return nil +} + +func (devices *DeviceSet) initDevmapper(doInit bool) error { + logInit(devices) + + // Make sure the sparse images exist in /devicemapper/data and + // /devicemapper/metadata + + hasData := devices.hasImage("data") + hasMetadata := devices.hasImage("metadata") + + if !doInit && !hasData { + return errors.New("Loopback data file not found") + } + + if !doInit && !hasMetadata { + return errors.New("Loopback metadata file not found") + } + + createdLoopback := !hasData || !hasMetadata + data, err := devices.ensureImage("data", DefaultDataLoopbackSize) + if err != nil { + utils.Debugf("Error device ensureImage (data): %s\n", err) + return err + } + metadata, err := devices.ensureImage("metadata", DefaultMetaDataLoopbackSize) + if err != nil { + utils.Debugf("Error device ensureImage (metadata): %s\n", err) + return err + } + + // Set the device prefix from the device id and inode of the docker root dir + + st, err := osStat(devices.root) + if err != nil { + return fmt.Errorf("Error looking up dir %s: %s", devices.root, err) + } + sysSt := toSysStatT(st.Sys()) + // "reg-" stands for "regular file". + // In the future we might use "dev-" for "device file", etc. + // docker-maj,min[-inode] stands for: + // - Managed by docker + // - The target of this device is at major and minor + // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. 
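+	//
+	// For example, a docker root dir that lives on device 8:1 with inode 4243
+	// produces the prefix "docker-8:1-4243".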
+ devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) + utils.Debugf("Generated prefix: %s", devices.devicePrefix) + + // Check for the existence of the device -pool + utils.Debugf("Checking for existence of the pool '%s'", devices.getPoolName()) + info, err := getInfo(devices.getPoolName()) + if info == nil { + utils.Debugf("Error device getInfo: %s", err) + return err + } + + // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files + // that are not Close-on-exec, and lxc-start will die if it inherits any unexpected files, + // so we add this badhack to make sure it closes itself + setCloseOnExec("/dev/mapper/control") + + // If the pool doesn't exist, create it + if info.Exists == 0 { + utils.Debugf("Pool doesn't exist. Creating it.") + + dataFile, err := attachLoopDevice(data) + if err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + defer dataFile.Close() + + metadataFile, err := attachLoopDevice(metadata) + if err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + defer metadataFile.Close() + + if err := createPool(devices.getPoolName(), dataFile, metadataFile); err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + } + + // If we didn't just create the data or metadata image, we need to + // load the metadata from the existing file. + if !createdLoopback { + if err = devices.loadMetaData(); err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + } + + // Setup the base image + if doInit { + if err := devices.setupBaseImage(); err != nil { + utils.Debugf("Error device setupBaseImage: %s\n", err) + return err + } + } + + return nil +} + +func (devices *DeviceSet) AddDevice(hash, baseHash string) error { + devices.Lock() + defer devices.Unlock() + + if devices.Devices[hash] != nil { + return fmt.Errorf("hash %s already exists", hash) + } + + baseInfo := devices.Devices[baseHash] + if baseInfo == nil { + return fmt.Errorf("Error adding device for '%s': can't find device for parent '%s'", hash, baseHash) + } + + baseInfo.lock.Lock() + defer baseInfo.lock.Unlock() + + deviceId := devices.allocateDeviceId() + + if err := devices.createSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { + utils.Debugf("Error creating snap device: %s\n", err) + return err + } + + if _, err := devices.registerDevice(deviceId, hash, baseInfo.Size); err != nil { + deleteDevice(devices.getPoolDevName(), deviceId) + utils.Debugf("Error registering device: %s\n", err) + return err + } + return nil +} + +func (devices *DeviceSet) deleteDevice(hash string) error { + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("hash %s doesn't exists", hash) + } + + // This is a workaround for the kernel not discarding block so + // on the thin pool when we remove a thinp device, so we do it + // manually + if err := devices.activateDeviceIfNeeded(hash); err == nil { + if err := BlockDeviceDiscard(info.DevName()); err != nil { + utils.Debugf("Error discarding block on device: %s (ignoring)\n", err) + } + } + + devinfo, _ := getInfo(info.Name()) + if devinfo != nil && devinfo.Exists != 0 { + if err := devices.removeDeviceAndWait(info.Name()); err != nil { + utils.Debugf("Error removing device: %s\n", err) + return err + } + } + + if info.Initialized { + info.Initialized = false + if err := devices.saveMetadata(); err != nil { + utils.Debugf("Error saving meta data: %s\n", err) + return err + } + } + + if err := 
deleteDevice(devices.getPoolDevName(), info.DeviceId); err != nil { + utils.Debugf("Error deleting device: %s\n", err) + return err + } + + devices.allocateTransactionId() + delete(devices.Devices, info.Hash) + + if err := devices.saveMetadata(); err != nil { + devices.Devices[info.Hash] = info + utils.Debugf("Error saving meta data: %s\n", err) + return err + } + + return nil +} + +func (devices *DeviceSet) DeleteDevice(hash string) error { + devices.Lock() + defer devices.Unlock() + + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("Unknown device %s", hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + return devices.deleteDevice(hash) +} + +func (devices *DeviceSet) deactivatePool() error { + utils.Debugf("[devmapper] deactivatePool()") + defer utils.Debugf("[devmapper] deactivatePool END") + devname := devices.getPoolDevName() + devinfo, err := getInfo(devname) + if err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + if devinfo.Exists != 0 { + return removeDevice(devname) + } + + return nil +} + +func (devices *DeviceSet) deactivateDevice(hash string) error { + utils.Debugf("[devmapper] deactivateDevice(%s)", hash) + defer utils.Debugf("[devmapper] deactivateDevice END") + + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("Unknown device %s", hash) + } + devinfo, err := getInfo(info.Name()) + if err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + if devinfo.Exists != 0 { + if err := devices.removeDeviceAndWait(info.Name()); err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + } + + return nil +} + +// Issues the underlying dm remove operation and then waits +// for it to finish. +func (devices *DeviceSet) removeDeviceAndWait(devname string) error { + var err error + + for i := 0; i < 1000; i++ { + devices.sawBusy = false + err = removeDevice(devname) + if err == nil { + break + } + if !devices.sawBusy { + return err + } + + // If we see EBUSY it may be a transient error, + // sleep a bit a retry a few times. + devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() + } + if err != nil { + return err + } + + if err := devices.waitRemove(devname); err != nil { + return err + } + return nil +} + +// waitRemove blocks until either: +// a) the device registered at - is removed, +// or b) the 1 second timeout expires. +func (devices *DeviceSet) waitRemove(devname string) error { + utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) + defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) + i := 0 + for ; i < 1000; i += 1 { + devinfo, err := getInfo(devname) + if err != nil { + // If there is an error we assume the device doesn't exist. + // The error might actually be something else, but we can't differentiate. + return nil + } + if i%100 == 0 { + utils.Debugf("Waiting for removal of %s: exists=%d", devname, devinfo.Exists) + } + if devinfo.Exists == 0 { + break + } + + devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() + } + if i == 1000 { + return fmt.Errorf("Timeout while waiting for device %s to be removed", devname) + } + return nil +} + +// waitClose blocks until either: +// a) the device registered at - is closed, +// or b) the 1 second timeout expires. 
+func (devices *DeviceSet) waitClose(hash string) error { + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("Unknown device %s", hash) + } + i := 0 + for ; i < 1000; i += 1 { + devinfo, err := getInfo(info.Name()) + if err != nil { + return err + } + if i%100 == 0 { + utils.Debugf("Waiting for unmount of %s: opencount=%d", hash, devinfo.OpenCount) + } + if devinfo.OpenCount == 0 { + break + } + time.Sleep(1 * time.Millisecond) + } + if i == 1000 { + return fmt.Errorf("Timeout while waiting for device %s to close", hash) + } + return nil +} + +func (devices *DeviceSet) Shutdown() error { + devices.Lock() + defer devices.Unlock() + + utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix) + utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) + defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) + + for _, info := range devices.Devices { + info.lock.Lock() + if info.mountCount > 0 { + if err := sysUnmount(info.mountPath, 0); err != nil { + utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err) + } + } + info.lock.Unlock() + } + + for _, d := range devices.Devices { + d.lock.Lock() + + if err := devices.waitClose(d.Hash); err != nil { + utils.Errorf("Warning: error waiting for device %s to unmount: %s\n", d.Hash, err) + } + if err := devices.deactivateDevice(d.Hash); err != nil { + utils.Debugf("Shutdown deactivate %s , error: %s\n", d.Hash, err) + } + + d.lock.Unlock() + } + + if err := devices.deactivatePool(); err != nil { + utils.Debugf("Shutdown deactivate pool , error: %s\n", err) + } + + return nil +} + +func (devices *DeviceSet) MountDevice(hash, path string) error { + devices.Lock() + defer devices.Unlock() + + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("Unknown device %s", hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + if info.mountCount > 0 { + if path != info.mountPath { + return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path) + } + + if info.floating { + // Steal floating ref + info.floating = false + } else { + info.mountCount++ + } + return nil + } + + if err := devices.activateDeviceIfNeeded(hash); err != nil { + return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) + } + + var flags uintptr = sysMsMgcVal + + err := sysMount(info.DevName(), path, "ext4", flags, "discard") + if err != nil && err == sysEInval { + err = sysMount(info.DevName(), path, "ext4", flags, "") + } + if err != nil { + return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) + } + + info.mountCount = 1 + info.mountPath = path + info.floating = false + + return devices.setInitialized(hash) +} + +func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { + utils.Debugf("[devmapper] UnmountDevice(hash=%s, mode=%d)", hash, mode) + defer utils.Debugf("[devmapper] UnmountDevice END") + devices.Lock() + defer devices.Unlock() + + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("UnmountDevice: no such device %s\n", hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + if mode == UnmountFloat { + if info.floating { + return fmt.Errorf("UnmountDevice: can't float floating reference %s\n", hash) + } + + // Leave this reference floating + info.floating = true + return nil + } + + if mode == UnmountSink { + if !info.floating { + // Someone already sunk this + return nil + } + // Otherwise, treat this as a regular unmount + } + + if info.mountCount == 0 { 
+ return fmt.Errorf("UnmountDevice: device not-mounted id %s\n", hash) + } + + info.mountCount-- + if info.mountCount > 0 { + return nil + } + + utils.Debugf("[devmapper] Unmount(%s)", info.mountPath) + if err := sysUnmount(info.mountPath, 0); err != nil { + utils.Debugf("\n--->Err: %s\n", err) + return err + } + utils.Debugf("[devmapper] Unmount done") + // Wait for the unmount to be effective, + // by watching the value of Info.OpenCount for the device + if err := devices.waitClose(hash); err != nil { + return err + } + + devices.deactivateDevice(hash) + + info.mountPath = "" + + return nil +} + +func (devices *DeviceSet) HasDevice(hash string) bool { + devices.Lock() + defer devices.Unlock() + + return devices.Devices[hash] != nil +} + +func (devices *DeviceSet) HasInitializedDevice(hash string) bool { + devices.Lock() + defer devices.Unlock() + + info := devices.Devices[hash] + return info != nil && info.Initialized +} + +func (devices *DeviceSet) HasActivatedDevice(hash string) bool { + devices.Lock() + defer devices.Unlock() + + info := devices.Devices[hash] + if info == nil { + return false + } + + info.lock.Lock() + defer info.lock.Unlock() + + devinfo, _ := getInfo(info.Name()) + return devinfo != nil && devinfo.Exists != 0 +} + +func (devices *DeviceSet) setInitialized(hash string) error { + info := devices.Devices[hash] + if info == nil { + return fmt.Errorf("Unknown device %s", hash) + } + + info.Initialized = true + if err := devices.saveMetadata(); err != nil { + info.Initialized = false + utils.Debugf("\n--->Err: %s\n", err) + return err + } + + return nil +} + +func (devices *DeviceSet) List() []string { + devices.Lock() + defer devices.Unlock() + + ids := make([]string, len(devices.Devices)) + i := 0 + for k := range devices.Devices { + ids[i] = k + i++ + } + return ids +} + +func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { + var params string + _, sizeInSectors, _, params, err = getStatus(devName) + if err != nil { + return + } + if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { + return + } + return +} + +func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { + devices.Lock() + defer devices.Unlock() + + info := devices.Devices[hash] + if info == nil { + return nil, fmt.Errorf("No device %s", hash) + } + + info.lock.Lock() + defer info.lock.Unlock() + + status := &DevStatus{ + DeviceId: info.DeviceId, + Size: info.Size, + TransactionId: info.TransactionId, + } + + if err := devices.activateDeviceIfNeeded(hash); err != nil { + return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) + } + + if sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()); err != nil { + return nil, err + } else { + status.SizeInSectors = sizeInSectors + status.MappedSectors = mappedSectors + status.HighestMappedSector = highestMappedSector + } + + return status, nil +} + +func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionId, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { + var params string + if _, totalSizeInSectors, _, params, err = getStatus(devices.getPoolName()); err == nil { + _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionId, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) + } + return +} + +func (devices *DeviceSet) Status() *Status { + devices.Lock() + defer devices.Unlock() + + status := &Status{} + + status.PoolName = 
devices.getPoolName() + status.DataLoopback = path.Join(devices.loopbackDir(), "data") + status.MetadataLoopback = path.Join(devices.loopbackDir(), "metadata") + + totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() + if err == nil { + // Convert from blocks to bytes + blockSizeInSectors := totalSizeInSectors / dataTotal + + status.Data.Used = dataUsed * blockSizeInSectors * 512 + status.Data.Total = dataTotal * blockSizeInSectors * 512 + + // metadata blocks are always 4k + status.Metadata.Used = metadataUsed * 4096 + status.Metadata.Total = metadataTotal * 4096 + + status.SectorSize = blockSizeInSectors * 512 + } + + return status +} + +func NewDeviceSet(root string, doInit bool) (*DeviceSet, error) { + SetDevDir("/dev") + + devices := &DeviceSet{ + root: root, + MetaData: MetaData{Devices: make(map[string]*DevInfo)}, + } + + if err := devices.initDevmapper(doInit); err != nil { + return nil, err + } + + return devices, nil +} diff --git a/runtime/graphdriver/devmapper/devmapper.go b/runtime/graphdriver/devmapper/devmapper.go new file mode 100644 index 0000000000..7317118dcf --- /dev/null +++ b/runtime/graphdriver/devmapper/devmapper.go @@ -0,0 +1,595 @@ +// +build linux,amd64 + +package devmapper + +import ( + "errors" + "fmt" + "github.com/dotcloud/docker/utils" + "runtime" + "syscall" +) + +type DevmapperLogger interface { + log(level int, file string, line int, dmError int, message string) +} + +const ( + DeviceCreate TaskType = iota + DeviceReload + DeviceRemove + DeviceRemoveAll + DeviceSuspend + DeviceResume + DeviceInfo + DeviceDeps + DeviceRename + DeviceVersion + DeviceStatus + DeviceTable + DeviceWaitevent + DeviceList + DeviceClear + DeviceMknodes + DeviceListVersions + DeviceTargetMsg + DeviceSetGeometry +) + +const ( + AddNodeOnResume AddNodeType = iota + AddNodeOnCreate +) + +var ( + ErrTaskRun = errors.New("dm_task_run failed") + ErrTaskSetName = errors.New("dm_task_set_name failed") + ErrTaskSetMessage = errors.New("dm_task_set_message failed") + ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") + ErrTaskSetRo = errors.New("dm_task_set_ro failed") + ErrTaskAddTarget = errors.New("dm_task_add_target failed") + ErrTaskSetSector = errors.New("dm_task_set_sector failed") + ErrTaskGetInfo = errors.New("dm_task_get_info failed") + ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") + ErrNilCookie = errors.New("cookie ptr can't be nil") + ErrAttachLoopbackDevice = errors.New("loopback mounting failed") + ErrGetBlockSize = errors.New("Can't get block size") + ErrUdevWait = errors.New("wait on udev cookie failed") + ErrSetDevDir = errors.New("dm_set_dev_dir failed") + ErrGetLibraryVersion = errors.New("dm_get_library_version failed") + ErrCreateRemoveTask = errors.New("Can't create task of type DeviceRemove") + ErrRunRemoveDevice = errors.New("running removeDevice failed") + ErrInvalidAddNode = errors.New("Invalide AddNoce type") + ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") + ErrLoopbackSetCapacity = errors.New("Unable set loopback capacity") +) + +type ( + Task struct { + unmanaged *CDmTask + } + Info struct { + Exists int + Suspended int + LiveTable int + InactiveTable int + OpenCount int32 + EventNr uint32 + Major uint32 + Minor uint32 + ReadOnly int + TargetCount int32 + } + TaskType int + AddNodeType int +) + +func (t *Task) destroy() { + if t != nil { + DmTaskDestroy(t.unmanaged) + runtime.SetFinalizer(t, nil) + } +} + +func TaskCreate(tasktype TaskType) *Task { + Ctask := 
DmTaskCreate(int(tasktype)) + if Ctask == nil { + return nil + } + task := &Task{unmanaged: Ctask} + runtime.SetFinalizer(task, (*Task).destroy) + return task +} + +func (t *Task) Run() error { + if res := DmTaskRun(t.unmanaged); res != 1 { + return ErrTaskRun + } + return nil +} + +func (t *Task) SetName(name string) error { + if res := DmTaskSetName(t.unmanaged, name); res != 1 { + return ErrTaskSetName + } + return nil +} + +func (t *Task) SetMessage(message string) error { + if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { + return ErrTaskSetMessage + } + return nil +} + +func (t *Task) SetSector(sector uint64) error { + if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { + return ErrTaskSetSector + } + return nil +} + +func (t *Task) SetCookie(cookie *uint, flags uint16) error { + if cookie == nil { + return ErrNilCookie + } + if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { + return ErrTaskSetCookie + } + return nil +} + +func (t *Task) SetAddNode(addNode AddNodeType) error { + if addNode != AddNodeOnResume && addNode != AddNodeOnCreate { + return ErrInvalidAddNode + } + if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { + return ErrTaskSetAddNode + } + return nil +} + +func (t *Task) SetRo() error { + if res := DmTaskSetRo(t.unmanaged); res != 1 { + return ErrTaskSetRo + } + return nil +} + +func (t *Task) AddTarget(start, size uint64, ttype, params string) error { + if res := DmTaskAddTarget(t.unmanaged, start, size, + ttype, params); res != 1 { + return ErrTaskAddTarget + } + return nil +} + +func (t *Task) GetInfo() (*Info, error) { + info := &Info{} + if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { + return nil, ErrTaskGetInfo + } + return info, nil +} + +func (t *Task) GetNextTarget(next uintptr) (nextPtr uintptr, start uint64, + length uint64, targetType string, params string) { + + return DmGetNextTarget(t.unmanaged, next, &start, &length, + &targetType, ¶ms), + start, length, targetType, params +} + +func getLoopbackBackingFile(file *osFile) (uint64, uint64, error) { + loopInfo, err := ioctlLoopGetStatus64(file.Fd()) + if err != nil { + utils.Errorf("Error get loopback backing file: %s\n", err) + return 0, 0, ErrGetLoopbackBackingFile + } + return loopInfo.loDevice, loopInfo.loInode, nil +} + +func LoopbackSetCapacity(file *osFile) error { + if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { + utils.Errorf("Error loopbackSetCapacity: %s", err) + return ErrLoopbackSetCapacity + } + return nil +} + +func FindLoopDeviceFor(file *osFile) *osFile { + stat, err := file.Stat() + if err != nil { + return nil + } + targetInode := stat.Sys().(*sysStatT).Ino + targetDevice := stat.Sys().(*sysStatT).Dev + + for i := 0; true; i++ { + path := fmt.Sprintf("/dev/loop%d", i) + + file, err := osOpenFile(path, osORdWr, 0) + if err != nil { + if osIsNotExist(err) { + return nil + } + + // Ignore all errors until the first not-exist + // we want to continue looking for the file + continue + } + + dev, inode, err := getLoopbackBackingFile(file) + if err == nil && dev == targetDevice && inode == targetInode { + return file + } + file.Close() + } + + return nil +} + +func UdevWait(cookie uint) error { + if res := DmUdevWait(cookie); res != 1 { + utils.Debugf("Failed to wait on udev cookie %d", cookie) + return ErrUdevWait + } + return nil +} + +func LogInitVerbose(level int) { + DmLogInitVerbose(level) +} + +var dmLogger DevmapperLogger = nil + +func logInit(logger DevmapperLogger) { + dmLogger = logger + LogWithErrnoInit() +} + +func 
SetDevDir(dir string) error { + if res := DmSetDevDir(dir); res != 1 { + utils.Debugf("Error dm_set_dev_dir") + return ErrSetDevDir + } + return nil +} + +func GetLibraryVersion() (string, error) { + var version string + if res := DmGetLibraryVersion(&version); res != 1 { + return "", ErrGetLibraryVersion + } + return version, nil +} + +// Useful helper for cleanup +func RemoveDevice(name string) error { + task := TaskCreate(DeviceRemove) + if task == nil { + return ErrCreateRemoveTask + } + if err := task.SetName(name); err != nil { + utils.Debugf("Can't set task name %s", name) + return err + } + if err := task.Run(); err != nil { + return ErrRunRemoveDevice + } + return nil +} + +func GetBlockDeviceSize(file *osFile) (uint64, error) { + size, err := ioctlBlkGetSize64(file.Fd()) + if err != nil { + utils.Errorf("Error getblockdevicesize: %s", err) + return 0, ErrGetBlockSize + } + return uint64(size), nil +} + +func BlockDeviceDiscard(path string) error { + file, err := osOpenFile(path, osORdWr, 0) + if err != nil { + return err + } + defer file.Close() + + size, err := GetBlockDeviceSize(file) + if err != nil { + return err + } + + if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { + return err + } + + // Without this sometimes the remove of the device that happens after + // discard fails with EBUSY. + syscall.Sync() + + return nil +} + +// This is the programmatic example of "dmsetup create" +func createPool(poolName string, dataFile, metadataFile *osFile) error { + task, err := createTask(DeviceCreate, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("Can't get data size") + } + + params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768 1 skip_block_zeroing" + if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("Can't add target") + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie") + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (createPool)") + } + + UdevWait(cookie) + + return nil +} + +func reloadPool(poolName string, dataFile, metadataFile *osFile) error { + task, err := createTask(DeviceReload, poolName) + if task == nil { + return err + } + + size, err := GetBlockDeviceSize(dataFile) + if err != nil { + return fmt.Errorf("Can't get data size") + } + + params := metadataFile.Name() + " " + dataFile.Name() + " 128 32768" + if err := task.AddTarget(0, size/512, "thin-pool", params); err != nil { + return fmt.Errorf("Can't add target") + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate") + } + + return nil +} + +func createTask(t TaskType, name string) (*Task, error) { + task := TaskCreate(t) + if task == nil { + return nil, fmt.Errorf("Can't create task of type %d", int(t)) + } + if err := task.SetName(name); err != nil { + return nil, fmt.Errorf("Can't set task name %s", name) + } + return task, nil +} + +func getInfo(name string) (*Info, error) { + task, err := createTask(DeviceInfo, name) + if task == nil { + return nil, err + } + if err := task.Run(); err != nil { + return nil, err + } + return task.GetInfo() +} + +func getStatus(name string) (uint64, uint64, string, string, error) { + task, err := createTask(DeviceStatus, name) + if task == nil { + utils.Debugf("getStatus: Error createTask: %s", err) + return 0, 0, "", "", err + } + if err := task.Run(); err != nil { + 
utils.Debugf("getStatus: Error Run: %s", err) + return 0, 0, "", "", err + } + + devinfo, err := task.GetInfo() + if err != nil { + utils.Debugf("getStatus: Error GetInfo: %s", err) + return 0, 0, "", "", err + } + if devinfo.Exists == 0 { + utils.Debugf("getStatus: Non existing device %s", name) + return 0, 0, "", "", fmt.Errorf("Non existing device %s", name) + } + + _, start, length, targetType, params := task.GetNextTarget(0) + return start, length, targetType, params, nil +} + +func setTransactionId(poolName string, oldId uint64, newId uint64) error { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector") + } + + if err := task.SetMessage(fmt.Sprintf("set_transaction_id %d %d", oldId, newId)); err != nil { + return fmt.Errorf("Can't set message") + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running setTransactionId") + } + return nil +} + +func suspendDevice(name string) error { + task, err := createTask(DeviceSuspend, name) + if task == nil { + return err + } + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceSuspend: %s", err) + } + return nil +} + +func resumeDevice(name string) error { + task, err := createTask(DeviceResume, name) + if task == nil { + return err + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie") + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceResume") + } + + UdevWait(cookie) + + return nil +} + +func createDevice(poolName string, deviceId int) error { + utils.Debugf("[devmapper] createDevice(poolName=%v, deviceId=%v)", poolName, deviceId) + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector") + } + + if err := task.SetMessage(fmt.Sprintf("create_thin %d", deviceId)); err != nil { + return fmt.Errorf("Can't set message") + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running createDevice") + } + return nil +} + +func deleteDevice(poolName string, deviceId int) error { + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + return err + } + + if err := task.SetSector(0); err != nil { + return fmt.Errorf("Can't set sector") + } + + if err := task.SetMessage(fmt.Sprintf("delete %d", deviceId)); err != nil { + return fmt.Errorf("Can't set message") + } + + if err := task.Run(); err != nil { + return fmt.Errorf("Error running deleteDevice") + } + return nil +} + +func removeDevice(name string) error { + utils.Debugf("[devmapper] removeDevice START") + defer utils.Debugf("[devmapper] removeDevice END") + task, err := createTask(DeviceRemove, name) + if task == nil { + return err + } + if err = task.Run(); err != nil { + return fmt.Errorf("Error running removeDevice") + } + return nil +} + +func activateDevice(poolName string, name string, deviceId int, size uint64) error { + task, err := createTask(DeviceCreate, name) + if task == nil { + return err + } + + params := fmt.Sprintf("%s %d", poolName, deviceId) + if err := task.AddTarget(0, size/512, "thin", params); err != nil { + return fmt.Errorf("Can't add target") + } + if err := task.SetAddNode(AddNodeOnCreate); err != nil { + return fmt.Errorf("Can't add node") + } + + var cookie uint = 0 + if err := task.SetCookie(&cookie, 0); err != nil { + return fmt.Errorf("Can't set cookie") + } + 
+ if err := task.Run(); err != nil { + return fmt.Errorf("Error running DeviceCreate (activateDevice)") + } + + UdevWait(cookie) + + return nil +} + +func (devices *DeviceSet) createSnapDevice(poolName string, deviceId int, baseName string, baseDeviceId int) error { + devinfo, _ := getInfo(baseName) + doSuspend := devinfo != nil && devinfo.Exists != 0 + + if doSuspend { + if err := suspendDevice(baseName); err != nil { + return err + } + } + + task, err := createTask(DeviceTargetMsg, poolName) + if task == nil { + if doSuspend { + resumeDevice(baseName) + } + return err + } + + if err := task.SetSector(0); err != nil { + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set sector") + } + + if err := task.SetMessage(fmt.Sprintf("create_snap %d %d", deviceId, baseDeviceId)); err != nil { + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Can't set message") + } + + if err := task.Run(); err != nil { + if doSuspend { + resumeDevice(baseName) + } + return fmt.Errorf("Error running DeviceCreate (createSnapDevice)") + } + + if doSuspend { + if err := resumeDevice(baseName); err != nil { + return err + } + } + + return nil +} diff --git a/runtime/graphdriver/devmapper/devmapper_doc.go b/runtime/graphdriver/devmapper/devmapper_doc.go new file mode 100644 index 0000000000..c1c3e3891b --- /dev/null +++ b/runtime/graphdriver/devmapper/devmapper_doc.go @@ -0,0 +1,106 @@ +package devmapper + +// Definition of struct dm_task and sub structures (from lvm2) +// +// struct dm_ioctl { +// /* +// * The version number is made up of three parts: +// * major - no backward or forward compatibility, +// * minor - only backwards compatible, +// * patch - both backwards and forwards compatible. +// * +// * All clients of the ioctl interface should fill in the +// * version number of the interface that they were +// * compiled with. +// * +// * All recognised ioctl commands (ie. those that don't +// * return -ENOTTY) fill out this field, even if the +// * command failed. +// */ +// uint32_t version[3]; /* in/out */ +// uint32_t data_size; /* total size of data passed in +// * including this struct */ + +// uint32_t data_start; /* offset to start of data +// * relative to start of this struct */ + +// uint32_t target_count; /* in/out */ +// int32_t open_count; /* out */ +// uint32_t flags; /* in/out */ + +// /* +// * event_nr holds either the event number (input and output) or the +// * udev cookie value (input only). +// * The DM_DEV_WAIT ioctl takes an event number as input. +// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls +// * use the field as a cookie to return in the DM_COOKIE +// * variable with the uevents they issue. +// * For output, the ioctls return the event number, not the cookie. 
+// */ +// uint32_t event_nr; /* in/out */ +// uint32_t padding; + +// uint64_t dev; /* in/out */ + +// char name[DM_NAME_LEN]; /* device name */ +// char uuid[DM_UUID_LEN]; /* unique identifier for +// * the block device */ +// char data[7]; /* padding or data */ +// }; + +// struct target { +// uint64_t start; +// uint64_t length; +// char *type; +// char *params; + +// struct target *next; +// }; + +// typedef enum { +// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ +// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ +// } dm_add_node_t; + +// struct dm_task { +// int type; +// char *dev_name; +// char *mangled_dev_name; + +// struct target *head, *tail; + +// int read_only; +// uint32_t event_nr; +// int major; +// int minor; +// int allow_default_major_fallback; +// uid_t uid; +// gid_t gid; +// mode_t mode; +// uint32_t read_ahead; +// uint32_t read_ahead_flags; +// union { +// struct dm_ioctl *v4; +// } dmi; +// char *newname; +// char *message; +// char *geometry; +// uint64_t sector; +// int no_flush; +// int no_open_count; +// int skip_lockfs; +// int query_inactive_table; +// int suppress_identical_reload; +// dm_add_node_t add_node; +// uint64_t existing_table_size; +// int cookie_set; +// int new_uuid; +// int secure_data; +// int retry_remove; +// int enable_checks; +// int expected_errno; + +// char *uuid; +// char *mangled_uuid; +// }; +// diff --git a/runtime/graphdriver/devmapper/devmapper_log.go b/runtime/graphdriver/devmapper/devmapper_log.go new file mode 100644 index 0000000000..18dde7cca5 --- /dev/null +++ b/runtime/graphdriver/devmapper/devmapper_log.go @@ -0,0 +1,15 @@ +// +build linux,amd64 + +package devmapper + +import "C" + +// Due to the way cgo works this has to be in a separate file, as devmapper.go has +// definitions in the cgo block, which is incompatible with using "//export" + +//export DevmapperLogCallback +func DevmapperLogCallback(level C.int, file *C.char, line C.int, dm_errno_or_class C.int, message *C.char) { + if dmLogger != nil { + dmLogger.log(int(level), C.GoString(file), int(line), int(dm_errno_or_class), C.GoString(message)) + } +} diff --git a/runtime/graphdriver/devmapper/devmapper_test.go b/runtime/graphdriver/devmapper/devmapper_test.go new file mode 100644 index 0000000000..3ffa163ceb --- /dev/null +++ b/runtime/graphdriver/devmapper/devmapper_test.go @@ -0,0 +1,287 @@ +// +build linux,amd64 + +package devmapper + +import ( + "testing" +) + +func TestTaskCreate(t *testing.T) { + t.Skip("FIXME: not a unit test") + // Test success + taskCreate(t, DeviceInfo) + + // Test Failure + DmTaskCreate = dmTaskCreateFail + defer func() { DmTaskCreate = dmTaskCreateFct }() + if task := TaskCreate(-1); task != nil { + t.Fatalf("An error should have occured while creating an invalid task.") + } +} + +func TestTaskRun(t *testing.T) { + t.Skip("FIXME: not a unit test") + task := taskCreate(t, DeviceInfo) + + // Test success + // Perform the RUN + if err := task.Run(); err != nil { + t.Fatal(err) + } + // Make sure we don't have error with GetInfo + if _, err := task.GetInfo(); err != nil { + t.Fatal(err) + } + + // Test failure + DmTaskRun = dmTaskRunFail + defer func() { DmTaskRun = dmTaskRunFct }() + + task = taskCreate(t, DeviceInfo) + // Perform the RUN + if err := task.Run(); err != ErrTaskRun { + t.Fatalf("An error should have occured while running task.") + } + // Make sure GetInfo also fails + if _, err := task.GetInfo(); err != ErrTaskGetInfo { + t.Fatalf("GetInfo should fail if task.Run() failed.") 
+	}
+}
+
+func TestTaskSetName(t *testing.T) {
+	t.Skip("FIXME: not a unit test")
+	task := taskCreate(t, DeviceInfo)
+
+	// Test success
+	if err := task.SetName("test"); err != nil {
+		t.Fatal(err)
+	}
+
+	// Test failure
+	DmTaskSetName = dmTaskSetNameFail
+	defer func() { DmTaskSetName = dmTaskSetNameFct }()
+
+	if err := task.SetName("test"); err != ErrTaskSetName {
+		t.Fatalf("An error should have occurred while running SetName.")
+	}
+}
+
+func TestTaskSetMessage(t *testing.T) {
+	t.Skip("FIXME: not a unit test")
+	task := taskCreate(t, DeviceInfo)
+
+	// Test success
+	if err := task.SetMessage("test"); err != nil {
+		t.Fatal(err)
+	}
+
+	// Test failure
+	DmTaskSetMessage = dmTaskSetMessageFail
+	defer func() { DmTaskSetMessage = dmTaskSetMessageFct }()
+
+	if err := task.SetMessage("test"); err != ErrTaskSetMessage {
+		t.Fatalf("An error should have occurred while running SetMessage.")
+	}
+}
+
+func TestTaskSetSector(t *testing.T) {
+	t.Skip("FIXME: not a unit test")
+	task := taskCreate(t, DeviceInfo)
+
+	// Test success
+	if err := task.SetSector(128); err != nil {
+		t.Fatal(err)
+	}
+
+	DmTaskSetSector = dmTaskSetSectorFail
+	defer func() { DmTaskSetSector = dmTaskSetSectorFct }()
+
+	// Test failure
+	if err := task.SetSector(0); err != ErrTaskSetSector {
+		t.Fatalf("An error should have occurred while running SetSector.")
+	}
+}
+
+func TestTaskSetCookie(t *testing.T) {
+	t.Skip("FIXME: not a unit test")
+	var (
+		cookie uint = 0
+		task        = taskCreate(t, DeviceInfo)
+	)
+
+	// Test success
+	if err := task.SetCookie(&cookie, 0); err != nil {
+		t.Fatal(err)
+	}
+
+	// Test failure
+	if err := task.SetCookie(nil, 0); err != ErrNilCookie {
+		t.Fatalf("An error should have occurred while running SetCookie with nil cookie.")
+	}
+
+	DmTaskSetCookie = dmTaskSetCookieFail
+	defer func() { DmTaskSetCookie = dmTaskSetCookieFct }()
+
+	if err := task.SetCookie(&cookie, 0); err != ErrTaskSetCookie {
+		t.Fatalf("An error should have occurred while running SetCookie.")
+	}
+}
+
+func TestTaskSetAddNode(t *testing.T) {
+	t.Skip("FIXME: not a unit test")
+	task := taskCreate(t, DeviceInfo)
+
+	// Test success
+	if err := task.SetAddNode(0); err != nil {
+		t.Fatal(err)
+	}
+
+	// Test failure
+	if err := task.SetAddNode(-1); err != ErrInvalidAddNode {
+		t.Fatalf("An error should have occurred running SetAddNode with wrong node.")
+	}
+
+	DmTaskSetAddNode = dmTaskSetAddNodeFail
+	defer func() { DmTaskSetAddNode = dmTaskSetAddNodeFct }()
+
+	if err := task.SetAddNode(0); err != ErrTaskSetAddNode {
+		t.Fatalf("An error should have occurred running SetAddNode.")
+	}
+}
+
+func TestTaskSetRo(t *testing.T) {
+	t.Skip("FIXME: not a unit test")
+	task := taskCreate(t, DeviceInfo)
+
+	// Test success
+	if err := task.SetRo(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Test failure
+	DmTaskSetRo = dmTaskSetRoFail
+	defer func() { DmTaskSetRo = dmTaskSetRoFct }()
+
+	if err := task.SetRo(); err != ErrTaskSetRo {
+		t.Fatalf("An error should have occurred running SetRo.")
+	}
+}
+
+func TestTaskAddTarget(t *testing.T) {
+	t.Skip("FIXME: not a unit test")
+	task := taskCreate(t, DeviceInfo)
+
+	// Test success
+	if err := task.AddTarget(0, 128, "thinp", ""); err != nil {
+		t.Fatal(err)
+	}
+
+	// Test failure
+	DmTaskAddTarget = dmTaskAddTargetFail
+	defer func() { DmTaskAddTarget = dmTaskAddTargetFct }()
+
+	if err := task.AddTarget(0, 128, "thinp", ""); err != ErrTaskAddTarget {
+		t.Fatalf("An error should have occurred running AddTarget.")
+	}
+}
+
+// func TestTaskGetInfo(t *testing.T) {
+// 	task := taskCreate(t, 
DeviceInfo)
+
+// 	// Test success
+// 	if _, err := task.GetInfo(); err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	// Test failure
+// 	DmTaskGetInfo = dmTaskGetInfoFail
+// 	defer func() { DmTaskGetInfo = dmTaskGetInfoFct }()
+
+// 	if _, err := task.GetInfo(); err != ErrTaskGetInfo {
+// 		t.Fatalf("An error should have occurred running GetInfo.")
+// 	}
+// }
+
+// func TestTaskGetNextTarget(t *testing.T) {
+// 	task := taskCreate(t, DeviceInfo)
+
+// 	if next, _, _, _, _ := task.GetNextTarget(0); next == 0 {
+// 		t.Fatalf("The next target should not be 0.")
+// 	}
+// }
+
+/// Utils
+func taskCreate(t *testing.T, taskType TaskType) *Task {
+	task := TaskCreate(taskType)
+	if task == nil {
+		t.Fatalf("Error creating task")
+	}
+	return task
+}
+
+/// Failure function replacement
+func dmTaskCreateFail(t int) *CDmTask {
+	return nil
+}
+
+func dmTaskRunFail(task *CDmTask) int {
+	return -1
+}
+
+func dmTaskSetNameFail(task *CDmTask, name string) int {
+	return -1
+}
+
+func dmTaskSetMessageFail(task *CDmTask, message string) int {
+	return -1
+}
+
+func dmTaskSetSectorFail(task *CDmTask, sector uint64) int {
+	return -1
+}
+
+func dmTaskSetCookieFail(task *CDmTask, cookie *uint, flags uint16) int {
+	return -1
+}
+
+func dmTaskSetAddNodeFail(task *CDmTask, addNode AddNodeType) int {
+	return -1
+}
+
+func dmTaskSetRoFail(task *CDmTask) int {
+	return -1
+}
+
+func dmTaskAddTargetFail(task *CDmTask,
+	start, size uint64, ttype, params string) int {
+	return -1
+}
+
+func dmTaskGetInfoFail(task *CDmTask, info *Info) int {
+	return -1
+}
+
+func dmGetNextTargetFail(task *CDmTask, next uintptr, start, length *uint64,
+	target, params *string) uintptr {
+	return 0
+}
+
+func dmAttachLoopDeviceFail(filename string, fd *int) string {
+	return ""
+}
+
+func sysGetBlockSizeFail(fd uintptr, size *uint64) sysErrno {
+	return 1
+}
+
+func dmUdevWaitFail(cookie uint) int {
+	return -1
+}
+
+func dmSetDevDirFail(dir string) int {
+	return -1
+}
+
+func dmGetLibraryVersionFail(version *string) int {
+	return -1
+}
diff --git a/runtime/graphdriver/devmapper/devmapper_wrapper.go b/runtime/graphdriver/devmapper/devmapper_wrapper.go
new file mode 100644
index 0000000000..bf558affc8
--- /dev/null
+++ b/runtime/graphdriver/devmapper/devmapper_wrapper.go
@@ -0,0 +1,229 @@
+// +build linux,amd64
+
+package devmapper
+
+/*
+#cgo LDFLAGS: -L. -ldevmapper
+#include <libdevmapper.h>
+#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
+#include <linux/fs.h>   // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
+
+#ifndef LOOP_CTL_GET_FREE
+  #define LOOP_CTL_GET_FREE 0x4C82
+#endif
+
+#ifndef LO_FLAGS_PARTSCAN
+  #define LO_FLAGS_PARTSCAN 8
+#endif
+
+// FIXME: Can't we find a way to do the logging in pure Go?
+extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
+
+static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
+{ + char buffer[256]; + va_list ap; + + va_start(ap, f); + vsnprintf(buffer, 256, f, ap); + va_end(ap); + + DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); +} + +static void log_with_errno_init() +{ + dm_log_with_errno_init(log_cb); +} +*/ +import "C" + +import ( + "unsafe" +) + +type ( + CDmTask C.struct_dm_task + + CLoopInfo64 C.struct_loop_info64 + LoopInfo64 struct { + loDevice uint64 /* ioctl r/o */ + loInode uint64 /* ioctl r/o */ + loRdevice uint64 /* ioctl r/o */ + loOffset uint64 + loSizelimit uint64 /* bytes, 0 == max available */ + loNumber uint32 /* ioctl r/o */ + loEncrypt_type uint32 + loEncrypt_key_size uint32 /* ioctl w/o */ + loFlags uint32 /* ioctl r/o */ + loFileName [LoNameSize]uint8 + loCryptName [LoNameSize]uint8 + loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ + loInit [2]uint64 + } +) + +// IOCTL consts +const ( + BlkGetSize64 = C.BLKGETSIZE64 + BlkDiscard = C.BLKDISCARD + + LoopSetFd = C.LOOP_SET_FD + LoopCtlGetFree = C.LOOP_CTL_GET_FREE + LoopGetStatus64 = C.LOOP_GET_STATUS64 + LoopSetStatus64 = C.LOOP_SET_STATUS64 + LoopClrFd = C.LOOP_CLR_FD + LoopSetCapacity = C.LOOP_SET_CAPACITY +) + +const ( + LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR + LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY + LoFlagsPartScan = C.LO_FLAGS_PARTSCAN + LoKeySize = C.LO_KEY_SIZE + LoNameSize = C.LO_NAME_SIZE +) + +var ( + DmGetLibraryVersion = dmGetLibraryVersionFct + DmGetNextTarget = dmGetNextTargetFct + DmLogInitVerbose = dmLogInitVerboseFct + DmSetDevDir = dmSetDevDirFct + DmTaskAddTarget = dmTaskAddTargetFct + DmTaskCreate = dmTaskCreateFct + DmTaskDestroy = dmTaskDestroyFct + DmTaskGetInfo = dmTaskGetInfoFct + DmTaskRun = dmTaskRunFct + DmTaskSetAddNode = dmTaskSetAddNodeFct + DmTaskSetCookie = dmTaskSetCookieFct + DmTaskSetMessage = dmTaskSetMessageFct + DmTaskSetName = dmTaskSetNameFct + DmTaskSetRo = dmTaskSetRoFct + DmTaskSetSector = dmTaskSetSectorFct + DmUdevWait = dmUdevWaitFct + LogWithErrnoInit = logWithErrnoInitFct +) + +func free(p *C.char) { + C.free(unsafe.Pointer(p)) +} + +func dmTaskDestroyFct(task *CDmTask) { + C.dm_task_destroy((*C.struct_dm_task)(task)) +} + +func dmTaskCreateFct(taskType int) *CDmTask { + return (*CDmTask)(C.dm_task_create(C.int(taskType))) +} + +func dmTaskRunFct(task *CDmTask) int { + ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) + return int(ret) +} + +func dmTaskSetNameFct(task *CDmTask, name string) int { + Cname := C.CString(name) + defer free(Cname) + + return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) +} + +func dmTaskSetMessageFct(task *CDmTask, message string) int { + Cmessage := C.CString(message) + defer free(Cmessage) + + return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) +} + +func dmTaskSetSectorFct(task *CDmTask, sector uint64) int { + return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) +} + +func dmTaskSetCookieFct(task *CDmTask, cookie *uint, flags uint16) int { + cCookie := C.uint32_t(*cookie) + defer func() { + *cookie = uint(cCookie) + }() + return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) +} + +func dmTaskSetAddNodeFct(task *CDmTask, addNode AddNodeType) int { + return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) +} + +func dmTaskSetRoFct(task *CDmTask) int { + return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) +} + +func dmTaskAddTargetFct(task *CDmTask, + start, size uint64, ttype, params string) int { + + Cttype := C.CString(ttype) + defer free(Cttype) + + 
Cparams := C.CString(params) + defer free(Cparams) + + return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) +} + +func dmTaskGetInfoFct(task *CDmTask, info *Info) int { + Cinfo := C.struct_dm_info{} + defer func() { + info.Exists = int(Cinfo.exists) + info.Suspended = int(Cinfo.suspended) + info.LiveTable = int(Cinfo.live_table) + info.InactiveTable = int(Cinfo.inactive_table) + info.OpenCount = int32(Cinfo.open_count) + info.EventNr = uint32(Cinfo.event_nr) + info.Major = uint32(Cinfo.major) + info.Minor = uint32(Cinfo.minor) + info.ReadOnly = int(Cinfo.read_only) + info.TargetCount = int32(Cinfo.target_count) + }() + return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) +} + +func dmGetNextTargetFct(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { + var ( + Cstart, Clength C.uint64_t + CtargetType, Cparams *C.char + ) + defer func() { + *start = uint64(Cstart) + *length = uint64(Clength) + *target = C.GoString(CtargetType) + *params = C.GoString(Cparams) + }() + + nextp := C.dm_get_next_target((*C.struct_dm_task)(task), unsafe.Pointer(next), &Cstart, &Clength, &CtargetType, &Cparams) + return uintptr(nextp) +} + +func dmUdevWaitFct(cookie uint) int { + return int(C.dm_udev_wait(C.uint32_t(cookie))) +} + +func dmLogInitVerboseFct(level int) { + C.dm_log_init_verbose(C.int(level)) +} + +func logWithErrnoInitFct() { + C.log_with_errno_init() +} + +func dmSetDevDirFct(dir string) int { + Cdir := C.CString(dir) + defer free(Cdir) + + return int(C.dm_set_dev_dir(Cdir)) +} + +func dmGetLibraryVersionFct(version *string) int { + buffer := C.CString(string(make([]byte, 128))) + defer free(buffer) + defer func() { + *version = C.GoString(buffer) + }() + return int(C.dm_get_library_version(buffer, 128)) +} diff --git a/runtime/graphdriver/devmapper/driver.go b/runtime/graphdriver/devmapper/driver.go new file mode 100644 index 0000000000..33c7a0f483 --- /dev/null +++ b/runtime/graphdriver/devmapper/driver.go @@ -0,0 +1,143 @@ +// +build linux,amd64 + +package devmapper + +import ( + "fmt" + "github.com/dotcloud/docker/runtime/graphdriver" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "os" + "path" +) + +func init() { + graphdriver.Register("devicemapper", Init) +} + +// Placeholder interfaces, to be replaced +// at integration. + +// End of placeholder interfaces. 
+ +type Driver struct { + *DeviceSet + home string +} + +var Init = func(home string) (graphdriver.Driver, error) { + deviceSet, err := NewDeviceSet(home, true) + if err != nil { + return nil, err + } + d := &Driver{ + DeviceSet: deviceSet, + home: home, + } + return d, nil +} + +func (d *Driver) String() string { + return "devicemapper" +} + +func (d *Driver) Status() [][2]string { + s := d.DeviceSet.Status() + + status := [][2]string{ + {"Pool Name", s.PoolName}, + {"Data file", s.DataLoopback}, + {"Metadata file", s.MetadataLoopback}, + {"Data Space Used", fmt.Sprintf("%.1f Mb", float64(s.Data.Used)/(1024*1024))}, + {"Data Space Total", fmt.Sprintf("%.1f Mb", float64(s.Data.Total)/(1024*1024))}, + {"Metadata Space Used", fmt.Sprintf("%.1f Mb", float64(s.Metadata.Used)/(1024*1024))}, + {"Metadata Space Total", fmt.Sprintf("%.1f Mb", float64(s.Metadata.Total)/(1024*1024))}, + } + return status +} + +func (d *Driver) Cleanup() error { + return d.DeviceSet.Shutdown() +} + +func (d *Driver) Create(id, parent string) error { + if err := d.DeviceSet.AddDevice(id, parent); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := d.mount(id, mp); err != nil { + return err + } + + if err := osMkdirAll(path.Join(mp, "rootfs"), 0755); err != nil && !osIsExist(err) { + return err + } + + // Create an "id" file with the container/image id in it to help reconscruct this in case + // of later problems + if err := ioutil.WriteFile(path.Join(mp, "id"), []byte(id), 0600); err != nil { + return err + } + + // We float this reference so that the next Get call can + // steal it, so we don't have to unmount + if err := d.DeviceSet.UnmountDevice(id, UnmountFloat); err != nil { + return err + } + + return nil +} + +func (d *Driver) Remove(id string) error { + if !d.DeviceSet.HasDevice(id) { + // Consider removing a non-existing device a no-op + // This is useful to be able to progress on container removal + // if the underlying device has gone away due to earlier errors + return nil + } + + // Sink the float from create in case no Get() call was made + if err := d.DeviceSet.UnmountDevice(id, UnmountSink); err != nil { + return err + } + // This assumes the device has been properly Get/Put:ed and thus is unmounted + if err := d.DeviceSet.DeleteDevice(id); err != nil { + return err + } + + mp := path.Join(d.home, "mnt", id) + if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +func (d *Driver) Get(id string) (string, error) { + mp := path.Join(d.home, "mnt", id) + if err := d.mount(id, mp); err != nil { + return "", err + } + + return path.Join(mp, "rootfs"), nil +} + +func (d *Driver) Put(id string) { + if err := d.DeviceSet.UnmountDevice(id, UnmountRegular); err != nil { + utils.Errorf("Warning: error unmounting device %s: %s\n", id, err) + } +} + +func (d *Driver) mount(id, mountPoint string) error { + // Create the target directories if they don't exist + if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) { + return err + } + // Mount the device + return d.DeviceSet.MountDevice(id, mountPoint) +} + +func (d *Driver) Exists(id string) bool { + return d.Devices[id] != nil +} diff --git a/runtime/graphdriver/devmapper/driver_test.go b/runtime/graphdriver/devmapper/driver_test.go new file mode 100644 index 0000000000..9af71a00b3 --- /dev/null +++ b/runtime/graphdriver/devmapper/driver_test.go @@ -0,0 +1,886 @@ +// +build linux,amd64 + +package devmapper + +import ( + "fmt" + 
"github.com/dotcloud/docker/runtime/graphdriver" + "io/ioutil" + "path" + "runtime" + "strings" + "syscall" + "testing" +) + +func init() { + // Reduce the size the the base fs and loopback for the tests + DefaultDataLoopbackSize = 300 * 1024 * 1024 + DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 + DefaultBaseFsSize = 300 * 1024 * 1024 +} + +// denyAllDevmapper mocks all calls to libdevmapper in the unit tests, and denies them by default +func denyAllDevmapper() { + // Hijack all calls to libdevmapper with default panics. + // Authorized calls are selectively hijacked in each tests. + DmTaskCreate = func(t int) *CDmTask { + panic("DmTaskCreate: this method should not be called here") + } + DmTaskRun = func(task *CDmTask) int { + panic("DmTaskRun: this method should not be called here") + } + DmTaskSetName = func(task *CDmTask, name string) int { + panic("DmTaskSetName: this method should not be called here") + } + DmTaskSetMessage = func(task *CDmTask, message string) int { + panic("DmTaskSetMessage: this method should not be called here") + } + DmTaskSetSector = func(task *CDmTask, sector uint64) int { + panic("DmTaskSetSector: this method should not be called here") + } + DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { + panic("DmTaskSetCookie: this method should not be called here") + } + DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { + panic("DmTaskSetAddNode: this method should not be called here") + } + DmTaskSetRo = func(task *CDmTask) int { + panic("DmTaskSetRo: this method should not be called here") + } + DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { + panic("DmTaskAddTarget: this method should not be called here") + } + DmTaskGetInfo = func(task *CDmTask, info *Info) int { + panic("DmTaskGetInfo: this method should not be called here") + } + DmGetNextTarget = func(task *CDmTask, next uintptr, start, length *uint64, target, params *string) uintptr { + panic("DmGetNextTarget: this method should not be called here") + } + DmUdevWait = func(cookie uint) int { + panic("DmUdevWait: this method should not be called here") + } + DmSetDevDir = func(dir string) int { + panic("DmSetDevDir: this method should not be called here") + } + DmGetLibraryVersion = func(version *string) int { + panic("DmGetLibraryVersion: this method should not be called here") + } + DmLogInitVerbose = func(level int) { + panic("DmLogInitVerbose: this method should not be called here") + } + DmTaskDestroy = func(task *CDmTask) { + panic("DmTaskDestroy: this method should not be called here") + } + LogWithErrnoInit = func() { + panic("LogWithErrnoInit: this method should not be called here") + } +} + +func denyAllSyscall() { + sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { + panic("sysMount: this method should not be called here") + } + sysUnmount = func(target string, flags int) (err error) { + panic("sysUnmount: this method should not be called here") + } + sysCloseOnExec = func(fd int) { + panic("sysCloseOnExec: this method should not be called here") + } + sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + panic("sysSyscall: this method should not be called here") + } + // Not a syscall, but forbidding it here anyway + Mounted = func(mnt string) (bool, error) { + panic("devmapper.Mounted: this method should not be called here") + } + // osOpenFile = os.OpenFile + // osNewFile = os.NewFile + // osCreate = os.Create + // osStat = os.Stat + // osIsNotExist = 
os.IsNotExist + // osIsExist = os.IsExist + // osMkdirAll = os.MkdirAll + // osRemoveAll = os.RemoveAll + // osRename = os.Rename + // osReadlink = os.Readlink + + // execRun = func(name string, args ...string) error { + // return exec.Command(name, args...).Run() + // } +} + +func mkTestDirectory(t *testing.T) string { + dir, err := ioutil.TempDir("", "docker-test-devmapper-") + if err != nil { + t.Fatal(err) + } + return dir +} + +func newDriver(t *testing.T) *Driver { + home := mkTestDirectory(t) + d, err := Init(home) + if err != nil { + t.Fatal(err) + } + return d.(*Driver) +} + +func cleanup(d *Driver) { + d.Cleanup() + osRemoveAll(d.home) +} + +type Set map[string]bool + +func (r Set) Assert(t *testing.T, names ...string) { + for _, key := range names { + required := true + if strings.HasPrefix(key, "?") { + key = key[1:] + required = false + } + if _, exists := r[key]; !exists && required { + t.Fatalf("Key not set: %s", key) + } + delete(r, key) + } + if len(r) != 0 { + t.Fatalf("Unexpected keys: %v", r) + } +} + +func TestInit(t *testing.T) { + var ( + calls = make(Set) + taskMessages = make(Set) + taskTypes = make(Set) + home = mkTestDirectory(t) + ) + defer osRemoveAll(home) + + func() { + denyAllDevmapper() + DmSetDevDir = func(dir string) int { + calls["DmSetDevDir"] = true + expectedDir := "/dev" + if dir != expectedDir { + t.Fatalf("Wrong libdevmapper call\nExpected: DmSetDevDir(%v)\nReceived: DmSetDevDir(%v)\n", expectedDir, dir) + } + return 0 + } + LogWithErrnoInit = func() { + calls["DmLogWithErrnoInit"] = true + } + var task1 CDmTask + DmTaskCreate = func(taskType int) *CDmTask { + calls["DmTaskCreate"] = true + taskTypes[fmt.Sprintf("%d", taskType)] = true + return &task1 + } + DmTaskSetName = func(task *CDmTask, name string) int { + calls["DmTaskSetName"] = true + expectedTask := &task1 + if task != expectedTask { + t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", expectedTask, task) + } + // FIXME: use Set.AssertRegexp() + if !strings.HasPrefix(name, "docker-") && !strings.HasPrefix(name, "/dev/mapper/docker-") || + !strings.HasSuffix(name, "-pool") && !strings.HasSuffix(name, "-base") { + t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetName(%v)\nReceived: DmTaskSetName(%v)\n", "docker-...-pool", name) + } + return 1 + } + DmTaskRun = func(task *CDmTask) int { + calls["DmTaskRun"] = true + expectedTask := &task1 + if task != expectedTask { + t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskRun(%v)\nReceived: DmTaskRun(%v)\n", expectedTask, task) + } + return 1 + } + DmTaskGetInfo = func(task *CDmTask, info *Info) int { + calls["DmTaskGetInfo"] = true + expectedTask := &task1 + if task != expectedTask { + t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskGetInfo(%v)\nReceived: DmTaskGetInfo(%v)\n", expectedTask, task) + } + // This will crash if info is not dereferenceable + info.Exists = 0 + return 1 + } + DmTaskSetSector = func(task *CDmTask, sector uint64) int { + calls["DmTaskSetSector"] = true + expectedTask := &task1 + if task != expectedTask { + t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task) + } + if expectedSector := uint64(0); sector != expectedSector { + t.Fatalf("Wrong libdevmapper call to DmTaskSetSector\nExpected: %v\nReceived: %v\n", expectedSector, sector) + } + return 1 + } + DmTaskSetMessage = func(task *CDmTask, message string) int { + calls["DmTaskSetMessage"] = true + expectedTask := &task1 + if task != expectedTask { + 
t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskSetSector(%v)\nReceived: DmTaskSetSector(%v)\n", expectedTask, task) + } + taskMessages[message] = true + return 1 + } + DmTaskDestroy = func(task *CDmTask) { + calls["DmTaskDestroy"] = true + expectedTask := &task1 + if task != expectedTask { + t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) + } + } + DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { + calls["DmTaskSetTarget"] = true + expectedTask := &task1 + if task != expectedTask { + t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) + } + if start != 0 { + t.Fatalf("Wrong start: %d != %d", start, 0) + } + if ttype != "thin" && ttype != "thin-pool" { + t.Fatalf("Wrong ttype: %s", ttype) + } + // Quick smoke test + if params == "" { + t.Fatalf("Params should not be empty") + } + return 1 + } + fakeCookie := uint(4321) + DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { + calls["DmTaskSetCookie"] = true + expectedTask := &task1 + if task != expectedTask { + t.Fatalf("Wrong libdevmapper call\nExpected: DmTaskDestroy(%v)\nReceived: DmTaskDestroy(%v)\n", expectedTask, task) + } + if flags != 0 { + t.Fatalf("Cookie flags should be 0 (not %x)", flags) + } + *cookie = fakeCookie + return 1 + } + DmUdevWait = func(cookie uint) int { + calls["DmUdevWait"] = true + if cookie != fakeCookie { + t.Fatalf("Wrong cookie: %d != %d", cookie, fakeCookie) + } + return 1 + } + DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { + if addNode != AddNodeOnCreate { + t.Fatalf("Wrong AddNoteType: %v (expected %v)", addNode, AddNodeOnCreate) + } + calls["DmTaskSetAddNode"] = true + return 1 + } + execRun = func(name string, args ...string) error { + calls["execRun"] = true + if name != "mkfs.ext4" { + t.Fatalf("Expected %s to be executed, not %s", "mkfs.ext4", name) + } + return nil + } + driver, err := Init(home) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := driver.Cleanup(); err != nil { + t.Fatal(err) + } + }() + }() + // Put all tests in a function to make sure the garbage collection will + // occur. 
+ + // Call GC to cleanup runtime.Finalizers + runtime.GC() + + calls.Assert(t, + "DmSetDevDir", + "DmLogWithErrnoInit", + "DmTaskSetName", + "DmTaskRun", + "DmTaskGetInfo", + "DmTaskDestroy", + "execRun", + "DmTaskCreate", + "DmTaskSetTarget", + "DmTaskSetCookie", + "DmUdevWait", + "DmTaskSetSector", + "DmTaskSetMessage", + "DmTaskSetAddNode", + ) + taskTypes.Assert(t, "0", "6", "17") + taskMessages.Assert(t, "create_thin 0", "set_transaction_id 0 1") +} + +func fakeInit() func(home string) (graphdriver.Driver, error) { + oldInit := Init + Init = func(home string) (graphdriver.Driver, error) { + return &Driver{ + home: home, + }, nil + } + return oldInit +} + +func restoreInit(init func(home string) (graphdriver.Driver, error)) { + Init = init +} + +func mockAllDevmapper(calls Set) { + DmSetDevDir = func(dir string) int { + calls["DmSetDevDir"] = true + return 0 + } + LogWithErrnoInit = func() { + calls["DmLogWithErrnoInit"] = true + } + DmTaskCreate = func(taskType int) *CDmTask { + calls["DmTaskCreate"] = true + return &CDmTask{} + } + DmTaskSetName = func(task *CDmTask, name string) int { + calls["DmTaskSetName"] = true + return 1 + } + DmTaskRun = func(task *CDmTask) int { + calls["DmTaskRun"] = true + return 1 + } + DmTaskGetInfo = func(task *CDmTask, info *Info) int { + calls["DmTaskGetInfo"] = true + return 1 + } + DmTaskSetSector = func(task *CDmTask, sector uint64) int { + calls["DmTaskSetSector"] = true + return 1 + } + DmTaskSetMessage = func(task *CDmTask, message string) int { + calls["DmTaskSetMessage"] = true + return 1 + } + DmTaskDestroy = func(task *CDmTask) { + calls["DmTaskDestroy"] = true + } + DmTaskAddTarget = func(task *CDmTask, start, size uint64, ttype, params string) int { + calls["DmTaskSetTarget"] = true + return 1 + } + DmTaskSetCookie = func(task *CDmTask, cookie *uint, flags uint16) int { + calls["DmTaskSetCookie"] = true + return 1 + } + DmUdevWait = func(cookie uint) int { + calls["DmUdevWait"] = true + return 1 + } + DmTaskSetAddNode = func(task *CDmTask, addNode AddNodeType) int { + calls["DmTaskSetAddNode"] = true + return 1 + } + execRun = func(name string, args ...string) error { + calls["execRun"] = true + return nil + } +} + +func TestDriverName(t *testing.T) { + denyAllDevmapper() + defer denyAllDevmapper() + + oldInit := fakeInit() + defer restoreInit(oldInit) + + d := newDriver(t) + if d.String() != "devicemapper" { + t.Fatalf("Expected driver name to be devicemapper got %s", d.String()) + } +} + +func TestDriverCreate(t *testing.T) { + denyAllDevmapper() + denyAllSyscall() + defer denyAllSyscall() + defer denyAllDevmapper() + + calls := make(Set) + mockAllDevmapper(calls) + + sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { + calls["sysMount"] = true + // FIXME: compare the exact source and target strings (inodes + devname) + if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { + t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) + } + if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { + t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) + } + if expectedFstype := "ext4"; fstype != expectedFstype { + t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) + } + if expectedFlags := uintptr(3236757504); flags != expectedFlags { + t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: 
Mount(%v)\n", expectedFlags, flags) + } + return nil + } + + Mounted = func(mnt string) (bool, error) { + calls["Mounted"] = true + if !strings.HasPrefix(mnt, "/tmp/docker-test-devmapper-") || !strings.HasSuffix(mnt, "/mnt/1") { + t.Fatalf("Wrong mounted call\nExpected: Mounted(%v)\nReceived: Mounted(%v)\n", "/tmp/docker-test-devmapper-.../mnt/1", mnt) + } + return false, nil + } + + sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + calls["sysSyscall"] = true + if trap != sysSysIoctl { + t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap) + } + switch a2 { + case LoopSetFd: + calls["ioctl.loopsetfd"] = true + case LoopCtlGetFree: + calls["ioctl.loopctlgetfree"] = true + case LoopGetStatus64: + calls["ioctl.loopgetstatus"] = true + case LoopSetStatus64: + calls["ioctl.loopsetstatus"] = true + case LoopClrFd: + calls["ioctl.loopclrfd"] = true + case LoopSetCapacity: + calls["ioctl.loopsetcapacity"] = true + case BlkGetSize64: + calls["ioctl.blkgetsize"] = true + default: + t.Fatalf("Unexpected IOCTL. Received %d", a2) + } + return 0, 0, 0 + } + + func() { + d := newDriver(t) + + calls.Assert(t, + "DmSetDevDir", + "DmLogWithErrnoInit", + "DmTaskSetName", + "DmTaskRun", + "DmTaskGetInfo", + "execRun", + "DmTaskCreate", + "DmTaskSetTarget", + "DmTaskSetCookie", + "DmUdevWait", + "DmTaskSetSector", + "DmTaskSetMessage", + "DmTaskSetAddNode", + "sysSyscall", + "ioctl.blkgetsize", + "ioctl.loopsetfd", + "ioctl.loopsetstatus", + "?ioctl.loopctlgetfree", + ) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + calls.Assert(t, + "DmTaskCreate", + "DmTaskGetInfo", + "sysMount", + "DmTaskRun", + "DmTaskSetTarget", + "DmTaskSetSector", + "DmTaskSetCookie", + "DmUdevWait", + "DmTaskSetName", + "DmTaskSetMessage", + "DmTaskSetAddNode", + ) + + }() + + runtime.GC() + + calls.Assert(t, + "DmTaskDestroy", + ) +} + +func TestDriverRemove(t *testing.T) { + denyAllDevmapper() + denyAllSyscall() + defer denyAllSyscall() + defer denyAllDevmapper() + + calls := make(Set) + mockAllDevmapper(calls) + + sysMount = func(source, target, fstype string, flags uintptr, data string) (err error) { + calls["sysMount"] = true + // FIXME: compare the exact source and target strings (inodes + devname) + if expectedSource := "/dev/mapper/docker-"; !strings.HasPrefix(source, expectedSource) { + t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedSource, source) + } + if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { + t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) + } + if expectedFstype := "ext4"; fstype != expectedFstype { + t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFstype, fstype) + } + if expectedFlags := uintptr(3236757504); flags != expectedFlags { + t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) + } + return nil + } + sysUnmount = func(target string, flags int) (err error) { + calls["sysUnmount"] = true + // FIXME: compare the exact source and target strings (inodes + devname) + if expectedTarget := "/tmp/docker-test-devmapper-"; !strings.HasPrefix(target, expectedTarget) { + t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedTarget, target) + } + if expectedFlags := 0; flags != expectedFlags { + t.Fatalf("Wrong syscall call\nExpected: Mount(%v)\nReceived: Mount(%v)\n", expectedFlags, flags) + } + return nil 
+ } + Mounted = func(mnt string) (bool, error) { + calls["Mounted"] = true + return false, nil + } + + sysSyscall = func(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + calls["sysSyscall"] = true + if trap != sysSysIoctl { + t.Fatalf("Unexpected syscall. Expecting SYS_IOCTL, received: %d", trap) + } + switch a2 { + case LoopSetFd: + calls["ioctl.loopsetfd"] = true + case LoopCtlGetFree: + calls["ioctl.loopctlgetfree"] = true + case LoopGetStatus64: + calls["ioctl.loopgetstatus"] = true + case LoopSetStatus64: + calls["ioctl.loopsetstatus"] = true + case LoopClrFd: + calls["ioctl.loopclrfd"] = true + case LoopSetCapacity: + calls["ioctl.loopsetcapacity"] = true + case BlkGetSize64: + calls["ioctl.blkgetsize"] = true + default: + t.Fatalf("Unexpected IOCTL. Received %d", a2) + } + return 0, 0, 0 + } + + func() { + d := newDriver(t) + + calls.Assert(t, + "DmSetDevDir", + "DmLogWithErrnoInit", + "DmTaskSetName", + "DmTaskRun", + "DmTaskGetInfo", + "execRun", + "DmTaskCreate", + "DmTaskSetTarget", + "DmTaskSetCookie", + "DmUdevWait", + "DmTaskSetSector", + "DmTaskSetMessage", + "DmTaskSetAddNode", + "sysSyscall", + "ioctl.blkgetsize", + "ioctl.loopsetfd", + "ioctl.loopsetstatus", + "?ioctl.loopctlgetfree", + ) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + calls.Assert(t, + "DmTaskCreate", + "DmTaskGetInfo", + "sysMount", + "DmTaskRun", + "DmTaskSetTarget", + "DmTaskSetSector", + "DmTaskSetCookie", + "DmUdevWait", + "DmTaskSetName", + "DmTaskSetMessage", + "DmTaskSetAddNode", + ) + + Mounted = func(mnt string) (bool, error) { + calls["Mounted"] = true + return true, nil + } + + if err := d.Remove("1"); err != nil { + t.Fatal(err) + } + + calls.Assert(t, + "DmTaskRun", + "DmTaskSetSector", + "DmTaskSetName", + "DmTaskSetMessage", + "DmTaskCreate", + "DmTaskGetInfo", + "DmTaskSetCookie", + "DmTaskSetTarget", + "DmTaskSetAddNode", + "DmUdevWait", + "sysUnmount", + ) + }() + runtime.GC() + + calls.Assert(t, + "DmTaskDestroy", + ) +} + +func TestCleanup(t *testing.T) { + t.Skip("FIXME: not a unit test") + t.Skip("Unimplemented") + d := newDriver(t) + defer osRemoveAll(d.home) + + mountPoints := make([]string, 2) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + // Mount the id + p, err := d.Get("1") + if err != nil { + t.Fatal(err) + } + mountPoints[0] = p + + if err := d.Create("2", "1"); err != nil { + t.Fatal(err) + } + + p, err = d.Get("2") + if err != nil { + t.Fatal(err) + } + mountPoints[1] = p + + // Ensure that all the mount points are currently mounted + for _, p := range mountPoints { + if mounted, err := Mounted(p); err != nil { + t.Fatal(err) + } else if !mounted { + t.Fatalf("Expected %s to be mounted", p) + } + } + + // Ensure that devices are active + for _, p := range []string{"1", "2"} { + if !d.HasActivatedDevice(p) { + t.Fatalf("Expected %s to have an active device", p) + } + } + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + + // Ensure that all the mount points are no longer mounted + for _, p := range mountPoints { + if mounted, err := Mounted(p); err != nil { + t.Fatal(err) + } else if mounted { + t.Fatalf("Expected %s to not be mounted", p) + } + } + + // Ensure that devices are no longer activated + for _, p := range []string{"1", "2"} { + if d.HasActivatedDevice(p) { + t.Fatalf("Expected %s not be an active device", p) + } + } +} + +func TestNotMounted(t *testing.T) { + t.Skip("FIXME: not a unit test") + t.Skip("Not implemented") + d := newDriver(t) + defer cleanup(d) + + if err := d.Create("1", ""); err 
!= nil { + t.Fatal(err) + } + + mounted, err := Mounted(path.Join(d.home, "mnt", "1")) + if err != nil { + t.Fatal(err) + } + if mounted { + t.Fatal("Id 1 should not be mounted") + } +} + +func TestMounted(t *testing.T) { + t.Skip("FIXME: not a unit test") + d := newDriver(t) + defer cleanup(d) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if _, err := d.Get("1"); err != nil { + t.Fatal(err) + } + + mounted, err := Mounted(path.Join(d.home, "mnt", "1")) + if err != nil { + t.Fatal(err) + } + if !mounted { + t.Fatal("Id 1 should be mounted") + } +} + +func TestInitCleanedDriver(t *testing.T) { + t.Skip("FIXME: not a unit test") + d := newDriver(t) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + if _, err := d.Get("1"); err != nil { + t.Fatal(err) + } + + if err := d.Cleanup(); err != nil { + t.Fatal(err) + } + + driver, err := Init(d.home) + if err != nil { + t.Fatal(err) + } + d = driver.(*Driver) + defer cleanup(d) + + if _, err := d.Get("1"); err != nil { + t.Fatal(err) + } +} + +func TestMountMountedDriver(t *testing.T) { + t.Skip("FIXME: not a unit test") + d := newDriver(t) + defer cleanup(d) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + // Perform get on same id to ensure that it will + // not be mounted twice + if _, err := d.Get("1"); err != nil { + t.Fatal(err) + } + if _, err := d.Get("1"); err != nil { + t.Fatal(err) + } +} + +func TestGetReturnsValidDevice(t *testing.T) { + t.Skip("FIXME: not a unit test") + d := newDriver(t) + defer cleanup(d) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + if !d.HasDevice("1") { + t.Fatalf("Expected id 1 to be in device set") + } + + if _, err := d.Get("1"); err != nil { + t.Fatal(err) + } + + if !d.HasActivatedDevice("1") { + t.Fatalf("Expected id 1 to be activated") + } + + if !d.HasInitializedDevice("1") { + t.Fatalf("Expected id 1 to be initialized") + } +} + +func TestDriverGetSize(t *testing.T) { + t.Skip("FIXME: not a unit test") + t.Skipf("Size is currently not implemented") + + d := newDriver(t) + defer cleanup(d) + + if err := d.Create("1", ""); err != nil { + t.Fatal(err) + } + + mountPoint, err := d.Get("1") + if err != nil { + t.Fatal(err) + } + + size := int64(1024) + + f, err := osCreate(path.Join(mountPoint, "test_file")) + if err != nil { + t.Fatal(err) + } + if err := f.Truncate(size); err != nil { + t.Fatal(err) + } + f.Close() + + // diffSize, err := d.DiffSize("1") + // if err != nil { + // t.Fatal(err) + // } + // if diffSize != size { + // t.Fatalf("Expected size %d got %d", size, diffSize) + // } +} + +func assertMap(t *testing.T, m map[string]bool, keys ...string) { + for _, key := range keys { + if _, exists := m[key]; !exists { + t.Fatalf("Key not set: %s", key) + } + delete(m, key) + } + if len(m) != 0 { + t.Fatalf("Unexpected keys: %v", m) + } +} diff --git a/runtime/graphdriver/devmapper/ioctl.go b/runtime/graphdriver/devmapper/ioctl.go new file mode 100644 index 0000000000..30bafff943 --- /dev/null +++ b/runtime/graphdriver/devmapper/ioctl.go @@ -0,0 +1,71 @@ +// +build linux,amd64 + +package devmapper + +import ( + "unsafe" +) + +func ioctlLoopCtlGetFree(fd uintptr) (int, error) { + index, _, err := sysSyscall(sysSysIoctl, fd, LoopCtlGetFree, 0) + if err != 0 { + return 0, err + } + return int(index), nil +} + +func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { + if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetFd, sparseFd); err != 0 { + return err + } + return nil +} + +func ioctlLoopSetStatus64(loopFd uintptr, 
loopInfo *LoopInfo64) error { + if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return err + } + return nil +} + +func ioctlLoopClrFd(loopFd uintptr) error { + if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopClrFd, 0); err != 0 { + return err + } + return nil +} + +func ioctlLoopGetStatus64(loopFd uintptr) (*LoopInfo64, error) { + loopInfo := &LoopInfo64{} + + if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { + return nil, err + } + return loopInfo, nil +} + +func ioctlLoopSetCapacity(loopFd uintptr, value int) error { + if _, _, err := sysSyscall(sysSysIoctl, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + return err + } + return nil +} + +func ioctlBlkGetSize64(fd uintptr) (int64, error) { + var size int64 + if _, _, err := sysSyscall(sysSysIoctl, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, err + } + return size, nil +} + +func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { + var r [2]uint64 + r[0] = offset + r[1] = length + + if _, _, err := sysSyscall(sysSysIoctl, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { + return err + } + return nil +} diff --git a/runtime/graphdriver/devmapper/mount.go b/runtime/graphdriver/devmapper/mount.go new file mode 100644 index 0000000000..4f19109bf8 --- /dev/null +++ b/runtime/graphdriver/devmapper/mount.go @@ -0,0 +1,27 @@ +// +build linux,amd64 + +package devmapper + +import ( + "path/filepath" +) + +// FIXME: this is copy-pasted from the aufs driver. +// It should be moved into the core. + +var Mounted = func(mountpoint string) (bool, error) { + mntpoint, err := osStat(mountpoint) + if err != nil { + if osIsNotExist(err) { + return false, nil + } + return false, err + } + parent, err := osStat(filepath.Join(mountpoint, "..")) + if err != nil { + return false, err + } + mntpointSt := toSysStatT(mntpoint.Sys()) + parentSt := toSysStatT(parent.Sys()) + return mntpointSt.Dev != parentSt.Dev, nil +} diff --git a/runtime/graphdriver/devmapper/sys.go b/runtime/graphdriver/devmapper/sys.go new file mode 100644 index 0000000000..5a9ab4d74b --- /dev/null +++ b/runtime/graphdriver/devmapper/sys.go @@ -0,0 +1,57 @@ +// +build linux,amd64 + +package devmapper + +import ( + "os" + "os/exec" + "syscall" +) + +type ( + sysStatT syscall.Stat_t + sysErrno syscall.Errno + + osFile struct{ *os.File } +) + +var ( + sysMount = syscall.Mount + sysUnmount = syscall.Unmount + sysCloseOnExec = syscall.CloseOnExec + sysSyscall = syscall.Syscall + + osOpenFile = func(name string, flag int, perm os.FileMode) (*osFile, error) { + f, err := os.OpenFile(name, flag, perm) + return &osFile{File: f}, err + } + osOpen = func(name string) (*osFile, error) { f, err := os.Open(name); return &osFile{File: f}, err } + osNewFile = os.NewFile + osCreate = os.Create + osStat = os.Stat + osIsNotExist = os.IsNotExist + osIsExist = os.IsExist + osMkdirAll = os.MkdirAll + osRemoveAll = os.RemoveAll + osRename = os.Rename + osReadlink = os.Readlink + + execRun = func(name string, args ...string) error { return exec.Command(name, args...).Run() } +) + +const ( + sysMsMgcVal = syscall.MS_MGC_VAL + sysMsRdOnly = syscall.MS_RDONLY + sysEInval = syscall.EINVAL + sysSysIoctl = syscall.SYS_IOCTL + sysEBusy = syscall.EBUSY + + osORdOnly = os.O_RDONLY + osORdWr = os.O_RDWR + osOCreate = os.O_CREATE + osModeDevice = os.ModeDevice +) + +func toSysStatT(i interface{}) *sysStatT { + return 
(*sysStatT)(i.(*syscall.Stat_t)) +} diff --git a/runtime/graphdriver/driver.go b/runtime/graphdriver/driver.go new file mode 100644 index 0000000000..89fd03a624 --- /dev/null +++ b/runtime/graphdriver/driver.go @@ -0,0 +1,93 @@ +package graphdriver + +import ( + "fmt" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/utils" + "os" + "path" +) + +type InitFunc func(root string) (Driver, error) + +type Driver interface { + String() string + + Create(id, parent string) error + Remove(id string) error + + Get(id string) (dir string, err error) + Put(id string) + Exists(id string) bool + + Status() [][2]string + + Cleanup() error +} + +type Differ interface { + Diff(id string) (archive.Archive, error) + Changes(id string) ([]archive.Change, error) + ApplyDiff(id string, diff archive.ArchiveReader) error + DiffSize(id string) (bytes int64, err error) +} + +var ( + DefaultDriver string + // All registred drivers + drivers map[string]InitFunc + // Slice of drivers that should be used in an order + priority = []string{ + "aufs", + "devicemapper", + "vfs", + // experimental, has to be enabled manually for now + "btrfs", + } +) + +func init() { + drivers = make(map[string]InitFunc) +} + +func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +func GetDriver(name, home string) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(path.Join(home, name)) + } + return nil, fmt.Errorf("No such driver: %s", name) +} + +func New(root string) (driver Driver, err error) { + for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} { + if name != "" { + return GetDriver(name, root) + } + } + + // Check for priority drivers first + for _, name := range priority { + if driver, err = GetDriver(name, root); err != nil { + utils.Debugf("Error loading driver %s: %s", name, err) + continue + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for _, initFunc := range drivers { + if driver, err = initFunc(root); err != nil { + continue + } + return driver, nil + } + return nil, err +} diff --git a/runtime/graphdriver/vfs/driver.go b/runtime/graphdriver/vfs/driver.go new file mode 100644 index 0000000000..10a7b223a4 --- /dev/null +++ b/runtime/graphdriver/vfs/driver.go @@ -0,0 +1,95 @@ +package vfs + +import ( + "fmt" + "github.com/dotcloud/docker/runtime/graphdriver" + "os" + "os/exec" + "path" +) + +func init() { + graphdriver.Register("vfs", Init) +} + +func Init(home string) (graphdriver.Driver, error) { + d := &Driver{ + home: home, + } + return d, nil +} + +type Driver struct { + home string +} + +func (d *Driver) String() string { + return "vfs" +} + +func (d *Driver) Status() [][2]string { + return nil +} + +func (d *Driver) Cleanup() error { + return nil +} + +func copyDir(src, dst string) error { + if output, err := exec.Command("cp", "-aT", "--reflink=auto", src, dst).CombinedOutput(); err != nil { + return fmt.Errorf("Error VFS copying directory: %s (%s)", err, output) + } + return nil +} + +func (d *Driver) Create(id string, parent string) error { + dir := d.dir(id) + if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { + return err + } + if err := os.Mkdir(dir, 0700); err != nil { + return err + } + if parent == "" { + return nil + } + parentDir, err := d.Get(parent) + if err != nil { + return fmt.Errorf("%s: %s", parent, err) + } + if err := 
copyDir(parentDir, dir); err != nil { + return err + } + return nil +} + +func (d *Driver) dir(id string) string { + return path.Join(d.home, "dir", path.Base(id)) +} + +func (d *Driver) Remove(id string) error { + if _, err := os.Stat(d.dir(id)); err != nil { + return err + } + return os.RemoveAll(d.dir(id)) +} + +func (d *Driver) Get(id string) (string, error) { + dir := d.dir(id) + if st, err := os.Stat(dir); err != nil { + return "", err + } else if !st.IsDir() { + return "", fmt.Errorf("%s: not a directory", dir) + } + return dir, nil +} + +func (d *Driver) Put(id string) { + // The vfs driver has no runtime resources (e.g. mounts) + // to clean up, so we don't need anything here +} + +func (d *Driver) Exists(id string) bool { + _, err := os.Stat(d.dir(id)) + return err == nil +} diff --git a/runtime/runtime.go b/runtime/runtime.go index f75a4df048..25edd774d8 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -11,11 +11,11 @@ import ( "github.com/dotcloud/docker/runtime/execdriver/execdrivers" "github.com/dotcloud/docker/runtime/execdriver/lxc" "github.com/dotcloud/docker/graph" - "github.com/dotcloud/docker/graphdriver" - "github.com/dotcloud/docker/graphdriver/aufs" - _ "github.com/dotcloud/docker/graphdriver/btrfs" - _ "github.com/dotcloud/docker/graphdriver/devmapper" - _ "github.com/dotcloud/docker/graphdriver/vfs" + "github.com/dotcloud/docker/runtime/graphdriver" + "github.com/dotcloud/docker/runtime/graphdriver/aufs" + _ "github.com/dotcloud/docker/runtime/graphdriver/btrfs" + _ "github.com/dotcloud/docker/runtime/graphdriver/devmapper" + _ "github.com/dotcloud/docker/runtime/graphdriver/vfs" "github.com/dotcloud/docker/image" _ "github.com/dotcloud/docker/runtime/networkdriver/lxc" "github.com/dotcloud/docker/runtime/networkdriver/portallocator" -- cgit v1.2.1 From edd8d2d3511b0b632149d1c1f2cfd2bad2df4679 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 14 Mar 2014 01:02:28 +0000 Subject: add no prune to rmi Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/client.go | 8 ++++++-- api/server.go | 1 + server/server.go | 8 ++++---- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/api/client.go b/api/client.go index 8ee61f6c22..1aceed50e7 100644 --- a/api/client.go +++ b/api/client.go @@ -817,8 +817,9 @@ func (cli *DockerCli) CmdPort(args ...string) error { // 'docker rmi IMAGE' removes all images with the name IMAGE func (cli *DockerCli) CmdRmi(args ...string) error { var ( - cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images") - force = cmd.Bool([]string{"f", "-force"}, false, "Force") + cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images") + force = cmd.Bool([]string{"f", "-force"}, false, "Force") + noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") ) if err := cmd.Parse(args); err != nil { return nil @@ -832,6 +833,9 @@ func (cli *DockerCli) CmdRmi(args ...string) error { if *force { v.Set("force", "1") } + if *noprune { + v.Set("noprune", "1") + } var encounteredError error for _, name := range cmd.Args() { diff --git a/api/server.go b/api/server.go index 2d878b957a..774e3131ef 100644 --- a/api/server.go +++ b/api/server.go @@ -624,6 +624,7 @@ func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWr var job = eng.Job("image_delete", vars["name"]) streamJSON(job, w, false) job.Setenv("force", r.Form.Get("force")) + job.Setenv("noprune", r.Form.Get("noprune")) return job.Run() } diff --git a/server/server.go b/server/server.go index 
eb9a3a396b..736d54ae52 100644 --- a/server/server.go +++ b/server/server.go @@ -1839,7 +1839,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status { return engine.StatusOK } -func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force bool) error { +func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, noprune bool) error { var ( repoName, tag string tags = []string{} @@ -1920,8 +1920,8 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo out.Set("Deleted", img.ID) imgs.Add(out) srv.LogEvent("delete", img.ID, "") - if img.Parent != "" { - err := srv.DeleteImage(img.Parent, imgs, false, force) + if img.Parent != "" && !noprune { + err := srv.DeleteImage(img.Parent, imgs, false, force, noprune) if first { return err } @@ -1938,7 +1938,7 @@ func (srv *Server) ImageDelete(job *engine.Job) engine.Status { return job.Errorf("Usage: %s IMAGE", job.Name) } imgs := engine.NewTable("", 0) - if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force")); err != nil { + if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil { return job.Error(err) } if len(imgs.Data) == 0 { -- cgit v1.2.1 From afcaaffd0bb84d0c97f4a3ef54f4d35ba3942f65 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 14 Mar 2014 01:04:35 +0000 Subject: update doc Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- docs/sources/reference/api/docker_remote_api.rst | 1 + docs/sources/reference/api/docker_remote_api_v1.10.rst | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/sources/reference/api/docker_remote_api.rst b/docs/sources/reference/api/docker_remote_api.rst index 93558fa974..ca7463f351 100644 --- a/docs/sources/reference/api/docker_remote_api.rst +++ b/docs/sources/reference/api/docker_remote_api.rst @@ -50,6 +50,7 @@ What's new **New!** You can now use the force parameter to force delete of an image, even if it's tagged in multiple repositories. + **New!** You can now use the noprune parameter to prevent the deletion of parent images .. http:delete:: /containers/(id) diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.rst b/docs/sources/reference/api/docker_remote_api_v1.10.rst index 20af253f0e..649f58196e 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.10.rst @@ -931,6 +931,7 @@ Remove an image ] :query force: 1/True/true or 0/False/false, default false + :query noprune: 1/True/true or 0/False/false, default false :statuscode 200: no error :statuscode 404: no such image :statuscode 409: conflict -- cgit v1.2.1 From a18d08177c1b11fd1e88c27d78a7256b5d498d64 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 14 Mar 2014 17:13:11 +0000 Subject: Add missing client doc Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- docs/sources/reference/commandline/cli.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 6d55a0aedc..f302862b9e 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1092,6 +1092,7 @@ containers will not be deleted. 
Remove one or more images -f, --force=false: Force + --no-prune=false: Do not delete untagged parents Removing tagged images ~~~~~~~~~~~~~~~~~~~~~~ -- cgit v1.2.1 From 2bddcd68b4b927d36ffadd80e098f6d4ae2cf5d6 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 14 Mar 2014 15:07:52 -0700 Subject: Gofmt imports Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- graph/graph.go | 2 +- graph/tags_unit_test.go | 2 +- image/image.go | 2 +- integration/graph_test.go | 2 +- runtime/container.go | 4 ++-- runtime/execdriver/execdrivers/execdrivers.go | 2 +- runtime/execdriver/lxc/driver.go | 2 +- runtime/execdriver/lxc/init.go | 2 +- runtime/execdriver/native/default_template.go | 2 +- runtime/execdriver/native/driver.go | 2 +- runtime/graphdriver/aufs/aufs.go | 2 +- runtime/networkdriver/ipallocator/allocator.go | 2 +- runtime/networkdriver/lxc/driver.go | 4 ++-- runtime/runtime.go | 10 +++++----- 14 files changed, 20 insertions(+), 20 deletions(-) diff --git a/graph/graph.go b/graph/graph.go index f71b8a003e..4349cac129 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -4,9 +4,9 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/utils" "io" "io/ioutil" diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go index cae5c2916e..17773912cf 100644 --- a/graph/tags_unit_test.go +++ b/graph/tags_unit_test.go @@ -2,9 +2,9 @@ package graph import ( "bytes" + "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/runtime/graphdriver" _ "github.com/dotcloud/docker/runtime/graphdriver/vfs" // import the vfs driver so it is used in the tests - "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/utils" "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" diff --git a/image/image.go b/image/image.go index b2ddb03b0b..33503bad5a 100644 --- a/image/image.go +++ b/image/image.go @@ -4,8 +4,8 @@ import ( "encoding/json" "fmt" "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/utils" "io/ioutil" "os" diff --git a/integration/graph_test.go b/integration/graph_test.go index ea9ddc7ae9..5602b3938d 100644 --- a/integration/graph_test.go +++ b/integration/graph_test.go @@ -5,8 +5,8 @@ import ( "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/graph" - "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/utils" "io" "io/ioutil" diff --git a/runtime/container.go b/runtime/container.go index 3c7aa22751..9b138c89c1 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -6,12 +6,12 @@ import ( "fmt" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/runtime/execdriver" - "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/links" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/runtime/execdriver" + "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/utils" "io" "io/ioutil" diff --git 
a/runtime/execdriver/execdrivers/execdrivers.go b/runtime/execdriver/execdrivers/execdrivers.go index 29fa5b44f9..9e277c86df 100644 --- a/runtime/execdriver/execdrivers/execdrivers.go +++ b/runtime/execdriver/execdrivers/execdrivers.go @@ -2,10 +2,10 @@ package execdrivers import ( "fmt" + "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/runtime/execdriver/lxc" "github.com/dotcloud/docker/runtime/execdriver/native" - "github.com/dotcloud/docker/pkg/sysinfo" "path" ) diff --git a/runtime/execdriver/lxc/driver.go b/runtime/execdriver/lxc/driver.go index fa2ecf9d77..b7311cc1ff 100644 --- a/runtime/execdriver/lxc/driver.go +++ b/runtime/execdriver/lxc/driver.go @@ -2,8 +2,8 @@ package lxc import ( "fmt" - "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/utils" "io/ioutil" "log" diff --git a/runtime/execdriver/lxc/init.go b/runtime/execdriver/lxc/init.go index 946c8c930f..a64bca15b2 100644 --- a/runtime/execdriver/lxc/init.go +++ b/runtime/execdriver/lxc/init.go @@ -3,9 +3,9 @@ package lxc import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/pkg/netlink" "github.com/dotcloud/docker/pkg/user" + "github.com/dotcloud/docker/runtime/execdriver" "github.com/syndtr/gocapability/capability" "io/ioutil" "net" diff --git a/runtime/execdriver/native/default_template.go b/runtime/execdriver/native/default_template.go index 0c382059e9..e76be6ebec 100644 --- a/runtime/execdriver/native/default_template.go +++ b/runtime/execdriver/native/default_template.go @@ -2,9 +2,9 @@ package native import ( "fmt" - "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/runtime/execdriver" "os" ) diff --git a/runtime/execdriver/native/driver.go b/runtime/execdriver/native/driver.go index ff6c541cf9..bf7e8ccdec 100644 --- a/runtime/execdriver/native/driver.go +++ b/runtime/execdriver/native/driver.go @@ -3,12 +3,12 @@ package native import ( "encoding/json" "fmt" - "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/apparmor" "github.com/dotcloud/docker/pkg/libcontainer/nsinit" "github.com/dotcloud/docker/pkg/system" + "github.com/dotcloud/docker/runtime/execdriver" "io" "io/ioutil" "log" diff --git a/runtime/graphdriver/aufs/aufs.go b/runtime/graphdriver/aufs/aufs.go index 83a6579bc6..6f05ddd025 100644 --- a/runtime/graphdriver/aufs/aufs.go +++ b/runtime/graphdriver/aufs/aufs.go @@ -24,8 +24,8 @@ import ( "bufio" "fmt" "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/runtime/graphdriver" mountpk "github.com/dotcloud/docker/pkg/mount" + "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/utils" "os" "os/exec" diff --git a/runtime/networkdriver/ipallocator/allocator.go b/runtime/networkdriver/ipallocator/allocator.go index 2950e37003..70a7028bbe 100644 --- a/runtime/networkdriver/ipallocator/allocator.go +++ b/runtime/networkdriver/ipallocator/allocator.go @@ -3,8 +3,8 @@ package ipallocator import ( "encoding/binary" "errors" - "github.com/dotcloud/docker/runtime/networkdriver" "github.com/dotcloud/docker/pkg/collections" + "github.com/dotcloud/docker/runtime/networkdriver" "net" "sync" ) diff --git 
a/runtime/networkdriver/lxc/driver.go b/runtime/networkdriver/lxc/driver.go index 746bcfb5b0..827de2a609 100644 --- a/runtime/networkdriver/lxc/driver.go +++ b/runtime/networkdriver/lxc/driver.go @@ -3,12 +3,12 @@ package lxc import ( "fmt" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/pkg/iptables" + "github.com/dotcloud/docker/pkg/netlink" "github.com/dotcloud/docker/runtime/networkdriver" "github.com/dotcloud/docker/runtime/networkdriver/ipallocator" "github.com/dotcloud/docker/runtime/networkdriver/portallocator" "github.com/dotcloud/docker/runtime/networkdriver/portmapper" - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/netlink" "github.com/dotcloud/docker/utils" "io/ioutil" "log" diff --git a/runtime/runtime.go b/runtime/runtime.go index 25edd774d8..677d52acc5 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -7,21 +7,21 @@ import ( "github.com/dotcloud/docker/daemonconfig" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/graph" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/pkg/graphdb" + "github.com/dotcloud/docker/pkg/sysinfo" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/runtime/execdriver/execdrivers" "github.com/dotcloud/docker/runtime/execdriver/lxc" - "github.com/dotcloud/docker/graph" "github.com/dotcloud/docker/runtime/graphdriver" "github.com/dotcloud/docker/runtime/graphdriver/aufs" _ "github.com/dotcloud/docker/runtime/graphdriver/btrfs" _ "github.com/dotcloud/docker/runtime/graphdriver/devmapper" _ "github.com/dotcloud/docker/runtime/graphdriver/vfs" - "github.com/dotcloud/docker/image" _ "github.com/dotcloud/docker/runtime/networkdriver/lxc" "github.com/dotcloud/docker/runtime/networkdriver/portallocator" - "github.com/dotcloud/docker/pkg/graphdb" - "github.com/dotcloud/docker/pkg/sysinfo" - "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" "io" "io/ioutil" -- cgit v1.2.1 From 8b5cf51d600dc4f3611cf063c52cf3448e7b01e5 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Fri, 14 Mar 2014 22:33:41 -0400 Subject: Disable automatic killing of containers when docker stop fails Docker-DCO-1.1-Signed-off-by: Brian Goff (github: cpuguy83) --- api/client.go | 6 +++--- docs/sources/reference/api/docker_remote_api_v1.9.rst | 4 ++-- docs/sources/reference/commandline/cli.rst | 6 +++--- runtime/container.go | 12 ++---------- 4 files changed, 10 insertions(+), 18 deletions(-) diff --git a/api/client.go b/api/client.go index 715f58ab06..6191f1b001 100644 --- a/api/client.go +++ b/api/client.go @@ -477,8 +477,8 @@ func (cli *DockerCli) CmdInfo(args ...string) error { } func (cli *DockerCli) CmdStop(args ...string) error { - cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)") - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.") + cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM)") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop.") if err := cmd.Parse(args); err != nil { return nil } @@ -505,7 +505,7 @@ func (cli *DockerCli) CmdStop(args ...string) error { func (cli *DockerCli) CmdRestart(args ...string) error { cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a 
running container") - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop. Default=10") if err := cmd.Parse(args); err != nil { return nil } diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.rst b/docs/sources/reference/api/docker_remote_api_v1.9.rst index 27812457bb..def38edd55 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.9.rst @@ -432,7 +432,7 @@ Stop a container HTTP/1.1 204 OK - :query t: number of seconds to wait before killing the container + :query t: number of seconds to wait for the container to stop :statuscode 204: no error :statuscode 404: no such container :statuscode 500: server error @@ -457,7 +457,7 @@ Restart a container HTTP/1.1 204 OK - :query t: number of seconds to wait before killing the container + :query t: number of seconds to wait for the container to stop :statuscode 204: no error :statuscode 404: no such container :statuscode 500: server error diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 2371ed1b5f..c780445179 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1366,11 +1366,11 @@ This example shows 5 containers that might be set up to test a web application c Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] - Stop a running container (Send SIGTERM, and then SIGKILL after grace period) + Stop a running container (Send SIGTERM) - -t, --time=10: Number of seconds to wait for the container to stop before killing it. + -t, --time=10: Number of seconds to wait for the container to stop. -The main process inside the container will receive SIGTERM, and after a grace period, SIGKILL +The main process inside the container will receive SIGTERM. .. _cli_tag: diff --git a/runtime/container.go b/runtime/container.go index ee545db201..acda4db169 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -890,20 +890,12 @@ func (container *Container) Stop(seconds int) error { // 1. Send a SIGTERM if err := container.KillSig(15); err != nil { - utils.Debugf("Error sending kill SIGTERM: %s", err) - log.Print("Failed to send SIGTERM to the process, force killing") - if err := container.KillSig(9); err != nil { - return err - } + return err } // 2. Wait for the process to exit on its own if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil { - log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) - // 3. 
If it doesn't, then send SIGKILL - if err := container.Kill(); err != nil { - return err - } + return err } return nil } -- cgit v1.2.1 From 326f6a4b4d942bf75ecb003e5c0d439bcb9d67cf Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 17 Mar 2014 19:17:40 +0000 Subject: fix tests Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- integration/commands_test.go | 2 +- integration/server_test.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/integration/commands_test.go b/integration/commands_test.go index dba15842c7..5804d9f351 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -1062,7 +1062,7 @@ func TestContainerOrphaning(t *testing.T) { // remove the second image by name resp := engine.NewTable("", 0) - if err := srv.DeleteImage(imageName, resp, true, false); err == nil { + if err := srv.DeleteImage(imageName, resp, true, false, false); err == nil { t.Fatal("Expected error, got none") } diff --git a/integration/server_test.go b/integration/server_test.go index 2455c766e3..a401f1306e 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -36,7 +36,7 @@ func TestImageTagImageDelete(t *testing.T) { t.Errorf("Expected %d images, %d found", nExpected, nActual) } - if err := srv.DeleteImage("utest/docker:tag2", engine.NewTable("", 0), true, false); err != nil { + if err := srv.DeleteImage("utest/docker:tag2", engine.NewTable("", 0), true, false, false); err != nil { t.Fatal(err) } @@ -48,7 +48,7 @@ func TestImageTagImageDelete(t *testing.T) { t.Errorf("Expected %d images, %d found", nExpected, nActual) } - if err := srv.DeleteImage("utest:5000/docker:tag3", engine.NewTable("", 0), true, false); err != nil { + if err := srv.DeleteImage("utest:5000/docker:tag3", engine.NewTable("", 0), true, false, false); err != nil { t.Fatal(err) } @@ -57,7 +57,7 @@ func TestImageTagImageDelete(t *testing.T) { nExpected = len(initialImages.Data[0].GetList("RepoTags")) + 1 nActual = len(images.Data[0].GetList("RepoTags")) - if err := srv.DeleteImage("utest:tag1", engine.NewTable("", 0), true, false); err != nil { + if err := srv.DeleteImage("utest:tag1", engine.NewTable("", 0), true, false, false); err != nil { t.Fatal(err) } @@ -579,7 +579,7 @@ func TestRmi(t *testing.T) { t.Fatalf("Expected 2 new images, found %d.", images.Len()-initialImages.Len()) } - if err = srv.DeleteImage(imageID, engine.NewTable("", 0), true, false); err != nil { + if err = srv.DeleteImage(imageID, engine.NewTable("", 0), true, false, false); err != nil { t.Fatal(err) } @@ -815,7 +815,7 @@ func TestDeleteTagWithExistingContainers(t *testing.T) { // Try to remove the tag imgs := engine.NewTable("", 0) - if err := srv.DeleteImage("utest:tag1", imgs, true, false); err != nil { + if err := srv.DeleteImage("utest:tag1", imgs, true, false, false); err != nil { t.Fatal(err) } -- cgit v1.2.1 From 1dfc44073399aadb226c1b4c1909fb15c033d44a Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 17 Mar 2014 19:33:40 +0000 Subject: fix panic in monitor Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- runtime/container.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/container.go b/runtime/container.go index 9b138c89c1..5241c5cfe5 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -785,7 +785,7 @@ func (container *Container) monitor(callback execdriver.StartCallback) error { utils.Errorf("Error running container: %s", err) } - if container.runtime.srv.IsRunning() { + if container.runtime != nil && 
container.runtime.srv != nil && container.runtime.srv.IsRunning() { container.State.SetStopped(exitCode) // FIXME: there is a race condition here which causes this to fail during the unit tests. -- cgit v1.2.1 From 53c5b1856d91093fed1c2d3f038d227f5fdef4b8 Mon Sep 17 00:00:00 2001 From: Timothy Hobbs Date: Sat, 15 Mar 2014 23:04:36 +0100 Subject: Add failing test case for issue #4681 Add a failing test case for an issue where docker is not creating a loopback device if networking is dissabled. Docker-DCO-1.1-Signed-off-by: Timothy Hobbs (github: https://github.com/timthelion) --- integration/container_test.go | 69 +++++++++++++++++++++++++++++-------------- 1 file changed, 47 insertions(+), 22 deletions(-) diff --git a/integration/container_test.go b/integration/container_test.go index 010883a709..663b350638 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -434,28 +434,6 @@ func TestOutput(t *testing.T) { } } -func TestContainerNetwork(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - if err := container.Run(); err != nil { - t.Fatal(err) - } - if code := container.State.GetExitCode(); code != 0 { - t.Fatalf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", code) - } -} - func TestKillDifferentUser(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) @@ -1523,6 +1501,53 @@ func TestVolumesFromWithVolumes(t *testing.T) { } } +func TestContainerNetwork(t *testing.T) { + runtime := mkRuntime(t) + defer nuke(runtime) + container, _, err := runtime.Create( + &runconfig.Config{ + Image: GetTestImage(runtime).ID, + // If I change this to ping 8.8.8.8 it fails. Any idea why? - timthelion + Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, + }, + "", + ) + if err != nil { + t.Fatal(err) + } + defer runtime.Destroy(container) + if err := container.Run(); err != nil { + t.Fatal(err) + } + if code := container.State.GetExitCode(); code != 0 { + t.Fatalf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", code) + } +} + +// Issue #4681 +func TestLoopbackFunctionsWhenNetworkingIsDissabled(t *testing.T) { + runtime := mkRuntime(t) + defer nuke(runtime) + container, _, err := runtime.Create( + &runconfig.Config{ + Image: GetTestImage(runtime).ID, + Cmd: []string{"ping", "-c", "1", "127.0.0.1"}, + NetworkDisabled: true, + }, + "", + ) + if err != nil { + t.Fatal(err) + } + defer runtime.Destroy(container) + if err := container.Run(); err != nil { + t.Fatal(err) + } + if code := container.State.GetExitCode(); code != 0 { + t.Fatalf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", code) + } +} + func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) -- cgit v1.2.1 From 353df19ab7009f6555dee506841ae0b690a08768 Mon Sep 17 00:00:00 2001 From: Timothy Hobbs Date: Sun, 16 Mar 2014 01:01:31 +0100 Subject: Fix issue #4681 - No loopback interface within container when networking is disabled. 
Docker-DCO-1.1-Signed-off-by: Timothy Hobbs (github: https://github.com/timthelion) Remove loopback code from veth strategy Docker-DCO-1.1-Signed-off-by: Timothy Hobbs (github: https://github.com/timthelion) Looback strategy: Get rid of uneeded code in Create Docker-DCO-1.1-Signed-off-by: Timothy Hobbs (github: https://github.com/timthelion) Use append when building network strategy list Docker-DCO-1.1-Signed-off-by: Timothy Hobbs (github: https://github.com/timthelion) Swap loopback and veth strategies in Networks list Docker-DCO-1.1-Signed-off-by: Timothy Hobbs (github: https://github.com/timthelion) Revert "Swap loopback and veth strategies in Networks list" This reverts commit 3b8b2c8454171d79bed5e9a80165172617e92fc7. Docker-DCO-1.1-Signed-off-by: Timothy Hobbs (github: https://github.com/timthelion) When initializing networks, only return from the loop if there is an error Docker-DCO-1.1-Signed-off-by: Timothy Hobbs (github: https://github.com/timthelion) --- pkg/libcontainer/network/loopback.go | 24 ++++++++++++++++++ pkg/libcontainer/network/strategy.go | 3 ++- pkg/libcontainer/network/veth.go | 6 ----- pkg/libcontainer/nsinit/init.go | 6 ++++- runtime/execdriver/native/default_template.go | 35 +++++++++++++++++++-------- 5 files changed, 56 insertions(+), 18 deletions(-) create mode 100644 pkg/libcontainer/network/loopback.go diff --git a/pkg/libcontainer/network/loopback.go b/pkg/libcontainer/network/loopback.go new file mode 100644 index 0000000000..6215061dc2 --- /dev/null +++ b/pkg/libcontainer/network/loopback.go @@ -0,0 +1,24 @@ +package network + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/libcontainer" +) + +// Loopback is a network strategy that provides a basic loopback device +type Loopback struct { +} + +func (l *Loopback) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error { + return nil +} + +func (l *Loopback) Initialize(config *libcontainer.Network, context libcontainer.Context) error { + if err := SetMtu("lo", config.Mtu); err != nil { + return fmt.Errorf("set lo mtu to %d %s", config.Mtu, err) + } + if err := InterfaceUp("lo"); err != nil { + return fmt.Errorf("lo up %s", err) + } + return nil +} diff --git a/pkg/libcontainer/network/strategy.go b/pkg/libcontainer/network/strategy.go index 234fcc0aa2..693790d280 100644 --- a/pkg/libcontainer/network/strategy.go +++ b/pkg/libcontainer/network/strategy.go @@ -10,7 +10,8 @@ var ( ) var strategies = map[string]NetworkStrategy{ - "veth": &Veth{}, + "veth": &Veth{}, + "loopback": &Loopback{}, } // NetworkStrategy represents a specific network configuration for diff --git a/pkg/libcontainer/network/veth.go b/pkg/libcontainer/network/veth.go index 3ab1b2393b..3df0cd61ee 100644 --- a/pkg/libcontainer/network/veth.go +++ b/pkg/libcontainer/network/veth.go @@ -68,12 +68,6 @@ func (v *Veth) Initialize(config *libcontainer.Network, context libcontainer.Con if err := InterfaceUp("eth0"); err != nil { return fmt.Errorf("eth0 up %s", err) } - if err := SetMtu("lo", config.Mtu); err != nil { - return fmt.Errorf("set lo mtu to %d %s", config.Mtu, err) - } - if err := InterfaceUp("lo"); err != nil { - return fmt.Errorf("lo up %s", err) - } if config.Gateway != "" { if err := SetDefaultGateway(config.Gateway); err != nil { return fmt.Errorf("set gateway to %s %s", config.Gateway, err) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index e329becbdf..117ae875ed 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -134,7 +134,11 @@ func 
setupNetwork(container *libcontainer.Container, context libcontainer.Contex if err != nil { return err } - return strategy.Initialize(config, context) + + err1 := strategy.Initialize(config, context) + if err1 != nil { + return err1 + } } return nil } diff --git a/runtime/execdriver/native/default_template.go b/runtime/execdriver/native/default_template.go index e76be6ebec..47b19c9d66 100644 --- a/runtime/execdriver/native/default_template.go +++ b/runtime/execdriver/native/default_template.go @@ -19,19 +19,34 @@ func createContainer(c *execdriver.Command) *libcontainer.Container { container.WorkingDir = c.WorkingDir container.Env = c.Env + loopbackNetwork := libcontainer.Network{ + // Using constants here because + // when networking is disabled + // These settings simply don't exist: + // https://github.com/dotcloud/docker/blob/c7ea6e5da80af3d9ba7558f876efbf0801d988d8/runtime/container.go#L367 + Mtu: 1500, + Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0), + Gateway: "localhost", + Type: "loopback", + Context: libcontainer.Context{}, + } + + container.Networks = []*libcontainer.Network{ + &loopbackNetwork, + } + if c.Network != nil { - container.Networks = []*libcontainer.Network{ - { - Mtu: c.Network.Mtu, - Address: fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen), - Gateway: c.Network.Gateway, - Type: "veth", - Context: libcontainer.Context{ - "prefix": "veth", - "bridge": c.Network.Bridge, - }, + vethNetwork := libcontainer.Network{ + Mtu: c.Network.Mtu, + Address: fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen), + Gateway: c.Network.Gateway, + Type: "veth", + Context: libcontainer.Context{ + "prefix": "veth", + "bridge": c.Network.Bridge, }, } + container.Networks = append(container.Networks, &vethNetwork) } container.Cgroups.Name = c.ID -- cgit v1.2.1 From 659b719aa66e7ed0c3104d3704fa61035050ad82 Mon Sep 17 00:00:00 2001 From: Timothy Hobbs Date: Sun, 16 Mar 2014 20:52:27 +0100 Subject: Refactor out interface specific information from execdriver.Network Docker-DCO-1.1-Signed-off-by: Timothy Hobbs (github: https://github.com/timthelion) --- runtime/container.go | 8 ++++++-- runtime/execdriver/driver.go | 10 +++++++--- runtime/execdriver/lxc/driver.go | 10 ++++++---- runtime/execdriver/lxc/lxc_template.go | 6 +++--- runtime/execdriver/lxc/lxc_template_unit_test.go | 8 ++++++++ runtime/execdriver/native/default_template.go | 14 +++++--------- 6 files changed, 35 insertions(+), 21 deletions(-) diff --git a/runtime/container.go b/runtime/container.go index 2a30715206..73bad67d6a 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -364,14 +364,18 @@ func populateCommand(c *Container) { driverConfig []string ) + en = &execdriver.Network{ + Mtu: c.runtime.config.Mtu, + Interface: nil, + } + if !c.Config.NetworkDisabled { network := c.NetworkSettings - en = &execdriver.Network{ + en.Interface = &execdriver.NetworkInterface{ Gateway: network.Gateway, Bridge: network.Bridge, IPAddress: network.IPAddress, IPPrefixLen: network.IPPrefixLen, - Mtu: c.runtime.config.Mtu, } } diff --git a/runtime/execdriver/driver.go b/runtime/execdriver/driver.go index ff37b6bc5b..23e31ee8d9 100644 --- a/runtime/execdriver/driver.go +++ b/runtime/execdriver/driver.go @@ -84,11 +84,15 @@ type Driver interface { // Network settings of the container type Network struct { + Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled + Mtu int `json:"mtu"` +} + +type NetworkInterface struct { Gateway string `json:"gateway"` IPAddress string 
`json:"ip"` Bridge string `json:"bridge"` IPPrefixLen int `json:"ip_prefix_len"` - Mtu int `json:"mtu"` } type Resources struct { @@ -118,8 +122,8 @@ type Command struct { WorkingDir string `json:"working_dir"` ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver Tty bool `json:"tty"` - Network *Network `json:"network"` // if network is nil then networking is disabled - Config []string `json:"config"` // generic values that specific drivers can consume + Network *Network `json:"network"` + Config []string `json:"config"` // generic values that specific drivers can consume Resources *Resources `json:"resources"` Mounts []Mount `json:"mounts"` diff --git a/runtime/execdriver/lxc/driver.go b/runtime/execdriver/lxc/driver.go index b7311cc1ff..086e35f643 100644 --- a/runtime/execdriver/lxc/driver.go +++ b/runtime/execdriver/lxc/driver.go @@ -98,13 +98,15 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba DriverName, } - if c.Network != nil { + if c.Network.Interface != nil { params = append(params, - "-g", c.Network.Gateway, - "-i", fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen), - "-mtu", strconv.Itoa(c.Network.Mtu), + "-g", c.Network.Interface.Gateway, + "-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), ) } + params = append(params, + "-mtu", strconv.Itoa(c.Network.Mtu), + ) if c.User != "" { params = append(params, "-u", c.User) diff --git a/runtime/execdriver/lxc/lxc_template.go b/runtime/execdriver/lxc/lxc_template.go index db55287522..ce9d90469f 100644 --- a/runtime/execdriver/lxc/lxc_template.go +++ b/runtime/execdriver/lxc/lxc_template.go @@ -7,17 +7,17 @@ import ( ) const LxcTemplate = ` -{{if .Network}} +{{if .Network.Interface}} # network configuration lxc.network.type = veth -lxc.network.link = {{.Network.Bridge}} +lxc.network.link = {{.Network.Interface.Bridge}} lxc.network.name = eth0 -lxc.network.mtu = {{.Network.Mtu}} {{else}} # network is disabled (-n=false) lxc.network.type = empty lxc.network.flags = up {{end}} +lxc.network.mtu = {{.Network.Mtu}} # root filesystem {{$ROOTFS := .Rootfs}} diff --git a/runtime/execdriver/lxc/lxc_template_unit_test.go b/runtime/execdriver/lxc/lxc_template_unit_test.go index ae66371836..e613adf7a9 100644 --- a/runtime/execdriver/lxc/lxc_template_unit_test.go +++ b/runtime/execdriver/lxc/lxc_template_unit_test.go @@ -43,6 +43,10 @@ func TestLXCConfig(t *testing.T) { Memory: int64(mem), CpuShares: int64(cpu), }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, } p, err := driver.generateLXCConfig(command) if err != nil { @@ -75,6 +79,10 @@ func TestCustomLxcConfig(t *testing.T) { "lxc.utsname = docker", "lxc.cgroup.cpuset.cpus = 0,1", }, + Network: &execdriver.Network{ + Mtu: 1500, + Interface: nil, + }, } p, err := driver.generateLXCConfig(command) diff --git a/runtime/execdriver/native/default_template.go b/runtime/execdriver/native/default_template.go index 47b19c9d66..d744ab382f 100644 --- a/runtime/execdriver/native/default_template.go +++ b/runtime/execdriver/native/default_template.go @@ -20,11 +20,7 @@ func createContainer(c *execdriver.Command) *libcontainer.Container { container.Env = c.Env loopbackNetwork := libcontainer.Network{ - // Using constants here because - // when networking is disabled - // These settings simply don't exist: - // https://github.com/dotcloud/docker/blob/c7ea6e5da80af3d9ba7558f876efbf0801d988d8/runtime/container.go#L367 - Mtu: 1500, + 
Mtu: c.Network.Mtu, Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0), Gateway: "localhost", Type: "loopback", @@ -35,15 +31,15 @@ func createContainer(c *execdriver.Command) *libcontainer.Container { &loopbackNetwork, } - if c.Network != nil { + if c.Network.Interface != nil { vethNetwork := libcontainer.Network{ Mtu: c.Network.Mtu, - Address: fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen), - Gateway: c.Network.Gateway, + Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), + Gateway: c.Network.Interface.Gateway, Type: "veth", Context: libcontainer.Context{ "prefix": "veth", - "bridge": c.Network.Bridge, + "bridge": c.Network.Interface.Bridge, }, } container.Networks = append(container.Networks, &vethNetwork) -- cgit v1.2.1 From 25218f9b239784e6f38550a6e320bce56aaca3e1 Mon Sep 17 00:00:00 2001 From: Isabel Jimenez Date: Mon, 17 Mar 2014 01:40:36 -0700 Subject: adding configuration for timeout and disable it by default Docker-DCO-1.1-Signed-off-by: Isabel Jimenez (github: jimenez) --- api/server.go | 5 ++--- pkg/listenbuffer/buffer.go | 27 ++++++--------------------- 2 files changed, 8 insertions(+), 24 deletions(-) diff --git a/api/server.go b/api/server.go index 048c989540..8871fb06f8 100644 --- a/api/server.go +++ b/api/server.go @@ -26,7 +26,6 @@ import ( "strconv" "strings" "syscall" - "time" ) var ( @@ -1130,7 +1129,7 @@ func changeGroup(addr string, nameOrGid string) error { // ListenAndServe sets up the required http.Server and gets it listening for // each addr passed in and does protocol specific checking. -func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors bool, dockerVersion string, socketGroup string) error { +func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors bool, dockerVersion, socketGroup string) error { r, err := createRouter(eng, logging, enableCors, dockerVersion) if err != nil { @@ -1147,7 +1146,7 @@ func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors } } - l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock, 15*time.Minute) + l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock) if err != nil { return err } diff --git a/pkg/listenbuffer/buffer.go b/pkg/listenbuffer/buffer.go index c350805a7d..17572c8a0e 100644 --- a/pkg/listenbuffer/buffer.go +++ b/pkg/listenbuffer/buffer.go @@ -5,15 +5,10 @@ */ package listenbuffer -import ( - "fmt" - "net" - "time" -) +import "net" -// NewListenBuffer returns a listener listening on addr with the protocol. It sets the -// timeout to wait on first connection before an error is returned -func NewListenBuffer(proto, addr string, activate chan struct{}, timeout time.Duration) (net.Listener, error) { +// NewListenBuffer returns a listener listening on addr with the protocol. 
+func NewListenBuffer(proto, addr string, activate chan struct{}) (net.Listener, error) { wrapped, err := net.Listen(proto, addr) if err != nil { return nil, err @@ -22,7 +17,6 @@ func NewListenBuffer(proto, addr string, activate chan struct{}, timeout time.Du return &defaultListener{ wrapped: wrapped, activate: activate, - timeout: timeout, }, nil } @@ -30,7 +24,6 @@ type defaultListener struct { wrapped net.Listener // the real listener to wrap ready bool // is the listner ready to start accpeting connections activate chan struct{} - timeout time.Duration // how long to wait before we consider this an error } func (l *defaultListener) Close() error { @@ -47,15 +40,7 @@ func (l *defaultListener) Accept() (net.Conn, error) { if l.ready { return l.wrapped.Accept() } - - select { - case <-time.After(l.timeout): - // close the connection so any clients are disconnected - l.Close() - return nil, fmt.Errorf("timeout (%s) reached waiting for listener to become ready", l.timeout.String()) - case <-l.activate: - l.ready = true - return l.Accept() - } - panic("unreachable") + <-l.activate + l.ready = true + return l.Accept() } -- cgit v1.2.1 From 782eb5f03a9dde970172895da37be8c656fef3bd Mon Sep 17 00:00:00 2001 From: Andrea Turli Date: Mon, 17 Mar 2014 23:11:46 +0100 Subject: add softlayer installation instructions Docker-DCO-1.1-Signed-off-by: Andrea Turli (github: andreaturli) --- docs/sources/installation/index.rst | 1 + docs/sources/installation/softlayer.rst | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 docs/sources/installation/softlayer.rst diff --git a/docs/sources/installation/index.rst b/docs/sources/installation/index.rst index 39c1f6a292..ae0e9196fa 100644 --- a/docs/sources/installation/index.rst +++ b/docs/sources/installation/index.rst @@ -30,4 +30,5 @@ Contents: amazon rackspace google + softlayer binaries diff --git a/docs/sources/installation/softlayer.rst b/docs/sources/installation/softlayer.rst new file mode 100644 index 0000000000..ff65029f62 --- /dev/null +++ b/docs/sources/installation/softlayer.rst @@ -0,0 +1,27 @@ +:title: Installation on IBM SoftLayer +:description: Please note this project is currently under heavy development. It should not be used in production. +:keywords: IBM SoftLayer, virtualization, cloud, docker, documentation, installation + +IBM SoftLayer +============= + +.. include:: install_header.inc + +There are several ways to install Docker on IBM SoftLayer, but probably the simplest way is the following: + +IBM SoftLayer QuickStart +------------------------- + +1. Create an `IBM SoftLayer account `_. +2. Log in to the `SoftLayer Console `_. +3. Go to `Order Hourly Computing Instance `_ on your SoftLayer Console. +4. Create a new CCI using the default values for all the fields and choose: + +- *First Available* as ``Datacenter`` and +- *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)* as ``Operating System``. + +5. Click the *Continue Your Order* button at the bottom right and select *Go to checkout*. +6. Insert the required *User Metadata* and place the order. +7. Then continue with the :ref:`ubuntu_linux` instructions. + +Continue with the :ref:`hello_world` example. 
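The listenbuffer change earlier in this patch series drops the activation timeout, but the underlying idea is unchanged: a net.Listener wrapper whose Accept blocks on an activation channel until the daemon signals it is ready, then delegates to the real listener. A minimal, self-contained sketch of that pattern (the type and variable names here are illustrative, not Docker's own) could look like:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// bufferedListener wraps a real net.Listener and holds incoming
// Accept calls until the activate channel is closed.
type bufferedListener struct {
	wrapped  net.Listener
	ready    bool
	activate chan struct{}
}

func (l *bufferedListener) Accept() (net.Conn, error) {
	if l.ready {
		return l.wrapped.Accept()
	}
	<-l.activate // block until the server signals it is ready
	l.ready = true
	return l.Accept()
}

func (l *bufferedListener) Close() error   { return l.wrapped.Close() }
func (l *bufferedListener) Addr() net.Addr { return l.wrapped.Addr() }

func main() {
	activate := make(chan struct{})
	inner, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	l := &bufferedListener{wrapped: inner, activate: activate}

	// Simulate the daemon finishing its startup a little later.
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(activate)
	}()

	// A client that connects before activation simply waits in the
	// kernel's listen backlog until Accept is finally called.
	go func() {
		if conn, err := net.Dial("tcp", l.Addr().String()); err == nil {
			conn.Close()
		}
	}()

	conn, err := l.Accept() // blocks until activate is closed
	if err != nil {
		panic(err)
	}
	fmt.Println("accepted connection from", conn.RemoteAddr())
	conn.Close()
	l.Close()
}
```

Because a TCP listener's backlog accepts connections at the kernel level before Accept is called, early clients are queued rather than refused, which is what makes dropping the timeout workable.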
\ No newline at end of file -- cgit v1.2.1 From 8b159fca8a4bc8692c77db3536f7098a583270ad Mon Sep 17 00:00:00 2001 From: Andrea Turli Date: Mon, 17 Mar 2014 23:39:02 +0100 Subject: address comments from @jamtur01 Docker-DCO-1.1-Signed-off-by: Andrea Turli (github: andreaturli) --- docs/sources/installation/softlayer.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/sources/installation/softlayer.rst b/docs/sources/installation/softlayer.rst index ff65029f62..0fe3d6df5a 100644 --- a/docs/sources/installation/softlayer.rst +++ b/docs/sources/installation/softlayer.rst @@ -7,15 +7,13 @@ IBM SoftLayer .. include:: install_header.inc -There are several ways to install Docker on IBM SoftLayer, but probably the simplest way is the following: - IBM SoftLayer QuickStart ------------------------- 1. Create an `IBM SoftLayer account `_. 2. Log in to the `SoftLayer Console `_. -3. Go to `Order Hourly Computing Instance `_ on your SoftLayer Console. -4. Create a new CCI using the default values for all the fields and choose: +3. Go to `Order Hourly Computing Instance Wizard `_ on your SoftLayer Console. +4. Create a new *CloudLayer Computing Instance* (CCI) using the default values for all the fields and choose: - *First Available* as ``Datacenter`` and - *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)* as ``Operating System``. -- cgit v1.2.1 From 15a267b57d2394dd5cb697f9a80b6df4fc939a76 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 18 Mar 2014 01:34:43 +0000 Subject: add time since exit in docker ps Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- runtime/state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/state.go b/runtime/state.go index cce6912b46..1c682acd26 100644 --- a/runtime/state.go +++ b/runtime/state.go @@ -28,7 +28,7 @@ func (s *State) String() string { } return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } - return fmt.Sprintf("Exit %d", s.ExitCode) + return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, utils.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } func (s *State) IsRunning() bool { -- cgit v1.2.1 From c1f492755b8774005b3627da8ee001ee0b2df4eb Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 17 Mar 2014 19:26:08 -0600 Subject: Improve WORKDIR test to cover more edge cases Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- integration/buildfile_test.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index 9c986d74c2..95d5abb8a7 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -445,16 +445,18 @@ func TestBuildRelativeWorkdir(t *testing.T) { img, err := buildImage(testContextTemplate{` FROM {IMAGE} RUN [ "$PWD" = '/' ] - WORKDIR /test1 + WORKDIR test1 RUN [ "$PWD" = '/test1' ] - WORKDIR test2 - RUN [ "$PWD" = '/test1/test2' ] + WORKDIR /test2 + RUN [ "$PWD" = '/test2' ] + WORKDIR test3 + RUN [ "$PWD" = '/test2/test3' ] `, nil, nil}, t, nil, true) if err != nil { t.Fatal(err) } - if img.Config.WorkingDir != "/test1/test2" { - t.Fail() + if img.Config.WorkingDir != "/test2/test3" { + t.Fatalf("Expected workdir to be '/test2/test3', received '%s'", img.Config.WorkingDir) } } -- cgit v1.2.1 From 69299f041f78f8a86bf810142e740bbd72fe4b1b Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 17 Mar 2014 19:26:39 -0600 Subject: Add some documentation about relative WORKDIR values Docker-DCO-1.1-Signed-off-by: 
Andrew Page (github: tianon) --- docs/sources/reference/builder.rst | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/sources/reference/builder.rst b/docs/sources/reference/builder.rst index 0d8d750a04..1c8331e98f 100644 --- a/docs/sources/reference/builder.rst +++ b/docs/sources/reference/builder.rst @@ -407,7 +407,16 @@ the image. The ``WORKDIR`` instruction sets the working directory for the ``RUN``, ``CMD`` and ``ENTRYPOINT`` Dockerfile commands that follow it. -It can be used multiple times in the one Dockerfile. +It can be used multiple times in the one Dockerfile. If a relative path is +provided, it will be relative to the path of the previous ``WORKDIR`` +instruction. For example: + + WORKDIR /a + WORKDIR b + WORKDIR c + RUN pwd + +The output of the final ``pwd`` command in this Dockerfile would be ``/a/b/c``. 3.11 ONBUILD ------------ -- cgit v1.2.1 From 4b1513f9c394fbfdf21998db4318251b4e8b6bc0 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 17 Mar 2014 17:42:16 -0700 Subject: Only unshare the mount namespace for execin Fixes #4728 Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/execin.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go index 39df4761a0..f8b8931390 100644 --- a/pkg/libcontainer/nsinit/execin.go +++ b/pkg/libcontainer/nsinit/execin.go @@ -14,10 +14,12 @@ import ( // ExecIn uses an existing pid and joins the pid's namespaces with the new command. func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) { - ns.logger.Println("unshare namespaces") - for _, ns := range container.Namespaces { - if err := system.Unshare(ns.Value); err != nil { - return -1, err + for _, nsv := range container.Namespaces { + // skip the PID namespace on unshare because it it not supported + if nsv.Key != "NEWPID" { + if err := system.Unshare(nsv.Value); err != nil { + return -1, err + } } } fds, err := ns.getNsFds(nspid, container) -- cgit v1.2.1 From 670ce98c60dbac1d46a59bd69bd20b569f4794f1 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 14 Mar 2014 14:23:54 -0400 Subject: graphdriver: build tags Enable build tags for all the graphdrivers to be excludable. As an example: ``` $ go build $ ls -l docker -rwxr-xr-x 1 vbatts vbatts 18400158 Mar 14 14:22 docker* $ go build -tags "exclude_graphdriver_aufs exclude_graphdriver_vfs exclude_graphdriver_devicemapper" $ ls -l docker -rwxr-xr-x 1 vbatts vbatts 17467068 Mar 14 14:22 docker* ``` Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- hack/PACKAGERS.md | 17 +++++++++++++++++ runtime/runtime.go | 11 +++-------- runtime/runtime_aufs.go | 22 ++++++++++++++++++++++ runtime/runtime_devicemapper.go | 7 +++++++ runtime/runtime_no_aufs.go | 11 +++++++++++ runtime/runtime_vfs.go | 7 +++++++ 6 files changed, 67 insertions(+), 8 deletions(-) create mode 100644 runtime/runtime_aufs.go create mode 100644 runtime/runtime_devicemapper.go create mode 100644 runtime/runtime_no_aufs.go create mode 100644 runtime/runtime_vfs.go diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index 5dcb120689..0e513cd4fa 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -157,6 +157,23 @@ AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: export DOCKER_BUILDTAGS='apparmor' ``` +There are build tags for disabling graphdrivers as well. By default, support +for all graphdrivers are built in. 
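The exclusion works through Go build constraints: each graphdriver hook lives in a pair of files guarded by complementary `// +build` lines, and passing the tag removes one file from the build while a no-op variant takes its place. A minimal sketch of that file-pair pattern, with purely illustrative package, file, and tag names:

```go
// feature_enabled.go
// +build !exclude_feature

package app

const featureEnabled = true
```

```go
// feature_disabled.go
// +build exclude_feature

package app

const featureEnabled = false
```

Building with `go build -tags exclude_feature` compiles the second file instead of the first; without the tag, the first one is used and the feature stays enabled.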
+ +To disable vfs +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_vfs' +``` + +To disable devicemapper +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' +``` +To disable aufs +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' +``` + ### Static Daemon If it is feasible within the constraints of your distribution, you should diff --git a/runtime/runtime.go b/runtime/runtime.go index 677d52acc5..be15cb562d 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -16,10 +16,7 @@ import ( "github.com/dotcloud/docker/runtime/execdriver/execdrivers" "github.com/dotcloud/docker/runtime/execdriver/lxc" "github.com/dotcloud/docker/runtime/graphdriver" - "github.com/dotcloud/docker/runtime/graphdriver/aufs" _ "github.com/dotcloud/docker/runtime/graphdriver/btrfs" - _ "github.com/dotcloud/docker/runtime/graphdriver/devmapper" - _ "github.com/dotcloud/docker/runtime/graphdriver/vfs" _ "github.com/dotcloud/docker/runtime/networkdriver/lxc" "github.com/dotcloud/docker/runtime/networkdriver/portallocator" "github.com/dotcloud/docker/utils" @@ -652,11 +649,9 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (* return nil, err } - if ad, ok := driver.(*aufs.Driver); ok { - utils.Debugf("Migrating existing containers") - if err := ad.Migrate(config.Root, graph.SetupInitLayer); err != nil { - return nil, err - } + // Migrate the container if it is aufs and aufs is enabled + if err = migrateIfAufs(driver, config.Root); err != nil { + return nil, err } utils.Debugf("Creating images graph") diff --git a/runtime/runtime_aufs.go b/runtime/runtime_aufs.go new file mode 100644 index 0000000000..5a32615df5 --- /dev/null +++ b/runtime/runtime_aufs.go @@ -0,0 +1,22 @@ +// +build !exclude_graphdriver_aufs + +package runtime + +import ( + "github.com/dotcloud/docker/graph" + "github.com/dotcloud/docker/runtime/graphdriver" + "github.com/dotcloud/docker/runtime/graphdriver/aufs" + "github.com/dotcloud/docker/utils" +) + +// Given the graphdriver ad, if it is aufs, then migrate it. +// If aufs driver is not built, this func is a noop. 
+func migrateIfAufs(driver graphdriver.Driver, root string) error { + if ad, ok := driver.(*aufs.Driver); ok { + utils.Debugf("Migrating existing containers") + if err := ad.Migrate(root, graph.SetupInitLayer); err != nil { + return err + } + } + return nil +} diff --git a/runtime/runtime_devicemapper.go b/runtime/runtime_devicemapper.go new file mode 100644 index 0000000000..5b418b377a --- /dev/null +++ b/runtime/runtime_devicemapper.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_devicemapper + +package runtime + +import ( + _ "github.com/dotcloud/docker/runtime/graphdriver/devmapper" +) diff --git a/runtime/runtime_no_aufs.go b/runtime/runtime_no_aufs.go new file mode 100644 index 0000000000..05a01fe151 --- /dev/null +++ b/runtime/runtime_no_aufs.go @@ -0,0 +1,11 @@ +// +build exclude_graphdriver_aufs + +package runtime + +import ( + "github.com/dotcloud/docker/runtime/graphdriver" +) + +func migrateIfAufs(driver graphdriver.Driver, root string) error { + return nil +} diff --git a/runtime/runtime_vfs.go b/runtime/runtime_vfs.go new file mode 100644 index 0000000000..e1db736083 --- /dev/null +++ b/runtime/runtime_vfs.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_vfs + +package runtime + +import ( + _ "github.com/dotcloud/docker/runtime/graphdriver/vfs" +) -- cgit v1.2.1 From 448b64164df7795cdbd9be0d663269e6e4e4beeb Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 14 Mar 2014 16:10:07 -0400 Subject: runtime: no build tags for vfs driver Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- hack/PACKAGERS.md | 5 ----- runtime/runtime.go | 1 + runtime/runtime_vfs.go | 7 ------- 3 files changed, 1 insertion(+), 12 deletions(-) delete mode 100644 runtime/runtime_vfs.go diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index 0e513cd4fa..5afa381005 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -160,11 +160,6 @@ export DOCKER_BUILDTAGS='apparmor' There are build tags for disabling graphdrivers as well. By default, support for all graphdrivers are built in. 
-To disable vfs -```bash -export DOCKER_BUILDTAGS='exclude_graphdriver_vfs' -``` - To disable devicemapper ```bash export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' diff --git a/runtime/runtime.go b/runtime/runtime.go index be15cb562d..43230488a2 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -17,6 +17,7 @@ import ( "github.com/dotcloud/docker/runtime/execdriver/lxc" "github.com/dotcloud/docker/runtime/graphdriver" _ "github.com/dotcloud/docker/runtime/graphdriver/btrfs" + _ "github.com/dotcloud/docker/runtime/graphdriver/vfs" _ "github.com/dotcloud/docker/runtime/networkdriver/lxc" "github.com/dotcloud/docker/runtime/networkdriver/portallocator" "github.com/dotcloud/docker/utils" diff --git a/runtime/runtime_vfs.go b/runtime/runtime_vfs.go deleted file mode 100644 index e1db736083..0000000000 --- a/runtime/runtime_vfs.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !exclude_graphdriver_vfs - -package runtime - -import ( - _ "github.com/dotcloud/docker/runtime/graphdriver/vfs" -) -- cgit v1.2.1 From e5cbb5c906d37b14dbf0180d253f58f8995de571 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 18 Mar 2014 22:29:11 +1000 Subject: add env hint Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/use/working_with_links_names.rst | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/sources/use/working_with_links_names.rst b/docs/sources/use/working_with_links_names.rst index dc370c01c9..4acb6079c1 100644 --- a/docs/sources/use/working_with_links_names.rst +++ b/docs/sources/use/working_with_links_names.rst @@ -112,8 +112,16 @@ Accessing the network information along with the environment of the child container allows us to easily connect to the Redis service on the specific IP and port in the environment. +.. note:: + These Environment variables are only set for the first process in + the container. Similarly, some daemons (such as ``sshd``) will + scrub them when spawning shells for connection. + + You can work around this by storing the initial ``env`` in a file, + or looking at ``/proc/1/environ``. + Running ``docker ps`` shows the 2 containers, and the ``webapp/db`` -alias name for the redis container. +alias name for the Redis container. .. code-block:: bash -- cgit v1.2.1 From a70beda1ecfb049a3f80ad5b159ba51d653fd067 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 18 Mar 2014 15:52:00 +0100 Subject: devmapper: Increase timeout in waitClose to 10sec As reported in https://github.com/dotcloud/docker/issues/4389 we're currently seeing timeouts in waitClose on some systems. We already bumped the timeout in waitRemove() in https://github.com/dotcloud/docker/issues/4504. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- runtime/graphdriver/devmapper/deviceset.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go index f6b26655a3..4d33e243e0 100644 --- a/runtime/graphdriver/devmapper/deviceset.go +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -729,7 +729,7 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error { // waitRemove blocks until either: // a) the device registered at - is removed, -// or b) the 1 second timeout expires. +// or b) the 10 second timeout expires. 
func (devices *DeviceSet) waitRemove(devname string) error { utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname) defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname) @@ -760,7 +760,7 @@ func (devices *DeviceSet) waitRemove(devname string) error { // waitClose blocks until either: // a) the device registered at - is closed, -// or b) the 1 second timeout expires. +// or b) the 10 second timeout expires. func (devices *DeviceSet) waitClose(hash string) error { info := devices.Devices[hash] if info == nil { @@ -778,7 +778,9 @@ func (devices *DeviceSet) waitClose(hash string) error { if devinfo.OpenCount == 0 { break } - time.Sleep(1 * time.Millisecond) + devices.Unlock() + time.Sleep(10 * time.Millisecond) + devices.Lock() } if i == 1000 { return fmt.Errorf("Timeout while waiting for device %s to close", hash) -- cgit v1.2.1 From ae1dd52b19d075c2f4ba75cf0549c644725834e8 Mon Sep 17 00:00:00 2001 From: Viktor Vojnovski Date: Tue, 18 Mar 2014 17:10:22 +0100 Subject: Update ubuntulinux.rst Adding the Docker repository key fails if port 11371 not open. This would probably work for more people. --- docs/sources/installation/ubuntulinux.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index 6e79fb8cbc..6998be8571 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -72,7 +72,7 @@ First add the Docker repository key to your local keychain. .. code-block:: bash - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 + sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 Add the Docker repository to your apt sources list, update and install the ``lxc-docker`` package. 
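Returning to the devmapper change a little earlier in this series: the new waitClose loop bounds the wait at roughly ten seconds (1000 iterations of a 10 ms sleep) and, more importantly, drops the DeviceSet lock around each sleep so other goroutines are not blocked for the whole wait. The following is a minimal, self-contained sketch of that polling pattern only — `waitForClose` and the `openCount` probe are made-up stand-ins, not the real devicemapper query.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// waitForClose polls openCount until it reports zero users or the loop
// times out (1000 iterations x 10ms ~= 10s). The mutex is released around
// each sleep so other goroutines can still take it, which is the point of
// the devices.Unlock()/Lock() pair added in waitClose above.
func waitForClose(mu *sync.Mutex, openCount func() int) error {
	mu.Lock()
	defer mu.Unlock()
	for i := 0; i < 1000; i++ {
		if openCount() == 0 {
			return nil
		}
		mu.Unlock()
		time.Sleep(10 * time.Millisecond)
		mu.Lock()
	}
	return fmt.Errorf("timeout while waiting for device to close")
}

func main() {
	var mu sync.Mutex
	remaining := 3 // pretend three holders of the device go away over time
	err := waitForClose(&mu, func() int {
		remaining--
		return remaining
	})
	fmt.Println("done, err =", err)
}
```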
-- cgit v1.2.1 From f7b6fbbd7664634481c8519e58844d572423f3e1 Mon Sep 17 00:00:00 2001 From: Andy Kipp Date: Wed, 12 Mar 2014 17:22:57 -0400 Subject: Prevent dynamic allocation of previously allocated ports Docker-DCO-1.1-Signed-off-by: Andy Kipp (github: kippandrew) --- .../networkdriver/portallocator/portallocator.go | 32 +++++++++++++++++----- .../portallocator/portallocator_test.go | 7 +++++ 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/runtime/networkdriver/portallocator/portallocator.go b/runtime/networkdriver/portallocator/portallocator.go index 71cac82703..18ae9469e5 100644 --- a/runtime/networkdriver/portallocator/portallocator.go +++ b/runtime/networkdriver/portallocator/portallocator.go @@ -100,22 +100,30 @@ func ReleaseAll() error { } func registerDynamicPort(ip net.IP, proto string) (int, error) { - allocated := defaultAllocatedPorts[proto] - - port := nextPort(proto) - if port > EndPortRange { - return 0, ErrPortExceedsRange - } if !equalsDefault(ip) { registerIP(ip) ipAllocated := otherAllocatedPorts[ip.String()][proto] + + port, err := findNextPort(proto, ipAllocated) + if err != nil { + return 0, err + } ipAllocated.Push(port) + return port, nil + } else { + + allocated := defaultAllocatedPorts[proto] + + port, err := findNextPort(proto, allocated) + if err != nil { + return 0, err + } allocated.Push(port) + return port, nil } - return port, nil } func registerSetPort(ip net.IP, proto string, port int) error { @@ -142,6 +150,16 @@ func equalsDefault(ip net.IP) bool { return ip == nil || ip.Equal(defaultIP) } +func findNextPort(proto string, allocated *collections.OrderedIntSet) (int, error) { + port := 0 + for port = nextPort(proto); allocated.Exists(port); port = nextPort(proto) { + } + if port > EndPortRange { + return 0, ErrPortExceedsRange + } + return port, nil +} + func nextPort(proto string) int { c := currentDynamicPort[proto] + 1 currentDynamicPort[proto] = c diff --git a/runtime/networkdriver/portallocator/portallocator_test.go b/runtime/networkdriver/portallocator/portallocator_test.go index 603bd03bd7..3f3afa657b 100644 --- a/runtime/networkdriver/portallocator/portallocator_test.go +++ b/runtime/networkdriver/portallocator/portallocator_test.go @@ -181,4 +181,11 @@ func TestPortAllocation(t *testing.T) { if _, err := RequestPort(ip, "tcp", 80); err != nil { t.Fatal(err) } + + port, err = RequestPort(ip, "tcp", 0) + port2, err := RequestPort(ip, "tcp", port+1) + port3, err := RequestPort(ip, "tcp", 0) + if port3 == port2 { + t.Fatal("A dynamic port should never allocate a used port") + } } -- cgit v1.2.1 From 7a1db291fcedd50ce99649e95109187c76da255c Mon Sep 17 00:00:00 2001 From: Andy Kipp Date: Wed, 12 Mar 2014 17:26:17 -0400 Subject: Better test error message Docker-DCO-1.1-Signed-off-by: Andy Kipp (github: kippandrew) --- runtime/networkdriver/portallocator/portallocator_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/networkdriver/portallocator/portallocator_test.go b/runtime/networkdriver/portallocator/portallocator_test.go index 3f3afa657b..356da855b6 100644 --- a/runtime/networkdriver/portallocator/portallocator_test.go +++ b/runtime/networkdriver/portallocator/portallocator_test.go @@ -186,6 +186,6 @@ func TestPortAllocation(t *testing.T) { port2, err := RequestPort(ip, "tcp", port+1) port3, err := RequestPort(ip, "tcp", 0) if port3 == port2 { - t.Fatal("A dynamic port should never allocate a used port") + t.Fatal("Requesting a dynamic port should never allocate a used port") } } -- cgit v1.2.1 From 
73c416a20db8fe48302a6cf0db4c1c0585ed0739 Mon Sep 17 00:00:00 2001 From: Andy Kipp Date: Thu, 13 Mar 2014 13:30:07 -0400 Subject: Be more explicit in finding next port to allocate Docker-DCO-1.1-Signed-off-by: Andy Kipp (github: kippandrew) --- runtime/networkdriver/portallocator/portallocator.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/runtime/networkdriver/portallocator/portallocator.go b/runtime/networkdriver/portallocator/portallocator.go index 18ae9469e5..4d698f2de2 100644 --- a/runtime/networkdriver/portallocator/portallocator.go +++ b/runtime/networkdriver/portallocator/portallocator.go @@ -151,8 +151,9 @@ func equalsDefault(ip net.IP) bool { } func findNextPort(proto string, allocated *collections.OrderedIntSet) (int, error) { - port := 0 - for port = nextPort(proto); allocated.Exists(port); port = nextPort(proto) { + port := nextPort(proto) + for allocated.Exists(port) { + port = nextPort(proto) } if port > EndPortRange { return 0, ErrPortExceedsRange -- cgit v1.2.1 From 555416fd02b9e062385dcdaf0c4b9f5de61df388 Mon Sep 17 00:00:00 2001 From: Andy Kipp Date: Tue, 18 Mar 2014 13:29:24 -0400 Subject: Add err checks for port allocator tests Docker-DCO-1.1-Signed-off-by: Andy Kipp (github: kippandrew) --- runtime/networkdriver/portallocator/portallocator_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/runtime/networkdriver/portallocator/portallocator_test.go b/runtime/networkdriver/portallocator/portallocator_test.go index 356da855b6..f01bcfc99e 100644 --- a/runtime/networkdriver/portallocator/portallocator_test.go +++ b/runtime/networkdriver/portallocator/portallocator_test.go @@ -183,8 +183,17 @@ func TestPortAllocation(t *testing.T) { } port, err = RequestPort(ip, "tcp", 0) + if err != nil { + t.Fatal(err) + } port2, err := RequestPort(ip, "tcp", port+1) + if err != nil { + t.Fatal(err) + } port3, err := RequestPort(ip, "tcp", 0) + if err != nil { + t.Fatal(err) + } if port3 == port2 { t.Fatal("Requesting a dynamic port should never allocate a used port") } -- cgit v1.2.1 From 85a62d9b779bfb351e159f38c2fc95900a0532cd Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 13 Mar 2014 15:10:35 -0400 Subject: btrfs: build tags Default to the same build behavior, but allow a go build tag to disable building of the btrfs graphdriver go build -tags no_btrfs' ... 
$ go build $ objdump -S docker | grep btrfs | wc -l 194 $ go build -tags no_btrfs $ objdump -S docker | grep btrfs | wc -l 1 # that is a comment ;-) Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- runtime/runtime.go | 1 - runtime/runtime_btfs.go | 7 +++++++ 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 runtime/runtime_btfs.go diff --git a/runtime/runtime.go b/runtime/runtime.go index 43230488a2..4408e13902 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -16,7 +16,6 @@ import ( "github.com/dotcloud/docker/runtime/execdriver/execdrivers" "github.com/dotcloud/docker/runtime/execdriver/lxc" "github.com/dotcloud/docker/runtime/graphdriver" - _ "github.com/dotcloud/docker/runtime/graphdriver/btrfs" _ "github.com/dotcloud/docker/runtime/graphdriver/vfs" _ "github.com/dotcloud/docker/runtime/networkdriver/lxc" "github.com/dotcloud/docker/runtime/networkdriver/portallocator" diff --git a/runtime/runtime_btfs.go b/runtime/runtime_btfs.go new file mode 100644 index 0000000000..5e941386c3 --- /dev/null +++ b/runtime/runtime_btfs.go @@ -0,0 +1,7 @@ +// +build !no_btrfs + +package runtime + +import ( + _ "github.com/dotcloud/docker/runtime/graphdriver/btrfs" +) -- cgit v1.2.1 From 5cfea26bcfc218ca72eac7115fa257833f28b9f2 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 13 Mar 2014 17:38:54 -0400 Subject: btrfs: build tags correct filename and make the tag more readable Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- runtime/runtime_btfs.go | 7 ------- runtime/runtime_btrfs.go | 7 +++++++ 2 files changed, 7 insertions(+), 7 deletions(-) delete mode 100644 runtime/runtime_btfs.go create mode 100644 runtime/runtime_btrfs.go diff --git a/runtime/runtime_btfs.go b/runtime/runtime_btfs.go deleted file mode 100644 index 5e941386c3..0000000000 --- a/runtime/runtime_btfs.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !no_btrfs - -package runtime - -import ( - _ "github.com/dotcloud/docker/runtime/graphdriver/btrfs" -) diff --git a/runtime/runtime_btrfs.go b/runtime/runtime_btrfs.go new file mode 100644 index 0000000000..c59b103ff9 --- /dev/null +++ b/runtime/runtime_btrfs.go @@ -0,0 +1,7 @@ +// +build !exclude_graphdriver_btrfs + +package runtime + +import ( + _ "github.com/dotcloud/docker/runtime/graphdriver/btrfs" +) -- cgit v1.2.1 From 29c45e7f4fc616290e416f1b541e1739820af60c Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 13 Mar 2014 17:39:25 -0400 Subject: packagers: btrfs build tag docs Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- hack/PACKAGERS.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index 5afa381005..297d1500db 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -160,15 +160,23 @@ export DOCKER_BUILDTAGS='apparmor' There are build tags for disabling graphdrivers as well. By default, support for all graphdrivers are built in. -To disable devicemapper +To disable btrfs: +```bash +export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs' +``` + +To disable devicemapper: ```bash export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' ``` -To disable aufs + +To disable aufs: ```bash export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' ``` +NOTE: if you need to set more than one build tag, space seperate them. 
+ ### Static Daemon If it is feasible within the constraints of your distribution, you should -- cgit v1.2.1 From c76def2dd23cf90fdc80224f08530205b6dcba73 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 14 Mar 2014 04:19:05 +0000 Subject: typo Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- hack/PACKAGERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index 297d1500db..47e8413bf3 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -175,7 +175,7 @@ To disable aufs: export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' ``` -NOTE: if you need to set more than one build tag, space seperate them. +NOTE: if you need to set more than one build tag, space separate them. ### Static Daemon -- cgit v1.2.1 From c1f2abd89958a8731211ceb2885eed238a3bea8d Mon Sep 17 00:00:00 2001 From: LK4D4 Date: Tue, 18 Mar 2014 21:38:56 +0400 Subject: Using names in docker ps --since-id/--before-id, resolves #3565 Also renames --since-id/--before-id to --since/--before and add errors on non-existent containers. Docker-DCO-1.1-Signed-off-by: Alexandr Morozov (github: LK4D4) --- api/client.go | 4 ++-- contrib/completion/bash/docker | 9 +++------ contrib/completion/fish/docker.fish | 4 ++-- docs/sources/reference/commandline/cli.rst | 4 ++-- server/server.go | 23 ++++++++++++++++++++--- 5 files changed, 29 insertions(+), 15 deletions(-) diff --git a/api/client.go b/api/client.go index 10dd9406dc..8f515639a7 100644 --- a/api/client.go +++ b/api/client.go @@ -1337,8 +1337,8 @@ func (cli *DockerCli) CmdPs(args ...string) error { all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") - since := cmd.String([]string{"#sinceId", "-since-id"}, "", "Show only containers created since Id, include non-running ones.") - before := cmd.String([]string{"#beforeId", "-before-id"}, "", "Show only container created before Id, include non-running ones.") + since := cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show only containers created since Id or Name, include non-running ones.") + before := cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name, include non-running ones.") last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.") if err := cmd.Parse(args); err != nil { diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index 1449330986..e6a191d32b 100755 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -392,11 +392,8 @@ _docker_port() _docker_ps() { case "$prev" in - --since-id|--before-id) - COMPREPLY=( $( compgen -W "$( __docker_q ps -a -q )" -- "$cur" ) ) - # TODO replace this with __docker_containers_all - # see https://github.com/dotcloud/docker/issues/3565 - return + --since|--before) + __docker_containers_all ;; -n) return @@ -407,7 +404,7 @@ _docker_ps() case "$cur" in -*) - COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since-id --before-id -n" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) ) ;; *) ;; diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index 
b0c5f38a96..9c4339fe2b 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -154,13 +154,13 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print # ps complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before-id -d 'Show only container created before Id, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only container created before Id or Name, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since-id -d 'Show only containers created since Id, include non-running ones.' +complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.' # pull complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from the docker registry server' diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index dc6529ab6a..ebcf021115 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -967,13 +967,13 @@ new output from the container's stdout and stderr. List containers -a, --all=false: Show all containers. Only running containers are shown by default. - --before-id="": Show only container created before Id, include non-running ones. + --before="": Show only container created before Id or Name, include non-running ones. -l, --latest=false: Show only the latest created container, include non-running ones. -n=-1: Show n last created containers, include non-running ones. --no-trunc=false: Don't truncate output -q, --quiet=false: Only display numeric IDs -s, --size=false: Display sizes, not to be used with -q - --since-id="": Show only containers created since Id, include non-running ones. + --since="": Show only containers created since Id or Name, include non-running ones. Running ``docker ps`` showing 2 linked containers. 
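The server-side half of this change (shown in the next diff) resolves the `--since`/`--before` argument to an actual container up front, so an unknown name or ID fails with a clear error instead of silently matching nothing, and the listing loop then only compares full IDs. A rough sketch of that resolve-then-compare idea is below; `Container`, `resolve`, and the container list are hypothetical stand-ins, not the real runtime types.

```go
package main

import (
	"fmt"
	"strings"
)

type Container struct {
	ID   string
	Name string
}

// resolve accepts a full ID, a truncated ID prefix, or a name and returns
// the matching container, or an error if nothing matches.
func resolve(containers []Container, ref string) (*Container, error) {
	for i := range containers {
		c := &containers[i]
		if c.ID == ref || c.Name == ref || strings.HasPrefix(c.ID, ref) {
			return c, nil
		}
	}
	return nil, fmt.Errorf("Could not find container with name or id %s", ref)
}

func main() {
	list := []Container{
		{ID: "4c01db0b339c0000", Name: "webapp"},
		{ID: "d7886598dbe20000", Name: "db"},
	}

	// A name now works as well as an ID.
	before, err := resolve(list, "webapp")
	if err != nil {
		fmt.Println(err)
		return
	}

	// Once resolved, filtering only ever compares full IDs.
	for _, c := range list {
		if c.ID == before.ID {
			fmt.Println("found boundary container:", c.Name)
		}
	}
}
```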
diff --git a/server/server.go b/server/server.go index 93fc7b0bb1..e6243971a4 100644 --- a/server/server.go +++ b/server/server.go @@ -981,12 +981,27 @@ func (srv *Server) Containers(job *engine.Job) engine.Status { return nil }, -1) + var beforeCont, sinceCont *runtime.Container + if before != "" { + beforeCont = srv.runtime.Get(before) + if beforeCont == nil { + return job.Error(fmt.Errorf("Could not find container with name or id %s", before)) + } + } + + if since != "" { + sinceCont = srv.runtime.Get(since) + if sinceCont == nil { + return job.Error(fmt.Errorf("Could not find container with name or id %s", since)) + } + } + for _, container := range srv.runtime.List() { if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" { continue } if before != "" && !foundBefore { - if container.ID == before || utils.TruncateID(container.ID) == before { + if container.ID == beforeCont.ID { foundBefore = true } continue @@ -994,8 +1009,10 @@ func (srv *Server) Containers(job *engine.Job) engine.Status { if n > 0 && displayed == n { break } - if container.ID == since || utils.TruncateID(container.ID) == since { - break + if since != "" { + if container.ID == sinceCont.ID { + break + } } displayed++ out := &engine.Env{} -- cgit v1.2.1 From e27c635c06dcad61ce7185d95debef9ef7c9f7e0 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Tue, 18 Mar 2014 16:26:21 -0400 Subject: Add upstart nofile/noproc similar to systemd init Docker-DCO-1.1-Signed-off-by: Brian Goff (github: cpuguy83) --- contrib/init/upstart/docker.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf index 047f21c092..907a536c9c 100644 --- a/contrib/init/upstart/docker.conf +++ b/contrib/init/upstart/docker.conf @@ -2,6 +2,8 @@ description "Docker daemon" start on filesystem stop on runlevel [!2345] +limit nofile 524288 1048576 +limit nproc 524288 1048576 respawn -- cgit v1.2.1 From f52b2fdcbb29258c5b492fdb2479d473fcb42ca0 Mon Sep 17 00:00:00 2001 From: Johan Euphrosine Date: Mon, 3 Mar 2014 14:41:38 -0800 Subject: libcontainer/network: add netns strategy Docker-DCO-1.1-Signed-off-by: Johan Euphrosine (github: proppy) --- pkg/libcontainer/network/netns.go | 42 ++++++++++++++++++++++++++++++++++++ pkg/libcontainer/network/strategy.go | 2 ++ 2 files changed, 44 insertions(+) create mode 100644 pkg/libcontainer/network/netns.go diff --git a/pkg/libcontainer/network/netns.go b/pkg/libcontainer/network/netns.go new file mode 100644 index 0000000000..3eb8ee587a --- /dev/null +++ b/pkg/libcontainer/network/netns.go @@ -0,0 +1,42 @@ +package network + +import ( + "fmt" + "os" + "syscall" + + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/system" +) + +// crosbymichael: could make a network strategy that instead of returning veth pair names it returns a pid to an existing network namespace +type NetNS struct { +} + +func (v *NetNS) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error { + nsname, exists := n.Context["nsname"] + + if !exists { + return fmt.Errorf("nspath does not exist in network context") + } + + context["nspath"] = fmt.Sprintf("/var/run/netns/%s", nsname) + return nil +} + +func (v *NetNS) Initialize(config *libcontainer.Network, context libcontainer.Context) error { + nspath, exists := context["nspath"] + if !exists { + return fmt.Errorf("nspath does not exist in network context") + } + + f, err := os.OpenFile(nspath, os.O_RDONLY, 0) + if err != nil { + return fmt.Errorf("failed 
get network namespace fd: %v", err) + } + + if err := system.Setns(f.Fd(), syscall.CLONE_NEWNET); err != nil { + return fmt.Errorf("failed to setns current network namespace: %v", err) + } + return nil +} diff --git a/pkg/libcontainer/network/strategy.go b/pkg/libcontainer/network/strategy.go index 693790d280..e41ecc3ea6 100644 --- a/pkg/libcontainer/network/strategy.go +++ b/pkg/libcontainer/network/strategy.go @@ -2,6 +2,7 @@ package network import ( "errors" + "github.com/dotcloud/docker/pkg/libcontainer" ) @@ -12,6 +13,7 @@ var ( var strategies = map[string]NetworkStrategy{ "veth": &Veth{}, "loopback": &Loopback{}, + "netns": &NetNS{}, } // NetworkStrategy represents a specific network configuration for -- cgit v1.2.1 From b10b950b110c93db34a399753706dc79c71b94d3 Mon Sep 17 00:00:00 2001 From: Johan Euphrosine Date: Mon, 3 Mar 2014 21:46:49 -0800 Subject: libcontainer/nsinit/init: move mount namespace after network Docker-DCO-1.1-Signed-off-by: Johan Euphrosine (github: proppy) --- pkg/libcontainer/nsinit/init.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 117ae875ed..c39928d459 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -4,6 +4,9 @@ package nsinit import ( "fmt" + "os" + "syscall" + "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/apparmor" "github.com/dotcloud/docker/pkg/libcontainer/capabilities" @@ -11,8 +14,6 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer/utils" "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/pkg/user" - "os" - "syscall" ) // Init is the init process that first runs inside a new namespace to setup mounts, users, networking, @@ -56,13 +57,13 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol if err := system.ParentDeathSignal(uintptr(syscall.SIGTERM)); err != nil { return fmt.Errorf("parent death signal %s", err) } + if err := setupNetwork(container, context); err != nil { + return fmt.Errorf("setup networking %s", err) + } ns.logger.Println("setup mount namespace") if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot); err != nil { return fmt.Errorf("setup mount namespace %s", err) } - if err := setupNetwork(container, context); err != nil { - return fmt.Errorf("setup networking %s", err) - } if err := system.Sethostname(container.Hostname); err != nil { return fmt.Errorf("sethostname %s", err) } -- cgit v1.2.1 From f58757a699cf47c3b8770e13f371e1bbf493b5b1 Mon Sep 17 00:00:00 2001 From: Johan Euphrosine Date: Mon, 3 Mar 2014 21:47:03 -0800 Subject: libcontainer: goimports Docker-DCO-1.1-Signed-off-by: Johan Euphrosine (github: proppy) --- pkg/libcontainer/nsinit/exec.go | 7 ++++--- pkg/libcontainer/nsinit/nsinit/main.go | 8 ++++++-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 61286cc13c..6e902d1916 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -3,12 +3,13 @@ package nsinit import ( - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/network" - "github.com/dotcloud/docker/pkg/system" "os" "os/exec" "syscall" + + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/network" + "github.com/dotcloud/docker/pkg/system" ) // Exec performes setup outside 
of a namespace so that a container can be diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go index df32d0b49e..3725fb5534 100644 --- a/pkg/libcontainer/nsinit/nsinit/main.go +++ b/pkg/libcontainer/nsinit/nsinit/main.go @@ -3,14 +3,18 @@ package main import ( "encoding/json" "flag" - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/nsinit" "io" "io/ioutil" "log" "os" "path/filepath" "strconv" + + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/nsinit" + + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/pkg/libcontainer/nsinit" ) var ( -- cgit v1.2.1 From 041ae08a2c8f6f345ac0d7f5fea4b79655e28dc5 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 18 Mar 2014 16:24:45 -0700 Subject: Add image size to history docs Fixes #3147 Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- docs/sources/reference/commandline/cli.rst | 37 +++++++----------------------- 1 file changed, 8 insertions(+), 29 deletions(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index dc6529ab6a..089c1e14b2 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -569,35 +569,14 @@ To see how the ``docker:latest`` image was built: .. code-block:: bash $ docker history docker - ID CREATED CREATED BY - docker:latest 19 hours ago /bin/sh -c #(nop) ADD . in /go/src/github.com/dotcloud/docker - cf5f2467662d 2 weeks ago /bin/sh -c #(nop) ENTRYPOINT ["hack/dind"] - 3538fbe372bf 2 weeks ago /bin/sh -c #(nop) WORKDIR /go/src/github.com/dotcloud/docker - 7450f65072e5 2 weeks ago /bin/sh -c #(nop) VOLUME /var/lib/docker - b79d62b97328 2 weeks ago /bin/sh -c apt-get install -y -q lxc - 36714852a550 2 weeks ago /bin/sh -c apt-get install -y -q iptables - 8c4c706df1d6 2 weeks ago /bin/sh -c /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEYn' > /.s3cfg - b89989433c48 2 weeks ago /bin/sh -c pip install python-magic - a23e640d85b5 2 weeks ago /bin/sh -c pip install s3cmd - 41f54fec7e79 2 weeks ago /bin/sh -c apt-get install -y -q python-pip - d9bc04add907 2 weeks ago /bin/sh -c apt-get install -y -q reprepro dpkg-sig - e74f4760fa70 2 weeks ago /bin/sh -c gem install --no-rdoc --no-ri fpm - 1e43224726eb 2 weeks ago /bin/sh -c apt-get install -y -q ruby1.9.3 rubygems libffi-dev - 460953ae9d7f 2 weeks ago /bin/sh -c #(nop) ENV GOPATH=/go:/go/src/github.com/dotcloud/docker/vendor - 8b63eb1d666b 2 weeks ago /bin/sh -c #(nop) ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/goroot/bin - 3087f3bcedf2 2 weeks ago /bin/sh -c #(nop) ENV GOROOT=/goroot - 635840d198e5 2 weeks ago /bin/sh -c cd /goroot/src && ./make.bash - 439f4a0592ba 2 weeks ago /bin/sh -c curl -s https://go.googlecode.com/files/go1.1.2.src.tar.gz | tar -v -C / -xz && mv /go /goroot - 13967ed36e93 2 weeks ago /bin/sh -c #(nop) ENV CGO_ENABLED=0 - bf7424458437 2 weeks ago /bin/sh -c apt-get install -y -q build-essential - a89ec997c3bf 2 weeks ago /bin/sh -c apt-get install -y -q mercurial - b9f165c6e749 2 weeks ago /bin/sh -c apt-get install -y -q git - 17a64374afa7 2 weeks ago /bin/sh -c apt-get install -y -q curl - d5e85dc5b1d8 2 weeks ago /bin/sh -c apt-get update - 13e642467c11 2 weeks ago /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list - ae6dde92a94e 2 weeks ago /bin/sh -c #(nop) MAINTAINER 
Solomon Hykes - ubuntu:12.04 6 months ago - + IMAGE CREATED CREATED BY SIZE + 3e23a5875458790b7a806f95f7ec0d0b2a5c1659bfc899c89f939f6d5b8f7094 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B + 8578938dd17054dce7993d21de79e96a037400e8d28e15e7290fea4f65128a36 8 days ago /bin/sh -c dpkg-reconfigure locales && locale-gen C.UTF-8 && /usr/sbin/update-locale LANG=C.UTF-8 1.245 MB + be51b77efb42f67a5e96437b3e102f81e0a1399038f77bf28cea0ed23a65cf60 8 days ago /bin/sh -c apt-get update && apt-get install -y git libxml2-dev python build-essential make gcc python-dev locales python-pip 338.3 MB + 4b137612be55ca69776c7f30c2d2dd0aa2e7d72059820abf3e25b629f887a084 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB + 750d58736b4b6cc0f9a9abe8f258cef269e3e9dceced1146503522be9f985ada 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -t jessie.tar.xz jessie http://http.debian.net/debian 0 B + 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 9 months ago 0 B + .. _cli_images: ``images`` -- cgit v1.2.1 From 5dbfe310fe624c66714f8c5017692f528af4c87f Mon Sep 17 00:00:00 2001 From: Johan Euphrosine Date: Tue, 18 Mar 2014 16:25:26 -0700 Subject: libcontainer: remove duplicate imports Docker-DCO-1.1-Signed-off-by: Johan Euphrosine (github: proppy) --- pkg/libcontainer/nsinit/nsinit/main.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go index 3725fb5534..37aa784981 100644 --- a/pkg/libcontainer/nsinit/nsinit/main.go +++ b/pkg/libcontainer/nsinit/nsinit/main.go @@ -12,9 +12,6 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/nsinit" - - "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/pkg/libcontainer/nsinit" ) var ( -- cgit v1.2.1 From 3b1d590269466217ddf203edcef295f6cec9fcee Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 18 Mar 2014 23:12:39 +0000 Subject: cleanup container.stop Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- runtime/container.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/runtime/container.go b/runtime/container.go index 35b01deac7..6194a19c8c 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -886,11 +886,8 @@ func (container *Container) Kill() error { // 2. 
Wait for the process to die, in last resort, try to kill the process directly if err := container.WaitTimeout(10 * time.Second); err != nil { - if container.command == nil { - return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", utils.TruncateID(container.ID)) - } - log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", "SIGKILL", utils.TruncateID(container.ID)) - if err := container.runtime.Kill(container, 9); err != nil { + log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID)) + if err := syscall.Kill(container.State.Pid, 9); err != nil { return err } } -- cgit v1.2.1 From 38a3fc3e0e9b56400b2c7d2fce3bfc7b15395d14 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 18 Mar 2014 17:07:45 -0700 Subject: Add sudo clause if your using osx or tcp Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- docs/sources/examples/example_header.inc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/sources/examples/example_header.inc b/docs/sources/examples/example_header.inc index 0621b39794..5841141e59 100644 --- a/docs/sources/examples/example_header.inc +++ b/docs/sources/examples/example_header.inc @@ -4,4 +4,5 @@ * This example assumes you have Docker running in daemon mode. For more information please see :ref:`running_examples`. * **If you don't like sudo** then see :ref:`dockergroup` + * **If you're using OS X or docker via TCP** then you shouldn't use `sudo` -- cgit v1.2.1 From 7822b053cbd2288b6c8d9c51a8f495368bc77f35 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 18 Mar 2014 17:24:06 -0700 Subject: Be explicit about binding to all interfaces in redis example Fixes #4021 Moved to debian because the redis installed in ubuntu is really old and does not support args via the cli. Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- docs/sources/examples/running_redis_service.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sources/examples/running_redis_service.rst b/docs/sources/examples/running_redis_service.rst index 50f1471f17..5a5a1b003f 100644 --- a/docs/sources/examples/running_redis_service.rst +++ b/docs/sources/examples/running_redis_service.rst @@ -18,11 +18,11 @@ Firstly, we create a ``Dockerfile`` for our new Redis image. .. code-block:: bash - FROM ubuntu:12.10 - RUN apt-get update - RUN apt-get -y install redis-server + FROM debian:jessie + RUN apt-get update && apt-get install -y redis-server EXPOSE 6379 ENTRYPOINT ["/usr/bin/redis-server"] + CMD ["--bind", "0.0.0.0"] Next we build an image from our ``Dockerfile``. Replace ```` with your own user name. -- cgit v1.2.1 From 92194f613e37aeb8a387920c3bee42480da0d2ac Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 19 Mar 2014 14:12:47 +1000 Subject: use this horrible complex bit of shell to make sure that curl doesn't hand the poor user a broken docker client Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/installation/mac.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/sources/installation/mac.rst b/docs/sources/installation/mac.rst index 5139324d0b..f4c771cf9f 100644 --- a/docs/sources/installation/mac.rst +++ b/docs/sources/installation/mac.rst @@ -65,11 +65,12 @@ Run the following commands to get it downloaded and set up: .. 
code-block:: bash - # Get the file - curl -o docker https://get.docker.io/builds/Darwin/x86_64/docker-latest - - # Mark it executable - chmod +x docker + # Get the docker client file + DIR=$(mktemp -d) && \ + curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \ + gunzip $DIR/ld.tgz && \ + tar xvf $DIR/ld.tar -C $DIR/ && \ + cp $DIR/usr/local/bin/docker ./docker # Set the environment variable for the docker daemon export DOCKER_HOST=tcp://127.0.0.1:4243 -- cgit v1.2.1 From 53dc2d67fb65037d9891e2fa0f6559d5e4e2ddcc Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 19 Mar 2014 14:24:49 +1000 Subject: mention the tgz - other people might like to know Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/installation/binaries.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/sources/installation/binaries.rst b/docs/sources/installation/binaries.rst index bfdfbe211f..fe03d21859 100644 --- a/docs/sources/installation/binaries.rst +++ b/docs/sources/installation/binaries.rst @@ -49,6 +49,9 @@ Get the docker binary: wget https://get.docker.io/builds/Linux/x86_64/docker-latest -O docker chmod +x docker +.. note:: + If you have trouble downloading the binary, you can also get the smaller + compressed release file: https://get.docker.io/builds/Linux/x86_64/docker-latest.tgz Run the docker daemon --------------------- -- cgit v1.2.1 From 4fd82db4beba03a126dfc557c86d5d52e9066dae Mon Sep 17 00:00:00 2001 From: Viktor Vojnovski Date: Wed, 19 Mar 2014 00:00:48 +0100 Subject: refactor($hack,$docs): be consistent in apt-key keyserver URI usage, as done in #4740 In #4740, the apt-key call in docs is changed to use the keyserver port 80 instead of port 11371, as the previous call would fail with a restrictive firewall or proxy. This commit extends the change to all apt-key calls in the repository. Docker-DCO-1.1-Signed-off-by: Viktor Vojnovski (github: vojnovski) --- docs/sources/examples/postgresql_service.Dockerfile | 2 +- docs/sources/installation/ubuntulinux.rst | 2 +- hack/infrastructure/docker-ci/Dockerfile | 2 +- hack/install.sh | 4 ++-- hack/release.sh | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/sources/examples/postgresql_service.Dockerfile b/docs/sources/examples/postgresql_service.Dockerfile index af1423f258..219a537882 100644 --- a/docs/sources/examples/postgresql_service.Dockerfile +++ b/docs/sources/examples/postgresql_service.Dockerfile @@ -7,7 +7,7 @@ MAINTAINER SvenDowideit@docker.com # Add the PostgreSQL PGP key to verify their Debian packages. # It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc -RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 # Add PostgreSQL's repository. It contains the most recent stable release # of PostgreSQL, ``9.3``. diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index 6998be8571..a163c62da7 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -144,7 +144,7 @@ First add the Docker repository key to your local keychain. .. 
code-block:: bash - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 + sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 Add the Docker repository to your apt sources list, update and install the ``lxc-docker`` package. diff --git a/hack/infrastructure/docker-ci/Dockerfile b/hack/infrastructure/docker-ci/Dockerfile index 789c794f54..5c6eec9663 100644 --- a/hack/infrastructure/docker-ci/Dockerfile +++ b/hack/infrastructure/docker-ci/Dockerfile @@ -16,7 +16,7 @@ RUN apt-get install -y --no-install-recommends python2.7 python-dev \ RUN cd /tmp; wget http://python-distribute.org/distribute_setup.py RUN cd /tmp; python distribute_setup.py; easy_install pip; rm distribute_setup.py -RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 RUN echo 'deb http://get.docker.io/ubuntu docker main' > \ /etc/apt/sources.list.d/docker.list; apt-get update RUN apt-get install -y lxc-docker-0.8.0 diff --git a/hack/install.sh b/hack/install.sh index 65e34f9659..1fa8a47480 100755 --- a/hack/install.sh +++ b/hack/install.sh @@ -111,9 +111,9 @@ case "$lsb_dist" in ( set -x if [ "https://get.docker.io/" = "$url" ]; then - $sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9" + $sh_c "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9" elif [ "https://test.docker.io/" = "$url" ]; then - $sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6" + $sh_c "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6" else $sh_c "$curl ${url}gpg | apt-key add -" fi diff --git a/hack/release.sh b/hack/release.sh index c380d2239a..1f249a5c5e 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -246,7 +246,7 @@ EOF # Add the repository to your APT sources echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list # Then import the repository key -apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 +apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 # Install docker apt-get update ; apt-get install -y lxc-docker -- cgit v1.2.1 From fbfac21ed4de550ce72d993810dc07a2c4877a88 Mon Sep 17 00:00:00 2001 From: Daniel Norberg Date: Fri, 7 Feb 2014 11:48:14 -0500 Subject: configurable dns search domains Add a --dns-search parameter and a DnsSearch configuration field for specifying dns search domains. 
Docker-DCO-1.1-Signed-off-by: Daniel Norberg (github: danielnorberg) --- daemonconfig/config.go | 4 ++ docker/docker.go | 3 ++ docs/sources/reference/commandline/cli.rst | 9 +++- opts/opts.go | 13 ++++++ opts/opts_test.go | 54 ++++++++++++++++++++++ runconfig/compare.go | 6 +++ runconfig/config.go | 4 ++ runconfig/config_test.go | 16 +++++++ runconfig/merge.go | 6 +++ runconfig/parse.go | 3 ++ runtime/runtime.go | 17 +++++-- utils/utils.go | 72 ++++++++++++++++++++---------- utils/utils_test.go | 51 +++++++++++++++++++++ 13 files changed, 229 insertions(+), 29 deletions(-) diff --git a/daemonconfig/config.go b/daemonconfig/config.go index b26d3eec3a..6cb3659e18 100644 --- a/daemonconfig/config.go +++ b/daemonconfig/config.go @@ -18,6 +18,7 @@ type Config struct { Root string AutoRestart bool Dns []string + DnsSearch []string EnableIptables bool EnableIpForward bool DefaultIp net.IP @@ -49,6 +50,9 @@ func ConfigFromJob(job *engine.Job) *Config { if dns := job.GetenvList("Dns"); dns != nil { config.Dns = dns } + if dnsSearch := job.GetenvList("DnsSearch"); dnsSearch != nil { + config.DnsSearch = dnsSearch + } if mtu := job.GetenvInt("Mtu"); mtu != 0 { config.Mtu = mtu } else { diff --git a/docker/docker.go b/docker/docker.go index 749857a640..e62b9494d5 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -35,6 +35,7 @@ func main() { flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group") flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API") flDns = opts.NewListOpts(opts.ValidateIp4Address) + flDnsSearch = opts.NewListOpts(opts.ValidateDomain) flEnableIptables = flag.Bool([]string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules") flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward") flDefaultIp = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports") @@ -45,6 +46,7 @@ func main() { flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available") ) flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers") + flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains") flag.Var(&flHosts, []string{"H", "-host"}, "tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. 
Multiple sockets can be specified") flag.Parse() @@ -115,6 +117,7 @@ func main() { job.Setenv("Root", realRoot) job.SetenvBool("AutoRestart", *flAutoRestart) job.SetenvList("Dns", flDns.GetAll()) + job.SetenvList("DnsSearch", flDnsSearch.GetAll()) job.SetenvBool("EnableIptables", *flEnableIptables) job.SetenvBool("EnableIpForward", *flEnableIpForward) job.Setenv("BridgeIface", *bridgeName) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 0f33b05ec4..7b16a7d2ec 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -77,6 +77,7 @@ Commands --bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b -d, --daemon=false: Enable daemon mode --dns=[]: Force docker to use specific DNS servers + --dns-search=[]: Force Docker to use specific DNS search domains -g, --graph="/var/lib/docker": Path to use as the root of the docker runtime --icc=true: Enable inter-container communication --ip="0.0.0.0": Default IP address to use when binding container ports @@ -96,6 +97,8 @@ To force Docker to use devicemapper as the storage driver, use ``docker -d -s de To set the DNS server for all Docker containers, use ``docker -d --dns 8.8.8.8``. +To set the a DNS search domain for all Docker containers, use ``docker -d --dns-search example.com``. + To run the daemon with debug output, use ``docker -d -D``. To use lxc as the execution driver, use ``docker -d -e lxc``. @@ -396,6 +399,7 @@ not overridden in the JSON hash will be merged in. "VolumesFrom" : "", "Cmd" : ["cat", "-e", "/etc/resolv.conf"], "Dns" : ["8.8.8.8", "8.8.4.4"], + "DnsSearch" : ["example.com"], "MemorySwap" : 0, "AttachStdin" : false, "AttachStderr" : false, @@ -1131,6 +1135,7 @@ image is removed. -t, --tty=false: Allocate a pseudo-tty -u, --user="": Username or UID --dns=[]: Set custom dns servers for the container + --dns-search=[]: Set custom DNS search domains for the container -v, --volume=[]: Create a bind mount to a directory or file with: [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume. --volumes-from="": Mount all volumes from the given container(s) --entrypoint="": Overwrite the default entrypoint set by the image @@ -1288,7 +1293,7 @@ A complete example $ sudo docker run -d --name static static-web-files sh $ sudo docker run -d --expose=8098 --name riak riakserver $ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro --name app appserver - $ sudo docker run -d -p 1443:443 --dns=dns.dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver + $ sudo docker run -d -p 1443:443 --dns=dns.dev.org --dns-search=dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver $ sudo docker run -t -i --rm --volumes-from web -w /var/log/httpd busybox tail -f access.log This example shows 5 containers that might be set up to test a web application change: @@ -1296,7 +1301,7 @@ This example shows 5 containers that might be set up to test a web application c 1. Start a pre-prepared volume image ``static-web-files`` (in the background) that has CSS, image and static HTML in it, (with a ``VOLUME`` instruction in the ``Dockerfile`` to allow the web server to use those files); 2. 
Start a pre-prepared ``riakserver`` image, give the container name ``riak`` and expose port ``8098`` to any containers that link to it; 3. Start the ``appserver`` image, restricting its memory usage to 100MB, setting two environment variables ``DEVELOPMENT`` and ``BRANCH`` and bind-mounting the current directory (``$(pwd)``) in the container in read-only mode as ``/app/bin``; -4. Start the ``webserver``, mapping port ``443`` in the container to port ``1443`` on the Docker server, setting the DNS server to ``dns.dev.org``, creating a volume to put the log files into (so we can access it from another container), then importing the files from the volume exposed by the ``static`` container, and linking to all exposed ports from ``riak`` and ``app``. Lastly, we set the hostname to ``web.sven.dev.org`` so its consistent with the pre-generated SSL certificate; +4. Start the ``webserver``, mapping port ``443`` in the container to port ``1443`` on the Docker server, setting the DNS server to ``dns.dev.org`` and DNS search domain to ``dev.org``, creating a volume to put the log files into (so we can access it from another container), then importing the files from the volume exposed by the ``static`` container, and linking to all exposed ports from ``riak`` and ``app``. Lastly, we set the hostname to ``web.sven.dev.org`` so its consistent with the pre-generated SSL certificate; 5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``--rm`` option means that when the container exits, the container's layer is removed. diff --git a/opts/opts.go b/opts/opts.go index 4f5897c796..b2f21db30b 100644 --- a/opts/opts.go +++ b/opts/opts.go @@ -136,3 +136,16 @@ func ValidateIp4Address(val string) (string, error) { } return "", fmt.Errorf("%s is not an ip4 address", val) } + +func ValidateDomain(val string) (string, error) { + alpha := regexp.MustCompile(`[a-zA-Z]`) + if alpha.FindString(val) == "" { + return "", fmt.Errorf("%s is not a valid domain", val) + } + re := regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) + var ns = re.FindSubmatch([]byte(val)) + if len(ns) > 0 { + return string(ns[1]), nil + } + return "", fmt.Errorf("%s is not a valid domain", val) +} diff --git a/opts/opts_test.go b/opts/opts_test.go index a5c1fac9ca..299cbfe503 100644 --- a/opts/opts_test.go +++ b/opts/opts_test.go @@ -22,3 +22,57 @@ func TestValidateIP4(t *testing.T) { } } + +func TestValidateDomain(t *testing.T) { + valid := []string{ + `a`, + `a.`, + `1.foo`, + `17.foo`, + `foo.bar`, + `foo.bar.baz`, + `foo.bar.`, + `foo.bar.baz`, + `foo1.bar2`, + `foo1.bar2.baz`, + `1foo.2bar.`, + `1foo.2bar.baz`, + `foo-1.bar-2`, + `foo-1.bar-2.baz`, + `foo-1.bar-2.`, + `foo-1.bar-2.baz`, + `1-foo.2-bar`, + `1-foo.2-bar.baz`, + `1-foo.2-bar.`, + `1-foo.2-bar.baz`, + } + + invalid := []string{ + ``, + `.`, + `17`, + `17.`, + `.17`, + `17-.`, + `17-.foo`, + `.foo`, + `foo-.bar`, + `-foo.bar`, + `foo.bar-`, + `foo.bar-.baz`, + `foo.-bar`, + `foo.-bar.baz`, + } + + for _, domain := range valid { + if ret, err := ValidateDomain(domain); err != nil || ret == "" { + t.Fatalf("ValidateDomain(`"+domain+"`) got %s %s", ret, err) + } + } + + for _, domain := range invalid { + if ret, err := ValidateDomain(domain); err == nil || ret != "" { + t.Fatalf("ValidateDomain(`"+domain+"`) got %s %s", ret, err) + } + } +} diff --git 
a/runconfig/compare.go b/runconfig/compare.go index c09f897716..6ed7405246 100644 --- a/runconfig/compare.go +++ b/runconfig/compare.go @@ -20,6 +20,7 @@ func Compare(a, b *Config) bool { } if len(a.Cmd) != len(b.Cmd) || len(a.Dns) != len(b.Dns) || + len(a.DnsSearch) != len(b.DnsSearch) || len(a.Env) != len(b.Env) || len(a.PortSpecs) != len(b.PortSpecs) || len(a.ExposedPorts) != len(b.ExposedPorts) || @@ -38,6 +39,11 @@ func Compare(a, b *Config) bool { return false } } + for i := 0; i < len(a.DnsSearch); i++ { + if a.DnsSearch[i] != b.DnsSearch[i] { + return false + } + } for i := 0; i < len(a.Env); i++ { if a.Env[i] != b.Env[i] { return false diff --git a/runconfig/config.go b/runconfig/config.go index 9faa823a57..e961d659d7 100644 --- a/runconfig/config.go +++ b/runconfig/config.go @@ -26,6 +26,7 @@ type Config struct { Env []string Cmd []string Dns []string + DnsSearch []string Image string // Name of the image as it was passed by the operator (eg. could be symbolic) Volumes map[string]struct{} VolumesFrom string @@ -68,6 +69,9 @@ func ContainerConfigFromJob(job *engine.Job) *Config { if Dns := job.GetenvList("Dns"); Dns != nil { config.Dns = Dns } + if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil { + config.DnsSearch = DnsSearch + } if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { config.Entrypoint = Entrypoint } diff --git a/runconfig/config_test.go b/runconfig/config_test.go index 46e4691b93..84846e5b1d 100644 --- a/runconfig/config_test.go +++ b/runconfig/config_test.go @@ -164,6 +164,7 @@ func TestCompare(t *testing.T) { volumes1["/test1"] = struct{}{} config1 := Config{ Dns: []string{"1.1.1.1", "2.2.2.2"}, + DnsSearch: []string{"foo", "bar"}, PortSpecs: []string{"1111:1111", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "11111111", @@ -171,6 +172,7 @@ func TestCompare(t *testing.T) { } config2 := Config{ Dns: []string{"0.0.0.0", "2.2.2.2"}, + DnsSearch: []string{"foo", "bar"}, PortSpecs: []string{"1111:1111", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "11111111", @@ -178,6 +180,7 @@ func TestCompare(t *testing.T) { } config3 := Config{ Dns: []string{"1.1.1.1", "2.2.2.2"}, + DnsSearch: []string{"foo", "bar"}, PortSpecs: []string{"0000:0000", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "11111111", @@ -185,6 +188,7 @@ func TestCompare(t *testing.T) { } config4 := Config{ Dns: []string{"1.1.1.1", "2.2.2.2"}, + DnsSearch: []string{"foo", "bar"}, PortSpecs: []string{"0000:0000", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "22222222", @@ -194,11 +198,20 @@ func TestCompare(t *testing.T) { volumes2["/test2"] = struct{}{} config5 := Config{ Dns: []string{"1.1.1.1", "2.2.2.2"}, + DnsSearch: []string{"foo", "bar"}, PortSpecs: []string{"0000:0000", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "11111111", Volumes: volumes2, } + config6 := Config{ + Dns: []string{"1.1.1.1", "2.2.2.2"}, + DnsSearch: []string{"foos", "bars"}, + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + VolumesFrom: "11111111", + Volumes: volumes1, + } if Compare(&config1, &config2) { t.Fatalf("Compare should return false, Dns are different") } @@ -211,6 +224,9 @@ func TestCompare(t *testing.T) { if Compare(&config1, &config5) { t.Fatalf("Compare should return false, Volumes are different") } + if Compare(&config1, &config6) { + t.Fatalf("Compare should return false, DnsSearch are different") + } if !Compare(&config1, &config1) { t.Fatalf("Compare should return true") 
} diff --git a/runconfig/merge.go b/runconfig/merge.go index 3b91aa2af0..34faaf75e7 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -100,6 +100,12 @@ func Merge(userConf, imageConf *Config) error { //duplicates aren't an issue here userConf.Dns = append(userConf.Dns, imageConf.Dns...) } + if userConf.DnsSearch == nil || len(userConf.DnsSearch) == 0 { + userConf.DnsSearch = imageConf.DnsSearch + } else { + //duplicates aren't an issue here + userConf.DnsSearch = append(userConf.DnsSearch, imageConf.DnsSearch...) + } if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 { userConf.Entrypoint = imageConf.Entrypoint } diff --git a/runconfig/parse.go b/runconfig/parse.go index 2138f4e68c..cc33188ad5 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -42,6 +42,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flPublish opts.ListOpts flExpose opts.ListOpts flDns opts.ListOpts + flDnsSearch = opts.NewListOpts(opts.ValidateDomain) flVolumesFrom opts.ListOpts flLxcOpts opts.ListOpts @@ -73,6 +74,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") + cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom dns search domains") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") @@ -196,6 +198,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf Env: flEnv.GetAll(), Cmd: runCmd, Dns: flDns.GetAll(), + DnsSearch: flDnsSearch.GetAll(), Image: image, Volumes: flVolumes.GetMap(), VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","), diff --git a/runtime/runtime.go b/runtime/runtime.go index 4408e13902..38a1beccd2 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -493,13 +493,19 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe } // If custom dns exists, then create a resolv.conf for the container - if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 { - var dns []string + if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(runtime.config.DnsSearch) > 0 { + dns := utils.GetNameservers(resolvConf) + dnsSearch := utils.GetSearchDomains(resolvConf) if len(config.Dns) > 0 { dns = config.Dns - } else { + } else if len(runtime.config.Dns) > 0 { dns = runtime.config.Dns } + if len(config.DnsSearch) > 0 { + dnsSearch = config.DnsSearch + } else if len(runtime.config.DnsSearch) > 0 { + dnsSearch = runtime.config.DnsSearch + } container.ResolvConfPath = path.Join(container.root, "resolv.conf") f, err := os.Create(container.ResolvConfPath) if err != nil { @@ -511,6 +517,11 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe return nil, nil, err } } + if len(dnsSearch) > 0 { + if _, err := f.Write([]byte("search " + strings.Join(dnsSearch, " ") + "\n")); err != nil { + return nil, nil, err + } + } } else { container.ResolvConfPath = "/etc/resolv.conf" } diff --git a/utils/utils.go b/utils/utils.go index 
57a8200a7c..2702555973 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -731,54 +731,78 @@ func GetResolvConf() ([]byte, error) { // CheckLocalDns looks into the /etc/resolv.conf, // it returns true if there is a local nameserver or if there is no nameserver. func CheckLocalDns(resolvConf []byte) bool { - var parsedResolvConf = StripComments(resolvConf, []byte("#")) - if !bytes.Contains(parsedResolvConf, []byte("nameserver")) { - return true - } - for _, ip := range [][]byte{ - []byte("127.0.0.1"), - []byte("127.0.1.1"), - } { - if bytes.Contains(parsedResolvConf, ip) { - return true + for _, line := range GetLines(resolvConf, []byte("#")) { + if !bytes.Contains(line, []byte("nameserver")) { + continue + } + for _, ip := range [][]byte{ + []byte("127.0.0.1"), + []byte("127.0.1.1"), + } { + if bytes.Contains(line, ip) { + return true + } } + return false } - return false + return true } -// StripComments parses input into lines and strips away comments. -func StripComments(input []byte, commentMarker []byte) []byte { +// GetLines parses input into lines and strips away comments. +func GetLines(input []byte, commentMarker []byte) [][]byte { lines := bytes.Split(input, []byte("\n")) - var output []byte + var output [][]byte for _, currentLine := range lines { var commentIndex = bytes.Index(currentLine, commentMarker) if commentIndex == -1 { - output = append(output, currentLine...) + output = append(output, currentLine) } else { - output = append(output, currentLine[:commentIndex]...) + output = append(output, currentLine[:commentIndex]) } - output = append(output, []byte("\n")...) } return output } -// GetNameserversAsCIDR returns nameservers (if any) listed in -// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") -// This function's output is intended for net.ParseCIDR -func GetNameserversAsCIDR(resolvConf []byte) []string { - var parsedResolvConf = StripComments(resolvConf, []byte("#")) +// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf +func GetNameservers(resolvConf []byte) []string { nameservers := []string{} re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`) - for _, line := range bytes.Split(parsedResolvConf, []byte("\n")) { + for _, line := range GetLines(resolvConf, []byte("#")) { var ns = re.FindSubmatch(line) if len(ns) > 0 { - nameservers = append(nameservers, string(ns[1])+"/32") + nameservers = append(nameservers, string(ns[1])) } } + return nameservers +} +// GetNameserversAsCIDR returns nameservers (if any) listed in +// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") +// This function's output is intended for net.ParseCIDR +func GetNameserversAsCIDR(resolvConf []byte) []string { + nameservers := []string{} + for _, nameserver := range GetNameservers(resolvConf) { + nameservers = append(nameservers, nameserver+"/32") + } return nameservers } +// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf +// If more than one search line is encountered, only the contents of the last +// one is returned. 
+func GetSearchDomains(resolvConf []byte) []string { + re := regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) + domains := []string{} + for _, line := range GetLines(resolvConf, []byte("#")) { + match := re.FindSubmatch(line) + if match == nil { + continue + } + domains = strings.Fields(string(match[1])) + } + return domains +} + // FIXME: Change this not to receive default value as parameter func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) { var ( diff --git a/utils/utils_test.go b/utils/utils_test.go index 444d2a2428..177d3667e1 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -444,6 +444,30 @@ func TestParsePortMapping(t *testing.T) { } } +func TestGetNameservers(t *testing.T) { + for resolv, result := range map[string][]string{` +nameserver 1.2.3.4 +nameserver 40.3.200.10 +search example.com`: {"1.2.3.4", "40.3.200.10"}, + `search example.com`: {}, + `nameserver 1.2.3.4 +search example.com +nameserver 4.30.20.100`: {"1.2.3.4", "4.30.20.100"}, + ``: {}, + ` nameserver 1.2.3.4 `: {"1.2.3.4"}, + `search example.com +nameserver 1.2.3.4 +#nameserver 4.3.2.1`: {"1.2.3.4"}, + `search example.com +nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4"}, + } { + test := GetNameservers([]byte(resolv)) + if !StrSlicesEqual(test, result) { + t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv) + } + } +} + func TestGetNameserversAsCIDR(t *testing.T) { for resolv, result := range map[string][]string{` nameserver 1.2.3.4 @@ -468,6 +492,33 @@ nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"}, } } +func TestGetSearchDomains(t *testing.T) { + for resolv, result := range map[string][]string{ + `search example.com`: {"example.com"}, + `search example.com # ignored`: {"example.com"}, + ` search example.com `: {"example.com"}, + ` search example.com # ignored`: {"example.com"}, + `search foo.example.com example.com`: {"foo.example.com", "example.com"}, + ` search foo.example.com example.com `: {"foo.example.com", "example.com"}, + ` search foo.example.com example.com # ignored`: {"foo.example.com", "example.com"}, + ``: {}, + `# ignored`: {}, + `nameserver 1.2.3.4 +search foo.example.com example.com`: {"foo.example.com", "example.com"}, + `nameserver 1.2.3.4 +search dup1.example.com dup2.example.com +search foo.example.com example.com`: {"foo.example.com", "example.com"}, + `nameserver 1.2.3.4 +search foo.example.com example.com +nameserver 4.30.20.100`: {"foo.example.com", "example.com"}, + } { + test := GetSearchDomains([]byte(resolv)) + if !StrSlicesEqual(test, result) { + t.Fatalf("Wrong search domain string {%s} should be %v. 
Input: %s", test, result, resolv) + } + } +} + func StrSlicesEqual(a, b []string) bool { if len(a) != len(b) { return false -- cgit v1.2.1 From ec3257921da9da0d37df76e26a842f8f4775def0 Mon Sep 17 00:00:00 2001 From: Aditya Date: Sun, 16 Mar 2014 21:35:01 +0100 Subject: Docker-DCO-1.1-Signed-off-by: Aditya (github: netroy) document `DisableNetwork` config flag --- docs/sources/reference/api/docker_remote_api_v1.10.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.rst b/docs/sources/reference/api/docker_remote_api_v1.10.rst index 20af253f0e..4fa9a04c03 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.10.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.10.rst @@ -136,6 +136,7 @@ Create a container }, "VolumesFrom":"", "WorkingDir":"", + "DisableNetwork": false, "ExposedPorts":{ "22/tcp": {} } -- cgit v1.2.1 From 5127732c7911988c81eda7bb31ac77fc1dd36ac2 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 19 Mar 2014 14:30:13 -0400 Subject: docker save: --output flag for those that do not care to redirect stdout Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- api/client.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/api/client.go b/api/client.go index 8f515639a7..343a24078c 100644 --- a/api/client.go +++ b/api/client.go @@ -2044,6 +2044,8 @@ func (cli *DockerCli) CmdCp(args ...string) error { func (cli *DockerCli) CmdSave(args ...string) error { cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)") + outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT") + if err := cmd.Parse(args); err != nil { return err } @@ -2053,8 +2055,16 @@ func (cli *DockerCli) CmdSave(args ...string) error { return nil } + var output io.Writer = cli.out + var err error + if *outfile != "" { + output, err = os.Create(*outfile) + if err != nil { + return err + } + } image := cmd.Arg(0) - if err := cli.stream("GET", "/images/"+image+"/get", nil, cli.out, nil); err != nil { + if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil { return err } return nil -- cgit v1.2.1 From 367a679b9270dd9ec6bd647998b6ffe594cfa6ab Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 19 Mar 2014 14:34:12 -0400 Subject: images: assurance and debug info on image layers when pushing or saving layers, report sizes for validation. And ensure that the files written are sync'ed. 
Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- archive/archive.go | 3 +++ server/server.go | 11 +++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index eace5a5158..2fac18e99f 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -617,6 +617,9 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) { if _, err := io.Copy(f, src); err != nil { return nil, err } + if err = f.Sync(); err != nil { + return nil, err + } if _, err := f.Seek(0, 0); err != nil { return nil, err } diff --git a/server/server.go b/server/server.go index e6243971a4..840a70357d 100644 --- a/server/server.go +++ b/server/server.go @@ -378,10 +378,15 @@ func (srv *Server) exportImage(img *image.Image, tempdir string) error { if err != nil { return err } - if _, err = io.Copy(fsTar, fs); err != nil { + if written, err := io.Copy(fsTar, fs); err != nil { + return err + } else { + utils.Debugf("rendered layer for %s of [%d] size", i.ID, written) + } + + if err = fsTar.Close(); err != nil { return err } - fsTar.Close() // find parent if i.Parent != "" { @@ -1537,6 +1542,8 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, defer os.RemoveAll(layerData.Name()) // Send the layer + utils.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size) + checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw) if err != nil { return "", err -- cgit v1.2.1 From e93a16ab48f75311aab155548f32776cbd21dfe6 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 19 Mar 2014 14:47:20 -0400 Subject: docker save: add and improve docs add usage examples for `docker save ...` Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- api/client.go | 2 +- docs/sources/reference/commandline/cli.rst | 23 ++++++++++++++++++++--- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/api/client.go b/api/client.go index 343a24078c..5a69700704 100644 --- a/api/client.go +++ b/api/client.go @@ -2043,7 +2043,7 @@ func (cli *DockerCli) CmdCp(args ...string) error { } func (cli *DockerCli) CmdSave(args ...string) error { - cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)") + cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout by default)") outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT") if err := cmd.Parse(args); err != nil { diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 0f33b05ec4..294e1d0544 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1307,10 +1307,27 @@ This example shows 5 containers that might be set up to test a web application c :: - Usage: docker save image > repository.tar + Usage: docker save IMAGE + + Save an image to a tar archive (streamed to stdout by default) + + -o, --output="": Write to an file, instead of STDOUT + + +Produces a tarred repository to the standard output stream. +Contains all parent layers, and all tags + versions, or specified repo:tag. + +.. 
code-block:: bash + + $ sudo docker save busybox > busybox.tar + $ ls -sh b.tar + 2.7M b.tar + $ sudo docker save --output busybox.tar busybox + $ ls -sh b.tar + 2.7M b.tar + $ sudo docker save -o fedora-all.tar fedora + $ sudo docker save -o fedora-latest.tar fedora:latest - Streams a tarred repository to the standard output stream. - Contains all parent layers, and all tags + versions. .. _cli_search: -- cgit v1.2.1 From 48cb2f03177732823b4091fd3ddd44b2bef2c58e Mon Sep 17 00:00:00 2001 From: LK4D4 Date: Wed, 19 Mar 2014 21:01:20 +0400 Subject: Remove duplication of Dns in config merging. Fixes #4714 Docker-DCO-1.1-Signed-off-by: Alexandr Morozov (github: LK4D4) --- runconfig/config_test.go | 2 +- runconfig/merge.go | 11 +++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/runconfig/config_test.go b/runconfig/config_test.go index 46e4691b93..8bbad9effa 100644 --- a/runconfig/config_test.go +++ b/runconfig/config_test.go @@ -231,7 +231,7 @@ func TestMerge(t *testing.T) { volumesUser := make(map[string]struct{}) volumesUser["/test3"] = struct{}{} configUser := &Config{ - Dns: []string{"3.3.3.3"}, + Dns: []string{"2.2.2.2", "3.3.3.3"}, PortSpecs: []string{"3333:2222", "3333:3333"}, Env: []string{"VAR2=3", "VAR3=3"}, Volumes: volumesUser, diff --git a/runconfig/merge.go b/runconfig/merge.go index 3b91aa2af0..79e3951271 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -97,8 +97,15 @@ func Merge(userConf, imageConf *Config) error { if userConf.Dns == nil || len(userConf.Dns) == 0 { userConf.Dns = imageConf.Dns } else { - //duplicates aren't an issue here - userConf.Dns = append(userConf.Dns, imageConf.Dns...) + dnsSet := make(map[string]struct{}, len(userConf.Dns)) + for _, dns := range userConf.Dns { + dnsSet[dns] = struct{}{} + } + for _, dns := range imageConf.Dns { + if _, exists := dnsSet[dns]; !exists { + userConf.Dns = append(userConf.Dns, dns) + } + } } if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 { userConf.Entrypoint = imageConf.Entrypoint -- cgit v1.2.1 From 698ca9f38d7ccee2c36b98821c74114b95db631b Mon Sep 17 00:00:00 2001 From: Daniel Norberg Date: Wed, 19 Mar 2014 15:20:36 -0400 Subject: fix typo in documentation Docker-DCO-1.1-Signed-off-by: Daniel Norberg (github: danielnorberg) --- docs/sources/reference/commandline/cli.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 7b16a7d2ec..c483551b46 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -97,7 +97,7 @@ To force Docker to use devicemapper as the storage driver, use ``docker -d -s de To set the DNS server for all Docker containers, use ``docker -d --dns 8.8.8.8``. -To set the a DNS search domain for all Docker containers, use ``docker -d --dns-search example.com``. +To set the DNS search domain for all Docker containers, use ``docker -d --dns-search example.com``. To run the daemon with debug output, use ``docker -d -D``. 
-- cgit v1.2.1 From c657603c612650117b4def976ff40d98ba7c3a21 Mon Sep 17 00:00:00 2001 From: Daniel Norberg Date: Wed, 19 Mar 2014 16:00:46 -0400 Subject: variable declaration cleanup Docker-DCO-1.1-Signed-off-by: Daniel Norberg (github: danielnorberg) --- opts/opts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opts/opts.go b/opts/opts.go index b2f21db30b..67f1c8fd48 100644 --- a/opts/opts.go +++ b/opts/opts.go @@ -143,7 +143,7 @@ func ValidateDomain(val string) (string, error) { return "", fmt.Errorf("%s is not a valid domain", val) } re := regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) - var ns = re.FindSubmatch([]byte(val)) + ns := re.FindSubmatch([]byte(val)) if len(ns) > 0 { return string(ns[1]), nil } -- cgit v1.2.1 From 78a0105eaf80ed85e2ee236632a2cc16998228f9 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 19 Mar 2014 17:09:12 -0400 Subject: api/client: var style tweak Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- api/client.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/api/client.go b/api/client.go index 5a69700704..e7abf04cb5 100644 --- a/api/client.go +++ b/api/client.go @@ -2055,8 +2055,10 @@ func (cli *DockerCli) CmdSave(args ...string) error { return nil } - var output io.Writer = cli.out - var err error + var ( + output io.Writer = cli.out + err error + ) if *outfile != "" { output, err = os.Create(*outfile) if err != nil { -- cgit v1.2.1 From 4434dcee89f7d0d0239f6b492b24e940cdbafb21 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 19 Mar 2014 23:23:45 +0200 Subject: fix failing test to use kill instead of stop TestCreateStartRestartStopStartKillRm was failing because stop has been changed to not kill containers. Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- integration/server_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/integration/server_test.go b/integration/server_test.go index a401f1306e..617f81fa4d 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -416,7 +416,7 @@ func TestRestartKillWait(t *testing.T) { }) } -func TestCreateStartRestartStopStartKillRm(t *testing.T) { +func TestCreateStartRestartKillStartKillRm(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke() @@ -456,8 +456,7 @@ func TestCreateStartRestartStopStartKillRm(t *testing.T) { t.Fatal(err) } - job = eng.Job("stop", id) - job.SetenvInt("t", 15) + job = eng.Job("kill", id) if err := job.Run(); err != nil { t.Fatal(err) } -- cgit v1.2.1 From f3765f96cfb37f6ea9f925f0d3174fe18c4152be Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 20 Mar 2014 09:08:52 +1000 Subject: add a link to the security documentation when we mention the docker group (or -G) Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/articles/security.rst | 2 ++ docs/sources/installation/binaries.rst | 3 ++- docs/sources/installation/ubuntulinux.rst | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/sources/articles/security.rst b/docs/sources/articles/security.rst index e738e9a847..ec2ab9bffd 100644 --- a/docs/sources/articles/security.rst +++ b/docs/sources/articles/security.rst @@ -82,6 +82,8 @@ when some applications start to misbehave. 
Control Groups have been around for a while as well: the code was started in 2006, and initially merged in kernel 2.6.24. +.. _dockersecurity_daemon: + Docker Daemon Attack Surface ---------------------------- diff --git a/docs/sources/installation/binaries.rst b/docs/sources/installation/binaries.rst index bfdfbe211f..a070599338 100644 --- a/docs/sources/installation/binaries.rst +++ b/docs/sources/installation/binaries.rst @@ -77,7 +77,8 @@ always run as the root user, but if you run the ``docker`` client as a user in the *docker* group then you don't need to add ``sudo`` to all the client commands. -.. warning:: The *docker* group is root-equivalent. +.. warning:: The *docker* group (or the group specified with ``-G``) is + root-equivalent; see :ref:`dockersecurity_daemon` details. Upgrades diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index 6998be8571..776090bff5 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -186,7 +186,7 @@ client commands. As of 0.9.0, you can specify that a group other than ``docker`` should own the Unix socket with the ``-G`` option. .. warning:: The *docker* group (or the group specified with ``-G``) is - root-equivalent. + root-equivalent; see :ref:`dockersecurity_daemon` details. **Example:** -- cgit v1.2.1 From 179e2c92d8d02d029d8aa54d53edb82b3fbcea2b Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Sun, 16 Mar 2014 21:10:59 +0000 Subject: Generate md5 and sha265 hashes when building, and upload them in hack/release.sh Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- hack/make/binary | 8 ++++ hack/release.sh | 122 ++++++++++++++++++++++++++++++++++--------------------- 2 files changed, 84 insertions(+), 46 deletions(-) mode change 100644 => 100755 hack/make/binary diff --git a/hack/make/binary b/hack/make/binary old mode 100644 new mode 100755 index 7272b1ede0..dee7d98dc6 --- a/hack/make/binary +++ b/hack/make/binary @@ -11,3 +11,11 @@ go build \ " \ ./docker echo "Created binary: $DEST/docker-$VERSION" + +if command -v md5sum &> /dev/null; then + md5sum "$DEST/docker-$VERSION" > "$DEST/docker-$VERSION.md5" +fi +if command -v sha256sum &> /dev/null; then + sha256sum "$DEST/docker-$VERSION" > "$DEST/docker-$VERSION.sha256" +fi + diff --git a/hack/release.sh b/hack/release.sh index c380d2239a..edcee98f38 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -55,33 +55,16 @@ RELEASE_BUNDLES=( if [ "$1" != '--release-regardless-of-test-failure' ]; then RELEASE_BUNDLES=( test "${RELEASE_BUNDLES[@]}" ) fi - -if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then - echo >&2 - echo >&2 'The build or tests appear to have failed.' - echo >&2 - echo >&2 'You, as the release maintainer, now have a couple options:' - echo >&2 '- delay release and fix issues' - echo >&2 '- delay release and fix issues' - echo >&2 '- did we mention how important this is? issues need fixing :)' - echo >&2 - echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' - echo >&2 ' really knows all the hairy problems at hand with the current release' - echo >&2 ' issues) may bypass this checking by running this script again with the' - echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' - echo >&2 ' running the test suite, and will only build the binaries and packages. Please' - echo >&2 ' avoid using this if at all possible.' 
- echo >&2 - echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' - echo >&2 ' should be used. If there are release issues, we should always err on the' - echo >&2 ' side of caution.' - echo >&2 - exit 1 -fi - + VERSION=$(cat VERSION) BUCKET=$AWS_S3_BUCKET +# These are the 2 keys we've used to sign the deb's +# release (get.docker.io +# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" +# test (test.docker.io) +# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" + setup_s3() { # Try creating the bucket. Ignore errors (it might already exist). s3cmd mb s3://$BUCKET 2>/dev/null || true @@ -114,12 +97,40 @@ s3_url() { esac } +build_all() { + if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then + echo >&2 + echo >&2 'The build or tests appear to have failed.' + echo >&2 + echo >&2 'You, as the release maintainer, now have a couple options:' + echo >&2 '- delay release and fix issues' + echo >&2 '- delay release and fix issues' + echo >&2 '- did we mention how important this is? issues need fixing :)' + echo >&2 + echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' + echo >&2 ' really knows all the hairy problems at hand with the current release' + echo >&2 ' issues) may bypass this checking by running this script again with the' + echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' + echo >&2 ' running the test suite, and will only build the binaries and packages. Please' + echo >&2 ' avoid using this if at all possible.' + echo >&2 + echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' + echo >&2 ' should be used. If there are release issues, we should always err on the' + echo >&2 ' side of caution.' + echo >&2 + exit 1 + fi +} + release_build() { GOOS=$1 GOARCH=$2 - BINARY=bundles/$VERSION/cross/$GOOS/$GOARCH/docker-$VERSION - TGZ=bundles/$VERSION/tgz/$GOOS/$GOARCH/docker-$VERSION.tgz + SOURCE_DIR=bundles/$VERSION/cross/$GOOS/$GOARCH + BINARY=docker-$VERSION + BINARY_MD5=docker-$VERSION.md5 + BINARY_SHA256=docker-$VERSION.sha256 + TGZ=docker-$VERSION.tgz # we need to map our GOOS and GOARCH to uname values # see https://en.wikipedia.org/wiki/Uname @@ -172,17 +183,29 @@ release_build() { fi echo "Uploading $BINARY to $S3OS/$S3ARCH/docker-$VERSION" - s3cmd --follow-symlinks --preserve --acl-public put $BINARY $S3DIR/docker-$VERSION + s3cmd --follow-symlinks --preserve --acl-public put $SOURCE_DIR/$BINARY $S3DIR/$BINARY + + echo "Uploading $BINARY_MD5 to $S3OS/$S3ARCH/docker-$VERSION.md5" + s3cmd --follow-symlinks --preserve --acl-public put $SOURCE_DIR/$BINARY_MD5 $S3DIR/$BINARY_MD5 + + echo "Uploading $BINARY_BINARY_SHA256 to $S3OS/$S3ARCH/docker-$VERSION.sha256" + s3cmd --follow-symlinks --preserve --acl-public put $SOURCE_DIR/$BINARY_SHA256 $S3DIR/$BINARY_SHA256 echo "Uploading $TGZ to $S3OS/$S3ARCH/docker-$VERSION.tgz" - s3cmd --follow-symlinks --preserve --acl-public put $TGZ $S3DIR/docker-$VERSION.tgz + s3cmd --follow-symlinks --preserve --acl-public put $SOURCE_DIR/$TGZ $S3DIR/$TGZ if [ -z "$NOLATEST" ]; then - echo "Copying $S3OS/$S3ARCH/docker-$VERSION to $S3OS/$S3ARCH/docker-latest" - s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest + echo "Copying $S3DIR/$BINARY to $S3DIR/docker-latest" + s3cmd --acl-public cp $S3DIR/$BINARY $S3DIR/docker-latest - echo "Copying $S3OS/$S3ARCH/docker-$VERSION.tgz to $S3OS/$S3ARCH/docker-latest.tgz" - s3cmd --acl-public cp $S3DIR/docker-$VERSION.tgz $S3DIR/docker-latest.tgz + echo "Copying $S3DIR/$BINARY_MD5 to 
$S3DIR/docker-latest.md5" + s3cmd --acl-public cp $S3DIR/$BINARY_MD5 $S3DIR/docker-latest.md5 + + echo "Copying $S3DIR/$BINARY_SHA256 to $S3DIR/docker-latest.sha256" + s3cmd --acl-public cp $S3DIR/$BINARY_SHA256 $S3DIR/docker-latest.sha256 + + echo "Copying $S3DIR/$TGZ $S3DIR/docker-latest.tgz" + s3cmd --acl-public cp $S3DIR/$TGZ $S3DIR/docker-latest.tgz fi } @@ -194,21 +217,8 @@ release_ubuntu() { echo >&2 './hack/make.sh must be run before release_ubuntu' exit 1 } - # Make sure that we have our keys - mkdir -p /.gnupg/ + s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true - gpg --list-keys releasedocker >/dev/null || { - gpg --gen-key --batch </dev/null || { + gpg --gen-key --batch < Date: Thu, 20 Mar 2014 09:35:58 +1000 Subject: whitespace-blind Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- hack/make/binary | 1 - hack/release.sh | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/hack/make/binary b/hack/make/binary index dee7d98dc6..f220d2dae6 100755 --- a/hack/make/binary +++ b/hack/make/binary @@ -18,4 +18,3 @@ fi if command -v sha256sum &> /dev/null; then sha256sum "$DEST/docker-$VERSION" > "$DEST/docker-$VERSION.sha256" fi - diff --git a/hack/release.sh b/hack/release.sh index edcee98f38..76acad4991 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -60,7 +60,7 @@ VERSION=$(cat VERSION) BUCKET=$AWS_S3_BUCKET # These are the 2 keys we've used to sign the deb's -# release (get.docker.io +# release (get.docker.io) # GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" # test (test.docker.io) # GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" @@ -342,7 +342,6 @@ main() { release_test } - main echo -- cgit v1.2.1 From e38e977a0410b754b6f318ff973dc15e6d756023 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Djibril=20Kon=C3=A9?= Date: Tue, 18 Mar 2014 21:18:36 +0100 Subject: Harmonize / across all name-related commands MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Docker-DCO-1.1-Signed-off-by: Djibril Koné (github: enokd) Harmonize / across all name-related commands Docker-DCO-1.1-Signed-off-by: Djibril Koné (github: enokd) Harmonize / across all name-related commands:Return an error when repeated / Docker-DCO-1.1-Signed-off-by: Djibril Koné (github: enokd) --- api/client.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/api/client.go b/api/client.go index 10dd9406dc..07a3d25e5c 100644 --- a/api/client.go +++ b/api/client.go @@ -1452,6 +1452,11 @@ func (cli *DockerCli) CmdCommit(args ...string) error { return nil } + re := regexp.MustCompile("/{2}") + if re.MatchString(repository) { + return fmt.Errorf("Error: Bad image name. 
Please rename your image in the format /") + } + v := url.Values{} v.Set("container", name) v.Set("repo", repository) -- cgit v1.2.1 From a8cc6ebb181abf58b12ed6ee037711f0b2f1eff2 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 20 Mar 2014 09:59:54 +1000 Subject: I'm not looking forward to documenting cli arguments that may or may not show depending on what plugins / drviers you choose Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/reference/commandline/cli.rst | 2 +- docs/sources/reference/run.rst | 15 ++++++++------- runconfig/parse.go | 2 +- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index d398b16e53..5f228f55b4 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1145,7 +1145,7 @@ image is removed. --volumes-from="": Mount all volumes from the given container(s) --entrypoint="": Overwrite the default entrypoint set by the image -w, --workdir="": Working directory inside the container - --lxc-conf=[]: Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode) --expose=[]: Expose a port from the container without publishing it to your host --link="": Add link to another container (name:alias) diff --git a/docs/sources/reference/run.rst b/docs/sources/reference/run.rst index 0b4f7eebf4..d2fe449c22 100644 --- a/docs/sources/reference/run.rst +++ b/docs/sources/reference/run.rst @@ -194,7 +194,7 @@ Runtime Privilege and LXC Configuration :: --privileged=false: Give extended privileges to this container - --lxc-conf=[]: Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" + --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" By default, Docker containers are "unprivileged" and cannot, for example, run a Docker daemon inside a Docker container. This is @@ -211,12 +211,13 @@ host. Additional information about running with ``--privileged`` is available on the `Docker Blog `_. -An operator can also specify LXC options using one or more -``--lxc-conf`` parameters. These can be new parameters or override -existing parameters from the lxc-template.go_. Note that in the -future, a given host's Docker daemon may not use LXC, so this is an -implementation-specific configuration meant for operators already -familiar with using LXC directly. +If the Docker daemon was started using the ``lxc`` exec-driver +(``docker -d --exec-driver=lxc``) then the operator can also specify +LXC options using one or more ``--lxc-conf`` parameters. These can be +new parameters or override existing parameters from the lxc-template.go_. +Note that in the future, a given host's Docker daemon may not use LXC, +so this is an implementation-specific configuration meant for operators +already familiar with using LXC directly. .. 
_lxc-template.go: https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go diff --git a/runconfig/parse.go b/runconfig/parse.go index cc33188ad5..c2591722d5 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -76,7 +76,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom dns search domains") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") - cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") if err := cmd.Parse(args); err != nil { return nil, nil, cmd, err -- cgit v1.2.1 From 62eb23aed50c9c820836c4b3f515cba2660b5c20 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Thu, 20 Mar 2014 10:18:08 +1000 Subject: missed a bug Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- hack/release.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hack/release.sh b/hack/release.sh index 76acad4991..d42fd41ee9 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -173,8 +173,8 @@ release_build() { S3DIR=s3://$BUCKET/builds/$S3OS/$S3ARCH - if [ ! -x "$BINARY" ]; then - echo >&2 "error: can't find $BINARY - was it compiled properly?" + if [ ! -x "$SOURCE_DIR/$BINARY" ]; then + echo >&2 "error: can't find $SOURCE_DIR/$BINARY - was it compiled properly?" exit 1 fi if [ ! -f "$TGZ" ]; then @@ -188,7 +188,7 @@ release_build() { echo "Uploading $BINARY_MD5 to $S3OS/$S3ARCH/docker-$VERSION.md5" s3cmd --follow-symlinks --preserve --acl-public put $SOURCE_DIR/$BINARY_MD5 $S3DIR/$BINARY_MD5 - echo "Uploading $BINARY_BINARY_SHA256 to $S3OS/$S3ARCH/docker-$VERSION.sha256" + echo "Uploading $BINARY_SHA256 to $S3OS/$S3ARCH/docker-$VERSION.sha256" s3cmd --follow-symlinks --preserve --acl-public put $SOURCE_DIR/$BINARY_SHA256 $S3DIR/$BINARY_SHA256 echo "Uploading $TGZ to $S3OS/$S3ARCH/docker-$VERSION.tgz" -- cgit v1.2.1 From 6b46a09186b6a53959d567014b8d0e1cff761bc8 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Wed, 19 Mar 2014 19:58:39 -0600 Subject: Fix a lot of the sha256 and md5 stuff to be more DRY and extendible, and on more things (specifically, the tgz files too) Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/make.sh | 21 +++++++++ hack/make/binary | 9 +--- hack/make/dynbinary | 6 ++- hack/make/tgz | 2 + hack/release.sh | 121 ++++++++++++++++++++++++++++++---------------------- 5 files changed, 100 insertions(+), 59 deletions(-) diff --git a/hack/make.sh b/hack/make.sh index 994da8d9ad..b77e9b7f44 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -149,6 +149,27 @@ find_dirs() { \) -name "$1" -print0 | xargs -0n1 dirname | sort -u } +hash_files() { + while [ $# -gt 0 ]; do + f="$1" + shift + dir="$(dirname "$f")" + base="$(basename "$f")" + for hashAlgo in md5 sha256; do + if command -v "${hashAlgo}sum" &> /dev/null; then + ( + # subshell and cd so that we get output files like: + # $HASH docker-$VERSION + # instead of: + # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION + cd "$dir" + "${hashAlgo}sum" "$base" > "$base.$hashAlgo" + ) + fi + done + done +} + bundle() { bundlescript=$1 bundle=$(basename $bundlescript) diff --git a/hack/make/binary b/hack/make/binary 
index f220d2dae6..7b4d7b5b5b 100755 --- a/hack/make/binary +++ b/hack/make/binary @@ -3,7 +3,7 @@ DEST=$1 go build \ - -o $DEST/docker-$VERSION \ + -o "$DEST/docker-$VERSION" \ "${BUILDFLAGS[@]}" \ -ldflags " $LDFLAGS @@ -12,9 +12,4 @@ go build \ ./docker echo "Created binary: $DEST/docker-$VERSION" -if command -v md5sum &> /dev/null; then - md5sum "$DEST/docker-$VERSION" > "$DEST/docker-$VERSION.md5" -fi -if command -v sha256sum &> /dev/null; then - sha256sum "$DEST/docker-$VERSION" > "$DEST/docker-$VERSION.sha256" -fi +hash_files "$DEST/docker-$VERSION" diff --git a/hack/make/dynbinary b/hack/make/dynbinary index d4f583fb62..75cffe3dcc 100644 --- a/hack/make/dynbinary +++ b/hack/make/dynbinary @@ -5,7 +5,7 @@ DEST=$1 if [ -z "$DOCKER_CLIENTONLY" ]; then # dockerinit still needs to be a static binary, even if docker is dynamic go build \ - -o $DEST/dockerinit-$VERSION \ + -o "$DEST/dockerinit-$VERSION" \ "${BUILDFLAGS[@]}" \ -ldflags " $LDFLAGS @@ -14,7 +14,9 @@ if [ -z "$DOCKER_CLIENTONLY" ]; then " \ ./dockerinit echo "Created binary: $DEST/dockerinit-$VERSION" - ln -sf dockerinit-$VERSION $DEST/dockerinit + ln -sf "dockerinit-$VERSION" "$DEST/dockerinit" + + hash_files "$DEST/dockerinit-$VERSION" sha1sum= if command -v sha1sum &> /dev/null; then diff --git a/hack/make/tgz b/hack/make/tgz index 5d03306322..120339976b 100644 --- a/hack/make/tgz +++ b/hack/make/tgz @@ -23,6 +23,8 @@ for d in "$CROSS/"*/*; do tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr + hash_files "$TGZ" + rm -rf "$DEST/build" echo "Created tgz: $TGZ" diff --git a/hack/release.sh b/hack/release.sh index 46a93af70c..6f9df8c7e6 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -122,91 +122,113 @@ build_all() { fi } +upload_release_build() { + src="$1" + dst="$2" + latest="$3" + + echo + echo "Uploading $src" + echo " to $dst" + echo + s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst" + if [ "$latest" ]; then + echo + echo "Copying to $latest" + echo + s3cmd --acl-public cp "$dst" "$latest" + fi + + # get hash files too (see hash_files() in hack/make.sh) + for hashAlgo in md5 sha256; do + if [ -e "$src.$hashAlgo" ]; then + echo + echo "Uploading $src.$hashAlgo" + echo " to $dst.$hashAlgo" + echo + s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo" + if [ "$latest" ]; then + echo + echo "Copying to $latest.$hashAlgo" + echo + s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo" + fi + fi + done +} + release_build() { GOOS=$1 GOARCH=$2 - SOURCE_DIR=bundles/$VERSION/cross/$GOOS/$GOARCH - BINARY=docker-$VERSION - BINARY_MD5=docker-$VERSION.md5 - BINARY_SHA256=docker-$VERSION.sha256 - TGZ=docker-$VERSION.tgz + binDir=bundles/$VERSION/cross/$GOOS/$GOARCH + tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH + binary=docker-$VERSION + tgz=docker-$VERSION.tgz + + latestBase= + if [ -z "$NOLATEST" ]; then + latestBase=docker-latest + fi # we need to map our GOOS and GOARCH to uname values # see https://en.wikipedia.org/wiki/Uname # ie, GOOS=linux -> "uname -s"=Linux - S3OS=$GOOS - case "$S3OS" in + s3Os=$GOOS + case "$s3Os" in darwin) - S3OS=Darwin + s3Os=Darwin ;; freebsd) - S3OS=FreeBSD + s3Os=FreeBSD ;; linux) - S3OS=Linux + s3Os=Linux ;; *) - echo >&2 "error: can't convert $S3OS to an appropriate value for 'uname -s'" + echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" exit 1 ;; esac - S3ARCH=$GOARCH - case "$S3ARCH" in + s3Arch=$GOARCH + case "$s3Arch" in amd64) - S3ARCH=x86_64 + s3Arch=x86_64 ;; 386) - 
S3ARCH=i386 + s3Arch=i386 ;; arm) - S3ARCH=armel + s3Arch=armel # someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too ;; *) - echo >&2 "error: can't convert $S3ARCH to an appropriate value for 'uname -m'" + echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" exit 1 ;; esac - S3DIR=s3://$BUCKET/builds/$S3OS/$S3ARCH + s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch + latest= + latestTgz= + if [ "$latestBase" ]; then + latest="$s3Dir/$latestBase" + latestTgz="$s3Dir/$latestBase.tgz" + fi - if [ ! -x "$SOURCE_DIR/$BINARY" ]; then - echo >&2 "error: can't find $SOURCE_DIR/$BINARY - was it compiled properly?" + if [ ! -x "$binDir/$binary" ]; then + echo >&2 "error: can't find $binDir/$binary - was it compiled properly?" exit 1 fi - if [ ! -f "$TGZ" ]; then - echo >&2 "error: can't find $TGZ - was it packaged properly?" + if [ ! -f "$tgzDir/$tgz" ]; then + echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" exit 1 fi - echo "Uploading $BINARY to $S3OS/$S3ARCH/docker-$VERSION" - s3cmd --follow-symlinks --preserve --acl-public put $SOURCE_DIR/$BINARY $S3DIR/$BINARY - - echo "Uploading $BINARY_MD5 to $S3OS/$S3ARCH/docker-$VERSION.md5" - s3cmd --follow-symlinks --preserve --acl-public put $SOURCE_DIR/$BINARY_MD5 $S3DIR/$BINARY_MD5 - - echo "Uploading $BINARY_SHA256 to $S3OS/$S3ARCH/docker-$VERSION.sha256" - s3cmd --follow-symlinks --preserve --acl-public put $SOURCE_DIR/$BINARY_SHA256 $S3DIR/$BINARY_SHA256 - - echo "Uploading $TGZ to $S3OS/$S3ARCH/docker-$VERSION.tgz" - s3cmd --follow-symlinks --preserve --acl-public put $SOURCE_DIR/$TGZ $S3DIR/$TGZ - - if [ -z "$NOLATEST" ]; then - echo "Copying $S3DIR/$BINARY to $S3DIR/docker-latest" - s3cmd --acl-public cp $S3DIR/$BINARY $S3DIR/docker-latest - - echo "Copying $S3DIR/$BINARY_MD5 to $S3DIR/docker-latest.md5" - s3cmd --acl-public cp $S3DIR/$BINARY_MD5 $S3DIR/docker-latest.md5 - - echo "Copying $S3DIR/$BINARY_SHA256 to $S3DIR/docker-latest.sha256" - s3cmd --acl-public cp $S3DIR/$BINARY_SHA256 $S3DIR/docker-latest.sha256 - - echo "Copying $S3DIR/$TGZ $S3DIR/docker-latest.tgz" - s3cmd --acl-public cp $S3DIR/$TGZ $S3DIR/docker-latest.tgz - fi + upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" + upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" } # Upload the 'ubuntu' bundle to S3: @@ -217,8 +239,6 @@ release_ubuntu() { echo >&2 './hack/make.sh must be run before release_ubuntu' exit 1 } - - s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true # Sign our packages dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \ @@ -318,10 +338,11 @@ release_test() { setup_gpg() { # Make sure that we have our keys mkdir -p /.gnupg/ + s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true gpg --list-keys releasedocker >/dev/null || { gpg --gen-key --batch < Date: Thu, 20 Mar 2014 17:32:59 +0100 Subject: devicemapper: Better/faster shutdown Right now shutdown is looping over *all* devicemapper devices and actively deactivating them, this is pretty slow if you have a lot of non-active containers. We instead only deactivate the devices that are mounted. We also do the shutdown unmount using MNT_DETACH which forces the unmount in the global namespace, even if it is busy because of some container having it mounted. This means the device will be freed when that container exits. Also, we move the call to waitClose to deactivateDevice because all callers of any of them call both anyway. 
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- runtime/graphdriver/devmapper/deviceset.go | 36 ++++++++++++++---------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go index 4d33e243e0..dfdb180bb2 100644 --- a/runtime/graphdriver/devmapper/deviceset.go +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -14,6 +14,7 @@ import ( "strconv" "strings" "sync" + "syscall" "time" ) @@ -677,6 +678,12 @@ func (devices *DeviceSet) deactivateDevice(hash string) error { utils.Debugf("[devmapper] deactivateDevice(%s)", hash) defer utils.Debugf("[devmapper] deactivateDevice END") + // Wait for the unmount to be effective, + // by watching the value of Info.OpenCount for the device + if err := devices.waitClose(hash); err != nil { + utils.Errorf("Warning: error waiting for device %s to close: %s\n", hash, err) + } + info := devices.Devices[hash] if info == nil { return fmt.Errorf("Unknown device %s", hash) @@ -799,24 +806,18 @@ func (devices *DeviceSet) Shutdown() error { for _, info := range devices.Devices { info.lock.Lock() if info.mountCount > 0 { - if err := sysUnmount(info.mountPath, 0); err != nil { + // We use MNT_DETACH here in case it is still busy in some running + // container. This means it'll go away from the global scope directly, + // and the device will be released when that container dies. + if err := sysUnmount(info.mountPath, syscall.MNT_DETACH); err != nil { utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err) } - } - info.lock.Unlock() - } - - for _, d := range devices.Devices { - d.lock.Lock() - if err := devices.waitClose(d.Hash); err != nil { - utils.Errorf("Warning: error waiting for device %s to unmount: %s\n", d.Hash, err) - } - if err := devices.deactivateDevice(d.Hash); err != nil { - utils.Debugf("Shutdown deactivate %s , error: %s\n", d.Hash, err) + if err := devices.deactivateDevice(info.Hash); err != nil { + utils.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, err) + } } - - d.lock.Unlock() + info.lock.Unlock() } if err := devices.deactivatePool(); err != nil { @@ -920,14 +921,11 @@ func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { return err } utils.Debugf("[devmapper] Unmount done") - // Wait for the unmount to be effective, - // by watching the value of Info.OpenCount for the device - if err := devices.waitClose(hash); err != nil { + + if err := devices.deactivateDevice(hash); err != nil { return err } - devices.deactivateDevice(hash) - info.mountPath = "" return nil -- cgit v1.2.1 From fbd6fee4ab9b98f477f365307a641b879badd282 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Thu, 20 Mar 2014 13:09:34 -0400 Subject: Fix double single dash arg issues in docs Docker-DCO-1.1-Signed-off-by: Brian Goff (github: cpuguy83) --- docs/sources/articles/runmetrics.rst | 2 +- docs/sources/examples/postgresql_service.rst | 2 +- docs/sources/use/working_with_volumes.rst | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sources/articles/runmetrics.rst b/docs/sources/articles/runmetrics.rst index afb7f82e39..6b705fb737 100644 --- a/docs/sources/articles/runmetrics.rst +++ b/docs/sources/articles/runmetrics.rst @@ -63,7 +63,7 @@ For Docker containers using cgroups, the container name will be the full ID or long ID of the container. 
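The detached unmount described above boils down to passing MNT_DETACH to the unmount syscall. A minimal, Linux-only Go sketch for illustration (lazyUnmount and mountPath are placeholder names, not the devmapper code itself):

package example

import (
	"log"
	"syscall"
)

// lazyUnmount detaches mountPath from the global mount namespace right away.
// If a running container still holds the mount, the underlying device is only
// released once that container exits, which is the behaviour the faster
// shutdown path relies on.
func lazyUnmount(mountPath string) {
	if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
		log.Printf("lazy unmount of %s failed: %s", mountPath, err)
	}
}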
If a container shows up as ae836c95b4c3 in ``docker ps``, its long ID might be something like ``ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79``. You -can look it up with ``docker inspect`` or ``docker ps -notrunc``. +can look it up with ``docker inspect`` or ``docker ps --no-trunc``. Putting everything together to look at the memory metrics for a Docker container, take a look at ``/sys/fs/cgroup/memory/lxc//``. diff --git a/docs/sources/examples/postgresql_service.rst b/docs/sources/examples/postgresql_service.rst index 66b0fd7aa5..488e1530b2 100644 --- a/docs/sources/examples/postgresql_service.rst +++ b/docs/sources/examples/postgresql_service.rst @@ -37,7 +37,7 @@ And run the PostgreSQL server container (in the foreground): .. code-block:: bash - $ sudo docker run --rm -P -name pg_test eg_postgresql + $ sudo docker run --rm -P --name pg_test eg_postgresql There are 2 ways to connect to the PostgreSQL server. We can use :ref:`working_with_links_names`, or we can access it from our host (or the network). diff --git a/docs/sources/use/working_with_volumes.rst b/docs/sources/use/working_with_volumes.rst index 02f4e71b13..d2f035dc84 100644 --- a/docs/sources/use/working_with_volumes.rst +++ b/docs/sources/use/working_with_volumes.rst @@ -129,7 +129,7 @@ because they are external to images. Instead you can use ``--volumes-from`` to start a new container that can access the data-container's volume. For example:: - $ sudo docker run -rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data + $ sudo docker run --rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data * ``--rm`` - remove the container when it exits * ``--volumes-from DATA`` - attach to the volumes shared by the ``DATA`` container @@ -140,7 +140,7 @@ data-container's volume. For example:: Then to restore to the same container, or another that you've made elsewhere:: # create a new data container - $ sudo docker run -v /data -name DATA2 busybox true + $ sudo docker run -v /data --name DATA2 busybox true # untar the backup files into the new container's data volume $ sudo docker run --rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar data/ -- cgit v1.2.1 From ab0c9b385c47d818a2105c2114573f5beedbd3ba Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 20 Mar 2014 14:59:40 -0600 Subject: Remove the inotifywait hack from our upstart host-integration example that is no longer necessary Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- docs/sources/use/host_integration.rst | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/sources/use/host_integration.rst b/docs/sources/use/host_integration.rst index ed341cd4bc..cb920a5908 100644 --- a/docs/sources/use/host_integration.rst +++ b/docs/sources/use/host_integration.rst @@ -43,11 +43,6 @@ into it: stop on runlevel [!2345] respawn script - # Wait for docker to finish starting up first. - FILE=/var/run/docker.sock - while [ ! 
-e $FILE ] ; do - inotifywait -t 2 -e create $(dirname $FILE) - done /usr/bin/docker start -a redis_server end script -- cgit v1.2.1 From 8944fb2e9b07d5a764f8d48065b9afd73364f640 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Thu, 20 Mar 2014 21:51:28 +0000 Subject: rename lxc to bridge Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- builtins/builtins.go | 7 +- runtime/networkdriver/bridge/driver.go | 482 +++++++++++++++++++++++++++++++++ runtime/networkdriver/lxc/driver.go | 482 --------------------------------- runtime/runtime.go | 2 +- 4 files changed, 486 insertions(+), 487 deletions(-) create mode 100644 runtime/networkdriver/bridge/driver.go delete mode 100644 runtime/networkdriver/lxc/driver.go diff --git a/builtins/builtins.go b/builtins/builtins.go index 86f3973c62..10ee9b19e6 100644 --- a/builtins/builtins.go +++ b/builtins/builtins.go @@ -1,10 +1,9 @@ package builtins import ( - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/runtime/networkdriver/lxc" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/runtime/networkdriver/bridge" "github.com/dotcloud/docker/server" ) @@ -35,5 +34,5 @@ func remote(eng *engine.Engine) { // func daemon(eng *engine.Engine) { eng.Register("initserver", server.InitServer) - eng.Register("init_networkdriver", lxc.InitDriver) + eng.Register("init_networkdriver", bridge.InitDriver) } diff --git a/runtime/networkdriver/bridge/driver.go b/runtime/networkdriver/bridge/driver.go new file mode 100644 index 0000000000..41588b1c27 --- /dev/null +++ b/runtime/networkdriver/bridge/driver.go @@ -0,0 +1,482 @@ +package bridge + +import ( + "fmt" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/pkg/iptables" + "github.com/dotcloud/docker/pkg/netlink" + "github.com/dotcloud/docker/runtime/networkdriver" + "github.com/dotcloud/docker/runtime/networkdriver/ipallocator" + "github.com/dotcloud/docker/runtime/networkdriver/portallocator" + "github.com/dotcloud/docker/runtime/networkdriver/portmapper" + "github.com/dotcloud/docker/utils" + "io/ioutil" + "log" + "net" + "strings" + "syscall" + "unsafe" +) + +const ( + DefaultNetworkBridge = "docker0" + siocBRADDBR = 0x89a0 +) + +// Network interface represents the networking stack of a container +type networkInterface struct { + IP net.IP + PortMappings []net.Addr // there are mappings to the host interfaces +} + +var ( + addrs = []string{ + // Here we don't follow the convention of using the 1st IP of the range for the gateway. + // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges. + // In theory this shouldn't matter - in practice there's bound to be a few scripts relying + // on the internal addressing or other stupid things like that. + // The shouldn't, but hey, let's not break them unless we really have to. 
+ "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23 + "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive + "10.1.42.1/16", + "10.42.42.1/16", + "172.16.42.1/24", + "172.16.43.1/24", + "172.16.44.1/24", + "10.0.42.1/24", + "10.0.43.1/24", + "192.168.42.1/24", + "192.168.43.1/24", + "192.168.44.1/24", + } + + bridgeIface string + bridgeNetwork *net.IPNet + + defaultBindingIP = net.ParseIP("0.0.0.0") + currentInterfaces = make(map[string]*networkInterface) +) + +func InitDriver(job *engine.Job) engine.Status { + var ( + network *net.IPNet + enableIPTables = job.GetenvBool("EnableIptables") + icc = job.GetenvBool("InterContainerCommunication") + ipForward = job.GetenvBool("EnableIpForward") + bridgeIP = job.Getenv("BridgeIP") + ) + + if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" { + defaultBindingIP = net.ParseIP(defaultIP) + } + + bridgeIface = job.Getenv("BridgeIface") + if bridgeIface == "" { + bridgeIface = DefaultNetworkBridge + } + + addr, err := networkdriver.GetIfaceAddr(bridgeIface) + if err != nil { + // If the iface is not found, try to create it + job.Logf("creating new bridge for %s", bridgeIface) + if err := createBridge(bridgeIP); err != nil { + job.Error(err) + return engine.StatusErr + } + + job.Logf("getting iface addr") + addr, err = networkdriver.GetIfaceAddr(bridgeIface) + if err != nil { + job.Error(err) + return engine.StatusErr + } + network = addr.(*net.IPNet) + } else { + network = addr.(*net.IPNet) + } + + // Configure iptables for link support + if enableIPTables { + if err := setupIPTables(addr, icc); err != nil { + job.Error(err) + return engine.StatusErr + } + } + + if ipForward { + // Enable IPv4 forwarding + if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil { + job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err) + } + } + + // We can always try removing the iptables + if err := iptables.RemoveExistingChain("DOCKER"); err != nil { + job.Error(err) + return engine.StatusErr + } + + if enableIPTables { + chain, err := iptables.NewChain("DOCKER", bridgeIface) + if err != nil { + job.Error(err) + return engine.StatusErr + } + portmapper.SetIptablesChain(chain) + } + + bridgeNetwork = network + + // https://github.com/dotcloud/docker/issues/2768 + job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP) + + for name, f := range map[string]engine.Handler{ + "allocate_interface": Allocate, + "release_interface": Release, + "allocate_port": AllocatePort, + "link": LinkContainers, + } { + if err := job.Eng.Register(name, f); err != nil { + job.Error(err) + return engine.StatusErr + } + } + return engine.StatusOK +} + +func setupIPTables(addr net.Addr, icc bool) error { + // Enable NAT + natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"} + + if !iptables.Exists(natArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { + return fmt.Errorf("Unable to enable network bridge NAT: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables postrouting: %s", output) + } + } + + var ( + args = []string{"FORWARD", "-i", bridgeIface, "-o", bridgeIface, "-j"} + acceptArgs = append(args, "ACCEPT") + dropArgs = append(args, "DROP") + ) + + if !icc { + iptables.Raw(append([]string{"-D"}, acceptArgs...)...) + + if !iptables.Exists(dropArgs...) 
{ + utils.Debugf("Disable inter-container communication") + if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { + return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error disabling intercontainer communication: %s", output) + } + } + } else { + iptables.Raw(append([]string{"-D"}, dropArgs...)...) + + if !iptables.Exists(acceptArgs...) { + utils.Debugf("Enable inter-container communication") + if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil { + return fmt.Errorf("Unable to allow intercontainer communication: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error enabling intercontainer communication: %s", output) + } + } + } + + // Accept all non-intercontainer outgoing packets + outgoingArgs := []string{"FORWARD", "-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"} + if !iptables.Exists(outgoingArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { + return fmt.Errorf("Unable to allow outgoing packets: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables allow outgoing: %s", output) + } + } + + // Accept incoming packets for existing connections + existingArgs := []string{"FORWARD", "-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} + + if !iptables.Exists(existingArgs...) { + if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { + return fmt.Errorf("Unable to allow incoming packets: %s", err) + } else if len(output) != 0 { + return fmt.Errorf("Error iptables allow incoming: %s", output) + } + } + return nil +} + +// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, +// and attempts to configure it with an address which doesn't conflict with any other interface on the host. +// If it can't find an address which doesn't conflict, it will return an error. +func createBridge(bridgeIP string) error { + nameservers := []string{} + resolvConf, _ := utils.GetResolvConf() + // we don't check for an error here, because we don't really care + // if we can't read /etc/resolv.conf. So instead we skip the append + // if resolvConf is nil. It either doesn't exist, or we can't read it + // for some reason. + if resolvConf != nil { + nameservers = append(nameservers, utils.GetNameserversAsCIDR(resolvConf)...) + } + + var ifaceAddr string + if len(bridgeIP) != 0 { + _, _, err := net.ParseCIDR(bridgeIP) + if err != nil { + return err + } + ifaceAddr = bridgeIP + } else { + for _, addr := range addrs { + _, dockerNetwork, err := net.ParseCIDR(addr) + if err != nil { + return err + } + if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil { + if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil { + ifaceAddr = addr + break + } else { + utils.Debugf("%s %s", addr, err) + } + } + } + } + + if ifaceAddr == "" { + return fmt.Errorf("Could not find a free IP address range for interface '%s'. 
Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface) + } + utils.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) + + if err := createBridgeIface(bridgeIface); err != nil { + return err + } + + iface, err := net.InterfaceByName(bridgeIface) + if err != nil { + return err + } + + ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr) + if err != nil { + return err + } + + if netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { + return fmt.Errorf("Unable to add private network: %s", err) + } + if err := netlink.NetworkLinkUp(iface); err != nil { + return fmt.Errorf("Unable to start network bridge: %s", err) + } + return nil +} + +// Create the actual bridge device. This is more backward-compatible than +// netlink.NetworkLinkAdd and works on RHEL 6. +func createBridgeIface(name string) error { + s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP) + if err != nil { + utils.Debugf("Bridge socket creation failed IPv6 probably not enabled: %v", err) + s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP) + if err != nil { + return fmt.Errorf("Error creating bridge creation socket: %s", err) + } + } + defer syscall.Close(s) + + nameBytePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return fmt.Errorf("Error converting bridge name %s to byte array: %s", name, err) + } + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), siocBRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { + return fmt.Errorf("Error creating bridge: %s", err) + } + return nil +} + +// Allocate a network interface +func Allocate(job *engine.Job) engine.Status { + var ( + ip *net.IP + err error + id = job.Args[0] + requestedIP = net.ParseIP(job.Getenv("RequestedIP")) + ) + + if requestedIP != nil { + ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP) + } else { + ip, err = ipallocator.RequestIP(bridgeNetwork, nil) + } + if err != nil { + job.Error(err) + return engine.StatusErr + } + + out := engine.Env{} + out.Set("IP", ip.String()) + out.Set("Mask", bridgeNetwork.Mask.String()) + out.Set("Gateway", bridgeNetwork.IP.String()) + out.Set("Bridge", bridgeIface) + + size, _ := bridgeNetwork.Mask.Size() + out.SetInt("IPPrefixLen", size) + + currentInterfaces[id] = &networkInterface{ + IP: *ip, + } + + out.WriteTo(job.Stdout) + + return engine.StatusOK +} + +// release an interface for a select ip +func Release(job *engine.Job) engine.Status { + var ( + id = job.Args[0] + containerInterface = currentInterfaces[id] + ip net.IP + port int + proto string + ) + + if containerInterface == nil { + return job.Errorf("No network information to release for %s", id) + } + + for _, nat := range containerInterface.PortMappings { + if err := portmapper.Unmap(nat); err != nil { + log.Printf("Unable to unmap port %s: %s", nat, err) + } + + // this is host mappings + switch a := nat.(type) { + case *net.TCPAddr: + proto = "tcp" + ip = a.IP + port = a.Port + case *net.UDPAddr: + proto = "udp" + ip = a.IP + port = a.Port + } + + if err := portallocator.ReleasePort(ip, proto, port); err != nil { + log.Printf("Unable to release port %s", nat) + } + } + + if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil { + log.Printf("Unable to release ip %s\n", err) + } + return engine.StatusOK +} + +// Allocate an external port and map it to the interface +func AllocatePort(job *engine.Job) engine.Status { + var ( + err error + + ip = defaultBindingIP + id = job.Args[0] + 
hostIP = job.Getenv("HostIP") + hostPort = job.GetenvInt("HostPort") + containerPort = job.GetenvInt("ContainerPort") + proto = job.Getenv("Proto") + network = currentInterfaces[id] + ) + + if hostIP != "" { + ip = net.ParseIP(hostIP) + } + + // host ip, proto, and host port + hostPort, err = portallocator.RequestPort(ip, proto, hostPort) + if err != nil { + job.Error(err) + return engine.StatusErr + } + + var ( + container net.Addr + host net.Addr + ) + + if proto == "tcp" { + host = &net.TCPAddr{IP: ip, Port: hostPort} + container = &net.TCPAddr{IP: network.IP, Port: containerPort} + } else { + host = &net.UDPAddr{IP: ip, Port: hostPort} + container = &net.UDPAddr{IP: network.IP, Port: containerPort} + } + + if err := portmapper.Map(container, ip, hostPort); err != nil { + portallocator.ReleasePort(ip, proto, hostPort) + + job.Error(err) + return engine.StatusErr + } + network.PortMappings = append(network.PortMappings, host) + + out := engine.Env{} + out.Set("HostIP", ip.String()) + out.SetInt("HostPort", hostPort) + + if _, err := out.WriteTo(job.Stdout); err != nil { + job.Error(err) + return engine.StatusErr + } + return engine.StatusOK +} + +func LinkContainers(job *engine.Job) engine.Status { + var ( + action = job.Args[0] + childIP = job.Getenv("ChildIP") + parentIP = job.Getenv("ParentIP") + ignoreErrors = job.GetenvBool("IgnoreErrors") + ports = job.GetenvList("Ports") + ) + split := func(p string) (string, string) { + parts := strings.Split(p, "/") + return parts[0], parts[1] + } + + for _, p := range ports { + port, proto := split(p) + if output, err := iptables.Raw(action, "FORWARD", + "-i", bridgeIface, "-o", bridgeIface, + "-p", proto, + "-s", parentIP, + "--dport", port, + "-d", childIP, + "-j", "ACCEPT"); !ignoreErrors && err != nil { + job.Error(err) + return engine.StatusErr + } else if len(output) != 0 { + job.Errorf("Error toggle iptables forward: %s", output) + return engine.StatusErr + } + + if output, err := iptables.Raw(action, "FORWARD", + "-i", bridgeIface, "-o", bridgeIface, + "-p", proto, + "-s", childIP, + "--sport", port, + "-d", parentIP, + "-j", "ACCEPT"); !ignoreErrors && err != nil { + job.Error(err) + return engine.StatusErr + } else if len(output) != 0 { + job.Errorf("Error toggle iptables forward: %s", output) + return engine.StatusErr + } + } + return engine.StatusOK +} diff --git a/runtime/networkdriver/lxc/driver.go b/runtime/networkdriver/lxc/driver.go deleted file mode 100644 index 827de2a609..0000000000 --- a/runtime/networkdriver/lxc/driver.go +++ /dev/null @@ -1,482 +0,0 @@ -package lxc - -import ( - "fmt" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/pkg/iptables" - "github.com/dotcloud/docker/pkg/netlink" - "github.com/dotcloud/docker/runtime/networkdriver" - "github.com/dotcloud/docker/runtime/networkdriver/ipallocator" - "github.com/dotcloud/docker/runtime/networkdriver/portallocator" - "github.com/dotcloud/docker/runtime/networkdriver/portmapper" - "github.com/dotcloud/docker/utils" - "io/ioutil" - "log" - "net" - "strings" - "syscall" - "unsafe" -) - -const ( - DefaultNetworkBridge = "docker0" - siocBRADDBR = 0x89a0 -) - -// Network interface represents the networking stack of a container -type networkInterface struct { - IP net.IP - PortMappings []net.Addr // there are mappings to the host interfaces -} - -var ( - addrs = []string{ - // Here we don't follow the convention of using the 1st IP of the range for the gateway. - // This is to use the same gateway IPs as the /24 ranges, which predate the /16 ranges. 
- // In theory this shouldn't matter - in practice there's bound to be a few scripts relying - // on the internal addressing or other stupid things like that. - // The shouldn't, but hey, let's not break them unless we really have to. - "172.17.42.1/16", // Don't use 172.16.0.0/16, it conflicts with EC2 DNS 172.16.0.23 - "10.0.42.1/16", // Don't even try using the entire /8, that's too intrusive - "10.1.42.1/16", - "10.42.42.1/16", - "172.16.42.1/24", - "172.16.43.1/24", - "172.16.44.1/24", - "10.0.42.1/24", - "10.0.43.1/24", - "192.168.42.1/24", - "192.168.43.1/24", - "192.168.44.1/24", - } - - bridgeIface string - bridgeNetwork *net.IPNet - - defaultBindingIP = net.ParseIP("0.0.0.0") - currentInterfaces = make(map[string]*networkInterface) -) - -func InitDriver(job *engine.Job) engine.Status { - var ( - network *net.IPNet - enableIPTables = job.GetenvBool("EnableIptables") - icc = job.GetenvBool("InterContainerCommunication") - ipForward = job.GetenvBool("EnableIpForward") - bridgeIP = job.Getenv("BridgeIP") - ) - - if defaultIP := job.Getenv("DefaultBindingIP"); defaultIP != "" { - defaultBindingIP = net.ParseIP(defaultIP) - } - - bridgeIface = job.Getenv("BridgeIface") - if bridgeIface == "" { - bridgeIface = DefaultNetworkBridge - } - - addr, err := networkdriver.GetIfaceAddr(bridgeIface) - if err != nil { - // If the iface is not found, try to create it - job.Logf("creating new bridge for %s", bridgeIface) - if err := createBridge(bridgeIP); err != nil { - job.Error(err) - return engine.StatusErr - } - - job.Logf("getting iface addr") - addr, err = networkdriver.GetIfaceAddr(bridgeIface) - if err != nil { - job.Error(err) - return engine.StatusErr - } - network = addr.(*net.IPNet) - } else { - network = addr.(*net.IPNet) - } - - // Configure iptables for link support - if enableIPTables { - if err := setupIPTables(addr, icc); err != nil { - job.Error(err) - return engine.StatusErr - } - } - - if ipForward { - // Enable IPv4 forwarding - if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte{'1', '\n'}, 0644); err != nil { - job.Logf("WARNING: unable to enable IPv4 forwarding: %s\n", err) - } - } - - // We can always try removing the iptables - if err := iptables.RemoveExistingChain("DOCKER"); err != nil { - job.Error(err) - return engine.StatusErr - } - - if enableIPTables { - chain, err := iptables.NewChain("DOCKER", bridgeIface) - if err != nil { - job.Error(err) - return engine.StatusErr - } - portmapper.SetIptablesChain(chain) - } - - bridgeNetwork = network - - // https://github.com/dotcloud/docker/issues/2768 - job.Eng.Hack_SetGlobalVar("httpapi.bridgeIP", bridgeNetwork.IP) - - for name, f := range map[string]engine.Handler{ - "allocate_interface": Allocate, - "release_interface": Release, - "allocate_port": AllocatePort, - "link": LinkContainers, - } { - if err := job.Eng.Register(name, f); err != nil { - job.Error(err) - return engine.StatusErr - } - } - return engine.StatusOK -} - -func setupIPTables(addr net.Addr, icc bool) error { - // Enable NAT - natArgs := []string{"POSTROUTING", "-t", "nat", "-s", addr.String(), "!", "-d", addr.String(), "-j", "MASQUERADE"} - - if !iptables.Exists(natArgs...) 
{ - if output, err := iptables.Raw(append([]string{"-I"}, natArgs...)...); err != nil { - return fmt.Errorf("Unable to enable network bridge NAT: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error iptables postrouting: %s", output) - } - } - - var ( - args = []string{"FORWARD", "-i", bridgeIface, "-o", bridgeIface, "-j"} - acceptArgs = append(args, "ACCEPT") - dropArgs = append(args, "DROP") - ) - - if !icc { - iptables.Raw(append([]string{"-D"}, acceptArgs...)...) - - if !iptables.Exists(dropArgs...) { - utils.Debugf("Disable inter-container communication") - if output, err := iptables.Raw(append([]string{"-I"}, dropArgs...)...); err != nil { - return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error disabling intercontainer communication: %s", output) - } - } - } else { - iptables.Raw(append([]string{"-D"}, dropArgs...)...) - - if !iptables.Exists(acceptArgs...) { - utils.Debugf("Enable inter-container communication") - if output, err := iptables.Raw(append([]string{"-I"}, acceptArgs...)...); err != nil { - return fmt.Errorf("Unable to allow intercontainer communication: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error enabling intercontainer communication: %s", output) - } - } - } - - // Accept all non-intercontainer outgoing packets - outgoingArgs := []string{"FORWARD", "-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"} - if !iptables.Exists(outgoingArgs...) { - if output, err := iptables.Raw(append([]string{"-I"}, outgoingArgs...)...); err != nil { - return fmt.Errorf("Unable to allow outgoing packets: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error iptables allow outgoing: %s", output) - } - } - - // Accept incoming packets for existing connections - existingArgs := []string{"FORWARD", "-o", bridgeIface, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"} - - if !iptables.Exists(existingArgs...) { - if output, err := iptables.Raw(append([]string{"-I"}, existingArgs...)...); err != nil { - return fmt.Errorf("Unable to allow incoming packets: %s", err) - } else if len(output) != 0 { - return fmt.Errorf("Error iptables allow incoming: %s", output) - } - } - return nil -} - -// CreateBridgeIface creates a network bridge interface on the host system with the name `ifaceName`, -// and attempts to configure it with an address which doesn't conflict with any other interface on the host. -// If it can't find an address which doesn't conflict, it will return an error. -func createBridge(bridgeIP string) error { - nameservers := []string{} - resolvConf, _ := utils.GetResolvConf() - // we don't check for an error here, because we don't really care - // if we can't read /etc/resolv.conf. So instead we skip the append - // if resolvConf is nil. It either doesn't exist, or we can't read it - // for some reason. - if resolvConf != nil { - nameservers = append(nameservers, utils.GetNameserversAsCIDR(resolvConf)...) 
- } - - var ifaceAddr string - if len(bridgeIP) != 0 { - _, _, err := net.ParseCIDR(bridgeIP) - if err != nil { - return err - } - ifaceAddr = bridgeIP - } else { - for _, addr := range addrs { - _, dockerNetwork, err := net.ParseCIDR(addr) - if err != nil { - return err - } - if err := networkdriver.CheckNameserverOverlaps(nameservers, dockerNetwork); err == nil { - if err := networkdriver.CheckRouteOverlaps(dockerNetwork); err == nil { - ifaceAddr = addr - break - } else { - utils.Debugf("%s %s", addr, err) - } - } - } - } - - if ifaceAddr == "" { - return fmt.Errorf("Could not find a free IP address range for interface '%s'. Please configure its address manually and run 'docker -b %s'", bridgeIface, bridgeIface) - } - utils.Debugf("Creating bridge %s with network %s", bridgeIface, ifaceAddr) - - if err := createBridgeIface(bridgeIface); err != nil { - return err - } - - iface, err := net.InterfaceByName(bridgeIface) - if err != nil { - return err - } - - ipAddr, ipNet, err := net.ParseCIDR(ifaceAddr) - if err != nil { - return err - } - - if netlink.NetworkLinkAddIp(iface, ipAddr, ipNet); err != nil { - return fmt.Errorf("Unable to add private network: %s", err) - } - if err := netlink.NetworkLinkUp(iface); err != nil { - return fmt.Errorf("Unable to start network bridge: %s", err) - } - return nil -} - -// Create the actual bridge device. This is more backward-compatible than -// netlink.NetworkLinkAdd and works on RHEL 6. -func createBridgeIface(name string) error { - s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP) - if err != nil { - utils.Debugf("Bridge socket creation failed IPv6 probably not enabled: %v", err) - s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP) - if err != nil { - return fmt.Errorf("Error creating bridge creation socket: %s", err) - } - } - defer syscall.Close(s) - - nameBytePtr, err := syscall.BytePtrFromString(name) - if err != nil { - return fmt.Errorf("Error converting bridge name %s to byte array: %s", name, err) - } - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), siocBRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { - return fmt.Errorf("Error creating bridge: %s", err) - } - return nil -} - -// Allocate a network interface -func Allocate(job *engine.Job) engine.Status { - var ( - ip *net.IP - err error - id = job.Args[0] - requestedIP = net.ParseIP(job.Getenv("RequestedIP")) - ) - - if requestedIP != nil { - ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP) - } else { - ip, err = ipallocator.RequestIP(bridgeNetwork, nil) - } - if err != nil { - job.Error(err) - return engine.StatusErr - } - - out := engine.Env{} - out.Set("IP", ip.String()) - out.Set("Mask", bridgeNetwork.Mask.String()) - out.Set("Gateway", bridgeNetwork.IP.String()) - out.Set("Bridge", bridgeIface) - - size, _ := bridgeNetwork.Mask.Size() - out.SetInt("IPPrefixLen", size) - - currentInterfaces[id] = &networkInterface{ - IP: *ip, - } - - out.WriteTo(job.Stdout) - - return engine.StatusOK -} - -// release an interface for a select ip -func Release(job *engine.Job) engine.Status { - var ( - id = job.Args[0] - containerInterface = currentInterfaces[id] - ip net.IP - port int - proto string - ) - - if containerInterface == nil { - return job.Errorf("No network information to release for %s", id) - } - - for _, nat := range containerInterface.PortMappings { - if err := portmapper.Unmap(nat); err != nil { - log.Printf("Unable to unmap port %s: %s", nat, err) - } - - // this is host mappings - 
switch a := nat.(type) { - case *net.TCPAddr: - proto = "tcp" - ip = a.IP - port = a.Port - case *net.UDPAddr: - proto = "udp" - ip = a.IP - port = a.Port - } - - if err := portallocator.ReleasePort(ip, proto, port); err != nil { - log.Printf("Unable to release port %s", nat) - } - } - - if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil { - log.Printf("Unable to release ip %s\n", err) - } - return engine.StatusOK -} - -// Allocate an external port and map it to the interface -func AllocatePort(job *engine.Job) engine.Status { - var ( - err error - - ip = defaultBindingIP - id = job.Args[0] - hostIP = job.Getenv("HostIP") - hostPort = job.GetenvInt("HostPort") - containerPort = job.GetenvInt("ContainerPort") - proto = job.Getenv("Proto") - network = currentInterfaces[id] - ) - - if hostIP != "" { - ip = net.ParseIP(hostIP) - } - - // host ip, proto, and host port - hostPort, err = portallocator.RequestPort(ip, proto, hostPort) - if err != nil { - job.Error(err) - return engine.StatusErr - } - - var ( - container net.Addr - host net.Addr - ) - - if proto == "tcp" { - host = &net.TCPAddr{IP: ip, Port: hostPort} - container = &net.TCPAddr{IP: network.IP, Port: containerPort} - } else { - host = &net.UDPAddr{IP: ip, Port: hostPort} - container = &net.UDPAddr{IP: network.IP, Port: containerPort} - } - - if err := portmapper.Map(container, ip, hostPort); err != nil { - portallocator.ReleasePort(ip, proto, hostPort) - - job.Error(err) - return engine.StatusErr - } - network.PortMappings = append(network.PortMappings, host) - - out := engine.Env{} - out.Set("HostIP", ip.String()) - out.SetInt("HostPort", hostPort) - - if _, err := out.WriteTo(job.Stdout); err != nil { - job.Error(err) - return engine.StatusErr - } - return engine.StatusOK -} - -func LinkContainers(job *engine.Job) engine.Status { - var ( - action = job.Args[0] - childIP = job.Getenv("ChildIP") - parentIP = job.Getenv("ParentIP") - ignoreErrors = job.GetenvBool("IgnoreErrors") - ports = job.GetenvList("Ports") - ) - split := func(p string) (string, string) { - parts := strings.Split(p, "/") - return parts[0], parts[1] - } - - for _, p := range ports { - port, proto := split(p) - if output, err := iptables.Raw(action, "FORWARD", - "-i", bridgeIface, "-o", bridgeIface, - "-p", proto, - "-s", parentIP, - "--dport", port, - "-d", childIP, - "-j", "ACCEPT"); !ignoreErrors && err != nil { - job.Error(err) - return engine.StatusErr - } else if len(output) != 0 { - job.Errorf("Error toggle iptables forward: %s", output) - return engine.StatusErr - } - - if output, err := iptables.Raw(action, "FORWARD", - "-i", bridgeIface, "-o", bridgeIface, - "-p", proto, - "-s", childIP, - "--sport", port, - "-d", parentIP, - "-j", "ACCEPT"); !ignoreErrors && err != nil { - job.Error(err) - return engine.StatusErr - } else if len(output) != 0 { - job.Errorf("Error toggle iptables forward: %s", output) - return engine.StatusErr - } - } - return engine.StatusOK -} diff --git a/runtime/runtime.go b/runtime/runtime.go index 38a1beccd2..0d3468e350 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -17,7 +17,7 @@ import ( "github.com/dotcloud/docker/runtime/execdriver/lxc" "github.com/dotcloud/docker/runtime/graphdriver" _ "github.com/dotcloud/docker/runtime/graphdriver/vfs" - _ "github.com/dotcloud/docker/runtime/networkdriver/lxc" + _ "github.com/dotcloud/docker/runtime/networkdriver/bridge" "github.com/dotcloud/docker/runtime/networkdriver/portallocator" "github.com/dotcloud/docker/utils" "io" -- cgit v1.2.1 From 
f7b3e879fc3047ed93ac6b43cd9bf47a25f3d0fc Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 20 Mar 2014 22:58:02 +0000 Subject: Add initial plugin flag to pass lxc and native driver options Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runconfig/hostconfig.go | 2 ++ runconfig/parse.go | 18 +++++++++++++++ runtime/container.go | 8 +++++-- runtime/execdriver/driver.go | 28 ++++++++++++------------ runtime/execdriver/lxc/lxc_template.go | 4 ++-- runtime/execdriver/lxc/lxc_template_unit_test.go | 3 ++- runtime/execdriver/native/driver.go | 6 ++--- 7 files changed, 46 insertions(+), 23 deletions(-) diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 6c8618ee81..8ee2288b4b 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -13,6 +13,7 @@ type HostConfig struct { PortBindings nat.PortMap Links []string PublishAllPorts bool + PluginOptions map[string][]string } type KeyValuePair struct { @@ -28,6 +29,7 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { } job.GetenvJson("LxcConf", &hostConfig.LxcConf) job.GetenvJson("PortBindings", &hostConfig.PortBindings) + job.GetenvJson("PluginOptions", &hostConfig.PluginOptions) if Binds := job.GetenvList("Binds"); Binds != nil { hostConfig.Binds = Binds } diff --git a/runconfig/parse.go b/runconfig/parse.go index cc33188ad5..afcaec304f 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -45,6 +45,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flDnsSearch = opts.NewListOpts(opts.ValidateDomain) flVolumesFrom opts.ListOpts flLxcOpts opts.ListOpts + flPluginOpts opts.ListOpts flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id") @@ -77,6 +78,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom dns search domains") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + cmd.Var(&flPluginOpts, []string{"-plugin"}, "Add custom plugin options") if err := cmd.Parse(args); err != nil { return nil, nil, cmd, err @@ -206,6 +208,8 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf WorkingDir: *flWorkingDir, } + pluginOptions := parsePluginOpts(flPluginOpts) + hostConfig := &HostConfig{ Binds: binds, ContainerIDFile: *flContainerIDFile, @@ -214,6 +218,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf PortBindings: portBindings, Links: flLinks.GetAll(), PublishAllPorts: *flPublishAll, + PluginOptions: pluginOptions, } if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { @@ -247,3 +252,16 @@ func parseLxcOpt(opt string) (string, string, error) { } return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } + +func parsePluginOpts(opts opts.ListOpts) map[string][]string { + out := make(map[string][]string, len(opts.GetAll())) + for _, o := range opts.GetAll() { + parts := strings.SplitN(o, " ", 2) + values, exists := out[parts[0]] + if !exists { + values = []string{} + } + out[parts[0]] = append(values, parts[1]) + } + return out +} diff --git a/runtime/container.go b/runtime/container.go 
index 6194a19c8c..488d905f4b 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -361,7 +361,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s func populateCommand(c *Container) { var ( en *execdriver.Network - driverConfig []string + driverConfig = c.hostConfig.PluginOptions ) en = &execdriver.Network{ @@ -379,11 +379,15 @@ func populateCommand(c *Container) { } } + // merge in the lxc conf options into the generic config map if lxcConf := c.hostConfig.LxcConf; lxcConf != nil { + lxc := driverConfig["lxc"] for _, pair := range lxcConf { - driverConfig = append(driverConfig, fmt.Sprintf("%s = %s", pair.Key, pair.Value)) + lxc = append(lxc, fmt.Sprintf("%s = %s", pair.Key, pair.Value)) } + driverConfig["lxc"] = lxc } + resources := &execdriver.Resources{ Memory: c.Config.Memory, MemorySwap: c.Config.MemorySwap, diff --git a/runtime/execdriver/driver.go b/runtime/execdriver/driver.go index 23e31ee8d9..2b7c367453 100644 --- a/runtime/execdriver/driver.go +++ b/runtime/execdriver/driver.go @@ -112,20 +112,20 @@ type Mount struct { type Command struct { exec.Cmd `json:"-"` - ID string `json:"id"` - Privileged bool `json:"privileged"` - User string `json:"user"` - Rootfs string `json:"rootfs"` // root fs of the container - InitPath string `json:"initpath"` // dockerinit - Entrypoint string `json:"entrypoint"` - Arguments []string `json:"arguments"` - WorkingDir string `json:"working_dir"` - ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver - Tty bool `json:"tty"` - Network *Network `json:"network"` - Config []string `json:"config"` // generic values that specific drivers can consume - Resources *Resources `json:"resources"` - Mounts []Mount `json:"mounts"` + ID string `json:"id"` + Privileged bool `json:"privileged"` + User string `json:"user"` + Rootfs string `json:"rootfs"` // root fs of the container + InitPath string `json:"initpath"` // dockerinit + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + WorkingDir string `json:"working_dir"` + ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver + Tty bool `json:"tty"` + Network *Network `json:"network"` + Config map[string][]string `json:"config"` // generic values that specific drivers can consume + Resources *Resources `json:"resources"` + Mounts []Mount `json:"mounts"` Terminal Terminal `json:"-"` // standard or tty terminal Console string `json:"-"` // dev/console path diff --git a/runtime/execdriver/lxc/lxc_template.go b/runtime/execdriver/lxc/lxc_template.go index ce9d90469f..7979e4f284 100644 --- a/runtime/execdriver/lxc/lxc_template.go +++ b/runtime/execdriver/lxc/lxc_template.go @@ -118,8 +118,8 @@ lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} {{end}} {{end}} -{{if .Config}} -{{range $value := .Config}} +{{if .Config.lxc}} +{{range $value := .Config.lxc}} {{$value}} {{end}} {{end}} diff --git a/runtime/execdriver/lxc/lxc_template_unit_test.go b/runtime/execdriver/lxc/lxc_template_unit_test.go index e613adf7a9..74cfd6229c 100644 --- a/runtime/execdriver/lxc/lxc_template_unit_test.go +++ b/runtime/execdriver/lxc/lxc_template_unit_test.go @@ -75,10 +75,11 @@ func TestCustomLxcConfig(t *testing.T) { command := &execdriver.Command{ ID: "1", Privileged: false, - Config: []string{ + Config: map[string][]string{"lxc": { "lxc.utsname = docker", "lxc.cgroup.cpuset.cpus = 0,1", }, + }, Network: &execdriver.Network{ Mtu: 1500, 
Interface: nil, diff --git a/runtime/execdriver/native/driver.go b/runtime/execdriver/native/driver.go index bf7e8ccdec..0a09d324db 100644 --- a/runtime/execdriver/native/driver.go +++ b/runtime/execdriver/native/driver.go @@ -184,10 +184,8 @@ func (d *driver) removeContainerRoot(id string) error { func (d *driver) validateCommand(c *execdriver.Command) error { // we need to check the Config of the command to make sure that we // do not have any of the lxc-conf variables - for _, conf := range c.Config { - if strings.Contains(conf, "lxc") { - return fmt.Errorf("%s is not supported by the native driver", conf) - } + for _, conf := range c.Config["native"] { + log.Println(conf) } return nil } -- cgit v1.2.1 From c5f9c4bd6933c806490e4f7cb52557cee154dbed Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 20 Mar 2014 23:09:01 +0000 Subject: Dont use custom marshaling for caps and namespaces This also adds an enabled field to the types so that they can be easily toggled. Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/types.go | 77 ++++++++++++----------------------------- pkg/libcontainer/types_linux.go | 12 +++---- 2 files changed, 28 insertions(+), 61 deletions(-) diff --git a/pkg/libcontainer/types.go b/pkg/libcontainer/types.go index 94fe876554..87346348bc 100644 --- a/pkg/libcontainer/types.go +++ b/pkg/libcontainer/types.go @@ -1,7 +1,6 @@ package libcontainer import ( - "encoding/json" "errors" "github.com/syndtr/gocapability/capability" ) @@ -19,29 +18,30 @@ var ( namespaceList = Namespaces{} capabilityList = Capabilities{ - {Key: "SETPCAP", Value: capability.CAP_SETPCAP}, - {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE}, - {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO}, - {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT}, - {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN}, - {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE}, - {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE}, - {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME}, - {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG}, - {Key: "MKNOD", Value: capability.CAP_MKNOD}, - {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE}, - {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL}, - {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE}, - {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN}, - {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN}, + {Key: "SETPCAP", Value: capability.CAP_SETPCAP, Enabled: true}, + {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE, Enabled: true}, + {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO, Enabled: true}, + {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT, Enabled: true}, + {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN, Enabled: true}, + {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE, Enabled: true}, + {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE, Enabled: true}, + {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME, Enabled: true}, + {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG, Enabled: true}, + {Key: "MKNOD", Value: capability.CAP_MKNOD, Enabled: true}, + {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE, Enabled: true}, + {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL, Enabled: true}, + {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE, Enabled: true}, + {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN, Enabled: true}, + {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN, Enabled: true}, } ) type ( Namespace struct { - Key 
string - Value int - File string + Key string `json:"key,omitempty"` + Enabled bool `json:"enabled,omitempty"` + Value int `json:"value,omitempty"` + File string `json:"file,omitempty"` } Namespaces []*Namespace ) @@ -50,23 +50,6 @@ func (ns *Namespace) String() string { return ns.Key } -func (ns *Namespace) MarshalJSON() ([]byte, error) { - return json.Marshal(ns.Key) -} - -func (ns *Namespace) UnmarshalJSON(src []byte) error { - var nsName string - if err := json.Unmarshal(src, &nsName); err != nil { - return err - } - ret := GetNamespace(nsName) - if ret == nil { - return ErrUnkownNamespace - } - *ns = *ret - return nil -} - func GetNamespace(key string) *Namespace { for _, ns := range namespaceList { if ns.Key == key { @@ -89,8 +72,9 @@ func (n Namespaces) Contains(ns string) bool { type ( Capability struct { - Key string - Value capability.Cap + Key string `json:"key,omitempty"` + Enabled bool `json:"enabled"` + Value capability.Cap `json:"value,omitempty"` } Capabilities []*Capability ) @@ -99,23 +83,6 @@ func (c *Capability) String() string { return c.Key } -func (c *Capability) MarshalJSON() ([]byte, error) { - return json.Marshal(c.Key) -} - -func (c *Capability) UnmarshalJSON(src []byte) error { - var capName string - if err := json.Unmarshal(src, &capName); err != nil { - return err - } - ret := GetCapability(capName) - if ret == nil { - return ErrUnkownCapability - } - *c = *ret - return nil -} - func GetCapability(key string) *Capability { for _, capp := range capabilityList { if capp.Key == key { diff --git a/pkg/libcontainer/types_linux.go b/pkg/libcontainer/types_linux.go index c14531df20..1f937e0c97 100644 --- a/pkg/libcontainer/types_linux.go +++ b/pkg/libcontainer/types_linux.go @@ -6,11 +6,11 @@ import ( func init() { namespaceList = Namespaces{ - {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt"}, - {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts"}, - {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc"}, - {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user"}, - {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid"}, - {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net"}, + {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt", Enabled: true}, + {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts", Enabled: true}, + {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc", Enabled: true}, + {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user", Enabled: true}, + {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid", Enabled: true}, + {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net", Enabled: true}, } } -- cgit v1.2.1 From 43c3ee3ba154e2480191ed3743391810f23f29af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Djibril=20Kon=C3=A9?= Date: Fri, 21 Mar 2014 00:40:58 +0100 Subject: Harmonize / across all name-related commands/Validate images names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Docker-DCO-1.1-Signed-off-by: Djibril Koné (github: enokd) --- api/client.go | 26 +++++++++++++++++++++++--- registry/registry_test.go | 4 ++++ 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/api/client.go b/api/client.go index 07a3d25e5c..aed1ccabc6 100644 --- a/api/client.go +++ b/api/client.go @@ -207,6 +207,15 @@ func (cli *DockerCli) CmdBuild(args ...string) error { } // Upload the build context v := &url.Values{} + + //Check if the given image name can be resolved + if *tag != "" { + repository, _ := utils.ParseRepositoryTag(*tag) + if _, _, err := 
registry.ResolveRepositoryName(repository); err != nil { + return err + } + } + v.Set("t", *tag) if *suppressOutput { @@ -1002,6 +1011,12 @@ func (cli *DockerCli) CmdImport(args ...string) error { repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) } v := url.Values{} + + //Check if the given image name can be resolved + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + v.Set("repo", repository) v.Set("tag", tag) v.Set("fromSrc", src) @@ -1452,9 +1467,9 @@ func (cli *DockerCli) CmdCommit(args ...string) error { return nil } - re := regexp.MustCompile("/{2}") - if re.MatchString(repository) { - return fmt.Errorf("Error: Bad image name. Please rename your image in the format /") + //Check if the given image name can be resolved + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err } v := url.Values{} @@ -1745,6 +1760,11 @@ func (cli *DockerCli) CmdTag(args ...string) error { } v := url.Values{} + + //Check if the given image name can be resolved + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } v.Set("repo", repository) v.Set("tag", tag) diff --git a/registry/registry_test.go b/registry/registry_test.go index ebfb99b4c3..c072da41c5 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -206,4 +206,8 @@ func TestValidRepositoryName(t *testing.T) { t.Log("Repository name should be invalid") t.Fail() } + if err := validateRepositoryName("docker///docker"); err == nil { + t.Log("Repository name should be invalid") + t.Fail() + } } -- cgit v1.2.1 From 443a75d5f66e986e9d7740d3f2aaef080aef8ea0 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 21 Mar 2014 00:10:24 +0000 Subject: Allow caps to be toggled in native driver with plugin flag Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/capabilities/capabilities.go | 4 ++- pkg/libcontainer/types.go | 41 +++++++++++++++------------ runtime/execdriver/native/default_template.go | 31 ++++++++++++++++++++ runtime/execdriver/native/driver.go | 12 -------- 4 files changed, 57 insertions(+), 31 deletions(-) diff --git a/pkg/libcontainer/capabilities/capabilities.go b/pkg/libcontainer/capabilities/capabilities.go index fbf73538e0..4b81e708c7 100644 --- a/pkg/libcontainer/capabilities/capabilities.go +++ b/pkg/libcontainer/capabilities/capabilities.go @@ -27,7 +27,9 @@ func DropCapabilities(container *libcontainer.Container) error { func getCapabilitiesMask(container *libcontainer.Container) []capability.Cap { drop := []capability.Cap{} for _, c := range container.CapabilitiesMask { - drop = append(drop, c.Value) + if !c.Enabled { + drop = append(drop, c.Value) + } } return drop } diff --git a/pkg/libcontainer/types.go b/pkg/libcontainer/types.go index 87346348bc..7751e850b6 100644 --- a/pkg/libcontainer/types.go +++ b/pkg/libcontainer/types.go @@ -18,21 +18,21 @@ var ( namespaceList = Namespaces{} capabilityList = Capabilities{ - {Key: "SETPCAP", Value: capability.CAP_SETPCAP, Enabled: true}, - {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE, Enabled: true}, - {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO, Enabled: true}, - {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT, Enabled: true}, - {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN, Enabled: true}, - {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE, Enabled: true}, - {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE, Enabled: true}, - {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME, Enabled: 
true}, - {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG, Enabled: true}, - {Key: "MKNOD", Value: capability.CAP_MKNOD, Enabled: true}, - {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE, Enabled: true}, - {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL, Enabled: true}, - {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE, Enabled: true}, - {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN, Enabled: true}, - {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN, Enabled: true}, + {Key: "SETPCAP", Value: capability.CAP_SETPCAP, Enabled: false}, + {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE, Enabled: false}, + {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO, Enabled: false}, + {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT, Enabled: false}, + {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN, Enabled: false}, + {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE, Enabled: false}, + {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE, Enabled: false}, + {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME, Enabled: false}, + {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG, Enabled: false}, + {Key: "MKNOD", Value: capability.CAP_MKNOD, Enabled: false}, + {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE, Enabled: false}, + {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL, Enabled: false}, + {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE, Enabled: false}, + {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN, Enabled: false}, + {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN, Enabled: false}, } ) @@ -86,7 +86,8 @@ func (c *Capability) String() string { func GetCapability(key string) *Capability { for _, capp := range capabilityList { if capp.Key == key { - return capp + cpy := *capp + return &cpy } } return nil @@ -95,10 +96,14 @@ func GetCapability(key string) *Capability { // Contains returns true if the specified Capability is // in the slice func (c Capabilities) Contains(capp string) bool { + return c.Get(capp) != nil +} + +func (c Capabilities) Get(capp string) *Capability { for _, cap := range c { if cap.Key == capp { - return true + return cap } } - return false + return nil } diff --git a/runtime/execdriver/native/default_template.go b/runtime/execdriver/native/default_template.go index d744ab382f..d47a5eb8cd 100644 --- a/runtime/execdriver/native/default_template.go +++ b/runtime/execdriver/native/default_template.go @@ -6,6 +6,7 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/runtime/execdriver" "os" + "strings" ) // createContainer populates and configures the container type with the @@ -63,9 +64,39 @@ func createContainer(c *execdriver.Command) *libcontainer.Container { container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private}) } + configureCustomOptions(container, c.Config["native"]) + return container } +// configureCustomOptions takes string commands from the user and allows modification of the +// container's default configuration. 
+// +// format: +// i.e: cap +MKNOD cap -NET_ADMIN +// i.e: cgroup devices.allow *:* +func configureCustomOptions(container *libcontainer.Container, opts []string) { + for _, opt := range opts { + parts := strings.Split(strings.TrimSpace(opt), " ") + switch parts[0] { + case "cap": + value := strings.TrimSpace(parts[1]) + c := container.CapabilitiesMask.Get(value[1:]) + if c == nil { + continue + } + switch value[0] { + case '-': + c.Enabled = false + case '+': + c.Enabled = true + default: + // do error here + } + } + } +} + // getDefaultTemplate returns the docker default for // the libcontainer configuration file func getDefaultTemplate() *libcontainer.Container { diff --git a/runtime/execdriver/native/driver.go b/runtime/execdriver/native/driver.go index 0a09d324db..0d9297191c 100644 --- a/runtime/execdriver/native/driver.go +++ b/runtime/execdriver/native/driver.go @@ -75,9 +75,6 @@ func NewDriver(root, initPath string) (*driver, error) { } func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - if err := d.validateCommand(c); err != nil { - return -1, err - } var ( term nsinit.Terminal container = createContainer(c) @@ -181,15 +178,6 @@ func (d *driver) removeContainerRoot(id string) error { return os.RemoveAll(filepath.Join(d.root, id)) } -func (d *driver) validateCommand(c *execdriver.Command) error { - // we need to check the Config of the command to make sure that we - // do not have any of the lxc-conf variables - for _, conf := range c.Config["native"] { - log.Println(conf) - } - return nil -} - func getEnv(key string, env []string) string { for _, pair := range env { parts := strings.Split(pair, "=") -- cgit v1.2.1 From 70f3b9f4ce67ee54ec226814cdd26db01f69378d Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 21 Mar 2014 00:23:34 +0000 Subject: Add ability to work with individual namespaces Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/command.go | 4 +++- pkg/libcontainer/types.go | 11 ++++++++--- runtime/execdriver/native/default_template.go | 16 ++++++++++++++-- 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/pkg/libcontainer/nsinit/command.go b/pkg/libcontainer/nsinit/command.go index 5546065b6d..153a48ab59 100644 --- a/pkg/libcontainer/nsinit/command.go +++ b/pkg/libcontainer/nsinit/command.go @@ -39,7 +39,9 @@ func (c *DefaultCommandFactory) Create(container *libcontainer.Container, consol // flags on clone, unshare, and setns func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) { for _, ns := range namespaces { - flag |= ns.Value + if ns.Enabled { + flag |= ns.Value + } } return flag } diff --git a/pkg/libcontainer/types.go b/pkg/libcontainer/types.go index 7751e850b6..ffeb55a022 100644 --- a/pkg/libcontainer/types.go +++ b/pkg/libcontainer/types.go @@ -53,7 +53,8 @@ func (ns *Namespace) String() string { func GetNamespace(key string) *Namespace { for _, ns := range namespaceList { if ns.Key == key { - return ns + cpy := *ns + return &cpy } } return nil @@ -62,12 +63,16 @@ func GetNamespace(key string) *Namespace { // Contains returns true if the specified Namespace is // in the slice func (n Namespaces) Contains(ns string) bool { + return n.Get(ns) != nil +} + +func (n Namespaces) Get(ns string) *Namespace { for _, nsp := range n { if nsp.Key == ns { - return true + return nsp } } - return false + return nil } type ( diff --git a/runtime/execdriver/native/default_template.go 
b/runtime/execdriver/native/default_template.go
index d47a5eb8cd..dbb7a45ae7 100644
--- a/runtime/execdriver/native/default_template.go
+++ b/runtime/execdriver/native/default_template.go
@@ -77,10 +77,12 @@ func createContainer(c *execdriver.Command) *libcontainer.Container {
 // i.e: cgroup devices.allow *:*
 func configureCustomOptions(container *libcontainer.Container, opts []string) {
 	for _, opt := range opts {
-		parts := strings.Split(strings.TrimSpace(opt), " ")
+		var (
+			parts = strings.Split(strings.TrimSpace(opt), " ")
+			value = strings.TrimSpace(parts[1])
+		)
 		switch parts[0] {
 		case "cap":
-			value := strings.TrimSpace(parts[1])
 			c := container.CapabilitiesMask.Get(value[1:])
 			if c == nil {
 				continue
@@ -93,6 +95,16 @@ func configureCustomOptions(container *libcontainer.Container, opts []string) {
 			default:
 				// do error here
 			}
+		case "ns":
+			ns := container.Namespaces.Get(value[1:])
+			switch value[0] {
+			case '-':
+				ns.Enabled = false
+			case '+':
+				ns.Enabled = true
+			default:
+				// error
+			}
 		}
 	}
 }
--
cgit v1.2.1

From be5538d8a8820ac1192c7a5660e0d950927b42d0 Mon Sep 17 00:00:00 2001
From: Michael Crosby
Date: Fri, 21 Mar 2014 00:48:17 +0000
Subject: Allow containers to join the net namespace of other containers

Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael)
---
 pkg/libcontainer/network/netns.go             | 10 +---------
 runtime/execdriver/native/default_template.go | 28 ++++++++++++++++++++++++---
 runtime/execdriver/native/driver.go           | 14 +++++++++-----
 3 files changed, 35 insertions(+), 17 deletions(-)

diff --git a/pkg/libcontainer/network/netns.go b/pkg/libcontainer/network/netns.go
index 3eb8ee587a..7e311f22d8 100644
--- a/pkg/libcontainer/network/netns.go
+++ b/pkg/libcontainer/network/netns.go
@@ -14,13 +14,7 @@ type NetNS struct {
 }
 func (v *NetNS) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error {
-	nsname, exists := n.Context["nsname"]
-
-	if !exists {
-		return fmt.Errorf("nspath does not exist in network context")
-	}
-
-	context["nspath"] = fmt.Sprintf("/var/run/netns/%s", nsname)
+	context["nspath"] = n.Context["nspath"]
 	return nil
 }
@@ -29,12 +23,10 @@ func (v *NetNS) Initialize(config *libcontainer.Network, context libcontainer.Co
 	if !exists {
 		return fmt.Errorf("nspath does not exist in network context")
 	}
-
 	f, err := os.OpenFile(nspath, os.O_RDONLY, 0)
 	if err != nil {
 		return fmt.Errorf("failed get network namespace fd: %v", err)
 	}
-
 	if err := system.Setns(f.Fd(), syscall.CLONE_NEWNET); err != nil {
 		return fmt.Errorf("failed to setns current network namespace: %v", err)
 	}
diff --git a/runtime/execdriver/native/default_template.go b/runtime/execdriver/native/default_template.go
index dbb7a45ae7..890e260ad2 100644
--- a/runtime/execdriver/native/default_template.go
+++ b/runtime/execdriver/native/default_template.go
@@ -6,12 +6,13 @@ import (
 	"github.com/dotcloud/docker/pkg/libcontainer"
 	"github.com/dotcloud/docker/runtime/execdriver"
 	"os"
+	"path/filepath"
 	"strings"
 )
 // createContainer populates and configures the container type with the
 // data provided by the execdriver.Command
-func createContainer(c *execdriver.Command) *libcontainer.Container {
+func (d *driver) createContainer(c *execdriver.Command) *libcontainer.Container {
 	container := getDefaultTemplate()
 	container.Hostname = getEnv("HOSTNAME", c.Env)
@@ -64,7 +65,7 @@ func createContainer(c *execdriver.Command) *libcontainer.Container {
 		container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private})
 	}
-
configureCustomOptions(container, c.Config["native"]) + d.configureCustomOptions(container, c.Config["native"]) return container } @@ -75,7 +76,8 @@ func createContainer(c *execdriver.Command) *libcontainer.Container { // format: // i.e: cap +MKNOD cap -NET_ADMIN // i.e: cgroup devices.allow *:* -func configureCustomOptions(container *libcontainer.Container, opts []string) { +// i.e: net join +func (d *driver) configureCustomOptions(container *libcontainer.Container, opts []string) { for _, opt := range opts { var ( parts = strings.Split(strings.TrimSpace(opt), " ") @@ -105,6 +107,26 @@ func configureCustomOptions(container *libcontainer.Container, opts []string) { default: // error } + case "net": + switch strings.TrimSpace(parts[1]) { + case "join": + var ( + id = strings.TrimSpace(parts[2]) + cmd = d.activeContainers[id] + nspath = filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net") + ) + + container.Networks = append(container.Networks, &libcontainer.Network{ + Type: "netns", + Context: libcontainer.Context{ + "nspath": nspath, + }, + }) + default: + // error + } + default: + // error not defined } } } diff --git a/runtime/execdriver/native/driver.go b/runtime/execdriver/native/driver.go index 0d9297191c..b998db743d 100644 --- a/runtime/execdriver/native/driver.go +++ b/runtime/execdriver/native/driver.go @@ -57,8 +57,9 @@ func init() { } type driver struct { - root string - initPath string + root string + initPath string + activeContainers map[string]*execdriver.Command } func NewDriver(root, initPath string) (*driver, error) { @@ -69,15 +70,18 @@ func NewDriver(root, initPath string) (*driver, error) { return nil, err } return &driver{ - root: root, - initPath: initPath, + root: root, + initPath: initPath, + activeContainers: make(map[string]*execdriver.Command), }, nil } func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { + d.activeContainers[c.ID] = c + var ( term nsinit.Terminal - container = createContainer(c) + container = d.createContainer(c) factory = &dockerCommandFactory{c: c, driver: d} stateWriter = &dockerStateWriter{ callback: startCallback, -- cgit v1.2.1 From 79c11b19ecd506bd76db391b896cec0d4263183d Mon Sep 17 00:00:00 2001 From: alambike Date: Fri, 21 Mar 2014 03:13:06 +0100 Subject: Added Eixo::Docker to the list of libraries MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Docker-DCO-1.1-Signed-off-by: Javier Gómez (github: alambike) --- docs/sources/reference/api/remote_api_client_libraries.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/reference/api/remote_api_client_libraries.rst b/docs/sources/reference/api/remote_api_client_libraries.rst index f74dd416bc..4a445db36f 100644 --- a/docs/sources/reference/api/remote_api_client_libraries.rst +++ b/docs/sources/reference/api/remote_api_client_libraries.rst @@ -49,3 +49,5 @@ and we will add the libraries here. 
+----------------------+----------------+--------------------------------------------+----------+ | Perl | Net::Docker | https://metacpan.org/pod/Net::Docker | Active | +----------------------+----------------+--------------------------------------------+----------+ +| Perl | Eixo::Docker | https://github.com/alambike/eixo-docker | Active | ++----------------------+----------------+--------------------------------------------+----------+ -- cgit v1.2.1 From 4002eac8b8b4007de03e78dbd57232fac583d05b Mon Sep 17 00:00:00 2001 From: Ken ICHIKAWA Date: Wed, 19 Mar 2014 12:05:54 +0900 Subject: Fix since time exit display when s.FinishedAt is zero When s.FinishedAt is zero, the since time exit in docker ps doesn't display correct time. For example ``` Exited (0) 292.471209 years ago ``` This patch fixes the since time exit to display nothing if s.FinishedAt is zero. Docker-DCO-1.1-Signed-off-by: Ken ICHIKAWA (github: ichik1) --- runtime/state.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/runtime/state.go b/runtime/state.go index 1c682acd26..316b8a40f1 100644 --- a/runtime/state.go +++ b/runtime/state.go @@ -28,6 +28,9 @@ func (s *State) String() string { } return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt))) } + if s.FinishedAt.IsZero() { + return "" + } return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, utils.HumanDuration(time.Now().UTC().Sub(s.FinishedAt))) } -- cgit v1.2.1 From 7c726669cbcc0cfde12c6a9f03974bb672839271 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 21 Mar 2014 08:10:07 +0000 Subject: Factor out the native driver config options Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/container.go | 4 + runtime/execdriver/native/configuration/caps.go | 27 +++++ runtime/execdriver/native/configuration/net.go | 35 +++++++ runtime/execdriver/native/configuration/ns.go | 26 +++++ runtime/execdriver/native/configuration/parse.go | 37 +++++++ runtime/execdriver/native/create.go | 70 +++++++++++++ runtime/execdriver/native/default_template.go | 126 ----------------------- runtime/execdriver/native/driver.go | 12 ++- 8 files changed, 207 insertions(+), 130 deletions(-) create mode 100644 runtime/execdriver/native/configuration/caps.go create mode 100644 runtime/execdriver/native/configuration/net.go create mode 100644 runtime/execdriver/native/configuration/ns.go create mode 100644 runtime/execdriver/native/configuration/parse.go create mode 100644 runtime/execdriver/native/create.go diff --git a/runtime/container.go b/runtime/container.go index 488d905f4b..1f0f82eebb 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -364,6 +364,10 @@ func populateCommand(c *Container) { driverConfig = c.hostConfig.PluginOptions ) + if driverConfig == nil { + driverConfig = make(map[string][]string) + } + en = &execdriver.Network{ Mtu: c.runtime.config.Mtu, Interface: nil, diff --git a/runtime/execdriver/native/configuration/caps.go b/runtime/execdriver/native/configuration/caps.go new file mode 100644 index 0000000000..f4de470684 --- /dev/null +++ b/runtime/execdriver/native/configuration/caps.go @@ -0,0 +1,27 @@ +package configuration + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/libcontainer" + "strings" +) + +// i.e: cap +MKNOD cap -NET_ADMIN +func parseCapOpt(container *libcontainer.Container, opts []string) error { + var ( + value = strings.TrimSpace(opts[0]) + c = container.CapabilitiesMask.Get(value[1:]) + ) + if c == nil { + return fmt.Errorf("%s is not a valid capability", value[1:]) + } + switch 
value[0] { + case '-': + c.Enabled = false + case '+': + c.Enabled = true + default: + return fmt.Errorf("%c is not a valid modifier for capabilities", value[0]) + } + return nil +} diff --git a/runtime/execdriver/native/configuration/net.go b/runtime/execdriver/native/configuration/net.go new file mode 100644 index 0000000000..cac7f658ba --- /dev/null +++ b/runtime/execdriver/native/configuration/net.go @@ -0,0 +1,35 @@ +package configuration + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/libcontainer" + "os/exec" + "path/filepath" + "strings" +) + +// i.e: net join +func parseNetOpt(container *libcontainer.Container, running map[string]*exec.Cmd, opts []string) error { + opt := strings.TrimSpace(opts[1]) + switch opt { + case "join": + var ( + id = strings.TrimSpace(opts[2]) + cmd = running[id] + ) + + if cmd == nil || cmd.Process == nil { + return fmt.Errorf("%s is not a valid running container to join", id) + } + nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net") + container.Networks = append(container.Networks, &libcontainer.Network{ + Type: "netns", + Context: libcontainer.Context{ + "nspath": nspath, + }, + }) + default: + return fmt.Errorf("%s is not a valid network option", opt) + } + return nil +} diff --git a/runtime/execdriver/native/configuration/ns.go b/runtime/execdriver/native/configuration/ns.go new file mode 100644 index 0000000000..ff7f367196 --- /dev/null +++ b/runtime/execdriver/native/configuration/ns.go @@ -0,0 +1,26 @@ +package configuration + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/libcontainer" + "strings" +) + +func parseNsOpt(container *libcontainer.Container, opts []string) error { + var ( + value = strings.TrimSpace(opts[0]) + ns = container.Namespaces.Get(value[1:]) + ) + if ns == nil { + return fmt.Errorf("%s is not a valid namespace", value[1:]) + } + switch value[0] { + case '-': + ns.Enabled = false + case '+': + ns.Enabled = true + default: + return fmt.Errorf("%c is not a valid modifier for namespaces", value[0]) + } + return nil +} diff --git a/runtime/execdriver/native/configuration/parse.go b/runtime/execdriver/native/configuration/parse.go new file mode 100644 index 0000000000..08b98fbd12 --- /dev/null +++ b/runtime/execdriver/native/configuration/parse.go @@ -0,0 +1,37 @@ +package configuration + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/libcontainer" + "os/exec" + "strings" +) + +// configureCustomOptions takes string commands from the user and allows modification of the +// container's default configuration. 
+// +// format: <...value> +// i.e: cgroup devices.allow *:* +func ParseConfiguration(container *libcontainer.Container, running map[string]*exec.Cmd, opts []string) error { + for _, opt := range opts { + var ( + err error + parts = strings.Split(strings.TrimSpace(opt), " ") + ) + + switch parts[0] { + case "cap": + err = parseCapOpt(container, parts[1:]) + case "ns": + err = parseNsOpt(container, parts[1:]) + case "net": + err = parseNetOpt(container, running, parts[1:]) + default: + return fmt.Errorf("%s is not a valid configuration option for the native driver", parts[0]) + } + if err != nil { + return err + } + } + return nil +} diff --git a/runtime/execdriver/native/create.go b/runtime/execdriver/native/create.go new file mode 100644 index 0000000000..7118edc91e --- /dev/null +++ b/runtime/execdriver/native/create.go @@ -0,0 +1,70 @@ +package native + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/runtime/execdriver" + "github.com/dotcloud/docker/runtime/execdriver/native/configuration" + "os" +) + +// createContainer populates and configures the container type with the +// data provided by the execdriver.Command +func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container, error) { + container := getDefaultTemplate() + + container.Hostname = getEnv("HOSTNAME", c.Env) + container.Tty = c.Tty + container.User = c.User + container.WorkingDir = c.WorkingDir + container.Env = c.Env + + loopbackNetwork := libcontainer.Network{ + Mtu: c.Network.Mtu, + Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0), + Gateway: "localhost", + Type: "loopback", + Context: libcontainer.Context{}, + } + + container.Networks = []*libcontainer.Network{ + &loopbackNetwork, + } + + if c.Network.Interface != nil { + vethNetwork := libcontainer.Network{ + Mtu: c.Network.Mtu, + Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), + Gateway: c.Network.Interface.Gateway, + Type: "veth", + Context: libcontainer.Context{ + "prefix": "veth", + "bridge": c.Network.Interface.Bridge, + }, + } + container.Networks = append(container.Networks, &vethNetwork) + } + + container.Cgroups.Name = c.ID + if c.Privileged { + container.CapabilitiesMask = nil + container.Cgroups.DeviceAccess = true + container.Context["apparmor_profile"] = "unconfined" + } + if c.Resources != nil { + container.Cgroups.CpuShares = c.Resources.CpuShares + container.Cgroups.Memory = c.Resources.Memory + container.Cgroups.MemorySwap = c.Resources.MemorySwap + } + // check to see if we are running in ramdisk to disable pivot root + container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" + + for _, m := range c.Mounts { + container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private}) + } + + if err := configuration.ParseConfiguration(container, d.activeContainers, c.Config["native"]); err != nil { + return nil, err + } + return container, nil +} diff --git a/runtime/execdriver/native/default_template.go b/runtime/execdriver/native/default_template.go index 890e260ad2..0dcd7db356 100644 --- a/runtime/execdriver/native/default_template.go +++ b/runtime/execdriver/native/default_template.go @@ -1,136 +1,10 @@ package native import ( - "fmt" "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" - "github.com/dotcloud/docker/runtime/execdriver" - "os" - "path/filepath" - "strings" ) -// createContainer populates and configures the container type with the -// data provided by 
the execdriver.Command -func (d *driver) createContainer(c *execdriver.Command) *libcontainer.Container { - container := getDefaultTemplate() - - container.Hostname = getEnv("HOSTNAME", c.Env) - container.Tty = c.Tty - container.User = c.User - container.WorkingDir = c.WorkingDir - container.Env = c.Env - - loopbackNetwork := libcontainer.Network{ - Mtu: c.Network.Mtu, - Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0), - Gateway: "localhost", - Type: "loopback", - Context: libcontainer.Context{}, - } - - container.Networks = []*libcontainer.Network{ - &loopbackNetwork, - } - - if c.Network.Interface != nil { - vethNetwork := libcontainer.Network{ - Mtu: c.Network.Mtu, - Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen), - Gateway: c.Network.Interface.Gateway, - Type: "veth", - Context: libcontainer.Context{ - "prefix": "veth", - "bridge": c.Network.Interface.Bridge, - }, - } - container.Networks = append(container.Networks, &vethNetwork) - } - - container.Cgroups.Name = c.ID - if c.Privileged { - container.CapabilitiesMask = nil - container.Cgroups.DeviceAccess = true - container.Context["apparmor_profile"] = "unconfined" - } - if c.Resources != nil { - container.Cgroups.CpuShares = c.Resources.CpuShares - container.Cgroups.Memory = c.Resources.Memory - container.Cgroups.MemorySwap = c.Resources.MemorySwap - } - // check to see if we are running in ramdisk to disable pivot root - container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" - - for _, m := range c.Mounts { - container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private}) - } - - d.configureCustomOptions(container, c.Config["native"]) - - return container -} - -// configureCustomOptions takes string commands from the user and allows modification of the -// container's default configuration. 
-// -// format: -// i.e: cap +MKNOD cap -NET_ADMIN -// i.e: cgroup devices.allow *:* -// i.e: net join -func (d *driver) configureCustomOptions(container *libcontainer.Container, opts []string) { - for _, opt := range opts { - var ( - parts = strings.Split(strings.TrimSpace(opt), " ") - value = strings.TrimSpace(parts[1]) - ) - switch parts[0] { - case "cap": - c := container.CapabilitiesMask.Get(value[1:]) - if c == nil { - continue - } - switch value[0] { - case '-': - c.Enabled = false - case '+': - c.Enabled = true - default: - // do error here - } - case "ns": - ns := container.Namespaces.Get(value[1:]) - switch value[0] { - case '-': - ns.Enabled = false - case '+': - ns.Enabled = true - default: - // error - } - case "net": - switch strings.TrimSpace(parts[1]) { - case "join": - var ( - id = strings.TrimSpace(parts[2]) - cmd = d.activeContainers[id] - nspath = filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net") - ) - - container.Networks = append(container.Networks, &libcontainer.Network{ - Type: "netns", - Context: libcontainer.Context{ - "nspath": nspath, - }, - }) - default: - // error - } - default: - // error not defined - } - } -} - // getDefaultTemplate returns the docker default for // the libcontainer configuration file func getDefaultTemplate() *libcontainer.Container { diff --git a/runtime/execdriver/native/driver.go b/runtime/execdriver/native/driver.go index b998db743d..4acc4b388c 100644 --- a/runtime/execdriver/native/driver.go +++ b/runtime/execdriver/native/driver.go @@ -59,7 +59,7 @@ func init() { type driver struct { root string initPath string - activeContainers map[string]*execdriver.Command + activeContainers map[string]*exec.Cmd } func NewDriver(root, initPath string) (*driver, error) { @@ -72,16 +72,20 @@ func NewDriver(root, initPath string) (*driver, error) { return &driver{ root: root, initPath: initPath, - activeContainers: make(map[string]*execdriver.Command), + activeContainers: make(map[string]*exec.Cmd), }, nil } func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { - d.activeContainers[c.ID] = c + // take the Command and populate the libcontainer.Container from it + container, err := d.createContainer(c) + if err != nil { + return -1, err + } + d.activeContainers[c.ID] = &c.Cmd var ( term nsinit.Terminal - container = d.createContainer(c) factory = &dockerCommandFactory{c: c, driver: d} stateWriter = &dockerStateWriter{ callback: startCallback, -- cgit v1.2.1 From c9d7f858fd1c6e2d1287e28ee12952333b327c75 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 21 Mar 2014 11:53:15 +0000 Subject: Change flag to -o and --opt Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runconfig/hostconfig.go | 4 ++-- runconfig/parse.go | 20 +++++++++++++------- runtime/container.go | 2 +- runtime/execdriver/native/configuration/fs.go | 19 +++++++++++++++++++ runtime/execdriver/native/configuration/parse.go | 3 +++ 5 files changed, 38 insertions(+), 10 deletions(-) create mode 100644 runtime/execdriver/native/configuration/fs.go diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 8ee2288b4b..b564f98cd3 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -13,7 +13,7 @@ type HostConfig struct { PortBindings nat.PortMap Links []string PublishAllPorts bool - PluginOptions map[string][]string + DriverOptions map[string][]string } type KeyValuePair struct { @@ -29,7 +29,7 @@ func ContainerHostConfigFromJob(job *engine.Job) 
*HostConfig { } job.GetenvJson("LxcConf", &hostConfig.LxcConf) job.GetenvJson("PortBindings", &hostConfig.PortBindings) - job.GetenvJson("PluginOptions", &hostConfig.PluginOptions) + job.GetenvJson("DriverOptions", &hostConfig.DriverOptions) if Binds := job.GetenvList("Binds"); Binds != nil { hostConfig.Binds = Binds } diff --git a/runconfig/parse.go b/runconfig/parse.go index afcaec304f..2f51dface2 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -45,7 +45,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flDnsSearch = opts.NewListOpts(opts.ValidateDomain) flVolumesFrom opts.ListOpts flLxcOpts opts.ListOpts - flPluginOpts opts.ListOpts + flDriverOpts opts.ListOpts flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id") @@ -77,8 +77,8 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom dns search domains") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") - cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") - cmd.Var(&flPluginOpts, []string{"-plugin"}, "Add custom plugin options") + cmd.Var(&flLxcOpts, []string{"#lxc-conf", "#-lxc-conf"}, "Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + cmd.Var(&flDriverOpts, []string{"o", "-opt"}, "Add custom driver options") if err := cmd.Parse(args); err != nil { return nil, nil, cmd, err @@ -208,7 +208,10 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf WorkingDir: *flWorkingDir, } - pluginOptions := parsePluginOpts(flPluginOpts) + pluginOptions, err := parseDriverOpts(flDriverOpts) + if err != nil { + return nil, nil, cmd, err + } hostConfig := &HostConfig{ Binds: binds, @@ -218,7 +221,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf PortBindings: portBindings, Links: flLinks.GetAll(), PublishAllPorts: *flPublishAll, - PluginOptions: pluginOptions, + DriverOptions: pluginOptions, } if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { @@ -253,15 +256,18 @@ func parseLxcOpt(opt string) (string, string, error) { return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } -func parsePluginOpts(opts opts.ListOpts) map[string][]string { +func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) { out := make(map[string][]string, len(opts.GetAll())) for _, o := range opts.GetAll() { parts := strings.SplitN(o, " ", 2) + if len(parts) < 2 { + return nil, fmt.Errorf("invalid opt format %s", o) + } values, exists := out[parts[0]] if !exists { values = []string{} } out[parts[0]] = append(values, parts[1]) } - return out + return out, nil } diff --git a/runtime/container.go b/runtime/container.go index 1f0f82eebb..ee5045e374 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -361,7 +361,7 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s func populateCommand(c *Container) { var ( en *execdriver.Network - driverConfig = c.hostConfig.PluginOptions + driverConfig = c.hostConfig.DriverOptions ) if driverConfig == nil { diff --git 
a/runtime/execdriver/native/configuration/fs.go b/runtime/execdriver/native/configuration/fs.go new file mode 100644 index 0000000000..76fb2f08da --- /dev/null +++ b/runtime/execdriver/native/configuration/fs.go @@ -0,0 +1,19 @@ +package configuration + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/libcontainer" + "strings" +) + +func parseFsOpts(container *libcontainer.Container, opts []string) error { + opt := strings.TrimSpace(opts[0]) + + switch opt { + case "readonly": + container.ReadonlyFs = true + default: + return fmt.Errorf("%s is not a valid filesystem option", opt) + } + return nil +} diff --git a/runtime/execdriver/native/configuration/parse.go b/runtime/execdriver/native/configuration/parse.go index 08b98fbd12..083fd43371 100644 --- a/runtime/execdriver/native/configuration/parse.go +++ b/runtime/execdriver/native/configuration/parse.go @@ -18,6 +18,9 @@ func ParseConfiguration(container *libcontainer.Container, running map[string]*e err error parts = strings.Split(strings.TrimSpace(opt), " ") ) + if len(parts) < 2 { + return fmt.Errorf("invalid native driver opt %s", opt) + } switch parts[0] { case "cap": -- cgit v1.2.1 From 146a212f71fe129f9d349c5c3e80ba4197e35850 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 21 Mar 2014 12:38:50 +0000 Subject: Change syntax to use dots Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runconfig/parse.go | 3 +- runtime/execdriver/native/configuration/caps.go | 27 ------ runtime/execdriver/native/configuration/fs.go | 19 ----- runtime/execdriver/native/configuration/net.go | 35 -------- runtime/execdriver/native/configuration/ns.go | 26 ------ runtime/execdriver/native/configuration/parse.go | 101 +++++++++++++++++++---- 6 files changed, 87 insertions(+), 124 deletions(-) delete mode 100644 runtime/execdriver/native/configuration/caps.go delete mode 100644 runtime/execdriver/native/configuration/fs.go delete mode 100644 runtime/execdriver/native/configuration/net.go delete mode 100644 runtime/execdriver/native/configuration/ns.go diff --git a/runconfig/parse.go b/runconfig/parse.go index 2f51dface2..b03f8732ee 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -256,10 +256,11 @@ func parseLxcOpt(opt string) (string, string, error) { return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } +// options will come in the format of name.type=value func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) { out := make(map[string][]string, len(opts.GetAll())) for _, o := range opts.GetAll() { - parts := strings.SplitN(o, " ", 2) + parts := strings.SplitN(o, ".", 2) if len(parts) < 2 { return nil, fmt.Errorf("invalid opt format %s", o) } diff --git a/runtime/execdriver/native/configuration/caps.go b/runtime/execdriver/native/configuration/caps.go deleted file mode 100644 index f4de470684..0000000000 --- a/runtime/execdriver/native/configuration/caps.go +++ /dev/null @@ -1,27 +0,0 @@ -package configuration - -import ( - "fmt" - "github.com/dotcloud/docker/pkg/libcontainer" - "strings" -) - -// i.e: cap +MKNOD cap -NET_ADMIN -func parseCapOpt(container *libcontainer.Container, opts []string) error { - var ( - value = strings.TrimSpace(opts[0]) - c = container.CapabilitiesMask.Get(value[1:]) - ) - if c == nil { - return fmt.Errorf("%s is not a valid capability", value[1:]) - } - switch value[0] { - case '-': - c.Enabled = false - case '+': - c.Enabled = true - default: - return fmt.Errorf("%c is not a valid modifier for capabilities", value[0]) - } - return nil -} diff --git 
a/runtime/execdriver/native/configuration/fs.go b/runtime/execdriver/native/configuration/fs.go deleted file mode 100644 index 76fb2f08da..0000000000 --- a/runtime/execdriver/native/configuration/fs.go +++ /dev/null @@ -1,19 +0,0 @@ -package configuration - -import ( - "fmt" - "github.com/dotcloud/docker/pkg/libcontainer" - "strings" -) - -func parseFsOpts(container *libcontainer.Container, opts []string) error { - opt := strings.TrimSpace(opts[0]) - - switch opt { - case "readonly": - container.ReadonlyFs = true - default: - return fmt.Errorf("%s is not a valid filesystem option", opt) - } - return nil -} diff --git a/runtime/execdriver/native/configuration/net.go b/runtime/execdriver/native/configuration/net.go deleted file mode 100644 index cac7f658ba..0000000000 --- a/runtime/execdriver/native/configuration/net.go +++ /dev/null @@ -1,35 +0,0 @@ -package configuration - -import ( - "fmt" - "github.com/dotcloud/docker/pkg/libcontainer" - "os/exec" - "path/filepath" - "strings" -) - -// i.e: net join -func parseNetOpt(container *libcontainer.Container, running map[string]*exec.Cmd, opts []string) error { - opt := strings.TrimSpace(opts[1]) - switch opt { - case "join": - var ( - id = strings.TrimSpace(opts[2]) - cmd = running[id] - ) - - if cmd == nil || cmd.Process == nil { - return fmt.Errorf("%s is not a valid running container to join", id) - } - nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net") - container.Networks = append(container.Networks, &libcontainer.Network{ - Type: "netns", - Context: libcontainer.Context{ - "nspath": nspath, - }, - }) - default: - return fmt.Errorf("%s is not a valid network option", opt) - } - return nil -} diff --git a/runtime/execdriver/native/configuration/ns.go b/runtime/execdriver/native/configuration/ns.go deleted file mode 100644 index ff7f367196..0000000000 --- a/runtime/execdriver/native/configuration/ns.go +++ /dev/null @@ -1,26 +0,0 @@ -package configuration - -import ( - "fmt" - "github.com/dotcloud/docker/pkg/libcontainer" - "strings" -) - -func parseNsOpt(container *libcontainer.Container, opts []string) error { - var ( - value = strings.TrimSpace(opts[0]) - ns = container.Namespaces.Get(value[1:]) - ) - if ns == nil { - return fmt.Errorf("%s is not a valid namespace", value[1:]) - } - switch value[0] { - case '-': - ns.Enabled = false - case '+': - ns.Enabled = true - default: - return fmt.Errorf("%c is not a valid modifier for namespaces", value[0]) - } - return nil -} diff --git a/runtime/execdriver/native/configuration/parse.go b/runtime/execdriver/native/configuration/parse.go index 083fd43371..0003d724b3 100644 --- a/runtime/execdriver/native/configuration/parse.go +++ b/runtime/execdriver/native/configuration/parse.go @@ -4,9 +4,86 @@ import ( "fmt" "github.com/dotcloud/docker/pkg/libcontainer" "os/exec" + "path/filepath" "strings" ) +type Action func(*libcontainer.Container, interface{}, string) error + +var actions = map[string]Action{ + "cap.add": addCap, + "cap.drop": dropCap, + "fs.readonly": readonlyFs, + "ns.add": addNamespace, + "ns.drop": dropNamespace, + "net.join": joinNetNamespace, +} + +func addCap(container *libcontainer.Container, context interface{}, value string) error { + c := container.CapabilitiesMask.Get(value) + if c == nil { + return fmt.Errorf("%s is not a valid capability", value) + } + c.Enabled = true + return nil +} + +func dropCap(container *libcontainer.Container, context interface{}, value string) error { + c := container.CapabilitiesMask.Get(value) + if c == nil { + return 
fmt.Errorf("%s is not a valid capability", value) + } + c.Enabled = false + return nil +} + +func addNamespace(container *libcontainer.Container, context interface{}, value string) error { + ns := container.Namespaces.Get(value) + if ns == nil { + return fmt.Errorf("%s is not a valid namespace", value[1:]) + } + ns.Enabled = true + return nil +} + +func dropNamespace(container *libcontainer.Container, context interface{}, value string) error { + ns := container.Namespaces.Get(value) + if ns == nil { + return fmt.Errorf("%s is not a valid namespace", value[1:]) + } + ns.Enabled = false + return nil +} + +func readonlyFs(container *libcontainer.Container, context interface{}, value string) error { + switch value { + case "1", "true": + container.ReadonlyFs = true + default: + container.ReadonlyFs = false + } + return nil +} + +func joinNetNamespace(container *libcontainer.Container, context interface{}, value string) error { + var ( + running = context.(map[string]*exec.Cmd) + cmd = running[value] + ) + + if cmd == nil || cmd.Process == nil { + return fmt.Errorf("%s is not a valid running container to join", value) + } + nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net") + container.Networks = append(container.Networks, &libcontainer.Network{ + Type: "netns", + Context: libcontainer.Context{ + "nspath": nspath, + }, + }) + return nil +} + // configureCustomOptions takes string commands from the user and allows modification of the // container's default configuration. // @@ -14,25 +91,17 @@ import ( // i.e: cgroup devices.allow *:* func ParseConfiguration(container *libcontainer.Container, running map[string]*exec.Cmd, opts []string) error { for _, opt := range opts { - var ( - err error - parts = strings.Split(strings.TrimSpace(opt), " ") - ) - if len(parts) < 2 { - return fmt.Errorf("invalid native driver opt %s", opt) + kv := strings.SplitN(opt, "=", 2) + if len(kv) < 2 { + return fmt.Errorf("invalid format for %s", opt) } - switch parts[0] { - case "cap": - err = parseCapOpt(container, parts[1:]) - case "ns": - err = parseNsOpt(container, parts[1:]) - case "net": - err = parseNetOpt(container, running, parts[1:]) - default: - return fmt.Errorf("%s is not a valid configuration option for the native driver", parts[0]) + action, exists := actions[kv[0]] + if !exists { + return fmt.Errorf("%s is not a valid option for the native driver", kv[0]) } - if err != nil { + + if err := action(container, running, kv[1]); err != nil { return err } } -- cgit v1.2.1 From 83618c2b81c561cd77fd70eca90b2b251f61fcc1 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 21 Mar 2014 14:07:16 +0000 Subject: Add more native driver options Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/execdriver/native/configuration/parse.go | 80 ++++++++++++++++++++++-- 1 file changed, 74 insertions(+), 6 deletions(-) diff --git a/runtime/execdriver/native/configuration/parse.go b/runtime/execdriver/native/configuration/parse.go index 0003d724b3..1733b94426 100644 --- a/runtime/execdriver/native/configuration/parse.go +++ b/runtime/execdriver/native/configuration/parse.go @@ -5,18 +5,70 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer" "os/exec" "path/filepath" + "strconv" "strings" ) type Action func(*libcontainer.Container, interface{}, string) error var actions = map[string]Action{ - "cap.add": addCap, - "cap.drop": dropCap, - "fs.readonly": readonlyFs, - "ns.add": addNamespace, - "ns.drop": dropNamespace, - "net.join": joinNetNamespace, + "cap.add": addCap, 
// add a cap + "cap.drop": dropCap, // drop a cap + + "ns.add": addNamespace, // add a namespace + "ns.drop": dropNamespace, // drop a namespace when cloning + + "net.join": joinNetNamespace, // join another containers net namespace + // "net.veth.mac": vethMacAddress, // set the mac address for the veth + + "cgroups.cpu_shares": cpuShares, // set the cpu shares + "cgroups.memory": memory, // set the memory limit + "cgroups.memory_swap": memorySwap, // set the memory swap limit + + "apparmor_profile": apparmorProfile, // set the apparmor profile to apply + + "fs.readonly": readonlyFs, // make the rootfs of the container read only +} + +func apparmorProfile(container *libcontainer.Container, context interface{}, value string) error { + container.Context["apparmor_profile"] = value + return nil +} + +func cpuShares(container *libcontainer.Container, context interface{}, value string) error { + if container.Cgroups == nil { + return fmt.Errorf("cannot set cgroups when they are disabled") + } + v, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return err + } + container.Cgroups.CpuShares = v + return nil +} + +func memory(container *libcontainer.Container, context interface{}, value string) error { + if container.Cgroups == nil { + return fmt.Errorf("cannot set cgroups when they are disabled") + } + v, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return err + } + container.Cgroups.Memory = v + return nil +} + +func memorySwap(container *libcontainer.Container, context interface{}, value string) error { + if container.Cgroups == nil { + return fmt.Errorf("cannot set cgroups when they are disabled") + } + v, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return err + } + container.Cgroups.MemorySwap = v + return nil } func addCap(container *libcontainer.Container, context interface{}, value string) error { @@ -84,6 +136,22 @@ func joinNetNamespace(container *libcontainer.Container, context interface{}, va return nil } +func vethMacAddress(container *libcontainer.Container, context interface{}, value string) error { + var veth *libcontainer.Network + + for _, network := range container.Networks { + if network.Type == "veth" { + veth = network + break + } + } + if veth == nil { + return fmt.Errorf("not veth configured for container") + } + veth.Context["mac"] = value + return nil +} + // configureCustomOptions takes string commands from the user and allows modification of the // container's default configuration. // -- cgit v1.2.1 From 2c58a1e2886433a4266615b1f492f829e7a6f53f Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 21 Mar 2014 14:17:17 +0000 Subject: Change placement of readonly filesystem We need to change it to read only at the very end so that bound, copy dev nodes and other ops do not fail. 
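For illustration only (not part of this patch): a minimal Go sketch, under assumptions, of the ordering this change moves to. Here setupMountsAndDevices is a hypothetical stand-in for the real nsinit bind-mount and device-node setup, and the read-only remount happens strictly last so that those earlier writes never hit a read-only filesystem. The real code goes through the pkg/libcontainer system.Mount wrapper; the sketch calls syscall.Mount directly.

    package mountsketch

    import (
        "fmt"
        "syscall"
    )

    // setupRootfs bind-mounts the rootfs, runs all writable setup, and only
    // then remounts the root read-only (the step this commit relocates).
    func setupRootfs(rootfs string, setupMountsAndDevices func(string) error, readonly bool) error {
        // 1. Recursive bind mount of the rootfs onto itself, still read-write.
        if err := syscall.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
            return fmt.Errorf("mounting %s as bind %s", rootfs, err)
        }
        // 2. Bind mounts, copied /dev nodes, pivot_root, etc. run while the
        //    filesystem is still writable (hypothetical callback).
        if err := setupMountsAndDevices(rootfs); err != nil {
            return err
        }
        // 3. Very last step: remount the (now current) root read-only.
        if readonly {
            if err := syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, ""); err != nil {
                return fmt.Errorf("mounting %s as readonly %s", rootfs, err)
            }
        }
        return nil
    }
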
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/mount.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go index 61a90125e0..19dacfaa17 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/nsinit/mount.go @@ -31,11 +31,6 @@ func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, cons if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil { return fmt.Errorf("mouting %s as bind %s", rootfs, err) } - if readonly { - if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, ""); err != nil { - return fmt.Errorf("mounting %s as readonly %s", rootfs, err) - } - } if err := mountSystem(rootfs); err != nil { return fmt.Errorf("mount system %s", err) } @@ -81,6 +76,12 @@ func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, cons } } + if readonly { + if err := system.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, ""); err != nil { + return fmt.Errorf("mounting %s as readonly %s", rootfs, err) + } + } + system.Umask(0022) return nil -- cgit v1.2.1 From 708ecd7da2125a47abb9678ed382893c7b30f10f Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Wed, 12 Mar 2014 01:58:53 -0600 Subject: Add mention of mounting cgroupfs properly to PACKAGERS.md Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- docs/sources/installation/binaries.rst | 6 ++++++ hack/PACKAGERS.md | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/docs/sources/installation/binaries.rst b/docs/sources/installation/binaries.rst index bfdfbe211f..a367a1a94c 100644 --- a/docs/sources/installation/binaries.rst +++ b/docs/sources/installation/binaries.rst @@ -29,6 +29,12 @@ To run properly, docker needs the following software to be installed at runtime: - iptables version 1.4 or later - Git version 1.7 or later - XZ Utils 4.9 or later +- a `properly mounted + `_ + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point `is + `_ `not + `_ `sufficient + `_) Check kernel dependencies diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index 5dcb120689..e525a838a1 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -239,6 +239,12 @@ installed and available at runtime: * iptables version 1.4 or later * XZ Utils version 4.9 or later +* a [properly + mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point + [is](https://github.com/dotcloud/docker/issues/2683) + [not](https://github.com/dotcloud/docker/issues/3485) + [sufficient](https://github.com/dotcloud/docker/issues/4568)) Additionally, the Docker client needs the following software to be installed and available at runtime: -- cgit v1.2.1 From 9a7be1b015a1ba79e5480d0ddddfa5954b994507 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 21 Mar 2014 14:53:47 +0000 Subject: Add cpuset.cpus to cgroups and native driver options Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/cgroups/cgroups.go | 32 +++++++++++++++++++--- runtime/execdriver/native/configuration/parse.go | 35 ++++++++++++++++++++---- 2 files changed, 57 insertions(+), 10 deletions(-) diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go index b40e1a31fa..9d485d1080 100644 --- a/pkg/cgroups/cgroups.go +++ 
b/pkg/cgroups/cgroups.go @@ -16,10 +16,11 @@ type Cgroup struct { Name string `json:"name,omitempty"` Parent string `json:"parent,omitempty"` - DeviceAccess bool `json:"device_access,omitempty"` // name of parent cgroup or slice - Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) - MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap - CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) + DeviceAccess bool `json:"device_access,omitempty"` // name of parent cgroup or slice + Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) + MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap + CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) + CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use } // https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt @@ -98,6 +99,7 @@ func (c *Cgroup) Cleanup(root string) error { get("memory"), get("devices"), get("cpu"), + get("cpuset"), } { os.RemoveAll(path) } @@ -150,6 +152,9 @@ func (c *Cgroup) Apply(pid int) error { if err := c.setupCpu(cgroupRoot, pid); err != nil { return err } + if err := c.setupCpuset(cgroupRoot, pid); err != nil { + return err + } return nil } @@ -248,3 +253,22 @@ func (c *Cgroup) setupCpu(cgroupRoot string, pid int) (err error) { } return nil } + +func (c *Cgroup) setupCpuset(cgroupRoot string, pid int) (err error) { + if c.CpusetCpus != "" { + dir, err := c.Join(cgroupRoot, "cpuset", pid) + if err != nil { + return err + } + defer func() { + if err != nil { + os.RemoveAll(dir) + } + }() + + if err := writeFile(dir, "cpuset.cpus", c.CpusetCpus); err != nil { + return err + } + } + return nil +} diff --git a/runtime/execdriver/native/configuration/parse.go b/runtime/execdriver/native/configuration/parse.go index 1733b94426..090cb29660 100644 --- a/runtime/execdriver/native/configuration/parse.go +++ b/runtime/execdriver/native/configuration/parse.go @@ -3,6 +3,7 @@ package configuration import ( "fmt" "github.com/dotcloud/docker/pkg/libcontainer" + "github.com/dotcloud/docker/utils" "os/exec" "path/filepath" "strconv" @@ -19,17 +20,40 @@ var actions = map[string]Action{ "ns.drop": dropNamespace, // drop a namespace when cloning "net.join": joinNetNamespace, // join another containers net namespace - // "net.veth.mac": vethMacAddress, // set the mac address for the veth "cgroups.cpu_shares": cpuShares, // set the cpu shares "cgroups.memory": memory, // set the memory limit "cgroups.memory_swap": memorySwap, // set the memory swap limit + "cgroups.cpuset.cpus": cpusetCpus, // set the cpus used "apparmor_profile": apparmorProfile, // set the apparmor profile to apply "fs.readonly": readonlyFs, // make the rootfs of the container read only } +// GetSupportedActions returns a list of all the avaliable actions supported by the driver +// TODO: this should return a description also +func GetSupportedActions() []string { + var ( + i int + out = make([]string, len(actions)) + ) + for k := range actions { + out[i] = k + i++ + } + return out +} + +func cpusetCpus(container *libcontainer.Container, context interface{}, value string) error { + if container.Cgroups == nil { + return fmt.Errorf("cannot set cgroups when they are disabled") + } + container.Cgroups.CpusetCpus = value + + return nil +} + func apparmorProfile(container *libcontainer.Container, context interface{}, value string) error 
{ container.Context["apparmor_profile"] = value return nil @@ -39,7 +63,7 @@ func cpuShares(container *libcontainer.Container, context interface{}, value str if container.Cgroups == nil { return fmt.Errorf("cannot set cgroups when they are disabled") } - v, err := strconv.ParseInt(value, 0, 64) + v, err := strconv.ParseInt(value, 10, 0) if err != nil { return err } @@ -51,7 +75,8 @@ func memory(container *libcontainer.Container, context interface{}, value string if container.Cgroups == nil { return fmt.Errorf("cannot set cgroups when they are disabled") } - v, err := strconv.ParseInt(value, 0, 64) + + v, err := utils.RAMInBytes(value) if err != nil { return err } @@ -138,7 +163,6 @@ func joinNetNamespace(container *libcontainer.Container, context interface{}, va func vethMacAddress(container *libcontainer.Container, context interface{}, value string) error { var veth *libcontainer.Network - for _, network := range container.Networks { if network.Type == "veth" { veth = network @@ -155,8 +179,7 @@ func vethMacAddress(container *libcontainer.Container, context interface{}, valu // configureCustomOptions takes string commands from the user and allows modification of the // container's default configuration. // -// format: <...value> -// i.e: cgroup devices.allow *:* +// TODO: this can be moved to a general utils or parser in pkg func ParseConfiguration(container *libcontainer.Container, running map[string]*exec.Cmd, opts []string) error { for _, opt := range opts { kv := strings.SplitN(opt, "=", 2) -- cgit v1.2.1 From 10fdbc0467d1be6c7c731d3f35590d87ee42f96f Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 24 Mar 2014 07:16:40 +0000 Subject: Add unit test for lxc conf merge and native opts Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/container.go | 10 +- runtime/execdriver/lxc/lxc_template.go | 2 +- runtime/execdriver/native/configuration/parse.go | 14 -- .../execdriver/native/configuration/parse_test.go | 166 +++++++++++++++++++++ runtime/execdriver/native/create.go | 66 +++++--- runtime/execdriver/native/default_template.go | 44 ------ .../execdriver/native/template/default_template.go | 43 ++++++ runtime/utils.go | 20 +++ runtime/utils_test.go | 27 ++++ 9 files changed, 305 insertions(+), 87 deletions(-) create mode 100644 runtime/execdriver/native/configuration/parse_test.go delete mode 100644 runtime/execdriver/native/default_template.go create mode 100644 runtime/execdriver/native/template/default_template.go create mode 100644 runtime/utils_test.go diff --git a/runtime/container.go b/runtime/container.go index ee5045e374..be162c7a21 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -383,14 +383,8 @@ func populateCommand(c *Container) { } } - // merge in the lxc conf options into the generic config map - if lxcConf := c.hostConfig.LxcConf; lxcConf != nil { - lxc := driverConfig["lxc"] - for _, pair := range lxcConf { - lxc = append(lxc, fmt.Sprintf("%s = %s", pair.Key, pair.Value)) - } - driverConfig["lxc"] = lxc - } + // TODO: this can be removed after lxc-conf is fully deprecated + mergeLxcConfIntoOptions(c.hostConfig, driverConfig) resources := &execdriver.Resources{ Memory: c.Config.Memory, diff --git a/runtime/execdriver/lxc/lxc_template.go b/runtime/execdriver/lxc/lxc_template.go index 7979e4f284..c5ba876dce 100644 --- a/runtime/execdriver/lxc/lxc_template.go +++ b/runtime/execdriver/lxc/lxc_template.go @@ -120,7 +120,7 @@ lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} {{if .Config.lxc}} {{range $value := .Config.lxc}} 
-{{$value}} +lxc.{{$value}} {{end}} {{end}} ` diff --git a/runtime/execdriver/native/configuration/parse.go b/runtime/execdriver/native/configuration/parse.go index 090cb29660..6d6c643919 100644 --- a/runtime/execdriver/native/configuration/parse.go +++ b/runtime/execdriver/native/configuration/parse.go @@ -31,20 +31,6 @@ var actions = map[string]Action{ "fs.readonly": readonlyFs, // make the rootfs of the container read only } -// GetSupportedActions returns a list of all the avaliable actions supported by the driver -// TODO: this should return a description also -func GetSupportedActions() []string { - var ( - i int - out = make([]string, len(actions)) - ) - for k := range actions { - out[i] = k - i++ - } - return out -} - func cpusetCpus(container *libcontainer.Container, context interface{}, value string) error { if container.Cgroups == nil { return fmt.Errorf("cannot set cgroups when they are disabled") diff --git a/runtime/execdriver/native/configuration/parse_test.go b/runtime/execdriver/native/configuration/parse_test.go new file mode 100644 index 0000000000..8001358766 --- /dev/null +++ b/runtime/execdriver/native/configuration/parse_test.go @@ -0,0 +1,166 @@ +package configuration + +import ( + "github.com/dotcloud/docker/runtime/execdriver/native/template" + "testing" +) + +func TestSetReadonlyRootFs(t *testing.T) { + var ( + container = template.New() + opts = []string{ + "fs.readonly=true", + } + ) + + if container.ReadonlyFs { + t.Fatal("container should not have a readonly rootfs by default") + } + if err := ParseConfiguration(container, nil, opts); err != nil { + t.Fatal(err) + } + + if !container.ReadonlyFs { + t.Fatal("container should have a readonly rootfs") + } +} + +func TestConfigurationsDoNotConflict(t *testing.T) { + var ( + container1 = template.New() + container2 = template.New() + opts = []string{ + "cap.add=NET_ADMIN", + } + ) + + if err := ParseConfiguration(container1, nil, opts); err != nil { + t.Fatal(err) + } + + if !container1.CapabilitiesMask.Get("NET_ADMIN").Enabled { + t.Fatal("container one should have NET_ADMIN enabled") + } + if container2.CapabilitiesMask.Get("NET_ADMIN").Enabled { + t.Fatal("container two should not have NET_ADMIN enabled") + } +} + +func TestCpusetCpus(t *testing.T) { + var ( + container = template.New() + opts = []string{ + "cgroups.cpuset.cpus=1,2", + } + ) + if err := ParseConfiguration(container, nil, opts); err != nil { + t.Fatal(err) + } + + if expected := "1,2"; container.Cgroups.CpusetCpus != expected { + t.Fatalf("expected %s got %s for cpuset.cpus", expected, container.Cgroups.CpusetCpus) + } +} + +func TestAppArmorProfile(t *testing.T) { + var ( + container = template.New() + opts = []string{ + "apparmor_profile=koye-the-protector", + } + ) + if err := ParseConfiguration(container, nil, opts); err != nil { + t.Fatal(err) + } + if expected := "koye-the-protector"; container.Context["apparmor_profile"] != expected { + t.Fatalf("expected profile %s got %s", expected, container.Context["apparmor_profile"]) + } +} + +func TestCpuShares(t *testing.T) { + var ( + container = template.New() + opts = []string{ + "cgroups.cpu_shares=1048", + } + ) + if err := ParseConfiguration(container, nil, opts); err != nil { + t.Fatal(err) + } + + if expected := int64(1048); container.Cgroups.CpuShares != expected { + t.Fatalf("expected cpu shares %d got %d", expected, container.Cgroups.CpuShares) + } +} + +func TestCgroupMemory(t *testing.T) { + var ( + container = template.New() + opts = []string{ + "cgroups.memory=500m", + } + ) + if 
err := ParseConfiguration(container, nil, opts); err != nil { + t.Fatal(err) + } + + if expected := int64(500 * 1024 * 1024); container.Cgroups.Memory != expected { + t.Fatalf("expected memory %d got %d", expected, container.Cgroups.Memory) + } +} + +func TestAddCap(t *testing.T) { + var ( + container = template.New() + opts = []string{ + "cap.add=MKNOD", + "cap.add=SYS_ADMIN", + } + ) + if err := ParseConfiguration(container, nil, opts); err != nil { + t.Fatal(err) + } + + if !container.CapabilitiesMask.Get("MKNOD").Enabled { + t.Fatal("container should have MKNOD enabled") + } + if !container.CapabilitiesMask.Get("SYS_ADMIN").Enabled { + t.Fatal("container should have SYS_ADMIN enabled") + } +} + +func TestDropCap(t *testing.T) { + var ( + container = template.New() + opts = []string{ + "cap.drop=MKNOD", + } + ) + // enabled all caps like in privileged mode + for _, c := range container.CapabilitiesMask { + c.Enabled = true + } + if err := ParseConfiguration(container, nil, opts); err != nil { + t.Fatal(err) + } + + if container.CapabilitiesMask.Get("MKNOD").Enabled { + t.Fatal("container should not have MKNOD enabled") + } +} + +func TestDropNamespace(t *testing.T) { + var ( + container = template.New() + opts = []string{ + "ns.drop=NEWNET", + } + ) + if err := ParseConfiguration(container, nil, opts); err != nil { + t.Fatal(err) + } + + if container.Namespaces.Get("NEWNET").Enabled { + t.Fatal("container should not have NEWNET enabled") + } +} diff --git a/runtime/execdriver/native/create.go b/runtime/execdriver/native/create.go index 7118edc91e..7e663f0555 100644 --- a/runtime/execdriver/native/create.go +++ b/runtime/execdriver/native/create.go @@ -5,30 +5,53 @@ import ( "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/runtime/execdriver/native/configuration" + "github.com/dotcloud/docker/runtime/execdriver/native/template" "os" ) // createContainer populates and configures the container type with the // data provided by the execdriver.Command func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container, error) { - container := getDefaultTemplate() + container := template.New() container.Hostname = getEnv("HOSTNAME", c.Env) container.Tty = c.Tty container.User = c.User container.WorkingDir = c.WorkingDir container.Env = c.Env + container.Cgroups.Name = c.ID + // check to see if we are running in ramdisk to disable pivot root + container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" - loopbackNetwork := libcontainer.Network{ - Mtu: c.Network.Mtu, - Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0), - Gateway: "localhost", - Type: "loopback", - Context: libcontainer.Context{}, + if err := d.createNetwork(container, c); err != nil { + return nil, err } + if c.Privileged { + if err := d.setPrivileged(container); err != nil { + return nil, err + } + } + if err := d.setupCgroups(container, c); err != nil { + return nil, err + } + if err := d.setupMounts(container, c); err != nil { + return nil, err + } + if err := configuration.ParseConfiguration(container, d.activeContainers, c.Config["native"]); err != nil { + return nil, err + } + return container, nil +} +func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.Command) error { container.Networks = []*libcontainer.Network{ - &loopbackNetwork, + { + Mtu: c.Network.Mtu, + Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0), + Gateway: "localhost", + Type: "loopback", + Context: libcontainer.Context{}, + }, } if c.Network.Interface 
!= nil { @@ -44,27 +67,30 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container } container.Networks = append(container.Networks, &vethNetwork) } + return nil +} - container.Cgroups.Name = c.ID - if c.Privileged { - container.CapabilitiesMask = nil - container.Cgroups.DeviceAccess = true - container.Context["apparmor_profile"] = "unconfined" +func (d *driver) setPrivileged(container *libcontainer.Container) error { + for _, c := range container.CapabilitiesMask { + c.Enabled = true } + container.Cgroups.DeviceAccess = true + container.Context["apparmor_profile"] = "unconfined" + return nil +} + +func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.Command) error { if c.Resources != nil { container.Cgroups.CpuShares = c.Resources.CpuShares container.Cgroups.Memory = c.Resources.Memory container.Cgroups.MemorySwap = c.Resources.MemorySwap } - // check to see if we are running in ramdisk to disable pivot root - container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" + return nil +} +func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Command) error { for _, m := range c.Mounts { container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private}) } - - if err := configuration.ParseConfiguration(container, d.activeContainers, c.Config["native"]); err != nil { - return nil, err - } - return container, nil + return nil } diff --git a/runtime/execdriver/native/default_template.go b/runtime/execdriver/native/default_template.go deleted file mode 100644 index 0dcd7db356..0000000000 --- a/runtime/execdriver/native/default_template.go +++ /dev/null @@ -1,44 +0,0 @@ -package native - -import ( - "github.com/dotcloud/docker/pkg/cgroups" - "github.com/dotcloud/docker/pkg/libcontainer" -) - -// getDefaultTemplate returns the docker default for -// the libcontainer configuration file -func getDefaultTemplate() *libcontainer.Container { - return &libcontainer.Container{ - CapabilitiesMask: libcontainer.Capabilities{ - libcontainer.GetCapability("SETPCAP"), - libcontainer.GetCapability("SYS_MODULE"), - libcontainer.GetCapability("SYS_RAWIO"), - libcontainer.GetCapability("SYS_PACCT"), - libcontainer.GetCapability("SYS_ADMIN"), - libcontainer.GetCapability("SYS_NICE"), - libcontainer.GetCapability("SYS_RESOURCE"), - libcontainer.GetCapability("SYS_TIME"), - libcontainer.GetCapability("SYS_TTY_CONFIG"), - libcontainer.GetCapability("MKNOD"), - libcontainer.GetCapability("AUDIT_WRITE"), - libcontainer.GetCapability("AUDIT_CONTROL"), - libcontainer.GetCapability("MAC_OVERRIDE"), - libcontainer.GetCapability("MAC_ADMIN"), - libcontainer.GetCapability("NET_ADMIN"), - }, - Namespaces: libcontainer.Namespaces{ - libcontainer.GetNamespace("NEWNS"), - libcontainer.GetNamespace("NEWUTS"), - libcontainer.GetNamespace("NEWIPC"), - libcontainer.GetNamespace("NEWPID"), - libcontainer.GetNamespace("NEWNET"), - }, - Cgroups: &cgroups.Cgroup{ - Parent: "docker", - DeviceAccess: false, - }, - Context: libcontainer.Context{ - "apparmor_profile": "docker-default", - }, - } -} diff --git a/runtime/execdriver/native/template/default_template.go b/runtime/execdriver/native/template/default_template.go new file mode 100644 index 0000000000..b9eb87713e --- /dev/null +++ b/runtime/execdriver/native/template/default_template.go @@ -0,0 +1,43 @@ +package template + +import ( + "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/libcontainer" +) + +// New returns the docker default 
configuration for libcontainer +func New() *libcontainer.Container { + return &libcontainer.Container{ + CapabilitiesMask: libcontainer.Capabilities{ + libcontainer.GetCapability("SETPCAP"), + libcontainer.GetCapability("SYS_MODULE"), + libcontainer.GetCapability("SYS_RAWIO"), + libcontainer.GetCapability("SYS_PACCT"), + libcontainer.GetCapability("SYS_ADMIN"), + libcontainer.GetCapability("SYS_NICE"), + libcontainer.GetCapability("SYS_RESOURCE"), + libcontainer.GetCapability("SYS_TIME"), + libcontainer.GetCapability("SYS_TTY_CONFIG"), + libcontainer.GetCapability("MKNOD"), + libcontainer.GetCapability("AUDIT_WRITE"), + libcontainer.GetCapability("AUDIT_CONTROL"), + libcontainer.GetCapability("MAC_OVERRIDE"), + libcontainer.GetCapability("MAC_ADMIN"), + libcontainer.GetCapability("NET_ADMIN"), + }, + Namespaces: libcontainer.Namespaces{ + libcontainer.GetNamespace("NEWNS"), + libcontainer.GetNamespace("NEWUTS"), + libcontainer.GetNamespace("NEWIPC"), + libcontainer.GetNamespace("NEWPID"), + libcontainer.GetNamespace("NEWNET"), + }, + Cgroups: &cgroups.Cgroup{ + Parent: "docker", + DeviceAccess: false, + }, + Context: libcontainer.Context{ + "apparmor_profile": "docker-default", + }, + } +} diff --git a/runtime/utils.go b/runtime/utils.go index b343b5b10e..b983e67d41 100644 --- a/runtime/utils.go +++ b/runtime/utils.go @@ -1,9 +1,11 @@ package runtime import ( + "fmt" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/pkg/namesgenerator" "github.com/dotcloud/docker/runconfig" + "strings" ) func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error { @@ -30,6 +32,24 @@ func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostCon return nil } +func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig, driverConfig map[string][]string) { + if hostConfig == nil { + return + } + + // merge in the lxc conf options into the generic config map + if lxcConf := hostConfig.LxcConf; lxcConf != nil { + lxc := driverConfig["lxc"] + for _, pair := range lxcConf { + // because lxc conf gets the driver name lxc.XXXX we need to trim it off + // and let the lxc driver add it back later if needed + parts := strings.SplitN(pair.Key, ".", 2) + lxc = append(lxc, fmt.Sprintf("%s=%s", parts[1], pair.Value)) + } + driverConfig["lxc"] = lxc + } +} + type checker struct { runtime *Runtime } diff --git a/runtime/utils_test.go b/runtime/utils_test.go new file mode 100644 index 0000000000..81c745c0d5 --- /dev/null +++ b/runtime/utils_test.go @@ -0,0 +1,27 @@ +package runtime + +import ( + "github.com/dotcloud/docker/runconfig" + "testing" +) + +func TestMergeLxcConfig(t *testing.T) { + var ( + hostConfig = &runconfig.HostConfig{ + LxcConf: []runconfig.KeyValuePair{ + {Key: "lxc.cgroups.cpuset", Value: "1,2"}, + }, + } + driverConfig = make(map[string][]string) + ) + + mergeLxcConfIntoOptions(hostConfig, driverConfig) + if l := len(driverConfig["lxc"]); l > 1 { + t.Fatalf("expected lxc options len of 1 got %d", l) + } + + cpuset := driverConfig["lxc"][0] + if expected := "cgroups.cpuset=1,2"; cpuset != expected { + t.Fatalf("expected %s got %s", expected, cpuset) + } +} -- cgit v1.2.1 From f1bd79ec97c125148c690d66ebd3ac5ab3f388b2 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 24 Mar 2014 12:03:41 +0000 Subject: Revert "fix failing test to use kill instead of stop" This reverts commit 4434dcee89f7d0d0239f6b492b24e940cdbafb21. 
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- integration/server_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/integration/server_test.go b/integration/server_test.go index 617f81fa4d..a401f1306e 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -416,7 +416,7 @@ func TestRestartKillWait(t *testing.T) { }) } -func TestCreateStartRestartKillStartKillRm(t *testing.T) { +func TestCreateStartRestartStopStartKillRm(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) defer mkRuntimeFromEngine(eng, t).Nuke() @@ -456,7 +456,8 @@ func TestCreateStartRestartKillStartKillRm(t *testing.T) { t.Fatal(err) } - job = eng.Job("kill", id) + job = eng.Job("stop", id) + job.SetenvInt("t", 15) if err := job.Run(); err != nil { t.Fatal(err) } -- cgit v1.2.1 From d503714285143013d9fa6932ee5775fd155d26d2 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 24 Mar 2014 12:03:56 +0000 Subject: Revert "Disable automatic killing of containers when docker stop fails" This reverts commit 8b5cf51d600dc4f3611cf063c52cf3448e7b01e5. Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- api/client.go | 6 +++--- docs/sources/reference/api/docker_remote_api_v1.9.rst | 4 ++-- docs/sources/reference/commandline/cli.rst | 6 +++--- runtime/container.go | 12 ++++++++++-- 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/api/client.go b/api/client.go index ae9e7cef19..b39b102330 100644 --- a/api/client.go +++ b/api/client.go @@ -498,8 +498,8 @@ func (cli *DockerCli) CmdInfo(args ...string) error { } func (cli *DockerCli) CmdStop(args ...string) error { - cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM)") - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop.") + cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.") if err := cmd.Parse(args); err != nil { return nil } @@ -526,7 +526,7 @@ func (cli *DockerCli) CmdStop(args ...string) error { func (cli *DockerCli) CmdRestart(args ...string) error { cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container") - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop. Default=10") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. 
Default=10") if err := cmd.Parse(args); err != nil { return nil } diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.rst b/docs/sources/reference/api/docker_remote_api_v1.9.rst index def38edd55..27812457bb 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.rst +++ b/docs/sources/reference/api/docker_remote_api_v1.9.rst @@ -432,7 +432,7 @@ Stop a container HTTP/1.1 204 OK - :query t: number of seconds to wait for the container to stop + :query t: number of seconds to wait before killing the container :statuscode 204: no error :statuscode 404: no such container :statuscode 500: server error @@ -457,7 +457,7 @@ Restart a container HTTP/1.1 204 OK - :query t: number of seconds to wait for the container to stop + :query t: number of seconds to wait before killing the container :statuscode 204: no error :statuscode 404: no such container :statuscode 500: server error diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index d398b16e53..757f3b239b 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1360,11 +1360,11 @@ This example shows 5 containers that might be set up to test a web application c Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...] - Stop a running container (Send SIGTERM) + Stop a running container (Send SIGTERM, and then SIGKILL after grace period) - -t, --time=10: Number of seconds to wait for the container to stop. + -t, --time=10: Number of seconds to wait for the container to stop before killing it. -The main process inside the container will receive SIGTERM. +The main process inside the container will receive SIGTERM, and after a grace period, SIGKILL .. _cli_tag: diff --git a/runtime/container.go b/runtime/container.go index 6194a19c8c..bff9aea968 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -903,12 +903,20 @@ func (container *Container) Stop(seconds int) error { // 1. Send a SIGTERM if err := container.KillSig(15); err != nil { - return err + utils.Debugf("Error sending kill SIGTERM: %s", err) + log.Print("Failed to send SIGTERM to the process, force killing") + if err := container.KillSig(9); err != nil { + return err + } } // 2. Wait for the process to exit on its own if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil { - return err + log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) + // 3. 
If it doesn't, then send SIGKILL + if err := container.Kill(); err != nil { + return err + } } return nil } -- cgit v1.2.1 From f41135bc11b9a4da896e5054a73afa112b2e835f Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 24 Mar 2014 12:39:56 +0000 Subject: As far as I know this code is not used or maintained Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- hack/infrastructure/docker-ci/Dockerfile | 29 ---- hack/infrastructure/docker-ci/MAINTAINERS | 1 - hack/infrastructure/docker-ci/README.rst | 65 -------- hack/infrastructure/docker-ci/VERSION | 1 - hack/infrastructure/docker-ci/buildbot/github.py | 176 --------------------- hack/infrastructure/docker-ci/buildbot/master.cfg | 161 ------------------- .../docker-ci/dcr/prod/docker-ci.yml | 22 --- .../infrastructure/docker-ci/dcr/prod/settings.yml | 5 - .../docker-ci/dcr/stage/docker-ci.yml | 22 --- .../docker-ci/dcr/stage/settings.yml | 5 - .../docker-ci/docker-coverage/gocoverage.sh | 52 ------ hack/infrastructure/docker-ci/dockertest/docker | 1 - .../docker-ci/dockertest/docker-registry | 1 - .../docker-ci/dockertest/nightlyrelease | 13 -- hack/infrastructure/docker-ci/dockertest/project | 8 - .../docker-ci/functionaltests/test_index.py | 61 ------- .../docker-ci/functionaltests/test_registry.sh | 27 ---- hack/infrastructure/docker-ci/nginx/nginx.conf | 12 -- hack/infrastructure/docker-ci/report/Dockerfile | 28 ---- hack/infrastructure/docker-ci/report/deployment.py | 130 --------------- hack/infrastructure/docker-ci/report/report.py | 145 ----------------- hack/infrastructure/docker-ci/setup.sh | 54 ------- .../docker-ci/testbuilder/Dockerfile | 12 -- .../docker-ci/testbuilder/docker-registry.sh | 12 -- .../infrastructure/docker-ci/testbuilder/docker.sh | 18 --- .../docker-ci/testbuilder/testbuilder.sh | 40 ----- hack/infrastructure/docker-ci/tool/backup.py | 47 ------ 27 files changed, 1148 deletions(-) delete mode 100644 hack/infrastructure/docker-ci/Dockerfile delete mode 100644 hack/infrastructure/docker-ci/MAINTAINERS delete mode 100644 hack/infrastructure/docker-ci/README.rst delete mode 100644 hack/infrastructure/docker-ci/VERSION delete mode 100644 hack/infrastructure/docker-ci/buildbot/github.py delete mode 100644 hack/infrastructure/docker-ci/buildbot/master.cfg delete mode 100644 hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml delete mode 100644 hack/infrastructure/docker-ci/dcr/prod/settings.yml delete mode 100644 hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml delete mode 100644 hack/infrastructure/docker-ci/dcr/stage/settings.yml delete mode 100755 hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh delete mode 120000 hack/infrastructure/docker-ci/dockertest/docker delete mode 120000 hack/infrastructure/docker-ci/dockertest/docker-registry delete mode 100755 hack/infrastructure/docker-ci/dockertest/nightlyrelease delete mode 100755 hack/infrastructure/docker-ci/dockertest/project delete mode 100755 hack/infrastructure/docker-ci/functionaltests/test_index.py delete mode 100755 hack/infrastructure/docker-ci/functionaltests/test_registry.sh delete mode 100644 hack/infrastructure/docker-ci/nginx/nginx.conf delete mode 100644 hack/infrastructure/docker-ci/report/Dockerfile delete mode 100755 hack/infrastructure/docker-ci/report/deployment.py delete mode 100755 hack/infrastructure/docker-ci/report/report.py delete mode 100755 hack/infrastructure/docker-ci/setup.sh delete mode 100644 hack/infrastructure/docker-ci/testbuilder/Dockerfile delete mode 100755 
hack/infrastructure/docker-ci/testbuilder/docker-registry.sh delete mode 100755 hack/infrastructure/docker-ci/testbuilder/docker.sh delete mode 100755 hack/infrastructure/docker-ci/testbuilder/testbuilder.sh delete mode 100755 hack/infrastructure/docker-ci/tool/backup.py diff --git a/hack/infrastructure/docker-ci/Dockerfile b/hack/infrastructure/docker-ci/Dockerfile deleted file mode 100644 index 5c6eec9663..0000000000 --- a/hack/infrastructure/docker-ci/Dockerfile +++ /dev/null @@ -1,29 +0,0 @@ -# DOCKER-VERSION: 0.7.6 -# AUTHOR: Daniel Mizyrycki -# DESCRIPTION: docker-ci continuous integration service -# TO_BUILD: docker build -t docker-ci/docker-ci . -# TO_RUN: docker run --rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \ -# -v /data/docker-ci:/data/docker-ci docker-ci/docker-ci - -from ubuntu:12.04 -maintainer Daniel Mizyrycki - -ENV DEBIAN_FRONTEND noninteractive -RUN echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \ - /etc/apt/sources.list; apt-get update -RUN apt-get install -y --no-install-recommends python2.7 python-dev \ - libevent-dev git supervisor ssh rsync less vim sudo gcc wget nginx -RUN cd /tmp; wget http://python-distribute.org/distribute_setup.py -RUN cd /tmp; python distribute_setup.py; easy_install pip; rm distribute_setup.py - -RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 -RUN echo 'deb http://get.docker.io/ubuntu docker main' > \ - /etc/apt/sources.list.d/docker.list; apt-get update -RUN apt-get install -y lxc-docker-0.8.0 -RUN pip install SQLAlchemy==0.7.10 buildbot buildbot-slave pyopenssl boto -RUN ln -s /var/socket/docker.sock /run/docker.sock - -ADD . /docker-ci -RUN /docker-ci/setup.sh - -ENTRYPOINT ["supervisord", "-n"] diff --git a/hack/infrastructure/docker-ci/MAINTAINERS b/hack/infrastructure/docker-ci/MAINTAINERS deleted file mode 100644 index 5dfc881420..0000000000 --- a/hack/infrastructure/docker-ci/MAINTAINERS +++ /dev/null @@ -1 +0,0 @@ -Daniel Mizyrycki (@mzdaniel) diff --git a/hack/infrastructure/docker-ci/README.rst b/hack/infrastructure/docker-ci/README.rst deleted file mode 100644 index 07c1ffcec0..0000000000 --- a/hack/infrastructure/docker-ci/README.rst +++ /dev/null @@ -1,65 +0,0 @@ -========= -docker-ci -========= - -This directory contains docker-ci continuous integration system. -As expected, it is a fully dockerized and deployed using -docker-container-runner. -docker-ci is based on Buildbot, a continuous integration system designed -to automate the build/test cycle. By automatically rebuilding and testing -the tree each time something has changed, build problems are pinpointed -quickly, before other developers are inconvenienced by the failure. -We are running buildbot at Rackspace to verify docker and docker-registry -pass tests, and check for coverage code details. 
- -docker-ci instance is at https://docker-ci.docker.io/waterfall - -Inside docker-ci container we have the following directory structure: - -/docker-ci source code of docker-ci -/data/backup/docker-ci/ daily backup (replicated over S3) -/data/docker-ci/coverage/{docker,docker-registry}/ mapped to host volumes -/data/buildbot/{master,slave}/ main docker-ci buildbot config and database -/var/socket/{docker.sock} host volume access to docker socket - - -Production deployment -===================== - -:: - - # Clone docker-ci repository - git clone https://github.com/dotcloud/docker - cd docker/hack/infrastructure/docker-ci - - export DOCKER_PROD=[PRODUCTION_SERVER_IP] - - # Create data host volume. (only once) - docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ - mkdir -p /data/docker-ci/coverage/docker - docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ - mkdir -p /data/docker-ci/coverage/docker-registry - docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \ - chown -R 1000.1000 /data/docker-ci - - # dcr deployment. Define credentials and special environment dcr variables - # ( retrieved at /hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml ) - export WEB_USER=[DOCKER-CI-WEBSITE-USERNAME] - export WEB_IRC_PWD=[DOCKER-CI-WEBSITE-PASSWORD] - export BUILDBOT_PWD=[BUILDSLAVE_PASSWORD] - export AWS_ACCESS_KEY=[DOCKER_RELEASE_S3_ACCESS] - export AWS_SECRET_KEY=[DOCKER_RELEASE_S3_SECRET] - export GPG_PASSPHRASE=[DOCKER_RELEASE_PASSPHRASE] - export BACKUP_AWS_ID=[S3_BUCKET_CREDENTIAL_ACCESS] - export BACKUP_AWS_SECRET=[S3_BUCKET_CREDENTIAL_SECRET] - export SMTP_USER=[MAILGUN_SMTP_USERNAME] - export SMTP_PWD=[MAILGUN_SMTP_PASSWORD] - export EMAIL_RCP=[EMAIL_FOR_BUILD_ERRORS] - - # Build docker-ci and testbuilder docker images - docker -H $DOCKER_PROD build -t docker-ci/docker-ci . - (cd testbuilder; docker -H $DOCKER_PROD build --rm -t docker-ci/testbuilder .) - - # Run docker-ci container ( assuming no previous container running ) - (cd dcr/prod; dcr docker-ci.yml start) - (cd dcr/prod; dcr docker-ci.yml register docker-ci.docker.io) diff --git a/hack/infrastructure/docker-ci/VERSION b/hack/infrastructure/docker-ci/VERSION deleted file mode 100644 index b49b25336d..0000000000 --- a/hack/infrastructure/docker-ci/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.5.6 diff --git a/hack/infrastructure/docker-ci/buildbot/github.py b/hack/infrastructure/docker-ci/buildbot/github.py deleted file mode 100644 index 5316e13282..0000000000 --- a/hack/infrastructure/docker-ci/buildbot/github.py +++ /dev/null @@ -1,176 +0,0 @@ -# This file is part of Buildbot. Buildbot is free software: you can -# redistribute it and/or modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation, version 2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 51 -# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Copyright Buildbot Team Members - -#!/usr/bin/env python -""" -github_buildbot.py is based on git_buildbot.py - -github_buildbot.py will determine the repository information from the JSON -HTTP POST it receives from github.com and build the appropriate repository. 
-If your github repository is private, you must add a ssh key to the github -repository for the user who initiated the build on the buildslave. - -""" - -import re -import datetime -from twisted.python import log -import calendar - -try: - import json - assert json -except ImportError: - import simplejson as json - -# python is silly about how it handles timezones -class fixedOffset(datetime.tzinfo): - """ - fixed offset timezone - """ - def __init__(self, minutes, hours, offsetSign = 1): - self.minutes = int(minutes) * offsetSign - self.hours = int(hours) * offsetSign - self.offset = datetime.timedelta(minutes = self.minutes, - hours = self.hours) - - def utcoffset(self, dt): - return self.offset - - def dst(self, dt): - return datetime.timedelta(0) - -def convertTime(myTestTimestamp): - #"1970-01-01T00:00:00+00:00" - # Normalize myTestTimestamp - if myTestTimestamp[-1] == 'Z': - myTestTimestamp = myTestTimestamp[:-1] + '-00:00' - matcher = re.compile(r'(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)([-+])(\d\d):(\d\d)') - result = matcher.match(myTestTimestamp) - (year, month, day, hour, minute, second, offsetsign, houroffset, minoffset) = \ - result.groups() - if offsetsign == '+': - offsetsign = 1 - else: - offsetsign = -1 - - offsetTimezone = fixedOffset( minoffset, houroffset, offsetsign ) - myDatetime = datetime.datetime( int(year), - int(month), - int(day), - int(hour), - int(minute), - int(second), - 0, - offsetTimezone) - return calendar.timegm( myDatetime.utctimetuple() ) - -def getChanges(request, options = None): - """ - Reponds only to POST events and starts the build process - - :arguments: - request - the http request object - """ - payload = json.loads(request.args['payload'][0]) - import urllib,datetime - fname = str(datetime.datetime.now()).replace(' ','_').replace(':','-')[:19] - # Github event debug - # open('github_{0}.json'.format(fname),'w').write(json.dumps(json.loads(urllib.unquote(request.args['payload'][0])), sort_keys = True, indent = 2)) - - if 'pull_request' in payload: - user = payload['pull_request']['user']['login'] - repo = payload['pull_request']['head']['repo']['name'] - repo_url = payload['pull_request']['head']['repo']['html_url'] - else: - user = payload['repository']['owner']['name'] - repo = payload['repository']['name'] - repo_url = payload['repository']['url'] - project = request.args.get('project', None) - if project: - project = project[0] - elif project is None: - project = '' - # This field is unused: - #private = payload['repository']['private'] - changes = process_change(payload, user, repo, repo_url, project) - log.msg("Received %s changes from github" % len(changes)) - return (changes, 'git') - -def process_change(payload, user, repo, repo_url, project): - """ - Consumes the JSON as a python object and actually starts the build. - - :arguments: - payload - Python Object that represents the JSON sent by GitHub Service - Hook. - """ - changes = [] - - newrev = payload['after'] if 'after' in payload else payload['pull_request']['head']['sha'] - refname = payload['ref'] if 'ref' in payload else payload['pull_request']['head']['ref'] - - # We only care about regular heads, i.e. 
branches - match = re.match(r"^(refs\/heads\/|)([^/]+)$", refname) - if not match: - log.msg("Ignoring refname `%s': Not a branch" % refname) - return [] - - branch = match.groups()[1] - if re.match(r"^0*$", newrev): - log.msg("Branch `%s' deleted, ignoring" % branch) - return [] - else: - if 'pull_request' in payload: - if payload['action'] == 'closed': - log.msg("PR#{} closed, ignoring".format(payload['number'])) - return [] - changes = [{ - 'category' : 'github_pullrequest', - 'who' : '{0} - PR#{1}'.format(user,payload['number']), - 'files' : [], - 'comments' : payload['pull_request']['title'], - 'revision' : newrev, - 'when' : convertTime(payload['pull_request']['updated_at']), - 'branch' : branch, - 'revlink' : '{0}/commit/{1}'.format(repo_url,newrev), - 'repository' : repo_url, - 'project' : project }] - return changes - for commit in payload['commits']: - files = [] - if 'added' in commit: - files.extend(commit['added']) - if 'modified' in commit: - files.extend(commit['modified']) - if 'removed' in commit: - files.extend(commit['removed']) - when = convertTime( commit['timestamp']) - log.msg("New revision: %s" % commit['id'][:8]) - chdict = dict( - who = commit['author']['name'] - + " <" + commit['author']['email'] + ">", - files = files, - comments = commit['message'], - revision = commit['id'], - when = when, - branch = branch, - revlink = commit['url'], - repository = repo_url, - project = project) - changes.append(chdict) - return changes diff --git a/hack/infrastructure/docker-ci/buildbot/master.cfg b/hack/infrastructure/docker-ci/buildbot/master.cfg deleted file mode 100644 index 75605da8ab..0000000000 --- a/hack/infrastructure/docker-ci/buildbot/master.cfg +++ /dev/null @@ -1,161 +0,0 @@ -import os, re -from buildbot.buildslave import BuildSlave -from buildbot.schedulers.forcesched import ForceScheduler -from buildbot.schedulers.basic import SingleBranchScheduler -from buildbot.schedulers.timed import Nightly -from buildbot.changes import filter -from buildbot.config import BuilderConfig -from buildbot.process.factory import BuildFactory -from buildbot.process.properties import Property -from buildbot.steps.shell import ShellCommand -from buildbot.status import html, words -from buildbot.status.web import authz, auth -from buildbot.status.mail import MailNotifier - - -def ENV(x): - '''Promote an environment variable for global use returning its value''' - retval = os.environ.get(x, '') - globals()[x] = retval - return retval - - -class TestCommand(ShellCommand): - '''Extend ShellCommand with optional summary logs''' - def __init__(self, *args, **kwargs): - super(TestCommand, self).__init__(*args, **kwargs) - - def createSummary(self, log): - exit_status = re.sub(r'.+\n\+ exit (\d+).+', - r'\1', log.getText()[-100:], flags=re.DOTALL) - if exit_status != '0': - return - # Infer coverage path from log - if '+ COVERAGE_PATH' in log.getText(): - path = re.sub(r'.+\+ COVERAGE_PATH=((.+?)-\d+).+', - r'\2/\1', log.getText(), flags=re.DOTALL) - url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path) - self.addURL('coverage', url) - elif 'COVERAGE_FILE' in log.getText(): - path = re.sub(r'.+\+ COVERAGE_FILE=((.+?)-\d+).+', - r'\2/\1', log.getText(), flags=re.DOTALL) - url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path) - self.addURL('coverage', url) - - -PORT_WEB = 8000 # Buildbot webserver port -PORT_GITHUB = 8011 # Buildbot github hook port -PORT_MASTER = 9989 # Port where buildbot master listen buildworkers - -BUILDBOT_URL = '//localhost:{}/'.format(PORT_WEB) 
-DOCKER_REPO = 'https://github.com/docker-test/docker' -DOCKER_TEST_ARGV = 'HEAD {}'.format(DOCKER_REPO) -REGISTRY_REPO = 'https://github.com/docker-test/docker-registry' -REGISTRY_TEST_ARGV = 'HEAD {}'.format(REGISTRY_REPO) -if ENV('DEPLOYMENT') == 'staging': - BUILDBOT_URL = "//docker-ci-stage.docker.io/" -if ENV('DEPLOYMENT') == 'production': - BUILDBOT_URL = '//docker-ci.docker.io/' - DOCKER_REPO = 'https://github.com/dotcloud/docker' - DOCKER_TEST_ARGV = '' - REGISTRY_REPO = 'https://github.com/dotcloud/docker-registry' - REGISTRY_TEST_ARGV = '' - -# Credentials set by setup.sh from deployment.py -ENV('WEB_USER') -ENV('WEB_IRC_PWD') -ENV('BUILDBOT_PWD') -ENV('SMTP_USER') -ENV('SMTP_PWD') -ENV('EMAIL_RCP') -ENV('IRC_CHANNEL') - - -c = BuildmasterConfig = {} - -c['title'] = "docker-ci" -c['titleURL'] = "waterfall" -c['buildbotURL'] = BUILDBOT_URL -c['db'] = {'db_url':"sqlite:///state.sqlite"} -c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)] -c['slavePortnum'] = PORT_MASTER - - -# Schedulers -c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[ - 'docker', 'docker-registry', 'nightlyrelease', 'backup'])] -c['schedulers'] += [SingleBranchScheduler(name="docker", treeStableTimer=None, - change_filter=filter.ChangeFilter(branch='master', - repository=DOCKER_REPO), builderNames=['docker'])] -c['schedulers'] += [SingleBranchScheduler(name="registry", treeStableTimer=None, - change_filter=filter.ChangeFilter(branch='master', - repository=REGISTRY_REPO), builderNames=['docker-registry'])] -c['schedulers'] += [SingleBranchScheduler(name='docker-pr', treeStableTimer=None, - change_filter=filter.ChangeFilter(category='github_pullrequest', - project='docker'), builderNames=['docker-pr'])] -c['schedulers'] += [SingleBranchScheduler(name='docker-registry-pr', treeStableTimer=None, - change_filter=filter.ChangeFilter(category='github_pullrequest', - project='docker-registry'), builderNames=['docker-registry-pr'])] -c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=[ - 'nightlyrelease', 'backup'], hour=7, minute=00)] - - -# Builders - -# Backup -factory = BuildFactory() -factory.addStep(TestCommand(description='backup', logEnviron=False, - usePTY=True, command='/docker-ci/tool/backup.py')) -c['builders'] = [BuilderConfig(name='backup',slavenames=['buildworker'], - factory=factory)] - -# Docker test -factory = BuildFactory() -factory.addStep(TestCommand(description='docker', logEnviron=False, - usePTY=True, command='/docker-ci/dockertest/docker {}'.format(DOCKER_TEST_ARGV))) -c['builders'] += [BuilderConfig(name='docker',slavenames=['buildworker'], - factory=factory)] - -# Docker pull request test -factory = BuildFactory() -factory.addStep(TestCommand(description='docker-pr', logEnviron=False, - usePTY=True, command=['/docker-ci/dockertest/docker', - Property('revision'), Property('repository'), Property('branch')])) -c['builders'] += [BuilderConfig(name='docker-pr',slavenames=['buildworker'], - factory=factory)] - -# docker-registry test -factory = BuildFactory() -factory.addStep(TestCommand(description='docker-registry', logEnviron=False, - usePTY=True, command='/docker-ci/dockertest/docker-registry {}'.format(REGISTRY_TEST_ARGV))) -c['builders'] += [BuilderConfig(name='docker-registry',slavenames=['buildworker'], - factory=factory)] - -# Docker registry pull request test -factory = BuildFactory() -factory.addStep(TestCommand(description='docker-registry-pr', logEnviron=False, - usePTY=True, command=['/docker-ci/dockertest/docker-registry', - Property('revision'), 
Property('repository'), Property('branch')])) -c['builders'] += [BuilderConfig(name='docker-registry-pr',slavenames=['buildworker'], - factory=factory)] - -# Docker nightly release -factory = BuildFactory() -factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False, - usePTY=True, command=['/docker-ci/dockertest/nightlyrelease'])) -c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'], - factory=factory)] - -# Status -authz_cfg = authz.Authz(auth=auth.BasicAuth([(WEB_USER, WEB_IRC_PWD)]), - forceBuild='auth') -c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)] -c['status'].append(html.WebStatus(http_port=PORT_GITHUB, allowForce=True, - change_hook_dialects={ 'github': True })) -c['status'].append(MailNotifier(fromaddr='docker-test@docker.io', - sendToInterestedUsers=False, extraRecipients=[EMAIL_RCP], - mode='failing', relayhost='smtp.mailgun.org', smtpPort=587, useTls=True, - smtpUser=SMTP_USER, smtpPassword=SMTP_PWD)) -c['status'].append(words.IRC("irc.freenode.net", "dockerqabot", - channels=[IRC_CHANNEL], password=WEB_IRC_PWD, allowForce=True, - notify_events={'exception':1, 'successToFailure':1, 'failureToSuccess':1})) diff --git a/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml b/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml deleted file mode 100644 index 523535446a..0000000000 --- a/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml +++ /dev/null @@ -1,22 +0,0 @@ -docker-ci: - image: "docker-ci/docker-ci" - release_name: "docker-ci-0.5.6" - ports: ["80","2222:22","8011:8011"] - register: "80" - volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"] - command: [] - env: - - "DEPLOYMENT=production" - - "IRC_CHANNEL=docker-testing" - - "BACKUP_BUCKET=backup-ci" - - "$WEB_USER" - - "$WEB_IRC_PWD" - - "$BUILDBOT_PWD" - - "$AWS_ACCESS_KEY" - - "$AWS_SECRET_KEY" - - "$GPG_PASSPHRASE" - - "$BACKUP_AWS_ID" - - "$BACKUP_AWS_SECRET" - - "$SMTP_USER" - - "$SMTP_PWD" - - "$EMAIL_RCP" diff --git a/hack/infrastructure/docker-ci/dcr/prod/settings.yml b/hack/infrastructure/docker-ci/dcr/prod/settings.yml deleted file mode 100644 index 9831afa6dd..0000000000 --- a/hack/infrastructure/docker-ci/dcr/prod/settings.yml +++ /dev/null @@ -1,5 +0,0 @@ -default: - hipaches: ['192.168.100.67:6379'] - daemons: ['192.168.100.67:4243'] - use_ssh: False - diff --git a/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml b/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml deleted file mode 100644 index 8eba84825c..0000000000 --- a/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml +++ /dev/null @@ -1,22 +0,0 @@ -docker-ci: - image: "docker-ci/docker-ci" - release_name: "docker-ci-stage" - ports: ["80","2222:22","8011:8011"] - register: "80" - volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"] - command: [] - env: - - "DEPLOYMENT=staging" - - "IRC_CHANNEL=docker-testing-staging" - - "BACKUP_BUCKET=ci-backup-stage" - - "$BACKUP_AWS_ID" - - "$BACKUP_AWS_SECRET" - - "$WEB_USER" - - "$WEB_IRC_PWD" - - "$BUILDBOT_PWD" - - "$AWS_ACCESS_KEY" - - "$AWS_SECRET_KEY" - - "$GPG_PASSPHRASE" - - "$SMTP_USER" - - "$SMTP_PWD" - - "$EMAIL_RCP" diff --git a/hack/infrastructure/docker-ci/dcr/stage/settings.yml b/hack/infrastructure/docker-ci/dcr/stage/settings.yml deleted file mode 100644 index a7d37acff3..0000000000 --- a/hack/infrastructure/docker-ci/dcr/stage/settings.yml +++ /dev/null @@ -1,5 +0,0 @@ -default: - hipaches: ['192.168.100.65:6379'] - daemons: ['192.168.100.65:4243'] - use_ssh: False - diff --git 
a/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh b/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh deleted file mode 100755 index fdacc290b4..0000000000 --- a/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash - -export PATH='/go/bin':$PATH -export DOCKER_PATH='/go/src/github.com/dotcloud/docker' - -# Signal coverage report name, parsed by docker-ci -set -x -COVERAGE_PATH=$(date +"docker-%Y%m%d%H%M%S") -set +x - -REPORTS="/data/$COVERAGE_PATH" -INDEX="$REPORTS/index.html" - -# Test docker -cd $DOCKER_PATH -./hack/make.sh test; exit_status=$? -PROFILE_PATH="$(ls -d $DOCKER_PATH/bundles/* | sed -n '$ p')/test/coverprofiles" - -if [ "$exit_status" -eq "0" ]; then - # Download coverage dependencies - go get github.com/axw/gocov/gocov - go get -u github.com/matm/gocov-html - - # Create coverage report - mkdir -p $REPORTS - cd $PROFILE_PATH - cat > $INDEX << "EOF" - - - - - -Docker Coverage Report - -

-<h1><strong>Docker Coverage Report</strong></h1>
-<table>
-<tr><th>package</th><th>pct</th></tr>
-EOF
- for profile in *; do
- gocov convert $profile | gocov-html >$REPORTS/$profile.html
- echo "<tr><td><a href=\"${profile}.html\">$profile</a></td><td>" >> $INDEX
- go tool cover -func=$profile | sed -En '$ s/.+\t(.+)/\1/p' >> $INDEX
- echo "</td></tr>" >> $INDEX
- done
- echo "</table></body></html>
" >> $INDEX -fi - -# Signal test and coverage result, parsed by docker-ci -set -x -exit $exit_status - diff --git a/hack/infrastructure/docker-ci/dockertest/docker b/hack/infrastructure/docker-ci/dockertest/docker deleted file mode 120000 index e3f094ee63..0000000000 --- a/hack/infrastructure/docker-ci/dockertest/docker +++ /dev/null @@ -1 +0,0 @@ -project \ No newline at end of file diff --git a/hack/infrastructure/docker-ci/dockertest/docker-registry b/hack/infrastructure/docker-ci/dockertest/docker-registry deleted file mode 120000 index e3f094ee63..0000000000 --- a/hack/infrastructure/docker-ci/dockertest/docker-registry +++ /dev/null @@ -1 +0,0 @@ -project \ No newline at end of file diff --git a/hack/infrastructure/docker-ci/dockertest/nightlyrelease b/hack/infrastructure/docker-ci/dockertest/nightlyrelease deleted file mode 100755 index cface6c125..0000000000 --- a/hack/infrastructure/docker-ci/dockertest/nightlyrelease +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -if [ "$DEPLOYMENT" == "production" ]; then - AWS_S3_BUCKET='test.docker.io' -else - AWS_S3_BUCKET='get-staging.docker.io' -fi - -docker run --rm --privileged -v /run:/var/socket \ - -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY \ - -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE \ - -e DOCKER_RELEASE=1 -e DEPLOYMENT=$DEPLOYMENT docker-ci/testbuilder docker - diff --git a/hack/infrastructure/docker-ci/dockertest/project b/hack/infrastructure/docker-ci/dockertest/project deleted file mode 100755 index 8131ab533a..0000000000 --- a/hack/infrastructure/docker-ci/dockertest/project +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -set -x - -PROJECT_NAME=$(basename $0) - -docker run --rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \ - -v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3 - diff --git a/hack/infrastructure/docker-ci/functionaltests/test_index.py b/hack/infrastructure/docker-ci/functionaltests/test_index.py deleted file mode 100755 index fd002c81e8..0000000000 --- a/hack/infrastructure/docker-ci/functionaltests/test_index.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/python - -import os -username, password = os.environ['DOCKER_CREDS'].split(':') - -from selenium import webdriver -from selenium.webdriver.common.by import By -from selenium.webdriver.common.keys import Keys -from selenium.webdriver.support.ui import Select -from selenium.common.exceptions import NoSuchElementException -import unittest, time, re - -class Docker(unittest.TestCase): - def setUp(self): - self.driver = webdriver.PhantomJS() - self.driver.implicitly_wait(30) - self.base_url = "http://www.docker.io/" - self.verificationErrors = [] - self.accept_next_alert = True - - def test_docker(self): - driver = self.driver - print "Login into {0} as login user {1} ...".format(self.base_url,username) - driver.get(self.base_url + "/") - driver.find_element_by_link_text("INDEX").click() - driver.find_element_by_link_text("login").click() - driver.find_element_by_id("id_username").send_keys(username) - driver.find_element_by_id("id_password").send_keys(password) - print "Checking login user ..." 
- driver.find_element_by_css_selector("input[type=\"submit\"]").click() - try: self.assertEqual("test", driver.find_element_by_css_selector("h3").text) - except AssertionError as e: self.verificationErrors.append(str(e)) - print "Login user {0} found".format(username) - - def is_element_present(self, how, what): - try: self.driver.find_element(by=how, value=what) - except NoSuchElementException, e: return False - return True - - def is_alert_present(self): - try: self.driver.switch_to_alert() - except NoAlertPresentException, e: return False - return True - - def close_alert_and_get_its_text(self): - try: - alert = self.driver.switch_to_alert() - alert_text = alert.text - if self.accept_next_alert: - alert.accept() - else: - alert.dismiss() - return alert_text - finally: self.accept_next_alert = True - - def tearDown(self): - self.driver.quit() - self.assertEqual([], self.verificationErrors) - -if __name__ == "__main__": - unittest.main() diff --git a/hack/infrastructure/docker-ci/functionaltests/test_registry.sh b/hack/infrastructure/docker-ci/functionaltests/test_registry.sh deleted file mode 100755 index 58642529cc..0000000000 --- a/hack/infrastructure/docker-ci/functionaltests/test_registry.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh - -set -x - -# Cleanup -rm -rf docker-registry - -# Setup the environment -export SETTINGS_FLAVOR=test -export DOCKER_REGISTRY_CONFIG=config_test.yml -export PYTHONPATH=$(pwd)/docker-registry/test - -# Get latest docker registry -git clone -q https://github.com/dotcloud/docker-registry.git -cd docker-registry -sed -Ei "s#(boto_bucket: ).+#\1_env:S3_BUCKET#" config_test.yml - -# Get dependencies -pip install -q -r requirements.txt -pip install -q -r test-requirements.txt -pip install -q tox - -# Run registry tests -tox || exit 1 -python -m unittest discover -p s3.py -s test || exit 1 -python -m unittest discover -p workflow.py -s test - diff --git a/hack/infrastructure/docker-ci/nginx/nginx.conf b/hack/infrastructure/docker-ci/nginx/nginx.conf deleted file mode 100644 index 6649741134..0000000000 --- a/hack/infrastructure/docker-ci/nginx/nginx.conf +++ /dev/null @@ -1,12 +0,0 @@ -server { - listen 80; - root /data/docker-ci; - - location / { - proxy_pass http://localhost:8000/; - } - - location /coverage { - root /data/docker-ci; - } -} diff --git a/hack/infrastructure/docker-ci/report/Dockerfile b/hack/infrastructure/docker-ci/report/Dockerfile deleted file mode 100644 index 32600c4c58..0000000000 --- a/hack/infrastructure/docker-ci/report/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -# VERSION: 0.22 -# DOCKER-VERSION 0.6.3 -# AUTHOR: Daniel Mizyrycki -# DESCRIPTION: Generate docker-ci daily report -# COMMENTS: The build process is initiated by deployment.py - Report configuration is passed through ./credentials.json at -# deployment time. -# TO_BUILD: docker build -t report . -# TO_DEPLOY: docker run report - -from ubuntu:12.04 -maintainer Daniel Mizyrycki - -env PYTHONPATH /report - - -# Add report dependencies -run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \ - /etc/apt/sources.list -run apt-get update; apt-get install -y python2.7 python-pip ssh rsync - -# Set San Francisco timezone -run echo "America/Los_Angeles" >/etc/timezone -run dpkg-reconfigure --frontend noninteractive tzdata - -# Add report code and set default container command -add . 
/report -cmd "/report/report.py" diff --git a/hack/infrastructure/docker-ci/report/deployment.py b/hack/infrastructure/docker-ci/report/deployment.py deleted file mode 100755 index 5b2eaf3cab..0000000000 --- a/hack/infrastructure/docker-ci/report/deployment.py +++ /dev/null @@ -1,130 +0,0 @@ -#!/usr/bin/env python - -'''Deploy docker-ci report container on Digital Ocean. -Usage: - export CONFIG_JSON=' - { "DROPLET_NAME": "Digital_Ocean_dropplet_name", - "DO_CLIENT_ID": "Digital_Ocean_client_id", - "DO_API_KEY": "Digital_Ocean_api_key", - "DOCKER_KEY_ID": "Digital_Ocean_ssh_key_id", - "DOCKER_CI_KEY_PATH": "docker-ci_private_key_path", - "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)", - "DOCKER_CI_ADDRESS" "user@docker-ci_fqdn_server", - "SMTP_USER": "SMTP_server_user", - "SMTP_PWD": "SMTP_server_password", - "EMAIL_SENDER": "Buildbot_mailing_sender", - "EMAIL_RCP": "Buildbot_mailing_receipient" }' - python deployment.py -''' - -import re, json, requests, base64 -from fabric import api -from fabric.api import cd, run, put, sudo -from os import environ as env -from time import sleep -from datetime import datetime - -# Populate environment variables -CONFIG = json.loads(env['CONFIG_JSON']) -for key in CONFIG: - env[key] = CONFIG[key] - -# Load DOCKER_CI_KEY -env['DOCKER_CI_KEY'] = open(env['DOCKER_CI_KEY_PATH']).read() - -DROPLET_NAME = env.get('DROPLET_NAME','report') -TIMEOUT = 120 # Seconds before timeout droplet creation -IMAGE_ID = 1004145 # Docker on Ubuntu 13.04 -REGION_ID = 4 # New York 2 -SIZE_ID = 66 # memory 512MB -DO_IMAGE_USER = 'root' # Image user on Digital Ocean -API_URL = 'https://api.digitalocean.com/' - - -class digital_ocean(): - - def __init__(self, key, client): - '''Set default API parameters''' - self.key = key - self.client = client - self.api_url = API_URL - - def api(self, cmd_path, api_arg={}): - '''Make api call''' - api_arg.update({'api_key':self.key, 'client_id':self.client}) - resp = requests.get(self.api_url + cmd_path, params=api_arg).text - resp = json.loads(resp) - if resp['status'] != 'OK': - raise Exception(resp['error_message']) - return resp - - def droplet_data(self, name): - '''Get droplet data''' - data = self.api('droplets') - data = [droplet for droplet in data['droplets'] - if droplet['name'] == name] - return data[0] if data else {} - -def json_fmt(data): - '''Format json output''' - return json.dumps(data, sort_keys = True, indent = 2) - - -do = digital_ocean(env['DO_API_KEY'], env['DO_CLIENT_ID']) - -# Get DROPLET_NAME data -data = do.droplet_data(DROPLET_NAME) - -# Stop processing if DROPLET_NAME exists on Digital Ocean -if data: - print ('Droplet: {} already deployed. Not further processing.' - .format(DROPLET_NAME)) - exit(1) - -# Create droplet -do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID, - 'image_id':IMAGE_ID, 'size_id':SIZE_ID, - 'ssh_key_ids':[env['DOCKER_KEY_ID']]}) - -# Wait for droplet to be created. 
-start_time = datetime.now() -while (data.get('status','') != 'active' and ( - datetime.now()-start_time).seconds < TIMEOUT): - data = do.droplet_data(DROPLET_NAME) - print data['status'] - sleep(3) - -# Wait for the machine to boot -sleep(15) - -# Get droplet IP -ip = str(data['ip_address']) -print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip) - -api.env.host_string = ip -api.env.user = DO_IMAGE_USER -api.env.key_filename = env['DOCKER_CI_KEY_PATH'] - -# Correct timezone -sudo('echo "America/Los_Angeles" >/etc/timezone') -sudo('dpkg-reconfigure --frontend noninteractive tzdata') - -# Load JSON_CONFIG environment for Dockerfile -CONFIG_JSON= base64.b64encode( - '{{"DOCKER_CI_PUB": "{DOCKER_CI_PUB}",' - ' "DOCKER_CI_KEY": "{DOCKER_CI_KEY}",' - ' "DOCKER_CI_ADDRESS": "{DOCKER_CI_ADDRESS}",' - ' "SMTP_USER": "{SMTP_USER}",' - ' "SMTP_PWD": "{SMTP_PWD}",' - ' "EMAIL_SENDER": "{EMAIL_SENDER}",' - ' "EMAIL_RCP": "{EMAIL_RCP}"}}'.format(**env)) - -run('mkdir -p /data/report') -put('./', '/data/report') -with cd('/data/report'): - run('chmod 700 report.py') - run('echo "{}" > credentials.json'.format(CONFIG_JSON)) - run('docker build -t report .') - run('rm credentials.json') - run("echo -e '30 09 * * * /usr/bin/docker run report\n' |" - " /usr/bin/crontab -") diff --git a/hack/infrastructure/docker-ci/report/report.py b/hack/infrastructure/docker-ci/report/report.py deleted file mode 100755 index 7018cabc27..0000000000 --- a/hack/infrastructure/docker-ci/report/report.py +++ /dev/null @@ -1,145 +0,0 @@ -#!/usr/bin/python - -'''CONFIG_JSON is a json encoded string base64 environment variable. It is used -to clone docker-ci database, generate docker-ci report and submit it by email. -CONFIG_JSON data comes from the file /report/credentials.json inserted in this -container by deployment.py: - -{ "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)", - "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)", - "DOCKER_CI_ADDRESS": "user@docker-ci_fqdn_server", - "SMTP_USER": "SMTP_server_user", - "SMTP_PWD": "SMTP_server_password", - "EMAIL_SENDER": "Buildbot_mailing_sender", - "EMAIL_RCP": "Buildbot_mailing_receipient" } ''' - -import os, re, json, sqlite3, datetime, base64 -import smtplib -from datetime import timedelta -from subprocess import call -from os import environ as env - -TODAY = datetime.date.today() - -# Load credentials to the environment -env['CONFIG_JSON'] = base64.b64decode(open('/report/credentials.json').read()) - -# Remove SSH private key as it needs more processing -CONFIG = json.loads(re.sub(r'("DOCKER_CI_KEY".+?"(.+?)",)','', - env['CONFIG_JSON'], flags=re.DOTALL)) - -# Populate environment variables -for key in CONFIG: - env[key] = CONFIG[key] - -# Load SSH private key -env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1', - env['CONFIG_JSON'],flags=re.DOTALL) - -# Prevent rsync to validate host on first connection to docker-ci -os.makedirs('/root/.ssh') -open('/root/.ssh/id_rsa','w').write(env['DOCKER_CI_KEY']) -os.chmod('/root/.ssh/id_rsa',0600) -open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n') - - -# Sync buildbot database from docker-ci -call('rsync {}:/data/buildbot/master/state.sqlite .'.format( - env['DOCKER_CI_ADDRESS']), shell=True) - -class SQL: - def __init__(self, database_name): - sql = sqlite3.connect(database_name) - # Use column names as keys for fetchall rows - sql.row_factory = sqlite3.Row - sql = sql.cursor() - self.sql = sql - - def query(self,query_statement): - return self.sql.execute(query_statement).fetchall() - -sql = 
SQL("state.sqlite") - - -class Report(): - - def __init__(self,period='',date=''): - self.data = [] - self.period = 'date' if not period else period - self.date = str(TODAY) if not date else date - self.compute() - - def compute(self): - '''Compute report''' - if self.period == 'week': - self.week_report(self.date) - else: - self.date_report(self.date) - - - def date_report(self,date): - '''Create a date test report''' - builds = [] - # Get a queryset with all builds from date - rows = sql.query('SELECT * FROM builds JOIN buildrequests' - ' WHERE builds.brid=buildrequests.id and' - ' date(start_time, "unixepoch", "localtime") = "{0}"' - ' GROUP BY number'.format(date)) - build_names = sorted(set([row['buildername'] for row in rows])) - # Create a report build line for a given build - for build_name in build_names: - tried = len([row['buildername'] - for row in rows if row['buildername'] == build_name]) - fail_tests = [row['buildername'] for row in rows if ( - row['buildername'] == build_name and row['results'] != 0)] - fail = len(fail_tests) - fail_details = '' - fail_pct = int(100.0*fail/tried) if tried != 0 else 100 - builds.append({'name': build_name, 'tried': tried, 'fail': fail, - 'fail_pct': fail_pct, 'fail_details':fail_details}) - if builds: - self.data.append({'date': date, 'builds': builds}) - - - def week_report(self,date): - '''Add the week's date test reports to report.data''' - date = datetime.datetime.strptime(date,'%Y-%m-%d').date() - last_monday = date - datetime.timedelta(days=date.weekday()) - week_dates = [last_monday + timedelta(days=x) for x in range(7,-1,-1)] - for date in week_dates: - self.date_report(str(date)) - - def render_text(self): - '''Return rendered report in text format''' - retval = '' - fail_tests = {} - for builds in self.data: - retval += 'Test date: {0}\n'.format(builds['date'],retval) - table = '' - for build in builds['builds']: - table += ('Build {name:15} Tried: {tried:4} ' - ' Failures: {fail:4} ({fail_pct}%)\n'.format(**build)) - if build['name'] in fail_tests: - fail_tests[build['name']] += build['fail_details'] - else: - fail_tests[build['name']] = build['fail_details'] - retval += '{0}\n'.format(table) - retval += '\n Builds failing' - for fail_name in fail_tests: - retval += '\n' + fail_name + '\n' - for (fail_id,fail_url,rn_tests,nr_errors,log_errors, - tracelog_errors) in fail_tests[fail_name]: - retval += fail_url + '\n' - retval += '\n\n' - return retval - - -# Send email -smtp_from = env['EMAIL_SENDER'] -subject = '[docker-ci] Daily report for {}'.format(str(TODAY)) -msg = "From: {}\r\nTo: {}\r\nSubject: {}\r\n\r\n".format( - smtp_from, env['EMAIL_RCP'], subject) -msg = msg + Report('week').render_text() -server = smtplib.SMTP_SSL('smtp.mailgun.org') -server.login(env['SMTP_USER'], env['SMTP_PWD']) -server.sendmail(smtp_from, env['EMAIL_RCP'], msg) diff --git a/hack/infrastructure/docker-ci/setup.sh b/hack/infrastructure/docker-ci/setup.sh deleted file mode 100755 index 65a00f6dd0..0000000000 --- a/hack/infrastructure/docker-ci/setup.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash - -# Set timezone -echo "GMT" >/etc/timezone -dpkg-reconfigure --frontend noninteractive tzdata - -# Set ssh superuser -mkdir -p /data/buildbot /var/run/sshd /run -useradd -m -d /home/sysadmin -s /bin/bash -G sudo,docker -p '*' sysadmin -sed -Ei 's/(\%sudo.*) ALL/\1 NOPASSWD:ALL/' /etc/sudoers -cd /home/sysadmin -mkdir .ssh -chmod 700 .ssh -cat > .ssh/authorized_keys << 'EOF' -ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC7ALVhwQ68q1SjrKaAduOuOEAcWmb8kDZf5qA7T1fM8AP07EDC7nSKRJ8PXUBGTOQfxm89coJDuSJsTAZ+1PvglXhA0Mq6+knc6ZrZY+SuZlDIDAk4TOdVPoDZnmR1YW2McxHkhcGIOKeC8MMig5NeEjtgQwXzauUSPqeh8HMlLZRMooFYyyluIpn7NaCLzyWjwAQz2s3KyI7VE7hl+ncCrW86v+dciEdwqtzNoUMFb3iDpPxaiCl3rv+SB7co/5eUDTs1FZvUcYMXKQuf8R+2ZKzXOpwr0Zs8sKQXvXavCeWykwGgXLBjVkvrDcHuDD6UXCW63UKgmRECpLZaMBVIIRWLEEgTS5OSQTcxpMVe5zUW6sDvXHTcdPwWrcn1dE9F/0vLC0HJ4ADKelLX5zyTpmXGbuZuntIf1JO67D/K/P++uV1rmVIH+zgtOf23w5rX2zKb4BSTqP0sv61pmWV7MEVoEz6yXswcTjS92tb775v7XLU9vKAkt042ORFdE4/++hejhL/Lj52IRgjt1CJZHZsR9JywJZrz3kYuf8eU2J2FYh0Cpz5gmf0f+12Rt4HztnZxGPP4KuMa66e4+hpx1jynjMZ7D5QUnNYEmuvJByopn8HSluuY/kS5MMyZCZtJLEPGX4+yECX0Di/S0vCRl2NyqfCBqS+yXXT5SA1nFw== docker-test@docker.io -EOF -chmod 600 .ssh/authorized_keys -chown -R sysadmin .ssh - -# Fix docker group id for use of host dockerd by sysadmin -sed -Ei 's/(docker:x:)[^:]+/\1999/' /etc/group - -# Create buildbot configuration -cd /data/buildbot; buildbot create-master master -cp -a /data/buildbot/master/master.cfg.sample \ - /data/buildbot/master/master.cfg -cd /data/buildbot; \ - buildslave create-slave slave localhost:9989 buildworker pass -cp /docker-ci/buildbot/master.cfg /data/buildbot/master - -# Patch github webstatus to capture pull requests -cp /docker-ci/buildbot/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks -chown -R sysadmin.sysadmin /data - -# Create nginx configuration -rm /etc/nginx/sites-enabled/default -cp /docker-ci/nginx/nginx.conf /etc/nginx/conf.d/buildbot.conf -/bin/echo -e '\ndaemon off;\n' >> /etc/nginx/nginx.conf - -# Set supervisord buildbot, nginx and sshd processes -/bin/echo -e "\ -[program:buildmaster]\n\ -command=twistd --nodaemon --no_save -y buildbot.tac\n\ -directory=/data/buildbot/master\n\ -user=sysadmin\n\n\ -[program:buildworker]\n\ -command=twistd --nodaemon --no_save -y buildbot.tac\n\ -directory=/data/buildbot/slave\n\ -user=sysadmin\n" > \ - /etc/supervisor/conf.d/buildbot.conf -/bin/echo -e "[program:nginx]\ncommand=/usr/sbin/nginx\n" > \ - /etc/supervisor/conf.d/nginx.conf -/bin/echo -e "[program:sshd]\ncommand=/usr/sbin/sshd -D\n" > \ - /etc/supervisor/conf.d/sshd.conf diff --git a/hack/infrastructure/docker-ci/testbuilder/Dockerfile b/hack/infrastructure/docker-ci/testbuilder/Dockerfile deleted file mode 100644 index 8fa9b4c797..0000000000 --- a/hack/infrastructure/docker-ci/testbuilder/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -# TO_BUILD: docker build --no-cache -t docker-ci/testbuilder . -# TO_RUN: docker run --rm -u sysadmin \ -# -v /run:/var/socket docker-ci/testbuilder docker-registry -# - -FROM docker-ci/docker-ci -ENV HOME /home/sysadmin - -RUN mkdir /testbuilder -ADD . /testbuilder - -ENTRYPOINT ["/testbuilder/testbuilder.sh"] diff --git a/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh b/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh deleted file mode 100755 index a73704c50b..0000000000 --- a/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -set -x -set -e -PROJECT_PATH=$1 - -# Build the docker project -cd /data/$PROJECT_PATH -sg docker -c "docker build -q -t registry ." -cd test; sg docker -c "docker build -q -t docker-registry-test ." 
- -# Run the tests -sg docker -c "docker run --rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test" diff --git a/hack/infrastructure/docker-ci/testbuilder/docker.sh b/hack/infrastructure/docker-ci/testbuilder/docker.sh deleted file mode 100755 index c8f3c18eb9..0000000000 --- a/hack/infrastructure/docker-ci/testbuilder/docker.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -set -x -set -e -PROJECT_PATH=$1 - -# Build the docker project -cd /data/$PROJECT_PATH -sg docker -c "docker build -q -t docker ." - -if [ "$DOCKER_RELEASE" == "1" ]; then - # Do nightly release - echo sg docker -c "docker run --rm --privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh" - set +x - sg docker -c "docker run --rm --privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh" -else - # Run the tests - sg docker -c "docker run --rm --privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh" -fi diff --git a/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh b/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh deleted file mode 100755 index 70701343c2..0000000000 --- a/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash -# Download, build and run a docker project tests -# Environment variables: DEPLOYMENT - -cat $0 -set -e -set -x - -PROJECT=$1 -COMMIT=${2-HEAD} -REPO=${3-https://github.com/dotcloud/$PROJECT} -BRANCH=${4-master} -REPO_PROJ="https://github.com/docker-test/$PROJECT" -if [ "$DEPLOYMENT" == "production" ]; then - REPO_PROJ="https://github.com/dotcloud/$PROJECT" -fi -set +x - -# Generate a random string of $1 characters -function random { - cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1 -} - -PROJECT_PATH="$PROJECT-tmp-$(random 12)" - -# Set docker-test git user -set -x -git config --global user.email "docker-test@docker.io" -git config --global user.name "docker-test" - -# Fetch project -git clone -q $REPO_PROJ -b master /data/$PROJECT_PATH -cd /data/$PROJECT_PATH -echo "Git commit: $(git rev-parse HEAD)" -git fetch -q $REPO $BRANCH -git merge --no-edit $COMMIT - -# Build the project dockertest -/testbuilder/$PROJECT.sh $PROJECT_PATH -rm -rf /data/$PROJECT_PATH diff --git a/hack/infrastructure/docker-ci/tool/backup.py b/hack/infrastructure/docker-ci/tool/backup.py deleted file mode 100755 index 2db633e526..0000000000 --- a/hack/infrastructure/docker-ci/tool/backup.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python - -import os,sys,json -from datetime import datetime -from filecmp import cmp -from subprocess import check_call -from boto.s3.key import Key -from boto.s3.connection import S3Connection - -def ENV(x): - '''Promote an environment variable for global use returning its value''' - retval = os.environ.get(x, '') - globals()[x] = retval - return retval - -ROOT_PATH = '/data/backup/docker-ci' -TODAY = str(datetime.today())[:10] -BACKUP_FILE = '{}/docker-ci_{}.tgz'.format(ROOT_PATH, TODAY) -BACKUP_LINK = '{}/docker-ci.tgz'.format(ROOT_PATH) -ENV('BACKUP_BUCKET') -ENV('BACKUP_AWS_ID') -ENV('BACKUP_AWS_SECRET') - -'''Create full master buildbot backup, avoiding duplicates''' -# Ensure backup path exist -if not os.path.exists(ROOT_PATH): - os.makedirs(ROOT_PATH) -# Make actual backups -check_call('/bin/tar czf {} -C 
/data --exclude=backup --exclude=buildbot/slave' - ' . 1>/dev/null 2>&1'.format(BACKUP_FILE),shell=True) -# remove previous dump if it is the same as the latest -if (os.path.exists(BACKUP_LINK) and cmp(BACKUP_FILE, BACKUP_LINK) and - os.path._resolve_link(BACKUP_LINK) != BACKUP_FILE): - os.unlink(os.path._resolve_link(BACKUP_LINK)) -# Recreate backup link pointing to latest backup -try: - os.unlink(BACKUP_LINK) -except: - pass -os.symlink(BACKUP_FILE, BACKUP_LINK) - -# Make backup on S3 -bucket = S3Connection(BACKUP_AWS_ID,BACKUP_AWS_SECRET).get_bucket(BACKUP_BUCKET) -k = Key(bucket) -k.key = BACKUP_FILE -k.set_contents_from_filename(BACKUP_FILE) -bucket.copy_key(os.path.basename(BACKUP_LINK),BACKUP_BUCKET,BACKUP_FILE[1:]) -- cgit v1.2.1 From 68dd722e3c54995e609b2524bad501ab1d4d15d6 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 24 Mar 2014 14:15:04 +0000 Subject: Promote btrfs Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/graphdriver/driver.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/runtime/graphdriver/driver.go b/runtime/graphdriver/driver.go index 89fd03a624..9ea7d1c94c 100644 --- a/runtime/graphdriver/driver.go +++ b/runtime/graphdriver/driver.go @@ -39,10 +39,9 @@ var ( // Slice of drivers that should be used in an order priority = []string{ "aufs", + "btrfs", "devicemapper", "vfs", - // experimental, has to be enabled manually for now - "btrfs", } ) -- cgit v1.2.1 From c7540b3e94d7712b6b91ba80de0155f20156f3f3 Mon Sep 17 00:00:00 2001 From: LK4D4 Date: Mon, 24 Mar 2014 22:31:05 +0400 Subject: Workaround for hanging events. Fixes #4804 Docker-DCO-1.1-Signed-off-by: LK4D4 (github: LK4D4) --- server/server.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/server.go b/server/server.go index 840a70357d..2cb3328d55 100644 --- a/server/server.go +++ b/server/server.go @@ -222,6 +222,10 @@ func (srv *Server) Events(job *engine.Job) engine.Status { listener := make(chan utils.JSONMessage) srv.Lock() + if old, ok := srv.listeners[from]; ok { + delete(srv.listeners, from) + close(old) + } srv.listeners[from] = listener srv.Unlock() job.Stdout.Write(nil) // flush -- cgit v1.2.1 From bb034c6b42c347ffdfae834960bf2386429e1980 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 24 Mar 2014 21:38:57 -0400 Subject: Add Chef usage documentation Docker-DCO-1.1-Signed-off-by: Brian Flad (github: bflad) --- docs/sources/use/chef.rst | 95 ++++++++++++++++++++++++++++++++++++++++++++++ docs/sources/use/index.rst | 1 + 2 files changed, 96 insertions(+) create mode 100644 docs/sources/use/chef.rst diff --git a/docs/sources/use/chef.rst b/docs/sources/use/chef.rst new file mode 100644 index 0000000000..54755ad3b3 --- /dev/null +++ b/docs/sources/use/chef.rst @@ -0,0 +1,95 @@ +:title: Chef Usage +:description: Installating and using Docker via Chef +:keywords: chef, installation, usage, docker, documentation + +.. _install_using_chef: + +Using Chef +============= + +.. note:: + + Please note this is a community contributed installation path. The + only 'official' installation is using the :ref:`ubuntu_linux` + installation path. This version may sometimes be out of date. + +Requirements +------------ + +To use this guide you'll need a working installation of +`Chef `_. This cookbook supports a variety of +operating systems. + +Installation +------------ + +The cookbook is available on the `Chef Community Site +`_ and can be installed +using your favorite cookbook dependency manager. + +The source can be found on `GitHub +`_. 
+ +Usage +----- + +The cookbook provides recipes for installing Docker, configuring init +for Docker, and resources for managing images and containers. +It supports almost all Docker functionality. + +Installation +~~~~~~~~~~~~ + +.. code-block:: ruby + + include_recipe 'docker' + +Images +~~~~~~ + +The next step is to pull a Docker image. For this, we have a resource: + +.. code-block:: ruby + + docker_image 'samalba/docker-registry' + +This is equivalent to running: + +.. code-block:: bash + + docker pull samalba/docker-registry + +There are attributes available to control how long the cookbook +will allow for downloading (5 minute default). + +To remove images you no longer need: + +.. code-block:: ruby + + docker_image 'samalba/docker-registry' do + action :remove + end + +Containers +~~~~~~~~~~ + +Now you have an image where you can run commands within a container +managed by Docker. + +.. code-block:: ruby + + docker_container 'samalba/docker-registry' do + detach true + port '5000:5000' + env 'SETTINGS_FLAVOR=local' + volume '/mnt/docker:/docker-storage' + end + +This is equivalent to running the following command, but under upstart: + +.. code-block:: bash + + docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry + +The resources will accept a single string or an array of values +for any docker flags that allow multiple values. diff --git a/docs/sources/use/index.rst b/docs/sources/use/index.rst index c1b7691cca..dcf6289b41 100644 --- a/docs/sources/use/index.rst +++ b/docs/sources/use/index.rst @@ -20,4 +20,5 @@ Contents: working_with_volumes working_with_links_names ambassador_pattern_linking + chef puppet -- cgit v1.2.1 From 84e1fdf35d9d6493d389a8e8be3ab41190004b30 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 24 Mar 2014 21:43:26 -0400 Subject: docker load: add --input flag for those that do not care to read from redirected stdin Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- api/client.go | 14 +++++++++++++- docs/sources/reference/commandline/cli.rst | 15 ++++++++++++--- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/api/client.go b/api/client.go index f1a3f64f2a..f9e7445fd9 100644 --- a/api/client.go +++ b/api/client.go @@ -2075,6 +2075,8 @@ func (cli *DockerCli) CmdSave(args ...string) error { func (cli *DockerCli) CmdLoad(args ...string) error { cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN") + infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") + if err := cmd.Parse(args); err != nil { return err } @@ -2084,7 +2086,17 @@ func (cli *DockerCli) CmdLoad(args ...string) error { return nil } - if err := cli.stream("POST", "/images/load", cli.in, cli.out, nil); err != nil { + var ( + input io.Reader = cli.in + err error + ) + if *infile != "" { + input, err = os.Open(*infile) + if err != nil { + return err + } + } + if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil { return err } return nil diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index f4a5e0882f..2626280dd0 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -881,10 +881,19 @@ Known Issues (kill) :: - Usage: docker load < repository.tar + Usage: docker load - Loads a tarred repository from the standard input stream. - Restores both images and tags. 
+ Load an image from a tar archive on STDIN + + -i, --input"": Read from a tar archive file, instead of STDIN + +Loads a tarred repository from the standard input stream. +Restores both images and tags. + +.. code-block:: bash + + $ sudo docker load < busybox.tar + $ sudo docker load --input busybox.tar .. _cli_login: -- cgit v1.2.1 From c84ff187c62060f20b7039a5005a44012898df7b Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 24 Mar 2014 22:29:54 -0400 Subject: Fix typo in Using Chef documentation description Docker-DCO-1.1-Signed-off-by: Brian Flad (github: bflad) --- docs/sources/use/chef.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/use/chef.rst b/docs/sources/use/chef.rst index 54755ad3b3..919eba7a8f 100644 --- a/docs/sources/use/chef.rst +++ b/docs/sources/use/chef.rst @@ -1,5 +1,5 @@ :title: Chef Usage -:description: Installating and using Docker via Chef +:description: Installation and using Docker via Chef :keywords: chef, installation, usage, docker, documentation .. _install_using_chef: -- cgit v1.2.1 From c6c7c03cddc852c42b9f047fbd5c2fb6cecf39eb Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 24 Mar 2014 23:31:59 -0400 Subject: docker load: doc clarification Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- docs/sources/reference/commandline/cli.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 2626280dd0..3c7bb47113 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -885,9 +885,9 @@ Known Issues (kill) Load an image from a tar archive on STDIN - -i, --input"": Read from a tar archive file, instead of STDIN + -i, --input="": Read from a tar archive file, instead of STDIN -Loads a tarred repository from the standard input stream. +Loads a tarred repository from a file or the standard input stream. Restores both images and tags. .. code-block:: bash -- cgit v1.2.1 From a5ccb5b28d7e24a379f77ab7619f296aa500c8dd Mon Sep 17 00:00:00 2001 From: Ryan Thomas Date: Tue, 25 Mar 2014 14:45:11 +1100 Subject: Docker-DCO-1.1-Signed-off-by: Ryan Thomas (github: rthomas) --- registry/registry.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/registry/registry.go b/registry/registry.go index 346132bcc5..01583f97c2 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -41,7 +41,10 @@ func pingRegistryEndpoint(endpoint string) (bool, error) { conn.SetDeadline(time.Now().Add(time.Duration(10) * time.Second)) return conn, nil } - httpTransport := &http.Transport{Dial: httpDial} + httpTransport := &http.Transport{ + Dial: httpDial, + Proxy: http.ProxyFromEnvironment, + } client := &http.Client{Transport: httpTransport} resp, err := client.Get(endpoint + "_ping") if err != nil { -- cgit v1.2.1 From 69087f2d2397b18d6dd2d7b994e24ea9814e4bcd Mon Sep 17 00:00:00 2001 From: noducks Date: Sat, 22 Mar 2014 14:12:15 +0000 Subject: Reminder for OSX users not to use SUDO Useful for those who haven't made it to the examples page yet. 
https://github.com/chadoh/docker/commit/dad4a998dc716e506c874ce0e792989b9df28748 Docker-DCO-1.1-Signed-off-by: No Ducks (github: noducks) --- docs/sources/use/basics.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/use/basics.rst b/docs/sources/use/basics.rst index 447366f55a..4164e706f7 100644 --- a/docs/sources/use/basics.rst +++ b/docs/sources/use/basics.rst @@ -40,6 +40,8 @@ Repository to a local image cache. short form of the image ID. These short image IDs are the first 12 characters of the full image ID - which can be found using ``docker inspect`` or ``docker images --no-trunc=true`` + + **If you're using OS X** then you shouldn't use ``sudo`` Running an interactive shell ---------------------------- -- cgit v1.2.1 From 293157b8b38dd5ea5fa49d90501cc3c86717da40 Mon Sep 17 00:00:00 2001 From: viirya Date: Sun, 23 Mar 2014 23:55:20 +0800 Subject: check if working dir is a directory and raise corresponding errors when making dir. Docker-DCO-1.1-Signed-off-by: Liang-Chi Hsieh (github: viirya) --- integration/commands_test.go | 19 +++++++++++++++++++ runtime/container.go | 41 +++++++++++++++++++++++++++-------------- 2 files changed, 46 insertions(+), 14 deletions(-) diff --git a/integration/commands_test.go b/integration/commands_test.go index f1c5870755..7de7a227ea 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -252,6 +252,25 @@ func TestRunWorkdirExists(t *testing.T) { } } +// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected +func TestRunWorkdirExistsAndIsFile(t *testing.T) { + + cli := api.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + defer cleanup(globalEngine, t) + + c := make(chan struct{}) + go func() { + defer close(c) + if err := cli.CmdRun("-w", "/bin/cat", unitTestImageID, "pwd"); err == nil { + t.Fatal("should have failed to run when using /bin/cat as working dir.") + } + }() + + setTimeout(t, "CmdRun timed out", 5*time.Second, func() { + <-c + }) +} + func TestRunExit(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() diff --git a/runtime/container.go b/runtime/container.go index 6194a19c8c..0e5e255bfc 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -535,8 +535,18 @@ func (container *Container) Start() (err error) { if container.Config.WorkingDir != "" { container.Config.WorkingDir = path.Clean(container.Config.WorkingDir) - if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil { - return nil + + pthInfo, err := os.Stat(path.Join(container.basefs, container.Config.WorkingDir)) + if err != nil { + if !os.IsNotExist(err) { + return err + } + if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil { + return err + } + } + if pthInfo != nil && !pthInfo.IsDir() { + return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) } } @@ -950,10 +960,11 @@ func (container *Container) ExportRw() (archive.Archive, error) { return nil, err } return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.Unmount() - return err - }), nil + err := archive.Close() + container.Unmount() + return err + }), + nil } func (container *Container) Export() (archive.Archive, error) { @@ -967,10 +978,11 @@ func (container *Container) Export() (archive.Archive, error) { return nil, err } return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - 
container.Unmount() - return err - }), nil + err := archive.Close() + container.Unmount() + return err + }), + nil } func (container *Container) WaitTimeout(timeout time.Duration) error { @@ -1119,10 +1131,11 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) { return nil, err } return utils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.Unmount() - return err - }), nil + err := archive.Close() + container.Unmount() + return err + }), + nil } // Returns true if the container exposes a certain port -- cgit v1.2.1 From 8e434c314ef74618001cc95466c2b567fa0283e2 Mon Sep 17 00:00:00 2001 From: noducks Date: Tue, 25 Mar 2014 10:26:45 +0000 Subject: Force flag to prevent file already exists error. Docker-DCO-1.1-Signed-off-by: No Ducks (github: noducks) --- docs/sources/examples/mongodb.rst | 2 +- docs/sources/examples/running_riak_service.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/examples/mongodb.rst b/docs/sources/examples/mongodb.rst index 3e37d74c30..930ab2ea9d 100644 --- a/docs/sources/examples/mongodb.rst +++ b/docs/sources/examples/mongodb.rst @@ -47,7 +47,7 @@ divert ``/sbin/initctl`` to ``/bin/true`` so it thinks everything is working. # Hack for initctl not being available in Ubuntu RUN dpkg-divert --local --rename --add /sbin/initctl - RUN ln -s /bin/true /sbin/initctl + RUN ln -sf /bin/true /sbin/initctl Afterwards we'll be able to update our apt repositories and install MongoDB diff --git a/docs/sources/examples/running_riak_service.rst b/docs/sources/examples/running_riak_service.rst index ae08a4b7f0..55e5e405c9 100644 --- a/docs/sources/examples/running_riak_service.rst +++ b/docs/sources/examples/running_riak_service.rst @@ -88,7 +88,7 @@ Almost there. Next, we add a hack to get us by the lack of ``initctl``: # Hack for initctl # See: https://github.com/dotcloud/docker/issues/1024 RUN dpkg-divert --local --rename --add /sbin/initctl - RUN ln -s /bin/true /sbin/initctl + RUN ln -sf /bin/true /sbin/initctl Then, we expose the Riak Protocol Buffers and HTTP interfaces, along with SSH: -- cgit v1.2.1 From 2517370088ad11765f99d75c16b58e93fe18f85a Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 25 Mar 2014 08:30:59 -0400 Subject: docker load: added example of a multiple tag image Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- docs/sources/reference/commandline/cli.rst | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 3c7bb47113..687ce2f305 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -892,8 +892,21 @@ Restores both images and tags. .. code-block:: bash + $ sudo docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE $ sudo docker load < busybox.tar - $ sudo docker load --input busybox.tar + $ sudo docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + $ sudo docker load --input fedora.tar + $ sudo docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + busybox latest 769b9341d937 7 weeks ago 2.489 MB + fedora rawhide 0d20aec6529d 7 weeks ago 387 MB + fedora 20 58394af37342 7 weeks ago 385.5 MB + fedora heisenbug 58394af37342 7 weeks ago 385.5 MB + fedora latest 58394af37342 7 weeks ago 385.5 MB + .. 
_cli_login: -- cgit v1.2.1 From 576278102e0fa9166711f8cf23ec972fcccc085e Mon Sep 17 00:00:00 2001 From: Paul Annesley Date: Mon, 24 Mar 2014 21:21:37 -0700 Subject: install.sh (get.docker.io) aufs comment updated. devicemapper has landed, but the TODO hasn't been actioned presumably because aufs is still preferred over devicemapper when available[1]. Comment updated accordingly. Citation [1]: https://github.com/crosbymichael/docker/blob/267ca39921c35826ccbdb84fbbc0690bfef385d7/runtime/graphdriver/driver.go#L40-L46 Docker-DCO-1.1-Signed-off-by: Paul Annesley (github: pda) --- hack/install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/install.sh b/hack/install.sh index 1fa8a47480..205b57ecc7 100755 --- a/hack/install.sh +++ b/hack/install.sh @@ -85,7 +85,7 @@ case "$lsb_dist" in fi } - # TODO remove this section once device-mapper lands + # aufs is preferred over devicemapper; try to ensure the driver is available. if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then kern_extras="linux-image-extra-$(uname -r)" -- cgit v1.2.1 From d36176652ef8f0220a1cff5dc00933400c69a562 Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 24 Mar 2014 19:43:40 +0200 Subject: Bump to version v0.9.1 Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) Conflicts: VERSION --- CHANGELOG.md | 40 ++++++++++++++++++++++++++++++++++++++++ VERSION | 2 +- 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 40ba3d32ac..c8ea94361b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,45 @@ # Changelog +## 0.9.1 (2014-03-24) + +#### Builder +- Fix printing multiple messages on a single line. Fixes broken output during builds. + +#### Documentation +- Fix external link on security of containers. + +#### Contrib +- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. + +#### Hack +- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. + +#### Remote API +- Fix content-type detection in `docker cp`. + +#### Runtime +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Only unshare the mount namespace for execin. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Merge existing config when committing. +- Fix panic in monitor. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Improve deprecation message. +- Fix attach exit on darwin. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). +- devicemapper: succeed immediately when removing non-existing devices. +- devicemapper: increase timeout in waitClose to 10 seconds. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. 
+ ## 0.9.0 (2014-03-10) #### Builder diff --git a/VERSION b/VERSION index c70836ca5c..f374f6662e 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.9.0-dev +0.9.1 -- cgit v1.2.1 From b2721e05ce4d6026855718ad9b01eb7dec797cd2 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 26 Mar 2014 00:18:45 +0200 Subject: Change version to 0.9.1-dev Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index f374f6662e..dc9bff91aa 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.9.1 +0.9.1-dev -- cgit v1.2.1 From baba9cde9542b480162d11bd30ca3a522fa6b4d0 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 26 Mar 2014 11:51:27 +0000 Subject: Return error when existing bridge does not match ip Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/networkdriver/bridge/driver.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/runtime/networkdriver/bridge/driver.go b/runtime/networkdriver/bridge/driver.go index 41588b1c27..61e82dd481 100644 --- a/runtime/networkdriver/bridge/driver.go +++ b/runtime/networkdriver/bridge/driver.go @@ -93,6 +93,12 @@ func InitDriver(job *engine.Job) engine.Status { network = addr.(*net.IPNet) } else { network = addr.(*net.IPNet) + // validate that the bridge ip matches the ip specified by BridgeIP + if bridgeIP != "" { + if !network.IP.Equal(net.ParseIP(bridgeIP)) { + return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bridgeIP) + } + } } // Configure iptables for link support -- cgit v1.2.1 From 61f7d967ed635098abaf42fc411866bb198b97c5 Mon Sep 17 00:00:00 2001 From: Justin Simonelis Date: Wed, 26 Mar 2014 10:24:16 -0400 Subject: typo fix --- docs/sources/reference/builder.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/builder.rst b/docs/sources/reference/builder.rst index 1c8331e98f..6462512da0 100644 --- a/docs/sources/reference/builder.rst +++ b/docs/sources/reference/builder.rst @@ -49,7 +49,7 @@ to be created - so ``RUN cd /tmp`` will not have any effect on the next instructions. Whenever possible, Docker will re-use the intermediate images, -accelerating ``docker build`` significantly (indicated by ``Using cache``: +accelerating ``docker build`` significantly (indicated by ``Using cache``): .. 
code-block:: bash -- cgit v1.2.1 From 72dc19fd7d339321a347dce4ea39d59fd503a4cc Mon Sep 17 00:00:00 2001 From: Justin Simonelis Date: Wed, 26 Mar 2014 10:30:57 -0400 Subject: authors update --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index df091d5950..1c58d953f6 100644 --- a/AUTHORS +++ b/AUTHORS @@ -170,6 +170,7 @@ Julien Barbier Julien Dubois Justin Force Justin Plock +Justin Simonelis Karan Lyons Karl Grzeszczak Kawsar Saiyeed -- cgit v1.2.1 From 4746c761566d5d5d4754daf62d20c83cba0efee8 Mon Sep 17 00:00:00 2001 From: Paul Jimenez Date: Tue, 25 Mar 2014 16:29:58 -0400 Subject: Include contributed completions in ubuntu PPA Docker-DCO-1.1-Signed-off-by: Paul Jimenez (github: pjz) --- hack/make/ubuntu | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/hack/make/ubuntu b/hack/make/ubuntu index ebc12f27ec..403a6c7652 100644 --- a/hack/make/ubuntu +++ b/hack/make/ubuntu @@ -38,6 +38,14 @@ bundle_ubuntu() { mkdir -p $DIR/lib/systemd/system cp contrib/init/systemd/docker.service $DIR/lib/systemd/system/ + # Include contributed completions + mkdir -p $DIR/etc/bash_completion.d + cp contrib/completion/bash/docker $DIR/etc/bash_completion.d/ + mkdir -p $DIR/usr/share/zsh/vendor-completions + cp contrib/completion/zsh/_docker $DIR/usr/share/zsh/vendor-completions/ + mkdir -p $DIR/etc/fish/completions + cp contrib/completion/fish/docker.fish $DIR/etc/fish/completions/ + # Copy the binary # This will fail if the binary bundle hasn't been built mkdir -p $DIR/usr/bin -- cgit v1.2.1 From e7f3234c1e4926c966f4c9e4cf08d9aae60d21bb Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Wed, 26 Mar 2014 09:05:21 -0700 Subject: Fix fish completion when having alias on awk or grep Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- contrib/completion/fish/docker.fish | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index 9c4339fe2b..ddec61cffa 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -26,20 +26,20 @@ end function __fish_print_docker_containers --description 'Print a list of docker containers' -a select switch $select case running - docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n' + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n' case stopped - docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n' + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n' case all - docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n' + docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n' end end function __fish_print_docker_images --description 'Print a list of docker images' - docker images | awk 'NR>1' | grep -v '<none>' | awk '{print $1":"$2}' + docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1":"$2}' end function __fish_print_docker_repositories --description 'Print a list of docker repositories' - docker images | awk 'NR>1' | grep -v '<none>' | awk '{print $1}' | sort | uniq + docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1}' | sort | uniq end # common options -- cgit v1.2.1 From 
4c4356692580afb3971094e322aea64abe0e2500 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Tue, 18 Mar 2014 16:49:16 -0400 Subject: This patch adds SELinux labeling support. Docker will run the process(es) within the container with an SELinux label and will label all of the content within the container with a mount label. Any temporary file systems created within the container need to be mounted with the same mount label. The user can override the process label by specifying -Z with a string of space-separated options. For example, -Z "user=unconfined_u role=unconfined_r type=unconfined_t level=s0" would cause the process to run with the label "unconfined_u:unconfined_r:unconfined_t:s0". By default the processes will execute within the container as svirt_lxc_net_t, and all of the content in the container will be labeled svirt_sandbox_file_t. The process MCS level is based on the PID of the docker process that is creating the container. If you run the container in --priv mode, the labeling will be disabled. Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) --- Dockerfile | 2 +- graph/graph.go | 2 +- hack/PACKAGERS.md | 7 + pkg/label/label.go | 23 ++ pkg/label/label_selinux.go | 69 +++++ pkg/libcontainer/nsinit/execin.go | 11 +- pkg/libcontainer/nsinit/init.go | 8 +- pkg/libcontainer/nsinit/mount.go | 22 +- pkg/selinux/selinux.go | 387 ++++++++++++++++++++++++++ pkg/selinux/selinux_test.go | 64 +++++ runconfig/config.go | 11 + runconfig/parse.go | 20 ++ runtime/container.go | 1 + runtime/execdriver/driver.go | 5 + runtime/execdriver/lxc/lxc_template.go | 20 +- runtime/execdriver/native/default_template.go | 2 + runtime/graphdriver/aufs/aufs.go | 2 +- runtime/graphdriver/aufs/aufs_test.go | 52 ++-- runtime/graphdriver/aufs/migrate.go | 6 +- runtime/graphdriver/btrfs/btrfs.go | 6 +- runtime/graphdriver/devmapper/deviceset.go | 9 +- runtime/graphdriver/devmapper/driver.go | 15 +- runtime/graphdriver/devmapper/driver_test.go | 20 +- runtime/graphdriver/driver.go | 2 +- runtime/graphdriver/vfs/driver.go | 2 +- runtime/runtime.go | 4 +- 26 files changed, 700 insertions(+), 72 deletions(-) create mode 100644 pkg/label/label.go create mode 100644 pkg/label/label_selinux.go create mode 100644 pkg/selinux/selinux.go create mode 100644 pkg/selinux/selinux_test.go diff --git a/Dockerfile b/Dockerfile index 42438e3946..2de5b34171 100644 --- a/Dockerfile +++ b/Dockerfile @@ -87,7 +87,7 @@ RUN git config --global user.email 'docker-dummy@example.com' VOLUME /var/lib/docker WORKDIR /go/src/github.com/dotcloud/docker -ENV DOCKER_BUILDTAGS apparmor +ENV DOCKER_BUILDTAGS apparmor selinux # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/graph/graph.go b/graph/graph.go index 4349cac129..33aca486c6 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -189,7 +189,7 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, i } // Create root filesystem in the driver - if err := graph.driver.Create(img.ID, img.Parent); err != nil { + if err := graph.driver.Create(img.ID, img.Parent, ""); err != nil { return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err) } // Mount the root filesystem so we can apply the diff/layer diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index 47e8413bf3..dc255c57ad 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -177,6 +177,13 @@ export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' NOTE: if you need to set more than one build tag, space separate them. 
+If you're building a binary that may need to be used on platforms that include +SELinux, you will need to set `DOCKER_BUILDTAGS` as follows: + +```bash +export DOCKER_BUILDTAGS='selinux' +``` + ### Static Daemon If it is feasible within the constraints of your distribution, you should diff --git a/pkg/label/label.go b/pkg/label/label.go new file mode 100644 index 0000000000..ba1e9f48ea --- /dev/null +++ b/pkg/label/label.go @@ -0,0 +1,23 @@ +// +build !selinux !linux + +package label + +func GenLabels(options string) (string, string, error) { + return "", "", nil +} + +func FormatMountLabel(src string, MountLabel string) string { + return src +} + +func SetProcessLabel(processLabel string) error { + return nil +} + +func SetFileLabel(path string, fileLabel string) error { + return nil +} + +func GetPidCon(pid int) (string, error) { + return "", nil +} diff --git a/pkg/label/label_selinux.go b/pkg/label/label_selinux.go new file mode 100644 index 0000000000..300a8b6d14 --- /dev/null +++ b/pkg/label/label_selinux.go @@ -0,0 +1,69 @@ +// +build selinux,linux + +package label + +import ( + "fmt" + "github.com/dotcloud/docker/pkg/selinux" + "strings" +) + +func GenLabels(options string) (string, string, error) { + processLabel, mountLabel := selinux.GetLxcContexts() + var err error + if processLabel == "" { // SELinux is disabled + return "", "", err + } + s := strings.Fields(options) + l := len(s) + if l > 0 { + pcon := selinux.NewContext(processLabel) + for i := 0; i < l; i++ { + o := strings.Split(s[i], "=") + pcon[o[0]] = o[1] + } + processLabel = pcon.Get() + mountLabel, err = selinux.CopyLevel(processLabel, mountLabel) + } + return processLabel, mountLabel, err +} + +func FormatMountLabel(src string, MountLabel string) string { + var mountLabel string + if src != "" { + mountLabel = src + if MountLabel != "" { + mountLabel = fmt.Sprintf("%s,context=\"%s\"", mountLabel, MountLabel) + } + } else { + if MountLabel != "" { + mountLabel = fmt.Sprintf("context=\"%s\"", MountLabel) + } + } + return mountLabel +} + +func SetProcessLabel(processLabel string) error { + if selinux.SelinuxEnabled() { + return selinux.Setexeccon(processLabel) + } + return nil +} + +func GetProcessLabel() (string, error) { + if selinux.SelinuxEnabled() { + return selinux.Getexeccon() + } + return "", nil +} + +func SetFileLabel(path string, fileLabel string) error { + if selinux.SelinuxEnabled() && fileLabel != "" { + return selinux.Setfilecon(path, fileLabel) + } + return nil +} + +func GetPidCon(pid int) (string, error) { + return selinux.Getpidcon(pid) +} diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go index f8b8931390..9017af06e9 100644 --- a/pkg/libcontainer/nsinit/execin.go +++ b/pkg/libcontainer/nsinit/execin.go @@ -4,6 +4,7 @@ package nsinit import ( "fmt" + "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/system" "os" @@ -32,7 +33,11 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s closeFds() return -1, err } - + processLabel, err := label.GetPidCon(nspid) + if err != nil { + closeFds() + return -1, err + } // foreach namespace fd, use setns to join an existing container's namespaces for _, fd := range fds { if fd > 0 { @@ -80,6 +85,10 @@ dropAndExec: if err := finalizeNamespace(container); err != nil { return -1, err } + err = label.SetProcessLabel(processLabel) + if err != nil { + return -1, err + } if err := system.Execv(args[0], args[0:], container.Env); err != nil { 
return -1, err } diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 117ae875ed..5aa5f9f5b5 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -4,6 +4,7 @@ package nsinit import ( "fmt" + "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/apparmor" "github.com/dotcloud/docker/pkg/libcontainer/capabilities" @@ -12,6 +13,7 @@ import ( "github.com/dotcloud/docker/pkg/system" "github.com/dotcloud/docker/pkg/user" "os" + "runtime" "syscall" ) @@ -57,7 +59,7 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol return fmt.Errorf("parent death signal %s", err) } ns.logger.Println("setup mount namespace") - if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot); err != nil { + if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot, container.Context["mount_label"]); err != nil { return fmt.Errorf("setup mount namespace %s", err) } if err := setupNetwork(container, context); err != nil { @@ -76,6 +78,10 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol return err } } + runtime.LockOSThread() + if err := label.SetProcessLabel(container.Context["process_label"]); err != nil { + return fmt.Errorf("SetProcessLabel label %s", err) + } ns.logger.Printf("execing %s\n", args[0]) return system.Execv(args[0], args[0:], container.Env) } diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go index 61a90125e0..796143c68e 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/nsinit/mount.go @@ -4,6 +4,7 @@ package nsinit import ( "fmt" + "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/system" "io/ioutil" @@ -20,7 +21,7 @@ const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NOD // // There is no need to unmount the new mounts because as soon as the mount namespace // is no longer in use, the mounts will be removed automatically -func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, console string, readonly, noPivotRoot bool) error { +func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, console string, readonly, noPivotRoot bool, mountLabel string) error { flag := syscall.MS_PRIVATE if noPivotRoot { flag = syscall.MS_SLAVE @@ -36,7 +37,7 @@ func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, cons return fmt.Errorf("mounting %s as readonly %s", rootfs, err) } } - if err := mountSystem(rootfs); err != nil { + if err := mountSystem(rootfs, mountLabel); err != nil { return fmt.Errorf("mount system %s", err) } @@ -64,7 +65,7 @@ func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, cons if err := setupDev(rootfs); err != nil { return err } - if err := setupPtmx(rootfs, console); err != nil { + if err := setupPtmx(rootfs, console, mountLabel); err != nil { return err } if err := system.Chdir(rootfs); err != nil { @@ -196,7 +197,7 @@ func setupDev(rootfs string) error { } // setupConsole ensures that the container has a proper /dev/console setup -func setupConsole(rootfs, console string) error { +func setupConsole(rootfs, console string, mountLabel string) error { oldMask := system.Umask(0000) defer system.Umask(oldMask) @@ -220,6 +221,9 @@ func setupConsole(rootfs, console 
string) error { if err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil { return fmt.Errorf("mknod %s %s", dest, err) } + if err := label.SetFileLabel(console, mountLabel); err != nil { + return fmt.Errorf("SetFileLabel Failed %s %s", dest, err) + } if err := system.Mount(console, dest, "bind", syscall.MS_BIND, ""); err != nil { return fmt.Errorf("bind %s to %s %s", console, dest, err) } @@ -228,7 +232,7 @@ func setupConsole(rootfs, console string) error { // mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts // inside the mount namespace -func mountSystem(rootfs string) error { +func mountSystem(rootfs string, mountLabel string) error { for _, m := range []struct { source string path string @@ -238,8 +242,8 @@ func mountSystem(rootfs string) error { }{ {source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags}, {source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags}, - {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: "mode=1777,size=65536k"}, - {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: "newinstance,ptmxmode=0666,mode=620,gid=5"}, + {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1755,size=65536k", mountLabel)}, + {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)}, } { if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) { return fmt.Errorf("mkdirall %s %s", m.path, err) @@ -253,7 +257,7 @@ func mountSystem(rootfs string) error { // setupPtmx adds a symlink to pts/ptmx for /dev/ptmx and // finishes setting up /dev/console -func setupPtmx(rootfs, console string) error { +func setupPtmx(rootfs, console string, mountLabel string) error { ptmx := filepath.Join(rootfs, "dev/ptmx") if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { return err @@ -262,7 +266,7 @@ func setupPtmx(rootfs, console string) error { return fmt.Errorf("symlink dev ptmx %s", err) } if console != "" { - if err := setupConsole(rootfs, console); err != nil { + if err := setupConsole(rootfs, console, mountLabel); err != nil { return err } } diff --git a/pkg/selinux/selinux.go b/pkg/selinux/selinux.go new file mode 100644 index 0000000000..5236d3fb87 --- /dev/null +++ b/pkg/selinux/selinux.go @@ -0,0 +1,387 @@ +package selinux + +import ( + "bufio" + "crypto/rand" + "encoding/binary" + "fmt" + "github.com/dotcloud/docker/pkg/mount" + "github.com/dotcloud/docker/pkg/system" + "io" + "os" + "regexp" + "strconv" + "strings" + "syscall" +) + +const ( + Enforcing = 1 + Permissive = 0 + Disabled = -1 + selinuxDir = "/etc/selinux/" + selinuxConfig = selinuxDir + "config" + selinuxTypeTag = "SELINUXTYPE" + selinuxTag = "SELINUX" + selinuxPath = "/sys/fs/selinux" + xattrNameSelinux = "security.selinux" + stRdOnly = 0x01 +) + +var ( + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) + spaceRegex = regexp.MustCompile(`^([^=]+) (.*)$`) + mcsList = make(map[string]bool) + selinuxfs = "unknown" + selinuxEnabled = false + selinuxEnabledChecked = false +) + +type SELinuxContext map[string]string + +func GetSelinuxMountPoint() string { + if selinuxfs != "unknown" { + return selinuxfs + } + 
selinuxfs = "" + + mounts, err := mount.GetMounts() + if err != nil { + return selinuxfs + } + for _, mount := range mounts { + if mount.Fstype == "selinuxfs" { + selinuxfs = mount.Mountpoint + break + } + } + if selinuxfs != "" { + var buf syscall.Statfs_t + syscall.Statfs(selinuxfs, &buf) + if (buf.Flags & stRdOnly) == 1 { + selinuxfs = "" + } + } + return selinuxfs +} + +func SelinuxEnabled() bool { + if selinuxEnabledChecked { + return selinuxEnabled + } + selinuxEnabledChecked = true + if fs := GetSelinuxMountPoint(); fs != "" { + if con, _ := Getcon(); con != "kernel" { + selinuxEnabled = true + } + } + return selinuxEnabled +} + +func ReadConfig(target string) (value string) { + var ( + val, key string + bufin *bufio.Reader + ) + + in, err := os.Open(selinuxConfig) + if err != nil { + return "" + } + defer in.Close() + + bufin = bufio.NewReader(in) + + for done := false; !done; { + var line string + if line, err = bufin.ReadString('\n'); err != nil { + if err != io.EOF { + return "" + } + done = true + } + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) + if key == target { + return strings.Trim(val, "\"") + } + } + } + return "" +} + +func GetSELinuxPolicyRoot() string { + return selinuxDir + ReadConfig(selinuxTypeTag) +} + +func readCon(name string) (string, error) { + var val string + + in, err := os.Open(name) + if err != nil { + return "", err + } + defer in.Close() + + _, err = fmt.Fscanf(in, "%s", &val) + return val, err +} + +func Setfilecon(path string, scon string) error { + return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0) +} + +func Getfilecon(path string) (string, error) { + var scon []byte + + cnt, err := syscall.Getxattr(path, xattrNameSelinux, scon) + scon = make([]byte, cnt) + cnt, err = syscall.Getxattr(path, xattrNameSelinux, scon) + return string(scon), err +} + +func Setfscreatecon(scon string) error { + return writeCon("/proc/self/attr/fscreate", scon) +} + +func Getfscreatecon() (string, error) { + return readCon("/proc/self/attr/fscreate") +} + +func Getcon() (string, error) { + return readCon("/proc/self/attr/current") +} + +func Getpidcon(pid int) (string, error) { + return readCon(fmt.Sprintf("/proc/%d/attr/current", pid)) +} + +func Getexeccon() (string, error) { + return readCon("/proc/self/attr/exec") +} + +func writeCon(name string, val string) error { + if !SelinuxEnabled() { + return nil + } + out, err := os.OpenFile(name, os.O_WRONLY, 0) + if err != nil { + return err + } + defer out.Close() + + if val != "" { + _, err = out.Write([]byte(val)) + } else { + _, err = out.Write(nil) + } + return err +} + +func Setexeccon(scon string) error { + return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon) +} + +func (c SELinuxContext) Get() string { + return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"]) +} + +func NewContext(scon string) SELinuxContext { + c := make(SELinuxContext) + + if len(scon) != 0 { + con := strings.SplitN(scon, ":", 4) + c["user"] = con[0] + c["role"] = con[1] + c["type"] = con[2] + c["level"] = con[3] + } + return c +} + +func SelinuxGetEnforce() int { + var enforce int + + enforceS, err := readCon(fmt.Sprintf("%s/enforce", selinuxPath)) + if err != nil { + return -1 + } + + enforce, err = 
strconv.Atoi(string(enforceS)) + if err != nil { + return -1 + } + return enforce +} + +func SelinuxGetEnforceMode() int { + switch ReadConfig(selinuxTag) { + case "enforcing": + return Enforcing + case "permissive": + return Permissive + } + return Disabled +} + +func mcsAdd(mcs string) { + mcsList[mcs] = true +} + +func mcsDelete(mcs string) { + mcsList[mcs] = false +} + +func mcsExists(mcs string) bool { + return mcsList[mcs] +} + +func IntToMcs(id int, catRange uint32) string { + var ( + SETSIZE = int(catRange) + TIER = SETSIZE + ORD = id + ) + + if id < 1 || id > 523776 { + return "" + } + + for ORD > TIER { + ORD = ORD - TIER + TIER -= 1 + } + TIER = SETSIZE - TIER + ORD = ORD + TIER + return fmt.Sprintf("s0:c%d,c%d", TIER, ORD) +} + +func uniqMcs(catRange uint32) string { + var ( + n uint32 + c1, c2 uint32 + mcs string + ) + + for { + binary.Read(rand.Reader, binary.LittleEndian, &n) + c1 = n % catRange + binary.Read(rand.Reader, binary.LittleEndian, &n) + c2 = n % catRange + if c1 == c2 { + continue + } else { + if c1 > c2 { + t := c1 + c1 = c2 + c2 = t + } + } + mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2) + if mcsExists(mcs) { + continue + } + mcsAdd(mcs) + break + } + return mcs +} + +func FreeContext(con string) { + if con != "" { + scon := NewContext(con) + mcsDelete(scon["level"]) + } +} + +func GetLxcContexts() (processLabel string, fileLabel string) { + var ( + val, key string + bufin *bufio.Reader + ) + + if !SelinuxEnabled() { + return "", "" + } + lxcPath := fmt.Sprintf("%s/content/lxc_contexts", GetSELinuxPolicyRoot()) + fileLabel = "system_u:object_r:svirt_sandbox_file_t:s0" + processLabel = "system_u:system_r:svirt_lxc_net_t:s0" + + in, err := os.Open(lxcPath) + if err != nil { + goto exit + } + defer in.Close() + + bufin = bufio.NewReader(in) + + for done := false; !done; { + var line string + if line, err = bufin.ReadString('\n'); err != nil { + if err == io.EOF { + done = true + } else { + goto exit + } + } + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2]) + if key == "process" { + processLabel = strings.Trim(val, "\"") + } + if key == "file" { + fileLabel = strings.Trim(val, "\"") + } + } + } +exit: + mcs := IntToMcs(os.Getpid(), 1024) + scon := NewContext(processLabel) + scon["level"] = mcs + processLabel = scon.Get() + scon = NewContext(fileLabel) + scon["level"] = mcs + fileLabel = scon.Get() + return processLabel, fileLabel +} + +func SecurityCheckContext(val string) error { + return writeCon(fmt.Sprintf("%s.context", selinuxPath), val) +} + +func CopyLevel(src, dest string) (string, error) { + if !SelinuxEnabled() { + return "", nil + } + if src == "" { + return "", nil + } + if err := SecurityCheckContext(src); err != nil { + return "", err + } + if err := SecurityCheckContext(dest); err != nil { + return "", err + } + scon := NewContext(src) + tcon := NewContext(dest) + tcon["level"] = scon["level"] + return tcon.Get(), nil +} diff --git a/pkg/selinux/selinux_test.go b/pkg/selinux/selinux_test.go new file mode 100644 index 0000000000..6b59c1db11 --- /dev/null +++ b/pkg/selinux/selinux_test.go @@ -0,0 +1,64 @@ +package selinux_test + +import ( + "github.com/dotcloud/docker/pkg/selinux" + "os" + "testing" +) + +func testSetfilecon(t *testing.T) { + if selinux.SelinuxEnabled() { + tmp := "selinux_test" + out, _ 
:= os.OpenFile(tmp, os.O_WRONLY, 0) + out.Close() + err := selinux.Setfilecon(tmp, "system_u:object_r:bin_t:s0") + if err == nil { + t.Log(selinux.Getfilecon(tmp)) + } else { + t.Log("Setfilecon failed") + t.Fatal(err) + } + os.Remove(tmp) + } +} + +func TestSELinux(t *testing.T) { + var ( + err error + plabel, flabel string + ) + + if selinux.SelinuxEnabled() { + t.Log("Enabled") + plabel, flabel = selinux.GetLxcContexts() + t.Log(plabel) + t.Log(flabel) + plabel, flabel = selinux.GetLxcContexts() + t.Log(plabel) + t.Log(flabel) + t.Log("getenforce ", selinux.SelinuxGetEnforce()) + t.Log("getenforcemode ", selinux.SelinuxGetEnforceMode()) + pid := os.Getpid() + t.Log("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023)) + t.Log(selinux.Getcon()) + t.Log(selinux.Getfilecon("/etc/passwd")) + err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0") + if err == nil { + t.Log(selinux.Getfscreatecon()) + } else { + t.Log("setfscreatecon failed", err) + t.Fatal(err) + } + err = selinux.Setfscreatecon("") + if err == nil { + t.Log(selinux.Getfscreatecon()) + } else { + t.Log("setfscreatecon failed", err) + t.Fatal(err) + } + t.Log(selinux.Getpidcon(1)) + t.Log(selinux.GetSelinuxMountPoint()) + } else { + t.Log("Disabled") + } +} diff --git a/runconfig/config.go b/runconfig/config.go index e961d659d7..c3ade575c5 100644 --- a/runconfig/config.go +++ b/runconfig/config.go @@ -1,8 +1,10 @@ package runconfig import ( + "encoding/json" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/runtime/execdriver" ) // Note: the Config structure should hold only portable information about the container. @@ -34,9 +36,17 @@ type Config struct { Entrypoint []string NetworkDisabled bool OnBuild []string + Context execdriver.Context } func ContainerConfigFromJob(job *engine.Job) *Config { + var context execdriver.Context + val := job.Getenv("Context") + if val != "" { + if err := json.Unmarshal([]byte(val), &context); err != nil { + panic(err) + } + } config := &Config{ Hostname: job.Getenv("Hostname"), Domainname: job.Getenv("Domainname"), @@ -54,6 +64,7 @@ func ContainerConfigFromJob(job *engine.Job) *Config { VolumesFrom: job.Getenv("VolumesFrom"), WorkingDir: job.Getenv("WorkingDir"), NetworkDisabled: job.GetenvBool("NetworkDisabled"), + Context: context, } job.GetenvJson("ExposedPorts", &config.ExposedPorts) job.GetenvJson("Volumes", &config.Volumes) diff --git a/runconfig/parse.go b/runconfig/parse.go index c2591722d5..23c66cd611 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -4,8 +4,10 @@ import ( "fmt" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/opts" + "github.com/dotcloud/docker/pkg/label" flag "github.com/dotcloud/docker/pkg/mflag" "github.com/dotcloud/docker/pkg/sysinfo" + "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/utils" "io/ioutil" "path" @@ -32,6 +34,10 @@ func ParseSubcommand(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) } func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { + var ( + processLabel string + mountLabel string + ) var ( // FIXME: use utils.ListOpts for attach and volumes? 
flAttach = opts.NewListOpts(opts.ValidateAttach) @@ -60,6 +66,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flLabelOptions = cmd.String([]string{"Z", "-label"}, "", "Options to pass to underlying labeling system") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") @@ -150,6 +157,15 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf entrypoint = []string{*flEntrypoint} } + if !*flPrivileged { + pLabel, mLabel, e := label.GenLabels(*flLabelOptions) + if e != nil { + return nil, nil, cmd, fmt.Errorf("Invalid security labels : %s", e) + } + processLabel = pLabel + mountLabel = mLabel + } + lxcConf, err := parseLxcConfOpts(flLxcOpts) if err != nil { return nil, nil, cmd, err @@ -204,6 +220,10 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","), Entrypoint: entrypoint, WorkingDir: *flWorkingDir, + Context: execdriver.Context{ + "mount_label": mountLabel, + "process_label": processLabel, + }, } hostConfig := &HostConfig{ diff --git a/runtime/container.go b/runtime/container.go index bff9aea968..53d0aa666e 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -402,6 +402,7 @@ func populateCommand(c *Container) { User: c.Config.User, Config: driverConfig, Resources: resources, + Context: c.Config.Context, } c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} } diff --git a/runtime/execdriver/driver.go b/runtime/execdriver/driver.go index 23e31ee8d9..dca889a82d 100644 --- a/runtime/execdriver/driver.go +++ b/runtime/execdriver/driver.go @@ -7,6 +7,10 @@ import ( "os/exec" ) +// Context is a generic key value pair that allows +// arbatrary data to be sent +type Context map[string]string + var ( ErrNotRunning = errors.New("Process could not be started") ErrWaitTimeoutReached = errors.New("Wait timeout reached") @@ -121,6 +125,7 @@ type Command struct { Arguments []string `json:"arguments"` WorkingDir string `json:"working_dir"` ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver + Context Context `json:"context"` // generic context for specific options (apparmor, selinux) Tty bool `json:"tty"` Network *Network `json:"network"` Config []string `json:"config"` // generic values that specific drivers can consume diff --git a/runtime/execdriver/lxc/lxc_template.go b/runtime/execdriver/lxc/lxc_template.go index ce9d90469f..608fb22436 100644 --- a/runtime/execdriver/lxc/lxc_template.go +++ b/runtime/execdriver/lxc/lxc_template.go @@ -1,6 +1,7 @@ package lxc import ( + "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/runtime/execdriver" "strings" "text/template" @@ -29,6 +30,10 @@ lxc.pts = 1024 # disable the main console lxc.console = none +{{if getProcessLabel .Context}} +lxc.se_context = {{ getProcessLabel .Context}} +{{$MOUNTLABEL := getMountLabel .Context}} +{{end}} # no controlling tty at all lxc.tty = 1 @@ -85,8 +90,8 @@ lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noe lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 
0 {{end}} -lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0 -lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0 +lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMountLabel "newinstance,ptmxmode=0666,nosuid,noexec" "$MOUNTLABEL"}} 0 0 +lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" "$MOUNTLABEL"}} 0 0 {{range $value := .Mounts}} {{if $value.Writable}} @@ -142,11 +147,22 @@ func getMemorySwap(v *execdriver.Resources) int64 { return v.Memory * 2 } +func getProcessLabel(c execdriver.Context) string { + return c["process_label"] +} + +func getMountLabel(c execdriver.Context) string { + return c["mount_label"] +} + func init() { var err error funcMap := template.FuncMap{ "getMemorySwap": getMemorySwap, + "getProcessLabel": getProcessLabel, + "getMountLabel": getMountLabel, "escapeFstabSpaces": escapeFstabSpaces, + "formatMountLabel": label.FormatMountLabel, } LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate) if err != nil { diff --git a/runtime/execdriver/native/default_template.go b/runtime/execdriver/native/default_template.go index d744ab382f..7e1e9ed86e 100644 --- a/runtime/execdriver/native/default_template.go +++ b/runtime/execdriver/native/default_template.go @@ -18,6 +18,8 @@ func createContainer(c *execdriver.Command) *libcontainer.Container { container.User = c.User container.WorkingDir = c.WorkingDir container.Env = c.Env + container.Context["mount_label"] = c.Context["mount_label"] + container.Context["process_label"] = c.Context["process_label"] loopbackNetwork := libcontainer.Network{ Mtu: c.Network.Mtu, diff --git a/runtime/graphdriver/aufs/aufs.go b/runtime/graphdriver/aufs/aufs.go index 6f05ddd025..401bbd8c86 100644 --- a/runtime/graphdriver/aufs/aufs.go +++ b/runtime/graphdriver/aufs/aufs.go @@ -134,7 +134,7 @@ func (a Driver) Exists(id string) bool { // Three folders are created for each id // mnt, layers, and diff -func (a *Driver) Create(id, parent string) error { +func (a *Driver) Create(id, parent string, mountLabel string) error { if err := a.createDirsFor(id); err != nil { return err } diff --git a/runtime/graphdriver/aufs/aufs_test.go b/runtime/graphdriver/aufs/aufs_test.go index cb417c3b26..9cfdebd160 100644 --- a/runtime/graphdriver/aufs/aufs_test.go +++ b/runtime/graphdriver/aufs/aufs_test.go @@ -90,7 +90,7 @@ func TestCreateNewDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } } @@ -99,7 +99,7 @@ func TestCreateNewDirStructure(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -120,7 +120,7 @@ func TestRemoveImage(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -145,7 +145,7 @@ func TestGetWithoutParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -172,7 +172,7 @@ func TestCleanupWithDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ 
-185,7 +185,7 @@ func TestMountedFalseResponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -204,10 +204,10 @@ func TestMountedTrueReponse(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } - if err := d.Create("2", "1"); err != nil { + if err := d.Create("2", "1", ""); err != nil { t.Fatal(err) } @@ -230,10 +230,10 @@ func TestMountWithParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } - if err := d.Create("2", "1"); err != nil { + if err := d.Create("2", "1", ""); err != nil { t.Fatal(err) } @@ -261,10 +261,10 @@ func TestRemoveMountedDir(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } - if err := d.Create("2", "1"); err != nil { + if err := d.Create("2", "1", ""); err != nil { t.Fatal(err) } @@ -300,7 +300,7 @@ func TestCreateWithInvalidParent(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "docker"); err == nil { + if err := d.Create("1", "docker", ""); err == nil { t.Fatalf("Error should not be nil with parent does not exist") } } @@ -309,7 +309,7 @@ func TestGetDiff(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -343,10 +343,10 @@ func TestChanges(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } - if err := d.Create("2", "1"); err != nil { + if err := d.Create("2", "1", ""); err != nil { t.Fatal(err) } @@ -392,7 +392,7 @@ func TestChanges(t *testing.T) { t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) } - if err := d.Create("3", "2"); err != nil { + if err := d.Create("3", "2", ""); err != nil { t.Fatal(err) } mntPoint, err = d.Get("3") @@ -437,7 +437,7 @@ func TestDiffSize(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -479,7 +479,7 @@ func TestChildDiffSize(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -515,7 +515,7 @@ func TestChildDiffSize(t *testing.T) { t.Fatalf("Expected size to be %d got %d", size, diffSize) } - if err := d.Create("2", "1"); err != nil { + if err := d.Create("2", "1", ""); err != nil { t.Fatal(err) } @@ -534,7 +534,7 @@ func TestExists(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -552,7 +552,7 @@ func TestStatus(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -581,7 +581,7 @@ func TestApplyDiff(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -607,10 +607,10 @@ func TestApplyDiff(t *testing.T) { t.Fatal(err) } - if 
err := d.Create("2", ""); err != nil { + if err := d.Create("2", "", ""); err != nil { t.Fatal(err) } - if err := d.Create("3", "2"); err != nil { + if err := d.Create("3", "2", ""); err != nil { t.Fatal(err) } @@ -656,7 +656,7 @@ func TestMountMoreThan42Layers(t *testing.T) { } current = hash(current) - if err := d.Create(current, parent); err != nil { + if err := d.Create(current, parent, ""); err != nil { t.Logf("Current layer %d", i) t.Fatal(err) } diff --git a/runtime/graphdriver/aufs/migrate.go b/runtime/graphdriver/aufs/migrate.go index 6018342d6c..400e260797 100644 --- a/runtime/graphdriver/aufs/migrate.go +++ b/runtime/graphdriver/aufs/migrate.go @@ -77,7 +77,7 @@ func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) e } initID := fmt.Sprintf("%s-init", id) - if err := a.Create(initID, metadata.Image); err != nil { + if err := a.Create(initID, metadata.Image, ""); err != nil { return err } @@ -90,7 +90,7 @@ func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) e return err } - if err := a.Create(id, initID); err != nil { + if err := a.Create(id, initID, ""); err != nil { return err } } @@ -144,7 +144,7 @@ func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool) return err } if !a.Exists(m.ID) { - if err := a.Create(m.ID, m.ParentID); err != nil { + if err := a.Create(m.ID, m.ParentID, ""); err != nil { return err } } diff --git a/runtime/graphdriver/btrfs/btrfs.go b/runtime/graphdriver/btrfs/btrfs.go index b0530be92b..2a94a4089f 100644 --- a/runtime/graphdriver/btrfs/btrfs.go +++ b/runtime/graphdriver/btrfs/btrfs.go @@ -80,7 +80,7 @@ func getDirFd(dir *C.DIR) uintptr { return uintptr(C.dirfd(dir)) } -func subvolCreate(path, name string) error { +func subvolCreate(path, name string, mountLabel string) error { dir, err := openDir(path) if err != nil { return err @@ -155,13 +155,13 @@ func (d *Driver) subvolumesDirId(id string) string { return path.Join(d.subvolumesDir(), id) } -func (d *Driver) Create(id string, parent string) error { +func (d *Driver) Create(id string, parent string, mountLabel string) error { subvolumes := path.Join(d.home, "subvolumes") if err := os.MkdirAll(subvolumes, 0700); err != nil { return err } if parent == "" { - if err := subvolCreate(subvolumes, id); err != nil { + if err := subvolCreate(subvolumes, id, mountLabel); err != nil { return err } } else { diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go index dfdb180bb2..762e982208 100644 --- a/runtime/graphdriver/devmapper/deviceset.go +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/utils" "io" "io/ioutil" @@ -827,7 +828,7 @@ func (devices *DeviceSet) Shutdown() error { return nil } -func (devices *DeviceSet) MountDevice(hash, path string) error { +func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) error { devices.Lock() defer devices.Unlock() @@ -859,9 +860,11 @@ func (devices *DeviceSet) MountDevice(hash, path string) error { var flags uintptr = sysMsMgcVal - err := sysMount(info.DevName(), path, "ext4", flags, "discard") + mountOptions := label.FormatMountLabel("discard", mountLabel) + err := sysMount(info.DevName(), path, "ext4", flags, mountOptions) if err != nil && err == sysEInval { - err = sysMount(info.DevName(), path, "ext4", flags, "") + mountOptions = label.FormatMountLabel(mountLabel, "") + err = sysMount(info.DevName(), 
path, "ext4", flags, mountOptions) } if err != nil { return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err) diff --git a/runtime/graphdriver/devmapper/driver.go b/runtime/graphdriver/devmapper/driver.go index 33c7a0f483..1324ddab81 100644 --- a/runtime/graphdriver/devmapper/driver.go +++ b/runtime/graphdriver/devmapper/driver.go @@ -22,7 +22,8 @@ func init() { type Driver struct { *DeviceSet - home string + home string + MountLabel string } var Init = func(home string) (graphdriver.Driver, error) { @@ -60,13 +61,13 @@ func (d *Driver) Cleanup() error { return d.DeviceSet.Shutdown() } -func (d *Driver) Create(id, parent string) error { +func (d *Driver) Create(id, parent string, mountLabel string) error { + d.MountLabel = mountLabel if err := d.DeviceSet.AddDevice(id, parent); err != nil { return err } - mp := path.Join(d.home, "mnt", id) - if err := d.mount(id, mp); err != nil { + if err := d.mount(id, mp, d.MountLabel); err != nil { return err } @@ -116,7 +117,7 @@ func (d *Driver) Remove(id string) error { func (d *Driver) Get(id string) (string, error) { mp := path.Join(d.home, "mnt", id) - if err := d.mount(id, mp); err != nil { + if err := d.mount(id, mp, d.MountLabel); err != nil { return "", err } @@ -129,13 +130,13 @@ func (d *Driver) Put(id string) { } } -func (d *Driver) mount(id, mountPoint string) error { +func (d *Driver) mount(id, mountPoint string, mountLabel string) error { // Create the target directories if they don't exist if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) { return err } // Mount the device - return d.DeviceSet.MountDevice(id, mountPoint) + return d.DeviceSet.MountDevice(id, mountPoint, mountLabel) } func (d *Driver) Exists(id string) bool { diff --git a/runtime/graphdriver/devmapper/driver_test.go b/runtime/graphdriver/devmapper/driver_test.go index 9af71a00b3..4ca72db0ca 100644 --- a/runtime/graphdriver/devmapper/driver_test.go +++ b/runtime/graphdriver/devmapper/driver_test.go @@ -494,7 +494,7 @@ func TestDriverCreate(t *testing.T) { "?ioctl.loopctlgetfree", ) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } calls.Assert(t, @@ -612,7 +612,7 @@ func TestDriverRemove(t *testing.T) { "?ioctl.loopctlgetfree", ) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -668,7 +668,7 @@ func TestCleanup(t *testing.T) { mountPoints := make([]string, 2) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } // Mount the id @@ -678,7 +678,7 @@ func TestCleanup(t *testing.T) { } mountPoints[0] = p - if err := d.Create("2", "1"); err != nil { + if err := d.Create("2", "1", ""); err != nil { t.Fatal(err) } @@ -731,7 +731,7 @@ func TestNotMounted(t *testing.T) { d := newDriver(t) defer cleanup(d) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -749,7 +749,7 @@ func TestMounted(t *testing.T) { d := newDriver(t) defer cleanup(d) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } if _, err := d.Get("1"); err != nil { @@ -769,7 +769,7 @@ func TestInitCleanedDriver(t *testing.T) { t.Skip("FIXME: not a unit test") d := newDriver(t) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } if _, err := d.Get("1"); err != nil { @@ -797,7 +797,7 @@ func TestMountMountedDriver(t *testing.T) { d := 
newDriver(t) defer cleanup(d) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -816,7 +816,7 @@ func TestGetReturnsValidDevice(t *testing.T) { d := newDriver(t) defer cleanup(d) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } @@ -844,7 +844,7 @@ func TestDriverGetSize(t *testing.T) { d := newDriver(t) defer cleanup(d) - if err := d.Create("1", ""); err != nil { + if err := d.Create("1", "", ""); err != nil { t.Fatal(err) } diff --git a/runtime/graphdriver/driver.go b/runtime/graphdriver/driver.go index 89fd03a624..7bea704682 100644 --- a/runtime/graphdriver/driver.go +++ b/runtime/graphdriver/driver.go @@ -13,7 +13,7 @@ type InitFunc func(root string) (Driver, error) type Driver interface { String() string - Create(id, parent string) error + Create(id, parent string, mountLabel string) error Remove(id string) error Get(id string) (dir string, err error) diff --git a/runtime/graphdriver/vfs/driver.go b/runtime/graphdriver/vfs/driver.go index 10a7b223a4..fe09560f24 100644 --- a/runtime/graphdriver/vfs/driver.go +++ b/runtime/graphdriver/vfs/driver.go @@ -42,7 +42,7 @@ func copyDir(src, dst string) error { return nil } -func (d *Driver) Create(id string, parent string) error { +func (d *Driver) Create(id string, parent string, mountLabel string) error { dir := d.dir(id) if err := os.MkdirAll(path.Dir(dir), 0700); err != nil { return err diff --git a/runtime/runtime.go b/runtime/runtime.go index 0d3468e350..35bcad9781 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -467,7 +467,7 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe } initID := fmt.Sprintf("%s-init", container.ID) - if err := runtime.driver.Create(initID, img.ID); err != nil { + if err := runtime.driver.Create(initID, img.ID, config.Context["mount_label"]); err != nil { return nil, nil, err } initPath, err := runtime.driver.Get(initID) @@ -480,7 +480,7 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe return nil, nil, err } - if err := runtime.driver.Create(container.ID, initID); err != nil { + if err := runtime.driver.Create(container.ID, initID, config.Context["mount_label"]); err != nil { return nil, nil, err } resolvConf, err := utils.GetResolvConf() -- cgit v1.2.1 From 0fb01fd8fe376a3518b1050ab62f2b3370d62535 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 26 Mar 2014 13:55:45 +0000 Subject: Follow symlinks inside container root for build's ADD Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- integration/buildfile_test.go | 18 ++++++++++++++++++ server/buildfile.go | 9 +++++++++ 2 files changed, 27 insertions(+) diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index 95d5abb8a7..ae2282f53f 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -998,3 +998,21 @@ func TestBuildOnBuildForbiddenMaintainerTrigger(t *testing.T) { t.Fatal("Error should not be nil") } } + +// gh #2446 +func TestBuildAddToSymlinkDest(t *testing.T) { + eng := NewTestEngine(t) + defer nuke(mkRuntimeFromEngine(eng, t)) + + _, err := buildImage(testContextTemplate{` + from {IMAGE} + run mkdir /foo + run ln -s /foo /bar + add foo /bar/ + run stat /bar/foo + `, + [][2]string{{"foo", "HEYO"}}, nil}, t, eng, true) + if err != nil { + t.Fatal(err) + } +} diff --git a/server/buildfile.go b/server/buildfile.go index 5d5fda4d8e..6f95c2e593 100644 --- a/server/buildfile.go +++ 
b/server/buildfile.go @@ -395,9 +395,18 @@ func (b *buildFile) checkPathForAddition(orig string) error { func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error { var ( + err error origPath = path.Join(b.contextPath, orig) destPath = path.Join(container.RootfsPath(), dest) ) + + if destPath != container.RootfsPath() { + destPath, err = utils.FollowSymlinkInScope(destPath, container.RootfsPath()) + if err != nil { + return err + } + } + // Preserve the trailing '/' if strings.HasSuffix(dest, "/") { destPath = destPath + "/" -- cgit v1.2.1 From 67af7b3fb0b5e40a435b434c57291cb2989275ce Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 18 Mar 2014 17:50:40 -0700 Subject: Strip comments before parsing line continuations Fixes #3898 Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- integration/buildfile_test.go | 10 ++++++++++ server/buildfile.go | 25 +++++++++++++++++++------ 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go index 7f6e69ece3..23a1ff3d8e 100644 --- a/integration/buildfile_test.go +++ b/integration/buildfile_test.go @@ -311,6 +311,16 @@ RUN [ "$(cat /testfile)" = 'test!' ] }, nil, }, + { + ` +FROM {IMAGE} +# what \ +RUN mkdir /testing +RUN touch /testing/other +`, + nil, + nil, + }, } // FIXME: test building with 2 successive overlapping ADD commands diff --git a/server/buildfile.go b/server/buildfile.go index af6702cc1d..309b854208 100644 --- a/server/buildfile.go +++ b/server/buildfile.go @@ -729,20 +729,19 @@ func (b *buildFile) Build(context io.Reader) (string, error) { if len(fileBytes) == 0 { return "", ErrDockerfileEmpty } - dockerfile := string(fileBytes) - dockerfile = lineContinuation.ReplaceAllString(dockerfile, "") - stepN := 0 + var ( + dockerfile = lineContinuation.ReplaceAllString(stripComments(fileBytes), "") + stepN = 0 + ) for _, line := range strings.Split(dockerfile, "\n") { line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n") - // Skip comments and empty line - if len(line) == 0 || line[0] == '#' { + if len(line) == 0 { continue } if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil { return "", err } stepN += 1 - } if b.image != "" { fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image)) @@ -779,6 +778,20 @@ func (b *buildFile) BuildStep(name, expression string) error { return nil } +func stripComments(raw []byte) string { + var ( + out []string + lines = strings.Split(string(raw), "\n") + ) + for _, l := range lines { + if len(l) == 0 || l[0] == '#' { + continue + } + out = append(out, l) + } + return strings.Join(out, "\n") +} + func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile { return &buildFile{ runtime: srv.runtime, -- cgit v1.2.1 From 097aef2ca938012a5b42e0032b30267e27a92265 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 27 Mar 2014 04:24:31 +0000 Subject: Fix commit and import when no repository is specified Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- api/client.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/api/client.go b/api/client.go index 1e6bf9d549..df3265a15a 100644 --- a/api/client.go +++ b/api/client.go @@ -1013,9 +1013,11 @@ func (cli *DockerCli) CmdImport(args ...string) error { } v := url.Values{} - 
//Check if the given image name can be resolved - if _, _, err := registry.ResolveRepositoryName(repository); err != nil { - return err + if repository != "" { + //Check if the given image name can be resolved + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } } v.Set("repo", repository) @@ -1469,8 +1471,10 @@ func (cli *DockerCli) CmdCommit(args ...string) error { } //Check if the given image name can be resolved - if _, _, err := registry.ResolveRepositoryName(repository); err != nil { - return err + if repository != "" { + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } } v := url.Values{} -- cgit v1.2.1 From 2d270c4f06dbc2ee1293e3f81f6922df248ef8eb Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 27 Mar 2014 08:25:01 +0000 Subject: Fix compile and unit test errors after merge Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/cgroups/apply_raw.go | 23 +++++++++++++++++++ pkg/cgroups/cgroups.go | 19 --------------- runconfig/hostconfig.go | 5 ---- runconfig/parse.go | 42 ---------------------------------- runtime/container.go | 1 - runtime/execdriver/lxc/lxc_template.go | 25 ++++++++++++++------ runtime/utils_test.go | 3 ++- 7 files changed, 43 insertions(+), 75 deletions(-) diff --git a/pkg/cgroups/apply_raw.go b/pkg/cgroups/apply_raw.go index 47a2a002b8..5fe317937a 100644 --- a/pkg/cgroups/apply_raw.go +++ b/pkg/cgroups/apply_raw.go @@ -49,6 +49,9 @@ func rawApply(c *Cgroup, pid int) (ActiveCgroup, error) { if err := raw.setupCpu(c, pid); err != nil { return nil, err } + if err := raw.setupCpuset(c, pid); err != nil { + return nil, err + } return raw, nil } @@ -170,6 +173,25 @@ func (raw *rawCgroup) setupCpu(c *Cgroup, pid int) (err error) { return nil } +func (raw *rawCgroup) setupCpuset(c *Cgroup, pid int) (err error) { + if c.CpusetCpus != "" { + dir, err := raw.join("cpuset", pid) + if err != nil { + return err + } + defer func() { + if err != nil { + os.RemoveAll(dir) + } + }() + + if err := writeFile(dir, "cpuset.cpus", c.CpusetCpus); err != nil { + return err + } + } + return nil +} + func (raw *rawCgroup) Cleanup() error { get := func(subsystem string) string { path, _ := raw.path(subsystem) @@ -180,6 +202,7 @@ func (raw *rawCgroup) Cleanup() error { get("memory"), get("devices"), get("cpu"), + get("cpuset"), } { if path != "" { os.RemoveAll(path) diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go index cdf268711a..5fe10346df 100644 --- a/pkg/cgroups/cgroups.go +++ b/pkg/cgroups/cgroups.go @@ -101,22 +101,3 @@ func (c *Cgroup) Apply(pid int) (ActiveCgroup, error) { return rawApply(c, pid) } } - -func (c *Cgroup) setupCpuset(cgroupRoot string, pid int) (err error) { - if c.CpusetCpus != "" { - dir, err := c.Join(cgroupRoot, "cpuset", pid) - if err != nil { - return err - } - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() - - if err := writeFile(dir, "cpuset.cpus", c.CpusetCpus); err != nil { - return err - } - } - return nil -} diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 1a9ffbada5..9a92258644 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -17,11 +17,6 @@ type HostConfig struct { DriverOptions map[string][]string } -type KeyValuePair struct { - Key string - Value string -} - func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { hostConfig := &HostConfig{ ContainerIDFile: job.Getenv("ContainerIDFile"), diff --git a/runconfig/parse.go b/runconfig/parse.go index b89d6c4683..a330c6c869 
100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -4,10 +4,8 @@ import ( "fmt" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/opts" - "github.com/dotcloud/docker/pkg/label" flag "github.com/dotcloud/docker/pkg/mflag" "github.com/dotcloud/docker/pkg/sysinfo" - "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/utils" "io/ioutil" "path" @@ -34,10 +32,6 @@ func ParseSubcommand(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) } func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) { - var ( - processLabel string - mountLabel string - ) var ( // FIXME: use utils.ListOpts for attach and volumes? flAttach = opts.NewListOpts(opts.ValidateAttach) @@ -67,7 +61,6 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID") flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - flLabelOptions = cmd.String([]string{"Z", "-label"}, "", "Options to pass to underlying labeling system") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") @@ -159,15 +152,6 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf entrypoint = []string{*flEntrypoint} } - if !*flPrivileged { - pLabel, mLabel, e := label.GenLabels(*flLabelOptions) - if e != nil { - return nil, nil, cmd, fmt.Errorf("Invalid security labels : %s", e) - } - processLabel = pLabel - mountLabel = mLabel - } - lxcConf, err := parseKeyValueOpts(flLxcOpts) if err != nil { return nil, nil, cmd, err @@ -222,10 +206,6 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","), Entrypoint: entrypoint, WorkingDir: *flWorkingDir, - Context: execdriver.Context{ - "mount_label": mountLabel, - "process_label": processLabel, - }, } driverOptions, err := parseDriverOpts(flDriverOpts) @@ -233,11 +213,6 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf return nil, nil, cmd, err } - pluginOptions, err := parseDriverOpts(flDriverOpts) - if err != nil { - return nil, nil, cmd, err - } - hostConfig := &HostConfig{ Binds: binds, ContainerIDFile: *flContainerIDFile, @@ -289,20 +264,3 @@ func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) { } return out, nil } - -// options will come in the format of name.type=value -func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) { - out := make(map[string][]string, len(opts.GetAll())) - for _, o := range opts.GetAll() { - parts := strings.SplitN(o, ".", 2) - if len(parts) < 2 { - return nil, fmt.Errorf("invalid opt format %s", o) - } - values, exists := out[parts[0]] - if !exists { - values = []string{} - } - out[parts[0]] = append(values, parts[1]) - } - return out, nil -} diff --git a/runtime/container.go b/runtime/container.go index 656e9ae587..f37ffcd1e7 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -404,7 +404,6 @@ func populateCommand(c *Container) { User: c.Config.User, Config: driverConfig, Resources: resources, - Context: c.Config.Context, } c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true} } diff --git a/runtime/execdriver/lxc/lxc_template.go 
b/runtime/execdriver/lxc/lxc_template.go index 230518bd7f..67095383ec 100644 --- a/runtime/execdriver/lxc/lxc_template.go +++ b/runtime/execdriver/lxc/lxc_template.go @@ -30,9 +30,9 @@ lxc.pts = 1024 # disable the main console lxc.console = none -{{if getProcessLabel .Context}} -lxc.se_context = {{ getProcessLabel .Context}} -{{$MOUNTLABEL := getMountLabel .Context}} +{{if getProcessLabel .Config}} +lxc.se_context = {{ getProcessLabel .Config}} +{{$MOUNTLABEL := getMountLabel .Config}} {{end}} # no controlling tty at all @@ -147,12 +147,23 @@ func getMemorySwap(v *execdriver.Resources) int64 { return v.Memory * 2 } -func getProcessLabel(c execdriver.Context) string { - return c["process_label"] +func getProcessLabel(c map[string][]string) string { + return getLabel(c, "process") } -func getMountLabel(c execdriver.Context) string { - return c["mount_label"] +func getMountLabel(c map[string][]string) string { + return getLabel(c, "mount") +} + +func getLabel(c map[string][]string, name string) string { + label := c["label"] + for _, l := range label { + parts := strings.SplitN(l, "=", 2) + if parts[0] == name { + return parts[1] + } + } + return "" } func init() { diff --git a/runtime/utils_test.go b/runtime/utils_test.go index 81c745c0d5..833634cb47 100644 --- a/runtime/utils_test.go +++ b/runtime/utils_test.go @@ -2,13 +2,14 @@ package runtime import ( "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/utils" "testing" ) func TestMergeLxcConfig(t *testing.T) { var ( hostConfig = &runconfig.HostConfig{ - LxcConf: []runconfig.KeyValuePair{ + LxcConf: []utils.KeyValuePair{ {Key: "lxc.cgroups.cpuset", Value: "1,2"}, }, } -- cgit v1.2.1 From bfa2141765c2a3866ca0f5237fc951f4c2db8b98 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 27 Mar 2014 08:57:01 +0000 Subject: Update lxc to use opts for selinux labels Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/execdriver/lxc/driver.go | 24 ++++++++++++++++++++---- runtime/execdriver/lxc/lxc_template.go | 10 +++++----- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/runtime/execdriver/lxc/driver.go b/runtime/execdriver/lxc/driver.go index 086e35f643..896f215366 100644 --- a/runtime/execdriver/lxc/driver.go +++ b/runtime/execdriver/lxc/driver.go @@ -3,6 +3,7 @@ package lxc import ( "fmt" "github.com/dotcloud/docker/pkg/cgroups" + "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/utils" "io/ioutil" @@ -378,19 +379,34 @@ func rootIsShared() bool { } func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { - root := path.Join(d.root, "containers", c.ID, "config.lxc") + var ( + process, mount string + root = path.Join(d.root, "containers", c.ID, "config.lxc") + labels = c.Config["label"] + ) fo, err := os.Create(root) if err != nil { return "", err } defer fo.Close() + if len(labels) > 0 { + process, mount, err = label.GenLabels(labels[0]) + if err != nil { + return "", err + } + } + if err := LxcTemplateCompiled.Execute(fo, struct { *execdriver.Command - AppArmor bool + AppArmor bool + ProcessLabel string + MountLabel string }{ - Command: c, - AppArmor: d.apparmor, + Command: c, + AppArmor: d.apparmor, + ProcessLabel: process, + MountLabel: mount, }); err != nil { return "", err } diff --git a/runtime/execdriver/lxc/lxc_template.go b/runtime/execdriver/lxc/lxc_template.go index 67095383ec..e5248375a8 100644 --- a/runtime/execdriver/lxc/lxc_template.go +++ b/runtime/execdriver/lxc/lxc_template.go @@ 
-30,9 +30,9 @@ lxc.pts = 1024 # disable the main console lxc.console = none -{{if getProcessLabel .Config}} -lxc.se_context = {{ getProcessLabel .Config}} -{{$MOUNTLABEL := getMountLabel .Config}} +{{if .ProcessLabel}} +lxc.se_context = {{ .ProcessLabel}} +{{$MOUNTLABEL := .MountLabel}} {{end}} # no controlling tty at all @@ -159,8 +159,8 @@ func getLabel(c map[string][]string, name string) string { label := c["label"] for _, l := range label { parts := strings.SplitN(l, "=", 2) - if parts[0] == name { - return parts[1] + if strings.TrimSpace(parts[0]) == name { + return strings.TrimSpace(parts[1]) } } return "" -- cgit v1.2.1 From 6c9a47f01c583e9c22b831eb426192148d29d792 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 27 Mar 2014 09:04:54 +0000 Subject: Update native driver to use labels from opts Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/execdriver/native/create.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/runtime/execdriver/native/create.go b/runtime/execdriver/native/create.go index 7e663f0555..976416a8ca 100644 --- a/runtime/execdriver/native/create.go +++ b/runtime/execdriver/native/create.go @@ -2,6 +2,7 @@ package native import ( "fmt" + "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/runtime/execdriver/native/configuration" @@ -37,6 +38,9 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container if err := d.setupMounts(container, c); err != nil { return nil, err } + if err := d.setupLabels(container, c); err != nil { + return nil, err + } if err := configuration.ParseConfiguration(container, d.activeContainers, c.Config["native"]); err != nil { return nil, err } @@ -94,3 +98,16 @@ func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Co } return nil } + +func (d *driver) setupLabels(container *libcontainer.Container, c *execdriver.Command) error { + labels := c.Config["label"] + if len(labels) > 0 { + process, mount, err := label.GenLabels(labels[0]) + if err != nil { + return err + } + container.Context["mount_label"] = mount + container.Context["process_label"] = process + } + return nil +} -- cgit v1.2.1 From ad3e71d5c7e01dca229d4077cf8b019d8085c33a Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 27 Mar 2014 11:06:32 -0600 Subject: Adjust TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces This fixes the following, which I've been seeing on all my machines for as long as I can remember: --- FAIL: TestOnlyLoopbackExistsWhenUsingDisableNetworkOption (0.36 seconds) container_test.go:1597: Wrong interface count in test container: expected [*: lo], got [1: lo 2: sit0] Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- integration/container_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/container_test.go b/integration/container_test.go index 663b350638..8ed5525c72 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -1553,7 +1553,7 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) { runtime := mkRuntimeFromEngine(eng, t) defer nuke(runtime) - config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil) + config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show", "up"}, nil) if err != nil { t.Fatal(err) } -- cgit 
v1.2.1 From 73ee4879afd557a3ddd0740b0a281024060f2436 Mon Sep 17 00:00:00 2001 From: Michael Gorsuch Date: Thu, 27 Mar 2014 12:44:33 -0500 Subject: upstart: use exec here so upstart can monitor the process and not just a shell Docker-DCO-1.1-Signed-off-by: Michael Gorsuch (github: gorsuch) --- contrib/init/upstart/docker.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf index 907a536c9c..e27d77e145 100644 --- a/contrib/init/upstart/docker.conf +++ b/contrib/init/upstart/docker.conf @@ -37,5 +37,5 @@ script if [ -f /etc/default/$UPSTART_JOB ]; then . /etc/default/$UPSTART_JOB fi - "$DOCKER" -d $DOCKER_OPTS + exec "$DOCKER" -d $DOCKER_OPTS end script -- cgit v1.2.1 From d6c2188cae85549a40193273cb9893acefadf863 Mon Sep 17 00:00:00 2001 From: Ryan Thomas Date: Fri, 28 Mar 2014 06:31:04 +1100 Subject: Docker-DCO-1.1-Signed-off-by: Ryan Thomas (github: rthomas) --- registry/registry.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/registry/registry.go b/registry/registry.go index 01583f97c2..182ec78a76 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -42,9 +42,9 @@ func pingRegistryEndpoint(endpoint string) (bool, error) { return conn, nil } httpTransport := &http.Transport{ - Dial: httpDial, - Proxy: http.ProxyFromEnvironment, - } + Dial: httpDial, + Proxy: http.ProxyFromEnvironment, + } client := &http.Client{Transport: httpTransport} resp, err := client.Get(endpoint + "_ping") if err != nil { -- cgit v1.2.1 From 9d2a77805139598b272ca6e5f55e3542e1221f26 Mon Sep 17 00:00:00 2001 From: Barnaby Gray Date: Thu, 27 Mar 2014 19:13:27 +0000 Subject: Update fish completions for docker master. Docker-DCO-1.1-Signed-off-by: Barnaby Gray (github: barnybug) --- contrib/completion/fish/docker.fish | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index ddec61cffa..edaa5ca8c6 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -39,23 +39,25 @@ function __fish_print_docker_images --description 'Print a list of docker images end function __fish_print_docker_repositories --description 'Print a list of docker repositories' - docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1}' | sort | uniq + docker images | command awk 'NR>1' | command grep -v '' | command awk '{print $1}' | command sort | command uniq end # common options complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode' +complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group" complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. 
Multiple sockets can be specified' complete -c docker -f -n '__fish_docker_no_subcommand' -l api-enable-cors -d 'Enable CORS headers in the remote API' complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d "Attach containers to a pre-existing network bridge; use 'none' to disable container networking" complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b" complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode' complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers' +complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver' complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime' complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward' complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules" -complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if not default route is available' +complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available' complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' complete -c docker -f -n '__fish_docker_no_subcommand' -s r -l restart -d 'Restart previously running containers' complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the docker runtime to use a specific storage driver' @@ -71,7 +73,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_pri # build complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build a container from a Dockerfile' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress verbose build output' +complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' @@ -79,7 +81,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d ' complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (eg. 
"John Hannibal Smith "' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: --run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')' +complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: -run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')' complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" # cp @@ -100,16 +102,16 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_pri # history complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" # images complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'show all images (by default filter out the intermediate images used to build)' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate images used to build)' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'only show numeric IDs' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'output graph in tree format' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'output graph in graphviz format' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'Output graph in tree format' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'Output graph in graphviz format' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" # import @@ -126,7 +128,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from insert' -a '(__fish_pri complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container' complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' 
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" -complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers running)' -d "Container" +complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" # kill complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' @@ -138,9 +140,9 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image # login complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or Login to the docker registry server' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'email' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'password' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'username' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' +complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' # logs complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' @@ -180,12 +182,14 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_pr # rm complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' +complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force removal of running container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated to the container' complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" # rmi complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' +complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force' complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" # run @@ -202,7 +206,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expo complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep stdin open even if not attached' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container (name:alias)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' +complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l lxc-conf -d 'Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: , where unit = b, k, m or g)' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s n -l networking -d 'Enable networking for this container' complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' -- cgit v1.2.1 From 4af79a36e283e94cb48442499534f996e27e0f29 Mon Sep 17 00:00:00 2001 
From: Tianon Gravi Date: Wed, 12 Mar 2014 01:58:53 -0600 Subject: Add mention of mounting cgroupfs properly to PACKAGERS.md Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- docs/sources/installation/binaries.rst | 6 ++++++ hack/PACKAGERS.md | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/docs/sources/installation/binaries.rst b/docs/sources/installation/binaries.rst index a070599338..ae548e7657 100644 --- a/docs/sources/installation/binaries.rst +++ b/docs/sources/installation/binaries.rst @@ -29,6 +29,12 @@ To run properly, docker needs the following software to be installed at runtime: - iptables version 1.4 or later - Git version 1.7 or later - XZ Utils 4.9 or later +- a `properly mounted + `_ + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point `is + `_ `not + `_ `sufficient + `_) Check kernel dependencies diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md index dc255c57ad..7170c5ad25 100644 --- a/hack/PACKAGERS.md +++ b/hack/PACKAGERS.md @@ -266,6 +266,12 @@ installed and available at runtime: * iptables version 1.4 or later * XZ Utils version 4.9 or later +* a [properly + mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) + cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point + [is](https://github.com/dotcloud/docker/issues/2683) + [not](https://github.com/dotcloud/docker/issues/3485) + [sufficient](https://github.com/dotcloud/docker/issues/4568)) Additionally, the Docker client needs the following software to be installed and available at runtime: -- cgit v1.2.1 From 7a3070a6000963d12be9dcd2698d911b848a33b6 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Thu, 13 Mar 2014 17:03:09 +0100 Subject: Add --opt arguments for drivers In order to handle special configuration for different drivers we make the Config field a map to string array. This lets us use it for lxc, by using the "lxc" key for those, and we can later extend it easily for other backend-specific options. 
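To make the shape of that change concrete, here is a minimal sketch (illustrative option strings and simplified parsing, not the exact parseDriverOpts added below) of how "driver.option" values group into the new map[string][]string Config:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Hypothetical --opt values as they might arrive from the command line.
        opts := []string{"lxc.utsname=docker", "lxc.cgroup.cpuset.cpus=0,1"}

        // Split each value at the first '.' into a driver name and an option,
        // then group the options per driver.
        config := make(map[string][]string)
        for _, o := range opts {
            parts := strings.SplitN(o, ".", 2)
            if len(parts) < 2 {
                continue // the real parser returns an error here
            }
            config[parts[0]] = append(config[parts[0]], parts[1])
        }

        fmt.Println(config["lxc"]) // [utsname=docker cgroup.cpuset.cpus=0,1]
    }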
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- runconfig/hostconfig.go | 10 +++--- runconfig/parse.go | 45 ++++++++++++++++-------- runconfig/parse_test.go | 3 +- runtime/container.go | 14 ++++---- runtime/execdriver/driver.go | 30 ++++++++-------- runtime/execdriver/lxc/lxc_template.go | 4 +-- runtime/execdriver/lxc/lxc_template_unit_test.go | 8 +++-- runtime/execdriver/native/default_template.go | 1 + runtime/execdriver/native/driver.go | 7 ++-- runtime/utils.go | 20 +++++++++++ utils/utils.go | 13 +++++++ 11 files changed, 104 insertions(+), 51 deletions(-) diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 6c8618ee81..9a92258644 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -3,21 +3,18 @@ package runconfig import ( "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/utils" ) type HostConfig struct { Binds []string ContainerIDFile string - LxcConf []KeyValuePair + LxcConf []utils.KeyValuePair Privileged bool PortBindings nat.PortMap Links []string PublishAllPorts bool -} - -type KeyValuePair struct { - Key string - Value string + DriverOptions map[string][]string } func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { @@ -28,6 +25,7 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { } job.GetenvJson("LxcConf", &hostConfig.LxcConf) job.GetenvJson("PortBindings", &hostConfig.PortBindings) + job.GetenvJson("DriverOptions", &hostConfig.DriverOptions) if Binds := job.GetenvList("Binds"); Binds != nil { hostConfig.Binds = Binds } diff --git a/runconfig/parse.go b/runconfig/parse.go index 23c66cd611..43aecdb753 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -51,6 +51,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flDnsSearch = opts.NewListOpts(opts.ValidateDomain) flVolumesFrom opts.ListOpts flLxcOpts opts.ListOpts + flDriverOpts opts.ListOpts flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id") @@ -83,7 +84,8 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom dns search domains") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") - cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + cmd.Var(&flLxcOpts, []string{"#lxc-conf", "#-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") + cmd.Var(&flDriverOpts, []string{"o", "-opt"}, "Add custom driver options") if err := cmd.Parse(args); err != nil { return nil, nil, cmd, err @@ -166,7 +168,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf mountLabel = mLabel } - lxcConf, err := parseLxcConfOpts(flLxcOpts) + lxcConf, err := parseKeyValueOpts(flLxcOpts) if err != nil { return nil, nil, cmd, err } @@ -226,6 +228,11 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf }, } + driverOptions, err := parseDriverOpts(flDriverOpts) + if err != nil { + return nil, nil, cmd, err + } + hostConfig := &HostConfig{ Binds: binds, 
ContainerIDFile: *flContainerIDFile, @@ -234,6 +241,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf PortBindings: portBindings, Links: flLinks.GetAll(), PublishAllPorts: *flPublishAll, + DriverOptions: driverOptions, } if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { @@ -248,22 +256,31 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf return config, hostConfig, cmd, nil } -func parseLxcConfOpts(opts opts.ListOpts) ([]KeyValuePair, error) { - out := make([]KeyValuePair, opts.Len()) - for i, o := range opts.GetAll() { - k, v, err := parseLxcOpt(o) - if err != nil { - return nil, err +// options will come in the format of name.key=value or name.option +func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) { + out := make(map[string][]string, len(opts.GetAll())) + for _, o := range opts.GetAll() { + parts := strings.SplitN(o, ".", 2) + if len(parts) < 2 { + return nil, fmt.Errorf("invalid opt format %s", o) } - out[i] = KeyValuePair{Key: k, Value: v} + values, exists := out[parts[0]] + if !exists { + values = []string{} + } + out[parts[0]] = append(values, parts[1]) } return out, nil } -func parseLxcOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt) +func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) { + out := make([]utils.KeyValuePair, opts.Len()) + for i, o := range opts.GetAll() { + k, v, err := utils.ParseKeyValueOpt(o) + if err != nil { + return nil, err + } + out[i] = utils.KeyValuePair{Key: k, Value: v} } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil + return out, nil } diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go index 2b89e88ec3..fd28c4593e 100644 --- a/runconfig/parse_test.go +++ b/runconfig/parse_test.go @@ -1,6 +1,7 @@ package runconfig import ( + "github.com/dotcloud/docker/utils" "testing" ) @@ -8,7 +9,7 @@ func TestParseLxcConfOpt(t *testing.T) { opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "} for _, o := range opts { - k, v, err := parseLxcOpt(o) + k, v, err := utils.ParseKeyValueOpt(o) if err != nil { t.FailNow() } diff --git a/runtime/container.go b/runtime/container.go index 4cf307d823..656e9ae587 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -361,9 +361,13 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s func populateCommand(c *Container) { var ( en *execdriver.Network - driverConfig []string + driverConfig = c.hostConfig.DriverOptions ) + if driverConfig == nil { + driverConfig = make(map[string][]string) + } + en = &execdriver.Network{ Mtu: c.runtime.config.Mtu, Interface: nil, @@ -379,11 +383,9 @@ func populateCommand(c *Container) { } } - if lxcConf := c.hostConfig.LxcConf; lxcConf != nil { - for _, pair := range lxcConf { - driverConfig = append(driverConfig, fmt.Sprintf("%s = %s", pair.Key, pair.Value)) - } - } + // TODO: this can be removed after lxc-conf is fully deprecated + mergeLxcConfIntoOptions(c.hostConfig, driverConfig) + resources := &execdriver.Resources{ Memory: c.Config.Memory, MemorySwap: c.Config.MemorySwap, diff --git a/runtime/execdriver/driver.go b/runtime/execdriver/driver.go index dca889a82d..096ea0790d 100644 --- a/runtime/execdriver/driver.go +++ b/runtime/execdriver/driver.go @@ -116,21 +116,21 @@ type Mount struct { type Command struct { exec.Cmd `json:"-"` - ID string `json:"id"` - Privileged 
bool `json:"privileged"` - User string `json:"user"` - Rootfs string `json:"rootfs"` // root fs of the container - InitPath string `json:"initpath"` // dockerinit - Entrypoint string `json:"entrypoint"` - Arguments []string `json:"arguments"` - WorkingDir string `json:"working_dir"` - ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver - Context Context `json:"context"` // generic context for specific options (apparmor, selinux) - Tty bool `json:"tty"` - Network *Network `json:"network"` - Config []string `json:"config"` // generic values that specific drivers can consume - Resources *Resources `json:"resources"` - Mounts []Mount `json:"mounts"` + ID string `json:"id"` + Privileged bool `json:"privileged"` + User string `json:"user"` + Rootfs string `json:"rootfs"` // root fs of the container + InitPath string `json:"initpath"` // dockerinit + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + WorkingDir string `json:"working_dir"` + ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver + Context Context `json:"context"` // generic context for specific options (apparmor, selinux) + Tty bool `json:"tty"` + Network *Network `json:"network"` + Config map[string][]string `json:"config"` // generic values that specific drivers can consume + Resources *Resources `json:"resources"` + Mounts []Mount `json:"mounts"` Terminal Terminal `json:"-"` // standard or tty terminal Console string `json:"-"` // dev/console path diff --git a/runtime/execdriver/lxc/lxc_template.go b/runtime/execdriver/lxc/lxc_template.go index 608fb22436..f325ffcaef 100644 --- a/runtime/execdriver/lxc/lxc_template.go +++ b/runtime/execdriver/lxc/lxc_template.go @@ -123,8 +123,8 @@ lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} {{end}} {{end}} -{{if .Config}} -{{range $value := .Config}} +{{if .Config.lxc}} +{{range $value := .Config.lxc}} {{$value}} {{end}} {{end}} diff --git a/runtime/execdriver/lxc/lxc_template_unit_test.go b/runtime/execdriver/lxc/lxc_template_unit_test.go index e613adf7a9..7f473a0502 100644 --- a/runtime/execdriver/lxc/lxc_template_unit_test.go +++ b/runtime/execdriver/lxc/lxc_template_unit_test.go @@ -75,9 +75,11 @@ func TestCustomLxcConfig(t *testing.T) { command := &execdriver.Command{ ID: "1", Privileged: false, - Config: []string{ - "lxc.utsname = docker", - "lxc.cgroup.cpuset.cpus = 0,1", + Config: map[string][]string{ + "lxc": { + "lxc.utsname = docker", + "lxc.cgroup.cpuset.cpus = 0,1", + }, }, Network: &execdriver.Network{ Mtu: 1500, diff --git a/runtime/execdriver/native/default_template.go b/runtime/execdriver/native/default_template.go index 7e1e9ed86e..e11f2de1cf 100644 --- a/runtime/execdriver/native/default_template.go +++ b/runtime/execdriver/native/default_template.go @@ -58,6 +58,7 @@ func createContainer(c *execdriver.Command) *libcontainer.Container { container.Cgroups.Memory = c.Resources.Memory container.Cgroups.MemorySwap = c.Resources.MemorySwap } + // check to see if we are running in ramdisk to disable pivot root container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" diff --git a/runtime/execdriver/native/driver.go b/runtime/execdriver/native/driver.go index bf7e8ccdec..db974cbf04 100644 --- a/runtime/execdriver/native/driver.go +++ b/runtime/execdriver/native/driver.go @@ -184,10 +184,9 @@ func (d *driver) removeContainerRoot(id string) error { func (d *driver) validateCommand(c *execdriver.Command) error { // 
we need to check the Config of the command to make sure that we // do not have any of the lxc-conf variables - for _, conf := range c.Config { - if strings.Contains(conf, "lxc") { - return fmt.Errorf("%s is not supported by the native driver", conf) - } + lxc := c.Config["lxc"] + if lxc != nil && len(lxc) > 0 { + return fmt.Errorf("lxc config options are not supported by the native driver") } return nil } diff --git a/runtime/utils.go b/runtime/utils.go index b343b5b10e..b983e67d41 100644 --- a/runtime/utils.go +++ b/runtime/utils.go @@ -1,9 +1,11 @@ package runtime import ( + "fmt" "github.com/dotcloud/docker/nat" "github.com/dotcloud/docker/pkg/namesgenerator" "github.com/dotcloud/docker/runconfig" + "strings" ) func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error { @@ -30,6 +32,24 @@ func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostCon return nil } +func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig, driverConfig map[string][]string) { + if hostConfig == nil { + return + } + + // merge in the lxc conf options into the generic config map + if lxcConf := hostConfig.LxcConf; lxcConf != nil { + lxc := driverConfig["lxc"] + for _, pair := range lxcConf { + // because lxc conf gets the driver name lxc.XXXX we need to trim it off + // and let the lxc driver add it back later if needed + parts := strings.SplitN(pair.Key, ".", 2) + lxc = append(lxc, fmt.Sprintf("%s=%s", parts[1], pair.Value)) + } + driverConfig["lxc"] = lxc + } +} + type checker struct { runtime *Runtime } diff --git a/utils/utils.go b/utils/utils.go index 2702555973..1fe2e87b4f 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -25,6 +25,11 @@ import ( "time" ) +type KeyValuePair struct { + Key string + Value string +} + // A common interface to access the Fatal method of // both testing.B and testing.T. type Fataler interface { @@ -1071,3 +1076,11 @@ func ReadSymlinkedDirectory(path string) (string, error) { } return realPath, nil } + +func ParseKeyValueOpt(opt string) (string, string, error) { + parts := strings.SplitN(opt, "=", 2) + if len(parts) != 2 { + return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) + } + return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil +} -- cgit v1.2.1 From 7f7d8419a71d49b25e4d38196b36e93b568bb61d Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Fri, 14 Mar 2014 10:47:49 +0100 Subject: cgroups: Split out Apply/Cleanup to separate file/interface This leaves only the generic cgroup helper functions in cgroups.go and will allow easy implementations of other cgroup managers. This also wires up the call to Cleanup the cgroup which was missing before.
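The caller-facing shape of that split, as a rough sketch with placeholder types rather than the real cgroups/libcontainer ones: Apply returns a handle whose Cleanup tears the cgroup back down, which is the call nsinit now defers.

    package main

    import "fmt"

    // ActiveCgroup mirrors the small interface this patch introduces: whatever
    // Apply returns only needs to know how to clean itself up.
    type ActiveCgroup interface {
        Cleanup() error
    }

    // fsCgroup stands in for a concrete backend (raw cgroupfs, systemd, ...).
    type fsCgroup struct{ path string }

    func (c *fsCgroup) Cleanup() error {
        fmt.Println("removing", c.path)
        return nil
    }

    // apply is a placeholder for Cgroup.Apply(pid).
    func apply(name string, pid int) (ActiveCgroup, error) {
        fmt.Printf("joined %s with pid %d\n", name, pid)
        return &fsCgroup{path: "/sys/fs/cgroup/cpu/" + name}, nil
    }

    func main() {
        active, err := apply("docker/abc123", 1234)
        if err != nil {
            panic(err)
        }
        defer active.Cleanup() // the cleanup call this commit wires up
    }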
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- pkg/cgroups/apply_raw.go | 189 ++++++++++++++++++++++++++++++++++++++++ pkg/cgroups/cgroups.go | 171 ++---------------------------------- pkg/libcontainer/nsinit/exec.go | 16 ++-- 3 files changed, 205 insertions(+), 171 deletions(-) create mode 100644 pkg/cgroups/apply_raw.go diff --git a/pkg/cgroups/apply_raw.go b/pkg/cgroups/apply_raw.go new file mode 100644 index 0000000000..bce96f4951 --- /dev/null +++ b/pkg/cgroups/apply_raw.go @@ -0,0 +1,189 @@ +package cgroups + +import ( + "fmt" + "os" + "path/filepath" + "strconv" +) + +type rawCgroup struct { + root string + cgroup string +} + +func rawApply(c *Cgroup, pid int) (ActiveCgroup, error) { + // We have two implementation of cgroups support, one is based on + // systemd and the dbus api, and one is based on raw cgroup fs operations + // following the pre-single-writer model docs at: + // http://www.freedesktop.org/wiki/Software/systemd/PaxControlGroups/ + // + // we can pick any subsystem to find the root + + cgroupRoot, err := FindCgroupMountpoint("cpu") + if err != nil { + return nil, err + } + cgroupRoot = filepath.Dir(cgroupRoot) + + if _, err := os.Stat(cgroupRoot); err != nil { + return nil, fmt.Errorf("cgroups fs not found") + } + + cgroup := c.Name + if c.Parent != "" { + cgroup = filepath.Join(c.Parent, cgroup) + } + + raw := &rawCgroup{ + root: cgroupRoot, + cgroup: cgroup, + } + + if err := raw.setupDevices(c, pid); err != nil { + return nil, err + } + if err := raw.setupMemory(c, pid); err != nil { + return nil, err + } + if err := raw.setupCpu(c, pid); err != nil { + return nil, err + } + return raw, nil +} + +func (raw *rawCgroup) path(subsystem string) (string, error) { + initPath, err := GetInitCgroupDir(subsystem) + if err != nil { + return "", err + } + return filepath.Join(raw.root, subsystem, initPath, raw.cgroup), nil +} + +func (raw *rawCgroup) join(subsystem string, pid int) (string, error) { + path, err := raw.path(subsystem) + if err != nil { + return "", err + } + if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { + return "", err + } + if err := writeFile(path, "tasks", strconv.Itoa(pid)); err != nil { + return "", err + } + return path, nil +} + +func (raw *rawCgroup) setupDevices(c *Cgroup, pid int) (err error) { + if !c.DeviceAccess { + dir, err := raw.join("devices", pid) + if err != nil { + return err + } + + defer func() { + if err != nil { + os.RemoveAll(dir) + } + }() + + if err := writeFile(dir, "devices.deny", "a"); err != nil { + return err + } + + allow := []string{ + // /dev/null, zero, full + "c 1:3 rwm", + "c 1:5 rwm", + "c 1:7 rwm", + + // consoles + "c 5:1 rwm", + "c 5:0 rwm", + "c 4:0 rwm", + "c 4:1 rwm", + + // /dev/urandom,/dev/random + "c 1:9 rwm", + "c 1:8 rwm", + + // /dev/pts/ - pts namespaces are "coming soon" + "c 136:* rwm", + "c 5:2 rwm", + + // tuntap + "c 10:200 rwm", + } + + for _, val := range allow { + if err := writeFile(dir, "devices.allow", val); err != nil { + return err + } + } + } + return nil +} + +func (raw *rawCgroup) setupMemory(c *Cgroup, pid int) (err error) { + if c.Memory != 0 || c.MemorySwap != 0 { + dir, err := raw.join("memory", pid) + if err != nil { + return err + } + defer func() { + if err != nil { + os.RemoveAll(dir) + } + }() + + if c.Memory != 0 { + if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil { + return err + } + if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil { 
+ return err + } + } + // By default, MemorySwap is set to twice the size of RAM. + // If you want to omit MemorySwap, set it to `-1'. + if c.MemorySwap != -1 { + if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(c.Memory*2, 10)); err != nil { + return err + } + } + } + return nil +} + +func (raw *rawCgroup) setupCpu(c *Cgroup, pid int) (err error) { + // We always want to join the cpu group, to allow fair cpu scheduling + // on a container basis + dir, err := raw.join("cpu", pid) + if err != nil { + return err + } + if c.CpuShares != 0 { + if err := writeFile(dir, "cpu.shares", strconv.FormatInt(c.CpuShares, 10)); err != nil { + return err + } + } + return nil +} + +func (raw *rawCgroup) Cleanup() error { + get := func(subsystem string) string { + path, _ := raw.path(subsystem) + return path + } + + for _, path := range []string{ + get("memory"), + get("devices"), + get("cpu"), + } { + if path != "" { + os.RemoveAll(path) + } + } + return nil +} diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go index b40e1a31fa..f35556f712 100644 --- a/pkg/cgroups/cgroups.go +++ b/pkg/cgroups/cgroups.go @@ -8,7 +8,6 @@ import ( "io/ioutil" "os" "path/filepath" - "strconv" "strings" ) @@ -22,6 +21,10 @@ type Cgroup struct { CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers) } +type ActiveCgroup interface { + Cleanup() error +} + // https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt func FindCgroupMountpoint(subsystem string) (string, error) { mounts, err := mount.GetMounts() @@ -62,48 +65,6 @@ func GetInitCgroupDir(subsystem string) (string, error) { return parseCgroupFile(subsystem, f) } -func (c *Cgroup) Path(root, subsystem string) (string, error) { - cgroup := c.Name - if c.Parent != "" { - cgroup = filepath.Join(c.Parent, cgroup) - } - initPath, err := GetInitCgroupDir(subsystem) - if err != nil { - return "", err - } - return filepath.Join(root, subsystem, initPath, cgroup), nil -} - -func (c *Cgroup) Join(root, subsystem string, pid int) (string, error) { - path, err := c.Path(root, subsystem) - if err != nil { - return "", err - } - if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { - return "", err - } - if err := writeFile(path, "tasks", strconv.Itoa(pid)); err != nil { - return "", err - } - return path, nil -} - -func (c *Cgroup) Cleanup(root string) error { - get := func(subsystem string) string { - path, _ := c.Path(root, subsystem) - return path - } - - for _, path := range []string{ - get("memory"), - get("devices"), - get("cpu"), - } { - os.RemoveAll(path) - } - return nil -} - func parseCgroupFile(subsystem string, r io.Reader) (string, error) { s := bufio.NewScanner(r) for s.Scan() { @@ -125,126 +86,6 @@ func writeFile(dir, file, data string) error { return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700) } -func (c *Cgroup) Apply(pid int) error { - // We have two implementation of cgroups support, one is based on - // systemd and the dbus api, and one is based on raw cgroup fs operations - // following the pre-single-writer model docs at: - // http://www.freedesktop.org/wiki/Software/systemd/PaxControlGroups/ - // - // we can pick any subsystem to find the root - cgroupRoot, err := FindCgroupMountpoint("cpu") - if err != nil { - return err - } - cgroupRoot = filepath.Dir(cgroupRoot) - - if _, err := os.Stat(cgroupRoot); err != nil { - return fmt.Errorf("cgroups fs not found") - } - if err := c.setupDevices(cgroupRoot, pid); err != nil { - return err - } - if err := 
c.setupMemory(cgroupRoot, pid); err != nil { - return err - } - if err := c.setupCpu(cgroupRoot, pid); err != nil { - return err - } - return nil -} - -func (c *Cgroup) setupDevices(cgroupRoot string, pid int) (err error) { - if !c.DeviceAccess { - dir, err := c.Join(cgroupRoot, "devices", pid) - if err != nil { - return err - } - - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() - - if err := writeFile(dir, "devices.deny", "a"); err != nil { - return err - } - - allow := []string{ - // /dev/null, zero, full - "c 1:3 rwm", - "c 1:5 rwm", - "c 1:7 rwm", - - // consoles - "c 5:1 rwm", - "c 5:0 rwm", - "c 4:0 rwm", - "c 4:1 rwm", - - // /dev/urandom,/dev/random - "c 1:9 rwm", - "c 1:8 rwm", - - // /dev/pts/ - pts namespaces are "coming soon" - "c 136:* rwm", - "c 5:2 rwm", - - // tuntap - "c 10:200 rwm", - } - - for _, val := range allow { - if err := writeFile(dir, "devices.allow", val); err != nil { - return err - } - } - } - return nil -} - -func (c *Cgroup) setupMemory(cgroupRoot string, pid int) (err error) { - if c.Memory != 0 || c.MemorySwap != 0 { - dir, err := c.Join(cgroupRoot, "memory", pid) - if err != nil { - return err - } - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() - - if c.Memory != 0 { - if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil { - return err - } - if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil { - return err - } - } - // By default, MemorySwap is set to twice the size of RAM. - // If you want to omit MemorySwap, set it to `-1'. - if c.MemorySwap != -1 { - if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(c.Memory*2, 10)); err != nil { - return err - } - } - } - return nil -} - -func (c *Cgroup) setupCpu(cgroupRoot string, pid int) (err error) { - // We always want to join the cpu group, to allow fair cpu scheduling - // on a container basis - dir, err := c.Join(cgroupRoot, "cpu", pid) - if err != nil { - return err - } - if c.CpuShares != 0 { - if err := writeFile(dir, "cpu.shares", strconv.FormatInt(c.CpuShares, 10)); err != nil { - return err - } - } - return nil +func (c *Cgroup) Apply(pid int) (ActiveCgroup, error) { + return rawApply(c, pid) } diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 61286cc13c..a44faafe0e 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -3,6 +3,7 @@ package nsinit import ( + "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/pkg/libcontainer/network" "github.com/dotcloud/docker/pkg/system" @@ -61,10 +62,15 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ // Do this before syncing with child so that no children // can escape the cgroup ns.logger.Println("setting cgroups") - if err := ns.SetupCgroups(container, command.Process.Pid); err != nil { + activeCgroup, err := ns.SetupCgroups(container, command.Process.Pid) + if err != nil { command.Process.Kill() return -1, err } + if activeCgroup != nil { + defer activeCgroup.Cleanup() + } + ns.logger.Println("setting up network") if err := ns.InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil { command.Process.Kill() @@ -85,13 +91,11 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ return status, err } -func (ns *linuxNs) SetupCgroups(container *libcontainer.Container, nspid int) error { +func (ns *linuxNs) 
SetupCgroups(container *libcontainer.Container, nspid int) (cgroups.ActiveCgroup, error) { if container.Cgroups != nil { - if err := container.Cgroups.Apply(nspid); err != nil { - return err - } + return container.Cgroups.Apply(nspid) } - return nil + return nil, nil } func (ns *linuxNs) InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error { -- cgit v1.2.1 From 9294d7f2af6ecb7c18be11fb5043fad4a61d8f09 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Fri, 14 Mar 2014 11:45:29 +0100 Subject: cgroups: Join groups by writing to cgroups.procs, not tasks cgroups.procs moves all the threads of the process, and "tasks" just the one thread. I believe there is a risk that we move the main thread, but then we accidentally fork off one of the other threads if the go scheduler randomly switched to another thread. So, it seems safer (and more correct) to use cgroups.procs. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- pkg/cgroups/apply_raw.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cgroups/apply_raw.go b/pkg/cgroups/apply_raw.go index bce96f4951..47a2a002b8 100644 --- a/pkg/cgroups/apply_raw.go +++ b/pkg/cgroups/apply_raw.go @@ -68,7 +68,7 @@ func (raw *rawCgroup) join(subsystem string, pid int) (string, error) { if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) { return "", err } - if err := writeFile(path, "tasks", strconv.Itoa(pid)); err != nil { + if err := writeFile(path, "cgroup.procs", strconv.Itoa(pid)); err != nil { return "", err } return path, nil -- cgit v1.2.1 From d4725801b3401d04b3f35b5783bdc0fc362f7f00 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Fri, 21 Feb 2014 10:34:06 +0100 Subject: Vendor github.com/godbus/dbus and github.com/coreos/go-systemd We need this to do systemd API calls. We also add the static_build tag to make godbus not use os/user which is problematic for static builds. 
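The tag works through an ordinary Go build constraint: a file guarded by "static_build" supplies an os/user-free implementation and is selected via the -tags flag (see the hack/make.sh hunk at the end of this patch). A hypothetical illustration of such a file, not the vendored godbus code:

    // +build static_build

    package dbus

    import "os"

    // getHomeDir avoids os/user, whose cgo-backed lookup is problematic in
    // fully static binaries; the environment is good enough for this sketch.
    func getHomeDir() string {
        if home := os.Getenv("HOME"); home != "" {
            return home
        }
        return "/root"
    }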
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- hack/make.sh | 2 +- hack/vendor.sh | 3 + .../src/github.com/coreos/go-systemd/.travis.yml | 8 + vendor/src/github.com/coreos/go-systemd/LICENSE | 191 +++++ vendor/src/github.com/coreos/go-systemd/README.md | 44 ++ .../coreos/go-systemd/activation/files.go | 56 ++ .../coreos/go-systemd/activation/files_test.go | 84 +++ .../coreos/go-systemd/activation/listeners.go | 38 + .../coreos/go-systemd/activation/listeners_test.go | 88 +++ .../src/github.com/coreos/go-systemd/dbus/dbus.go | 104 +++ .../github.com/coreos/go-systemd/dbus/dbus_test.go | 41 ++ .../github.com/coreos/go-systemd/dbus/methods.go | 354 +++++++++ .../coreos/go-systemd/dbus/methods_test.go | 314 ++++++++ .../coreos/go-systemd/dbus/properties.go | 220 ++++++ .../src/github.com/coreos/go-systemd/dbus/set.go | 26 + .../github.com/coreos/go-systemd/dbus/set_test.go | 26 + .../coreos/go-systemd/dbus/subscription.go | 249 +++++++ .../coreos/go-systemd/dbus/subscription_set.go | 32 + .../go-systemd/dbus/subscription_set_test.go | 67 ++ .../coreos/go-systemd/dbus/subscription_test.go | 90 +++ .../go-systemd/examples/activation/activation.go | 44 ++ .../examples/activation/httpserver/README.md | 19 + .../examples/activation/httpserver/hello.service | 11 + .../examples/activation/httpserver/hello.socket | 5 + .../examples/activation/httpserver/httpserver.go | 26 + .../go-systemd/examples/activation/listen.go | 50 ++ .../coreos/go-systemd/fixtures/start-stop.service | 5 + .../fixtures/subscribe-events-set.service | 5 + .../go-systemd/fixtures/subscribe-events.service | 5 + .../github.com/coreos/go-systemd/journal/send.go | 168 +++++ vendor/src/github.com/coreos/go-systemd/test | 3 + vendor/src/github.com/godbus/dbus/LICENSE | 25 + vendor/src/github.com/godbus/dbus/README.markdown | 38 + .../github.com/godbus/dbus/_examples/eavesdrop.go | 30 + .../github.com/godbus/dbus/_examples/introspect.go | 21 + .../github.com/godbus/dbus/_examples/list-names.go | 27 + .../godbus/dbus/_examples/notification.go | 17 + .../src/github.com/godbus/dbus/_examples/prop.go | 68 ++ .../src/github.com/godbus/dbus/_examples/server.go | 45 ++ .../src/github.com/godbus/dbus/_examples/signal.go | 24 + vendor/src/github.com/godbus/dbus/auth.go | 253 +++++++ vendor/src/github.com/godbus/dbus/auth_external.go | 26 + vendor/src/github.com/godbus/dbus/auth_sha1.go | 102 +++ vendor/src/github.com/godbus/dbus/call.go | 147 ++++ vendor/src/github.com/godbus/dbus/conn.go | 601 +++++++++++++++ vendor/src/github.com/godbus/dbus/conn_darwin.go | 21 + vendor/src/github.com/godbus/dbus/conn_other.go | 27 + vendor/src/github.com/godbus/dbus/conn_test.go | 199 +++++ vendor/src/github.com/godbus/dbus/dbus.go | 258 +++++++ vendor/src/github.com/godbus/dbus/decoder.go | 228 ++++++ vendor/src/github.com/godbus/dbus/doc.go | 63 ++ vendor/src/github.com/godbus/dbus/encoder.go | 179 +++++ vendor/src/github.com/godbus/dbus/examples_test.go | 50 ++ vendor/src/github.com/godbus/dbus/export.go | 302 ++++++++ vendor/src/github.com/godbus/dbus/homedir.go | 28 + .../src/github.com/godbus/dbus/homedir_dynamic.go | 15 + .../src/github.com/godbus/dbus/homedir_static.go | 45 ++ .../src/github.com/godbus/dbus/introspect/call.go | 27 + .../godbus/dbus/introspect/introspect.go | 80 ++ .../godbus/dbus/introspect/introspectable.go | 74 ++ vendor/src/github.com/godbus/dbus/message.go | 346 +++++++++ vendor/src/github.com/godbus/dbus/prop/prop.go | 264 +++++++ vendor/src/github.com/godbus/dbus/proto_test.go | 369 ++++++++++ 
vendor/src/github.com/godbus/dbus/sig.go | 257 +++++++ vendor/src/github.com/godbus/dbus/sig_test.go | 70 ++ .../src/github.com/godbus/dbus/transport_darwin.go | 6 + .../github.com/godbus/dbus/transport_generic.go | 35 + .../src/github.com/godbus/dbus/transport_unix.go | 190 +++++ .../github.com/godbus/dbus/transport_unix_test.go | 49 ++ .../github.com/godbus/dbus/transport_unixcred.go | 22 + vendor/src/github.com/godbus/dbus/variant.go | 129 ++++ vendor/src/github.com/godbus/dbus/variant_lexer.go | 284 +++++++ .../src/github.com/godbus/dbus/variant_parser.go | 817 +++++++++++++++++++++ vendor/src/github.com/godbus/dbus/variant_test.go | 78 ++ 74 files changed, 8313 insertions(+), 1 deletion(-) create mode 100644 vendor/src/github.com/coreos/go-systemd/.travis.yml create mode 100644 vendor/src/github.com/coreos/go-systemd/LICENSE create mode 100644 vendor/src/github.com/coreos/go-systemd/README.md create mode 100644 vendor/src/github.com/coreos/go-systemd/activation/files.go create mode 100644 vendor/src/github.com/coreos/go-systemd/activation/files_test.go create mode 100644 vendor/src/github.com/coreos/go-systemd/activation/listeners.go create mode 100644 vendor/src/github.com/coreos/go-systemd/activation/listeners_test.go create mode 100644 vendor/src/github.com/coreos/go-systemd/dbus/dbus.go create mode 100644 vendor/src/github.com/coreos/go-systemd/dbus/dbus_test.go create mode 100644 vendor/src/github.com/coreos/go-systemd/dbus/methods.go create mode 100644 vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go create mode 100644 vendor/src/github.com/coreos/go-systemd/dbus/properties.go create mode 100644 vendor/src/github.com/coreos/go-systemd/dbus/set.go create mode 100644 vendor/src/github.com/coreos/go-systemd/dbus/set_test.go create mode 100644 vendor/src/github.com/coreos/go-systemd/dbus/subscription.go create mode 100644 vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go create mode 100644 vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go create mode 100644 vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go create mode 100644 vendor/src/github.com/coreos/go-systemd/examples/activation/activation.go create mode 100644 vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/README.md create mode 100644 vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service create mode 100644 vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket create mode 100644 vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go create mode 100644 vendor/src/github.com/coreos/go-systemd/examples/activation/listen.go create mode 100644 vendor/src/github.com/coreos/go-systemd/fixtures/start-stop.service create mode 100644 vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service create mode 100644 vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events.service create mode 100644 vendor/src/github.com/coreos/go-systemd/journal/send.go create mode 100755 vendor/src/github.com/coreos/go-systemd/test create mode 100644 vendor/src/github.com/godbus/dbus/LICENSE create mode 100644 vendor/src/github.com/godbus/dbus/README.markdown create mode 100644 vendor/src/github.com/godbus/dbus/_examples/eavesdrop.go create mode 100644 vendor/src/github.com/godbus/dbus/_examples/introspect.go create mode 100644 vendor/src/github.com/godbus/dbus/_examples/list-names.go create mode 100644 
vendor/src/github.com/godbus/dbus/_examples/notification.go create mode 100644 vendor/src/github.com/godbus/dbus/_examples/prop.go create mode 100644 vendor/src/github.com/godbus/dbus/_examples/server.go create mode 100644 vendor/src/github.com/godbus/dbus/_examples/signal.go create mode 100644 vendor/src/github.com/godbus/dbus/auth.go create mode 100644 vendor/src/github.com/godbus/dbus/auth_external.go create mode 100644 vendor/src/github.com/godbus/dbus/auth_sha1.go create mode 100644 vendor/src/github.com/godbus/dbus/call.go create mode 100644 vendor/src/github.com/godbus/dbus/conn.go create mode 100644 vendor/src/github.com/godbus/dbus/conn_darwin.go create mode 100644 vendor/src/github.com/godbus/dbus/conn_other.go create mode 100644 vendor/src/github.com/godbus/dbus/conn_test.go create mode 100644 vendor/src/github.com/godbus/dbus/dbus.go create mode 100644 vendor/src/github.com/godbus/dbus/decoder.go create mode 100644 vendor/src/github.com/godbus/dbus/doc.go create mode 100644 vendor/src/github.com/godbus/dbus/encoder.go create mode 100644 vendor/src/github.com/godbus/dbus/examples_test.go create mode 100644 vendor/src/github.com/godbus/dbus/export.go create mode 100644 vendor/src/github.com/godbus/dbus/homedir.go create mode 100644 vendor/src/github.com/godbus/dbus/homedir_dynamic.go create mode 100644 vendor/src/github.com/godbus/dbus/homedir_static.go create mode 100644 vendor/src/github.com/godbus/dbus/introspect/call.go create mode 100644 vendor/src/github.com/godbus/dbus/introspect/introspect.go create mode 100644 vendor/src/github.com/godbus/dbus/introspect/introspectable.go create mode 100644 vendor/src/github.com/godbus/dbus/message.go create mode 100644 vendor/src/github.com/godbus/dbus/prop/prop.go create mode 100644 vendor/src/github.com/godbus/dbus/proto_test.go create mode 100644 vendor/src/github.com/godbus/dbus/sig.go create mode 100644 vendor/src/github.com/godbus/dbus/sig_test.go create mode 100644 vendor/src/github.com/godbus/dbus/transport_darwin.go create mode 100644 vendor/src/github.com/godbus/dbus/transport_generic.go create mode 100644 vendor/src/github.com/godbus/dbus/transport_unix.go create mode 100644 vendor/src/github.com/godbus/dbus/transport_unix_test.go create mode 100644 vendor/src/github.com/godbus/dbus/transport_unixcred.go create mode 100644 vendor/src/github.com/godbus/dbus/variant.go create mode 100644 vendor/src/github.com/godbus/dbus/variant_lexer.go create mode 100644 vendor/src/github.com/godbus/dbus/variant_parser.go create mode 100644 vendor/src/github.com/godbus/dbus/variant_test.go diff --git a/hack/make.sh b/hack/make.sh index b77e9b7f44..dbb9dbfdfd 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -89,7 +89,7 @@ LDFLAGS=' ' LDFLAGS_STATIC='-linkmode external' EXTLDFLAGS_STATIC='-static' -BUILDFLAGS=( -a -tags "netgo $DOCKER_BUILDTAGS" ) +BUILDFLAGS=( -a -tags "netgo static_build $DOCKER_BUILDTAGS" ) # A few more flags that are specific just to building a completely-static binary (see hack/make/binary) # PLEASE do not use these anywhere else. 
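Editor's note: the static_build tag added to BUILDFLAGS above works through Go's build constraints. godbus ships alternative implementations selected by that tag (see homedir_dynamic.go and homedir_static.go in the diffstat), so a static binary never links the os/user lookup path. An illustrative pair of files showing the pattern (names and contents are hypothetical, not the godbus sources):

// lookup_dynamic.go (hypothetical)
// +build !static_build

package lookup

import "os/user"

// HomeDir resolves the home directory via os/user, which can drag in
// cgo/NSS and is therefore awkward for fully static binaries.
func HomeDir() (string, error) {
	u, err := user.Current()
	if err != nil {
		return "", err
	}
	return u.HomeDir, nil
}

// lookup_static.go (hypothetical)
// +build static_build

package lookup

import (
	"errors"
	"os"
)

// HomeDir avoids os/user entirely so the package builds cleanly with
// "go build -tags static_build" into a statically linked binary.
func HomeDir() (string, error) {
	if home := os.Getenv("HOME"); home != "" {
		return home, nil
	}
	return "", errors.New("HOME is not set")
}

A build with -tags "netgo static_build" (as hack/make.sh now passes) compiles the second file; a normal build compiles the first.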
diff --git a/hack/vendor.sh b/hack/vendor.sh index ac996dde12..4200d90867 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -58,3 +58,6 @@ mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar rm -rf src/code.google.com/p/go mkdir -p src/code.google.com/p/go/src/pkg/archive mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar + +clone git github.com/godbus/dbus cb98efbb933d8389ab549a060e880ea3c375d213 +clone git github.com/coreos/go-systemd 4c14ed39b8a643ac44b4f95b5a53c00e94261475 diff --git a/vendor/src/github.com/coreos/go-systemd/.travis.yml b/vendor/src/github.com/coreos/go-systemd/.travis.yml new file mode 100644 index 0000000000..8c9f56e44a --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: 1.2 + +install: + - echo "Skip install" + +script: + - ./test diff --git a/vendor/src/github.com/coreos/go-systemd/LICENSE b/vendor/src/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/src/github.com/coreos/go-systemd/README.md b/vendor/src/github.com/coreos/go-systemd/README.md new file mode 100644 index 0000000000..0ee09fec0a --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/README.md @@ -0,0 +1,44 @@ +# go-systemd + +Go bindings to systemd. The project has three packages: + +- activation - for writing and using socket activation from Go +- journal - for writing to systemd's logging service, journal +- dbus - for starting/stopping/inspecting running services and units + +Go docs for the entire project are here: + +http://godoc.org/github.com/coreos/go-systemd + +## Socket Activation + +An example HTTP server using socket activation can be quickly setup by +following this README on a Linux machine running systemd: + +https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver + +## Journal + +Using this package you can submit journal entries directly to systemd's journal taking advantage of features like indexed key/value pairs for each log entry. + +## D-Bus + +The D-Bus API lets you start, stop and introspect systemd units. The API docs are here: + +http://godoc.org/github.com/coreos/go-systemd/dbus + +### Debugging + +Create `/etc/dbus-1/system-local.conf` that looks like this: + +``` + + + + + + + +``` diff --git a/vendor/src/github.com/coreos/go-systemd/activation/files.go b/vendor/src/github.com/coreos/go-systemd/activation/files.go new file mode 100644 index 0000000000..74b4fc10f3 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/activation/files.go @@ -0,0 +1,56 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package activation implements primitives for systemd socket activation. 
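// Editorial aside, not part of the vendored file: systemd's socket activation passes
// listening file descriptors to the spawned service starting at fd 3 and sets two
// environment variables: LISTEN_FDS (how many fds were passed) and LISTEN_PID (which
// process they are meant for). Files() below checks LISTEN_PID against os.Getpid()
// and then wraps fds 3..3+LISTEN_FDS-1 in *os.File values. A hypothetical consumer
// using the higher-level helper from listeners.go:
//
//	// illustrative daemon main, error handling abbreviated
//	listeners, err := activation.Listeners(true) // true also clears LISTEN_* in the env
//	if err != nil || len(listeners) == 0 {
//		log.Fatal("not started via socket activation")
//	}
//	log.Fatal(http.Serve(listeners[0], nil))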
+package activation + +import ( + "os" + "strconv" + "syscall" +) + +// based on: https://gist.github.com/alberts/4640792 +const ( + listenFdsStart = 3 +) + +func Files(unsetEnv bool) []*os.File { + if unsetEnv { + // there is no way to unset env in golang os package for now + // https://code.google.com/p/go/issues/detail?id=6423 + defer os.Setenv("LISTEN_PID", "") + defer os.Setenv("LISTEN_FDS", "") + } + + pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) + if err != nil || pid != os.Getpid() { + return nil + } + + nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) + if err != nil || nfds == 0 { + return nil + } + + var files []*os.File + for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { + syscall.CloseOnExec(fd) + files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd))) + } + + return files +} diff --git a/vendor/src/github.com/coreos/go-systemd/activation/files_test.go b/vendor/src/github.com/coreos/go-systemd/activation/files_test.go new file mode 100644 index 0000000000..a1c6948fb2 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/activation/files_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package activation + +import ( + "bytes" + "io" + "os" + "os/exec" + "testing" +) + +// correctStringWritten fails the text if the correct string wasn't written +// to the other side of the pipe. +func correctStringWritten(t *testing.T, r *os.File, expected string) bool { + bytes := make([]byte, len(expected)) + io.ReadAtLeast(r, bytes, len(expected)) + + if string(bytes) != expected { + t.Fatalf("Unexpected string %s", string(bytes)) + } + + return true +} + +// TestActivation forks out a copy of activation.go example and reads back two +// strings from the pipes that are passed in. 
+func TestActivation(t *testing.T) { + cmd := exec.Command("go", "run", "../examples/activation/activation.go") + + r1, w1, _ := os.Pipe() + r2, w2, _ := os.Pipe() + cmd.ExtraFiles = []*os.File{ + w1, + w2, + } + + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1") + + err := cmd.Run() + if err != nil { + t.Fatalf(err.Error()) + } + + correctStringWritten(t, r1, "Hello world") + correctStringWritten(t, r2, "Goodbye world") +} + +func TestActivationNoFix(t *testing.T) { + cmd := exec.Command("go", "run", "../examples/activation/activation.go") + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "LISTEN_FDS=2") + + out, _ := cmd.CombinedOutput() + if bytes.Contains(out, []byte("No files")) == false { + t.Fatalf("Child didn't error out as expected") + } +} + +func TestActivationNoFiles(t *testing.T) { + cmd := exec.Command("go", "run", "../examples/activation/activation.go") + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "LISTEN_FDS=0", "FIX_LISTEN_PID=1") + + out, _ := cmd.CombinedOutput() + if bytes.Contains(out, []byte("No files")) == false { + t.Fatalf("Child didn't error out as expected") + } +} diff --git a/vendor/src/github.com/coreos/go-systemd/activation/listeners.go b/vendor/src/github.com/coreos/go-systemd/activation/listeners.go new file mode 100644 index 0000000000..cdb2cf4bb4 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/activation/listeners.go @@ -0,0 +1,38 @@ +/* +Copyright 2014 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package activation + +import ( + "fmt" + "net" +) + +// Listeners returns net.Listeners for all socket activated fds passed to this process. +func Listeners(unsetEnv bool) ([]net.Listener, error) { + files := Files(unsetEnv) + listeners := make([]net.Listener, len(files)) + + for i, f := range files { + var err error + listeners[i], err = net.FileListener(f) + if err != nil { + return nil, fmt.Errorf("Error setting up FileListener for fd %d: %s", f.Fd(), err.Error()) + } + } + + return listeners, nil +} diff --git a/vendor/src/github.com/coreos/go-systemd/activation/listeners_test.go b/vendor/src/github.com/coreos/go-systemd/activation/listeners_test.go new file mode 100644 index 0000000000..c3627d6d4d --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/activation/listeners_test.go @@ -0,0 +1,88 @@ +/* +Copyright 2014 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package activation + +import ( + "io" + "net" + "os" + "os/exec" + "testing" +) + +// correctStringWritten fails the text if the correct string wasn't written +// to the other side of the pipe. +func correctStringWrittenNet(t *testing.T, r net.Conn, expected string) bool { + bytes := make([]byte, len(expected)) + io.ReadAtLeast(r, bytes, len(expected)) + + if string(bytes) != expected { + t.Fatalf("Unexpected string %s", string(bytes)) + } + + return true +} + +// TestActivation forks out a copy of activation.go example and reads back two +// strings from the pipes that are passed in. +func TestListeners(t *testing.T) { + cmd := exec.Command("go", "run", "../examples/activation/listen.go") + + l1, err := net.Listen("tcp", ":9999") + if err != nil { + t.Fatalf(err.Error()) + } + l2, err := net.Listen("tcp", ":1234") + if err != nil { + t.Fatalf(err.Error()) + } + + t1 := l1.(*net.TCPListener) + t2 := l2.(*net.TCPListener) + + f1, _ := t1.File() + f2, _ := t2.File() + + cmd.ExtraFiles = []*os.File{ + f1, + f2, + } + + r1, err := net.Dial("tcp", "127.0.0.1:9999") + if err != nil { + t.Fatalf(err.Error()) + } + r1.Write([]byte("Hi")) + + r2, err := net.Dial("tcp", "127.0.0.1:1234") + if err != nil { + t.Fatalf(err.Error()) + } + r2.Write([]byte("Hi")) + + cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1") + + out, err := cmd.Output() + if err != nil { + println(string(out)) + t.Fatalf(err.Error()) + } + + correctStringWrittenNet(t, r1, "Hello world") + correctStringWrittenNet(t, r2, "Goodbye world") +} diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/src/github.com/coreos/go-systemd/dbus/dbus.go new file mode 100644 index 0000000000..91d7112145 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/dbus/dbus.go @@ -0,0 +1,104 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus" +) + +const signalBuffer = 100 + +// ObjectPath creates a dbus.ObjectPath using the rules that systemd uses for +// serializing special characters. +func ObjectPath(path string) dbus.ObjectPath { + path = strings.Replace(path, ".", "_2e", -1) + path = strings.Replace(path, "-", "_2d", -1) + path = strings.Replace(path, "@", "_40", -1) + + return dbus.ObjectPath(path) +} + +// Conn is a connection to systemds dbus endpoint. +type Conn struct { + sysconn *dbus.Conn + sysobj *dbus.Object + jobListener struct { + jobs map[dbus.ObjectPath]chan string + sync.Mutex + } + subscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } + dispatch map[string]func(dbus.Signal) +} + +// New() establishes a connection to the system bus and authenticates. 
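// Editorial aside, not part of the vendored file: systemd exposes each unit as a D-Bus
// object whose path is derived from the unit name, with '.', '-' and '@' escaped as
// _2e, _2d and _40 (their hex byte values), which is what ObjectPath() above does.
// For example:
//
//	p := ObjectPath("/org/freedesktop/systemd1/unit/sshd.service")
//	// p == dbus.ObjectPath("/org/freedesktop/systemd1/unit/sshd_2eservice")
//
// getProperties() in methods.go builds unit object paths this way before calling
// org.freedesktop.DBus.Properties.GetAll on them.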
+func New() (*Conn, error) { + c := new(Conn) + + if err := c.initConnection(); err != nil { + return nil, err + } + + c.initJobs() + return c, nil +} + +func (c *Conn) initConnection() error { + var err error + c.sysconn, err = dbus.SystemBusPrivate() + if err != nil { + return err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = c.sysconn.Auth(methods) + if err != nil { + c.sysconn.Close() + return err + } + + err = c.sysconn.Hello() + if err != nil { + c.sysconn.Close() + return err + } + + c.sysobj = c.sysconn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) + + // Setup the listeners on jobs so that we can get completions + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + c.initSubscription() + c.initDispatch() + + return nil +} diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/dbus_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/dbus_test.go new file mode 100644 index 0000000000..2e80f73ef7 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/dbus/dbus_test.go @@ -0,0 +1,41 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "testing" +) + +// TestObjectPath ensures path encoding of the systemd rules works. +func TestObjectPath(t *testing.T) { + input := "/silly-path/to@a/unit..service" + output := ObjectPath(input) + expected := "/silly_2dpath/to_40a/unit_2e_2eservice" + + if string(output) != expected { + t.Fatalf("Output '%s' did not match expected '%s'", output, expected) + } +} + +// TestNew ensures that New() works without errors. +func TestNew(t *testing.T) { + _, err := New() + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/methods.go b/vendor/src/github.com/coreos/go-systemd/dbus/methods.go new file mode 100644 index 0000000000..11d5cda945 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/dbus/methods.go @@ -0,0 +1,354 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dbus + +import ( + "errors" + "github.com/godbus/dbus" +) + +func (c *Conn) initJobs() { + c.jobListener.jobs = make(map[dbus.ObjectPath]chan string) +} + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(job string, args ...interface{}) (<-chan string, error) { + c.jobListener.Lock() + defer c.jobListener.Unlock() + + ch := make(chan string, 1) + var path dbus.ObjectPath + err := c.sysobj.Call(job, 0, args...).Store(&path) + if err != nil { + return nil, err + } + c.jobListener.jobs[path] = ch + return ch, nil +} + +func (c *Conn) runJob(job string, args ...interface{}) (string, error) { + respCh, err := c.startJob(job, args...) + if err != nil { + return "", err + } + return <-respCh, nil +} + +// StartUnit enqeues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. +// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. It is not recommended to make use of the latter +// two options. +// +// Result string: one of done, canceled, timeout, failed, dependency, skipped. +// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job has been depending on failed and the job hence +// has been removed too. skipped indicates that a job was skipped because it +// didn't apply to the units current state. +func (c *Conn) StartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.StartUnit", name, mode) +} + +// StopUnit is similar to StartUnit but stops the specified unit rather +// than starting it. +func (c *Conn) StopUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.StopUnit", name, mode) +} + +// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. +func (c *Conn) ReloadUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +} + +// RestartUnit restarts a service. If a service is restarted that isn't +// running it will be started. +func (c *Conn) RestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +} + +// TryRestartUnit is like RestartUnit, except that a service that isn't running +// is not affected by the restart. 
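// Editorial aside, not part of the vendored file: the *Unit methods above all share the
// same job plumbing. startJob() records a buffered result channel under the job's
// object path, the JobRemoved signal (matched in initConnection()) is routed to
// jobComplete(), which sends the job's result string on that channel, and runJob()
// simply blocks until it arrives. A hypothetical caller therefore only sees the final
// result:
//
//	conn, err := New() // connection from this package
//	if err != nil {
//		log.Fatal(err)
//	}
//	result, err := conn.StartUnit("sshd.service", "replace")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if result != "done" { // other values: canceled, timeout, failed, dependency, skipped
//		log.Fatalf("start did not complete cleanly: %s", result)
//	}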
+func (c *Conn) TryRestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) +} + +// ReloadOrRestart attempts a reload if the unit supports it and use a restart +// otherwise. +func (c *Conn) ReloadOrRestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +} + +// ReloadOrTryRestart attempts a reload if the unit supports it and use a "Try" +// flavored restart otherwise. +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) +} + +// StartTransientUnit() may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnit(), properties contains properties +// of the unit. +func (c *Conn) StartTransientUnit(name string, mode string, properties ...Property) (string, error) { + return c.runJob("org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +} + +// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's +// processes are killed. +func (c *Conn) KillUnit(name string, signal int32) { + c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() +} + +// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface +func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) { + var err error + var props map[string]dbus.Variant + + path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + if err != nil { + return nil, err + } + + out := make(map[string]interface{}, len(props)) + for k, v := range props { + out[k] = v.Value() + } + + return out, nil +} + +// GetUnitProperties takes the unit name and returns all of its dbus object properties. +func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1.Unit") +} + +func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { + var err error + var prop dbus.Variant + + path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: propertyName, Value: prop}, nil +} + +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. 
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope +// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + return c.getProperties(unit, "org.freedesktop.systemd1."+unitType) +} + +// SetUnitProperties() may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.sysobj.Call("SetUnitProperties", 0, name, runtime, properties).Store() +} + +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(unit, "org.freedesktop.systemd1." + unitType, propertyName) +} + +// ListUnits returns an array with all currently loaded units. Note that +// units may be known by multiple names at the same time, and hence there might +// be more unit names loaded than actual units behind them. +func (c *Conn) ListUnits() ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) + if err != nil { + return nil, err + } + + return status, nil +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. whether the unit is currently started or not) + SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) + Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. + Path dbus.ObjectPath // The unit object path + JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise + JobType string // The job type as string + JobPath dbus.ObjectPath // The job object path +} + +// EnableUnitFiles() may be used to enable one or more units in the system (by +// creating symlinks to them in /etc or /run). +// +// It takes a list of unit files to enable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and two booleans: the first controls whether the unit shall +// be enabled for runtime only (true, /run), or persistently (false, /etc). +// The second one controls whether symlinks pointing to other units shall +// be replaced if necessary. 
+// +// This call returns one boolean and an array with the changes made. The +// boolean signals whether the unit files contained any enablement +// information (i.e. an [Install]) section. The changes list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + var carries_install_info bool + + result := make([][]interface{}, 0) + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + if err != nil { + return false, nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]EnableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return false, nil, err + } + + return carries_install_info, changes, nil +} + +type EnableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// DisableUnitFiles() may be used to disable one or more units in the system (by +// removing symlinks to them from /etc or /run). +// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.Call("DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type DisableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Reload instructs systemd to scan for and reload unit files. This is +// equivalent to a 'systemctl daemon-reload'. +func (c *Conn) Reload() error { + return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() +} diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go new file mode 100644 index 0000000000..9e2f22323f --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go @@ -0,0 +1,314 @@ +/* +Copyright 2013 CoreOS Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "fmt" + "github.com/guelfey/go.dbus" + "math/rand" + "os" + "path/filepath" + "reflect" + "testing" +) + +func setupConn(t *testing.T) *Conn { + conn, err := New() + if err != nil { + t.Fatal(err) + } + + return conn +} + +func setupUnit(target string, conn *Conn, t *testing.T) { + // Blindly stop the unit in case it is running + conn.StopUnit(target, "replace") + + // Blindly remove the symlink in case it exists + targetRun := filepath.Join("/run/systemd/system/", target) + err := os.Remove(targetRun) + + // 1. Enable the unit + abs, err := filepath.Abs("../fixtures/" + target) + if err != nil { + t.Fatal(err) + } + + fixture := []string{abs} + + install, changes, err := conn.EnableUnitFiles(fixture, true, true) + if err != nil { + t.Fatal(err) + } + + if install != false { + t.Fatal("Install was true") + } + + if len(changes) < 1 { + t.Fatalf("Expected one change, got %v", changes) + } + + if changes[0].Filename != targetRun { + t.Fatal("Unexpected target filename") + } +} + +// Ensure that basic unit starting and stopping works. +func TestStartStopUnit(t *testing.T) { + target := "start-stop.service" + conn := setupConn(t) + + setupUnit(target, conn, t) + + // 2. Start the unit + job, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Job is not done, %v", job) + } + + units, err := conn.ListUnits() + + var unit *UnitStatus + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit == nil { + t.Fatalf("Test unit not found in list") + } + + if unit.ActiveState != "active" { + t.Fatalf("Test unit not active") + } + + // 3. Stop the unit + job, err = conn.StopUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + units, err = conn.ListUnits() + + unit = nil + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit != nil { + t.Fatalf("Test unit found in list, should be stopped") + } +} + +// Enables a unit and then immediately tears it down +func TestEnableDisableUnit(t *testing.T) { + target := "enable-disable.service" + conn := setupConn(t) + + setupUnit(target, conn, t) + + abs, err := filepath.Abs("../fixtures/" + target) + if err != nil { + t.Fatal(err) + } + + path := filepath.Join("/run/systemd/system/", target) + + // 2. Disable the unit + changes, err := conn.DisableUnitFiles([]string{abs}, true) + if err != nil { + t.Fatal(err) + } + + if len(changes) != 1 { + t.Fatalf("Changes should include the path, %v", changes) + } + if changes[0].Filename != path { + t.Fatalf("Change should include correct filename, %+v", changes[0]) + } + if changes[0].Destination != "" { + t.Fatalf("Change destination should be empty, %+v", changes[0]) + } +} + +// TestGetUnitProperties reads the `-.mount` which should exist on all systemd +// systems and ensures that one of its properties is valid. 
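// Editorial aside, not part of the vendored file: setupUnit() above leans on
// EnableUnitFiles() with runtime=true, which makes systemd symlink the fixture under
// /run/systemd/system (gone after a reboot) rather than /etc. The returned changes
// describe each symlink created; a hypothetical caller outside the tests:
//
//	install, changes, err := conn.EnableUnitFiles([]string{"/tmp/example.service"}, true, true)
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = install // true only when the unit file carries [Install] information
//	for _, ch := range changes {
//		log.Printf("%s: %s -> %s", ch.Type, ch.Filename, ch.Destination)
//	}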
+func TestGetUnitProperties(t *testing.T) { + conn := setupConn(t) + + unit := "-.mount" + + info, err := conn.GetUnitProperties(unit) + if err != nil { + t.Fatal(err) + } + + names := info["Wants"].([]string) + + if len(names) < 1 { + t.Fatal("/ is unwanted") + } + + if names[0] != "system.slice" { + t.Fatal("unexpected wants for /") + } + + prop, err := conn.GetUnitProperty(unit, "Wants") + if err != nil { + t.Fatal(err) + } + + if prop.Name != "Wants" { + t.Fatal("unexpected property name") + } + + val := prop.Value.Value().([]string) + if !reflect.DeepEqual(val, names) { + t.Fatal("unexpected property value") + } +} + +// TestGetUnitPropertiesRejectsInvalidName attempts to get the properties for a +// unit with an invalid name. This test should be run with --test.timeout set, +// as a fail will manifest as GetUnitProperties hanging indefinitely. +func TestGetUnitPropertiesRejectsInvalidName(t *testing.T) { + conn := setupConn(t) + + unit := "//invalid#$^/" + + _, err := conn.GetUnitProperties(unit) + if err == nil { + t.Fatal("Expected an error, got nil") + } + + _, err = conn.GetUnitProperty(unit, "Wants") + if err == nil { + t.Fatal("Expected an error, got nil") + } +} + +// TestSetUnitProperties changes a cgroup setting on the `tmp.mount` +// which should exist on all systemd systems and ensures that the +// property was set. +func TestSetUnitProperties(t *testing.T) { + conn := setupConn(t) + + unit := "tmp.mount" + + if err := conn.SetUnitProperties(unit, true, Property{"CPUShares", dbus.MakeVariant(uint64(1023))}); err != nil { + t.Fatal(err) + } + + info, err := conn.GetUnitTypeProperties(unit, "Mount") + if err != nil { + t.Fatal(err) + } + + value := info["CPUShares"].(uint64) + if value != 1023 { + t.Fatal("CPUShares of unit is not 1023, %s", value) + } +} + +// Ensure that basic transient unit starting and stopping works. +func TestStartStopTransientUnit(t *testing.T) { + conn := setupConn(t) + + props := []Property{ + PropExecStart([]string{"/bin/sleep", "400"}, false), + } + target := fmt.Sprintf("testing-transient-%d.service", rand.Int()) + + // Start the unit + job, err := conn.StartTransientUnit(target, "replace", props...) + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Job is not done, %v", job) + } + + units, err := conn.ListUnits() + + var unit *UnitStatus + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit == nil { + t.Fatalf("Test unit not found in list") + } + + if unit.ActiveState != "active" { + t.Fatalf("Test unit not active") + } + + // 3. 
Stop the unit + job, err = conn.StopUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + units, err = conn.ListUnits() + + unit = nil + for _, u := range units { + if u.Name == target { + unit = &u + } + } + + if unit != nil { + t.Fatalf("Test unit found in list, should be stopped") + } +} + +func TestConnJobListener(t *testing.T) { + target := "start-stop.service" + conn := setupConn(t) + + setupUnit(target, conn, t) + + jobSize := len(conn.jobListener.jobs) + + _, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + _, err = conn.StopUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + currentJobSize := len(conn.jobListener.jobs) + if jobSize != currentJobSize { + t.Fatal("JobListener jobs leaked") + } +} diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/properties.go b/vendor/src/github.com/coreos/go-systemd/dbus/properties.go new file mode 100644 index 0000000000..a06ccda761 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/dbus/properties.go @@ -0,0 +1,220 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "github.com/godbus/dbus" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. +// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. + +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + execStart{ + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= +func PropRemainAfterExit(b bool) Property { + return Property{ + Name: "RemainAfterExit", + Value: dbus.MakeVariant(b), + } +} + +// PropDescription sets the Description unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= +func PropDescription(desc string) Property { + return Property{ + Name: "Description", + Value: dbus.MakeVariant(desc), + } +} + +func propDependency(name string, units []string) Property { + return Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +// PropRequires sets the Requires unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= +func PropRequires(units ...string) Property { + return propDependency("Requires", units) +} + +// PropRequiresOverridable sets the RequiresOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= +func PropRequiresOverridable(units ...string) Property { + return propDependency("RequiresOverridable", units) +} + +// PropRequisite sets the Requisite unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= +func PropRequisite(units ...string) Property { + return propDependency("Requisite", units) +} + +// PropRequisiteOverridable sets the RequisiteOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= +func PropRequisiteOverridable(units ...string) Property { + return propDependency("RequisiteOverridable", units) +} + +// PropWants sets the Wants unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= +func PropWants(units ...string) Property { + return propDependency("Wants", units) +} + +// PropBindsTo sets the BindsTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= +func PropBindsTo(units ...string) Property { + return propDependency("BindsTo", units) +} + +// PropRequiredBy sets the RequiredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= +func PropRequiredBy(units ...string) Property { + return propDependency("RequiredBy", units) +} + +// PropRequiredByOverridable sets the RequiredByOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= +func PropRequiredByOverridable(units ...string) Property { + return propDependency("RequiredByOverridable", units) +} + +// PropWantedBy sets the WantedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= +func PropWantedBy(units ...string) Property { + return propDependency("WantedBy", units) +} + +// PropBoundBy sets the BoundBy unit property. 
See +// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= +func PropBoundBy(units ...string) Property { + return propDependency("BoundBy", units) +} + +// PropConflicts sets the Conflicts unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= +func PropConflicts(units ...string) Property { + return propDependency("Conflicts", units) +} + +// PropConflictedBy sets the ConflictedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= +func PropConflictedBy(units ...string) Property { + return propDependency("ConflictedBy", units) +} + +// PropBefore sets the Before unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= +func PropBefore(units ...string) Property { + return propDependency("Before", units) +} + +// PropAfter sets the After unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= +func PropAfter(units ...string) Property { + return propDependency("After", units) +} + +// PropOnFailure sets the OnFailure unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= +func PropOnFailure(units ...string) Property { + return propDependency("OnFailure", units) +} + +// PropTriggers sets the Triggers unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/set.go b/vendor/src/github.com/coreos/go-systemd/dbus/set.go new file mode 100644 index 0000000000..88378b29a1 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/dbus/set.go @@ -0,0 +1,26 @@ +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() (int) { + return len(s.data) +} + +func newSet() (*set) { + return &set{make(map[string] bool)} +} diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go new file mode 100644 index 0000000000..d8d174d0c4 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go @@ -0,0 +1,26 @@ +package dbus + +import ( + "testing" +) + +// TestBasicSetActions asserts that Add & Remove behavior is correct +func TestBasicSetActions(t *testing.T) { + s := newSet() + + if s.Contains("foo") { + t.Fatal("set should not contain 'foo'") + } + + s.Add("foo") + + if !s.Contains("foo") { + t.Fatal("set should contain 'foo'") + } + + s.Remove("foo") + + if s.Contains("foo") { + t.Fatal("set should not contain 'foo'") + } +} diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go new file mode 100644 index 0000000000..3d896d896f --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go @@ -0,0 +1,249 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbus + +import ( + "errors" + "time" + + "github.com/godbus/dbus" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). +func (c *Conn) Subscribe() error { + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() + if err != nil { + c.sysconn.Close() + return err + } + + return nil +} + +// Unsubscribe this connection from systemd dbus events. 
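+//
+// A rough lifecycle sketch (error handling elided):
+//
+//	conn, _ := New()
+//	_ = conn.Subscribe()
+//	// ... consume events, e.g. via SubscribeUnits or SetSubStateSubscriber ...
+//	_ = conn.Unsubscribe()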
+func (c *Conn) Unsubscribe() error { + err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() + if err != nil { + c.sysconn.Close() + return err + } + + return nil +} + +func (c *Conn) initSubscription() { + c.subscriber.ignore = make(map[dbus.ObjectPath]int64) +} + +func (c *Conn) initDispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sysconn.Signal(ch) + + go func() { + for { + signal := <-ch + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + c.jobComplete(signal) + + unitName := signal.Body[2].(string) + var unitPath dbus.ObjectPath + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + if unitPath != dbus.ObjectPath("") { + c.sendSubStateUpdate(unitPath) + } + case "org.freedesktop.systemd1.Manager.UnitNew": + c.sendSubStateUpdate(signal.Body[1].(dbus.ObjectPath)) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + // we only care about SubState updates, which are a Unit property + c.sendSubStateUpdate(signal.Path) + } + } + } + }() +} + +// Returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. +func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func (string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + old := make(map[string]*UnitStatus) + statusChan := make(chan map[string]*UnitStatus, buffer) + errChan := make(chan error, buffer) + + go func() { + for { + timerChan := time.After(interval) + + units, err := c.ListUnits() + if err == nil { + cur := make(map[string]*UnitStatus) + for i := range units { + if filterUnit != nil && filterUnit(units[i].Name) { + continue + } + cur[units[i].Name] = &units[i] + } + + // add all new or changed units + changed := make(map[string]*UnitStatus) + for n, u := range cur { + if oldU, ok := old[n]; !ok || isChanged(oldU, u) { + changed[n] = u + } + delete(old, n) + } + + // add all deleted units + for oldN := range old { + changed[oldN] = nil + } + + old = cur + + if len(changed) != 0 { + statusChan <- changed + } + } else { + errChan <- err + } + + <-timerChan + } + }() + + return statusChan, errChan +} + +type SubStateUpdate struct { + UnitName string + SubState string +} + +// SetSubStateSubscriber writes to updateCh when any unit's substate changes. +// Although this writes to updateCh on every state change, the reported state +// may be more recent than the change that generated it (due to an unavoidable +// race in the systemd dbus interface). That is, this method provides a good +// way to keep a current view of all units' states, but is not guaranteed to +// show every state transition they go through. Furthermore, state changes +// will only be written to the channel with non-blocking writes. If updateCh +// is full, it attempts to write an error to errCh; if errCh is full, the error +// passes silently. 
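+//
+// Illustrative registration (the buffer sizes are arbitrary, not prescribed
+// by this package):
+//
+//	updates := make(chan *SubStateUpdate, 256)
+//	errs := make(chan error, 16)
+//	conn.SetSubStateSubscriber(updates, errs)
+//	go func() {
+//		for u := range updates {
+//			fmt.Printf("%s is now %s\n", u.UnitName, u.SubState)
+//		}
+//	}()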
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { + c.subscriber.Lock() + defer c.subscriber.Unlock() + c.subscriber.updateCh = updateCh + c.subscriber.errCh = errCh +} + +func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) { + c.subscriber.Lock() + defer c.subscriber.Unlock() + if c.subscriber.updateCh == nil { + return + } + + if c.shouldIgnore(path) { + return + } + + info, err := c.GetUnitProperties(string(path)) + if err != nil { + select { + case c.subscriber.errCh <- err: + default: + } + } + + name := info["Id"].(string) + substate := info["SubState"].(string) + + update := &SubStateUpdate{name, substate} + select { + case c.subscriber.updateCh <- update: + default: + select { + case c.subscriber.errCh <- errors.New("update channel full!"): + default: + } + } + + c.updateIgnore(path, info) +} + +// The ignore functions work around a wart in the systemd dbus interface. +// Requesting the properties of an unloaded unit will cause systemd to send a +// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's +// properties on UnitNew (as that's the only indication of a new unit coming up +// for the first time), we would enter an infinite loop if we did not attempt +// to detect and ignore these spurious signals. The signal themselves are +// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an +// unloaded unit's signals for a short time after requesting its properties. +// This means that we will miss e.g. a transient unit being restarted +// *immediately* upon failure and also a transient unit being started +// immediately after requesting its status (with systemctl status, for example, +// because this causes a UnitNew signal to be sent which then causes us to fetch +// the properties). + +func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { + t, ok := c.subscriber.ignore[path] + return ok && t >= time.Now().UnixNano() +} + +func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { + c.cleanIgnore() + + // unit is unloaded - it will trigger bad systemd dbus behavior + if info["LoadState"].(string) == "not-found" { + c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval + } +} + +// without this, ignore would grow unboundedly over time +func (c *Conn) cleanIgnore() { + now := time.Now().UnixNano() + if c.subscriber.cleanIgnore < now { + c.subscriber.cleanIgnore = now + cleanIgnoreInterval + + for p, t := range c.subscriber.ignore { + if t < now { + delete(c.subscriber.ignore, p) + } + } + } +} diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go new file mode 100644 index 0000000000..2625786052 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go @@ -0,0 +1,32 @@ +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. +type SubscriptionSet struct { + *set + conn *Conn +} + + +func (s *SubscriptionSet) filter(unit string) bool { + return !s.Contains(unit) +} + +// Subscribe starts listening for dbus events for all of the units in the set. +// Returns channels identical to conn.SubscribeUnits. 
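+//
+// A minimal sketch (the unit name is hypothetical):
+//
+//	ss := conn.NewSubscriptionSet()
+//	ss.Add("foo.service")
+//	statusCh, errCh := ss.Subscribe()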
+func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + // TODO: Make fully evented by using systemd 209 with properties changed values + return s.conn.SubscribeUnitsCustom(time.Second, 0, + func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, + func(unit string) bool { return s.filter(unit) }, + ) +} + +// NewSubscriptionSet returns a new subscription set. +func (conn *Conn) NewSubscriptionSet() (*SubscriptionSet) { + return &SubscriptionSet{newSet(), conn} +} diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go new file mode 100644 index 0000000000..db600850c2 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go @@ -0,0 +1,67 @@ +package dbus + +import ( + "testing" + "time" +) + +// TestSubscribeUnit exercises the basics of subscription of a particular unit. +func TestSubscriptionSetUnit(t *testing.T) { + target := "subscribe-events-set.service" + + conn, err := New() + + if err != nil { + t.Fatal(err) + } + + err = conn.Subscribe() + if err != nil { + t.Fatal(err) + } + + subSet := conn.NewSubscriptionSet() + evChan, errChan := subSet.Subscribe() + + subSet.Add(target) + setupUnit(target, conn, t) + + job, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Couldn't start", target) + } + + timeout := make(chan bool, 1) + go func() { + time.Sleep(3 * time.Second) + close(timeout) + }() + + for { + select { + case changes := <-evChan: + tCh, ok := changes[target] + + if !ok { + t.Fatal("Unexpected event %v", changes) + } + + if tCh.ActiveState == "active" && tCh.Name == target { + goto success + } + case err = <-errChan: + t.Fatal(err) + case <-timeout: + t.Fatal("Reached timeout") + } + } + +success: + return +} + + diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go new file mode 100644 index 0000000000..6f4d0b32a6 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go @@ -0,0 +1,90 @@ +package dbus + +import ( + "testing" + "time" +) + +// TestSubscribe exercises the basics of subscription +func TestSubscribe(t *testing.T) { + conn, err := New() + + if err != nil { + t.Fatal(err) + } + + err = conn.Subscribe() + if err != nil { + t.Fatal(err) + } + + err = conn.Unsubscribe() + if err != nil { + t.Fatal(err) + } +} + +// TestSubscribeUnit exercises the basics of subscription of a particular unit. +func TestSubscribeUnit(t *testing.T) { + target := "subscribe-events.service" + + conn, err := New() + + if err != nil { + t.Fatal(err) + } + + err = conn.Subscribe() + if err != nil { + t.Fatal(err) + } + + err = conn.Unsubscribe() + if err != nil { + t.Fatal(err) + } + + evChan, errChan := conn.SubscribeUnits(time.Second) + + setupUnit(target, conn, t) + + job, err := conn.StartUnit(target, "replace") + if err != nil { + t.Fatal(err) + } + + if job != "done" { + t.Fatal("Couldn't start", target) + } + + timeout := make(chan bool, 1) + go func() { + time.Sleep(3 * time.Second) + close(timeout) + }() + + for { + select { + case changes := <-evChan: + tCh, ok := changes[target] + + // Just continue until we see our event. 
+ if !ok { + continue + } + + if tCh.ActiveState == "active" && tCh.Name == target { + goto success + } + case err = <-errChan: + t.Fatal(err) + case <-timeout: + t.Fatal("Reached timeout") + } + } + +success: + return +} + + diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/activation.go b/vendor/src/github.com/coreos/go-systemd/examples/activation/activation.go new file mode 100644 index 0000000000..b3cf70ed84 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/activation.go @@ -0,0 +1,44 @@ +// Activation example used by the activation unit tests. +package main + +import ( + "fmt" + "os" + + "github.com/coreos/go-systemd/activation" +) + +func fixListenPid() { + if os.Getenv("FIX_LISTEN_PID") != "" { + // HACK: real systemd would set LISTEN_PID before exec'ing but + // this is too difficult in golang for the purpose of a test. + // Do not do this in real code. + os.Setenv("LISTEN_PID", fmt.Sprintf("%d", os.Getpid())) + } +} + +func main() { + fixListenPid() + + files := activation.Files(false) + + if len(files) == 0 { + panic("No files") + } + + if os.Getenv("LISTEN_PID") == "" || os.Getenv("LISTEN_FDS") == "" { + panic("Should not unset envs") + } + + files = activation.Files(true) + + if os.Getenv("LISTEN_PID") != "" || os.Getenv("LISTEN_FDS") != "" { + panic("Can not unset envs") + } + + // Write out the expected strings to the two pipes + files[0].Write([]byte("Hello world")) + files[1].Write([]byte("Goodbye world")) + + return +} diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/README.md b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/README.md new file mode 100644 index 0000000000..a350cca5e5 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/README.md @@ -0,0 +1,19 @@ +## socket activated http server + +This is a simple example of using socket activation with systemd to serve a +simple HTTP server on http://127.0.0.1:8076 + +To try it out `go get` the httpserver and run it under the systemd-activate helper + +``` +export GOPATH=`pwd` +go get github.com/coreos/go-systemd/examples/activation/httpserver +sudo /usr/lib/systemd/systemd-activate -l 127.0.0.1:8076 ./bin/httpserver +``` + +Then curl the URL and you will notice that it starts up: + +``` +curl 127.0.0.1:8076 +hello socket activated world! 
+``` diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service new file mode 100644 index 0000000000..c8dea0f6b3 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service @@ -0,0 +1,11 @@ +[Unit] +Description=Hello World HTTP +Requires=network.target +After=multi-user.target + +[Service] +Type=simple +ExecStart=/usr/local/bin/httpserver + +[Install] +WantedBy=multi-user.target diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket new file mode 100644 index 0000000000..723ed7ed92 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket @@ -0,0 +1,5 @@ +[Socket] +ListenStream=127.0.0.1:8076 + +[Install] +WantedBy=sockets.target diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go new file mode 100644 index 0000000000..380c325d61 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go @@ -0,0 +1,26 @@ +package main + +import ( + "io" + "net/http" + + "github.com/coreos/go-systemd/activation" +) + +func HelloServer(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, "hello socket activated world!\n") +} + +func main() { + listeners, err := activation.Listeners(true) + if err != nil { + panic(err) + } + + if len(listeners) != 1 { + panic("Unexpected number of socket activation fds") + } + + http.HandleFunc("/", HelloServer) + http.Serve(listeners[0], nil) +} diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/listen.go b/vendor/src/github.com/coreos/go-systemd/examples/activation/listen.go new file mode 100644 index 0000000000..5850a8b796 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/listen.go @@ -0,0 +1,50 @@ +// Activation example used by the activation unit tests. +package main + +import ( + "fmt" + "os" + + "github.com/coreos/go-systemd/activation" +) + +func fixListenPid() { + if os.Getenv("FIX_LISTEN_PID") != "" { + // HACK: real systemd would set LISTEN_PID before exec'ing but + // this is too difficult in golang for the purpose of a test. + // Do not do this in real code. 
+ os.Setenv("LISTEN_PID", fmt.Sprintf("%d", os.Getpid())) + } +} + +func main() { + fixListenPid() + + listeners, _ := activation.Listeners(false) + + if len(listeners) == 0 { + panic("No listeners") + } + + if os.Getenv("LISTEN_PID") == "" || os.Getenv("LISTEN_FDS") == "" { + panic("Should not unset envs") + } + + listeners, err := activation.Listeners(true) + if err != nil { + panic(err) + } + + if os.Getenv("LISTEN_PID") != "" || os.Getenv("LISTEN_FDS") != "" { + panic("Can not unset envs") + } + + c0, _ := listeners[0].Accept() + c1, _ := listeners[1].Accept() + + // Write out the expected strings to the two pipes + c0.Write([]byte("Hello world")) + c1.Write([]byte("Goodbye world")) + + return +} diff --git a/vendor/src/github.com/coreos/go-systemd/fixtures/start-stop.service b/vendor/src/github.com/coreos/go-systemd/fixtures/start-stop.service new file mode 100644 index 0000000000..a1f8c36773 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/fixtures/start-stop.service @@ -0,0 +1,5 @@ +[Unit] +Description=start stop test + +[Service] +ExecStart=/bin/sleep 400 diff --git a/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service b/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service new file mode 100644 index 0000000000..a1f8c36773 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service @@ -0,0 +1,5 @@ +[Unit] +Description=start stop test + +[Service] +ExecStart=/bin/sleep 400 diff --git a/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events.service b/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events.service new file mode 100644 index 0000000000..a1f8c36773 --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events.service @@ -0,0 +1,5 @@ +[Unit] +Description=start stop test + +[Service] +ExecStart=/bin/sleep 400 diff --git a/vendor/src/github.com/coreos/go-systemd/journal/send.go b/vendor/src/github.com/coreos/go-systemd/journal/send.go new file mode 100644 index 0000000000..a29bcbf0fa --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/journal/send.go @@ -0,0 +1,168 @@ +/* +Copyright 2013 CoreOS Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package journal provides write bindings to the systemd journal +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "syscall" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +var conn net.Conn + +func init() { + var err error + conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") + if err != nil { + conn = nil + } +} + +// Enabled returns true iff the systemd journal is available for logging +func Enabled() bool { + return conn != nil +} + +// Send a message to the systemd journal. vars is a map of journald fields to +// values. 
Fields must be composed of uppercase letters, numbers, and +// underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. +func Send(message string, priority Priority, vars map[string]string) error { + if conn == nil { + return journalError("could not connect to journald socket") + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, err := io.Copy(conn, data) + if err != nil && isSocketSpaceError(err) { + file, err := tempFd() + if err != nil { + return journalError(err.Error()) + } + _, err = io.Copy(file, data) + if err != nil { + return journalError(err.Error()) + } + + rights := syscall.UnixRights(int(file.Fd())) + + /* this connection should always be a UnixConn, but better safe than sorry */ + unixConn, ok := conn.(*net.UnixConn) + if !ok { + return journalError("can't send file through non-Unix connection") + } + unixConn.WriteMsgUnix([]byte{}, rights, nil) + } else if err != nil { + return journalError(err.Error()) + } + return nil +} + +func appendVariable(w io.Writer, name, value string) { + if !validVarName(name) { + journalError("variable name contains invalid character, ignoring") + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintln(w, "%s=%s", name, value) + } +} + +func validVarName(name string) bool { + /* The variable name must be in uppercase and consist only of characters, + * numbers and underscores, and may not begin with an underscore. (from the docs) + */ + + valid := name[0] != '_' + for _, c := range name { + valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' + } + return valid +} + +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok { + return false + } + + sysErr, ok := opErr.Err.(syscall.Errno) + if !ok { + return false + } + + return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS +} + +func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +func journalError(s string) error { + s = "journal error: " + s + fmt.Fprintln(os.Stderr, s) + return errors.New(s) +} diff --git a/vendor/src/github.com/coreos/go-systemd/test b/vendor/src/github.com/coreos/go-systemd/test new file mode 100755 index 0000000000..6e043658ae --- /dev/null +++ b/vendor/src/github.com/coreos/go-systemd/test @@ -0,0 +1,3 @@ +#!/bin/sh -e + +go test -v ./... diff --git a/vendor/src/github.com/godbus/dbus/LICENSE b/vendor/src/github.com/godbus/dbus/LICENSE new file mode 100644 index 0000000000..06b252bcbc --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013, Georg Reinke () +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/godbus/dbus/README.markdown b/vendor/src/github.com/godbus/dbus/README.markdown new file mode 100644 index 0000000000..3ab2116651 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/README.markdown @@ -0,0 +1,38 @@ +dbus +---- + +dbus is a simple library that implements native Go client bindings for the +D-Bus message bus system. + +### Features + +* Complete native implementation of the D-Bus message protocol +* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections) +* Subpackages that help with the introspection / property interfaces + +### Installation + +This packages requires Go 1.1. If you installed it and set up your GOPATH, just run: + +``` +go get github.com/godbus/dbus +``` + +If you want to use the subpackages, you can install them the same way. + +### Usage + +The complete package documentation and some simple examples are available at +[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the +[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory +gives a short overview over the basic usage. + +Please note that the API is considered unstable for now and may change without +further notice. + +### License + +go.dbus is available under the Simplified BSD License; see LICENSE for the full +text. + +Nearly all of the credit for this library goes to github.com/guelfey/go.dbus. 
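+
+### Quick example
+
+The snippet below is a condensed version of the `list-names` example vendored
+under `_examples/`; it connects to the session bus and prints every name
+currently owned on it:
+
+```
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/godbus/dbus"
+)
+
+func main() {
+	conn, err := dbus.SessionBus()
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "Failed to connect to session bus:", err)
+		os.Exit(1)
+	}
+
+	var names []string
+	err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&names)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "Failed to get list of owned names:", err)
+		os.Exit(1)
+	}
+
+	for _, name := range names {
+		fmt.Println(name)
+	}
+}
+```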
diff --git a/vendor/src/github.com/godbus/dbus/_examples/eavesdrop.go b/vendor/src/github.com/godbus/dbus/_examples/eavesdrop.go new file mode 100644 index 0000000000..11deef3cf8 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/_examples/eavesdrop.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "github.com/godbus/dbus" + "os" +) + +func main() { + conn, err := dbus.SessionBus() + if err != nil { + fmt.Fprintln(os.Stderr, "Failed to connect to session bus:", err) + os.Exit(1) + } + + for _, v := range []string{"method_call", "method_return", "error", "signal"} { + call := conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "eavesdrop='true',type='"+v+"'") + if call.Err != nil { + fmt.Fprintln(os.Stderr, "Failed to add match:", call.Err) + os.Exit(1) + } + } + c := make(chan *dbus.Message, 10) + conn.Eavesdrop(c) + fmt.Println("Listening for everything") + for v := range c { + fmt.Println(v) + } +} diff --git a/vendor/src/github.com/godbus/dbus/_examples/introspect.go b/vendor/src/github.com/godbus/dbus/_examples/introspect.go new file mode 100644 index 0000000000..a2af4e5f24 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/_examples/introspect.go @@ -0,0 +1,21 @@ +package main + +import ( + "encoding/json" + "github.com/godbus/dbus" + "github.com/godbus/dbus/introspect" + "os" +) + +func main() { + conn, err := dbus.SessionBus() + if err != nil { + panic(err) + } + node, err := introspect.Call(conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")) + if err != nil { + panic(err) + } + data, _ := json.MarshalIndent(node, "", " ") + os.Stdout.Write(data) +} diff --git a/vendor/src/github.com/godbus/dbus/_examples/list-names.go b/vendor/src/github.com/godbus/dbus/_examples/list-names.go new file mode 100644 index 0000000000..ce1f7ec52e --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/_examples/list-names.go @@ -0,0 +1,27 @@ +package main + +import ( + "fmt" + "github.com/godbus/dbus" + "os" +) + +func main() { + conn, err := dbus.SessionBus() + if err != nil { + fmt.Fprintln(os.Stderr, "Failed to connect to session bus:", err) + os.Exit(1) + } + + var s []string + err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&s) + if err != nil { + fmt.Fprintln(os.Stderr, "Failed to get list of owned names:", err) + os.Exit(1) + } + + fmt.Println("Currently owned names on the session bus:") + for _, v := range s { + fmt.Println(v) + } +} diff --git a/vendor/src/github.com/godbus/dbus/_examples/notification.go b/vendor/src/github.com/godbus/dbus/_examples/notification.go new file mode 100644 index 0000000000..5fe11d04c4 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/_examples/notification.go @@ -0,0 +1,17 @@ +package main + +import "github.com/godbus/dbus" + +func main() { + conn, err := dbus.SessionBus() + if err != nil { + panic(err) + } + obj := conn.Object("org.freedesktop.Notifications", "/org/freedesktop/Notifications") + call := obj.Call("org.freedesktop.Notifications.Notify", 0, "", uint32(0), + "", "Test", "This is a test of the DBus bindings for go.", []string{}, + map[string]dbus.Variant{}, int32(5000)) + if call.Err != nil { + panic(call.Err) + } +} diff --git a/vendor/src/github.com/godbus/dbus/_examples/prop.go b/vendor/src/github.com/godbus/dbus/_examples/prop.go new file mode 100644 index 0000000000..e3408c53e9 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/_examples/prop.go @@ -0,0 +1,68 @@ +package main + +import ( + "fmt" + "github.com/godbus/dbus" + "github.com/godbus/dbus/introspect" + "github.com/godbus/dbus/prop" 
+ "os" +) + +type foo string + +func (f foo) Foo() (string, *dbus.Error) { + fmt.Println(f) + return string(f), nil +} + +func main() { + conn, err := dbus.SessionBus() + if err != nil { + panic(err) + } + reply, err := conn.RequestName("com.github.guelfey.Demo", + dbus.NameFlagDoNotQueue) + if err != nil { + panic(err) + } + if reply != dbus.RequestNameReplyPrimaryOwner { + fmt.Fprintln(os.Stderr, "name already taken") + os.Exit(1) + } + propsSpec := map[string]map[string]*prop.Prop{ + "com.github.guelfey.Demo": { + "SomeInt": { + int32(0), + true, + prop.EmitTrue, + func(c *prop.Change) *dbus.Error { + fmt.Println(c.Name, "changed to", c.Value) + return nil + }, + }, + }, + } + f := foo("Bar") + conn.Export(f, "/com/github/guelfey/Demo", "com.github.guelfey.Demo") + props := prop.New(conn, "/com/github/guelfey/Demo", propsSpec) + n := &introspect.Node{ + Name: "/com/github/guelfey/Demo", + Interfaces: []introspect.Interface{ + introspect.IntrospectData, + prop.IntrospectData, + { + Name: "com.github.guelfey.Demo", + Methods: introspect.Methods(f), + Properties: props.Introspection("com.github.guelfey.Demo"), + }, + }, + } + conn.Export(introspect.NewIntrospectable(n), "/com/github/guelfey/Demo", + "org.freedesktop.DBus.Introspectable") + fmt.Println("Listening on com.github.guelfey.Demo / /com/github/guelfey/Demo ...") + + c := make(chan *dbus.Signal) + conn.Signal(c) + for _ = range c { + } +} diff --git a/vendor/src/github.com/godbus/dbus/_examples/server.go b/vendor/src/github.com/godbus/dbus/_examples/server.go new file mode 100644 index 0000000000..32b7b291c7 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/_examples/server.go @@ -0,0 +1,45 @@ +package main + +import ( + "fmt" + "github.com/godbus/dbus" + "github.com/godbus/dbus/introspect" + "os" +) + +const intro = ` + + + + + + ` + introspect.IntrospectDataString + ` ` + +type foo string + +func (f foo) Foo() (string, *dbus.Error) { + fmt.Println(f) + return string(f), nil +} + +func main() { + conn, err := dbus.SessionBus() + if err != nil { + panic(err) + } + reply, err := conn.RequestName("com.github.guelfey.Demo", + dbus.NameFlagDoNotQueue) + if err != nil { + panic(err) + } + if reply != dbus.RequestNameReplyPrimaryOwner { + fmt.Fprintln(os.Stderr, "name already taken") + os.Exit(1) + } + f := foo("Bar!") + conn.Export(f, "/com/github/guelfey/Demo", "com.github.guelfey.Demo") + conn.Export(introspect.Introspectable(intro), "/com/github/guelfey/Demo", + "org.freedesktop.DBus.Introspectable") + fmt.Println("Listening on com.github.guelfey.Demo / /com/github/guelfey/Demo ...") + select {} +} diff --git a/vendor/src/github.com/godbus/dbus/_examples/signal.go b/vendor/src/github.com/godbus/dbus/_examples/signal.go new file mode 100644 index 0000000000..8f3f809759 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/_examples/signal.go @@ -0,0 +1,24 @@ +package main + +import ( + "fmt" + "github.com/godbus/dbus" + "os" +) + +func main() { + conn, err := dbus.SessionBus() + if err != nil { + fmt.Fprintln(os.Stderr, "Failed to connect to session bus:", err) + os.Exit(1) + } + + conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',path='/org/freedesktop/DBus',interface='org.freedesktop.DBus',sender='org.freedesktop.DBus'") + + c := make(chan *dbus.Signal, 10) + conn.Signal(c) + for v := range c { + fmt.Println(v) + } +} diff --git a/vendor/src/github.com/godbus/dbus/auth.go b/vendor/src/github.com/godbus/dbus/auth.go new file mode 100644 index 0000000000..98017b693e --- /dev/null +++ 
b/vendor/src/github.com/godbus/dbus/auth.go @@ -0,0 +1,253 @@ +package dbus + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" +) + +// AuthStatus represents the Status of an authentication mechanism. +type AuthStatus byte + +const ( + // AuthOk signals that authentication is finished; the next command + // from the server should be an OK. + AuthOk AuthStatus = iota + + // AuthContinue signals that additional data is needed; the next command + // from the server should be a DATA. + AuthContinue + + // AuthError signals an error; the server sent invalid data or some + // other unexpected thing happened and the current authentication + // process should be aborted. + AuthError +) + +type authState byte + +const ( + waitingForData authState = iota + waitingForOk + waitingForReject +) + +// Auth defines the behaviour of an authentication mechanism. +type Auth interface { + // Return the name of the mechnism, the argument to the first AUTH command + // and the next status. + FirstData() (name, resp []byte, status AuthStatus) + + // Process the given DATA command, and return the argument to the DATA + // command and the next status. If len(resp) == 0, no DATA command is sent. + HandleData(data []byte) (resp []byte, status AuthStatus) +} + +// Auth authenticates the connection, trying the given list of authentication +// mechanisms (in that order). If nil is passed, the EXTERNAL and +// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private +// connections, this method must be called before sending any messages to the +// bus. Auth must not be called on shared connections. +func (conn *Conn) Auth(methods []Auth) error { + if methods == nil { + uid := strconv.Itoa(os.Getuid()) + methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())} + } + in := bufio.NewReader(conn.transport) + err := conn.transport.SendNullByte() + if err != nil { + return err + } + err = authWriteLine(conn.transport, []byte("AUTH")) + if err != nil { + return err + } + s, err := authReadLine(in) + if err != nil { + return err + } + if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) { + return errors.New("dbus: authentication protocol error") + } + s = s[1:] + for _, v := range s { + for _, m := range methods { + if name, data, status := m.FirstData(); bytes.Equal(v, name) { + var ok bool + err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data) + if err != nil { + return err + } + switch status { + case AuthOk: + err, ok = conn.tryAuth(m, waitingForOk, in) + case AuthContinue: + err, ok = conn.tryAuth(m, waitingForData, in) + default: + panic("dbus: invalid authentication status") + } + if err != nil { + return err + } + if ok { + if conn.transport.SupportsUnixFDs() { + err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD")) + if err != nil { + return err + } + line, err := authReadLine(in) + if err != nil { + return err + } + switch { + case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")): + conn.EnableUnixFDs() + conn.unixFD = true + case bytes.Equal(line[0], []byte("ERROR")): + default: + return errors.New("dbus: authentication protocol error") + } + } + err = authWriteLine(conn.transport, []byte("BEGIN")) + if err != nil { + return err + } + go conn.inWorker() + go conn.outWorker() + return nil + } + } + } + } + return errors.New("dbus: authentication failed") +} + +// tryAuth tries to authenticate with m as the mechanism, using state as the +// initial authState and in for reading input. 
It returns (nil, true) on +// success, (nil, false) on a REJECTED and (someErr, false) if some other +// error occured. +func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) { + for { + s, err := authReadLine(in) + if err != nil { + return err, false + } + switch { + case state == waitingForData && string(s[0]) == "DATA": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + continue + } + data, status := m.HandleData(s[1]) + switch status { + case AuthOk, AuthContinue: + if len(data) != 0 { + err = authWriteLine(conn.transport, []byte("DATA"), data) + if err != nil { + return err, false + } + } + if status == AuthOk { + state = waitingForOk + } + case AuthError: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + } + case state == waitingForData && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForData && string(s[0]) == "ERROR": + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + case state == waitingForData && string(s[0]) == "OK": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + } + conn.uuid = string(s[1]) + return nil, true + case state == waitingForData: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + case state == waitingForOk && string(s[0]) == "OK": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + } + conn.uuid = string(s[1]) + return nil, true + case state == waitingForOk && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForOk && (string(s[0]) == "DATA" || + string(s[0]) == "ERROR"): + + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + case state == waitingForOk: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + case state == waitingForReject && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForReject: + return errors.New("dbus: authentication protocol error"), false + default: + panic("dbus: invalid auth state") + } + } +} + +// authReadLine reads a line and separates it into its fields. +func authReadLine(in *bufio.Reader) ([][]byte, error) { + data, err := in.ReadBytes('\n') + if err != nil { + return nil, err + } + data = bytes.TrimSuffix(data, []byte("\r\n")) + return bytes.Split(data, []byte{' '}), nil +} + +// authWriteLine writes the given line in the authentication protocol format +// (elements of data separated by a " " and terminated by "\r\n"). +func authWriteLine(out io.Writer, data ...[]byte) error { + buf := make([]byte, 0) + for i, v := range data { + buf = append(buf, v...) 
+ if i != len(data)-1 { + buf = append(buf, ' ') + } + } + buf = append(buf, '\r') + buf = append(buf, '\n') + n, err := out.Write(buf) + if err != nil { + return err + } + if n != len(buf) { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/src/github.com/godbus/dbus/auth_external.go b/vendor/src/github.com/godbus/dbus/auth_external.go new file mode 100644 index 0000000000..7e376d3ef6 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/auth_external.go @@ -0,0 +1,26 @@ +package dbus + +import ( + "encoding/hex" +) + +// AuthExternal returns an Auth that authenticates as the given user with the +// EXTERNAL mechanism. +func AuthExternal(user string) Auth { + return authExternal{user} +} + +// AuthExternal implements the EXTERNAL authentication mechanism. +type authExternal struct { + user string +} + +func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) { + b := make([]byte, 2*len(a.user)) + hex.Encode(b, []byte(a.user)) + return []byte("EXTERNAL"), b, AuthOk +} + +func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) { + return nil, AuthError +} diff --git a/vendor/src/github.com/godbus/dbus/auth_sha1.go b/vendor/src/github.com/godbus/dbus/auth_sha1.go new file mode 100644 index 0000000000..df15b46119 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/auth_sha1.go @@ -0,0 +1,102 @@ +package dbus + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/hex" + "os" +) + +// AuthCookieSha1 returns an Auth that authenticates as the given user with the +// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home +// directory of the user. +func AuthCookieSha1(user, home string) Auth { + return authCookieSha1{user, home} +} + +type authCookieSha1 struct { + user, home string +} + +func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) { + b := make([]byte, 2*len(a.user)) + hex.Encode(b, []byte(a.user)) + return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue +} + +func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) { + challenge := make([]byte, len(data)/2) + _, err := hex.Decode(challenge, data) + if err != nil { + return nil, AuthError + } + b := bytes.Split(challenge, []byte{' '}) + if len(b) != 3 { + return nil, AuthError + } + context := b[0] + id := b[1] + svchallenge := b[2] + cookie := a.getCookie(context, id) + if cookie == nil { + return nil, AuthError + } + clchallenge := a.generateChallenge() + if clchallenge == nil { + return nil, AuthError + } + hash := sha1.New() + hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'})) + hexhash := make([]byte, 2*hash.Size()) + hex.Encode(hexhash, hash.Sum(nil)) + data = append(clchallenge, ' ') + data = append(data, hexhash...) + resp := make([]byte, 2*len(data)) + hex.Encode(resp, data) + return resp, AuthOk +} + +// getCookie searches for the cookie identified by id in context and returns +// the cookie content or nil. (Since HandleData can't return a specific error, +// but only whether an error occured, this function also doesn't bother to +// return an error.) 
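+//
+// Each keyring line consists of three space-separated fields; roughly (the
+// values below are invented):
+//
+//	42 1391700000 75d16a59b4e9f4f98046ab17ef6c77e9
+//
+// The first field is the cookie ID matched against id, the second is a
+// creation timestamp which this implementation ignores, and the third is the
+// cookie value fed into the challenge-response.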
+func (a authCookieSha1) getCookie(context, id []byte) []byte { + file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context)) + if err != nil { + return nil + } + defer file.Close() + rd := bufio.NewReader(file) + for { + line, err := rd.ReadBytes('\n') + if err != nil { + return nil + } + line = line[:len(line)-1] + b := bytes.Split(line, []byte{' '}) + if len(b) != 3 { + return nil + } + if bytes.Equal(b[0], id) { + return b[2] + } + } +} + +// generateChallenge returns a random, hex-encoded challenge, or nil on error +// (see above). +func (a authCookieSha1) generateChallenge() []byte { + b := make([]byte, 16) + n, err := rand.Read(b) + if err != nil { + return nil + } + if n != 16 { + return nil + } + enc := make([]byte, 32) + hex.Encode(enc, b) + return enc +} diff --git a/vendor/src/github.com/godbus/dbus/call.go b/vendor/src/github.com/godbus/dbus/call.go new file mode 100644 index 0000000000..1d2fbc7efd --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/call.go @@ -0,0 +1,147 @@ +package dbus + +import ( + "errors" + "strings" +) + +// Call represents a pending or completed method call. +type Call struct { + Destination string + Path ObjectPath + Method string + Args []interface{} + + // Strobes when the call is complete. + Done chan *Call + + // After completion, the error status. If this is non-nil, it may be an + // error message from the peer (with Error as its type) or some other error. + Err error + + // Holds the response once the call is done. + Body []interface{} +} + +var errSignature = errors.New("dbus: mismatched signature") + +// Store stores the body of the reply into the provided pointers. It returns +// an error if the signatures of the body and retvalues don't match, or if +// the error status is not nil. +func (c *Call) Store(retvalues ...interface{}) error { + if c.Err != nil { + return c.Err + } + + return Store(c.Body, retvalues...) +} + +// Object represents a remote object on which methods can be invoked. +type Object struct { + conn *Conn + dest string + path ObjectPath +} + +// Call calls a method with (*Object).Go and waits for its reply. +func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call { + return <-o.Go(method, flags, make(chan *Call, 1), args...).Done +} + +// GetProperty calls org.freedesktop.DBus.Properties.GetProperty on the given +// object. The property name must be given in interface.member notation. +func (o *Object) GetProperty(p string) (Variant, error) { + idx := strings.LastIndex(p, ".") + if idx == -1 || idx+1 == len(p) { + return Variant{}, errors.New("dbus: invalid property " + p) + } + + iface := p[:idx] + prop := p[idx+1:] + + result := Variant{} + err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result) + + if err != nil { + return Variant{}, err + } + + return result, nil +} + +// Go calls a method with the given arguments asynchronously. It returns a +// Call structure representing this method call. The passed channel will +// return the same value once the call is done. If ch is nil, a new channel +// will be allocated. Otherwise, ch has to be buffered or Go will panic. +// +// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure +// is returned of which only the Err member is valid. +// +// If the method parameter contains a dot ('.'), the part before the last dot +// specifies the interface on which the method is called. 
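+//
+// An illustrative asynchronous call (the channel size is arbitrary, but the
+// channel must be buffered):
+//
+//	obj := conn.BusObject()
+//	ch := make(chan *Call, 1)
+//	obj.Go("org.freedesktop.DBus.ListNames", 0, ch)
+//	call := <-ch
+//	var names []string
+//	if err := call.Store(&names); err != nil {
+//		// handle the error
+//	}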
+func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call { + iface := "" + i := strings.LastIndex(method, ".") + if i != -1 { + iface = method[:i] + } + method = method[i+1:] + msg := new(Message) + msg.Type = TypeMethodCall + msg.serial = o.conn.getSerial() + msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected) + msg.Headers = make(map[HeaderField]Variant) + msg.Headers[FieldPath] = MakeVariant(o.path) + msg.Headers[FieldDestination] = MakeVariant(o.dest) + msg.Headers[FieldMember] = MakeVariant(method) + if iface != "" { + msg.Headers[FieldInterface] = MakeVariant(iface) + } + msg.Body = args + if len(args) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...)) + } + if msg.Flags&FlagNoReplyExpected == 0 { + if ch == nil { + ch = make(chan *Call, 10) + } else if cap(ch) == 0 { + panic("dbus: unbuffered channel passed to (*Object).Go") + } + call := &Call{ + Destination: o.dest, + Path: o.path, + Method: method, + Args: args, + Done: ch, + } + o.conn.callsLck.Lock() + o.conn.calls[msg.serial] = call + o.conn.callsLck.Unlock() + o.conn.outLck.RLock() + if o.conn.closed { + call.Err = ErrClosed + call.Done <- call + } else { + o.conn.out <- msg + } + o.conn.outLck.RUnlock() + return call + } + o.conn.outLck.RLock() + defer o.conn.outLck.RUnlock() + if o.conn.closed { + return &Call{Err: ErrClosed} + } + o.conn.out <- msg + return &Call{Err: nil} +} + +// Destination returns the destination that calls on o are sent to. +func (o *Object) Destination() string { + return o.dest +} + +// Path returns the path that calls on o are sent to. +func (o *Object) Path() ObjectPath { + return o.path +} diff --git a/vendor/src/github.com/godbus/dbus/conn.go b/vendor/src/github.com/godbus/dbus/conn.go new file mode 100644 index 0000000000..75dd22652a --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/conn.go @@ -0,0 +1,601 @@ +package dbus + +import ( + "errors" + "io" + "os" + "reflect" + "strings" + "sync" +) + +const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket" + +var ( + systemBus *Conn + systemBusLck sync.Mutex + sessionBus *Conn + sessionBusLck sync.Mutex +) + +// ErrClosed is the error returned by calls on a closed connection. +var ErrClosed = errors.New("dbus: connection closed by user") + +// Conn represents a connection to a message bus (usually, the system or +// session bus). +// +// Connections are either shared or private. Shared connections +// are shared between calls to the functions that return them. As a result, +// the methods Close, Auth and Hello must not be called on them. +// +// Multiple goroutines may invoke methods on a connection simultaneously. +type Conn struct { + transport + + busObj *Object + unixFD bool + uuid string + + names []string + namesLck sync.RWMutex + + serialLck sync.Mutex + nextSerial uint32 + serialUsed map[uint32]bool + + calls map[uint32]*Call + callsLck sync.RWMutex + + handlers map[ObjectPath]map[string]interface{} + handlersLck sync.RWMutex + + out chan *Message + closed bool + outLck sync.RWMutex + + signals []chan<- *Signal + signalsLck sync.Mutex + + eavesdropped chan<- *Message + eavesdroppedLck sync.Mutex +} + +// SessionBus returns a shared connection to the session bus, connecting to it +// if not already done. 
+func SessionBus() (conn *Conn, err error) { + sessionBusLck.Lock() + defer sessionBusLck.Unlock() + if sessionBus != nil { + return sessionBus, nil + } + defer func() { + if conn != nil { + sessionBus = conn + } + }() + conn, err = SessionBusPrivate() + if err != nil { + return + } + if err = conn.Auth(nil); err != nil { + conn.Close() + conn = nil + return + } + if err = conn.Hello(); err != nil { + conn.Close() + conn = nil + } + return +} + +// SessionBusPrivate returns a new private connection to the session bus. +func SessionBusPrivate() (*Conn, error) { + address := os.Getenv("DBUS_SESSION_BUS_ADDRESS") + if address != "" && address != "autolaunch:" { + return Dial(address) + } + + return sessionBusPlatform() +} + +// SystemBus returns a shared connection to the system bus, connecting to it if +// not already done. +func SystemBus() (conn *Conn, err error) { + systemBusLck.Lock() + defer systemBusLck.Unlock() + if systemBus != nil { + return systemBus, nil + } + defer func() { + if conn != nil { + systemBus = conn + } + }() + conn, err = SystemBusPrivate() + if err != nil { + return + } + if err = conn.Auth(nil); err != nil { + conn.Close() + conn = nil + return + } + if err = conn.Hello(); err != nil { + conn.Close() + conn = nil + } + return +} + +// SystemBusPrivate returns a new private connection to the system bus. +func SystemBusPrivate() (*Conn, error) { + address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") + if address != "" { + return Dial(address) + } + return Dial(defaultSystemBusAddress) +} + +// Dial establishes a new private connection to the message bus specified by address. +func Dial(address string) (*Conn, error) { + tr, err := getTransport(address) + if err != nil { + return nil, err + } + return newConn(tr) +} + +// NewConn creates a new private *Conn from an already established connection. +func NewConn(conn io.ReadWriteCloser) (*Conn, error) { + return newConn(genericTransport{conn}) +} + +// newConn creates a new *Conn from a transport. +func newConn(tr transport) (*Conn, error) { + conn := new(Conn) + conn.transport = tr + conn.calls = make(map[uint32]*Call) + conn.out = make(chan *Message, 10) + conn.handlers = make(map[ObjectPath]map[string]interface{}) + conn.nextSerial = 1 + conn.serialUsed = map[uint32]bool{0: true} + conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") + return conn, nil +} + +// BusObject returns the object owned by the bus daemon which handles +// administrative requests. +func (conn *Conn) BusObject() *Object { + return conn.busObj +} + +// Close closes the connection. Any blocked operations will return with errors +// and the channels passed to Eavesdrop and Signal are closed. This method must +// not be called on shared connections. +func (conn *Conn) Close() error { + conn.outLck.Lock() + close(conn.out) + conn.closed = true + conn.outLck.Unlock() + conn.signalsLck.Lock() + for _, ch := range conn.signals { + close(ch) + } + conn.signalsLck.Unlock() + conn.eavesdroppedLck.Lock() + if conn.eavesdropped != nil { + close(conn.eavesdropped) + } + conn.eavesdroppedLck.Unlock() + return conn.transport.Close() +} + +// Eavesdrop causes conn to send all incoming messages to the given channel +// without further processing. Method replies, errors and signals will not be +// sent to the appropiate channels and method calls will not be handled. If nil +// is passed, the normal behaviour is restored. 
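+//
+// A short sketch, mirroring the eavesdrop example shipped under _examples/
+// (note the buffering caveat below):
+//
+//	msgs := make(chan *Message, 10)
+//	conn.Eavesdrop(msgs)
+//	for msg := range msgs {
+//		fmt.Println(msg)
+//	}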
+// +// The caller has to make sure that ch is sufficiently buffered; +// if a message arrives when a write to ch is not possible, the message is +// discarded. +func (conn *Conn) Eavesdrop(ch chan<- *Message) { + conn.eavesdroppedLck.Lock() + conn.eavesdropped = ch + conn.eavesdroppedLck.Unlock() +} + +// getSerial returns an unused serial. +func (conn *Conn) getSerial() uint32 { + conn.serialLck.Lock() + defer conn.serialLck.Unlock() + n := conn.nextSerial + for conn.serialUsed[n] { + n++ + } + conn.serialUsed[n] = true + conn.nextSerial = n + 1 + return n +} + +// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be +// called after authentication, but before sending any other messages to the +// bus. Hello must not be called for shared connections. +func (conn *Conn) Hello() error { + var s string + err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s) + if err != nil { + return err + } + conn.namesLck.Lock() + conn.names = make([]string, 1) + conn.names[0] = s + conn.namesLck.Unlock() + return nil +} + +// inWorker runs in an own goroutine, reading incoming messages from the +// transport and dispatching them appropiately. +func (conn *Conn) inWorker() { + for { + msg, err := conn.ReadMessage() + if err == nil { + conn.eavesdroppedLck.Lock() + if conn.eavesdropped != nil { + select { + case conn.eavesdropped <- msg: + default: + } + conn.eavesdroppedLck.Unlock() + continue + } + conn.eavesdroppedLck.Unlock() + dest, _ := msg.Headers[FieldDestination].value.(string) + found := false + if dest == "" { + found = true + } else { + conn.namesLck.RLock() + if len(conn.names) == 0 { + found = true + } + for _, v := range conn.names { + if dest == v { + found = true + break + } + } + conn.namesLck.RUnlock() + } + if !found { + // Eavesdropped a message, but no channel for it is registered. + // Ignore it. + continue + } + switch msg.Type { + case TypeMethodReply, TypeError: + serial := msg.Headers[FieldReplySerial].value.(uint32) + conn.callsLck.Lock() + if c, ok := conn.calls[serial]; ok { + if msg.Type == TypeError { + name, _ := msg.Headers[FieldErrorName].value.(string) + c.Err = Error{name, msg.Body} + } else { + c.Body = msg.Body + } + c.Done <- c + conn.serialLck.Lock() + delete(conn.serialUsed, serial) + conn.serialLck.Unlock() + delete(conn.calls, serial) + } + conn.callsLck.Unlock() + case TypeSignal: + iface := msg.Headers[FieldInterface].value.(string) + member := msg.Headers[FieldMember].value.(string) + // as per http://dbus.freedesktop.org/doc/dbus-specification.html , + // sender is optional for signals. + sender, _ := msg.Headers[FieldSender].value.(string) + if iface == "org.freedesktop.DBus" && member == "NameLost" && + sender == "org.freedesktop.DBus" { + + name, _ := msg.Body[0].(string) + conn.namesLck.Lock() + for i, v := range conn.names { + if v == name { + copy(conn.names[i:], conn.names[i+1:]) + conn.names = conn.names[:len(conn.names)-1] + } + } + conn.namesLck.Unlock() + } + signal := &Signal{ + Sender: sender, + Path: msg.Headers[FieldPath].value.(ObjectPath), + Name: iface + "." 
+ member, + Body: msg.Body, + } + conn.signalsLck.Lock() + for _, ch := range conn.signals { + // don't block trying to send a signal + select { + case ch <- signal: + default: + } + } + conn.signalsLck.Unlock() + case TypeMethodCall: + go conn.handleCall(msg) + } + } else if _, ok := err.(InvalidMessageError); !ok { + // Some read error occured (usually EOF); we can't really do + // anything but to shut down all stuff and returns errors to all + // pending replies. + conn.Close() + conn.callsLck.RLock() + for _, v := range conn.calls { + v.Err = err + v.Done <- v + } + conn.callsLck.RUnlock() + return + } + // invalid messages are ignored + } +} + +// Names returns the list of all names that are currently owned by this +// connection. The slice is always at least one element long, the first element +// being the unique name of the connection. +func (conn *Conn) Names() []string { + conn.namesLck.RLock() + // copy the slice so it can't be modified + s := make([]string, len(conn.names)) + copy(s, conn.names) + conn.namesLck.RUnlock() + return s +} + +// Object returns the object identified by the given destination name and path. +func (conn *Conn) Object(dest string, path ObjectPath) *Object { + return &Object{conn, dest, path} +} + +// outWorker runs in an own goroutine, encoding and sending messages that are +// sent to conn.out. +func (conn *Conn) outWorker() { + for msg := range conn.out { + err := conn.SendMessage(msg) + conn.callsLck.RLock() + if err != nil { + if c := conn.calls[msg.serial]; c != nil { + c.Err = err + c.Done <- c + } + conn.serialLck.Lock() + delete(conn.serialUsed, msg.serial) + conn.serialLck.Unlock() + } else if msg.Type != TypeMethodCall { + conn.serialLck.Lock() + delete(conn.serialUsed, msg.serial) + conn.serialLck.Unlock() + } + conn.callsLck.RUnlock() + } +} + +// Send sends the given message to the message bus. You usually don't need to +// use this; use the higher-level equivalents (Call / Go, Emit and Export) +// instead. If msg is a method call and NoReplyExpected is not set, a non-nil +// call is returned and the same value is sent to ch (which must be buffered) +// once the call is complete. Otherwise, ch is ignored and a Call structure is +// returned of which only the Err member is valid. +func (conn *Conn) Send(msg *Message, ch chan *Call) *Call { + var call *Call + + msg.serial = conn.getSerial() + if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 { + if ch == nil { + ch = make(chan *Call, 5) + } else if cap(ch) == 0 { + panic("dbus: unbuffered channel passed to (*Conn).Send") + } + call = new(Call) + call.Destination, _ = msg.Headers[FieldDestination].value.(string) + call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath) + iface, _ := msg.Headers[FieldInterface].value.(string) + member, _ := msg.Headers[FieldMember].value.(string) + call.Method = iface + "." + member + call.Args = msg.Body + call.Done = ch + conn.callsLck.Lock() + conn.calls[msg.serial] = call + conn.callsLck.Unlock() + conn.outLck.RLock() + if conn.closed { + call.Err = ErrClosed + call.Done <- call + } else { + conn.out <- msg + } + conn.outLck.RUnlock() + } else { + conn.outLck.RLock() + if conn.closed { + call = &Call{Err: ErrClosed} + } else { + conn.out <- msg + call = &Call{Err: nil} + } + conn.outLck.RUnlock() + } + return call +} + +// sendError creates an error message corresponding to the parameters and sends +// it to conn.out. 
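+// The serial argument is the serial number of the message being answered; it
+// becomes the REPLY_SERIAL header of the generated error message.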
+func (conn *Conn) sendError(e Error, dest string, serial uint32) { + msg := new(Message) + msg.Type = TypeError + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + if dest != "" { + msg.Headers[FieldDestination] = MakeVariant(dest) + } + msg.Headers[FieldErrorName] = MakeVariant(e.Name) + msg.Headers[FieldReplySerial] = MakeVariant(serial) + msg.Body = e.Body + if len(e.Body) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- msg + } + conn.outLck.RUnlock() +} + +// sendReply creates a method reply message corresponding to the parameters and +// sends it to conn.out. +func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { + msg := new(Message) + msg.Type = TypeMethodReply + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + if dest != "" { + msg.Headers[FieldDestination] = MakeVariant(dest) + } + msg.Headers[FieldReplySerial] = MakeVariant(serial) + msg.Body = values + if len(values) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- msg + } + conn.outLck.RUnlock() +} + +// Signal registers the given channel to be passed all received signal messages. +// The caller has to make sure that ch is sufficiently buffered; if a message +// arrives when a write to c is not possible, it is discarded. +// +// Multiple of these channels can be registered at the same time. Passing a +// channel that already is registered will remove it from the list of the +// registered channels. +// +// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a +// channel for eavesdropped messages, this channel receives all signals, and +// none of the channels passed to Signal will receive any signals. +func (conn *Conn) Signal(ch chan<- *Signal) { + conn.signalsLck.Lock() + conn.signals = append(conn.signals, ch) + conn.signalsLck.Unlock() +} + +// SupportsUnixFDs returns whether the underlying transport supports passing of +// unix file descriptors. If this is false, method calls containing unix file +// descriptors will return an error and emitted signals containing them will +// not be sent. +func (conn *Conn) SupportsUnixFDs() bool { + return conn.unixFD +} + +// Error represents a D-Bus message of type Error. +type Error struct { + Name string + Body []interface{} +} + +func (e Error) Error() string { + if len(e.Body) >= 1 { + s, ok := e.Body[0].(string) + if ok { + return s + } + } + return e.Name +} + +// Signal represents a D-Bus message of type Signal. The name member is given in +// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost. +type Signal struct { + Sender string + Path ObjectPath + Name string + Body []interface{} +} + +// transport is a D-Bus transport. +type transport interface { + // Read and Write raw data (for example, for the authentication protocol). + io.ReadWriteCloser + + // Send the initial null byte used for the EXTERNAL mechanism. + SendNullByte() error + + // Returns whether this transport supports passing Unix FDs. + SupportsUnixFDs() bool + + // Signal the transport that Unix FD passing is enabled for this connection. + EnableUnixFDs() + + // Read / send a message, handling things like Unix FDs. 
+ ReadMessage() (*Message, error) + SendMessage(*Message) error +} + +func getTransport(address string) (transport, error) { + var err error + var t transport + + m := map[string]func(string) (transport, error){ + "unix": newUnixTransport, + } + addresses := strings.Split(address, ";") + for _, v := range addresses { + i := strings.IndexRune(v, ':') + if i == -1 { + err = errors.New("dbus: invalid bus address (no transport)") + continue + } + f := m[v[:i]] + if f == nil { + err = errors.New("dbus: invalid bus address (invalid or unsupported transport)") + } + t, err = f(v[i+1:]) + if err == nil { + return t, nil + } + } + return nil, err +} + +// dereferenceAll returns a slice that, assuming that vs is a slice of pointers +// of arbitrary types, containes the values that are obtained from dereferencing +// all elements in vs. +func dereferenceAll(vs []interface{}) []interface{} { + for i := range vs { + v := reflect.ValueOf(vs[i]) + v = v.Elem() + vs[i] = v.Interface() + } + return vs +} + +// getKey gets a key from a the list of keys. Returns "" on error / not found... +func getKey(s, key string) string { + i := strings.Index(s, key) + if i == -1 { + return "" + } + if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' { + return "" + } + j := strings.Index(s, ",") + if j == -1 { + j = len(s) + } + return s[i+len(key)+1 : j] +} diff --git a/vendor/src/github.com/godbus/dbus/conn_darwin.go b/vendor/src/github.com/godbus/dbus/conn_darwin.go new file mode 100644 index 0000000000..b67bb1b81d --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/conn_darwin.go @@ -0,0 +1,21 @@ +package dbus + +import ( + "errors" + "os/exec" +) + +func sessionBusPlatform() (*Conn, error) { + cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET") + b, err := cmd.CombinedOutput() + + if err != nil { + return nil, err + } + + if len(b) == 0 { + return nil, errors.New("dbus: couldn't determine address of session bus") + } + + return Dial("unix:path=" + string(b[:len(b)-1])) +} diff --git a/vendor/src/github.com/godbus/dbus/conn_other.go b/vendor/src/github.com/godbus/dbus/conn_other.go new file mode 100644 index 0000000000..f74b8758d4 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/conn_other.go @@ -0,0 +1,27 @@ +// +build !darwin + +package dbus + +import ( + "bytes" + "errors" + "os/exec" +) + +func sessionBusPlatform() (*Conn, error) { + cmd := exec.Command("dbus-launch") + b, err := cmd.CombinedOutput() + + if err != nil { + return nil, err + } + + i := bytes.IndexByte(b, '=') + j := bytes.IndexByte(b, '\n') + + if i == -1 || j == -1 { + return nil, errors.New("dbus: couldn't determine address of session bus") + } + + return Dial(string(b[i+1 : j])) +} diff --git a/vendor/src/github.com/godbus/dbus/conn_test.go b/vendor/src/github.com/godbus/dbus/conn_test.go new file mode 100644 index 0000000000..a2b14e8cc4 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/conn_test.go @@ -0,0 +1,199 @@ +package dbus + +import "testing" + +func TestSessionBus(t *testing.T) { + _, err := SessionBus() + if err != nil { + t.Error(err) + } +} + +func TestSystemBus(t *testing.T) { + _, err := SystemBus() + if err != nil { + t.Error(err) + } +} + +func TestSend(t *testing.T) { + bus, err := SessionBus() + if err != nil { + t.Error(err) + } + ch := make(chan *Call, 1) + msg := &Message{ + Type: TypeMethodCall, + Flags: 0, + Headers: map[HeaderField]Variant{ + FieldDestination: MakeVariant(bus.Names()[0]), + FieldPath: MakeVariant(ObjectPath("/org/freedesktop/DBus")), + FieldInterface: 
MakeVariant("org.freedesktop.DBus.Peer"), + FieldMember: MakeVariant("Ping"), + }, + } + call := bus.Send(msg, ch) + <-ch + if call.Err != nil { + t.Error(call.Err) + } +} + +type server struct{} + +func (server) Double(i int64) (int64, *Error) { + return 2 * i, nil +} + +func BenchmarkCall(b *testing.B) { + b.StopTimer() + var s string + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + name := bus.Names()[0] + obj := bus.BusObject() + b.StartTimer() + for i := 0; i < b.N; i++ { + err := obj.Call("org.freedesktop.DBus.GetNameOwner", 0, name).Store(&s) + if err != nil { + b.Fatal(err) + } + if s != name { + b.Errorf("got %s, wanted %s", s, name) + } + } +} + +func BenchmarkCallAsync(b *testing.B) { + b.StopTimer() + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + name := bus.Names()[0] + obj := bus.BusObject() + c := make(chan *Call, 50) + done := make(chan struct{}) + go func() { + for i := 0; i < b.N; i++ { + v := <-c + if v.Err != nil { + b.Error(v.Err) + } + s := v.Body[0].(string) + if s != name { + b.Errorf("got %s, wanted %s", s, name) + } + } + close(done) + }() + b.StartTimer() + for i := 0; i < b.N; i++ { + obj.Go("org.freedesktop.DBus.GetNameOwner", 0, c, name) + } + <-done +} + +func BenchmarkServe(b *testing.B) { + b.StopTimer() + srv, err := SessionBus() + if err != nil { + b.Fatal(err) + } + cli, err := SessionBusPrivate() + if err != nil { + b.Fatal(err) + } + if err = cli.Auth(nil); err != nil { + b.Fatal(err) + } + if err = cli.Hello(); err != nil { + b.Fatal(err) + } + benchmarkServe(b, srv, cli) +} + +func BenchmarkServeAsync(b *testing.B) { + b.StopTimer() + srv, err := SessionBus() + if err != nil { + b.Fatal(err) + } + cli, err := SessionBusPrivate() + if err != nil { + b.Fatal(err) + } + if err = cli.Auth(nil); err != nil { + b.Fatal(err) + } + if err = cli.Hello(); err != nil { + b.Fatal(err) + } + benchmarkServeAsync(b, srv, cli) +} + +func BenchmarkServeSameConn(b *testing.B) { + b.StopTimer() + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + + benchmarkServe(b, bus, bus) +} + +func BenchmarkServeSameConnAsync(b *testing.B) { + b.StopTimer() + bus, err := SessionBus() + if err != nil { + b.Fatal(err) + } + + benchmarkServeAsync(b, bus, bus) +} + +func benchmarkServe(b *testing.B, srv, cli *Conn) { + var r int64 + var err error + dest := srv.Names()[0] + srv.Export(server{}, "/org/guelfey/DBus/Test", "org.guelfey.DBus.Test") + obj := cli.Object(dest, "/org/guelfey/DBus/Test") + b.StartTimer() + for i := 0; i < b.N; i++ { + err = obj.Call("org.guelfey.DBus.Test.Double", 0, int64(i)).Store(&r) + if err != nil { + b.Fatal(err) + } + if r != 2*int64(i) { + b.Errorf("got %d, wanted %d", r, 2*int64(i)) + } + } +} + +func benchmarkServeAsync(b *testing.B, srv, cli *Conn) { + dest := srv.Names()[0] + srv.Export(server{}, "/org/guelfey/DBus/Test", "org.guelfey.DBus.Test") + obj := cli.Object(dest, "/org/guelfey/DBus/Test") + c := make(chan *Call, 50) + done := make(chan struct{}) + go func() { + for i := 0; i < b.N; i++ { + v := <-c + if v.Err != nil { + b.Fatal(v.Err) + } + i, r := v.Args[0].(int64), v.Body[0].(int64) + if 2*i != r { + b.Errorf("got %d, wanted %d", r, 2*i) + } + } + close(done) + }() + b.StartTimer() + for i := 0; i < b.N; i++ { + obj.Go("org.guelfey.DBus.Test.Double", 0, c, int64(i)) + } + <-done +} diff --git a/vendor/src/github.com/godbus/dbus/dbus.go b/vendor/src/github.com/godbus/dbus/dbus.go new file mode 100644 index 0000000000..2ce68735cd --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/dbus.go @@ 
-0,0 +1,258 @@ +package dbus + +import ( + "errors" + "reflect" + "strings" +) + +var ( + byteType = reflect.TypeOf(byte(0)) + boolType = reflect.TypeOf(false) + uint8Type = reflect.TypeOf(uint8(0)) + int16Type = reflect.TypeOf(int16(0)) + uint16Type = reflect.TypeOf(uint16(0)) + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf("") + signatureType = reflect.TypeOf(Signature{""}) + objectPathType = reflect.TypeOf(ObjectPath("")) + variantType = reflect.TypeOf(Variant{Signature{""}, nil}) + interfacesType = reflect.TypeOf([]interface{}{}) + unixFDType = reflect.TypeOf(UnixFD(0)) + unixFDIndexType = reflect.TypeOf(UnixFDIndex(0)) +) + +// An InvalidTypeError signals that a value which cannot be represented in the +// D-Bus wire format was passed to a function. +type InvalidTypeError struct { + Type reflect.Type +} + +func (e InvalidTypeError) Error() string { + return "dbus: invalid type " + e.Type.String() +} + +// Store copies the values contained in src to dest, which must be a slice of +// pointers. It converts slices of interfaces from src to corresponding structs +// in dest. An error is returned if the lengths of src and dest or the types of +// their elements don't match. +func Store(src []interface{}, dest ...interface{}) error { + if len(src) != len(dest) { + return errors.New("dbus.Store: length mismatch") + } + + for i := range src { + if err := store(src[i], dest[i]); err != nil { + return err + } + } + return nil +} + +func store(src, dest interface{}) error { + if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) { + reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src)) + return nil + } else if hasStruct(dest) { + rv := reflect.ValueOf(dest).Elem() + switch rv.Kind() { + case reflect.Struct: + vs, ok := src.([]interface{}) + if !ok { + return errors.New("dbus.Store: type mismatch") + } + t := rv.Type() + ndest := make([]interface{}, 0, rv.NumField()) + for i := 0; i < rv.NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + ndest = append(ndest, rv.Field(i).Addr().Interface()) + } + } + if len(vs) != len(ndest) { + return errors.New("dbus.Store: type mismatch") + } + err := Store(vs, ndest...) 
+ if err != nil { + return errors.New("dbus.Store: type mismatch") + } + case reflect.Slice: + sv := reflect.ValueOf(src) + if sv.Kind() != reflect.Slice { + return errors.New("dbus.Store: type mismatch") + } + rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len())) + for i := 0; i < sv.Len(); i++ { + if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil { + return err + } + } + case reflect.Map: + sv := reflect.ValueOf(src) + if sv.Kind() != reflect.Map { + return errors.New("dbus.Store: type mismatch") + } + keys := sv.MapKeys() + rv.Set(reflect.MakeMap(sv.Type())) + for _, key := range keys { + v := reflect.New(sv.Type().Elem()) + if err := store(v, sv.MapIndex(key).Interface()); err != nil { + return err + } + rv.SetMapIndex(key, v.Elem()) + } + default: + return errors.New("dbus.Store: type mismatch") + } + return nil + } else { + return errors.New("dbus.Store: type mismatch") + } +} + +func hasStruct(v interface{}) bool { + t := reflect.TypeOf(v) + for { + switch t.Kind() { + case reflect.Struct: + return true + case reflect.Slice, reflect.Ptr, reflect.Map: + t = t.Elem() + default: + return false + } + } +} + +// An ObjectPath is an object path as defined by the D-Bus spec. +type ObjectPath string + +// IsValid returns whether the object path is valid. +func (o ObjectPath) IsValid() bool { + s := string(o) + if len(s) == 0 { + return false + } + if s[0] != '/' { + return false + } + if s[len(s)-1] == '/' && len(s) != 1 { + return false + } + // probably not used, but technically possible + if s == "/" { + return true + } + split := strings.Split(s[1:], "/") + for _, v := range split { + if len(v) == 0 { + return false + } + for _, c := range v { + if !isMemberChar(c) { + return false + } + } + } + return true +} + +// A UnixFD is a Unix file descriptor sent over the wire. See the package-level +// documentation for more information about Unix file descriptor passsing. +type UnixFD int32 + +// A UnixFDIndex is the representation of a Unix file descriptor in a message. +type UnixFDIndex uint32 + +// alignment returns the alignment of values of type t. +func alignment(t reflect.Type) int { + switch t { + case variantType: + return 1 + case objectPathType: + return 4 + case signatureType: + return 1 + case interfacesType: // sometimes used for structs + return 8 + } + switch t.Kind() { + case reflect.Uint8: + return 1 + case reflect.Uint16, reflect.Int16: + return 2 + case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map: + return 4 + case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct: + return 8 + case reflect.Ptr: + return alignment(t.Elem()) + } + return 1 +} + +// isKeyType returns whether t is a valid type for a D-Bus dict. +func isKeyType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64, + reflect.String: + + return true + } + return false +} + +// isValidInterface returns whether s is a valid name for an interface. +func isValidInterface(s string) bool { + if len(s) == 0 || len(s) > 255 || s[0] == '.' { + return false + } + elem := strings.Split(s, ".") + if len(elem) < 2 { + return false + } + for _, v := range elem { + if len(v) == 0 { + return false + } + if v[0] >= '0' && v[0] <= '9' { + return false + } + for _, c := range v { + if !isMemberChar(c) { + return false + } + } + } + return true +} + +// isValidMember returns whether s is a valid name for a member. 
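+// For example, "NameAcquired" is a valid member name, while "Name.Acquired"
+// (contains a dot) and "0Name" (starts with a digit) are not.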
+func isValidMember(s string) bool { + if len(s) == 0 || len(s) > 255 { + return false + } + i := strings.Index(s, ".") + if i != -1 { + return false + } + if s[0] >= '0' && s[0] <= '9' { + return false + } + for _, c := range s { + if !isMemberChar(c) { + return false + } + } + return true +} + +func isMemberChar(c rune) bool { + return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || + (c >= 'a' && c <= 'z') || c == '_' +} diff --git a/vendor/src/github.com/godbus/dbus/decoder.go b/vendor/src/github.com/godbus/dbus/decoder.go new file mode 100644 index 0000000000..ef50dcab98 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/decoder.go @@ -0,0 +1,228 @@ +package dbus + +import ( + "encoding/binary" + "io" + "reflect" +) + +type decoder struct { + in io.Reader + order binary.ByteOrder + pos int +} + +// newDecoder returns a new decoder that reads values from in. The input is +// expected to be in the given byte order. +func newDecoder(in io.Reader, order binary.ByteOrder) *decoder { + dec := new(decoder) + dec.in = in + dec.order = order + return dec +} + +// align aligns the input to the given boundary and panics on error. +func (dec *decoder) align(n int) { + if dec.pos%n != 0 { + newpos := (dec.pos + n - 1) & ^(n - 1) + empty := make([]byte, newpos-dec.pos) + if _, err := io.ReadFull(dec.in, empty); err != nil { + panic(err) + } + dec.pos = newpos + } +} + +// Calls binary.Read(dec.in, dec.order, v) and panics on read errors. +func (dec *decoder) binread(v interface{}) { + if err := binary.Read(dec.in, dec.order, v); err != nil { + panic(err) + } +} + +func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) { + defer func() { + var ok bool + v := recover() + if err, ok = v.(error); ok { + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = FormatError("unexpected EOF") + } + } + }() + vs = make([]interface{}, 0) + s := sig.str + for s != "" { + err, rem := validSingle(s, 0) + if err != nil { + return nil, err + } + v := dec.decode(s[:len(s)-len(rem)], 0) + vs = append(vs, v) + s = rem + } + return vs, nil +} + +func (dec *decoder) decode(s string, depth int) interface{} { + dec.align(alignment(typeFor(s))) + switch s[0] { + case 'y': + var b [1]byte + if _, err := dec.in.Read(b[:]); err != nil { + panic(err) + } + dec.pos++ + return b[0] + case 'b': + i := dec.decode("u", depth).(uint32) + switch { + case i == 0: + return false + case i == 1: + return true + default: + panic(FormatError("invalid value for boolean")) + } + case 'n': + var i int16 + dec.binread(&i) + dec.pos += 2 + return i + case 'i': + var i int32 + dec.binread(&i) + dec.pos += 4 + return i + case 'x': + var i int64 + dec.binread(&i) + dec.pos += 8 + return i + case 'q': + var i uint16 + dec.binread(&i) + dec.pos += 2 + return i + case 'u': + var i uint32 + dec.binread(&i) + dec.pos += 4 + return i + case 't': + var i uint64 + dec.binread(&i) + dec.pos += 8 + return i + case 'd': + var f float64 + dec.binread(&f) + dec.pos += 8 + return f + case 's': + length := dec.decode("u", depth).(uint32) + b := make([]byte, int(length)+1) + if _, err := io.ReadFull(dec.in, b); err != nil { + panic(err) + } + dec.pos += int(length) + 1 + return string(b[:len(b)-1]) + case 'o': + return ObjectPath(dec.decode("s", depth).(string)) + case 'g': + length := dec.decode("y", depth).(byte) + b := make([]byte, int(length)+1) + if _, err := io.ReadFull(dec.in, b); err != nil { + panic(err) + } + dec.pos += int(length) + 1 + sig, err := ParseSignature(string(b[:len(b)-1])) + if err != nil { + panic(err) + } + 
return sig + case 'v': + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + var variant Variant + sig := dec.decode("g", depth).(Signature) + if len(sig.str) == 0 { + panic(FormatError("variant signature is empty")) + } + err, rem := validSingle(sig.str, 0) + if err != nil { + panic(err) + } + if rem != "" { + panic(FormatError("variant signature has multiple types")) + } + variant.sig = sig + variant.value = dec.decode(sig.str, depth+1) + return variant + case 'h': + return UnixFDIndex(dec.decode("u", depth).(uint32)) + case 'a': + if len(s) > 1 && s[1] == '{' { + ksig := s[2:3] + vsig := s[3 : len(s)-1] + v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig))) + if depth >= 63 { + panic(FormatError("input exceeds container depth limit")) + } + length := dec.decode("u", depth).(uint32) + // Even for empty maps, the correct padding must be included + dec.align(8) + spos := dec.pos + for dec.pos < spos+int(length) { + dec.align(8) + if !isKeyType(v.Type().Key()) { + panic(InvalidTypeError{v.Type()}) + } + kv := dec.decode(ksig, depth+2) + vv := dec.decode(vsig, depth+2) + v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) + } + return v.Interface() + } + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + length := dec.decode("u", depth).(uint32) + v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length)) + // Even for empty arrays, the correct padding must be included + dec.align(alignment(typeFor(s[1:]))) + spos := dec.pos + for dec.pos < spos+int(length) { + ev := dec.decode(s[1:], depth+1) + v = reflect.Append(v, reflect.ValueOf(ev)) + } + return v.Interface() + case '(': + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + dec.align(8) + v := make([]interface{}, 0) + s = s[1 : len(s)-1] + for s != "" { + err, rem := validSingle(s, 0) + if err != nil { + panic(err) + } + ev := dec.decode(s[:len(s)-len(rem)], depth+1) + v = append(v, ev) + s = rem + } + return v + default: + panic(SignatureError{Sig: s}) + } +} + +// A FormatError is an error in the wire format. +type FormatError string + +func (e FormatError) Error() string { + return "dbus: wire format error: " + string(e) +} diff --git a/vendor/src/github.com/godbus/dbus/doc.go b/vendor/src/github.com/godbus/dbus/doc.go new file mode 100644 index 0000000000..deff554a38 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/doc.go @@ -0,0 +1,63 @@ +/* +Package dbus implements bindings to the D-Bus message bus system. + +To use the message bus API, you first need to connect to a bus (usually the +session or system bus). The acquired connection then can be used to call methods +on remote objects and emit or receive signals. Using the Export method, you can +arrange D-Bus methods calls to be directly translated to method calls on a Go +value. + +Conversion Rules + +For outgoing messages, Go types are automatically converted to the +corresponding D-Bus types. The following types are directly encoded as their +respective D-Bus equivalents: + + Go type | D-Bus type + ------------+----------- + byte | BYTE + bool | BOOLEAN + int16 | INT16 + uint16 | UINT16 + int32 | INT32 + uint32 | UINT32 + int64 | INT64 + uint64 | UINT64 + float64 | DOUBLE + string | STRING + ObjectPath | OBJECT_PATH + Signature | SIGNATURE + Variant | VARIANT + UnixFDIndex | UNIX_FD + +Slices and arrays encode as ARRAYs of their element type. + +Maps encode as DICTs, provided that their key type can be used as a key for +a DICT. 
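+For example (an informal sketch), a map[string]Variant value is encoded as a
+DICT of STRING to VARIANT, i.e. with the signature "a{sv}".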
+ +Structs other than Variant and Signature encode as a STRUCT containing their +exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will +be skipped. + +Pointers encode as the value they're pointed to. + +Trying to encode any other type or a slice, map or struct containing an +unsupported type will result in an InvalidTypeError. + +For incoming messages, the inverse of these rules are used, with the exception +of STRUCTs. Incoming STRUCTS are represented as a slice of empty interfaces +containing the struct fields in the correct order. The Store function can be +used to convert such values to Go structs. + +Unix FD passing + +Handling Unix file descriptors deserves special mention. To use them, you should +first check that they are supported on a connection by calling SupportsUnixFDs. +If it returns true, all method of Connection will translate messages containing +UnixFD's to messages that are accompanied by the given file descriptors with the +UnixFD values being substituted by the correct indices. Similarily, the indices +of incoming messages are automatically resolved. It shouldn't be necessary to use +UnixFDIndex. + +*/ +package dbus diff --git a/vendor/src/github.com/godbus/dbus/encoder.go b/vendor/src/github.com/godbus/dbus/encoder.go new file mode 100644 index 0000000000..f9d2f05716 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/encoder.go @@ -0,0 +1,179 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "io" + "reflect" +) + +// An encoder encodes values to the D-Bus wire format. +type encoder struct { + out io.Writer + order binary.ByteOrder + pos int +} + +// NewEncoder returns a new encoder that writes to out in the given byte order. +func newEncoder(out io.Writer, order binary.ByteOrder) *encoder { + enc := new(encoder) + enc.out = out + enc.order = order + return enc +} + +// Aligns the next output to be on a multiple of n. Panics on write errors. +func (enc *encoder) align(n int) { + if enc.pos%n != 0 { + newpos := (enc.pos + n - 1) & ^(n - 1) + empty := make([]byte, newpos-enc.pos) + if _, err := enc.out.Write(empty); err != nil { + panic(err) + } + enc.pos = newpos + } +} + +// Calls binary.Write(enc.out, enc.order, v) and panics on write errors. +func (enc *encoder) binwrite(v interface{}) { + if err := binary.Write(enc.out, enc.order, v); err != nil { + panic(err) + } +} + +// Encode encodes the given values to the underyling reader. All written values +// are aligned properly as required by the D-Bus spec. +func (enc *encoder) Encode(vs ...interface{}) (err error) { + defer func() { + err, _ = recover().(error) + }() + for _, v := range vs { + enc.encode(reflect.ValueOf(v), 0) + } + return nil +} + +// encode encodes the given value to the writer and panics on error. depth holds +// the depth of the container nesting. 
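+// For example (informal): a uint32 that would start at stream position 6 is
+// preceded by two NUL padding bytes so that it starts at position 8, since
+// uint32 values are aligned to 4 bytes.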
+func (enc *encoder) encode(v reflect.Value, depth int) { + enc.align(alignment(v.Type())) + switch v.Kind() { + case reflect.Uint8: + var b [1]byte + b[0] = byte(v.Uint()) + if _, err := enc.out.Write(b[:]); err != nil { + panic(err) + } + enc.pos++ + case reflect.Bool: + if v.Bool() { + enc.encode(reflect.ValueOf(uint32(1)), depth) + } else { + enc.encode(reflect.ValueOf(uint32(0)), depth) + } + case reflect.Int16: + enc.binwrite(int16(v.Int())) + enc.pos += 2 + case reflect.Uint16: + enc.binwrite(uint16(v.Uint())) + enc.pos += 2 + case reflect.Int32: + enc.binwrite(int32(v.Int())) + enc.pos += 4 + case reflect.Uint32: + enc.binwrite(uint32(v.Uint())) + enc.pos += 4 + case reflect.Int64: + enc.binwrite(v.Int()) + enc.pos += 8 + case reflect.Uint64: + enc.binwrite(v.Uint()) + enc.pos += 8 + case reflect.Float64: + enc.binwrite(v.Float()) + enc.pos += 8 + case reflect.String: + enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth) + b := make([]byte, v.Len()+1) + copy(b, v.String()) + b[len(b)-1] = 0 + n, err := enc.out.Write(b) + if err != nil { + panic(err) + } + enc.pos += n + case reflect.Ptr: + enc.encode(v.Elem(), depth) + case reflect.Slice, reflect.Array: + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + var buf bytes.Buffer + bufenc := newEncoder(&buf, enc.order) + + for i := 0; i < v.Len(); i++ { + bufenc.encode(v.Index(i), depth+1) + } + enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) + length := buf.Len() + enc.align(alignment(v.Type().Elem())) + if _, err := buf.WriteTo(enc.out); err != nil { + panic(err) + } + enc.pos += length + case reflect.Struct: + if depth >= 64 && v.Type() != signatureType { + panic(FormatError("input exceeds container depth limit")) + } + switch t := v.Type(); t { + case signatureType: + str := v.Field(0) + enc.encode(reflect.ValueOf(byte(str.Len())), depth+1) + b := make([]byte, str.Len()+1) + copy(b, str.String()) + b[len(b)-1] = 0 + n, err := enc.out.Write(b) + if err != nil { + panic(err) + } + enc.pos += n + case variantType: + variant := v.Interface().(Variant) + enc.encode(reflect.ValueOf(variant.sig), depth+1) + enc.encode(reflect.ValueOf(variant.value), depth+1) + default: + for i := 0; i < v.Type().NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + enc.encode(v.Field(i), depth+1) + } + } + } + case reflect.Map: + // Maps are arrays of structures, so they actually increase the depth by + // 2. 
+ if depth >= 63 { + panic(FormatError("input exceeds container depth limit")) + } + if !isKeyType(v.Type().Key()) { + panic(InvalidTypeError{v.Type()}) + } + keys := v.MapKeys() + var buf bytes.Buffer + bufenc := newEncoder(&buf, enc.order) + for _, k := range keys { + bufenc.align(8) + bufenc.encode(k, depth+2) + bufenc.encode(v.MapIndex(k), depth+2) + } + enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) + length := buf.Len() + enc.align(8) + if _, err := buf.WriteTo(enc.out); err != nil { + panic(err) + } + enc.pos += length + default: + panic(InvalidTypeError{v.Type()}) + } +} diff --git a/vendor/src/github.com/godbus/dbus/examples_test.go b/vendor/src/github.com/godbus/dbus/examples_test.go new file mode 100644 index 0000000000..0218ac5598 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/examples_test.go @@ -0,0 +1,50 @@ +package dbus + +import "fmt" + +func ExampleConn_Emit() { + conn, err := SessionBus() + if err != nil { + panic(err) + } + + conn.Emit("/foo/bar", "foo.bar.Baz", uint32(0xDAEDBEEF)) +} + +func ExampleObject_Call() { + var list []string + + conn, err := SessionBus() + if err != nil { + panic(err) + } + + err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&list) + if err != nil { + panic(err) + } + for _, v := range list { + fmt.Println(v) + } +} + +func ExampleObject_Go() { + conn, err := SessionBus() + if err != nil { + panic(err) + } + + ch := make(chan *Call, 10) + conn.BusObject().Go("org.freedesktop.DBus.ListActivatableNames", 0, ch) + select { + case call := <-ch: + if call.Err != nil { + panic(err) + } + list := call.Body[0].([]string) + for _, v := range list { + fmt.Println(v) + } + // put some other cases here + } +} diff --git a/vendor/src/github.com/godbus/dbus/export.go b/vendor/src/github.com/godbus/dbus/export.go new file mode 100644 index 0000000000..1dd1591528 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/export.go @@ -0,0 +1,302 @@ +package dbus + +import ( + "errors" + "reflect" + "strings" + "unicode" +) + +var ( + errmsgInvalidArg = Error{ + "org.freedesktop.DBus.Error.InvalidArgs", + []interface{}{"Invalid type / number of args"}, + } + errmsgNoObject = Error{ + "org.freedesktop.DBus.Error.NoSuchObject", + []interface{}{"No such object"}, + } + errmsgUnknownMethod = Error{ + "org.freedesktop.DBus.Error.UnknownMethod", + []interface{}{"Unknown / invalid method"}, + } +) + +// Sender is a type which can be used in exported methods to receive the message +// sender. +type Sender string + +func exportedMethod(v interface{}, name string) reflect.Value { + if v == nil { + return reflect.Value{} + } + m := reflect.ValueOf(v).MethodByName(name) + if !m.IsValid() { + return reflect.Value{} + } + t := m.Type() + if t.NumOut() == 0 || + t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) { + + return reflect.Value{} + } + return m +} + +// handleCall handles the given method call (i.e. looks if it's one of the +// pre-implemented ones and searches for a corresponding handler if not). 
+func (conn *Conn) handleCall(msg *Message) { + name := msg.Headers[FieldMember].value.(string) + path := msg.Headers[FieldPath].value.(ObjectPath) + ifaceName, hasIface := msg.Headers[FieldInterface].value.(string) + sender, hasSender := msg.Headers[FieldSender].value.(string) + serial := msg.serial + if ifaceName == "org.freedesktop.DBus.Peer" { + switch name { + case "Ping": + conn.sendReply(sender, serial) + case "GetMachineId": + conn.sendReply(sender, serial, conn.uuid) + default: + conn.sendError(errmsgUnknownMethod, sender, serial) + } + return + } + if len(name) == 0 || unicode.IsLower([]rune(name)[0]) { + conn.sendError(errmsgUnknownMethod, sender, serial) + } + var m reflect.Value + if hasIface { + conn.handlersLck.RLock() + obj, ok := conn.handlers[path] + if !ok { + conn.sendError(errmsgNoObject, sender, serial) + conn.handlersLck.RUnlock() + return + } + iface := obj[ifaceName] + conn.handlersLck.RUnlock() + m = exportedMethod(iface, name) + } else { + conn.handlersLck.RLock() + if _, ok := conn.handlers[path]; !ok { + conn.sendError(errmsgNoObject, sender, serial) + conn.handlersLck.RUnlock() + return + } + for _, v := range conn.handlers[path] { + m = exportedMethod(v, name) + if m.IsValid() { + break + } + } + conn.handlersLck.RUnlock() + } + if !m.IsValid() { + conn.sendError(errmsgUnknownMethod, sender, serial) + return + } + t := m.Type() + vs := msg.Body + pointers := make([]interface{}, t.NumIn()) + decode := make([]interface{}, 0, len(vs)) + for i := 0; i < t.NumIn(); i++ { + tp := t.In(i) + val := reflect.New(tp) + pointers[i] = val.Interface() + if tp == reflect.TypeOf((*Sender)(nil)).Elem() { + val.Elem().SetString(sender) + } else { + decode = append(decode, pointers[i]) + } + } + if len(decode) != len(vs) { + conn.sendError(errmsgInvalidArg, sender, serial) + return + } + if err := Store(vs, decode...); err != nil { + conn.sendError(errmsgInvalidArg, sender, serial) + return + } + params := make([]reflect.Value, len(pointers)) + for i := 0; i < len(pointers); i++ { + params[i] = reflect.ValueOf(pointers[i]).Elem() + } + ret := m.Call(params) + if em := ret[t.NumOut()-1].Interface().(*Error); em != nil { + conn.sendError(*em, sender, serial) + return + } + if msg.Flags&FlagNoReplyExpected == 0 { + reply := new(Message) + reply.Type = TypeMethodReply + reply.serial = conn.getSerial() + reply.Headers = make(map[HeaderField]Variant) + if hasSender { + reply.Headers[FieldDestination] = msg.Headers[FieldSender] + } + reply.Headers[FieldReplySerial] = MakeVariant(msg.serial) + reply.Body = make([]interface{}, len(ret)-1) + for i := 0; i < len(ret)-1; i++ { + reply.Body[i] = ret[i].Interface() + } + if len(ret) != 1 { + reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- reply + } + conn.outLck.RUnlock() + } +} + +// Emit emits the given signal on the message bus. The name parameter must be +// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost". 
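+//
+// A usage sketch (the path, interface and value below are illustrative):
+//
+//	conn.Emit("/com/example/App", "com.example.App.StateChanged", "ready")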
+func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error { + if !path.IsValid() { + return errors.New("dbus: invalid object path") + } + i := strings.LastIndex(name, ".") + if i == -1 { + return errors.New("dbus: invalid method name") + } + iface := name[:i] + member := name[i+1:] + if !isValidMember(member) { + return errors.New("dbus: invalid method name") + } + if !isValidInterface(iface) { + return errors.New("dbus: invalid interface name") + } + msg := new(Message) + msg.Type = TypeSignal + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + msg.Headers[FieldInterface] = MakeVariant(iface) + msg.Headers[FieldMember] = MakeVariant(member) + msg.Headers[FieldPath] = MakeVariant(path) + msg.Body = values + if len(values) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) + } + conn.outLck.RLock() + defer conn.outLck.RUnlock() + if conn.closed { + return ErrClosed + } + conn.out <- msg + return nil +} + +// Export registers the given value to be exported as an object on the +// message bus. +// +// If a method call on the given path and interface is received, an exported +// method with the same name is called with v as the receiver if the +// parameters match and the last return value is of type *Error. If this +// *Error is not nil, it is sent back to the caller as an error. +// Otherwise, a method reply is sent with the other return values as its body. +// +// Any parameters with the special type Sender are set to the sender of the +// dbus message when the method is called. Parameters of this type do not +// contribute to the dbus signature of the method (i.e. the method is exposed +// as if the parameters of type Sender were not there). +// +// Every method call is executed in a new goroutine, so the method may be called +// in multiple goroutines at once. +// +// Method calls on the interface org.freedesktop.DBus.Peer will be automatically +// handled for every object. +// +// Passing nil as the first parameter will cause conn to cease handling calls on +// the given combination of path and interface. +// +// Export returns an error if path is not a valid path name. +func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error { + if !path.IsValid() { + return errors.New("dbus: invalid path name") + } + conn.handlersLck.Lock() + if v == nil { + if _, ok := conn.handlers[path]; ok { + delete(conn.handlers[path], iface) + if len(conn.handlers[path]) == 0 { + delete(conn.handlers, path) + } + } + return nil + } + if _, ok := conn.handlers[path]; !ok { + conn.handlers[path] = make(map[string]interface{}) + } + conn.handlers[path][iface] = v + conn.handlersLck.Unlock() + return nil +} + +// ReleaseName calls org.freedesktop.DBus.ReleaseName. You should use only this +// method to release a name (see below). +func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) { + var r uint32 + err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r) + if err != nil { + return 0, err + } + if r == uint32(ReleaseNameReplyReleased) { + conn.namesLck.Lock() + for i, v := range conn.names { + if v == name { + copy(conn.names[i:], conn.names[i+1:]) + conn.names = conn.names[:len(conn.names)-1] + } + } + conn.namesLck.Unlock() + } + return ReleaseNameReply(r), nil +} + +// RequestName calls org.freedesktop.DBus.RequestName. You should use only this +// method to request a name because package dbus needs to keep track of all +// names that the connection has. 
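+//
+// A usage sketch (the bus name is illustrative):
+//
+//	reply, err := conn.RequestName("com.example.App", NameFlagDoNotQueue)
+//	if err == nil && reply != RequestNameReplyPrimaryOwner {
+//		// the name is already owned by another connection
+//	}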
+func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) { + var r uint32 + err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r) + if err != nil { + return 0, err + } + if r == uint32(RequestNameReplyPrimaryOwner) { + conn.namesLck.Lock() + conn.names = append(conn.names, name) + conn.namesLck.Unlock() + } + return RequestNameReply(r), nil +} + +// ReleaseNameReply is the reply to a ReleaseName call. +type ReleaseNameReply uint32 + +const ( + ReleaseNameReplyReleased ReleaseNameReply = 1 + iota + ReleaseNameReplyNonExistent + ReleaseNameReplyNotOwner +) + +// RequestNameFlags represents the possible flags for a RequestName call. +type RequestNameFlags uint32 + +const ( + NameFlagAllowReplacement RequestNameFlags = 1 << iota + NameFlagReplaceExisting + NameFlagDoNotQueue +) + +// RequestNameReply is the reply to a RequestName call. +type RequestNameReply uint32 + +const ( + RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota + RequestNameReplyInQueue + RequestNameReplyExists + RequestNameReplyAlreadyOwner +) diff --git a/vendor/src/github.com/godbus/dbus/homedir.go b/vendor/src/github.com/godbus/dbus/homedir.go new file mode 100644 index 0000000000..0b745f9313 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/homedir.go @@ -0,0 +1,28 @@ +package dbus + +import ( + "os" + "sync" +) + +var ( + homeDir string + homeDirLock sync.Mutex +) + +func getHomeDir() string { + homeDirLock.Lock() + defer homeDirLock.Unlock() + + if homeDir != "" { + return homeDir + } + + homeDir = os.Getenv("HOME") + if homeDir != "" { + return homeDir + } + + homeDir = lookupHomeDir() + return homeDir +} diff --git a/vendor/src/github.com/godbus/dbus/homedir_dynamic.go b/vendor/src/github.com/godbus/dbus/homedir_dynamic.go new file mode 100644 index 0000000000..2732081e73 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/homedir_dynamic.go @@ -0,0 +1,15 @@ +// +build !static_build + +package dbus + +import ( + "os/user" +) + +func lookupHomeDir() string { + u, err := user.Current() + if err != nil { + return "/" + } + return u.HomeDir +} diff --git a/vendor/src/github.com/godbus/dbus/homedir_static.go b/vendor/src/github.com/godbus/dbus/homedir_static.go new file mode 100644 index 0000000000..b9d9cb5525 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/homedir_static.go @@ -0,0 +1,45 @@ +// +build static_build + +package dbus + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +func lookupHomeDir() string { + myUid := os.Getuid() + + f, err := os.Open("/etc/passwd") + if err != nil { + return "/" + } + defer f.Close() + + s := bufio.NewScanner(f) + + for s.Scan() { + if err := s.Err(); err != nil { + break + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + parts := strings.Split(line, ":") + + if len(parts) >= 6 { + uid, err := strconv.Atoi(parts[2]) + if err == nil && uid == myUid { + return parts[5] + } + } + } + + // Default to / if we can't get a better value + return "/" +} diff --git a/vendor/src/github.com/godbus/dbus/introspect/call.go b/vendor/src/github.com/godbus/dbus/introspect/call.go new file mode 100644 index 0000000000..4aca2ea63e --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/introspect/call.go @@ -0,0 +1,27 @@ +package introspect + +import ( + "encoding/xml" + "github.com/godbus/dbus" + "strings" +) + +// Call calls org.freedesktop.Introspectable.Introspect on a remote object +// and returns the introspection data. 
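+//
+// A usage sketch (conn is assumed to be an established *dbus.Conn):
+//
+//	node, err := Call(conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus"))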
+func Call(o *dbus.Object) (*Node, error) { + var xmldata string + var node Node + + err := o.Call("org.freedesktop.DBus.Introspectable.Introspect", 0).Store(&xmldata) + if err != nil { + return nil, err + } + err = xml.NewDecoder(strings.NewReader(xmldata)).Decode(&node) + if err != nil { + return nil, err + } + if node.Name == "" { + node.Name = string(o.Path()) + } + return &node, nil +} diff --git a/vendor/src/github.com/godbus/dbus/introspect/introspect.go b/vendor/src/github.com/godbus/dbus/introspect/introspect.go new file mode 100644 index 0000000000..dafcdb8b7a --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/introspect/introspect.go @@ -0,0 +1,80 @@ +// Package introspect provides some utilities for dealing with the DBus +// introspection format. +package introspect + +import "encoding/xml" + +// The introspection data for the org.freedesktop.DBus.Introspectable interface. +var IntrospectData = Interface{ + Name: "org.freedesktop.DBus.Introspectable", + Methods: []Method{ + { + Name: "Introspect", + Args: []Arg{ + {"out", "s", "out"}, + }, + }, + }, +} + +// The introspection data for the org.freedesktop.DBus.Introspectable interface, +// as a string. +const IntrospectDataString = ` + + + + + +` + +// Node is the root element of an introspection. +type Node struct { + XMLName xml.Name `xml:"node"` + Name string `xml:"name,attr,omitempty"` + Interfaces []Interface `xml:"interface"` + Children []Node `xml:"node,omitempty"` +} + +// Interface describes a DBus interface that is available on the message bus. +type Interface struct { + Name string `xml:"name,attr"` + Methods []Method `xml:"method"` + Signals []Signal `xml:"signal"` + Properties []Property `xml:"property"` + Annotations []Annotation `xml:"annotation"` +} + +// Method describes a Method on an Interface as retured by an introspection. +type Method struct { + Name string `xml:"name,attr"` + Args []Arg `xml:"arg"` + Annotations []Annotation `xml:"annotation"` +} + +// Signal describes a Signal emitted on an Interface. +type Signal struct { + Name string `xml:"name,attr"` + Args []Arg `xml:"arg"` + Annotations []Annotation `xml:"annotation"` +} + +// Property describes a property of an Interface. +type Property struct { + Name string `xml:"name,attr"` + Type string `xml:"type,attr"` + Access string `xml:"access,attr"` + Annotations []Annotation `xml:"annotation"` +} + +// Arg represents an argument of a method or a signal. +type Arg struct { + Name string `xml:"name,attr,omitempty"` + Type string `xml:"type,attr"` + Direction string `xml:"direction,attr,omitempty"` +} + +// Annotation is an annotation in the introspection format. +type Annotation struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} diff --git a/vendor/src/github.com/godbus/dbus/introspect/introspectable.go b/vendor/src/github.com/godbus/dbus/introspect/introspectable.go new file mode 100644 index 0000000000..a2a965a343 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/introspect/introspectable.go @@ -0,0 +1,74 @@ +package introspect + +import ( + "encoding/xml" + "github.com/godbus/dbus" + "reflect" +) + +// Introspectable implements org.freedesktop.Introspectable. +// +// You can create it by converting the XML-formatted introspection data from a +// string to an Introspectable or call NewIntrospectable with a Node. Then, +// export it as org.freedesktop.Introspectable on you object. 
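+//
+// A usage sketch (the object path and the xmldata value are illustrative):
+//
+//	conn.Export(Introspectable(xmldata), "/com/example/App", "org.freedesktop.DBus.Introspectable")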
+type Introspectable string + +// NewIntrospectable returns an Introspectable that returns the introspection +// data that corresponds to the given Node. If n.Interfaces doesn't contain the +// data for org.freedesktop.DBus.Introspectable, it is added automatically. +func NewIntrospectable(n *Node) Introspectable { + found := false + for _, v := range n.Interfaces { + if v.Name == "org.freedesktop.DBus.Introspectable" { + found = true + break + } + } + if !found { + n.Interfaces = append(n.Interfaces, IntrospectData) + } + b, err := xml.Marshal(n) + if err != nil { + panic(err) + } + return Introspectable(b) +} + +// Introspect implements org.freedesktop.Introspectable.Introspect. +func (i Introspectable) Introspect() (string, *dbus.Error) { + return string(i), nil +} + +// Methods returns the description of the methods of v. This can be used to +// create a Node which can be passed to NewIntrospectable. +func Methods(v interface{}) []Method { + t := reflect.TypeOf(v) + ms := make([]Method, 0, t.NumMethod()) + for i := 0; i < t.NumMethod(); i++ { + if t.Method(i).PkgPath != "" { + continue + } + mt := t.Method(i).Type + if mt.NumOut() == 0 || + mt.Out(mt.NumOut()-1) != reflect.TypeOf(&dbus.Error{"", nil}) { + + continue + } + var m Method + m.Name = t.Method(i).Name + m.Args = make([]Arg, 0, mt.NumIn()+mt.NumOut()-2) + for j := 1; j < mt.NumIn(); j++ { + if mt.In(j) != reflect.TypeOf((*dbus.Sender)(nil)).Elem() { + arg := Arg{"", dbus.SignatureOfType(mt.In(j)).String(), "in"} + m.Args = append(m.Args, arg) + } + } + for j := 0; j < mt.NumOut()-1; j++ { + arg := Arg{"", dbus.SignatureOfType(mt.Out(j)).String(), "out"} + m.Args = append(m.Args, arg) + } + m.Annotations = make([]Annotation, 0) + ms = append(ms, m) + } + return ms +} diff --git a/vendor/src/github.com/godbus/dbus/message.go b/vendor/src/github.com/godbus/dbus/message.go new file mode 100644 index 0000000000..075d6e38ba --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/message.go @@ -0,0 +1,346 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "reflect" + "strconv" +) + +const protoVersion byte = 1 + +// Flags represents the possible flags of a D-Bus message. +type Flags byte + +const ( + // FlagNoReplyExpected signals that the message is not expected to generate + // a reply. If this flag is set on outgoing messages, any possible reply + // will be discarded. + FlagNoReplyExpected Flags = 1 << iota + // FlagNoAutoStart signals that the message bus should not automatically + // start an application when handling this message. + FlagNoAutoStart +) + +// Type represents the possible types of a D-Bus message. +type Type byte + +const ( + TypeMethodCall Type = 1 + iota + TypeMethodReply + TypeError + TypeSignal + typeMax +) + +func (t Type) String() string { + switch t { + case TypeMethodCall: + return "method call" + case TypeMethodReply: + return "reply" + case TypeError: + return "error" + case TypeSignal: + return "signal" + } + return "invalid" +} + +// HeaderField represents the possible byte codes for the headers +// of a D-Bus message. +type HeaderField byte + +const ( + FieldPath HeaderField = 1 + iota + FieldInterface + FieldMember + FieldErrorName + FieldReplySerial + FieldDestination + FieldSender + FieldSignature + FieldUnixFDs + fieldMax +) + +// An InvalidMessageError describes the reason why a D-Bus message is regarded as +// invalid. 
+type InvalidMessageError string + +func (e InvalidMessageError) Error() string { + return "dbus: invalid message: " + string(e) +} + +// fieldType are the types of the various header fields. +var fieldTypes = [fieldMax]reflect.Type{ + FieldPath: objectPathType, + FieldInterface: stringType, + FieldMember: stringType, + FieldErrorName: stringType, + FieldReplySerial: uint32Type, + FieldDestination: stringType, + FieldSender: stringType, + FieldSignature: signatureType, + FieldUnixFDs: uint32Type, +} + +// requiredFields lists the header fields that are required by the different +// message types. +var requiredFields = [typeMax][]HeaderField{ + TypeMethodCall: {FieldPath, FieldMember}, + TypeMethodReply: {FieldReplySerial}, + TypeError: {FieldErrorName, FieldReplySerial}, + TypeSignal: {FieldPath, FieldInterface, FieldMember}, +} + +// Message represents a single D-Bus message. +type Message struct { + Type + Flags + Headers map[HeaderField]Variant + Body []interface{} + + serial uint32 +} + +type header struct { + Field byte + Variant +} + +// DecodeMessage tries to decode a single message in the D-Bus wire format +// from the given reader. The byte order is figured out from the first byte. +// The possibly returned error can be an error of the underlying reader, an +// InvalidMessageError or a FormatError. +func DecodeMessage(rd io.Reader) (msg *Message, err error) { + var order binary.ByteOrder + var hlength, length uint32 + var typ, flags, proto byte + var headers []header + + b := make([]byte, 1) + _, err = rd.Read(b) + if err != nil { + return + } + switch b[0] { + case 'l': + order = binary.LittleEndian + case 'B': + order = binary.BigEndian + default: + return nil, InvalidMessageError("invalid byte order") + } + + dec := newDecoder(rd, order) + dec.pos = 1 + + msg = new(Message) + vs, err := dec.Decode(Signature{"yyyuu"}) + if err != nil { + return nil, err + } + if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil { + return nil, err + } + msg.Type = Type(typ) + msg.Flags = Flags(flags) + + // get the header length separately because we need it later + b = make([]byte, 4) + _, err = io.ReadFull(rd, b) + if err != nil { + return nil, err + } + binary.Read(bytes.NewBuffer(b), order, &hlength) + if hlength+length+16 > 1<<27 { + return nil, InvalidMessageError("message is too long") + } + dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order) + dec.pos = 12 + vs, err = dec.Decode(Signature{"a(yv)"}) + if err != nil { + return nil, err + } + if err = Store(vs, &headers); err != nil { + return nil, err + } + + msg.Headers = make(map[HeaderField]Variant) + for _, v := range headers { + msg.Headers[HeaderField(v.Field)] = v.Variant + } + + dec.align(8) + body := make([]byte, int(length)) + if length != 0 { + _, err := io.ReadFull(rd, body) + if err != nil { + return nil, err + } + } + + if err = msg.IsValid(); err != nil { + return nil, err + } + sig, _ := msg.Headers[FieldSignature].value.(Signature) + if sig.str != "" { + buf := bytes.NewBuffer(body) + dec = newDecoder(buf, order) + vs, err := dec.Decode(sig) + if err != nil { + return nil, err + } + msg.Body = vs + } + + return +} + +// EncodeTo encodes and sends a message to the given writer. The byte order must +// be either binary.LittleEndian or binary.BigEndian. If the message is not +// valid or an error occurs when writing, an error is returned. 
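+//
+// A minimal sketch:
+//
+//	var buf bytes.Buffer
+//	if err := msg.EncodeTo(&buf, binary.LittleEndian); err != nil {
+//		// handle the error
+//	}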
+func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error { + if err := msg.IsValid(); err != nil { + return err + } + var vs [7]interface{} + switch order { + case binary.LittleEndian: + vs[0] = byte('l') + case binary.BigEndian: + vs[0] = byte('B') + default: + return errors.New("dbus: invalid byte order") + } + body := new(bytes.Buffer) + enc := newEncoder(body, order) + if len(msg.Body) != 0 { + enc.Encode(msg.Body...) + } + vs[1] = msg.Type + vs[2] = msg.Flags + vs[3] = protoVersion + vs[4] = uint32(len(body.Bytes())) + vs[5] = msg.serial + headers := make([]header, 0, len(msg.Headers)) + for k, v := range msg.Headers { + headers = append(headers, header{byte(k), v}) + } + vs[6] = headers + var buf bytes.Buffer + enc = newEncoder(&buf, order) + enc.Encode(vs[:]...) + enc.align(8) + body.WriteTo(&buf) + if buf.Len() > 1<<27 { + return InvalidMessageError("message is too long") + } + if _, err := buf.WriteTo(out); err != nil { + return err + } + return nil +} + +// IsValid checks whether msg is a valid message and returns an +// InvalidMessageError if it is not. +func (msg *Message) IsValid() error { + if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 { + return InvalidMessageError("invalid flags") + } + if msg.Type == 0 || msg.Type >= typeMax { + return InvalidMessageError("invalid message type") + } + for k, v := range msg.Headers { + if k == 0 || k >= fieldMax { + return InvalidMessageError("invalid header") + } + if reflect.TypeOf(v.value) != fieldTypes[k] { + return InvalidMessageError("invalid type of header field") + } + } + for _, v := range requiredFields[msg.Type] { + if _, ok := msg.Headers[v]; !ok { + return InvalidMessageError("missing required header") + } + } + if path, ok := msg.Headers[FieldPath]; ok { + if !path.value.(ObjectPath).IsValid() { + return InvalidMessageError("invalid path name") + } + } + if iface, ok := msg.Headers[FieldInterface]; ok { + if !isValidInterface(iface.value.(string)) { + return InvalidMessageError("invalid interface name") + } + } + if member, ok := msg.Headers[FieldMember]; ok { + if !isValidMember(member.value.(string)) { + return InvalidMessageError("invalid member name") + } + } + if errname, ok := msg.Headers[FieldErrorName]; ok { + if !isValidInterface(errname.value.(string)) { + return InvalidMessageError("invalid error name") + } + } + if len(msg.Body) != 0 { + if _, ok := msg.Headers[FieldSignature]; !ok { + return InvalidMessageError("missing signature") + } + } + return nil +} + +// Serial returns the message's serial number. The returned value is only valid +// for messages received by eavesdropping. +func (msg *Message) Serial() uint32 { + return msg.serial +} + +// String returns a string representation of a message similar to the format of +// dbus-monitor. 
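// A minimal sketch (not from the vendored source) of the DecodeMessage/EncodeTo pair
// above: build a method call, encode it, and decode it back. The object path and
// member are made up; the serial is left at its zero value here, while a real
// connection would assign one.
func roundTripExample() (*Message, error) {
	msg := &Message{
		Type: TypeMethodCall,
		Headers: map[HeaderField]Variant{
			FieldPath:   MakeVariant(ObjectPath("/org/example/Object")),
			FieldMember: MakeVariant("Ping"),
		},
	}
	buf := new(bytes.Buffer)
	if err := msg.EncodeTo(buf, binary.LittleEndian); err != nil {
		return nil, err
	}
	// DecodeMessage determines the byte order again from the first byte ('l').
	return DecodeMessage(buf)
}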
+func (msg *Message) String() string { + if err := msg.IsValid(); err != nil { + return "" + } + s := msg.Type.String() + if v, ok := msg.Headers[FieldSender]; ok { + s += " from " + v.value.(string) + } + if v, ok := msg.Headers[FieldDestination]; ok { + s += " to " + v.value.(string) + } + s += " serial " + strconv.FormatUint(uint64(msg.serial), 10) + if v, ok := msg.Headers[FieldReplySerial]; ok { + s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10) + } + if v, ok := msg.Headers[FieldUnixFDs]; ok { + s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10) + } + if v, ok := msg.Headers[FieldPath]; ok { + s += " path " + string(v.value.(ObjectPath)) + } + if v, ok := msg.Headers[FieldInterface]; ok { + s += " interface " + v.value.(string) + } + if v, ok := msg.Headers[FieldErrorName]; ok { + s += " error " + v.value.(string) + } + if v, ok := msg.Headers[FieldMember]; ok { + s += " member " + v.value.(string) + } + if len(msg.Body) != 0 { + s += "\n" + } + for i, v := range msg.Body { + s += " " + MakeVariant(v).String() + if i != len(msg.Body)-1 { + s += "\n" + } + } + return s +} diff --git a/vendor/src/github.com/godbus/dbus/prop/prop.go b/vendor/src/github.com/godbus/dbus/prop/prop.go new file mode 100644 index 0000000000..ed5bdf2243 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/prop/prop.go @@ -0,0 +1,264 @@ +// Package prop provides the Properties struct which can be used to implement +// org.freedesktop.DBus.Properties. +package prop + +import ( + "github.com/godbus/dbus" + "github.com/godbus/dbus/introspect" + "sync" +) + +// EmitType controls how org.freedesktop.DBus.Properties.PropertiesChanged is +// emitted for a property. If it is EmitTrue, the signal is emitted. If it is +// EmitInvalidates, the signal is also emitted, but the new value of the property +// is not disclosed. +type EmitType byte + +const ( + EmitFalse EmitType = iota + EmitTrue + EmitInvalidates +) + +// ErrIfaceNotFound is the error returned to peers who try to access properties +// on interfaces that aren't found. +var ErrIfaceNotFound = &dbus.Error{"org.freedesktop.DBus.Properties.Error.InterfaceNotFound", nil} + +// ErrPropNotFound is the error returned to peers trying to access properties +// that aren't found. +var ErrPropNotFound = &dbus.Error{"org.freedesktop.DBus.Properties.Error.PropertyNotFound", nil} + +// ErrReadOnly is the error returned to peers trying to set a read-only +// property. +var ErrReadOnly = &dbus.Error{"org.freedesktop.DBus.Properties.Error.ReadOnly", nil} + +// ErrInvalidArg is returned to peers if the type of the property that is being +// changed and the argument don't match. +var ErrInvalidArg = &dbus.Error{"org.freedesktop.DBus.Properties.Error.InvalidArg", nil} + +// The introspection data for the org.freedesktop.DBus.Properties interface. 
+var IntrospectData = introspect.Interface{ + Name: "org.freedesktop.DBus.Properties", + Methods: []introspect.Method{ + { + Name: "Get", + Args: []introspect.Arg{ + {"interface", "s", "in"}, + {"property", "s", "in"}, + {"value", "v", "out"}, + }, + }, + { + Name: "GetAll", + Args: []introspect.Arg{ + {"interface", "s", "in"}, + {"props", "a{sv}", "out"}, + }, + }, + { + Name: "Set", + Args: []introspect.Arg{ + {"interface", "s", "in"}, + {"property", "s", "in"}, + {"value", "v", "in"}, + }, + }, + }, + Signals: []introspect.Signal{ + { + Name: "PropertiesChanged", + Args: []introspect.Arg{ + {"interface", "s", "out"}, + {"changed_properties", "a{sv}", "out"}, + {"invalidates_properties", "as", "out"}, + }, + }, + }, +} + +// The introspection data for the org.freedesktop.DBus.Properties interface, as +// a string. +const IntrospectDataString = ` + + + + + + + + + + + + + + + + + + + + + +` + +// Prop represents a single property. It is used for creating a Properties +// value. +type Prop struct { + // Initial value. Must be a DBus-representable type. + Value interface{} + + // If true, the value can be modified by calls to Set. + Writable bool + + // Controls how org.freedesktop.DBus.Properties.PropertiesChanged is + // emitted if this property changes. + Emit EmitType + + // If not nil, anytime this property is changed by Set, this function is + // called with an appropiate Change as its argument. If the returned error + // is not nil, it is sent back to the caller of Set and the property is not + // changed. + Callback func(*Change) *dbus.Error +} + +// Change represents a change of a property by a call to Set. +type Change struct { + Props *Properties + Iface string + Name string + Value interface{} +} + +// Properties is a set of values that can be made available to the message bus +// using the org.freedesktop.DBus.Properties interface. It is safe for +// concurrent use by multiple goroutines. +type Properties struct { + m map[string]map[string]*Prop + mut sync.RWMutex + conn *dbus.Conn + path dbus.ObjectPath +} + +// New returns a new Properties structure that manages the given properties. +// The key for the first-level map of props is the name of the interface; the +// second-level key is the name of the property. The returned structure will be +// exported as org.freedesktop.DBus.Properties on path. +func New(conn *dbus.Conn, path dbus.ObjectPath, props map[string]map[string]*Prop) *Properties { + p := &Properties{m: props, conn: conn, path: path} + conn.Export(p, path, "org.freedesktop.DBus.Properties") + return p +} + +// Get implements org.freedesktop.DBus.Properties.Get. +func (p *Properties) Get(iface, property string) (dbus.Variant, *dbus.Error) { + p.mut.RLock() + defer p.mut.RUnlock() + m, ok := p.m[iface] + if !ok { + return dbus.Variant{}, ErrIfaceNotFound + } + prop, ok := m[property] + if !ok { + return dbus.Variant{}, ErrPropNotFound + } + return dbus.MakeVariant(prop.Value), nil +} + +// GetAll implements org.freedesktop.DBus.Properties.GetAll. +func (p *Properties) GetAll(iface string) (map[string]dbus.Variant, *dbus.Error) { + p.mut.RLock() + defer p.mut.RUnlock() + m, ok := p.m[iface] + if !ok { + return nil, ErrIfaceNotFound + } + rm := make(map[string]dbus.Variant, len(m)) + for k, v := range m { + rm[k] = dbus.MakeVariant(v.Value) + } + return rm, nil +} + +// GetMust returns the value of the given property and panics if either the +// interface or the property name are invalid. 
+func (p *Properties) GetMust(iface, property string) interface{} { + p.mut.RLock() + defer p.mut.RUnlock() + return p.m[iface][property].Value +} + +// Introspection returns the introspection data that represents the properties +// of iface. +func (p *Properties) Introspection(iface string) []introspect.Property { + p.mut.RLock() + defer p.mut.RUnlock() + m := p.m[iface] + s := make([]introspect.Property, 0, len(m)) + for k, v := range m { + p := introspect.Property{Name: k, Type: dbus.SignatureOf(v.Value).String()} + if v.Writable { + p.Access = "readwrite" + } else { + p.Access = "read" + } + s = append(s, p) + } + return s +} + +// set sets the given property and emits PropertyChanged if appropiate. p.mut +// must already be locked. +func (p *Properties) set(iface, property string, v interface{}) { + prop := p.m[iface][property] + prop.Value = v + switch prop.Emit { + case EmitFalse: + // do nothing + case EmitInvalidates: + p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged", + iface, map[string]dbus.Variant{}, []string{property}) + case EmitTrue: + p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged", + iface, map[string]dbus.Variant{property: dbus.MakeVariant(v)}, + []string{}) + default: + panic("invalid value for EmitType") + } +} + +// Set implements org.freedesktop.Properties.Set. +func (p *Properties) Set(iface, property string, newv dbus.Variant) *dbus.Error { + p.mut.Lock() + defer p.mut.Unlock() + m, ok := p.m[iface] + if !ok { + return ErrIfaceNotFound + } + prop, ok := m[property] + if !ok { + return ErrPropNotFound + } + if !prop.Writable { + return ErrReadOnly + } + if newv.Signature() != dbus.SignatureOf(prop.Value) { + return ErrInvalidArg + } + if prop.Callback != nil { + err := prop.Callback(&Change{p, iface, property, newv.Value()}) + if err != nil { + return err + } + } + p.set(iface, property, newv.Value()) + return nil +} + +// SetMust sets the value of the given property and panics if the interface or +// the property name are invalid. 
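// A minimal setup sketch (not from the vendored source) for the Properties type
// above; the interface and property names are made up. After New, the Get, GetAll
// and Set calls are served on org.freedesktop.DBus.Properties for the given path.
func examplePropExport(conn *dbus.Conn) *Properties {
	propsSpec := map[string]map[string]*Prop{
		"com.example.Ping": {
			"Count": {
				Value:    int32(0),
				Writable: true,
				Emit:     EmitTrue, // emit PropertiesChanged with the new value
				Callback: nil,      // no validation hook
			},
		},
	}
	return New(conn, "/com/example/Ping", propsSpec)
}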
+func (p *Properties) SetMust(iface, property string, v interface{}) { + p.mut.Lock() + p.set(iface, property, v) + p.mut.Unlock() +} diff --git a/vendor/src/github.com/godbus/dbus/proto_test.go b/vendor/src/github.com/godbus/dbus/proto_test.go new file mode 100644 index 0000000000..608a770d41 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/proto_test.go @@ -0,0 +1,369 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "io/ioutil" + "math" + "reflect" + "testing" +) + +var protoTests = []struct { + vs []interface{} + bigEndian []byte + littleEndian []byte +}{ + { + []interface{}{int32(0)}, + []byte{0, 0, 0, 0}, + []byte{0, 0, 0, 0}, + }, + { + []interface{}{true, false}, + []byte{0, 0, 0, 1, 0, 0, 0, 0}, + []byte{1, 0, 0, 0, 0, 0, 0, 0}, + }, + { + []interface{}{byte(0), uint16(12), int16(32), uint32(43)}, + []byte{0, 0, 0, 12, 0, 32, 0, 0, 0, 0, 0, 43}, + []byte{0, 0, 12, 0, 32, 0, 0, 0, 43, 0, 0, 0}, + }, + { + []interface{}{int64(-1), uint64(1<<64 - 1)}, + bytes.Repeat([]byte{255}, 16), + bytes.Repeat([]byte{255}, 16), + }, + { + []interface{}{math.Inf(+1)}, + []byte{0x7f, 0xf0, 0, 0, 0, 0, 0, 0}, + []byte{0, 0, 0, 0, 0, 0, 0xf0, 0x7f}, + }, + { + []interface{}{"foo"}, + []byte{0, 0, 0, 3, 'f', 'o', 'o', 0}, + []byte{3, 0, 0, 0, 'f', 'o', 'o', 0}, + }, + { + []interface{}{Signature{"ai"}}, + []byte{2, 'a', 'i', 0}, + []byte{2, 'a', 'i', 0}, + }, + { + []interface{}{[]int16{42, 256}}, + []byte{0, 0, 0, 4, 0, 42, 1, 0}, + []byte{4, 0, 0, 0, 42, 0, 0, 1}, + }, + { + []interface{}{MakeVariant("foo")}, + []byte{1, 's', 0, 0, 0, 0, 0, 3, 'f', 'o', 'o', 0}, + []byte{1, 's', 0, 0, 3, 0, 0, 0, 'f', 'o', 'o', 0}, + }, + { + []interface{}{MakeVariant(MakeVariant(Signature{"v"}))}, + []byte{1, 'v', 0, 1, 'g', 0, 1, 'v', 0}, + []byte{1, 'v', 0, 1, 'g', 0, 1, 'v', 0}, + }, + { + []interface{}{map[int32]bool{42: true}}, + []byte{0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 1}, + []byte{8, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 1, 0, 0, 0}, + }, + { + []interface{}{map[string]Variant{}, byte(42)}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + }, + { + []interface{}{[]uint64{}, byte(42)}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + []byte{0, 0, 0, 0, 0, 0, 0, 0, 42}, + }, +} + +func TestProto(t *testing.T) { + for i, v := range protoTests { + buf := new(bytes.Buffer) + bigEnc := newEncoder(buf, binary.BigEndian) + bigEnc.Encode(v.vs...) + marshalled := buf.Bytes() + if bytes.Compare(marshalled, v.bigEndian) != 0 { + t.Errorf("test %d (marshal be): got '%v', but expected '%v'\n", i+1, marshalled, + v.bigEndian) + } + buf.Reset() + litEnc := newEncoder(buf, binary.LittleEndian) + litEnc.Encode(v.vs...) 
+ marshalled = buf.Bytes() + if bytes.Compare(marshalled, v.littleEndian) != 0 { + t.Errorf("test %d (marshal le): got '%v', but expected '%v'\n", i+1, marshalled, + v.littleEndian) + } + unmarshalled := reflect.MakeSlice(reflect.TypeOf(v.vs), + 0, 0) + for i := range v.vs { + unmarshalled = reflect.Append(unmarshalled, + reflect.New(reflect.TypeOf(v.vs[i]))) + } + bigDec := newDecoder(bytes.NewReader(v.bigEndian), binary.BigEndian) + vs, err := bigDec.Decode(SignatureOf(v.vs...)) + if err != nil { + t.Errorf("test %d (unmarshal be): %s\n", i+1, err) + continue + } + if !reflect.DeepEqual(vs, v.vs) { + t.Errorf("test %d (unmarshal be): got %#v, but expected %#v\n", i+1, vs, v.vs) + } + litDec := newDecoder(bytes.NewReader(v.littleEndian), binary.LittleEndian) + vs, err = litDec.Decode(SignatureOf(v.vs...)) + if err != nil { + t.Errorf("test %d (unmarshal le): %s\n", i+1, err) + continue + } + if !reflect.DeepEqual(vs, v.vs) { + t.Errorf("test %d (unmarshal le): got %#v, but expected %#v\n", i+1, vs, v.vs) + } + + } +} + +func TestProtoMap(t *testing.T) { + m := map[string]uint8{ + "foo": 23, + "bar": 2, + } + var n map[string]uint8 + buf := new(bytes.Buffer) + enc := newEncoder(buf, binary.LittleEndian) + enc.Encode(m) + dec := newDecoder(buf, binary.LittleEndian) + vs, err := dec.Decode(Signature{"a{sy}"}) + if err != nil { + t.Fatal(err) + } + if err = Store(vs, &n); err != nil { + t.Fatal(err) + } + if len(n) != 2 || n["foo"] != 23 || n["bar"] != 2 { + t.Error("got", n) + } +} + +func TestProtoVariantStruct(t *testing.T) { + var variant Variant + v := MakeVariant(struct { + A int32 + B int16 + }{1, 2}) + buf := new(bytes.Buffer) + enc := newEncoder(buf, binary.LittleEndian) + enc.Encode(v) + dec := newDecoder(buf, binary.LittleEndian) + vs, err := dec.Decode(Signature{"v"}) + if err != nil { + t.Fatal(err) + } + if err = Store(vs, &variant); err != nil { + t.Fatal(err) + } + sl := variant.Value().([]interface{}) + v1, v2 := sl[0].(int32), sl[1].(int16) + if v1 != int32(1) { + t.Error("got", v1, "as first int") + } + if v2 != int16(2) { + t.Error("got", v2, "as second int") + } +} + +func TestProtoStructTag(t *testing.T) { + type Bar struct { + A int32 + B chan interface{} `dbus:"-"` + C int32 + } + var bar1, bar2 Bar + bar1.A = 234 + bar2.C = 345 + buf := new(bytes.Buffer) + enc := newEncoder(buf, binary.LittleEndian) + enc.Encode(bar1) + dec := newDecoder(buf, binary.LittleEndian) + vs, err := dec.Decode(Signature{"(ii)"}) + if err != nil { + t.Fatal(err) + } + if err = Store(vs, &bar2); err != nil { + t.Fatal(err) + } + if bar1 != bar2 { + t.Error("struct tag test: got", bar2) + } +} + +func TestProtoStoreStruct(t *testing.T) { + var foo struct { + A int32 + B string + c chan interface{} + D interface{} `dbus:"-"` + } + src := []interface{}{[]interface{}{int32(42), "foo"}} + err := Store(src, &foo) + if err != nil { + t.Fatal(err) + } +} + +func TestProtoStoreNestedStruct(t *testing.T) { + var foo struct { + A int32 + B struct { + C string + D float64 + } + } + src := []interface{}{ + []interface{}{ + int32(42), + []interface{}{ + "foo", + 3.14, + }, + }, + } + err := Store(src, &foo) + if err != nil { + t.Fatal(err) + } +} + +func TestMessage(t *testing.T) { + buf := new(bytes.Buffer) + message := new(Message) + message.Type = TypeMethodCall + message.serial = 32 + message.Headers = map[HeaderField]Variant{ + FieldPath: MakeVariant(ObjectPath("/org/foo/bar")), + FieldMember: MakeVariant("baz"), + } + message.Body = make([]interface{}, 0) + err := message.EncodeTo(buf, 
binary.LittleEndian) + if err != nil { + t.Error(err) + } + _, err = DecodeMessage(buf) + if err != nil { + t.Error(err) + } +} + +func TestProtoStructInterfaces(t *testing.T) { + b := []byte{42} + vs, err := newDecoder(bytes.NewReader(b), binary.LittleEndian).Decode(Signature{"(y)"}) + if err != nil { + t.Fatal(err) + } + if vs[0].([]interface{})[0].(byte) != 42 { + t.Errorf("wrongs results (got %v)", vs) + } +} + +// ordinary org.freedesktop.DBus.Hello call +var smallMessage = &Message{ + Type: TypeMethodCall, + serial: 1, + Headers: map[HeaderField]Variant{ + FieldDestination: MakeVariant("org.freedesktop.DBus"), + FieldPath: MakeVariant(ObjectPath("/org/freedesktop/DBus")), + FieldInterface: MakeVariant("org.freedesktop.DBus"), + FieldMember: MakeVariant("Hello"), + }, +} + +// org.freedesktop.Notifications.Notify +var bigMessage = &Message{ + Type: TypeMethodCall, + serial: 2, + Headers: map[HeaderField]Variant{ + FieldDestination: MakeVariant("org.freedesktop.Notifications"), + FieldPath: MakeVariant(ObjectPath("/org/freedesktop/Notifications")), + FieldInterface: MakeVariant("org.freedesktop.Notifications"), + FieldMember: MakeVariant("Notify"), + FieldSignature: MakeVariant(Signature{"susssasa{sv}i"}), + }, + Body: []interface{}{ + "app_name", + uint32(0), + "dialog-information", + "Notification", + "This is the body of a notification", + []string{"ok", "Ok"}, + map[string]Variant{ + "sound-name": MakeVariant("dialog-information"), + }, + int32(-1), + }, +} + +func BenchmarkDecodeMessageSmall(b *testing.B) { + var err error + var rd *bytes.Reader + + b.StopTimer() + buf := new(bytes.Buffer) + err = smallMessage.EncodeTo(buf, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + decoded := buf.Bytes() + b.StartTimer() + for i := 0; i < b.N; i++ { + rd = bytes.NewReader(decoded) + _, err = DecodeMessage(rd) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkDecodeMessageBig(b *testing.B) { + var err error + var rd *bytes.Reader + + b.StopTimer() + buf := new(bytes.Buffer) + err = bigMessage.EncodeTo(buf, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + decoded := buf.Bytes() + b.StartTimer() + for i := 0; i < b.N; i++ { + rd = bytes.NewReader(decoded) + _, err = DecodeMessage(rd) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeMessageSmall(b *testing.B) { + var err error + for i := 0; i < b.N; i++ { + err = smallMessage.EncodeTo(ioutil.Discard, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeMessageBig(b *testing.B) { + var err error + for i := 0; i < b.N; i++ { + err = bigMessage.EncodeTo(ioutil.Discard, binary.LittleEndian) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/src/github.com/godbus/dbus/sig.go b/vendor/src/github.com/godbus/dbus/sig.go new file mode 100644 index 0000000000..f45b53ce1b --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/sig.go @@ -0,0 +1,257 @@ +package dbus + +import ( + "fmt" + "reflect" + "strings" +) + +var sigToType = map[byte]reflect.Type{ + 'y': byteType, + 'b': boolType, + 'n': int16Type, + 'q': uint16Type, + 'i': int32Type, + 'u': uint32Type, + 'x': int64Type, + 't': uint64Type, + 'd': float64Type, + 's': stringType, + 'g': signatureType, + 'o': objectPathType, + 'v': variantType, + 'h': unixFDIndexType, +} + +// Signature represents a correct type signature as specified by the D-Bus +// specification. The zero value represents the empty signature, "". 
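// A short sketch (not from the vendored source) of what the Signature helpers
// defined below produce for a few Go values; the expected strings follow from the
// type mapping in getSignature.
func signatureExamples() {
	fmt.Println(SignatureOf(int32(0)))                    // i
	fmt.Println(SignatureOf("", uint32(7)))               // su
	fmt.Println(SignatureOf(map[string]Variant{}))        // a{sv}
	fmt.Println(SignatureOf([]int16{}, MakeVariant("x"))) // anv
}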
+type Signature struct { + str string +} + +// SignatureOf returns the concatenation of all the signatures of the given +// values. It panics if one of them is not representable in D-Bus. +func SignatureOf(vs ...interface{}) Signature { + var s string + for _, v := range vs { + s += getSignature(reflect.TypeOf(v)) + } + return Signature{s} +} + +// SignatureOfType returns the signature of the given type. It panics if the +// type is not representable in D-Bus. +func SignatureOfType(t reflect.Type) Signature { + return Signature{getSignature(t)} +} + +// getSignature returns the signature of the given type and panics on unknown types. +func getSignature(t reflect.Type) string { + // handle simple types first + switch t.Kind() { + case reflect.Uint8: + return "y" + case reflect.Bool: + return "b" + case reflect.Int16: + return "n" + case reflect.Uint16: + return "q" + case reflect.Int32: + if t == unixFDType { + return "h" + } + return "i" + case reflect.Uint32: + if t == unixFDIndexType { + return "h" + } + return "u" + case reflect.Int64: + return "x" + case reflect.Uint64: + return "t" + case reflect.Float64: + return "d" + case reflect.Ptr: + return getSignature(t.Elem()) + case reflect.String: + if t == objectPathType { + return "o" + } + return "s" + case reflect.Struct: + if t == variantType { + return "v" + } else if t == signatureType { + return "g" + } + var s string + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + s += getSignature(t.Field(i).Type) + } + } + return "(" + s + ")" + case reflect.Array, reflect.Slice: + return "a" + getSignature(t.Elem()) + case reflect.Map: + if !isKeyType(t.Key()) { + panic(InvalidTypeError{t}) + } + return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}" + } + panic(InvalidTypeError{t}) +} + +// ParseSignature returns the signature represented by this string, or a +// SignatureError if the string is not a valid signature. +func ParseSignature(s string) (sig Signature, err error) { + if len(s) == 0 { + return + } + if len(s) > 255 { + return Signature{""}, SignatureError{s, "too long"} + } + sig.str = s + for err == nil && len(s) != 0 { + err, s = validSingle(s, 0) + } + if err != nil { + sig = Signature{""} + } + + return +} + +// ParseSignatureMust behaves like ParseSignature, except that it panics if s +// is not valid. +func ParseSignatureMust(s string) Signature { + sig, err := ParseSignature(s) + if err != nil { + panic(err) + } + return sig +} + +// Empty retruns whether the signature is the empty signature. +func (s Signature) Empty() bool { + return s.str == "" +} + +// Single returns whether the signature represents a single, complete type. +func (s Signature) Single() bool { + err, r := validSingle(s.str, 0) + return err != nil && r == "" +} + +// String returns the signature's string representation. +func (s Signature) String() string { + return s.str +} + +// A SignatureError indicates that a signature passed to a function or received +// on a connection is not a valid signature. +type SignatureError struct { + Sig string + Reason string +} + +func (e SignatureError) Error() string { + return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason) +} + +// Try to read a single type from this string. If it was successfull, err is nil +// and rem is the remaining unparsed part. Otherwise, err is a non-nil +// SignatureError and rem is "". 
depth is the current recursion depth which may +// not be greater than 64 and should be given as 0 on the first call. +func validSingle(s string, depth int) (err error, rem string) { + if s == "" { + return SignatureError{Sig: s, Reason: "empty signature"}, "" + } + if depth > 64 { + return SignatureError{Sig: s, Reason: "container nesting too deep"}, "" + } + switch s[0] { + case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h': + return nil, s[1:] + case 'a': + if len(s) > 1 && s[1] == '{' { + i := findMatching(s[1:], '{', '}') + if i == -1 { + return SignatureError{Sig: s, Reason: "unmatched '{'"}, "" + } + i++ + rem = s[i+1:] + s = s[2:i] + if err, _ = validSingle(s[:1], depth+1); err != nil { + return err, "" + } + err, nr := validSingle(s[1:], depth+1) + if err != nil { + return err, "" + } + if nr != "" { + return SignatureError{Sig: s, Reason: "too many types in dict"}, "" + } + return nil, rem + } + return validSingle(s[1:], depth+1) + case '(': + i := findMatching(s, '(', ')') + if i == -1 { + return SignatureError{Sig: s, Reason: "unmatched ')'"}, "" + } + rem = s[i+1:] + s = s[1:i] + for err == nil && s != "" { + err, s = validSingle(s, depth+1) + } + if err != nil { + rem = "" + } + return + } + return SignatureError{Sig: s, Reason: "invalid type character"}, "" +} + +func findMatching(s string, left, right rune) int { + n := 0 + for i, v := range s { + if v == left { + n++ + } else if v == right { + n-- + } + if n == 0 { + return i + } + } + return -1 +} + +// typeFor returns the type of the given signature. It ignores any left over +// characters and panics if s doesn't start with a valid type signature. +func typeFor(s string) (t reflect.Type) { + err, _ := validSingle(s, 0) + if err != nil { + panic(err) + } + + if t, ok := sigToType[s[0]]; ok { + return t + } + switch s[0] { + case 'a': + if s[1] == '{' { + i := strings.LastIndex(s, "}") + t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i])) + } else { + t = reflect.SliceOf(typeFor(s[1:])) + } + case '(': + t = interfacesType + } + return +} diff --git a/vendor/src/github.com/godbus/dbus/sig_test.go b/vendor/src/github.com/godbus/dbus/sig_test.go new file mode 100644 index 0000000000..da37bc968e --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/sig_test.go @@ -0,0 +1,70 @@ +package dbus + +import ( + "testing" +) + +var sigTests = []struct { + vs []interface{} + sig Signature +}{ + { + []interface{}{new(int32)}, + Signature{"i"}, + }, + { + []interface{}{new(string)}, + Signature{"s"}, + }, + { + []interface{}{new(Signature)}, + Signature{"g"}, + }, + { + []interface{}{new([]int16)}, + Signature{"an"}, + }, + { + []interface{}{new(int16), new(uint32)}, + Signature{"nu"}, + }, + { + []interface{}{new(map[byte]Variant)}, + Signature{"a{yv}"}, + }, + { + []interface{}{new(Variant), new([]map[int32]string)}, + Signature{"vaa{is}"}, + }, +} + +func TestSig(t *testing.T) { + for i, v := range sigTests { + sig := SignatureOf(v.vs...) + if sig != v.sig { + t.Errorf("test %d: got %q, expected %q", i+1, sig.str, v.sig.str) + } + } +} + +var getSigTest = []interface{}{ + []struct { + b byte + i int32 + t uint64 + s string + }{}, + map[string]Variant{}, +} + +func BenchmarkGetSignatureSimple(b *testing.B) { + for i := 0; i < b.N; i++ { + SignatureOf("", int32(0)) + } +} + +func BenchmarkGetSignatureLong(b *testing.B) { + for i := 0; i < b.N; i++ { + SignatureOf(getSigTest...) 
+ } +} diff --git a/vendor/src/github.com/godbus/dbus/transport_darwin.go b/vendor/src/github.com/godbus/dbus/transport_darwin.go new file mode 100644 index 0000000000..1bba0d6bf7 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/transport_darwin.go @@ -0,0 +1,6 @@ +package dbus + +func (t *unixTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} diff --git a/vendor/src/github.com/godbus/dbus/transport_generic.go b/vendor/src/github.com/godbus/dbus/transport_generic.go new file mode 100644 index 0000000000..46f8f49d69 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/transport_generic.go @@ -0,0 +1,35 @@ +package dbus + +import ( + "encoding/binary" + "errors" + "io" +) + +type genericTransport struct { + io.ReadWriteCloser +} + +func (t genericTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} + +func (t genericTransport) SupportsUnixFDs() bool { + return false +} + +func (t genericTransport) EnableUnixFDs() {} + +func (t genericTransport) ReadMessage() (*Message, error) { + return DecodeMessage(t) +} + +func (t genericTransport) SendMessage(msg *Message) error { + for _, v := range msg.Body { + if _, ok := v.(UnixFD); ok { + return errors.New("dbus: unix fd passing not enabled") + } + } + return msg.EncodeTo(t, binary.LittleEndian) +} diff --git a/vendor/src/github.com/godbus/dbus/transport_unix.go b/vendor/src/github.com/godbus/dbus/transport_unix.go new file mode 100644 index 0000000000..d16229be40 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/transport_unix.go @@ -0,0 +1,190 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "net" + "syscall" +) + +type oobReader struct { + conn *net.UnixConn + oob []byte + buf [4096]byte +} + +func (o *oobReader) Read(b []byte) (n int, err error) { + n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:]) + if err != nil { + return n, err + } + if flags&syscall.MSG_CTRUNC != 0 { + return n, errors.New("dbus: control data truncated (too many fds received)") + } + o.oob = append(o.oob, o.buf[:oobn]...) + return n, nil +} + +type unixTransport struct { + *net.UnixConn + hasUnixFDs bool +} + +func newUnixTransport(keys string) (transport, error) { + var err error + + t := new(unixTransport) + abstract := getKey(keys, "abstract") + path := getKey(keys, "path") + switch { + case abstract == "" && path == "": + return nil, errors.New("dbus: invalid address (neither path nor abstract set)") + case abstract != "" && path == "": + t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"}) + if err != nil { + return nil, err + } + return t, nil + case abstract == "" && path != "": + t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"}) + if err != nil { + return nil, err + } + return t, nil + default: + return nil, errors.New("dbus: invalid address (both path and abstract set)") + } +} + +func (t *unixTransport) EnableUnixFDs() { + t.hasUnixFDs = true +} + +func (t *unixTransport) ReadMessage() (*Message, error) { + var ( + blen, hlen uint32 + csheader [16]byte + headers []header + order binary.ByteOrder + unixfds uint32 + ) + // To be sure that all bytes of out-of-band data are read, we use a special + // reader that uses ReadUnix on the underlying connection instead of Read + // and gathers the out-of-band data in a buffer. 
+ rd := &oobReader{conn: t.UnixConn} + // read the first 16 bytes (the part of the header that has a constant size), + // from which we can figure out the length of the rest of the message + if _, err := io.ReadFull(rd, csheader[:]); err != nil { + return nil, err + } + switch csheader[0] { + case 'l': + order = binary.LittleEndian + case 'B': + order = binary.BigEndian + default: + return nil, InvalidMessageError("invalid byte order") + } + // csheader[4:8] -> length of message body, csheader[12:16] -> length of + // header fields (without alignment) + binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen) + binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen) + if hlen%8 != 0 { + hlen += 8 - (hlen % 8) + } + + // decode headers and look for unix fds + headerdata := make([]byte, hlen+4) + copy(headerdata, csheader[12:]) + if _, err := io.ReadFull(t, headerdata[4:]); err != nil { + return nil, err + } + dec := newDecoder(bytes.NewBuffer(headerdata), order) + dec.pos = 12 + vs, err := dec.Decode(Signature{"a(yv)"}) + if err != nil { + return nil, err + } + Store(vs, &headers) + for _, v := range headers { + if v.Field == byte(FieldUnixFDs) { + unixfds, _ = v.Variant.value.(uint32) + } + } + all := make([]byte, 16+hlen+blen) + copy(all, csheader[:]) + copy(all[16:], headerdata[4:]) + if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil { + return nil, err + } + if unixfds != 0 { + if !t.hasUnixFDs { + return nil, errors.New("dbus: got unix fds on unsupported transport") + } + // read the fds from the OOB data + scms, err := syscall.ParseSocketControlMessage(rd.oob) + if err != nil { + return nil, err + } + if len(scms) != 1 { + return nil, errors.New("dbus: received more than one socket control message") + } + fds, err := syscall.ParseUnixRights(&scms[0]) + if err != nil { + return nil, err + } + msg, err := DecodeMessage(bytes.NewBuffer(all)) + if err != nil { + return nil, err + } + // substitute the values in the message body (which are indices for the + // array receiver via OOB) with the actual values + for i, v := range msg.Body { + if j, ok := v.(UnixFDIndex); ok { + if uint32(j) >= unixfds { + return nil, InvalidMessageError("invalid index for unix fd") + } + msg.Body[i] = UnixFD(fds[j]) + } + } + return msg, nil + } + return DecodeMessage(bytes.NewBuffer(all)) +} + +func (t *unixTransport) SendMessage(msg *Message) error { + fds := make([]int, 0) + for i, v := range msg.Body { + if fd, ok := v.(UnixFD); ok { + msg.Body[i] = UnixFDIndex(len(fds)) + fds = append(fds, int(fd)) + } + } + if len(fds) != 0 { + if !t.hasUnixFDs { + return errors.New("dbus: unix fd passing not enabled") + } + msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds))) + oob := syscall.UnixRights(fds...) + buf := new(bytes.Buffer) + msg.EncodeTo(buf, binary.LittleEndian) + n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil) + if err != nil { + return err + } + if n != buf.Len() || oobn != len(oob) { + return io.ErrShortWrite + } + } else { + if err := msg.EncodeTo(t, binary.LittleEndian); err != nil { + return nil + } + } + return nil +} + +func (t *unixTransport) SupportsUnixFDs() bool { + return true +} diff --git a/vendor/src/github.com/godbus/dbus/transport_unix_test.go b/vendor/src/github.com/godbus/dbus/transport_unix_test.go new file mode 100644 index 0000000000..302233fc65 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/transport_unix_test.go @@ -0,0 +1,49 @@ +package dbus + +import ( + "os" + "testing" +) + +const testString = `This is a test! 
+This text should be read from the file that is created by this test.` + +type unixFDTest struct{} + +func (t unixFDTest) Test(fd UnixFD) (string, *Error) { + var b [4096]byte + file := os.NewFile(uintptr(fd), "testfile") + defer file.Close() + n, err := file.Read(b[:]) + if err != nil { + return "", &Error{"com.github.guelfey.test.Error", nil} + } + return string(b[:n]), nil +} + +func TestUnixFDs(t *testing.T) { + conn, err := SessionBus() + if err != nil { + t.Fatal(err) + } + r, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + defer w.Close() + if _, err := w.Write([]byte(testString)); err != nil { + t.Fatal(err) + } + name := conn.Names()[0] + test := unixFDTest{} + conn.Export(test, "/com/github/guelfey/test", "com.github.guelfey.test") + var s string + obj := conn.Object(name, "/com/github/guelfey/test") + err = obj.Call("com.github.guelfey.test.Test", 0, UnixFD(r.Fd())).Store(&s) + if err != nil { + t.Fatal(err) + } + if s != testString { + t.Fatal("got", s, "wanted", testString) + } +} diff --git a/vendor/src/github.com/godbus/dbus/transport_unixcred.go b/vendor/src/github.com/godbus/dbus/transport_unixcred.go new file mode 100644 index 0000000000..42a0e769ef --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/transport_unixcred.go @@ -0,0 +1,22 @@ +// +build !darwin + +package dbus + +import ( + "io" + "os" + "syscall" +) + +func (t *unixTransport) SendNullByte() error { + ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} + b := syscall.UnixCredentials(ucred) + _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) + if err != nil { + return err + } + if oobn != len(b) { + return io.ErrShortWrite + } + return nil +} diff --git a/vendor/src/github.com/godbus/dbus/variant.go b/vendor/src/github.com/godbus/dbus/variant.go new file mode 100644 index 0000000000..b1b53ceb47 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/variant.go @@ -0,0 +1,129 @@ +package dbus + +import ( + "bytes" + "fmt" + "reflect" + "strconv" +) + +// Variant represents the D-Bus variant type. +type Variant struct { + sig Signature + value interface{} +} + +// MakeVariant converts the given value to a Variant. It panics if v cannot be +// represented as a D-Bus type. +func MakeVariant(v interface{}) Variant { + return Variant{SignatureOf(v), v} +} + +// ParseVariant parses the given string as a variant as described at +// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not +// empty, it is taken to be the expected signature for the variant. +func ParseVariant(s string, sig Signature) (Variant, error) { + tokens := varLex(s) + p := &varParser{tokens: tokens} + n, err := varMakeNode(p) + if err != nil { + return Variant{}, err + } + if sig.str == "" { + sig, err = varInfer(n) + if err != nil { + return Variant{}, err + } + } + v, err := n.Value(sig) + if err != nil { + return Variant{}, err + } + return MakeVariant(v), nil +} + +// format returns a formatted version of v and whether this string can be parsed +// unambigously. 
+func (v Variant) format() (string, bool) { + switch v.sig.str[0] { + case 'b', 'i': + return fmt.Sprint(v.value), true + case 'n', 'q', 'u', 'x', 't', 'd', 'h': + return fmt.Sprint(v.value), false + case 's': + return strconv.Quote(v.value.(string)), true + case 'o': + return strconv.Quote(string(v.value.(ObjectPath))), false + case 'g': + return strconv.Quote(v.value.(Signature).str), false + case 'v': + s, unamb := v.value.(Variant).format() + if !unamb { + return "<@" + v.value.(Variant).sig.str + " " + s + ">", true + } + return "<" + s + ">", true + case 'y': + return fmt.Sprintf("%#x", v.value.(byte)), false + } + rv := reflect.ValueOf(v.value) + switch rv.Kind() { + case reflect.Slice: + if rv.Len() == 0 { + return "[]", false + } + unamb := true + buf := bytes.NewBuffer([]byte("[")) + for i := 0; i < rv.Len(); i++ { + // TODO: slooow + s, b := MakeVariant(rv.Index(i).Interface()).format() + unamb = unamb && b + buf.WriteString(s) + if i != rv.Len()-1 { + buf.WriteString(", ") + } + } + buf.WriteByte(']') + return buf.String(), unamb + case reflect.Map: + if rv.Len() == 0 { + return "{}", false + } + unamb := true + buf := bytes.NewBuffer([]byte("{")) + for i, k := range rv.MapKeys() { + s, b := MakeVariant(k.Interface()).format() + unamb = unamb && b + buf.WriteString(s) + buf.WriteString(": ") + s, b = MakeVariant(rv.MapIndex(k).Interface()).format() + unamb = unamb && b + buf.WriteString(s) + if i != rv.Len()-1 { + buf.WriteString(", ") + } + } + buf.WriteByte('}') + return buf.String(), unamb + } + return `"INVALID"`, true +} + +// Signature returns the D-Bus signature of the underlying value of v. +func (v Variant) Signature() Signature { + return v.sig +} + +// String returns the string representation of the underlying value of v as +// described at https://developer.gnome.org/glib/unstable/gvariant-text.html. +func (v Variant) String() string { + s, unamb := v.format() + if !unamb { + return "@" + v.sig.str + " " + s + } + return s +} + +// Value returns the underlying value of v. +func (v Variant) Value() interface{} { + return v.value +} diff --git a/vendor/src/github.com/godbus/dbus/variant_lexer.go b/vendor/src/github.com/godbus/dbus/variant_lexer.go new file mode 100644 index 0000000000..332007d6f1 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/variant_lexer.go @@ -0,0 +1,284 @@ +package dbus + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// Heavily inspired by the lexer from text/template. 
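// A short sketch (not from the vendored source) tying this lexer to the exported
// entry points in variant.go above: MakeVariant and String produce the textual
// form, and ParseVariant, which drives the lexer below, reads such text back.
func variantTextExamples() {
	v := MakeVariant(uint16(500))
	fmt.Println(v.Signature()) // q
	fmt.Println(v.String())    // @q 500 (numeric types other than int32 keep the type prefix)

	parsed, err := ParseVariant(`["a", "b"]`, Signature{})
	if err == nil {
		fmt.Println(parsed.Value().([]string)) // [a b]
	}
}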
+ +type varToken struct { + typ varTokenType + val string +} + +type varTokenType byte + +const ( + tokEOF varTokenType = iota + tokError + tokNumber + tokString + tokBool + tokArrayStart + tokArrayEnd + tokDictStart + tokDictEnd + tokVariantStart + tokVariantEnd + tokComma + tokColon + tokType + tokByteString +) + +type varLexer struct { + input string + start int + pos int + width int + tokens []varToken +} + +type lexState func(*varLexer) lexState + +func varLex(s string) []varToken { + l := &varLexer{input: s} + l.run() + return l.tokens +} + +func (l *varLexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +func (l *varLexer) backup() { + l.pos -= l.width +} + +func (l *varLexer) emit(t varTokenType) { + l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]}) + l.start = l.pos +} + +func (l *varLexer) errorf(format string, v ...interface{}) lexState { + l.tokens = append(l.tokens, varToken{ + tokError, + fmt.Sprintf(format, v...), + }) + return nil +} + +func (l *varLexer) ignore() { + l.start = l.pos +} + +func (l *varLexer) next() rune { + var r rune + + if l.pos >= len(l.input) { + l.width = 0 + return -1 + } + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + l.pos += l.width + return r +} + +func (l *varLexer) run() { + for state := varLexNormal; state != nil; { + state = state(l) + } +} + +func (l *varLexer) peek() rune { + r := l.next() + l.backup() + return r +} + +func varLexNormal(l *varLexer) lexState { + for { + r := l.next() + switch { + case r == -1: + l.emit(tokEOF) + return nil + case r == '[': + l.emit(tokArrayStart) + case r == ']': + l.emit(tokArrayEnd) + case r == '{': + l.emit(tokDictStart) + case r == '}': + l.emit(tokDictEnd) + case r == '<': + l.emit(tokVariantStart) + case r == '>': + l.emit(tokVariantEnd) + case r == ':': + l.emit(tokColon) + case r == ',': + l.emit(tokComma) + case r == '\'' || r == '"': + l.backup() + return varLexString + case r == '@': + l.backup() + return varLexType + case unicode.IsSpace(r): + l.ignore() + case unicode.IsNumber(r) || r == '+' || r == '-': + l.backup() + return varLexNumber + case r == 'b': + pos := l.start + if n := l.peek(); n == '"' || n == '\'' { + return varLexByteString + } + // not a byte string; try to parse it as a type or bool below + l.pos = pos + 1 + l.width = 1 + fallthrough + default: + // either a bool or a type. Try bools first. + l.backup() + if l.pos+4 <= len(l.input) { + if l.input[l.pos:l.pos+4] == "true" { + l.pos += 4 + l.emit(tokBool) + continue + } + } + if l.pos+5 <= len(l.input) { + if l.input[l.pos:l.pos+5] == "false" { + l.pos += 5 + l.emit(tokBool) + continue + } + } + // must be a type. 
+ return varLexType + } + } +} + +var varTypeMap = map[string]string{ + "boolean": "b", + "byte": "y", + "int16": "n", + "uint16": "q", + "int32": "i", + "uint32": "u", + "int64": "x", + "uint64": "t", + "double": "f", + "string": "s", + "objectpath": "o", + "signature": "g", +} + +func varLexByteString(l *varLexer) lexState { + q := l.next() +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != -1 { + break + } + fallthrough + case -1: + return l.errorf("unterminated bytestring") + case q: + break Loop + } + } + l.emit(tokByteString) + return varLexNormal +} + +func varLexNumber(l *varLexer) lexState { + l.accept("+-") + digits := "0123456789" + if l.accept("0") { + if l.accept("x") { + digits = "0123456789abcdefABCDEF" + } else { + digits = "01234567" + } + } + for strings.IndexRune(digits, l.next()) >= 0 { + } + l.backup() + if l.accept(".") { + for strings.IndexRune(digits, l.next()) >= 0 { + } + l.backup() + } + if l.accept("eE") { + l.accept("+-") + for strings.IndexRune("0123456789", l.next()) >= 0 { + } + l.backup() + } + if r := l.peek(); unicode.IsLetter(r) { + l.next() + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + l.emit(tokNumber) + return varLexNormal +} + +func varLexString(l *varLexer) lexState { + q := l.next() +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != -1 { + break + } + fallthrough + case -1: + return l.errorf("unterminated string") + case q: + break Loop + } + } + l.emit(tokString) + return varLexNormal +} + +func varLexType(l *varLexer) lexState { + at := l.accept("@") + for { + r := l.next() + if r == -1 { + break + } + if unicode.IsSpace(r) { + l.backup() + break + } + } + if at { + if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil { + return l.errorf("%s", err) + } + } else { + if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok { + l.emit(tokType) + return varLexNormal + } + return l.errorf("unrecognized type %q", l.input[l.start:l.pos]) + } + l.emit(tokType) + return varLexNormal +} diff --git a/vendor/src/github.com/godbus/dbus/variant_parser.go b/vendor/src/github.com/godbus/dbus/variant_parser.go new file mode 100644 index 0000000000..d20f5da6dd --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/variant_parser.go @@ -0,0 +1,817 @@ +package dbus + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type varParser struct { + tokens []varToken + i int +} + +func (p *varParser) backup() { + p.i-- +} + +func (p *varParser) next() varToken { + if p.i < len(p.tokens) { + t := p.tokens[p.i] + p.i++ + return t + } + return varToken{typ: tokEOF} +} + +type varNode interface { + Infer() (Signature, error) + String() string + Sigs() sigSet + Value(Signature) (interface{}, error) +} + +func varMakeNode(p *varParser) (varNode, error) { + var sig Signature + + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokNumber: + return varMakeNumNode(t, sig) + case tokString: + return varMakeStringNode(t, sig) + case tokBool: + if sig.str != "" && sig.str != "b" { + return nil, varTypeError{t.val, sig} + } + b, err := strconv.ParseBool(t.val) + if err != nil { + return nil, err + } + return boolNode(b), nil + case tokArrayStart: + return varMakeArrayNode(p, sig) + case tokVariantStart: + return varMakeVariantNode(p, sig) + case tokDictStart: + return varMakeDictNode(p, sig) + case tokType: + if sig.str != "" { + return nil, 
errors.New("unexpected type annotation") + } + if t.val[0] == '@' { + sig.str = t.val[1:] + } else { + sig.str = varTypeMap[t.val] + } + case tokByteString: + if sig.str != "" && sig.str != "ay" { + return nil, varTypeError{t.val, sig} + } + b, err := varParseByteString(t.val) + if err != nil { + return nil, err + } + return byteStringNode(b), nil + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } +} + +type varTypeError struct { + val string + sig Signature +} + +func (e varTypeError) Error() string { + return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str) +} + +type sigSet map[Signature]bool + +func (s sigSet) Empty() bool { + return len(s) == 0 +} + +func (s sigSet) Intersect(s2 sigSet) sigSet { + r := make(sigSet) + for k := range s { + if s2[k] { + r[k] = true + } + } + return r +} + +func (s sigSet) Single() (Signature, bool) { + if len(s) == 1 { + for k := range s { + return k, true + } + } + return Signature{}, false +} + +func (s sigSet) ToArray() sigSet { + r := make(sigSet, len(s)) + for k := range s { + r[Signature{"a" + k.str}] = true + } + return r +} + +type numNode struct { + sig Signature + str string + val interface{} +} + +var numSigSet = sigSet{ + Signature{"y"}: true, + Signature{"n"}: true, + Signature{"q"}: true, + Signature{"i"}: true, + Signature{"u"}: true, + Signature{"x"}: true, + Signature{"t"}: true, + Signature{"d"}: true, +} + +func (n numNode) Infer() (Signature, error) { + if strings.ContainsAny(n.str, ".e") { + return Signature{"d"}, nil + } + return Signature{"i"}, nil +} + +func (n numNode) String() string { + return n.str +} + +func (n numNode) Sigs() sigSet { + if n.sig.str != "" { + return sigSet{n.sig: true} + } + if strings.ContainsAny(n.str, ".e") { + return sigSet{Signature{"d"}: true} + } + return numSigSet +} + +func (n numNode) Value(sig Signature) (interface{}, error) { + if n.sig.str != "" && n.sig != sig { + return nil, varTypeError{n.str, sig} + } + if n.val != nil { + return n.val, nil + } + return varNumAs(n.str, sig) +} + +func varMakeNumNode(tok varToken, sig Signature) (varNode, error) { + if sig.str == "" { + return numNode{str: tok.val}, nil + } + num, err := varNumAs(tok.val, sig) + if err != nil { + return nil, err + } + return numNode{sig: sig, val: num}, nil +} + +func varNumAs(s string, sig Signature) (interface{}, error) { + isUnsigned := false + size := 32 + switch sig.str { + case "n": + size = 16 + case "i": + case "x": + size = 64 + case "y": + size = 8 + isUnsigned = true + case "q": + size = 16 + isUnsigned = true + case "u": + isUnsigned = true + case "t": + size = 64 + isUnsigned = true + case "d": + d, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, err + } + return d, nil + default: + return nil, varTypeError{s, sig} + } + base := 10 + if strings.HasPrefix(s, "0x") { + base = 16 + s = s[2:] + } + if strings.HasPrefix(s, "0") && len(s) != 1 { + base = 8 + s = s[1:] + } + if isUnsigned { + i, err := strconv.ParseUint(s, base, size) + if err != nil { + return nil, err + } + var v interface{} = i + switch sig.str { + case "y": + v = byte(i) + case "q": + v = uint16(i) + case "u": + v = uint32(i) + } + return v, nil + } + i, err := strconv.ParseInt(s, base, size) + if err != nil { + return nil, err + } + var v interface{} = i + switch sig.str { + case "n": + v = int16(i) + case "i": + v = int32(i) + } + return v, nil +} + +type stringNode struct { + sig Signature + str string // parsed + val interface{} // has correct type +} + +var stringSigSet = sigSet{ + 
Signature{"s"}: true, + Signature{"g"}: true, + Signature{"o"}: true, +} + +func (n stringNode) Infer() (Signature, error) { + return Signature{"s"}, nil +} + +func (n stringNode) String() string { + return n.str +} + +func (n stringNode) Sigs() sigSet { + if n.sig.str != "" { + return sigSet{n.sig: true} + } + return stringSigSet +} + +func (n stringNode) Value(sig Signature) (interface{}, error) { + if n.sig.str != "" && n.sig != sig { + return nil, varTypeError{n.str, sig} + } + if n.val != nil { + return n.val, nil + } + switch { + case sig.str == "g": + return Signature{n.str}, nil + case sig.str == "o": + return ObjectPath(n.str), nil + case sig.str == "s": + return n.str, nil + default: + return nil, varTypeError{n.str, sig} + } +} + +func varMakeStringNode(tok varToken, sig Signature) (varNode, error) { + if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" { + return nil, fmt.Errorf("invalid type %q for string", sig.str) + } + s, err := varParseString(tok.val) + if err != nil { + return nil, err + } + n := stringNode{str: s} + if sig.str == "" { + return stringNode{str: s}, nil + } + n.sig = sig + switch sig.str { + case "o": + n.val = ObjectPath(s) + case "g": + n.val = Signature{s} + case "s": + n.val = s + } + return n, nil +} + +func varParseString(s string) (string, error) { + // quotes are guaranteed to be there + s = s[1 : len(s)-1] + buf := new(bytes.Buffer) + for len(s) != 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && size == 1 { + return "", errors.New("invalid UTF-8") + } + s = s[size:] + if r != '\\' { + buf.WriteRune(r) + continue + } + r, size = utf8.DecodeRuneInString(s) + if r == utf8.RuneError && size == 1 { + return "", errors.New("invalid UTF-8") + } + s = s[size:] + switch r { + case 'a': + buf.WriteRune(0x7) + case 'b': + buf.WriteRune(0x8) + case 'f': + buf.WriteRune(0xc) + case 'n': + buf.WriteRune('\n') + case 'r': + buf.WriteRune('\r') + case 't': + buf.WriteRune('\t') + case '\n': + case 'u': + if len(s) < 4 { + return "", errors.New("short unicode escape") + } + r, err := strconv.ParseUint(s[:4], 16, 32) + if err != nil { + return "", err + } + buf.WriteRune(rune(r)) + s = s[4:] + case 'U': + if len(s) < 8 { + return "", errors.New("short unicode escape") + } + r, err := strconv.ParseUint(s[:8], 16, 32) + if err != nil { + return "", err + } + buf.WriteRune(rune(r)) + s = s[8:] + default: + buf.WriteRune(r) + } + } + return buf.String(), nil +} + +var boolSigSet = sigSet{Signature{"b"}: true} + +type boolNode bool + +func (boolNode) Infer() (Signature, error) { + return Signature{"b"}, nil +} + +func (b boolNode) String() string { + if b { + return "true" + } + return "false" +} + +func (boolNode) Sigs() sigSet { + return boolSigSet +} + +func (b boolNode) Value(sig Signature) (interface{}, error) { + if sig.str != "b" { + return nil, varTypeError{b.String(), sig} + } + return bool(b), nil +} + +type arrayNode struct { + set sigSet + children []varNode + val interface{} +} + +func (n arrayNode) Infer() (Signature, error) { + for _, v := range n.children { + csig, err := varInfer(v) + if err != nil { + continue + } + return Signature{"a" + csig.str}, nil + } + return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) +} + +func (n arrayNode) String() string { + s := "[" + for i, v := range n.children { + s += v.String() + if i != len(n.children)-1 { + s += ", " + } + } + return s + "]" +} + +func (n arrayNode) Sigs() sigSet { + return n.set +} + +func (n arrayNode) Value(sig Signature) (interface{}, 
error) { + if n.set.Empty() { + // no type information whatsoever, so this must be an empty slice + return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil + } + if !n.set[sig] { + return nil, varTypeError{n.String(), sig} + } + s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children)) + for i, v := range n.children { + rv, err := v.Value(Signature{sig.str[1:]}) + if err != nil { + return nil, err + } + s.Index(i).Set(reflect.ValueOf(rv)) + } + return s.Interface(), nil +} + +func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) { + var n arrayNode + if sig.str != "" { + n.set = sigSet{sig: true} + } + if t := p.next(); t.typ == tokArrayEnd { + return n, nil + } else { + p.backup() + } +Loop: + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + cn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if cset := cn.Sigs(); !cset.Empty() { + if n.set.Empty() { + n.set = cset.ToArray() + } else { + nset := cset.ToArray().Intersect(n.set) + if nset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", cn.String()) + } + n.set = nset + } + } + n.children = append(n.children, cn) + switch t := p.next(); t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokArrayEnd: + break Loop + case tokComma: + continue + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } + return n, nil +} + +type variantNode struct { + n varNode +} + +var variantSet = sigSet{ + Signature{"v"}: true, +} + +func (variantNode) Infer() (Signature, error) { + return Signature{"v"}, nil +} + +func (n variantNode) String() string { + return "<" + n.n.String() + ">" +} + +func (variantNode) Sigs() sigSet { + return variantSet +} + +func (n variantNode) Value(sig Signature) (interface{}, error) { + if sig.str != "v" { + return nil, varTypeError{n.String(), sig} + } + sig, err := varInfer(n.n) + if err != nil { + return nil, err + } + v, err := n.n.Value(sig) + if err != nil { + return nil, err + } + return MakeVariant(v), nil +} + +func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) { + n, err := varMakeNode(p) + if err != nil { + return nil, err + } + if t := p.next(); t.typ != tokVariantEnd { + return nil, fmt.Errorf("unexpected %q", t.val) + } + vn := variantNode{n} + if sig.str != "" && sig.str != "v" { + return nil, varTypeError{vn.String(), sig} + } + return variantNode{n}, nil +} + +type dictEntry struct { + key, val varNode +} + +type dictNode struct { + kset, vset sigSet + children []dictEntry + val interface{} +} + +func (n dictNode) Infer() (Signature, error) { + for _, v := range n.children { + ksig, err := varInfer(v.key) + if err != nil { + continue + } + vsig, err := varInfer(v.val) + if err != nil { + continue + } + return Signature{"a{" + ksig.str + vsig.str + "}"}, nil + } + return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) +} + +func (n dictNode) String() string { + s := "{" + for i, v := range n.children { + s += v.key.String() + ": " + v.val.String() + if i != len(n.children)-1 { + s += ", " + } + } + return s + "}" +} + +func (n dictNode) Sigs() sigSet { + r := sigSet{} + for k := range n.kset { + for v := range n.vset { + sig := "a{" + k.str + v.str + "}" + r[Signature{sig}] = true + } + } + return r +} + +func (n dictNode) Value(sig Signature) (interface{}, error) { + set := n.Sigs() + if set.Empty() { + // no type 
information -> empty dict + return reflect.MakeMap(typeFor(sig.str)).Interface(), nil + } + if !set[sig] { + return nil, varTypeError{n.String(), sig} + } + m := reflect.MakeMap(typeFor(sig.str)) + ksig := Signature{sig.str[2:3]} + vsig := Signature{sig.str[3 : len(sig.str)-1]} + for _, v := range n.children { + kv, err := v.key.Value(ksig) + if err != nil { + return nil, err + } + vv, err := v.val.Value(vsig) + if err != nil { + return nil, err + } + m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) + } + return m.Interface(), nil +} + +func varMakeDictNode(p *varParser, sig Signature) (varNode, error) { + var n dictNode + + if sig.str != "" { + if len(sig.str) < 5 { + return nil, fmt.Errorf("invalid signature %q for dict type", sig) + } + ksig := Signature{string(sig.str[2])} + vsig := Signature{sig.str[3 : len(sig.str)-1]} + n.kset = sigSet{ksig: true} + n.vset = sigSet{vsig: true} + } + if t := p.next(); t.typ == tokDictEnd { + return n, nil + } else { + p.backup() + } +Loop: + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + kn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if kset := kn.Sigs(); !kset.Empty() { + if n.kset.Empty() { + n.kset = kset + } else { + n.kset = kset.Intersect(n.kset) + if n.kset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", kn.String()) + } + } + } + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokColon: + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + vn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if vset := vn.Sigs(); !vset.Empty() { + if n.vset.Empty() { + n.vset = vset + } else { + n.vset = n.vset.Intersect(vset) + if n.vset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", vn.String()) + } + } + } + n.children = append(n.children, dictEntry{kn, vn}) + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokDictEnd: + break Loop + case tokComma: + continue + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } + return n, nil +} + +type byteStringNode []byte + +var byteStringSet = sigSet{ + Signature{"ay"}: true, +} + +func (byteStringNode) Infer() (Signature, error) { + return Signature{"ay"}, nil +} + +func (b byteStringNode) String() string { + return string(b) +} + +func (b byteStringNode) Sigs() sigSet { + return byteStringSet +} + +func (b byteStringNode) Value(sig Signature) (interface{}, error) { + if sig.str != "ay" { + return nil, varTypeError{b.String(), sig} + } + return []byte(b), nil +} + +func varParseByteString(s string) ([]byte, error) { + // quotes and b at start are guaranteed to be there + b := make([]byte, 0, 1) + s = s[2 : len(s)-1] + for len(s) != 0 { + c := s[0] + s = s[1:] + if c != '\\' { + b = append(b, c) + continue + } + c = s[0] + s = s[1:] + switch c { + case 'a': + b = append(b, 0x7) + case 'b': + b = append(b, 0x8) + case 'f': + b = append(b, 0xc) + case 'n': + b = append(b, '\n') + case 'r': + b = append(b, '\r') + case 't': + b = append(b, '\t') + case 'x': + if len(s) < 2 { + return nil, errors.New("short escape") + } + n, err := strconv.ParseUint(s[:2], 16, 8) + 
if err != nil { + return nil, err + } + b = append(b, byte(n)) + s = s[2:] + case '0': + if len(s) < 3 { + return nil, errors.New("short escape") + } + n, err := strconv.ParseUint(s[:3], 8, 8) + if err != nil { + return nil, err + } + b = append(b, byte(n)) + s = s[3:] + default: + b = append(b, c) + } + } + return append(b, 0), nil +} + +func varInfer(n varNode) (Signature, error) { + if sig, ok := n.Sigs().Single(); ok { + return sig, nil + } + return n.Infer() +} diff --git a/vendor/src/github.com/godbus/dbus/variant_test.go b/vendor/src/github.com/godbus/dbus/variant_test.go new file mode 100644 index 0000000000..da917c8e29 --- /dev/null +++ b/vendor/src/github.com/godbus/dbus/variant_test.go @@ -0,0 +1,78 @@ +package dbus + +import "reflect" +import "testing" + +var variantFormatTests = []struct { + v interface{} + s string +}{ + {int32(1), `1`}, + {"foo", `"foo"`}, + {ObjectPath("/org/foo"), `@o "/org/foo"`}, + {Signature{"i"}, `@g "i"`}, + {[]byte{}, `@ay []`}, + {[]int32{1, 2}, `[1, 2]`}, + {[]int64{1, 2}, `@ax [1, 2]`}, + {[][]int32{{3, 4}, {5, 6}}, `[[3, 4], [5, 6]]`}, + {[]Variant{MakeVariant(int32(1)), MakeVariant(1.0)}, `[<1>, <@d 1>]`}, + {map[string]int32{"one": 1, "two": 2}, `{"one": 1, "two": 2}`}, + {map[int32]ObjectPath{1: "/org/foo"}, `@a{io} {1: "/org/foo"}`}, + {map[string]Variant{}, `@a{sv} {}`}, +} + +func TestFormatVariant(t *testing.T) { + for i, v := range variantFormatTests { + if s := MakeVariant(v.v).String(); s != v.s { + t.Errorf("test %d: got %q, wanted %q", i+1, s, v.s) + } + } +} + +var variantParseTests = []struct { + s string + v interface{} +}{ + {"1", int32(1)}, + {"true", true}, + {"false", false}, + {"1.0", float64(1.0)}, + {"0x10", int32(16)}, + {"1e1", float64(10)}, + {`"foo"`, "foo"}, + {`"\a\b\f\n\r\t"`, "\x07\x08\x0c\n\r\t"}, + {`"\u00e4\U0001f603"`, "\u00e4\U0001f603"}, + {"[1]", []int32{1}}, + {"[1, 2, 3]", []int32{1, 2, 3}}, + {"@ai []", []int32{}}, + {"[1, 5.0]", []float64{1, 5.0}}, + {"[[1, 2], [3, 4.0]]", [][]float64{{1, 2}, {3, 4}}}, + {`[@o "/org/foo", "/org/bar"]`, []ObjectPath{"/org/foo", "/org/bar"}}, + {"<1>", MakeVariant(int32(1))}, + {"[<1>, <2.0>]", []Variant{MakeVariant(int32(1)), MakeVariant(2.0)}}, + {`[[], [""]]`, [][]string{{}, {""}}}, + {`@a{ss} {}`, map[string]string{}}, + {`{"foo": 1}`, map[string]int32{"foo": 1}}, + {`[{}, {"foo": "bar"}]`, []map[string]string{{}, {"foo": "bar"}}}, + {`{"a": <1>, "b": <"foo">}`, + map[string]Variant{"a": MakeVariant(int32(1)), "b": MakeVariant("foo")}}, + {`b''`, []byte{0}}, + {`b"abc"`, []byte{'a', 'b', 'c', 0}}, + {`b"\x01\0002\a\b\f\n\r\t"`, []byte{1, 2, 0x7, 0x8, 0xc, '\n', '\r', '\t', 0}}, + {`[[0], b""]`, [][]byte{{0}, {0}}}, + {"int16 0", int16(0)}, + {"byte 0", byte(0)}, +} + +func TestParseVariant(t *testing.T) { + for i, v := range variantParseTests { + nv, err := ParseVariant(v.s, Signature{}) + if err != nil { + t.Errorf("test %d: parsing failed: %s", i+1, err) + continue + } + if !reflect.DeepEqual(nv.value, v.v) { + t.Errorf("test %d: got %q, wanted %q", i+1, nv, v.v) + } + } +} -- cgit v1.2.1 From cb43fd007133fc05b6bb2b0d3d58fef8b1e60537 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 18 Mar 2014 18:40:00 +0100 Subject: pkg/systemd: Drop our copy-pasted version of go-systemd/activation Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- pkg/systemd/activation/files.go | 55 ------------------------------------- pkg/systemd/activation/listeners.go | 37 ------------------------- pkg/systemd/listendfd.go | 2 +- 3 files changed, 1 insertion(+), 
93 deletions(-) delete mode 100644 pkg/systemd/activation/files.go delete mode 100644 pkg/systemd/activation/listeners.go diff --git a/pkg/systemd/activation/files.go b/pkg/systemd/activation/files.go deleted file mode 100644 index 0281146310..0000000000 --- a/pkg/systemd/activation/files.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2013 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -// Package activation implements primitives for systemd socket activation. -package activation - -import ( - "os" - "strconv" - "syscall" -) - -// based on: https://gist.github.com/alberts/4640792 -const ( - listenFdsStart = 3 -) - -func Files(unsetEnv bool) []*os.File { - if unsetEnv { - // there is no way to unset env in golang os package for now - // https://code.google.com/p/go/issues/detail?id=6423 - defer os.Setenv("LISTEN_PID", "") - defer os.Setenv("LISTEN_FDS", "") - } - - pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) - if err != nil || pid != os.Getpid() { - return nil - } - - nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) - if err != nil || nfds == 0 { - return nil - } - - var files []*os.File - for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { - syscall.CloseOnExec(fd) - files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd))) - } - - return files -} diff --git a/pkg/systemd/activation/listeners.go b/pkg/systemd/activation/listeners.go deleted file mode 100644 index 3296a08361..0000000000 --- a/pkg/systemd/activation/listeners.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2014 CoreOS Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package activation - -import ( - "fmt" - "net" -) - -// Listeners returns net.Listeners for all socket activated fds passed to this process. 
-func Listeners(unsetEnv bool) ([]net.Listener, error) { - files := Files(unsetEnv) - listeners := make([]net.Listener, len(files)) - - for i, f := range files { - var err error - listeners[i], err = net.FileListener(f) - if err != nil { - return nil, fmt.Errorf("Error setting up FileListener for fd %d: %s", f.Fd(), err.Error()) - } - } - - return listeners, nil -} diff --git a/pkg/systemd/listendfd.go b/pkg/systemd/listendfd.go index f6044328c2..0fbc0a6ab6 100644 --- a/pkg/systemd/listendfd.go +++ b/pkg/systemd/listendfd.go @@ -5,7 +5,7 @@ import ( "net" "strconv" - "github.com/dotcloud/docker/pkg/systemd/activation" + "github.com/coreos/go-systemd/activation" ) // ListenFD returns the specified socket activated files as a slice of -- cgit v1.2.1 From 1296d5ce9ad43d8d833f6e5661da45aef6d4c26b Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Fri, 21 Feb 2014 10:48:02 +0100 Subject: Add systemd.SdBooted() This is a conversion of sd_booted() from libsystemd to go and checks if the system was booted with systemd. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- pkg/systemd/booted.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 pkg/systemd/booted.go diff --git a/pkg/systemd/booted.go b/pkg/systemd/booted.go new file mode 100644 index 0000000000..2aae931ec1 --- /dev/null +++ b/pkg/systemd/booted.go @@ -0,0 +1,15 @@ +package systemd + +import ( + "os" +) + +// Conversion to Go of systemd's sd_booted() +func SdBooted() bool { + s, err := os.Stat("/run/systemd/system") + if err != nil { + return false + } + + return s.IsDir() +} -- cgit v1.2.1 From 6c7835050e53b733181ddfca6152c358fd625400 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Fri, 21 Feb 2014 14:35:43 +0100 Subject: cgroups: Add systemd implementation of cgroups This implements cgroup.Apply() using the systemd apis. We create a transient unit called "docker-$id.scope" that contains the container processes. We also have a way to set unit specific properties, currently only defining the Slice to put the scope in. 
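Condensed to a minimal sketch, the mechanism is the following (only the go-systemd dbus calls that appear in the patch below are used; error handling, the StartTransientUnit capability probe and the extra unit properties are left out, and the id/pid/slice values are illustrative):

    package cgroups

    import (
        systemd1 "github.com/coreos/go-systemd/dbus"
        "github.com/godbus/dbus"
    )

    // startScope puts pid into a transient "docker-<id>.scope" unit placed in
    // the requested slice, letting systemd create and manage the cgroup.
    func startScope(id, slice string, pid int) error {
        conn, err := systemd1.New()
        if err != nil {
            return err // no systemd connection: caller falls back to the raw cgroupfs code
        }
        properties := []systemd1.Property{
            {"Slice", dbus.MakeVariant(slice)},
            {"Description", dbus.MakeVariant("docker container " + id)},
            {"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})},
        }
        // "replace" asks systemd to replace any job already queued for this unit name.
        _, err = conn.StartTransientUnit("docker-"+id+".scope", "replace", properties...)
        return err
    }

The rest of apply_systemd.go below deals with detecting whether StartTransientUnit is available, mapping the memory/cpu limits and device policy onto unit properties, and working around the missing /dev/pts/* device rule.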
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- pkg/cgroups/apply_nosystemd.go | 15 +++ pkg/cgroups/apply_systemd.go | 158 ++++++++++++++++++++++++++ pkg/cgroups/cgroups.go | 13 ++- runtime/execdriver/native/default_template.go | 14 +++ 4 files changed, 199 insertions(+), 1 deletion(-) create mode 100644 pkg/cgroups/apply_nosystemd.go create mode 100644 pkg/cgroups/apply_systemd.go diff --git a/pkg/cgroups/apply_nosystemd.go b/pkg/cgroups/apply_nosystemd.go new file mode 100644 index 0000000000..f94d475907 --- /dev/null +++ b/pkg/cgroups/apply_nosystemd.go @@ -0,0 +1,15 @@ +// +build !linux + +package cgroups + +import ( + "fmt" +) + +func useSystemd() bool { + return false +} + +func systemdApply(c *Cgroup, pid int) (ActiveCgroup, error) { + return nil, fmt.Errorf("Systemd not supported") +} diff --git a/pkg/cgroups/apply_systemd.go b/pkg/cgroups/apply_systemd.go new file mode 100644 index 0000000000..c689d5753e --- /dev/null +++ b/pkg/cgroups/apply_systemd.go @@ -0,0 +1,158 @@ +// +build linux + +package cgroups + +import ( + "fmt" + systemd1 "github.com/coreos/go-systemd/dbus" + "github.com/dotcloud/docker/pkg/systemd" + "github.com/godbus/dbus" + "path/filepath" + "strings" + "sync" +) + +type systemdCgroup struct { +} + +var ( + connLock sync.Mutex + theConn *systemd1.Conn + hasStartTransientUnit bool +) + +func useSystemd() bool { + if !systemd.SdBooted() { + return false + } + + connLock.Lock() + defer connLock.Unlock() + + if theConn == nil { + var err error + theConn, err = systemd1.New() + if err != nil { + return false + } + + // Assume we have StartTransientUnit + hasStartTransientUnit = true + + // But if we get UnknownMethod error we don't + if _, err := theConn.StartTransientUnit("test.scope", "invalid"); err != nil { + if dbusError, ok := err.(dbus.Error); ok { + if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" { + hasStartTransientUnit = false + } + } + } + } + + return hasStartTransientUnit +} + +type DeviceAllow struct { + Node string + Permissions string +} + +func getIfaceForUnit(unitName string) string { + if strings.HasSuffix(unitName, ".scope") { + return "Scope" + } + if strings.HasSuffix(unitName, ".service") { + return "Service" + } + return "Unit" +} + +func systemdApply(c *Cgroup, pid int) (ActiveCgroup, error) { + unitName := c.Parent + "-" + c.Name + ".scope" + slice := "system.slice" + + var properties []systemd1.Property + + for _, v := range c.UnitProperties { + switch v[0] { + case "Slice": + slice = v[1] + default: + return nil, fmt.Errorf("Unknown unit propery %s", v[0]) + } + } + + properties = append(properties, + systemd1.Property{"Slice", dbus.MakeVariant(slice)}, + systemd1.Property{"Description", dbus.MakeVariant("docker container " + c.Name)}, + systemd1.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})}) + + if !c.DeviceAccess { + properties = append(properties, + systemd1.Property{"DevicePolicy", dbus.MakeVariant("strict")}, + systemd1.Property{"DeviceAllow", dbus.MakeVariant([]DeviceAllow{ + {"/dev/null", "rwm"}, + {"/dev/zero", "rwm"}, + {"/dev/full", "rwm"}, + {"/dev/random", "rwm"}, + {"/dev/urandom", "rwm"}, + {"/dev/tty", "rwm"}, + {"/dev/console", "rwm"}, + {"/dev/tty0", "rwm"}, + {"/dev/tty1", "rwm"}, + {"/dev/pts/ptmx", "rwm"}, + // There is no way to add /dev/pts/* here atm, so we hack this manually below + // /dev/pts/* (how to add this?) 
+ // Same with tuntap, which doesn't exist as a node most of the time + })}) + } + + if c.Memory != 0 { + properties = append(properties, + systemd1.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))}) + } + // TODO: MemorySwap not available in systemd + + if c.CpuShares != 0 { + properties = append(properties, + systemd1.Property{"CPUShares", dbus.MakeVariant(uint64(c.CpuShares))}) + } + + if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil { + return nil, err + } + + // To work around the lack of /dev/pts/* support above we need to manually add these + // so, ask systemd for the cgroup used + props, err := theConn.GetUnitTypeProperties(unitName, getIfaceForUnit(unitName)) + if err != nil { + return nil, err + } + + cgroup := props["ControlGroup"].(string) + + if !c.DeviceAccess { + mountpoint, err := FindCgroupMountpoint("devices") + if err != nil { + return nil, err + } + + path := filepath.Join(mountpoint, cgroup) + + // /dev/pts/* + if err := writeFile(path, "devices.allow", "c 136:* rwm"); err != nil { + return nil, err + } + // tuntap + if err := writeFile(path, "devices.allow", "c 10:200 rwm"); err != nil { + return nil, err + } + } + + return &systemdCgroup{}, nil +} + +func (c *systemdCgroup) Cleanup() error { + // systemd cleans up, we don't need to do anything + return nil +} diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go index f35556f712..8554ba9376 100644 --- a/pkg/cgroups/cgroups.go +++ b/pkg/cgroups/cgroups.go @@ -19,6 +19,8 @@ type Cgroup struct { Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes) MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. 
other containers) + + UnitProperties [][2]string `json:"unit_properties,omitempty"` // systemd unit properties } type ActiveCgroup interface { @@ -87,5 +89,14 @@ func writeFile(dir, file, data string) error { } func (c *Cgroup) Apply(pid int) (ActiveCgroup, error) { - return rawApply(c, pid) + // We have two implementations of cgroups support, one is based on + // systemd and the dbus api, and one is based on raw cgroup fs operations + // following the pre-single-writer model docs at: + // http://www.freedesktop.org/wiki/Software/systemd/PaxControlGroups/ + + if useSystemd() { + return systemdApply(c, pid) + } else { + return rawApply(c, pid) + } } diff --git a/runtime/execdriver/native/default_template.go b/runtime/execdriver/native/default_template.go index e11f2de1cf..32886f2396 100644 --- a/runtime/execdriver/native/default_template.go +++ b/runtime/execdriver/native/default_template.go @@ -5,6 +5,7 @@ import ( "github.com/dotcloud/docker/pkg/cgroups" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/runtime/execdriver" + "github.com/dotcloud/docker/utils" "os" ) @@ -59,6 +60,19 @@ func createContainer(c *execdriver.Command) *libcontainer.Container { container.Cgroups.MemorySwap = c.Resources.MemorySwap } + if opts, ok := c.Config["unit"]; ok { + props := [][2]string{} + for _, opt := range opts { + key, value, err := utils.ParseKeyValueOpt(opt) + if err == nil { + props = append(props, [2]string{key, value}) + } else { + props = append(props, [2]string{opt, ""}) + } + } + container.Cgroups.UnitProperties = props + } + // check to see if we are running in ramdisk to disable pivot root container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != "" -- cgit v1.2.1 From 64dd77fa0eb73ec4f395848982ccb60bb3bf8fc5 Mon Sep 17 00:00:00 2001 From: James Harrison Fisher Date: Thu, 27 Mar 2014 23:02:44 +0000 Subject: Add missing port NAT configuration Missing port translation causes last line to fail Docker-DCO-1.1-Signed-off-by: James Fisher (github: jameshfisher) --- docs/sources/examples/mongodb.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/examples/mongodb.rst b/docs/sources/examples/mongodb.rst index 930ab2ea9d..913dc2699a 100644 --- a/docs/sources/examples/mongodb.rst +++ b/docs/sources/examples/mongodb.rst @@ -86,10 +86,10 @@ the local port! .. code-block:: bash # Regular style - MONGO_ID=$(sudo docker run -d /mongodb) + MONGO_ID=$(sudo docker run -P -d /mongodb) # Lean and mean - MONGO_ID=$(sudo docker run -d /mongodb --noprealloc --smallfiles) + MONGO_ID=$(sudo docker run -P -d /mongodb --noprealloc --smallfiles) # Check the logs out sudo docker logs $MONGO_ID -- cgit v1.2.1 From 792bb41e524615486ef8266b7bf4804b4fe178f1 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Thu, 27 Mar 2014 16:38:27 -0400 Subject: Remount /var/lib/docker as --private to fix scaling issue If an admin mounts all file systems as -rshared (the default on RHEL and Fedora) we see a scaling problem as the number of containers increases. Basically every new container needs to have its new mounts in /var/lib/docker shared to all other containers; this leaves us only able to scale to around 100 containers before the system slows down. By simply bind mounting /var/lib/docker onto itself and then setting it private, the scaling issue goes away.
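In mount(8) terms, the daemon now does the equivalent of "mount --bind /var/lib/docker /var/lib/docker" followed by "mount --make-private /var/lib/docker" at startup. A rough, Linux-only sketch of what the new remountPrivate() in the patch below boils down to, written against the raw syscall package instead of the pkg/mount helpers it actually uses (the real code also checks first whether the directory is already a mount point):

    package main

    import "syscall"

    // makePrivate bind-mounts dir onto itself and marks that mount private,
    // so mounts created underneath it no longer propagate to the peers of a
    // shared (-rshared) parent mount.
    func makePrivate(dir string) error {
        if err := syscall.Mount(dir, dir, "none", syscall.MS_BIND, ""); err != nil {
            return err
        }
        return syscall.Mount("none", dir, "", syscall.MS_PRIVATE, "")
    }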
Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) --- runtime/runtime.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/runtime/runtime.go b/runtime/runtime.go index 35bcad9781..b035f5df9f 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -10,6 +10,7 @@ import ( "github.com/dotcloud/docker/graph" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/pkg/graphdb" + "github.com/dotcloud/docker/pkg/mount" "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/runtime/execdriver" @@ -59,6 +60,22 @@ type Runtime struct { execDriver execdriver.Driver } +// Mountpoints should be private to the container +func remountPrivate(mountPoint string) error { + + mounted, err := mount.Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + return mount.ForceMount("", mountPoint, "none", "private") +} + // List returns an array of all containers registered in the runtime. func (runtime *Runtime) List() []*Container { containers := new(History) @@ -654,6 +671,10 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (* } utils.Debugf("Using graph driver %s", driver) + if err := remountPrivate(config.Root); err != nil { + return nil, err + } + runtimeRepo := path.Join(config.Root, "containers") if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) { -- cgit v1.2.1 From 66c5e19f9bd057644fab475499ea45bb428ba2b2 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Fri, 28 Mar 2014 15:58:14 +0100 Subject: devmapper: Ensure we shut down thin pool cleanly. The change in commit a9fa1a13c3b0a654a96be01ff7ec19e8009b2094 made us only deactivate devices that were mounted. Unfortunately this made us not deactivate the base device. Which caused us to not be able to deactivate the pool. This fixes that by always just deactivating the base device. 
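The ordering matters because device-mapper will not tear down the thin pool while any thin device created from it, including the base device, is still active. With the fix, the shutdown path is roughly the following (a sketch of the ordering only, using the names from deviceset.go; the real Shutdown() unmounts the devices and takes the per-device locks before this point):

    // Deactivate the base device first ("" is the hash the base device is
    // registered under), then the pool that backs it.
    if err := devices.deactivateDevice(""); err != nil {
        utils.Debugf("Shutdown deactivate base , error: %s\n", err)
    }
    if err := devices.deactivatePool(); err != nil {
        utils.Debugf("Shutdown deactivate pool , error: %s\n", err)
    }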
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- runtime/graphdriver/devmapper/deviceset.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go index 762e982208..731e9dab8b 100644 --- a/runtime/graphdriver/devmapper/deviceset.go +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -821,6 +821,10 @@ func (devices *DeviceSet) Shutdown() error { info.lock.Unlock() } + if err := devices.deactivateDevice(""); err != nil { + utils.Debugf("Shutdown deactivate base , error: %s\n", err) + } + if err := devices.deactivatePool(); err != nil { utils.Debugf("Shutdown deactivate pool , error: %s\n", err) } -- cgit v1.2.1 From 7d750180e49159ead712843a9cc1c0c58fd5c53c Mon Sep 17 00:00:00 2001 From: Justin Simonelis Date: Fri, 28 Mar 2014 15:15:54 -0500 Subject: Update AUTHORS --- AUTHORS | 1 - 1 file changed, 1 deletion(-) diff --git a/AUTHORS b/AUTHORS index 1c58d953f6..df091d5950 100644 --- a/AUTHORS +++ b/AUTHORS @@ -170,7 +170,6 @@ Julien Barbier Julien Dubois Justin Force Justin Plock -Justin Simonelis Karan Lyons Karl Grzeszczak Kawsar Saiyeed -- cgit v1.2.1 From 36dfa0c4ec90404f76a6ec73c89d199e279ee96c Mon Sep 17 00:00:00 2001 From: Jason Plum Date: Fri, 28 Mar 2014 18:02:17 -0400 Subject: Fix daemon's documentation for -bip flag --- docs/sources/reference/commandline/cli.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index b39cbc2e9f..3d2aac5233 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -74,7 +74,7 @@ Commands -G, --group="docker": Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group --api-enable-cors=false: Enable CORS headers in the remote API -b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking - --bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b + -bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b -d, --daemon=false: Enable daemon mode --dns=[]: Force docker to use specific DNS servers --dns-search=[]: Force Docker to use specific DNS search domains -- cgit v1.2.1 From 04f5c75239cba156db70523bcd90657e5c7b5ddb Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Sat, 29 Mar 2014 00:48:47 +0000 Subject: Steve Wozniak is not boring. 
Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- pkg/namesgenerator/names-generator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go index dfece5d611..3776e59225 100644 --- a/pkg/namesgenerator/names-generator.go +++ b/pkg/namesgenerator/names-generator.go @@ -56,7 +56,7 @@ func GenerateRandomName(checker NameChecker) (string, error) { retry := 5 rand.Seed(time.Now().UnixNano()) name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) - for checker != nil && checker.Exists(name) && retry > 0 { + for checker != nil && checker.Exists(name) && retry > 0 || name == "boring_wozniak" /* Steve Wozniak is not boring */ { name = fmt.Sprintf("%s%d", name, rand.Intn(10)) retry = retry - 1 } -- cgit v1.2.1 From 6db32fdefdae49843ed9535b3af1099e6bd2755d Mon Sep 17 00:00:00 2001 From: unclejack Date: Tue, 25 Feb 2014 18:17:48 +0200 Subject: initial version of cli integration tests Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- Makefile | 7 +- hack/make.sh | 1 + hack/make/test-integration-cli | 37 +++ .../build_tests/TestBuildSixtySteps/Dockerfile | 60 +++++ integration-cli/docker_cli_build_test.go | 28 +++ integration-cli/docker_cli_commit_test.go | 34 +++ integration-cli/docker_cli_diff_test.go | 66 ++++++ integration-cli/docker_cli_export_import_test.go | 50 ++++ integration-cli/docker_cli_images_test.go | 20 ++ integration-cli/docker_cli_info_test.go | 29 +++ integration-cli/docker_cli_kill_test.go | 36 +++ integration-cli/docker_cli_pull_test.go | 30 +++ integration-cli/docker_cli_push_test.go | 48 ++++ integration-cli/docker_cli_run_test.go | 255 +++++++++++++++++++++ integration-cli/docker_cli_save_load_test.go | 52 +++++ integration-cli/docker_cli_search_test.go | 25 ++ integration-cli/docker_cli_tag_test.go | 86 +++++++ integration-cli/docker_cli_top_test.go | 32 +++ integration-cli/docker_cli_version_test.go | 29 +++ integration-cli/docker_test_vars.go | 29 +++ integration-cli/docker_utils.go | 56 +++++ integration-cli/utils.go | 109 +++++++++ 22 files changed, 1117 insertions(+), 2 deletions(-) create mode 100644 hack/make/test-integration-cli create mode 100644 integration-cli/build_tests/TestBuildSixtySteps/Dockerfile create mode 100644 integration-cli/docker_cli_build_test.go create mode 100644 integration-cli/docker_cli_commit_test.go create mode 100644 integration-cli/docker_cli_diff_test.go create mode 100644 integration-cli/docker_cli_export_import_test.go create mode 100644 integration-cli/docker_cli_images_test.go create mode 100644 integration-cli/docker_cli_info_test.go create mode 100644 integration-cli/docker_cli_kill_test.go create mode 100644 integration-cli/docker_cli_pull_test.go create mode 100644 integration-cli/docker_cli_push_test.go create mode 100644 integration-cli/docker_cli_run_test.go create mode 100644 integration-cli/docker_cli_save_load_test.go create mode 100644 integration-cli/docker_cli_search_test.go create mode 100644 integration-cli/docker_cli_tag_test.go create mode 100644 integration-cli/docker_cli_top_test.go create mode 100644 integration-cli/docker_cli_version_test.go create mode 100644 integration-cli/docker_test_vars.go create mode 100644 integration-cli/docker_utils.go create mode 100644 integration-cli/utils.go diff --git a/Makefile b/Makefile index b3bea8a31f..5656472213 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: all binary build cross default docs docs-build 
docs-shell shell test test-integration +.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration test-integration-cli GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) DOCKER_IMAGE := docker:$(GIT_BRANCH) @@ -23,11 +23,14 @@ docs-shell: docs-build docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" bash test: build - $(DOCKER_RUN_DOCKER) hack/make.sh test test-integration + $(DOCKER_RUN_DOCKER) hack/make.sh binary test test-integration test-integration-cli test-integration: build $(DOCKER_RUN_DOCKER) hack/make.sh test-integration +test-integration-cli: build + $(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli + shell: build $(DOCKER_RUN_DOCKER) bash diff --git a/hack/make.sh b/hack/make.sh index dbb9dbfdfd..447a00f039 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -139,6 +139,7 @@ find_dirs() { \( \ -wholename './vendor' \ -o -wholename './integration' \ + -o -wholename './integration-cli' \ -o -wholename './contrib' \ -o -wholename './pkg/mflag/example' \ -o -wholename './.git' \ diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli new file mode 100644 index 0000000000..5ab37a4021 --- /dev/null +++ b/hack/make/test-integration-cli @@ -0,0 +1,37 @@ +#!/bin/bash + +DEST=$1 +DOCKERBIN=$DEST/../binary/docker-$VERSION +DYNDOCKERBIN=$DEST/../dynbinary/docker-$VERSION +DOCKERINITBIN=$DEST/../dynbinary/dockerinit-$VERSION + +set -e + +bundle_test_integration_cli() { + go_test_dir ./integration-cli +} + +if [ -x "/usr/bin/docker" ]; then + echo "docker found at /usr/bin/docker" +elif [ -x "$DOCKERBIN" ]; then + ln -s $DOCKERBIN /usr/bin/docker +elif [ -x "$DYNDOCKERBIN" ]; then + ln -s $DYNDOCKERBIN /usr/bin/docker + ln -s $DOCKERINITBIN /usr/bin/dockerinit +else + echo >&2 'error: binary or dynbinary must be run before test-integration-cli' + false +fi + + +docker -d -D -p $DEST/docker.pid &> $DEST/docker.log & +sleep 2 +docker info +DOCKERD_PID=`cat $DEST/docker.pid` + +bundle_test_integration_cli 2>&1 \ + | tee $DEST/test.log + +kill $DOCKERD_PID +wait $DOCKERD_PID + diff --git a/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile b/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile new file mode 100644 index 0000000000..89b66f4f1d --- /dev/null +++ b/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile @@ -0,0 +1,60 @@ +FROM busybox +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" +RUN echo "foo" diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go new file mode 100644 index 0000000000..e6f3096892 --- /dev/null +++ b/integration-cli/docker_cli_build_test.go @@ -0,0 +1,28 @@ +package 
main + +import ( + "fmt" + "os/exec" + "path/filepath" + "testing" +) + +func TestBuildSixtySteps(t *testing.T) { + buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildSixtySteps") + buildCmd := exec.Command(dockerBinary, "build", "-t", "foobuildsixtysteps", ".") + buildCmd.Dir = buildDirectory + out, exitCode, err := runCommandWithOutput(buildCmd) + errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to build the image") + } + + go deleteImages("foobuildsixtysteps") + + logDone("build - build an image with sixty build steps") +} + +// TODO: TestCaching + +// TODO: TestADDCacheInvalidation diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go new file mode 100644 index 0000000000..5ed55ef62a --- /dev/null +++ b/integration-cli/docker_cli_commit_test.go @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + "os/exec" + "testing" +) + +func TestCommitAfterContainerIsDone(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) + _, _, err = runCommandWithOutput(waitCmd) + errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + + commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID) + out, _, err = runCommandWithOutput(commitCmd) + errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err)) + + cleanedImageID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) + out, _, err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err)) + + go deleteContainer(cleanedContainerID) + go deleteImages(cleanedImageID) + + logDone("commit - echo foo and commit the image") +} diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go new file mode 100644 index 0000000000..5f8ba74161 --- /dev/null +++ b/integration-cli/docker_cli_diff_test.go @@ -0,0 +1,66 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// ensure that an added file shows up in docker diff +func TestDiffFilenameShownInOutput(t *testing.T) { + containerCmd := `echo foo > /root/bar` + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) + cid, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err)) + + cleanCID := stripTrailingCharacters(cid) + + diffCmd := exec.Command(dockerBinary, "diff", cleanCID) + out, _, err := runCommandWithOutput(diffCmd) + errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + + found := false + for _, line := range strings.Split(out, "\n") { + if strings.Contains("A /root/bar", line) { + found = true + break + } + } + if !found { + t.Errorf("couldn't find the new file in docker diff's output: %v", out) + } + go deleteContainer(cleanCID) + + logDone("diff - check if created file shows up") +} + +// test to ensure GH #3840 doesn't occur any more +func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { + // this is a list of files which shouldn't show up in `docker diff` + dockerinitFiles := []string{"/etc/resolv.conf", 
"/etc/hostname", "/etc/hosts", "/.dockerinit", "/.dockerenv"} + + // we might not run into this problem from the first run, so start a few containers + for i := 0; i < 20; i++ { + containerCmd := `echo foo > /root/bar` + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) + cid, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("%s", err)) + + cleanCID := stripTrailingCharacters(cid) + + diffCmd := exec.Command(dockerBinary, "diff", cleanCID) + out, _, err := runCommandWithOutput(diffCmd) + errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + + go deleteContainer(cleanCID) + + for _, filename := range dockerinitFiles { + if strings.Contains(out, filename) { + t.Errorf("found file which should've been ignored %v in diff output", filename) + } + } + } + + logDone("diff - check if ignored files show up in diff") +} diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go new file mode 100644 index 0000000000..66ff1055ba --- /dev/null +++ b/integration-cli/docker_cli_export_import_test.go @@ -0,0 +1,50 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "testing" +) + +// export an image and try to import it into a new one +func TestExportContainerAndImportImage(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + t.Fatal("failed to create a container", out, err) + } + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + out, _, err = runCommandWithOutput(inspectCmd) + if err != nil { + t.Fatalf("output should've been a container id: %s %s ", cleanedContainerID, err) + } + + exportCmdTemplate := `%v export %v > /tmp/testexp.tar` + exportCmdFinal := fmt.Sprintf(exportCmdTemplate, dockerBinary, cleanedContainerID) + exportCmd := exec.Command("bash", "-c", exportCmdFinal) + out, _, err = runCommandWithOutput(exportCmd) + errorOut(err, t, fmt.Sprintf("failed to export container: %v %v", out, err)) + + importCmdFinal := `cat /tmp/testexp.tar | docker import - testexp` + importCmd := exec.Command("bash", "-c", importCmdFinal) + out, _, err = runCommandWithOutput(importCmd) + errorOut(err, t, fmt.Sprintf("failed to import image: %v %v", out, err)) + + cleanedImageID := stripTrailingCharacters(out) + + inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID) + out, _, err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("output should've been an image id: %v %v", out, err)) + + go deleteImages("testexp") + go deleteContainer(cleanedContainerID) + + os.Remove("/tmp/testexp.tar") + + logDone("export - export a container") + logDone("import - import an image") +} diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go new file mode 100644 index 0000000000..17efc6f5c4 --- /dev/null +++ b/integration-cli/docker_cli_images_test.go @@ -0,0 +1,20 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestImagesEnsureImageIsListed(t *testing.T) { + imagesCmd := exec.Command(dockerBinary, "images") + out, _, err := runCommandWithOutput(imagesCmd) + errorOut(err, t, fmt.Sprintf("listing images failed with errors: %v", err)) + + if !strings.Contains(out, "busybox") { + t.Fatal("images should've listed busybox") + } + + logDone("images - busybox should be listed") +} diff --git 
a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go new file mode 100644 index 0000000000..32aa3a2125 --- /dev/null +++ b/integration-cli/docker_cli_info_test.go @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// ensure docker info succeeds +func TestInfoEnsureSucceeds(t *testing.T) { + versionCmd := exec.Command(dockerBinary, "info") + out, exitCode, err := runCommandWithOutput(versionCmd) + errorOut(err, t, fmt.Sprintf("encountered error while running docker info: %v", err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to execute docker info") + } + + stringsToCheck := []string{"Containers:", "Execution Driver:", "Kernel Version:"} + + for _, linePrefix := range stringsToCheck { + if !strings.Contains(out, linePrefix) { + t.Errorf("couldn't find string %v in output", linePrefix) + } + } + + logDone("info - verify that it works") +} diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go new file mode 100644 index 0000000000..676ccd0ca0 --- /dev/null +++ b/integration-cli/docker_cli_kill_test.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestKillContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 10") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err)) + + killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) + out, _, err = runCommandWithOutput(killCmd) + errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err)) + + listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") + out, _, err = runCommandWithOutput(listRunningContainersCmd) + errorOut(err, t, fmt.Sprintf("failed to list running containers: %v", err)) + + if strings.Contains(out, cleanedContainerID) { + t.Fatal("killed container is still running") + } + + go deleteContainer(cleanedContainerID) + + logDone("kill - kill container running sleep 10") +} diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go new file mode 100644 index 0000000000..13b443f3d6 --- /dev/null +++ b/integration-cli/docker_cli_pull_test.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "os/exec" + "testing" +) + +// pulling an image from the central registry should work +func TestPullImageFromCentralRegistry(t *testing.T) { + pullCmd := exec.Command(dockerBinary, "pull", "busybox") + out, exitCode, err := runCommandWithOutput(pullCmd) + errorOut(err, t, fmt.Sprintf("%s %s", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("pulling the busybox image from the registry has failed") + } + logDone("pull - pull busybox") +} + +// pulling a non-existing image from the central registry should return a non-zero exit code +func TestPullNonExistingImage(t *testing.T) { + pullCmd := exec.Command(dockerBinary, "pull", "fooblahblah1234") + _, exitCode, err := runCommandWithOutput(pullCmd) + + if err == nil || exitCode == 0 { + t.Fatal("expected non-zero exit status when pulling non-existing image") + } + logDone("pull - pull fooblahblah1234 (non-existing image)") +} diff --git 
a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go new file mode 100644 index 0000000000..8117c077bc --- /dev/null +++ b/integration-cli/docker_cli_push_test.go @@ -0,0 +1,48 @@ +package main + +import ( + "fmt" + "os/exec" + "testing" +) + +// these tests need a freshly started empty private docker registry + +// pulling an image from the central registry should work +func TestPushBusyboxImage(t *testing.T) { + // skip this test until we're able to use a registry + t.Skip() + // tag the image to upload it tot he private registry + repoName := fmt.Sprintf("%v/busybox", privateRegistryURL) + tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName) + out, exitCode, err := runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("%v %v", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("image tagging failed") + } + + pushCmd := exec.Command(dockerBinary, "push", repoName) + out, exitCode, err = runCommandWithOutput(pushCmd) + errorOut(err, t, fmt.Sprintf("%v %v", out, err)) + + go deleteImages(repoName) + + if err != nil || exitCode != 0 { + t.Fatal("pushing the image to the private registry has failed") + } + logDone("push - push busybox to private registry") +} + +// pushing an image without a prefix should throw an error +func TestPushUnprefixedRepo(t *testing.T) { + // skip this test until we're able to use a registry + t.Skip() + pushCmd := exec.Command(dockerBinary, "push", "busybox") + _, exitCode, err := runCommandWithOutput(pushCmd) + + if err == nil || exitCode == 0 { + t.Fatal("pushing an unprefixed repo didn't result in a non-zero exit status") + } + logDone("push - push unprefixed busybox repo --> must fail") +} diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go new file mode 100644 index 0000000000..12915d72ff --- /dev/null +++ b/integration-cli/docker_cli_run_test.go @@ -0,0 +1,255 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// "test123" should be printed by docker run +func TestDockerRunEchoStdout(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "echo", "test123") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + if out != "test123\n" { + t.Errorf("container should've printed 'test123'") + } + + deleteAllContainers() + + logDone("run - echo test123") +} + +// "test" should be printed +func TestDockerRunEchoStdoutWithMemoryLimit(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-m", "2786432", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + if out != "test\n" { + t.Errorf("container should've printed 'test'") + + } + + deleteAllContainers() + + logDone("run - echo with memory limit") +} + +// "test" should be printed +func TestDockerRunEchoStdoutWitCPULimit(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + if out != "test\n" { + t.Errorf("container should've printed 'test'") + } + + deleteAllContainers() + + logDone("run - echo with CPU limit") +} + +// "test" should be printed +func TestDockerRunEchoStdoutWithCPUAndMemoryLimit(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "2786432", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + if out != "test\n" { + t.Errorf("container should've printed 
'test'") + } + + deleteAllContainers() + + logDone("run - echo with CPU and memory limit") +} + +// "test" should be printed +func TestDockerRunEchoNamedContainer(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + if out != "test\n" { + t.Errorf("container should've printed 'test'") + } + + if err := deleteContainer("testfoonamedcontainer"); err != nil { + t.Errorf("failed to remove the named container: %v", err) + } + + deleteAllContainers() + + logDone("run - echo with named container") +} + +// it should be possible to ping Google DNS resolver +// this will fail when Internet access is unavailable +func TestDockerRunPingGoogle(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "ping", "-c", "1", "8.8.8.8") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + errorOut(err, t, "container should've been able to ping 8.8.8.8") + + deleteAllContainers() + + logDone("run - ping 8.8.8.8") +} + +// the exit code should be 0 +// some versions of lxc might make this test fail +func TestDockerRunExitCodeZero(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "true") + exitCode, err := runCommand(runCmd) + errorOut(err, t, fmt.Sprintf("%s", err)) + + if exitCode != 0 { + t.Errorf("container should've exited with exit code 0") + } + + deleteAllContainers() + + logDone("run - exit with 0") +} + +// the exit code should be 1 +// some versions of lxc might make this test fail +func TestDockerRunExitCodeOne(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "busybox", "false") + exitCode, err := runCommand(runCmd) + if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) { + t.Fatal(err) + } + if exitCode != 1 { + t.Errorf("container should've exited with exit code 1") + } + + deleteAllContainers() + + logDone("run - exit with 1") +} + +// it should be possible to pipe in data via stdin to a process running in a container +// some versions of lxc might make this test fail +func TestRunStdinPipe(t *testing.T) { + runCmd := exec.Command("bash", "-c", `echo "blahblah" | docker run -i -a stdin busybox cat`) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + out = stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", out) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut)) + + waitCmd := exec.Command(dockerBinary, "wait", out) + _, _, err = runCommandWithOutput(waitCmd) + errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + + logsCmd := exec.Command(dockerBinary, "logs", out) + containerLogs, _, err := runCommandWithOutput(logsCmd) + errorOut(err, t, fmt.Sprintf("error thrown while trying to get container logs: %s", err)) + + containerLogs = stripTrailingCharacters(containerLogs) + + if containerLogs != "blahblah" { + t.Errorf("logs didn't print the container's logs %s", containerLogs) + } + + rmCmd := exec.Command(dockerBinary, "rm", out) + _, _, err = runCommandWithOutput(rmCmd) + errorOut(err, t, fmt.Sprintf("rm failed to remove container %s", err)) + + deleteAllContainers() + + logDone("run - pipe in with -i -a stdin") +} + +// the container's ID should be printed when starting a container in detached mode +func TestDockerRunDetachedContainerIDPrinting(t 
*testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + out = stripTrailingCharacters(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", out) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut)) + + waitCmd := exec.Command(dockerBinary, "wait", out) + _, _, err = runCommandWithOutput(waitCmd) + errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out)) + + rmCmd := exec.Command(dockerBinary, "rm", out) + rmOut, _, err := runCommandWithOutput(rmCmd) + errorOut(err, t, "rm failed to remove container") + + rmOut = stripTrailingCharacters(rmOut) + if rmOut != out { + t.Errorf("rm didn't print the container ID %s %s", out, rmOut) + } + + deleteAllContainers() + + logDone("run - print container ID in detached mode") +} + +// the working directory should be set correctly +func TestDockerRunWorkingDirectory(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-w", "/root", "busybox", "pwd") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + out = stripTrailingCharacters(out) + + if out != "/root" { + t.Errorf("-w failed to set working directory") + } + + runCmd = exec.Command(dockerBinary, "run", "--workdir", "/root", "busybox", "pwd") + out, _, _, err = runCommandWithStdoutStderr(runCmd) + errorOut(err, t, out) + + out = stripTrailingCharacters(out) + + if out != "/root" { + t.Errorf("--workdir failed to set working directory") + } + + deleteAllContainers() + + logDone("run - run with working directory set by -w") + logDone("run - run with working directory set by --workdir") +} + +// pinging Google's DNS resolver should fail when we disable the networking +func TestDockerRunWithoutNetworking(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ping", "-c", "1", "8.8.8.8") + out, _, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 1 { + t.Fatal(out, err) + } + if exitCode != 1 { + t.Errorf("--networking=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } + + runCmd = exec.Command(dockerBinary, "run", "-n=false", "busybox", "ping", "-c", "1", "8.8.8.8") + out, _, exitCode, err = runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 1 { + t.Fatal(out, err) + } + if exitCode != 1 { + t.Errorf("-n=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + } + + deleteAllContainers() + + logDone("run - disable networking with --networking=false") + logDone("run - disable networking with -n=false") +} diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go new file mode 100644 index 0000000000..7f04f7ca53 --- /dev/null +++ b/integration-cli/docker_cli_save_load_test.go @@ -0,0 +1,52 @@ +package main + +import ( + "fmt" + "os" + "os/exec" + "testing" +) + +// save a repo and try to load it +func TestSaveAndLoadRepo(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err)) + + cleanedContainerID := stripTrailingCharacters(out) + + repoName := "foobar-save-load-test" + + inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + out, _, 
err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("output should've been a container id: %v %v", cleanedContainerID, err)) + + commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) + out, _, err = runCommandWithOutput(commitCmd) + errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err)) + + saveCmdTemplate := `%v save %v > /tmp/foobar-save-load-test.tar` + saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName) + saveCmd := exec.Command("bash", "-c", saveCmdFinal) + out, _, err = runCommandWithOutput(saveCmd) + errorOut(err, t, fmt.Sprintf("failed to save repo: %v %v", out, err)) + + deleteImages(repoName) + + loadCmdFinal := `cat /tmp/foobar-save-load-test.tar | docker load` + loadCmd := exec.Command("bash", "-c", loadCmdFinal) + out, _, err = runCommandWithOutput(loadCmd) + errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err)) + + inspectCmd = exec.Command(dockerBinary, "inspect", repoName) + out, _, err = runCommandWithOutput(inspectCmd) + errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", out, err)) + + go deleteImages(repoName) + go deleteContainer(cleanedContainerID) + + os.Remove("/tmp/foobar-save-load-test.tar") + + logDone("save - save a repo") + logDone("load - load a repo") +} diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go new file mode 100644 index 0000000000..050aec51a6 --- /dev/null +++ b/integration-cli/docker_cli_search_test.go @@ -0,0 +1,25 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// search for repos named "registry" on the central registry +func TestSearchOnCentralRegistry(t *testing.T) { + searchCmd := exec.Command(dockerBinary) + out, exitCode, err := runCommandWithOutput(searchCmd) + errorOut(err, t, fmt.Sprintf("encountered error while searching: %v", err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to search on the central registry") + } + + if !strings.Contains(out, "registry") { + t.Fatal("couldn't find any repository named (or containing) 'registry'") + } + + logDone("search - search for repositories named (or containing) 'registry'") +} diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go new file mode 100644 index 0000000000..67c28c570a --- /dev/null +++ b/integration-cli/docker_cli_tag_test.go @@ -0,0 +1,86 @@ +package main + +import ( + "fmt" + "os/exec" + "testing" +) + +// tagging a named image in a new unprefixed repo should work +func TestTagUnprefixedRepoByName(t *testing.T) { + pullCmd := exec.Command(dockerBinary, "pull", "busybox") + out, exitCode, err := runCommandWithOutput(pullCmd) + errorOut(err, t, fmt.Sprintf("%s %s", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("pulling the busybox image from the registry has failed") + } + + tagCmd := exec.Command(dockerBinary, "tag", "busybox", "testfoobarbaz") + out, _, err = runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("%v %v", out, err)) + + deleteImages("testfoobarbaz") + + logDone("tag - busybox -> testfoobarbaz") +} + +// tagging an image by ID in a new unprefixed repo should work +func TestTagUnprefixedRepoByID(t *testing.T) { + getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.id}}", "busybox") + out, _, err := runCommandWithOutput(getIDCmd) + errorOut(err, t, fmt.Sprintf("failed to get the image ID of busybox: %v", err)) + + cleanedImageID := stripTrailingCharacters(out) + tagCmd := 
exec.Command(dockerBinary, "tag", cleanedImageID, "testfoobarbaz") + out, _, err = runCommandWithOutput(tagCmd) + errorOut(err, t, fmt.Sprintf("%s %s", out, err)) + + deleteImages("testfoobarbaz") + + logDone("tag - busybox's image ID -> testfoobarbaz") +} + +// ensure we don't allow the use of invalid tags; these tag operations should fail +func TestTagInvalidUnprefixedRepo(t *testing.T) { + // skip this until we start blocking bad tags + t.Skip() + + invalidRepos := []string{"-foo", "fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo", "f"} + + for _, repo := range invalidRepos { + tagCmd := exec.Command(dockerBinary, "tag", "busybox", repo) + _, _, err := runCommandWithOutput(tagCmd) + if err == nil { + t.Errorf("tag busybox %v should have failed", repo) + continue + } + logMessage := fmt.Sprintf("tag - busybox %v --> must fail", repo) + logDone(logMessage) + } +} + +// ensure we allow the use of valid tags +func TestTagValidPrefixedRepo(t *testing.T) { + pullCmd := exec.Command(dockerBinary, "pull", "busybox") + out, exitCode, err := runCommandWithOutput(pullCmd) + errorOut(err, t, fmt.Sprintf("%s %s", out, err)) + + if err != nil || exitCode != 0 { + t.Fatal("pulling the busybox image from the registry has failed") + } + + validRepos := []string{"fooo/bar", "fooaa/test"} + + for _, repo := range validRepos { + tagCmd := exec.Command(dockerBinary, "tag", "busybox", repo) + _, _, err := runCommandWithOutput(tagCmd) + if err != nil { + t.Errorf("tag busybox %v should have worked: %s", repo, err) + continue + } + go deleteImages(repo) + logMessage := fmt.Sprintf("tag - busybox %v", repo) + logDone(logMessage) + } +} diff --git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go new file mode 100644 index 0000000000..1895054ccc --- /dev/null +++ b/integration-cli/docker_cli_top_test.go @@ -0,0 +1,32 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestTop(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20") + out, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + + topCmd := exec.Command(dockerBinary, "top", cleanedContainerID) + out, _, err = runCommandWithOutput(topCmd) + errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err)) + + killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) + _, err = runCommand(killCmd) + errorOut(err, t, fmt.Sprintf("failed to kill container: %v", err)) + + go deleteContainer(cleanedContainerID) + + if !strings.Contains(out, "sleep 20") { + t.Fatal("top should've listed sleep 20 in the process list") + } + + logDone("top - sleep process should be listed") +} diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go new file mode 100644 index 0000000000..8adedd540b --- /dev/null +++ b/integration-cli/docker_cli_version_test.go @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +// ensure docker version works +func TestVersionEnsureSucceeds(t *testing.T) { + versionCmd := exec.Command(dockerBinary, "version") + out, exitCode, err := runCommandWithOutput(versionCmd) + errorOut(err, t, fmt.Sprintf("encountered error while running docker version: %v", err)) + + if err != nil || exitCode != 0 { + t.Fatal("failed to execute docker version") + } + + stringsToCheck := []string{"Client version:", "Go version 
(client):", "Git commit (client):", "Server version:", "Git commit (server):", "Go version (server):", "Last stable version:"} + + for _, linePrefix := range stringsToCheck { + if !strings.Contains(out, linePrefix) { + t.Errorf("couldn't find string %v in output", linePrefix) + } + } + + logDone("version - verify that it works and that the output is properly formatted") +} diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go new file mode 100644 index 0000000000..f8bd5c116b --- /dev/null +++ b/integration-cli/docker_test_vars.go @@ -0,0 +1,29 @@ +package main + +import ( + "os" +) + +// the docker binary to use +var dockerBinary = "docker" + +// the private registry image to use for tests involving the registry +var registryImageName = "registry" + +// the private registry to use for tests +var privateRegistryURL = "127.0.0.1:5000" + +var workingDirectory string + +func init() { + if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" { + dockerBinary = dockerBin + } + if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" { + registryImageName = registryImage + } + if registry := os.Getenv("REGISTRY_URL"); registry != "" { + privateRegistryURL = registry + } + workingDirectory, _ = os.Getwd() +} diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go new file mode 100644 index 0000000000..8e9d0a23ff --- /dev/null +++ b/integration-cli/docker_utils.go @@ -0,0 +1,56 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" +) + +func deleteContainer(container string) error { + container = strings.Replace(container, "\n", " ", -1) + container = strings.Trim(container, " ") + rmArgs := fmt.Sprintf("rm %v", container) + rmSplitArgs := strings.Split(rmArgs, " ") + rmCmd := exec.Command(dockerBinary, rmSplitArgs...) 
+ exitCode, err := runCommand(rmCmd) + // set error manually if not set + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to remove container: `docker rm` exit is non-zero") + } + + return err +} + +func getAllContainers() (string, error) { + getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a") + out, exitCode, err := runCommandWithOutput(getContainersCmd) + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to get a list of containers: %v\n", out) + } + + return out, err +} + +func deleteAllContainers() error { + containers, err := getAllContainers() + if err != nil { + fmt.Println(containers) + return err + } + + if err = deleteContainer(containers); err != nil { + return err + } + return nil +} + +func deleteImages(images string) error { + rmiCmd := exec.Command(dockerBinary, "rmi", images) + exitCode, err := runCommand(rmiCmd) + // set error manually if not set + if exitCode != 0 && err == nil { + err = fmt.Errorf("failed to remove image: `docker rmi` exit is non-zero") + } + + return err +} diff --git a/integration-cli/utils.go b/integration-cli/utils.go new file mode 100644 index 0000000000..680cc6cfcf --- /dev/null +++ b/integration-cli/utils.go @@ -0,0 +1,109 @@ +package main + +import ( + "bytes" + "fmt" + "io" + "os/exec" + "strings" + "syscall" + "testing" +) + +func getExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { + exitCode = 0 + out, err := cmd.CombinedOutput() + if err != nil { + var exiterr error + if exitCode, exiterr = getExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + output = string(out) + return +} + +func runCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) { + exitCode = 0 + var stderrBuffer bytes.Buffer + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return "", "", -1, err + } + go io.Copy(&stderrBuffer, stderrPipe) + out, err := cmd.Output() + + if err != nil { + var exiterr error + if exitCode, exiterr = getExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + stdout = string(out) + stderr = string(stderrBuffer.Bytes()) + return +} + +func runCommand(cmd *exec.Cmd) (exitCode int, err error) { + exitCode = 0 + err = cmd.Run() + if err != nil { + var exiterr error + if exitCode, exiterr = getExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} + +func startCommand(cmd *exec.Cmd) (exitCode int, err error) { + exitCode = 0 + err = cmd.Start() + if err != nil { + var exiterr error + if exitCode, exiterr = getExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. 
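+ // (getExitCode only understands *exec.ExitError, so any other failure lands here)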
+ // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} + +func logDone(message string) { + fmt.Printf("[PASSED]: %s\n", message) +} + +func stripTrailingCharacters(target string) string { + target = strings.Trim(target, "\n") + target = strings.Trim(target, " ") + return target +} + +func errorOut(err error, t *testing.T, message string) { + if err != nil { + t.Fatal(message) + } +} + +func errorOutOnNonNilError(err error, t *testing.T, message string) { + if err == nil { + t.Fatalf(message) + } +} -- cgit v1.2.1 From 3fb1fc0b7b225295b3059cb9a2f5fd9af7a73f36 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 20 Mar 2014 16:46:55 -0600 Subject: Small tweaks to the hack scripts to make them simpler Please do with this as you please (including rebasing and/or squashing it), especially under clause (c) of the DCO. Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/make/binary | 1 + hack/make/test-integration-cli | 23 +++++++---------------- 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/hack/make/binary b/hack/make/binary index 7b4d7b5b5b..041e4d1ee8 100755 --- a/hack/make/binary +++ b/hack/make/binary @@ -11,5 +11,6 @@ go build \ " \ ./docker echo "Created binary: $DEST/docker-$VERSION" +ln -sf "docker-$VERSION" "$DEST/docker" hash_files "$DEST/docker-$VERSION" diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli index 5ab37a4021..d007fbaf6a 100644 --- a/hack/make/test-integration-cli +++ b/hack/make/test-integration-cli @@ -1,37 +1,28 @@ #!/bin/bash DEST=$1 -DOCKERBIN=$DEST/../binary/docker-$VERSION -DYNDOCKERBIN=$DEST/../dynbinary/docker-$VERSION -DOCKERINITBIN=$DEST/../dynbinary/dockerinit-$VERSION set -e +# subshell so that we can export PATH without breaking other things +( +export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH" + bundle_test_integration_cli() { go_test_dir ./integration-cli } -if [ -x "/usr/bin/docker" ]; then - echo "docker found at /usr/bin/docker" -elif [ -x "$DOCKERBIN" ]; then - ln -s $DOCKERBIN /usr/bin/docker -elif [ -x "$DYNDOCKERBIN" ]; then - ln -s $DYNDOCKERBIN /usr/bin/docker - ln -s $DOCKERINITBIN /usr/bin/dockerinit -else +if ! command -v docker &> /dev/null; then echo >&2 'error: binary or dynbinary must be run before test-integration-cli' false fi - docker -d -D -p $DEST/docker.pid &> $DEST/docker.log & -sleep 2 -docker info -DOCKERD_PID=`cat $DEST/docker.pid` bundle_test_integration_cli 2>&1 \ | tee $DEST/test.log +DOCKERD_PID=$(cat $DEST/docker.pid) kill $DOCKERD_PID wait $DOCKERD_PID - +) -- cgit v1.2.1 From 04d1e686398fff0784a47cb85c37db629d40f5b5 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Mon, 31 Mar 2014 11:05:21 +1000 Subject: OSX mktemp is different - hopfully this will now work on HP/UX >:} Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/installation/mac.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/installation/mac.rst b/docs/sources/installation/mac.rst index f4c771cf9f..9ce3961f7e 100644 --- a/docs/sources/installation/mac.rst +++ b/docs/sources/installation/mac.rst @@ -66,7 +66,7 @@ Run the following commands to get it downloaded and set up: .. 
code-block:: bash # Get the docker client file - DIR=$(mktemp -d) && \ + DIR=$(mktemp -d ${TMPDIR:-/tmp}/dockerdl.XXXXXXX) && \ curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \ gunzip $DIR/ld.tgz && \ tar xvf $DIR/ld.tar -C $DIR/ && \ -- cgit v1.2.1 From a2487aa683dc84938eb94c1ae29f8160d09441ea Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 31 Mar 2014 09:07:56 -0700 Subject: Reduce error level form harmless errors Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- api/client.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/api/client.go b/api/client.go index df3265a15a..86858d0b30 100644 --- a/api/client.go +++ b/api/client.go @@ -2374,11 +2374,11 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea } if tcpc, ok := rwc.(*net.TCPConn); ok { if err := tcpc.CloseWrite(); err != nil { - utils.Errorf("Couldn't send EOF: %s\n", err) + utils.Debugf("Couldn't send EOF: %s\n", err) } } else if unixc, ok := rwc.(*net.UnixConn); ok { if err := unixc.CloseWrite(); err != nil { - utils.Errorf("Couldn't send EOF: %s\n", err) + utils.Debugf("Couldn't send EOF: %s\n", err) } } // Discard errors due to pipe interruption @@ -2387,14 +2387,14 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea if stdout != nil || stderr != nil { if err := <-receiveStdout; err != nil { - utils.Errorf("Error receiveStdout: %s", err) + utils.Debugf("Error receiveStdout: %s", err) return err } } if !cli.isTerminal { if err := <-sendStdin; err != nil { - utils.Errorf("Error sendStdin: %s", err) + utils.Debugf("Error sendStdin: %s", err) return err } } @@ -2408,7 +2408,7 @@ func (cli *DockerCli) getTtySize() (int, int) { } ws, err := term.GetWinsize(cli.terminalFd) if err != nil { - utils.Errorf("Error getting size: %s", err) + utils.Debugf("Error getting size: %s", err) if ws == nil { return 0, 0 } @@ -2425,7 +2425,7 @@ func (cli *DockerCli) resizeTty(id string) { v.Set("h", strconv.Itoa(height)) v.Set("w", strconv.Itoa(width)) if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil { - utils.Errorf("Error resize: %s", err) + utils.Debugf("Error resize: %s", err) } } -- cgit v1.2.1 From a57900e35f2c30026a070fdfdbdb0ce99b35e1ff Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 18 Mar 2014 15:28:40 -0700 Subject: Allow volumes from to be individual files Fixes #4741 Right now volumes from expected a dir and not a file so when the drivers tried to do the bind mount, the destination was a dir, not a file so it fails to run. 
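The patch below handles this by creating the bind-mount destination with the same type as the source before mounting: a directory tree for directory volumes, or the parent directories plus an empty file for single-file volumes such as /etc/hosts. A minimal standalone sketch of the createIfNotExists helper introduced in the diff (package name illustrative, error handling slightly simplified):

    package volumes // illustrative package name for this sketch only

    import (
        "os"
        "path/filepath"
    )

    // createIfNotExists makes sure the bind-mount destination exists:
    // the whole directory tree for directory volumes, or the parent
    // directories plus an empty file for single-file volumes.
    func createIfNotExists(path string, isDir bool) error {
        if _, err := os.Stat(path); err == nil {
            return nil // destination already exists, nothing to do
        } else if !os.IsNotExist(err) {
            return err
        }
        if isDir {
            return os.MkdirAll(path, 0755)
        }
        if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
            return err
        }
        f, err := os.OpenFile(path, os.O_CREATE, 0755)
        if err != nil {
            return err
        }
        return f.Close()
    }
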
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/volumes.go | 48 +++++++++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 19 deletions(-) diff --git a/runtime/volumes.go b/runtime/volumes.go index 1bbb14a369..5ac82ef089 100644 --- a/runtime/volumes.go +++ b/runtime/volumes.go @@ -88,7 +88,11 @@ func applyVolumesFrom(container *Container) error { if _, exists := container.Volumes[volPath]; exists { continue } - if err := os.MkdirAll(filepath.Join(container.basefs, volPath), 0755); err != nil { + stat, err := os.Stat(filepath.Join(c.basefs, volPath)) + if err != nil { + return err + } + if err := createIfNotExists(filepath.Join(container.basefs, volPath), stat.IsDir()); err != nil { return err } container.Volumes[volPath] = id @@ -208,24 +212,8 @@ func createVolumes(container *Container) error { if err != nil { return err } - - if _, err := os.Stat(rootVolPath); err != nil { - if os.IsNotExist(err) { - if volIsDir { - if err := os.MkdirAll(rootVolPath, 0755); err != nil { - return err - } - } else { - if err := os.MkdirAll(filepath.Dir(rootVolPath), 0755); err != nil { - return err - } - if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil { - return err - } else { - f.Close() - } - } - } + if err := createIfNotExists(rootVolPath, volIsDir); err != nil { + return err } // Do not copy or change permissions if we are mounting from the host @@ -266,3 +254,25 @@ func createVolumes(container *Container) error { } return nil } + +func createIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } else { + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + defer f.Close() + } + } + } + return nil +} -- cgit v1.2.1 From 28015f8e579e7bbe396f65b3343188ca03b06cbd Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 31 Mar 2014 17:41:40 +0000 Subject: Add integration test for volumes-from as file Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- integration-cli/docker_cli_run_test.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 12915d72ff..13959adea7 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -253,3 +253,21 @@ func TestDockerRunWithoutNetworking(t *testing.T) { logDone("run - disable networking with --networking=false") logDone("run - disable networking with -n=false") } + +// Regression test for #4741 +func TestDockerRunWithVolumesAsFiles(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true") + out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("1", out, stderr, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/target-file") + out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("2", out, stderr, err) + } + deleteAllContainers() + + logDone("run - regression test for #4741 - volumes from as files") +} -- cgit v1.2.1 From 7808886744595af509b7b144890900674ea5ccfd Mon Sep 17 00:00:00 2001 From: Johannes 'fish' Ziemke Date: Mon, 31 Mar 
2014 13:14:56 +0200 Subject: Add more women MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added Adele Goldstine, Erna Schneider Hoover, Grace Hopper, Jean Bartik, Jean E. Sammet, Karen Spärck Jones, Radia Perlman and Sophie Wilson. Thanks to @jamtur01 for Sophie Kowalevski, Hypatia, Jane Goodall, Maria Mayer, Rosalind Franklin, Gertrude Elion, Elizabeth Blackwell, Marie-Jeanne de Lalande, Maria Kirch, Maria Ardinghelli, Jane Colden, June Almeida, Mary Leakey, Lise Meitner, Johanna Mestorf. Thanks to @xamebax for Françoise Barré-Sinoussi, Rachel Carson, Barbara McClintock, Ada Yonath. Docker-DCO-1.1-Signed-off-by: Johannes 'fish' Ziemke (github: discordianfish) --- pkg/namesgenerator/names-generator.go | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go index dfece5d611..78f4d07358 100644 --- a/pkg/namesgenerator/names-generator.go +++ b/pkg/namesgenerator/names-generator.go @@ -15,33 +15,60 @@ var ( // Docker 0.7.x generates names from notable scientists and hackers. // // Ada Lovelace invented the first algorithm. http://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. http://en.wikipedia.org/wiki/Ada_Yonath + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. http://en.wikipedia.org/wiki/Adele_Goldstine // Alan Turing was a founding father of computer science. http://en.wikipedia.org/wiki/Alan_Turing. // Albert Einstein invented the general theory of relativity. http://en.wikipedia.org/wiki/Albert_Einstein // Ambroise Pare invented modern surgery. http://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. http://en.wikipedia.org/wiki/Archimedes + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. http://en.wikipedia.org/wiki/Barbara_McClintock // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. // Charles Babbage invented the concept of a programmable computer. http://en.wikipedia.org/wiki/Charles_Babbage. // Charles Darwin established the principles of natural evolution. http://en.wikipedia.org/wiki/Charles_Darwin. // Dennis Ritchie and Ken Thompson created UNIX and the C programming language. http://en.wikipedia.org/wiki/Dennis_Ritchie http://en.wikipedia.org/wiki/Ken_Thompson // Douglas Engelbart gave the mother of all demos: http://en.wikipedia.org/wiki/Douglas_Engelbart + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - http://en.wikipedia.org/wiki/Elizabeth_Blackwell // Emmett Brown invented time travel. http://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) // Enrico Fermi invented the first nuclear reactor. http://en.wikipedia.org/wiki/Enrico_Fermi. + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephon switching method. http://en.wikipedia.org/wiki/Erna_Schneider_Hoover // Euclid invented geometry. 
http://en.wikipedia.org/wiki/Euclid + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. http://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. http://en.wikipedia.org/wiki/Galileo_Galilei + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - http://en.wikipedia.org/wiki/Gertrude_Elion + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. http://en.wikipedia.org/wiki/Grace_Hopper // Henry Poincare made fundamental contributions in several fields of mathematics. http://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - http://en.wikipedia.org/wiki/Hypatia // Isaac Newton invented classic mechanics and modern optics. http://en.wikipedia.org/wiki/Isaac_Newton + // Jane Colden - American botanist widely considered the first female American botanist - http://en.wikipedia.org/wiki/Jane_Colden + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - http://en.wikipedia.org/wiki/Jane_Goodall + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. http://en.wikipedia.org/wiki/Jean_Bartik + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. http://en.wikipedia.org/wiki/Jean_E._Sammet + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - http://en.wikipedia.org/wiki/Johanna_Mestorf // John McCarthy invented LISP: http://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - http://en.wikipedia.org/wiki/June_Almeida + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. http://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones // Leonardo Da Vinci invented too many things to list here. http://en.wikipedia.org/wiki/Leonardo_da_Vinci. // Linus Torvalds invented Linux and Git. http://en.wikipedia.org/wiki/Linus_Torvalds + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - http://en.wikipedia.org/wiki/Lise_Meitner // Louis Pasteur discovered vaccination, fermentation and pasteurization. http://en.wikipedia.org/wiki/Louis_Pasteur. // Malcolm McLean invented the modern shipping container: http://en.wikipedia.org/wiki/Malcom_McLean + // Maria Ardinghelli - Italian translator, mathematician and physicist - http://en.wikipedia.org/wiki/Maria_Ardinghelli + // Maria Kirch - German astronomer and first woman to discover a comet - http://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - http://en.wikipedia.org/wiki/Maria_Mayer // Marie Curie discovered radioactivity. http://en.wikipedia.org/wiki/Marie_Curie. 
+ // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - http://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - http://en.wikipedia.org/wiki/Mary_Leakey // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. http://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB // Niels Bohr is the father of quantum theory. http://en.wikipedia.org/wiki/Niels_Bohr. // Nikola Tesla invented the AC electric system and every gaget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla // Pierre de Fermat pioneered several aspects of modern mathematics. http://en.wikipedia.org/wiki/Pierre_de_Fermat + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. http://en.wikipedia.org/wiki/Rachel_Carson + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). http://en.wikipedia.org/wiki/Radia_Perlman // Richard Feynman was a key contributor to quantum mechanics and particle physics. http://en.wikipedia.org/wiki/Richard_Feynman // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. http://en.wikipedia.org/wiki/Rob_Pike + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - http://en.wikipedia.org/wiki/Rosalind_Franklin + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - http://en.wikipedia.org/wiki/Sofia_Kovalevskaya + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. http://en.wikipedia.org/wiki/Sophie_Wilson // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. http://en.wikipedia.org/wiki/Stephen_Hawking // Steve Wozniak invented the Apple I and Apple II. http://en.wikipedia.org/wiki/Steve_Wozniak // Werner Heisenberg was a founding father of quantum mechanics. 
http://en.wikipedia.org/wiki/Werner_Heisenberg @@ -49,7 +76,7 @@ var ( // http://en.wikipedia.org/wiki/John_Bardeen // http://en.wikipedia.org/wiki/Walter_Houser_Brattain // http://en.wikipedia.org/wiki/William_Shockley - right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley"} + right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley", "goldstine", "hoover", "hopper", "bartik", "sammet", "jones", "perlman", "wilson", "kowalevski", "hypatia", "goodall", "mayer", "elion", "blackwell", "lalande", "kirch", "ardinghelli", "colden", "almeida", "leakey", "meitner", "mestorf", "rosalind", "sinoussi", "carson", "mcmclintock", "yonath"} ) func GenerateRandomName(checker NameChecker) (string, error) { -- cgit v1.2.1 From e4aaacc2351d2e1dd4b69afeeee2aeab9c625efe Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 31 Mar 2014 10:49:48 -0700 Subject: Fix expending buffer in StdCopy Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- utils/stdcopy.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/stdcopy.go b/utils/stdcopy.go index 3cb8ab02b3..8b43386140 100644 --- a/utils/stdcopy.go +++ b/utils/stdcopy.go @@ -108,12 +108,13 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) // Retrieve the size of the frame frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4])) + Debugf("framesize: %d", frameSize) // Check if the buffer is big enough to read the frame. // Extend it if necessary. if frameSize+StdWriterPrefixLen > bufLen { - Debugf("Extending buffer cap.") - buf = append(buf, make([]byte, frameSize-len(buf)+1)...) + Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf)) + buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...) 
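+ // the buffer must be able to hold the frame header plus the entire frame,
+ // hence the target size of frameSize+StdWriterPrefixLen; growing by the
+ // frame size alone could still fall short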
bufLen = len(buf) } -- cgit v1.2.1 From 2543912e7b5593722a6a22b9ceb6a23f6268e397 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 31 Mar 2014 11:55:55 -0600 Subject: Add "test-integration-cli" to our DEFAULT_BUNDLES list (make all) Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/make.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/make.sh b/hack/make.sh index 447a00f039..e81271370d 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -43,6 +43,7 @@ DEFAULT_BUNDLES=( binary test test-integration + test-integration-cli dynbinary dyntest dyntest-integration -- cgit v1.2.1 From a7365a6237c45ed05d96ab11f36fde35d675b462 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 28 Mar 2014 22:59:29 +0000 Subject: split API into 2 go packages Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/client.go | 2546 ------------------------------------------------- api/client/client.go | 2551 ++++++++++++++++++++++++++++++++++++++++++++++++++ api/common.go | 2 +- api/server.go | 1255 ------------------------- api/server/server.go | 1257 +++++++++++++++++++++++++ builtins/builtins.go | 2 +- docker/docker.go | 7 +- 7 files changed, 3814 insertions(+), 3806 deletions(-) delete mode 100644 api/client.go create mode 100644 api/client/client.go delete mode 100644 api/server.go create mode 100644 api/server/server.go diff --git a/api/client.go b/api/client.go deleted file mode 100644 index 86858d0b30..0000000000 --- a/api/client.go +++ /dev/null @@ -1,2546 +0,0 @@ -package api - -import ( - "bufio" - "bytes" - "crypto/tls" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/nat" - flag "github.com/dotcloud/docker/pkg/mflag" - "github.com/dotcloud/docker/pkg/signal" - "github.com/dotcloud/docker/pkg/term" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "os/exec" - gosignal "os/signal" - "path" - "reflect" - "regexp" - goruntime "runtime" - "strconv" - "strings" - "syscall" - "text/tabwriter" - "text/template" - "time" -) - -var funcMap = template.FuncMap{ - "json": func(v interface{}) string { - a, _ := json.Marshal(v) - return string(a) - }, -} - -var ( - ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") -) - -func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) { - methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:]) - method := reflect.ValueOf(cli).MethodByName(methodName) - if !method.IsValid() { - return nil, false - } - return method.Interface().(func(...string) error), true -} - -func (cli *DockerCli) ParseCommands(args ...string) error { - if len(args) > 0 { - method, exists := cli.getMethod(args[0]) - if !exists { - fmt.Println("Error: Command not found:", args[0]) - return cli.CmdHelp(args[1:]...) - } - return method(args[1:]...) - } - return cli.CmdHelp(args...) 
-} - -func (cli *DockerCli) CmdHelp(args ...string) error { - if len(args) > 0 { - method, exists := cli.getMethod(args[0]) - if !exists { - fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0]) - } else { - method("--help") - return nil - } - } - help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET) - for _, command := range [][]string{ - {"attach", "Attach to a running container"}, - {"build", "Build a container from a Dockerfile"}, - {"commit", "Create a new image from a container's changes"}, - {"cp", "Copy files/folders from the containers filesystem to the host path"}, - {"diff", "Inspect changes on a container's filesystem"}, - {"events", "Get real time events from the server"}, - {"export", "Stream the contents of a container as a tar archive"}, - {"history", "Show the history of an image"}, - {"images", "List images"}, - {"import", "Create a new filesystem image from the contents of a tarball"}, - {"info", "Display system-wide information"}, - {"insert", "Insert a file in an image"}, - {"inspect", "Return low-level information on a container"}, - {"kill", "Kill a running container"}, - {"load", "Load an image from a tar archive"}, - {"login", "Register or Login to the docker registry server"}, - {"logs", "Fetch the logs of a container"}, - {"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"}, - {"ps", "List containers"}, - {"pull", "Pull an image or a repository from the docker registry server"}, - {"push", "Push an image or a repository to the docker registry server"}, - {"restart", "Restart a running container"}, - {"rm", "Remove one or more containers"}, - {"rmi", "Remove one or more images"}, - {"run", "Run a command in a new container"}, - {"save", "Save an image to a tar archive"}, - {"search", "Search for an image in the docker index"}, - {"start", "Start a stopped container"}, - {"stop", "Stop a running container"}, - {"tag", "Tag an image into a repository"}, - {"top", "Lookup the running processes of a container"}, - {"version", "Show the docker version information"}, - {"wait", "Block until a container stops, then print its exit code"}, - } { - help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) - } - fmt.Fprintf(cli.err, "%s\n", help) - return nil -} - -func (cli *DockerCli) CmdInsert(args ...string) error { - cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 3 { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("url", cmd.Arg(1)) - v.Set("path", cmd.Arg(2)) - - return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil) -} - -func (cli *DockerCli) CmdBuild(args ...string) error { - cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH") - tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") - suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") - noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") - rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") - if err := cmd.Parse(args); 
err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - var ( - context archive.Archive - isRemote bool - err error - ) - - _, err = exec.LookPath("git") - hasGit := err == nil - if cmd.Arg(0) == "-" { - // As a special case, 'docker build -' will build from an empty context with the - // contents of stdin as a Dockerfile - dockerfile, err := ioutil.ReadAll(cli.in) - if err != nil { - return err - } - context, err = archive.Generate("Dockerfile", string(dockerfile)) - } else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) { - isRemote = true - } else { - root := cmd.Arg(0) - if utils.IsGIT(root) { - remoteURL := cmd.Arg(0) - if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) { - remoteURL = "https://" + remoteURL - } - - root, err = ioutil.TempDir("", "docker-build-git") - if err != nil { - return err - } - defer os.RemoveAll(root) - - if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { - return fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - } - if _, err := os.Stat(root); err != nil { - return err - } - filename := path.Join(root, "Dockerfile") - if _, err = os.Stat(filename); os.IsNotExist(err) { - return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) - } - context, err = archive.Tar(root, archive.Uncompressed) - } - var body io.Reader - // Setup an upload progress bar - // FIXME: ProgressReader shouldn't be this annoying to use - if context != nil { - sf := utils.NewStreamFormatter(false) - body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Uploading context") - } - // Upload the build context - v := &url.Values{} - - //Check if the given image name can be resolved - if *tag != "" { - repository, _ := utils.ParseRepositoryTag(*tag) - if _, _, err := registry.ResolveRepositoryName(repository); err != nil { - return err - } - } - - v.Set("t", *tag) - - if *suppressOutput { - v.Set("q", "1") - } - if isRemote { - v.Set("remote", cmd.Arg(0)) - } - if *noCache { - v.Set("nocache", "1") - } - if *rm { - v.Set("rm", "1") - } - - cli.LoadConfigFile() - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(cli.configFile) - if err != nil { - return err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - - if context != nil { - headers.Set("Content-Type", "application/tar") - } - err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers) - if jerr, ok := err.(*utils.JSONError); ok { - // If no error code is set, default to 1 - if jerr.Code == 0 { - jerr.Code = 1 - } - return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code} - } - return err -} - -// 'docker login': login / register a user to registry service. 
-func (cli *DockerCli) CmdLogin(args ...string) error { - cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.") - - var username, password, email string - - cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") - cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") - cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") - err := cmd.Parse(args) - if err != nil { - return nil - } - serverAddress := registry.IndexServerAddress() - if len(cmd.Args()) > 0 { - serverAddress = cmd.Arg(0) - } - - promptDefault := func(prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(cli.out, "%s: ", prompt) - } else { - fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) - } - } - - readInput := func(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) - } - - cli.LoadConfigFile() - authconfig, ok := cli.configFile.Configs[serverAddress] - if !ok { - authconfig = registry.AuthConfig{} - } - - if username == "" { - promptDefault("Username", authconfig.Username) - username = readInput(cli.in, cli.out) - if username == "" { - username = authconfig.Username - } - } - if username != authconfig.Username { - if password == "" { - oldState, _ := term.SaveState(cli.terminalFd) - fmt.Fprintf(cli.out, "Password: ") - term.DisableEcho(cli.terminalFd, oldState) - - password = readInput(cli.in, cli.out) - fmt.Fprint(cli.out, "\n") - - term.RestoreTerminal(cli.terminalFd, oldState) - if password == "" { - return fmt.Errorf("Error : Password Required") - } - } - - if email == "" { - promptDefault("Email", authconfig.Email) - email = readInput(cli.in, cli.out) - if email == "" { - email = authconfig.Email - } - } - } else { - password = authconfig.Password - email = authconfig.Email - } - authconfig.Username = username - authconfig.Password = password - authconfig.Email = email - authconfig.ServerAddress = serverAddress - cli.configFile.Configs[serverAddress] = authconfig - - stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false) - if statusCode == 401 { - delete(cli.configFile.Configs, serverAddress) - registry.SaveConfig(cli.configFile) - return err - } - if err != nil { - return err - } - var out2 engine.Env - err = out2.Decode(stream) - if err != nil { - cli.configFile, _ = registry.LoadConfig(os.Getenv("HOME")) - return err - } - registry.SaveConfig(cli.configFile) - if out2.Get("Status") != "" { - fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) - } - return nil -} - -// 'docker wait': block until a container stops -func (cli *DockerCli) CmdWait(args ...string) error { - cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - var encounteredError error - for _, name := range cmd.Args() { - status, err := waitForExit(cli, name) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to wait one or more containers") - } else { - fmt.Fprintf(cli.out, "%d\n", status) - } - } - return encounteredError -} - -// 'docker version': show version information -func (cli *DockerCli) CmdVersion(args ...string) error { - cmd := 
cli.Subcmd("version", "", "Show the docker version information.") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() > 0 { - cmd.Usage() - return nil - } - if dockerversion.VERSION != "" { - fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) - } - fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version()) - if dockerversion.GITCOMMIT != "" { - fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) - } - - body, _, err := readBody(cli.call("GET", "/version", nil, false)) - if err != nil { - return err - } - - out := engine.NewOutput() - remoteVersion, err := out.AddEnv() - if err != nil { - utils.Errorf("Error reading remote version: %s\n", err) - return err - } - if _, err := out.Write(body); err != nil { - utils.Errorf("Error reading remote version: %s\n", err) - return err - } - out.Close() - fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version")) - fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) - fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) - release := utils.GetReleaseVersion() - if release != "" { - fmt.Fprintf(cli.out, "Last stable version: %s", release) - if (dockerversion.VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(dockerversion.VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) { - fmt.Fprintf(cli.out, ", please update docker") - } - fmt.Fprintf(cli.out, "\n") - } - return nil -} - -// 'docker info': display system-wide information. -func (cli *DockerCli) CmdInfo(args ...string) error { - cmd := cli.Subcmd("info", "", "Display system-wide information") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() > 0 { - cmd.Usage() - return nil - } - - body, _, err := readBody(cli.call("GET", "/info", nil, false)) - if err != nil { - return err - } - - out := engine.NewOutput() - remoteInfo, err := out.AddEnv() - if err != nil { - return err - } - - if _, err := out.Write(body); err != nil { - utils.Errorf("Error reading remote info: %s\n", err) - return err - } - out.Close() - - fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) - fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) - fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) - var driverStatus [][2]string - if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { - return err - } - for _, pair := range driverStatus { - fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) - } - fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) - fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) - - if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { - fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) - fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") - fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) - fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) - fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) - - if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { - fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) - } - if initPath := remoteInfo.Get("InitPath"); initPath != "" { - fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) - } - } - - if len(remoteInfo.GetList("IndexServerAddress")) != 0 { - cli.LoadConfigFile() - u := 
cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username - if len(u) > 0 { - fmt.Fprintf(cli.out, "Username: %v\n", u) - fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) - } - } - if !remoteInfo.GetBool("MemoryLimit") { - fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") - } - if !remoteInfo.GetBool("SwapLimit") { - fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") - } - if !remoteInfo.GetBool("IPv4Forwarding") { - fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") - } - return nil -} - -func (cli *DockerCli) CmdStop(args ...string) error { - cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)") - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("t", strconv.Itoa(*nSeconds)) - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to stop one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) CmdRestart(args ...string) error { - cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container") - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("t", strconv.Itoa(*nSeconds)) - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to restart one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { - sigc := make(chan os.Signal, 1) - signal.CatchAll(sigc) - go func() { - for s := range sigc { - if s == syscall.SIGCHLD { - continue - } - var sig string - for sigStr, sigN := range signal.SignalMap { - if sigN == s { - sig = sigStr - break - } - } - if sig == "" { - utils.Errorf("Unsupported signal: %d. 
Discarding.", s) - } - if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { - utils.Debugf("Error sending signal: %s", err) - } - } - }() - return sigc -} - -func (cli *DockerCli) CmdStart(args ...string) error { - cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") - attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process") - openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - var cErr chan error - var tty bool - if *attach || *openStdin { - if cmd.NArg() > 1 { - return fmt.Errorf("You cannot start and attach multiple containers at once.") - } - - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) - if err != nil { - return err - } - - container := &Container{} - err = json.Unmarshal(body, container) - if err != nil { - return err - } - - tty = container.Config.Tty - - if !container.Config.Tty { - sigc := cli.forwardAllSignals(cmd.Arg(0)) - defer signal.StopCatch(sigc) - } - - var in io.ReadCloser - - v := url.Values{} - v.Set("stream", "1") - if *openStdin && container.Config.OpenStdin { - v.Set("stdin", "1") - in = cli.in - } - v.Set("stdout", "1") - v.Set("stderr", "1") - - cErr = utils.Go(func() error { - return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil) - }) - } - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false)) - if err != nil { - if !*attach || !*openStdin { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to start one or more containers") - } - } else { - if !*attach || !*openStdin { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - } - if encounteredError != nil { - if *openStdin || *attach { - cli.in.Close() - <-cErr - } - return encounteredError - } - - if *openStdin || *attach { - if tty && cli.isTerminal { - if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { - utils.Errorf("Error monitoring TTY size: %s\n", err) - } - } - return <-cErr - } - return nil -} - -func (cli *DockerCli) CmdInspect(args ...string) error { - cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image") - tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - var tmpl *template.Template - if *tmplStr != "" { - var err error - if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { - fmt.Fprintf(cli.err, "Template parsing error: %v\n", err) - return &utils.StatusError{StatusCode: 64, - Status: "Template parsing error: " + err.Error()} - } - } - - indented := new(bytes.Buffer) - indented.WriteByte('[') - status := 0 - - for _, name := range cmd.Args() { - obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) - if err != nil { - obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false)) - if err != nil { - if strings.Contains(err.Error(), "No such") { - fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) - } else { - 
fmt.Fprintf(cli.err, "%s", err) - } - status = 1 - continue - } - } - - if tmpl == nil { - if err = json.Indent(indented, obj, "", " "); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - continue - } - } else { - // Has template, will render - var value interface{} - if err := json.Unmarshal(obj, &value); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - continue - } - if err := tmpl.Execute(cli.out, value); err != nil { - return err - } - cli.out.Write([]byte{'\n'}) - } - indented.WriteString(",") - } - - if indented.Len() > 1 { - // Remove trailing ',' - indented.Truncate(indented.Len() - 1) - } - indented.WriteByte(']') - - if tmpl == nil { - if _, err := io.Copy(cli.out, indented); err != nil { - return err - } - } - - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - return nil -} - -func (cli *DockerCli) CmdTop(args ...string) error { - cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() == 0 { - cmd.Usage() - return nil - } - val := url.Values{} - if cmd.NArg() > 1 { - val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) - } - - stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false) - if err != nil { - return err - } - var procs engine.Env - if err := procs.Decode(stream); err != nil { - return err - } - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) - processes := [][]string{} - if err := procs.GetJson("Processes", &processes); err != nil { - return err - } - for _, proc := range processes { - fmt.Fprintln(w, strings.Join(proc, "\t")) - } - w.Flush() - return nil -} - -func (cli *DockerCli) CmdPort(args ...string) error { - cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 2 { - cmd.Usage() - return nil - } - - port := cmd.Arg(1) - proto := "tcp" - parts := strings.SplitN(port, "/", 2) - if len(parts) == 2 && len(parts[1]) != 0 { - port = parts[0] - proto = parts[1] - } - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) - if err != nil { - return err - } - var out Container - err = json.Unmarshal(body, &out) - if err != nil { - return err - } - - if frontends, exists := out.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { - for _, frontend := range frontends { - fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) - } - } else { - return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0)) - } - return nil -} - -// 'docker rmi IMAGE' removes all images with the name IMAGE -func (cli *DockerCli) CmdRmi(args ...string) error { - var ( - cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images") - force = cmd.Bool([]string{"f", "-force"}, false, "Force") - noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") - ) - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - v := url.Values{} - if *force { - v.Set("force", "1") - } - if *noprune { - v.Set("noprune", "1") - } - - var encounteredError error - for _, name := range cmd.Args() { - body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, false)) - if err != nil { - 
fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more images") - } else { - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more images") - continue - } - for _, out := range outs.Data { - if out.Get("Deleted") != "" { - fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted")) - } else { - fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged")) - } - } - } - } - return encounteredError -} - -func (cli *DockerCli) CmdHistory(args ...string) error { - cmd := cli.Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image") - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") - } - - for _, out := range outs.Data { - outID := out.Get("Id") - if !*quiet { - if *noTrunc { - fmt.Fprintf(w, "%s\t", outID) - } else { - fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) - } - - fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) - - if *noTrunc { - fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) - } else { - fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) - } - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size"))) - } else { - if *noTrunc { - fmt.Fprintln(w, outID) - } else { - fmt.Fprintln(w, utils.TruncateID(outID)) - } - } - } - w.Flush() - return nil -} - -func (cli *DockerCli) CmdRm(args ...string) error { - cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers") - v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated to the container") - link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container") - force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of running container") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - val := url.Values{} - if *v { - val.Set("v", "1") - } - if *link { - val.Set("link", "1") - } - if *force { - val.Set("force", "1") - } - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -// 'docker kill NAME' kills a running container -func (cli *DockerCli) CmdKill(args ...string) error { - cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)") - signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - 
cmd.Usage() - return nil - } - - var encounteredError error - for _, name := range cmd.Args() { - if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to kill one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) CmdImport(args ...string) error { - cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - var src, repository, tag string - - if cmd.NArg() == 3 { - fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n") - src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) - } else { - src = cmd.Arg(0) - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) - } - v := url.Values{} - - if repository != "" { - //Check if the given image name can be resolved - if _, _, err := registry.ResolveRepositoryName(repository); err != nil { - return err - } - } - - v.Set("repo", repository) - v.Set("tag", tag) - v.Set("fromSrc", src) - - var in io.Reader - - if src == "-" { - in = cli.in - } - - return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil) -} - -func (cli *DockerCli) CmdPush(args ...string) error { - cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry") - if err := cmd.Parse(args); err != nil { - return nil - } - name := cmd.Arg(0) - - if name == "" { - cmd.Usage() - return nil - } - - cli.LoadConfigFile() - - // Resolve the Repository name from fqn to hostname + name - hostname, _, err := registry.ResolveRepositoryName(name) - if err != nil { - return err - } - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(hostname) - // If we're not using a custom registry, we know the restrictions - // applied to repository names and can warn the user in advance. - // Custom repositories can have different rules, and we must also - // allow pushing by image ID. - if len(strings.SplitN(name, "/", 2)) == 1 { - username := cli.configFile.Configs[registry.IndexServerAddress()].Username - if username == "" { - username = "" - } - return fmt.Errorf("You cannot push a \"root\" repository. 
Please rename your repository in / (ex: %s/%s)", username, name) - } - - v := url.Values{} - push := func(authConfig registry.AuthConfig) error { - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - - return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{ - "X-Registry-Auth": registryAuthHeader, - }) - } - - if err := push(authConfig); err != nil { - if strings.Contains(err.Error(), "Status 401") { - fmt.Fprintln(cli.out, "\nPlease login prior to push:") - if err := cli.CmdLogin(hostname); err != nil { - return err - } - authConfig := cli.configFile.ResolveAuthConfig(hostname) - return push(authConfig) - } - return err - } - return nil -} - -func (cli *DockerCli) CmdPull(args ...string) error { - cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry") - tag := cmd.String([]string{"#t", "#-tag"}, "", "Download tagged image in repository") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0)) - if *tag == "" { - *tag = parsedTag - } - - // Resolve the Repository name from fqn to hostname + name - hostname, _, err := registry.ResolveRepositoryName(remote) - if err != nil { - return err - } - - cli.LoadConfigFile() - - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(hostname) - v := url.Values{} - v.Set("fromImage", remote) - v.Set("tag", *tag) - - pull := func(authConfig registry.AuthConfig) error { - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - - return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{ - "X-Registry-Auth": registryAuthHeader, - }) - } - - if err := pull(authConfig); err != nil { - if strings.Contains(err.Error(), "Status 401") { - fmt.Fprintln(cli.out, "\nPlease login prior to pull:") - if err := cli.CmdLogin(hostname); err != nil { - return err - } - authConfig := cli.configFile.ResolveAuthConfig(hostname) - return pull(authConfig) - } - return err - } - - return nil -} - -func (cli *DockerCli) CmdImages(args ...string) error { - cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images") - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") - all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate images used to build)") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "Output graph in graphviz format") - flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "Output graph in tree format") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() > 1 { - cmd.Usage() - return nil - } - - filter := cmd.Arg(0) - - if *flViz || *flTree { - body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - var ( - printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string) - startImage *engine.Env - - roots = engine.NewTable("Created", outs.Len()) - byParent = make(map[string]*engine.Table) - ) - - for _, image := range 
outs.Data { - if image.Get("ParentId") == "" { - roots.Add(image) - } else { - if children, exists := byParent[image.Get("ParentId")]; exists { - children.Add(image) - } else { - byParent[image.Get("ParentId")] = engine.NewTable("Created", 1) - byParent[image.Get("ParentId")].Add(image) - } - } - - if filter != "" { - if filter == image.Get("Id") || filter == utils.TruncateID(image.Get("Id")) { - startImage = image - } - - for _, repotag := range image.GetList("RepoTags") { - if repotag == filter { - startImage = image - } - } - } - } - - if *flViz { - fmt.Fprintf(cli.out, "digraph docker {\n") - printNode = (*DockerCli).printVizNode - } else { - printNode = (*DockerCli).printTreeNode - } - - if startImage != nil { - root := engine.NewTable("Created", 1) - root.Add(startImage) - cli.WalkTree(*noTrunc, root, byParent, "", printNode) - } else if filter == "" { - cli.WalkTree(*noTrunc, roots, byParent, "", printNode) - } - if *flViz { - fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") - } - } else { - v := url.Values{} - if cmd.NArg() == 1 { - v.Set("filter", filter) - } - if *all { - v.Set("all", "1") - } - - body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) - - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") - } - - for _, out := range outs.Data { - for _, repotag := range out.GetList("RepoTags") { - - repo, tag := utils.ParseRepositoryTag(repotag) - outID := out.Get("Id") - if !*noTrunc { - outID = utils.TruncateID(outID) - } - - if !*quiet { - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize"))) - } else { - fmt.Fprintln(w, outID) - } - } - } - - if !*quiet { - w.Flush() - } - } - return nil -} - -func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { - length := images.Len() - if length > 1 { - for index, image := range images.Data { - if index+1 == length { - printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) - } - } else { - printNode(cli, noTrunc, image, prefix+"\u251C─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) - } - } - } - } else { - for _, image := range images.Data { - printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) - } - } - } -} - -func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { - var ( - imageID string - parentID string - ) - if noTrunc { - imageID = image.Get("Id") - parentID = image.Get("ParentId") - } else { - imageID = utils.TruncateID(image.Get("Id")) - parentID = utils.TruncateID(image.Get("ParentId")) - } - if parentID == "" { - fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) - } else { - fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) - } - if image.GetList("RepoTags")[0] != ":" { - fmt.Fprintf(cli.out, " \"%s\" 
[label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", - imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n")) - } -} - -func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { - var imageID string - if noTrunc { - imageID = image.Get("Id") - } else { - imageID = utils.TruncateID(image.Get("Id")) - } - - fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize"))) - if image.GetList("RepoTags")[0] != ":" { - fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) - } else { - fmt.Fprint(cli.out, "\n") - } -} - -func (cli *DockerCli) CmdPs(args ...string) error { - cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers") - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") - size := cmd.Bool([]string{"s", "-size"}, false, "Display sizes") - all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") - since := cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show only containers created since Id or Name, include non-running ones.") - before := cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name, include non-running ones.") - last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.") - - if err := cmd.Parse(args); err != nil { - return nil - } - v := url.Values{} - if *last == -1 && *nLatest { - *last = 1 - } - if *all { - v.Set("all", "1") - } - if *last != -1 { - v.Set("limit", strconv.Itoa(*last)) - } - if *since != "" { - v.Set("since", *since) - } - if *before != "" { - v.Set("before", *before) - } - if *size { - v.Set("size", "1") - } - - body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES") - if *size { - fmt.Fprintln(w, "\tSIZE") - } else { - fmt.Fprint(w, "\n") - } - } - - for _, out := range outs.Data { - var ( - outID = out.Get("Id") - outNames = out.GetList("Names") - ) - - if !*noTrunc { - outID = utils.TruncateID(outID) - } - - // Remove the leading / from the names - for i := 0; i < len(outNames); i++ { - outNames[i] = outNames[i][1:] - } - - if !*quiet { - var ( - outCommand = out.Get("Command") - ports = engine.NewTable("", 0) - ) - if !*noTrunc { - outCommand = utils.Trunc(outCommand, 20) - } - ports.ReadListFrom([]byte(out.Get("Ports"))) - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), displayablePorts(ports), strings.Join(outNames, ",")) - if *size { - if out.GetInt("SizeRootFs") > 0 { - fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs"))) - } else { - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw"))) - } - } else { - fmt.Fprint(w, "\n") - } - } else { - fmt.Fprintln(w, outID) - } - } 
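(Aside, not part of the patch.) The `docker ps` listing above, like `docker images`, `docker history` and `docker top`, gets its column alignment from Go's text/tabwriter with the writer parameters visible in the diff (minwidth 20, tabwidth 1, padding 3). A minimal standalone sketch of that pattern, with invented rows:

package main

import (
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
)

func main() {
	// Same parameters as the CLI: minwidth 20, tabwidth 1, padding 3, space padding.
	w := tabwriter.NewWriter(os.Stdout, 20, 1, 3, ' ', 0)
	fmt.Fprintln(w, strings.Join([]string{"CONTAINER ID", "IMAGE", "STATUS"}, "\t"))
	rows := [][]string{
		{"4fa6e0f0c678", "ubuntu:12.04", "Up 2 minutes"},
		{"d1e0f33f2b8a", "busybox", "Exited (0) 5 seconds ago"},
	}
	for _, row := range rows {
		fmt.Fprintln(w, strings.Join(row, "\t"))
	}
	w.Flush() // nothing is aligned or written until Flush
}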
- - if !*quiet { - w.Flush() - } - return nil -} - -func (cli *DockerCli) CmdCommit(args ...string) error { - cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes") - flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") - flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith \"") - flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: --run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`) - if err := cmd.Parse(args); err != nil { - return nil - } - - var name, repository, tag string - - if cmd.NArg() == 3 { - fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n") - name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) - } else { - name = cmd.Arg(0) - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) - } - - if name == "" { - cmd.Usage() - return nil - } - - //Check if the given image name can be resolved - if repository != "" { - if _, _, err := registry.ResolveRepositoryName(repository); err != nil { - return err - } - } - - v := url.Values{} - v.Set("container", name) - v.Set("repo", repository) - v.Set("tag", tag) - v.Set("comment", *flComment) - v.Set("author", *flAuthor) - var ( - config *runconfig.Config - env engine.Env - ) - if *flConfig != "" { - config = &runconfig.Config{} - if err := json.Unmarshal([]byte(*flConfig), config); err != nil { - return err - } - } - stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false) - if err != nil { - return err - } - if err := env.Decode(stream); err != nil { - return err - } - - fmt.Fprintf(cli.out, "%s\n", env.Get("Id")) - return nil -} - -func (cli *DockerCli) CmdEvents(args ...string) error { - cmd := cli.Subcmd("events", "[OPTIONS]", "Get real time events from the server") - since := cmd.String([]string{"#since", "-since"}, "", "Show previously created events and then stream.") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 0 { - cmd.Usage() - return nil - } - - v := url.Values{} - if *since != "" { - loc := time.FixedZone(time.Now().Zone()) - format := "2006-01-02 15:04:05 -0700 MST" - if len(*since) < len(format) { - format = format[:len(*since)] - } - - if t, err := time.ParseInLocation(format, *since, loc); err == nil { - v.Set("since", strconv.FormatInt(t.Unix(), 10)) - } else { - v.Set("since", *since) - } - } - - if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdExport(args ...string) error { - cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdDiff(args ...string) error { - cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false)) - - if err != nil { - return err - } - - outs := engine.NewTable("", 0) - if _, err := outs.ReadListFrom(body); err 
!= nil { - return err - } - for _, change := range outs.Data { - var kind string - switch change.GetInt("Kind") { - case archive.ChangeModify: - kind = "C" - case archive.ChangeAdd: - kind = "A" - case archive.ChangeDelete: - kind = "D" - } - fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path")) - } - return nil -} - -func (cli *DockerCli) CmdLogs(args ...string) error { - cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container") - follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - name := cmd.Arg(0) - body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) - if err != nil { - return err - } - - container := &Container{} - err = json.Unmarshal(body, container) - if err != nil { - return err - } - - v := url.Values{} - v.Set("logs", "1") - v.Set("stdout", "1") - v.Set("stderr", "1") - if *follow && container.State.Running { - v.Set("stream", "1") - } - - if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdAttach(args ...string) error { - cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container") - noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin") - proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - name := cmd.Arg(0) - body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) - if err != nil { - return err - } - - container := &Container{} - err = json.Unmarshal(body, container) - if err != nil { - return err - } - - if !container.State.Running { - return fmt.Errorf("You cannot attach to a stopped container, start it first") - } - - if container.Config.Tty && cli.isTerminal { - if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { - utils.Debugf("Error monitoring TTY size: %s", err) - } - } - - var in io.ReadCloser - - v := url.Values{} - v.Set("stream", "1") - if !*noStdin && container.Config.OpenStdin { - v.Set("stdin", "1") - in = cli.in - } - v.Set("stdout", "1") - v.Set("stderr", "1") - - if *proxy && !container.Config.Tty { - sigc := cli.forwardAllSignals(cmd.Arg(0)) - defer signal.StopCatch(sigc) - } - - if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil { - return err - } - - _, status, err := getExitCode(cli, cmd.Arg(0)) - if err != nil { - return err - } - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - - return nil -} - -func (cli *DockerCli) CmdSearch(args ...string) error { - cmd := cli.Subcmd("search", "TERM", "Search the docker index for images") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds") - stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx stars") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("term", cmd.Arg(0)) - - body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, true)) - 
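(Aside, not part of the patch.) The --sig-proxy behaviour in CmdAttach above works by catching every signal the client process receives and replaying it to the container through the kill endpoint. The sketch below uses only the standard library (the real code goes through pkg/signal's CatchAll/StopCatch) and stubs the forwarding out with a print:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// signal.Notify with no signal list relays every incoming signal.
	sigc := make(chan os.Signal, 128)
	signal.Notify(sigc)
	fmt.Println("send this process signals; SIGTERM exits")
	for s := range sigc {
		if s == syscall.SIGCHLD {
			continue // ignored, as in forwardAllSignals
		}
		// Here the real client issues POST /containers/{id}/kill?signal=<name>.
		fmt.Printf("would forward %v to the container\n", s)
		if s == syscall.SIGTERM {
			return
		}
	}
}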
- if err != nil { - return err - } - outs := engine.NewTable("star_count", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) - fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n") - for _, out := range outs.Data { - if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) { - continue - } - desc := strings.Replace(out.Get("description"), "\n", " ", -1) - desc = strings.Replace(desc, "\r", " ", -1) - if !*noTrunc && len(desc) > 45 { - desc = utils.Trunc(desc, 42) + "..." - } - fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count")) - if out.GetBool("is_official") { - fmt.Fprint(w, "[OK]") - - } - fmt.Fprint(w, "\t") - if out.GetBool("is_trusted") { - fmt.Fprint(w, "[OK]") - } - fmt.Fprint(w, "\n") - } - w.Flush() - return nil -} - -// Ports type - Used to parse multiple -p flags -type ports []int - -func (cli *DockerCli) CmdTag(args ...string) error { - cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository") - force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 2 && cmd.NArg() != 3 { - cmd.Usage() - return nil - } - - var repository, tag string - - if cmd.NArg() == 3 { - fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]]\n") - repository, tag = cmd.Arg(1), cmd.Arg(2) - } else { - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) - } - - v := url.Values{} - - //Check if the given image name can be resolved - if _, _, err := registry.ResolveRepositoryName(repository); err != nil { - return err - } - v.Set("repo", repository) - v.Set("tag", tag) - - if *force { - v.Set("force", "1") - } - - if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdRun(args ...string) error { - // FIXME: just use runconfig.Parse already - config, hostConfig, cmd, err := runconfig.ParseSubcommand(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil) - if err != nil { - return err - } - if config.Image == "" { - cmd.Usage() - return nil - } - - // Retrieve relevant client-side config - var ( - flName = cmd.Lookup("name") - flRm = cmd.Lookup("rm") - flSigProxy = cmd.Lookup("sig-proxy") - autoRemove, _ = strconv.ParseBool(flRm.Value.String()) - sigProxy, _ = strconv.ParseBool(flSigProxy.Value.String()) - ) - - // Disable sigProxy in case on TTY - if config.Tty { - sigProxy = false - } - - var containerIDFile io.WriteCloser - if len(hostConfig.ContainerIDFile) > 0 { - if _, err := os.Stat(hostConfig.ContainerIDFile); err == nil { - return fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile) - } - if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil { - return fmt.Errorf("Failed to create the container ID file: %s", err) - } - defer func() { - containerIDFile.Close() - var ( - cidFileInfo os.FileInfo - err error - ) - if cidFileInfo, err = os.Stat(hostConfig.ContainerIDFile); err != nil { - return - } - if cidFileInfo.Size() == 0 { - if err := os.Remove(hostConfig.ContainerIDFile); err != nil { - fmt.Printf("failed to remove CID file '%s': %s \n", hostConfig.ContainerIDFile, 
err) - } - } - }() - } - - containerValues := url.Values{} - if name := flName.Value.String(); name != "" { - containerValues.Set("name", name) - } - - //create the container - stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false) - //if image not found try to pull it - if statusCode == 404 { - fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) - - v := url.Values{} - repos, tag := utils.ParseRepositoryTag(config.Image) - v.Set("fromImage", repos) - v.Set("tag", tag) - - // Resolve the Repository name from fqn to hostname + name - hostname, _, err := registry.ResolveRepositoryName(repos) - if err != nil { - return err - } - - // Load the auth config file, to be able to pull the image - cli.LoadConfigFile() - - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(hostname) - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { - return err - } - if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false); err != nil { - return err - } - } else if err != nil { - return err - } - - var runResult engine.Env - if err := runResult.Decode(stream); err != nil { - return err - } - - for _, warning := range runResult.GetList("Warnings") { - fmt.Fprintf(cli.err, "WARNING: %s\n", warning) - } - - if len(hostConfig.ContainerIDFile) > 0 { - if _, err = containerIDFile.Write([]byte(runResult.Get("Id"))); err != nil { - return fmt.Errorf("Failed to write the container ID to the file: %s", err) - } - } - - if sigProxy { - sigc := cli.forwardAllSignals(runResult.Get("Id")) - defer signal.StopCatch(sigc) - } - - var ( - waitDisplayId chan struct{} - errCh chan error - ) - - if !config.AttachStdout && !config.AttachStderr { - // Make this asynchrone in order to let the client write to stdin before having to read the ID - waitDisplayId = make(chan struct{}) - go func() { - defer close(waitDisplayId) - fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id")) - }() - } - - // We need to instanciate the chan because the select needs it. It can - // be closed but can't be uninitialized. - hijacked := make(chan io.Closer) - - // Block the return until the chan gets closed - defer func() { - utils.Debugf("End of CmdRun(), Waiting for hijack to finish.") - if _, ok := <-hijacked; ok { - utils.Errorf("Hijack did not finish (chan still open)") - } - }() - - if config.AttachStdin || config.AttachStdout || config.AttachStderr { - var ( - out, stderr io.Writer - in io.ReadCloser - v = url.Values{} - ) - v.Set("stream", "1") - - if config.AttachStdin { - v.Set("stdin", "1") - in = cli.in - } - if config.AttachStdout { - v.Set("stdout", "1") - out = cli.out - } - if config.AttachStderr { - v.Set("stderr", "1") - if config.Tty { - stderr = cli.out - } else { - stderr = cli.err - } - } - - errCh = utils.Go(func() error { - return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked) - }) - } else { - close(hijacked) - } - - // Acknowledge the hijack before starting - select { - case closer := <-hijacked: - // Make sure that hijack gets closed when returning. (result - // in closing hijack chan and freeing server's goroutines. 
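(Aside, not part of the patch.) When the image is missing locally, CmdRun above pulls it by POSTing to /images/create with the registry credentials JSON-encoded and URL-safe-base64'd into the X-Registry-Auth header, the same header CmdPush and CmdPull build. A minimal sketch of just that header construction; the struct fields here are illustrative, the real registry.AuthConfig carries more:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

// authConfig is illustrative; the real registry.AuthConfig has more fields.
type authConfig struct {
	Username      string `json:"username"`
	Password      string `json:"password"`
	Email         string `json:"email"`
	ServerAddress string `json:"serveraddress"`
}

func main() {
	buf, err := json.Marshal(authConfig{Username: "jane", Password: "secret"})
	if err != nil {
		panic(err)
	}
	req, err := http.NewRequest("POST", "http://registry.example.invalid/images/create", nil)
	if err != nil {
		panic(err)
	}
	// The daemon expects the JSON blob URL-safe base64 encoded in this header.
	req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf))
	fmt.Println(req.Header.Get("X-Registry-Auth"))
}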
- if closer != nil { - defer closer.Close() - } - case err := <-errCh: - if err != nil { - utils.Debugf("Error hijack: %s", err) - return err - } - } - - //start the container - if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil { - return err - } - - if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal { - if err := cli.monitorTtySize(runResult.Get("Id")); err != nil { - utils.Errorf("Error monitoring TTY size: %s\n", err) - } - } - - if errCh != nil { - if err := <-errCh; err != nil { - utils.Debugf("Error hijack: %s", err) - return err - } - } - - // Detached mode: wait for the id to be displayed and return. - if !config.AttachStdout && !config.AttachStderr { - // Detached mode - <-waitDisplayId - return nil - } - - var status int - - // Attached mode - if autoRemove { - // Autoremove: wait for the container to finish, retrieve - // the exit code and remove the container - if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil { - return err - } - if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { - return err - } - if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil { - return err - } - } else { - if !config.Tty { - // In non-tty mode, we can't dettach, so we know we need to wait. - if status, err = waitForExit(cli, runResult.Get("Id")); err != nil { - return err - } - } else { - // In TTY mode, there is a race. If the process dies too slowly, the state can be update after the getExitCode call - // and result in a wrong exit code. - // No Autoremove: Simply retrieve the exit code - if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { - return err - } - } - } - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - return nil -} - -func (cli *DockerCli) CmdCp(args ...string) error { - cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 2 { - cmd.Usage() - return nil - } - - var copyData engine.Env - info := strings.Split(cmd.Arg(0), ":") - - if len(info) != 2 { - return fmt.Errorf("Error: Path not specified") - } - - copyData.Set("Resource", info[1]) - copyData.Set("HostPath", cmd.Arg(1)) - - stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false) - if stream != nil { - defer stream.Close() - } - if statusCode == 404 { - return fmt.Errorf("No such container: %v", info[0]) - } - if err != nil { - return err - } - - if statusCode == 200 { - if err := archive.Untar(stream, copyData.Get("HostPath"), nil); err != nil { - return err - } - } - return nil -} - -func (cli *DockerCli) CmdSave(args ...string) error { - cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout by default)") - outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT") - - if err := cmd.Parse(args); err != nil { - return err - } - - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - var ( - output io.Writer = cli.out - err error - ) - if *outfile != "" { - output, err = os.Create(*outfile) - if err != nil { - return err - } - } - image := cmd.Arg(0) - if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdLoad(args 
...string) error { - cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN") - infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") - - if err := cmd.Parse(args); err != nil { - return err - } - - if cmd.NArg() != 0 { - cmd.Usage() - return nil - } - - var ( - input io.Reader = cli.in - err error - ) - if *infile != "" { - input, err = os.Open(*infile) - if err != nil { - return err - } - } - if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) dial() (net.Conn, error) { - if cli.tlsConfig != nil && cli.proto != "unix" { - return tls.Dial(cli.proto, cli.addr, cli.tlsConfig) - } - return net.Dial(cli.proto, cli.addr) -} - -func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if env, ok := data.(engine.Env); ok { - if err := env.Encode(params); err != nil { - return nil, -1, err - } - } else { - buf, err := json.Marshal(data) - if err != nil { - return nil, -1, err - } - if _, err := params.Write(buf); err != nil { - return nil, -1, err - } - } - } - // fixme: refactor client to support redirect - re := regexp.MustCompile("/+") - path = re.ReplaceAllString(path, "/") - - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), params) - if err != nil { - return nil, -1, err - } - if passAuthInfo { - cli.LoadConfigFile() - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress()) - getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return nil, err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil - } - if headers, err := getHeaders(authConfig); err == nil && headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.Host = cli.addr - if data != nil { - req.Header.Set("Content-Type", "application/json") - } else if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - dial, err := cli.dial() - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return nil, -1, ErrConnectionRefused - } - return nil, -1, err - } - clientconn := httputil.NewClientConn(dial, nil) - resp, err := clientconn.Do(req) - if err != nil { - clientconn.Close() - if strings.Contains(err.Error(), "connection refused") { - return nil, -1, ErrConnectionRefused - } - return nil, -1, err - } - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, -1, err - } - if len(body) == 0 { - return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL) - } - return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body)) - } - - wrapper := utils.NewReadCloserWrapper(resp.Body, func() error { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return clientconn.Close() - }) - return wrapper, resp.StatusCode, nil -} - -func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) 
error { - if (method == "POST" || method == "PUT") && in == nil { - in = bytes.NewReader([]byte{}) - } - - // fixme: refactor client to support redirect - re := regexp.MustCompile("/+") - path = re.ReplaceAllString(path, "/") - - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), in) - if err != nil { - return err - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.Host = cli.addr - if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - - if headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - - dial, err := cli.dial() - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") - } - return err - } - clientconn := httputil.NewClientConn(dial, nil) - resp, err := clientconn.Do(req) - defer clientconn.Close() - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") - } - return err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - if len(body) == 0 { - return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode)) - } - return fmt.Errorf("Error: %s", bytes.TrimSpace(body)) - } - - if MatchesContentType(resp.Header.Get("Content-Type"), "application/json") { - return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal) - } - if _, err := io.Copy(out, resp.Body); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error { - defer func() { - if started != nil { - close(started) - } - }() - // fixme: refactor client to support redirect - re := regexp.MustCompile("/+") - path = re.ReplaceAllString(path, "/") - - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), nil) - if err != nil { - return err - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.Header.Set("Content-Type", "plain/text") - req.Host = cli.addr - - dial, err := cli.dial() - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") - } - return err - } - clientconn := httputil.NewClientConn(dial, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - clientconn.Do(req) - - rwc, br := clientconn.Hijack() - defer rwc.Close() - - if started != nil { - started <- rwc - } - - var receiveStdout chan error - - var oldState *term.State - - if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" { - oldState, err = term.SetRawTerminal(cli.terminalFd) - if err != nil { - return err - } - defer term.RestoreTerminal(cli.terminalFd, oldState) - } - - if stdout != nil || stderr != nil { - receiveStdout = utils.Go(func() (err error) { - defer func() { - if in != nil { - if setRawTerminal && cli.isTerminal { - term.RestoreTerminal(cli.terminalFd, oldState) - } - // For some reason this Close call blocks on darwin.. - // As the client exists right after, simply discard the close - // until we find a better solution. 
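(Aside, not part of the patch.) call, stream and hijack all reach the daemon the same way: net.Dial to the address, then drive the raw connection with httputil.ClientConn (long since deprecated in favour of http.Transport, but that is what this code does). A sketch of one GET over the default unix socket; the socket path and the API version in the URL are assumptions:

package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httputil"
)

func main() {
	// Assumed default daemon socket; over TCP the same code would use net.Dial("tcp", addr).
	conn, err := net.Dial("unix", "/var/run/docker.sock")
	if err != nil {
		panic(err)
	}
	clientconn := httputil.NewClientConn(conn, nil)
	defer clientconn.Close()

	req, err := http.NewRequest("GET", "/v1.9/info", nil) // API version is illustrative
	if err != nil {
		panic(err)
	}
	req.Host = "docker" // the request line only needs a non-empty Host
	resp, err := clientconn.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%d\n%s\n", resp.StatusCode, body)
}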
- if goruntime.GOOS != "darwin" { - in.Close() - } - } - }() - - // When TTY is ON, use regular copy - if setRawTerminal { - _, err = io.Copy(stdout, br) - } else { - _, err = utils.StdCopy(stdout, stderr, br) - } - utils.Debugf("[hijack] End of stdout") - return err - }) - } - - sendStdin := utils.Go(func() error { - if in != nil { - io.Copy(rwc, in) - utils.Debugf("[hijack] End of stdin") - } - if tcpc, ok := rwc.(*net.TCPConn); ok { - if err := tcpc.CloseWrite(); err != nil { - utils.Debugf("Couldn't send EOF: %s\n", err) - } - } else if unixc, ok := rwc.(*net.UnixConn); ok { - if err := unixc.CloseWrite(); err != nil { - utils.Debugf("Couldn't send EOF: %s\n", err) - } - } - // Discard errors due to pipe interruption - return nil - }) - - if stdout != nil || stderr != nil { - if err := <-receiveStdout; err != nil { - utils.Debugf("Error receiveStdout: %s", err) - return err - } - } - - if !cli.isTerminal { - if err := <-sendStdin; err != nil { - utils.Debugf("Error sendStdin: %s", err) - return err - } - } - return nil - -} - -func (cli *DockerCli) getTtySize() (int, int) { - if !cli.isTerminal { - return 0, 0 - } - ws, err := term.GetWinsize(cli.terminalFd) - if err != nil { - utils.Debugf("Error getting size: %s", err) - if ws == nil { - return 0, 0 - } - } - return int(ws.Height), int(ws.Width) -} - -func (cli *DockerCli) resizeTty(id string) { - height, width := cli.getTtySize() - if height == 0 && width == 0 { - return - } - v := url.Values{} - v.Set("h", strconv.Itoa(height)) - v.Set("w", strconv.Itoa(width)) - if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil { - utils.Debugf("Error resize: %s", err) - } -} - -func (cli *DockerCli) monitorTtySize(id string) error { - cli.resizeTty(id) - - sigchan := make(chan os.Signal, 1) - gosignal.Notify(sigchan, syscall.SIGWINCH) - go func() { - for _ = range sigchan { - cli.resizeTty(id) - } - }() - return nil -} - -func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet { - flags := flag.NewFlagSet(name, flag.ContinueOnError) - flags.Usage = func() { - fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description) - flags.PrintDefaults() - os.Exit(2) - } - return flags -} - -func (cli *DockerCli) LoadConfigFile() (err error) { - cli.configFile, err = registry.LoadConfig(os.Getenv("HOME")) - if err != nil { - fmt.Fprintf(cli.err, "WARNING: %s\n", err) - } - return err -} - -func waitForExit(cli *DockerCli, containerId string) (int, error) { - stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false) - if err != nil { - return -1, err - } - - var out engine.Env - if err := out.Decode(stream); err != nil { - return -1, err - } - return out.GetInt("StatusCode"), nil -} - -// getExitCode perform an inspect on the container. It returns -// the running state and the exit code. -func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { - body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false)) - if err != nil { - // If we can't connect, then the daemon probably died. 
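(Aside, not part of the patch.) monitorTtySize above resizes the remote TTY on every SIGWINCH. A standalone sketch of that loop with the API call replaced by a print (Unix only, since SIGWINCH does not exist on Windows):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Unix only: terminal resizes arrive as SIGWINCH.
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGWINCH)
	fmt.Println("resize this terminal (Ctrl-C to quit)")
	for range sigchan {
		// The real client re-reads the window size here and POSTs
		// /containers/{id}/resize?h=<rows>&w=<cols>.
		fmt.Println("got SIGWINCH")
	}
}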
- if err != ErrConnectionRefused { - return false, -1, err - } - return false, -1, nil - } - c := &Container{} - if err := json.Unmarshal(body, c); err != nil { - return false, -1, err - } - return c.State.Running, c.State.ExitCode, nil -} - -func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) { - if stream != nil { - defer stream.Close() - } - if err != nil { - return nil, statusCode, err - } - body, err := ioutil.ReadAll(stream) - if err != nil { - return nil, -1, err - } - return body, statusCode, nil -} - -func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsConfig *tls.Config) *DockerCli { - var ( - isTerminal = false - terminalFd uintptr - ) - - if in != nil { - if file, ok := in.(*os.File); ok { - terminalFd = file.Fd() - isTerminal = term.IsTerminal(terminalFd) - } - } - - if err == nil { - err = out - } - return &DockerCli{ - proto: proto, - addr: addr, - in: in, - out: out, - err: err, - isTerminal: isTerminal, - terminalFd: terminalFd, - tlsConfig: tlsConfig, - } -} - -type DockerCli struct { - proto string - addr string - configFile *registry.ConfigFile - in io.ReadCloser - out io.Writer - err io.Writer - isTerminal bool - terminalFd uintptr - tlsConfig *tls.Config -} diff --git a/api/client/client.go b/api/client/client.go new file mode 100644 index 0000000000..29b49464c4 --- /dev/null +++ b/api/client/client.go @@ -0,0 +1,2551 @@ +package client + +import ( + "bufio" + "bytes" + "crypto/tls" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/dockerversion" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/nat" + flag "github.com/dotcloud/docker/pkg/mflag" + "github.com/dotcloud/docker/pkg/signal" + "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/registry" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/utils" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "os/exec" + gosignal "os/signal" + "path" + "reflect" + "regexp" + goruntime "runtime" + "strconv" + "strings" + "syscall" + "text/tabwriter" + "text/template" + "time" +) + +var funcMap = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, +} + +var ( + ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") +) + +func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) { + methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:]) + method := reflect.ValueOf(cli).MethodByName(methodName) + if !method.IsValid() { + return nil, false + } + return method.Interface().(func(...string) error), true +} + +func (cli *DockerCli) ParseCommands(args ...string) error { + if len(args) > 0 { + method, exists := cli.getMethod(args[0]) + if !exists { + fmt.Println("Error: Command not found:", args[0]) + return cli.CmdHelp(args[1:]...) + } + return method(args[1:]...) + } + return cli.CmdHelp(args...) 
+} + +func (cli *DockerCli) CmdHelp(args ...string) error { + if len(args) > 0 { + method, exists := cli.getMethod(args[0]) + if !exists { + fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0]) + } else { + method("--help") + return nil + } + } + help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", api.DEFAULTUNIXSOCKET) + for _, command := range [][]string{ + {"attach", "Attach to a running container"}, + {"build", "Build a container from a Dockerfile"}, + {"commit", "Create a new image from a container's changes"}, + {"cp", "Copy files/folders from the containers filesystem to the host path"}, + {"diff", "Inspect changes on a container's filesystem"}, + {"events", "Get real time events from the server"}, + {"export", "Stream the contents of a container as a tar archive"}, + {"history", "Show the history of an image"}, + {"images", "List images"}, + {"import", "Create a new filesystem image from the contents of a tarball"}, + {"info", "Display system-wide information"}, + {"insert", "Insert a file in an image"}, + {"inspect", "Return low-level information on a container"}, + {"kill", "Kill a running container"}, + {"load", "Load an image from a tar archive"}, + {"login", "Register or Login to the docker registry server"}, + {"logs", "Fetch the logs of a container"}, + {"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"}, + {"ps", "List containers"}, + {"pull", "Pull an image or a repository from the docker registry server"}, + {"push", "Push an image or a repository to the docker registry server"}, + {"restart", "Restart a running container"}, + {"rm", "Remove one or more containers"}, + {"rmi", "Remove one or more images"}, + {"run", "Run a command in a new container"}, + {"save", "Save an image to a tar archive"}, + {"search", "Search for an image in the docker index"}, + {"start", "Start a stopped container"}, + {"stop", "Stop a running container"}, + {"tag", "Tag an image into a repository"}, + {"top", "Lookup the running processes of a container"}, + {"version", "Show the docker version information"}, + {"wait", "Block until a container stops, then print its exit code"}, + } { + help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) + } + fmt.Fprintf(cli.err, "%s\n", help) + return nil +} + +func (cli *DockerCli) CmdInsert(args ...string) error { + cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 3 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("url", cmd.Arg(1)) + v.Set("path", cmd.Arg(2)) + + return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil) +} + +func (cli *DockerCli) CmdBuild(args ...string) error { + cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH") + tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") + suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") + noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") + rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") + if err := 
cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var ( + context archive.Archive + isRemote bool + err error + ) + + _, err = exec.LookPath("git") + hasGit := err == nil + if cmd.Arg(0) == "-" { + // As a special case, 'docker build -' will build from an empty context with the + // contents of stdin as a Dockerfile + dockerfile, err := ioutil.ReadAll(cli.in) + if err != nil { + return err + } + context, err = archive.Generate("Dockerfile", string(dockerfile)) + } else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) { + isRemote = true + } else { + root := cmd.Arg(0) + if utils.IsGIT(root) { + remoteURL := cmd.Arg(0) + if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) { + remoteURL = "https://" + remoteURL + } + + root, err = ioutil.TempDir("", "docker-build-git") + if err != nil { + return err + } + defer os.RemoveAll(root) + + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { + return fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + } + if _, err := os.Stat(root); err != nil { + return err + } + filename := path.Join(root, "Dockerfile") + if _, err = os.Stat(filename); os.IsNotExist(err) { + return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) + } + context, err = archive.Tar(root, archive.Uncompressed) + } + var body io.Reader + // Setup an upload progress bar + // FIXME: ProgressReader shouldn't be this annoying to use + if context != nil { + sf := utils.NewStreamFormatter(false) + body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Uploading context") + } + // Upload the build context + v := &url.Values{} + + //Check if the given image name can be resolved + if *tag != "" { + repository, _ := utils.ParseRepositoryTag(*tag) + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + } + + v.Set("t", *tag) + + if *suppressOutput { + v.Set("q", "1") + } + if isRemote { + v.Set("remote", cmd.Arg(0)) + } + if *noCache { + v.Set("nocache", "1") + } + if *rm { + v.Set("rm", "1") + } + + cli.LoadConfigFile() + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(cli.configFile) + if err != nil { + return err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + + if context != nil { + headers.Set("Content-Type", "application/tar") + } + err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers) + if jerr, ok := err.(*utils.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + return err +} + +// 'docker login': login / register a user to registry service. 
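(Aside, not part of the patch.) For `docker build -`, CmdBuild above wraps the Dockerfile read from stdin into a one-file tar archive and ships that as the build context (archive.Generate in the diff). A sketch of the same idea using only archive/tar; the Dockerfile contents are made up:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

// contextFromDockerfile wraps Dockerfile contents in a single-entry tar, the
// shape of build context that `docker build -` sends to the daemon.
func contextFromDockerfile(dockerfile []byte) (*bytes.Buffer, error) {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	hdr := &tar.Header{Name: "Dockerfile", Mode: 0600, Size: int64(len(dockerfile))}
	if err := tw.WriteHeader(hdr); err != nil {
		return nil, err
	}
	if _, err := tw.Write(dockerfile); err != nil {
		return nil, err
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	ctx, err := contextFromDockerfile([]byte("FROM busybox\nCMD [\"true\"]\n"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("build context: %d bytes of tar\n", ctx.Len())
}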
+func (cli *DockerCli) CmdLogin(args ...string) error { + cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.") + + var username, password, email string + + cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") + cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") + cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") + err := cmd.Parse(args) + if err != nil { + return nil + } + serverAddress := registry.IndexServerAddress() + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } + + promptDefault := func(prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(cli.out, "%s: ", prompt) + } else { + fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) + } + } + + readInput := func(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) + } + + cli.LoadConfigFile() + authconfig, ok := cli.configFile.Configs[serverAddress] + if !ok { + authconfig = registry.AuthConfig{} + } + + if username == "" { + promptDefault("Username", authconfig.Username) + username = readInput(cli.in, cli.out) + if username == "" { + username = authconfig.Username + } + } + if username != authconfig.Username { + if password == "" { + oldState, _ := term.SaveState(cli.terminalFd) + fmt.Fprintf(cli.out, "Password: ") + term.DisableEcho(cli.terminalFd, oldState) + + password = readInput(cli.in, cli.out) + fmt.Fprint(cli.out, "\n") + + term.RestoreTerminal(cli.terminalFd, oldState) + if password == "" { + return fmt.Errorf("Error : Password Required") + } + } + + if email == "" { + promptDefault("Email", authconfig.Email) + email = readInput(cli.in, cli.out) + if email == "" { + email = authconfig.Email + } + } + } else { + password = authconfig.Password + email = authconfig.Email + } + authconfig.Username = username + authconfig.Password = password + authconfig.Email = email + authconfig.ServerAddress = serverAddress + cli.configFile.Configs[serverAddress] = authconfig + + stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false) + if statusCode == 401 { + delete(cli.configFile.Configs, serverAddress) + registry.SaveConfig(cli.configFile) + return err + } + if err != nil { + return err + } + var out2 engine.Env + err = out2.Decode(stream) + if err != nil { + cli.configFile, _ = registry.LoadConfig(os.Getenv("HOME")) + return err + } + registry.SaveConfig(cli.configFile) + if out2.Get("Status") != "" { + fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) + } + return nil +} + +// 'docker wait': block until a container stops +func (cli *DockerCli) CmdWait(args ...string) error { + cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + var encounteredError error + for _, name := range cmd.Args() { + status, err := waitForExit(cli, name) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to wait one or more containers") + } else { + fmt.Fprintf(cli.out, "%d\n", status) + } + } + return encounteredError +} + +// 'docker version': show version information +func (cli *DockerCli) CmdVersion(args ...string) error { + cmd := 
cli.Subcmd("version", "", "Show the docker version information.") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() > 0 { + cmd.Usage() + return nil + } + if dockerversion.VERSION != "" { + fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) + } + fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version()) + if dockerversion.GITCOMMIT != "" { + fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) + } + + body, _, err := readBody(cli.call("GET", "/version", nil, false)) + if err != nil { + return err + } + + out := engine.NewOutput() + remoteVersion, err := out.AddEnv() + if err != nil { + utils.Errorf("Error reading remote version: %s\n", err) + return err + } + if _, err := out.Write(body); err != nil { + utils.Errorf("Error reading remote version: %s\n", err) + return err + } + out.Close() + fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version")) + fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) + fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) + release := utils.GetReleaseVersion() + if release != "" { + fmt.Fprintf(cli.out, "Last stable version: %s", release) + if (dockerversion.VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(dockerversion.VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) { + fmt.Fprintf(cli.out, ", please update docker") + } + fmt.Fprintf(cli.out, "\n") + } + return nil +} + +// 'docker info': display system-wide information. +func (cli *DockerCli) CmdInfo(args ...string) error { + cmd := cli.Subcmd("info", "", "Display system-wide information") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() > 0 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/info", nil, false)) + if err != nil { + return err + } + + out := engine.NewOutput() + remoteInfo, err := out.AddEnv() + if err != nil { + return err + } + + if _, err := out.Write(body); err != nil { + utils.Errorf("Error reading remote info: %s\n", err) + return err + } + out.Close() + + fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) + fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) + fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) + var driverStatus [][2]string + if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { + return err + } + for _, pair := range driverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + } + fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) + fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) + + if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) + fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") + fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) + fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) + fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) + + if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { + fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) + } + if initPath := remoteInfo.Get("InitPath"); initPath != "" { + fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) + } + } + + if len(remoteInfo.GetList("IndexServerAddress")) != 0 { + cli.LoadConfigFile() + u := 
cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username + if len(u) > 0 { + fmt.Fprintf(cli.out, "Username: %v\n", u) + fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) + } + } + if !remoteInfo.GetBool("MemoryLimit") { + fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") + } + if !remoteInfo.GetBool("SwapLimit") { + fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") + } + if !remoteInfo.GetBool("IPv4Forwarding") { + fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") + } + return nil +} + +func (cli *DockerCli) CmdStop(args ...string) error { + cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to stop one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdRestart(args ...string) error { + cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to restart one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { + sigc := make(chan os.Signal, 1) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == syscall.SIGCHLD { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + utils.Errorf("Unsupported signal: %d. 
Discarding.", s) + } + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { + utils.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} + +func (cli *DockerCli) CmdStart(args ...string) error { + cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") + attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process") + openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + var cErr chan error + var tty bool + if *attach || *openStdin { + if cmd.NArg() > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) + if err != nil { + return err + } + + container := &api.Container{} + err = json.Unmarshal(body, container) + if err != nil { + return err + } + + tty = container.Config.Tty + + if !container.Config.Tty { + sigc := cli.forwardAllSignals(cmd.Arg(0)) + defer signal.StopCatch(sigc) + } + + var in io.ReadCloser + + v := url.Values{} + v.Set("stream", "1") + if *openStdin && container.Config.OpenStdin { + v.Set("stdin", "1") + in = cli.in + } + v.Set("stdout", "1") + v.Set("stderr", "1") + + cErr = utils.Go(func() error { + return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil) + }) + } + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false)) + if err != nil { + if !*attach || !*openStdin { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to start one or more containers") + } + } else { + if !*attach || !*openStdin { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + } + if encounteredError != nil { + if *openStdin || *attach { + cli.in.Close() + <-cErr + } + return encounteredError + } + + if *openStdin || *attach { + if tty && cli.isTerminal { + if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { + utils.Errorf("Error monitoring TTY size: %s\n", err) + } + } + return <-cErr + } + return nil +} + +func (cli *DockerCli) CmdInspect(args ...string) error { + cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image") + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + var tmpl *template.Template + if *tmplStr != "" { + var err error + if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { + fmt.Fprintf(cli.err, "Template parsing error: %v\n", err) + return &utils.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + } + + indented := new(bytes.Buffer) + indented.WriteByte('[') + status := 0 + + for _, name := range cmd.Args() { + obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) + if err != nil { + obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false)) + if err != nil { + if strings.Contains(err.Error(), "No such") { + fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) + } else { + 
fmt.Fprintf(cli.err, "%s", err) + } + status = 1 + continue + } + } + + if tmpl == nil { + if err = json.Indent(indented, obj, "", " "); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + } else { + // Has template, will render + var value interface{} + if err := json.Unmarshal(obj, &value); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + if err := tmpl.Execute(cli.out, value); err != nil { + return err + } + cli.out.Write([]byte{'\n'}) + } + indented.WriteString(",") + } + + if indented.Len() > 1 { + // Remove trailing ',' + indented.Truncate(indented.Len() - 1) + } + indented.WriteByte(']') + + if tmpl == nil { + if _, err := io.Copy(cli.out, indented); err != nil { + return err + } + } + + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + return nil +} + +func (cli *DockerCli) CmdTop(args ...string) error { + cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() == 0 { + cmd.Usage() + return nil + } + val := url.Values{} + if cmd.NArg() > 1 { + val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) + } + + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false) + if err != nil { + return err + } + var procs engine.Env + if err := procs.Decode(stream); err != nil { + return err + } + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) + processes := [][]string{} + if err := procs.GetJson("Processes", &processes); err != nil { + return err + } + for _, proc := range processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} + +func (cli *DockerCli) CmdPort(args ...string) error { + cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 2 { + cmd.Usage() + return nil + } + + var ( + port = cmd.Arg(1) + proto = "tcp" + parts = strings.SplitN(port, "/", 2) + container api.Container + ) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) + if err != nil { + return err + } + + err = json.Unmarshal(body, &container) + if err != nil { + return err + } + + if frontends, exists := container.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) + } + } else { + return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0)) + } + return nil +} + +// 'docker rmi IMAGE' removes all images with the name IMAGE +func (cli *DockerCli) CmdRmi(args ...string) error { + var ( + cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images") + force = cmd.Bool([]string{"f", "-force"}, false, "Force") + noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") + ) + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + if *force { + v.Set("force", "1") + } + if *noprune { + v.Set("noprune", "1") + } + + var encounteredError error + for _, name := range cmd.Args() { + body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, false)) + if 
err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to remove one or more images") + } else { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to remove one or more images") + continue + } + for _, out := range outs.Data { + if out.Get("Deleted") != "" { + fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted")) + } else { + fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged")) + } + } + } + } + return encounteredError +} + +func (cli *DockerCli) CmdHistory(args ...string) error { + cmd := cli.Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false)) + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") + } + + for _, out := range outs.Data { + outID := out.Get("Id") + if !*quiet { + if *noTrunc { + fmt.Fprintf(w, "%s\t", outID) + } else { + fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) + } + + fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) + + if *noTrunc { + fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) + } else { + fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) + } + fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size"))) + } else { + if *noTrunc { + fmt.Fprintln(w, outID) + } else { + fmt.Fprintln(w, utils.TruncateID(outID)) + } + } + } + w.Flush() + return nil +} + +func (cli *DockerCli) CmdRm(args ...string) error { + cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers") + v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated to the container") + link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container") + force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of running container") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + val := url.Values{} + if *v { + val.Set("v", "1") + } + if *link { + val.Set("link", "1") + } + if *force { + val.Set("force", "1") + } + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to remove one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +// 'docker kill NAME' kills a running container +func (cli *DockerCli) CmdKill(args ...string) error { + cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)") + signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 
1 { + cmd.Usage() + return nil + } + + var encounteredError error + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to kill one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdImport(args ...string) error { + cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + var src, repository, tag string + + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n") + src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) + } else { + src = cmd.Arg(0) + repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) + } + v := url.Values{} + + if repository != "" { + //Check if the given image name can be resolved + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + } + + v.Set("repo", repository) + v.Set("tag", tag) + v.Set("fromSrc", src) + + var in io.Reader + + if src == "-" { + in = cli.in + } + + return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil) +} + +func (cli *DockerCli) CmdPush(args ...string) error { + cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry") + if err := cmd.Parse(args); err != nil { + return nil + } + name := cmd.Arg(0) + + if name == "" { + cmd.Usage() + return nil + } + + cli.LoadConfigFile() + + // Resolve the Repository name from fqn to hostname + name + hostname, _, err := registry.ResolveRepositoryName(name) + if err != nil { + return err + } + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(hostname) + // If we're not using a custom registry, we know the restrictions + // applied to repository names and can warn the user in advance. + // Custom repositories can have different rules, and we must also + // allow pushing by image ID. + if len(strings.SplitN(name, "/", 2)) == 1 { + username := cli.configFile.Configs[registry.IndexServerAddress()].Username + if username == "" { + username = "" + } + return fmt.Errorf("You cannot push a \"root\" repository. 
Please rename your repository in / (ex: %s/%s)", username, name) + } + + v := url.Values{} + push := func(authConfig registry.AuthConfig) error { + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + + return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{ + "X-Registry-Auth": registryAuthHeader, + }) + } + + if err := push(authConfig); err != nil { + if strings.Contains(err.Error(), "Status 401") { + fmt.Fprintln(cli.out, "\nPlease login prior to push:") + if err := cli.CmdLogin(hostname); err != nil { + return err + } + authConfig := cli.configFile.ResolveAuthConfig(hostname) + return push(authConfig) + } + return err + } + return nil +} + +func (cli *DockerCli) CmdPull(args ...string) error { + cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry") + tag := cmd.String([]string{"#t", "#-tag"}, "", "Download tagged image in repository") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0)) + if *tag == "" { + *tag = parsedTag + } + + // Resolve the Repository name from fqn to hostname + name + hostname, _, err := registry.ResolveRepositoryName(remote) + if err != nil { + return err + } + + cli.LoadConfigFile() + + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(hostname) + v := url.Values{} + v.Set("fromImage", remote) + v.Set("tag", *tag) + + pull := func(authConfig registry.AuthConfig) error { + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + + return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{ + "X-Registry-Auth": registryAuthHeader, + }) + } + + if err := pull(authConfig); err != nil { + if strings.Contains(err.Error(), "Status 401") { + fmt.Fprintln(cli.out, "\nPlease login prior to pull:") + if err := cli.CmdLogin(hostname); err != nil { + return err + } + authConfig := cli.configFile.ResolveAuthConfig(hostname) + return pull(authConfig) + } + return err + } + + return nil +} + +func (cli *DockerCli) CmdImages(args ...string) error { + cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate images used to build)") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "Output graph in graphviz format") + flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "Output graph in tree format") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() > 1 { + cmd.Usage() + return nil + } + + filter := cmd.Arg(0) + + if *flViz || *flTree { + body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false)) + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + var ( + printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string) + startImage *engine.Env + + roots = engine.NewTable("Created", outs.Len()) + byParent = make(map[string]*engine.Table) + ) + + for _, image := range 
outs.Data { + if image.Get("ParentId") == "" { + roots.Add(image) + } else { + if children, exists := byParent[image.Get("ParentId")]; exists { + children.Add(image) + } else { + byParent[image.Get("ParentId")] = engine.NewTable("Created", 1) + byParent[image.Get("ParentId")].Add(image) + } + } + + if filter != "" { + if filter == image.Get("Id") || filter == utils.TruncateID(image.Get("Id")) { + startImage = image + } + + for _, repotag := range image.GetList("RepoTags") { + if repotag == filter { + startImage = image + } + } + } + } + + if *flViz { + fmt.Fprintf(cli.out, "digraph docker {\n") + printNode = (*DockerCli).printVizNode + } else { + printNode = (*DockerCli).printTreeNode + } + + if startImage != nil { + root := engine.NewTable("Created", 1) + root.Add(startImage) + cli.WalkTree(*noTrunc, root, byParent, "", printNode) + } else if filter == "" { + cli.WalkTree(*noTrunc, roots, byParent, "", printNode) + } + if *flViz { + fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") + } + } else { + v := url.Values{} + if cmd.NArg() == 1 { + v.Set("filter", filter) + } + if *all { + v.Set("all", "1") + } + + body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) + + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") + } + + for _, out := range outs.Data { + for _, repotag := range out.GetList("RepoTags") { + + repo, tag := utils.ParseRepositoryTag(repotag) + outID := out.Get("Id") + if !*noTrunc { + outID = utils.TruncateID(outID) + } + + if !*quiet { + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize"))) + } else { + fmt.Fprintln(w, outID) + } + } + } + + if !*quiet { + w.Flush() + } + } + return nil +} + +func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { + length := images.Len() + if length > 1 { + for index, image := range images.Data { + if index+1 == length { + printNode(cli, noTrunc, image, prefix+"└─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) + } + } else { + printNode(cli, noTrunc, image, prefix+"\u251C─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) + } + } + } + } else { + for _, image := range images.Data { + printNode(cli, noTrunc, image, prefix+"└─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) + } + } + } +} + +func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { + var ( + imageID string + parentID string + ) + if noTrunc { + imageID = image.Get("Id") + parentID = image.Get("ParentId") + } else { + imageID = utils.TruncateID(image.Get("Id")) + parentID = utils.TruncateID(image.Get("ParentId")) + } + if parentID == "" { + fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) + } else { + fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) + } + if image.GetList("RepoTags")[0] != ":" { + fmt.Fprintf(cli.out, " \"%s\" 
[label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", + imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n")) + } +} + +func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { + var imageID string + if noTrunc { + imageID = image.Get("Id") + } else { + imageID = utils.TruncateID(image.Get("Id")) + } + + fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize"))) + if image.GetList("RepoTags")[0] != ":" { + fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) + } else { + fmt.Fprint(cli.out, "\n") + } +} + +func (cli *DockerCli) CmdPs(args ...string) error { + cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + size := cmd.Bool([]string{"s", "-size"}, false, "Display sizes") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") + since := cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show only containers created since Id or Name, include non-running ones.") + before := cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name, include non-running ones.") + last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.") + + if err := cmd.Parse(args); err != nil { + return nil + } + v := url.Values{} + if *last == -1 && *nLatest { + *last = 1 + } + if *all { + v.Set("all", "1") + } + if *last != -1 { + v.Set("limit", strconv.Itoa(*last)) + } + if *since != "" { + v.Set("since", *since) + } + if *before != "" { + v.Set("before", *before) + } + if *size { + v.Set("size", "1") + } + + body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false)) + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES") + if *size { + fmt.Fprintln(w, "\tSIZE") + } else { + fmt.Fprint(w, "\n") + } + } + + for _, out := range outs.Data { + var ( + outID = out.Get("Id") + outNames = out.GetList("Names") + ) + + if !*noTrunc { + outID = utils.TruncateID(outID) + } + + // Remove the leading / from the names + for i := 0; i < len(outNames); i++ { + outNames[i] = outNames[i][1:] + } + + if !*quiet { + var ( + outCommand = out.Get("Command") + ports = engine.NewTable("", 0) + ) + if !*noTrunc { + outCommand = utils.Trunc(outCommand, 20) + } + ports.ReadListFrom([]byte(out.Get("Ports"))) + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ",")) + if *size { + if out.GetInt("SizeRootFs") > 0 { + fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs"))) + } else { + fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw"))) + } + } else { + fmt.Fprint(w, "\n") + } + } else { + fmt.Fprintln(w, outID) + } 
+ } + + if !*quiet { + w.Flush() + } + return nil +} + +func (cli *DockerCli) CmdCommit(args ...string) error { + cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes") + flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") + flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith \"") + flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: --run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`) + if err := cmd.Parse(args); err != nil { + return nil + } + + var name, repository, tag string + + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n") + name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) + } else { + name = cmd.Arg(0) + repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) + } + + if name == "" { + cmd.Usage() + return nil + } + + //Check if the given image name can be resolved + if repository != "" { + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + } + + v := url.Values{} + v.Set("container", name) + v.Set("repo", repository) + v.Set("tag", tag) + v.Set("comment", *flComment) + v.Set("author", *flAuthor) + var ( + config *runconfig.Config + env engine.Env + ) + if *flConfig != "" { + config = &runconfig.Config{} + if err := json.Unmarshal([]byte(*flConfig), config); err != nil { + return err + } + } + stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false) + if err != nil { + return err + } + if err := env.Decode(stream); err != nil { + return err + } + + fmt.Fprintf(cli.out, "%s\n", env.Get("Id")) + return nil +} + +func (cli *DockerCli) CmdEvents(args ...string) error { + cmd := cli.Subcmd("events", "[OPTIONS]", "Get real time events from the server") + since := cmd.String([]string{"#since", "-since"}, "", "Show previously created events and then stream.") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 0 { + cmd.Usage() + return nil + } + + v := url.Values{} + if *since != "" { + loc := time.FixedZone(time.Now().Zone()) + format := "2006-01-02 15:04:05 -0700 MST" + if len(*since) < len(format) { + format = format[:len(*since)] + } + + if t, err := time.ParseInLocation(format, *since, loc); err == nil { + v.Set("since", strconv.FormatInt(t.Unix(), 10)) + } else { + v.Set("since", *since) + } + } + + if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdExport(args ...string) error { + cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdDiff(args ...string) error { + cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false)) + + if err != nil { + return err + } + + outs := engine.NewTable("", 0) + if _, err := outs.ReadListFrom(body); 
err != nil { + return err + } + for _, change := range outs.Data { + var kind string + switch change.GetInt("Kind") { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path")) + } + return nil +} + +func (cli *DockerCli) CmdLogs(args ...string) error { + cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container") + follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + name := cmd.Arg(0) + body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) + if err != nil { + return err + } + + container := &api.Container{} + err = json.Unmarshal(body, container) + if err != nil { + return err + } + + v := url.Values{} + v.Set("logs", "1") + v.Set("stdout", "1") + v.Set("stderr", "1") + if *follow && container.State.Running { + v.Set("stream", "1") + } + + if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdAttach(args ...string) error { + cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container") + noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin") + proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + name := cmd.Arg(0) + body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) + if err != nil { + return err + } + + container := &api.Container{} + err = json.Unmarshal(body, container) + if err != nil { + return err + } + + if !container.State.Running { + return fmt.Errorf("You cannot attach to a stopped container, start it first") + } + + if container.Config.Tty && cli.isTerminal { + if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { + utils.Debugf("Error monitoring TTY size: %s", err) + } + } + + var in io.ReadCloser + + v := url.Values{} + v.Set("stream", "1") + if !*noStdin && container.Config.OpenStdin { + v.Set("stdin", "1") + in = cli.in + } + v.Set("stdout", "1") + v.Set("stderr", "1") + + if *proxy && !container.Config.Tty { + sigc := cli.forwardAllSignals(cmd.Arg(0)) + defer signal.StopCatch(sigc) + } + + if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil { + return err + } + + _, status, err := getExitCode(cli, cmd.Arg(0)) + if err != nil { + return err + } + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + + return nil +} + +func (cli *DockerCli) CmdSearch(args ...string) error { + cmd := cli.Subcmd("search", "TERM", "Search the docker index for images") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds") + stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx stars") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("term", cmd.Arg(0)) + + body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), 
nil, true)) + + if err != nil { + return err + } + outs := engine.NewTable("star_count", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n") + for _, out := range outs.Data { + if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) { + continue + } + desc := strings.Replace(out.Get("description"), "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !*noTrunc && len(desc) > 45 { + desc = utils.Trunc(desc, 42) + "..." + } + fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count")) + if out.GetBool("is_official") { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if out.GetBool("is_trusted") { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// Ports type - Used to parse multiple -p flags +type ports []int + +func (cli *DockerCli) CmdTag(args ...string) error { + cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository") + force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 2 && cmd.NArg() != 3 { + cmd.Usage() + return nil + } + + var repository, tag string + + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]]\n") + repository, tag = cmd.Arg(1), cmd.Arg(2) + } else { + repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) + } + + v := url.Values{} + + //Check if the given image name can be resolved + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + v.Set("repo", repository) + v.Set("tag", tag) + + if *force { + v.Set("force", "1") + } + + if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdRun(args ...string) error { + // FIXME: just use runconfig.Parse already + config, hostConfig, cmd, err := runconfig.ParseSubcommand(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil) + if err != nil { + return err + } + if config.Image == "" { + cmd.Usage() + return nil + } + + // Retrieve relevant client-side config + var ( + flName = cmd.Lookup("name") + flRm = cmd.Lookup("rm") + flSigProxy = cmd.Lookup("sig-proxy") + autoRemove, _ = strconv.ParseBool(flRm.Value.String()) + sigProxy, _ = strconv.ParseBool(flSigProxy.Value.String()) + ) + + // Disable sigProxy in case on TTY + if config.Tty { + sigProxy = false + } + + var containerIDFile io.WriteCloser + if len(hostConfig.ContainerIDFile) > 0 { + if _, err := os.Stat(hostConfig.ContainerIDFile); err == nil { + return fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile) + } + if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil { + return fmt.Errorf("Failed to create the container ID file: %s", err) + } + defer func() { + containerIDFile.Close() + var ( + cidFileInfo os.FileInfo + err error + ) + if cidFileInfo, err = os.Stat(hostConfig.ContainerIDFile); err != nil { + return + } + if cidFileInfo.Size() == 0 { + if err := os.Remove(hostConfig.ContainerIDFile); err != nil { + fmt.Printf("failed to remove CID file '%s': %s \n", 
hostConfig.ContainerIDFile, err) + } + } + }() + } + + containerValues := url.Values{} + if name := flName.Value.String(); name != "" { + containerValues.Set("name", name) + } + + //create the container + stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false) + //if image not found try to pull it + if statusCode == 404 { + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) + + v := url.Values{} + repos, tag := utils.ParseRepositoryTag(config.Image) + v.Set("fromImage", repos) + v.Set("tag", tag) + + // Resolve the Repository name from fqn to hostname + name + hostname, _, err := registry.ResolveRepositoryName(repos) + if err != nil { + return err + } + + // Load the auth config file, to be able to pull the image + cli.LoadConfigFile() + + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(hostname) + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { + return err + } + if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false); err != nil { + return err + } + } else if err != nil { + return err + } + + var runResult engine.Env + if err := runResult.Decode(stream); err != nil { + return err + } + + for _, warning := range runResult.GetList("Warnings") { + fmt.Fprintf(cli.err, "WARNING: %s\n", warning) + } + + if len(hostConfig.ContainerIDFile) > 0 { + if _, err = containerIDFile.Write([]byte(runResult.Get("Id"))); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + } + + if sigProxy { + sigc := cli.forwardAllSignals(runResult.Get("Id")) + defer signal.StopCatch(sigc) + } + + var ( + waitDisplayId chan struct{} + errCh chan error + ) + + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchrone in order to let the client write to stdin before having to read the ID + waitDisplayId = make(chan struct{}) + go func() { + defer close(waitDisplayId) + fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id")) + }() + } + + // We need to instanciate the chan because the select needs it. It can + // be closed but can't be uninitialized. + hijacked := make(chan io.Closer) + + // Block the return until the chan gets closed + defer func() { + utils.Debugf("End of CmdRun(), Waiting for hijack to finish.") + if _, ok := <-hijacked; ok { + utils.Errorf("Hijack did not finish (chan still open)") + } + }() + + if config.AttachStdin || config.AttachStdout || config.AttachStderr { + var ( + out, stderr io.Writer + in io.ReadCloser + v = url.Values{} + ) + v.Set("stream", "1") + + if config.AttachStdin { + v.Set("stdin", "1") + in = cli.in + } + if config.AttachStdout { + v.Set("stdout", "1") + out = cli.out + } + if config.AttachStderr { + v.Set("stderr", "1") + if config.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + + errCh = utils.Go(func() error { + return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked) + }) + } else { + close(hijacked) + } + + // Acknowledge the hijack before starting + select { + case closer := <-hijacked: + // Make sure that hijack gets closed when returning. (result + // in closing hijack chan and freeing server's goroutines. 
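+ // The attach goroutine hands the hijacked connection back on this chan; deferring Close on it tears the stream down when CmdRun returns.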
+ if closer != nil { + defer closer.Close() + } + case err := <-errCh: + if err != nil { + utils.Debugf("Error hijack: %s", err) + return err + } + } + + //start the container + if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil { + return err + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal { + if err := cli.monitorTtySize(runResult.Get("Id")); err != nil { + utils.Errorf("Error monitoring TTY size: %s\n", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + utils.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. + if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayId + return nil + } + + var status int + + // Attached mode + if autoRemove { + // Autoremove: wait for the container to finish, retrieve + // the exit code and remove the container + if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil { + return err + } + if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { + return err + } + if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil { + return err + } + } else { + if !config.Tty { + // In non-tty mode, we can't dettach, so we know we need to wait. + if status, err = waitForExit(cli, runResult.Get("Id")); err != nil { + return err + } + } else { + // In TTY mode, there is a race. If the process dies too slowly, the state can be update after the getExitCode call + // and result in a wrong exit code. + // No Autoremove: Simply retrieve the exit code + if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { + return err + } + } + } + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + return nil +} + +func (cli *DockerCli) CmdCp(args ...string) error { + cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 2 { + cmd.Usage() + return nil + } + + var copyData engine.Env + info := strings.Split(cmd.Arg(0), ":") + + if len(info) != 2 { + return fmt.Errorf("Error: Path not specified") + } + + copyData.Set("Resource", info[1]) + copyData.Set("HostPath", cmd.Arg(1)) + + stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false) + if stream != nil { + defer stream.Close() + } + if statusCode == 404 { + return fmt.Errorf("No such container: %v", info[0]) + } + if err != nil { + return err + } + + if statusCode == 200 { + if err := archive.Untar(stream, copyData.Get("HostPath"), nil); err != nil { + return err + } + } + return nil +} + +func (cli *DockerCli) CmdSave(args ...string) error { + cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout by default)") + outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT") + + if err := cmd.Parse(args); err != nil { + return err + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var ( + output io.Writer = cli.out + err error + ) + if *outfile != "" { + output, err = os.Create(*outfile) + if err != nil { + return err + } + } + image := cmd.Arg(0) + if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdLoad(args 
...string) error { + cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN") + infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") + + if err := cmd.Parse(args); err != nil { + return err + } + + if cmd.NArg() != 0 { + cmd.Usage() + return nil + } + + var ( + input io.Reader = cli.in + err error + ) + if *infile != "" { + input, err = os.Open(*infile) + if err != nil { + return err + } + } + if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) dial() (net.Conn, error) { + if cli.tlsConfig != nil && cli.proto != "unix" { + return tls.Dial(cli.proto, cli.addr, cli.tlsConfig) + } + return net.Dial(cli.proto, cli.addr) +} + +func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if env, ok := data.(engine.Env); ok { + if err := env.Encode(params); err != nil { + return nil, -1, err + } + } else { + buf, err := json.Marshal(data) + if err != nil { + return nil, -1, err + } + if _, err := params.Write(buf); err != nil { + return nil, -1, err + } + } + } + // fixme: refactor client to support redirect + re := regexp.MustCompile("/+") + path = re.ReplaceAllString(path, "/") + + req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params) + if err != nil { + return nil, -1, err + } + if passAuthInfo { + cli.LoadConfigFile() + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress()) + getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return nil, err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil + } + if headers, err := getHeaders(authConfig); err == nil && headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) + req.Host = cli.addr + if data != nil { + req.Header.Set("Content-Type", "application/json") + } else if method == "POST" { + req.Header.Set("Content-Type", "plain/text") + } + dial, err := cli.dial() + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return nil, -1, ErrConnectionRefused + } + return nil, -1, err + } + clientconn := httputil.NewClientConn(dial, nil) + resp, err := clientconn.Do(req) + if err != nil { + clientconn.Close() + if strings.Contains(err.Error(), "connection refused") { + return nil, -1, ErrConnectionRefused + } + return nil, -1, err + } + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, -1, err + } + if len(body) == 0 { + return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL) + } + return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body)) + } + + wrapper := utils.NewReadCloserWrapper(resp.Body, func() error { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return clientconn.Close() + }) + return wrapper, resp.StatusCode, nil +} + +func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers 
map[string][]string) error { + if (method == "POST" || method == "PUT") && in == nil { + in = bytes.NewReader([]byte{}) + } + + // fixme: refactor client to support redirect + re := regexp.MustCompile("/+") + path = re.ReplaceAllString(path, "/") + + req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in) + if err != nil { + return err + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) + req.Host = cli.addr + if method == "POST" { + req.Header.Set("Content-Type", "plain/text") + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + + dial, err := cli.dial() + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") + } + return err + } + clientconn := httputil.NewClientConn(dial, nil) + resp, err := clientconn.Do(req) + defer clientconn.Close() + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") + } + return err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + if len(body) == 0 { + return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode)) + } + return fmt.Errorf("Error: %s", bytes.TrimSpace(body)) + } + + if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") { + return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal) + } + if _, err := io.Copy(out, resp.Body); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error { + defer func() { + if started != nil { + close(started) + } + }() + // fixme: refactor client to support redirect + re := regexp.MustCompile("/+") + path = re.ReplaceAllString(path, "/") + + req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil) + if err != nil { + return err + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) + req.Header.Set("Content-Type", "plain/text") + req.Host = cli.addr + + dial, err := cli.dial() + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") + } + return err + } + clientconn := httputil.NewClientConn(dial, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + clientconn.Do(req) + + rwc, br := clientconn.Hijack() + defer rwc.Close() + + if started != nil { + started <- rwc + } + + var receiveStdout chan error + + var oldState *term.State + + if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" { + oldState, err = term.SetRawTerminal(cli.terminalFd) + if err != nil { + return err + } + defer term.RestoreTerminal(cli.terminalFd, oldState) + } + + if stdout != nil || stderr != nil { + receiveStdout = utils.Go(func() (err error) { + defer func() { + if in != nil { + if setRawTerminal && cli.isTerminal { + term.RestoreTerminal(cli.terminalFd, oldState) + } + // For some reason this Close call blocks on darwin.. + // As the client exists right after, simply discard the close + // until we find a better solution. 
+ if goruntime.GOOS != "darwin" { + in.Close() + } + } + }() + + // When TTY is ON, use regular copy + if setRawTerminal { + _, err = io.Copy(stdout, br) + } else { + _, err = utils.StdCopy(stdout, stderr, br) + } + utils.Debugf("[hijack] End of stdout") + return err + }) + } + + sendStdin := utils.Go(func() error { + if in != nil { + io.Copy(rwc, in) + utils.Debugf("[hijack] End of stdin") + } + if tcpc, ok := rwc.(*net.TCPConn); ok { + if err := tcpc.CloseWrite(); err != nil { + utils.Debugf("Couldn't send EOF: %s\n", err) + } + } else if unixc, ok := rwc.(*net.UnixConn); ok { + if err := unixc.CloseWrite(); err != nil { + utils.Debugf("Couldn't send EOF: %s\n", err) + } + } + // Discard errors due to pipe interruption + return nil + }) + + if stdout != nil || stderr != nil { + if err := <-receiveStdout; err != nil { + utils.Debugf("Error receiveStdout: %s", err) + return err + } + } + + if !cli.isTerminal { + if err := <-sendStdin; err != nil { + utils.Debugf("Error sendStdin: %s", err) + return err + } + } + return nil + +} + +func (cli *DockerCli) getTtySize() (int, int) { + if !cli.isTerminal { + return 0, 0 + } + ws, err := term.GetWinsize(cli.terminalFd) + if err != nil { + utils.Debugf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return int(ws.Height), int(ws.Width) +} + +func (cli *DockerCli) resizeTty(id string) { + height, width := cli.getTtySize() + if height == 0 && width == 0 { + return + } + v := url.Values{} + v.Set("h", strconv.Itoa(height)) + v.Set("w", strconv.Itoa(width)) + if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil { + utils.Debugf("Error resize: %s", err) + } +} + +func (cli *DockerCli) monitorTtySize(id string) error { + cli.resizeTty(id) + + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, syscall.SIGWINCH) + go func() { + for _ = range sigchan { + cli.resizeTty(id) + } + }() + return nil +} + +func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet { + flags := flag.NewFlagSet(name, flag.ContinueOnError) + flags.Usage = func() { + fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description) + flags.PrintDefaults() + os.Exit(2) + } + return flags +} + +func (cli *DockerCli) LoadConfigFile() (err error) { + cli.configFile, err = registry.LoadConfig(os.Getenv("HOME")) + if err != nil { + fmt.Fprintf(cli.err, "WARNING: %s\n", err) + } + return err +} + +func waitForExit(cli *DockerCli, containerId string) (int, error) { + stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false) + if err != nil { + return -1, err + } + + var out engine.Env + if err := out.Decode(stream); err != nil { + return -1, err + } + return out.GetInt("StatusCode"), nil +} + +// getExitCode perform an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { + body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false)) + if err != nil { + // If we can't connect, then the daemon probably died. 
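+ // A refused connection means the daemon is unreachable, so report the container as not running (exit code -1) rather than returning an error.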
+ if err != ErrConnectionRefused { + return false, -1, err + } + return false, -1, nil + } + c := &api.Container{} + if err := json.Unmarshal(body, c); err != nil { + return false, -1, err + } + return c.State.Running, c.State.ExitCode, nil +} + +func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) { + if stream != nil { + defer stream.Close() + } + if err != nil { + return nil, statusCode, err + } + body, err := ioutil.ReadAll(stream) + if err != nil { + return nil, -1, err + } + return body, statusCode, nil +} + +func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsConfig *tls.Config) *DockerCli { + var ( + isTerminal = false + terminalFd uintptr + ) + + if in != nil { + if file, ok := in.(*os.File); ok { + terminalFd = file.Fd() + isTerminal = term.IsTerminal(terminalFd) + } + } + + if err == nil { + err = out + } + return &DockerCli{ + proto: proto, + addr: addr, + in: in, + out: out, + err: err, + isTerminal: isTerminal, + terminalFd: terminalFd, + tlsConfig: tlsConfig, + } +} + +type DockerCli struct { + proto string + addr string + configFile *registry.ConfigFile + in io.ReadCloser + out io.Writer + err io.Writer + isTerminal bool + terminalFd uintptr + tlsConfig *tls.Config +} diff --git a/api/common.go b/api/common.go index 5e5d2c5767..7273e5c56d 100644 --- a/api/common.go +++ b/api/common.go @@ -23,7 +23,7 @@ func ValidateHost(val string) (string, error) { } //TODO remove, used on < 1.5 in getContainersJSON -func displayablePorts(ports *engine.Table) string { +func DisplayablePorts(ports *engine.Table) string { result := []string{} ports.SetKey("PublicPort") ports.Sort() diff --git a/api/server.go b/api/server.go deleted file mode 100644 index 29ea180030..0000000000 --- a/api/server.go +++ /dev/null @@ -1,1255 +0,0 @@ -package api - -import ( - "bufio" - "bytes" - "code.google.com/p/go.net/websocket" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "encoding/json" - "expvar" - "fmt" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/pkg/listenbuffer" - "github.com/dotcloud/docker/pkg/systemd" - "github.com/dotcloud/docker/pkg/user" - "github.com/dotcloud/docker/pkg/version" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/utils" - "github.com/gorilla/mux" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/http/pprof" - "os" - "strconv" - "strings" - "syscall" -) - -var ( - activationLock chan struct{} -) - -type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error - -func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { - conn, _, err := w.(http.Hijacker).Hijack() - if err != nil { - return nil, nil, err - } - // Flush the options to make sure the client sets the raw mode - conn.Write([]byte{}) - return conn, conn, nil -} - -//If we don't do this, POST method without Content-type (even with empty body) will fail -func parseForm(r *http.Request) error { - if r == nil { - return nil - } - if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -func parseMultipartForm(r *http.Request) error { - if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -func httpError(w http.ResponseWriter, err error) { - statusCode := http.StatusInternalServerError - // FIXME: this is brittle and should not be necessary. 
- // If we need to differentiate between different possible error types, we should - // create appropriate error types with clearly defined meaning. - if strings.Contains(err.Error(), "No such") { - statusCode = http.StatusNotFound - } else if strings.Contains(err.Error(), "Bad parameter") { - statusCode = http.StatusBadRequest - } else if strings.Contains(err.Error(), "Conflict") { - statusCode = http.StatusConflict - } else if strings.Contains(err.Error(), "Impossible") { - statusCode = http.StatusNotAcceptable - } else if strings.Contains(err.Error(), "Wrong login/password") { - statusCode = http.StatusUnauthorized - } else if strings.Contains(err.Error(), "hasn't been activated") { - statusCode = http.StatusForbidden - } - - if err != nil { - utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error()) - http.Error(w, err.Error(), statusCode) - } -} - -func writeJSON(w http.ResponseWriter, code int, v engine.Env) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - return v.Encode(w) -} - -func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) { - w.Header().Set("Content-Type", "application/json") - if flush { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } else { - job.Stdout.Add(w) - } -} - -func getBoolParam(value string) (bool, error) { - if value == "" { - return false, nil - } - ret, err := strconv.ParseBool(value) - if err != nil { - return false, fmt.Errorf("Bad parameter") - } - return ret, nil -} - -func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var ( - authConfig, err = ioutil.ReadAll(r.Body) - job = eng.Job("auth") - status string - ) - if err != nil { - return err - } - job.Setenv("authConfig", string(authConfig)) - job.Stdout.AddString(&status) - if err = job.Run(); err != nil { - return err - } - if status != "" { - var env engine.Env - env.Set("Status", status) - return writeJSON(w, http.StatusOK, env) - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.Header().Set("Content-Type", "application/json") - eng.ServeHTTP(w, r) - return nil -} - -func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - job := eng.Job("kill", vars["name"]) - if sig := r.Form.Get("signal"); sig != "" { - job.Args = append(job.Args, sig) - } - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("export", vars["name"]) - job.Stdout.Add(w) - if err := job.Run(); err != nil { - return err - } - return nil -} - -func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - var ( - err error - outs *engine.Table - job = eng.Job("images") - ) - - job.Setenv("filter", r.Form.Get("filter")) - job.Setenv("all", r.Form.Get("all")) - - if version.GreaterThanOrEqualTo("1.7") { - streamJSON(job, w, false) - } else if 
outs, err = job.Stdout.AddListTable(); err != nil { - return err - } - - if err := job.Run(); err != nil { - return err - } - - if version.LessThan("1.7") && outs != nil { // Convert to legacy format - outsLegacy := engine.NewTable("Created", 0) - for _, out := range outs.Data { - for _, repoTag := range out.GetList("RepoTags") { - parts := strings.Split(repoTag, ":") - outLegacy := &engine.Env{} - outLegacy.Set("Repository", parts[0]) - outLegacy.Set("Tag", parts[1]) - outLegacy.Set("Id", out.Get("Id")) - outLegacy.SetInt64("Created", out.GetInt64("Created")) - outLegacy.SetInt64("Size", out.GetInt64("Size")) - outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize")) - outsLegacy.Add(outLegacy) - } - } - w.Header().Set("Content-Type", "application/json") - if _, err := outsLegacy.WriteListTo(w); err != nil { - return err - } - } - return nil -} - -func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if version.GreaterThan("1.6") { - w.WriteHeader(http.StatusNotFound) - return fmt.Errorf("This is now implemented in the client.") - } - eng.ServeHTTP(w, r) - return nil -} - -func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.Header().Set("Content-Type", "application/json") - eng.ServeHTTP(w, r) - return nil -} - -func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - var job = eng.Job("events", r.RemoteAddr) - streamJSON(job, w, true) - job.Setenv("since", r.Form.Get("since")) - return job.Run() -} - -func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - var job = eng.Job("history", vars["name"]) - streamJSON(job, w, false) - - if err := job.Run(); err != nil { - return err - } - return nil -} - -func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("changes", vars["name"]) - streamJSON(job, w, false) - - return job.Run() -} - -func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if version.LessThan("1.4") { - return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.") - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - - job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) - streamJSON(job, w, false) - return job.Run() -} - -func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var ( - err error - outs *engine.Table - job = eng.Job("containers") - ) - - job.Setenv("all", r.Form.Get("all")) - job.Setenv("size", r.Form.Get("size")) - job.Setenv("since", r.Form.Get("since")) - job.Setenv("before", r.Form.Get("before")) - job.Setenv("limit", r.Form.Get("limit")) - - if version.GreaterThanOrEqualTo("1.5") { - streamJSON(job, w, false) - } else if outs, err = job.Stdout.AddTable(); err != nil { - return err - } - if err = job.Run(); err 
!= nil { - return err - } - if version.LessThan("1.5") { // Convert to legacy format - for _, out := range outs.Data { - ports := engine.NewTable("", 0) - ports.ReadListFrom([]byte(out.Get("Ports"))) - out.Set("Ports", displayablePorts(ports)) - } - w.Header().Set("Content-Type", "application/json") - if _, err = outs.WriteListTo(w); err != nil { - return err - } - } - return nil -} - -func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) - job.Setenv("force", r.Form.Get("force")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusCreated) - return nil -} - -func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var ( - config engine.Env - env engine.Env - job = eng.Job("commit", r.Form.Get("container")) - ) - if err := config.Decode(r.Body); err != nil { - utils.Errorf("%s", err) - } - - job.Setenv("repo", r.Form.Get("repo")) - job.Setenv("tag", r.Form.Get("tag")) - job.Setenv("author", r.Form.Get("author")) - job.Setenv("comment", r.Form.Get("comment")) - job.SetenvSubEnv("config", &config) - - var id string - job.Stdout.AddString(&id) - if err := job.Run(); err != nil { - return err - } - env.Set("Id", id) - return writeJSON(w, http.StatusCreated, env) -} - -// Creates an image from Pull or from Import -func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - var ( - image = r.Form.Get("fromImage") - tag = r.Form.Get("tag") - job *engine.Job - ) - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := ®istry.AuthConfig{} - if authEncoded != "" { - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = ®istry.AuthConfig{} - } - } - if image != "" { //pull - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - job = eng.Job("pull", r.Form.Get("fromImage"), tag) - job.SetenvBool("parallel", version.GreaterThan("1.3")) - job.SetenvJson("metaHeaders", metaHeaders) - job.SetenvJson("authConfig", authConfig) - } else { //import - job = eng.Job("import", r.Form.Get("fromSrc"), r.Form.Get("repo"), tag) - job.Stdin.Add(r.Body) - } - - if version.GreaterThan("1.0") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) - w.Write(sf.FormatError(err)) - } - - return nil -} - -func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var ( - authEncoded = r.Header.Get("X-Registry-Auth") - authConfig = ®istry.AuthConfig{} - metaHeaders = 
map[string][]string{} - ) - - if authEncoded != "" { - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { - // for a search it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = ®istry.AuthConfig{} - } - } - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - var job = eng.Job("search", r.Form.Get("term")) - job.SetenvJson("metaHeaders", metaHeaders) - job.SetenvJson("authConfig", authConfig) - streamJSON(job, w, false) - - return job.Run() -} - -func postImagesInsert(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("insert", vars["name"], r.Form.Get("url"), r.Form.Get("path")) - if version.GreaterThan("1.0") { - job.SetenvBool("json", true) - streamJSON(job, w, false) - } else { - job.Stdout.Add(w) - } - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) - w.Write(sf.FormatError(err)) - } - - return nil -} - -func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - if err := parseForm(r); err != nil { - return err - } - authConfig := ®istry.AuthConfig{} - - authEncoded := r.Header.Get("X-Registry-Auth") - if authEncoded != "" { - // the new format is to handle the authConfig as a header - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { - // to increase compatibility to existing api it is defaulting to be empty - authConfig = ®istry.AuthConfig{} - } - } else { - // the old format is supported for compatibility if there was no authConfig header - if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { - return err - } - } - - job := eng.Job("push", vars["name"]) - job.SetenvJson("metaHeaders", metaHeaders) - job.SetenvJson("authConfig", authConfig) - if version.GreaterThan("1.0") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } - - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) - w.Write(sf.FormatError(err)) - } - return nil -} - -func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if version.GreaterThan("1.0") { - w.Header().Set("Content-Type", "application/x-tar") - } - job := eng.Job("image_export", vars["name"]) - job.Stdout.Add(w) - return job.Run() -} - -func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - job := eng.Job("load") - job.Stdin.Add(r.Body) - return job.Run() -} - -func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, 
vars map[string]string) error { - if err := parseForm(r); err != nil { - return nil - } - var ( - out engine.Env - job = eng.Job("create", r.Form.Get("name")) - outWarnings []string - outId string - warnings = bytes.NewBuffer(nil) - ) - if err := job.DecodeEnv(r.Body); err != nil { - return err - } - // Read container ID from the first line of stdout - job.Stdout.AddString(&outId) - // Read warnings from stderr - job.Stderr.Add(warnings) - if err := job.Run(); err != nil { - return err - } - // Parse warnings from stderr - scanner := bufio.NewScanner(warnings) - for scanner.Scan() { - outWarnings = append(outWarnings, scanner.Text()) - } - out.Set("Id", outId) - out.SetList("Warnings", outWarnings) - return writeJSON(w, http.StatusCreated, out) -} - -func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("restart", vars["name"]) - job.Setenv("t", r.Form.Get("t")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("container_delete", vars["name"]) - job.Setenv("removeVolume", r.Form.Get("v")) - job.Setenv("removeLink", r.Form.Get("link")) - job.Setenv("forceRemove", r.Form.Get("force")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("image_delete", vars["name"]) - streamJSON(job, w, false) - job.Setenv("force", r.Form.Get("force")) - job.Setenv("noprune", r.Form.Get("noprune")) - - return job.Run() -} - -func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - name := vars["name"] - job := eng.Job("start", name) - // allow a nil body for backwards compatibility - if r.Body != nil { - if MatchesContentType(r.Header.Get("Content-Type"), "application/json") { - if err := job.DecodeEnv(r.Body); err != nil { - return err - } - } - } - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("stop", vars["name"]) - job.Setenv("t", r.Form.Get("t")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var ( - env engine.Env - status string - job = eng.Job("wait", vars["name"]) - ) - 
job.Stdout.AddString(&status) - if err := job.Run(); err != nil { - return err - } - // Parse a 16-bit encoded integer to map typical unix exit status. - _, err := strconv.ParseInt(status, 10, 16) - if err != nil { - return err - } - env.Set("StatusCode", status) - return writeJSON(w, http.StatusOK, env) -} - -func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { - return err - } - return nil -} - -func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - var ( - job = eng.Job("inspect", vars["name"], "container") - c, err = job.Stdout.AddEnv() - ) - if err != nil { - return err - } - if err = job.Run(); err != nil { - return err - } - - inStream, outStream, err := hijackServer(w) - if err != nil { - return err - } - defer func() { - if tcpc, ok := inStream.(*net.TCPConn); ok { - tcpc.CloseWrite() - } else { - inStream.Close() - } - }() - defer func() { - if tcpc, ok := outStream.(*net.TCPConn); ok { - tcpc.CloseWrite() - } else if closer, ok := outStream.(io.Closer); ok { - closer.Close() - } - }() - - var errStream io.Writer - - fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - - if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { - errStream = utils.NewStdWriter(outStream, utils.Stderr) - outStream = utils.NewStdWriter(outStream, utils.Stdout) - } else { - errStream = outStream - } - - job = eng.Job("attach", vars["name"]) - job.Setenv("logs", r.Form.Get("logs")) - job.Setenv("stream", r.Form.Get("stream")) - job.Setenv("stdin", r.Form.Get("stdin")) - job.Setenv("stdout", r.Form.Get("stdout")) - job.Setenv("stderr", r.Form.Get("stderr")) - job.Stdin.Add(inStream) - job.Stdout.Add(outStream) - job.Stderr.Set(errStream) - if err := job.Run(); err != nil { - fmt.Fprintf(outStream, "Error: %s\n", err) - - } - return nil -} - -func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - if err := eng.Job("inspect", vars["name"], "container").Run(); err != nil { - return err - } - - h := websocket.Handler(func(ws *websocket.Conn) { - defer ws.Close() - job := eng.Job("attach", vars["name"]) - job.Setenv("logs", r.Form.Get("logs")) - job.Setenv("stream", r.Form.Get("stream")) - job.Setenv("stdin", r.Form.Get("stdin")) - job.Setenv("stdout", r.Form.Get("stdout")) - job.Setenv("stderr", r.Form.Get("stderr")) - job.Stdin.Add(ws) - job.Stdout.Add(ws) - job.Stderr.Set(ws) - if err := job.Run(); err != nil { - utils.Errorf("Error: %s", err) - } - }) - h.ServeHTTP(w, r) - - return nil -} - -func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("inspect", vars["name"], "container") - streamJSON(job, w, false) - 
job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job - return job.Run() -} - -func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("inspect", vars["name"], "image") - streamJSON(job, w, false) - job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job - return job.Run() -} - -func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if version.LessThan("1.3") { - return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") - } - var ( - authEncoded = r.Header.Get("X-Registry-Auth") - authConfig = ®istry.AuthConfig{} - configFileEncoded = r.Header.Get("X-Registry-Config") - configFile = ®istry.ConfigFile{} - job = eng.Job("build") - ) - - // This block can be removed when API versions prior to 1.9 are deprecated. - // Both headers will be parsed and sent along to the daemon, but if a non-empty - // ConfigFile is present, any value provided as an AuthConfig directly will - // be overridden. See BuildFile::CmdFrom for details. - if version.LessThan("1.9") && authEncoded != "" { - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = ®istry.AuthConfig{} - } - } - - if configFileEncoded != "" { - configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded)) - if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - configFile = ®istry.ConfigFile{} - } - } - - if version.GreaterThanOrEqualTo("1.8") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } - job.Stdin.Add(r.Body) - job.Setenv("remote", r.FormValue("remote")) - job.Setenv("t", r.FormValue("t")) - job.Setenv("q", r.FormValue("q")) - job.Setenv("nocache", r.FormValue("nocache")) - job.Setenv("rm", r.FormValue("rm")) - job.SetenvJson("authConfig", authConfig) - job.SetenvJson("configFile", configFile) - - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8")) - w.Write(sf.FormatError(err)) - } - return nil -} - -func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - var copyData engine.Env - - if contentType := r.Header.Get("Content-Type"); MatchesContentType(contentType, "application/json") { - if err := copyData.Decode(r.Body); err != nil { - return err - } - } else { - return fmt.Errorf("Content-Type not supported: %s", contentType) - } - - if copyData.Get("Resource") == "" { - return fmt.Errorf("Path cannot be empty") - } - - origResource := copyData.Get("Resource") - - if copyData.Get("Resource")[0] == '/' { - copyData.Set("Resource", copyData.Get("Resource")[1:]) - } - - job := eng.Job("container_copy", 
vars["name"], copyData.Get("Resource")) - job.Stdout.Add(w) - if err := job.Run(); err != nil { - utils.Errorf("%s", err.Error()) - if strings.Contains(err.Error(), "No such container") { - w.WriteHeader(http.StatusNotFound) - } else if strings.Contains(err.Error(), "no such file or directory") { - return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) - } - } - return nil -} - -func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.WriteHeader(http.StatusOK) - return nil -} -func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Access-Control-Allow-Origin", "*") - w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") - w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") -} - -func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - // log the request - utils.Debugf("Calling %s %s", localMethod, localRoute) - - if logging { - log.Println(r.Method, r.RequestURI) - } - - if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { - userAgent := strings.Split(r.Header.Get("User-Agent"), "/") - if len(userAgent) == 2 && !dockerVersion.Equal(userAgent[1]) { - utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) - } - } - version := version.Version(mux.Vars(r)["version"]) - if version == "" { - version = APIVERSION - } - if enableCors { - writeCorsHeaders(w, r) - } - - if version.GreaterThan(APIVERSION) { - http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, APIVERSION).Error(), http.StatusNotFound) - return - } - - if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil { - utils.Errorf("Error: %s", err) - httpError(w, err) - } - } -} - -// Replicated from expvar.go as not public. 
-func expvarHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} - -func AttachProfiler(router *mux.Router) { - router.HandleFunc("/debug/vars", expvarHandler) - router.HandleFunc("/debug/pprof/", pprof.Index) - router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - router.HandleFunc("/debug/pprof/profile", pprof.Profile) - router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) - router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) - router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) -} - -func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) (*mux.Router, error) { - r := mux.NewRouter() - if os.Getenv("DEBUG") != "" { - AttachProfiler(r) - } - m := map[string]map[string]HttpApiFunc{ - "GET": { - "/events": getEvents, - "/info": getInfo, - "/version": getVersion, - "/images/json": getImagesJSON, - "/images/viz": getImagesViz, - "/images/search": getImagesSearch, - "/images/{name:.*}/get": getImagesGet, - "/images/{name:.*}/history": getImagesHistory, - "/images/{name:.*}/json": getImagesByName, - "/containers/ps": getContainersJSON, - "/containers/json": getContainersJSON, - "/containers/{name:.*}/export": getContainersExport, - "/containers/{name:.*}/changes": getContainersChanges, - "/containers/{name:.*}/json": getContainersByName, - "/containers/{name:.*}/top": getContainersTop, - "/containers/{name:.*}/attach/ws": wsContainersAttach, - }, - "POST": { - "/auth": postAuth, - "/commit": postCommit, - "/build": postBuild, - "/images/create": postImagesCreate, - "/images/{name:.*}/insert": postImagesInsert, - "/images/load": postImagesLoad, - "/images/{name:.*}/push": postImagesPush, - "/images/{name:.*}/tag": postImagesTag, - "/containers/create": postContainersCreate, - "/containers/{name:.*}/kill": postContainersKill, - "/containers/{name:.*}/restart": postContainersRestart, - "/containers/{name:.*}/start": postContainersStart, - "/containers/{name:.*}/stop": postContainersStop, - "/containers/{name:.*}/wait": postContainersWait, - "/containers/{name:.*}/resize": postContainersResize, - "/containers/{name:.*}/attach": postContainersAttach, - "/containers/{name:.*}/copy": postContainersCopy, - }, - "DELETE": { - "/containers/{name:.*}": deleteContainers, - "/images/{name:.*}": deleteImages, - }, - "OPTIONS": { - "": optionsHandler, - }, - } - - for method, routes := range m { - for route, fct := range routes { - utils.Debugf("Registering %s, %s", method, route) - // NOTE: scope issue, make sure the variables are local and won't be changed - localRoute := route - localFct := fct - localMethod := method - - // build the handler function - f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, enableCors, version.Version(dockerVersion)) - - // add the new route - if localRoute == "" { - r.Methods(localMethod).HandlerFunc(f) - } else { - r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) - r.Path(localRoute).Methods(localMethod).HandlerFunc(f) - } - } - } - - return r, nil -} - -// ServeRequest processes a single http request to the docker remote api. 
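For illustration only (not part of this patch): createRouter registers every entry of the method/route table twice, once under the /v{version} prefix and once bare, so versioned and unversioned requests reach the same handler. A self-contained sketch of that registration pattern with gorilla/mux; the route, listen address, and handler are placeholders:

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	handler := func(w http.ResponseWriter, req *http.Request) {
		// mux.Vars(req)["version"] is empty for the unversioned form of the route.
		fmt.Fprintf(w, "version=%q\n", mux.Vars(req)["version"])
	}
	route := "/containers/json"
	r.Path("/v{version:[0-9.]+}" + route).Methods("GET").HandlerFunc(handler)
	r.Path(route).Methods("GET").HandlerFunc(handler)
	http.ListenAndServe("127.0.0.1:4243", r)
}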
-// FIXME: refactor this to be part of Server and not require re-creating a new -// router each time. This requires first moving ListenAndServe into Server. -func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) error { - router, err := createRouter(eng, false, true, "") - if err != nil { - return err - } - // Insert APIVERSION into the request as a convenience - req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path) - router.ServeHTTP(w, req) - return nil -} - -// ServeFD creates an http.Server and sets it up to serve given a socket activated -// argument. -func ServeFd(addr string, handle http.Handler) error { - ls, e := systemd.ListenFD(addr) - if e != nil { - return e - } - - chErrors := make(chan error, len(ls)) - - // We don't want to start serving on these sockets until the - // "initserver" job has completed. Otherwise required handlers - // won't be ready. - <-activationLock - - // Since ListenFD will return one or more sockets we have - // to create a go func to spawn off multiple serves - for i := range ls { - listener := ls[i] - go func() { - httpSrv := http.Server{Handler: handle} - chErrors <- httpSrv.Serve(listener) - }() - } - - for i := 0; i < len(ls); i += 1 { - err := <-chErrors - if err != nil { - return err - } - } - - return nil -} - -func lookupGidByName(nameOrGid string) (int, error) { - groups, err := user.ParseGroupFilter(func(g *user.Group) bool { - return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid - }) - if err != nil { - return -1, err - } - if groups != nil && len(groups) > 0 { - return groups[0].Gid, nil - } - return -1, fmt.Errorf("Group %s not found", nameOrGid) -} - -func changeGroup(addr string, nameOrGid string) error { - gid, err := lookupGidByName(nameOrGid) - if err != nil { - return err - } - - utils.Debugf("%s group found. gid: %d", nameOrGid, gid) - return os.Chown(addr, 0, gid) -} - -// ListenAndServe sets up the required http.Server and gets it listening for -// each addr passed in and does protocol specific checking. -func ListenAndServe(proto, addr string, job *engine.Job) error { - r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) - if err != nil { - return err - } - - if proto == "fd" { - return ServeFd(addr, r) - } - - if proto == "unix" { - if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { - return err - } - } - - l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock) - if err != nil { - return err - } - - if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) { - tlsCert := job.Getenv("TlsCert") - tlsKey := job.Getenv("TlsKey") - cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) - if err != nil { - return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. 
Key encrypted?", - tlsCert, tlsKey, err) - } - tlsConfig := &tls.Config{ - NextProtos: []string{"http/1.1"}, - Certificates: []tls.Certificate{cert}, - } - if job.GetenvBool("TlsVerify") { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(job.Getenv("TlsCa")) - if err != nil { - return fmt.Errorf("Couldn't read CA certificate: %s", err) - } - certPool.AppendCertsFromPEM(file) - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = certPool - } - l = tls.NewListener(l, tlsConfig) - } - - // Basic error and sanity checking - switch proto { - case "tcp": - if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") { - log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") - } - case "unix": - if err := os.Chmod(addr, 0660); err != nil { - return err - } - socketGroup := job.Getenv("SocketGroup") - if socketGroup != "" { - if err := changeGroup(addr, socketGroup); err != nil { - if socketGroup == "docker" { - // if the user hasn't explicitly specified the group ownership, don't fail on errors. - utils.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error()) - } else { - return err - } - } - } - default: - return fmt.Errorf("Invalid protocol format.") - } - - httpSrv := http.Server{Addr: addr, Handler: r} - return httpSrv.Serve(l) -} - -// ServeApi loops through all of the protocols sent in to docker and spawns -// off a go routine to setup a serving http.Server for each. -func ServeApi(job *engine.Job) engine.Status { - var ( - protoAddrs = job.Args - chErrors = make(chan error, len(protoAddrs)) - ) - activationLock = make(chan struct{}) - - if err := job.Eng.Register("acceptconnections", AcceptConnections); err != nil { - return job.Error(err) - } - - for _, protoAddr := range protoAddrs { - protoAddrParts := strings.SplitN(protoAddr, "://", 2) - go func() { - log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1]) - chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job) - }() - } - - for i := 0; i < len(protoAddrs); i += 1 { - err := <-chErrors - if err != nil { - return job.Error(err) - } - } - - return engine.StatusOK -} - -func AcceptConnections(job *engine.Job) engine.Status { - // Tell the init daemon we are accepting requests - go systemd.SdNotify("READY=1") - - // close the lock so the listeners start accepting connections - close(activationLock) - - return engine.StatusOK -} diff --git a/api/server/server.go b/api/server/server.go new file mode 100644 index 0000000000..18aefe42cd --- /dev/null +++ b/api/server/server.go @@ -0,0 +1,1257 @@ +package server + +import ( + "bufio" + "bytes" + "code.google.com/p/go.net/websocket" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "expvar" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/pprof" + "os" + "strconv" + "strings" + "syscall" + + "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/pkg/listenbuffer" + "github.com/dotcloud/docker/pkg/systemd" + "github.com/dotcloud/docker/pkg/user" + "github.com/dotcloud/docker/pkg/version" + "github.com/dotcloud/docker/registry" + "github.com/dotcloud/docker/utils" + "github.com/gorilla/mux" +) + +var ( + activationLock chan struct{} +) + +type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error + +func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, 
error) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, err + } + // Flush the options to make sure the client sets the raw mode + conn.Write([]byte{}) + return conn, conn, nil +} + +//If we don't do this, POST method without Content-type (even with empty body) will fail +func parseForm(r *http.Request) error { + if r == nil { + return nil + } + if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +func parseMultipartForm(r *http.Request) error { + if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { + return err + } + return nil +} + +func httpError(w http.ResponseWriter, err error) { + statusCode := http.StatusInternalServerError + // FIXME: this is brittle and should not be necessary. + // If we need to differentiate between different possible error types, we should + // create appropriate error types with clearly defined meaning. + if strings.Contains(err.Error(), "No such") { + statusCode = http.StatusNotFound + } else if strings.Contains(err.Error(), "Bad parameter") { + statusCode = http.StatusBadRequest + } else if strings.Contains(err.Error(), "Conflict") { + statusCode = http.StatusConflict + } else if strings.Contains(err.Error(), "Impossible") { + statusCode = http.StatusNotAcceptable + } else if strings.Contains(err.Error(), "Wrong login/password") { + statusCode = http.StatusUnauthorized + } else if strings.Contains(err.Error(), "hasn't been activated") { + statusCode = http.StatusForbidden + } + + if err != nil { + utils.Errorf("HTTP Error: statusCode=%d %s", statusCode, err.Error()) + http.Error(w, err.Error(), statusCode) + } +} + +func writeJSON(w http.ResponseWriter, code int, v engine.Env) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + return v.Encode(w) +} + +func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) { + w.Header().Set("Content-Type", "application/json") + if flush { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } else { + job.Stdout.Add(w) + } +} + +func getBoolParam(value string) (bool, error) { + if value == "" { + return false, nil + } + ret, err := strconv.ParseBool(value) + if err != nil { + return false, fmt.Errorf("Bad parameter") + } + return ret, nil +} + +func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var ( + authConfig, err = ioutil.ReadAll(r.Body) + job = eng.Job("auth") + status string + ) + if err != nil { + return err + } + job.Setenv("authConfig", string(authConfig)) + job.Stdout.AddString(&status) + if err = job.Run(); err != nil { + return err + } + if status != "" { + var env engine.Env + env.Set("Status", status) + return writeJSON(w, http.StatusOK, env) + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Content-Type", "application/json") + eng.ServeHTTP(w, r) + return nil +} + +func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + job := eng.Job("kill", vars["name"]) + if sig := r.Form.Get("signal"); sig != "" { + job.Args = append(job.Args, sig) + } + if err := job.Run(); err != nil { + return err + 
} + w.WriteHeader(http.StatusNoContent) + return nil +} + +func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("export", vars["name"]) + job.Stdout.Add(w) + if err := job.Run(); err != nil { + return err + } + return nil +} + +func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var ( + err error + outs *engine.Table + job = eng.Job("images") + ) + + job.Setenv("filter", r.Form.Get("filter")) + job.Setenv("all", r.Form.Get("all")) + + if version.GreaterThanOrEqualTo("1.7") { + streamJSON(job, w, false) + } else if outs, err = job.Stdout.AddListTable(); err != nil { + return err + } + + if err := job.Run(); err != nil { + return err + } + + if version.LessThan("1.7") && outs != nil { // Convert to legacy format + outsLegacy := engine.NewTable("Created", 0) + for _, out := range outs.Data { + for _, repoTag := range out.GetList("RepoTags") { + parts := strings.Split(repoTag, ":") + outLegacy := &engine.Env{} + outLegacy.Set("Repository", parts[0]) + outLegacy.Set("Tag", parts[1]) + outLegacy.Set("Id", out.Get("Id")) + outLegacy.SetInt64("Created", out.GetInt64("Created")) + outLegacy.SetInt64("Size", out.GetInt64("Size")) + outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize")) + outsLegacy.Add(outLegacy) + } + } + w.Header().Set("Content-Type", "application/json") + if _, err := outsLegacy.WriteListTo(w); err != nil { + return err + } + } + return nil +} + +func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.GreaterThan("1.6") { + w.WriteHeader(http.StatusNotFound) + return fmt.Errorf("This is now implemented in the client.") + } + eng.ServeHTTP(w, r) + return nil +} + +func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.Header().Set("Content-Type", "application/json") + eng.ServeHTTP(w, r) + return nil +} + +func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var job = eng.Job("events", r.RemoteAddr) + streamJSON(job, w, true) + job.Setenv("since", r.Form.Get("since")) + return job.Run() +} + +func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var job = eng.Job("history", vars["name"]) + streamJSON(job, w, false) + + if err := job.Run(); err != nil { + return err + } + return nil +} + +func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("changes", vars["name"]) + streamJSON(job, w, false) + + return job.Run() +} + +func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.LessThan("1.4") { + return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.") + } + if vars == nil { + return fmt.Errorf("Missing parameter") + 
} + if err := parseForm(r); err != nil { + return err + } + + job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) + streamJSON(job, w, false) + return job.Run() +} + +func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + err error + outs *engine.Table + job = eng.Job("containers") + ) + + job.Setenv("all", r.Form.Get("all")) + job.Setenv("size", r.Form.Get("size")) + job.Setenv("since", r.Form.Get("since")) + job.Setenv("before", r.Form.Get("before")) + job.Setenv("limit", r.Form.Get("limit")) + + if version.GreaterThanOrEqualTo("1.5") { + streamJSON(job, w, false) + } else if outs, err = job.Stdout.AddTable(); err != nil { + return err + } + if err = job.Run(); err != nil { + return err + } + if version.LessThan("1.5") { // Convert to legacy format + for _, out := range outs.Data { + ports := engine.NewTable("", 0) + ports.ReadListFrom([]byte(out.Get("Ports"))) + out.Set("Ports", api.DisplayablePorts(ports)) + } + w.Header().Set("Content-Type", "application/json") + if _, err = outs.WriteListTo(w); err != nil { + return err + } + } + return nil +} + +func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) + job.Setenv("force", r.Form.Get("force")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusCreated) + return nil +} + +func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + config engine.Env + env engine.Env + job = eng.Job("commit", r.Form.Get("container")) + ) + if err := config.Decode(r.Body); err != nil { + utils.Errorf("%s", err) + } + + job.Setenv("repo", r.Form.Get("repo")) + job.Setenv("tag", r.Form.Get("tag")) + job.Setenv("author", r.Form.Get("author")) + job.Setenv("comment", r.Form.Get("comment")) + job.SetenvSubEnv("config", &config) + + var id string + job.Stdout.AddString(&id) + if err := job.Run(); err != nil { + return err + } + env.Set("Id", id) + return writeJSON(w, http.StatusCreated, env) +} + +// Creates an image from Pull or from Import +func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + + var ( + image = r.Form.Get("fromImage") + tag = r.Form.Get("tag") + job *engine.Job + ) + authEncoded := r.Header.Get("X-Registry-Auth") + authConfig := ®istry.AuthConfig{} + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } + if image != "" { //pull + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + job = eng.Job("pull", r.Form.Get("fromImage"), tag) + job.SetenvBool("parallel", version.GreaterThan("1.3")) + job.SetenvJson("metaHeaders", 
metaHeaders) + job.SetenvJson("authConfig", authConfig) + } else { //import + job = eng.Job("import", r.Form.Get("fromSrc"), r.Form.Get("repo"), tag) + job.Stdin.Add(r.Body) + } + + if version.GreaterThan("1.0") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) + w.Write(sf.FormatError(err)) + } + + return nil +} + +func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = ®istry.AuthConfig{} + metaHeaders = map[string][]string{} + ) + + if authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a search it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + + var job = eng.Job("search", r.Form.Get("term")) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + streamJSON(job, w, false) + + return job.Run() +} + +func postImagesInsert(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("insert", vars["name"], r.Form.Get("url"), r.Form.Get("path")) + if version.GreaterThan("1.0") { + job.SetenvBool("json", true) + streamJSON(job, w, false) + } else { + job.Stdout.Add(w) + } + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) + w.Write(sf.FormatError(err)) + } + + return nil +} + +func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + metaHeaders := map[string][]string{} + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Meta-") { + metaHeaders[k] = v + } + } + if err := parseForm(r); err != nil { + return err + } + authConfig := ®istry.AuthConfig{} + + authEncoded := r.Header.Get("X-Registry-Auth") + if authEncoded != "" { + // the new format is to handle the authConfig as a header + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // to increase compatibility to existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } else { + // the old format is supported for compatibility if there was no authConfig header + if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { + return err + } + } + + job := eng.Job("push", vars["name"]) + job.SetenvJson("metaHeaders", metaHeaders) + job.SetenvJson("authConfig", authConfig) + if version.GreaterThan("1.0") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return 
err + } + sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) + w.Write(sf.FormatError(err)) + } + return nil +} + +func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if version.GreaterThan("1.0") { + w.Header().Set("Content-Type", "application/x-tar") + } + job := eng.Job("image_export", vars["name"]) + job.Stdout.Add(w) + return job.Run() +} + +func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + job := eng.Job("load") + job.Stdin.Add(r.Body) + return job.Run() +} + +func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return nil + } + var ( + out engine.Env + job = eng.Job("create", r.Form.Get("name")) + outWarnings []string + outId string + warnings = bytes.NewBuffer(nil) + ) + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + // Read container ID from the first line of stdout + job.Stdout.AddString(&outId) + // Read warnings from stderr + job.Stderr.Add(warnings) + if err := job.Run(); err != nil { + return err + } + // Parse warnings from stderr + scanner := bufio.NewScanner(warnings) + for scanner.Scan() { + outWarnings = append(outWarnings, scanner.Text()) + } + out.Set("Id", outId) + out.SetList("Warnings", outWarnings) + return writeJSON(w, http.StatusCreated, out) +} + +func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("restart", vars["name"]) + job.Setenv("t", r.Form.Get("t")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("container_delete", vars["name"]) + job.Setenv("removeVolume", r.Form.Get("v")) + job.Setenv("removeLink", r.Form.Get("link")) + job.Setenv("forceRemove", r.Form.Get("force")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("image_delete", vars["name"]) + streamJSON(job, w, false) + job.Setenv("force", r.Form.Get("force")) + job.Setenv("noprune", r.Form.Get("noprune")) + + return job.Run() +} + +func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + name := vars["name"] + job := eng.Job("start", name) + // allow a nil body for backwards compatibility + if r.Body != nil { + if api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") { + if err := job.DecodeEnv(r.Body); err != nil { + return err + } + } + } + if err := 
job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + job := eng.Job("stop", vars["name"]) + job.Setenv("t", r.Form.Get("t")) + if err := job.Run(); err != nil { + return err + } + w.WriteHeader(http.StatusNoContent) + return nil +} + +func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var ( + env engine.Env + status string + job = eng.Job("wait", vars["name"]) + ) + job.Stdout.AddString(&status) + if err := job.Run(); err != nil { + return err + } + // Parse a 16-bit encoded integer to map typical unix exit status. + _, err := strconv.ParseInt(status, 10, 16) + if err != nil { + return err + } + env.Set("StatusCode", status) + return writeJSON(w, http.StatusOK, env) +} + +func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { + return err + } + return nil +} + +func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var ( + job = eng.Job("inspect", vars["name"], "container") + c, err = job.Stdout.AddEnv() + ) + if err != nil { + return err + } + if err = job.Run(); err != nil { + return err + } + + inStream, outStream, err := hijackServer(w) + if err != nil { + return err + } + defer func() { + if tcpc, ok := inStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else { + inStream.Close() + } + }() + defer func() { + if tcpc, ok := outStream.(*net.TCPConn); ok { + tcpc.CloseWrite() + } else if closer, ok := outStream.(io.Closer); ok { + closer.Close() + } + }() + + var errStream io.Writer + + fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + + if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + errStream = utils.NewStdWriter(outStream, utils.Stderr) + outStream = utils.NewStdWriter(outStream, utils.Stdout) + } else { + errStream = outStream + } + + job = eng.Job("attach", vars["name"]) + job.Setenv("logs", r.Form.Get("logs")) + job.Setenv("stream", r.Form.Get("stream")) + job.Setenv("stdin", r.Form.Get("stdin")) + job.Setenv("stdout", r.Form.Get("stdout")) + job.Setenv("stderr", r.Form.Get("stderr")) + job.Stdin.Add(inStream) + job.Stdout.Add(outStream) + job.Stderr.Set(errStream) + if err := job.Run(); err != nil { + fmt.Fprintf(outStream, "Error: %s\n", err) + + } + return nil +} + +func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { + return err + } + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + if err := eng.Job("inspect", vars["name"], "container").Run(); err != 
nil { + return err + } + + h := websocket.Handler(func(ws *websocket.Conn) { + defer ws.Close() + job := eng.Job("attach", vars["name"]) + job.Setenv("logs", r.Form.Get("logs")) + job.Setenv("stream", r.Form.Get("stream")) + job.Setenv("stdin", r.Form.Get("stdin")) + job.Setenv("stdout", r.Form.Get("stdout")) + job.Setenv("stderr", r.Form.Get("stderr")) + job.Stdin.Add(ws) + job.Stdout.Add(ws) + job.Stderr.Set(ws) + if err := job.Run(); err != nil { + utils.Errorf("Error: %s", err) + } + }) + h.ServeHTTP(w, r) + + return nil +} + +func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("inspect", vars["name"], "container") + streamJSON(job, w, false) + job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job + return job.Run() +} + +func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + var job = eng.Job("inspect", vars["name"], "image") + streamJSON(job, w, false) + job.SetenvBool("conflict", true) //conflict=true to detect conflict between containers and images in the job + return job.Run() +} + +func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if version.LessThan("1.3") { + return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") + } + var ( + authEncoded = r.Header.Get("X-Registry-Auth") + authConfig = ®istry.AuthConfig{} + configFileEncoded = r.Header.Get("X-Registry-Config") + configFile = ®istry.ConfigFile{} + job = eng.Job("build") + ) + + // This block can be removed when API versions prior to 1.9 are deprecated. + // Both headers will be parsed and sent along to the daemon, but if a non-empty + // ConfigFile is present, any value provided as an AuthConfig directly will + // be overridden. See BuildFile::CmdFrom for details. 
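// A minimal illustrative sketch of the header decoding pattern the build
// handler uses for X-Registry-Auth / X-Registry-Config: the client sends a
// URL-base64-encoded JSON blob in a request header, and a malformed value
// degrades to an empty config rather than failing the request. authConfig
// here is a hypothetical stand-in for registry.AuthConfig, which carries
// more fields.
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// authConfig is a stand-in struct for illustration only.
type authConfig struct {
	Username string
	Password string
}

func main() {
	// What a client would put in the header: base64(URL alphabet) of the JSON config.
	header := base64.URLEncoding.EncodeToString([]byte(`{"Username":"demo","Password":"secret"}`))

	cfg := &authConfig{}
	dec := base64.NewDecoder(base64.URLEncoding, strings.NewReader(header))
	if err := json.NewDecoder(dec).Decode(cfg); err != nil {
		// Fall back to an empty config on a bad header, as the handler does.
		cfg = &authConfig{}
	}
	fmt.Println(cfg.Username)
}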
+ if version.LessThan("1.9") && authEncoded != "" { + authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) + if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + authConfig = ®istry.AuthConfig{} + } + } + + if configFileEncoded != "" { + configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded)) + if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { + // for a pull it is not an error if no auth was given + // to increase compatibility with the existing api it is defaulting to be empty + configFile = ®istry.ConfigFile{} + } + } + + if version.GreaterThanOrEqualTo("1.8") { + job.SetenvBool("json", true) + streamJSON(job, w, true) + } else { + job.Stdout.Add(utils.NewWriteFlusher(w)) + } + job.Stdin.Add(r.Body) + job.Setenv("remote", r.FormValue("remote")) + job.Setenv("t", r.FormValue("t")) + job.Setenv("q", r.FormValue("q")) + job.Setenv("nocache", r.FormValue("nocache")) + job.Setenv("rm", r.FormValue("rm")) + job.SetenvJson("authConfig", authConfig) + job.SetenvJson("configFile", configFile) + + if err := job.Run(); err != nil { + if !job.Stdout.Used() { + return err + } + sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8")) + w.Write(sf.FormatError(err)) + } + return nil +} + +func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + + var copyData engine.Env + + if contentType := r.Header.Get("Content-Type"); api.MatchesContentType(contentType, "application/json") { + if err := copyData.Decode(r.Body); err != nil { + return err + } + } else { + return fmt.Errorf("Content-Type not supported: %s", contentType) + } + + if copyData.Get("Resource") == "" { + return fmt.Errorf("Path cannot be empty") + } + + origResource := copyData.Get("Resource") + + if copyData.Get("Resource")[0] == '/' { + copyData.Set("Resource", copyData.Get("Resource")[1:]) + } + + job := eng.Job("container_copy", vars["name"], copyData.Get("Resource")) + job.Stdout.Add(w) + if err := job.Run(); err != nil { + utils.Errorf("%s", err.Error()) + if strings.Contains(err.Error(), "No such container") { + w.WriteHeader(http.StatusNotFound) + } else if strings.Contains(err.Error(), "no such file or directory") { + return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) + } + } + return nil +} + +func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + w.WriteHeader(http.StatusOK) + return nil +} +func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Access-Control-Allow-Origin", "*") + w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") + w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") +} + +func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, enableCors bool, dockerVersion version.Version) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // log the request + utils.Debugf("Calling %s %s", localMethod, localRoute) + + if logging { + log.Println(r.Method, r.RequestURI) + } + + if 
strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { + userAgent := strings.Split(r.Header.Get("User-Agent"), "/") + if len(userAgent) == 2 && !dockerVersion.Equal(userAgent[1]) { + utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) + } + } + version := version.Version(mux.Vars(r)["version"]) + if version == "" { + version = api.APIVERSION + } + if enableCors { + writeCorsHeaders(w, r) + } + + if version.GreaterThan(api.APIVERSION) { + http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound) + return + } + + if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil { + utils.Errorf("Error: %s", err) + httpError(w, err) + } + } +} + +// Replicated from expvar.go as not public. +func expvarHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} + +func AttachProfiler(router *mux.Router) { + router.HandleFunc("/debug/vars", expvarHandler) + router.HandleFunc("/debug/pprof/", pprof.Index) + router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + router.HandleFunc("/debug/pprof/profile", pprof.Profile) + router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) + router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) + router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) +} + +func createRouter(eng *engine.Engine, logging, enableCors bool, dockerVersion string) (*mux.Router, error) { + r := mux.NewRouter() + if os.Getenv("DEBUG") != "" { + AttachProfiler(r) + } + m := map[string]map[string]HttpApiFunc{ + "GET": { + "/events": getEvents, + "/info": getInfo, + "/version": getVersion, + "/images/json": getImagesJSON, + "/images/viz": getImagesViz, + "/images/search": getImagesSearch, + "/images/{name:.*}/get": getImagesGet, + "/images/{name:.*}/history": getImagesHistory, + "/images/{name:.*}/json": getImagesByName, + "/containers/ps": getContainersJSON, + "/containers/json": getContainersJSON, + "/containers/{name:.*}/export": getContainersExport, + "/containers/{name:.*}/changes": getContainersChanges, + "/containers/{name:.*}/json": getContainersByName, + "/containers/{name:.*}/top": getContainersTop, + "/containers/{name:.*}/attach/ws": wsContainersAttach, + }, + "POST": { + "/auth": postAuth, + "/commit": postCommit, + "/build": postBuild, + "/images/create": postImagesCreate, + "/images/{name:.*}/insert": postImagesInsert, + "/images/load": postImagesLoad, + "/images/{name:.*}/push": postImagesPush, + "/images/{name:.*}/tag": postImagesTag, + "/containers/create": postContainersCreate, + "/containers/{name:.*}/kill": postContainersKill, + "/containers/{name:.*}/restart": postContainersRestart, + "/containers/{name:.*}/start": postContainersStart, + "/containers/{name:.*}/stop": postContainersStop, + "/containers/{name:.*}/wait": postContainersWait, + "/containers/{name:.*}/resize": postContainersResize, + "/containers/{name:.*}/attach": postContainersAttach, + "/containers/{name:.*}/copy": postContainersCopy, + }, + "DELETE": { + "/containers/{name:.*}": deleteContainers, + "/images/{name:.*}": 
deleteImages, + }, + "OPTIONS": { + "": optionsHandler, + }, + } + + for method, routes := range m { + for route, fct := range routes { + utils.Debugf("Registering %s, %s", method, route) + // NOTE: scope issue, make sure the variables are local and won't be changed + localRoute := route + localFct := fct + localMethod := method + + // build the handler function + f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, enableCors, version.Version(dockerVersion)) + + // add the new route + if localRoute == "" { + r.Methods(localMethod).HandlerFunc(f) + } else { + r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) + r.Path(localRoute).Methods(localMethod).HandlerFunc(f) + } + } + } + + return r, nil +} + +// ServeRequest processes a single http request to the docker remote api. +// FIXME: refactor this to be part of Server and not require re-creating a new +// router each time. This requires first moving ListenAndServe into Server. +func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) error { + router, err := createRouter(eng, false, true, "") + if err != nil { + return err + } + // Insert APIVERSION into the request as a convenience + req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path) + router.ServeHTTP(w, req) + return nil +} + +// ServeFD creates an http.Server and sets it up to serve given a socket activated +// argument. +func ServeFd(addr string, handle http.Handler) error { + ls, e := systemd.ListenFD(addr) + if e != nil { + return e + } + + chErrors := make(chan error, len(ls)) + + // We don't want to start serving on these sockets until the + // "initserver" job has completed. Otherwise required handlers + // won't be ready. + <-activationLock + + // Since ListenFD will return one or more sockets we have + // to create a go func to spawn off multiple serves + for i := range ls { + listener := ls[i] + go func() { + httpSrv := http.Server{Handler: handle} + chErrors <- httpSrv.Serve(listener) + }() + } + + for i := 0; i < len(ls); i += 1 { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} + +func lookupGidByName(nameOrGid string) (int, error) { + groups, err := user.ParseGroupFilter(func(g *user.Group) bool { + return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid + }) + if err != nil { + return -1, err + } + if groups != nil && len(groups) > 0 { + return groups[0].Gid, nil + } + return -1, fmt.Errorf("Group %s not found", nameOrGid) +} + +func changeGroup(addr string, nameOrGid string) error { + gid, err := lookupGidByName(nameOrGid) + if err != nil { + return err + } + + utils.Debugf("%s group found. gid: %d", nameOrGid, gid) + return os.Chown(addr, 0, gid) +} + +// ListenAndServe sets up the required http.Server and gets it listening for +// each addr passed in and does protocol specific checking. 
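// A minimal sketch of the gating pattern ServeFd relies on: listeners are set
// up immediately, but nothing is served until an activation channel is closed
// (AcceptConnections closes activationLock once initialization is done).
// net.Listen stands in here for systemd.ListenFD / listenbuffer.
package main

import (
	"log"
	"net"
	"net/http"
)

func serveWhenReady(l net.Listener, h http.Handler, ready <-chan struct{}) error {
	<-ready // block until initialization has finished and handlers are registered
	return (&http.Server{Handler: h}).Serve(l)
}

func main() {
	ready := make(chan struct{})
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	errs := make(chan error, 1)
	go func() { errs <- serveWhenReady(l, http.NotFoundHandler(), ready) }()

	// ... initialization work would happen here ...
	close(ready) // comparable to AcceptConnections closing activationLock

	log.Fatal(<-errs) // Serve only returns on error; block here as ServeFd does
}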
+func ListenAndServe(proto, addr string, job *engine.Job) error { + r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version")) + if err != nil { + return err + } + + if proto == "fd" { + return ServeFd(addr, r) + } + + if proto == "unix" { + if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { + return err + } + } + + l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock) + if err != nil { + return err + } + + if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) { + tlsCert := job.Getenv("TlsCert") + tlsKey := job.Getenv("TlsKey") + cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey) + if err != nil { + return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", + tlsCert, tlsKey, err) + } + tlsConfig := &tls.Config{ + NextProtos: []string{"http/1.1"}, + Certificates: []tls.Certificate{cert}, + } + if job.GetenvBool("TlsVerify") { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(job.Getenv("TlsCa")) + if err != nil { + return fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + l = tls.NewListener(l, tlsConfig) + } + + // Basic error and sanity checking + switch proto { + case "tcp": + if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") { + log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + } + case "unix": + if err := os.Chmod(addr, 0660); err != nil { + return err + } + socketGroup := job.Getenv("SocketGroup") + if socketGroup != "" { + if err := changeGroup(addr, socketGroup); err != nil { + if socketGroup == "docker" { + // if the user hasn't explicitly specified the group ownership, don't fail on errors. + utils.Debugf("Warning: could not chgrp %s to docker: %s", addr, err.Error()) + } else { + return err + } + } + } + default: + return fmt.Errorf("Invalid protocol format.") + } + + httpSrv := http.Server{Addr: addr, Handler: r} + return httpSrv.Serve(l) +} + +// ServeApi loops through all of the protocols sent in to docker and spawns +// off a go routine to setup a serving http.Server for each. 
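// A minimal sketch of the TLS setup performed when Tls or TlsVerify is set:
// wrap an existing listener with a server certificate, and when verification
// is on, require a client certificate signed by the configured CA. The file
// paths below are placeholders, not values taken from the patch.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
	"net"
)

func wrapTLS(l net.Listener, certFile, keyFile, caFile string, verifyClient bool) (net.Listener, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	cfg := &tls.Config{
		NextProtos:   []string{"http/1.1"},
		Certificates: []tls.Certificate{cert},
	}
	if verifyClient {
		ca, err := ioutil.ReadFile(caFile)
		if err != nil {
			return nil, err
		}
		pool := x509.NewCertPool()
		pool.AppendCertsFromPEM(ca)
		cfg.ClientAuth = tls.RequireAndVerifyClientCert
		cfg.ClientCAs = pool
	}
	return tls.NewListener(l, cfg), nil
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := wrapTLS(l, "cert.pem", "key.pem", "ca.pem", true); err != nil {
		log.Fatal(err)
	}
}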
+func ServeApi(job *engine.Job) engine.Status { + var ( + protoAddrs = job.Args + chErrors = make(chan error, len(protoAddrs)) + ) + activationLock = make(chan struct{}) + + if err := job.Eng.Register("acceptconnections", AcceptConnections); err != nil { + return job.Error(err) + } + + for _, protoAddr := range protoAddrs { + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + go func() { + log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1]) + chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job) + }() + } + + for i := 0; i < len(protoAddrs); i += 1 { + err := <-chErrors + if err != nil { + return job.Error(err) + } + } + + return engine.StatusOK +} + +func AcceptConnections(job *engine.Job) engine.Status { + // Tell the init daemon we are accepting requests + go systemd.SdNotify("READY=1") + + // close the lock so the listeners start accepting connections + close(activationLock) + + return engine.StatusOK +} diff --git a/builtins/builtins.go b/builtins/builtins.go index 10ee9b19e6..109bc5b913 100644 --- a/builtins/builtins.go +++ b/builtins/builtins.go @@ -1,7 +1,7 @@ package builtins import ( - "github.com/dotcloud/docker/api" + api "github.com/dotcloud/docker/api/server" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/runtime/networkdriver/bridge" "github.com/dotcloud/docker/server" diff --git a/docker/docker.go b/docker/docker.go index e4ce8a0b74..e96c173d30 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/api/client" "github.com/dotcloud/docker/builtins" "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" @@ -178,7 +179,7 @@ func main() { protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2) var ( - cli *api.DockerCli + cli *client.DockerCli tlsConfig tls.Config ) tlsConfig.InsecureSkipVerify = true @@ -211,9 +212,9 @@ func main() { } if *flTls || *flTlsVerify { - cli = api.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig) + cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig) } else { - cli = api.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil) + cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil) } if err := cli.ParseCommands(flag.Args()...); err != nil { -- cgit v1.2.1 From ae9ed84fdab4db5cec663bbd2d4ba8bcad897dcc Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 28 Mar 2014 23:21:55 +0000 Subject: split client in 2 files Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/client/cli.go | 102 ++ api/client/client.go | 2551 ------------------------------------------------ api/client/commands.go | 2098 +++++++++++++++++++++++++++++++++++++++ api/client/utils.go | 390 ++++++++ 4 files changed, 2590 insertions(+), 2551 deletions(-) create mode 100644 api/client/cli.go delete mode 100644 api/client/client.go create mode 100644 api/client/commands.go create mode 100644 api/client/utils.go diff --git a/api/client/cli.go b/api/client/cli.go new file mode 100644 index 0000000000..b58d3c3c75 --- /dev/null +++ b/api/client/cli.go @@ -0,0 +1,102 @@ +package client + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io" + "os" + "reflect" + "strings" + "text/template" + + flag "github.com/dotcloud/docker/pkg/mflag" + "github.com/dotcloud/docker/pkg/term" + 
"github.com/dotcloud/docker/registry" +) + +var funcMap = template.FuncMap{ + "json": func(v interface{}) string { + a, _ := json.Marshal(v) + return string(a) + }, +} + +func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) { + methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:]) + method := reflect.ValueOf(cli).MethodByName(methodName) + if !method.IsValid() { + return nil, false + } + return method.Interface().(func(...string) error), true +} + +func (cli *DockerCli) ParseCommands(args ...string) error { + if len(args) > 0 { + method, exists := cli.getMethod(args[0]) + if !exists { + fmt.Println("Error: Command not found:", args[0]) + return cli.CmdHelp(args[1:]...) + } + return method(args[1:]...) + } + return cli.CmdHelp(args...) +} + +func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet { + flags := flag.NewFlagSet(name, flag.ContinueOnError) + flags.Usage = func() { + fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description) + flags.PrintDefaults() + os.Exit(2) + } + return flags +} + +func (cli *DockerCli) LoadConfigFile() (err error) { + cli.configFile, err = registry.LoadConfig(os.Getenv("HOME")) + if err != nil { + fmt.Fprintf(cli.err, "WARNING: %s\n", err) + } + return err +} + +func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsConfig *tls.Config) *DockerCli { + var ( + isTerminal = false + terminalFd uintptr + ) + + if in != nil { + if file, ok := in.(*os.File); ok { + terminalFd = file.Fd() + isTerminal = term.IsTerminal(terminalFd) + } + } + + if err == nil { + err = out + } + return &DockerCli{ + proto: proto, + addr: addr, + in: in, + out: out, + err: err, + isTerminal: isTerminal, + terminalFd: terminalFd, + tlsConfig: tlsConfig, + } +} + +type DockerCli struct { + proto string + addr string + configFile *registry.ConfigFile + in io.ReadCloser + out io.Writer + err io.Writer + isTerminal bool + terminalFd uintptr + tlsConfig *tls.Config +} diff --git a/api/client/client.go b/api/client/client.go deleted file mode 100644 index 29b49464c4..0000000000 --- a/api/client/client.go +++ /dev/null @@ -1,2551 +0,0 @@ -package client - -import ( - "bufio" - "bytes" - "crypto/tls" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/archive" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/nat" - flag "github.com/dotcloud/docker/pkg/mflag" - "github.com/dotcloud/docker/pkg/signal" - "github.com/dotcloud/docker/pkg/term" - "github.com/dotcloud/docker/registry" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/utils" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "os/exec" - gosignal "os/signal" - "path" - "reflect" - "regexp" - goruntime "runtime" - "strconv" - "strings" - "syscall" - "text/tabwriter" - "text/template" - "time" -) - -var funcMap = template.FuncMap{ - "json": func(v interface{}) string { - a, _ := json.Marshal(v) - return string(a) - }, -} - -var ( - ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. 
Is 'docker -d' running on this host?") -) - -func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) { - methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:]) - method := reflect.ValueOf(cli).MethodByName(methodName) - if !method.IsValid() { - return nil, false - } - return method.Interface().(func(...string) error), true -} - -func (cli *DockerCli) ParseCommands(args ...string) error { - if len(args) > 0 { - method, exists := cli.getMethod(args[0]) - if !exists { - fmt.Println("Error: Command not found:", args[0]) - return cli.CmdHelp(args[1:]...) - } - return method(args[1:]...) - } - return cli.CmdHelp(args...) -} - -func (cli *DockerCli) CmdHelp(args ...string) error { - if len(args) > 0 { - method, exists := cli.getMethod(args[0]) - if !exists { - fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0]) - } else { - method("--help") - return nil - } - } - help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", api.DEFAULTUNIXSOCKET) - for _, command := range [][]string{ - {"attach", "Attach to a running container"}, - {"build", "Build a container from a Dockerfile"}, - {"commit", "Create a new image from a container's changes"}, - {"cp", "Copy files/folders from the containers filesystem to the host path"}, - {"diff", "Inspect changes on a container's filesystem"}, - {"events", "Get real time events from the server"}, - {"export", "Stream the contents of a container as a tar archive"}, - {"history", "Show the history of an image"}, - {"images", "List images"}, - {"import", "Create a new filesystem image from the contents of a tarball"}, - {"info", "Display system-wide information"}, - {"insert", "Insert a file in an image"}, - {"inspect", "Return low-level information on a container"}, - {"kill", "Kill a running container"}, - {"load", "Load an image from a tar archive"}, - {"login", "Register or Login to the docker registry server"}, - {"logs", "Fetch the logs of a container"}, - {"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"}, - {"ps", "List containers"}, - {"pull", "Pull an image or a repository from the docker registry server"}, - {"push", "Push an image or a repository to the docker registry server"}, - {"restart", "Restart a running container"}, - {"rm", "Remove one or more containers"}, - {"rmi", "Remove one or more images"}, - {"run", "Run a command in a new container"}, - {"save", "Save an image to a tar archive"}, - {"search", "Search for an image in the docker index"}, - {"start", "Start a stopped container"}, - {"stop", "Stop a running container"}, - {"tag", "Tag an image into a repository"}, - {"top", "Lookup the running processes of a container"}, - {"version", "Show the docker version information"}, - {"wait", "Block until a container stops, then print its exit code"}, - } { - help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) - } - fmt.Fprintf(cli.err, "%s\n", help) - return nil -} - -func (cli *DockerCli) CmdInsert(args ...string) error { - cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 3 { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("url", cmd.Arg(1)) - v.Set("path", cmd.Arg(2)) - - return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil) -} - -func (cli 
*DockerCli) CmdBuild(args ...string) error { - cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH") - tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") - suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") - noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") - rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - var ( - context archive.Archive - isRemote bool - err error - ) - - _, err = exec.LookPath("git") - hasGit := err == nil - if cmd.Arg(0) == "-" { - // As a special case, 'docker build -' will build from an empty context with the - // contents of stdin as a Dockerfile - dockerfile, err := ioutil.ReadAll(cli.in) - if err != nil { - return err - } - context, err = archive.Generate("Dockerfile", string(dockerfile)) - } else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) { - isRemote = true - } else { - root := cmd.Arg(0) - if utils.IsGIT(root) { - remoteURL := cmd.Arg(0) - if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) { - remoteURL = "https://" + remoteURL - } - - root, err = ioutil.TempDir("", "docker-build-git") - if err != nil { - return err - } - defer os.RemoveAll(root) - - if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { - return fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - } - if _, err := os.Stat(root); err != nil { - return err - } - filename := path.Join(root, "Dockerfile") - if _, err = os.Stat(filename); os.IsNotExist(err) { - return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) - } - context, err = archive.Tar(root, archive.Uncompressed) - } - var body io.Reader - // Setup an upload progress bar - // FIXME: ProgressReader shouldn't be this annoying to use - if context != nil { - sf := utils.NewStreamFormatter(false) - body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Uploading context") - } - // Upload the build context - v := &url.Values{} - - //Check if the given image name can be resolved - if *tag != "" { - repository, _ := utils.ParseRepositoryTag(*tag) - if _, _, err := registry.ResolveRepositoryName(repository); err != nil { - return err - } - } - - v.Set("t", *tag) - - if *suppressOutput { - v.Set("q", "1") - } - if isRemote { - v.Set("remote", cmd.Arg(0)) - } - if *noCache { - v.Set("nocache", "1") - } - if *rm { - v.Set("rm", "1") - } - - cli.LoadConfigFile() - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(cli.configFile) - if err != nil { - return err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - - if context != nil { - headers.Set("Content-Type", "application/tar") - } - err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers) - if jerr, ok := err.(*utils.JSONError); ok { - // If no error code is set, default to 1 - if jerr.Code == 0 { - jerr.Code = 1 - } - return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code} - } - return err -} - -// 'docker login': login / register a user to registry service. 
-func (cli *DockerCli) CmdLogin(args ...string) error { - cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.") - - var username, password, email string - - cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") - cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") - cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") - err := cmd.Parse(args) - if err != nil { - return nil - } - serverAddress := registry.IndexServerAddress() - if len(cmd.Args()) > 0 { - serverAddress = cmd.Arg(0) - } - - promptDefault := func(prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(cli.out, "%s: ", prompt) - } else { - fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) - } - } - - readInput := func(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) - } - - cli.LoadConfigFile() - authconfig, ok := cli.configFile.Configs[serverAddress] - if !ok { - authconfig = registry.AuthConfig{} - } - - if username == "" { - promptDefault("Username", authconfig.Username) - username = readInput(cli.in, cli.out) - if username == "" { - username = authconfig.Username - } - } - if username != authconfig.Username { - if password == "" { - oldState, _ := term.SaveState(cli.terminalFd) - fmt.Fprintf(cli.out, "Password: ") - term.DisableEcho(cli.terminalFd, oldState) - - password = readInput(cli.in, cli.out) - fmt.Fprint(cli.out, "\n") - - term.RestoreTerminal(cli.terminalFd, oldState) - if password == "" { - return fmt.Errorf("Error : Password Required") - } - } - - if email == "" { - promptDefault("Email", authconfig.Email) - email = readInput(cli.in, cli.out) - if email == "" { - email = authconfig.Email - } - } - } else { - password = authconfig.Password - email = authconfig.Email - } - authconfig.Username = username - authconfig.Password = password - authconfig.Email = email - authconfig.ServerAddress = serverAddress - cli.configFile.Configs[serverAddress] = authconfig - - stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false) - if statusCode == 401 { - delete(cli.configFile.Configs, serverAddress) - registry.SaveConfig(cli.configFile) - return err - } - if err != nil { - return err - } - var out2 engine.Env - err = out2.Decode(stream) - if err != nil { - cli.configFile, _ = registry.LoadConfig(os.Getenv("HOME")) - return err - } - registry.SaveConfig(cli.configFile) - if out2.Get("Status") != "" { - fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) - } - return nil -} - -// 'docker wait': block until a container stops -func (cli *DockerCli) CmdWait(args ...string) error { - cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - var encounteredError error - for _, name := range cmd.Args() { - status, err := waitForExit(cli, name) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to wait one or more containers") - } else { - fmt.Fprintf(cli.out, "%d\n", status) - } - } - return encounteredError -} - -// 'docker version': show version information -func (cli *DockerCli) CmdVersion(args ...string) error { - cmd := 
cli.Subcmd("version", "", "Show the docker version information.") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() > 0 { - cmd.Usage() - return nil - } - if dockerversion.VERSION != "" { - fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) - } - fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version()) - if dockerversion.GITCOMMIT != "" { - fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) - } - - body, _, err := readBody(cli.call("GET", "/version", nil, false)) - if err != nil { - return err - } - - out := engine.NewOutput() - remoteVersion, err := out.AddEnv() - if err != nil { - utils.Errorf("Error reading remote version: %s\n", err) - return err - } - if _, err := out.Write(body); err != nil { - utils.Errorf("Error reading remote version: %s\n", err) - return err - } - out.Close() - fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version")) - fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) - fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) - release := utils.GetReleaseVersion() - if release != "" { - fmt.Fprintf(cli.out, "Last stable version: %s", release) - if (dockerversion.VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(dockerversion.VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) { - fmt.Fprintf(cli.out, ", please update docker") - } - fmt.Fprintf(cli.out, "\n") - } - return nil -} - -// 'docker info': display system-wide information. -func (cli *DockerCli) CmdInfo(args ...string) error { - cmd := cli.Subcmd("info", "", "Display system-wide information") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() > 0 { - cmd.Usage() - return nil - } - - body, _, err := readBody(cli.call("GET", "/info", nil, false)) - if err != nil { - return err - } - - out := engine.NewOutput() - remoteInfo, err := out.AddEnv() - if err != nil { - return err - } - - if _, err := out.Write(body); err != nil { - utils.Errorf("Error reading remote info: %s\n", err) - return err - } - out.Close() - - fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) - fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) - fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) - var driverStatus [][2]string - if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { - return err - } - for _, pair := range driverStatus { - fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) - } - fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) - fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) - - if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { - fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) - fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") - fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) - fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) - fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) - - if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { - fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) - } - if initPath := remoteInfo.Get("InitPath"); initPath != "" { - fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) - } - } - - if len(remoteInfo.GetList("IndexServerAddress")) != 0 { - cli.LoadConfigFile() - u := 
cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username - if len(u) > 0 { - fmt.Fprintf(cli.out, "Username: %v\n", u) - fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) - } - } - if !remoteInfo.GetBool("MemoryLimit") { - fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") - } - if !remoteInfo.GetBool("SwapLimit") { - fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") - } - if !remoteInfo.GetBool("IPv4Forwarding") { - fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") - } - return nil -} - -func (cli *DockerCli) CmdStop(args ...string) error { - cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)") - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("t", strconv.Itoa(*nSeconds)) - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to stop one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) CmdRestart(args ...string) error { - cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container") - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("t", strconv.Itoa(*nSeconds)) - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to restart one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { - sigc := make(chan os.Signal, 1) - signal.CatchAll(sigc) - go func() { - for s := range sigc { - if s == syscall.SIGCHLD { - continue - } - var sig string - for sigStr, sigN := range signal.SignalMap { - if sigN == s { - sig = sigStr - break - } - } - if sig == "" { - utils.Errorf("Unsupported signal: %d. 
Discarding.", s) - } - if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { - utils.Debugf("Error sending signal: %s", err) - } - } - }() - return sigc -} - -func (cli *DockerCli) CmdStart(args ...string) error { - cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") - attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process") - openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - var cErr chan error - var tty bool - if *attach || *openStdin { - if cmd.NArg() > 1 { - return fmt.Errorf("You cannot start and attach multiple containers at once.") - } - - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) - if err != nil { - return err - } - - container := &api.Container{} - err = json.Unmarshal(body, container) - if err != nil { - return err - } - - tty = container.Config.Tty - - if !container.Config.Tty { - sigc := cli.forwardAllSignals(cmd.Arg(0)) - defer signal.StopCatch(sigc) - } - - var in io.ReadCloser - - v := url.Values{} - v.Set("stream", "1") - if *openStdin && container.Config.OpenStdin { - v.Set("stdin", "1") - in = cli.in - } - v.Set("stdout", "1") - v.Set("stderr", "1") - - cErr = utils.Go(func() error { - return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil) - }) - } - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false)) - if err != nil { - if !*attach || !*openStdin { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to start one or more containers") - } - } else { - if !*attach || !*openStdin { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - } - if encounteredError != nil { - if *openStdin || *attach { - cli.in.Close() - <-cErr - } - return encounteredError - } - - if *openStdin || *attach { - if tty && cli.isTerminal { - if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { - utils.Errorf("Error monitoring TTY size: %s\n", err) - } - } - return <-cErr - } - return nil -} - -func (cli *DockerCli) CmdInspect(args ...string) error { - cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image") - tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - var tmpl *template.Template - if *tmplStr != "" { - var err error - if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { - fmt.Fprintf(cli.err, "Template parsing error: %v\n", err) - return &utils.StatusError{StatusCode: 64, - Status: "Template parsing error: " + err.Error()} - } - } - - indented := new(bytes.Buffer) - indented.WriteByte('[') - status := 0 - - for _, name := range cmd.Args() { - obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) - if err != nil { - obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false)) - if err != nil { - if strings.Contains(err.Error(), "No such") { - fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) - } else { - 
fmt.Fprintf(cli.err, "%s", err) - } - status = 1 - continue - } - } - - if tmpl == nil { - if err = json.Indent(indented, obj, "", " "); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - continue - } - } else { - // Has template, will render - var value interface{} - if err := json.Unmarshal(obj, &value); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - continue - } - if err := tmpl.Execute(cli.out, value); err != nil { - return err - } - cli.out.Write([]byte{'\n'}) - } - indented.WriteString(",") - } - - if indented.Len() > 1 { - // Remove trailing ',' - indented.Truncate(indented.Len() - 1) - } - indented.WriteByte(']') - - if tmpl == nil { - if _, err := io.Copy(cli.out, indented); err != nil { - return err - } - } - - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - return nil -} - -func (cli *DockerCli) CmdTop(args ...string) error { - cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() == 0 { - cmd.Usage() - return nil - } - val := url.Values{} - if cmd.NArg() > 1 { - val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) - } - - stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false) - if err != nil { - return err - } - var procs engine.Env - if err := procs.Decode(stream); err != nil { - return err - } - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) - processes := [][]string{} - if err := procs.GetJson("Processes", &processes); err != nil { - return err - } - for _, proc := range processes { - fmt.Fprintln(w, strings.Join(proc, "\t")) - } - w.Flush() - return nil -} - -func (cli *DockerCli) CmdPort(args ...string) error { - cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 2 { - cmd.Usage() - return nil - } - - var ( - port = cmd.Arg(1) - proto = "tcp" - parts = strings.SplitN(port, "/", 2) - container api.Container - ) - - if len(parts) == 2 && len(parts[1]) != 0 { - port = parts[0] - proto = parts[1] - } - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) - if err != nil { - return err - } - - err = json.Unmarshal(body, &container) - if err != nil { - return err - } - - if frontends, exists := container.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { - for _, frontend := range frontends { - fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) - } - } else { - return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0)) - } - return nil -} - -// 'docker rmi IMAGE' removes all images with the name IMAGE -func (cli *DockerCli) CmdRmi(args ...string) error { - var ( - cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images") - force = cmd.Bool([]string{"f", "-force"}, false, "Force") - noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") - ) - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - v := url.Values{} - if *force { - v.Set("force", "1") - } - if *noprune { - v.Set("noprune", "1") - } - - var encounteredError error - for _, name := range cmd.Args() { - body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, false)) - if 
err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more images") - } else { - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more images") - continue - } - for _, out := range outs.Data { - if out.Get("Deleted") != "" { - fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted")) - } else { - fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged")) - } - } - } - } - return encounteredError -} - -func (cli *DockerCli) CmdHistory(args ...string) error { - cmd := cli.Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image") - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") - } - - for _, out := range outs.Data { - outID := out.Get("Id") - if !*quiet { - if *noTrunc { - fmt.Fprintf(w, "%s\t", outID) - } else { - fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) - } - - fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) - - if *noTrunc { - fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) - } else { - fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) - } - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size"))) - } else { - if *noTrunc { - fmt.Fprintln(w, outID) - } else { - fmt.Fprintln(w, utils.TruncateID(outID)) - } - } - } - w.Flush() - return nil -} - -func (cli *DockerCli) CmdRm(args ...string) error { - cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers") - v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated to the container") - link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container") - force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of running container") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - val := url.Values{} - if *v { - val.Set("v", "1") - } - if *link { - val.Set("link", "1") - } - if *force { - val.Set("force", "1") - } - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -// 'docker kill NAME' kills a running container -func (cli *DockerCli) CmdKill(args ...string) error { - cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)") - signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 
1 { - cmd.Usage() - return nil - } - - var encounteredError error - for _, name := range cmd.Args() { - if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to kill one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) CmdImport(args ...string) error { - cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() < 1 { - cmd.Usage() - return nil - } - - var src, repository, tag string - - if cmd.NArg() == 3 { - fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n") - src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) - } else { - src = cmd.Arg(0) - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) - } - v := url.Values{} - - if repository != "" { - //Check if the given image name can be resolved - if _, _, err := registry.ResolveRepositoryName(repository); err != nil { - return err - } - } - - v.Set("repo", repository) - v.Set("tag", tag) - v.Set("fromSrc", src) - - var in io.Reader - - if src == "-" { - in = cli.in - } - - return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil) -} - -func (cli *DockerCli) CmdPush(args ...string) error { - cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry") - if err := cmd.Parse(args); err != nil { - return nil - } - name := cmd.Arg(0) - - if name == "" { - cmd.Usage() - return nil - } - - cli.LoadConfigFile() - - // Resolve the Repository name from fqn to hostname + name - hostname, _, err := registry.ResolveRepositoryName(name) - if err != nil { - return err - } - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(hostname) - // If we're not using a custom registry, we know the restrictions - // applied to repository names and can warn the user in advance. - // Custom repositories can have different rules, and we must also - // allow pushing by image ID. - if len(strings.SplitN(name, "/", 2)) == 1 { - username := cli.configFile.Configs[registry.IndexServerAddress()].Username - if username == "" { - username = "" - } - return fmt.Errorf("You cannot push a \"root\" repository. 
Please rename your repository in / (ex: %s/%s)", username, name) - } - - v := url.Values{} - push := func(authConfig registry.AuthConfig) error { - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - - return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{ - "X-Registry-Auth": registryAuthHeader, - }) - } - - if err := push(authConfig); err != nil { - if strings.Contains(err.Error(), "Status 401") { - fmt.Fprintln(cli.out, "\nPlease login prior to push:") - if err := cli.CmdLogin(hostname); err != nil { - return err - } - authConfig := cli.configFile.ResolveAuthConfig(hostname) - return push(authConfig) - } - return err - } - return nil -} - -func (cli *DockerCli) CmdPull(args ...string) error { - cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry") - tag := cmd.String([]string{"#t", "#-tag"}, "", "Download tagged image in repository") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0)) - if *tag == "" { - *tag = parsedTag - } - - // Resolve the Repository name from fqn to hostname + name - hostname, _, err := registry.ResolveRepositoryName(remote) - if err != nil { - return err - } - - cli.LoadConfigFile() - - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(hostname) - v := url.Values{} - v.Set("fromImage", remote) - v.Set("tag", *tag) - - pull := func(authConfig registry.AuthConfig) error { - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - - return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{ - "X-Registry-Auth": registryAuthHeader, - }) - } - - if err := pull(authConfig); err != nil { - if strings.Contains(err.Error(), "Status 401") { - fmt.Fprintln(cli.out, "\nPlease login prior to pull:") - if err := cli.CmdLogin(hostname); err != nil { - return err - } - authConfig := cli.configFile.ResolveAuthConfig(hostname) - return pull(authConfig) - } - return err - } - - return nil -} - -func (cli *DockerCli) CmdImages(args ...string) error { - cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images") - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") - all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate images used to build)") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "Output graph in graphviz format") - flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "Output graph in tree format") - - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() > 1 { - cmd.Usage() - return nil - } - - filter := cmd.Arg(0) - - if *flViz || *flTree { - body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - var ( - printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string) - startImage *engine.Env - - roots = engine.NewTable("Created", outs.Len()) - byParent = make(map[string]*engine.Table) - ) - - for _, image := range 
outs.Data { - if image.Get("ParentId") == "" { - roots.Add(image) - } else { - if children, exists := byParent[image.Get("ParentId")]; exists { - children.Add(image) - } else { - byParent[image.Get("ParentId")] = engine.NewTable("Created", 1) - byParent[image.Get("ParentId")].Add(image) - } - } - - if filter != "" { - if filter == image.Get("Id") || filter == utils.TruncateID(image.Get("Id")) { - startImage = image - } - - for _, repotag := range image.GetList("RepoTags") { - if repotag == filter { - startImage = image - } - } - } - } - - if *flViz { - fmt.Fprintf(cli.out, "digraph docker {\n") - printNode = (*DockerCli).printVizNode - } else { - printNode = (*DockerCli).printTreeNode - } - - if startImage != nil { - root := engine.NewTable("Created", 1) - root.Add(startImage) - cli.WalkTree(*noTrunc, root, byParent, "", printNode) - } else if filter == "" { - cli.WalkTree(*noTrunc, roots, byParent, "", printNode) - } - if *flViz { - fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") - } - } else { - v := url.Values{} - if cmd.NArg() == 1 { - v.Set("filter", filter) - } - if *all { - v.Set("all", "1") - } - - body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) - - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") - } - - for _, out := range outs.Data { - for _, repotag := range out.GetList("RepoTags") { - - repo, tag := utils.ParseRepositoryTag(repotag) - outID := out.Get("Id") - if !*noTrunc { - outID = utils.TruncateID(outID) - } - - if !*quiet { - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize"))) - } else { - fmt.Fprintln(w, outID) - } - } - } - - if !*quiet { - w.Flush() - } - } - return nil -} - -func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { - length := images.Len() - if length > 1 { - for index, image := range images.Data { - if index+1 == length { - printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) - } - } else { - printNode(cli, noTrunc, image, prefix+"\u251C─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) - } - } - } - } else { - for _, image := range images.Data { - printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) - } - } - } -} - -func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { - var ( - imageID string - parentID string - ) - if noTrunc { - imageID = image.Get("Id") - parentID = image.Get("ParentId") - } else { - imageID = utils.TruncateID(image.Get("Id")) - parentID = utils.TruncateID(image.Get("ParentId")) - } - if parentID == "" { - fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) - } else { - fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) - } - if image.GetList("RepoTags")[0] != ":" { - fmt.Fprintf(cli.out, " \"%s\" 
[label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", - imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n")) - } -} - -func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { - var imageID string - if noTrunc { - imageID = image.Get("Id") - } else { - imageID = utils.TruncateID(image.Get("Id")) - } - - fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize"))) - if image.GetList("RepoTags")[0] != ":" { - fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) - } else { - fmt.Fprint(cli.out, "\n") - } -} - -func (cli *DockerCli) CmdPs(args ...string) error { - cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers") - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") - size := cmd.Bool([]string{"s", "-size"}, false, "Display sizes") - all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") - since := cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show only containers created since Id or Name, include non-running ones.") - before := cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name, include non-running ones.") - last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.") - - if err := cmd.Parse(args); err != nil { - return nil - } - v := url.Values{} - if *last == -1 && *nLatest { - *last = 1 - } - if *all { - v.Set("all", "1") - } - if *last != -1 { - v.Set("limit", strconv.Itoa(*last)) - } - if *since != "" { - v.Set("since", *since) - } - if *before != "" { - v.Set("before", *before) - } - if *size { - v.Set("size", "1") - } - - body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES") - if *size { - fmt.Fprintln(w, "\tSIZE") - } else { - fmt.Fprint(w, "\n") - } - } - - for _, out := range outs.Data { - var ( - outID = out.Get("Id") - outNames = out.GetList("Names") - ) - - if !*noTrunc { - outID = utils.TruncateID(outID) - } - - // Remove the leading / from the names - for i := 0; i < len(outNames); i++ { - outNames[i] = outNames[i][1:] - } - - if !*quiet { - var ( - outCommand = out.Get("Command") - ports = engine.NewTable("", 0) - ) - if !*noTrunc { - outCommand = utils.Trunc(outCommand, 20) - } - ports.ReadListFrom([]byte(out.Get("Ports"))) - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ",")) - if *size { - if out.GetInt("SizeRootFs") > 0 { - fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs"))) - } else { - fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw"))) - } - } else { - fmt.Fprint(w, "\n") - } - } else { - fmt.Fprintln(w, outID) - } 
- } - - if !*quiet { - w.Flush() - } - return nil -} - -func (cli *DockerCli) CmdCommit(args ...string) error { - cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes") - flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") - flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith \"") - flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: --run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`) - if err := cmd.Parse(args); err != nil { - return nil - } - - var name, repository, tag string - - if cmd.NArg() == 3 { - fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n") - name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) - } else { - name = cmd.Arg(0) - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) - } - - if name == "" { - cmd.Usage() - return nil - } - - //Check if the given image name can be resolved - if repository != "" { - if _, _, err := registry.ResolveRepositoryName(repository); err != nil { - return err - } - } - - v := url.Values{} - v.Set("container", name) - v.Set("repo", repository) - v.Set("tag", tag) - v.Set("comment", *flComment) - v.Set("author", *flAuthor) - var ( - config *runconfig.Config - env engine.Env - ) - if *flConfig != "" { - config = &runconfig.Config{} - if err := json.Unmarshal([]byte(*flConfig), config); err != nil { - return err - } - } - stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false) - if err != nil { - return err - } - if err := env.Decode(stream); err != nil { - return err - } - - fmt.Fprintf(cli.out, "%s\n", env.Get("Id")) - return nil -} - -func (cli *DockerCli) CmdEvents(args ...string) error { - cmd := cli.Subcmd("events", "[OPTIONS]", "Get real time events from the server") - since := cmd.String([]string{"#since", "-since"}, "", "Show previously created events and then stream.") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 0 { - cmd.Usage() - return nil - } - - v := url.Values{} - if *since != "" { - loc := time.FixedZone(time.Now().Zone()) - format := "2006-01-02 15:04:05 -0700 MST" - if len(*since) < len(format) { - format = format[:len(*since)] - } - - if t, err := time.ParseInLocation(format, *since, loc); err == nil { - v.Set("since", strconv.FormatInt(t.Unix(), 10)) - } else { - v.Set("since", *since) - } - } - - if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdExport(args ...string) error { - cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdDiff(args ...string) error { - cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false)) - - if err != nil { - return err - } - - outs := engine.NewTable("", 0) - if _, err := outs.ReadListFrom(body); 
err != nil { - return err - } - for _, change := range outs.Data { - var kind string - switch change.GetInt("Kind") { - case archive.ChangeModify: - kind = "C" - case archive.ChangeAdd: - kind = "A" - case archive.ChangeDelete: - kind = "D" - } - fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path")) - } - return nil -} - -func (cli *DockerCli) CmdLogs(args ...string) error { - cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container") - follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - name := cmd.Arg(0) - body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) - if err != nil { - return err - } - - container := &api.Container{} - err = json.Unmarshal(body, container) - if err != nil { - return err - } - - v := url.Values{} - v.Set("logs", "1") - v.Set("stdout", "1") - v.Set("stderr", "1") - if *follow && container.State.Running { - v.Set("stream", "1") - } - - if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdAttach(args ...string) error { - cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container") - noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin") - proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - name := cmd.Arg(0) - body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) - if err != nil { - return err - } - - container := &api.Container{} - err = json.Unmarshal(body, container) - if err != nil { - return err - } - - if !container.State.Running { - return fmt.Errorf("You cannot attach to a stopped container, start it first") - } - - if container.Config.Tty && cli.isTerminal { - if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { - utils.Debugf("Error monitoring TTY size: %s", err) - } - } - - var in io.ReadCloser - - v := url.Values{} - v.Set("stream", "1") - if !*noStdin && container.Config.OpenStdin { - v.Set("stdin", "1") - in = cli.in - } - v.Set("stdout", "1") - v.Set("stderr", "1") - - if *proxy && !container.Config.Tty { - sigc := cli.forwardAllSignals(cmd.Arg(0)) - defer signal.StopCatch(sigc) - } - - if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil { - return err - } - - _, status, err := getExitCode(cli, cmd.Arg(0)) - if err != nil { - return err - } - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - - return nil -} - -func (cli *DockerCli) CmdSearch(args ...string) error { - cmd := cli.Subcmd("search", "TERM", "Search the docker index for images") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds") - stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx stars") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - v := url.Values{} - v.Set("term", cmd.Arg(0)) - - body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), 
nil, true)) - - if err != nil { - return err - } - outs := engine.NewTable("star_count", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) - fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n") - for _, out := range outs.Data { - if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) { - continue - } - desc := strings.Replace(out.Get("description"), "\n", " ", -1) - desc = strings.Replace(desc, "\r", " ", -1) - if !*noTrunc && len(desc) > 45 { - desc = utils.Trunc(desc, 42) + "..." - } - fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count")) - if out.GetBool("is_official") { - fmt.Fprint(w, "[OK]") - - } - fmt.Fprint(w, "\t") - if out.GetBool("is_trusted") { - fmt.Fprint(w, "[OK]") - } - fmt.Fprint(w, "\n") - } - w.Flush() - return nil -} - -// Ports type - Used to parse multiple -p flags -type ports []int - -func (cli *DockerCli) CmdTag(args ...string) error { - cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository") - force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") - if err := cmd.Parse(args); err != nil { - return nil - } - if cmd.NArg() != 2 && cmd.NArg() != 3 { - cmd.Usage() - return nil - } - - var repository, tag string - - if cmd.NArg() == 3 { - fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]]\n") - repository, tag = cmd.Arg(1), cmd.Arg(2) - } else { - repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) - } - - v := url.Values{} - - //Check if the given image name can be resolved - if _, _, err := registry.ResolveRepositoryName(repository); err != nil { - return err - } - v.Set("repo", repository) - v.Set("tag", tag) - - if *force { - v.Set("force", "1") - } - - if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdRun(args ...string) error { - // FIXME: just use runconfig.Parse already - config, hostConfig, cmd, err := runconfig.ParseSubcommand(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil) - if err != nil { - return err - } - if config.Image == "" { - cmd.Usage() - return nil - } - - // Retrieve relevant client-side config - var ( - flName = cmd.Lookup("name") - flRm = cmd.Lookup("rm") - flSigProxy = cmd.Lookup("sig-proxy") - autoRemove, _ = strconv.ParseBool(flRm.Value.String()) - sigProxy, _ = strconv.ParseBool(flSigProxy.Value.String()) - ) - - // Disable sigProxy in case on TTY - if config.Tty { - sigProxy = false - } - - var containerIDFile io.WriteCloser - if len(hostConfig.ContainerIDFile) > 0 { - if _, err := os.Stat(hostConfig.ContainerIDFile); err == nil { - return fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile) - } - if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil { - return fmt.Errorf("Failed to create the container ID file: %s", err) - } - defer func() { - containerIDFile.Close() - var ( - cidFileInfo os.FileInfo - err error - ) - if cidFileInfo, err = os.Stat(hostConfig.ContainerIDFile); err != nil { - return - } - if cidFileInfo.Size() == 0 { - if err := os.Remove(hostConfig.ContainerIDFile); err != nil { - fmt.Printf("failed to remove CID file '%s': %s \n", 
hostConfig.ContainerIDFile, err) - } - } - }() - } - - containerValues := url.Values{} - if name := flName.Value.String(); name != "" { - containerValues.Set("name", name) - } - - //create the container - stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false) - //if image not found try to pull it - if statusCode == 404 { - fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) - - v := url.Values{} - repos, tag := utils.ParseRepositoryTag(config.Image) - v.Set("fromImage", repos) - v.Set("tag", tag) - - // Resolve the Repository name from fqn to hostname + name - hostname, _, err := registry.ResolveRepositoryName(repos) - if err != nil { - return err - } - - // Load the auth config file, to be able to pull the image - cli.LoadConfigFile() - - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(hostname) - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { - return err - } - if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false); err != nil { - return err - } - } else if err != nil { - return err - } - - var runResult engine.Env - if err := runResult.Decode(stream); err != nil { - return err - } - - for _, warning := range runResult.GetList("Warnings") { - fmt.Fprintf(cli.err, "WARNING: %s\n", warning) - } - - if len(hostConfig.ContainerIDFile) > 0 { - if _, err = containerIDFile.Write([]byte(runResult.Get("Id"))); err != nil { - return fmt.Errorf("Failed to write the container ID to the file: %s", err) - } - } - - if sigProxy { - sigc := cli.forwardAllSignals(runResult.Get("Id")) - defer signal.StopCatch(sigc) - } - - var ( - waitDisplayId chan struct{} - errCh chan error - ) - - if !config.AttachStdout && !config.AttachStderr { - // Make this asynchrone in order to let the client write to stdin before having to read the ID - waitDisplayId = make(chan struct{}) - go func() { - defer close(waitDisplayId) - fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id")) - }() - } - - // We need to instanciate the chan because the select needs it. It can - // be closed but can't be uninitialized. - hijacked := make(chan io.Closer) - - // Block the return until the chan gets closed - defer func() { - utils.Debugf("End of CmdRun(), Waiting for hijack to finish.") - if _, ok := <-hijacked; ok { - utils.Errorf("Hijack did not finish (chan still open)") - } - }() - - if config.AttachStdin || config.AttachStdout || config.AttachStderr { - var ( - out, stderr io.Writer - in io.ReadCloser - v = url.Values{} - ) - v.Set("stream", "1") - - if config.AttachStdin { - v.Set("stdin", "1") - in = cli.in - } - if config.AttachStdout { - v.Set("stdout", "1") - out = cli.out - } - if config.AttachStderr { - v.Set("stderr", "1") - if config.Tty { - stderr = cli.out - } else { - stderr = cli.err - } - } - - errCh = utils.Go(func() error { - return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked) - }) - } else { - close(hijacked) - } - - // Acknowledge the hijack before starting - select { - case closer := <-hijacked: - // Make sure that hijack gets closed when returning. (result - // in closing hijack chan and freeing server's goroutines. 
- if closer != nil { - defer closer.Close() - } - case err := <-errCh: - if err != nil { - utils.Debugf("Error hijack: %s", err) - return err - } - } - - //start the container - if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil { - return err - } - - if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal { - if err := cli.monitorTtySize(runResult.Get("Id")); err != nil { - utils.Errorf("Error monitoring TTY size: %s\n", err) - } - } - - if errCh != nil { - if err := <-errCh; err != nil { - utils.Debugf("Error hijack: %s", err) - return err - } - } - - // Detached mode: wait for the id to be displayed and return. - if !config.AttachStdout && !config.AttachStderr { - // Detached mode - <-waitDisplayId - return nil - } - - var status int - - // Attached mode - if autoRemove { - // Autoremove: wait for the container to finish, retrieve - // the exit code and remove the container - if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil { - return err - } - if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { - return err - } - if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil { - return err - } - } else { - if !config.Tty { - // In non-tty mode, we can't dettach, so we know we need to wait. - if status, err = waitForExit(cli, runResult.Get("Id")); err != nil { - return err - } - } else { - // In TTY mode, there is a race. If the process dies too slowly, the state can be update after the getExitCode call - // and result in a wrong exit code. - // No Autoremove: Simply retrieve the exit code - if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { - return err - } - } - } - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - return nil -} - -func (cli *DockerCli) CmdCp(args ...string) error { - cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH") - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 2 { - cmd.Usage() - return nil - } - - var copyData engine.Env - info := strings.Split(cmd.Arg(0), ":") - - if len(info) != 2 { - return fmt.Errorf("Error: Path not specified") - } - - copyData.Set("Resource", info[1]) - copyData.Set("HostPath", cmd.Arg(1)) - - stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false) - if stream != nil { - defer stream.Close() - } - if statusCode == 404 { - return fmt.Errorf("No such container: %v", info[0]) - } - if err != nil { - return err - } - - if statusCode == 200 { - if err := archive.Untar(stream, copyData.Get("HostPath"), nil); err != nil { - return err - } - } - return nil -} - -func (cli *DockerCli) CmdSave(args ...string) error { - cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout by default)") - outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT") - - if err := cmd.Parse(args); err != nil { - return err - } - - if cmd.NArg() != 1 { - cmd.Usage() - return nil - } - - var ( - output io.Writer = cli.out - err error - ) - if *outfile != "" { - output, err = os.Create(*outfile) - if err != nil { - return err - } - } - image := cmd.Arg(0) - if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdLoad(args 
...string) error { - cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN") - infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") - - if err := cmd.Parse(args); err != nil { - return err - } - - if cmd.NArg() != 0 { - cmd.Usage() - return nil - } - - var ( - input io.Reader = cli.in - err error - ) - if *infile != "" { - input, err = os.Open(*infile) - if err != nil { - return err - } - } - if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) dial() (net.Conn, error) { - if cli.tlsConfig != nil && cli.proto != "unix" { - return tls.Dial(cli.proto, cli.addr, cli.tlsConfig) - } - return net.Dial(cli.proto, cli.addr) -} - -func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if env, ok := data.(engine.Env); ok { - if err := env.Encode(params); err != nil { - return nil, -1, err - } - } else { - buf, err := json.Marshal(data) - if err != nil { - return nil, -1, err - } - if _, err := params.Write(buf); err != nil { - return nil, -1, err - } - } - } - // fixme: refactor client to support redirect - re := regexp.MustCompile("/+") - path = re.ReplaceAllString(path, "/") - - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params) - if err != nil { - return nil, -1, err - } - if passAuthInfo { - cli.LoadConfigFile() - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress()) - getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return nil, err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil - } - if headers, err := getHeaders(authConfig); err == nil && headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.Host = cli.addr - if data != nil { - req.Header.Set("Content-Type", "application/json") - } else if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - dial, err := cli.dial() - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return nil, -1, ErrConnectionRefused - } - return nil, -1, err - } - clientconn := httputil.NewClientConn(dial, nil) - resp, err := clientconn.Do(req) - if err != nil { - clientconn.Close() - if strings.Contains(err.Error(), "connection refused") { - return nil, -1, ErrConnectionRefused - } - return nil, -1, err - } - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, -1, err - } - if len(body) == 0 { - return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL) - } - return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body)) - } - - wrapper := utils.NewReadCloserWrapper(resp.Body, func() error { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return clientconn.Close() - }) - return wrapper, resp.StatusCode, nil -} - -func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers 
map[string][]string) error { - if (method == "POST" || method == "PUT") && in == nil { - in = bytes.NewReader([]byte{}) - } - - // fixme: refactor client to support redirect - re := regexp.MustCompile("/+") - path = re.ReplaceAllString(path, "/") - - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in) - if err != nil { - return err - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.Host = cli.addr - if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - - if headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - - dial, err := cli.dial() - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") - } - return err - } - clientconn := httputil.NewClientConn(dial, nil) - resp, err := clientconn.Do(req) - defer clientconn.Close() - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") - } - return err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - if len(body) == 0 { - return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode)) - } - return fmt.Errorf("Error: %s", bytes.TrimSpace(body)) - } - - if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") { - return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal) - } - if _, err := io.Copy(out, resp.Body); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error { - defer func() { - if started != nil { - close(started) - } - }() - // fixme: refactor client to support redirect - re := regexp.MustCompile("/+") - path = re.ReplaceAllString(path, "/") - - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil) - if err != nil { - return err - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.Header.Set("Content-Type", "plain/text") - req.Host = cli.addr - - dial, err := cli.dial() - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") - } - return err - } - clientconn := httputil.NewClientConn(dial, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - clientconn.Do(req) - - rwc, br := clientconn.Hijack() - defer rwc.Close() - - if started != nil { - started <- rwc - } - - var receiveStdout chan error - - var oldState *term.State - - if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" { - oldState, err = term.SetRawTerminal(cli.terminalFd) - if err != nil { - return err - } - defer term.RestoreTerminal(cli.terminalFd, oldState) - } - - if stdout != nil || stderr != nil { - receiveStdout = utils.Go(func() (err error) { - defer func() { - if in != nil { - if setRawTerminal && cli.isTerminal { - term.RestoreTerminal(cli.terminalFd, oldState) - } - // For some reason this Close call blocks on darwin.. - // As the client exists right after, simply discard the close - // until we find a better solution. 
- if goruntime.GOOS != "darwin" { - in.Close() - } - } - }() - - // When TTY is ON, use regular copy - if setRawTerminal { - _, err = io.Copy(stdout, br) - } else { - _, err = utils.StdCopy(stdout, stderr, br) - } - utils.Debugf("[hijack] End of stdout") - return err - }) - } - - sendStdin := utils.Go(func() error { - if in != nil { - io.Copy(rwc, in) - utils.Debugf("[hijack] End of stdin") - } - if tcpc, ok := rwc.(*net.TCPConn); ok { - if err := tcpc.CloseWrite(); err != nil { - utils.Debugf("Couldn't send EOF: %s\n", err) - } - } else if unixc, ok := rwc.(*net.UnixConn); ok { - if err := unixc.CloseWrite(); err != nil { - utils.Debugf("Couldn't send EOF: %s\n", err) - } - } - // Discard errors due to pipe interruption - return nil - }) - - if stdout != nil || stderr != nil { - if err := <-receiveStdout; err != nil { - utils.Debugf("Error receiveStdout: %s", err) - return err - } - } - - if !cli.isTerminal { - if err := <-sendStdin; err != nil { - utils.Debugf("Error sendStdin: %s", err) - return err - } - } - return nil - -} - -func (cli *DockerCli) getTtySize() (int, int) { - if !cli.isTerminal { - return 0, 0 - } - ws, err := term.GetWinsize(cli.terminalFd) - if err != nil { - utils.Debugf("Error getting size: %s", err) - if ws == nil { - return 0, 0 - } - } - return int(ws.Height), int(ws.Width) -} - -func (cli *DockerCli) resizeTty(id string) { - height, width := cli.getTtySize() - if height == 0 && width == 0 { - return - } - v := url.Values{} - v.Set("h", strconv.Itoa(height)) - v.Set("w", strconv.Itoa(width)) - if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil { - utils.Debugf("Error resize: %s", err) - } -} - -func (cli *DockerCli) monitorTtySize(id string) error { - cli.resizeTty(id) - - sigchan := make(chan os.Signal, 1) - gosignal.Notify(sigchan, syscall.SIGWINCH) - go func() { - for _ = range sigchan { - cli.resizeTty(id) - } - }() - return nil -} - -func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet { - flags := flag.NewFlagSet(name, flag.ContinueOnError) - flags.Usage = func() { - fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description) - flags.PrintDefaults() - os.Exit(2) - } - return flags -} - -func (cli *DockerCli) LoadConfigFile() (err error) { - cli.configFile, err = registry.LoadConfig(os.Getenv("HOME")) - if err != nil { - fmt.Fprintf(cli.err, "WARNING: %s\n", err) - } - return err -} - -func waitForExit(cli *DockerCli, containerId string) (int, error) { - stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false) - if err != nil { - return -1, err - } - - var out engine.Env - if err := out.Decode(stream); err != nil { - return -1, err - } - return out.GetInt("StatusCode"), nil -} - -// getExitCode perform an inspect on the container. It returns -// the running state and the exit code. -func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { - body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false)) - if err != nil { - // If we can't connect, then the daemon probably died. 
- if err != ErrConnectionRefused { - return false, -1, err - } - return false, -1, nil - } - c := &api.Container{} - if err := json.Unmarshal(body, c); err != nil { - return false, -1, err - } - return c.State.Running, c.State.ExitCode, nil -} - -func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) { - if stream != nil { - defer stream.Close() - } - if err != nil { - return nil, statusCode, err - } - body, err := ioutil.ReadAll(stream) - if err != nil { - return nil, -1, err - } - return body, statusCode, nil -} - -func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsConfig *tls.Config) *DockerCli { - var ( - isTerminal = false - terminalFd uintptr - ) - - if in != nil { - if file, ok := in.(*os.File); ok { - terminalFd = file.Fd() - isTerminal = term.IsTerminal(terminalFd) - } - } - - if err == nil { - err = out - } - return &DockerCli{ - proto: proto, - addr: addr, - in: in, - out: out, - err: err, - isTerminal: isTerminal, - terminalFd: terminalFd, - tlsConfig: tlsConfig, - } -} - -type DockerCli struct { - proto string - addr string - configFile *registry.ConfigFile - in io.ReadCloser - out io.Writer - err io.Writer - isTerminal bool - terminalFd uintptr - tlsConfig *tls.Config -} diff --git a/api/client/commands.go b/api/client/commands.go new file mode 100644 index 0000000000..49a5c008b3 --- /dev/null +++ b/api/client/commands.go @@ -0,0 +1,2098 @@ +package client + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path" + goruntime "runtime" + "strconv" + "strings" + "syscall" + "text/tabwriter" + "text/template" + "time" + + "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/archive" + "github.com/dotcloud/docker/dockerversion" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/nat" + "github.com/dotcloud/docker/pkg/signal" + "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/registry" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/utils" +) + +func (cli *DockerCli) CmdHelp(args ...string) error { + if len(args) > 0 { + method, exists := cli.getMethod(args[0]) + if !exists { + fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0]) + } else { + method("--help") + return nil + } + } + help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", api.DEFAULTUNIXSOCKET) + for _, command := range [][]string{ + {"attach", "Attach to a running container"}, + {"build", "Build a container from a Dockerfile"}, + {"commit", "Create a new image from a container's changes"}, + {"cp", "Copy files/folders from the containers filesystem to the host path"}, + {"diff", "Inspect changes on a container's filesystem"}, + {"events", "Get real time events from the server"}, + {"export", "Stream the contents of a container as a tar archive"}, + {"history", "Show the history of an image"}, + {"images", "List images"}, + {"import", "Create a new filesystem image from the contents of a tarball"}, + {"info", "Display system-wide information"}, + {"insert", "Insert a file in an image"}, + {"inspect", "Return low-level information on a container"}, + {"kill", "Kill a running container"}, + {"load", "Load an image from a tar archive"}, + {"login", "Register or Login to the docker registry server"}, + {"logs", "Fetch the logs of a 
container"}, + {"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"}, + {"ps", "List containers"}, + {"pull", "Pull an image or a repository from the docker registry server"}, + {"push", "Push an image or a repository to the docker registry server"}, + {"restart", "Restart a running container"}, + {"rm", "Remove one or more containers"}, + {"rmi", "Remove one or more images"}, + {"run", "Run a command in a new container"}, + {"save", "Save an image to a tar archive"}, + {"search", "Search for an image in the docker index"}, + {"start", "Start a stopped container"}, + {"stop", "Stop a running container"}, + {"tag", "Tag an image into a repository"}, + {"top", "Lookup the running processes of a container"}, + {"version", "Show the docker version information"}, + {"wait", "Block until a container stops, then print its exit code"}, + } { + help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) + } + fmt.Fprintf(cli.err, "%s\n", help) + return nil +} + +func (cli *DockerCli) CmdInsert(args ...string) error { + cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 3 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("url", cmd.Arg(1)) + v.Set("path", cmd.Arg(2)) + + return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil) +} + +func (cli *DockerCli) CmdBuild(args ...string) error { + cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH") + tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success") + suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") + noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") + rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var ( + context archive.Archive + isRemote bool + err error + ) + + _, err = exec.LookPath("git") + hasGit := err == nil + if cmd.Arg(0) == "-" { + // As a special case, 'docker build -' will build from an empty context with the + // contents of stdin as a Dockerfile + dockerfile, err := ioutil.ReadAll(cli.in) + if err != nil { + return err + } + context, err = archive.Generate("Dockerfile", string(dockerfile)) + } else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) { + isRemote = true + } else { + root := cmd.Arg(0) + if utils.IsGIT(root) { + remoteURL := cmd.Arg(0) + if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) { + remoteURL = "https://" + remoteURL + } + + root, err = ioutil.TempDir("", "docker-build-git") + if err != nil { + return err + } + defer os.RemoveAll(root) + + if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { + return fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + } + if _, err := os.Stat(root); err != nil { + return err + } + filename := path.Join(root, "Dockerfile") + if _, err = os.Stat(filename); os.IsNotExist(err) { + return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0)) + } + context, err = archive.Tar(root, archive.Uncompressed) + } + var body 
io.Reader + // Setup an upload progress bar + // FIXME: ProgressReader shouldn't be this annoying to use + if context != nil { + sf := utils.NewStreamFormatter(false) + body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Uploading context") + } + // Upload the build context + v := &url.Values{} + + //Check if the given image name can be resolved + if *tag != "" { + repository, _ := utils.ParseRepositoryTag(*tag) + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + } + + v.Set("t", *tag) + + if *suppressOutput { + v.Set("q", "1") + } + if isRemote { + v.Set("remote", cmd.Arg(0)) + } + if *noCache { + v.Set("nocache", "1") + } + if *rm { + v.Set("rm", "1") + } + + cli.LoadConfigFile() + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(cli.configFile) + if err != nil { + return err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + + if context != nil { + headers.Set("Content-Type", "application/tar") + } + err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers) + if jerr, ok := err.(*utils.JSONError); ok { + // If no error code is set, default to 1 + if jerr.Code == 0 { + jerr.Code = 1 + } + return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + } + return err +} + +// 'docker login': login / register a user to registry service. +func (cli *DockerCli) CmdLogin(args ...string) error { + cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.") + + var username, password, email string + + cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") + cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") + cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") + err := cmd.Parse(args) + if err != nil { + return nil + } + serverAddress := registry.IndexServerAddress() + if len(cmd.Args()) > 0 { + serverAddress = cmd.Arg(0) + } + + promptDefault := func(prompt string, configDefault string) { + if configDefault == "" { + fmt.Fprintf(cli.out, "%s: ", prompt) + } else { + fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) + } + } + + readInput := func(in io.Reader, out io.Writer) string { + reader := bufio.NewReader(in) + line, _, err := reader.ReadLine() + if err != nil { + fmt.Fprintln(out, err.Error()) + os.Exit(1) + } + return string(line) + } + + cli.LoadConfigFile() + authconfig, ok := cli.configFile.Configs[serverAddress] + if !ok { + authconfig = registry.AuthConfig{} + } + + if username == "" { + promptDefault("Username", authconfig.Username) + username = readInput(cli.in, cli.out) + if username == "" { + username = authconfig.Username + } + } + if username != authconfig.Username { + if password == "" { + oldState, _ := term.SaveState(cli.terminalFd) + fmt.Fprintf(cli.out, "Password: ") + term.DisableEcho(cli.terminalFd, oldState) + + password = readInput(cli.in, cli.out) + fmt.Fprint(cli.out, "\n") + + term.RestoreTerminal(cli.terminalFd, oldState) + if password == "" { + return fmt.Errorf("Error : Password Required") + } + } + + if email == "" { + promptDefault("Email", authconfig.Email) + email = readInput(cli.in, cli.out) + if email == "" { + email = authconfig.Email + } + } + } else { + password = authconfig.Password + email = authconfig.Email + } + authconfig.Username = username + authconfig.Password = password + authconfig.Email = email + authconfig.ServerAddress = 
serverAddress + cli.configFile.Configs[serverAddress] = authconfig + + stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false) + if statusCode == 401 { + delete(cli.configFile.Configs, serverAddress) + registry.SaveConfig(cli.configFile) + return err + } + if err != nil { + return err + } + var out2 engine.Env + err = out2.Decode(stream) + if err != nil { + cli.configFile, _ = registry.LoadConfig(os.Getenv("HOME")) + return err + } + registry.SaveConfig(cli.configFile) + if out2.Get("Status") != "" { + fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) + } + return nil +} + +// 'docker wait': block until a container stops +func (cli *DockerCli) CmdWait(args ...string) error { + cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + var encounteredError error + for _, name := range cmd.Args() { + status, err := waitForExit(cli, name) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to wait one or more containers") + } else { + fmt.Fprintf(cli.out, "%d\n", status) + } + } + return encounteredError +} + +// 'docker version': show version information +func (cli *DockerCli) CmdVersion(args ...string) error { + cmd := cli.Subcmd("version", "", "Show the docker version information.") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() > 0 { + cmd.Usage() + return nil + } + if dockerversion.VERSION != "" { + fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) + } + fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version()) + if dockerversion.GITCOMMIT != "" { + fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) + } + + body, _, err := readBody(cli.call("GET", "/version", nil, false)) + if err != nil { + return err + } + + out := engine.NewOutput() + remoteVersion, err := out.AddEnv() + if err != nil { + utils.Errorf("Error reading remote version: %s\n", err) + return err + } + if _, err := out.Write(body); err != nil { + utils.Errorf("Error reading remote version: %s\n", err) + return err + } + out.Close() + fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version")) + fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) + fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) + release := utils.GetReleaseVersion() + if release != "" { + fmt.Fprintf(cli.out, "Last stable version: %s", release) + if (dockerversion.VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(dockerversion.VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) { + fmt.Fprintf(cli.out, ", please update docker") + } + fmt.Fprintf(cli.out, "\n") + } + return nil +} + +// 'docker info': display system-wide information. 
+func (cli *DockerCli) CmdInfo(args ...string) error { + cmd := cli.Subcmd("info", "", "Display system-wide information") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() > 0 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/info", nil, false)) + if err != nil { + return err + } + + out := engine.NewOutput() + remoteInfo, err := out.AddEnv() + if err != nil { + return err + } + + if _, err := out.Write(body); err != nil { + utils.Errorf("Error reading remote info: %s\n", err) + return err + } + out.Close() + + fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) + fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) + fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) + var driverStatus [][2]string + if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { + return err + } + for _, pair := range driverStatus { + fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) + } + fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) + fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) + + if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) + fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") + fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) + fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) + fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) + + if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { + fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) + } + if initPath := remoteInfo.Get("InitPath"); initPath != "" { + fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) + } + } + + if len(remoteInfo.GetList("IndexServerAddress")) != 0 { + cli.LoadConfigFile() + u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username + if len(u) > 0 { + fmt.Fprintf(cli.out, "Username: %v\n", u) + fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) + } + } + if !remoteInfo.GetBool("MemoryLimit") { + fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") + } + if !remoteInfo.GetBool("SwapLimit") { + fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") + } + if !remoteInfo.GetBool("IPv4Forwarding") { + fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") + } + return nil +} + +func (cli *DockerCli) CmdStop(args ...string) error { + cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to stop one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdRestart(args ...string) error { + cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running 
container") + nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("t", strconv.Itoa(*nSeconds)) + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to restart one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { + sigc := make(chan os.Signal, 1) + signal.CatchAll(sigc) + go func() { + for s := range sigc { + if s == syscall.SIGCHLD { + continue + } + var sig string + for sigStr, sigN := range signal.SignalMap { + if sigN == s { + sig = sigStr + break + } + } + if sig == "" { + utils.Errorf("Unsupported signal: %d. Discarding.", s) + } + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { + utils.Debugf("Error sending signal: %s", err) + } + } + }() + return sigc +} + +func (cli *DockerCli) CmdStart(args ...string) error { + cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") + attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process") + openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + var cErr chan error + var tty bool + if *attach || *openStdin { + if cmd.NArg() > 1 { + return fmt.Errorf("You cannot start and attach multiple containers at once.") + } + + body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) + if err != nil { + return err + } + + container := &api.Container{} + err = json.Unmarshal(body, container) + if err != nil { + return err + } + + tty = container.Config.Tty + + if !container.Config.Tty { + sigc := cli.forwardAllSignals(cmd.Arg(0)) + defer signal.StopCatch(sigc) + } + + var in io.ReadCloser + + v := url.Values{} + v.Set("stream", "1") + if *openStdin && container.Config.OpenStdin { + v.Set("stdin", "1") + in = cli.in + } + v.Set("stdout", "1") + v.Set("stderr", "1") + + cErr = utils.Go(func() error { + return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil) + }) + } + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false)) + if err != nil { + if !*attach || !*openStdin { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to start one or more containers") + } + } else { + if !*attach || !*openStdin { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + } + if encounteredError != nil { + if *openStdin || *attach { + cli.in.Close() + <-cErr + } + return encounteredError + } + + if *openStdin || *attach { + if tty && cli.isTerminal { + if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { + utils.Errorf("Error monitoring TTY size: %s\n", err) + } + } + return <-cErr + } + return nil +} + +func (cli *DockerCli) CmdInspect(args ...string) 
error { + cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image") + tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + var tmpl *template.Template + if *tmplStr != "" { + var err error + if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { + fmt.Fprintf(cli.err, "Template parsing error: %v\n", err) + return &utils.StatusError{StatusCode: 64, + Status: "Template parsing error: " + err.Error()} + } + } + + indented := new(bytes.Buffer) + indented.WriteByte('[') + status := 0 + + for _, name := range cmd.Args() { + obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) + if err != nil { + obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false)) + if err != nil { + if strings.Contains(err.Error(), "No such") { + fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) + } else { + fmt.Fprintf(cli.err, "%s", err) + } + status = 1 + continue + } + } + + if tmpl == nil { + if err = json.Indent(indented, obj, "", " "); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + } else { + // Has template, will render + var value interface{} + if err := json.Unmarshal(obj, &value); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + if err := tmpl.Execute(cli.out, value); err != nil { + return err + } + cli.out.Write([]byte{'\n'}) + } + indented.WriteString(",") + } + + if indented.Len() > 1 { + // Remove trailing ',' + indented.Truncate(indented.Len() - 1) + } + indented.WriteByte(']') + + if tmpl == nil { + if _, err := io.Copy(cli.out, indented); err != nil { + return err + } + } + + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + return nil +} + +func (cli *DockerCli) CmdTop(args ...string) error { + cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() == 0 { + cmd.Usage() + return nil + } + val := url.Values{} + if cmd.NArg() > 1 { + val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) + } + + stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false) + if err != nil { + return err + } + var procs engine.Env + if err := procs.Decode(stream); err != nil { + return err + } + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) + processes := [][]string{} + if err := procs.GetJson("Processes", &processes); err != nil { + return err + } + for _, proc := range processes { + fmt.Fprintln(w, strings.Join(proc, "\t")) + } + w.Flush() + return nil +} + +func (cli *DockerCli) CmdPort(args ...string) error { + cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 2 { + cmd.Usage() + return nil + } + + var ( + port = cmd.Arg(1) + proto = "tcp" + parts = strings.SplitN(port, "/", 2) + container api.Container + ) + + if len(parts) == 2 && len(parts[1]) != 0 { + port = parts[0] + proto = parts[1] + } + body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)) + if err != nil { + return err + } + + err = json.Unmarshal(body, &container) 
+ if err != nil { + return err + } + + if frontends, exists := container.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { + for _, frontend := range frontends { + fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) + } + } else { + return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0)) + } + return nil +} + +// 'docker rmi IMAGE' removes all images with the name IMAGE +func (cli *DockerCli) CmdRmi(args ...string) error { + var ( + cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images") + force = cmd.Bool([]string{"f", "-force"}, false, "Force") + noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") + ) + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + if *force { + v.Set("force", "1") + } + if *noprune { + v.Set("noprune", "1") + } + + var encounteredError error + for _, name := range cmd.Args() { + body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to remove one or more images") + } else { + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to remove one or more images") + continue + } + for _, out := range outs.Data { + if out.Get("Deleted") != "" { + fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted")) + } else { + fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged")) + } + } + } + } + return encounteredError +} + +func (cli *DockerCli) CmdHistory(args ...string) error { + cmd := cli.Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false)) + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") + } + + for _, out := range outs.Data { + outID := out.Get("Id") + if !*quiet { + if *noTrunc { + fmt.Fprintf(w, "%s\t", outID) + } else { + fmt.Fprintf(w, "%s\t", utils.TruncateID(outID)) + } + + fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) + + if *noTrunc { + fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) + } else { + fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) + } + fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size"))) + } else { + if *noTrunc { + fmt.Fprintln(w, outID) + } else { + fmt.Fprintln(w, utils.TruncateID(outID)) + } + } + } + w.Flush() + return nil +} + +func (cli *DockerCli) CmdRm(args ...string) error { + cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers") + v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated to the container") + link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container") + force := 
cmd.Bool([]string{"f", "-force"}, false, "Force removal of running container") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + val := url.Values{} + if *v { + val.Set("v", "1") + } + if *link { + val.Set("link", "1") + } + if *force { + val.Set("force", "1") + } + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to remove one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +// 'docker kill NAME' kills a running container +func (cli *DockerCli) CmdKill(args ...string) error { + cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)") + signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + var encounteredError error + for _, name := range cmd.Args() { + if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to kill one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} + +func (cli *DockerCli) CmdImport(args ...string) error { + cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + var src, repository, tag string + + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n") + src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) + } else { + src = cmd.Arg(0) + repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) + } + v := url.Values{} + + if repository != "" { + //Check if the given image name can be resolved + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + } + + v.Set("repo", repository) + v.Set("tag", tag) + v.Set("fromSrc", src) + + var in io.Reader + + if src == "-" { + in = cli.in + } + + return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil) +} + +func (cli *DockerCli) CmdPush(args ...string) error { + cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry") + if err := cmd.Parse(args); err != nil { + return nil + } + name := cmd.Arg(0) + + if name == "" { + cmd.Usage() + return nil + } + + cli.LoadConfigFile() + + // Resolve the Repository name from fqn to hostname + name + hostname, _, err := registry.ResolveRepositoryName(name) + if err != nil { + return err + } + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(hostname) + // If we're not using a custom registry, we know the restrictions + // applied to repository names and can warn the user in advance. + // Custom repositories can have different rules, and we must also + // allow pushing by image ID. 
+ if len(strings.SplitN(name, "/", 2)) == 1 { + username := cli.configFile.Configs[registry.IndexServerAddress()].Username + if username == "" { + username = "" + } + return fmt.Errorf("You cannot push a \"root\" repository. Please rename your repository in / (ex: %s/%s)", username, name) + } + + v := url.Values{} + push := func(authConfig registry.AuthConfig) error { + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + + return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{ + "X-Registry-Auth": registryAuthHeader, + }) + } + + if err := push(authConfig); err != nil { + if strings.Contains(err.Error(), "Status 401") { + fmt.Fprintln(cli.out, "\nPlease login prior to push:") + if err := cli.CmdLogin(hostname); err != nil { + return err + } + authConfig := cli.configFile.ResolveAuthConfig(hostname) + return push(authConfig) + } + return err + } + return nil +} + +func (cli *DockerCli) CmdPull(args ...string) error { + cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry") + tag := cmd.String([]string{"#t", "#-tag"}, "", "Download tagged image in repository") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0)) + if *tag == "" { + *tag = parsedTag + } + + // Resolve the Repository name from fqn to hostname + name + hostname, _, err := registry.ResolveRepositoryName(remote) + if err != nil { + return err + } + + cli.LoadConfigFile() + + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(hostname) + v := url.Values{} + v.Set("fromImage", remote) + v.Set("tag", *tag) + + pull := func(authConfig registry.AuthConfig) error { + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + + return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{ + "X-Registry-Auth": registryAuthHeader, + }) + } + + if err := pull(authConfig); err != nil { + if strings.Contains(err.Error(), "Status 401") { + fmt.Fprintln(cli.out, "\nPlease login prior to pull:") + if err := cli.CmdLogin(hostname); err != nil { + return err + } + authConfig := cli.configFile.ResolveAuthConfig(hostname) + return pull(authConfig) + } + return err + } + + return nil +} + +func (cli *DockerCli) CmdImages(args ...string) error { + cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate images used to build)") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "Output graph in graphviz format") + flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "Output graph in tree format") + + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() > 1 { + cmd.Usage() + return nil + } + + filter := cmd.Arg(0) + + if *flViz || *flTree { + body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false)) + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + var ( + 
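+			// printNode is chosen below: graphviz output when the viz flag is set, tree output otherwise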
printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string) + startImage *engine.Env + + roots = engine.NewTable("Created", outs.Len()) + byParent = make(map[string]*engine.Table) + ) + + for _, image := range outs.Data { + if image.Get("ParentId") == "" { + roots.Add(image) + } else { + if children, exists := byParent[image.Get("ParentId")]; exists { + children.Add(image) + } else { + byParent[image.Get("ParentId")] = engine.NewTable("Created", 1) + byParent[image.Get("ParentId")].Add(image) + } + } + + if filter != "" { + if filter == image.Get("Id") || filter == utils.TruncateID(image.Get("Id")) { + startImage = image + } + + for _, repotag := range image.GetList("RepoTags") { + if repotag == filter { + startImage = image + } + } + } + } + + if *flViz { + fmt.Fprintf(cli.out, "digraph docker {\n") + printNode = (*DockerCli).printVizNode + } else { + printNode = (*DockerCli).printTreeNode + } + + if startImage != nil { + root := engine.NewTable("Created", 1) + root.Add(startImage) + cli.WalkTree(*noTrunc, root, byParent, "", printNode) + } else if filter == "" { + cli.WalkTree(*noTrunc, roots, byParent, "", printNode) + } + if *flViz { + fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") + } + } else { + v := url.Values{} + if cmd.NArg() == 1 { + v.Set("filter", filter) + } + if *all { + v.Set("all", "1") + } + + body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) + + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") + } + + for _, out := range outs.Data { + for _, repotag := range out.GetList("RepoTags") { + + repo, tag := utils.ParseRepositoryTag(repotag) + outID := out.Get("Id") + if !*noTrunc { + outID = utils.TruncateID(outID) + } + + if !*quiet { + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize"))) + } else { + fmt.Fprintln(w, outID) + } + } + } + + if !*quiet { + w.Flush() + } + } + return nil +} + +func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { + length := images.Len() + if length > 1 { + for index, image := range images.Data { + if index+1 == length { + printNode(cli, noTrunc, image, prefix+"└─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) + } + } else { + printNode(cli, noTrunc, image, prefix+"\u251C─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) + } + } + } + } else { + for _, image := range images.Data { + printNode(cli, noTrunc, image, prefix+"└─") + if subimages, exists := byParent[image.Get("Id")]; exists { + cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) + } + } + } +} + +func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { + var ( + imageID string + parentID string + ) + if noTrunc { + imageID = image.Get("Id") + parentID = image.Get("ParentId") + } else { + imageID = utils.TruncateID(image.Get("Id")) + parentID = utils.TruncateID(image.Get("ParentId")) + } + if parentID == "" { + 
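+		// a root image has no parent, so link it to the invisible 'base' node emitted at the end of the digraph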
fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) + } else { + fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) + } + if image.GetList("RepoTags")[0] != ":" { + fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", + imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n")) + } +} + +func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { + var imageID string + if noTrunc { + imageID = image.Get("Id") + } else { + imageID = utils.TruncateID(image.Get("Id")) + } + + fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize"))) + if image.GetList("RepoTags")[0] != ":" { + fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) + } else { + fmt.Fprint(cli.out, "\n") + } +} + +func (cli *DockerCli) CmdPs(args ...string) error { + cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers") + quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") + size := cmd.Bool([]string{"s", "-size"}, false, "Display sizes") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.") + since := cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show only containers created since Id or Name, include non-running ones.") + before := cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name, include non-running ones.") + last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.") + + if err := cmd.Parse(args); err != nil { + return nil + } + v := url.Values{} + if *last == -1 && *nLatest { + *last = 1 + } + if *all { + v.Set("all", "1") + } + if *last != -1 { + v.Set("limit", strconv.Itoa(*last)) + } + if *since != "" { + v.Set("since", *since) + } + if *before != "" { + v.Set("before", *before) + } + if *size { + v.Set("size", "1") + } + + body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false)) + if err != nil { + return err + } + + outs := engine.NewTable("Created", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES") + if *size { + fmt.Fprintln(w, "\tSIZE") + } else { + fmt.Fprint(w, "\n") + } + } + + for _, out := range outs.Data { + var ( + outID = out.Get("Id") + outNames = out.GetList("Names") + ) + + if !*noTrunc { + outID = utils.TruncateID(outID) + } + + // Remove the leading / from the names + for i := 0; i < len(outNames); i++ { + outNames[i] = outNames[i][1:] + } + + if !*quiet { + var ( + outCommand = out.Get("Command") + ports = engine.NewTable("", 0) + ) + if !*noTrunc { + outCommand = utils.Trunc(outCommand, 20) + } + ports.ReadListFrom([]byte(out.Get("Ports"))) + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ",")) + if *size { + if out.GetInt("SizeRootFs") > 0 { + fmt.Fprintf(w, "%s (virtual %s)\n", 
utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs"))) + } else { + fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw"))) + } + } else { + fmt.Fprint(w, "\n") + } + } else { + fmt.Fprintln(w, outID) + } + } + + if !*quiet { + w.Flush() + } + return nil +} + +func (cli *DockerCli) CmdCommit(args ...string) error { + cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes") + flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") + flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith \"") + flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: --run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`) + if err := cmd.Parse(args); err != nil { + return nil + } + + var name, repository, tag string + + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n") + name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2) + } else { + name = cmd.Arg(0) + repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) + } + + if name == "" { + cmd.Usage() + return nil + } + + //Check if the given image name can be resolved + if repository != "" { + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + } + + v := url.Values{} + v.Set("container", name) + v.Set("repo", repository) + v.Set("tag", tag) + v.Set("comment", *flComment) + v.Set("author", *flAuthor) + var ( + config *runconfig.Config + env engine.Env + ) + if *flConfig != "" { + config = &runconfig.Config{} + if err := json.Unmarshal([]byte(*flConfig), config); err != nil { + return err + } + } + stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false) + if err != nil { + return err + } + if err := env.Decode(stream); err != nil { + return err + } + + fmt.Fprintf(cli.out, "%s\n", env.Get("Id")) + return nil +} + +func (cli *DockerCli) CmdEvents(args ...string) error { + cmd := cli.Subcmd("events", "[OPTIONS]", "Get real time events from the server") + since := cmd.String([]string{"#since", "-since"}, "", "Show previously created events and then stream.") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 0 { + cmd.Usage() + return nil + } + + v := url.Values{} + if *since != "" { + loc := time.FixedZone(time.Now().Zone()) + format := "2006-01-02 15:04:05 -0700 MST" + if len(*since) < len(format) { + format = format[:len(*since)] + } + + if t, err := time.ParseInLocation(format, *since, loc); err == nil { + v.Set("since", strconv.FormatInt(t.Unix(), 10)) + } else { + v.Set("since", *since) + } + } + + if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdExport(args ...string) error { + cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdDiff(args ...string) error { + cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() 
!= 1 { + cmd.Usage() + return nil + } + + body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false)) + + if err != nil { + return err + } + + outs := engine.NewTable("", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + for _, change := range outs.Data { + var kind string + switch change.GetInt("Kind") { + case archive.ChangeModify: + kind = "C" + case archive.ChangeAdd: + kind = "A" + case archive.ChangeDelete: + kind = "D" + } + fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path")) + } + return nil +} + +func (cli *DockerCli) CmdLogs(args ...string) error { + cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container") + follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + name := cmd.Arg(0) + body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) + if err != nil { + return err + } + + container := &api.Container{} + err = json.Unmarshal(body, container) + if err != nil { + return err + } + + v := url.Values{} + v.Set("logs", "1") + v.Set("stdout", "1") + v.Set("stderr", "1") + if *follow && container.State.Running { + v.Set("stream", "1") + } + + if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdAttach(args ...string) error { + cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container") + noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin") + proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + name := cmd.Arg(0) + body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) + if err != nil { + return err + } + + container := &api.Container{} + err = json.Unmarshal(body, container) + if err != nil { + return err + } + + if !container.State.Running { + return fmt.Errorf("You cannot attach to a stopped container, start it first") + } + + if container.Config.Tty && cli.isTerminal { + if err := cli.monitorTtySize(cmd.Arg(0)); err != nil { + utils.Debugf("Error monitoring TTY size: %s", err) + } + } + + var in io.ReadCloser + + v := url.Values{} + v.Set("stream", "1") + if !*noStdin && container.Config.OpenStdin { + v.Set("stdin", "1") + in = cli.in + } + v.Set("stdout", "1") + v.Set("stderr", "1") + + if *proxy && !container.Config.Tty { + sigc := cli.forwardAllSignals(cmd.Arg(0)) + defer signal.StopCatch(sigc) + } + + if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil { + return err + } + + _, status, err := getExitCode(cli, cmd.Arg(0)) + if err != nil { + return err + } + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + + return nil +} + +func (cli *DockerCli) CmdSearch(args ...string) error { + cmd := cli.Subcmd("search", "TERM", "Search the docker index for images") + noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") + trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds") + stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx 
stars") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + v := url.Values{} + v.Set("term", cmd.Arg(0)) + + body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, true)) + + if err != nil { + return err + } + outs := engine.NewTable("star_count", 0) + if _, err := outs.ReadListFrom(body); err != nil { + return err + } + w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) + fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n") + for _, out := range outs.Data { + if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) { + continue + } + desc := strings.Replace(out.Get("description"), "\n", " ", -1) + desc = strings.Replace(desc, "\r", " ", -1) + if !*noTrunc && len(desc) > 45 { + desc = utils.Trunc(desc, 42) + "..." + } + fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count")) + if out.GetBool("is_official") { + fmt.Fprint(w, "[OK]") + + } + fmt.Fprint(w, "\t") + if out.GetBool("is_trusted") { + fmt.Fprint(w, "[OK]") + } + fmt.Fprint(w, "\n") + } + w.Flush() + return nil +} + +// Ports type - Used to parse multiple -p flags +type ports []int + +func (cli *DockerCli) CmdTag(args ...string) error { + cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository") + force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") + if err := cmd.Parse(args); err != nil { + return nil + } + if cmd.NArg() != 2 && cmd.NArg() != 3 { + cmd.Usage() + return nil + } + + var repository, tag string + + if cmd.NArg() == 3 { + fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]]\n") + repository, tag = cmd.Arg(1), cmd.Arg(2) + } else { + repository, tag = utils.ParseRepositoryTag(cmd.Arg(1)) + } + + v := url.Values{} + + //Check if the given image name can be resolved + if _, _, err := registry.ResolveRepositoryName(repository); err != nil { + return err + } + v.Set("repo", repository) + v.Set("tag", tag) + + if *force { + v.Set("force", "1") + } + + if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdRun(args ...string) error { + // FIXME: just use runconfig.Parse already + config, hostConfig, cmd, err := runconfig.ParseSubcommand(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil) + if err != nil { + return err + } + if config.Image == "" { + cmd.Usage() + return nil + } + + // Retrieve relevant client-side config + var ( + flName = cmd.Lookup("name") + flRm = cmd.Lookup("rm") + flSigProxy = cmd.Lookup("sig-proxy") + autoRemove, _ = strconv.ParseBool(flRm.Value.String()) + sigProxy, _ = strconv.ParseBool(flSigProxy.Value.String()) + ) + + // Disable sigProxy in case on TTY + if config.Tty { + sigProxy = false + } + + var containerIDFile io.WriteCloser + if len(hostConfig.ContainerIDFile) > 0 { + if _, err := os.Stat(hostConfig.ContainerIDFile); err == nil { + return fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile) + } + if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil { + return fmt.Errorf("Failed to create the container ID file: %s", err) + } + defer func() { + containerIDFile.Close() + var ( + cidFileInfo os.FileInfo + err error + ) + if 
cidFileInfo, err = os.Stat(hostConfig.ContainerIDFile); err != nil { + return + } + if cidFileInfo.Size() == 0 { + if err := os.Remove(hostConfig.ContainerIDFile); err != nil { + fmt.Printf("failed to remove CID file '%s': %s \n", hostConfig.ContainerIDFile, err) + } + } + }() + } + + containerValues := url.Values{} + if name := flName.Value.String(); name != "" { + containerValues.Set("name", name) + } + + //create the container + stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false) + //if image not found try to pull it + if statusCode == 404 { + fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image) + + v := url.Values{} + repos, tag := utils.ParseRepositoryTag(config.Image) + v.Set("fromImage", repos) + v.Set("tag", tag) + + // Resolve the Repository name from fqn to hostname + name + hostname, _, err := registry.ResolveRepositoryName(repos) + if err != nil { + return err + } + + // Load the auth config file, to be able to pull the image + cli.LoadConfigFile() + + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(hostname) + buf, err := json.Marshal(authConfig) + if err != nil { + return err + } + + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { + return err + } + if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false); err != nil { + return err + } + } else if err != nil { + return err + } + + var runResult engine.Env + if err := runResult.Decode(stream); err != nil { + return err + } + + for _, warning := range runResult.GetList("Warnings") { + fmt.Fprintf(cli.err, "WARNING: %s\n", warning) + } + + if len(hostConfig.ContainerIDFile) > 0 { + if _, err = containerIDFile.Write([]byte(runResult.Get("Id"))); err != nil { + return fmt.Errorf("Failed to write the container ID to the file: %s", err) + } + } + + if sigProxy { + sigc := cli.forwardAllSignals(runResult.Get("Id")) + defer signal.StopCatch(sigc) + } + + var ( + waitDisplayId chan struct{} + errCh chan error + ) + + if !config.AttachStdout && !config.AttachStderr { + // Make this asynchrone in order to let the client write to stdin before having to read the ID + waitDisplayId = make(chan struct{}) + go func() { + defer close(waitDisplayId) + fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id")) + }() + } + + // We need to instanciate the chan because the select needs it. It can + // be closed but can't be uninitialized. 
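+	// (a receive from a nil channel blocks forever, so the select below would never be able to proceed)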
+ hijacked := make(chan io.Closer) + + // Block the return until the chan gets closed + defer func() { + utils.Debugf("End of CmdRun(), Waiting for hijack to finish.") + if _, ok := <-hijacked; ok { + utils.Errorf("Hijack did not finish (chan still open)") + } + }() + + if config.AttachStdin || config.AttachStdout || config.AttachStderr { + var ( + out, stderr io.Writer + in io.ReadCloser + v = url.Values{} + ) + v.Set("stream", "1") + + if config.AttachStdin { + v.Set("stdin", "1") + in = cli.in + } + if config.AttachStdout { + v.Set("stdout", "1") + out = cli.out + } + if config.AttachStderr { + v.Set("stderr", "1") + if config.Tty { + stderr = cli.out + } else { + stderr = cli.err + } + } + + errCh = utils.Go(func() error { + return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked) + }) + } else { + close(hijacked) + } + + // Acknowledge the hijack before starting + select { + case closer := <-hijacked: + // Make sure that hijack gets closed when returning. (result + // in closing hijack chan and freeing server's goroutines. + if closer != nil { + defer closer.Close() + } + case err := <-errCh: + if err != nil { + utils.Debugf("Error hijack: %s", err) + return err + } + } + + //start the container + if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil { + return err + } + + if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal { + if err := cli.monitorTtySize(runResult.Get("Id")); err != nil { + utils.Errorf("Error monitoring TTY size: %s\n", err) + } + } + + if errCh != nil { + if err := <-errCh; err != nil { + utils.Debugf("Error hijack: %s", err) + return err + } + } + + // Detached mode: wait for the id to be displayed and return. + if !config.AttachStdout && !config.AttachStderr { + // Detached mode + <-waitDisplayId + return nil + } + + var status int + + // Attached mode + if autoRemove { + // Autoremove: wait for the container to finish, retrieve + // the exit code and remove the container + if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil { + return err + } + if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { + return err + } + if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil { + return err + } + } else { + if !config.Tty { + // In non-tty mode, we can't dettach, so we know we need to wait. + if status, err = waitForExit(cli, runResult.Get("Id")); err != nil { + return err + } + } else { + // In TTY mode, there is a race. If the process dies too slowly, the state can be update after the getExitCode call + // and result in a wrong exit code. 
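+			// (the attach can return before the daemon has recorded the final state, so the exit code read here may be stale)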
+ // No Autoremove: Simply retrieve the exit code + if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil { + return err + } + } + } + if status != 0 { + return &utils.StatusError{StatusCode: status} + } + return nil +} + +func (cli *DockerCli) CmdCp(args ...string) error { + cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH") + if err := cmd.Parse(args); err != nil { + return nil + } + + if cmd.NArg() != 2 { + cmd.Usage() + return nil + } + + var copyData engine.Env + info := strings.Split(cmd.Arg(0), ":") + + if len(info) != 2 { + return fmt.Errorf("Error: Path not specified") + } + + copyData.Set("Resource", info[1]) + copyData.Set("HostPath", cmd.Arg(1)) + + stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false) + if stream != nil { + defer stream.Close() + } + if statusCode == 404 { + return fmt.Errorf("No such container: %v", info[0]) + } + if err != nil { + return err + } + + if statusCode == 200 { + if err := archive.Untar(stream, copyData.Get("HostPath"), nil); err != nil { + return err + } + } + return nil +} + +func (cli *DockerCli) CmdSave(args ...string) error { + cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout by default)") + outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT") + + if err := cmd.Parse(args); err != nil { + return err + } + + if cmd.NArg() != 1 { + cmd.Usage() + return nil + } + + var ( + output io.Writer = cli.out + err error + ) + if *outfile != "" { + output, err = os.Create(*outfile) + if err != nil { + return err + } + } + image := cmd.Arg(0) + if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) CmdLoad(args ...string) error { + cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN") + infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") + + if err := cmd.Parse(args); err != nil { + return err + } + + if cmd.NArg() != 0 { + cmd.Usage() + return nil + } + + var ( + input io.Reader = cli.in + err error + ) + if *infile != "" { + input, err = os.Open(*infile) + if err != nil { + return err + } + } + if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil { + return err + } + return nil +} diff --git a/api/client/utils.go b/api/client/utils.go new file mode 100644 index 0000000000..c2c7b1780a --- /dev/null +++ b/api/client/utils.go @@ -0,0 +1,390 @@ +package client + +import ( + "bytes" + "crypto/tls" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + gosignal "os/signal" + "regexp" + goruntime "runtime" + "strconv" + "strings" + "syscall" + + "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/dockerversion" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/pkg/term" + "github.com/dotcloud/docker/registry" + "github.com/dotcloud/docker/utils" +) + +var ( + ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. 
Is 'docker -d' running on this host?") +) + +func (cli *DockerCli) dial() (net.Conn, error) { + if cli.tlsConfig != nil && cli.proto != "unix" { + return tls.Dial(cli.proto, cli.addr, cli.tlsConfig) + } + return net.Dial(cli.proto, cli.addr) +} + +func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if env, ok := data.(engine.Env); ok { + if err := env.Encode(params); err != nil { + return nil, -1, err + } + } else { + buf, err := json.Marshal(data) + if err != nil { + return nil, -1, err + } + if _, err := params.Write(buf); err != nil { + return nil, -1, err + } + } + } + // fixme: refactor client to support redirect + re := regexp.MustCompile("/+") + path = re.ReplaceAllString(path, "/") + + req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params) + if err != nil { + return nil, -1, err + } + if passAuthInfo { + cli.LoadConfigFile() + // Resolve the Auth config relevant for this server + authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress()) + getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) { + buf, err := json.Marshal(authConfig) + if err != nil { + return nil, err + } + registryAuthHeader := []string{ + base64.URLEncoding.EncodeToString(buf), + } + return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil + } + if headers, err := getHeaders(authConfig); err == nil && headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) + req.Host = cli.addr + if data != nil { + req.Header.Set("Content-Type", "application/json") + } else if method == "POST" { + req.Header.Set("Content-Type", "plain/text") + } + dial, err := cli.dial() + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return nil, -1, ErrConnectionRefused + } + return nil, -1, err + } + clientconn := httputil.NewClientConn(dial, nil) + resp, err := clientconn.Do(req) + if err != nil { + clientconn.Close() + if strings.Contains(err.Error(), "connection refused") { + return nil, -1, ErrConnectionRefused + } + return nil, -1, err + } + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, -1, err + } + if len(body) == 0 { + return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL) + } + return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body)) + } + + wrapper := utils.NewReadCloserWrapper(resp.Body, func() error { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return clientconn.Close() + }) + return wrapper, resp.StatusCode, nil +} + +func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error { + if (method == "POST" || method == "PUT") && in == nil { + in = bytes.NewReader([]byte{}) + } + + // fixme: refactor client to support redirect + re := regexp.MustCompile("/+") + path = re.ReplaceAllString(path, "/") + + req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in) + if err != nil { + return err + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) + req.Host = cli.addr + if method == "POST" { + req.Header.Set("Content-Type", "plain/text") + } + + if 
headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + + dial, err := cli.dial() + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") + } + return err + } + clientconn := httputil.NewClientConn(dial, nil) + resp, err := clientconn.Do(req) + defer clientconn.Close() + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") + } + return err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + if len(body) == 0 { + return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode)) + } + return fmt.Errorf("Error: %s", bytes.TrimSpace(body)) + } + + if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") { + return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal) + } + if _, err := io.Copy(out, resp.Body); err != nil { + return err + } + return nil +} + +func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error { + defer func() { + if started != nil { + close(started) + } + }() + // fixme: refactor client to support redirect + re := regexp.MustCompile("/+") + path = re.ReplaceAllString(path, "/") + + req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil) + if err != nil { + return err + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) + req.Header.Set("Content-Type", "plain/text") + req.Host = cli.addr + + dial, err := cli.dial() + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") + } + return err + } + clientconn := httputil.NewClientConn(dial, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + clientconn.Do(req) + + rwc, br := clientconn.Hijack() + defer rwc.Close() + + if started != nil { + started <- rwc + } + + var receiveStdout chan error + + var oldState *term.State + + if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" { + oldState, err = term.SetRawTerminal(cli.terminalFd) + if err != nil { + return err + } + defer term.RestoreTerminal(cli.terminalFd, oldState) + } + + if stdout != nil || stderr != nil { + receiveStdout = utils.Go(func() (err error) { + defer func() { + if in != nil { + if setRawTerminal && cli.isTerminal { + term.RestoreTerminal(cli.terminalFd, oldState) + } + // For some reason this Close call blocks on darwin.. + // As the client exists right after, simply discard the close + // until we find a better solution. 
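+					// (the descriptor is reclaimed by the OS when the client process exits shortly after)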
+ if goruntime.GOOS != "darwin" { + in.Close() + } + } + }() + + // When TTY is ON, use regular copy + if setRawTerminal { + _, err = io.Copy(stdout, br) + } else { + _, err = utils.StdCopy(stdout, stderr, br) + } + utils.Debugf("[hijack] End of stdout") + return err + }) + } + + sendStdin := utils.Go(func() error { + if in != nil { + io.Copy(rwc, in) + utils.Debugf("[hijack] End of stdin") + } + if tcpc, ok := rwc.(*net.TCPConn); ok { + if err := tcpc.CloseWrite(); err != nil { + utils.Errorf("Couldn't send EOF: %s\n", err) + } + } else if unixc, ok := rwc.(*net.UnixConn); ok { + if err := unixc.CloseWrite(); err != nil { + utils.Errorf("Couldn't send EOF: %s\n", err) + } + } + // Discard errors due to pipe interruption + return nil + }) + + if stdout != nil || stderr != nil { + if err := <-receiveStdout; err != nil { + utils.Errorf("Error receiveStdout: %s", err) + return err + } + } + + if !cli.isTerminal { + if err := <-sendStdin; err != nil { + utils.Errorf("Error sendStdin: %s", err) + return err + } + } + return nil + +} + +func (cli *DockerCli) resizeTty(id string) { + height, width := cli.getTtySize() + if height == 0 && width == 0 { + return + } + v := url.Values{} + v.Set("h", strconv.Itoa(height)) + v.Set("w", strconv.Itoa(width)) + if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil { + utils.Errorf("Error resize: %s", err) + } +} + +func waitForExit(cli *DockerCli, containerId string) (int, error) { + stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false) + if err != nil { + return -1, err + } + + var out engine.Env + if err := out.Decode(stream); err != nil { + return -1, err + } + return out.GetInt("StatusCode"), nil +} + +// getExitCode perform an inspect on the container. It returns +// the running state and the exit code. +func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { + body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false)) + if err != nil { + // If we can't connect, then the daemon probably died. 
+ if err != ErrConnectionRefused { + return false, -1, err + } + return false, -1, nil + } + c := &api.Container{} + if err := json.Unmarshal(body, c); err != nil { + return false, -1, err + } + return c.State.Running, c.State.ExitCode, nil +} + +func (cli *DockerCli) monitorTtySize(id string) error { + cli.resizeTty(id) + + sigchan := make(chan os.Signal, 1) + gosignal.Notify(sigchan, syscall.SIGWINCH) + go func() { + for _ = range sigchan { + cli.resizeTty(id) + } + }() + return nil +} + +func (cli *DockerCli) getTtySize() (int, int) { + if !cli.isTerminal { + return 0, 0 + } + ws, err := term.GetWinsize(cli.terminalFd) + if err != nil { + utils.Errorf("Error getting size: %s", err) + if ws == nil { + return 0, 0 + } + } + return int(ws.Height), int(ws.Width) +} + +func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) { + if stream != nil { + defer stream.Close() + } + if err != nil { + return nil, statusCode, err + } + body, err := ioutil.ReadAll(stream) + if err != nil { + return nil, -1, err + } + return body, statusCode, nil +} -- cgit v1.2.1 From 185b040e49aa9ab74f8d9254c7ff86b2891e3708 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 28 Mar 2014 23:36:33 +0000 Subject: fix tests Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/api_unit_test.go | 46 --------------------- api/server/server_unit_test.go | 52 ++++++++++++++++++++++++ integration/api_test.go | 90 +++++++++++++++++++++--------------------- integration/commands_test.go | 50 +++++++++++------------ integration/https_test.go | 8 ++-- 5 files changed, 127 insertions(+), 119 deletions(-) create mode 100644 api/server/server_unit_test.go diff --git a/api/api_unit_test.go b/api/api_unit_test.go index 2b3e76e75c..678331d369 100644 --- a/api/api_unit_test.go +++ b/api/api_unit_test.go @@ -1,9 +1,6 @@ package api import ( - "fmt" - "net/http" - "net/http/httptest" "testing" ) @@ -20,46 +17,3 @@ func TestJsonContentType(t *testing.T) { t.Fail() } } - -func TestGetBoolParam(t *testing.T) { - if ret, err := getBoolParam("true"); err != nil || !ret { - t.Fatalf("true -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("True"); err != nil || !ret { - t.Fatalf("True -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("1"); err != nil || !ret { - t.Fatalf("1 -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam(""); err != nil || ret { - t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("false"); err != nil || ret { - t.Fatalf("false -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("0"); err != nil || ret { - t.Fatalf("0 -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("faux"); err == nil || ret { - t.Fatalf("faux -> false, err | got %t %s", ret, err) - } -} - -func TesthttpError(t *testing.T) { - r := httptest.NewRecorder() - - httpError(r, fmt.Errorf("No such method")) - if r.Code != http.StatusNotFound { - t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) - } - - httpError(r, fmt.Errorf("This accound hasn't been activated")) - if r.Code != http.StatusForbidden { - t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) - } - - httpError(r, fmt.Errorf("Some error")) - if r.Code != http.StatusInternalServerError { - t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) - } -} diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go new file mode 100644 index 0000000000..5ea5af411c --- /dev/null 
+++ b/api/server/server_unit_test.go @@ -0,0 +1,52 @@ +package server + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" +) + +func TestGetBoolParam(t *testing.T) { + if ret, err := getBoolParam("true"); err != nil || !ret { + t.Fatalf("true -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("True"); err != nil || !ret { + t.Fatalf("True -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("1"); err != nil || !ret { + t.Fatalf("1 -> true, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam(""); err != nil || ret { + t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("false"); err != nil || ret { + t.Fatalf("false -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("0"); err != nil || ret { + t.Fatalf("0 -> false, nil | got %t %s", ret, err) + } + if ret, err := getBoolParam("faux"); err == nil || ret { + t.Fatalf("faux -> false, err | got %t %s", ret, err) + + } +} + +func TesthttpError(t *testing.T) { + r := httptest.NewRecorder() + + httpError(r, fmt.Errorf("No such method")) + if r.Code != http.StatusNotFound { + t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) + } + + httpError(r, fmt.Errorf("This accound hasn't been activated")) + if r.Code != http.StatusForbidden { + t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) + } + + httpError(r, fmt.Errorf("Some error")) + if r.Code != http.StatusInternalServerError { + t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) + } +} diff --git a/integration/api_test.go b/integration/api_test.go index bac4efea53..d08617ea69 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -5,14 +5,6 @@ import ( "bytes" "encoding/json" "fmt" - "github.com/dotcloud/docker/api" - "github.com/dotcloud/docker/dockerversion" - "github.com/dotcloud/docker/engine" - "github.com/dotcloud/docker/image" - "github.com/dotcloud/docker/runconfig" - "github.com/dotcloud/docker/runtime" - "github.com/dotcloud/docker/utils" - "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "io" "io/ioutil" "net" @@ -21,6 +13,16 @@ import ( "strings" "testing" "time" + + "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/api/server" + "github.com/dotcloud/docker/dockerversion" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/image" + "github.com/dotcloud/docker/runconfig" + "github.com/dotcloud/docker/runtime" + "github.com/dotcloud/docker/utils" + "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) func TestGetVersion(t *testing.T) { @@ -35,7 +37,7 @@ func TestGetVersion(t *testing.T) { t.Fatal(err) } // FIXME getting the version should require an actual running Server - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -77,7 +79,7 @@ func TestGetInfo(t *testing.T) { } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -125,7 +127,7 @@ func TestGetEvents(t *testing.T) { r := httptest.NewRecorder() setTimeout(t, "", 500*time.Millisecond, func() { - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } 
assertHttpNotError(r, t) @@ -166,7 +168,7 @@ func TestGetImagesJSON(t *testing.T) { r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -201,7 +203,7 @@ func TestGetImagesJSON(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil { t.Fatal(err) } assertHttpNotError(r2, t) @@ -234,7 +236,7 @@ func TestGetImagesJSON(t *testing.T) { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r3, req3); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r3, req3); err != nil { t.Fatal(err) } assertHttpNotError(r3, t) @@ -259,7 +261,7 @@ func TestGetImagesHistory(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -283,7 +285,7 @@ func TestGetImagesByName(t *testing.T) { } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -327,7 +329,7 @@ func TestGetContainersJSON(t *testing.T) { } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -363,7 +365,7 @@ func TestGetContainersExport(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -401,7 +403,7 @@ func TestSaveImageAndThenLoad(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { @@ -415,7 +417,7 @@ func TestSaveImageAndThenLoad(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { @@ -428,7 +430,7 @@ func TestSaveImageAndThenLoad(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusNotFound { @@ -441,7 +443,7 @@ func TestSaveImageAndThenLoad(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { @@ -454,7 +456,7 @@ func TestSaveImageAndThenLoad(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusOK { @@ -481,7 +483,7 @@ func TestGetContainersChanges(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, 
api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -548,7 +550,7 @@ func TestGetContainersTop(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -596,7 +598,7 @@ func TestGetContainersByName(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -631,7 +633,7 @@ func TestPostCommit(t *testing.T) { } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -667,7 +669,7 @@ func TestPostContainersCreate(t *testing.T) { } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -716,7 +718,7 @@ func TestPostContainersKill(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -755,7 +757,7 @@ func TestPostContainersRestart(t *testing.T) { t.Fatal(err) } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -797,7 +799,7 @@ func TestPostContainersStart(t *testing.T) { req.Header.Set("Content-Type", "application/json") r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -814,7 +816,7 @@ func TestPostContainersStart(t *testing.T) { } r = httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } // Starting an already started container should return an error @@ -852,7 +854,7 @@ func TestRunErrorBindMountRootSource(t *testing.T) { req.Header.Set("Content-Type", "application/json") r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusInternalServerError { @@ -889,7 +891,7 @@ func TestPostContainersStop(t *testing.T) { t.Fatal(err) } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -921,7 +923,7 @@ func TestPostContainersWait(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -979,7 +981,7 @@ func TestPostContainersAttach(t *testing.T) { t.Fatal(err) } - if err := api.ServeRequest(eng, 
api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r.ResponseRecorder, t) @@ -1057,7 +1059,7 @@ func TestPostContainersAttachStderr(t *testing.T) { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r.ResponseRecorder, t) @@ -1114,7 +1116,7 @@ func TestDeleteContainers(t *testing.T) { t.Fatal(err) } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -1133,7 +1135,7 @@ func TestOptionsRoute(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -1152,7 +1154,7 @@ func TestGetEnabledCors(t *testing.T) { if err != nil { t.Fatal(err) } - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -1199,7 +1201,7 @@ func TestDeleteImages(t *testing.T) { } r := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusConflict { @@ -1212,7 +1214,7 @@ func TestDeleteImages(t *testing.T) { } r2 := httptest.NewRecorder() - if err := api.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil { t.Fatal(err) } assertHttpNotError(r2, t) @@ -1264,7 +1266,7 @@ func TestPostContainersCopy(t *testing.T) { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) @@ -1312,7 +1314,7 @@ func TestPostContainersCopyWhenContainerNotFound(t *testing.T) { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") - if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { + if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } if r.Code != http.StatusNotFound { diff --git a/integration/commands_test.go b/integration/commands_test.go index 7de7a227ea..2dc0ff384a 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -3,7 +3,7 @@ package docker import ( "bufio" "fmt" - "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/api/client" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/pkg/term" @@ -121,7 +121,7 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error func TestRunHostname(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -166,7 +166,7 @@ func TestRunHostname(t *testing.T) { func TestRunWorkdir(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, 
stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -211,7 +211,7 @@ func TestRunWorkdir(t *testing.T) { func TestRunWorkdirExists(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -255,7 +255,7 @@ func TestRunWorkdirExists(t *testing.T) { // TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected func TestRunWorkdirExistsAndIsFile(t *testing.T) { - cli := api.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -275,7 +275,7 @@ func TestRunExit(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -328,7 +328,7 @@ func TestRunDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -374,7 +374,7 @@ func TestRunDisconnectTty(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c1 := make(chan struct{}) @@ -426,7 +426,7 @@ func TestRunAttachStdin(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -490,7 +490,7 @@ func TestRunDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -537,7 +537,7 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) ch := make(chan struct{}) @@ -570,7 +570,7 @@ func TestAttachDetach(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli = 
client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) ch = make(chan struct{}) go func() { @@ -618,7 +618,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) // Discard the CmdRun output @@ -636,7 +636,7 @@ func TestAttachDetachTruncatedID(t *testing.T) { stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) ch := make(chan struct{}) go func() { @@ -683,7 +683,7 @@ func TestAttachDisconnect(t *testing.T) { stdin, stdinPipe := io.Pipe() stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) go func() { @@ -752,7 +752,7 @@ func TestAttachDisconnect(t *testing.T) { func TestRunAutoRemove(t *testing.T) { t.Skip("Fixme. Skipping test for now, race condition") stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -788,7 +788,7 @@ func TestRunAutoRemove(t *testing.T) { func TestCmdLogs(t *testing.T) { t.Skip("Test not impemented") - cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil { @@ -806,7 +806,7 @@ func TestCmdLogs(t *testing.T) { // Expected behaviour: error out when attempting to bind mount non-existing source paths func TestRunErrorBindNonExistingSource(t *testing.T) { - cli := api.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -826,7 +826,7 @@ func TestRunErrorBindNonExistingSource(t *testing.T) { func TestImagesViz(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) image := buildTestImages(t, globalEngine) @@ -876,7 +876,7 @@ func TestImagesViz(t *testing.T) { func TestImagesTree(t *testing.T) { stdout, stdoutPipe := io.Pipe() - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) image := buildTestImages(t, globalEngine) @@ -959,7 +959,7 @@ func TestRunCidFileCheckIDLength(t *testing.T) { } tmpCidFile := path.Join(tmpDir, "cid") - cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, 
testDaemonAddr, nil) + cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -1008,7 +1008,7 @@ func TestRunCidFileCleanupIfEmpty(t *testing.T) { } tmpCidFile := path.Join(tmpDir, "cid") - cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) c := make(chan struct{}) @@ -1038,7 +1038,7 @@ func TestContainerOrphaning(t *testing.T) { defer os.RemoveAll(tmpDir) // setup a CLI and server - cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) defer cleanup(globalEngine, t) srv := mkServerFromEngine(globalEngine, t) @@ -1098,8 +1098,8 @@ func TestCmdKill(t *testing.T) { var ( stdin, stdinPipe = io.Pipe() stdout, stdoutPipe = io.Pipe() - cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) - cli2 = api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) + cli2 = client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil) ) defer cleanup(globalEngine, t) diff --git a/integration/https_test.go b/integration/https_test.go index a1c855e1a9..0b4abea881 100644 --- a/integration/https_test.go +++ b/integration/https_test.go @@ -3,7 +3,7 @@ package docker import ( "crypto/tls" "crypto/x509" - "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/api/client" "io/ioutil" "testing" "time" @@ -35,7 +35,7 @@ func getTlsConfig(certFile, keyFile string, t *testing.T) *tls.Config { // TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint func TestHttpsInfo(t *testing.T) { - cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, + cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t)) setTimeout(t, "Reading command output time out", 10*time.Second, func() { @@ -48,7 +48,7 @@ func TestHttpsInfo(t *testing.T) { // TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint // by using a rogue client certificate and checks that it fails with the expected error. 
func TestHttpsInfoRogueCert(t *testing.T) { - cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, + cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonHttpsAddr, getTlsConfig("client-rogue-cert.pem", "client-rogue-key.pem", t)) setTimeout(t, "Reading command output time out", 10*time.Second, func() { @@ -65,7 +65,7 @@ func TestHttpsInfoRogueCert(t *testing.T) { // TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint // which provides a rogue server certificate and checks that it fails with the expected error func TestHttpsInfoRogueServerCert(t *testing.T) { - cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, + cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonRogueHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t)) setTimeout(t, "Reading command output time out", 10*time.Second, func() { -- cgit v1.2.1 From 7697aad7b0537dade1d598cca5b7b1b420fa47c9 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 31 Mar 2014 18:08:46 +0000 Subject: apply Reduce error level for harmless errors Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/client/utils.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/api/client/utils.go b/api/client/utils.go index c2c7b1780a..4ef09ba783 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -276,11 +276,11 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea } if tcpc, ok := rwc.(*net.TCPConn); ok { if err := tcpc.CloseWrite(); err != nil { - utils.Errorf("Couldn't send EOF: %s\n", err) + utils.Debugf("Couldn't send EOF: %s\n", err) } } else if unixc, ok := rwc.(*net.UnixConn); ok { if err := unixc.CloseWrite(); err != nil { - utils.Errorf("Couldn't send EOF: %s\n", err) + utils.Debugf("Couldn't send EOF: %s\n", err) } } // Discard errors due to pipe interruption @@ -289,14 +289,14 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea if stdout != nil || stderr != nil { if err := <-receiveStdout; err != nil { - utils.Errorf("Error receiveStdout: %s", err) + utils.Debugf("Error receiveStdout: %s", err) return err } } if !cli.isTerminal { if err := <-sendStdin; err != nil { - utils.Errorf("Error sendStdin: %s", err) + utils.Debugf("Error sendStdin: %s", err) return err } } @@ -313,7 +313,7 @@ func (cli *DockerCli) resizeTty(id string) { v.Set("h", strconv.Itoa(height)) v.Set("w", strconv.Itoa(width)) if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil { - utils.Errorf("Error resize: %s", err) + utils.Debugf("Error resize: %s", err) } } @@ -367,7 +367,7 @@ func (cli *DockerCli) getTtySize() (int, int) { } ws, err := term.GetWinsize(cli.terminalFd) if err != nil { - utils.Errorf("Error getting size: %s", err) + utils.Debugf("Error getting size: %s", err) if ws == nil { return 0, 0 } -- cgit v1.2.1 From 51d9a04f17d1c8c6c1a069227c1417b20283dda2 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 31 Mar 2014 18:21:07 +0000 Subject: Make sure to set error regardless of attach or stdin Fixes #3364 Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- api/client/commands.go | 2 +- integration-cli/docker_cli_start_test.go | 34 ++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 integration-cli/docker_cli_start_test.go diff --git
a/api/client/commands.go b/api/client/commands.go index 49a5c008b3..49cd07700f 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -607,8 +607,8 @@ func (cli *DockerCli) CmdStart(args ...string) error { if err != nil { if !*attach || !*openStdin { fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to start one or more containers") } + encounteredError = fmt.Errorf("Error: failed to start one or more containers") } else { if !*attach || !*openStdin { fmt.Fprintf(cli.out, "%s\n", name) diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go new file mode 100644 index 0000000000..c3059a66c4 --- /dev/null +++ b/integration-cli/docker_cli_start_test.go @@ -0,0 +1,34 @@ +package main + +import ( + "os/exec" + "testing" +) + +// Regression test for #3364 +func TestDockerStartWithPortCollision(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--name", "fail", "-p", "25:25", "busybox", "true") + out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal(out, stderr, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--name", "conflict", "-dti", "-p", "25:25", "busybox", "sh") + out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal(out, stderr, err) + } + + startCmd := exec.Command(dockerBinary, "start", "-a", "fail") + out, stderr, exitCode, err = runCommandWithStdoutStderr(startCmd) + if err != nil && exitCode != 1 { + t.Fatal(out, err) + } + + killCmd := exec.Command(dockerBinary, "kill", "conflict") + runCommand(killCmd) + + deleteAllContainers() + + logDone("start - -a=true error on port use") +} -- cgit v1.2.1 From cd51ac92bdf1ce0a1245f5b4565995631512ba64 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Sun, 16 Feb 2014 19:24:22 -0500 Subject: support for `docker run` environment variables file Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- docs/sources/reference/commandline/cli.rst | 12 ++++++++ pkg/opts/envfile.go | 44 ++++++++++++++++++++++++++++++ runconfig/parse.go | 14 +++++++++- 3 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 pkg/opts/envfile.go diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 3d2aac5233..6a473ec461 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1152,6 +1152,7 @@ image is removed. --cidfile="": Write the container ID to the file -d, --detach=false: Detached mode: Run container in the background, print new container id -e, --env=[]: Set environment variables + --envfile="": Read in a line delimited file of ENV variables -h, --hostname="": Container host name -i, --interactive=false: Keep stdin open even if not attached --privileged=false: Give extended privileges to this container @@ -1284,6 +1285,17 @@ This exposes port ``80`` of the container for use within a link without publishing the port to the host system's interfaces. :ref:`port_redirection` explains in detail how to manipulate ports in Docker. +.. code-block:: bash + + $ sudo docker run -e MYVAR1 --env MYVAR2=foo --envfile ./env.list ubuntu bash + +This sets environmental variables to the container. For illustration all three +flags are shown here. Where -e and --env can be repeated, take an environment +variable and value, or if no "=" is provided, then that variable's current +value is passed through (i.e. 
$MYVAR1 from the host is set to $MYVAR1 in the +container). The --envfile flag takes a filename as an argument and expects each +line to be a VAR=VAL format. + .. code-block:: bash $ sudo docker run --name console -t -i ubuntu bash diff --git a/pkg/opts/envfile.go b/pkg/opts/envfile.go new file mode 100644 index 0000000000..004c320803 --- /dev/null +++ b/pkg/opts/envfile.go @@ -0,0 +1,44 @@ +package opts + +import ( + "bufio" + "bytes" + "io" + "os" +) + +/* +Read in a line delimited file with environment variables enumerated +*/ +func ParseEnvFile(filename string) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + var ( + lines []string = []string{} + line, chunk []byte + ) + reader := bufio.NewReader(fh) + line, isPrefix, err := reader.ReadLine() + + for err == nil { + if isPrefix { + chunk = append(chunk, line...) + } else if !isPrefix && len(chunk) > 0 { + line = chunk + chunk = []byte{} + } else { + chunk = []byte{} + } + + if !isPrefix && len(line) > 0 && bytes.Contains(line, []byte("=")) { + lines = append(lines, string(line)) + } + line, isPrefix, err = reader.ReadLine() + } + if err != nil && err != io.EOF { + return []string{}, err + } + return lines, nil +} diff --git a/runconfig/parse.go b/runconfig/parse.go index 43aecdb753..aa1ed6d174 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -68,6 +68,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flLabelOptions = cmd.String([]string{"Z", "-label"}, "", "Options to pass to underlying labeling system") + flEnvFile = cmd.String([]string{"#envfile", "-envfile"}, "", "Read in a line delimited file of ENV variables") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") @@ -199,6 +200,17 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf } } + // collect all the environment variables for the container + envVariables := []string{} + envVariables = append(envVariables, flEnv.GetAll()...) + parsedVars, err := opts.ParseEnvFile(*flEnvFile) + if err != nil { + return nil, nil, cmd, err + } + envVariables = append(envVariables, parsedVars...) 
+ // boo, there's no debug output for docker run + //utils.Debugf("Environment variables for the container: %#v", envVariables) + config := &Config{ Hostname: hostname, Domainname: domainname, @@ -213,7 +225,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf AttachStdin: flAttach.Get("stdin"), AttachStdout: flAttach.Get("stdout"), AttachStderr: flAttach.Get("stderr"), - Env: flEnv.GetAll(), + Env: envVariables, Cmd: runCmd, Dns: flDns.GetAll(), DnsSearch: flDnsSearch.GetAll(), -- cgit v1.2.1 From bfaa917a966fab1e2c92e70617320957a8d8b43b Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 18 Feb 2014 14:22:46 -0500 Subject: pkg/opts: Close the file handle Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- pkg/opts/envfile.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/opts/envfile.go b/pkg/opts/envfile.go index 004c320803..65495a1585 100644 --- a/pkg/opts/envfile.go +++ b/pkg/opts/envfile.go @@ -15,6 +15,8 @@ func ParseEnvFile(filename string) ([]string, error) { if err != nil { return []string{}, err } + defer fh.Close() + var ( lines []string = []string{} line, chunk []byte -- cgit v1.2.1 From 586e6c5eb9cd21a95d4bbba051249c4b05b2011e Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 18 Feb 2014 15:41:28 -0500 Subject: --env-file instead of --envfile Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- docs/sources/reference/commandline/cli.rst | 2 +- runconfig/parse.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 6a473ec461..d8cf5965da 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1152,7 +1152,7 @@ image is removed. 
--cidfile="": Write the container ID to the file -d, --detach=false: Detached mode: Run container in the background, print new container id -e, --env=[]: Set environment variables - --envfile="": Read in a line delimited file of ENV variables + --env-file="": Read in a line delimited file of ENV variables -h, --hostname="": Container host name -i, --interactive=false: Keep stdin open even if not attached --privileged=false: Give extended privileges to this container diff --git a/runconfig/parse.go b/runconfig/parse.go index aa1ed6d174..6ebe9f2bc3 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -68,7 +68,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flLabelOptions = cmd.String([]string{"Z", "-label"}, "", "Options to pass to underlying labeling system") - flEnvFile = cmd.String([]string{"#envfile", "-envfile"}, "", "Read in a line delimited file of ENV variables") + flEnvFile = cmd.String([]string{"#env-file", "-env-file"}, "", "Read in a line delimited file of ENV variables") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") -- cgit v1.2.1 From 4e0014f582617960bad513518d292b64da866f73 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 20 Feb 2014 15:34:45 -0500 Subject: go fmt Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- pkg/opts/envfile.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/opts/envfile.go b/pkg/opts/envfile.go index 65495a1585..7c69f5d799 100644 --- a/pkg/opts/envfile.go +++ b/pkg/opts/envfile.go @@ -15,7 +15,7 @@ func ParseEnvFile(filename string) ([]string, error) { if err != nil { return []string{}, err } - defer fh.Close() + defer fh.Close() var ( lines []string = []string{} -- cgit v1.2.1 From bcba5246f993a74eece36aa4b25df5b5e486e15b Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 26 Feb 2014 16:05:25 -0500 Subject: Fixing doc references to --env-file Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- docs/sources/reference/commandline/cli.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index d8cf5965da..4e697766bc 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1287,13 +1287,13 @@ explains in detail how to manipulate ports in Docker. .. code-block:: bash - $ sudo docker run -e MYVAR1 --env MYVAR2=foo --envfile ./env.list ubuntu bash + $ sudo docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash This sets environmental variables to the container. For illustration all three flags are shown here. Where -e and --env can be repeated, take an environment variable and value, or if no "=" is provided, then that variable's current value is passed through (i.e. $MYVAR1 from the host is set to $MYVAR1 in the -container). The --envfile flag takes a filename as an argument and expects each +container). The --env-file flag takes a filename as an argument and expects each line to be a VAR=VAL format. .. 
code-block:: bash -- cgit v1.2.1 From acf5289dddfbbd69e19714f53575eb5088c618f7 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 6 Mar 2014 12:55:47 -0500 Subject: make the --env-file accept multiple flags Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- runconfig/parse.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/runconfig/parse.go b/runconfig/parse.go index 6ebe9f2bc3..e6e8b120d8 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -52,6 +52,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flVolumesFrom opts.ListOpts flLxcOpts opts.ListOpts flDriverOpts opts.ListOpts + flEnvFile opts.ListOpts flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id") @@ -68,7 +69,6 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flLabelOptions = cmd.String([]string{"Z", "-label"}, "", "Options to pass to underlying labeling system") - flEnvFile = cmd.String([]string{"#env-file", "-env-file"}, "", "Read in a line delimited file of ENV variables") // For documentation purpose _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)") @@ -79,6 +79,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)") cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)") cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") + cmd.Var(&flEnvFile, []string{"#env-file", "-env-file"}, "Read in a line delimited file of ENV variables") cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") @@ -203,11 +204,13 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf // collect all the environment variables for the container envVariables := []string{} envVariables = append(envVariables, flEnv.GetAll()...) - parsedVars, err := opts.ParseEnvFile(*flEnvFile) - if err != nil { - return nil, nil, cmd, err + for _, ef := range flEnvFile.GetAll() { + parsedVars, err := opts.ParseEnvFile(ef) + if err != nil { + return nil, nil, cmd, err + } + envVariables = append(envVariables, parsedVars...) } - envVariables = append(envVariables, parsedVars...) // boo, there's no debug output for docker run //utils.Debugf("Environment variables for the container: %#v", envVariables) -- cgit v1.2.1 From 33dde1f7288781a3a36951309f963e6040a1a0f5 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Thu, 6 Mar 2014 17:49:47 -0500 Subject: env-file: update functionality and docs Multiple flags allowed. Order prescribed. Examples provided. Multiline accounted for. 
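The prescribed order means the container's effective environment resolves duplicates in favor of the later entry, so a -e/--env value effectively wins over one read from --env-file (as the TEST_FOO example in the documentation below shows). A minimal standalone Go sketch of that merge, not part of the patch itself; mergeEnv and resolve are hypothetical helper names, not Docker APIs:

package main

import (
	"fmt"
	"strings"
)

// mergeEnv appends file-sourced variables first, then -e/--env flag values,
// mirroring the order prescribed by this patch series.
func mergeEnv(fileVars, flagVars []string) []string {
	merged := append([]string{}, fileVars...)
	return append(merged, flagVars...)
}

// resolve returns the effective value of key: the last KEY=VAL entry wins.
func resolve(env []string, key string) string {
	val := ""
	for _, kv := range env {
		if parts := strings.SplitN(kv, "=", 2); len(parts) == 2 && parts[0] == key {
			val = parts[1]
		}
	}
	return val
}

func main() {
	env := mergeEnv([]string{"TEST_FOO=BAR"}, []string{"TEST_FOO=This is a test"})
	fmt.Println(resolve(env, "TEST_FOO")) // prints "This is a test": the flag overrides the file value
}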
Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- docs/sources/reference/commandline/cli.rst | 47 ++++++++++++++++++++++++++---- pkg/opts/envfile.go | 41 ++++++++++---------------- runconfig/parse.go | 3 +- 3 files changed, 59 insertions(+), 32 deletions(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 4e697766bc..989ea38798 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1289,12 +1289,47 @@ explains in detail how to manipulate ports in Docker. $ sudo docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash -This sets environmental variables to the container. For illustration all three -flags are shown here. Where -e and --env can be repeated, take an environment -variable and value, or if no "=" is provided, then that variable's current -value is passed through (i.e. $MYVAR1 from the host is set to $MYVAR1 in the -container). The --env-file flag takes a filename as an argument and expects each -line to be a VAR=VAL format. +This sets environmental variables in the container. For illustration all three +flags are shown here. Where ``-e``, ``--env`` take an environment variable and +value, or if no "=" is provided, then that variable's current value is passed +through (i.e. $MYVAR1 from the host is set to $MYVAR1 in the container). All +three flags, ``-e``, ``--env`` and ``--env-file`` can be repeated. + +Regardless of the order of these three flags, the ``--env-file`` are processed +first, and then ``-e``/``--env`` flags. So that they can override VAR as needed. + +.. code-block:: bash + + $ cat ./env.list + TEST_FOO=BAR + $ sudo docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO + TEST_FOO=This is a test + +The ``--env-file`` flag takes a filename as an argument and expects each line +to be in the VAR=VAL format. The VAL is Unquoted, so if you need a multi-line +value, then use `\n` escape characters inside of a double quoted VAL. Single +quotes are literal. An example of a file passed with ``--env-file`` + +.. code-block:: bash + + $ cat ./env.list + TEST_FOO=BAR + TEST_APP_DEST_HOST=10.10.0.127 + TEST_APP_DEST_PORT=8888 + TEST_SOME_MULTILINE_VAR="this is first line\nthis is second line" + TEST_SOME_LITERAL_VAR='this\nwill\nall\nbe\none\nline' + $ sudo docker run --env-file ./env.list busybox env + HOME=/ + PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + HOSTNAME=215d54a814bc + TEST_FOO=BAR + TEST_APP_DEST_HOST=10.10.0.127 + TEST_APP_DEST_PORT=8888 + TEST_SOME_MULTILINE_VAR=this is first line + this is second line + TEST_SOME_LITERAL_VAR='this\nwill\nall\nbe\none\nline' + container=lxc + .. code-block:: bash diff --git a/pkg/opts/envfile.go b/pkg/opts/envfile.go index 7c69f5d799..d9afdd952c 100644 --- a/pkg/opts/envfile.go +++ b/pkg/opts/envfile.go @@ -2,9 +2,10 @@ package opts import ( "bufio" - "bytes" - "io" + "fmt" "os" + "strconv" + "strings" ) /* @@ -17,30 +18,20 @@ func ParseEnvFile(filename string) ([]string, error) { } defer fh.Close() - var ( - lines []string = []string{} - line, chunk []byte - ) - reader := bufio.NewReader(fh) - line, isPrefix, err := reader.ReadLine() - - for err == nil { - if isPrefix { - chunk = append(chunk, line...) 
- } else if !isPrefix && len(chunk) > 0 { - line = chunk - chunk = []byte{} - } else { - chunk = []byte{} - } - - if !isPrefix && len(line) > 0 && bytes.Contains(line, []byte("=")) { - lines = append(lines, string(line)) + lines := []string{} + scanner := bufio.NewScanner(fh) + for scanner.Scan() { + line := scanner.Text() + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") && strings.Contains(line, "=") { + data := strings.SplitN(line, "=", 2) + key := data[0] + val := data[1] + if str, err := strconv.Unquote(data[1]); err == nil { + val = str + } + lines = append(lines, fmt.Sprintf("%s=%s", key, val)) } - line, isPrefix, err = reader.ReadLine() - } - if err != nil && err != io.EOF { - return []string{}, err } return lines, nil } diff --git a/runconfig/parse.go b/runconfig/parse.go index e6e8b120d8..d3e32bc2de 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -203,7 +203,6 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf // collect all the environment variables for the container envVariables := []string{} - envVariables = append(envVariables, flEnv.GetAll()...) for _, ef := range flEnvFile.GetAll() { parsedVars, err := opts.ParseEnvFile(ef) if err != nil { @@ -211,6 +210,8 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf } envVariables = append(envVariables, parsedVars...) } + // parse the '-e' and '--env' after, to allow override + envVariables = append(envVariables, flEnv.GetAll()...) // boo, there's no debug output for docker run //utils.Debugf("Environment variables for the container: %#v", envVariables) -- cgit v1.2.1 From d9c257732e435cecdcc1ec8452f35e7614e4713f Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Fri, 7 Mar 2014 16:18:42 -0500 Subject: env-file: remove the unneeded deprecation markup Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- runconfig/parse.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runconfig/parse.go b/runconfig/parse.go index d3e32bc2de..db4ac351a9 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -79,7 +79,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. 
from the host: -v /host:/container, from docker: -v /container)") cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)") cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables") - cmd.Var(&flEnvFile, []string{"#env-file", "-env-file"}, "Read in a line delimited file of ENV variables") + cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a line delimited file of ENV variables") cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat)) cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host") -- cgit v1.2.1 From ff4ac7441ba582c8c339b25b400c6756d9646ff1 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Tue, 11 Mar 2014 16:22:58 -0400 Subject: --env-file: simple line-delimited match dock functionality, and not try to achieve shell-sourcing compatibility Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- docs/sources/reference/commandline/cli.rst | 26 +++++++++++---------- opts/envfile.go | 35 ++++++++++++++++++++++++++++ pkg/opts/envfile.go | 37 ------------------------------ 3 files changed, 49 insertions(+), 49 deletions(-) create mode 100644 opts/envfile.go delete mode 100644 pkg/opts/envfile.go diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 989ea38798..0c9db138c2 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1296,7 +1296,8 @@ through (i.e. $MYVAR1 from the host is set to $MYVAR1 in the container). All three flags, ``-e``, ``--env`` and ``--env-file`` can be repeated. Regardless of the order of these three flags, the ``--env-file`` are processed -first, and then ``-e``/``--env`` flags. So that they can override VAR as needed. +first, and then ``-e``/``--env`` flags. This way, the ``-e`` or ``--env`` will +override variables as needed. .. code-block:: bash @@ -1306,29 +1307,30 @@ first, and then ``-e``/``--env`` flags. So that they can override VAR as needed. TEST_FOO=This is a test The ``--env-file`` flag takes a filename as an argument and expects each line -to be in the VAR=VAL format. The VAL is Unquoted, so if you need a multi-line -value, then use `\n` escape characters inside of a double quoted VAL. Single -quotes are literal. An example of a file passed with ``--env-file`` +to be in the VAR=VAL format, mimicking the argument passed to ``--env``. +Comment lines need only be prefixed with ``#`` + +An example of a file passed with ``--env-file`` .. code-block:: bash $ cat ./env.list TEST_FOO=BAR + + # this is a comment TEST_APP_DEST_HOST=10.10.0.127 TEST_APP_DEST_PORT=8888 - TEST_SOME_MULTILINE_VAR="this is first line\nthis is second line" - TEST_SOME_LITERAL_VAR='this\nwill\nall\nbe\none\nline' - $ sudo docker run --env-file ./env.list busybox env + + # pass through this variable from the caller + TEST_PASSTHROUGH + $ sudo TEST_PASSTHROUGH=howdy docker run --env-file ./env.list busybox env HOME=/ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - HOSTNAME=215d54a814bc + HOSTNAME=5198e0745561 TEST_FOO=BAR TEST_APP_DEST_HOST=10.10.0.127 TEST_APP_DEST_PORT=8888 - TEST_SOME_MULTILINE_VAR=this is first line - this is second line - TEST_SOME_LITERAL_VAR='this\nwill\nall\nbe\none\nline' - container=lxc + TEST_PASSTHROUGH=howdy .. 
code-block:: bash diff --git a/opts/envfile.go b/opts/envfile.go new file mode 100644 index 0000000000..99a713e761 --- /dev/null +++ b/opts/envfile.go @@ -0,0 +1,35 @@ +package opts + +import ( + "bufio" + "fmt" + "os" + "strings" +) + +/* +Read in a line delimited file with environment variables enumerated +*/ +func ParseEnvFile(filename string) ([]string, error) { + fh, err := os.Open(filename) + if err != nil { + return []string{}, err + } + defer fh.Close() + + lines := []string{} + scanner := bufio.NewScanner(fh) + for scanner.Scan() { + line := scanner.Text() + // line is not empty, and not starting with '#' + if len(line) > 0 && !strings.HasPrefix(line, "#") { + if strings.Contains(line, "=") { + data := strings.SplitN(line, "=", 2) + lines = append(lines, fmt.Sprintf("%s=%s", data[0], data[1])) + } else { + lines = append(lines, fmt.Sprintf("%s=%s", line, os.Getenv(line))) + } + } + } + return lines, nil +} diff --git a/pkg/opts/envfile.go b/pkg/opts/envfile.go deleted file mode 100644 index d9afdd952c..0000000000 --- a/pkg/opts/envfile.go +++ /dev/null @@ -1,37 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -/* -Read in a line delimited file with environment variables enumerated -*/ -func ParseEnvFile(filename string) ([]string, error) { - fh, err := os.Open(filename) - if err != nil { - return []string{}, err - } - defer fh.Close() - - lines := []string{} - scanner := bufio.NewScanner(fh) - for scanner.Scan() { - line := scanner.Text() - // line is not empty, and not starting with '#' - if len(line) > 0 && !strings.HasPrefix(line, "#") && strings.Contains(line, "=") { - data := strings.SplitN(line, "=", 2) - key := data[0] - val := data[1] - if str, err := strconv.Unquote(data[1]); err == nil { - val = str - } - lines = append(lines, fmt.Sprintf("%s=%s", key, val)) - } - } - return lines, nil -} -- cgit v1.2.1 From 500c8ba4b66c35cf2c29aeb81a9392cc406835a4 Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Mon, 17 Mar 2014 17:11:27 -0400 Subject: env-file: variable behavior trim the front of variables. Error if there are other spaces present. Leave the value alone. Docker-DCO-1.1-Signed-off-by: Vincent Batts (github: vbatts) --- opts/envfile.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/opts/envfile.go b/opts/envfile.go index 99a713e761..19ee8955f9 100644 --- a/opts/envfile.go +++ b/opts/envfile.go @@ -25,11 +25,30 @@ func ParseEnvFile(filename string) ([]string, error) { if len(line) > 0 && !strings.HasPrefix(line, "#") { if strings.Contains(line, "=") { data := strings.SplitN(line, "=", 2) - lines = append(lines, fmt.Sprintf("%s=%s", data[0], data[1])) + + // trim the front of a variable, but nothing else + variable := strings.TrimLeft(data[0], whiteSpaces) + if strings.ContainsAny(variable, whiteSpaces) { + return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} + } + + // pass the value through, no trimming + lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) } else { - lines = append(lines, fmt.Sprintf("%s=%s", line, os.Getenv(line))) + // if only a pass-through variable is given, clean it up. 
+ lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) } } } return lines, nil } + +var whiteSpaces = " \t" + +type ErrBadEnvVariable struct { + msg string +} + +func (e ErrBadEnvVariable) Error() string { + return fmt.Sprintf("poorly formatted environment: %s", e.msg) +} -- cgit v1.2.1 From f16372022875a6dfe9d53489bde635789dd865a6 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Mon, 31 Mar 2014 10:39:02 -0700 Subject: Move installmirrors anchor in doc so it's before "Mirrors" instead of "Docker and local DNS server warnings" Docker-DCO-1.1-Signed-off-by: Marc Abramowitz (github: msabramo) --- docs/sources/installation/ubuntulinux.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index 85098e9552..44dba6b97e 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -282,8 +282,6 @@ incoming connections on the Docker port (default 4243): sudo ufw allow 4243/tcp -.. _installmirrors: - Docker and local DNS server warnings ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -342,6 +340,8 @@ NetworkManager and Docker need to be restarted afterwards: .. warning:: This might make DNS resolution slower on some networks. +.. _installmirrors: + Mirrors ^^^^^^^ -- cgit v1.2.1 From f7ae3a1381fdc53042bebec085bb3f108bc05da3 Mon Sep 17 00:00:00 2001 From: unclejack Date: Mon, 31 Mar 2014 21:48:30 +0300 Subject: integration-cli: pull busybox before running Make sure the busybox image is ready to be used when running the cli integration tests. Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- hack/make/test-integration-cli | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli index d007fbaf6a..5c6fc367fc 100644 --- a/hack/make/test-integration-cli +++ b/hack/make/test-integration-cli @@ -19,6 +19,10 @@ fi docker -d -D -p $DEST/docker.pid &> $DEST/docker.log & +# pull the busybox image before running the tests +sleep 2 +docker pull busybox + bundle_test_integration_cli 2>&1 \ | tee $DEST/test.log -- cgit v1.2.1 From 904bf049c1626567ee28a21bde4b68ab82c5ce77 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 31 Mar 2014 19:10:19 +0000 Subject: Force abs paths for host volumes Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- integration-cli/docker_cli_run_test.go | 12 ++++++++++++ runtime/volumes.go | 7 +++++++ 2 files changed, 19 insertions(+) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 13959adea7..8d62108fed 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -271,3 +271,15 @@ func TestDockerRunWithVolumesAsFiles(t *testing.T) { logDone("run - regression test for #4741 - volumes from as files") } + +// Regression test for #4830 +func TestDockerRunWithRelativePath(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-v", "tmp:/other-tmp", "busybox", "true") + if _, _, _, err := runCommandWithStdoutStderr(runCmd); err == nil { + t.Fatalf("relative path should result in an error") + } + + deleteAllContainers() + + logDone("run - volume with relative path") +} diff --git a/runtime/volumes.go b/runtime/volumes.go index 5ac82ef089..c504644ae8 100644 --- a/runtime/volumes.go +++ b/runtime/volumes.go @@ -172,6 +172,13 @@ func createVolumes(container *Container) error { if bindMap, 
exists := binds[volPath]; exists { isBindMount = true srcPath = bindMap.SrcPath + srcAbs, err := filepath.Abs(srcPath) + if err != nil { + return err + } + if srcPath != srcAbs { + return fmt.Errorf("%s should be an absolute path", srcPath) + } if strings.ToLower(bindMap.Mode) == "rw" { srcRW = true } -- cgit v1.2.1 From 9709c31d1b500fb7cfdb02aaf62c7d8c187874cf Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 31 Mar 2014 19:21:57 +0000 Subject: fix import display Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- utils/jsonmessage.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/utils/jsonmessage.go b/utils/jsonmessage.go index f84cc42c78..6be421be94 100644 --- a/utils/jsonmessage.go +++ b/utils/jsonmessage.go @@ -131,7 +131,7 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, if jm.Progress != nil { jm.Progress.terminalFd = terminalFd } - if jm.Progress != nil || jm.ProgressMessage != "" { + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { line, ok := ids[jm.ID] if !ok { line = len(ids) @@ -141,17 +141,15 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, } else { diff = len(ids) - line } - if isTerminal { + if jm.ID != "" && isTerminal { // [{diff}A = move cursor up diff rows fmt.Fprintf(out, "%c[%dA", 27, diff) } } err := jm.Display(out, isTerminal) - if jm.ID != "" { - if isTerminal { - // [{diff}B = move cursor down diff rows - fmt.Fprintf(out, "%c[%dB", 27, diff) - } + if jm.ID != "" && isTerminal { + // [{diff}B = move cursor down diff rows + fmt.Fprintf(out, "%c[%dB", 27, diff) } if err != nil { return err -- cgit v1.2.1 From b430f4f45be27b9565027b5c89b2506577027e88 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Mon, 31 Mar 2014 19:31:21 +0000 Subject: add test Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- integration-cli/docker_cli_import_test.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 integration-cli/docker_cli_import_test.go diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go new file mode 100644 index 0000000000..9b36aa9ce1 --- /dev/null +++ b/integration-cli/docker_cli_import_test.go @@ -0,0 +1,20 @@ +package main + +import ( + "fmt" + "os/exec" + "strings" + "testing" +) + +func TestImportDisplay(t *testing.T) { + importCmd := exec.Command(dockerBinary, "import", "https://github.com/ewindisch/docker-cirros/raw/master/cirros-0.3.0-x86_64-lxc.tar.gz") + out, _, err := runCommandWithOutput(importCmd) + errorOut(err, t, fmt.Sprintf("import failed with errors: %v", err)) + + if n := len(strings.Split(out, "\n")); n != 3 { + t.Fatalf("display is messed up: %d '\\n' instead of 3", n) + } + + logDone("import - cirros was imported and display is fine") +} -- cgit v1.2.1 From 5fb28eab3e670f225019174987424be31a0d0527 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 31 Mar 2014 11:40:39 -0700 Subject: Add regression test Docker-DCO-1.1-Signed-off-by: Guillaume J. 
Charmes (github: creack) --- integration-cli/docker_cli_logs_test.go | 76 +++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 integration-cli/docker_cli_logs_test.go diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go new file mode 100644 index 0000000000..f8fcbe8832 --- /dev/null +++ b/integration-cli/docker_cli_logs_test.go @@ -0,0 +1,76 @@ +package main + +import ( + "fmt" + "os/exec" + "testing" +) + +// This used to work, it test a log of PageSize-1 (gh#4851) +func TestLogsContainerSmallerThanPage(t *testing.T) { + testLen := 32767 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if len(out) != testLen+1 { + t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + } + + go deleteContainer(cleanedContainerID) + + logDone("logs - logs container running echo smaller than page size") +} + +// Regression test: When going over the PageSize, it used to panic (gh#4851) +func TestLogsContainerBiggerThanPage(t *testing.T) { + testLen := 32768 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if len(out) != testLen+1 { + t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + } + + go deleteContainer(cleanedContainerID) + + logDone("logs - logs container running echo bigger than page size") +} + +// Regression test: When going much over the PageSize, it used to block (gh#4851) +func TestLogsContainerMuchBiggerThanPage(t *testing.T) { + testLen := 33000 + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err)) + + cleanedContainerID := stripTrailingCharacters(out) + exec.Command(dockerBinary, "wait", cleanedContainerID).Run() + + logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) + out, _, _, err = runCommandWithStdoutStderr(logsCmd) + errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err)) + + if len(out) != testLen+1 { + t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + } + + go deleteContainer(cleanedContainerID) + + logDone("logs - logs container running echo much bigger than page size") +} -- cgit v1.2.1 From 07b60d626acaddffb6a0b118bfc3f19631411d72 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Wed, 19 Mar 2014 18:52:38 +0000 Subject: symlink 
/etc/mtab and /proc/mounts Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- graph/graph.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/graph/graph.go b/graph/graph.go index 4349cac129..a177cbd1e1 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -259,6 +259,7 @@ func SetupInitLayer(initLayer string) error { "/etc/hosts": "file", "/etc/hostname": "file", "/dev/console": "file", + "/etc/mtab": "/proc/mounts", // "var/run": "dir", // "var/lock": "dir", } { @@ -285,6 +286,10 @@ func SetupInitLayer(initLayer string) error { return err } f.Close() + default: + if err := os.Symlink(typ, path.Join(initLayer, pth)); err != nil { + return err + } } } else { return err -- cgit v1.2.1 From 289377b409b321a5a624af3517032c396df6c22f Mon Sep 17 00:00:00 2001 From: Josh Hawn Date: Mon, 31 Mar 2014 15:32:57 -0700 Subject: No longer expose gravatar_email in docker.io api Docker.io API has replaced the gravatar_email field with a gravatar_url field instead. Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn) --- docs/sources/reference/api/docker_io_accounts_api.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sources/reference/api/docker_io_accounts_api.rst b/docs/sources/reference/api/docker_io_accounts_api.rst index 7976f1fddf..dc5c44d4a8 100644 --- a/docs/sources/reference/api/docker_io_accounts_api.rst +++ b/docs/sources/reference/api/docker_io_accounts_api.rst @@ -49,14 +49,14 @@ docker.io Accounts API { "id": 2, "username": "janedoe", - "url": "", + "url": "https://www.docker.io/api/v1.1/users/janedoe/", "date_joined": "2014-02-12T17:58:01.431312Z", "type": "User", "full_name": "Jane Doe", "location": "San Francisco, CA", "company": "Success, Inc.", "profile_url": "https://docker.io/", - "gravatar_email": "jane.doe+gravatar@example.com", + "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm" "email": "jane.doe@example.com", "is_active": true } @@ -111,14 +111,14 @@ docker.io Accounts API { "id": 2, "username": "janedoe", - "url": "", + "url": "https://www.docker.io/api/v1.1/users/janedoe/", "date_joined": "2014-02-12T17:58:01.431312Z", "type": "User", "full_name": "Jane Doe", "location": "Private Island", "company": "Retired", "profile_url": "http://janedoe.com/", - "gravatar_email": "jane.doe+gravatar@example.com", + "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm" "email": "jane.doe@example.com", "is_active": true } -- cgit v1.2.1 From 4cdcea20474a9f42291fe6b6c6dee348343a7c05 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 31 Mar 2014 21:02:42 +0000 Subject: Set bridge mac addr on supported kernels Fixes #3200 Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/netlink/netlink_linux.go | 51 ++++++++++++++++++++++++++++++++++ pkg/netlink/netlink_unsupported.go | 4 +++ runtime/networkdriver/bridge/driver.go | 30 ++++---------------- 3 files changed, 61 insertions(+), 24 deletions(-) diff --git a/pkg/netlink/netlink_linux.go b/pkg/netlink/netlink_linux.go index f8bb6bac3c..f4aa92ed34 100644 --- a/pkg/netlink/netlink_linux.go +++ b/pkg/netlink/netlink_linux.go @@ -5,6 +5,7 @@ package netlink import ( "encoding/binary" "fmt" + "math/rand" "net" "syscall" "unsafe" @@ -17,10 +18,16 @@ const ( IFLA_INFO_DATA = 2 VETH_INFO_PEER = 1 IFLA_NET_NS_FD = 28 + SIOC_BRADDBR = 0x89a0 ) var nextSeqNr int +type ifreqHwaddr struct { + IfrnName [16]byte + IfruHwaddr syscall.RawSockaddr +} + func nativeEndian() binary.ByteOrder 
{ var x uint32 = 0x01020304 if *(*byte)(unsafe.Pointer(&x)) == 0x01 { @@ -808,3 +815,47 @@ func NetworkCreateVethPair(name1, name2 string) error { } return s.HandleAck(wb.Seq) } + +// Create the actual bridge device. This is more backward-compatible than +// netlink.NetworkLinkAdd and works on RHEL 6. +func CreateBridge(name string, setMacAddr bool) error { + s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP) + if err != nil { + // ipv6 issue, creating with ipv4 + s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP) + if err != nil { + return err + } + } + defer syscall.Close(s) + + nameBytePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return err + } + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { + return err + } + if setMacAddr { + return setBridgeMacAddress(s, name) + } + return nil +} + +func setBridgeMacAddress(s int, name string) error { + ifr := ifreqHwaddr{} + ifr.IfruHwaddr.Family = syscall.ARPHRD_ETHER + copy(ifr.IfrnName[:], name) + + for i := 0; i < 6; i++ { + ifr.IfruHwaddr.Data[i] = int8(rand.Intn(255)) + } + + ifr.IfruHwaddr.Data[0] &^= 0x1 // clear multicast bit + ifr.IfruHwaddr.Data[0] |= 0x2 // set local assignment bit (IEEE802) + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), syscall.SIOCSIFHWADDR, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + return nil +} diff --git a/pkg/netlink/netlink_unsupported.go b/pkg/netlink/netlink_unsupported.go index bd9e962d35..00a3b3fae8 100644 --- a/pkg/netlink/netlink_unsupported.go +++ b/pkg/netlink/netlink_unsupported.go @@ -59,3 +59,7 @@ func NetworkSetMaster(iface, master *net.Interface) error { func NetworkLinkDown(iface *net.Interface) error { return ErrNotImplemented } + +func CreateBridge(name string, setMacAddr bool) error { + return ErrNotImplemented +} diff --git a/runtime/networkdriver/bridge/driver.go b/runtime/networkdriver/bridge/driver.go index 61e82dd481..f7c3bc6b01 100644 --- a/runtime/networkdriver/bridge/driver.go +++ b/runtime/networkdriver/bridge/driver.go @@ -14,13 +14,10 @@ import ( "log" "net" "strings" - "syscall" - "unsafe" ) const ( DefaultNetworkBridge = "docker0" - siocBRADDBR = 0x89a0 ) // Network interface represents the networking stack of a container @@ -281,28 +278,13 @@ func createBridge(bridgeIP string) error { return nil } -// Create the actual bridge device. This is more backward-compatible than -// netlink.NetworkLinkAdd and works on RHEL 6. 
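// A standalone illustration (not part of this patch) of the bit operations used
// in setBridgeMacAddress above: clearing bit 0 of the first byte keeps the address
// unicast rather than multicast, and setting bit 1 marks it as locally administered
// so it cannot collide with vendor-assigned (OUI) space. Assumes only the standard
// library; randomLocalMAC is a hypothetical name, not a Docker or netlink API.
package main

import (
	"fmt"
	"math/rand"
)

// randomLocalMAC returns a random unicast, locally administered MAC address.
func randomLocalMAC() []byte {
	mac := make([]byte, 6)
	for i := range mac {
		mac[i] = byte(rand.Intn(256))
	}
	mac[0] &^= 0x1 // clear multicast bit: address is unicast
	mac[0] |= 0x2  // set local assignment bit (IEEE 802)
	return mac
}

func main() {
	fmt.Printf("generated bridge MAC: %02x\n", randomLocalMAC())
}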
func createBridgeIface(name string) error { - s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP) - if err != nil { - utils.Debugf("Bridge socket creation failed IPv6 probably not enabled: %v", err) - s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP) - if err != nil { - return fmt.Errorf("Error creating bridge creation socket: %s", err) - } - } - defer syscall.Close(s) - - nameBytePtr, err := syscall.BytePtrFromString(name) - if err != nil { - return fmt.Errorf("Error converting bridge name %s to byte array: %s", name, err) - } - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), siocBRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { - return fmt.Errorf("Error creating bridge: %s", err) - } - return nil + kv, err := utils.GetKernelVersion() + // only set the bridge's mac address if the kernel version is > 3.3 + // before that it was not supported + setBridgeMacAddr := err == nil && (kv.Kernel >= 3 && kv.Major >= 3) + utils.Debugf("setting bridge mac address = %v", setBridgeMacAddr) + return netlink.CreateBridge(name, setBridgeMacAddr) } // Allocate a network interface -- cgit v1.2.1 From 7a7f59210d5eb7a38a5fac8889010bd54576ea01 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 31 Mar 2014 23:12:08 +0000 Subject: Ensure second part of the key is provided Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runconfig/parse.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/runconfig/parse.go b/runconfig/parse.go index a330c6c869..da4e045cd0 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -243,6 +243,8 @@ func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) { parts := strings.SplitN(o, ".", 2) if len(parts) < 2 { return nil, fmt.Errorf("invalid opt format %s", o) + } else if strings.TrimSpace(parts[0]) == "" { + return nil, fmt.Errorf("key cannot be empty %s", o) } values, exists := out[parts[0]] if !exists { -- cgit v1.2.1 From d52d24dd801f3ffe1b894226b8dba613de59bd87 Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 1 Apr 2014 00:28:44 +0000 Subject: remove setupDev from libcontainer Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- pkg/libcontainer/nsinit/mount.go | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go index 796143c68e..59f4a0e0d9 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/nsinit/mount.go @@ -62,9 +62,6 @@ func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, cons } // In non-privileged mode, this fails. Discard the error.
setupLoopbackDevices(rootfs) - if err := setupDev(rootfs); err != nil { - return err - } if err := setupPtmx(rootfs, console, mountLabel); err != nil { return err } @@ -172,30 +169,6 @@ func copyDevNode(rootfs, node string) error { return nil } -// setupDev symlinks the current processes pipes into the -// appropriate destination on the containers rootfs -func setupDev(rootfs string) error { - for _, link := range []struct { - from string - to string - }{ - {"/proc/kcore", "/dev/core"}, - {"/proc/self/fd", "/dev/fd"}, - {"/proc/self/fd/0", "/dev/stdin"}, - {"/proc/self/fd/1", "/dev/stdout"}, - {"/proc/self/fd/2", "/dev/stderr"}, - } { - dest := filepath.Join(rootfs, link.to) - if err := os.Remove(dest); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("remove %s %s", dest, err) - } - if err := os.Symlink(link.from, dest); err != nil { - return fmt.Errorf("symlink %s %s", dest, err) - } - } - return nil -} - // setupConsole ensures that the container has a proper /dev/console setup func setupConsole(rootfs, console string, mountLabel string) error { oldMask := system.Umask(0000) -- cgit v1.2.1 From 3f0886c8c3084341e9ef454bf41445cfc22efca2 Mon Sep 17 00:00:00 2001 From: Sam Alba Date: Mon, 31 Mar 2014 17:56:25 -0700 Subject: Inverted layer checksum and tarsum. The checksum of the payload has to be computed on the Gzip'ed content. Docker-DCO-1.1-Signed-off-by: Sam Alba (github: samalba) --- registry/registry.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/registry/registry.go b/registry/registry.go index 182ec78a76..414283b82c 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -438,10 +438,10 @@ func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registr utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer") h := sha256.New() - checksumLayer := &utils.CheckSum{Reader: layer, Hash: h} - tarsumLayer := &utils.TarSum{Reader: checksumLayer} + tarsumLayer := &utils.TarSum{Reader: layer} + checksumLayer := &utils.CheckSum{Reader: tarsumLayer, Hash: h} - req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", tarsumLayer) + req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", checksumLayer) if err != nil { return "", "", err } -- cgit v1.2.1 From de9fba71721f71f86d53cf94504b10dcea80a5bd Mon Sep 17 00:00:00 2001 From: Sam Alba Date: Mon, 31 Mar 2014 18:31:15 -0700 Subject: Payload checksum now match the checksum simple Backported for backward compatibility. Docker-DCO-1.1-Signed-off-by: Sam Alba (github: samalba) --- registry/registry.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/registry/registry.go b/registry/registry.go index 414283b82c..5ac04f9e7e 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -437,8 +437,10 @@ func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registr utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer") - h := sha256.New() tarsumLayer := &utils.TarSum{Reader: layer} + h := sha256.New() + h.Write(jsonRaw) + h.Write([]byte{'\n'}) checksumLayer := &utils.CheckSum{Reader: tarsumLayer, Hash: h} req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", checksumLayer) -- cgit v1.2.1 From e648a186d68dcb3ee0d6123b041c5aa66438cc89 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Mon, 31 Mar 2014 18:50:10 -0700 Subject: Allow push of a single tag Docker-DCO-1.1-Signed-off-by: Guillaume J. 
Charmes (github: creack) --- api/client/commands.go | 9 ++++++--- api/server/server.go | 1 + server/server.go | 33 ++++++++++++++++++++++++--------- 3 files changed, 31 insertions(+), 12 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 49cd07700f..ef9796b747 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1000,7 +1000,7 @@ func (cli *DockerCli) CmdImport(args ...string) error { } func (cli *DockerCli) CmdPush(args ...string) error { - cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry") + cmd := cli.Subcmd("push", "NAME[:TAG]", "Push an image or a repository to the registry") if err := cmd.Parse(args); err != nil { return nil } @@ -1013,8 +1013,10 @@ func (cli *DockerCli) CmdPush(args ...string) error { cli.LoadConfigFile() + remote, tag := utils.ParseRepositoryTag(name) + // Resolve the Repository name from fqn to hostname + name - hostname, _, err := registry.ResolveRepositoryName(name) + hostname, _, err := registry.ResolveRepositoryName(remote) if err != nil { return err } @@ -1033,6 +1035,7 @@ func (cli *DockerCli) CmdPush(args ...string) error { } v := url.Values{} + v.Set("tag", tag) push := func(authConfig registry.AuthConfig) error { buf, err := json.Marshal(authConfig) if err != nil { @@ -1042,7 +1045,7 @@ func (cli *DockerCli) CmdPush(args ...string) error { base64.URLEncoding.EncodeToString(buf), } - return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{ + return cli.stream("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, map[string][]string{ "X-Registry-Auth": registryAuthHeader, }) } diff --git a/api/server/server.go b/api/server/server.go index 18aefe42cd..5597d8b92c 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -517,6 +517,7 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response job := eng.Job("push", vars["name"]) job.SetenvJson("metaHeaders", metaHeaders) job.SetenvJson("authConfig", authConfig) + job.Setenv("tag", r.Form.Get("tag")) if version.GreaterThan("1.0") { job.SetenvBool("json", true) streamJSON(job, w, true) diff --git a/server/server.go b/server/server.go index 2cb3328d55..3e97481e0e 100644 --- a/server/server.go +++ b/server/server.go @@ -1401,7 +1401,7 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status { } // Retrieve the all the images to be uploaded in the correct order -func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[string][]string, error) { +func (srv *Server) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) { var ( imageList []string imagesSeen map[string]bool = make(map[string]bool) @@ -1409,6 +1409,9 @@ func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[stri ) for tag, id := range localRepo { + if requestedTag != "" && requestedTag != tag { + continue + } var imageListForThisTag []string tagsByImage[id] = append(tagsByImage[id], tag) @@ -1435,25 +1438,29 @@ func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[stri // append to main image list imageList = append(imageList, imageListForThisTag...) 
} - + if len(imageList) == 0 { + return nil, nil, fmt.Errorf("No images found for the requested repository / tag") + } utils.Debugf("Image list: %v", imageList) utils.Debugf("Tags by image: %v", tagsByImage) return imageList, tagsByImage, nil } -func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, sf *utils.StreamFormatter) error { +func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error { out = utils.NewWriteFlusher(out) utils.Debugf("Local repo: %s", localRepo) - imgList, tagsByImage, err := srv.getImageList(localRepo) + imgList, tagsByImage, err := srv.getImageList(localRepo, tag) if err != nil { return err } out.Write(sf.FormatStatus("", "Sending image list")) - var repoData *registry.RepositoryData - var imageIndex []*registry.ImgData + var ( + repoData *registry.RepositoryData + imageIndex []*registry.ImgData + ) for _, imgId := range imgList { if tags, exists := tagsByImage[imgId]; exists { @@ -1488,8 +1495,12 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName return err } + nTag := 1 + if tag == "" { + nTag = len(localRepo) + } for _, ep := range repoData.Endpoints { - out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo))) + out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag)) for _, imgId := range imgList { if r.LookupRemoteImage(imgId, ep, repoData.Tokens) { @@ -1575,6 +1586,7 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status { metaHeaders map[string][]string ) + tag := job.Getenv("tag") job.GetenvJson("authConfig", authConfig) job.GetenvJson("metaHeaders", metaHeaders) if _, err := srv.poolAdd("push", localName); err != nil { @@ -1600,11 +1612,14 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status { } if err != nil { - reposLen := len(srv.runtime.Repositories().Repositories[localName]) + reposLen := 1 + if tag == "" { + reposLen = len(srv.runtime.Repositories().Repositories[localName]) + } job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen)) // If it fails, try to get the repository if localRepo, exists := srv.runtime.Repositories().Repositories[localName]; exists { - if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil { + if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil { return job.Error(err) } return engine.StatusOK -- cgit v1.2.1 From 739d1244807bc3522a0af4dc3490305d6f037601 Mon Sep 17 00:00:00 2001 From: tjmehta Date: Mon, 31 Mar 2014 22:21:52 -0700 Subject: make findNextPort circular, add all-ports-allocated error Docker-DCO-1.1-Signed-off-by: Tejesh Mehta (github: tjmehta) --- runtime/networkdriver/portallocator/portallocator.go | 12 ++++++++---- runtime/networkdriver/portallocator/portallocator_test.go | 4 ++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/runtime/networkdriver/portallocator/portallocator.go b/runtime/networkdriver/portallocator/portallocator.go index 4d698f2de2..9ecd447116 100644 --- a/runtime/networkdriver/portallocator/portallocator.go +++ b/runtime/networkdriver/portallocator/portallocator.go @@ -18,8 +18,8 @@ type ( ) var ( + ErrAllPortsAllocated = errors.New("all ports are allocated") ErrPortAlreadyAllocated = errors.New("port has already been allocated") - 
ErrPortExceedsRange = errors.New("port exceeds upper range") ErrUnknownProtocol = errors.New("unknown protocol") ) @@ -152,17 +152,21 @@ func equalsDefault(ip net.IP) bool { func findNextPort(proto string, allocated *collections.OrderedIntSet) (int, error) { port := nextPort(proto) + startSearchPort := port for allocated.Exists(port) { port = nextPort(proto) - } - if port > EndPortRange { - return 0, ErrPortExceedsRange + if startSearchPort == port { + return 0, ErrAllPortsAllocated + } } return port, nil } func nextPort(proto string) int { c := currentDynamicPort[proto] + 1 + if c > EndPortRange { + c = BeginPortRange + } currentDynamicPort[proto] = c return c } diff --git a/runtime/networkdriver/portallocator/portallocator_test.go b/runtime/networkdriver/portallocator/portallocator_test.go index f01bcfc99e..8b4062c37c 100644 --- a/runtime/networkdriver/portallocator/portallocator_test.go +++ b/runtime/networkdriver/portallocator/portallocator_test.go @@ -110,8 +110,8 @@ func TestAllocateAllPorts(t *testing.T) { } } - if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrPortExceedsRange { - t.Fatalf("Expected error %s got %s", ErrPortExceedsRange, err) + if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrAllPortsAllocated { + t.Fatalf("Expected error %s got %s", ErrAllPortsAllocated, err) } _, err := RequestPort(defaultIP, "udp", 0) -- cgit v1.2.1 From 40c6d00c97c737d9d3827f159518007803affcc7 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 1 Apr 2014 07:07:42 +0000 Subject: Update imports to be consistent Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/execdriver/native/create.go | 3 ++- runtime/utils_test.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/runtime/execdriver/native/create.go b/runtime/execdriver/native/create.go index 976416a8ca..71fab3e064 100644 --- a/runtime/execdriver/native/create.go +++ b/runtime/execdriver/native/create.go @@ -2,12 +2,13 @@ package native import ( "fmt" + "os" + "github.com/dotcloud/docker/pkg/label" "github.com/dotcloud/docker/pkg/libcontainer" "github.com/dotcloud/docker/runtime/execdriver" "github.com/dotcloud/docker/runtime/execdriver/native/configuration" "github.com/dotcloud/docker/runtime/execdriver/native/template" - "os" ) // createContainer populates and configures the container type with the diff --git a/runtime/utils_test.go b/runtime/utils_test.go index 833634cb47..bdf3543a49 100644 --- a/runtime/utils_test.go +++ b/runtime/utils_test.go @@ -1,9 +1,10 @@ package runtime import ( + "testing" + "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/utils" - "testing" ) func TestMergeLxcConfig(t *testing.T) { -- cgit v1.2.1 From f067e263677fc86f9610ca61fbe42f63efad91f2 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 25 Mar 2014 23:21:07 +0000 Subject: Ensure that all containers are stopped cleanly at shutdown Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/runtime.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/runtime/runtime.go b/runtime/runtime.go index b035f5df9f..85880ff9ab 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -778,8 +778,31 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (* return runtime, nil } +func (runtime *Runtime) shutdown() error { + group := sync.WaitGroup{} + utils.Debugf("starting clean shutdown of all containers...") + for _, container := range runtime.List() { + if container.State.IsRunning() { + 
utils.Debugf("stopping %s", container.ID) + group.Add(1) + + go func() { + defer group.Done() + container.Stop(10) + }() + } + } + group.Wait() + + return nil +} + func (runtime *Runtime) Close() error { errorsStrings := []string{} + if err := runtime.shutdown(); err != nil { + utils.Errorf("runtime.shutdown(): %s", err) + errorsStrings = append(errorsStrings, err.Error()) + } if err := portallocator.ReleaseAll(); err != nil { utils.Errorf("portallocator.ReleaseAll(): %s", err) errorsStrings = append(errorsStrings, err.Error()) -- cgit v1.2.1 From 5b9069bd990dca0a35d8e490c6f6b56d27163bb8 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 26 Mar 2014 00:04:55 +0000 Subject: Add kill for other drivers on restart Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/runtime.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/runtime/runtime.go b/runtime/runtime.go index 85880ff9ab..4ece7d1533 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -174,6 +174,7 @@ func (runtime *Runtime) Register(container *Container) error { if container.State.IsGhost() { utils.Debugf("killing ghost %s", container.ID) + existingPid := container.State.Pid container.State.SetGhost(false) container.State.SetStopped(0) @@ -181,9 +182,20 @@ func (runtime *Runtime) Register(container *Container) error { // no ghost processes are left when docker dies if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") { lxc.KillLxc(container.ID, 9) - if err := container.Unmount(); err != nil { - utils.Debugf("ghost unmount error %s", err) + } else { + // use the current driver and ensure that the container is dead x.x + cmd := &execdriver.Command{ + ID: container.ID, } + var err error + cmd.Process, err = os.FindProcess(existingPid) + if err != nil { + utils.Debugf("cannot find existing process for %d", existingPid) + } + runtime.execDriver.Kill(cmd, 9) + } + if err := container.Unmount(); err != nil { + utils.Debugf("ghost unmount error %s", err) } } -- cgit v1.2.1 From 5bb82f6313d7f789783ffac854be85a44a56617e Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 26 Mar 2014 06:48:16 +0000 Subject: Ensure a reliable way to kill ghost containers on reboot Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/exec.go | 7 ++++++- pkg/libcontainer/nsinit/state.go | 16 ++++++++++++---- pkg/system/proc.go | 26 ++++++++++++++++++++++++++ runtime/execdriver/driver.go | 1 + runtime/execdriver/lxc/driver.go | 4 ++++ runtime/execdriver/native/driver.go | 36 +++++++++++++++++++++++++++++++++--- runtime/runtime.go | 2 +- 7 files changed, 83 insertions(+), 9 deletions(-) create mode 100644 pkg/system/proc.go diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go index 73842f729f..c07c45de3c 100644 --- a/pkg/libcontainer/nsinit/exec.go +++ b/pkg/libcontainer/nsinit/exec.go @@ -50,8 +50,13 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [ if err := command.Start(); err != nil { return -1, err } + + started, err := system.GetProcessStartTime(command.Process.Pid) + if err != nil { + return -1, err + } ns.logger.Printf("writting pid %d to file\n", command.Process.Pid) - if err := ns.stateWriter.WritePid(command.Process.Pid); err != nil { + if err := ns.stateWriter.WritePid(command.Process.Pid, started); err != nil { command.Process.Kill() return -1, err } diff --git a/pkg/libcontainer/nsinit/state.go b/pkg/libcontainer/nsinit/state.go index 
af38008c03..26d7fa4230 100644 --- a/pkg/libcontainer/nsinit/state.go +++ b/pkg/libcontainer/nsinit/state.go @@ -10,7 +10,7 @@ import ( // StateWriter handles writing and deleting the pid file // on disk type StateWriter interface { - WritePid(pid int) error + WritePid(pid int, startTime string) error DeletePid() error } @@ -19,10 +19,18 @@ type DefaultStateWriter struct { } // writePidFile writes the namespaced processes pid to pid in the rootfs for the container -func (d *DefaultStateWriter) WritePid(pid int) error { - return ioutil.WriteFile(filepath.Join(d.Root, "pid"), []byte(fmt.Sprint(pid)), 0655) +func (d *DefaultStateWriter) WritePid(pid int, startTime string) error { + err := ioutil.WriteFile(filepath.Join(d.Root, "pid"), []byte(fmt.Sprint(pid)), 0655) + if err != nil { + return err + } + return ioutil.WriteFile(filepath.Join(d.Root, "start"), []byte(startTime), 0655) } func (d *DefaultStateWriter) DeletePid() error { - return os.Remove(filepath.Join(d.Root, "pid")) + err := os.Remove(filepath.Join(d.Root, "pid")) + if serr := os.Remove(filepath.Join(d.Root, "start")); err == nil { + err = serr + } + return err } diff --git a/pkg/system/proc.go b/pkg/system/proc.go new file mode 100644 index 0000000000..a492346c7f --- /dev/null +++ b/pkg/system/proc.go @@ -0,0 +1,26 @@ +package system + +import ( + "io/ioutil" + "path/filepath" + "strconv" + "strings" +) + +// look in /proc to find the process start time so that we can verify +// that this pid has started after ourself +func GetProcessStartTime(pid int) (string, error) { + data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + if err != nil { + return "", err + } + parts := strings.Split(string(data), " ") + // the starttime is located at pos 22 + // from the man page + // + // starttime %llu (was %lu before Linux 2.6) + // (22) The time the process started after system boot. In kernels before Linux 2.6, this + // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks + // (divide by sysconf(_SC_CLK_TCK)). + return parts[22-1], nil // starts at 1 +} diff --git a/runtime/execdriver/driver.go b/runtime/execdriver/driver.go index d067973419..27a575cb3a 100644 --- a/runtime/execdriver/driver.go +++ b/runtime/execdriver/driver.go @@ -84,6 +84,7 @@ type Driver interface { Name() string // Driver name Info(id string) Info // "temporary" hack (until we move state from core to plugins) GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container. 
+ Terminate(c *Command) error // kill it with fire } // Network settings of the container diff --git a/runtime/execdriver/lxc/driver.go b/runtime/execdriver/lxc/driver.go index 896f215366..ef16dcc380 100644 --- a/runtime/execdriver/lxc/driver.go +++ b/runtime/execdriver/lxc/driver.go @@ -204,6 +204,10 @@ func (d *driver) Kill(c *execdriver.Command, sig int) error { return KillLxc(c.ID, sig) } +func (d *driver) Terminate(c *execdriver.Command) error { + return KillLxc(c.ID, 9) +} + func (d *driver) version() string { var ( version string diff --git a/runtime/execdriver/native/driver.go b/runtime/execdriver/native/driver.go index 4acc4b388c..c5a3837615 100644 --- a/runtime/execdriver/native/driver.go +++ b/runtime/execdriver/native/driver.go @@ -117,9 +117,39 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba } func (d *driver) Kill(p *execdriver.Command, sig int) error { - err := syscall.Kill(p.Process.Pid, syscall.Signal(sig)) + return syscall.Kill(p.Process.Pid, syscall.Signal(sig)) +} + +func (d *driver) Terminate(p *execdriver.Command) error { + // lets check the start time for the process + started, err := d.readStartTime(p) + if err != nil { + // if we don't have the data on disk then we can assume the process is gone + // because this is only removed after we know the process has stopped + if os.IsNotExist(err) { + return nil + } + return err + } + + currentStartTime, err := system.GetProcessStartTime(p.Process.Pid) + if err != nil { + return err + } + if started == currentStartTime { + err = syscall.Kill(p.Process.Pid, 9) + } d.removeContainerRoot(p.ID) return err + +} + +func (d *driver) readStartTime(p *execdriver.Command) (string, error) { + data, err := ioutil.ReadFile(filepath.Join(d.root, p.ID, "start")) + if err != nil { + return "", err + } + return string(data), nil } func (d *driver) Info(id string) execdriver.Info { @@ -235,9 +265,9 @@ type dockerStateWriter struct { callback execdriver.StartCallback } -func (d *dockerStateWriter) WritePid(pid int) error { +func (d *dockerStateWriter) WritePid(pid int, started string) error { d.c.ContainerPid = pid - err := d.dsw.WritePid(pid) + err := d.dsw.WritePid(pid, started) if d.callback != nil { d.callback(d.c) } diff --git a/runtime/runtime.go b/runtime/runtime.go index 4ece7d1533..1c99a02811 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -192,7 +192,7 @@ func (runtime *Runtime) Register(container *Container) error { if err != nil { utils.Debugf("cannot find existing process for %d", existingPid) } - runtime.execDriver.Kill(cmd, 9) + runtime.execDriver.Terminate(cmd) } if err := container.Unmount(); err != nil { utils.Debugf("ghost unmount error %s", err) -- cgit v1.2.1 From 283daced0c919be760947d44d7e46c80e1054d64 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 26 Mar 2014 06:55:46 +0000 Subject: Don't send prctl to be consistent with other drivers Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/libcontainer/nsinit/init.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 85182326ee..c7c2addb18 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -54,11 +54,6 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol return fmt.Errorf("setctty %s", err) } } - // this is our best effort to let the process know that the parent has died and that it - // should it should act on it how it sees fit - if err := 
system.ParentDeathSignal(uintptr(syscall.SIGTERM)); err != nil { - return fmt.Errorf("parent death signal %s", err) - } if err := setupNetwork(container, context); err != nil { return fmt.Errorf("setup networking %s", err) } -- cgit v1.2.1 From e36d89b0f9c8ba5b071374310ca632f6b2fdb7a1 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 26 Mar 2014 06:59:41 +0000 Subject: Ensure state is saved to disk after we kill the ghost Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/runtime.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/runtime/runtime.go b/runtime/runtime.go index 1c99a02811..d5c1a96ada 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -197,6 +197,9 @@ func (runtime *Runtime) Register(container *Container) error { if err := container.Unmount(); err != nil { utils.Debugf("ghost unmount error %s", err) } + if err := container.ToDisk(); err != nil { + utils.Debugf("saving ghost state to disk %s", err) + } } info := runtime.execDriver.Info(container.ID) -- cgit v1.2.1 From 93779cc7fee4ee0690d9dd28eed478a418e79577 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 1 Apr 2014 00:11:17 +0000 Subject: Send sigterm and wait forever Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/container.go | 1 - runtime/runtime.go | 11 ++++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/runtime/container.go b/runtime/container.go index ed68fd0844..bd4a6f2bea 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -915,7 +915,6 @@ func (container *Container) Stop(seconds int) error { // 1. Send a SIGTERM if err := container.KillSig(15); err != nil { - utils.Debugf("Error sending kill SIGTERM: %s", err) log.Print("Failed to send SIGTERM to the process, force killing") if err := container.KillSig(9); err != nil { return err diff --git a/runtime/runtime.go b/runtime/runtime.go index d5c1a96ada..9e8323279e 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -797,13 +797,18 @@ func (runtime *Runtime) shutdown() error { group := sync.WaitGroup{} utils.Debugf("starting clean shutdown of all containers...") for _, container := range runtime.List() { - if container.State.IsRunning() { - utils.Debugf("stopping %s", container.ID) + c := container + if c.State.IsRunning() { + utils.Debugf("stopping %s", c.ID) group.Add(1) go func() { defer group.Done() - container.Stop(10) + if err := c.KillSig(15); err != nil { + utils.Debugf("kill 15 error for %s - %s", c.ID, err) + } + c.Wait() + utils.Debugf("container stopped %s", c.ID) }() } } -- cgit v1.2.1 From 6b7cfc9e95af8510abc801a02cda7879e4f82518 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 1 Apr 2014 07:27:34 +0000 Subject: Update test to reallocate port Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/networkdriver/portallocator/portallocator_test.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/runtime/networkdriver/portallocator/portallocator_test.go b/runtime/networkdriver/portallocator/portallocator_test.go index 8b4062c37c..5a4765ddd4 100644 --- a/runtime/networkdriver/portallocator/portallocator_test.go +++ b/runtime/networkdriver/portallocator/portallocator_test.go @@ -118,6 +118,19 @@ func TestAllocateAllPorts(t *testing.T) { if err != nil { t.Fatal(err) } + + // release a port in the middle and ensure we get another tcp port + port := BeginPortRange + 5 + if err := ReleasePort(defaultIP, "tcp", port); err != nil { + t.Fatal(err) + } + newPort, err := 
RequestPort(defaultIP, "tcp", 0) + if err != nil { + t.Fatal(err) + } + if newPort != port { + t.Fatalf("Expected port %d got %d", port, newPort) + } } func BenchmarkAllocatePorts(b *testing.B) { -- cgit v1.2.1 From ac9b06ae95f1da8407934036ab1e4019a96a6b21 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 1 Apr 2014 08:18:52 +0000 Subject: Update sig message Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- server/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/server.go b/server/server.go index 278cab2b2a..65dbcca47b 100644 --- a/server/server.go +++ b/server/server.go @@ -54,7 +54,7 @@ func InitServer(job *engine.Job) engine.Status { gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) go func() { sig := <-c - log.Printf("Received signal '%v', exiting\n", sig) + log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) utils.RemovePidFile(srv.runtime.Config().Pidfile) srv.Close() os.Exit(0) -- cgit v1.2.1 From bd24eb07b6c3a9448d8b4a8b3ab0d9cd60995aaa Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Fri, 28 Mar 2014 18:40:16 -0700 Subject: Add $BINDIR to allow mounting the whole sources if needed (for development) Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- Makefile | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b3bea8a31f..717e73cb1a 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,12 @@ GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) DOCKER_IMAGE := docker:$(GIT_BRANCH) DOCKER_DOCS_IMAGE := docker-docs:$(GIT_BRANCH) -DOCKER_RUN_DOCKER := docker run --rm -i -t --privileged -e TESTFLAGS -v "$(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles" "$(DOCKER_IMAGE)" + +# to allow `make BINDDIR=. shell` +BINDDIR := bundles + +DOCKER_RUN_DOCKER := docker run --rm -i -t --privileged -e TESTFLAGS -v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)" "$(DOCKER_IMAGE)" + default: binary -- cgit v1.2.1 From 4cb602afa0a905ceb0cccf49fe142c1c7b62087b Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 31 Mar 2014 20:22:28 -0600 Subject: Allow "SIG" prefix on signal names in `docker kill` ("SIGKILL", etc) This way, we can use both `docker kill -s INT some_container` and `docker kill -s SIGINT some_container` and both will do nice things for us. 
:) Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- integration/commands_test.go | 9 +++++++-- server/server.go | 4 ++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/integration/commands_test.go b/integration/commands_test.go index 2dc0ff384a..15bb61b49c 100644 --- a/integration/commands_test.go +++ b/integration/commands_test.go @@ -1129,8 +1129,13 @@ func TestCmdKill(t *testing.T) { }) setTimeout(t, "SIGUSR2 timed out", 2*time.Second, func() { - for i := 0; i < 10; i++ { - if err := cli2.CmdKill("--signal=USR2", container.ID); err != nil { + for i := 0; i < 20; i++ { + sig := "USR2" + if i%2 != 0 { + // Swap to testing "SIGUSR2" for every odd iteration + sig = "SIGUSR2" + } + if err := cli2.CmdKill("--signal="+sig, container.ID); err != nil { t.Fatal(err) } if err := expectPipe("SIGUSR2", stdout); err != nil { diff --git a/server/server.go b/server/server.go index 2cb3328d55..d689c0304c 100644 --- a/server/server.go +++ b/server/server.go @@ -144,6 +144,10 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { if err != nil { // The signal is not a number, treat it as a string sig = uint64(signal.SignalMap[job.Args[1]]) + if sig == 0 && strings.HasPrefix(job.Args[1], "SIG") { + // If signal is prefixed with SIG, try with it stripped (ie, "SIGKILL", etc) + sig = uint64(signal.SignalMap[job.Args[1][3:]]) + } if sig == 0 { return job.Errorf("Invalid signal: %s", job.Args[1]) } -- cgit v1.2.1 From a03f83e3370f3859ebff172d504cc29817863b17 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Sat, 29 Mar 2014 22:12:34 -0700 Subject: Do not error when trying to start a started container Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- runtime/container.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/container.go b/runtime/container.go index 656e9ae587..ca7b23e62b 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -426,7 +426,7 @@ func (container *Container) Start() (err error) { defer container.Unlock() if container.State.IsRunning() { - return fmt.Errorf("The container %s is already running.", container.ID) + return nil } defer func() { -- cgit v1.2.1 From cff5f0357ea35245ac906a0326863ac1f8c47c61 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Sat, 29 Mar 2014 22:12:43 -0700 Subject: Minor cleanup Docker-DCO-1.1-Signed-off-by: Guillaume J. 
Charmes (github: creack) --- api/client/commands.go | 14 +++++++++----- server/server.go | 8 +++++--- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 49cd07700f..01fc9a9106 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -549,9 +549,11 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { } func (cli *DockerCli) CmdStart(args ...string) error { - cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") - attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process") - openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin") + var ( + cmd = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container") + attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process") + openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin") + ) if err := cmd.Parse(args); err != nil { return nil } @@ -560,8 +562,10 @@ func (cli *DockerCli) CmdStart(args ...string) error { return nil } - var cErr chan error - var tty bool + var ( + cErr chan error + tty bool + ) if *attach || *openStdin { if cmd.NArg() > 1 { return fmt.Errorf("You cannot start and attach multiple containers at once.") diff --git a/server/server.go b/server/server.go index 2cb3328d55..af9976b4e2 100644 --- a/server/server.go +++ b/server/server.go @@ -2064,9 +2064,11 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status { if len(job.Args) < 1 { return job.Errorf("Usage: %s container_id", job.Name) } - name := job.Args[0] - runtime := srv.runtime - container := runtime.Get(name) + var ( + name = job.Args[0] + runtime = srv.runtime + container = runtime.Get(name) + ) if container == nil { return job.Errorf("No such container: %s", name) -- cgit v1.2.1 From 7a50f03fa69fb81c27d361333fadbc9cccdd8948 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Sun, 30 Mar 2014 16:00:04 -0700 Subject: Update test to be consistent Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- integration/container_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration/container_test.go b/integration/container_test.go index 8ed5525c72..d3d35734ed 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -350,7 +350,7 @@ func TestStart(t *testing.T) { if !container.State.IsRunning() { t.Errorf("Container should be running") } - if err := container.Start(); err == nil { + if err := container.Start(); err != nil { t.Fatalf("A running container should be able to be started") } @@ -385,7 +385,7 @@ func TestCpuShares(t *testing.T) { if !container.State.IsRunning() { t.Errorf("Container should be running") } - if err := container.Start(); err == nil { + if err := container.Start(); err != nil { t.Fatalf("A running container should be able to be started") } -- cgit v1.2.1 From f9b8161c60f58d383ca0eaf5a99865b83e4a41b8 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Tue, 1 Apr 2014 09:24:24 -0400 Subject: Remove hard coding of SELinux labels on systems without proper selinux policy. If a system is configured for SELinux but does not know about docker or containers, then we want the transitions of the policy to work. 
Hard coding the labels causes docker to break on older Fedora and RHEL systems Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) --- pkg/selinux/selinux.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/selinux/selinux.go b/pkg/selinux/selinux.go index 5236d3fb87..6453f37ea9 100644 --- a/pkg/selinux/selinux.go +++ b/pkg/selinux/selinux.go @@ -313,12 +313,9 @@ func GetLxcContexts() (processLabel string, fileLabel string) { return "", "" } lxcPath := fmt.Sprintf("%s/content/lxc_contexts", GetSELinuxPolicyRoot()) - fileLabel = "system_u:object_r:svirt_sandbox_file_t:s0" - processLabel = "system_u:system_r:svirt_lxc_net_t:s0" - in, err := os.Open(lxcPath) if err != nil { - goto exit + return "", "" } defer in.Close() @@ -352,6 +349,11 @@ func GetLxcContexts() (processLabel string, fileLabel string) { } } } + + if processLabel == "" || fileLabel == "" { + return "", "" + } + exit: mcs := IntToMcs(os.Getpid(), 1024) scon := NewContext(processLabel) -- cgit v1.2.1 From 2224e0d65adfbd08e53430a1d7c750491f788257 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Tue, 1 Apr 2014 10:03:29 -0400 Subject: In certain cases, setting the process label will not happen. When the code attempts to set the ProcessLabel, it checks if SELinux is enabled. We have seen a case with some of our patches where the code is fooled by the container to think that SELinux is not enabled. Calling label.Init before setting up the rest of the container, tells the library that SELinux is enabled and everything works fine. Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) --- pkg/label/label.go | 3 +++ pkg/label/label_selinux.go | 4 ++++ pkg/libcontainer/nsinit/init.go | 2 ++ 3 files changed, 9 insertions(+) diff --git a/pkg/label/label.go b/pkg/label/label.go index ba1e9f48ea..be0d0ae079 100644 --- a/pkg/label/label.go +++ b/pkg/label/label.go @@ -21,3 +21,6 @@ func SetFileLabel(path string, fileLabel string) error { func GetPidCon(pid int) (string, error) { return "", nil } + +func Init() { +} diff --git a/pkg/label/label_selinux.go b/pkg/label/label_selinux.go index 300a8b6d14..64a1720996 100644 --- a/pkg/label/label_selinux.go +++ b/pkg/label/label_selinux.go @@ -67,3 +67,7 @@ func SetFileLabel(path string, fileLabel string) error { func GetPidCon(pid int) (string, error) { return selinux.Getpidcon(pid) } + +func Init() { + selinux.SelinuxEnabled() +} diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 5aa5f9f5b5..e5d69f5453 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -58,6 +58,8 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol if err := system.ParentDeathSignal(uintptr(syscall.SIGTERM)); err != nil { return fmt.Errorf("parent death signal %s", err) } + + label.Init() ns.logger.Println("setup mount namespace") if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot, container.Context["mount_label"]); err != nil { return fmt.Errorf("setup mount namespace %s", err) } -- cgit v1.2.1 From b8d660d946c5f0be3a4f01867b11f26a4f303293 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Tue, 1 Apr 2014 11:17:54 -0700 Subject: Update docs. Make PULL up to date, remove deprecated flag and update PUSH Docker-DCO-1.1-Signed-off-by: Guillaume J.
Charmes (github: creack) --- docs/sources/reference/commandline/cli.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 0c9db138c2..324b84b0ae 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1007,12 +1007,10 @@ The last container is marked as a ``Ghost`` container. It is a container that wa :: - Usage: docker pull NAME + Usage: docker pull NAME[:TAG] Pull an image or a repository from the registry - -t, --tag="": Download tagged image in repository - .. _cli_push: @@ -1021,7 +1019,7 @@ The last container is marked as a ``Ghost`` container. It is a container that wa :: - Usage: docker push NAME + Usage: docker push NAME[:TAG] Push an image or a repository to the registry -- cgit v1.2.1 From 4bf70317ed3c2467f444eac9b7865b170da6366c Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Tue, 1 Apr 2014 13:33:46 -0600 Subject: Simplify the kill "SIG" prefix stripping code Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- server/server.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/server/server.go b/server/server.go index bcaaad742b..96a37527c0 100644 --- a/server/server.go +++ b/server/server.go @@ -142,12 +142,8 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status { // The largest legal signal is 31, so let's parse on 5 bits sig, err = strconv.ParseUint(job.Args[1], 10, 5) if err != nil { - // The signal is not a number, treat it as a string - sig = uint64(signal.SignalMap[job.Args[1]]) - if sig == 0 && strings.HasPrefix(job.Args[1], "SIG") { - // If signal is prefixed with SIG, try with it stripped (ie, "SIGKILL", etc) - sig = uint64(signal.SignalMap[job.Args[1][3:]]) - } + // The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL") + sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")]) if sig == 0 { return job.Errorf("Invalid signal: %s", job.Args[1]) } -- cgit v1.2.1 From dcf2b72f5b6732a4b9b1897cb2b3f7019e3d547e Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 1 Apr 2014 21:07:40 +0000 Subject: add test Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- integration-cli/docker_cli_diff_test.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go index 5f8ba74161..0ae9cca38d 100644 --- a/integration-cli/docker_cli_diff_test.go +++ b/integration-cli/docker_cli_diff_test.go @@ -64,3 +64,28 @@ func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { logDone("diff - check if ignored files show up in diff") } + +func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep 0") + cid, _, err := runCommandWithOutput(runCmd) + errorOut(err, t, fmt.Sprintf("%s", err)) + cleanCID := stripTrailingCharacters(cid) + + diffCmd := exec.Command(dockerBinary, "diff", cleanCID) + out, _, err := runCommandWithOutput(diffCmd) + errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) + go deleteContainer(cleanCID) + + expected := map[string]bool{ + "C /dev": true, + "A /dev/full": true, // busybox + "C /dev/ptmx": true, // libcontainer + "A /dev/kmsg": true, // lxc + } + + for _, line := range strings.Split(out, "\n") { + if line != "" && !expected[line] { + t.Errorf("'%s' is shown in the diff but shouldn't", line) + } + } +} -- cgit v1.2.1 
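The two `docker kill` signal patches in this series (the "SIG" prefix support and the TrimPrefix simplification above) converge on one parsing rule: try the value as a number first, then as a signal name with an optional "SIG" prefix, and reject anything that resolves to 0. The following is a minimal, self-contained Go sketch of that rule, not the merged server code; signalMap here is a hypothetical stand-in for the real pkg/signal.SignalMap.

package main

import (
	"fmt"
	"strconv"
	"strings"
	"syscall"
)

// signalMap is a trimmed, illustrative stand-in for pkg/signal.SignalMap.
var signalMap = map[string]syscall.Signal{
	"INT":  syscall.SIGINT,
	"TERM": syscall.SIGTERM,
	"KILL": syscall.SIGKILL,
	"USR2": syscall.SIGUSR2,
}

// parseSignal resolves a value passed to `docker kill -s`: numeric first,
// then a name, accepting both "KILL" and "SIGKILL" as equivalent.
func parseSignal(raw string) (uint64, error) {
	// The largest legal signal is 31, so parse on 5 bits.
	if sig, err := strconv.ParseUint(raw, 10, 5); err == nil {
		return sig, nil
	}
	sig := uint64(signalMap[strings.TrimPrefix(raw, "SIG")])
	if sig == 0 {
		return 0, fmt.Errorf("Invalid signal: %s", raw)
	}
	return sig, nil
}

func main() {
	for _, s := range []string{"9", "KILL", "SIGKILL", "BOGUS"} {
		sig, err := parseSignal(s)
		fmt.Println(s, "=>", sig, err)
	}
}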
From 026aebdebbaa05eab25134949ed5d5bda655ba67 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Tue, 1 Apr 2014 14:17:31 -0700 Subject: Change ownership to root for ADD file/directory Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- server/buildfile.go | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/server/buildfile.go b/server/buildfile.go index 5b098ce3e9..b4a860ad4d 100644 --- a/server/buildfile.go +++ b/server/buildfile.go @@ -419,10 +419,22 @@ func (b *buildFile) addContext(container *runtime.Container, orig, dest string, return err } + chownR := func(destPath string, uid, gid int) error { + return filepath.Walk(destPath, func(path string, info os.FileInfo, err error) error { + if err := os.Lchown(path, uid, gid); err != nil { + return err + } + return nil + }) + } + if fi.IsDir() { if err := archive.CopyWithTar(origPath, destPath); err != nil { return err } + if err := chownR(destPath, 0, 0); err != nil { + return err + } return nil } @@ -452,6 +464,10 @@ func (b *buildFile) addContext(container *runtime.Container, orig, dest string, if err := archive.CopyWithTar(origPath, destPath); err != nil { return err } + + if err := chownR(destPath, 0, 0); err != nil { + return err + } return nil } @@ -486,28 +502,36 @@ func (b *buildFile) CmdAdd(args string) error { ) if utils.IsURL(orig) { + // Initiate the download isRemote = true resp, err := utils.Download(orig) if err != nil { return err } + + // Create a tmp dir tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote") if err != nil { return err } + + // Create a tmp file within our tmp dir tmpFileName := path.Join(tmpDirName, "tmp") tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) if err != nil { return err } defer os.RemoveAll(tmpDirName) - if _, err = io.Copy(tmpFile, resp.Body); err != nil { + + // Download and dump result to tmp file + if _, err := io.Copy(tmpFile, resp.Body); err != nil { tmpFile.Close() return err } - origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) tmpFile.Close() + origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName)) + // Process the checksum r, err := archive.Tar(tmpFileName, archive.Uncompressed) if err != nil { -- cgit v1.2.1 From 3ee37f547f4685ab88bfc39517cc18c1911451e5 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Tue, 1 Apr 2014 15:46:52 -0700 Subject: Update Version to not use string anymore Docker-DCO-1.1-Signed-off-by: Guillaume J. 
Charmes (github: creack) --- api/common.go | 7 ++++--- api/server/server.go | 2 +- pkg/version/version.go | 14 +++++++------- pkg/version/version_test.go | 2 +- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/api/common.go b/api/common.go index 7273e5c56d..44bd901379 100644 --- a/api/common.go +++ b/api/common.go @@ -3,15 +3,16 @@ package api import ( "fmt" "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/pkg/version" "github.com/dotcloud/docker/utils" "mime" "strings" ) const ( - APIVERSION = "1.10" - DEFAULTHTTPHOST = "127.0.0.1" - DEFAULTUNIXSOCKET = "/var/run/docker.sock" + APIVERSION version.Version = "1.10" + DEFAULTHTTPHOST = "127.0.0.1" + DEFAULTUNIXSOCKET = "/var/run/docker.sock" ) func ValidateHost(val string) (string, error) { diff --git a/api/server/server.go b/api/server/server.go index 5597d8b92c..93dd2094b6 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -940,7 +940,7 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { userAgent := strings.Split(r.Header.Get("User-Agent"), "/") - if len(userAgent) == 2 && !dockerVersion.Equal(userAgent[1]) { + if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) { utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) } } diff --git a/pkg/version/version.go b/pkg/version/version.go index 3721d64aa8..5ff9d2ed2a 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -7,10 +7,10 @@ import ( type Version string -func (me Version) compareTo(other string) int { +func (me Version) compareTo(other Version) int { var ( meTab = strings.Split(string(me), ".") - otherTab = strings.Split(other, ".") + otherTab = strings.Split(string(other), ".") ) for i, s := range meTab { var meInt, otherInt int @@ -31,22 +31,22 @@ func (me Version) compareTo(other string) int { return 0 } -func (me Version) LessThan(other string) bool { +func (me Version) LessThan(other Version) bool { return me.compareTo(other) == -1 } -func (me Version) LessThanOrEqualTo(other string) bool { +func (me Version) LessThanOrEqualTo(other Version) bool { return me.compareTo(other) <= 0 } -func (me Version) GreaterThan(other string) bool { +func (me Version) GreaterThan(other Version) bool { return me.compareTo(other) == 1 } -func (me Version) GreaterThanOrEqualTo(other string) bool { +func (me Version) GreaterThanOrEqualTo(other Version) bool { return me.compareTo(other) >= 0 } -func (me Version) Equal(other string) bool { +func (me Version) Equal(other Version) bool { return me.compareTo(other) == 0 } diff --git a/pkg/version/version_test.go b/pkg/version/version_test.go index 4bebd0c434..27c0536c2f 100644 --- a/pkg/version/version_test.go +++ b/pkg/version/version_test.go @@ -5,7 +5,7 @@ import ( ) func assertVersion(t *testing.T, a, b string, result int) { - if r := Version(a).compareTo(b); r != result { + if r := Version(a).compareTo(Version(b)); r != result { t.Fatalf("Unexpected version comparison result. 
Found %d, expected %d", r, result) } } -- cgit v1.2.1 From 9c0c4aeda47fcebc4470f7ba786749af2999ec78 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Tue, 1 Apr 2014 17:42:54 -0600 Subject: Add basic initial "check-config" script to contrib Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- contrib/check-config.sh | 102 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100755 contrib/check-config.sh diff --git a/contrib/check-config.sh b/contrib/check-config.sh new file mode 100755 index 0000000000..62606aca04 --- /dev/null +++ b/contrib/check-config.sh @@ -0,0 +1,102 @@ +#!/usr/bin/env bash +set -e + +# bits of this were adapted from lxc-checkconfig +# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in + +: ${CONFIG:=/proc/config.gz} +: ${GREP:=zgrep} + +if [ ! -e "$CONFIG" ]; then + echo >&2 "warning: $CONFIG does not exist, searching other paths for kernel config..." + if [ -e "/boot/config-$(uname -r)" ]; then + CONFIG="/boot/config-$(uname -r)" + elif [ -e '/usr/src/linux/.config' ]; then + CONFIG='/usr/src/linux/.config' + else + echo >&2 "error: cannot find kernel config" + echo >&2 " try running this script again, specifying the kernel config:" + echo >&2 " CONFIG=/path/to/kernel/.config $0" + exit 1 + fi +fi + +is_set() { + $GREP "CONFIG_$1=[y|m]" $CONFIG > /dev/null +} + +color() { + color= + prefix= + if [ "$1" = 'bold' ]; then + prefix='1;' + shift + fi + case "$1" in + green) color='32' ;; + red) color='31' ;; + gray) color='30' ;; + reset) color='' ;; + esac + echo -en '\033['"$prefix$color"m +} + +check_flag() { + if is_set "$1"; then + color green + echo -n enabled + else + color bold red + echo -n missing + fi + color reset +} + +check_flags() { + for flag in "$@"; do + echo "- CONFIG_$flag: $(check_flag "$flag")" + done +} + +echo + +# TODO check that the cgroupfs hierarchy is properly mounted + +echo 'Generally Necessary:' +flags=( + NAMESPACES {NET,PID,IPC,UTS}_NS + DEVPTS_MULTIPLE_INSTANCES + CGROUPS CGROUP_DEVICE + MACVLAN VETH BRIDGE + IP_NF_TARGET_MASQUERADE NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK} + NF_NAT NF_NAT_NEEDED +) +check_flags "${flags[@]}" +echo + +echo 'Optional Features:' +flags=( + MEMCG_SWAP + RESOURCE_COUNTERS +) +check_flags "${flags[@]}" + +echo '- Storage Drivers:' +{ + echo '- "aufs":' + check_flags AUFS_FS | sed 's/^/ /' + if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then + echo " $(color bold gray)(note that some kernels include AUFS patches but not the AUFS_FS flag)$(color reset)" + fi + + echo '- "btrfs":' + check_flags BTRFS_FS | sed 's/^/ /' + + echo '- "devicemapper":' + check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS | sed 's/^/ /' +} | sed 's/^/ /' +echo + +#echo 'Potential Future Features:' +#check_flags USER_NS +#echo -- cgit v1.2.1 From b246fc33ae4f05b5084fed8fc9f1034e36d87d78 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Tue, 1 Apr 2014 17:30:02 -0700 Subject: Add API version to `docker version` Docker-DCO-1.1-Signed-off-by: Guillaume J. 
Charmes (github: creack) --- api/client/commands.go | 4 ++++ integration-cli/docker_cli_version_test.go | 12 +++++++++++- server/server.go | 2 ++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/api/client/commands.go b/api/client/commands.go index eddfad5c30..53b8822d69 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -357,6 +357,7 @@ func (cli *DockerCli) CmdVersion(args ...string) error { if dockerversion.VERSION != "" { fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) } + fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION) fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version()) if dockerversion.GITCOMMIT != "" { fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) @@ -379,6 +380,9 @@ func (cli *DockerCli) CmdVersion(args ...string) error { } out.Close() fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version")) + if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" { + fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion) + } fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) release := utils.GetReleaseVersion() diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go index 8adedd540b..f18d5bede6 100644 --- a/integration-cli/docker_cli_version_test.go +++ b/integration-cli/docker_cli_version_test.go @@ -17,7 +17,17 @@ func TestVersionEnsureSucceeds(t *testing.T) { t.Fatal("failed to execute docker version") } - stringsToCheck := []string{"Client version:", "Go version (client):", "Git commit (client):", "Server version:", "Git commit (server):", "Go version (server):", "Last stable version:"} + stringsToCheck := []string{ + "Client version:", + "Client API version:", + "Go version (client):", + "Git commit (client):", + "Server version:", + "Server API version:", + "Git commit (server):", + "Go version (server):", + "Last stable version:", + } for _, linePrefix := range stringsToCheck { if !strings.Contains(out, linePrefix) { diff --git a/server/server.go b/server/server.go index 65dbcca47b..a47434f73f 100644 --- a/server/server.go +++ b/server/server.go @@ -3,6 +3,7 @@ package server import ( "encoding/json" "fmt" + "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/archive" "github.com/dotcloud/docker/daemonconfig" "github.com/dotcloud/docker/dockerversion" @@ -823,6 +824,7 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { func (srv *Server) DockerVersion(job *engine.Job) engine.Status { v := &engine.Env{} v.Set("Version", dockerversion.VERSION) + v.Set("ApiVersion", api.APIVERSION) v.Set("GitCommit", dockerversion.GITCOMMIT) v.Set("GoVersion", goruntime.Version()) v.Set("Os", goruntime.GOOS) -- cgit v1.2.1 From d7ec39a4cfbee7b68e0c7973fb629da6f54d873c Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Wed, 2 Apr 2014 01:19:26 +0000 Subject: <3 Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- server/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/server.go b/server/server.go index a47434f73f..1c6c561375 100644 --- a/server/server.go +++ b/server/server.go @@ -824,7 +824,7 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { func (srv *Server) DockerVersion(job *engine.Job) engine.Status { v := &engine.Env{} v.Set("Version", dockerversion.VERSION) - v.Set("ApiVersion", api.APIVERSION) + v.SetJson("ApiVersion", 
api.APIVERSION) v.Set("GitCommit", dockerversion.GITCOMMIT) v.Set("GoVersion", goruntime.Version()) v.Set("Os", goruntime.GOOS) -- cgit v1.2.1 From b51fe1783347c1bf679870925a271531a925b7e9 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Tue, 1 Apr 2014 22:43:38 -0600 Subject: Update Makefile with several improvements Especially but not limited to: - make BINDDIR= ... - for when you don't want a bind mount at all - make DOCSPORT=9000 docs - for when you want a not-8000 docs port - when we can't determine a branch name, we don't try to "docker build -t docker: ." anymore - we just "docker build -t docker ." (thus allowing Docker to assume ":latest") Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- Makefile | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 9f8e70b7fe..776d57951f 100644 --- a/Makefile +++ b/Makefile @@ -1,14 +1,17 @@ .PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration test-integration-cli -GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) -DOCKER_IMAGE := docker:$(GIT_BRANCH) -DOCKER_DOCS_IMAGE := docker-docs:$(GIT_BRANCH) - -# to allow `make BINDDIR=. shell` +# to allow `make BINDDIR=. shell` or `make BINDDIR= test` BINDDIR := bundles +# to allow `make DOCSPORT=9000 docs` +DOCSPORT := 8000 -DOCKER_RUN_DOCKER := docker run --rm -i -t --privileged -e TESTFLAGS -v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)" "$(DOCKER_IMAGE)" +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) +DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)") +DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" +DOCKER_RUN_DOCS := docker run --rm -it -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" default: binary @@ -22,10 +25,10 @@ cross: build $(DOCKER_RUN_DOCKER) hack/make.sh binary cross docs: docs-build - docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" + $(DOCKER_RUN_DOCS) docs-shell: docs-build - docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" bash + $(DOCKER_RUN_DOCS) bash test: build $(DOCKER_RUN_DOCKER) hack/make.sh binary test test-integration test-integration-cli -- cgit v1.2.1 From 2b64453adbfdf715542b0b4274ed13e6f2a444da Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Wed, 2 Apr 2014 16:52:07 +1000 Subject: add RHEL6 kernel version, and a 3.8 hint to the binaries doc Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/installation/binaries.rst | 3 +++ docs/sources/installation/rhel.rst | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/docs/sources/installation/binaries.rst b/docs/sources/installation/binaries.rst index ae548e7657..7ba8c596ef 100644 --- a/docs/sources/installation/binaries.rst +++ b/docs/sources/installation/binaries.rst @@ -43,6 +43,9 @@ Check kernel dependencies Docker in daemon mode has specific kernel requirements. For details, check your distribution in :ref:`installation_list`. +In general, a 3.8 Linux kernel (or higher) is preferred, as some of the +prior versions have known issues that are triggered by Docker. + Note that Docker also has a client mode, which can run on virtually any Linux kernel (it even builds on OSX!). 
diff --git a/docs/sources/installation/rhel.rst b/docs/sources/installation/rhel.rst index 7930da6309..151fba6f1f 100644 --- a/docs/sources/installation/rhel.rst +++ b/docs/sources/installation/rhel.rst @@ -22,6 +22,9 @@ for the RHEL distribution. Also note that due to the current Docker limitations, Docker is able to run only on the **64 bit** architecture. +You will need `RHEL 6.5`_ or higher, with a RHEL 6 kernel version 2.6.32-431 or higher +as this has specific kernel fixes to allow Docker to work. + Installation ------------ @@ -78,4 +81,5 @@ If you have any issues - please report them directly in the `Red Hat Bugzilla fo .. _EPEL installation instructions: https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F .. _Red Hat Bugzilla for docker-io component : https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io .. _bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1043676 +.. _RHEL 6.5: https://access.redhat.com/site/articles/3078#RHEL6 -- cgit v1.2.1 From e5394e35c7a8f730ac76d24dee74d769049a0428 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 1 Apr 2014 10:21:53 +0200 Subject: devmapper: Pass info rather than hash to activateDeviceIfNeeded There is no need to look this up again, we have it already in all callers. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- runtime/graphdriver/devmapper/deviceset.go | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go index 731e9dab8b..b323627ac2 100644 --- a/runtime/graphdriver/devmapper/deviceset.go +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -235,12 +235,8 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev return info, nil } -func (devices *DeviceSet) activateDeviceIfNeeded(hash string) error { - utils.Debugf("activateDeviceIfNeeded(%v)", hash) - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) - } +func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error { + utils.Debugf("activateDeviceIfNeeded(%v)", info.Hash) if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { return nil @@ -343,7 +339,7 @@ func (devices *DeviceSet) setupBaseImage() error { utils.Debugf("Creating filesystem on base device-manager snapshot") - if err = devices.activateDeviceIfNeeded(""); err != nil { + if err = devices.activateDeviceIfNeeded(info); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } @@ -605,7 +601,7 @@ func (devices *DeviceSet) deleteDevice(hash string) error { // This is a workaround for the kernel not discarding block so // on the thin pool when we remove a thinp device, so we do it // manually - if err := devices.activateDeviceIfNeeded(hash); err == nil { + if err := devices.activateDeviceIfNeeded(info); err == nil { if err := BlockDeviceDiscard(info.DevName()); err != nil { utils.Debugf("Error discarding block on device: %s (ignoring)\n", err) } @@ -858,7 +854,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro return nil } - if err := devices.activateDeviceIfNeeded(hash); err != nil { + if err := devices.activateDeviceIfNeeded(info); err != nil { return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) } @@ -1028,7 +1024,7 @@ func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { TransactionId: info.TransactionId, } - if err := 
devices.activateDeviceIfNeeded(hash); err != nil { + if err := devices.activateDeviceIfNeeded(info); err != nil { return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err) } -- cgit v1.2.1 From 8e39b35c7cd02bbb644b7faf2a434de0098e6dea Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 1 Apr 2014 10:24:26 +0200 Subject: devmapper: Pass info rather than hash to deleteDevice All the callers already have the info, no need for an extra lookup. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- runtime/graphdriver/devmapper/deviceset.go | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go index b323627ac2..c74db036d2 100644 --- a/runtime/graphdriver/devmapper/deviceset.go +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -313,7 +313,7 @@ func (devices *DeviceSet) setupBaseImage() error { if oldInfo != nil && !oldInfo.Initialized { utils.Debugf("Removing uninitialized base image") - if err := devices.deleteDevice(""); err != nil { + if err := devices.deleteDevice(oldInfo); err != nil { utils.Debugf("\n--->Err: %s\n", err) return err } @@ -592,12 +592,7 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { return nil } -func (devices *DeviceSet) deleteDevice(hash string) error { - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("hash %s doesn't exists", hash) - } - +func (devices *DeviceSet) deleteDevice(info *DevInfo) error { // This is a workaround for the kernel not discarding block so // on the thin pool when we remove a thinp device, so we do it // manually @@ -652,7 +647,7 @@ func (devices *DeviceSet) DeleteDevice(hash string) error { info.lock.Lock() defer info.lock.Unlock() - return devices.deleteDevice(hash) + return devices.deleteDevice(info) } func (devices *DeviceSet) deactivatePool() error { -- cgit v1.2.1 From 5955846774c9b43291d6de0584fa8c3f62414c43 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 1 Apr 2014 10:31:34 +0200 Subject: devmapper: Pass info rather than hash to deactivateDevice() We already have the info in most cases, no need to look this up multiple times. 
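This whole series follows one pattern: resolve the hash to a *DevInfo once at the exported entry point, then hand the struct to the internal helpers. A minimal, self-contained sketch of that pattern with simplified, illustrative names (a lookupDevice helper much like this one is in fact added a couple of commits later), not the actual deviceset.go code:

    package devmapper

    import "fmt"

    // Simplified stand-ins for the real types, for illustration only.
    type devInfo struct {
        Hash string
    }

    type deviceSet struct {
        devices map[string]*devInfo
    }

    // The hash is resolved exactly once, at the exported entry point.
    func (ds *deviceSet) lookupDevice(hash string) (*devInfo, error) {
        info := ds.devices[hash]
        if info == nil {
            return nil, fmt.Errorf("Unknown device %s", hash)
        }
        return info, nil
    }

    // Internal helpers take the already-resolved struct instead of repeating
    // the map lookup (and its error handling) on every call.
    func (ds *deviceSet) deactivateDevice(info *devInfo) error {
        // ... operate on info.Hash directly ...
        return nil
    }

    func (ds *deviceSet) DeactivateDevice(hash string) error {
        info, err := ds.lookupDevice(hash)
        if err != nil {
            return err
        }
        return ds.deactivateDevice(info)
    }

Exported wrappers such as DeleteDevice, MountDevice and GetDeviceStatus end up as thin shells around the one-time lookup plus the corresponding internal function.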
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- runtime/graphdriver/devmapper/deviceset.go | 33 +++++++++++++----------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go index c74db036d2..fa6b259b63 100644 --- a/runtime/graphdriver/devmapper/deviceset.go +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -666,20 +666,16 @@ func (devices *DeviceSet) deactivatePool() error { return nil } -func (devices *DeviceSet) deactivateDevice(hash string) error { - utils.Debugf("[devmapper] deactivateDevice(%s)", hash) +func (devices *DeviceSet) deactivateDevice(info *DevInfo) error { + utils.Debugf("[devmapper] deactivateDevice(%s)", info.Hash) defer utils.Debugf("[devmapper] deactivateDevice END") // Wait for the unmount to be effective, // by watching the value of Info.OpenCount for the device - if err := devices.waitClose(hash); err != nil { - utils.Errorf("Warning: error waiting for device %s to close: %s\n", hash, err) + if err := devices.waitClose(info); err != nil { + utils.Errorf("Warning: error waiting for device %s to close: %s\n", info.Hash, err) } - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) - } devinfo, err := getInfo(info.Name()) if err != nil { utils.Debugf("\n--->Err: %s\n", err) @@ -760,11 +756,7 @@ func (devices *DeviceSet) waitRemove(devname string) error { // waitClose blocks until either: // a) the device registered at - is closed, // or b) the 10 second timeout expires. -func (devices *DeviceSet) waitClose(hash string) error { - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) - } +func (devices *DeviceSet) waitClose(info *DevInfo) error { i := 0 for ; i < 1000; i += 1 { devinfo, err := getInfo(info.Name()) @@ -772,7 +764,7 @@ func (devices *DeviceSet) waitClose(hash string) error { return err } if i%100 == 0 { - utils.Debugf("Waiting for unmount of %s: opencount=%d", hash, devinfo.OpenCount) + utils.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount) } if devinfo.OpenCount == 0 { break @@ -782,7 +774,7 @@ func (devices *DeviceSet) waitClose(hash string) error { devices.Lock() } if i == 1000 { - return fmt.Errorf("Timeout while waiting for device %s to close", hash) + return fmt.Errorf("Timeout while waiting for device %s to close", info.Hash) } return nil } @@ -805,15 +797,18 @@ func (devices *DeviceSet) Shutdown() error { utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err) } - if err := devices.deactivateDevice(info.Hash); err != nil { + if err := devices.deactivateDevice(info); err != nil { utils.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, err) } } info.lock.Unlock() } - if err := devices.deactivateDevice(""); err != nil { - utils.Debugf("Shutdown deactivate base , error: %s\n", err) + info := devices.Devices[""] + if info != nil { + if err := devices.deactivateDevice(info); err != nil { + utils.Debugf("Shutdown deactivate base , error: %s\n", err) + } } if err := devices.deactivatePool(); err != nil { @@ -920,7 +915,7 @@ func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { } utils.Debugf("[devmapper] Unmount done") - if err := devices.deactivateDevice(hash); err != nil { + if err := devices.deactivateDevice(info); err != nil { return err } -- cgit v1.2.1 From 74edcaf1e84aa8bf35e496b2bead833172a79fca Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 
1 Apr 2014 10:34:44 +0200 Subject: devmapper: Pass info rather than hash to setInitialized We already have this at the caller, no need to look up again. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- runtime/graphdriver/devmapper/deviceset.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go index fa6b259b63..12407af1b2 100644 --- a/runtime/graphdriver/devmapper/deviceset.go +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -864,7 +864,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro info.mountPath = path info.floating = false - return devices.setInitialized(hash) + return devices.setInitialized(info) } func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { @@ -955,12 +955,7 @@ func (devices *DeviceSet) HasActivatedDevice(hash string) bool { return devinfo != nil && devinfo.Exists != 0 } -func (devices *DeviceSet) setInitialized(hash string) error { - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) - } - +func (devices *DeviceSet) setInitialized(info *DevInfo) error { info.Initialized = true if err := devices.saveMetadata(); err != nil { info.Initialized = false -- cgit v1.2.1 From e01b71cebeb96755641a18762dea5b843f107bee Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 1 Apr 2014 10:45:40 +0200 Subject: devmapper: Add lookupDevice() helper This centralizes the lookup of devices so it is only done in one place. This will be needed later when we change the locking for it. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- runtime/graphdriver/devmapper/deviceset.go | 55 +++++++++++++++++------------- 1 file changed, 32 insertions(+), 23 deletions(-) diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go index 12407af1b2..4faf997002 100644 --- a/runtime/graphdriver/devmapper/deviceset.go +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -214,6 +214,14 @@ func (devices *DeviceSet) saveMetadata() error { return nil } +func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { + info := devices.Devices[hash] + if info == nil { + return nil, fmt.Errorf("Unknown device %s", hash) + } + return info, nil +} + func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) { utils.Debugf("registerDevice(%v, %v)", id, hash) info := &DevInfo{ @@ -306,7 +314,7 @@ func (devices *DeviceSet) loadMetaData() error { } func (devices *DeviceSet) setupBaseImage() error { - oldInfo := devices.Devices[""] + oldInfo, _ := devices.lookupDevice("") if oldInfo != nil && oldInfo.Initialized { return nil } @@ -565,13 +573,13 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { devices.Lock() defer devices.Unlock() - if devices.Devices[hash] != nil { - return fmt.Errorf("hash %s already exists", hash) + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("device %s already exists", hash) } - baseInfo := devices.Devices[baseHash] - if baseInfo == nil { - return fmt.Errorf("Error adding device for '%s': can't find device for parent '%s'", hash, baseHash) + baseInfo, err := devices.lookupDevice(baseHash) + if err != nil { + return err } baseInfo.lock.Lock() @@ -639,9 +647,9 @@ func (devices *DeviceSet) DeleteDevice(hash string) error { devices.Lock() defer devices.Unlock() - info := devices.Devices[hash] - if info 
== nil { - return fmt.Errorf("Unknown device %s", hash) + info, err := devices.lookupDevice(hash) + if err != nil { + return err } info.lock.Lock() @@ -804,7 +812,7 @@ func (devices *DeviceSet) Shutdown() error { info.lock.Unlock() } - info := devices.Devices[""] + info, _ := devices.lookupDevice("") if info != nil { if err := devices.deactivateDevice(info); err != nil { utils.Debugf("Shutdown deactivate base , error: %s\n", err) @@ -822,9 +830,9 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro devices.Lock() defer devices.Unlock() - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("Unknown device %s", hash) + info, err := devices.lookupDevice(hash) + if err != nil { + return err } info.lock.Lock() @@ -851,7 +859,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro var flags uintptr = sysMsMgcVal mountOptions := label.FormatMountLabel("discard", mountLabel) - err := sysMount(info.DevName(), path, "ext4", flags, mountOptions) + err = sysMount(info.DevName(), path, "ext4", flags, mountOptions) if err != nil && err == sysEInval { mountOptions = label.FormatMountLabel(mountLabel, "") err = sysMount(info.DevName(), path, "ext4", flags, mountOptions) @@ -873,9 +881,9 @@ func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { devices.Lock() defer devices.Unlock() - info := devices.Devices[hash] - if info == nil { - return fmt.Errorf("UnmountDevice: no such device %s\n", hash) + info, err := devices.lookupDevice(hash) + if err != nil { + return err } info.lock.Lock() @@ -928,14 +936,15 @@ func (devices *DeviceSet) HasDevice(hash string) bool { devices.Lock() defer devices.Unlock() - return devices.Devices[hash] != nil + info, _ := devices.lookupDevice(hash) + return info != nil } func (devices *DeviceSet) HasInitializedDevice(hash string) bool { devices.Lock() defer devices.Unlock() - info := devices.Devices[hash] + info, _ := devices.lookupDevice(hash) return info != nil && info.Initialized } @@ -943,7 +952,7 @@ func (devices *DeviceSet) HasActivatedDevice(hash string) bool { devices.Lock() defer devices.Unlock() - info := devices.Devices[hash] + info, _ := devices.lookupDevice(hash) if info == nil { return false } @@ -995,9 +1004,9 @@ func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { devices.Lock() defer devices.Unlock() - info := devices.Devices[hash] - if info == nil { - return nil, fmt.Errorf("No device %s", hash) + info, err := devices.lookupDevice(hash) + if err != nil { + return nil, err } info.lock.Lock() -- cgit v1.2.1 From 70826e8b3fee27b971852aad89053507c6866d3e Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 1 Apr 2014 11:05:30 +0200 Subject: devmapper: Add lock to protext Devices map Currently access to the Devices map is serialized by the main DeviceSet lock, but we need to access it outside that lock, so we add a separate lock for this and grab that everywhere we modify or read the map. 
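A rough sketch of the locking split this change introduces (field names simplified, not a verbatim copy of deviceset.go): the map gets its own small mutex, so lookups and registrations no longer require holding the wider DeviceSet lock.

    package devmapper

    import "sync"

    type devInfo struct {
        Hash string
    }

    type deviceSet struct {
        sync.Mutex                      // global DeviceSet lock, held across whole operations
        devicesLock sync.Mutex          // protects only reads and writes of the devices map
        devices     map[string]*devInfo
    }

    // lookupDevice can now be called without the global lock.
    func (ds *deviceSet) lookupDevice(hash string) *devInfo {
        ds.devicesLock.Lock()
        defer ds.devicesLock.Unlock()
        return ds.devices[hash]
    }

    func (ds *deviceSet) registerDevice(info *devInfo) {
        ds.devicesLock.Lock()
        ds.devices[info.Hash] = info
        ds.devicesLock.Unlock()
    }

Iteration (as in Shutdown below) copies the entries into a slice under devicesLock and releases it before doing the slow per-device work.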
Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- runtime/graphdriver/devmapper/deviceset.go | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go index 4faf997002..f972c34bb1 100644 --- a/runtime/graphdriver/devmapper/deviceset.go +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -51,7 +51,8 @@ type DevInfo struct { } type MetaData struct { - Devices map[string]*DevInfo `json:devices` + Devices map[string]*DevInfo `json:devices` + devicesLock sync.Mutex `json:"-"` // Protects all read/writes to Devices map } type DeviceSet struct { @@ -179,7 +180,9 @@ func (devices *DeviceSet) allocateTransactionId() uint64 { } func (devices *DeviceSet) saveMetadata() error { + devices.devicesLock.Lock() jsonData, err := json.Marshal(devices.MetaData) + devices.devicesLock.Unlock() if err != nil { return fmt.Errorf("Error encoding metadata to json: %s", err) } @@ -215,6 +218,8 @@ func (devices *DeviceSet) saveMetadata() error { } func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) { + devices.devicesLock.Lock() + defer devices.devicesLock.Unlock() info := devices.Devices[hash] if info == nil { return nil, fmt.Errorf("Unknown device %s", hash) @@ -233,10 +238,15 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev devices: devices, } + devices.devicesLock.Lock() devices.Devices[hash] = info + devices.devicesLock.Unlock() + if err := devices.saveMetadata(); err != nil { // Try to remove unused device + devices.devicesLock.Lock() delete(devices.Devices, hash) + devices.devicesLock.Unlock() return nil, err } @@ -632,10 +642,14 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { } devices.allocateTransactionId() + devices.devicesLock.Lock() delete(devices.Devices, info.Hash) + devices.devicesLock.Unlock() if err := devices.saveMetadata(); err != nil { + devices.devicesLock.Lock() devices.Devices[info.Hash] = info + devices.devicesLock.Unlock() utils.Debugf("Error saving meta data: %s\n", err) return err } @@ -795,7 +809,15 @@ func (devices *DeviceSet) Shutdown() error { utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix) + var devs []*DevInfo + + devices.devicesLock.Lock() for _, info := range devices.Devices { + devs = append(devs, info) + } + devices.devicesLock.Unlock() + + for _, info := range devs { info.lock.Lock() if info.mountCount > 0 { // We use MNT_DETACH here in case it is still busy in some running @@ -979,12 +1001,15 @@ func (devices *DeviceSet) List() []string { devices.Lock() defer devices.Unlock() + devices.devicesLock.Lock() ids := make([]string, len(devices.Devices)) i := 0 for k := range devices.Devices { ids[i] = k i++ } + devices.devicesLock.Unlock() + return ids } -- cgit v1.2.1 From 2ffef1b7eb618162673c6ffabccb9ca57c7dfce3 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Tue, 1 Apr 2014 11:28:21 +0200 Subject: devmapper: Avoid AB-BA deadlock We currently drop the global lock while holding a per-device lock when waiting for device removal, and then we re-aquire it when the sleep is done. 
This is causing a AB-BA deadlock if anyone at the same time tries to do any operation on that device like this: thread A: thread B grabs global lock grabs device lock releases global lock sleeps grabs global lock blocks on device lock wakes up blocks on global lock To trigger this you can for instance do: ID=`docker run -d fedora sleep 5` cd /var/lib/docker/devicemapper/mnt/$ID docker wait $ID docker rm $ID & docker rm $ID The unmount will fail due to the mount being busy thus causing the timeout and the second rm will then trigger the deadlock. We fix this by adding a lock ordering such that the device locks are always grabbed before the global lock. This is safe since the device lookups now have a separate lock. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- runtime/graphdriver/devmapper/deviceset.go | 58 ++++++++++++++++++------------ 1 file changed, 35 insertions(+), 23 deletions(-) diff --git a/runtime/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go index f972c34bb1..97d670a3d9 100644 --- a/runtime/graphdriver/devmapper/deviceset.go +++ b/runtime/graphdriver/devmapper/deviceset.go @@ -47,6 +47,11 @@ type DevInfo struct { // sometimes release that lock while sleeping. In that case // this per-device lock is still held, protecting against // other accesses to the device that we're doing the wait on. + // + // WARNING: In order to avoid AB-BA deadlocks when releasing + // the global lock while holding the per-device locks all + // device locks must be aquired *before* the device lock, and + // multiple device locks should be aquired parent before child. lock sync.Mutex `json:"-"` } @@ -580,13 +585,6 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { } func (devices *DeviceSet) AddDevice(hash, baseHash string) error { - devices.Lock() - defer devices.Unlock() - - if info, _ := devices.lookupDevice(hash); info != nil { - return fmt.Errorf("device %s already exists", hash) - } - baseInfo, err := devices.lookupDevice(baseHash) if err != nil { return err @@ -595,6 +593,13 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error { baseInfo.lock.Lock() defer baseInfo.lock.Unlock() + devices.Lock() + defer devices.Unlock() + + if info, _ := devices.lookupDevice(hash); info != nil { + return fmt.Errorf("device %s already exists", hash) + } + deviceId := devices.allocateDeviceId() if err := devices.createSnapDevice(devices.getPoolDevName(), deviceId, baseInfo.Name(), baseInfo.DeviceId); err != nil { @@ -658,9 +663,6 @@ func (devices *DeviceSet) deleteDevice(info *DevInfo) error { } func (devices *DeviceSet) DeleteDevice(hash string) error { - devices.Lock() - defer devices.Unlock() - info, err := devices.lookupDevice(hash) if err != nil { return err @@ -669,6 +671,9 @@ func (devices *DeviceSet) DeleteDevice(hash string) error { info.lock.Lock() defer info.lock.Unlock() + devices.Lock() + defer devices.Unlock() + return devices.deleteDevice(info) } @@ -802,8 +807,6 @@ func (devices *DeviceSet) waitClose(info *DevInfo) error { } func (devices *DeviceSet) Shutdown() error { - devices.Lock() - defer devices.Unlock() utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix) utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root) @@ -827,31 +830,36 @@ func (devices *DeviceSet) Shutdown() error { utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err) } + devices.Lock() if err := devices.deactivateDevice(info); err != nil { utils.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, 
err) } + devices.Unlock() } info.lock.Unlock() } info, _ := devices.lookupDevice("") if info != nil { + info.lock.Lock() + devices.Lock() if err := devices.deactivateDevice(info); err != nil { utils.Debugf("Shutdown deactivate base , error: %s\n", err) } + devices.Unlock() + info.lock.Unlock() } + devices.Lock() if err := devices.deactivatePool(); err != nil { utils.Debugf("Shutdown deactivate pool , error: %s\n", err) } + devices.Unlock() return nil } func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) error { - devices.Lock() - defer devices.Unlock() - info, err := devices.lookupDevice(hash) if err != nil { return err @@ -860,6 +868,9 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro info.lock.Lock() defer info.lock.Unlock() + devices.Lock() + defer devices.Unlock() + if info.mountCount > 0 { if path != info.mountPath { return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path) @@ -900,8 +911,6 @@ func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) erro func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { utils.Debugf("[devmapper] UnmountDevice(hash=%s, mode=%d)", hash, mode) defer utils.Debugf("[devmapper] UnmountDevice END") - devices.Lock() - defer devices.Unlock() info, err := devices.lookupDevice(hash) if err != nil { @@ -911,6 +920,9 @@ func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error { info.lock.Lock() defer info.lock.Unlock() + devices.Lock() + defer devices.Unlock() + if mode == UnmountFloat { if info.floating { return fmt.Errorf("UnmountDevice: can't float floating reference %s\n", hash) @@ -971,9 +983,6 @@ func (devices *DeviceSet) HasInitializedDevice(hash string) bool { } func (devices *DeviceSet) HasActivatedDevice(hash string) bool { - devices.Lock() - defer devices.Unlock() - info, _ := devices.lookupDevice(hash) if info == nil { return false @@ -982,6 +991,9 @@ func (devices *DeviceSet) HasActivatedDevice(hash string) bool { info.lock.Lock() defer info.lock.Unlock() + devices.Lock() + defer devices.Unlock() + devinfo, _ := getInfo(info.Name()) return devinfo != nil && devinfo.Exists != 0 } @@ -1026,9 +1038,6 @@ func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSec } func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { - devices.Lock() - defer devices.Unlock() - info, err := devices.lookupDevice(hash) if err != nil { return nil, err @@ -1037,6 +1046,9 @@ func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { info.lock.Lock() defer info.lock.Unlock() + devices.Lock() + defer devices.Unlock() + status := &DevStatus{ DeviceId: info.DeviceId, Size: info.Size, -- cgit v1.2.1 From aec989bd0801657efeeb81bafb2c6c61f60de6d4 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Wed, 2 Apr 2014 02:44:12 -0600 Subject: Add more color and cgroupfs hierarchy verification to check-config.sh Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- contrib/check-config.sh | 111 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 75 insertions(+), 36 deletions(-) diff --git a/contrib/check-config.sh b/contrib/check-config.sh index 62606aca04..f225443138 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -7,62 +7,101 @@ set -e : ${CONFIG:=/proc/config.gz} : ${GREP:=zgrep} -if [ ! -e "$CONFIG" ]; then - echo >&2 "warning: $CONFIG does not exist, searching other paths for kernel config..." 
- if [ -e "/boot/config-$(uname -r)" ]; then - CONFIG="/boot/config-$(uname -r)" - elif [ -e '/usr/src/linux/.config' ]; then - CONFIG='/usr/src/linux/.config' - else - echo >&2 "error: cannot find kernel config" - echo >&2 " try running this script again, specifying the kernel config:" - echo >&2 " CONFIG=/path/to/kernel/.config $0" - exit 1 - fi -fi - is_set() { $GREP "CONFIG_$1=[y|m]" $CONFIG > /dev/null } +# see http://en.wikipedia.org/wiki/ANSI_escape_code#Colors +declare -A colors=( + [black]=30 + [red]=31 + [green]=32 + [yellow]=33 + [blue]=34 + [magenta]=35 + [cyan]=36 + [white]=37 +) color() { - color= - prefix= + color=() if [ "$1" = 'bold' ]; then - prefix='1;' + color+=( '1' ) shift fi - case "$1" in - green) color='32' ;; - red) color='31' ;; - gray) color='30' ;; - reset) color='' ;; - esac - echo -en '\033['"$prefix$color"m + if [ $# -gt 0 ] && [ "${colors[$1]}" ]; then + color+=( "${colors[$1]}" ) + fi + local IFS=';' + echo -en '\033['"${color[*]}"m +} +wrap_color() { + text="$1" + shift + color "$@" + echo -n "$text" + color reset + echo +} + +wrap_good() { + echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" +} +wrap_bad() { + echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" +} +wrap_warning() { + wrap_color >&2 "$*" red } check_flag() { if is_set "$1"; then - color green - echo -n enabled + wrap_good "CONFIG_$1" 'enabled' else - color bold red - echo -n missing + wrap_bad "CONFIG_$1" 'missing' fi - color reset } check_flags() { for flag in "$@"; do - echo "- CONFIG_$flag: $(check_flag "$flag")" + echo "- $(check_flag "$flag")" done } -echo +if [ ! -e "$CONFIG" ]; then + wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..." + for tryConfig in \ + '/proc/config.gz' \ + "/boot/config-$(uname -r)" \ + '/usr/src/linux/.config' \ + ; do + if [ -e "$tryConfig" ]; then + CONFIG="$tryConfig" + break + fi + done + if [ ! -e "$CONFIG" ]; then + wrap_warning "error: cannot find kernel config" + wrap_warning " try running this script again, specifying the kernel config:" + wrap_warning " CONFIG=/path/to/kernel/.config $0" + exit 1 + fi +fi -# TODO check that the cgroupfs hierarchy is properly mounted +wrap_color "info: reading kernel config from $CONFIG ..." white +echo echo 'Generally Necessary:' + +echo -n '- ' +cgroupCpuDir="$(awk '/[, ]cpu[, ]/ && $8 == "cgroup" { print $5 }' /proc/$$/mountinfo | head -n1)" +cgroupDir="$(dirname "$cgroupCpuDir")" +if [ -d "$cgroupDir/cpu" ]; then + echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" +else + echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupCpuDir]" + echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" +fi + flags=( NAMESPACES {NET,PID,IPC,UTS}_NS DEVPTS_MULTIPLE_INSTANCES @@ -83,16 +122,16 @@ check_flags "${flags[@]}" echo '- Storage Drivers:' { - echo '- "aufs":' + echo '- "'$(wrap_color 'aufs' blue)'":' check_flags AUFS_FS | sed 's/^/ /' if ! 
is_set AUFS_FS && grep -q aufs /proc/filesystems; then - echo " $(color bold gray)(note that some kernels include AUFS patches but not the AUFS_FS flag)$(color reset)" + echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" fi - echo '- "btrfs":' + echo '- "'$(wrap_color 'btrfs' blue)'":' check_flags BTRFS_FS | sed 's/^/ /' - echo '- "devicemapper":' + echo '- "'$(wrap_color 'devicemapper' blue)'":' check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS | sed 's/^/ /' } | sed 's/^/ /' echo -- cgit v1.2.1 From 3b3f4bf052e442543ec5772875ce7fbc77924596 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Wed, 2 Apr 2014 05:56:11 -0700 Subject: Return correct exit code upon signal + SIGQUIT now quits without cleanup Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- server/server.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/server/server.go b/server/server.go index 1c6c561375..d6fa7a0c2a 100644 --- a/server/server.go +++ b/server/server.go @@ -54,11 +54,16 @@ func InitServer(job *engine.Job) engine.Status { c := make(chan os.Signal, 1) gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) go func() { - sig := <-c - log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) - utils.RemovePidFile(srv.runtime.Config().Pidfile) - srv.Close() - os.Exit(0) + for sig := range c { + log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) + switch sig { + case os.Interrupt, syscall.SIGTERM: + utils.RemovePidFile(srv.runtime.Config().Pidfile) + srv.Close() + case syscall.SIGQUIT: + } + os.Exit(128 + int(sig.(syscall.Signal))) + } }() job.Eng.Hack_SetGlobalVar("httpapi.server", srv) job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime) -- cgit v1.2.1 From f80fd5da09013d7cd25a0f246ffffd7b6c064073 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 2 Apr 2014 13:07:11 +0000 Subject: Fix configuration test for MKNOD Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/execdriver/native/template/default_template.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/runtime/execdriver/native/template/default_template.go b/runtime/execdriver/native/template/default_template.go index 6828812336..a1ecb04d76 100644 --- a/runtime/execdriver/native/template/default_template.go +++ b/runtime/execdriver/native/template/default_template.go @@ -7,7 +7,7 @@ import ( // New returns the docker default configuration for libcontainer func New() *libcontainer.Container { - return &libcontainer.Container{ + container := &libcontainer.Container{ CapabilitiesMask: libcontainer.Capabilities{ libcontainer.GetCapability("SETPCAP"), libcontainer.GetCapability("SYS_MODULE"), @@ -23,6 +23,7 @@ func New() *libcontainer.Container { libcontainer.GetCapability("MAC_OVERRIDE"), libcontainer.GetCapability("MAC_ADMIN"), libcontainer.GetCapability("NET_ADMIN"), + libcontainer.GetCapability("MKNOD"), }, Namespaces: libcontainer.Namespaces{ libcontainer.GetNamespace("NEWNS"), @@ -39,4 +40,6 @@ func New() *libcontainer.Container { "apparmor_profile": "docker-default", }, } + container.CapabilitiesMask.Get("MKNOD").Enabled = true + return container } -- cgit v1.2.1 From 18ef3cc24a933cbf403c2aaf8b374cfc84a722a4 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 2 Apr 2014 13:12:52 +0000 Subject: Remove loopback setup for native driver Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- 
pkg/libcontainer/nsinit/mount.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go index e2975d0a9a..4b5a42b1ac 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/nsinit/mount.go @@ -55,8 +55,6 @@ func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, cons if err := copyDevNodes(rootfs); err != nil { return fmt.Errorf("copy dev nodes %s", err) } - // In non-privileged mode, this fails. Discard the error. - setupLoopbackDevices(rootfs) if err := setupPtmx(rootfs, console, mountLabel); err != nil { return err } @@ -142,19 +140,6 @@ func copyDevNodes(rootfs string) error { return nil } -func setupLoopbackDevices(rootfs string) error { - for i := 0; ; i++ { - if err := copyDevNode(rootfs, fmt.Sprintf("loop%d", i)); err != nil { - if !os.IsNotExist(err) { - return err - } - break - } - - } - return nil -} - func copyDevNode(rootfs, node string) error { stat, err := os.Stat(filepath.Join("/dev", node)) if err != nil { -- cgit v1.2.1 From a9d6eef2386a3d08840e2a30bd8d6f2ae3679688 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 2 Apr 2014 13:22:51 +0000 Subject: Remove racy test causing tests to stall Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- integration-cli/docker_cli_start_test.go | 34 -------------------------------- 1 file changed, 34 deletions(-) delete mode 100644 integration-cli/docker_cli_start_test.go diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go deleted file mode 100644 index c3059a66c4..0000000000 --- a/integration-cli/docker_cli_start_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package main - -import ( - "os/exec" - "testing" -) - -// Regression test for #3364 -func TestDockerStartWithPortCollision(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "--name", "fail", "-p", "25:25", "busybox", "true") - out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) - if err != nil && exitCode != 0 { - t.Fatal(out, stderr, err) - } - - runCmd = exec.Command(dockerBinary, "run", "--name", "conflict", "-dti", "-p", "25:25", "busybox", "sh") - out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) - if err != nil && exitCode != 0 { - t.Fatal(out, stderr, err) - } - - startCmd := exec.Command(dockerBinary, "start", "-a", "fail") - out, stderr, exitCode, err = runCommandWithStdoutStderr(startCmd) - if err != nil && exitCode != 1 { - t.Fatal(out, err) - } - - killCmd := exec.Command(dockerBinary, "kill", "conflict") - runCommand(killCmd) - - deleteAllContainers() - - logDone("start - -a=true error on port use") -} -- cgit v1.2.1 From 63c7941172376e81c5e17206f39d7c78c0e95b69 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 2 Apr 2014 16:00:12 +0300 Subject: docs: explain what docker run -a does This adds a bit of documentation for the `-a` flag for docker run. Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- docs/sources/reference/commandline/cli.rst | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 324b84b0ae..64dff1e1c2 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -1359,6 +1359,35 @@ ID may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in read-only or read-write mode, respectively. 
By default, the volumes are mounted in the same mode (read write or read only) as the reference container. +The ``-a`` flag tells ``docker run`` to bind to the container's stdin, stdout +or stderr. This makes it possible to manipulate the output and input as needed. + +.. code-block:: bash + + $ sudo echo "test" | docker run -i -a stdin ubuntu cat - + +This pipes data into a container and prints the container's ID by attaching +only to the container's stdin. + +.. code-block:: bash + + $ sudo docker run -a stderr ubuntu echo test + +This isn't going to print anything unless there's an error because we've only +attached to the stderr of the container. The container's logs still store +what's been written to stderr and stdout. + +.. code-block:: bash + + $ sudo cat somefile | docker run -i -a stdin mybuilder dobuild + +This is how piping a file into a container could be done for a build. +The container's ID will be printed after the build is done and the build logs +could be retrieved using ``docker logs``. This is useful if you need to pipe +a file or something else into a container and retrieve the container's ID once +the container has finished running. + + A complete example .................. -- cgit v1.2.1 From 32ad78b0430079dcc53c245826a244afa2d9b6b6 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Tue, 1 Apr 2014 09:24:24 -0400 Subject: Remove hard coding of SELinux labels on systems without proper selinux policy. If a system is configured for SELinux but does not know about docker or containers, then we want the transitions of the policy to work. Hard coding the labels causes docker to break on older Fedora and RHEL systems Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) --- pkg/selinux/selinux.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/selinux/selinux.go b/pkg/selinux/selinux.go index 5236d3fb87..5362308617 100644 --- a/pkg/selinux/selinux.go +++ b/pkg/selinux/selinux.go @@ -312,13 +312,10 @@ func GetLxcContexts() (processLabel string, fileLabel string) { if !SelinuxEnabled() { return "", "" } - lxcPath := fmt.Sprintf("%s/content/lxc_contexts", GetSELinuxPolicyRoot()) - fileLabel = "system_u:object_r:svirt_sandbox_file_t:s0" - processLabel = "system_u:system_r:svirt_lxc_net_t:s0" - + lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", GetSELinuxPolicyRoot()) in, err := os.Open(lxcPath) if err != nil { - goto exit + return "", "" } defer in.Close() @@ -352,6 +349,11 @@ func GetLxcContexts() (processLabel string, fileLabel string) { } } } + + if processLabel == "" || fileLabel == "" { + return "", "" + } + exit: mcs := IntToMcs(os.Getpid(), 1024) scon := NewContext(processLabel) -- cgit v1.2.1 From d76ac4d429e474a7c79f7aab396e318f4e176025 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Tue, 1 Apr 2014 10:03:29 -0400 Subject: In certain cases, setting the process label will not happen. When the code attempts to set the ProcessLabel, it checks if SELinux Is enabled. We have seen a case with some of our patches where the code is fooled by the container to think that SELinux is not enabled. Calling label.Init before setting up the rest of the container, tells the library that SELinux is enabled and everything works fine. 
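The early Init() call helps because the enabled check is only computed once per process. A hedged sketch of that caching behaviour, using hypothetical names and an illustrative probe path rather than the real selinux package internals:

    package selinux

    import "os"

    var (
        enabledChecked bool
        enabled        bool
    )

    // SelinuxEnabled probes the host once and caches the answer. If the first
    // call only happens after the container has rearranged its mount namespace,
    // the probe can fail and the cached "disabled" answer then sticks for the
    // lifetime of the process, which is why label.Init() forces the probe early.
    func SelinuxEnabled() bool {
        if enabledChecked {
            return enabled
        }
        enabledChecked = true
        if _, err := os.Stat("/sys/fs/selinux"); err == nil { // probe path is illustrative
            enabled = true
        }
        return enabled
    }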
Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) --- pkg/label/label.go | 3 +++ pkg/label/label_selinux.go | 4 ++++ pkg/libcontainer/nsinit/init.go | 2 ++ 3 files changed, 9 insertions(+) diff --git a/pkg/label/label.go b/pkg/label/label.go index ba1e9f48ea..be0d0ae079 100644 --- a/pkg/label/label.go +++ b/pkg/label/label.go @@ -21,3 +21,6 @@ func SetFileLabel(path string, fileLabel string) error { func GetPidCon(pid int) (string, error) { return "", nil } + +func Init() { +} diff --git a/pkg/label/label_selinux.go b/pkg/label/label_selinux.go index 300a8b6d14..64a1720996 100644 --- a/pkg/label/label_selinux.go +++ b/pkg/label/label_selinux.go @@ -67,3 +67,7 @@ func SetFileLabel(path string, fileLabel string) error { func GetPidCon(pid int) (string, error) { return selinux.Getpidcon(pid) } + +func Init() { + selinux.SelinuxEnabled() +} diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go index 5aa5f9f5b5..e5d69f5453 100644 --- a/pkg/libcontainer/nsinit/init.go +++ b/pkg/libcontainer/nsinit/init.go @@ -58,6 +58,8 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol if err := system.ParentDeathSignal(uintptr(syscall.SIGTERM)); err != nil { return fmt.Errorf("parent death signal %s", err) } + + label.Init() ns.logger.Println("setup mount namespace") if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot, container.Context["mount_label"]); err != nil { return fmt.Errorf("setup mount namespace %s", err) -- cgit v1.2.1 From ca4224762b5fe9a319b6c1724ee16d1552403269 Mon Sep 17 00:00:00 2001 From: Dan Walsh Date: Wed, 2 Apr 2014 13:56:30 -0400 Subject: Fix SELinux issue with missing Contexts in lxc execdriver There is a bug in the SELinux patch for the lxc execdriver, that causes lxc containers to blow up whether or not SELinux is enabled. 
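The root cause is a text/template subtlety: inside a template action, "$MOUNTLABEL" with quotes is a plain string literal, while $MOUNTLABEL without quotes refers to the variable declared with {{$MOUNTLABEL := ...}}, and a variable declared inside an {{if}} block goes out of scope once the block closes. A small stand-alone illustration (not the Docker lxc template itself):

    package main

    import (
        "os"
        "text/template"
    )

    const tmpl = `{{$LABEL := .MountLabel}}
    quoted literal: {{printf "context=%q" "$LABEL"}}
    variable value: {{printf "context=%q" $LABEL}}
    `

    func main() {
        t := template.Must(template.New("lxc").Parse(tmpl))
        if err := t.Execute(os.Stdout, map[string]string{"MountLabel": "s0:c1,c2"}); err != nil {
            panic(err)
        }
        // The first printf receives the literal string "$LABEL":  context="$LABEL"
        // The second receives the variable's value:               context="s0:c1,c2"
    }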
Docker-DCO-1.1-Signed-off-by: Dan Walsh (github: rhatdan) --- daemonconfig/config.go | 3 +-- runtime/execdriver/lxc/lxc_template.go | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/daemonconfig/config.go b/daemonconfig/config.go index 6cb3659e18..1abb6f8b89 100644 --- a/daemonconfig/config.go +++ b/daemonconfig/config.go @@ -1,10 +1,9 @@ package daemonconfig import ( - "net" - "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/runtime/networkdriver" + "net" ) const ( diff --git a/runtime/execdriver/lxc/lxc_template.go b/runtime/execdriver/lxc/lxc_template.go index f325ffcaef..83723285d0 100644 --- a/runtime/execdriver/lxc/lxc_template.go +++ b/runtime/execdriver/lxc/lxc_template.go @@ -32,8 +32,8 @@ lxc.pts = 1024 lxc.console = none {{if getProcessLabel .Context}} lxc.se_context = {{ getProcessLabel .Context}} -{{$MOUNTLABEL := getMountLabel .Context}} {{end}} +{{$MOUNTLABEL := getMountLabel .Context}} # no controlling tty at all lxc.tty = 1 @@ -90,8 +90,8 @@ lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noe lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0 {{end}} -lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMountLabel "newinstance,ptmxmode=0666,nosuid,noexec" "$MOUNTLABEL"}} 0 0 -lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" "$MOUNTLABEL"}} 0 0 +lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMountLabel "newinstance,ptmxmode=0666,nosuid,noexec" $MOUNTLABEL}} 0 0 +lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" $MOUNTLABEL}} 0 0 {{range $value := .Mounts}} {{if $value.Writable}} -- cgit v1.2.1 From 94233a204f82f857536c16f36f94d3a8ff0069dd Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Wed, 2 Apr 2014 16:52:49 +0000 Subject: Fix lxc label handleing This also improves the logic around formatting the labels for selinux Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/label/label.go | 2 +- pkg/label/label_selinux.go | 30 +++++++++++++++--------------- runtime/execdriver/lxc/lxc_template.go | 13 +------------ 3 files changed, 17 insertions(+), 28 deletions(-) diff --git a/pkg/label/label.go b/pkg/label/label.go index be0d0ae079..38f026bc5a 100644 --- a/pkg/label/label.go +++ b/pkg/label/label.go @@ -6,7 +6,7 @@ func GenLabels(options string) (string, string, error) { return "", "", nil } -func FormatMountLabel(src string, MountLabel string) string { +func FormatMountLabel(src string, mountLabel string) string { return src } diff --git a/pkg/label/label_selinux.go b/pkg/label/label_selinux.go index 64a1720996..d807b2b408 100644 --- a/pkg/label/label_selinux.go +++ b/pkg/label/label_selinux.go @@ -10,12 +10,15 @@ import ( func GenLabels(options string) (string, string, error) { processLabel, mountLabel := selinux.GetLxcContexts() - var err error if processLabel == "" { // SELinux is disabled - return "", "", err + return "", "", nil } - s := strings.Fields(options) - l := len(s) + + var ( + err error + s = strings.Fields(options) + l = len(s) + ) if l > 0 { pcon := selinux.NewContext(processLabel) for i := 0; i < l; i++ { @@ -28,19 +31,16 @@ func GenLabels(options string) (string, string, error) { return processLabel, mountLabel, err } -func FormatMountLabel(src string, MountLabel string) string { - var mountLabel string - if src != "" { - mountLabel = 
src - if MountLabel != "" { - mountLabel = fmt.Sprintf("%s,context=\"%s\"", mountLabel, MountLabel) - } - } else { - if MountLabel != "" { - mountLabel = fmt.Sprintf("context=\"%s\"", MountLabel) +func FormatMountLabel(src string, mountLabel string) string { + if mountLabel != "" { + switch src { + case "": + src = fmt.Sprintf("%s,context=%s", src, mountLabel) + default: + src = fmt.Sprintf("context=%s", mountLabel) } } - return mountLabel + return src } func SetProcessLabel(processLabel string) error { diff --git a/runtime/execdriver/lxc/lxc_template.go b/runtime/execdriver/lxc/lxc_template.go index e3582e2369..c49753c6aa 100644 --- a/runtime/execdriver/lxc/lxc_template.go +++ b/runtime/execdriver/lxc/lxc_template.go @@ -32,9 +32,8 @@ lxc.pts = 1024 lxc.console = none {{if .ProcessLabel}} lxc.se_context = {{ .ProcessLabel}} -{{$MOUNTLABEL := .MountLabel}} {{end}} -{{$MOUNTLABEL := getMountLabel .Context}} +{{$MOUNTLABEL := .MountLabel}} # no controlling tty at all lxc.tty = 1 @@ -152,14 +151,6 @@ func getMemorySwap(v *execdriver.Resources) int64 { return v.Memory * 2 } -func getProcessLabel(c map[string][]string) string { - return getLabel(c, "process") -} - -func getMountLabel(c map[string][]string) string { - return getLabel(c, "mount") -} - func getLabel(c map[string][]string, name string) string { label := c["label"] for _, l := range label { @@ -175,8 +166,6 @@ func init() { var err error funcMap := template.FuncMap{ "getMemorySwap": getMemorySwap, - "getProcessLabel": getProcessLabel, - "getMountLabel": getMountLabel, "escapeFstabSpaces": escapeFstabSpaces, "formatMountLabel": label.FormatMountLabel, } -- cgit v1.2.1 From e2779e11db113c5551094dba8079d44d8a210e41 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 3 Apr 2014 04:40:38 +0000 Subject: Remove runtime options from config Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runconfig/config.go | 12 ------------ runtime/runtime.go | 4 ++-- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/runconfig/config.go b/runconfig/config.go index c3ade575c5..4b334c6848 100644 --- a/runconfig/config.go +++ b/runconfig/config.go @@ -1,10 +1,8 @@ package runconfig import ( - "encoding/json" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/nat" - "github.com/dotcloud/docker/runtime/execdriver" ) // Note: the Config structure should hold only portable information about the container. 
@@ -36,17 +34,9 @@ type Config struct { Entrypoint []string NetworkDisabled bool OnBuild []string - Context execdriver.Context } func ContainerConfigFromJob(job *engine.Job) *Config { - var context execdriver.Context - val := job.Getenv("Context") - if val != "" { - if err := json.Unmarshal([]byte(val), &context); err != nil { - panic(err) - } - } config := &Config{ Hostname: job.Getenv("Hostname"), Domainname: job.Getenv("Domainname"), @@ -64,7 +54,6 @@ func ContainerConfigFromJob(job *engine.Job) *Config { VolumesFrom: job.Getenv("VolumesFrom"), WorkingDir: job.Getenv("WorkingDir"), NetworkDisabled: job.GetenvBool("NetworkDisabled"), - Context: context, } job.GetenvJson("ExposedPorts", &config.ExposedPorts) job.GetenvJson("Volumes", &config.Volumes) @@ -86,6 +75,5 @@ func ContainerConfigFromJob(job *engine.Job) *Config { if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { config.Entrypoint = Entrypoint } - return config } diff --git a/runtime/runtime.go b/runtime/runtime.go index 9e8323279e..842dbf8b0b 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -499,7 +499,7 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe } initID := fmt.Sprintf("%s-init", container.ID) - if err := runtime.driver.Create(initID, img.ID, config.Context["mount_label"]); err != nil { + if err := runtime.driver.Create(initID, img.ID, ""); err != nil { return nil, nil, err } initPath, err := runtime.driver.Get(initID) @@ -512,7 +512,7 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe return nil, nil, err } - if err := runtime.driver.Create(container.ID, initID, config.Context["mount_label"]); err != nil { + if err := runtime.driver.Create(container.ID, initID, ""); err != nil { return nil, nil, err } resolvConf, err := utils.GetResolvConf() -- cgit v1.2.1 From 8b450a93b8bb3b1cd0c32754dd499ec0c9b66537 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Thu, 3 Apr 2014 06:34:57 +0000 Subject: Remove driver wide mount label for dm Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/graphdriver/devmapper/driver.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/runtime/graphdriver/devmapper/driver.go b/runtime/graphdriver/devmapper/driver.go index 1324ddab81..35fe883f26 100644 --- a/runtime/graphdriver/devmapper/driver.go +++ b/runtime/graphdriver/devmapper/driver.go @@ -22,8 +22,7 @@ func init() { type Driver struct { *DeviceSet - home string - MountLabel string + home string } var Init = func(home string) (graphdriver.Driver, error) { @@ -62,12 +61,11 @@ func (d *Driver) Cleanup() error { } func (d *Driver) Create(id, parent string, mountLabel string) error { - d.MountLabel = mountLabel if err := d.DeviceSet.AddDevice(id, parent); err != nil { return err } mp := path.Join(d.home, "mnt", id) - if err := d.mount(id, mp, d.MountLabel); err != nil { + if err := d.mount(id, mp); err != nil { return err } @@ -117,7 +115,7 @@ func (d *Driver) Remove(id string) error { func (d *Driver) Get(id string) (string, error) { mp := path.Join(d.home, "mnt", id) - if err := d.mount(id, mp, d.MountLabel); err != nil { + if err := d.mount(id, mp); err != nil { return "", err } @@ -130,13 +128,13 @@ func (d *Driver) Put(id string) { } } -func (d *Driver) mount(id, mountPoint string, mountLabel string) error { +func (d *Driver) mount(id, mountPoint string) error { // Create the target directories if they don't exist if err := osMkdirAll(mountPoint, 0755); err != nil && !osIsExist(err) { 
return err } // Mount the device - return d.DeviceSet.MountDevice(id, mountPoint, mountLabel) + return d.DeviceSet.MountDevice(id, mountPoint, "") } func (d *Driver) Exists(id string) bool { -- cgit v1.2.1 From 8c4617e0ae3b9c7e5167883ca171ad8e23fc06b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20R=C3=B6thlisberger?= Date: Thu, 3 Apr 2014 09:40:28 +0100 Subject: docs: Fix typo in hello world example --- docs/sources/examples/hello_world.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/examples/hello_world.rst b/docs/sources/examples/hello_world.rst index 507056da85..39d7abea2c 100644 --- a/docs/sources/examples/hello_world.rst +++ b/docs/sources/examples/hello_world.rst @@ -127,7 +127,7 @@ Attach to the container to see the results in real-time. process to see what is going on. - **"--sig-proxy=false"** Do not forward signals to the container; allows us to exit the attachment using Control-C without stopping the container. -- **$container_id** The Id of the container we want to attach too. +- **$container_id** The Id of the container we want to attach to. Exit from the container attachment by pressing Control-C. -- cgit v1.2.1 From 75633a0451a98bf0c803e742625c4de27dbcc2e8 Mon Sep 17 00:00:00 2001 From: Michael Neale Date: Thu, 3 Apr 2014 17:33:34 +1100 Subject: explained what authConfig actually is. Docker-DCO-1.1-Signed-off-by: Michael Neale (github: michaelneale) --- docs/sources/reference/api/docker_remote_api.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/reference/api/docker_remote_api.rst b/docs/sources/reference/api/docker_remote_api.rst index ca7463f351..7fa8468f3c 100644 --- a/docs/sources/reference/api/docker_remote_api.rst +++ b/docs/sources/reference/api/docker_remote_api.rst @@ -22,6 +22,8 @@ Docker Remote API - Since API version 1.2, the auth configuration is now handled client side, so the client has to send the authConfig as POST in /images/(name)/push +- authConfig, set as the ``X-Registry-Auth`` header, is currently a Base64 encoded (json) string with credentials: + ``{'username': string, 'password': string, 'email': string, 'serveraddress' : string}`` 2. Versions =========== -- cgit v1.2.1 From cd910cb6858541b432e20b650fad262772c9ef18 Mon Sep 17 00:00:00 2001 From: "Guillaume J. Charmes" Date: Wed, 2 Apr 2014 18:00:13 -0700 Subject: Allow force sigint and allow sigquit after sigint Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- server/server.go | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/server/server.go b/server/server.go index d6fa7a0c2a..fae50094c2 100644 --- a/server/server.go +++ b/server/server.go @@ -54,15 +54,29 @@ func InitServer(job *engine.Job) engine.Status { c := make(chan os.Signal, 1) gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) go func() { + interruptCount := 0 for sig := range c { - log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) - switch sig { - case os.Interrupt, syscall.SIGTERM: - utils.RemovePidFile(srv.runtime.Config().Pidfile) - srv.Close() - case syscall.SIGQUIT: - } - os.Exit(128 + int(sig.(syscall.Signal))) + go func() { + log.Printf("Received signal '%v', starting shutdown of docker...\n", sig) + switch sig { + case os.Interrupt, syscall.SIGTERM: + // If the user really wants to interrupt, let him do so. 
+ if interruptCount < 3 { + interruptCount++ + // Initiate the cleanup only once + if interruptCount == 1 { + utils.RemovePidFile(srv.runtime.Config().Pidfile) + srv.Close() + } else { + return + } + } else { + log.Printf("Force shutdown of docker, interrupting cleanup\n") + } + case syscall.SIGQUIT: + } + os.Exit(128 + int(sig.(syscall.Signal))) + }() } }() job.Eng.Hack_SetGlobalVar("httpapi.server", srv) -- cgit v1.2.1 From bd94f84ded944ab69c18cf9d23c35deee3b15963 Mon Sep 17 00:00:00 2001 From: Alexander Larsson Date: Thu, 3 Apr 2014 16:27:07 +0200 Subject: Fix --volumes-from mount failure As explained in https://github.com/dotcloud/docker/issues/4979 --volumes-from fails with ENOFILE errors. This is because the code tries to look at the "from" volume without ensuring that it is mounted yet. We fix this by mounting the containers before stating in it. Also includes a regression test. Docker-DCO-1.1-Signed-off-by: Alexander Larsson (github: alexlarsson) --- integration-cli/docker_cli_run_test.go | 18 ++++++++++++++++++ runtime/volumes.go | 7 ++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index 8d62108fed..fbb09737fc 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -272,6 +272,24 @@ func TestDockerRunWithVolumesAsFiles(t *testing.T) { logDone("run - regression test for #4741 - volumes from as files") } +// Regression test for #4979 +func TestDockerRunWithVolumesFromExited(t *testing.T) { + runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") + out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("1", out, stderr, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") + out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) + if err != nil && exitCode != 0 { + t.Fatal("2", out, stderr, err) + } + deleteAllContainers() + + logDone("run - regression test for #4979 - volumes-from on exited container") +} + // Regression test for #4830 func TestDockerRunWithRelativePath(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-v", "tmp:/other-tmp", "busybox", "true") diff --git a/runtime/volumes.go b/runtime/volumes.go index c504644ae8..0b6f3734e0 100644 --- a/runtime/volumes.go +++ b/runtime/volumes.go @@ -81,9 +81,14 @@ func applyVolumesFrom(container *Container) error { c := container.runtime.Get(specParts[0]) if c == nil { - return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID) + return fmt.Errorf("Container %s not found. Impossible to mount its volumes", specParts[0]) } + if err := c.Mount(); err != nil { + return fmt.Errorf("Container %s failed to mount. 
Impossible to mount its volumes", specParts[0]) + } + defer c.Unmount() + for volPath, id := range c.Volumes { if _, exists := container.Volumes[volPath]; exists { continue -- cgit v1.2.1 From 9712f8127a9ac47a3679e20faea08fb971ee1ecc Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 3 Apr 2014 11:46:24 -0600 Subject: Update contrib/check-config.sh to use zcat and grep if zgrep isn't available Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- contrib/check-config.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/contrib/check-config.sh b/contrib/check-config.sh index f225443138..3b1ff8ad85 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -5,10 +5,15 @@ set -e # see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in : ${CONFIG:=/proc/config.gz} -: ${GREP:=zgrep} + +if ! command -v zgrep &> /dev/null; then + zgrep() { + zcat "$2" | grep "$1" + } +fi is_set() { - $GREP "CONFIG_$1=[y|m]" $CONFIG > /dev/null + zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null } # see http://en.wikipedia.org/wiki/ANSI_escape_code#Colors -- cgit v1.2.1 From fee16d42163822bb23a51c5ebcb1115efc761947 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Thu, 3 Apr 2014 11:52:19 -0600 Subject: Update contrib/check-config.sh cgroupfs check to allow for something like '... cgroup rw,cpu' (looking at you, boot2docker) Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- contrib/check-config.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/check-config.sh b/contrib/check-config.sh index 3b1ff8ad85..53bf708404 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -98,7 +98,7 @@ echo echo 'Generally Necessary:' echo -n '- ' -cgroupCpuDir="$(awk '/[, ]cpu[, ]/ && $8 == "cgroup" { print $5 }' /proc/$$/mountinfo | head -n1)" +cgroupCpuDir="$(awk '/[, ]cpu([, ]|$)/ && $8 == "cgroup" { print $5 }' /proc/$$/mountinfo | head -n1)" cgroupDir="$(dirname "$cgroupCpuDir")" if [ -d "$cgroupDir/cpu" ]; then echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" -- cgit v1.2.1 From c94111b61988ad32d87f99d4421cbcde018c3fb4 Mon Sep 17 00:00:00 2001 From: Kevin Wallace Date: Sun, 1 Dec 2013 15:27:24 -0800 Subject: Allow non-privileged containers to create device nodes. Such nodes could already be created by importing a tarball to a container; now they can be created from within the container itself. This gives non-privileged containers the mknod kernel capability, and modifies their cgroup settings to allow creation of *any* node, not just whitelisted ones. Use of such nodes is still controlled by the existing cgroup whitelist. 
Docker-DCO-1.1-Signed-off-by: Kevin Wallace (github: kevinwallace) --- integration/container_test.go | 8 ++++---- pkg/cgroups/apply_raw.go | 4 ++++ runtime/execdriver/lxc/init.go | 1 - runtime/execdriver/lxc/lxc_template.go | 4 ++++ runtime/execdriver/native/template/default_template.go | 1 - 5 files changed, 12 insertions(+), 6 deletions(-) diff --git a/integration/container_test.go b/integration/container_test.go index d3d35734ed..c64f9e610b 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -1619,16 +1619,16 @@ func TestPrivilegedCanMount(t *testing.T) { } } -func TestPrivilegedCannotMknod(t *testing.T) { +func TestUnprivilegedCanMknod(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() - if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" { - t.Fatal("Could mknod into secure container") + if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" { + t.Fatal("Couldn't mknod into secure container") } } -func TestPrivilegedCannotMount(t *testing.T) { +func TestUnprivilegedCannotMount(t *testing.T) { eng := NewTestEngine(t) runtime := mkRuntimeFromEngine(eng, t) defer runtime.Nuke() diff --git a/pkg/cgroups/apply_raw.go b/pkg/cgroups/apply_raw.go index 5fe317937a..220f08f1dc 100644 --- a/pkg/cgroups/apply_raw.go +++ b/pkg/cgroups/apply_raw.go @@ -95,6 +95,10 @@ func (raw *rawCgroup) setupDevices(c *Cgroup, pid int) (err error) { } allow := []string{ + // allow mknod for any device + "c *:* m", + "b *:* m", + // /dev/null, zero, full "c 1:3 rwm", "c 1:5 rwm", diff --git a/runtime/execdriver/lxc/init.go b/runtime/execdriver/lxc/init.go index a64bca15b2..c1933a5e43 100644 --- a/runtime/execdriver/lxc/init.go +++ b/runtime/execdriver/lxc/init.go @@ -144,7 +144,6 @@ func setupCapabilities(args *execdriver.InitArgs) error { capability.CAP_SYS_RESOURCE, capability.CAP_SYS_TIME, capability.CAP_SYS_TTY_CONFIG, - capability.CAP_MKNOD, capability.CAP_AUDIT_WRITE, capability.CAP_AUDIT_CONTROL, capability.CAP_MAC_OVERRIDE, diff --git a/runtime/execdriver/lxc/lxc_template.go b/runtime/execdriver/lxc/lxc_template.go index e5248375a8..bad3249b31 100644 --- a/runtime/execdriver/lxc/lxc_template.go +++ b/runtime/execdriver/lxc/lxc_template.go @@ -44,6 +44,10 @@ lxc.cgroup.devices.allow = a # no implicit access to devices lxc.cgroup.devices.deny = a +# but allow mknod for any device +lxc.cgroup.devices.allow = c *:* m +lxc.cgroup.devices.allow = b *:* m + # /dev/null and zero lxc.cgroup.devices.allow = c 1:3 rwm lxc.cgroup.devices.allow = c 1:5 rwm diff --git a/runtime/execdriver/native/template/default_template.go b/runtime/execdriver/native/template/default_template.go index b9eb87713e..6828812336 100644 --- a/runtime/execdriver/native/template/default_template.go +++ b/runtime/execdriver/native/template/default_template.go @@ -18,7 +18,6 @@ func New() *libcontainer.Container { libcontainer.GetCapability("SYS_RESOURCE"), libcontainer.GetCapability("SYS_TIME"), libcontainer.GetCapability("SYS_TTY_CONFIG"), - libcontainer.GetCapability("MKNOD"), libcontainer.GetCapability("AUDIT_WRITE"), libcontainer.GetCapability("AUDIT_CONTROL"), libcontainer.GetCapability("MAC_OVERRIDE"), -- cgit v1.2.1 From e21607341cd8dc575098bd09a3c992da166f7884 Mon Sep 17 00:00:00 2001 From: Kevin Wallace Date: Sun, 1 Dec 2013 15:33:44 -0800 Subject: Add myself to AUTHORS. 
Docker-DCO-1.1-Signed-off-by: Kevin Wallace (github: kevinwallace) --- AUTHORS | 1 + 1 file changed, 1 insertion(+) diff --git a/AUTHORS b/AUTHORS index df091d5950..6e34065266 100644 --- a/AUTHORS +++ b/AUTHORS @@ -177,6 +177,7 @@ Keli Hu Ken Cochrane Kevin Clark Kevin J. Lynagh +Kevin Wallace Keyvan Fatehi kim0 Kim BKC Carlbacker -- cgit v1.2.1 From 3c36f82f181c4ce3b65dd15c4b6cb5699ea75075 Mon Sep 17 00:00:00 2001 From: Rajat Pandit Date: Thu, 3 Apr 2014 20:54:57 +0100 Subject: Update nodejs_web_app.rst --- docs/sources/examples/nodejs_web_app.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/examples/nodejs_web_app.rst b/docs/sources/examples/nodejs_web_app.rst index a9e9b1c5e3..87bc1d5aaa 100644 --- a/docs/sources/examples/nodejs_web_app.rst +++ b/docs/sources/examples/nodejs_web_app.rst @@ -50,7 +50,7 @@ Then, create an ``index.js`` file that defines a web app using the res.send('Hello World\n'); }); - app.listen(PORT) + app.listen(PORT); console.log('Running on http://localhost:' + PORT); -- cgit v1.2.1 From 32d6041cc30a636de2f8da89f778c4b6c7d4df19 Mon Sep 17 00:00:00 2001 From: Rajat Pandit Date: Thu, 3 Apr 2014 21:05:50 +0100 Subject: Update nodejs_web_app.rst --- docs/sources/examples/nodejs_web_app.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/examples/nodejs_web_app.rst b/docs/sources/examples/nodejs_web_app.rst index a9e9b1c5e3..e880db555f 100644 --- a/docs/sources/examples/nodejs_web_app.rst +++ b/docs/sources/examples/nodejs_web_app.rst @@ -18,7 +18,7 @@ https://github.com/gasi/docker-node-hello. Create Node.js app ++++++++++++++++++ -First, create a ``package.json`` file that describes your app and its +First, create a directory ``src`` where all the files would live. Then create a ``package.json`` file that describes your app and its dependencies: .. code-block:: json -- cgit v1.2.1 From 887eeb2b022a4c6d3de8c9bfc586ee82855d3cb9 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Thu, 3 Apr 2014 21:55:33 +0000 Subject: Skip login tests because of external dependency to a hosted service. Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- integration/auth_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/integration/auth_test.go b/integration/auth_test.go index 1d9d450573..8109bbb130 100644 --- a/integration/auth_test.go +++ b/integration/auth_test.go @@ -16,6 +16,7 @@ import ( // - Integration tests should have side-effects limited to the host environment being tested. func TestLogin(t *testing.T) { + t.Skip("FIXME: please remove dependency on external services") os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com") defer os.Setenv("DOCKER_INDEX_URL", "") authConfig := ®istry.AuthConfig{ @@ -34,6 +35,7 @@ func TestLogin(t *testing.T) { } func TestCreateAccount(t *testing.T) { + t.Skip("FIXME: please remove dependency on external services") tokenBuffer := make([]byte, 16) _, err := rand.Read(tokenBuffer) if err != nil { -- cgit v1.2.1 From 7c3b955b907c33238c1c155ae8860b2cec929c8b Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Thu, 3 Apr 2014 22:02:03 +0000 Subject: Deprecate 'docker images --tree' and 'docker images --viz' * The commands are no longer listed or documented. * The commands still work but print a deprecation warning. * The commands should be removed in a future version. 
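[Editor's illustration] A minimal sketch of the deprecation pattern described here, written against the standard library flag package for clarity; the real CLI uses Docker's own flag handling, where the '#' prefixes visible in the diff below appear to be what hides the options from help output:

package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// The flag is still parsed and honoured, but no longer advertised.
	tree := flag.Bool("tree", false, "Output graph in tree format (deprecated)")
	flag.Parse()

	if *tree {
		// Warn once on stderr, then behave exactly as before.
		fmt.Fprintln(os.Stderr, "Warning: '--tree' is deprecated and will be removed in a future version.")
		// ... render the image tree as the old code path did ...
	}
}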
Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- api/client/commands.go | 9 +++++-- docs/sources/reference/commandline/cli.rst | 42 ------------------------------ 2 files changed, 7 insertions(+), 44 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 53b8822d69..93627613a5 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1137,8 +1137,9 @@ func (cli *DockerCli) CmdImages(args ...string) error { quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate images used to build)") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "Output graph in graphviz format") - flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "Output graph in tree format") + // FIXME: --viz and --tree are deprecated. Remove them in a future version. + flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format") + flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format") if err := cmd.Parse(args); err != nil { return nil @@ -1150,6 +1151,7 @@ func (cli *DockerCli) CmdImages(args ...string) error { filter := cmd.Arg(0) + // FIXME: --viz and --tree are deprecated. Remove them in a future version. if *flViz || *flTree { body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false)) if err != nil { @@ -1260,6 +1262,7 @@ func (cli *DockerCli) CmdImages(args ...string) error { return nil } +// FIXME: --viz and --tree are deprecated. Remove them in a future version. func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { length := images.Len() if length > 1 { @@ -1286,6 +1289,7 @@ func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[ } } +// FIXME: --viz and --tree are deprecated. Remove them in a future version. func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { var ( imageID string @@ -1309,6 +1313,7 @@ func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix strin } } +// FIXME: --viz and --tree are deprecated. Remove them in a future version. func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { var imageID string if noTrunc { diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 64dff1e1c2..6ff66feeb7 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -600,8 +600,6 @@ To see how the ``docker:latest`` image was built: -a, --all=false: Show all images (by default filter out the intermediate images used to build) --no-trunc=false: Don't truncate output -q, --quiet=false: Only show numeric IDs - -t, --tree=false: Output graph in tree format - -v, --viz=false: Output graph in graphviz format Listing the most recently created images ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -637,46 +635,6 @@ Listing the full length image IDs tryout latest 2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB 5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB -Displaying images visually -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
code-block:: bash - - $ sudo docker images --viz | dot -Tpng -o docker.png - -.. image:: docker_images.gif - :alt: Example inheritance graph of Docker images. - - -Displaying image hierarchy -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: bash - - $ sudo docker images --tree - - ├─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise - └─27cf78414709 Size: 180.1 MB (virtual 180.1 MB) - └─b750fe79269d Size: 24.65 kB (virtual 180.1 MB) Tags: ubuntu:12.10,ubuntu:quantal - ├─f98de3b610d5 Size: 12.29 kB (virtual 180.1 MB) - │ └─7da80deb7dbf Size: 16.38 kB (virtual 180.1 MB) - │ └─65ed2fee0a34 Size: 20.66 kB (virtual 180.2 MB) - │ └─a2b9ea53dddc Size: 819.7 MB (virtual 999.8 MB) - │ └─a29b932eaba8 Size: 28.67 kB (virtual 999.9 MB) - │ └─e270a44f124d Size: 12.29 kB (virtual 999.9 MB) Tags: progrium/buildstep:latest - └─17e74ac162d8 Size: 53.93 kB (virtual 180.2 MB) - └─339a3f56b760 Size: 24.65 kB (virtual 180.2 MB) - └─904fcc40e34d Size: 96.7 MB (virtual 276.9 MB) - └─b1b0235328dd Size: 363.3 MB (virtual 640.2 MB) - └─7cb05d1acb3b Size: 20.48 kB (virtual 640.2 MB) - └─47bf6f34832d Size: 20.48 kB (virtual 640.2 MB) - └─f165104e82ed Size: 12.29 kB (virtual 640.2 MB) - └─d9cf85a47b7e Size: 1.911 MB (virtual 642.2 MB) - └─3ee562df86ca Size: 17.07 kB (virtual 642.2 MB) - └─b05fc2d00e4a Size: 24.96 kB (virtual 642.2 MB) - └─c96a99614930 Size: 12.29 kB (virtual 642.2 MB) - └─a6a357a48c49 Size: 12.29 kB (virtual 642.2 MB) Tags: ndj/mongodb:latest - .. _cli_import: ``import`` -- cgit v1.2.1 From 6cf137860102b8df5db75dd68924375a7b74c1c3 Mon Sep 17 00:00:00 2001 From: Goffert van Gool Date: Thu, 3 Apr 2014 20:47:49 +0200 Subject: Fix typo in names-generator Docker-DCO-1.1-Signed-off-by: Goffert van Gool (github: ruphin) --- pkg/namesgenerator/names-generator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go index cb18eb8c06..07fadf8171 100644 --- a/pkg/namesgenerator/names-generator.go +++ b/pkg/namesgenerator/names-generator.go @@ -76,7 +76,7 @@ var ( // http://en.wikipedia.org/wiki/John_Bardeen // http://en.wikipedia.org/wiki/Walter_Houser_Brattain // http://en.wikipedia.org/wiki/William_Shockley - right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley", "goldstine", "hoover", "hopper", "bartik", "sammet", "jones", "perlman", "wilson", "kowalevski", "hypatia", "goodall", "mayer", "elion", "blackwell", "lalande", "kirch", "ardinghelli", "colden", "almeida", "leakey", "meitner", "mestorf", "rosalind", "sinoussi", "carson", "mcmclintock", "yonath"} + right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley", "goldstine", "hoover", "hopper", "bartik", "sammet", "jones", "perlman", 
"wilson", "kowalevski", "hypatia", "goodall", "mayer", "elion", "blackwell", "lalande", "kirch", "ardinghelli", "colden", "almeida", "leakey", "meitner", "mestorf", "rosalind", "sinoussi", "carson", "mcclintock", "yonath"} ) func GenerateRandomName(checker NameChecker) (string, error) { -- cgit v1.2.1 From 615ac8feb27b2b3db0c06b37ecd87b710eabffef Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Thu, 3 Apr 2014 23:47:58 +0000 Subject: Deprecate 'docker insert' 'docker insert' is an old command which predates 'docker build'. We no longer recommend using it, it is not actively maintained, and can be replaced with the combination of 'docker build' and 'ADD'. This removes the command from usage and documentation, and prints a warning when it is called. The command still works but it will be removed in a future version. Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- api/client/commands.go | 3 ++- api/server/server.go | 1 + docs/sources/reference/commandline/cli.rst | 28 ---------------------------- integration/server_test.go | 1 + server/server.go | 3 +++ 5 files changed, 7 insertions(+), 29 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 53b8822d69..168252a1b7 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -56,7 +56,6 @@ func (cli *DockerCli) CmdHelp(args ...string) error { {"images", "List images"}, {"import", "Create a new filesystem image from the contents of a tarball"}, {"info", "Display system-wide information"}, - {"insert", "Insert a file in an image"}, {"inspect", "Return low-level information on a container"}, {"kill", "Kill a running container"}, {"load", "Load an image from a tar archive"}, @@ -85,7 +84,9 @@ func (cli *DockerCli) CmdHelp(args ...string) error { return nil } +// FIXME: 'insert' is deprecated. func (cli *DockerCli) CmdInsert(args ...string) error { + fmt.Fprintf(os.Stderr, "Warning: '%s' is deprecated and will be removed in a future version. Please use 'docker build' and 'ADD' instead.\n") cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH") if err := cmd.Parse(args); err != nil { return nil diff --git a/api/server/server.go b/api/server/server.go index 93dd2094b6..c6eafaf265 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -458,6 +458,7 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons return job.Run() } +// FIXME: 'insert' is deprecated as of 0.10, and should be removed in a future version. func postImagesInsert(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index 64dff1e1c2..09ef6e0679 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -753,34 +753,6 @@ preserved. WARNING: No swap limit support -.. _cli_insert: - -``insert`` ----------- - -:: - - Usage: docker insert IMAGE URL PATH - - Insert a file from URL in the IMAGE at PATH - -Use the specified ``IMAGE`` as the parent for a new image which adds a -:ref:`layer ` containing the new file. The ``insert`` command does -not modify the original image, and the new image has the contents of the parent -image, plus the new file. - - -Examples -~~~~~~~~ - -Insert file from GitHub -....................... - -.. 
code-block:: bash - - $ sudo docker insert 8283e18b24bc https://raw.github.com/metalivedev/django/master/postinstall /tmp/postinstall.sh - 06fd35556d7b - .. _cli_inspect: ``inspect`` diff --git a/integration/server_test.go b/integration/server_test.go index a401f1306e..4ad5ec0f92 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -640,6 +640,7 @@ func TestImagesFilter(t *testing.T) { } } +// FIXE: 'insert' is deprecated and should be removed in a future version. func TestImageInsert(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() diff --git a/server/server.go b/server/server.go index fae50094c2..9cabf17889 100644 --- a/server/server.go +++ b/server/server.go @@ -82,6 +82,7 @@ func InitServer(job *engine.Job) engine.Status { job.Eng.Hack_SetGlobalVar("httpapi.server", srv) job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime) + // FIXME: 'insert' is deprecated and should be removed in a future version. for name, handler := range map[string]engine.Handler{ "export": srv.ContainerExport, "create": srv.ContainerCreate, @@ -641,7 +642,9 @@ func (srv *Server) ImagesSearch(job *engine.Job) engine.Status { return engine.StatusOK } +// FIXME: 'insert' is deprecated and should be removed in a future version. func (srv *Server) ImageInsert(job *engine.Job) engine.Status { + fmt.Fprintf(job.Stderr, "Warning: '%s' is deprecated and will be removed in a future version. Please use 'build' and 'ADD' instead.\n", job.Name) if len(job.Args) != 3 { return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name) } -- cgit v1.2.1 From c8f437aee0d90d4955a6aaa35f8e0b74e7ac99a3 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Thu, 3 Apr 2014 23:14:51 +0000 Subject: api/server: replace an integration test with a unit test using engine mocking. 
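[Editor's illustration] The mocking idea, reduced to standard-library terms as a sketch only: register a stub backend for the route under test, drive the HTTP layer in-process with net/http/httptest, and assert on the recorded response without starting a daemon. The actual test below wires a fake handler into Docker's engine rather than a plain ServeMux:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	called := false

	// Stub backend: stands in for the real "version" job handler.
	mux := http.NewServeMux()
	mux.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
		called = true
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"Version":"42.1"}`)
	})

	// Drive the HTTP layer in-process and inspect the recorded response.
	rec := httptest.NewRecorder()
	req, err := http.NewRequest("GET", "/version", nil)
	if err != nil {
		panic(err)
	}
	mux.ServeHTTP(rec, req)

	fmt.Println(called, rec.Code, rec.HeaderMap.Get("Content-Type"))
}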
Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- api/server/server_unit_test.go | 61 ++++++++++++++++++++++++++++++++++++++++++ integration/api_test.go | 37 ------------------------- 2 files changed, 61 insertions(+), 37 deletions(-) diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go index 5ea5af411c..c14fd8ba9e 100644 --- a/api/server/server_unit_test.go +++ b/api/server/server_unit_test.go @@ -2,8 +2,13 @@ package server import ( "fmt" + "github.com/dotcloud/docker/api" + "github.com/dotcloud/docker/engine" + "github.com/dotcloud/docker/utils" + "io" "net/http" "net/http/httptest" + "os" "testing" ) @@ -50,3 +55,59 @@ func TesthttpError(t *testing.T) { t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) } } + +func TestGetVersion(t *testing.T) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + eng, err := engine.New(tmp) + if err != nil { + t.Fatal(err) + } + var called bool + eng.Register("version", func(job *engine.Job) engine.Status { + called = true + v := &engine.Env{} + v.Set("Version", "42.1") + v.Set("ApiVersion", "1.1.1.1.1") + v.Set("GoVersion", "2.42") + v.Set("Os", "Linux") + v.Set("Arch", "x86_64") + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + + r := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/version", nil) + if err != nil { + t.Fatal(err) + } + // FIXME getting the version should require an actual running Server + if err := ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("handler was not called") + } + out := engine.NewOutput() + v, err := out.AddEnv() + if err != nil { + t.Fatal(err) + } + if _, err := io.Copy(out, r.Body); err != nil { + t.Fatal(err) + } + out.Close() + expected := "42.1" + if result := v.Get("Version"); result != expected { + t.Errorf("Expected version %s, %s found", expected, result) + } + expected = "application/json" + if result := r.HeaderMap.Get("Content-Type"); result != expected { + t.Errorf("Expected Content-Type %s, %s found", expected, result) + } +} diff --git a/integration/api_test.go b/integration/api_test.go index d08617ea69..61697af8a1 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -16,7 +16,6 @@ import ( "github.com/dotcloud/docker/api" "github.com/dotcloud/docker/api/server" - "github.com/dotcloud/docker/dockerversion" "github.com/dotcloud/docker/engine" "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/runconfig" @@ -25,42 +24,6 @@ import ( "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) -func TestGetVersion(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - var err error - r := httptest.NewRecorder() - - req, err := http.NewRequest("GET", "/version", nil) - if err != nil { - t.Fatal(err) - } - // FIXME getting the version should require an actual running Server - if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - - out := engine.NewOutput() - v, err := out.AddEnv() - if err != nil { - t.Fatal(err) - } - if _, err := io.Copy(out, r.Body); err != nil { - t.Fatal(err) - } - out.Close() - expected := dockerversion.VERSION - if result := v.Get("Version"); result != expected { - t.Errorf("Expected version %s, %s found", expected, result) - } - expected = "application/json" - if result := 
r.HeaderMap.Get("Content-Type"); result != expected { - t.Errorf("Expected Content-Type %s, %s found", expected, result) - } -} - func TestGetInfo(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() -- cgit v1.2.1 From 76057addb255e6f14dd03c276317abc759a15a80 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Thu, 3 Apr 2014 23:15:56 +0000 Subject: engine: fix engine.Env.Encode() to stop auto-guessing types. Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- engine/env.go | 20 +------------------- engine/env_test.go | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/engine/env.go b/engine/env.go index c43a5ec971..1da3ae52e0 100644 --- a/engine/env.go +++ b/engine/env.go @@ -194,25 +194,7 @@ func (env *Env) SetAuto(k string, v interface{}) { } func (env *Env) Encode(dst io.Writer) error { - m := make(map[string]interface{}) - for k, v := range env.Map() { - var val interface{} - if err := json.Unmarshal([]byte(v), &val); err == nil { - // FIXME: we fix-convert float values to int, because - // encoding/json decodes integers to float64, but cannot encode them back. - // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) - if fval, isFloat := val.(float64); isFloat { - val = int(fval) - } - m[k] = val - } else { - m[k] = v - } - } - if err := json.NewEncoder(dst).Encode(&m); err != nil { - return err - } - return nil + return json.NewEncoder(dst).Encode(env.Map()) } func (env *Env) WriteTo(dst io.Writer) (n int64, err error) { diff --git a/engine/env_test.go b/engine/env_test.go index c7079ff942..da7d919f03 100644 --- a/engine/env_test.go +++ b/engine/env_test.go @@ -95,3 +95,21 @@ func TestEnviron(t *testing.T) { t.Fatalf("bar not found in the environ") } } + +func TestEnvWriteTo(t *testing.T) { + e := &Env{} + inputKey := "Version" + inputVal := "42.1" + e.Set(inputKey, inputVal) + out := NewOutput() + e2, err := out.AddEnv() + if err != nil { + t.Fatal(err) + } + e.WriteTo(out) + result := e2.Get(inputKey) + expected := inputVal + if expected != result { + t.Fatalf("%#v\n", result) + } +} -- cgit v1.2.1 From 2cb560988b111ee736c4ab22588d2091cb04075e Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Fri, 4 Apr 2014 00:02:44 +0000 Subject: api/server: convert TestGetInfo from an integration test to a unit test. 
Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- api/server/server_unit_test.go | 67 ++++++++++++++++++++++++++++++++++++++++++ integration/api_test.go | 41 -------------------------- 2 files changed, 67 insertions(+), 41 deletions(-) diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go index c14fd8ba9e..3fc1cea064 100644 --- a/api/server/server_unit_test.go +++ b/api/server/server_unit_test.go @@ -111,3 +111,70 @@ func TestGetVersion(t *testing.T) { t.Errorf("Expected Content-Type %s, %s found", expected, result) } } + +func TestGetInfo(t *testing.T) { + tmp, err := utils.TestDirectory("") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmp) + eng, err := engine.New(tmp) + if err != nil { + t.Fatal(err) + } + + var called bool + eng.Register("info", func(job *engine.Job) engine.Status { + called = true + v := &engine.Env{} + v.SetInt("Containers", 1) + v.SetInt("Images", 42000) + if _, err := v.WriteTo(job.Stdout); err != nil { + return job.Error(err) + } + return engine.StatusOK + }) + + r := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/info", nil) + if err != nil { + t.Fatal(err) + } + // FIXME getting the version should require an actual running Server + if err := ServeRequest(eng, api.APIVERSION, r, req); err != nil { + t.Fatal(err) + } + if !called { + t.Fatalf("handler was not called") + } + + out := engine.NewOutput() + i, err := out.AddEnv() + if err != nil { + t.Fatal(err) + } + if _, err := io.Copy(out, r.Body); err != nil { + t.Fatal(err) + } + out.Close() + { + expected := 42000 + result := i.GetInt("Images") + if expected != result { + t.Fatalf("%#v\n", result) + } + } + { + expected := 1 + result := i.GetInt("Containers") + if expected != result { + t.Fatalf("%#v\n", result) + } + } + { + expected := "application/json" + if result := r.HeaderMap.Get("Content-Type"); result != expected { + t.Fatalf("%#v\n", result) + } + } +} diff --git a/integration/api_test.go b/integration/api_test.go index 61697af8a1..26441a2668 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -24,47 +24,6 @@ import ( "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) -func TestGetInfo(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - job := eng.Job("images") - initialImages, err := job.Stdout.AddListTable() - if err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - req, err := http.NewRequest("GET", "/info", nil) - if err != nil { - t.Fatal(err) - } - r := httptest.NewRecorder() - - if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil { - t.Fatal(err) - } - assertHttpNotError(r, t) - - out := engine.NewOutput() - i, err := out.AddEnv() - if err != nil { - t.Fatal(err) - } - if _, err := io.Copy(out, r.Body); err != nil { - t.Fatal(err) - } - out.Close() - if images := i.GetInt("Images"); images != initialImages.Len() { - t.Errorf("Expected images: %d, %d found", initialImages.Len(), images) - } - expected := "application/json" - if result := r.HeaderMap.Get("Content-Type"); result != expected { - t.Errorf("Expected Content-Type %s, %s found", expected, result) - } -} - func TestGetEvents(t *testing.T) { eng := NewTestEngine(t) srv := mkServerFromEngine(eng, t) -- cgit v1.2.1 From b2b9334f27e1a773b77241efa214af2e87439d3b Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Fri, 4 Apr 2014 00:08:51 +0000 Subject: remove hack in version Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- 
server/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/server.go b/server/server.go index fae50094c2..a6a0c14a84 100644 --- a/server/server.go +++ b/server/server.go @@ -843,7 +843,7 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { func (srv *Server) DockerVersion(job *engine.Job) engine.Status { v := &engine.Env{} v.Set("Version", dockerversion.VERSION) - v.SetJson("ApiVersion", api.APIVERSION) + v.Set("ApiVersion", string(api.APIVERSION)) v.Set("GitCommit", dockerversion.GITCOMMIT) v.Set("GoVersion", goruntime.Version()) v.Set("Os", goruntime.GOOS) -- cgit v1.2.1 From e09274476f889c08416a819dfb28f2c425868c6b Mon Sep 17 00:00:00 2001 From: unclejack Date: Fri, 4 Apr 2014 03:22:32 +0300 Subject: cli integration: sync container & image deletion This makes container and image removal in the tests run synchronously. Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- integration-cli/docker_cli_build_test.go | 2 +- integration-cli/docker_cli_commit_test.go | 4 ++-- integration-cli/docker_cli_diff_test.go | 6 +++--- integration-cli/docker_cli_export_import_test.go | 4 ++-- integration-cli/docker_cli_kill_test.go | 2 +- integration-cli/docker_cli_logs_test.go | 6 +++--- integration-cli/docker_cli_push_test.go | 2 +- integration-cli/docker_cli_save_load_test.go | 4 ++-- integration-cli/docker_cli_tag_test.go | 2 +- integration-cli/docker_cli_top_test.go | 2 +- 10 files changed, 17 insertions(+), 17 deletions(-) diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index e6f3096892..7cd42dc69c 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -18,7 +18,7 @@ func TestBuildSixtySteps(t *testing.T) { t.Fatal("failed to build the image") } - go deleteImages("foobuildsixtysteps") + deleteImages("foobuildsixtysteps") logDone("build - build an image with sixty build steps") } diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go index 5ed55ef62a..51adaac9df 100644 --- a/integration-cli/docker_cli_commit_test.go +++ b/integration-cli/docker_cli_commit_test.go @@ -27,8 +27,8 @@ func TestCommitAfterContainerIsDone(t *testing.T) { out, _, err = runCommandWithOutput(inspectCmd) errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err)) - go deleteContainer(cleanedContainerID) - go deleteImages(cleanedImageID) + deleteContainer(cleanedContainerID) + deleteImages(cleanedImageID) logDone("commit - echo foo and commit the image") } diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go index 0ae9cca38d..478ebd2df1 100644 --- a/integration-cli/docker_cli_diff_test.go +++ b/integration-cli/docker_cli_diff_test.go @@ -30,7 +30,7 @@ func TestDiffFilenameShownInOutput(t *testing.T) { if !found { t.Errorf("couldn't find the new file in docker diff's output: %v", out) } - go deleteContainer(cleanCID) + deleteContainer(cleanCID) logDone("diff - check if created file shows up") } @@ -53,7 +53,7 @@ func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { out, _, err := runCommandWithOutput(diffCmd) errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) - go deleteContainer(cleanCID) + deleteContainer(cleanCID) for _, filename := range dockerinitFiles { if strings.Contains(out, filename) { @@ -74,7 +74,7 @@ func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { diffCmd := exec.Command(dockerBinary, "diff", cleanCID) out, _, err := 
runCommandWithOutput(diffCmd) errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err)) - go deleteContainer(cleanCID) + deleteContainer(cleanCID) expected := map[string]bool{ "C /dev": true, diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go index 66ff1055ba..2e443cd39e 100644 --- a/integration-cli/docker_cli_export_import_test.go +++ b/integration-cli/docker_cli_export_import_test.go @@ -40,8 +40,8 @@ func TestExportContainerAndImportImage(t *testing.T) { out, _, err = runCommandWithOutput(inspectCmd) errorOut(err, t, fmt.Sprintf("output should've been an image id: %v %v", out, err)) - go deleteImages("testexp") - go deleteContainer(cleanedContainerID) + deleteContainer(cleanedContainerID) + deleteImages("testexp") os.Remove("/tmp/testexp.tar") diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go index 676ccd0ca0..b8265d8cfb 100644 --- a/integration-cli/docker_cli_kill_test.go +++ b/integration-cli/docker_cli_kill_test.go @@ -30,7 +30,7 @@ func TestKillContainer(t *testing.T) { t.Fatal("killed container is still running") } - go deleteContainer(cleanedContainerID) + deleteContainer(cleanedContainerID) logDone("kill - kill container running sleep 10") } diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go index f8fcbe8832..8fcf4d7333 100644 --- a/integration-cli/docker_cli_logs_test.go +++ b/integration-cli/docker_cli_logs_test.go @@ -24,7 +24,7 @@ func TestLogsContainerSmallerThanPage(t *testing.T) { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) } - go deleteContainer(cleanedContainerID) + deleteContainer(cleanedContainerID) logDone("logs - logs container running echo smaller than page size") } @@ -47,7 +47,7 @@ func TestLogsContainerBiggerThanPage(t *testing.T) { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) } - go deleteContainer(cleanedContainerID) + deleteContainer(cleanedContainerID) logDone("logs - logs container running echo bigger than page size") } @@ -70,7 +70,7 @@ func TestLogsContainerMuchBiggerThanPage(t *testing.T) { t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) } - go deleteContainer(cleanedContainerID) + deleteContainer(cleanedContainerID) logDone("logs - logs container running echo much bigger than page size") } diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go index 8117c077bc..160bb9e286 100644 --- a/integration-cli/docker_cli_push_test.go +++ b/integration-cli/docker_cli_push_test.go @@ -26,7 +26,7 @@ func TestPushBusyboxImage(t *testing.T) { out, exitCode, err = runCommandWithOutput(pushCmd) errorOut(err, t, fmt.Sprintf("%v %v", out, err)) - go deleteImages(repoName) + deleteImages(repoName) if err != nil || exitCode != 0 { t.Fatal("pushing the image to the private registry has failed") diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go index 7f04f7ca53..d728c7de95 100644 --- a/integration-cli/docker_cli_save_load_test.go +++ b/integration-cli/docker_cli_save_load_test.go @@ -42,8 +42,8 @@ func TestSaveAndLoadRepo(t *testing.T) { out, _, err = runCommandWithOutput(inspectCmd) errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", out, err)) - go deleteImages(repoName) - go deleteContainer(cleanedContainerID) + deleteContainer(cleanedContainerID) + deleteImages(repoName) 
os.Remove("/tmp/foobar-save-load-test.tar") diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go index 67c28c570a..d75b7db385 100644 --- a/integration-cli/docker_cli_tag_test.go +++ b/integration-cli/docker_cli_tag_test.go @@ -79,7 +79,7 @@ func TestTagValidPrefixedRepo(t *testing.T) { t.Errorf("tag busybox %v should have worked: %s", repo, err) continue } - go deleteImages(repo) + deleteImages(repo) logMessage := fmt.Sprintf("tag - busybox %v", repo) logDone(logMessage) } diff --git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go index 1895054ccc..73d590cf06 100644 --- a/integration-cli/docker_cli_top_test.go +++ b/integration-cli/docker_cli_top_test.go @@ -22,7 +22,7 @@ func TestTop(t *testing.T) { _, err = runCommand(killCmd) errorOut(err, t, fmt.Sprintf("failed to kill container: %v", err)) - go deleteContainer(cleanedContainerID) + deleteContainer(cleanedContainerID) if !strings.Contains(out, "sleep 20") { t.Fatal("top should've listed sleep 20 in the process list") -- cgit v1.2.1 From bea71245c8165e0dfdc6b2485c548c04f4d3edd3 Mon Sep 17 00:00:00 2001 From: Dan Stine Date: Fri, 4 Apr 2014 08:12:17 -0400 Subject: fixed two readme typos --- pkg/libcontainer/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md index e967f6d76d..3bf79549e3 100644 --- a/pkg/libcontainer/README.md +++ b/pkg/libcontainer/README.md @@ -3,7 +3,7 @@ #### background libcontainer specifies configuration options for what a container is. It provides a native Go implementation -for using linux namespaces with no external dependencies. libcontainer provides many convience functions for working with namespaces, networking, and management. +for using linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management. #### container @@ -91,7 +91,7 @@ Sample `container.json` file: ``` Using this configuration and the current directory holding the rootfs for a process, one can use libcontainer to exec the container. Running the life of the namespace, a `pid` file -is written to the current directory with the pid of the namespaced process to the external world. A client can use this pid to wait, kill, or perform other operation with the container. If a user tries to run an new process inside an existing container with a live namespace the namespace will be joined by the new process. +is written to the current directory with the pid of the namespaced process to the external world. A client can use this pid to wait, kill, or perform other operation with the container. If a user tries to run a new process inside an existing container with a live namespace the namespace will be joined by the new process. You may also specify an alternate root place where the `container.json` file is read and where the `pid` file will be saved. -- cgit v1.2.1 From 62b08f557db91cc5cd12ea9ceb0a4d8cf3d6e0f1 Mon Sep 17 00:00:00 2001 From: unclejack Date: Fri, 4 Apr 2014 19:03:07 +0300 Subject: cli integration: allow driver selection via vars This makes it possible to choose the graphdriver and the execdriver which is going to be used for the cli integration tests. 
Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- Makefile | 2 +- hack/make/test-integration-cli | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 776d57951f..d49aa3b667 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH)) DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)") -DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" +DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)" DOCKER_RUN_DOCS := docker run --rm -it -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" default: binary diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli index 5c6fc367fc..1760171dd5 100644 --- a/hack/make/test-integration-cli +++ b/hack/make/test-integration-cli @@ -7,6 +7,8 @@ set -e # subshell so that we can export PATH without breaking other things ( export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH" +DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} +DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native} bundle_test_integration_cli() { go_test_dir ./integration-cli @@ -17,7 +19,8 @@ if ! command -v docker &> /dev/null; then false fi -docker -d -D -p $DEST/docker.pid &> $DEST/docker.log & +echo "running cli integration tests using graphdriver: '$DOCKER_GRAPHDRIVER' and execdriver: '$DOCKER_EXECDRIVER'" +docker -d -D -s $DOCKER_GRAPHDRIVER -e $DOCKER_EXECDRIVER -p $DEST/docker.pid &> $DEST/docker.log & # pull the busybox image before running the tests sleep 2 -- cgit v1.2.1 From 22152ccc47e641050da85b80cebf2912b42fd122 Mon Sep 17 00:00:00 2001 From: unclejack Date: Fri, 4 Apr 2014 19:06:55 +0300 Subject: cli integration: fix wait race The wait at the end of cli integration script could end up failing if the process had already exited. This was making it look like the tests have failed. This change fixes the problem. Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- hack/make/test-integration-cli | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli index 1760171dd5..18e4ee6602 100644 --- a/hack/make/test-integration-cli +++ b/hack/make/test-integration-cli @@ -31,5 +31,5 @@ bundle_test_integration_cli 2>&1 \ DOCKERD_PID=$(cat $DEST/docker.pid) kill $DOCKERD_PID -wait $DOCKERD_PID +wait $DOCKERD_PID || true ) -- cgit v1.2.1 From 95e6fd819bbef09032bf680e0f7dadd7fbf44559 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 4 Apr 2014 11:29:56 -0700 Subject: Revert "engine: fix engine.Env.Encode() to stop auto-guessing types." This reverts commit 76057addb255e6f14dd03c276317abc759a15a80. 
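[Editor's illustration] For context on why the reverted code guessed types at all: encoding/json decodes every JSON number into a float64, so integers that do not fit a float64 exactly cannot be re-encoded unchanged. A standalone demonstration of the underlying issue (not Docker code):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte("1234567890123456789") // a 19-digit integer

	// Decoding into interface{} yields a float64, which cannot hold all 19 digits,
	// so re-encoding rounds the low digits away.
	var v interface{}
	if err := json.Unmarshal(raw, &v); err != nil {
		panic(err)
	}
	reencoded, _ := json.Marshal(v)
	fmt.Printf("%T %s\n", v, reencoded)

	// json.Number keeps the original textual form and round-trips exactly.
	var n json.Number
	if err := json.Unmarshal(raw, &n); err != nil {
		panic(err)
	}
	reencoded, _ = json.Marshal(n)
	fmt.Println(string(reencoded)) // 1234567890123456789
}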
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- engine/env.go | 20 +++++++++++++++++++- engine/env_test.go | 18 ------------------ 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/engine/env.go b/engine/env.go index 1da3ae52e0..c43a5ec971 100644 --- a/engine/env.go +++ b/engine/env.go @@ -194,7 +194,25 @@ func (env *Env) SetAuto(k string, v interface{}) { } func (env *Env) Encode(dst io.Writer) error { - return json.NewEncoder(dst).Encode(env.Map()) + m := make(map[string]interface{}) + for k, v := range env.Map() { + var val interface{} + if err := json.Unmarshal([]byte(v), &val); err == nil { + // FIXME: we fix-convert float values to int, because + // encoding/json decodes integers to float64, but cannot encode them back. + // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) + if fval, isFloat := val.(float64); isFloat { + val = int(fval) + } + m[k] = val + } else { + m[k] = v + } + } + if err := json.NewEncoder(dst).Encode(&m); err != nil { + return err + } + return nil } func (env *Env) WriteTo(dst io.Writer) (n int64, err error) { diff --git a/engine/env_test.go b/engine/env_test.go index da7d919f03..c7079ff942 100644 --- a/engine/env_test.go +++ b/engine/env_test.go @@ -95,21 +95,3 @@ func TestEnviron(t *testing.T) { t.Fatalf("bar not found in the environ") } } - -func TestEnvWriteTo(t *testing.T) { - e := &Env{} - inputKey := "Version" - inputVal := "42.1" - e.Set(inputKey, inputVal) - out := NewOutput() - e2, err := out.AddEnv() - if err != nil { - t.Fatal(err) - } - e.WriteTo(out) - result := e2.Get(inputKey) - expected := inputVal - if expected != result { - t.Fatalf("%#v\n", result) - } -} -- cgit v1.2.1 From 4c6cf9e27fd0ee6c09e836f03722d1c679b6bd29 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 4 Apr 2014 11:35:07 -0700 Subject: Use setjson hack again for version Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- api/server/server_unit_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go index 3fc1cea064..3dbba640ff 100644 --- a/api/server/server_unit_test.go +++ b/api/server/server_unit_test.go @@ -70,7 +70,7 @@ func TestGetVersion(t *testing.T) { eng.Register("version", func(job *engine.Job) engine.Status { called = true v := &engine.Env{} - v.Set("Version", "42.1") + v.SetJson("Version", "42.1") v.Set("ApiVersion", "1.1.1.1.1") v.Set("GoVersion", "2.42") v.Set("Os", "Linux") -- cgit v1.2.1 From 07887f65de7f909e56bf965b3875a1dd46bd3619 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Fri, 4 Apr 2014 11:38:03 -0700 Subject: Revert "remove hack in version" This reverts commit b2b9334f27e1a773b77241efa214af2e87439d3b. 
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- server/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/server.go b/server/server.go index 55ac10fd0d..9cabf17889 100644 --- a/server/server.go +++ b/server/server.go @@ -846,7 +846,7 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status { func (srv *Server) DockerVersion(job *engine.Job) engine.Status { v := &engine.Env{} v.Set("Version", dockerversion.VERSION) - v.Set("ApiVersion", string(api.APIVERSION)) + v.SetJson("ApiVersion", api.APIVERSION) v.Set("GitCommit", dockerversion.GITCOMMIT) v.Set("GoVersion", goruntime.Version()) v.Set("Os", goruntime.GOOS) -- cgit v1.2.1 From da8aa712d28cb7177b0fe5b4cc9d7de33ea1da60 Mon Sep 17 00:00:00 2001 From: Kato Kazuyoshi Date: Sat, 5 Apr 2014 10:09:04 +0900 Subject: Remove archive/stat_unsupported.go because it is not used LUtimesNano and all other functions were implemented on pkg/system after d6114c0da0e844199e3d23c60a04434566fb5392. Docker-DCO-1.1-Signed-off-by: Kato Kazuyoshi (github: kzys) --- archive/stat_unsupported.go | 21 --------------------- 1 file changed, 21 deletions(-) delete mode 100644 archive/stat_unsupported.go diff --git a/archive/stat_unsupported.go b/archive/stat_unsupported.go deleted file mode 100644 index 004fa0f0a4..0000000000 --- a/archive/stat_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !linux !amd64 - -package archive - -import "syscall" - -func getLastAccess(stat *syscall.Stat_t) syscall.Timespec { - return syscall.Timespec{} -} - -func getLastModification(stat *syscall.Stat_t) syscall.Timespec { - return syscall.Timespec{} -} - -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotImplemented -} - -func UtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotImplemented -} -- cgit v1.2.1 From 794b5de749fceea906222917e90bbc19e131ecc3 Mon Sep 17 00:00:00 2001 From: Kato Kazuyoshi Date: Sat, 5 Apr 2014 10:29:40 +0900 Subject: Don't assume the file system has sub-second precision timestamp For example, FreeBSD doesn't have that (see http://lists.freebsd.org/pipermail/freebsd-fs/2012-February/013677.html). 
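[Editor's illustration] A small, self-contained sketch of the workaround used in the test change below: bumping the timestamp by a full second so the "touch" is observable even when the filesystem only stores whole-second timestamps. The temporary-file setup is illustrative:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"time"
)

func main() {
	f, err := ioutil.TempFile("", "touch-demo")
	if err != nil {
		panic(err)
	}
	f.Close()
	defer os.Remove(f.Name())

	before, _ := os.Stat(f.Name())

	// time.Now() alone may round to the same on-disk mtime when the
	// filesystem has one-second granularity; adding a second guarantees
	// the new mtime is observably different.
	future := time.Now().Add(time.Second)
	if err := os.Chtimes(f.Name(), future, future); err != nil {
		panic(err)
	}

	after, _ := os.Stat(f.Name())
	fmt.Println(after.ModTime().After(before.ModTime())) // expected: true
}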
Docker-DCO-1.1-Signed-off-by: Kato Kazuyoshi (github: kzys) --- archive/changes_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/archive/changes_test.go b/archive/changes_test.go index 1302b76f47..34c0f0da64 100644 --- a/archive/changes_test.go +++ b/archive/changes_test.go @@ -138,7 +138,7 @@ func mutateSampleDir(t *testing.T, root string) { } // Rewrite a file - if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileN\n"), 0777); err != nil { + if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { t.Fatal(err) } @@ -146,12 +146,12 @@ func mutateSampleDir(t *testing.T, root string) { if err := os.RemoveAll(path.Join(root, "file3")); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileM\n"), 0404); err != nil { + if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { t.Fatal(err) } // Touch file - if err := os.Chtimes(path.Join(root, "file4"), time.Now(), time.Now()); err != nil { + if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { t.Fatal(err) } @@ -195,7 +195,7 @@ func mutateSampleDir(t *testing.T, root string) { } // Touch dir - if err := os.Chtimes(path.Join(root, "dir3"), time.Now(), time.Now()); err != nil { + if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { t.Fatal(err) } } -- cgit v1.2.1 From e35c23311fce853fab318527789f11cc8c150ea2 Mon Sep 17 00:00:00 2001 From: Michael Brown Date: Mon, 7 Apr 2014 02:02:11 -0400 Subject: apparmor: docker-default: Include base abstraction Encountered problems on 14.04 relating to signals between container processes being blocked by apparmor. The base abstraction contains appropriate rules to allow this communication. Docker-DCO-1.1-Signed-off-by: Michael Brown (github: Supermathie) --- pkg/libcontainer/apparmor/setup.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/libcontainer/apparmor/setup.go b/pkg/libcontainer/apparmor/setup.go index 4e1c95143a..cc786de9aa 100644 --- a/pkg/libcontainer/apparmor/setup.go +++ b/pkg/libcontainer/apparmor/setup.go @@ -18,6 +18,7 @@ const DefaultProfile = ` @{PROC}=/proc/ profile docker-default flags=(attach_disconnected,mediate_deleted) { + #include network, capability, file, -- cgit v1.2.1 From 320b3e0d211d389addda02998a0f47839827b2af Mon Sep 17 00:00:00 2001 From: Michael Brown Date: Mon, 7 Apr 2014 02:47:43 -0400 Subject: apparmor: abstractions/base expects pid variable Add 'pid' variable pointing to 'self' to allow parsing of profile to succeed Docker-DCO-1.1-Signed-off-by: Michael Brown (github: Supermathie) --- pkg/libcontainer/apparmor/setup.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/libcontainer/apparmor/setup.go b/pkg/libcontainer/apparmor/setup.go index cc786de9aa..d9deec470e 100644 --- a/pkg/libcontainer/apparmor/setup.go +++ b/pkg/libcontainer/apparmor/setup.go @@ -16,6 +16,7 @@ const DefaultProfile = ` #@{HOMEDIRS}+= @{multiarch}=*-linux-gnu* @{PROC}=/proc/ +@{pid}=self profile docker-default flags=(attach_disconnected,mediate_deleted) { #include -- cgit v1.2.1 From 726206f2aa45b8a537ae6d6c819f21befc2e0aca Mon Sep 17 00:00:00 2001 From: Michael Brown Date: Mon, 7 Apr 2014 03:04:27 -0400 Subject: apparmor: pull in variables from tunables/global The variables that were defined at the top of the apparmor profile are best pulled in via the include. 
Docker-DCO-1.1-Signed-off-by: Michael Brown (github: Supermathie) --- pkg/libcontainer/apparmor/setup.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/pkg/libcontainer/apparmor/setup.go b/pkg/libcontainer/apparmor/setup.go index d9deec470e..4c664598ad 100644 --- a/pkg/libcontainer/apparmor/setup.go +++ b/pkg/libcontainer/apparmor/setup.go @@ -11,13 +11,8 @@ import ( const DefaultProfilePath = "/etc/apparmor.d/docker" const DefaultProfile = ` # AppArmor profile from lxc for containers. -@{HOME}=@{HOMEDIRS}/*/ /root/ -@{HOMEDIRS}=/home/ -#@{HOMEDIRS}+= -@{multiarch}=*-linux-gnu* -@{PROC}=/proc/ -@{pid}=self +#include profile docker-default flags=(attach_disconnected,mediate_deleted) { #include network, -- cgit v1.2.1 From 87ea27e80b131ca11d74c89446d4992af0f6c5b9 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Mon, 7 Apr 2014 17:18:45 +1000 Subject: intermediate image layers are used for more than the build Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- api/client/commands.go | 2 +- contrib/completion/fish/docker.fish | 2 +- docs/sources/reference/commandline/cli.rst | 9 ++++++++- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index 2007ae9c2d..bd23b3c7fd 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1136,7 +1136,7 @@ func (cli *DockerCli) CmdPull(args ...string) error { func (cli *DockerCli) CmdImages(args ...string) error { cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images") quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") - all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate images used to build)") + all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate image layers)") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") // FIXME: --viz and --tree are deprecated. Remove them in a future version. 
flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format") diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index edaa5ca8c6..e3bb72aebe 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -107,7 +107,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_pr # images complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate images used to build)' +complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'Output graph in tree format' diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index da643f249a..c0487302dd 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -597,10 +597,17 @@ To see how the ``docker:latest`` image was built: List images - -a, --all=false: Show all images (by default filter out the intermediate images used to build) + -a, --all=false: Show all images (by default filter out the intermediate image layers) --no-trunc=false: Don't truncate output -q, --quiet=false: Only show numeric IDs +The default ``docker images`` will show all top level images, their repository +and tags, and their virtual size. + +Docker images have intermediate layers that increase reuseability, decrease +disk usage, and speed up ``docker build`` by allowing each step to be cached. +These intermediate layers are not shown by default. + Listing the most recently created images ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- cgit v1.2.1 From c987aa09d81a6916e3893c41b7ec2880570b5c65 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 7 Apr 2014 11:01:35 -0700 Subject: Move history to separate file Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/history.go | 30 ++++++++++++++++++++++++++++++ runtime/runtime.go | 27 --------------------------- 2 files changed, 30 insertions(+), 27 deletions(-) create mode 100644 runtime/history.go diff --git a/runtime/history.go b/runtime/history.go new file mode 100644 index 0000000000..835ac9c11e --- /dev/null +++ b/runtime/history.go @@ -0,0 +1,30 @@ +package runtime + +import ( + "sort" +) + +// History is a convenience type for storing a list of containers, +// ordered by creation date. 
+type History []*Container + +func (history *History) Len() int { + return len(*history) +} + +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].When().Before(containers[i].When()) +} + +func (history *History) Swap(i, j int) { + containers := *history + tmp := containers[i] + containers[i] = containers[j] + containers[j] = tmp +} + +func (history *History) Add(container *Container) { + *history = append(*history, container) + sort.Sort(history) +} diff --git a/runtime/runtime.go b/runtime/runtime.go index 842dbf8b0b..f4c4b09a39 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -26,7 +26,6 @@ import ( "os" "path" "regexp" - "sort" "strings" "sync" "time" @@ -62,7 +61,6 @@ type Runtime struct { // Mountpoints should be private to the container func remountPrivate(mountPoint string) error { - mounted, err := mount.Mounted(mountPoint) if err != nil { return err @@ -973,28 +971,3 @@ func (runtime *Runtime) ContainerGraph() *graphdb.Database { func (runtime *Runtime) SetServer(server Server) { runtime.srv = server } - -// History is a convenience type for storing a list of containers, -// ordered by creation date. -type History []*Container - -func (history *History) Len() int { - return len(*history) -} - -func (history *History) Less(i, j int) bool { - containers := *history - return containers[j].When().Before(containers[i].When()) -} - -func (history *History) Swap(i, j int) { - containers := *history - tmp := containers[i] - containers[i] = containers[j] - containers[j] = tmp -} - -func (history *History) Add(container *Container) { - *history = append(*history, container) - sort.Sort(history) -} -- cgit v1.2.1 From dc7fefc16bfcc4e6d0ccb30233e50b0ab3d172f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Petazzoni?= Date: Thu, 3 Apr 2014 13:50:19 -0700 Subject: Use https://get.docker.io/ubuntu consistently MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The install script (on https://get.docker.io/) installs an APT sources.list entry referencing an HTTPS repository, and takes care of installing the apt-transport-https package. However, the Debian/Ubuntu specific installation script (on https://get.docker.io/ubuntu) used an HTTPS repository but without installing that package, causing the installation to fail on some platforms. This will use HTTPS everywhere, and updates the documentation accordingly. Docker-DCO-1.1-Signed-off-by: Jérôme Petazzoni (github: jpetazzo) Docker-DCO-1.1-Signed-off-by: Jérôme Petazzoni (github: jpetazzo) --- docs/sources/installation/ubuntulinux.rst | 15 +++++++++++++-- hack/release.sh | 5 +++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst index 44dba6b97e..51f303e88a 100644 --- a/docs/sources/installation/ubuntulinux.rst +++ b/docs/sources/installation/ubuntulinux.rst @@ -68,7 +68,18 @@ easy. **See the** :ref:`installmirrors` **section below if you are not in the United States.** Other sources of the Debian packages may be faster for you to install. -First add the Docker repository key to your local keychain. +First, check that your APT system can deal with ``https`` URLs: +the file ``/usr/lib/apt/methods/https`` should exist. If it doesn't, +you need to install the package ``apt-transport-https``. + +.. 
code-block:: bash + + [ -e /usr/lib/apt/methods/https ] || { + apt-get update + apt-get install apt-transport-https + } + +Then, add the Docker repository key to your local keychain. .. code-block:: bash @@ -82,7 +93,7 @@ continue installation.* .. code-block:: bash - sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\ + sudo sh -c "echo deb https://get.docker.io/ubuntu docker main\ > /etc/apt/sources.list.d/docker.list" sudo apt-get update sudo apt-get install lxc-docker diff --git a/hack/release.sh b/hack/release.sh index 6f9df8c7e6..84e1c42383 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -273,6 +273,11 @@ EOF # Upload repo s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/ cat < /etc/apt/sources.list.d/docker.list # Then import the repository key -- cgit v1.2.1 From 1277885420b069abd7468fe3e69deb4fb0a3f4fc Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 7 Apr 2014 12:20:23 -0700 Subject: Clean runtime create and make it simple Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runtime/runtime.go | 156 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 103 insertions(+), 53 deletions(-) diff --git a/runtime/runtime.go b/runtime/runtime.go index f4c4b09a39..d35e2d653a 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -371,53 +371,86 @@ func (runtime *Runtime) restore() error { // Create creates a new container from the given configuration with a given name. func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) { - // Lookup image + var ( + container *Container + warnings []string + ) + img, err := runtime.repositories.LookupImage(config.Image) if err != nil { return nil, nil, err } + if err := runtime.checkImageDepth(img); err != nil { + return nil, nil, err + } + if warnings, err = runtime.mergeAndVerifyConfig(config, img); err != nil { + return nil, nil, err + } + if container, err = runtime.newContainer(name, config, img); err != nil { + return nil, nil, err + } + if err := runtime.createRootfs(container, img); err != nil { + return nil, nil, err + } + if err := runtime.setupContainerDns(container, config); err != nil { + return nil, nil, err + } + if err := container.ToDisk(); err != nil { + return nil, nil, err + } + if err := runtime.Register(container); err != nil { + return nil, nil, err + } + return container, warnings, nil +} +func (runtime *Runtime) checkImageDepth(img *image.Image) error { // We add 2 layers to the depth because the container's rw and // init layer add to the restriction depth, err := img.Depth() if err != nil { - return nil, nil, err + return err } - if depth+2 >= MaxImageDepth { - return nil, nil, fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) + return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth) } + return nil +} - checkDeprecatedExpose := func(config *runconfig.Config) bool { - if config != nil { - if config.PortSpecs != nil { - for _, p := range config.PortSpecs { - if strings.Contains(p, ":") { - return true - } +func (runtime *Runtime) checkDeprecatedExpose(config *runconfig.Config) bool { + if config != nil { + if config.PortSpecs != nil { + for _, p := range config.PortSpecs { + if strings.Contains(p, ":") { + return true } } } - return false } + return false +} +func (runtime *Runtime) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) { warnings := []string{} - if checkDeprecatedExpose(img.Config) || 
checkDeprecatedExpose(config) { + if runtime.checkDeprecatedExpose(img.Config) || runtime.checkDeprecatedExpose(config) { warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.") } - if img.Config != nil { if err := runconfig.Merge(config, img.Config); err != nil { - return nil, nil, err + return nil, err } } - if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { - return nil, nil, fmt.Errorf("No command specified") + return nil, fmt.Errorf("No command specified") } + return warnings, nil +} - // Generate id - id := utils.GenerateRandomID() +func (runtime *Runtime) generateIdAndName(name string) (string, string, error) { + var ( + err error + id = utils.GenerateRandomID() + ) if name == "" { name, err = generateRandomName(runtime) @@ -426,47 +459,51 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe } } else { if !validContainerNamePattern.MatchString(name) { - return nil, nil, fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) + return "", "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) } } - if name[0] != '/' { name = "/" + name } - // Set the enitity in the graph using the default name specified if _, err := runtime.containerGraph.Set(name, id); err != nil { if !graphdb.IsNonUniqueNameError(err) { - return nil, nil, err + return "", "", err } conflictingContainer, err := runtime.GetByName(name) if err != nil { if strings.Contains(err.Error(), "Could not find entity") { - return nil, nil, err + return "", "", err } // Remove name and continue starting the container if err := runtime.containerGraph.Delete(name); err != nil { - return nil, nil, err + return "", "", err } } else { nameAsKnownByUser := strings.TrimPrefix(name, "/") - return nil, nil, fmt.Errorf( + return "", "", fmt.Errorf( "Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser, utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser) } } + return id, name, nil +} +func (runtime *Runtime) generateHostname(id string, config *runconfig.Config) { // Generate default hostname // FIXME: the lxc template no longer needs to set a default hostname if config.Hostname == "" { config.Hostname = id[:12] } +} - var args []string - var entrypoint string - +func (runtime *Runtime) getEntrypointAndArgs(config *runconfig.Config) (string, []string) { + var ( + entrypoint string + args []string + ) if len(config.Entrypoint) != 0 { entrypoint = config.Entrypoint[0] args = append(config.Entrypoint[1:], config.Cmd...) 
@@ -474,6 +511,21 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe entrypoint = config.Cmd[0] args = config.Cmd[1:] } + return entrypoint, args +} + +func (runtime *Runtime) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) { + var ( + id string + err error + ) + id, name, err = runtime.generateIdAndName(name) + if err != nil { + return nil, err + } + + runtime.generateHostname(id, config) + entrypoint, args := runtime.getEntrypointAndArgs(config) container := &Container{ // FIXME: we should generate the ID here instead of receiving it as an argument @@ -490,42 +542,50 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe ExecDriver: runtime.execDriver.Name(), } container.root = runtime.containerRoot(container.ID) + return container, nil +} + +func (runtime *Runtime) createRootfs(container *Container, img *image.Image) error { // Step 1: create the container directory. // This doubles as a barrier to avoid race conditions. if err := os.Mkdir(container.root, 0700); err != nil { - return nil, nil, err + return err } - initID := fmt.Sprintf("%s-init", container.ID) if err := runtime.driver.Create(initID, img.ID, ""); err != nil { - return nil, nil, err + return err } initPath, err := runtime.driver.Get(initID) if err != nil { - return nil, nil, err + return err } defer runtime.driver.Put(initID) if err := graph.SetupInitLayer(initPath); err != nil { - return nil, nil, err + return err } if err := runtime.driver.Create(container.ID, initID, ""); err != nil { - return nil, nil, err + return err } + return nil +} + +func (runtime *Runtime) setupContainerDns(container *Container, config *runconfig.Config) error { resolvConf, err := utils.GetResolvConf() if err != nil { - return nil, nil, err + return err } - if len(config.Dns) == 0 && len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { runtime.config.Dns = DefaultDns } // If custom dns exists, then create a resolv.conf for the container if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(runtime.config.DnsSearch) > 0 { - dns := utils.GetNameservers(resolvConf) - dnsSearch := utils.GetSearchDomains(resolvConf) + var ( + dns = utils.GetNameservers(resolvConf) + dnsSearch = utils.GetSearchDomains(resolvConf) + ) if len(config.Dns) > 0 { dns = config.Dns } else if len(runtime.config.Dns) > 0 { @@ -539,33 +599,23 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe container.ResolvConfPath = path.Join(container.root, "resolv.conf") f, err := os.Create(container.ResolvConfPath) if err != nil { - return nil, nil, err + return err } defer f.Close() for _, dns := range dns { if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil { - return nil, nil, err + return err } } if len(dnsSearch) > 0 { if _, err := f.Write([]byte("search " + strings.Join(dnsSearch, " ") + "\n")); err != nil { - return nil, nil, err + return err } } } else { container.ResolvConfPath = "/etc/resolv.conf" } - - // Step 2: save the container json - if err := container.ToDisk(); err != nil { - return nil, nil, err - } - - // Step 3: register the container - if err := runtime.Register(container); err != nil { - return nil, nil, err - } - return container, warnings, nil + return nil } // Commit creates a new filesystem image from the current state of a container. 
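The refactor above turns Runtime.Create into a short chain of helpers (checkImageDepth, mergeAndVerifyConfig, newContainer, createRootfs, setupContainerDns), each returning early on error. A minimal, self-contained sketch of that control-flow pattern, using illustrative step names rather than the real Docker helpers:

package main

import (
	"errors"
	"fmt"
)

// step is one fallible stage of container creation; the first error aborts the run.
type step func() error

func runSteps(steps ...step) error {
	for _, s := range steps {
		if err := s(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := runSteps(
		func() error { return nil },                                 // e.g. verify image depth
		func() error { return nil },                                 // e.g. merge and verify config
		func() error { return errors.New("no command specified") },  // e.g. build the container
	)
	fmt.Println(err) // prints the first failure, as the refactored Create does
}

Besides shortening Create, each helper in the real code only touches the state it needs, which is the main readability win over the previous monolithic function.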
-- cgit v1.2.1 From 30f22ee9e3ea1012ca663a0383c8c9c2330c52cc Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Fri, 4 Apr 2014 00:57:41 +0000 Subject: Convert a legacy integration test to a clean v2 CLI integration test. Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- integration-cli/docker_cli_images_test.go | 38 ++++++++++++++++++++ integration-cli/docker_utils.go | 7 ++++ integration-cli/utils.go | 4 +++ integration/server_test.go | 59 ------------------------------- 4 files changed, 49 insertions(+), 59 deletions(-) diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go index 17efc6f5c4..82b70bab40 100644 --- a/integration-cli/docker_cli_images_test.go +++ b/integration-cli/docker_cli_images_test.go @@ -18,3 +18,41 @@ func TestImagesEnsureImageIsListed(t *testing.T) { logDone("images - busybox should be listed") } + +func TestCLIImageTagRemove(t *testing.T) { + imagesBefore, _, _ := cmd(t, "images", "-a") + cmd(t, "tag", "busybox", "utest:tag1") + cmd(t, "tag", "busybox", "utest/docker:tag2") + cmd(t, "tag", "busybox", "utest:5000/docker:tag3") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+3 { + t.Fatalf("before: %#s\n\nafter: %#s\n", imagesBefore, imagesAfter) + } + } + cmd(t, "rmi", "utest/docker:tag2") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+2 { + t.Fatalf("before: %#s\n\nafter: %#s\n", imagesBefore, imagesAfter) + } + + } + cmd(t, "rmi", "utest:5000/docker:tag3") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+1 { + t.Fatalf("before: %#s\n\nafter: %#s\n", imagesBefore, imagesAfter) + } + + } + cmd(t, "rmi", "utest:tag1") + { + imagesAfter, _, _ := cmd(t, "images", "-a") + if nLines(imagesAfter) != nLines(imagesBefore)+0 { + t.Fatalf("before: %#s\n\nafter: %#s\n", imagesBefore, imagesAfter) + } + + } + logDone("tag,rmi- tagging the same images multiple times then removing tags") +} diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 8e9d0a23ff..6da86c9753 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -4,6 +4,7 @@ import ( "fmt" "os/exec" "strings" + "testing" ) func deleteContainer(container string) error { @@ -54,3 +55,9 @@ func deleteImages(images string) error { return err } + +func cmd(t *testing.T, args ...string) (string, int, error) { + out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) + errorOut(err, t, fmt.Sprintf("'%s' failed with errors: %v (%v)", strings.Join(args, " "), err, out)) + return out, status, err +} diff --git a/integration-cli/utils.go b/integration-cli/utils.go index 680cc6cfcf..ae7af52687 100644 --- a/integration-cli/utils.go +++ b/integration-cli/utils.go @@ -107,3 +107,7 @@ func errorOutOnNonNilError(err error, t *testing.T, message string) { t.Fatalf(message) } } + +func nLines(s string) int { + return strings.Count(s, "\n") +} diff --git a/integration/server_test.go b/integration/server_test.go index 4ad5ec0f92..9137e8031b 100644 --- a/integration/server_test.go +++ b/integration/server_test.go @@ -9,65 +9,6 @@ import ( "time" ) -func TestImageTagImageDelete(t *testing.T) { - eng := NewTestEngine(t) - defer mkRuntimeFromEngine(eng, t).Nuke() - - srv := mkServerFromEngine(eng, t) - - initialImages := getAllImages(eng, t) - if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil { - t.Fatal(err) - } - - if err := 
eng.Job("tag", unitTestImageName, "utest/docker", "tag2").Run(); err != nil { - t.Fatal(err) - } - - if err := eng.Job("tag", unitTestImageName, "utest:5000/docker", "tag3").Run(); err != nil { - t.Fatal(err) - } - - images := getAllImages(eng, t) - - nExpected := len(initialImages.Data[0].GetList("RepoTags")) + 3 - nActual := len(images.Data[0].GetList("RepoTags")) - if nExpected != nActual { - t.Errorf("Expected %d images, %d found", nExpected, nActual) - } - - if err := srv.DeleteImage("utest/docker:tag2", engine.NewTable("", 0), true, false, false); err != nil { - t.Fatal(err) - } - - images = getAllImages(eng, t) - - nExpected = len(initialImages.Data[0].GetList("RepoTags")) + 2 - nActual = len(images.Data[0].GetList("RepoTags")) - if nExpected != nActual { - t.Errorf("Expected %d images, %d found", nExpected, nActual) - } - - if err := srv.DeleteImage("utest:5000/docker:tag3", engine.NewTable("", 0), true, false, false); err != nil { - t.Fatal(err) - } - - images = getAllImages(eng, t) - - nExpected = len(initialImages.Data[0].GetList("RepoTags")) + 1 - nActual = len(images.Data[0].GetList("RepoTags")) - - if err := srv.DeleteImage("utest:tag1", engine.NewTable("", 0), true, false, false); err != nil { - t.Fatal(err) - } - - images = getAllImages(eng, t) - - if images.Len() != initialImages.Len() { - t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len()) - } -} - func TestCreateRm(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() -- cgit v1.2.1 From ffebcb660f666e3a2a7be6b838ebd55f524d5b5d Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 7 Apr 2014 12:40:41 -0700 Subject: Move -o cli flag and DriverConfig from HostConfig Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runconfig/hostconfig.go | 2 -- runconfig/parse.go | 10 +--------- runtime/container.go | 6 +----- 3 files changed, 2 insertions(+), 16 deletions(-) diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 9a92258644..55a308a5b8 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -14,7 +14,6 @@ type HostConfig struct { PortBindings nat.PortMap Links []string PublishAllPorts bool - DriverOptions map[string][]string } func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { @@ -25,7 +24,6 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { } job.GetenvJson("LxcConf", &hostConfig.LxcConf) job.GetenvJson("PortBindings", &hostConfig.PortBindings) - job.GetenvJson("DriverOptions", &hostConfig.DriverOptions) if Binds := job.GetenvList("Binds"); Binds != nil { hostConfig.Binds = Binds } diff --git a/runconfig/parse.go b/runconfig/parse.go index c93ec26ed1..3ca326fca6 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -45,7 +45,6 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf flDnsSearch = opts.NewListOpts(opts.ValidateDomain) flVolumesFrom opts.ListOpts flLxcOpts opts.ListOpts - flDriverOpts opts.ListOpts flEnvFile opts.ListOpts flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)") @@ -79,8 +78,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers") cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom dns search domains") cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)") - cmd.Var(&flLxcOpts, 
[]string{"#lxc-conf", "#-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") - cmd.Var(&flDriverOpts, []string{"o", "-opt"}, "Add custom driver options") + cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"") if err := cmd.Parse(args); err != nil { return nil, nil, cmd, err @@ -224,11 +222,6 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf WorkingDir: *flWorkingDir, } - driverOptions, err := parseDriverOpts(flDriverOpts) - if err != nil { - return nil, nil, cmd, err - } - hostConfig := &HostConfig{ Binds: binds, ContainerIDFile: *flContainerIDFile, @@ -237,7 +230,6 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf PortBindings: portBindings, Links: flLinks.GetAll(), PublishAllPorts: *flPublishAll, - DriverOptions: driverOptions, } if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { diff --git a/runtime/container.go b/runtime/container.go index bd4a6f2bea..a5a2f25c64 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -361,12 +361,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s func populateCommand(c *Container) { var ( en *execdriver.Network - driverConfig = c.hostConfig.DriverOptions - ) - - if driverConfig == nil { driverConfig = make(map[string][]string) - } + ) en = &execdriver.Network{ Mtu: c.runtime.config.Mtu, -- cgit v1.2.1 From b1e98e06dc62b0d25f98ea9a2fd94e41cc1d20e2 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 7 Apr 2014 13:29:24 -0700 Subject: Remove selinux build tag Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 2de5b34171..42438e3946 100644 --- a/Dockerfile +++ b/Dockerfile @@ -87,7 +87,7 @@ RUN git config --global user.email 'docker-dummy@example.com' VOLUME /var/lib/docker WORKDIR /go/src/github.com/dotcloud/docker -ENV DOCKER_BUILDTAGS apparmor selinux +ENV DOCKER_BUILDTAGS apparmor # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] -- cgit v1.2.1 From aaf018017c88a707b35115a9411e4069d9356748 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 7 Apr 2014 14:09:46 -0700 Subject: Add more label checks for selinux enabled Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- Dockerfile | 2 +- pkg/label/label_selinux.go | 36 ++++++++++++++++++++---------------- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/Dockerfile b/Dockerfile index 42438e3946..2de5b34171 100644 --- a/Dockerfile +++ b/Dockerfile @@ -87,7 +87,7 @@ RUN git config --global user.email 'docker-dummy@example.com' VOLUME /var/lib/docker WORKDIR /go/src/github.com/dotcloud/docker -ENV DOCKER_BUILDTAGS apparmor +ENV DOCKER_BUILDTAGS apparmor selinux # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/pkg/label/label_selinux.go b/pkg/label/label_selinux.go index d807b2b408..9f7463f79b 100644 --- a/pkg/label/label_selinux.go +++ b/pkg/label/label_selinux.go @@ -9,30 +9,31 @@ import ( ) func GenLabels(options string) (string, string, error) { - processLabel, mountLabel := selinux.GetLxcContexts() - if processLabel == "" { // SELinux is disabled + if !selinux.SelinuxEnabled() { return "", "", nil } - - var ( - err error - s = strings.Fields(options) 
- l = len(s) - ) - if l > 0 { - pcon := selinux.NewContext(processLabel) - for i := 0; i < l; i++ { - o := strings.Split(s[i], "=") - pcon[o[0]] = o[1] + var err error + processLabel, mountLabel := selinux.GetLxcContexts() + if processLabel != "" { + var ( + s = strings.Fields(options) + l = len(s) + ) + if l > 0 { + pcon := selinux.NewContext(processLabel) + for i := 0; i < l; i++ { + o := strings.Split(s[i], "=") + pcon[o[0]] = o[1] + } + processLabel = pcon.Get() + mountLabel, err = selinux.CopyLevel(processLabel, mountLabel) } - processLabel = pcon.Get() - mountLabel, err = selinux.CopyLevel(processLabel, mountLabel) } return processLabel, mountLabel, err } func FormatMountLabel(src string, mountLabel string) string { - if mountLabel != "" { + if selinux.SelinuxEnabled() && mountLabel != "" { switch src { case "": src = fmt.Sprintf("%s,context=%s", src, mountLabel) @@ -65,6 +66,9 @@ func SetFileLabel(path string, fileLabel string) error { } func GetPidCon(pid int) (string, error) { + if !selinux.SelinuxEnabled() { + return "", nil + } return selinux.Getpidcon(pid) } -- cgit v1.2.1 From 82f37b874ea17c5e0040f3e41dc761c88d576e33 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 7 Apr 2014 14:43:50 -0700 Subject: Ensure that selinux is disabled by default This also includes some portability changes so that the package can be imported with the top level runtime. Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- daemonconfig/config.go | 2 ++ pkg/selinux/selinux.go | 16 ++++++---------- pkg/selinux/selinux_test.go | 5 +---- pkg/system/calls_linux.go | 4 ++++ pkg/system/unsupported.go | 4 ++++ runtime/runtime.go | 4 ++++ 6 files changed, 21 insertions(+), 14 deletions(-) diff --git a/daemonconfig/config.go b/daemonconfig/config.go index 1abb6f8b89..146916d79a 100644 --- a/daemonconfig/config.go +++ b/daemonconfig/config.go @@ -28,6 +28,7 @@ type Config struct { ExecDriver string Mtu int DisableNetwork bool + EnableSelinuxSupport bool } // ConfigFromJob creates and returns a new DaemonConfig object @@ -45,6 +46,7 @@ func ConfigFromJob(job *engine.Job) *Config { InterContainerCommunication: job.GetenvBool("InterContainerCommunication"), GraphDriver: job.Getenv("GraphDriver"), ExecDriver: job.Getenv("ExecDriver"), + EnableSelinuxSupport: false, // FIXME: hardcoded default to disable selinux for .10 release } if dns := job.GetenvList("Dns"); dns != nil { config.Dns = dns diff --git a/pkg/selinux/selinux.go b/pkg/selinux/selinux.go index 5362308617..d2d90b1b37 100644 --- a/pkg/selinux/selinux.go +++ b/pkg/selinux/selinux.go @@ -39,6 +39,11 @@ var ( type SELinuxContext map[string]string +// SetDisabled disables selinux support for the package +func SetDisabled() { + selinuxEnabled, selinuxEnabledChecked = false, true +} + func GetSelinuxMountPoint() string { if selinuxfs != "unknown" { return selinuxfs @@ -140,15 +145,6 @@ func Setfilecon(path string, scon string) error { return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0) } -func Getfilecon(path string) (string, error) { - var scon []byte - - cnt, err := syscall.Getxattr(path, xattrNameSelinux, scon) - scon = make([]byte, cnt) - cnt, err = syscall.Getxattr(path, xattrNameSelinux, scon) - return string(scon), err -} - func Setfscreatecon(scon string) error { return writeCon("/proc/self/attr/fscreate", scon) } @@ -188,7 +184,7 @@ func writeCon(name string, val string) error { } func Setexeccon(scon string) error { - return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon) + return 
writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", system.Gettid()), scon) } func (c SELinuxContext) Get() string { diff --git a/pkg/selinux/selinux_test.go b/pkg/selinux/selinux_test.go index 6b59c1db11..181452ae75 100644 --- a/pkg/selinux/selinux_test.go +++ b/pkg/selinux/selinux_test.go @@ -12,9 +12,7 @@ func testSetfilecon(t *testing.T) { out, _ := os.OpenFile(tmp, os.O_WRONLY, 0) out.Close() err := selinux.Setfilecon(tmp, "system_u:object_r:bin_t:s0") - if err == nil { - t.Log(selinux.Getfilecon(tmp)) - } else { + if err != nil { t.Log("Setfilecon failed") t.Fatal(err) } @@ -41,7 +39,6 @@ func TestSELinux(t *testing.T) { pid := os.Getpid() t.Log("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023)) t.Log(selinux.Getcon()) - t.Log(selinux.Getfilecon("/etc/passwd")) err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0") if err == nil { t.Log(selinux.Getfscreatecon()) diff --git a/pkg/system/calls_linux.go b/pkg/system/calls_linux.go index 43c00ed554..cc4727aaa2 100644 --- a/pkg/system/calls_linux.go +++ b/pkg/system/calls_linux.go @@ -143,3 +143,7 @@ func SetCloneFlags(cmd *exec.Cmd, flag uintptr) { } cmd.SysProcAttr.Cloneflags = flag } + +func Gettid() int { + return syscall.Gettid() +} diff --git a/pkg/system/unsupported.go b/pkg/system/unsupported.go index eb3ec7ee92..c52a1e5d00 100644 --- a/pkg/system/unsupported.go +++ b/pkg/system/unsupported.go @@ -13,3 +13,7 @@ func SetCloneFlags(cmd *exec.Cmd, flag uintptr) { func UsetCloseOnExec(fd uintptr) error { return ErrNotSupportedPlatform } + +func Gettid() int { + return 0 +} diff --git a/runtime/runtime.go b/runtime/runtime.go index d35e2d653a..864874c8e4 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -11,6 +11,7 @@ import ( "github.com/dotcloud/docker/image" "github.com/dotcloud/docker/pkg/graphdb" "github.com/dotcloud/docker/pkg/mount" + "github.com/dotcloud/docker/pkg/selinux" "github.com/dotcloud/docker/pkg/sysinfo" "github.com/dotcloud/docker/runconfig" "github.com/dotcloud/docker/runtime/execdriver" @@ -723,6 +724,9 @@ func NewRuntime(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, erro } func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) { + if !config.EnableSelinuxSupport { + selinux.SetDisabled() + } // Set the default driver graphdriver.DefaultDriver = config.GraphDriver -- cgit v1.2.1 From 028d44d12683b170704537c3435361ae8a4e74d8 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 7 Apr 2014 14:59:44 -0700 Subject: Remove and unexport selinux functions Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- pkg/selinux/selinux.go | 25 +++++++++---------------- pkg/selinux/selinux_test.go | 2 -- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/pkg/selinux/selinux.go b/pkg/selinux/selinux.go index d2d90b1b37..edabc4f7dd 100644 --- a/pkg/selinux/selinux.go +++ b/pkg/selinux/selinux.go @@ -44,7 +44,7 @@ func SetDisabled() { selinuxEnabled, selinuxEnabledChecked = false, true } -func GetSelinuxMountPoint() string { +func getSelinuxMountPoint() string { if selinuxfs != "unknown" { return selinuxfs } @@ -75,15 +75,15 @@ func SelinuxEnabled() bool { return selinuxEnabled } selinuxEnabledChecked = true - if fs := GetSelinuxMountPoint(); fs != "" { - if con, _ := Getcon(); con != "kernel" { + if fs := getSelinuxMountPoint(); fs != "" { + if con, _ := getcon(); con != "kernel" { selinuxEnabled = true } } return selinuxEnabled } -func ReadConfig(target string) (value string) { +func readConfig(target string) 
(value string) { var ( val, key string bufin *bufio.Reader @@ -124,8 +124,8 @@ func ReadConfig(target string) (value string) { return "" } -func GetSELinuxPolicyRoot() string { - return selinuxDir + ReadConfig(selinuxTypeTag) +func getSELinuxPolicyRoot() string { + return selinuxDir + readConfig(selinuxTypeTag) } func readCon(name string) (string, error) { @@ -153,7 +153,7 @@ func Getfscreatecon() (string, error) { return readCon("/proc/self/attr/fscreate") } -func Getcon() (string, error) { +func getcon() (string, error) { return readCon("/proc/self/attr/current") } @@ -220,7 +220,7 @@ func SelinuxGetEnforce() int { } func SelinuxGetEnforceMode() int { - switch ReadConfig(selinuxTag) { + switch readConfig(selinuxTag) { case "enforcing": return Enforcing case "permissive": @@ -292,13 +292,6 @@ func uniqMcs(catRange uint32) string { return mcs } -func FreeContext(con string) { - if con != "" { - scon := NewContext(con) - mcsDelete(scon["level"]) - } -} - func GetLxcContexts() (processLabel string, fileLabel string) { var ( val, key string @@ -308,7 +301,7 @@ func GetLxcContexts() (processLabel string, fileLabel string) { if !SelinuxEnabled() { return "", "" } - lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", GetSELinuxPolicyRoot()) + lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot()) in, err := os.Open(lxcPath) if err != nil { return "", "" diff --git a/pkg/selinux/selinux_test.go b/pkg/selinux/selinux_test.go index 181452ae75..fde6ab147d 100644 --- a/pkg/selinux/selinux_test.go +++ b/pkg/selinux/selinux_test.go @@ -38,7 +38,6 @@ func TestSELinux(t *testing.T) { t.Log("getenforcemode ", selinux.SelinuxGetEnforceMode()) pid := os.Getpid() t.Log("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023)) - t.Log(selinux.Getcon()) err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0") if err == nil { t.Log(selinux.Getfscreatecon()) @@ -54,7 +53,6 @@ func TestSELinux(t *testing.T) { t.Fatal(err) } t.Log(selinux.Getpidcon(1)) - t.Log(selinux.GetSelinuxMountPoint()) } else { t.Log("Disabled") } -- cgit v1.2.1 From da8231a26bc4532ded49f93a82e731694ee6587c Mon Sep 17 00:00:00 2001 From: William Henry Date: Fri, 4 Apr 2014 11:57:58 -0600 Subject: Added man pages for several docker commands. 
Docker-DCO-1.1-Signed-off-by: William Henry (github: ipbabble) new file: contrib/man/man1/docker-attach.1 new file: contrib/man/man1/docker-build.1 new file: contrib/man/man1/docker-images.1 new file: contrib/man/man1/docker-info.1 new file: contrib/man/man1/docker-inspect.1 new file: contrib/man/man1/docker-rm.1 new file: contrib/man/man1/docker-rmi.1 new file: contrib/man/man1/docker-run.1 new file: contrib/man/man1/docker-tag.1 new file: contrib/man/man1/docker.1 --- contrib/man/man1/docker-attach.1 | 56 ++++++++ contrib/man/man1/docker-build.1 | 65 +++++++++ contrib/man/man1/docker-images.1 | 84 ++++++++++++ contrib/man/man1/docker-info.1 | 39 ++++++ contrib/man/man1/docker-inspect.1 | 237 ++++++++++++++++++++++++++++++++ contrib/man/man1/docker-rm.1 | 45 +++++++ contrib/man/man1/docker-rmi.1 | 29 ++++ contrib/man/man1/docker-run.1 | 277 ++++++++++++++++++++++++++++++++++++++ contrib/man/man1/docker-tag.1 | 49 +++++++ contrib/man/man1/docker.1 | 172 +++++++++++++++++++++++ 10 files changed, 1053 insertions(+) create mode 100644 contrib/man/man1/docker-attach.1 create mode 100644 contrib/man/man1/docker-build.1 create mode 100644 contrib/man/man1/docker-images.1 create mode 100644 contrib/man/man1/docker-info.1 create mode 100644 contrib/man/man1/docker-inspect.1 create mode 100644 contrib/man/man1/docker-rm.1 create mode 100644 contrib/man/man1/docker-rmi.1 create mode 100644 contrib/man/man1/docker-run.1 create mode 100644 contrib/man/man1/docker-tag.1 create mode 100644 contrib/man/man1/docker.1 diff --git a/contrib/man/man1/docker-attach.1 b/contrib/man/man1/docker-attach.1 new file mode 100644 index 0000000000..f0879d7507 --- /dev/null +++ b/contrib/man/man1/docker-attach.1 @@ -0,0 +1,56 @@ +.\" Process this file with +.\" nroff -man -Tascii docker-attach.1 +.\" +.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker" +.SH NAME +docker-attach \- Attach to a running container +.SH SYNOPSIS +.B docker attach +\fB--no-stdin\fR[=\fIfalse\fR] +\fB--sig-proxy\fR[=\fItrue\fR] +container +.SH DESCRIPTION +If you \fBdocker run\fR a container in detached mode (\fB-d\fR), you can reattach to the detached container with \fBdocker attach\fR using the container's ID or name. +.sp +You can detach from the container again (and leave it running) with CTRL-c (for a quiet exit) or CTRL-\ to get a stacktrace of the Docker client when it quits. When you detach from the container the exit code will be returned to the client. +.SH "OPTIONS" +.TP +.B --no-stdin=\fItrue\fR|\fIfalse\fR: +When set to true, do not attach to stdin. The default is \fIfalse\fR. +.TP +.B --sig-proxy=\fItrue\fR|\fIfalse\fR: +When set to true, proxify all received signal to the process (even in non-tty mode). The default is \fItrue\fR. +.sp +.SH EXAMPLES +.sp +.PP +.B Attaching to a container +.TP +In this example the top command is run inside a container, from an image called fedora, in detached mode. 
The ID from the container is passed into the \fBdocker attach\fR command: +.sp +.nf +.RS +# ID=$(sudo docker run -d fedora /usr/bin/top -b) +# sudo docker attach $ID +top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 +Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie +Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st +Mem: 373572k total, 355560k used, 18012k free, 27872k buffers +Swap: 786428k total, 0k used, 786428k free, 221740k cached + +PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND +1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top + +top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 +Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie +Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st +Mem: 373572k total, 355244k used, 18328k free, 27872k buffers +Swap: 786428k total, 0k used, 786428k free, 221776k cached + +PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND +1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top +.RE +.fi +.sp +.SH HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work. diff --git a/contrib/man/man1/docker-build.1 b/contrib/man/man1/docker-build.1 new file mode 100644 index 0000000000..6546b7be2a --- /dev/null +++ b/contrib/man/man1/docker-build.1 @@ -0,0 +1,65 @@ +.\" Process this file with +.\" nroff -man -Tascii docker-build.1 +.\" +.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker" +.SH NAME +docker-build \- Build a container image from a Dockerfile source at PATH +.SH SYNOPSIS +.B docker build +[\fB--no-cache\fR[=\fIfalse\fR] +[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR] +[\fB--rm\fR[=\fitrue\fR]] +[\fB-t\fR|\fB--tag\fR=\fItag\fR] +PATH | URL | - +.SH DESCRIPTION +This will read the Dockerfile from the directory specified in \fBPATH\fR. It also sends any other files and directories found in the current directory to the Docker daemon. The contents of this directory would be used by ADD command found within the Dockerfile. +Warning, this will send a lot of data to the Docker daemon if the current directory contains a lot of data. +If the absolute path is provided instead of ‘.’, only the files and directories required by the ADD commands from the Dockerfile will be added to the context and transferred to the Docker daemon. +.sp +When a single Dockerfile is given as URL, then no context is set. When a Git repository is set as URL, the repository is used as context. +.SH "OPTIONS" +.TP +.B -q, --quiet=\fItrue\fR|\fIfalse\fR: +When set to true, suppress verbose build output. Default is \fIfalse\fR. +.TP +.B --rm=\fItrue\fr|\fIfalse\fR: +When true, remove intermediate containers that are created during the build process. The default is true. +.TP +.B -t, --tag=\fItag\fR: +Tag to be applied to the resulting image on successful completion of the build. +.TP +.B --no-cache=\fItrue\fR|\fIfalse\fR +When set to true, do not use a cache when building the image. The default is \fIfalse\fR. +.sp +.SH EXAMPLES +.sp +.sp +.B Building an image from current directory +.TP +USing a Dockerfile, Docker images are built using the build command: +.sp +.RS +docker build . +.RE +.sp +If, for some reasone, you do not what to remove the intermediate containers created during the build you must set--rm=false. +.sp +.RS +docker build --rm=false . +.sp +.RE +.sp +A good practice is to make a subdirectory with a related name and create the Dockerfile in that directory. E.g. 
a directory called mongo may contain a Dockerfile for a MongoDB image, or a directory called httpd may contain an Dockerfile for an Apache web server. +.sp +It is also good practice to add the files required for the image to the subdirectory. These files will be then specified with the `ADD` instruction in the Dockerfile. Note: if you include a tar file, which is good practice, then Docker will automatically extract the contents of the tar file specified in the `ADD` instruction into the specified target. +.sp +.B Building an image container using a URL +.TP +This will clone the Github repository and use it as context. The Dockerfile at the root of the repository is used as Dockerfile. This only works if the Github repository is a dedicated repository. Note that you can specify an arbitrary Git repository by using the ‘git://’ schema. +.sp +.RS +docker build github.com/scollier/Fedora-Dockerfiles/tree/master/apache +.RE +.sp +.SH HISTORY +March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work. diff --git a/contrib/man/man1/docker-images.1 b/contrib/man/man1/docker-images.1 new file mode 100644 index 0000000000..e540ba2b79 --- /dev/null +++ b/contrib/man/man1/docker-images.1 @@ -0,0 +1,84 @@ +.\" Process this file with +.\" nroff -man -Tascii docker-images.1 +.\" +.TH "DOCKER" "1" "April 2014" "0.1" "Docker" +.SH NAME +docker-images \- List the images in the local repository +.SH SYNOPSIS +.B docker images +[\fB-a\fR|\fB--all\fR=\fIfalse\fR] +[\fB--no-trunc\fR[=\fIfalse\fR] +[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR] +[\fB-t\fR|\fB--tree\fR=\fIfalse\fR] +[\fB-v\fR|\fB--viz\fR=\fIfalse\fR] +[NAME] +.SH DESCRIPTION +This command lists the images stored in the local Docker repository. +.sp +By default, intermediate images, used during builds, are not listed. Some of the output, e.g. image ID, is truncated, for space reasons. However the truncated image ID, and often the first few characters, are enough to be used in other Docker commands that use the image ID. The output includes repository, tag, image ID, date created and the virtual size. +.sp +The title REPOSITORY for the first title may seem confusing. It is essentially the image name. However, because you can tag a specific image, and multiple tags (image instances) can be associated with a single name, the name is really a repository for all tagged images of the same name. +.SH "OPTIONS" +.TP +.B -a, --all=\fItrue\fR|\fIfalse\fR: +When set to true, also include all intermediate images in the list. The default is false. +.TP +.B --no-trunc=\fItrue\fR|\fIfalse\fR: +When set to true, list the full image ID and not the truncated ID. The default is false. +.TP +.B -q, --quiet=\fItrue\fR|\fIfalse\fR: +When set to true, list the complete image ID as part of the output. The default is false. +.TP +.B -t, --tree=\fItrue\fR|\fIfalse\fR: +When set to true, list the images in a tree dependency tree (hierarchy) format. The default is false. +.TP +.B -v, --viz=\fItrue\fR|\fIfalse\fR +When set to true, list the graph in graphviz format. The default is \fIfalse\fR. +.sp +.SH EXAMPLES +.sp +.B Listing the images +.TP +To list the images in a local repository (not the registry) run: +.sp +.RS +docker images +.RE +.sp +The list will contain the image repository name, a tag for the image, and an image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG, IMAGE ID, CREATED, and VIRTUAL SIZE. 
+.sp +To get a verbose list of images which contains all the intermediate images used in builds use \fB-a\fR: +.sp +.RS +docker images -a +.RE +.sp +.B List images dependency tree hierarchy +.TP +To list the images in the local repository (not the registry) in a dependency tree format then use the \fB-t\fR|\fB--tree=true\fR option. +.sp +.RS +docker images -t +.RE +.sp +This displays a staggered hierarchy tree where the less indented image is the oldest with dependent image layers branching inward (to the right) on subsequent lines. The newest or top level image layer is listed last in any tree branch. +.sp +.B List images in GraphViz format +.TP +To display the list in a format consumable by a GraphViz tools run with \fB-v\fR|\fB--viz=true\fR. For example to produce a .png graph file of the hierarchy use: +.sp +.RS +docker images --viz | dot -Tpng -o docker.png +.sp +.RE +.sp +.B Listing only the shortened image IDs +.TP +Listing just the shortened image IDs. This can be useful for some automated tools. +.sp +.RS +docker images -q +.RE +.sp +.SH HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work. diff --git a/contrib/man/man1/docker-info.1 b/contrib/man/man1/docker-info.1 new file mode 100644 index 0000000000..dca2600af0 --- /dev/null +++ b/contrib/man/man1/docker-info.1 @@ -0,0 +1,39 @@ +.\" Process this file with +.\" nroff -man -Tascii docker-info.1 +.\" +.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker" +.SH NAME +docker-info \- Display system wide information +.SH SYNOPSIS +.B docker info +.SH DESCRIPTION +This command displays system wide information regarding the Docker installation. Information displayed includes the number of containers and images, pool name, data file, metadata file, data space used, total data space, metadata space used, total metadata space, execution driver, and the kernel version. +.sp +The data file is where the images are stored and the metadata file is where the meta data regarding those images are stored. When run for the first time Docker allocates a certain amount of data space and meta data space from the space available on the volume where /var/lib/docker is mounted. +.SH "OPTIONS" +There are no available options. +.sp +.SH EXAMPLES +.sp +.B Display Docker system information +.TP +Here is a sample output: +.sp +.RS + # docker info + Containers: 18 + Images: 95 + Storage Driver: devicemapper + Pool Name: docker-8:1-170408448-pool + Data file: /var/lib/docker/devicemapper/devicemapper/data + Metadata file: /var/lib/docker/devicemapper/devicemapper/metadata + Data Space Used: 9946.3 Mb + Data Space Total: 102400.0 Mb + Metadata Space Used: 9.9 Mb + Metadata Space Total: 2048.0 Mb + Execution Driver: native-0.1 + Kernel Version: 3.10.0-116.el7.x86_64 +.RE +.sp +.SH HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work. diff --git a/contrib/man/man1/docker-inspect.1 b/contrib/man/man1/docker-inspect.1 new file mode 100644 index 0000000000..225125e564 --- /dev/null +++ b/contrib/man/man1/docker-inspect.1 @@ -0,0 +1,237 @@ +.\" Process this file with +.\" nroff -man -Tascii docker-inspect.1 +.\" +.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker" +.SH NAME +docker-inspect \- Return low-level information on a container/image +.SH SYNOPSIS +.B docker inspect +[\fB-f\fR|\fB--format\fR="" +CONTAINER|IMAGE [CONTAINER|IMAGE...] 
+.SH DESCRIPTION +This displays all the information available in Docker for a given container or image. By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result. +.SH "OPTIONS" +.TP +.B -f, --format="": +The text/template package of Go describes all the details of the format. See examples section +.SH EXAMPLES +.sp +.PP +.B Getting information on a container +.TP +To get information on a container use it's ID or instance name +.sp +.fi +.RS +#docker inspect 1eb5fabf5a03 + +[{ + "ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b", + "Created": "2014-04-04T21:33:52.02361335Z", + "Path": "/usr/sbin/nginx", + "Args": [], + "Config": { + "Hostname": "1eb5fabf5a03", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "ExposedPorts": { + "80/tcp": {} + }, + "Tty": true, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/usr/sbin/nginx" + ], + "Dns": null, + "DnsSearch": null, + "Image": "summit/nginx", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "OnBuild": null, + "Context": { + "mount_label": "system_u:object_r:svirt_sandbox_file_t:s0:c0,c650", + "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c0,c650" + } + }, + "State": { + "Running": true, + "Pid": 858, + "ExitCode": 0, + "StartedAt": "2014-04-04T21:33:54.16259207Z", + "FinishedAt": "0001-01-01T00:00:00Z", + "Ghost": false + }, + "Image": "df53773a4390e25936f9fd3739e0c0e60a62d024ea7b669282b27e65ae8458e6", + "NetworkSettings": { + "IPAddress": "172.17.0.2", + "IPPrefixLen": 16, + "Gateway": "172.17.42.1", + "Bridge": "docker0", + "PortMapping": null, + "Ports": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "80" + } + ] + } + }, + "ResolvConfPath": "/etc/resolv.conf", + "HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname", + "HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts", + "Name": "/ecstatic_ptolemy", + "Driver": "devicemapper", + "ExecDriver": "native-0.1", + "Volumes": {}, + "VolumesRW": {}, + "HostConfig": { + "Binds": null, + "ContainerIDFile": "", + "LxcConf": [], + "Privileged": false, + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "80" + } + ] + }, + "Links": null, + "PublishAllPorts": false, + "DriverOptions": { + "lxc": null + }, + "CliAddress": "" + } +.RE +.nf +.sp +.B Getting the IP address of a container instance +.TP +To get the IP address of a container use: +.sp +.fi +.RS +# docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03 + +172.17.0.2 +.RE +.nf +.sp +.B Listing all port bindings +.TP +One can loop over arrays and maps in the results to produce simple text output: +.sp +.fi +.RS +# docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03 + +80/tcp -> 80 +.RE +.nf +.sp +.B Getting information on an image +.TP +Use an image's ID or name (e.g. repository/name[:tag]) to get information on it. 
+.sp +.fi +.RS +docker inspect 58394af37342 +[{ + "id": "58394af373423902a1b97f209a31e3777932d9321ef10e64feaaa7b4df609cf9", + "parent": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", + "created": "2014-02-03T16:10:40.500814677Z", + "container": "f718f19a28a5147da49313c54620306243734bafa63c76942ef6f8c4b4113bc5", + "container_config": { + "Hostname": "88807319f25e", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": [ + "/bin/sh", + "-c", + "#(nop) ADD fedora-20-medium.tar.xz in /" + ], + "Dns": null, + "DnsSearch": null, + "Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "OnBuild": null, + "Context": null + }, + "docker_version": "0.6.3", + "author": "Lokesh Mandvekar \u003clsm5@redhat.com\u003e - ./buildcontainers.sh", + "config": { + "Hostname": "88807319f25e", + "Domainname": "", + "User": "", + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 0, + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "PortSpecs": null, + "ExposedPorts": null, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": null, + "Dns": null, + "DnsSearch": null, + "Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", + "Volumes": null, + "VolumesFrom": "", + "WorkingDir": "", + "Entrypoint": null, + "NetworkDisabled": false, + "OnBuild": null, + "Context": null + }, + "architecture": "x86_64", + "Size": 385520098 +}] +.RE +.nf +.sp +.SH HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work. diff --git a/contrib/man/man1/docker-rm.1 b/contrib/man/man1/docker-rm.1 new file mode 100644 index 0000000000..b06e014d3b --- /dev/null +++ b/contrib/man/man1/docker-rm.1 @@ -0,0 +1,45 @@ +.\" Process this file with +.\" nroff -man -Tascii docker-rm.1 +.\" +.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker" +.SH NAME +docker-rm \- Remove one or more containers. +.SH SYNOPSIS +.B docker rm +[\fB-f\fR|\fB--force\fR[=\fIfalse\fR] +[\fB-l\fR|\fB--link\fR[=\fIfalse\fR] +[\fB-v\fR|\fB--volumes\fR[=\fIfalse\fR] +CONTAINER [CONTAINER...] +.SH DESCRIPTION +This will remove one or more containers from the host node. The container name or ID can be used. This does not remove images. You cannot remove a running container unless you use the \fB-f\fR option. To see all containers on a host use the \fBdocker ps -a\fR command. +.SH "OPTIONS" +.TP +.B -f, --force=\fItrue\fR|\fIfalse\fR: +When set to true, force the removal of the container. The default is \fIfalse\fR. +.TP +.B -l, --link=\fItrue\fR|\fIfalse\fR: +When set to true, remove the specified link and not the underlying container. The default is \fIfalse\fR. +.TP +.B -v, --volumes=\fItrue\fR|\fIfalse\fR: +When set to true, remove the volumes associated to the container. The default is \fIfalse\fR. 
+.SH EXAMPLES +.sp +.PP +.B Removing a container using its ID +.TP +To remove a container using its ID, find it either from a \fBdocker ps -a\fR command, or use the ID returned from the \fBdocker run\fR command, or retrieve it from a file used to store it using the \fBdocker run --cidfile\fR: +.sp +.RS +docker rm abebf7571666 +.RE +.sp +.B Removing a container using the container name: +.TP +The name of the container can be found using the \fBdocker ps -a\fR command. Then use that name as follows: +.sp +.RS +docker rm hopeful_morse +.RE +.sp +.SH HISTORY +March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work. diff --git a/contrib/man/man1/docker-rmi.1 b/contrib/man/man1/docker-rmi.1 new file mode 100644 index 0000000000..6f33446ecd --- /dev/null +++ b/contrib/man/man1/docker-rmi.1 @@ -0,0 +1,29 @@ +.\" Process this file with +.\" nroff -man -Tascii docker-rmi.1 +.\" +.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker" +.SH NAME +docker-rmi \- Remove one or more images. +.SH SYNOPSIS +.B docker rmi +[\fB-f\fR|\fB--force\fR[=\fIfalse\fR] +IMAGE [IMAGE...] +.SH DESCRIPTION +This will remove one or more images from the host node. This does not remove images from a registry. You cannot remove an image of a running container unless you use the \fB-f\fR option. To see all images on a host use the \fBdocker images\fR command. +.SH "OPTIONS" +.TP +.B -f, --force=\fItrue\fR|\fIfalse\fR: +When set to true, force the removal of the image. The default is \fIfalse\fR. +.SH EXAMPLES +.sp +.PP +.B Removing an image +.TP +Here is an example of removing an image: +.sp +.RS +docker rmi fedora/httpd +.RE +.sp +.SH HISTORY +March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work. diff --git a/contrib/man/man1/docker-run.1 b/contrib/man/man1/docker-run.1 new file mode 100644 index 0000000000..fd449374e3 --- /dev/null +++ b/contrib/man/man1/docker-run.1 @@ -0,0 +1,277 @@ +.\" Process this file with +.\" nroff -man -Tascii docker-run.1 +.\" +.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker" +.SH NAME +docker-run \- Run a process in an isolated container +.SH SYNOPSIS +.B docker run +[\fB-a\fR|\fB--attach\fR[=]] [\fB-c\fR|\fB--cpu-shares\fR[=0] [\fB-m\fR|\fB--memory\fR=\fImemory-limit\fR] +[\fB--cidfile\fR=\fIfile\fR] [\fB-d\fR|\fB--detach\fR[=\fIfalse\fR]] [\fB--dns\fR=\fIIP-address\fR] +[\fB--name\fR=\fIname\fR] [\fB-u\fR|\fB--user\fR=\fIusername\fR|\fIuid\fR] +[\fB--link\fR=\fIname\fR:\fIalias\fR] +[\fB-e\fR|\fB--env\fR=\fIenvironment\fR] [\fB--entrypoint\fR=\fIcommand\fR] +[\fB--expose\fR=\fIport\fR] [\fB-P\fR|\fB--publish-all\fR[=\fIfalse\fR]] +[\fB-p\fR|\fB--publish\fR=\fIport-mapping\fR] [\fB-h\fR|\fB--hostname\fR=\fIhostname\fR] +[\fB--rm\fR[=\fIfalse\fR]] [\fB--privileged\fR[=\fIfalse\fR] +[\fB-i\fR|\fB--interactive\fR[=\fIfalse\fR] +[\fB-t\fR|\fB--tty\fR[=\fIfalse\fR]] [\fB--lxc-conf\fR=\fIoptions\fR] +[\fB-n\fR|\fB--networking\fR[=\fItrue\fR]] +[\fB-v\fR|\fB--volume\fR=\fIvolume\fR] [\fB--volumes-from\fR=\fIcontainer-id\fR] +[\fB-w\fR|\fB--workdir\fR=\fIdirectory\fR] [\fB--sig-proxy\fR[=\fItrue\fR]] +IMAGE [COMMAND] [ARG...] +.SH DESCRIPTION +.PP +Run a process in a new container. \fBdocker run\fR starts a process with its own file system, its own networking, and its own isolated process tree.
The \fIIMAGE\fR which starts the process may define defaults related to the process that will be run in the container, the networking to expose, and more, but \fBdocker run\fR gives final control to the operator or administrator who starts the container from the image. For that reason \fBdocker run\fR has more options than any other docker command. + +If the \fIIMAGE\fR is not already loaded then \fBdocker run\fR will pull the \fIIMAGE\fR, and all image dependencies, from the repository in the same way running \fBdocker pull\fR \fIIMAGE\fR, before it starts the container from that image. + + +.SH "OPTIONS" + +.TP +.B -a, --attach=\fIstdin\fR|\fIstdout\fR|\fIstderr\fR: +Attach to stdin, stdout or stderr. In foreground mode (the default when -d is not specified), \fBdocker run\fR can start the process in the container and attach the console to the process’s standard input, output, and standard error. It can even pretend to be a TTY (this is what most commandline executables expect) and pass along signals. The \fB-a\fR option can be set for each of stdin, stdout, and stderr. + +.TP +.B -c, --cpu-shares=0: +CPU shares in relative weight. You can increase the priority of a container with the -c option. By default, all containers run at the same priority and get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via \fBdocker run\fR. + +.TP +.B -m, --memory=\fImemory-limit\fR: +Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. The memory limit format: , where unit = b, k, m or g. + +.TP +.B --cidfile=\fIfile\fR: +Write the container ID to the file specified. + +.TP +.B -d, --detach=\fItrue\fR|\fIfalse\fR: +Detached mode. This runs the container in the background. It outputs the new container's id and error messages. At any time you can run \fBdocker ps\fR in the other shell to view a list of the running containers. You can reattach to a detached container with \fBdocker attach\fR. If you choose to run a container in the detached mode, then you cannot use the --rm option. + +.TP +.B --dns=\fIIP-address\fR: +Set custom DNS servers. This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g. 127.0.0.1). When this is the case the \fB-dns\fR flag is necessary for every run. + +.TP +.B -e, --env=\fIenvironment\fR: +Set environment variables. This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container. + +.TP +.B --entrypoint=\fIcommand\fR: +This option allows you to overwrite the default entrypoint of the image that is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND because it specifies what executable to run when the container starts, but it is (purposely) more difficult to override. The ENTRYPOINT gives a container its default nature or behavior, so that when you set an ENTRYPOINT you can run the container as if it were that binary, complete with default options, and you can pass in more options via the COMMAND. But sometimes an operator may want to run something else inside the container, so you can override the default ENTRYPOINT at runtime by using a \fB--entrypoint\fR and a string to specify the new ENTRYPOINT.
+
+.TP
+.B --expose=\fIport\fR:
+Expose a port from the container without publishing it to your host. A container's port can be exposed to other containers in three ways: 1) the developer can expose the port using the EXPOSE parameter of the Dockerfile, 2) the operator can use the \fB--expose\fR option with \fBdocker run\fR, or 3) the container can be started with the \fB--link\fR option.
+
+.TP
+.B -P, --publish-all=\fItrue\fR|\fIfalse\fR:
+When set to true, publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the exposed port accessible on the host and the ports will be available to any client that can reach the host. To find the map between the host ports and the exposed ports, use \fBdocker port\fR.
+
+.TP
+.B -p, --publish=[]:
+Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)
+
+.TP
+.B -h, --hostname=\fIhostname\fR:
+Sets the container host name that is available inside the container.
+
+.TP
+.B -i, --interactive=\fItrue\fR|\fIfalse\fR:
+When set to true, keep stdin open even if not attached. The default is false.
+
+.TP
+.B --link=\fIname\fR:\fIalias\fR:
+Add a link to another container. The format is name:alias. If the operator uses \fB--link\fR when starting the new client container, then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use.
+
+.TP
+.B -n, --networking=\fItrue\fR|\fIfalse\fR:
+By default, all containers have networking enabled (true) and can make outgoing connections. The operator can disable networking by setting \fB--networking\fR to false. This disables all incoming and outgoing networking. In cases like this, I/O can only be performed through files or by using STDIN/STDOUT.
+
+Also by default, the container will use the same DNS servers as the host, but the operator may override this with \fB--dns\fR.
+
+.TP
+.B --name=\fIname\fR:
+Assign a name to the container. The operator can identify a container in three ways:
+.sp
+.nf
+UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”)
+UUID short identifier (“f78375b1c487”)
+Name (“jonah”)
+.fi
+.sp
+The UUID identifiers come from the Docker daemon, and if a name is not assigned to the container with \fB--name\fR then the daemon will also generate a random string name. The name is useful when defining links (see \fB--link\fR) or any other place you need to identify a container. This works for both background and foreground Docker containers.
+
+.TP
+.B --privileged=\fItrue\fR|\fIfalse\fR:
+Give extended privileges to this container. By default, Docker containers are “unprivileged” (=false) and cannot, for example, run a Docker daemon inside the Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices.
+
+When the operator executes \fBdocker run --privileged\fR, Docker will enable access to all devices on the host as well as set some configuration in AppArmor (\fB???\fR) to allow the container nearly all the same access to the host as processes running outside of a container on the host.
+
+.TP
+.B --rm=\fItrue\fR|\fIfalse\fR:
+If set to \fItrue\fR the container is automatically removed when it exits. The default is \fIfalse\fR. This option is incompatible with \fB-d\fR.
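+.sp
+For example (illustrative only, assuming a local \fIfedora\fR image), a throwaway interactive shell that is cleaned up automatically on exit:
+.sp
+.RS
+docker run --rm -i -t fedora /bin/bash
+.RE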
+
+.TP
+.B --sig-proxy=\fItrue\fR|\fIfalse\fR:
+When set to true, proxy all received signals to the process (even in non-tty mode). The default is true.
+
+.TP
+.B -t, --tty=\fItrue\fR|\fIfalse\fR:
+When set to true, Docker can allocate a pseudo-tty and attach to the standard input of any container. This can be used, for example, to run a throwaway interactive shell. The default value is false.
+
+.TP
+.B -u, --user=\fIusername\fR|\fIuid\fR:
+Set a username or UID for the container.
+
+.TP
+.B -v, --volume=\fIvolume\fR:
+Bind mount a volume to the container. The \fB-v\fR option can be used one or more times to add one or more mounts to a container. These mounts can then be used in other containers using the \fB--volumes-from\fR option. See examples.
+
+.TP
+.B --volumes-from=\fIcontainer-id\fR:
+Will mount volumes from the specified container identified by container-id. Once a volume is mounted in one container, it can be shared with other containers using the \fB--volumes-from\fR option when running those other containers. The volumes can be shared even if the original container with the mount is not running.
+
+.TP
+.B -w, --workdir=\fIdirectory\fR:
+Working directory inside the container. The default working directory for running binaries within a container is the root directory (/). The developer can set a different default with the Dockerfile WORKDIR instruction. The operator can override the working directory by using the \fB-w\fR option.
+
+.TP
+.B IMAGE:
+The image name or ID.
+
+.TP
+.B COMMAND:
+The command or program to run inside the image.
+
+.TP
+.B ARG:
+The arguments for the command to be run in the container.
+
+.SH EXAMPLES
+.sp
+.B Exposing log messages from the container to the host's log
+.TP
+If you want messages that are logged in your container to show up in the host's syslog/journal, then you should bind mount /dev/log as follows.
+.sp
+.RS
+docker run -v /dev/log:/dev/log -i -t fedora /bin/bash
+.RE
+.sp
+From inside the container you can test this by sending a message to the log.
+.sp
+.RS
+logger "Hello from my container"
+.sp
+.RE
+Then exit and check the journal.
+.RS
+.sp
+exit
+.sp
+journalctl -b | grep Hello
+.RE
+.sp
+This should list the message sent to logger.
+.sp
+.B Attaching to one or more of STDIN, STDOUT, STDERR
+.TP
+If you do not specify -a, then Docker will attach everything (stdin, stdout, stderr). You can specify to which of the three standard streams (stdin, stdout, stderr) you'd like to connect instead, as in:
+.sp
+.RS
+docker run -a stdin -a stdout -i -t fedora /bin/bash
+.RE
+.sp
+.B Linking Containers
+.TP
+The link feature allows multiple containers to communicate with each other. For example, a container whose Dockerfile has exposed port 80 can be run and named as follows:
+.sp
+.RS
+docker run --name=link-test -d -i -t fedora/httpd
+.RE
+.sp
+.TP
+A second container, in this case called linker, can communicate with the httpd container, named link-test, by running with the \fB--link\fR=\fIname\fR:\fIalias\fR option:
+.sp
+.RS
+docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash
+.RE
+.sp
+.TP
+Now the container linker is linked to container link-test with the alias lt.
Running the \fBenv\fR command in the linker container shows environment variables with the LT (alias) context (\fBLT_\fR):
+.sp
+.nf
+.RS
+# env
+HOSTNAME=668231cb0978
+TERM=xterm
+LT_PORT_80_TCP=tcp://172.17.0.3:80
+LT_PORT_80_TCP_PORT=80
+LT_PORT_80_TCP_PROTO=tcp
+LT_PORT=tcp://172.17.0.3:80
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+PWD=/
+LT_NAME=/linker/lt
+SHLVL=1
+HOME=/
+LT_PORT_80_TCP_ADDR=172.17.0.3
+_=/usr/bin/env
+.RE
+.fi
+.sp
+.TP
+When linking two containers, Docker will use the exposed ports of the container to create a secure tunnel for the parent container to access.
+.TP
+.sp
+.B Mapping Ports for External Usage
+.TP
+The exposed port of an application can be mapped to a host port using the \fB-p\fR flag. For example, an httpd port 80 can be mapped to the host port 8080 using the following:
+.sp
+.RS
+docker run -p 8080:80 -d -i -t fedora/httpd
+.RE
+.sp
+.TP
+.B Creating and Mounting a Data Volume Container
+.TP
+Many applications require the sharing of persistent data across several containers. Docker allows you to create a Data Volume Container that other containers can mount from. For example, create a named container that contains directories /var/volume1 and /tmp/volume2. The image will need to contain these directories, so a couple of RUN mkdir instructions might be required for your fedora-data image:
+.sp
+.RS
+docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true
+.sp
+docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash
+.RE
+.sp
+.TP
+Multiple \fB--volumes-from\fR parameters will bring together multiple data volumes from multiple containers. It is also possible to mount the volumes that came from the data container in yet another container via the fedora-container1 intermediary container, allowing you to abstract the actual data source from users of that data:
+.sp
+.RS
+docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash
+.RE
+.TP
+.sp
+.B Mounting External Volumes
+.TP
+To mount a host directory as a container volume, specify the absolute path to the directory and the absolute path for the container directory separated by a colon:
+.sp
+.RS
+docker run -v /var/db:/data1 -i -t fedora bash
+.RE
+.sp
+.TP
+When using SELinux, be aware that the host has no knowledge of container SELinux policy. Therefore, in the above example, if SELinux policy is enforced, the /var/db directory is not writable by the container. A "Permission Denied" message will occur, along with an avc: message in the host's syslog.
+.sp
+.TP
+To work around this, at the time of writing this man page, the following command needs to be run in order for the proper SELinux policy type label to be attached to the host directory:
+.sp
+.RS
+chcon -Rt svirt_sandbox_file_t /var/db
+.RE
+.sp
+.TP
+Now, writing to the /data1 volume in the container will be allowed and the changes will also be reflected on the host in /var/db.
+.sp
+.SH HISTORY
+March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/man/man1/docker-tag.1 b/contrib/man/man1/docker-tag.1
new file mode 100644
index 0000000000..df85a1e8c1
--- /dev/null
+++ b/contrib/man/man1/docker-tag.1
@@ -0,0 +1,49 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-tag.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker-tag \- Tag an image in the repository
+.SH SYNOPSIS
+.B docker tag
+[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]]
+\fBIMAGE\fR [REGISTRYHOST/][USERNAME/]NAME[:TAG]
+.SH DESCRIPTION
+This will tag an image in the repository.
+.SH "OPTIONS"
+.TP
+.B -f, --force=\fItrue\fR|\fIfalse\fR:
+When set to true, force the tag name. The default is \fIfalse\fR.
+.TP
+.B REGISTRYHOST:
+The hostname of the registry, if required. This may also include the port separated by a ':'.
+.TP
+.B USERNAME:
+The username or other qualifying identifier for the image.
+.TP
+.B NAME:
+The image name.
+.TP
+.B TAG:
+The tag you are assigning to the image.
+.SH EXAMPLES
+.sp
+.PP
+.B Tagging an image
+.TP
+Here is an example where an image is tagged with the tag 'Version-1.0':
+.sp
+.RS
+docker tag 0e5574283393 fedora/httpd:Version-1.0
+.RE
+.sp
+.B Tagging an image for an internal repository
+.TP
+To push an image to an internal registry, rather than the default docker.io registry, you must tag it with the registry hostname and port (if needed).
+.sp
+.RS
+docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0
+.RE
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/man/man1/docker.1 b/contrib/man/man1/docker.1
new file mode 100644
index 0000000000..4a36e5baf5
--- /dev/null
+++ b/contrib/man/man1/docker.1
@@ -0,0 +1,172 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker \- Docker image and container command line interface
+.SH SYNOPSIS
+.B docker [OPTIONS] [COMMAND] [arg...]
+.SH DESCRIPTION
+\fBdocker\fR has two distinct functions. It is used to start the Docker daemon and to run the CLI (i.e., to command the daemon to manage images, containers, etc.). So \fBdocker\fR is both a server, as a daemon, and a client to the daemon, through the CLI.
+.sp
+To run the Docker daemon you do not specify any of the commands listed below, but must specify the \fB-d\fR option. The other options listed below are for the daemon only.
+.sp
+The Docker CLI has over 30 commands. The commands are listed below and each has its own man page which explains usage and arguments.
+.sp
+To see the man page for a command, run \fBman docker \fIcommand\fR.
+.SH "OPTIONS"
+.TP
+.B \-D=false:
+Enable debug mode
+.TP
+.B \-H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or unix://[/path/to/socket] to use.
+When host=[0.0.0.0], port=[4243] or path=[/var/run/docker.sock] is omitted, default values are used.
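+.sp
+For example (illustrative values), the daemon can be made to listen on both a TCP port and the default Unix socket:
+.sp
+.RS
+docker -d -H tcp://0.0.0.0:4243 -H unix:///var/run/docker.sock
+.RE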
+.TP +.B \-\-api-enable-cors=false +Enable CORS headers in the remote API +.TP +.B \-b="" +Attach containers to a pre\-existing network bridge; use 'none' to disable container networking +.TP +.B \-\-bip="" +Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b +.TP +.B \-d=false +Enable daemon mode +.TP +.B \-\-dns="" +Force Docker to use specific DNS servers +.TP +.B \-g="/var/lib/docker" +Path to use as the root of the Docker runtime +.TP +.B \-\-icc=true +Enable inter\-container communication +.TP +.B \-\-ip="0.0.0.0" +Default IP address to use when binding container ports +.TP +.B \-\-iptables=true +Disable Docker's addition of iptables rules +.TP +.B \-\-mtu=1500 +Set the containers network mtu +.TP +.B \-p="/var/run/docker.pid" +Path to use for daemon PID file +.TP +.B \-r=true +Restart previously running containers +.TP +.B \-s="" +Force the Docker runtime to use a specific storage driver +.TP +.B \-v=false +Print version information and quit +.SH "COMMANDS" +.TP +.B attach +Attach to a running container +.TP +.B build +Build a container from a Dockerfile +.TP +.B commit +Create a new image from a container's changes +.TP +.B cp +Copy files/folders from the containers filesystem to the host at path +.TP +.B diff +Inspect changes on a container's filesystem + +.TP +.B events +Get real time events from the server +.TP +.B export +Stream the contents of a container as a tar archive +.TP +.B history +Show the history of an image +.TP +.B images +List images +.TP +.B import +Create a new filesystem image from the contents of a tarball +.TP +.B info +Display system-wide information +.TP +.B insert +Insert a file in an image +.TP +.B inspect +Return low-level information on a container +.TP +.B kill +Kill a running container (which includes the wrapper process and everything inside it) +.TP +.B load +Load an image from a tar archive +.TP +.B login +Register or Login to a Docker registry server +.TP +.B logs +Fetch the logs of a container +.TP +.B port +Lookup the public-facing port which is NAT-ed to PRIVATE_PORT +.TP +.B ps +List containers +.TP +.B pull +Pull an image or a repository from a Docker registry server +.TP +.B push +Push an image or a repository to a Docker registry server +.TP +.B restart +Restart a running container +.TP +.B rm +Remove one or more containers +.TP +.B rmi +Remove one or more images +.TP +.B run +Run a command in a new container +.TP +.B save +Save an image to a tar archive +.TP +.B search +Search for an image in the Docker index +.TP +.B start +Start a stopped container +.TP +.B stop +Stop a running container +.TP +.B tag +Tag an image into a repository +.TP +.B top +Lookup the running processes of a container +.TP +.B version +Show the Docker version information +.TP +.B wait +Block until a container stops, then print its exit code +.SH EXAMPLES +.sp +For specific examples please see the man page for the specific Docker command. +.sp +.SH HISTORY +April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work. 
-- cgit v1.2.1 From b6042f252dd8a0c7a75da481b667f89c2e4ab071 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 7 Apr 2014 18:23:22 -0700 Subject: Ensure that ro mounts are remounted Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- integration-cli/docker_cli_run_test.go | 11 +++++++++++ pkg/libcontainer/nsinit/mount.go | 11 +++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index fbb09737fc..d085574741 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -301,3 +301,14 @@ func TestDockerRunWithRelativePath(t *testing.T) { logDone("run - volume with relative path") } + +func TestVolumesMountedAsReadonly(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile") + if code, err := runCommand(cmd); err == nil || code == 0 { + t.Fatalf("run should fail because volume is ro: exit code %d", code) + } + + deleteAllContainers() + + logDone("run - volumes as readonly mount") +} diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go index 4b5a42b1ac..3b0cf13bc9 100644 --- a/pkg/libcontainer/nsinit/mount.go +++ b/pkg/libcontainer/nsinit/mount.go @@ -37,14 +37,21 @@ func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, cons } for _, m := range bindMounts { - flags := syscall.MS_BIND | syscall.MS_REC + var ( + flags = syscall.MS_BIND | syscall.MS_REC + dest = filepath.Join(rootfs, m.Destination) + ) if !m.Writable { flags = flags | syscall.MS_RDONLY } - dest := filepath.Join(rootfs, m.Destination) if err := system.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil { return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err) } + if !m.Writable { + if err := system.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil { + return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err) + } + } if m.Private { if err := system.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil { return fmt.Errorf("mounting %s private %s", dest, err) -- cgit v1.2.1 From 9c4d10b9a91b9f11794ceb094331496c733410bb Mon Sep 17 00:00:00 2001 From: Dan Stine Date: Mon, 7 Apr 2014 22:09:15 -0400 Subject: fixed three more typos --- pkg/libcontainer/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md index 3bf79549e3..d6d0fbae44 100644 --- a/pkg/libcontainer/README.md +++ b/pkg/libcontainer/README.md @@ -3,7 +3,7 @@ #### background libcontainer specifies configuration options for what a container is. It provides a native Go implementation -for using linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management. +for using Linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management. #### container @@ -91,7 +91,7 @@ Sample `container.json` file: ``` Using this configuration and the current directory holding the rootfs for a process, one can use libcontainer to exec the container. Running the life of the namespace, a `pid` file -is written to the current directory with the pid of the namespaced process to the external world. A client can use this pid to wait, kill, or perform other operation with the container. 
If a user tries to run a new process inside an existing container with a live namespace the namespace will be joined by the new process. +is written to the current directory with the pid of the namespaced process to the external world. A client can use this pid to wait, kill, or perform other operation with the container. If a user tries to run a new process inside an existing container with a live namespace, the namespace will be joined by the new process. You may also specify an alternate root place where the `container.json` file is read and where the `pid` file will be saved. @@ -99,7 +99,7 @@ You may also specify an alternate root place where the `container.json` file is #### nsinit `nsinit` is a cli application used as the reference implementation of libcontainer. It is able to -spawn or join new containers giving the current directory. To use `nsinit` cd into a linux +spawn or join new containers giving the current directory. To use `nsinit` cd into a Linux rootfs and copy a `container.json` file into the directory with your specified configuration. To execute `/bin/bash` in the current directory as a container just run: -- cgit v1.2.1 From 919dbbe44df0722ca35538223a9c89e71856ec88 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Mon, 7 Apr 2014 19:12:22 -0700 Subject: Move DNS options to hostconfig The local resolver warning needed to be moved at daemon start because it was only show for the first container started anyways before having a default value set. Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runconfig/compare.go | 12 --------- runconfig/config.go | 8 ------ runconfig/config_test.go | 41 ------------------------------ runconfig/hostconfig.go | 9 ++++++- runconfig/merge.go | 19 -------------- runconfig/parse.go | 4 +-- runtime/container.go | 53 ++++++++++++++++++++++++++++++++++++++ runtime/runtime.go | 66 ++++++++++++------------------------------------ server/server.go | 9 ------- 9 files changed, 79 insertions(+), 142 deletions(-) diff --git a/runconfig/compare.go b/runconfig/compare.go index 6ed7405246..122687f669 100644 --- a/runconfig/compare.go +++ b/runconfig/compare.go @@ -19,8 +19,6 @@ func Compare(a, b *Config) bool { return false } if len(a.Cmd) != len(b.Cmd) || - len(a.Dns) != len(b.Dns) || - len(a.DnsSearch) != len(b.DnsSearch) || len(a.Env) != len(b.Env) || len(a.PortSpecs) != len(b.PortSpecs) || len(a.ExposedPorts) != len(b.ExposedPorts) || @@ -34,16 +32,6 @@ func Compare(a, b *Config) bool { return false } } - for i := 0; i < len(a.Dns); i++ { - if a.Dns[i] != b.Dns[i] { - return false - } - } - for i := 0; i < len(a.DnsSearch); i++ { - if a.DnsSearch[i] != b.DnsSearch[i] { - return false - } - } for i := 0; i < len(a.Env); i++ { if a.Env[i] != b.Env[i] { return false diff --git a/runconfig/config.go b/runconfig/config.go index 4b334c6848..9db545976b 100644 --- a/runconfig/config.go +++ b/runconfig/config.go @@ -25,8 +25,6 @@ type Config struct { StdinOnce bool // If true, close stdin after the 1 attached client disconnects. Env []string Cmd []string - Dns []string - DnsSearch []string Image string // Name of the image as it was passed by the operator (eg. 
could be symbolic) Volumes map[string]struct{} VolumesFrom string @@ -66,12 +64,6 @@ func ContainerConfigFromJob(job *engine.Job) *Config { if Cmd := job.GetenvList("Cmd"); Cmd != nil { config.Cmd = Cmd } - if Dns := job.GetenvList("Dns"); Dns != nil { - config.Dns = Dns - } - if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil { - config.DnsSearch = DnsSearch - } if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { config.Entrypoint = Entrypoint } diff --git a/runconfig/config_test.go b/runconfig/config_test.go index 784341b08c..d5ab75b3b9 100644 --- a/runconfig/config_test.go +++ b/runconfig/config_test.go @@ -163,32 +163,18 @@ func TestCompare(t *testing.T) { volumes1 := make(map[string]struct{}) volumes1["/test1"] = struct{}{} config1 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - DnsSearch: []string{"foo", "bar"}, - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, - } - config2 := Config{ - Dns: []string{"0.0.0.0", "2.2.2.2"}, - DnsSearch: []string{"foo", "bar"}, PortSpecs: []string{"1111:1111", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "11111111", Volumes: volumes1, } config3 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - DnsSearch: []string{"foo", "bar"}, PortSpecs: []string{"0000:0000", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "11111111", Volumes: volumes1, } config4 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - DnsSearch: []string{"foo", "bar"}, PortSpecs: []string{"0000:0000", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "22222222", @@ -197,24 +183,11 @@ func TestCompare(t *testing.T) { volumes2 := make(map[string]struct{}) volumes2["/test2"] = struct{}{} config5 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - DnsSearch: []string{"foo", "bar"}, PortSpecs: []string{"0000:0000", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "11111111", Volumes: volumes2, } - config6 := Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, - DnsSearch: []string{"foos", "bars"}, - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, - } - if Compare(&config1, &config2) { - t.Fatalf("Compare should return false, Dns are different") - } if Compare(&config1, &config3) { t.Fatalf("Compare should return false, PortSpecs are different") } @@ -224,9 +197,6 @@ func TestCompare(t *testing.T) { if Compare(&config1, &config5) { t.Fatalf("Compare should return false, Volumes are different") } - if Compare(&config1, &config6) { - t.Fatalf("Compare should return false, DnsSearch are different") - } if !Compare(&config1, &config1) { t.Fatalf("Compare should return true") } @@ -237,7 +207,6 @@ func TestMerge(t *testing.T) { volumesImage["/test1"] = struct{}{} volumesImage["/test2"] = struct{}{} configImage := &Config{ - Dns: []string{"1.1.1.1", "2.2.2.2"}, PortSpecs: []string{"1111:1111", "2222:2222"}, Env: []string{"VAR1=1", "VAR2=2"}, VolumesFrom: "1111", @@ -247,7 +216,6 @@ func TestMerge(t *testing.T) { volumesUser := make(map[string]struct{}) volumesUser["/test3"] = struct{}{} configUser := &Config{ - Dns: []string{"2.2.2.2", "3.3.3.3"}, PortSpecs: []string{"3333:2222", "3333:3333"}, Env: []string{"VAR2=3", "VAR3=3"}, Volumes: volumesUser, @@ -257,15 +225,6 @@ func TestMerge(t *testing.T) { t.Error(err) } - if len(configUser.Dns) != 3 { - t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns)) - } - for _, dns 
:= range configUser.Dns { - if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" { - t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns) - } - } - if len(configUser.ExposedPorts) != 3 { t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) } diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 55a308a5b8..5c5e291bad 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -14,6 +14,8 @@ type HostConfig struct { PortBindings nat.PortMap Links []string PublishAllPorts bool + Dns []string + DnsSearch []string } func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { @@ -30,6 +32,11 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { if Links := job.GetenvList("Links"); Links != nil { hostConfig.Links = Links } - + if Dns := job.GetenvList("Dns"); Dns != nil { + hostConfig.Dns = Dns + } + if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil { + hostConfig.DnsSearch = DnsSearch + } return hostConfig } diff --git a/runconfig/merge.go b/runconfig/merge.go index a154d4caf5..7a089855b2 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -94,25 +94,6 @@ func Merge(userConf, imageConf *Config) error { if userConf.Cmd == nil || len(userConf.Cmd) == 0 { userConf.Cmd = imageConf.Cmd } - if userConf.Dns == nil || len(userConf.Dns) == 0 { - userConf.Dns = imageConf.Dns - } else { - dnsSet := make(map[string]struct{}, len(userConf.Dns)) - for _, dns := range userConf.Dns { - dnsSet[dns] = struct{}{} - } - for _, dns := range imageConf.Dns { - if _, exists := dnsSet[dns]; !exists { - userConf.Dns = append(userConf.Dns, dns) - } - } - } - if userConf.DnsSearch == nil || len(userConf.DnsSearch) == 0 { - userConf.DnsSearch = imageConf.DnsSearch - } else { - //duplicates aren't an issue here - userConf.DnsSearch = append(userConf.DnsSearch, imageConf.DnsSearch...) 
- } if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 { userConf.Entrypoint = imageConf.Entrypoint } diff --git a/runconfig/parse.go b/runconfig/parse.go index 3ca326fca6..58d6c9ebb9 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -213,8 +213,6 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf AttachStderr: flAttach.Get("stderr"), Env: envVariables, Cmd: runCmd, - Dns: flDns.GetAll(), - DnsSearch: flDnsSearch.GetAll(), Image: image, Volumes: flVolumes.GetMap(), VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","), @@ -230,6 +228,8 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf PortBindings: portBindings, Links: flLinks.GetAll(), PublishAllPorts: *flPublishAll, + Dns: flDns.GetAll(), + DnsSearch: flDnsSearch.GetAll(), } if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { diff --git a/runtime/container.go b/runtime/container.go index a5a2f25c64..c8053b146c 100644 --- a/runtime/container.go +++ b/runtime/container.go @@ -430,6 +430,12 @@ func (container *Container) Start() (err error) { } }() + if container.ResolvConfPath == "" { + if err := container.setupContainerDns(); err != nil { + return err + } + } + if err := container.Mount(); err != nil { return err } @@ -1174,3 +1180,50 @@ func (container *Container) DisableLink(name string) { } } } + +func (container *Container) setupContainerDns() error { + var ( + config = container.hostConfig + runtime = container.runtime + ) + resolvConf, err := utils.GetResolvConf() + if err != nil { + return err + } + // If custom dns exists, then create a resolv.conf for the container + if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(runtime.config.DnsSearch) > 0 { + var ( + dns = utils.GetNameservers(resolvConf) + dnsSearch = utils.GetSearchDomains(resolvConf) + ) + if len(config.Dns) > 0 { + dns = config.Dns + } else if len(runtime.config.Dns) > 0 { + dns = runtime.config.Dns + } + if len(config.DnsSearch) > 0 { + dnsSearch = config.DnsSearch + } else if len(runtime.config.DnsSearch) > 0 { + dnsSearch = runtime.config.DnsSearch + } + container.ResolvConfPath = path.Join(container.root, "resolv.conf") + f, err := os.Create(container.ResolvConfPath) + if err != nil { + return err + } + defer f.Close() + for _, dns := range dns { + if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil { + return err + } + } + if len(dnsSearch) > 0 { + if _, err := f.Write([]byte("search " + strings.Join(dnsSearch, " ") + "\n")); err != nil { + return err + } + } + } else { + container.ResolvConfPath = "/etc/resolv.conf" + } + return nil +} diff --git a/runtime/runtime.go b/runtime/runtime.go index 864874c8e4..98903cfa08 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -24,6 +24,7 @@ import ( "github.com/dotcloud/docker/utils" "io" "io/ioutil" + "log" "os" "path" "regexp" @@ -393,9 +394,6 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe if err := runtime.createRootfs(container, img); err != nil { return nil, nil, err } - if err := runtime.setupContainerDns(container, config); err != nil { - return nil, nil, err - } if err := container.ToDisk(); err != nil { return nil, nil, err } @@ -572,53 +570,6 @@ func (runtime *Runtime) createRootfs(container *Container, img *image.Image) err return nil } -func (runtime *Runtime) setupContainerDns(container *Container, config *runconfig.Config) error { - resolvConf, err := utils.GetResolvConf() - if err != nil { - return err - } - if 
len(config.Dns) == 0 && len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { - runtime.config.Dns = DefaultDns - } - - // If custom dns exists, then create a resolv.conf for the container - if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(runtime.config.DnsSearch) > 0 { - var ( - dns = utils.GetNameservers(resolvConf) - dnsSearch = utils.GetSearchDomains(resolvConf) - ) - if len(config.Dns) > 0 { - dns = config.Dns - } else if len(runtime.config.Dns) > 0 { - dns = runtime.config.Dns - } - if len(config.DnsSearch) > 0 { - dnsSearch = config.DnsSearch - } else if len(runtime.config.DnsSearch) > 0 { - dnsSearch = runtime.config.DnsSearch - } - container.ResolvConfPath = path.Join(container.root, "resolv.conf") - f, err := os.Create(container.ResolvConfPath) - if err != nil { - return err - } - defer f.Close() - for _, dns := range dns { - if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil { - return err - } - } - if len(dnsSearch) > 0 { - if _, err := f.Write([]byte("search " + strings.Join(dnsSearch, " ") + "\n")); err != nil { - return err - } - } - } else { - container.ResolvConfPath = "/etc/resolv.conf" - } - return nil -} - // Commit creates a new filesystem image from the current state of a container. // The image can optionally be tagged into a repository func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) { @@ -839,6 +790,9 @@ func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (* eng: eng, } + if err := runtime.checkLocaldns(); err != nil { + return nil, err + } if err := runtime.restore(); err != nil { return nil, err } @@ -1025,3 +979,15 @@ func (runtime *Runtime) ContainerGraph() *graphdb.Database { func (runtime *Runtime) SetServer(server Server) { runtime.srv = server } + +func (runtime *Runtime) checkLocaldns() error { + resolvConf, err := utils.GetResolvConf() + if err != nil { + return err + } + if len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) { + log.Printf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns) + runtime.config.Dns = DefaultDns + } + return nil +} diff --git a/server/server.go b/server/server.go index 9cabf17889..0feaff4eac 100644 --- a/server/server.go +++ b/server/server.go @@ -1731,15 +1731,6 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status { job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n") config.MemorySwap = -1 } - resolvConf, err := utils.GetResolvConf() - if err != nil { - return job.Error(err) - } - if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.Config().Dns) == 0 && utils.CheckLocalDns(resolvConf) { - job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. 
Using default external servers : %v\n", runtime.DefaultDns) - config.Dns = runtime.DefaultDns - } - container, buildWarnings, err := srv.runtime.Create(config, name) if err != nil { if srv.runtime.Graph().IsNotExist(err) { -- cgit v1.2.1 From 603088be928564be2d863a897fbc1729adc74814 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 7 Apr 2014 21:20:03 -0600 Subject: Fix edge case in bind mount absolute path detection `filepath.Abs` does more than just `filepath.IsAbs` - namely, `filepath.Clean`, which resolves things like `.../.` or `.../../...`, and causes even an absolute path like `/some/path/../absolute` to fail (or, in my case, `/path/to/docker/.`) Just using `filepath.IsAbs` directly is a much cheaper check, too. :) Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- runtime/volumes.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/runtime/volumes.go b/runtime/volumes.go index 0b6f3734e0..40db177174 100644 --- a/runtime/volumes.go +++ b/runtime/volumes.go @@ -177,12 +177,8 @@ func createVolumes(container *Container) error { if bindMap, exists := binds[volPath]; exists { isBindMount = true srcPath = bindMap.SrcPath - srcAbs, err := filepath.Abs(srcPath) - if err != nil { - return err - } - if srcPath != srcAbs { - return fmt.Errorf("%s should be an absolute path", srcPath) + if !filepath.IsAbs(srcPath) { + return fmt.Errorf("%s must be an absolute path", srcPath) } if strings.ToLower(bindMap.Mode) == "rw" { srcRW = true -- cgit v1.2.1 From 886eb85dec7f4e9e193151befa7e6b4213ea67a0 Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 7 Apr 2014 22:22:03 -0600 Subject: Update RELEASE_BUNDLES to include integration tests Previously, running just "hack/release.sh" only ran the unit tests. This updates that to run the unit tests, then the integration tests, then build the binaries, then run the cli integration tests (so we're literally testing the binary we're about to release, which is super freaking cool IMO <3). Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/release.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/hack/release.sh b/hack/release.sh index 84e1c42383..d77d454e27 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -53,9 +53,13 @@ RELEASE_BUNDLES=( ) if [ "$1" != '--release-regardless-of-test-failure' ]; then - RELEASE_BUNDLES=( test "${RELEASE_BUNDLES[@]}" ) + RELEASE_BUNDLES=( + test test-integration + "${RELEASE_BUNDLES[@]}" + test-integration-cli + ) fi - + VERSION=$(cat VERSION) BUCKET=$AWS_S3_BUCKET -- cgit v1.2.1 From e6a64af966698d8b9dfe8721ca4404fc06331d69 Mon Sep 17 00:00:00 2001 From: Sven Dowideit Date: Tue, 8 Apr 2014 14:25:57 +1000 Subject: use the docs sidebar to be the TOC means that level 2 headings are the most important Docker-DCO-1.1-Signed-off-by: Sven Dowideit (github: SvenDowideit) --- docs/sources/reference/builder.rst | 62 ++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 33 deletions(-) diff --git a/docs/sources/reference/builder.rst b/docs/sources/reference/builder.rst index 6462512da0..4858a0b17c 100644 --- a/docs/sources/reference/builder.rst +++ b/docs/sources/reference/builder.rst @@ -13,12 +13,10 @@ Dockerfile Reference to create an image. Executing ``docker build`` will run your steps and commit them along the way, giving you a final image. -.. contents:: Table of Contents - .. _dockerfile_usage: -1. 
Usage -======== +Usage +===== To :ref:`build ` an image from a source repository, create a description file called ``Dockerfile`` at the root of your @@ -71,8 +69,8 @@ When you're done with your build, you're ready to look into .. _dockerfile_format: -2. Format -========= +Format +====== Here is the format of the Dockerfile: @@ -99,16 +97,14 @@ allows statements like: .. _dockerfile_instructions: -3. Instructions -=============== Here is the set of instructions you can use in a ``Dockerfile`` for building images. .. _dockerfile_from: -3.1 FROM --------- +``FROM`` +======== ``FROM `` @@ -134,8 +130,8 @@ assumed. If the used tag does not exist, an error will be returned. .. _dockerfile_maintainer: -3.2 MAINTAINER --------------- +``MAINTAINER`` +============== ``MAINTAINER `` @@ -144,8 +140,8 @@ the generated images. .. _dockerfile_run: -3.3 RUN -------- +``RUN`` +======= RUN has 2 forms: @@ -174,8 +170,8 @@ Known Issues (RUN) .. _dockerfile_cmd: -3.4 CMD -------- +``CMD`` +======= CMD has three forms: @@ -229,8 +225,8 @@ override the default specified in CMD. .. _dockerfile_expose: -3.5 EXPOSE ----------- +``EXPOSE`` +========== ``EXPOSE [...]`` @@ -241,8 +237,8 @@ functionally equivalent to running ``docker commit --run '{"PortSpecs": .. _dockerfile_env: -3.6 ENV -------- +``ENV`` +======= ``ENV `` @@ -262,8 +258,8 @@ from the resulting image. You can view the values using ``docker inspect``, and .. _dockerfile_add: -3.7 ADD -------- +``ADD`` +======= ``ADD `` @@ -329,8 +325,8 @@ The copy obeys the following rules: .. _dockerfile_entrypoint: -3.8 ENTRYPOINT --------------- +``ENTRYPOINT`` +============== ENTRYPOINT has two forms: @@ -378,8 +374,8 @@ this optional but default, you could use a CMD: .. _dockerfile_volume: -3.9 VOLUME ----------- +``VOLUME`` +========== ``VOLUME ["/data"]`` @@ -389,8 +385,8 @@ and mounting instructions via docker client, refer to :ref:`volume_def` document .. _dockerfile_user: -3.10 USER ---------- +``USER`` +======== ``USER daemon`` @@ -399,8 +395,8 @@ the image. .. _dockerfile_workdir: -3.11 WORKDIR ------------- +``WORKDIR`` +=========== ``WORKDIR /path/to/workdir`` @@ -418,8 +414,8 @@ instruction. For example: The output of the final ``pwd`` command in this Dockerfile would be ``/a/b/c``. -3.11 ONBUILD ------------- +``ONBUILD`` +=========== ``ONBUILD [INSTRUCTION]`` @@ -480,7 +476,7 @@ For example you might add something like this: .. _dockerfile_examples: -4. Dockerfile Examples +Dockerfile Examples ====================== .. code-block:: bash -- cgit v1.2.1 From 4c3eb7db675c7f8f15ef0d55e99a6699908f930c Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Mon, 7 Apr 2014 23:10:40 -0600 Subject: Update test-integration-cli bundlescript for consistency with other bundlescripts and slightly more verbose logging of which commands were executed Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- hack/make/test-integration-cli | 49 +++++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli index 18e4ee6602..b0506d261a 100644 --- a/hack/make/test-integration-cli +++ b/hack/make/test-integration-cli @@ -4,9 +4,6 @@ DEST=$1 set -e -# subshell so that we can export PATH without breaking other things -( -export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH" DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native} @@ -14,22 +11,30 @@ bundle_test_integration_cli() { go_test_dir ./integration-cli } -if ! 
command -v docker &> /dev/null; then - echo >&2 'error: binary or dynbinary must be run before test-integration-cli' - false -fi - -echo "running cli integration tests using graphdriver: '$DOCKER_GRAPHDRIVER' and execdriver: '$DOCKER_EXECDRIVER'" -docker -d -D -s $DOCKER_GRAPHDRIVER -e $DOCKER_EXECDRIVER -p $DEST/docker.pid &> $DEST/docker.log & - -# pull the busybox image before running the tests -sleep 2 -docker pull busybox - -bundle_test_integration_cli 2>&1 \ - | tee $DEST/test.log - -DOCKERD_PID=$(cat $DEST/docker.pid) -kill $DOCKERD_PID -wait $DOCKERD_PID || true -) +# subshell so that we can export PATH without breaking other things +( + export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH" + + if ! command -v docker &> /dev/null; then + echo >&2 'error: binary or dynbinary must be run before test-integration-cli' + false + fi + + ( set -x; exec \ + docker --daemon --debug \ + --storage-driver "$DOCKER_GRAPHDRIVER" \ + --exec-driver "$DOCKER_EXECDRIVER" \ + --pidfile "$DEST/docker.pid" \ + &> "$DEST/docker.log" + ) & + + # pull the busybox image before running the tests + sleep 2 + ( set -x; docker pull busybox ) + + bundle_test_integration_cli + + DOCKERD_PID=$(set -x; cat $DEST/docker.pid) + ( set -x; kill $DOCKERD_PID ) + wait $DOCKERD_PID || true +) 2>&1 | tee $DEST/test.log -- cgit v1.2.1 From 8b2bcd9a4bddf1b99ca201d81ac0a67c2b09fbe0 Mon Sep 17 00:00:00 2001 From: shin- Date: Tue, 8 Apr 2014 16:53:16 +0200 Subject: Added specific error message when hitting 401 over HTTP on push Docker-DCO-1.1-Signed-off-by: Joffrey F (github: shin-) --- registry/registry.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/registry/registry.go b/registry/registry.go index 5ac04f9e7e..817c08afa9 100644 --- a/registry/registry.go +++ b/registry/registry.go @@ -417,6 +417,9 @@ func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regis return fmt.Errorf("Failed to upload metadata: %s", err) } defer res.Body.Close() + if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { + return utils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + } if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { -- cgit v1.2.1 From cf655ca98d9c230d02454a39e815fc6b11ac5aed Mon Sep 17 00:00:00 2001 From: Tianon Gravi Date: Tue, 8 Apr 2014 09:42:47 -0600 Subject: Update DOCKER_CROSSPLATFORMS to be more readable Docker-DCO-1.1-Signed-off-by: Andrew Page (github: tianon) --- Dockerfile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 2de5b34171..ec95bad293 100644 --- a/Dockerfile +++ b/Dockerfile @@ -68,7 +68,10 @@ ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 # Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS linux/386 linux/arm darwin/amd64 darwin/386 freebsd/amd64 freebsd/386 freebsd/arm +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 darwin/386 \ + freebsd/amd64 freebsd/386 freebsd/arm # (set an explicit GOARM of 5 for maximum compatibility) ENV GOARM 5 RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' -- cgit v1.2.1 From 4f828d67f00449182eaada50dfba37e00f8f01ef Mon Sep 17 00:00:00 2001 From: "Guillaume J. 
Charmes" Date: Tue, 8 Apr 2014 10:10:51 -0700 Subject: Backup current docker apparmor profile and replace it with the new one Docker-DCO-1.1-Signed-off-by: Guillaume J. Charmes (github: creack) --- pkg/libcontainer/apparmor/setup.go | 36 ++++++++++++++++++++++++++++++++---- runtime/execdriver/native/driver.go | 8 +++++--- 2 files changed, 37 insertions(+), 7 deletions(-) diff --git a/pkg/libcontainer/apparmor/setup.go b/pkg/libcontainer/apparmor/setup.go index 4c664598ad..548e72f550 100644 --- a/pkg/libcontainer/apparmor/setup.go +++ b/pkg/libcontainer/apparmor/setup.go @@ -2,13 +2,17 @@ package apparmor import ( "fmt" + "io" "io/ioutil" "os" "os/exec" "path" ) -const DefaultProfilePath = "/etc/apparmor.d/docker" +const ( + DefaultProfilePath = "/etc/apparmor.d/docker" +) + const DefaultProfile = ` # AppArmor profile from lxc for containers. @@ -73,14 +77,38 @@ profile docker-default flags=(attach_disconnected,mediate_deleted) { } ` -func InstallDefaultProfile() error { +func InstallDefaultProfile(backupPath string) error { if !IsEnabled() { return nil } - // If the profile already exists, let it be. + // If the profile already exists, check if we already have a backup + // if not, do the backup and override it. (docker 0.10 upgrade changed the apparmor profile) + // see gh#5049, apparmor blocks signals in ubuntu 14.04 if _, err := os.Stat(DefaultProfilePath); err == nil { - return nil + if _, err := os.Stat(backupPath); err == nil { + // If both the profile and the backup are present, do nothing + return nil + } + // Make sure the directory exists + if err := os.MkdirAll(path.Dir(backupPath), 0755); err != nil { + return err + } + + // Create the backup file + f, err := os.Create(backupPath) + if err != nil { + return err + } + defer f.Close() + src, err := os.Open(DefaultProfilePath) + if err != nil { + return err + } + defer src.Close() + if _, err := io.Copy(f, src); err != nil { + return err + } } // Make sure /etc/apparmor.d exists diff --git a/runtime/execdriver/native/driver.go b/runtime/execdriver/native/driver.go index c5a3837615..d18865e508 100644 --- a/runtime/execdriver/native/driver.go +++ b/runtime/execdriver/native/driver.go @@ -21,8 +21,9 @@ import ( ) const ( - DriverName = "native" - Version = "0.1" + DriverName = "native" + Version = "0.1" + BackupApparmorProfilePath = "apparmor/docker.back" // relative to docker root ) func init() { @@ -66,7 +67,8 @@ func NewDriver(root, initPath string) (*driver, error) { if err := os.MkdirAll(root, 0700); err != nil { return nil, err } - if err := apparmor.InstallDefaultProfile(); err != nil { + // native driver root is at docker_root/execdriver/native. 
Put apparmor at docker_root + if err := apparmor.InstallDefaultProfile(filepath.Join(root, "../..", BackupApparmorProfilePath)); err != nil { return nil, err } return &driver{ -- cgit v1.2.1 From fa5223dab5b9f5cef2a0a341ee5065fec9c6d663 Mon Sep 17 00:00:00 2001 From: James Turnbull Date: Tue, 8 Apr 2014 11:59:02 -0700 Subject: Added Fedora installation method Docker-DCO-1.1-Signed-off-by: James Turnbull (github: jamtur01) --- hack/install.sh | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/hack/install.sh b/hack/install.sh index 205b57ecc7..575f328292 100755 --- a/hack/install.sh +++ b/hack/install.sh @@ -72,8 +72,35 @@ fi if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then lsb_dist='Debian' fi +if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then + lsb_dist='Fedora' +fi case "$lsb_dist" in + Fedora) + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker-io' + ) + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker run busybox echo "Docker has been successfully installed!"' + ) || true + fi + your_user=your-user + [ "$user" != 'root' ] && your_user="$user" + echo + echo 'If you would like to use Docker as a non-root user, you should now consider' + echo 'adding your user to the "docker" group with something like:' + echo + echo ' sudo usermod -aG docker' $your_user + echo + echo 'Remember that you will have to log out and back in for this to take effect!' + echo + exit 0 + ;; + Ubuntu|Debian) export DEBIAN_FRONTEND=noninteractive -- cgit v1.2.1 From a2aa902ec194169431fea6784c4a7cdab25aaf24 Mon Sep 17 00:00:00 2001 From: James Turnbull Date: Tue, 8 Apr 2014 11:59:36 -0700 Subject: Removed extra whitespace Docker-DCO-1.1-Signed-off-by: James Turnbull (github: jamtur01) --- hack/install.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/hack/install.sh b/hack/install.sh index 575f328292..43248cf2c0 100755 --- a/hack/install.sh +++ b/hack/install.sh @@ -103,7 +103,7 @@ case "$lsb_dist" in Ubuntu|Debian) export DEBIAN_FRONTEND=noninteractive - + did_apt_get_update= apt_get_update() { if [ -z "$did_apt_get_update" ]; then @@ -111,21 +111,21 @@ case "$lsb_dist" in did_apt_get_update=1 fi } - + # aufs is preferred over devicemapper; try to ensure the driver is available. if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then kern_extras="linux-image-extra-$(uname -r)" - + apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true - + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' ( set -x; sleep 10 ) fi fi - + if [ ! -e /usr/lib/apt/methods/https ]; then apt_get_update ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https' ) @@ -165,7 +165,7 @@ case "$lsb_dist" in echo exit 0 ;; - + Gentoo) if [ "$url" = "https://test.docker.io/" ]; then echo >&2 @@ -180,7 +180,7 @@ case "$lsb_dist" in echo >&2 exit 1 fi - + ( set -x $sh_c 'sleep 3; emerge app-emulation/docker' -- cgit v1.2.1 From 168f8aba74d9c2996acec6fe1b93a2301523e305 Mon Sep 17 00:00:00 2001 From: Solomon Hykes Date: Tue, 8 Apr 2014 09:49:48 -0700 Subject: Early deprecation warning for 'docker commit --run' Warn users of the planned deprecation of 'docker commit --run', and hide it from the docs and usage message. The option continues to work. 
Note that an alternative to 'commit --run' is being implemented but is not yet available. We are printing the warning anyway because on the basis that it never hurts to give more advance warning. The 'commit --run' flag is a leftover from the very early days of Docker, and has several problems: 1) It is very user unfriendly. You have to pass a literal json dict which is poorly documented and changes regularly (see PortSpecs vs ExposedPorts). The merge behavior is not clear and also changes regularly. it's not possible to unset a value. 2) It overlaps with the Dockerfile syntax. There are 2 ways to set a default command, expose a port or change an env variable. Some things can be done in a Dockerfile but not in --run. Some things can be done in --run but not in a Dockerfile. It would be better to push a single syntax, allow using it both in a file and via the command line, and make improvements in a single place. 3) It exposes data structures which should not be publicly exposed. There are several planned improvements to Docker which require moving around the content and schema of the various Config, Image and Container structures. The less of those we expose in public interfaces, the easier it is to move things around without a reverse compatibility nightmare. Docker-DCO-1.1-Signed-off-by: Solomon Hykes (github: shykes) --- api/client/commands.go | 4 +- docs/sources/reference/builder.rst | 12 ++-- docs/sources/reference/commandline/cli.rst | 92 ------------------------------ 3 files changed, 8 insertions(+), 100 deletions(-) diff --git a/api/client/commands.go b/api/client/commands.go index bd23b3c7fd..ef6bbd055a 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1433,7 +1433,8 @@ func (cli *DockerCli) CmdCommit(args ...string) error { cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes") flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith \"") - flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: --run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`) + // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. + flConfig := cmd.String([]string{"#run", "#-run"}, "", "this option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") if err := cmd.Parse(args); err != nil { return nil } @@ -1471,6 +1472,7 @@ func (cli *DockerCli) CmdCommit(args ...string) error { env engine.Env ) if *flConfig != "" { + fmt.Fprintf(cli.err, "WARNING: 'commit --run' is deprecated and will be removed in a future version, in favor of inline Dockerfile-compatible commands.\n") config = &runconfig.Config{} if err := json.Unmarshal([]byte(*flConfig), config); err != nil { return err diff --git a/docs/sources/reference/builder.rst b/docs/sources/reference/builder.rst index 4858a0b17c..e8897d1b09 100644 --- a/docs/sources/reference/builder.rst +++ b/docs/sources/reference/builder.rst @@ -188,9 +188,7 @@ omit the executable, in which case you must specify an ENTRYPOINT as well. When used in the shell or exec formats, the ``CMD`` instruction sets -the command to be executed when running the image. This is -functionally equivalent to running ``docker commit --run '{"Cmd": -}'`` outside the builder. +the command to be executed when running the image. 
If you use the *shell* form of the CMD, then the ```` will execute in ``/bin/sh -c``: @@ -230,10 +228,10 @@ override the default specified in CMD. ``EXPOSE [...]`` -The ``EXPOSE`` instruction exposes ports for use within links. This is -functionally equivalent to running ``docker commit --run '{"PortSpecs": -["", ""]}'`` outside the builder. Refer to -:ref:`port_redirection` for detailed information. +The ``EXPOSE`` instructions informs Docker that the container will listen +on the specified network ports at runtime. Docker uses this information +to interconnect containers using links (see :ref:`links `), +and to setup port redirection on the host system (see :ref:`port_redirection`). .. _dockerfile_env: diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst index c0487302dd..c0df5f8175 100644 --- a/docs/sources/reference/commandline/cli.rst +++ b/docs/sources/reference/commandline/cli.rst @@ -316,8 +316,6 @@ by using the ``git://`` schema. -m, --message="": Commit message -a, --author="": Author (eg. "John Hannibal Smith " - --run="": Configuration changes to be applied when the image is launched with `docker run`. - (ex: --run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}') .. _cli_commit_examples: @@ -336,96 +334,6 @@ Commit an existing container REPOSITORY TAG ID CREATED VIRTUAL SIZE SvenDowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB -Change the command that a container runs -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Sometimes you have an application container running just a service and you need -to make a quick change and then change it back. - -In this example, we run a container with ``ls`` and then change the image to -run ``ls /etc``. - -.. code-block:: bash - - $ docker run -t --name test ubuntu ls - bin boot dev etc home lib lib64 media mnt opt proc root run sbin selinux srv sys tmp usr var - $ docker commit --run='{"Cmd": ["ls","/etc"]}' test test2 - 933d16de9e70005304c1717b5c6f2f39d6fd50752834c6f34a155c70790011eb - $ docker run -t test2 - adduser.conf gshadow login.defs rc0.d - alternatives gshadow- logrotate.d rc1.d - apt host.conf lsb-base rc2.d - ... - -Merged configs example -...................... - -Say you have a Dockerfile like so: - -.. code-block:: bash - - ENV MYVAR foobar - RUN apt-get install openssh - EXPOSE 22 - CMD ["/usr/sbin/sshd -D"] - ... - -If you run that, make some changes, and then commit, Docker will merge the environment variable and exposed port configuration settings with any that you specify in the --run= option. This is a change from Docker 0.8.0 and prior where no attempt was made to preserve any existing configuration on commit. - -.. code-block:: bash - - $ docker build -t me/foo . - $ docker run -t -i me/foo /bin/bash - foo-container$ [make changes in the container] - foo-container$ exit - $ docker commit --run='{"Cmd": ["ls"]}' [container-id] me/bar - ... - -The me/bar image will now have port 22 exposed, MYVAR env var set to 'foobar', and its default command will be ["ls"]. - -Note that this is currently a shallow merge. So, for example, if you had specified a new port spec in the --run= config above, that would have clobbered the 'EXPOSE 22' setting from the parent container. - -Full --run example -.................. - -The ``--run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID`` -or ``config`` when running ``docker inspect IMAGEID``. Existing configuration key-values that are -not overridden in the JSON hash will be merged in. 
- -(Multiline is okay within a single quote ``'``) - -.. code-block:: bash - - $ sudo docker commit --run=' - { - "Entrypoint" : null, - "Privileged" : false, - "User" : "", - "VolumesFrom" : "", - "Cmd" : ["cat", "-e", "/etc/resolv.conf"], - "Dns" : ["8.8.8.8", "8.8.4.4"], - "DnsSearch" : ["example.com"], - "MemorySwap" : 0, - "AttachStdin" : false, - "AttachStderr" : false, - "CpuShares" : 0, - "OpenStdin" : false, - "Volumes" : null, - "Hostname" : "122612f45831", - "PortSpecs" : ["22", "80", "443"], - "Image" : "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Tty" : false, - "Env" : [ - "HOME=/", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - ], - "StdinOnce" : false, - "Domainname" : "", - "WorkingDir" : "/", - "NetworkDisabled" : false, - "Memory" : 0, - "AttachStdout" : false - }' $CONTAINER_ID .. _cli_cp: -- cgit v1.2.1 From 77a04357a1b66b9e5b2bae2efc0192b927f926fe Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 8 Apr 2014 20:56:30 +0000 Subject: Remove restart ghost test We do not allow ghosts anymore and this test does not add any value Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- integration/container_test.go | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/integration/container_test.go b/integration/container_test.go index c64f9e610b..d089e7dc45 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -1714,31 +1714,3 @@ func TestMultipleVolumesFrom(t *testing.T) { t.Fail() } } - -func TestRestartGhost(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - - if err := container.Kill(); err != nil { - t.Fatal(err) - } - - container.State.SetGhost(true) - - _, err = container.Output() - if err != nil { - t.Fatal(err) - } -} -- cgit v1.2.1 From 9260c06f7a7cb172205dc45af96870ec0d02ebcd Mon Sep 17 00:00:00 2001 From: Victor Vieux Date: Tue, 8 Apr 2014 20:58:19 +0000 Subject: remove double deprecation warning Docker-DCO-1.1-Signed-off-by: Victor Vieux (github: vieux) --- api/client/commands.go | 1 - 1 file changed, 1 deletion(-) diff --git a/api/client/commands.go b/api/client/commands.go index ef6bbd055a..443917d3fb 100644 --- a/api/client/commands.go +++ b/api/client/commands.go @@ -1472,7 +1472,6 @@ func (cli *DockerCli) CmdCommit(args ...string) error { env engine.Env ) if *flConfig != "" { - fmt.Fprintf(cli.err, "WARNING: 'commit --run' is deprecated and will be removed in a future version, in favor of inline Dockerfile-compatible commands.\n") config = &runconfig.Config{} if err := json.Unmarshal([]byte(*flConfig), config); err != nil { return err -- cgit v1.2.1 From af9746412b6070063f105ae97eba1f8fbd56bd22 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 8 Apr 2014 09:26:09 +0000 Subject: Move volumesfrom to hostconfig This also migrates the volumes from integration tests into the new cli integration test framework. 
Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- integration-cli/docker_cli_run_test.go | 72 +++++++++ integration/container_test.go | 262 --------------------------------- runconfig/compare.go | 3 +- runconfig/config.go | 2 - runconfig/config_test.go | 41 ++---- runconfig/hostconfig.go | 2 + runconfig/merge.go | 3 - runconfig/parse.go | 2 +- runtime/volumes.go | 7 +- 9 files changed, 92 insertions(+), 302 deletions(-) diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index d085574741..b0805dd35c 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -312,3 +312,75 @@ func TestVolumesMountedAsReadonly(t *testing.T) { logDone("run - volumes as readonly mount") } + +func TestVolumesFromInReadonlyMode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:ro", "busybox", "touch", "/test/file") + if code, err := runCommand(cmd); err == nil || code == 0 { + t.Fatalf("run should fail because volume is ro: exit code %d", code) + } + + deleteAllContainers() + + logDone("run - volumes from as readonly mount") +} + +// Regression test for #1201 +func TestVolumesFromInReadWriteMode(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "busybox", "touch", "/test/file") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - volumes from as read write mount") +} + +// Test for #1351 +func TestApplyVolumesFromBeforeVolumes(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "touch", "/test/foo") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "-v", "/test", "busybox", "cat", "/test/foo") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - volumes from mounted first") +} + +func TestMultipleVolumesFrom(t *testing.T) { + cmd := exec.Command(dockerBinary, "run", "--name", "parent1", "-v", "/test", "busybox", "touch", "/test/foo") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/other", "busybox", "touch", "/other/bar") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", + "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") + if _, err := runCommand(cmd); err != nil { + t.Fatal(err) + } + + deleteAllContainers() + + logDone("run - multiple volumes from") +} diff --git a/integration/container_test.go b/integration/container_test.go index d089e7dc45..43f51c1e5f 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -1273,123 +1273,6 @@ func TestBindMounts(t *testing.T) { } } -// Test that -volumes-from supports both read-only mounts -func TestFromVolumesInReadonlyMode(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: 
[]string{"/bin/echo", "-n", "foobar"}, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - _, err = container.Output() - if err != nil { - t.Fatal(err) - } - if !container.VolumesRW["/test"] { - t.Fail() - } - - container2, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/echo", "-n", "foobar"}, - VolumesFrom: container.ID + ":ro", - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container2) - - _, err = container2.Output() - if err != nil { - t.Fatal(err) - } - - if container.Volumes["/test"] != container2.Volumes["/test"] { - t.Logf("container volumes do not match: %s | %s ", - container.Volumes["/test"], - container2.Volumes["/test"]) - t.Fail() - } - - _, exists := container2.VolumesRW["/test"] - if !exists { - t.Logf("container2 is missing '/test' volume: %s", container2.VolumesRW) - t.Fail() - } - - if container2.VolumesRW["/test"] != false { - t.Log("'/test' volume mounted in read-write mode, expected read-only") - t.Fail() - } -} - -// Test that VolumesRW values are copied to the new container. Regression test for #1201 -func TestVolumesFromReadonlyMount(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - container, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/echo", "-n", "foobar"}, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - _, err = container.Output() - if err != nil { - t.Fatal(err) - } - if !container.VolumesRW["/test"] { - t.Fail() - } - - container2, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/echo", "-n", "foobar"}, - VolumesFrom: container.ID, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container2) - - _, err = container2.Output() - if err != nil { - t.Fatal(err) - } - - if container.Volumes["/test"] != container2.Volumes["/test"] { - t.Fail() - } - - actual, exists := container2.VolumesRW["/test"] - if !exists { - t.Fail() - } - - if container.VolumesRW["/test"] != actual { - t.Fail() - } -} - // Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. 
func TestRestartWithVolumes(t *testing.T) { runtime := mkRuntime(t) @@ -1434,73 +1317,6 @@ func TestRestartWithVolumes(t *testing.T) { } } -// Test for #1351 -func TestVolumesFromWithVolumes(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - for key := range container.Config.Volumes { - if key != "/test" { - t.Fail() - } - } - - _, err = container.Output() - if err != nil { - t.Fatal(err) - } - - expected := container.Volumes["/test"] - if expected == "" { - t.Fail() - } - - container2, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"cat", "/test/foo"}, - VolumesFrom: container.ID, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container2) - - output, err := container2.Output() - if err != nil { - t.Fatal(err) - } - - if string(output) != "bar" { - t.Fail() - } - - if container.Volumes["/test"] != container2.Volumes["/test"] { - t.Fail() - } - - // Ensure it restarts successfully - _, err = container2.Output() - if err != nil { - t.Fatal(err) - } -} - func TestContainerNetwork(t *testing.T) { runtime := mkRuntime(t) defer nuke(runtime) @@ -1636,81 +1452,3 @@ func TestUnprivilegedCannotMount(t *testing.T) { t.Fatal("Could mount into secure container") } } - -func TestMultipleVolumesFrom(t *testing.T) { - runtime := mkRuntime(t) - defer nuke(runtime) - - container, _, err := runtime.Create(&runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"}, - Volumes: map[string]struct{}{"/test": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container) - - for key := range container.Config.Volumes { - if key != "/test" { - t.Fail() - } - } - - _, err = container.Output() - if err != nil { - t.Fatal(err) - } - - expected := container.Volumes["/test"] - if expected == "" { - t.Fail() - } - - container2, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"}, - Volumes: map[string]struct{}{"/other": {}}, - }, - "", - ) - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container2) - - for key := range container2.Config.Volumes { - if key != "/other" { - t.FailNow() - } - } - if _, err := container2.Output(); err != nil { - t.Fatal(err) - } - - container3, _, err := runtime.Create( - &runconfig.Config{ - Image: GetTestImage(runtime).ID, - Cmd: []string{"/bin/echo", "-n", "foobar"}, - VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","), - }, "") - - if err != nil { - t.Fatal(err) - } - defer runtime.Destroy(container3) - - if _, err := container3.Output(); err != nil { - t.Fatal(err) - } - - if container3.Volumes["/test"] != container.Volumes["/test"] { - t.Fail() - } - if container3.Volumes["/other"] != container2.Volumes["/other"] { - t.Fail() - } -} diff --git a/runconfig/compare.go b/runconfig/compare.go index 122687f669..5c1bf46575 100644 --- a/runconfig/compare.go +++ b/runconfig/compare.go @@ -14,8 +14,7 @@ func Compare(a, b *Config) bool { a.MemorySwap != b.MemorySwap || a.CpuShares != b.CpuShares || a.OpenStdin != b.OpenStdin || - a.Tty != b.Tty || - a.VolumesFrom != b.VolumesFrom { + 
a.Tty != b.Tty { return false } if len(a.Cmd) != len(b.Cmd) || diff --git a/runconfig/config.go b/runconfig/config.go index 9db545976b..33a7882b6f 100644 --- a/runconfig/config.go +++ b/runconfig/config.go @@ -27,7 +27,6 @@ type Config struct { Cmd []string Image string // Name of the image as it was passed by the operator (eg. could be symbolic) Volumes map[string]struct{} - VolumesFrom string WorkingDir string Entrypoint []string NetworkDisabled bool @@ -49,7 +48,6 @@ func ContainerConfigFromJob(job *engine.Job) *Config { OpenStdin: job.GetenvBool("OpenStdin"), StdinOnce: job.GetenvBool("StdinOnce"), Image: job.Getenv("Image"), - VolumesFrom: job.Getenv("VolumesFrom"), WorkingDir: job.Getenv("WorkingDir"), NetworkDisabled: job.GetenvBool("NetworkDisabled"), } diff --git a/runconfig/config_test.go b/runconfig/config_test.go index d5ab75b3b9..f71528ff8e 100644 --- a/runconfig/config_test.go +++ b/runconfig/config_test.go @@ -163,37 +163,25 @@ func TestCompare(t *testing.T) { volumes1 := make(map[string]struct{}) volumes1["/test1"] = struct{}{} config1 := Config{ - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes1, } config3 := Config{ - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes1, - } - config4 := Config{ - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "22222222", - Volumes: volumes1, + PortSpecs: []string{"0000:0000", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes1, } volumes2 := make(map[string]struct{}) volumes2["/test2"] = struct{}{} config5 := Config{ - PortSpecs: []string{"0000:0000", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "11111111", - Volumes: volumes2, + PortSpecs: []string{"0000:0000", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumes2, } if Compare(&config1, &config3) { t.Fatalf("Compare should return false, PortSpecs are different") } - if Compare(&config1, &config4) { - t.Fatalf("Compare should return false, VolumesFrom are different") - } if Compare(&config1, &config5) { t.Fatalf("Compare should return false, Volumes are different") } @@ -207,10 +195,9 @@ func TestMerge(t *testing.T) { volumesImage["/test1"] = struct{}{} volumesImage["/test2"] = struct{}{} configImage := &Config{ - PortSpecs: []string{"1111:1111", "2222:2222"}, - Env: []string{"VAR1=1", "VAR2=2"}, - VolumesFrom: "1111", - Volumes: volumesImage, + PortSpecs: []string{"1111:1111", "2222:2222"}, + Env: []string{"VAR1=1", "VAR2=2"}, + Volumes: volumesImage, } volumesUser := make(map[string]struct{}) @@ -251,10 +238,6 @@ func TestMerge(t *testing.T) { } } - if configUser.VolumesFrom != "1111" { - t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom) - } - ports, _, err := nat.ParsePortSpecs([]string{"0000"}) if err != nil { t.Error(err) diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 5c5e291bad..127c06d9cb 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -16,6 +16,7 @@ type HostConfig struct { PublishAllPorts bool Dns []string DnsSearch []string + VolumesFrom string } func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { @@ -23,6 +24,7 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { ContainerIDFile: job.Getenv("ContainerIDFile"), 
Privileged: job.GetenvBool("Privileged"), PublishAllPorts: job.GetenvBool("PublishAllPorts"), + VolumesFrom: job.Getenv("VolumesFrom"), } job.GetenvJson("LxcConf", &hostConfig.LxcConf) job.GetenvJson("PortBindings", &hostConfig.PortBindings) diff --git a/runconfig/merge.go b/runconfig/merge.go index 7a089855b2..1240dbcacd 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -100,9 +100,6 @@ func Merge(userConf, imageConf *Config) error { if userConf.WorkingDir == "" { userConf.WorkingDir = imageConf.WorkingDir } - if userConf.VolumesFrom == "" { - userConf.VolumesFrom = imageConf.VolumesFrom - } if userConf.Volumes == nil || len(userConf.Volumes) == 0 { userConf.Volumes = imageConf.Volumes } else { diff --git a/runconfig/parse.go b/runconfig/parse.go index 58d6c9ebb9..b76f59a360 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -215,7 +215,6 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf Cmd: runCmd, Image: image, Volumes: flVolumes.GetMap(), - VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","), Entrypoint: entrypoint, WorkingDir: *flWorkingDir, } @@ -230,6 +229,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf PublishAllPorts: *flPublishAll, Dns: flDns.GetAll(), DnsSearch: flDnsSearch.GetAll(), + VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","), } if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { diff --git a/runtime/volumes.go b/runtime/volumes.go index 40db177174..e74442e1b5 100644 --- a/runtime/volumes.go +++ b/runtime/volumes.go @@ -59,8 +59,9 @@ func setupMountsForContainer(container *Container, envPath string) error { } func applyVolumesFrom(container *Container) error { - if container.Config.VolumesFrom != "" { - for _, containerSpec := range strings.Split(container.Config.VolumesFrom, ",") { + volumesFrom := container.hostConfig.VolumesFrom + if volumesFrom != "" { + for _, containerSpec := range strings.Split(volumesFrom, ",") { var ( mountRW = true specParts = strings.SplitN(containerSpec, ":", 2) @@ -68,7 +69,7 @@ func applyVolumesFrom(container *Container) error { switch len(specParts) { case 0: - return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom) + return fmt.Errorf("Malformed volumes-from specification: %s", volumesFrom) case 2: switch specParts[1] { case "ro": -- cgit v1.2.1 From b4f2821e6d4ba6f6073365a244681df21f5d4472 Mon Sep 17 00:00:00 2001 From: Michael Crosby Date: Tue, 8 Apr 2014 10:02:17 +0000 Subject: Make volumes-from a slice instead of string split Docker-DCO-1.1-Signed-off-by: Michael Crosby (github: crosbymichael) --- runconfig/hostconfig.go | 6 ++++-- runconfig/parse.go | 2 +- runtime/volumes.go | 6 +++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 127c06d9cb..3235bf1f4e 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -16,7 +16,7 @@ type HostConfig struct { PublishAllPorts bool Dns []string DnsSearch []string - VolumesFrom string + VolumesFrom []string } func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { @@ -24,7 +24,6 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { ContainerIDFile: job.Getenv("ContainerIDFile"), Privileged: job.GetenvBool("Privileged"), PublishAllPorts: job.GetenvBool("PublishAllPorts"), - VolumesFrom: job.Getenv("VolumesFrom"), } job.GetenvJson("LxcConf", &hostConfig.LxcConf) job.GetenvJson("PortBindings", &hostConfig.PortBindings) @@ -40,5 +39,8 @@ func 
ContainerHostConfigFromJob(job *engine.Job) *HostConfig { if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil { hostConfig.DnsSearch = DnsSearch } + if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil { + hostConfig.VolumesFrom = VolumesFrom + } return hostConfig } diff --git a/runconfig/parse.go b/runconfig/parse.go index b76f59a360..d395b49e80 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -229,7 +229,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf PublishAllPorts: *flPublishAll, Dns: flDns.GetAll(), DnsSearch: flDnsSearch.GetAll(), - VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","), + VolumesFrom: flVolumesFrom.GetAll(), } if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit { diff --git a/runtime/volumes.go b/runtime/volumes.go index e74442e1b5..004f1bb024 100644 --- a/runtime/volumes.go +++ b/runtime/volumes.go @@ -60,8 +60,8 @@ func setupMountsForContainer(container *Container, envPath string) error { func applyVolumesFrom(container *Container) error { volumesFrom := container.hostConfig.VolumesFrom - if volumesFrom != "" { - for _, containerSpec := range strings.Split(volumesFrom, ",") { + if len(volumesFrom) > 0 { + for _, containerSpec := range volumesFrom { var ( mountRW = true specParts = strings.SplitN(containerSpec, ":", 2) @@ -69,7 +69,7 @@ func applyVolumesFrom(container *Container) error { switch len(specParts) { case 0: - return fmt.Errorf("Malformed volumes-from specification: %s", volumesFrom) + return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec) case 2: switch specParts[1] { case "ro": -- cgit v1.2.1 From dc9c28f51d669d6b09e81c2381f800f1a33bb659 Mon Sep 17 00:00:00 2001 From: unclejack Date: Wed, 9 Apr 2014 00:49:33 +0300 Subject: Bump version to v0.10.0 Docker-DCO-1.1-Signed-off-by: Cristian Staretu (github: unclejack) --- CHANGELOG.md | 137 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ VERSION | 2 +- 2 files changed, 138 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c8ea94361b..8743d3a7db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,142 @@ # Changelog +## 0.10.0 (2014-04-08) + +#### Builder +- Fix printing multiple messages on a single line. Fixes broken output during builds. +- Follow symlinks inside container's root for ADD build instructions. +- Fix EXPOSE caching. + +#### Documentation +- Add the new options of `docker ps` to the documentation. +- Add the options of `docker restart` to the documentation. +- Update daemon docs and help messages for --iptables and --ip-forward. +- Updated apt-cacher-ng docs example. +- Remove duplicate description of --mtu from docs. +- Add missing -t and -v for `docker images` to the docs. +- Add fixes to the cli docs. +- Update libcontainer docs. +- Update images in docs to remove references to AUFS and LXC. +- Update the nodejs_web_app in the docs to use the new epel RPM address. +- Fix external link on security of containers. +- Update remote API docs. +- Add image size to history docs. +- Be explicit about binding to all interfaces in redis example. +- Document DisableNetwork flag in the 1.10 remote api. +- Document that `--lxc-conf` is lxc only. +- Add chef usage documentation. +- Add example for an image with multiple for `docker load`. +- Explain what `docker run -a` does in the docs. + +#### Contrib +- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. 
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. +- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. +- Add check-config script to contrib. +- Fix fish shell completion. + +#### Hack +* Clean up "go test" output from "make test" to be much more readable/scannable. +* Excluse more "definitely not unit tested Go source code" directories from hack/make/test. ++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. +- Include contributed completions in Ubuntu PPA. ++ Add cli integration tests. +* Add tweaks to the hack scripts to make them simpler. + +#### Remote API ++ Add TLS auth support for API. +* Move git clone from daemon to client. +- Fix content-type detection in docker cp. +* Split API into 2 go packages. + +#### Runtime +* Support hairpin NAT without going through Docker server. +- devicemapper: succeed immediately when removing non-existing devices. +- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). +- devicemapper: increase timeout in waitClose to 10 seconds. +- devicemapper: ensure we shut down thin pool cleanly. +- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. +- devicemapper: avoid AB-BA deadlock. +- devicemapper: make shutdown better/faster. +- improve alpha sorting in mflag. +- Remove manual http cookie management because the cookiejar is being used. +- Use BSD raw mode on Darwin. Fixes nano, tmux and others. +- Add FreeBSD support for the client. +- Merge auth package into registry. +- Add deprecation warning for -t on `docker pull`. +- Remove goroutine leak on error. +- Update parseLxcInfo to comply with new lxc1.0 format. +- Fix attach exit on darwin. +- Improve deprecation message. +- Retry to retrieve the layer metadata up to 5 times for `docker pull`. +- Only unshare the mount namespace for execin. +- Merge existing config when committing. +- Disable daemon startup timeout. +- Fix issue #4681: add loopback interface when networking is disabled. +- Add failing test case for issue #4681. +- Send SIGTERM to child, instead of SIGKILL. +- Show the driver and the kernel version in `docker info` even when not in debug mode. +- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. +- Fix issue caused by the absence of /etc/apparmor.d. +- Don't leave empty cidFile behind when failing to create the container. +- Mount cgroups automatically if they're not mounted already. +- Use mock for search tests. +- Update to double-dash everywhere. +- Move .dockerenv parsing to lxc driver. +- Move all bind-mounts in the container inside the namespace. +- Don't use separate bind mount for container. +- Always symlink /dev/ptmx for libcontainer. +- Don't kill by pid for other drivers. +- Add initial logging to libcontainer. +* Sort by port in `docker ps`. +- Move networking drivers into runtime top level package. ++ Add --no-prune to `docker rmi`. ++ Add time since exit in `docker ps`. +- graphdriver: add build tags. +- Prevent allocation of previously allocated ports & prevent improve port allocation. +* Add support for --since/--before in `docker ps`. +- Clean up container stop. ++ Add support for configurable dns search domains. +- Add support for relative WORKDIR instructions. +- Add --output flag for docker save. +- Remove duplication of DNS entries in config merging. 
+- Add cpuset.cpus to cgroups and native driver options. +- Remove docker-ci. +- Promote btrfs. btrfs is no longer considered experimental. +- Add --input flag to `docker load`. +- Return error when existing bridge doesn't match IP address. +- Strip comments before parsing line continuations to avoid interpreting instructions as comments. +- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces. +- Add systemd implementation of cgroups and make containers show up as systemd units. +- Fix commit and import when no repository is specified. +- Remount /var/lib/docker as --private to fix scaling issue. +- Use the environment's proxy when pinging the remote registry. +- Reduce error level from harmless errors. +* Allow --volumes-from to be individual files. +- Fix expanding buffer in StdCopy. +- Set error regardless of attach or stdin. This fixes #3364. +- Add support for --env-file to load environment variables from files. +- Symlink /etc/mtab and /proc/mounts. +- Allow pushing a single tag. +- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM. +- Don't throw error when starting an already running container. +- Fix dynamic port allocation limit. +- remove setupDev from libcontainer. +- Add API version to `docker version`. +- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup. +- Fix --volumes-from mount failure. +- Allow non-privileged containers to create device nodes. +- Skip login tests because of external dependency on a hosted service. +- Deprecate `docker images --tree` and `docker images --viz`. +- Deprecate `docker insert`. +- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04. +- Add specific error message when hitting 401 over HTTP on push. +- Fix absolute volume check. +- Remove volumes-from from the config. +- Move DNS options to hostconfig. +- Update the apparmor profile for libcontainer. +- Add deprecation notice for `docker commit -run`. + ## 0.9.1 (2014-03-24) #### Builder diff --git a/VERSION b/VERSION index dc9bff91aa..78bc1abd14 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.9.1-dev +0.10.0 -- cgit v1.2.1
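For reference, a self-contained sketch of the volumes-from spec handling shown in the runtime/volumes.go hunks earlier in this series: a spec is either "container" or "container:ro|rw". The rejection of other suffixes is an assumption here, since that branch is not visible in the diff.

```go
package main

import (
	"fmt"
	"strings"
)

// parseVolumesFromSpec mirrors the split logic in applyVolumesFrom: the spec
// is split once on ":" and an optional "ro"/"rw" suffix controls whether the
// volumes are mounted read-write. Standalone sketch, not the daemon code.
func parseVolumesFromSpec(spec string) (id string, mountRW bool, err error) {
	mountRW = true
	parts := strings.SplitN(spec, ":", 2)
	switch len(parts) {
	case 0:
		return "", false, fmt.Errorf("Malformed volumes-from specification: %s", spec)
	case 2:
		switch parts[1] {
		case "ro":
			mountRW = false
		case "rw": // mountRW already defaults to true
		default:
			// Assumed behaviour: anything other than ro/rw is rejected.
			return "", false, fmt.Errorf("Malformed volumes-from specification: %s", spec)
		}
	}
	return parts[0], mountRW, nil
}

func main() {
	for _, spec := range []string{"parent", "parent:ro", "parent:rw"} {
		id, rw, err := parseVolumesFromSpec(spec)
		fmt.Println(spec, "->", id, rw, err)
	}
}
```
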