author     Fred Lifton <fred.lifton@docker.com>  2014-08-27 09:05:02 -0700
committer  Fred Lifton <fred.lifton@docker.com>  2014-08-27 09:05:02 -0700
commit     aba575c7546ba7d2914f0c3b8b3450667b3959ac (patch)
tree       d0158f0eab4b9710815c1a87e2ef3699e5185205
parent     4b54484843eacf6ce57d2fc16e6aacc70d964041 (diff)
parent     7565cb309988345f9ed9daba87052f2bba116033 (diff)
download   docker-docs-1.1.tar.gz
Merge pull request #7755 from SvenDowideit/post-1.1.2-docs-update-1 (docs-1.1)
Post 1.1.2 docs update 1
-rw-r--r--  Dockerfile | 3
-rw-r--r--  README.md | 3
-rw-r--r--  contrib/desktop-integration/iceweasel/Dockerfile | 2
-rw-r--r--  docs/Dockerfile | 2
-rw-r--r--  docs/man/docker-attach.1.md | 2
-rwxr-xr-x  docs/mkdocs.yml | 2
-rw-r--r--  docs/sources/articles/cfengine_process_management.md | 7
-rw-r--r--  docs/sources/articles/dsc.md | 90
-rw-r--r--  docs/sources/articles/https.md | 109
-rw-r--r--  docs/sources/articles/using_supervisord.md | 8
-rw-r--r--  docs/sources/docker-hub/builds.md | 160
-rw-r--r--  docs/sources/docker-hub/hub-images/deploy_key.png | bin 0 -> 162811 bytes
-rw-r--r--  docs/sources/docker-hub/hub-images/github_deploy_key.png | bin 0 -> 80447 bytes
-rw-r--r--  docs/sources/docker-hub/repos.md | 2
-rw-r--r--  docs/sources/examples/apt-cacher-ng.Dockerfile | 4
-rw-r--r--  docs/sources/examples/apt-cacher-ng.md | 6
-rw-r--r--  docs/sources/examples/mongodb.md | 5
-rw-r--r--  docs/sources/examples/mongodb/Dockerfile | 7
-rw-r--r--  docs/sources/examples/nodejs_web_app.md | 12
-rw-r--r--  docs/sources/examples/postgresql_service.Dockerfile | 8
-rw-r--r--  docs/sources/examples/postgresql_service.md | 10
-rw-r--r--  docs/sources/examples/running_redis_service.md | 9
-rw-r--r--  docs/sources/examples/running_riak_service.md | 20
-rw-r--r--  docs/sources/examples/running_ssh_service.Dockerfile | 5
-rw-r--r--  docs/sources/examples/running_ssh_service.md | 5
-rw-r--r--  docs/sources/faq.md | 32
-rw-r--r--  docs/sources/index.md | 46
-rw-r--r--  docs/sources/installation/centos.md | 62
-rw-r--r--  docs/sources/installation/fedora.md | 2
-rw-r--r--  docs/sources/installation/gentoolinux.md | 3
-rw-r--r--  docs/sources/installation/google.md | 2
-rw-r--r--  docs/sources/installation/openSUSE.md | 20
-rw-r--r--  docs/sources/installation/oracle.md | 120
-rw-r--r--  docs/sources/installation/softlayer.md | 24
-rw-r--r--  docs/sources/installation/ubuntulinux.md | 13
-rw-r--r--  docs/sources/reference/api/docker_io_oauth_api.md | 254
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.0.md | 13
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.1.md | 13
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.10.md | 20
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.11.md | 13
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.12.md | 36
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.13.md | 36
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.2.md | 13
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.3.md | 13
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.4.md | 13
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.5.md | 13
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.6.md | 15
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.7.md | 15
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.8.md | 15
-rw-r--r--  docs/sources/reference/api/docker_remote_api_v1.9.md | 15
-rw-r--r--  docs/sources/reference/api/registry_api.md | 12
-rw-r--r--  docs/sources/reference/builder.md | 41
-rw-r--r--  docs/sources/reference/commandline/cli.md | 20
-rw-r--r--  docs/sources/userguide/dockerimages.md | 175
-rw-r--r--  docs/sources/userguide/dockerlinks.md | 171
-rw-r--r--  docs/sources/userguide/dockervolumes.md | 4
-rw-r--r--  docs/sources/userguide/search.png | bin 101216 -> 72860 bytes
-rw-r--r--  docs/sources/userguide/usingdocker.md | 4
-rw-r--r--  docs/theme/mkdocs/base.html | 29
-rw-r--r--  docs/theme/mkdocs/footer.html | 2
-rwxr-xr-x  hack/release.sh | 3
61 files changed, 967 insertions, 796 deletions
diff --git a/Dockerfile b/Dockerfile
index d2e32a4ae5..b8c4f34dc4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -28,8 +28,7 @@ FROM ubuntu:14.04
MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)
# Packaged dependencies
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \
- apt-utils \
+RUN apt-get update && apt-get install -y \
aufs-tools \
automake \
btrfs-tools \
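The consolidated `RUN` above chains the index refresh and the install into a single image layer. As a minimal shell sketch of what that instruction executes, assuming a Debian/Ubuntu base and a root build context (the package list is shortened to the first few dependencies shown above):

```bash
# Refresh the package index and install in the same step, so a cached layer
# can never pair a stale index with a newer package list.
apt-get update && apt-get install -y \
    aufs-tools \
    automake \
    btrfs-tools
```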
diff --git a/README.md b/README.md
index 08d839c3cc..46acc5ab10 100644
--- a/README.md
+++ b/README.md
@@ -131,8 +131,7 @@ Here's a typical Docker build process:
```bash
FROM ubuntu:12.04
-RUN apt-get update
-RUN apt-get install -q -y python python-pip curl
+RUN apt-get update && apt-get install -y python python-pip curl
RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv
RUN cd helloflask-master && pip install -r requirements.txt
```
diff --git a/contrib/desktop-integration/iceweasel/Dockerfile b/contrib/desktop-integration/iceweasel/Dockerfile
index 80d6a55e4a..0f3e8f2527 100644
--- a/contrib/desktop-integration/iceweasel/Dockerfile
+++ b/contrib/desktop-integration/iceweasel/Dockerfile
@@ -29,7 +29,7 @@ FROM debian:wheezy
MAINTAINER Daniel Mizyrycki <daniel@docker.com>
# Install Iceweasel and "sudo"
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq iceweasel sudo
+RUN apt-get update && apt-get install -y iceweasel sudo
# create sysadmin account
RUN useradd -m -d /data -p saIVpsc0EVTwA sysadmin
diff --git a/docs/Dockerfile b/docs/Dockerfile
index 345de92bd0..219999e4de 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -4,7 +4,7 @@
FROM debian:jessie
MAINTAINER Sven Dowideit <SvenDowideit@docker.com> (@SvenDowideit)
-RUN apt-get update && apt-get install -yq make python-pip python-setuptools vim-tiny git gettext
+RUN apt-get update && apt-get install -y make python-pip python-setuptools vim-tiny git gettext
RUN pip install mkdocs
diff --git a/docs/man/docker-attach.1.md b/docs/man/docker-attach.1.md
index e26db07f3f..7deda6c75e 100644
--- a/docs/man/docker-attach.1.md
+++ b/docs/man/docker-attach.1.md
@@ -14,7 +14,7 @@ docker-attach - Attach to a running container
If you **docker run** a container in detached mode (**-d**), you can reattach to
the detached container with **docker attach** using the container's ID or name.
-You can detach from the container again (and leave it running) with `CTRL-q
+You can detach from the container again (and leave it running) with `CTRL-p
CTRL-q` (for a quiet exit), or `CTRL-c` which will send a SIGKILL to the
container, or `CTRL-\` to get a stacktrace of the Docker client when it quits.
When you detach from a container the exit code will be returned to
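A short session illustrating the corrected `CTRL-p CTRL-q` sequence; the container name `web` and the `top -b` command are placeholders, not values taken from the man page:

```bash
docker run -d --name web ubuntu:14.04 top -b   # start a detached, long-running container
docker attach web                              # reattach to its output
# Press CTRL-p, then CTRL-q: the client detaches quietly and the container
# keeps running.
docker ps                                      # "web" is still listed as Up
```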
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index f4ebcb68fe..007d47dd83 100755
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -34,6 +34,7 @@ pages:
- ['installation/mac.md', 'Installation', 'Mac OS X']
- ['installation/ubuntulinux.md', 'Installation', 'Ubuntu']
- ['installation/rhel.md', 'Installation', 'Red Hat Enterprise Linux']
+- ['installation/oracle.md', 'Installation', 'Oracle Linux']
- ['installation/centos.md', 'Installation', 'CentOS']
- ['installation/debian.md', 'Installation', 'Debian']
- ['installation/gentoolinux.md', 'Installation', 'Gentoo']
@@ -119,7 +120,6 @@ pages:
- ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**']
- ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**']
- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries']
-- ['reference/api/docker_io_oauth_api.md', 'Reference', 'Docker Hub OAuth API']
- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub Accounts API']
- ['jsearch.md', '**HIDDEN**']
diff --git a/docs/sources/articles/cfengine_process_management.md b/docs/sources/articles/cfengine_process_management.md
index 6bb4df66ae..a9441a6d35 100644
--- a/docs/sources/articles/cfengine_process_management.md
+++ b/docs/sources/articles/cfengine_process_management.md
@@ -65,13 +65,12 @@ The first two steps can be done as part of a Dockerfile, as follows.
FROM ubuntu
MAINTAINER Eystein Måløy Stenberg <eytein.stenberg@gmail.com>
- RUN apt-get -y install wget lsb-release unzip ca-certificates
+ RUN apt-get update && apt-get install -y wget lsb-release unzip ca-certificates
# install latest CFEngine
RUN wget -qO- http://cfengine.com/pub/gpg.key | apt-key add -
RUN echo "deb http://cfengine.com/pub/apt $(lsb_release -cs) main" > /etc/apt/sources.list.d/cfengine-community.list
- RUN apt-get update
- RUN apt-get install cfengine-community
+ RUN apt-get update && apt-get install -y cfengine-community
# install cfe-docker process management policy
RUN wget https://github.com/estenberg/cfe-docker/archive/master.zip -P /tmp/ && unzip /tmp/master.zip -d /tmp/
@@ -80,7 +79,7 @@ The first two steps can be done as part of a Dockerfile, as follows.
RUN rm -rf /tmp/cfe-docker-master /tmp/master.zip
# apache2 and openssh are just for testing purposes, install your own apps here
- RUN apt-get -y install openssh-server apache2
+ RUN apt-get update && apt-get install -y openssh-server apache2
RUN mkdir -p /var/run/sshd
RUN echo "root:password" | chpasswd # need a password for ssh
diff --git a/docs/sources/articles/dsc.md b/docs/sources/articles/dsc.md
index 94f5e9d4db..5e05c40c14 100644
--- a/docs/sources/articles/dsc.md
+++ b/docs/sources/articles/dsc.md
@@ -8,7 +8,7 @@ Windows PowerShell Desired State Configuration (DSC) is a configuration
management tool that extends the existing functionality of Windows PowerShell.
DSC uses a declarative syntax to define the state in which a target should be
configured. More information about PowerShell DSC can be found at
-http://technet.microsoft.com/en-us/library/dn249912.aspx.
+[http://technet.microsoft.com/en-us/library/dn249912.aspx](http://technet.microsoft.com/en-us/library/dn249912.aspx).
## Requirements
@@ -17,14 +17,14 @@ To use this guide you'll need a Windows host with PowerShell v4.0 or newer.
The included DSC configuration script also uses the official PPA so
only an Ubuntu target is supported. The Ubuntu target must already have the
required OMI Server and PowerShell DSC for Linux providers installed. More
-information can be found at https://github.com/MSFTOSSMgmt/WPSDSCLinux. The
-source repository listed below also includes PowerShell DSC for Linux
+information can be found at [https://github.com/MSFTOSSMgmt/WPSDSCLinux](https://github.com/MSFTOSSMgmt/WPSDSCLinux).
+The source repository listed below also includes PowerShell DSC for Linux
installation and init scripts along with more detailed installation information.
## Installation
The DSC configuration example source is available in the following repository:
-https://github.com/anweiss/DockerClientDSC. It can be cloned with:
+[https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC). It can be cloned with:
$ git clone https://github.com/anweiss/DockerClientDSC.git
@@ -37,15 +37,18 @@ be used to establish the required CIM session(s) and execute the
`Set-DscConfiguration` cmdlet.
More detailed usage information can be found at
-https://github.com/anweiss/DockerClientDSC.
+[https://github.com/anweiss/DockerClientDSC](https://github.com/anweiss/DockerClientDSC).
-### Run Configuration
+### Install Docker
The Docker installation configuration is equivalent to running:
```
-apt-get install docker.io
-ln -sf /usr/bin/docker.io /usr/local/bin/docker
-sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io
+apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys\
+36A1D7869245C8950F966E92D8576A8BA88D21E9
+sh -c "echo deb https://get.docker.io/ubuntu docker main\
+> /etc/apt/sources.list.d/docker.list"
+apt-get update
+apt-get install lxc-docker
```
Ensure that your current working directory is set to the `DockerClientDSC`
@@ -83,35 +86,82 @@ file and execute configurations against multiple nodes as such:
```
### Images
-Image configuration is equivalent to running: `docker pull [image]`.
+Image configuration is equivalent to running: `docker pull [image]` or
+`docker rmi -f [IMAGE]`.
-Using the same Run Configuration steps defined above, execute `DockerClient`
-with the `Image` parameter:
+Using the same steps defined above, execute `DockerClient` with the `Image`
+parameter and apply the configuration:
```powershell
-DockerClient -Hostname "myhost" -Image node
+DockerClient -Hostname "myhost" -Image "node"
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
```
-The configuration process can be initiated as before:
+You can also configure the host to pull multiple images:
```powershell
+DockerClient -Hostname "myhost" -Image "node","mongo"
.\RunDockerClientConfig.ps1 -Hostname "myhost"
```
+To remove images, use a hashtable as follows:
+
+```powershell
+DockerClient -Hostname "myhost" -Image @{Name="node"; Remove=$true}
+.\RunDockerClientConfig.ps1 -Hostname $hostname
+```
+
### Containers
Container configuration is equivalent to running:
-`docker run -d --name="[containername]" [image] '[command]'`.
-Using the same Run Configuration steps defined above, execute `DockerClient`
-with the `Image`, `ContainerName`, and `Command` parameters:
+```
+docker run -d --name="[containername]" -p '[port]' -e '[env]' --link '[link]'\
+'[image]' '[command]'
+```
+or
+
+```
+docker rm -f [containername]
+```
+
+To create or remove containers, you can use the `Container` parameter with one
+or more hashtables. The hashtable(s) passed to this parameter can have the
+following properties:
+
+- Name (required)
+- Image (required unless Remove property is set to `$true`)
+- Port
+- Env
+- Link
+- Command
+- Remove
+
+For example, create a hashtable with the settings for your container:
+
+```powershell
+$webContainer = @{Name="web"; Image="anweiss/docker-platynem"; Port="80:80"}
+```
+
+Then, using the same steps defined above, execute
+`DockerClient` with the `-Image` and `-Container` parameters:
```powershell
-DockerClient -Hostname "myhost" -Image node -ContainerName "helloworld" `
--Command 'echo "Hello World!"'
+DockerClient -Hostname "myhost" -Image node -Container $webContainer
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
```
-The configuration process can be initiated as before:
+Existing containers can also be removed as follows:
```powershell
+$containerToRemove = @{Name="web"; Remove=$true}
+DockerClient -Hostname "myhost" -Container $containerToRemove
.\RunDockerClientConfig.ps1 -Hostname "myhost"
```
+
+Here is a hashtable with all of the properties that can be used to create a
+container:
+
+```powershell
+$containerProps = @{Name="web"; Image="node:latest"; Port="80:80"; `
+Env="PORT=80"; Link="db:db"; Command="grunt"}
+``` \ No newline at end of file
diff --git a/docs/sources/articles/https.md b/docs/sources/articles/https.md
index 81570105e6..8a7b2dea4c 100644
--- a/docs/sources/articles/https.md
+++ b/docs/sources/articles/https.md
@@ -1,6 +1,6 @@
-page_title: Docker HTTPS Setup
-page_description: How to setup docker with https
-page_keywords: docker, example, https, daemon
+page_title: Running Docker with HTTPS
+page_description: How to setup and run Docker with HTTPS
+page_keywords: docker, docs, article, example, https, daemon, tls, ca, certificate
# Running Docker with https
@@ -11,9 +11,9 @@ If you need Docker reachable via the network in a safe manner, you can
enable TLS by specifying the tlsverify flag and pointing Docker's
tlscacert flag to a trusted CA certificate.
-In daemon mode, it will only allow connections from clients
-authenticated by a certificate signed by that CA. In client mode, it
-will only connect to servers with a certificate signed by that CA.
+In the daemon mode, it will only allow connections from clients
+authenticated by a certificate signed by that CA. In the client mode,
+it will only connect to servers with a certificate signed by that CA.
> **Warning**:
> Using TLS and managing a CA is an advanced topic. Please make you self
@@ -31,25 +31,64 @@ keys:
$ echo 01 > ca.srl
$ openssl genrsa -des3 -out ca-key.pem 2048
+ Generating RSA private key, 2048 bit long modulus
+ ......+++
+ ...............+++
+ e is 65537 (0x10001)
+ Enter pass phrase for ca-key.pem:
+ Verifying - Enter pass phrase for ca-key.pem:
$ openssl req -new -x509 -days 365 -key ca-key.pem -out ca.pem
+ Enter pass phrase for ca-key.pem:
+ You are about to be asked to enter information that will be incorporated
+ into your certificate request.
+ What you are about to enter is what is called a Distinguished Name or a DN.
+ There are quite a few fields but you can leave some blank
+ For some fields there will be a default value,
+ If you enter '.', the field will be left blank.
+ -----
+ Country Name (2 letter code) [AU]:
+ State or Province Name (full name) [Some-State]:Queensland
+ Locality Name (eg, city) []:Brisbane
+ Organization Name (eg, company) [Internet Widgits Pty Ltd]:Docker Inc
+ Organizational Unit Name (eg, section) []:Boot2Docker
+ Common Name (e.g. server FQDN or YOUR name) []:your.host.com
+ Email Address []:Sven@home.org.au
Now that we have a CA, you can create a server key and certificate
signing request. Make sure that "Common Name (e.g. server FQDN or YOUR
name)" matches the hostname you will use to connect to Docker:
$ openssl genrsa -des3 -out server-key.pem 2048
- $ openssl req -subj '/CN=**<Your Hostname Here>**' -new -key server-key.pem -out server.csr
+ Generating RSA private key, 2048 bit long modulus
+ ......................................................+++
+ ............................................+++
+ e is 65537 (0x10001)
+ Enter pass phrase for server-key.pem:
+ Verifying - Enter pass phrase for server-key.pem:
+ $ openssl req -subj '/CN=<Your Hostname Here>' -new -key server-key.pem -out server.csr
+ Enter pass phrase for server-key.pem:
-Next we're going to sign the key with our CA:
+Next, we're going to sign the key with our CA:
$ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \
-out server-cert.pem
+ Signature ok
+ subject=/CN=your.host.com
+ Getting CA Private Key
+ Enter pass phrase for ca-key.pem:
For client authentication, create a client key and certificate signing
request:
- $ openssl genrsa -des3 -out client-key.pem 2048
- $ openssl req -subj '/CN=client' -new -key client-key.pem -out client.csr
+ $ openssl genrsa -des3 -out key.pem 2048
+ Generating RSA private key, 2048 bit long modulus
+ ...............................................+++
+ ...............................................................+++
+ e is 65537 (0x10001)
+ Enter pass phrase for key.pem:
+ Verifying - Enter pass phrase for key.pem:
+ $ openssl req -subj '/CN=client' -new -key key.pem -out client.csr
+ Enter pass phrase for key.pem:
To make the key suitable for client authentication, create a extensions
config file:
@@ -59,13 +98,21 @@ config file:
Now sign the key:
$ openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem \
- -out client-cert.pem -extfile extfile.cnf
+ -out cert.pem -extfile extfile.cnf
+ Signature ok
+ subject=/CN=client
+ Getting CA Private Key
+ Enter pass phrase for ca-key.pem:
Finally you need to remove the passphrase from the client and server
key:
$ openssl rsa -in server-key.pem -out server-key.pem
- $ openssl rsa -in client-key.pem -out client-key.pem
+ Enter pass phrase for server-key.pem:
+ writing RSA key
+ $ openssl rsa -in key.pem -out key.pem
+ Enter pass phrase for key.pem:
+ writing RSA key
Now you can make the Docker daemon only accept connections from clients
providing a certificate trusted by our CA:
@@ -76,32 +123,31 @@ providing a certificate trusted by our CA:
To be able to connect to Docker and validate its certificate, you now
need to provide your client keys, certificates and trusted CA:
- $ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \
- -H=dns-name-of-docker-host:2376
+ $ docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \
+ -H=dns-name-of-docker-host:2376 version
> **Note**:
> Docker over TLS should run on TCP port 2376.
> **Warning**:
-> As shown in the example above, you don't have to run the
-> `docker` client with `sudo` or
-> the `docker` group when you use certificate
-> authentication. That means anyone with the keys can give any
-> instructions to your Docker daemon, giving them root access to the
-> machine hosting the daemon. Guard these keys as you would a root
-> password!
+> As shown in the example above, you don't have to run the `docker` client
+> with `sudo` or the `docker` group when you use certificate authentication.
+> That means anyone with the keys can give any instructions to your Docker
+> daemon, giving them root access to the machine hosting the daemon. Guard
+> these keys as you would a root password!
-## Secure By Default
+## Secure by default
-If you want to secure your Docker client connections by default, you can move the files
-to the `.docker` directory in your home directory. Set the `DOCKER_HOST` variable as well.
+If you want to secure your Docker client connections by default, you can move
+the files to the `.docker` directory in your home directory - and set the
+`DOCKER_HOST` variable as well.
$ cp ca.pem ~/.docker/ca.pem
- $ cp client-cert.pem ~/.docker/cert.pem
- $ cp client-key.pem ~/.docker/key.pem
+ $ cp cert.pem ~/.docker/cert.pem
+ $ cp key.pem ~/.docker/key.pem
$ export DOCKER_HOST=tcp://:2376
-Then you can just run docker with the `--tlsverify` option.
+Then you can run Docker with the `--tlsverify` option.
$ docker --tlsverify ps
@@ -124,5 +170,10 @@ Docker in various other modes by mixing the flags.
- tlsverify, tlscacert, tlscert, tlskey: Authenticate with client
certificate, authenticate server based on given CA
-The client will send its client certificate if found, so you just need
-to drop your keys into ~/.docker/<ca, cert or key>.pem
+If found, the client will send its client certificate, so you just need
+to drop your keys into `~/.docker/<ca, cert or key>.pem`. Alternatively,
+if you want to store your keys in another location, you can specify that
+location using the environment variable `DOCKER_CONFIG`.
+
+ $ export DOCKER_CERT_PATH=${HOME}/.docker/zone1/
+ $ docker --tlsverify ps
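For context, a hedged sketch of how the generated files fit together end to end. The client command matches the one shown in the hunk above; the daemon invocation is an assumption based on the flags this article discusses (`tlsverify`, `tlscacert`, `tlscert`, `tlskey`), not a line quoted from the page:

```bash
# Daemon side: only accept clients whose certificates are signed by our CA.
sudo docker -d --tlsverify --tlscacert=ca.pem \
    --tlscert=server-cert.pem --tlskey=server-key.pem \
    -H=0.0.0.0:2376

# Client side, from a machine holding cert.pem/key.pem signed by the same CA:
docker --tlsverify --tlscacert=ca.pem --tlscert=cert.pem --tlskey=key.pem \
    -H=dns-name-of-docker-host:2376 version
```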
diff --git a/docs/sources/articles/using_supervisord.md b/docs/sources/articles/using_supervisord.md
index 9188265199..10f32c7d1b 100644
--- a/docs/sources/articles/using_supervisord.md
+++ b/docs/sources/articles/using_supervisord.md
@@ -28,18 +28,14 @@ new image.
FROM ubuntu:13.04
MAINTAINER examples@docker.com
- RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
- RUN apt-get update
- RUN apt-get upgrade -y
## Installing Supervisor
We can now install our SSH and Apache daemons as well as Supervisor in
our container.
- RUN apt-get install -y openssh-server apache2 supervisor
- RUN mkdir -p /var/run/sshd
- RUN mkdir -p /var/log/supervisor
+ RUN apt-get update && apt-get install -y openssh-server apache2 supervisor
+ RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /var/log/supervisor
Here we're installing the `openssh-server`,
`apache2` and `supervisor`
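A hedged build-and-run sketch for the image this article assembles; the repository name `<yourname>/supervisord` and the exposed ports are placeholders consistent with the SSH and Apache daemons installed above:

```bash
docker build -t <yourname>/supervisord .
# Supervisor starts sshd and apache2 inside the container:
docker run -p 22 -p 80 -t -i <yourname>/supervisord
```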
diff --git a/docs/sources/docker-hub/builds.md b/docs/sources/docker-hub/builds.md
index bbca826e45..e3e2139f0a 100644
--- a/docs/sources/docker-hub/builds.md
+++ b/docs/sources/docker-hub/builds.md
@@ -1,41 +1,43 @@
page_title: Automated Builds on Docker Hub
page_description: Docker Hub Automated Builds
page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation, trusted, builds, trusted builds, automated builds
+
# Automated Builds on Docker Hub
## About Automated Builds
-*Automated Builds* are a special feature of Docker Hub which allow you to use
-[Docker Hub's](https://hub.docker.com) build clusters to automatically create images from
-a specified `Dockerfile` and a GitHub or Bitbucket repo (or "context"). The system will
-clone your repository and build the image described by the `Dockerfile` using the
-repository as the context. The resulting automated image will then be uploaded to the
-Docker Hub registry and marked as an *Automated Build*.
+*Automated Builds* are a special feature of Docker Hub which allow you to
+use [Docker Hub's](https://hub.docker.com) build clusters to automatically
+create images from a specified `Dockerfile` and a GitHub or Bitbucket repo
+(or "context"). The system will clone your repository and build the image
+described by the `Dockerfile` using the repository as the context. The
+resulting automated image will then be uploaded to the Docker Hub registry
+and marked as an *Automated Build*.
Automated Builds have several advantages:
-* Users of *your* Automated Build can trust that the resulting image was built exactly as
-specified.
+* Users of *your* Automated Build can trust that the resulting
+image was built exactly as specified.
-* The `Dockerfile` will be available to anyone with access to your repository
-on the Docker Hub registry.
+* The `Dockerfile` will be available to anyone with access to
+your repository on the Docker Hub registry.
-* Because the process is automated, Automated Builds help to make sure that your
-repository is always up to date.
+* Because the process is automated, Automated Builds help to
+make sure that your repository is always up to date.
-Automated Builds are supported for both public and private repositories on both
-[GitHub](http://github.com) and [Bitbucket](https://bitbucket.org/).
+Automated Builds are supported for both public and private repositories
+on both [GitHub](http://github.com) and [Bitbucket](https://bitbucket.org/).
-To use Automated Builds, you must have an
-[account on Docker Hub](http://docs.docker.com/userguide/dockerhub/#creating-a-docker-hub-account)
-and on GitHub and/or Bitbucket. In either case, the account needs to be properly
-validated and activated before you can link to it.
+To use Automated Builds, you must have an [account on Docker Hub](
+http://docs.docker.com/userguide/dockerhub/#creating-a-docker-hub-account)
+and on GitHub and/or Bitbucket. In either case, the account needs
+to be properly validated and activated before you can link to it.
## Setting up Automated Builds with GitHub
In order to set up an Automated Build, you need to first link your
-[Docker Hub](https://hub.docker.com) account with a GitHub account. This
-will allow the registry to see your repositories.
+[Docker Hub](https://hub.docker.com) account with a GitHub account.
+This will allow the registry to see your repositories.
> *Note:*
> Automated Builds currently require *read* and *write* access since
@@ -43,31 +45,66 @@ will allow the registry to see your repositories.
> hook. We have no choice here, this is how GitHub manages permissions, sorry!
> We do guarantee nothing else will be touched in your account.
-To get started, log into your Docker Hub account and click the "+ Add Repository" button
-at the upper right of the screen. Then select
+To get started, log into your Docker Hub account and click the
+"+ Add Repository" button at the upper right of the screen. Then select
[Automated Build](https://registry.hub.docker.com/builds/add/).
Select the [GitHub service](https://registry.hub.docker.com/associate/github/).
-Then follow the onscreen instructions to authorize and link your GitHub account to Docker
-Hub. Once it is linked, you'll be able to choose a repo from which to create the
-Automatic Build.
+Then follow the onscreen instructions to authorize and link your
+GitHub account to Docker Hub. Once it is linked, you'll be able to
+choose a repo from which to create the Automatic Build.
### Creating an Automated Build
-You can [create an Automated Build](https://registry.hub.docker.com/builds/github/select/)
-from any of your public or private GitHub repositories with a `Dockerfile`.
+You can [create an Automated Build](
+https://registry.hub.docker.com/builds/github/select/) from any of your
+public or private GitHub repositories with a `Dockerfile`.
+
+### GitHub Submodules
-### GitHub organizations
+If your GitHub repository contains links to private submodules, you'll
+need to add a deploy key from your Docker Hub repository.
+
+Your Docker Hub deploy key is located under the "Build Details"
+menu on the Automated Build's main page in the Hub. Add this key
+to your GitHub submodule by visiting the Settings page for the
+repository on GitHub and selecting "Deploy keys".
+
+<table class="table table-bordered">
+ <thead>
+ <tr>
+ <th>Step</th>
+ <th>Screenshot</th>
+ <th>Description</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td>1.</td>
+ <td><img src="/docker-hub/hub-images/deploy_key.png"></td>
+ <td>Your automated build's deploy key is in the "Build Details" menu
+under "Deploy keys".</td>
+ </tr>
+ <tr>
+ <td>2.</td>
+ <td><img src="/docker-hub/hub-images/github_deploy_key.png"></td>
+ <td>In your GitHub submodule's repository Settings page, add the
+deploy key from your Docker Hub Automated Build.</td>
+ </tr>
+ </tbody>
+</table>
+
+### GitHub Organizations
GitHub organizations will appear once your membership to that organization is
made public on GitHub. To verify, you can look at the members tab for your
organization on GitHub.
-### GitHub service hooks
+### GitHub Service Hooks
-Follow the steps below to configure the GitHub service hooks for your
-Automated Build:
+Follow the steps below to configure the GitHub service
+hooks for your Automated Build:
<table class="table table-bordered">
<thead>
@@ -99,18 +136,19 @@ Automated Build:
## Setting up Automated Builds with Bitbucket
In order to setup an Automated Build, you need to first link your
-[Docker Hub](https://hub.docker.com) account with a Bitbucket account. This
-will allow the registry to see your repositories.
+[Docker Hub](https://hub.docker.com) account with a Bitbucket account.
+This will allow the registry to see your repositories.
-To get started, log into your Docker Hub account and click the "+ Add Repository" button at
-the upper right of the screen. Then select [Automated Build](https://registry.hub.docker.com/builds/add/).
+To get started, log into your Docker Hub account and click the
+"+ Add Repository" button at the upper right of the screen. Then
+select [Automated Build](https://registry.hub.docker.com/builds/add/).
-Select the [Bitbucket
-source](https://registry.hub.docker.com/associate/bitbucket/).
+Select the [Bitbucket source](
+https://registry.hub.docker.com/associate/bitbucket/).
-Then follow the onscreen instructions to authorize and link your Bitbucket account
-to Docker Hub. Once it is linked, you'll be able to choose a repo from which to create
-the Automatic Build.
+Then follow the onscreen instructions to authorize and link your
+Bitbucket account to Docker Hub. Once it is linked, you'll be able
+to choose a repo from which to create the Automatic Build.
### Creating an Automated Build
@@ -120,9 +158,9 @@ public or private Bitbucket repositories with a `Dockerfile`.
### Adding a Hook
-When you link your Docker Hub account, a `POST` hook should get automatically added to
-your Bitbucket repo. Follow the steps below to confirm or modify the Bitbucket hooks for
-your Automated Build:
+When you link your Docker Hub account, a `POST` hook should get automatically
+added to your Bitbucket repo. Follow the steps below to confirm or modify the
+Bitbucket hooks for your Automated Build:
<table class="table table-bordered">
<thead>
@@ -155,10 +193,10 @@ your Automated Build:
## The Dockerfile and Automated Builds
-During the build process, Docker will copy the contents of your `Dockerfile`. It will
-also add it to the [Docker Hub](https://hub.docker.com) for the Docker community (for
-public repos) or approved team members/orgs (for private repos) to see on the repository
-page.
+During the build process, Docker will copy the contents of your `Dockerfile`.
+It will also add it to the [Docker Hub](https://hub.docker.com) for the Docker
+community (for public repos) or approved team members/orgs (for private repos)
+to see on the repository page.
## README.md
@@ -173,20 +211,20 @@ repository's full description.The build process will look for a
### Build triggers
-If you need a way to trigger Automated Builds outside of GitHub
-or Bitbucket, you can set up a build trigger. When you turn on the build
-trigger for an Automated Build, it will give you a URL to which you can
-send POST requests. This will trigger the Automated Build, much as with a GitHub webhook.
+If you need a way to trigger Automated Builds outside of GitHub or Bitbucket,
+you can set up a build trigger. When you turn on the build trigger for an
+Automated Build, it will give you a URL to which you can send POST requests.
+This will trigger the Automated Build, much as with a GitHub webhook.
-Build triggers are available under the Settings menu of each Automated Build repo on the
-Docker Hub.
+Build triggers are available under the Settings menu of each Automated Build
+repo on the Docker Hub.
> **Note:**
> You can only trigger one build at a time and no more than one
> every five minutes. If you already have a build pending, or if you
> recently submitted a build request, those requests *will be ignored*.
-> To verify everything is working correctly, check the logs of last ten triggers on the
-settings page .
+> To verify everything is working correctly, check the logs of last
+> ten triggers on the settings page .
### Webhooks
@@ -236,12 +274,14 @@ Build's repo.
### Repository links
-Repository links are a way to associate one Automated Build with another. If one
-gets updated,the linking system triggers a rebuild for the other Automated Build.
-This makes it easy to keep all your Automated Builds up to date.
+Repository links are a way to associate one Automated Build with
+another. If one gets updated,the linking system triggers a rebuild
+for the other Automated Build. This makes it easy to keep all your
+Automated Builds up to date.
-To add a link, go to the repo for the Automated Build you want to link to and click on
-*Repository Links* under the Settings menu at right. Then, enter the name of the repository that you want have linked.
+To add a link, go to the repo for the Automated Build you want to
+link to and click on *Repository Links* under the Settings menu at
+right. Then, enter the name of the repository that you want have linked.
> **Warning:**
> You can add more than one repository link, however, you should
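The build-trigger section above mentions a URL that accepts `POST` requests. A hedged example of firing one with `curl`; the user, repository, and token segments are placeholders, not a real endpoint:

```bash
curl --data "build=true" -X POST \
    https://registry.hub.docker.com/u/<username>/<repository>/trigger/<token>/
```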
diff --git a/docs/sources/docker-hub/hub-images/deploy_key.png b/docs/sources/docker-hub/hub-images/deploy_key.png
new file mode 100644
index 0000000000..c4377bba9b
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/deploy_key.png
Binary files differ
diff --git a/docs/sources/docker-hub/hub-images/github_deploy_key.png b/docs/sources/docker-hub/hub-images/github_deploy_key.png
new file mode 100644
index 0000000000..bd69054b14
--- /dev/null
+++ b/docs/sources/docker-hub/hub-images/github_deploy_key.png
Binary files differ
diff --git a/docs/sources/docker-hub/repos.md b/docs/sources/docker-hub/repos.md
index c219a1989a..dec6376f43 100644
--- a/docs/sources/docker-hub/repos.md
+++ b/docs/sources/docker-hub/repos.md
@@ -67,7 +67,7 @@ optimized and up-to-date image to power your applications.
> organization, product or team you can see more information
> [here](https://github.com/dotcloud/stackbrew).
-## Private Docker Repositories
+## Private Repositories
Private repositories allow you to have repositories that contain images
that you want to keep private, either to your own account or within an
diff --git a/docs/sources/examples/apt-cacher-ng.Dockerfile b/docs/sources/examples/apt-cacher-ng.Dockerfile
index 3b7862bb58..d1f76572b9 100644
--- a/docs/sources/examples/apt-cacher-ng.Dockerfile
+++ b/docs/sources/examples/apt-cacher-ng.Dockerfile
@@ -9,7 +9,7 @@ FROM ubuntu
MAINTAINER SvenDowideit@docker.com
VOLUME ["/var/cache/apt-cacher-ng"]
-RUN apt-get update ; apt-get install -yq apt-cacher-ng
+RUN apt-get update && apt-get install -y apt-cacher-ng
EXPOSE 3142
-CMD chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*
+CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/*
diff --git a/docs/sources/examples/apt-cacher-ng.md b/docs/sources/examples/apt-cacher-ng.md
index 34e4a4bf02..7dafec1593 100644
--- a/docs/sources/examples/apt-cacher-ng.md
+++ b/docs/sources/examples/apt-cacher-ng.md
@@ -28,10 +28,10 @@ Use the following Dockerfile:
MAINTAINER SvenDowideit@docker.com
VOLUME ["/var/cache/apt-cacher-ng"]
- RUN apt-get update ; apt-get install -yq apt-cacher-ng
+ RUN apt-get update && apt-get install -y apt-cacher-ng
EXPOSE 3142
- CMD chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*
+ CMD chmod 777 /var/cache/apt-cacher-ng && /etc/init.d/apt-cacher-ng start && tail -f /var/log/apt-cacher-ng/*
To build the image using:
@@ -61,7 +61,7 @@ a local version of a common base:
FROM ubuntu
RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy
- RUN apt-get update ; apt-get install vim git
+ RUN apt-get update && apt-get install -y vim git
# docker build -t my_ubuntu .
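A hedged usage sketch tying the pieces above together; the image name `eg_apt_cacher_ng` and container name `test_apt_cacher_ng` are assumptions:

```bash
docker build -t eg_apt_cacher_ng .
docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng
# On a client host (or via the dockerhost alias used in the Dockerfile above),
# route APT through the cache:
echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' \
    | sudo tee /etc/apt/apt.conf.d/01proxy
```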
diff --git a/docs/sources/examples/mongodb.md b/docs/sources/examples/mongodb.md
index 602f55ca88..28f7824594 100644
--- a/docs/sources/examples/mongodb.md
+++ b/docs/sources/examples/mongodb.md
@@ -65,13 +65,12 @@ a MongoDB repository file for the package manager.
After this initial preparation we can update our packages and install MongoDB.
# Update apt-get sources AND install MongoDB
- RUN apt-get update
- RUN apt-get install -y -q mongodb-org
+ RUN apt-get update && apt-get install -y mongodb-org
> **Tip:** You can install a specific version of MongoDB by using a list
> of required packages with versions, e.g.:
>
-> RUN apt-get install -y -q mongodb-org=2.6.1 mongodb-org-server=2.6.1 mongodb-org-shell=2.6.1 mongodb-org-mongos=2.6.1 mongodb-org-tools=2.6.1
+> RUN apt-get update && apt-get install -y mongodb-org=2.6.1 mongodb-org-server=2.6.1 mongodb-org-shell=2.6.1 mongodb-org-mongos=2.6.1 mongodb-org-tools=2.6.1
MongoDB requires a data directory. Let's create it as the final step of our
installation instructions.
diff --git a/docs/sources/examples/mongodb/Dockerfile b/docs/sources/examples/mongodb/Dockerfile
index e7acc0fd85..9333eb5811 100644
--- a/docs/sources/examples/mongodb/Dockerfile
+++ b/docs/sources/examples/mongodb/Dockerfile
@@ -11,8 +11,7 @@ RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
RUN echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | tee /etc/apt/sources.list.d/10gen.list
# Update apt-get sources AND install MongoDB
-RUN apt-get update
-RUN apt-get install -y -q mongodb-org
+RUN apt-get update && apt-get install -y mongodb-org
# Create the MongoDB data directory
RUN mkdir -p /data/db
@@ -20,5 +19,5 @@ RUN mkdir -p /data/db
# Expose port #27017 from the container to the host
EXPOSE 27017
-# Set usr/bin/mongod as the dockerized entry-point application
-ENTRYPOINT usr/bin/mongod
+# Set /usr/bin/mongod as the dockerized entry-point application
+ENTRYPOINT ["/usr/bin/mongod"]
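A hedged sketch of building and running the image defined above; the repository name `<your username>/mongodb` and the container name are placeholders:

```bash
docker build -t <your username>/mongodb .
docker run -p 27017:27017 --name mongo_instance_001 -d <your username>/mongodb
docker logs mongo_instance_001   # mongod should report it is waiting for connections on 27017
```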
diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md
index 03c48b5175..5d69fd713b 100644
--- a/docs/sources/examples/nodejs_web_app.md
+++ b/docs/sources/examples/nodejs_web_app.md
@@ -66,10 +66,10 @@ requires to build (this example uses Docker 0.3.4):
Next, define the parent image you want to use to build your own image on
top of. Here, we'll use
-[CentOS](https://registry.hub.docker.com/_/centos/) (tag: `6.4`)
+[CentOS](https://registry.hub.docker.com/_/centos/) (tag: `centos6`)
available on the [Docker Hub](https://hub.docker.com/):
- FROM centos:6.4
+ FROM centos:centos6
Since we're building a Node.js app, you'll have to install Node.js as
well as npm on your CentOS image. Node.js is required to run your app
@@ -109,7 +109,7 @@ defines your runtime, i.e. `node`, and the path to our app, i.e. `src/index.js`
Your `Dockerfile` should now look like this:
# DOCKER-VERSION 0.3.4
- FROM centos:6.4
+ FROM centos:centos6
# Enable EPEL for Node.js
RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
@@ -137,9 +137,9 @@ Your image will now be listed by Docker:
$ sudo docker images
# Example
- REPOSITORY TAG ID CREATED
- centos 6.4 539c0211cd76 8 weeks ago
- <your username>/centos-node-hello latest d64d3505b0d2 2 hours ago
+ REPOSITORY TAG ID CREATED
+ centos centos6 539c0211cd76 8 weeks ago
+ <your username>/centos-node-hello latest d64d3505b0d2 2 hours ago
## Run the image
diff --git a/docs/sources/examples/postgresql_service.Dockerfile b/docs/sources/examples/postgresql_service.Dockerfile
index 364a18a81d..d0f37669d1 100644
--- a/docs/sources/examples/postgresql_service.Dockerfile
+++ b/docs/sources/examples/postgresql_service.Dockerfile
@@ -13,17 +13,13 @@ RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys B97B0AFCAA
# of PostgreSQL, ``9.3``.
RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
-# Update the Ubuntu and PostgreSQL repository indexes
-RUN apt-get update
-
# Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3
# There are some warnings (in red) that show up during the build. You can hide
# them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive
-RUN apt-get -y -q install python-software-properties software-properties-common
-RUN apt-get -y -q install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
+RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
# Note: The official Debian and Ubuntu images automatically ``apt-get clean``
-# after each ``apt-get``
+# after each ``apt-get``
# Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed``
USER postgres
diff --git a/docs/sources/examples/postgresql_service.md b/docs/sources/examples/postgresql_service.md
index 5265935e3d..ffd122ed58 100644
--- a/docs/sources/examples/postgresql_service.md
+++ b/docs/sources/examples/postgresql_service.md
@@ -35,17 +35,13 @@ Start by creating a new `Dockerfile`:
# of PostgreSQL, ``9.3``.
RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
- # Update the Ubuntu and PostgreSQL repository indexes
- RUN apt-get update
-
# Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3
# There are some warnings (in red) that show up during the build. You can hide
# them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive
- RUN apt-get -y -q install python-software-properties software-properties-common
- RUN apt-get -y -q install postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
+ RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3
# Note: The official Debian and Ubuntu images automatically ``apt-get clean``
- # after each ``apt-get``
+ # after each ``apt-get``
# Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed``
USER postgres
@@ -88,7 +84,7 @@ Containers*](/userguide/dockerlinks), or we can access it from our host
> **Note**:
> The `--rm` removes the container and its image when
-> the container exists successfully.
+> the container exits successfully.
### Using container linking
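A hedged sketch of the linking workflow referenced above; the image name `eg_postgresql`, the container name `pg_test`, and the `docker` database user are assumptions, not values quoted from these hunks:

```bash
docker build -t eg_postgresql - < postgresql_service.Dockerfile
docker run --rm -P --name pg_test eg_postgresql      # runs in the foreground

# In a second terminal, reach the service through the link alias "pg":
docker run --rm -t -i --link pg_test:pg eg_postgresql \
    bash -c 'psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -U docker'
```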
diff --git a/docs/sources/examples/running_redis_service.md b/docs/sources/examples/running_redis_service.md
index 0eeef0625d..6d052da09e 100644
--- a/docs/sources/examples/running_redis_service.md
+++ b/docs/sources/examples/running_redis_service.md
@@ -13,8 +13,7 @@ Firstly, we create a `Dockerfile` for our new Redis
image.
FROM ubuntu:12.10
- RUN apt-get update
- RUN apt-get -y install redis-server
+ RUN apt-get update && apt-get install -y redis-server
EXPOSE 6379
ENTRYPOINT ["/usr/bin/redis-server"]
@@ -49,9 +48,9 @@ container to only this container.
Once inside our freshly created container we need to install Redis to
get the `redis-cli` binary to test our connection.
- $ apt-get update
- $ apt-get -y install redis-server
- $ service redis-server stop
+ $ sudo apt-get update
+ $ sudo apt-get install redis-server
+ $ sudo service redis-server stop
As we've used the `--link redis:db` option, Docker
has created some environment variables in our web application container.
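To see the environment variables that the `--link redis:db` option injects, a hedged sketch; `<your username>/redis` is a placeholder for the image built from the Dockerfile above:

```bash
docker run -d --name redis <your username>/redis
docker run -t -i --rm --link redis:db ubuntu:12.10 /bin/bash
# Inside the linked container, the "db" alias shows up as DB_* variables:
env | grep DB_
```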
diff --git a/docs/sources/examples/running_riak_service.md b/docs/sources/examples/running_riak_service.md
index d9f2c42850..c3d83bf663 100644
--- a/docs/sources/examples/running_riak_service.md
+++ b/docs/sources/examples/running_riak_service.md
@@ -25,13 +25,6 @@ of. We'll use [Ubuntu](https://registry.hub.docker.com/_/ubuntu/) (tag:
FROM ubuntu:latest
MAINTAINER Hector Castro hector@basho.com
-Next, we update the APT cache and apply any updates:
-
- # Update the APT cache
- RUN sed -i.bak 's/main$/main universe/' /etc/apt/sources.list
- RUN apt-get update
- RUN apt-get upgrade -y
-
After that, we install and setup a few dependencies:
- `curl` is used to download Basho's APT
@@ -46,7 +39,7 @@ After that, we install and setup a few dependencies:
<!-- -->
# Install and setup project dependencies
- RUN apt-get install -y curl lsb-release supervisor openssh-server
+ RUN apt-get update && apt-get install -y curl lsb-release supervisor openssh-server
RUN mkdir -p /var/run/sshd
RUN mkdir -p /var/log/supervisor
@@ -61,23 +54,14 @@ Next, we add Basho's APT repository:
RUN curl -sSL http://apt.basho.com/gpg/basho.apt.key | apt-key add --
RUN echo "deb http://apt.basho.com $(lsb_release -cs) main" > /etc/apt/sources.list.d/basho.list
- RUN apt-get update
After that, we install Riak and alter a few defaults:
# Install Riak and prepare it to run
- RUN apt-get install -y riak
+ RUN apt-get update && apt-get install -y riak
RUN sed -i.bak 's/127.0.0.1/0.0.0.0/' /etc/riak/app.config
RUN echo "ulimit -n 4096" >> /etc/default/riak
-Almost there. Next, we add a hack to get us by the lack of
-`initctl`:
-
- # Hack for initctl
- # See: https://github.com/dotcloud/docker/issues/1024
- RUN dpkg-divert --local --rename --add /sbin/initctl
- RUN ln -s /bin/true /sbin/initctl
-
Then, we expose the Riak Protocol Buffers and HTTP interfaces, along
with SSH:
diff --git a/docs/sources/examples/running_ssh_service.Dockerfile b/docs/sources/examples/running_ssh_service.Dockerfile
index 57baf88cef..1b8ed02a8a 100644
--- a/docs/sources/examples/running_ssh_service.Dockerfile
+++ b/docs/sources/examples/running_ssh_service.Dockerfile
@@ -5,10 +5,7 @@
FROM ubuntu:12.04
MAINTAINER Thatcher R. Peskens "thatcher@dotcloud.com"
-# make sure the package repository is up to date
-RUN apt-get update
-
-RUN apt-get install -y openssh-server
+RUN apt-get update && apt-get install -y openssh-server
RUN mkdir /var/run/sshd
RUN echo 'root:screencast' |chpasswd
diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md
index a8405e748e..7140678e3b 100644
--- a/docs/sources/examples/running_ssh_service.md
+++ b/docs/sources/examples/running_ssh_service.md
@@ -15,10 +15,7 @@ quick access to a test container.
FROM ubuntu:12.04
MAINTAINER Thatcher R. Peskens "thatcher@dotcloud.com"
- # make sure the package repository is up to date
- RUN apt-get update
-
- RUN apt-get install -y openssh-server
+ RUN apt-get update && apt-get install -y openssh-server
RUN mkdir /var/run/sshd
RUN echo 'root:screencast' |chpasswd
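A hedged test drive of the image above; the `eg_sshd` name and the mapped host port are assumptions, while the password is the `screencast` value set by the Dockerfile:

```bash
docker build -t eg_sshd .
docker run -d -P --name test_sshd eg_sshd
docker port test_sshd 22          # e.g. 0.0.0.0:49154
ssh root@localhost -p 49154       # log in with the password "screencast"
```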
diff --git a/docs/sources/faq.md b/docs/sources/faq.md
index 667058c86f..b76a749fe1 100644
--- a/docs/sources/faq.md
+++ b/docs/sources/faq.md
@@ -225,6 +225,38 @@ Downloading and installing an "all-in-one" .deb or .rpm sounds great at first,
except if you have no way to figure out that it contains a copy of the
OpenSSL library vulnerable to the [Heartbleed](http://heartbleed.com/) bug.
+### Why is `DEBIAN_FRONTEND=noninteractive` discouraged in Dockerfiles?
+
+When building Docker images on Debian and Ubuntu you may have seen errors like:
+
+ unable to initialize frontend: Dialog
+
+These errors don't stop the image from being built but inform you that the
+installation process tried to open a dialog box, but was unable to.
+Generally, these errors are safe to ignore.
+
+Some people circumvent these errors by changing the `DEBIAN_FRONTEND`
+environment variable inside the Dockerfile using:
+
+ ENV DEBIAN_FRONTEND=noninteractive
+
+This prevents the installer from opening dialog boxes during installation
+which stops the errors.
+
+While this may sound like a good idea, it *may* have side effects.
+The `DEBIAN_FRONTEND` environment variable will be inherited by all
+images and containers built from your image, effectively changing
+their behavior. People using those images will run into problems when
+installing software interactively, because installers will not show
+any dialog boxes.
+
+Because of this, and because setting `DEBIAN_FRONTEND` to `noninteractive` is
+mainly a 'cosmetic' change, we *discourage* changing it.
+
+If you *really* need to change its setting, make sure to change it
+back to its [default value](https://www.debian.org/releases/stable/i386/ch05s03.html.en)
+afterwards.
+
### Can I help by adding some questions and answers?
Definitely! You can fork [the repo](https://github.com/dotcloud/docker) and
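As the PostgreSQL Dockerfile in this same update notes, the warnings can also be hidden by prefixing individual `apt-get` statements rather than exporting the variable with `ENV`. A sketch of that narrower form; `postfix` is just an example of a package that normally opens a dialog:

```bash
apt-get update
# Scope the setting to this one command; nothing is inherited by images or
# containers built on top of the result.
DEBIAN_FRONTEND=noninteractive apt-get install -y postfix
```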
diff --git a/docs/sources/index.md b/docs/sources/index.md
index 75414b4364..8a6882a010 100644
--- a/docs/sources/index.md
+++ b/docs/sources/index.md
@@ -85,49 +85,3 @@ Docker on a variety of platforms.
To learn about Docker in more detail and to answer questions about usage and implementation, check out the [Docker User Guide](/userguide/).
-## Release Notes
-
-<b>Version 1.1.0</b>
-
-### New Features
-
-*`.dockerignore` support*
-
-You can now add a `.dockerignore` file next to your `Dockerfile` and Docker will ignore files and directories specified in that file when sending the build context to the daemon.
-Example: https://github.com/dotcloud/docker/blob/master/.dockerignore
-
-*Pause containers during commit*
-
-Doing a commit on a running container was not recommended because you could end up with files in an inconsistent state (for example, if they were being written during the commit). Containers are now paused when a commit is made to them.
-You can disable this feature by doing a `docker commit --pause=false <container_id>`
-
-*Tailing logs*
-
-You can now tail the logs of a container. For example, you can get the last ten lines of a log by using `docker logs --tail 10 <container_id>`. You can also follow the logs of a container without having to read the whole log file with `docker logs --tail 0 -f <container_id>`.
-
-*Allow a tar file as context for docker build*
-
-You can now pass a tar archive to `docker build` as context. This can be used to automate docker builds, for example: `cat context.tar | docker build -` or `docker run builder_image | docker build -`
-
-*Bind mounting your whole filesystem in a container*
-
-`/` is now allowed as source of `--volumes`. This means you can bind-mount your whole system in a container if you need to. For example: `docker run -v /:/my_host ubuntu:ro ls /my_host`. However, it is now forbidden to mount to /.
-
-
-### Other Improvements & Changes
-
-* Port allocation has been improved. In the previous release, Docker could prevent you from starting a container with previously allocated ports which seemed to be in use when in fact they were not. This has been fixed.
-
-* A bug in `docker save` was introduced in the last release. The `docker save` command could produce images with invalid metadata. The command now produces images with correct metadata.
-
-* Running `docker inspect` in a container now returns which containers it is linked to.
-
-* Parsing of the `docker commit` flag has improved validation, to better prevent you from committing an image with a name such as `-m`. Image names with dashes in them potentially conflict with command line flags.
-
-* The API now has Improved status codes for `start` and `stop`. Trying to start a running container will now return a 304 error.
-
-* Performance has been improved overall. Starting the daemon is faster than in previous releases. The daemon’s performance has also been improved when it is working with large numbers of images and containers.
-
-* Fixed an issue with white-spaces and multi-lines in Dockerfiles.
-
-
diff --git a/docs/sources/installation/centos.md b/docs/sources/installation/centos.md
index 3966d0f092..2f7d57d604 100644
--- a/docs/sources/installation/centos.md
+++ b/docs/sources/installation/centos.md
@@ -4,23 +4,57 @@ page_keywords: Docker, Docker documentation, requirements, linux, centos, epel,
# CentOS
-The Docker package is available via the EPEL repository. These
-instructions work for CentOS 6 and later. They will likely work for
+While the Docker package is provided by default as part of CentOS-7,
+it is provided by the EPEL repository for CentOS-6. Please note that
+this changes the installation instructions slightly between versions. If you
+need the latest version, you can always use the latest binary which works on
+kernel 3.8 and above.
+
+These instructions work for CentOS 6 and later. They will likely work for
other binary compatible EL6 distributions such as Scientific Linux, but
they haven't been tested.
-Please note that this package is part of [Extra Packages for Enterprise
-Linux (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort
-to create and maintain additional packages for the RHEL distribution.
-
-Also note that due to the current Docker limitations, Docker is able to
+Please note that due to the current Docker limitations, Docker is able to
run only on the **64 bit** architecture.
To run Docker, you will need [CentOS6](http://www.centos.org) or higher,
with a kernel version 2.6.32-431 or higher as this has specific kernel
fixes to allow Docker to run.
-## Installation
+## Installing Docker - CentOS-7
+Docker is included by default in the CentOS-Extras repository. To install
+simply run the following command.
+
+ $ sudo yum install docker
+
+### Manual installation of latest version
+
+While using a package is the recommended way of installing Docker,
+the above package might not be the latest version. If you need the latest
+version, [you can install the binary directly](
+https://docs.docker.com/installation/binaries/).
+
+When installing the binary without a package, you may want
+to integrate Docker with systemd. For this, simply install the two unit files
+(service and socket) from [the github
+repository](https://github.com/docker/docker/tree/master/contrib/init/systemd)
+to `/etc/systemd/system`.
+
+### FirewallD
+
+CentOS-7 introduced firewalld, which is a wrapper around iptables and can
+conflict with Docker.
+
+When firewalld is started or restarted it will remove the `DOCKER` chain
+from iptables, preventing Docker from working properly.
+
+When using systemd, firewalld is started before Docker, but if you
+start or restart firewalld after Docker, you will have to restart the Docker daemon.
+
+## Installing Docker - CentOS-6
+Please note that this for CentOS-6, this package is part of [Extra Packages
+for Enterprise Linux (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort
+to create and maintain additional packages for the RHEL distribution.
Firstly, you need to ensure you have the EPEL repository enabled. Please
follow the [EPEL installation instructions](
@@ -39,7 +73,9 @@ will install Docker on our host.
$ sudo yum install docker-io
-Now that it's installed, let's start the Docker daemon.
+## Using Docker
+
+Once Docker is installed, you will need to start the docker daemon.
$ sudo service docker start
@@ -50,7 +86,7 @@ If we want Docker to start at boot, we should also:
Now let's verify that Docker is working. First we'll need to get the latest
`centos` image.
- $ sudo docker pull centos:latest
+ $ sudo docker pull centos
Next we'll make sure that we can see the image by running:
@@ -69,6 +105,12 @@ Run a simple bash shell to test the image:
If everything is working properly, you'll get a simple bash prompt. Type
exit to continue.
+## Dockerfiles
+The CentOS Project provides a number of sample Dockerfiles which you may use
+either as templates or to familiarize yourself with docker. These templates
+are available on github at [https://github.com/CentOS/CentOS-Dockerfiles](
+https://github.com/CentOS/CentOS-Dockerfiles)
+
**Done!** You can either continue with the [Docker User
Guide](/userguide/) or explore and build on the images yourself.
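Putting the CentOS-7 steps above in one place, as a hedged sketch; the final interactive shell mirrors the "simple bash shell" check described in the text, though the exact invocation is an assumption:

```bash
sudo yum install docker                   # from the CentOS-Extras repository
sudo service docker start                 # start the daemon
sudo docker pull centos                   # fetch the latest centos image
sudo docker run -i -t centos /bin/bash    # type "exit" to leave the test shell
```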
diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md
index a230aa6cf5..757b3e9c44 100644
--- a/docs/sources/installation/fedora.md
+++ b/docs/sources/installation/fedora.md
@@ -68,7 +68,7 @@ and above.
If you are behind a HTTP proxy server, for example in corporate settings,
you will need to add this configuration in the Docker *systemd service file*.
-Edit file `/lib/systemd/system/docker.service`. Add the following to
+Edit file `/usr/lib/systemd/system/docker.service`. Add the following to
section `[Service]` :
Environment="HTTP_PROXY=http://proxy.example.com:80/"
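+
+After editing the unit file, reload systemd and restart Docker so the proxy
+setting takes effect (a minimal sketch):
+
+    $ sudo systemctl daemon-reload
+    $ sudo systemctl restart docker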
diff --git a/docs/sources/installation/gentoolinux.md b/docs/sources/installation/gentoolinux.md
index 62fdc9f00e..ac92ad39c8 100644
--- a/docs/sources/installation/gentoolinux.md
+++ b/docs/sources/installation/gentoolinux.md
@@ -39,6 +39,9 @@ and especially missing kernel configuration flags and/or dependencies,
https://github.com/tianon/docker-overlay/issues) or ping
tianon directly in the #docker IRC channel on the freenode network.
+Other USE flags are described in detail on [tianon's
+blog](https://tianon.github.io/post/2014/05/17/docker-on-gentoo.html).
+
## Starting Docker
Ensure that you are running a kernel that includes all the necessary
diff --git a/docs/sources/installation/google.md b/docs/sources/installation/google.md
index fa2fa61fc9..23a9bfbfb8 100644
--- a/docs/sources/installation/google.md
+++ b/docs/sources/installation/google.md
@@ -20,7 +20,7 @@ page_keywords: Docker, Docker documentation, installation, google, Google Comput
(select a zone close to you and the desired instance size)
$ gcloud compute instances create docker-playground \
- --image container-vm-v20140710 \
+ --image container-vm-v20140730 \
--image-project google-containers \
--zone us-central1-a \
--machine-type f1-micro
diff --git a/docs/sources/installation/openSUSE.md b/docs/sources/installation/openSUSE.md
index c03c74a811..951b8770cc 100644
--- a/docs/sources/installation/openSUSE.md
+++ b/docs/sources/installation/openSUSE.md
@@ -47,13 +47,27 @@ The docker package creates a new group named docker. Users, other than
root user, need to be part of this group in order to interact with the
Docker daemon. You can add users with:
- $ sudo usermod -a -G docker <username>
+ $ sudo /usr/sbin/usermod -a -G docker <username>
To verify that everything has worked as expected:
- $ sudo docker run --rm -i -t ubuntu /bin/bash
+ $ sudo docker run --rm -i -t opensuse /bin/bash
+
+This should download and import the `opensuse` image, and then start `bash` in
+a container. To exit the container type `exit`.
+
+If you want your containers to be able to access the external network, you must
+enable the `net.ipv4.ip_forward` rule.
+This can be done using YaST by browsing to the
+`Network Devices -> Network Settings -> Routing` menu and ensuring that the
+`Enable IPv4 Forwarding` box is checked.
+
+This option cannot be changed when networking is handled by the Network Manager.
+In such cases the `/etc/sysconfig/SuSEfirewall2` file needs to be edited by
+hand to ensure the `FW_ROUTE` flag is set to `yes` like so:
+
+ FW_ROUTE="yes"
-This should download and import the `ubuntu` image, and then start `bash` in a container. To exit the container type `exit`.
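+
+If you prefer the command line, the same setting can typically be enabled with
+`sysctl` (a sketch; YaST remains the documented path):
+
+    $ sudo sysctl -w net.ipv4.ip_forward=1
+    # persist the setting across reboots
+    $ echo "net.ipv4.ip_forward = 1" | sudo tee -a /etc/sysctl.conf
+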
**Done!**
diff --git a/docs/sources/installation/oracle.md b/docs/sources/installation/oracle.md
new file mode 100644
index 0000000000..05bb3d9808
--- /dev/null
+++ b/docs/sources/installation/oracle.md
@@ -0,0 +1,120 @@
+page_title: Installation on Oracle Linux
+page_description: Installation instructions for Docker on Oracle Linux.
+page_keywords: Docker, Docker documentation, requirements, linux, rhel, centos, oracle, ol
+
+# Oracle Linux 6 and 7
+
+You do not require an Oracle Linux Support subscription to install Docker on
+Oracle Linux.
+
+*For Oracle Linux customers with an active support subscription:*
+Docker is available in either the `ol6_x86_64_addons` or `ol7_x86_64_addons`
+channel for Oracle Linux 6 and Oracle Linux 7 on the [Unbreakable Linux Network
+(ULN)](https://linux.oracle.com).
+
+*For Oracle Linux users without an active support subscription:*
+Docker is available in the appropriate `ol6_addons` or `ol7_addons` repository
+on [Oracle Public Yum](http://public-yum.oracle.com).
+
+Docker requires the use of the Unbreakable Enterprise Kernel Release 3 (3.8.13)
+or higher on Oracle Linux. This kernel supports the Docker btrfs storage engine
+on both Oracle Linux 6 and 7.
+
+Due to current Docker limitations, Docker is able to run only on the x86_64
+architecture.
+
+## To enable the *addons* channel via the Unbreakable Linux Network:
+
+1. Enable either the *ol6\_x86\_64\_addons* or *ol7\_x86\_64\_addons* channel
+via the ULN web interface.
+Consult the [Unbreakable Linux Network User's
+Guide](http://docs.oracle.com/cd/E52668_01/E39381/html/index.html) for
+documentation on subscribing to channels.
+
+## To enable the *addons* repository via Oracle Public Yum:
+
+The latest releases of Oracle Linux 6 and 7 are automatically configured to use
+the Oracle Public Yum repositories during installation. However, the *addons*
+repository is not enabled by default.
+
+To enable the *addons* repository:
+
+1. Edit either `/etc/yum.repos.d/public-yum-ol6.repo` or
+`/etc/yum.repos.d/public-yum-ol7.repo`
+and set `enabled=1` in the `[ol6_addons]` or the `[ol7_addons]` stanza.
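+
+If the `yum-utils` package is installed, the same change can be made from the
+command line (a sketch):
+
+    $ sudo yum-config-manager --enable ol7_addons    # or ol6_addons on Oracle Linux 6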
+
+## To install Docker:
+
+1. Ensure the appropriate *addons* channel or repository has been enabled.
+
+2. Use yum to install the Docker package:
+
+ $ sudo yum install docker
+
+## To start Docker:
+
+1. Now that it's installed, start the Docker daemon:
+
+ 1. On Oracle Linux 6:
+
+ $ sudo service docker start
+
+ 2. On Oracle Linux 7:
+
+ $ sudo systemctl start docker.service
+
+2. If you want the Docker daemon to start automatically at boot:
+
+ 1. On Oracle Linux 6:
+
+ $ sudo chkconfig docker on
+
+ 2. On Oracle Linux 7:
+
+ $ sudo systemctl enable docker.service
+
+**Done!**
+
+## Using the btrfs storage engine
+
+Docker on Oracle Linux 6 and 7 supports the use of the btrfs storage engine.
+Before enabling btrfs support, ensure that `/var/lib/docker` is stored on a
+btrfs-based filesystem. Review [Chapter
+5](http://docs.oracle.com/cd/E37670_01/E37355/html/ol_btrfs.html) of the [Oracle
+Linux Administrator's Solution
+Guide](http://docs.oracle.com/cd/E37670_01/E37355/html/index.html) for details
+on how to create and mount btrfs filesystems.
+
+To enable btrfs support on Oracle Linux:
+
+1. Ensure that `/var/lib/docker` is on a btrfs filesystem.
+2. Edit `/etc/sysconfig/docker` and add `-s btrfs` to the `OTHER_ARGS` field.
+3. Restart the Docker daemon:
+
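+A minimal sketch, mirroring the start commands shown above:
+
+    # Oracle Linux 6
+    $ sudo service docker restart
+
+    # Oracle Linux 7
+    $ sudo systemctl restart docker.service
+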
+You can now continue with the [Docker User Guide](/userguide/).
+
+## Known issues
+
+### Docker unmounts btrfs filesystem on shutdown
+If you're running Docker using the btrfs storage engine and you stop the Docker
+service, it will unmount the btrfs filesystem during the shutdown process. You
+should ensure the filesystem is mounted properly prior to restarting the Docker
+service.
+
+On Oracle Linux 7, you can use a `systemd.mount` definition and modify the
+Docker `systemd.service` to depend on the btrfs mount defined in systemd.
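+
+A minimal sketch of that approach, assuming the btrfs volume should be mounted
+at `/var/lib/docker` (the device path and file names below are illustrative):
+
+    # /etc/systemd/system/var-lib-docker.mount
+    [Unit]
+    Description=btrfs volume for Docker
+
+    [Mount]
+    What=/dev/sdb1
+    Where=/var/lib/docker
+    Type=btrfs
+
+    [Install]
+    WantedBy=multi-user.target
+
+    # /etc/systemd/system/docker.service.d/btrfs-mount.conf
+    [Unit]
+    RequiresMountsFor=/var/lib/docker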
+
+### SELinux Support on Oracle Linux 7
+SELinux must be set to `Permissive` or `Disabled` in `/etc/sysconfig/selinux` to
+use the btrfs storage engine on Oracle Linux 7.
+
+## Further issues?
+
+If you have a current Basic or Premier Support Subscription for Oracle Linux,
+you can report any issues you have with the installation of Docker via a Service
+Request at [My Oracle Support](http://support.oracle.com).
+
+If you do not have an Oracle Linux Support Subscription, you can use the
+[Oracle Linux Forum](https://community.oracle.com/community/server_%26_storage_systems/linux/oracle_linux)
+for community-based support.
diff --git a/docs/sources/installation/softlayer.md b/docs/sources/installation/softlayer.md
index d01866720c..d594896a92 100644
--- a/docs/sources/installation/softlayer.md
+++ b/docs/sources/installation/softlayer.md
@@ -6,22 +6,22 @@ page_keywords: IBM SoftLayer, virtualization, cloud, docker, documentation, inst
1. Create an [IBM SoftLayer account](
https://www.softlayer.com/cloud-servers/).
-2. Log in to the [SoftLayer Console](
- https://control.softlayer.com/devices/).
-3. Go to [Order Hourly Computing Instance Wizard](
- https://manage.softlayer.com/Sales/orderHourlyComputingInstance)
- on your SoftLayer Console.
-4. Create a new *CloudLayer Computing Instance* (CCI) using the default
+2. Log in to the [SoftLayer Customer Portal](
+ https://control.softlayer.com/).
+3. From the *Devices* menu, select [*Device List*](https://control.softlayer.com/devices).
+4. Click *Order Devices* on the top right of the window below the menu bar.
+5. Under *Virtual Server*, click [*Hourly*](https://manage.softlayer.com/Sales/orderHourlyComputingInstance).
+6. Create a new *SoftLayer Virtual Server Instance* (VSI) using the default
values for all the fields and choose:
- - *First Available* as `Datacenter` and
+ - The desired location for *Datacenter*
- *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)*
- as `Operating System`.
+ for *Operating System*.
-5. Click the *Continue Your Order* button at the bottom right and
- select *Go to checkout*.
-6. Insert the required *User Metadata* and place the order.
-7. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux)
+7. Click the *Continue Your Order* button at the bottom right.
+8. Fill out VSI *hostname* and *domain*.
+9. Insert the required *User Metadata* and place the order.
+10. Then continue with the [*Ubuntu*](../ubuntulinux/#ubuntu-linux)
instructions.
## What next?
diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md
index 5ddc791f4f..fb8fdf66b7 100644
--- a/docs/sources/installation/ubuntulinux.md
+++ b/docs/sources/installation/ubuntulinux.md
@@ -87,9 +87,18 @@ VirtualBox guest additions. If you didn't install the headers for your
"precise" kernel, then you can skip these headers for the "raring"
kernel. But it is safer to include them if you're not sure.
+Please read the Ubuntu installation instructions for backported kernels, the
+[LTS Enablement Stack](https://wiki.ubuntu.com/Kernel/LTSEnablementStack) wiki page
+(see note 5 under each version), to understand why you also need to install
+the Xorg packages when running Docker on a machine with a graphical
+environment like Unity.
+
# install the backported kernel
$ sudo apt-get update
$ sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring
+
+ # install the backported kernel and xorg if using Unity/Xorg
+ $ sudo apt-get install --install-recommends linux-generic-lts-raring xserver-xorg-lts-raring libgl1-mesa-glx-lts-raring
# reboot
$ sudo reboot
@@ -266,11 +275,11 @@ These parameters will help you get rid of the following warnings:
## Troubleshooting
-On Linux Mint, the `cgroup-lite` package is not
+On Linux Mint, the `cgroup-lite` and `apparmor` packages are not
installed by default. Before Docker will work correctly, you will need
to install these via:
- $ sudo apt-get update && sudo apt-get install cgroup-lite
+ $ sudo apt-get update && sudo apt-get install cgroup-lite apparmor
## Docker and UFW
diff --git a/docs/sources/reference/api/docker_io_oauth_api.md b/docs/sources/reference/api/docker_io_oauth_api.md
deleted file mode 100644
index c5d07720b8..0000000000
--- a/docs/sources/reference/api/docker_io_oauth_api.md
+++ /dev/null
@@ -1,254 +0,0 @@
-page_title: docker.io OAuth API
-page_description: API Documentation for docker.io's OAuth flow.
-page_keywords: API, Docker, oauth, REST, documentation
-
-# docker.io OAuth API
-
-## 1. Brief introduction
-
-Some docker.io API requests will require an access token to
-authenticate. To get an access token for a user, that user must first
-grant your application access to their docker.io account. In order for
-them to grant your application access you must first register your
-application.
-
-Before continuing, we encourage you to familiarize yourself with [The
-OAuth 2.0 Authorization Framework](http://tools.ietf.org/html/rfc6749).
-
-*Also note that all OAuth interactions must take place over https
-connections*
-
-## 2. Register Your Application
-
-You will need to register your application with docker.io before users
-will be able to grant your application access to their account
-information. We are currently only allowing applications selectively. To
-request registration of your application send an email to
-[support-accounts@docker.com](mailto:support-accounts%40docker.com) with
-the following information:
-
- - The name of your application
- - A description of your application and the service it will provide to
- docker.io users.
- - A callback URI that we will use for redirecting authorization
- requests to your application. These are used in the step of getting
- an Authorization Code. The domain name of the callback URI will be
- visible to the user when they are requested to authorize your
- application.
-
-When your application is approved you will receive a response from the
-docker.io team with your `client_id` and
-`client_secret` which your application will use in
-the steps of getting an Authorization Code and getting an Access Token.
-
-# 3. Endpoints
-
-## 3.1 Get an Authorization Code
-
-Once You have registered you are ready to start integrating docker.io
-accounts into your application! The process is usually started by a user
-following a link in your application to an OAuth Authorization endpoint.
-
-`GET /api/v1.1/o/authorize/`
-
-Request that a docker.io user authorize your application. If the
-user is not already logged in, they will be prompted to login. The
-user is then presented with a form to authorize your application for
-the requested access scope. On submission, the user will be
-redirected to the specified `redirect_uri` with
-an Authorization Code.
-
- Query Parameters:
-
-  
-
- - **client_id** – The `client_id` given to
- your application at registration.
- - **response_type** – MUST be set to `code`.
- This specifies that you would like an Authorization Code
- returned.
- - **redirect_uri** – The URI to redirect back to after the user
- has authorized your application. If omitted, the first of your
- registered `response_uris` is used. If
- included, it must be one of the URIs which were submitted when
- registering your application.
- - **scope** – The extent of access permissions you are requesting.
- Currently, the scope options are `profile_read`, `profile_write`,
- `email_read`, and `email_write`. Scopes must be separated by a space. If omitted, the
- default scopes `profile_read email_read` are
- used.
- - **state** – (Recommended) Used by your application to maintain
- state between the authorization request and callback to protect
- against CSRF attacks.
-
- **Example Request**
-
- Asking the user for authorization.
-
- GET /api/v1.1/o/authorize/?client_id=TestClientID&response_type=code&redirect_uri=https%3A//my.app/auth_complete/&scope=profile_read%20email_read&state=abc123 HTTP/1.1
- Host: www.docker.io
-
- **Authorization Page**
-
- When the user follows a link, making the above GET request, they
- will be asked to login to their docker.io account if they are not
- already and then be presented with the following authorization
- prompt which asks the user to authorize your application with a
- description of the requested scopes.
-
- ![](/reference/api/_static/io_oauth_authorization_page.png)
-
- Once the user allows or denies your Authorization Request the user
- will be redirected back to your application. Included in that
- request will be the following query parameters:
-
- `code`
- : The Authorization code generated by the docker.io authorization
- server. Present it again to request an Access Token. This code
- expires in 60 seconds.
- `state`
- : If the `state` parameter was present in the
- authorization request this will be the exact value received from
- that request.
- `error`
- : An error message in the event of the user denying the
- authorization or some other kind of error with the request.
-
-## 3.2 Get an Access Token
-
-Once the user has authorized your application, a request will be made to
-your application's specified `redirect_uri` which
-includes a `code` parameter that you must then use
-to get an Access Token.
-
-`POST /api/v1.1/o/token/`
-
-Submit your newly granted Authorization Code and your application's
-credentials to receive an Access Token and Refresh Token. The code
-is valid for 60 seconds and cannot be used more than once.
-
- Request Headers:
-
-  
-
- - **Authorization** – HTTP basic authentication using your
- application's `client_id` and
- `client_secret`
-
- Form Parameters:
-
-  
-
- - **grant_type** – MUST be set to `authorization_code`
- - **code** – The authorization code received from the user's
- redirect request.
- - **redirect_uri** – The same `redirect_uri`
- used in the authentication request.
-
- **Example Request**
-
- Using an authorization code to get an access token.
-
- POST /api/v1.1/o/token/ HTTP/1.1
- Host: www.docker.io
- Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ=
- Accept: application/json
- Content-Type: application/json
-
- {
- "grant_type": "code",
- "code": "YXV0aG9yaXphdGlvbl9jb2Rl",
- "redirect_uri": "https://my.app/auth_complete/"
- }
-
- **Example Response**
-
- HTTP/1.1 200 OK
- Content-Type: application/json;charset=UTF-8
-
- {
- "username": "janedoe",
- "user_id": 42,
- "access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS",
- "expires_in": 15552000,
- "token_type": "Bearer",
- "scope": "profile_read email_read",
- "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc"
- }
-
- In the case of an error, there will be a non-200 HTTP Status and and
- data detailing the error.
-
-## 3.3 Refresh a Token
-
-Once the Access Token expires you can use your `refresh_token`
-to have docker.io issue your application a new Access Token,
-if the user has not revoked access from your application.
-
-`POST /api/v1.1/o/token/`
-
-Submit your `refresh_token` and application's
-credentials to receive a new Access Token and Refresh Token. The
-`refresh_token` can be used only once.
-
- Request Headers:
-
-  
-
- - **Authorization** – HTTP basic authentication using your
- application's `client_id` and
- `client_secret`
-
- Form Parameters:
-
-  
-
- - **grant_type** – MUST be set to `refresh_token`
- - **refresh_token** – The `refresh_token`
- which was issued to your application.
- - **scope** – (optional) The scope of the access token to be
- returned. Must not include any scope not originally granted by
- the user and if omitted is treated as equal to the scope
- originally granted.
-
- **Example Request**
-
- Refreshing an access token.
-
- POST /api/v1.1/o/token/ HTTP/1.1
- Host: www.docker.io
- Authorization: Basic VGVzdENsaWVudElEOlRlc3RDbGllbnRTZWNyZXQ=
- Accept: application/json
- Content-Type: application/json
-
- {
- "grant_type": "refresh_token",
- "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc",
- }
-
- **Example Response**
-
- HTTP/1.1 200 OK
- Content-Type: application/json;charset=UTF-8
-
- {
- "username": "janedoe",
- "user_id": 42,
- "access_token": "t6k2BqgRw59hphQBsbBoPPWLqu6FmS",
- "expires_in": 15552000,
- "token_type": "Bearer",
- "scope": "profile_read email_read",
- "refresh_token": "hJDhLH3cfsUrQlT4MxA6s8xAFEqdgc"
- }
-
- In the case of an error, there will be a non-200 HTTP Status and and
- data detailing the error.
-
-# 4. Use an Access Token with the API
-
-Many of the docker.io API requests will require a Authorization request
-header field. Simply ensure you add this header with "Bearer <`access_token`>":
-
- GET /api/v1.1/resource HTTP/1.1
- Host: docker.io
- Authorization: Bearer 2YotnFZFEjr1zCsicMWpAA
diff --git a/docs/sources/reference/api/docker_remote_api_v1.0.md b/docs/sources/reference/api/docker_remote_api_v1.0.md
index b906298b85..ce45e63994 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.0.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.0.md
@@ -566,6 +566,13 @@ Insert a file from `url` in the image `name` at `path`
{{ STREAM }}
+ Query Parameters:
+
+
+
+ - **url** – The url from where the file is taken
+ - **path** – The path where the file is stored
+
Status Codes:
- **200** – no error
@@ -670,12 +677,6 @@ Push the image `name` on the registry
{{ STREAM }}
- Query Parameters:
-
-  
-
- - **registry** – the registry you wan to push, optional
-
Status Codes:
- **200** – no error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.1.md b/docs/sources/reference/api/docker_remote_api_v1.1.md
index 4e449bccec..776ba3e505 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.1.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.1.md
@@ -573,6 +573,13 @@ Insert a file from `url` in the image `name` at `path`
{"error":"Invalid..."}
...
+ Query Parameters:
+
+
+
+ - **url** – The url from where the file is taken
+ - **path** – The path where the file is stored
+
Status Codes:
- **200** – no error
@@ -680,12 +687,6 @@ Push the image `name` on the registry
{"error":"Invalid..."}
...
- Query Parameters:
-
-  
-
- - **registry** – the registry you wan to push, optional
-
Status Codes:
- **200** – no error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md
index 264cdefc20..876d0fc370 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.10.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.10.md
@@ -366,7 +366,7 @@ Start the container `id`
{
"Binds":["/tmp:/tmp"],
- "LxcConf":{"lxc.utsname":"docker"},
+ "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}],
"PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
"PublishAllPorts":false,
"Privileged":false
@@ -739,6 +739,13 @@ Insert a file from `url` in the image
{"error":"Invalid..."}
...
+ Query Parameters:
+
+
+
+ - **url** – The url from where the file is taken
+ - **path** – The path where the file is stored
+
Status Codes:
- **200** – no error
@@ -846,11 +853,20 @@ Push the image `name` on the registry
{"error":"Invalid..."}
...
+ If you wish to push an image on to a private registry, that image must already have been tagged
+ into a repository which references that registry host name and port. This repository name should
+ then be used in the URL. This mirrors the flow of the CLI.
+
+ **Example request**:
+
+ POST /images/registry.acme.com:5000/test/push HTTP/1.1
+
+
Query Parameters:
 
- - **registry** – the registry you wan to push, optional
+ - **tag** – the tag to associate with the image on the registry, optional
Request Headers:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md
index ae2daae407..09d478678a 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.11.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.11.md
@@ -406,7 +406,7 @@ Start the container `id`
{
"Binds":["/tmp:/tmp"],
- "LxcConf":{"lxc.utsname":"docker"},
+ "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}],
"PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
"PublishAllPorts":false,
"Privileged":false,
@@ -861,11 +861,20 @@ Push the image `name` on the registry
{"error":"Invalid..."}
...
+ If you wish to push an image on to a private registry, that image must already have been tagged
+ into a repository which references that registry host name and port. This repository name should
+ then be used in the URL. This mirrors the flow of the CLI.
+
+ **Example request**:
+
+ POST /images/registry.acme.com:5000/test/push HTTP/1.1
+
+
Query Parameters:
 
- - **registry** – the registry you wan to push, optional
+ - **tag** – the tag to associate with the image on the registry, optional
Request Headers:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md
index 19fb24fe48..3102345c52 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.12.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.12.md
@@ -407,7 +407,7 @@ Start the container `id`
{
"Binds":["/tmp:/tmp"],
"Links":["redis3:redis"],
- "LxcConf":{"lxc.utsname":"docker"},
+ "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}],
"PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
"PublishAllPorts":false,
"Privileged":false,
@@ -808,30 +808,7 @@ Create an image, either by pull it from the registry or by importing it
- **200** – no error
- **500** – server error
-### Insert a file in an image
-`POST /images/(name)/insert`
-
-Insert a file from `url` in the image `name` at `path`
-
- **Example request**:
-
- POST /images/test/insert?path=/usr&url=myurl HTTP/1.1
-
- **Example response**:
-
- HTTP/1.1 200 OK
- Content-Type: application/json
-
- {"status":"Inserting..."}
- {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
- {"error":"Invalid..."}
- ...
-
- Status Codes:
-
- - **200** – no error
- - **500** – server error
### Inspect an image
@@ -937,11 +914,20 @@ Push the image `name` on the registry
{"error":"Invalid..."}
...
+ If you wish to push an image on to a private registry, that image must already have been tagged
+ into a repository which references that registry host name and port. This repository name should
+ then be used in the URL. This mirrors the flow of the CLI.
+
+ **Example request**:
+
+ POST /images/registry.acme.com:5000/test/push HTTP/1.1
+
+
Query Parameters:
 
- - **registry** – the registry you wan to push, optional
+ - **tag** – the tag to associate with the image on the registry, optional
Request Headers:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.13.md b/docs/sources/reference/api/docker_remote_api_v1.13.md
index e0ad957941..cb62a62c0d 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.13.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.13.md
@@ -405,7 +405,7 @@ Start the container `id`
{
"Binds":["/tmp:/tmp"],
"Links":["redis3:redis"],
- "LxcConf":{"lxc.utsname":"docker"},
+ "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}],
"PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
"PublishAllPorts":false,
"Privileged":false,
@@ -808,30 +808,7 @@ Create an image, either by pull it from the registry or by importing it
- **200** – no error
- **500** – server error
-### Insert a file in an image
-`POST /images/(name)/insert`
-
-Insert a file from `url` in the image `name` at `path`
-
- **Example request**:
-
- POST /images/test/insert?path=/usr&url=myurl HTTP/1.1
-
- **Example response**:
-
- HTTP/1.1 200 OK
- Content-Type: application/json
-
- {"status":"Inserting..."}
- {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
- {"error":"Invalid..."}
- ...
-
- Status Codes:
-
- - **200** – no error
- - **500** – server error
### Inspect an image
@@ -937,11 +914,20 @@ Push the image `name` on the registry
{"error":"Invalid..."}
...
+ If you wish to push an image on to a private registry, that image must already have been tagged
+ into a repository which references that registry host name and port. This repository name should
+ then be used in the URL. This mirrors the flow of the CLI.
+
+ **Example request**:
+
+ POST /images/registry.acme.com:5000/test/push HTTP/1.1
+
+
Query Parameters:
 
- - **registry** – the registry you wan to push, optional
+ - **tag** – the tag to associate with the image on the registry, optional
Request Headers:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.md b/docs/sources/reference/api/docker_remote_api_v1.2.md
index 37a8e1c012..bf68213864 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.2.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.2.md
@@ -589,6 +589,13 @@ Insert a file from `url` in the image `name` at `path`
{"error":"Invalid..."}
...
+ Query Parameters:
+
+
+
+ - **url** – The url from where the file is taken
+ - **path** – The path where the file is stored
+
Status Codes:
- **200** – no error
@@ -699,12 +706,6 @@ Push the image `name` on the registry
{"error":"Invalid..."}
...
- Query Parameters:
-
-  
-
- - **registry** – the registry you wan to push, optional
-
Status Codes:
- **200** – no error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.md b/docs/sources/reference/api/docker_remote_api_v1.3.md
index b510f660fd..e9d643cf77 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.3.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.3.md
@@ -639,6 +639,13 @@ Insert a file from `url` in the image `name` at `path`
{"error":"Invalid..."}
...
+ Query Parameters:
+
+
+
+ - **url** – The url from where the file is taken
+ - **path** – The path where the file is stored
+
Status Codes:
- **200** – no error
@@ -748,12 +755,6 @@ Push the image `name` on the registry
{"error":"Invalid..."}
...
- Query Parameters:
-
-  
-
- - **registry** – the registry you wan to push, optional
-
Status Codes:
- **200** – no error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.md b/docs/sources/reference/api/docker_remote_api_v1.4.md
index 0e49402621..1045f74203 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.4.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.4.md
@@ -685,6 +685,13 @@ Insert a file from `url` in the image `name` at `path`
{"error":"Invalid..."}
...
+ Query Parameters:
+
+
+
+ - **url** – The url from where the file is taken
+ - **path** – The path where the file is stored
+
Status Codes:
- **200** – no error
@@ -794,12 +801,6 @@ Push the image `name` on the registry
{"status":"Pushing..."} {"status":"Pushing", "progress":"1/? (n/a)"}
{"error":"Invalid..."} ...
- Query Parameters:
-
-  
-
- - **registry** – the registry you wan to push, optional
-
Status Codes:
- **200** – no error :statuscode 404: no such image :statuscode
diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.md b/docs/sources/reference/api/docker_remote_api_v1.5.md
index 33c1aeca1e..5b04957894 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.5.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.5.md
@@ -686,6 +686,13 @@ Insert a file from `url` in the image `name` at `path`
{"error":"Invalid..."}
...
+ Query Parameters:
+
+
+
+ - **url** – The url from where the file is taken
+ - **path** – The path where the file is stored
+
Status Codes:
- **200** – no error
@@ -798,12 +805,6 @@ Push the image `name` on the registry
The `X-Registry-Auth` header can be used to
include a base64-encoded AuthConfig object.
- Query Parameters:
-
-  
-
- - **registry** – the registry you wan to push, optional
-
Status Codes:
- **200** – no error
diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md
index 4500c1554c..105a21a53a 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.6.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.6.md
@@ -391,7 +391,7 @@ Start the container `id`
{
"Binds":["/tmp:/tmp"],
- "LxcConf":{"lxc.utsname":"docker"},
+ "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}],
"ContainerIDFile": "",
"Privileged": false,
"PortBindings": {"22/tcp": [{HostIp:"", HostPort:""}]},
@@ -793,6 +793,13 @@ Insert a file from `url` in the image `name` at `path`
{"error":"Invalid..."}
...
+ Query Parameters:
+
+
+
+ - **url** – The url from where the file is taken
+ - **path** – The path where the file is stored
+
Status Codes:
- **200** – no error
@@ -903,12 +910,6 @@ Push the image `name` on the registry
> The `X-Registry-Auth` header can be used to
> include a base64-encoded AuthConfig object.
- Query Parameters:
-
-  
-
- - **registry** – the registry you wan to push, optional
-
Status Codes:
- **200** – no error :statuscode 404: no such image :statuscode
diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md
index 402efa4262..a79ca863b6 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.7.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.7.md
@@ -347,7 +347,7 @@ Start the container `id`
{
"Binds":["/tmp:/tmp"],
- "LxcConf":{"lxc.utsname":"docker"},
+ "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}],
"PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
"Privileged":false,
"PublishAllPorts":false
@@ -712,6 +712,13 @@ Insert a file from `url` in the image `name` at `path`
{"error":"Invalid..."}
...
+ Query Parameters:
+
+
+
+ - **url** – The url from where the file is taken
+ - **path** – The path where the file is stored
+
Status Codes:
- **200** – no error
@@ -821,12 +828,6 @@ Push the image `name` on the registry
{"error":"Invalid..."}
...
- Query Parameters:
-
-  
-
- - **registry** – the registry you wan to push, optional
-
Request Headers:
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md
index 78fccaf281..b0bc377fed 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.8.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.8.md
@@ -383,7 +383,7 @@ Start the container `id`
{
"Binds":["/tmp:/tmp"],
- "LxcConf":{"lxc.utsname":"docker"},
+ "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}],
"PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
"PublishAllPorts":false,
"Privileged":false
@@ -754,6 +754,13 @@ Insert a file from `url` in the image `name` at `path`
{"error":"Invalid..."}
...
+ Query Parameters:
+
+
+
+ - **url** – The url from where the file is taken
+ - **path** – The path where the file is stored
+
Status Codes:
- **200** – no error
@@ -863,12 +870,6 @@ Push the image `name` on the registry
{"error":"Invalid..."}
...
- Query Parameters:
-
-  
-
- - **registry** – the registry you wan to push, optional
-
Request Headers:
 
diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md
index 741a9ac955..56eb025b08 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.9.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.9.md
@@ -383,7 +383,7 @@ Start the container `id`
{
"Binds":["/tmp:/tmp"],
- "LxcConf":{"lxc.utsname":"docker"},
+ "LxcConf":[{"Key":"lxc.utsname","Value":"docker"}],
"PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
"PublishAllPorts":false,
"Privileged":false
@@ -758,6 +758,13 @@ Insert a file from `url` in the image `name` at `path`
{"error":"Invalid..."}
...
+ Query Parameters:
+
+
+
+ - **url** – The url from where the file is taken
+ - **path** – The path where the file is stored
+
Status Codes:
- **200** – no error
@@ -867,12 +874,6 @@ Push the image `name` on the registry
{"error":"Invalid..."}
...
- Query Parameters:
-
-  
-
- - **registry** – the registry you wan to push, optional
-
Request Headers:
 
diff --git a/docs/sources/reference/api/registry_api.md b/docs/sources/reference/api/registry_api.md
index a3d4f23d66..49776b9b18 100644
--- a/docs/sources/reference/api/registry_api.md
+++ b/docs/sources/reference/api/registry_api.md
@@ -67,6 +67,8 @@ The latter would only require two new commands in docker, e.g.,
(and optionally doing consistency checks). Authentication and authorization
are then delegated to SSH (e.g., with public keys).
+The default namespace for a private repository is `library`.
+
# Endpoints
## Images
@@ -305,7 +307,7 @@ Get all of the tags for the given repo.
**Example Request**:
- GET /v1/repositories/foo/bar/tags HTTP/1.1
+ GET /v1/repositories/reynholm/help-system-server/tags HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
@@ -341,7 +343,7 @@ Get a tag for the given repo.
**Example Request**:
- GET /v1/repositories/foo/bar/tags/latest HTTP/1.1
+ GET /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
@@ -375,7 +377,7 @@ Delete the tag for the repo
**Example Request**:
- DELETE /v1/repositories/foo/bar/tags/latest HTTP/1.1
+ DELETE /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
@@ -408,7 +410,7 @@ Put a tag for the given repo.
**Example Request**:
- PUT /v1/repositories/foo/bar/tags/latest HTTP/1.1
+ PUT /v1/repositories/reynholm/help-system-server/tags/latest HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
@@ -446,7 +448,7 @@ Delete a repository
**Example Request**:
- DELETE /v1/repositories/foo/bar/ HTTP/1.1
+ DELETE /v1/repositories/reynholm/help-system-server/ HTTP/1.1
Host: registry-1.docker.io
Accept: application/json
Content-Type: application/json
diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md
index 57ddb52984..cbda7cef95 100644
--- a/docs/sources/reference/builder.md
+++ b/docs/sources/reference/builder.md
@@ -27,6 +27,19 @@ the build. The build is run by the Docker daemon, not by the CLI, so the
whole context must be transferred to the daemon. The Docker CLI reports
"Sending build context to Docker daemon" when the context is sent to the daemon.
+> **Warning**
+> Avoid using your root directory, `/`, as the root of the source repository. The
+> `docker build` command will use whatever directory contains the Dockerfile as the build
+> context (including all of its subdirectories). The build context will be sent to the
+> Docker daemon before building the image, which means if you use `/` as the source
+> repository, the entire contents of your hard drive will get sent to the daemon (and
+> thus to the machine running the daemon). You probably don't want that.
+
+In most cases, it's best to put each Dockerfile in an empty directory, and then add only
+the files needed for building that Dockerfile to that directory. To further speed up the
+build, you can exclude files and directories by adding a `.dockerignore` file to the same
+directory.
+
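+A minimal `.dockerignore` sketch (one pattern per line; the entries shown are
+only examples):
+
+    .git
+    *.log
+    tmp
+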
You can specify a repository and tag at which to save the new image if
the build succeeds:
@@ -164,6 +177,11 @@ any point in an image's history, much like source control.
The *exec* form makes it possible to avoid shell string munging, and to `RUN`
commands using a base image that does not contain `/bin/sh`.
+> **Note**:
+> To use a shell other than `/bin/sh`, use the *exec* form and pass in the
+> desired shell. For example,
+> `RUN ["/bin/bash", "-c", "echo hello"]`
+
The cache for `RUN` instructions isn't invalidated automatically during
the next build. The cache for an instruction like `RUN apt-get
dist-upgrade -y` will be reused during the next build. The cache for
@@ -196,6 +214,11 @@ container.** These defaults can include an executable, or they can omit
the executable, in which case you must specify an `ENTRYPOINT`
instruction as well.
+> **Note**:
+> If `CMD` is used to provide default arguments for the `ENTRYPOINT`
+> instruction, both the `CMD` and `ENTRYPOINT` instructions should be specified
+> with the JSON array format.
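+> For instance, a minimal sketch of this pattern (the executable chosen is
+> only illustrative):
+>
+>     FROM ubuntu
+>     ENTRYPOINT ["top", "-b"]
+>     CMD ["-c"]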
+
When used in the shell or exec formats, the `CMD` instruction sets the command
to be executed when running the image.
@@ -409,7 +432,7 @@ optional but default, you could use a `CMD` instruction:
FROM ubuntu
CMD ["-l"]
- ENTRYPOINT ["/usr/bin/ls"]
+ ENTRYPOINT ["ls"]
> **Note**:
> It is preferable to use the JSON array format for specifying
@@ -444,7 +467,10 @@ It can be used multiple times in the one `Dockerfile`. If a relative path
is provided, it will be relative to the path of the previous `WORKDIR`
instruction. For example:
- WORKDIR /a WORKDIR b WORKDIR c RUN pwd
+ WORKDIR /a
+ WORKDIR b
+ WORKDIR c
+ RUN pwd
The output of the final `pwd` command in this Dockerfile would be
`/a/b/c`.
@@ -516,23 +542,16 @@ For example you might add something like this:
FROM ubuntu
MAINTAINER Victor Vieux <victor@docker.com>
- # make sure the package repository is up to date
- RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
- RUN apt-get update
-
- RUN apt-get install -y inotify-tools nginx apache2 openssh-server
+ RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server
# Firefox over VNC
#
# VERSION 0.3
FROM ubuntu
- # make sure the package repository is up to date
- RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
- RUN apt-get update
# Install vnc, xvfb in order to create a 'fake' display and firefox
- RUN apt-get install -y x11vnc xvfb firefox
+ RUN apt-get update && apt-get install -y x11vnc xvfb firefox
RUN mkdir /.vnc
# Setup a password
RUN x11vnc -storepasswd 1234 ~/.vnc/passwd
diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md
index e82a5b1127..8166bf190d 100644
--- a/docs/sources/reference/commandline/cli.md
+++ b/docs/sources/reference/commandline/cli.md
@@ -324,7 +324,7 @@ schema.
> **Note:** `docker build` will return a `no such file or directory` error
> if the file or directory does not exist in the uploaded context. This may
-> happen if there is no context, or if you specify a file that is elsewhere
+> happen if there is no context, or if you specify a file that is elsewhere
> on the Host system. The context is limited to the current directory (and its
> children) for security reasons, and to ensure repeatable builds on remote
> Docker hosts. This is also the reason why `ADD ../file` will not work.
@@ -500,7 +500,7 @@ by default.
<none> <none> 77af4d6b9913 19 hours ago 1.089 GB
committest latest b6fa739cedf5 19 hours ago 1.089 GB
<none> <none> 78a85c484f71 19 hours ago 1.089 GB
- $ docker latest 30557a29d5ab 20 hours ago 1.089 GB
+ docker latest 30557a29d5ab 20 hours ago 1.089 GB
<none> <none> 0124422dd9f9 20 hours ago 1.089 GB
<none> <none> 18ad6fad3402 22 hours ago 1.082 GB
<none> <none> f9f1e26352f0 23 hours ago 1.089 GB
@@ -514,7 +514,7 @@ by default.
<none> <none> 77af4d6b9913e693e8d0b4b294fa62ade6054e6b2f1ffb617ac955dd63fb0182 19 hours ago 1.089 GB
committest latest b6fa739cedf5ea12a620a439402b6004d057da800f91c7524b5086a5e4749c9f 19 hours ago 1.089 GB
<none> <none> 78a85c484f71509adeaace20e72e941f6bdd2b25b4c75da8693efd9f61a37921 19 hours ago 1.089 GB
- $ docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB
+ docker latest 30557a29d5abc51e5f1d5b472e79b7e296f595abcf19fe6b9199dbbc809c6ff4 20 hours ago 1.089 GB
<none> <none> 0124422dd9f9cf7ef15c0617cda3931ee68346455441d66ab8bdc5b05e9fdce5 20 hours ago 1.089 GB
<none> <none> 18ad6fad340262ac2a636efd98a6d1f0ea775ae3d45240d3418466495a19a81b 22 hours ago 1.082 GB
<none> <none> f9f1e26352f0a3ba6a0ff68167559f64f3e21ff7ada60366e2d44a04befd1d3a 23 hours ago 1.089 GB
@@ -1103,14 +1103,14 @@ network and environment of the `redis` container via environment variables.
The `--name` flag will assign the name `console` to the newly created
container.
- $ sudo docker run --volumes-from 777f7dc92da7,ba8c0c54f0f2:ro -i -t ubuntu pwd
+ $ sudo docker run --volumes-from 777f7dc92da7 --volumes-from ba8c0c54f0f2:ro -i -t ubuntu pwd
The `--volumes-from` flag mounts all the defined volumes from the referenced
-containers. Containers can be specified by a comma separated list or by
-repetitions of the `--volumes-from` argument. The container ID may be
-optionally suffixed with `:ro` or `:rw` to mount the volumes in read-only
-or read-write mode, respectively. By default, the volumes are mounted in
-the same mode (read write or read only) as the reference container.
+containers. Containers can be specified by repetitions of the `--volumes-from`
+argument. The container ID may be optionally suffixed with `:ro` or `:rw` to
+mount the volumes in read-only or read-write mode, respectively. By default,
+the volumes are mounted in the same mode (read write or read only) as
+the reference container.
The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` or
`STDERR`. This makes it possible to manipulate the output and input as needed.
@@ -1213,7 +1213,7 @@ more details on finding shared images from the command line.
-a, --attach=false Attach container's STDOUT and STDERR and forward all signals to the process
-i, --interactive=false Attach container's STDIN
-When run on a container that has already been started,
+When run on a container that has already been started,
takes no action and succeeds unconditionally.
## stop
diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md
index 26f969abe8..b7e49058e5 100644
--- a/docs/sources/userguide/dockerimages.md
+++ b/docs/sources/userguide/dockerimages.md
@@ -245,8 +245,7 @@ example now for building our own Sinatra image for our development team.
# This is a comment
FROM ubuntu:14.04
MAINTAINER Kate Smith <ksmith@example.com>
- RUN apt-get -qq update
- RUN apt-get -qqy install ruby ruby-dev
+ RUN apt-get update && apt-get install -y ruby ruby-dev
RUN gem install sinatra
Let's look at what our `Dockerfile` does. Each line begins with a capitalized instruction followed by its arguments.
@@ -272,38 +271,168 @@ Sinatra gem.
Now let's take our `Dockerfile` and use the `docker build` command to build an image.
$ sudo docker build -t="ouruser/sinatra:v2" .
- Uploading context 2.56 kB
- Uploading context
+ Sending build context to Docker daemon 2.048 kB
+ Sending build context to Docker daemon
Step 0 : FROM ubuntu:14.04
- ---> 99ec81b80c55
+ ---> e54ca5efa2e9
Step 1 : MAINTAINER Kate Smith <ksmith@example.com>
- ---> Running in 7c5664a8a0c1
- ---> 2fa8ca4e2a13
- Removing intermediate container 7c5664a8a0c1
- Step 2 : RUN apt-get -qq update
- ---> Running in b07cc3fb4256
- ---> 50d21070ec0c
- Removing intermediate container b07cc3fb4256
- Step 3 : RUN apt-get -qqy install ruby ruby-dev
- ---> Running in a5b038dd127e
+ ---> Using cache
+ ---> 851baf55332b
+ Step 2 : RUN apt-get update && apt-get install -y ruby ruby-dev
+ ---> Running in 3a2558904e9b
Selecting previously unselected package libasan0:amd64.
(Reading database ... 11518 files and directories currently installed.)
Preparing to unpack .../libasan0_4.8.2-19ubuntu1_amd64.deb ...
- . . .
+ Unpacking libasan0:amd64 (4.8.2-19ubuntu1) ...
+ Selecting previously unselected package libatomic1:amd64.
+ Preparing to unpack .../libatomic1_4.8.2-19ubuntu1_amd64.deb ...
+ Unpacking libatomic1:amd64 (4.8.2-19ubuntu1) ...
+ Selecting previously unselected package libgmp10:amd64.
+ Preparing to unpack .../libgmp10_2%3a5.1.3+dfsg-1ubuntu1_amd64.deb ...
+ Unpacking libgmp10:amd64 (2:5.1.3+dfsg-1ubuntu1) ...
+ Selecting previously unselected package libisl10:amd64.
+ Preparing to unpack .../libisl10_0.12.2-1_amd64.deb ...
+ Unpacking libisl10:amd64 (0.12.2-1) ...
+ Selecting previously unselected package libcloog-isl4:amd64.
+ Preparing to unpack .../libcloog-isl4_0.18.2-1_amd64.deb ...
+ Unpacking libcloog-isl4:amd64 (0.18.2-1) ...
+ Selecting previously unselected package libgomp1:amd64.
+ Preparing to unpack .../libgomp1_4.8.2-19ubuntu1_amd64.deb ...
+ Unpacking libgomp1:amd64 (4.8.2-19ubuntu1) ...
+ Selecting previously unselected package libitm1:amd64.
+ Preparing to unpack .../libitm1_4.8.2-19ubuntu1_amd64.deb ...
+ Unpacking libitm1:amd64 (4.8.2-19ubuntu1) ...
+ Selecting previously unselected package libmpfr4:amd64.
+ Preparing to unpack .../libmpfr4_3.1.2-1_amd64.deb ...
+ Unpacking libmpfr4:amd64 (3.1.2-1) ...
+ Selecting previously unselected package libquadmath0:amd64.
+ Preparing to unpack .../libquadmath0_4.8.2-19ubuntu1_amd64.deb ...
+ Unpacking libquadmath0:amd64 (4.8.2-19ubuntu1) ...
+ Selecting previously unselected package libtsan0:amd64.
+ Preparing to unpack .../libtsan0_4.8.2-19ubuntu1_amd64.deb ...
+ Unpacking libtsan0:amd64 (4.8.2-19ubuntu1) ...
+ Selecting previously unselected package libyaml-0-2:amd64.
+ Preparing to unpack .../libyaml-0-2_0.1.4-3ubuntu3_amd64.deb ...
+ Unpacking libyaml-0-2:amd64 (0.1.4-3ubuntu3) ...
+ Selecting previously unselected package libmpc3:amd64.
+ Preparing to unpack .../libmpc3_1.0.1-1ubuntu1_amd64.deb ...
+ Unpacking libmpc3:amd64 (1.0.1-1ubuntu1) ...
+ Selecting previously unselected package openssl.
+ Preparing to unpack .../openssl_1.0.1f-1ubuntu2.4_amd64.deb ...
+ Unpacking openssl (1.0.1f-1ubuntu2.4) ...
+ Selecting previously unselected package ca-certificates.
+ Preparing to unpack .../ca-certificates_20130906ubuntu2_all.deb ...
+ Unpacking ca-certificates (20130906ubuntu2) ...
+ Selecting previously unselected package manpages.
+ Preparing to unpack .../manpages_3.54-1ubuntu1_all.deb ...
+ Unpacking manpages (3.54-1ubuntu1) ...
+ Selecting previously unselected package binutils.
+ Preparing to unpack .../binutils_2.24-5ubuntu3_amd64.deb ...
+ Unpacking binutils (2.24-5ubuntu3) ...
+ Selecting previously unselected package cpp-4.8.
+ Preparing to unpack .../cpp-4.8_4.8.2-19ubuntu1_amd64.deb ...
+ Unpacking cpp-4.8 (4.8.2-19ubuntu1) ...
+ Selecting previously unselected package cpp.
+ Preparing to unpack .../cpp_4%3a4.8.2-1ubuntu6_amd64.deb ...
+ Unpacking cpp (4:4.8.2-1ubuntu6) ...
+ Selecting previously unselected package libgcc-4.8-dev:amd64.
+ Preparing to unpack .../libgcc-4.8-dev_4.8.2-19ubuntu1_amd64.deb ...
+ Unpacking libgcc-4.8-dev:amd64 (4.8.2-19ubuntu1) ...
+ Selecting previously unselected package gcc-4.8.
+ Preparing to unpack .../gcc-4.8_4.8.2-19ubuntu1_amd64.deb ...
+ Unpacking gcc-4.8 (4.8.2-19ubuntu1) ...
+ Selecting previously unselected package gcc.
+ Preparing to unpack .../gcc_4%3a4.8.2-1ubuntu6_amd64.deb ...
+ Unpacking gcc (4:4.8.2-1ubuntu6) ...
+ Selecting previously unselected package libc-dev-bin.
+ Preparing to unpack .../libc-dev-bin_2.19-0ubuntu6_amd64.deb ...
+ Unpacking libc-dev-bin (2.19-0ubuntu6) ...
+ Selecting previously unselected package linux-libc-dev:amd64.
+ Preparing to unpack .../linux-libc-dev_3.13.0-30.55_amd64.deb ...
+ Unpacking linux-libc-dev:amd64 (3.13.0-30.55) ...
+ Selecting previously unselected package libc6-dev:amd64.
+ Preparing to unpack .../libc6-dev_2.19-0ubuntu6_amd64.deb ...
+ Unpacking libc6-dev:amd64 (2.19-0ubuntu6) ...
+ Selecting previously unselected package ruby.
+ Preparing to unpack .../ruby_1%3a1.9.3.4_all.deb ...
+ Unpacking ruby (1:1.9.3.4) ...
+ Selecting previously unselected package ruby1.9.1.
+ Preparing to unpack .../ruby1.9.1_1.9.3.484-2ubuntu1_amd64.deb ...
+ Unpacking ruby1.9.1 (1.9.3.484-2ubuntu1) ...
+ Selecting previously unselected package libruby1.9.1.
+ Preparing to unpack .../libruby1.9.1_1.9.3.484-2ubuntu1_amd64.deb ...
+ Unpacking libruby1.9.1 (1.9.3.484-2ubuntu1) ...
+ Selecting previously unselected package manpages-dev.
+ Preparing to unpack .../manpages-dev_3.54-1ubuntu1_all.deb ...
+ Unpacking manpages-dev (3.54-1ubuntu1) ...
+ Selecting previously unselected package ruby1.9.1-dev.
+ Preparing to unpack .../ruby1.9.1-dev_1.9.3.484-2ubuntu1_amd64.deb ...
+ Unpacking ruby1.9.1-dev (1.9.3.484-2ubuntu1) ...
+ Selecting previously unselected package ruby-dev.
+ Preparing to unpack .../ruby-dev_1%3a1.9.3.4_all.deb ...
+ Unpacking ruby-dev (1:1.9.3.4) ...
+ Setting up libasan0:amd64 (4.8.2-19ubuntu1) ...
+ Setting up libatomic1:amd64 (4.8.2-19ubuntu1) ...
+ Setting up libgmp10:amd64 (2:5.1.3+dfsg-1ubuntu1) ...
+ Setting up libisl10:amd64 (0.12.2-1) ...
+ Setting up libcloog-isl4:amd64 (0.18.2-1) ...
+ Setting up libgomp1:amd64 (4.8.2-19ubuntu1) ...
+ Setting up libitm1:amd64 (4.8.2-19ubuntu1) ...
+ Setting up libmpfr4:amd64 (3.1.2-1) ...
+ Setting up libquadmath0:amd64 (4.8.2-19ubuntu1) ...
+ Setting up libtsan0:amd64 (4.8.2-19ubuntu1) ...
+ Setting up libyaml-0-2:amd64 (0.1.4-3ubuntu3) ...
+ Setting up libmpc3:amd64 (1.0.1-1ubuntu1) ...
+ Setting up openssl (1.0.1f-1ubuntu2.4) ...
+ Setting up ca-certificates (20130906ubuntu2) ...
+ debconf: unable to initialize frontend: Dialog
+ debconf: (TERM is not set, so the dialog frontend is not usable.)
+ debconf: falling back to frontend: Readline
+ debconf: unable to initialize frontend: Readline
+ debconf: (This frontend requires a controlling tty.)
+ debconf: falling back to frontend: Teletype
+ Setting up manpages (3.54-1ubuntu1) ...
+ Setting up binutils (2.24-5ubuntu3) ...
+ Setting up cpp-4.8 (4.8.2-19ubuntu1) ...
+ Setting up cpp (4:4.8.2-1ubuntu6) ...
+ Setting up libgcc-4.8-dev:amd64 (4.8.2-19ubuntu1) ...
+ Setting up gcc-4.8 (4.8.2-19ubuntu1) ...
+ Setting up gcc (4:4.8.2-1ubuntu6) ...
+ Setting up libc-dev-bin (2.19-0ubuntu6) ...
+ Setting up linux-libc-dev:amd64 (3.13.0-30.55) ...
+ Setting up libc6-dev:amd64 (2.19-0ubuntu6) ...
+ Setting up manpages-dev (3.54-1ubuntu1) ...
+ Setting up libruby1.9.1 (1.9.3.484-2ubuntu1) ...
+ Setting up ruby1.9.1-dev (1.9.3.484-2ubuntu1) ...
+ Setting up ruby-dev (1:1.9.3.4) ...
Setting up ruby (1:1.9.3.4) ...
Setting up ruby1.9.1 (1.9.3.484-2ubuntu1) ...
Processing triggers for libc-bin (2.19-0ubuntu6) ...
- ---> 2acb20f17878
- Removing intermediate container a5b038dd127e
- Step 4 : RUN gem install sinatra
- ---> Running in 5e9d0065c1f7
- . . .
+ Processing triggers for ca-certificates (20130906ubuntu2) ...
+ Updating certificates in /etc/ssl/certs... 164 added, 0 removed; done.
+ Running hooks in /etc/ca-certificates/update.d....done.
+ ---> c55c31703134
+ Removing intermediate container 3a2558904e9b
+ Step 3 : RUN gem install sinatra
+ ---> Running in 6b81cb6313e5
+ unable to convert "\xC3" to UTF-8 in conversion from ASCII-8BIT to UTF-8 to US-ASCII for README.rdoc, skipping
+ unable to convert "\xC3" to UTF-8 in conversion from ASCII-8BIT to UTF-8 to US-ASCII for README.rdoc, skipping
+ Successfully installed rack-1.5.2
+ Successfully installed tilt-1.4.1
Successfully installed rack-protection-1.5.3
Successfully installed sinatra-1.4.5
4 gems installed
- ---> 324104cde6ad
- Removing intermediate container 5e9d0065c1f7
- Successfully built 324104cde6ad
+ Installing ri documentation for rack-1.5.2...
+ Installing ri documentation for tilt-1.4.1...
+ Installing ri documentation for rack-protection-1.5.3...
+ Installing ri documentation for sinatra-1.4.5...
+ Installing RDoc documentation for rack-1.5.2...
+ Installing RDoc documentation for tilt-1.4.1...
+ Installing RDoc documentation for rack-protection-1.5.3...
+ Installing RDoc documentation for sinatra-1.4.5...
+ ---> 97feabe5d2ed
+ Removing intermediate container 6b81cb6313e5
+ Successfully built 97feabe5d2ed
We've specified our `docker build` command and used the `-t` flag to identify
our new image as belonging to the user `ouruser`, the repository name `sinatra`
diff --git a/docs/sources/userguide/dockerlinks.md b/docs/sources/userguide/dockerlinks.md
index d7a2abff9c..3624bf72c3 100644
--- a/docs/sources/userguide/dockerlinks.md
+++ b/docs/sources/userguide/dockerlinks.md
@@ -4,48 +4,47 @@ page_keywords: Examples, Usage, user guide, links, linking, docker, documentatio
# Linking Containers Together
-In [the Using Docker section](/userguide/usingdocker) we touched on
-connecting to a service running inside a Docker container via a network
-port. This is one of the ways that you can interact with services and
-applications running inside Docker containers. In this section we're
-going to give you a refresher on connecting to a Docker container via a
-network port as well as introduce you to the concepts of container
-linking.
+In [the Using Docker section](/userguide/usingdocker), you saw how you can
+connect to a service running inside a Docker container via a network
+port. But a port connection is only one way you can interact with services and
+applications running inside Docker containers. In this section, we'll briefly revisit
+connecting via a network port and then we'll introduce you to another method of access:
+container linking.
## Network port mapping refresher
-In [the Using Docker section](/userguide/usingdocker) we created a
-container that ran a Python Flask application.
+In [the Using Docker section](/userguide/usingdocker), you created a
+container that ran a Python Flask application:
$ sudo docker run -d -P training/webapp python app.py
> **Note:**
> Containers have an internal network and an IP address
-> (remember we used the `docker inspect` command to show the container's
+> (as we saw when we used the `docker inspect` command to show the container's
> IP address in the [Using Docker](/userguide/usingdocker/) section).
> Docker can have a variety of network configurations. You can see more
> information on Docker networking [here](/articles/networking/).
-When we created that container we used the `-P` flag to automatically map any
-network ports inside that container to a random high port from the range 49000
-to 49900 on our Docker host. When we subsequently ran `docker ps` we saw that
-port 5000 was bound to port 49155.
+When that container was created, the `-P` flag was used to automatically map any
+network ports inside it to a random high port from the range 49000
+to 49900 on our Docker host. Next, when `docker ps` was run, you saw that
+port 5000 in the container was bound to port 49155 on the host.
$ sudo docker ps nostalgic_morse
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
bc533791f3f5 training/webapp:latest python app.py 5 seconds ago Up 2 seconds 0.0.0.0:49155->5000/tcp nostalgic_morse
-We also saw how we can bind a container's ports to a specific port using
-the `-p` flag.
+You also saw how you can bind a container's ports to a specific port using
+the `-p` flag:
$ sudo docker run -d -p 5000:5000 training/webapp python app.py
-And we saw why this isn't such a great idea because it constrains us to
+And you saw why this isn't such a great idea because it constrains you to
only one container on that specific port.
-There are also a few other ways we can configure the `-p` flag. By
+There are also a few other ways you can configure the `-p` flag. By
default the `-p` flag will bind the specified port to all interfaces on
-the host machine. But we can also specify a binding to a specific
+the host machine. But you can also specify a binding to a specific
interface, for example only to the `localhost`.
$ sudo docker run -d -p 127.0.0.1:5000:5000 training/webapp python app.py
@@ -53,20 +52,19 @@ interface, for example only to the `localhost`.
This would bind port 5000 inside the container to port 5000 on the
`localhost` or `127.0.0.1` interface on the host machine.
-Or to bind port 5000 of the container to a dynamic port but only on the
-`localhost` we could:
+Or, to bind port 5000 of the container to a dynamic port but only on the
+`localhost`, you could use:
$ sudo docker run -d -p 127.0.0.1::5000 training/webapp python app.py
-We can also bind UDP ports by adding a trailing `/udp`, for example:
+You can also bind UDP ports by adding a trailing `/udp`. For example:
$ sudo docker run -d -p 127.0.0.1:5000:5000/udp training/webapp python app.py
-We also saw the useful `docker port` shortcut which showed us the
-current port bindings, this is also useful for showing us specific port
-configurations. For example if we've bound the container port to the
-`localhost` on the host machine this will be shown in the `docker port`
-output.
+You also learned about the useful `docker port` shortcut, which showed you the
+current port bindings. This is also useful for showing you specific port
+configurations. For example, if you've bound the container port to the
+`localhost` on the host machine, then the `docker port` output will reflect that.
$ docker port nostalgic_morse 5000
127.0.0.1:49155
@@ -78,38 +76,39 @@ output.
Network port mappings are not the only way Docker containers can connect
to one another. Docker also has a linking system that allows you to link
-multiple containers together and share connection information between
-them. Docker linking will create a parent child relationship where the
-parent container can see selected information about its child.
+multiple containers together and send connection information from one to another.
+When containers are linked, information about a source container can be sent to a
+recipient container. This allows the recipient to see selected data describing
+aspects of the source container.
## Container naming
-To perform this linking Docker relies on the names of your containers.
-We've already seen that each container we create has an automatically
-created name, indeed we've become familiar with our old friend
+To establish links, Docker relies on the names of your containers.
+You've already seen that each container you create has an automatically
+created name; indeed you've become familiar with our old friend
`nostalgic_morse` during this guide. You can also name containers
yourself. This naming provides two useful functions:
-1. It's useful to name containers that do specific functions in a way
+1. It can be useful to name containers that perform specific functions in a way
that makes it easier for you to remember them, for example naming a
- container with a web application in it `web`.
+ container containing a web application `web`.
2. It provides Docker with a reference point that allows it to refer to other
- containers, for example link container `web` to container `db`.
+   containers, for example, you can link the container `web` to the container `db`.
You can name your container by using the `--name` flag, for example:
$ sudo docker run -d -P --name web training/webapp python app.py
-You can see we've launched a new container and used the `--name` flag to
-call the container `web`. We can see the container's name using the
+This launches a new container and uses the `--name` flag to
+name the container `web`. You can see the container's name using the
`docker ps` command.
$ sudo docker ps -l
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
aed84ee21bde training/webapp:latest python app.py 12 hours ago Up 2 seconds 0.0.0.0:49154->5000/tcp web
-We can also use `docker inspect` to return the container's name.
+You can also use `docker inspect` to return the container's name.
$ sudo docker inspect -f "{{ .Name }}" aed84ee21bde
/web
@@ -117,67 +116,70 @@ We can also use `docker inspect` to return the container's name.
> **Note:**
> Container names have to be unique. That means you can only call
> one container `web`. If you want to re-use a container name you must delete
-> the old container with the `docker rm` command before you can create a new
+> the old container (with `docker rm`) before you can create a new
> container with the same name. As an alternative you can use the `--rm`
> flag with the `docker run` command. This will delete the container
-> immediately after it stops.
+> immediately after it is stopped.
## Container Linking
-Links allow containers to discover and securely communicate with each
-other. To create a link you use the `--link` flag. Let's create a new
-container, this one a database.
+Links allow containers to discover each other and securely transfer information about one
+container to another. When you set up a link, you create a conduit between a
+source container and a recipient container. The recipient can then access select data
+about the source. To create a link, you use the `--link` flag. First, create a new
+container, this time one containing a database.
$ sudo docker run -d --name db training/postgres
-Here we've created a new container called `db` using the `training/postgres`
+This creates a new container called `db` from the `training/postgres`
image, which contains a PostgreSQL database.
-We need to delete the `web` container we created previously so we can replace it
+Now, you need to delete the `web` container you created previously so you can replace it
with a linked one:
$ docker rm -f web
-Now let's create a new `web` container and link it with our `db` container.
+Now, create a new `web` container and link it with your `db` container.
$ sudo docker run -d -P --name web --link db:db training/webapp python app.py
-This will link the new `web` container with the `db` container we created
+This will link the new `web` container with the `db` container you created
earlier. The `--link` flag takes the form:
--link name:alias
Where `name` is the name of the container we're linking to and `alias` is an
-alias for the link name. We'll see how that alias gets used shortly.
+alias for the link name. You'll see how that alias gets used shortly.
-Let's look at our linked containers using `docker ps`.
+Next, look at your linked containers using `docker ps`.
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
349169744e49 training/postgres:latest su postgres -c '/usr About a minute ago Up About a minute 5432/tcp db, web/db
aed84ee21bde training/webapp:latest python app.py 16 hours ago Up 2 minutes 0.0.0.0:49154->5000/tcp web
-We can see our named containers, `db` and `web`, and we can see that the `db`
-containers also shows `web/db` in the `NAMES` column. This tells us that the
-`web` container is linked to the `db` container in a parent/child relationship.
+You can see your named containers, `db` and `web`, and you can see that the `db`
+container also shows `web/db` in the `NAMES` column. This tells you that the
+`web` container is linked to the `db` container, which allows it to access information
+about the `db` container.
-So what does linking the containers do? Well we've discovered the link creates
-a parent-child relationship between the two containers. The parent container,
-here `web`, can access information on the child container `db`. To do this
-Docker creates a secure tunnel between the containers without the need to
-expose any ports externally on the container. You'll note when we started the
-`db` container we did not use either of the `-P` or `-p` flags. As we're
-linking the containers we don't need to expose the PostgreSQL database via the
-network.
+So what does linking the containers actually do? You've learned that a link lets a
+source container provide information about itself to a recipient container. In
+our example, the recipient, `web`, can access information about the source `db`. To do
+this, Docker creates a secure tunnel between the containers that doesn't need to
+expose any ports externally on the container; you'll note that when you started the
+`db` container you did not use either the `-P` or `-p` flags. That's a big benefit of
+linking: you don't need to expose the source container, here the PostgreSQL database, to
+the network.
-Docker exposes connectivity information for the parent container inside the
-child container in two ways:
+Docker exposes connectivity information for the source container to the
+recipient container in two ways:
* Environment variables,
* Updating the `/etc/hosts` file.
-Let's look first at the environment variables Docker sets. Let's run the `env`
-command to list the container's environment variables.
+Docker sets a number of environment variables in the recipient container. You can run the
+`env` command to list these variables:
```
$ sudo docker run --rm --name web2 --link db:db training/webapp env
@@ -196,17 +198,17 @@ command to list the container's environment variables.
> container. Similarly, some daemons (such as `sshd`)
> will scrub them when spawning shells for connection.
-We can see that Docker has created a series of environment variables with
-useful information about our `db` container. Each variable is prefixed with
-`DB_` which is populated from the `alias` we specified above. If our `alias`
-were `db1` the variables would be prefixed with `DB1_`. You can use these
+You can see that Docker has created a series of environment variables with
+useful information about the source `db` container. Each variable is prefixed with
+`DB_`, which is populated from the `alias` you specified above. If the `alias`
+were `db1`, the variables would be prefixed with `DB1_`. You can use these
environment variables to configure your applications to connect to the database
-on the `db` container. The connection will be secure, private and only the
+on the `db` container. The connection will be secure and private; only the
linked `web` container will be able to talk to the `db` container.
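+
+For example, a minimal sketch of reading these variables from a shell, assuming the
+`DB_PORT_5432_TCP_ADDR` and `DB_PORT_5432_TCP_PORT` variables are present (as they should
+be for a linked PostgreSQL container exposing port 5432), might look like:
+
+    $ sudo docker run --rm --link db:db training/webapp \
+        bash -c 'echo "postgres reachable at $DB_PORT_5432_TCP_ADDR:$DB_PORT_5432_TCP_PORT"'
+
+Your application could read the same variables at startup to build its database
+connection settings.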
-In addition to the environment variables Docker adds a host entry for the
-linked parent to the `/etc/hosts` file. Let's look at this file on the `web`
-container now.
+In addition to the environment variables, Docker adds a host entry for the
+source container to the `/etc/hosts` file. Here's how that file looks on the `web`
+container:
$ sudo docker run -t -i --rm --link db:db training/webapp /bin/bash
root@aed84ee21bde:/opt/webapp# cat /etc/hosts
@@ -214,9 +216,9 @@ container now.
. . .
172.17.0.5 db
-We can see two relevant host entries. The first is an entry for the `web`
+You can see two relevant host entries. The first is an entry for the `web`
container that uses the Container ID as a host name. The second entry uses the
-link alias to reference the IP address of the `db` container. Let's try to ping
+link alias to reference the IP address of the `db` container. You can ping
that host now via this host name.
root@aed84ee21bde:/opt/webapp# apt-get install -yqq inetutils-ping
@@ -227,21 +229,22 @@ that host now via this host name.
56 bytes from 172.17.0.5: icmp_seq=2 ttl=64 time=0.256 ms
> **Note:**
-> We had to install `ping` because our container didn't have it.
+> In the example, you'll note you had to install `ping` because it was not included
+> in the container initially.
-We've used the `ping` command to ping the `db` container using it's host entry
-which resolves to `172.17.0.5`. We can make use of this host entry to configure
-an application to make use of our `db` container.
+Here, you used the `ping` command to ping the `db` container using its host entry,
+which resolves to `172.17.0.5`. You can use this host entry to configure an application
+to make use of your `db` container.
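+
+For instance, a hypothetical connection setting that uses the link alias as the hostname
+(the `DATABASE_URL` variable, database name, and user here are only placeholders) could be
+set inside the `web` container:
+
+    root@aed84ee21bde:/opt/webapp# export DATABASE_URL=postgres://postgres@db:5432/postgres
+
+Because `db` resolves via `/etc/hosts`, the application doesn't need to know the source
+container's IP address.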
> **Note:**
-> You can link multiple child containers to a single parent. For
-> example, we could have multiple web containers attached to our `db`
-> container.
+> You can link multiple recipient containers to a single source. For
+> example, you could have multiple (differently named) web containers attached to your
+> `db` container.
# Next step
-Now we know how to link Docker containers together the next step is
-learning how to manage data, volumes and mounts inside our containers.
+Now that you know how to link Docker containers together, the next step is
+learning how to manage data, volumes and mounts inside your containers.
Go to [Managing Data in Containers](/userguide/dockervolumes).
diff --git a/docs/sources/userguide/dockervolumes.md b/docs/sources/userguide/dockervolumes.md
index 42b01ecf8b..97593a1e04 100644
--- a/docs/sources/userguide/dockervolumes.md
+++ b/docs/sources/userguide/dockervolumes.md
@@ -131,14 +131,14 @@ like so:
$ sudo docker run --volumes-from dbdata -v $(pwd):/backup ubuntu tar cvf /backup/backup.tar /dbdata
-Here's we've launched a new container and mounted the volume from the
+Here we've launched a new container and mounted the volume from the
`dbdata` container. We've then mounted a local host directory as
`/backup`. Finally, we've passed a command that uses `tar` to backup the
contents of the `dbdata` volume to a `backup.tar` file inside our
`/backup` directory. When the command completes and the container stops
we'll be left with a backup of our `dbdata` volume.
-You could then to restore to the same container, or another that you've made
+You could then restore it to the same container, or another that you've made
elsewhere. Create a new container.
$ sudo docker run -v /dbdata --name dbdata2 ubuntu /bin/bash
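+
+A minimal sketch of the restore step (assuming the `backup.tar` produced above is in your
+current directory) might then look like:
+
+    $ sudo docker run --volumes-from dbdata2 -v $(pwd):/backup ubuntu tar xvf /backup/backup.tar
+
+This unpacks the archive back into the new `dbdata2` container's `/dbdata` volume.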
diff --git a/docs/sources/userguide/search.png b/docs/sources/userguide/search.png
index 27370741a7..ded0d0d2d3 100644
--- a/docs/sources/userguide/search.png
+++ b/docs/sources/userguide/search.png
Binary files differ
diff --git a/docs/sources/userguide/usingdocker.md b/docs/sources/userguide/usingdocker.md
index a882a79c7d..ea13122fd6 100644
--- a/docs/sources/userguide/usingdocker.md
+++ b/docs/sources/userguide/usingdocker.md
@@ -156,9 +156,9 @@ In this case Docker has exposed port 5000 (the default Python Flask
port) on port 49155.
Network port bindings are very configurable in Docker. In our last
-example the `-P` flag is a shortcut for `-p 5000` that makes port 5000
+example the `-P` flag is a shortcut for `-p 5000` that maps port 5000
inside the container to a high port (from the range 49000 to 49900) on
-the local Docker host. We can also bind Docker container's to specific
+the local Docker host. We can also bind Docker containers to specific
ports using the `-p` flag, for example:
$ sudo docker run -d -p 5000:5000 training/webapp python app.py
diff --git a/docs/theme/mkdocs/base.html b/docs/theme/mkdocs/base.html
index 2b2b9bcbcf..c825423f89 100644
--- a/docs/theme/mkdocs/base.html
+++ b/docs/theme/mkdocs/base.html
@@ -28,15 +28,15 @@
<![endif]-->
{% if config.google_analytics %}
<script type="text/javascript">
- var _gaq = _gaq || [];
- _gaq.push(['_setAccount', '{{ config.google_analytics[0] }}']);
- _gaq.push(['_trackPageview']);
- _gaq.push(['_trackPageLoadTime']);
- (function() {
- var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
- ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
- var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
- })();
+ (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+ m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+ })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+ ga('create', '{{ config.google_analytics[0] }}', 'docker.com');
+ ga('require', 'linkid', 'linkid.js');
+ ga('send', 'pageview', {
+ 'page': location.pathname + location.search + location.hash,
+ });
</script>
{% endif %}
</head>
@@ -132,17 +132,6 @@ piCId = '1482';
})();
</script>
<script type="text/javascript">
- // Function to make the sticky header possible
- var shiftWindow = function() {
- scrollBy(0, -80);
- };
-
- window.addEventListener("hashchange", shiftWindow);
- $(window).load(function() {
- if (window.location.hash) {
- shiftWindow();
- }
- });
$(document).ready(function() {
$('#content').css("min-height", $(window).height() - 553 );
// load the complete versions list
diff --git a/docs/theme/mkdocs/footer.html b/docs/theme/mkdocs/footer.html
index 0b887b82d0..ba48c9bb13 100644
--- a/docs/theme/mkdocs/footer.html
+++ b/docs/theme/mkdocs/footer.html
@@ -56,7 +56,7 @@
<span class="footer-title">Connect</span>
<div class="search">
<span>Subscribe to our newsletter</span>
- <form action="http://www.docker.com/subscribe_newsletter/" method="post">
+ <form action="https://www.docker.com/subscribe_newsletter/" method="post">
<input type='hidden' name='csrfmiddlewaretoken' value='aWL78QXQkY8DSKNYh6cl08p5eTLl7sOa' />
<tr><th><label for="id_email">Email:</label></th><td><input class="form-control" id="id_email" name="email" placeholder="Enter your email" type="text" /></td></tr>
diff --git a/hack/release.sh b/hack/release.sh
index 2a6b3992ef..adc4280a30 100755
--- a/hack/release.sh
+++ b/hack/release.sh
@@ -290,7 +290,8 @@ echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
# Install docker
-apt-get update ; apt-get install -y lxc-docker
+apt-get update
+apt-get install -y lxc-docker
#
# Alternatively, just use the curl-able install.sh script provided at $(s3_url)