summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPaul Sherwood <paul.sherwood@codethink.co.uk>2014-08-07 22:01:07 +0000
committerPaul Sherwood <paul.sherwood@codethink.co.uk>2014-08-07 22:01:07 +0000
commit622225b48e72a2d43eeb63d5301ce3aa1fd7edb2 (patch)
tree7fefd8da54773f4365fe32c119be24c58906c015
parentb3552c73bbc69ff3e8878e4b6ef3349c8cabc434 (diff)
parentd84a070e476ce923dd03e28232564a87704613ab (diff)
downloaddocker-baserock/v1.1.2.tar.gz
Merge tag 'v1.1.2' into foobaserock/v1.1.2
v1.1.2
-rw-r--r--.dockerignore2
-rw-r--r--.gitignore1
-rw-r--r--CHANGELOG.md38
-rw-r--r--CONTRIBUTING.md6
-rw-r--r--MAINTAINERS1
-rw-r--r--Makefile2
-rw-r--r--README.md2
-rw-r--r--VERSION2
-rw-r--r--api/client/commands.go142
-rw-r--r--api/common.go2
-rw-r--r--api/server/MAINTAINERS2
-rw-r--r--api/server/server.go53
-rw-r--r--api/server/server_unit_test.go172
-rw-r--r--archive/archive.go39
-rw-r--r--archive/archive_test.go68
-rwxr-xr-xcontrib/completion/bash/docker13
-rw-r--r--contrib/completion/fish/docker.fish4
-rw-r--r--contrib/completion/zsh/_docker2
-rwxr-xr-xcontrib/init/openrc/docker.initd3
-rw-r--r--contrib/init/systemd/docker.service2
-rw-r--r--contrib/init/systemd/socket-activation/docker.service2
-rwxr-xr-xcontrib/init/sysvinit-debian/docker19
-rwxr-xr-xcontrib/init/sysvinit-redhat/docker6
-rw-r--r--contrib/man/.gitignore2
-rw-r--r--contrib/man/md/docker-kill.1.md21
-rw-r--r--contrib/man/md/docker-port.1.md15
-rw-r--r--contrib/man/md/docker-restart.1.md21
-rw-r--r--contrib/man/md/docker-start.1.md29
-rw-r--r--contrib/man/md/docker-stop.1.md22
-rw-r--r--contrib/man/old-man/docker-attach.156
-rw-r--r--contrib/man/old-man/docker-build.165
-rw-r--r--contrib/man/old-man/docker-images.184
-rw-r--r--contrib/man/old-man/docker-info.139
-rw-r--r--contrib/man/old-man/docker-inspect.1237
-rw-r--r--contrib/man/old-man/docker-rm.145
-rw-r--r--contrib/man/old-man/docker-rm.md50
-rw-r--r--contrib/man/old-man/docker-rmi.129
-rw-r--r--contrib/man/old-man/docker-run.1277
-rw-r--r--contrib/man/old-man/docker-tag.149
-rw-r--r--contrib/man/old-man/docker.1172
-rwxr-xr-xcontrib/mkimage-alpine.sh4
-rwxr-xr-xcontrib/mkimage-arch.sh22
-rwxr-xr-xcontrib/mkimage-busybox.sh2
-rwxr-xr-xcontrib/mkimage-crux.sh6
-rwxr-xr-xcontrib/mkimage-debootstrap.sh2
-rwxr-xr-xcontrib/mkimage-rinse.sh2
-rwxr-xr-xcontrib/mkimage-unittest.sh2
-rwxr-xr-xcontrib/mkimage.sh6
-rwxr-xr-xcontrib/mkimage/.febootstrap-minimize2
-rwxr-xr-xcontrib/mkimage/debootstrap91
-rwxr-xr-xcontrib/mkimage/mageia-urpmi61
-rw-r--r--daemon/container.go88
-rw-r--r--daemon/daemon.go54
-rw-r--r--daemon/execdriver/MAINTAINERS1
-rw-r--r--daemon/execdriver/lxc/lxc_init_linux.go4
-rw-r--r--daemon/execdriver/native/configuration/parse.go57
-rw-r--r--daemon/execdriver/native/configuration/parse_test.go13
-rw-r--r--daemon/execdriver/native/create.go70
-rw-r--r--daemon/execdriver/native/driver.go41
-rw-r--r--daemon/execdriver/native/info.go7
-rw-r--r--daemon/execdriver/native/template/default_template.go10
-rw-r--r--daemon/graphdriver/aufs/aufs.go2
-rw-r--r--daemon/graphdriver/vfs/driver.go19
-rw-r--r--daemon/inspect.go12
-rw-r--r--daemon/networkdriver/bridge/driver.go95
-rw-r--r--daemon/networkdriver/bridge/driver_test.go106
-rw-r--r--daemon/networkdriver/portallocator/portallocator.go81
-rw-r--r--daemon/networkdriver/portallocator/portallocator_test.go7
-rw-r--r--daemon/networkdriver/portmapper/mapper.go69
-rw-r--r--daemon/networkdriver/portmapper/mapper_test.go53
-rw-r--r--daemon/state.go115
-rw-r--r--daemon/state_test.go102
-rw-r--r--daemonconfig/config.go4
-rw-r--r--docker/docker.go20
-rw-r--r--docs/.gitignore5
-rw-r--r--docs/Dockerfile6
-rw-r--r--docs/MAINTAINERS1
-rwxr-xr-xdocs/README.md10
-rwxr-xr-xdocs/docs-update.py214
-rw-r--r--docs/man/Dockerfile (renamed from contrib/man/md/Dockerfile)0
-rw-r--r--docs/man/Dockerfile.5.md (renamed from contrib/man/md/Dockerfile.5.md)4
-rw-r--r--docs/man/README.md (renamed from contrib/man/md/README.md)10
-rw-r--r--docs/man/docker-attach.1.md (renamed from contrib/man/md/docker-attach.1.md)19
-rw-r--r--docs/man/docker-build.1.md (renamed from contrib/man/md/docker-build.1.md)38
-rw-r--r--docs/man/docker-commit.1.md (renamed from contrib/man/md/docker-commit.1.md)25
-rw-r--r--docs/man/docker-cp.1.md (renamed from contrib/man/md/docker-cp.1.md)18
-rw-r--r--docs/man/docker-diff.1.md (renamed from contrib/man/md/docker-diff.1.md)17
-rw-r--r--docs/man/docker-events.1.md (renamed from contrib/man/md/docker-events.1.md)19
-rw-r--r--docs/man/docker-export.1.md (renamed from contrib/man/md/docker-export.1.md)18
-rw-r--r--docs/man/docker-history.1.md (renamed from contrib/man/md/docker-history.1.md)20
-rw-r--r--docs/man/docker-images.1.md (renamed from contrib/man/md/docker-images.1.md)43
-rw-r--r--docs/man/docker-import.1.md (renamed from contrib/man/md/docker-import.1.md)20
-rw-r--r--docs/man/docker-info.1.md (renamed from contrib/man/md/docker-info.1.md)10
-rw-r--r--docs/man/docker-inspect.1.md (renamed from contrib/man/md/docker-inspect.1.md)20
-rw-r--r--docs/man/docker-kill.1.md24
-rw-r--r--docs/man/docker-load.1.md (renamed from contrib/man/md/docker-load.1.md)14
-rw-r--r--docs/man/docker-login.1.md (renamed from contrib/man/md/docker-login.1.md)21
-rw-r--r--docs/man/docker-logs.1.md (renamed from contrib/man/md/docker-logs.1.md)19
-rw-r--r--docs/man/docker-pause.1.md15
-rw-r--r--docs/man/docker-port.1.md16
-rw-r--r--docs/man/docker-ps.1.md (renamed from contrib/man/md/docker-ps.1.md)44
-rw-r--r--docs/man/docker-pull.1.md (renamed from contrib/man/md/docker-pull.1.md)16
-rw-r--r--docs/man/docker-push.1.md (renamed from contrib/man/md/docker-push.1.md)19
-rw-r--r--docs/man/docker-restart.1.md22
-rw-r--r--docs/man/docker-rm.1.md (renamed from contrib/man/md/docker-rm.1.md)31
-rw-r--r--docs/man/docker-rmi.1.md (renamed from contrib/man/md/docker-rmi.1.md)23
-rw-r--r--docs/man/docker-run.1.md (renamed from contrib/man/md/docker-run.1.md)121
-rw-r--r--docs/man/docker-save.1.md (renamed from contrib/man/md/docker-save.1.md)14
-rw-r--r--docs/man/docker-search.1.md (renamed from contrib/man/md/docker-search.1.md)31
-rw-r--r--docs/man/docker-start.1.md27
-rw-r--r--docs/man/docker-stop.1.md23
-rw-r--r--docs/man/docker-tag.1.md (renamed from contrib/man/md/docker-tag.1.md)20
-rw-r--r--docs/man/docker-top.1.md (renamed from contrib/man/md/docker-top.1.md)18
-rw-r--r--docs/man/docker-unpause.1.md15
-rw-r--r--docs/man/docker-version.1.md15
-rw-r--r--docs/man/docker-wait.1.md (renamed from contrib/man/md/docker-wait.1.md)17
-rw-r--r--docs/man/docker.1.md (renamed from contrib/man/md/docker.1.md)12
-rwxr-xr-xdocs/man/md2man-all.sh (renamed from contrib/man/md/md2man-all.sh)4
-rwxr-xr-xdocs/mkdocs.yml4
-rwxr-xr-xdocs/release.sh26
-rw-r--r--docs/s3_website.json1
-rw-r--r--docs/sources/articles/cfengine_process_management.md2
-rw-r--r--docs/sources/articles/dsc.md117
-rw-r--r--docs/sources/articles/https.md2
-rw-r--r--docs/sources/articles/networking.md2
-rw-r--r--docs/sources/articles/runmetrics.md8
-rw-r--r--docs/sources/articles/security.md77
-rw-r--r--docs/sources/articles/using_supervisord.md2
-rw-r--r--docs/sources/contributing/devenvironment.md4
-rw-r--r--docs/sources/docker-hub/accounts.md4
-rw-r--r--docs/sources/examples/nodejs_web_app.md16
-rw-r--r--docs/sources/examples/postgresql_service.Dockerfile2
-rw-r--r--docs/sources/examples/postgresql_service.md2
-rw-r--r--docs/sources/examples/running_ssh_service.Dockerfile2
-rw-r--r--docs/sources/examples/running_ssh_service.md2
-rw-r--r--docs/sources/faq.md42
-rw-r--r--docs/sources/index.md92
-rw-r--r--docs/sources/installation/binaries.md2
-rw-r--r--docs/sources/installation/fedora.md38
-rw-r--r--docs/sources/installation/google.md7
-rw-r--r--docs/sources/installation/mac.md14
-rw-r--r--docs/sources/installation/openSUSE.md12
-rw-r--r--docs/sources/installation/rackspace.md2
-rw-r--r--docs/sources/installation/ubuntulinux.md10
-rw-r--r--docs/sources/installation/windows.md6
-rw-r--r--docs/sources/introduction/understanding-docker.md12
-rw-r--r--docs/sources/reference/api/docker_remote_api.md34
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.0.md6
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.1.md6
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.10.md6
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.11.md16
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.12.md58
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.13.md1422
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.2.md6
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.3.md6
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.4.md8
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.5.md8
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.6.md10
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.7.md10
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.8.md10
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.9.md10
-rw-r--r--docs/sources/reference/api/hub_registry_spec.md4
-rw-r--r--docs/sources/reference/api/registry_api.md4
-rw-r--r--docs/sources/reference/api/remote_api_client_libraries.md8
-rw-r--r--docs/sources/reference/builder.md53
-rw-r--r--docs/sources/reference/commandline/cli.md224
-rw-r--r--docs/sources/reference/run.md229
-rw-r--r--docs/sources/terms/repository.md6
-rw-r--r--docs/sources/userguide/dockerhub.md2
-rw-r--r--docs/sources/userguide/dockerimages.md4
-rw-r--r--docs/sources/userguide/dockerizing.md24
-rw-r--r--docs/sources/userguide/dockerlinks.md11
-rw-r--r--docs/sources/userguide/dockerrepos.md136
-rw-r--r--docs/sources/userguide/dockervolumes.md12
-rw-r--r--docs/sources/userguide/index.md10
-rw-r--r--docs/sources/userguide/usingdocker.md16
-rw-r--r--docs/theme/mkdocs/base.html7
-rw-r--r--docs/theme/mkdocs/css/main.css4
-rw-r--r--docs/theme/mkdocs/header.html6
-rw-r--r--engine/env.go21
-rw-r--r--engine/env_test.go167
-rw-r--r--graph/graph.go7
-rw-r--r--graph/service.go5
-rw-r--r--hack/RELEASE-CHECKLIST.md3
-rwxr-xr-xhack/dind2
-rw-r--r--hack/infrastructure/README.md13
-rw-r--r--hack/make/dynbinary1
-rw-r--r--hack/make/ubuntu8
-rwxr-xr-xhack/vendor.sh6
-rw-r--r--image/image.go16
-rw-r--r--integration-cli/build_tests/TestBuildAddTar/1/Dockerfile3
-rw-r--r--integration-cli/build_tests/TestBuildAddTar/1/test.tarbin0 -> 2560 bytes
-rw-r--r--integration-cli/build_tests/TestBuildAddTar/2/Dockerfile3
-rw-r--r--integration-cli/build_tests/TestBuildAddTar/2/test.tarbin0 -> 2560 bytes
-rw-r--r--integration-cli/build_tests/TestContextTar/Dockerfile3
-rw-r--r--integration-cli/build_tests/TestContextTar/foo1
-rw-r--r--integration-cli/docker_cli_build_test.go319
-rw-r--r--integration-cli/docker_cli_commit_test.go27
-rw-r--r--integration-cli/docker_cli_diff_test.go2
-rw-r--r--integration-cli/docker_cli_events_test.go29
-rw-r--r--integration-cli/docker_cli_history_test.go18
-rw-r--r--integration-cli/docker_cli_images_test.go42
-rw-r--r--integration-cli/docker_cli_links_test.go33
-rw-r--r--integration-cli/docker_cli_logs_test.go44
-rw-r--r--integration-cli/docker_cli_rm_test.go2
-rw-r--r--integration-cli/docker_cli_run_test.go37
-rw-r--r--integration-cli/docker_cli_save_load_test.go70
-rw-r--r--integration-cli/docker_cli_search_test.go8
-rw-r--r--integration-cli/docker_cli_tag_test.go22
-rw-r--r--integration-cli/docker_utils.go25
-rw-r--r--integration/api_test.go139
-rw-r--r--integration/commands_test.go300
-rw-r--r--integration/container_test.go21
-rw-r--r--integration/runtime_test.go6
-rw-r--r--integration/sorter_test.go56
-rw-r--r--integration/utils_test.go8
-rw-r--r--pkg/dockerscript/MAINTAINERS1
-rw-r--r--pkg/dockerscript/dockerscript.go121
-rw-r--r--pkg/dockerscript/scanner/extra.go21
-rw-r--r--pkg/dockerscript/scanner/scanner.go673
-rw-r--r--pkg/mflag/MAINTAINERS2
-rw-r--r--pkg/mflag/flag.go24
-rw-r--r--pkg/mount/flags.go62
-rw-r--r--pkg/mount/flags_freebsd.go28
-rw-r--r--pkg/mount/flags_linux.go73
-rw-r--r--pkg/mount/flags_unsupported.go23
-rw-r--r--pkg/mount/mount.go7
-rw-r--r--pkg/mount/mount_test.go55
-rw-r--r--pkg/mount/mounter_freebsd.go59
-rw-r--r--pkg/mount/mounter_unsupported.go2
-rw-r--r--pkg/mount/mountinfo.go72
-rw-r--r--pkg/mount/mountinfo_freebsd.go38
-rw-r--r--pkg/mount/mountinfo_linux.go73
-rw-r--r--pkg/mount/mountinfo_test_linux.go (renamed from pkg/mount/mountinfo_test.go)0
-rw-r--r--pkg/mount/mountinfo_unsupported.go12
-rw-r--r--pkg/networkfs/MAINTAINERS2
-rw-r--r--pkg/symlink/fs.go8
-rw-r--r--pkg/sysinfo/MAINTAINERS1
-rw-r--r--pkg/system/MAINTAINERS1
-rw-r--r--pkg/tailfile/tailfile.go61
-rw-r--r--pkg/tailfile/tailfile_test.go148
-rw-r--r--pkg/testutils/testutils.go14
-rw-r--r--pkg/truncindex/truncindex.go106
-rw-r--r--pkg/truncindex/truncindex_test.go401
-rw-r--r--runconfig/config_test.go17
-rw-r--r--runconfig/parse.go10
-rw-r--r--runconfig/parse_test.go12
-rw-r--r--server/MAINTAINERS2
-rw-r--r--server/buildfile.go7
-rw-r--r--server/server.go94
-rw-r--r--utils/stdcopy.go28
-rw-r--r--utils/utils.go95
-rw-r--r--utils/utils_test.go102
-rw-r--r--vendor/src/github.com/docker/libcontainer/README.md45
-rw-r--r--vendor/src/github.com/docker/libcontainer/api.go23
-rw-r--r--vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go9
-rw-r--r--vendor/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go82
-rw-r--r--vendor/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go86
-rw-r--r--vendor/src/github.com/docker/libcontainer/container.go65
-rw-r--r--vendor/src/github.com/docker/libcontainer/container_test.go106
-rw-r--r--vendor/src/github.com/docker/libcontainer/mount/init.go29
-rw-r--r--vendor/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go5
-rw-r--r--vendor/src/github.com/docker/libcontainer/mount/types.go48
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/create.go2
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/exec.go59
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/execin.go10
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/init.go46
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/pid.go28
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe.go60
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_linux.go20
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_test.go61
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/types.go50
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/types_linux.go (renamed from vendor/src/github.com/docker/libcontainer/types_linux.go)2
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/types_test.go (renamed from vendor/src/github.com/docker/libcontainer/types_test.go)16
-rw-r--r--vendor/src/github.com/docker/libcontainer/namespaces/unsupported.go16
-rw-r--r--vendor/src/github.com/docker/libcontainer/network/loopback.go7
-rw-r--r--vendor/src/github.com/docker/libcontainer/network/netns.go16
-rw-r--r--vendor/src/github.com/docker/libcontainer/network/network.go5
-rw-r--r--vendor/src/github.com/docker/libcontainer/network/stats.go68
-rw-r--r--vendor/src/github.com/docker/libcontainer/network/strategy.go8
-rw-r--r--vendor/src/github.com/docker/libcontainer/network/types.go40
-rw-r--r--vendor/src/github.com/docker/libcontainer/network/veth.go35
-rw-r--r--vendor/src/github.com/docker/libcontainer/nsinit/exec.go15
-rw-r--r--vendor/src/github.com/docker/libcontainer/nsinit/spec.go2
-rw-r--r--vendor/src/github.com/docker/libcontainer/nsinit/stats.go12
-rw-r--r--vendor/src/github.com/docker/libcontainer/nsinit/utils.go24
-rw-r--r--vendor/src/github.com/docker/libcontainer/sample_configs/README.md5
-rw-r--r--vendor/src/github.com/docker/libcontainer/sample_configs/apparmor.json196
-rw-r--r--vendor/src/github.com/docker/libcontainer/sample_configs/attach_to_bridge.json (renamed from vendor/src/github.com/docker/libcontainer/container.json)119
-rw-r--r--vendor/src/github.com/docker/libcontainer/sample_configs/minimal.json195
-rw-r--r--vendor/src/github.com/docker/libcontainer/sample_configs/selinux.json197
-rw-r--r--vendor/src/github.com/docker/libcontainer/security/capabilities/capabilities.go15
-rw-r--r--vendor/src/github.com/docker/libcontainer/security/capabilities/types.go90
-rw-r--r--vendor/src/github.com/docker/libcontainer/security/capabilities/types_test.go19
-rw-r--r--vendor/src/github.com/docker/libcontainer/state.go55
-rw-r--r--vendor/src/github.com/docker/libcontainer/types.go164
-rw-r--r--vendor/src/github.com/gorilla/context/context.go21
-rw-r--r--vendor/src/github.com/tchap/go-patricia/.gitignore25
-rw-r--r--vendor/src/github.com/tchap/go-patricia/AUTHORS3
-rw-r--r--vendor/src/github.com/tchap/go-patricia/LICENSE20
-rw-r--r--vendor/src/github.com/tchap/go-patricia/README.md120
-rw-r--r--vendor/src/github.com/tchap/go-patricia/patricia/children.go230
-rw-r--r--vendor/src/github.com/tchap/go-patricia/patricia/patricia.go432
-rw-r--r--vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go161
-rw-r--r--vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go659
-rw-r--r--vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go78
306 files changed, 10034 insertions, 4802 deletions
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..37abdef44f
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+bundles
+.gopath
diff --git a/.gitignore b/.gitignore
index 4f8f09c775..2a86e41caf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,3 +26,4 @@ Vagrantfile
docs/AWS_S3_BUCKET
docs/GIT_BRANCH
docs/VERSION
+docs/GITCOMMIT
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6a7d3c2e19..8ec9ce3df0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,43 @@
# Changelog
+## 1.1.2 (2014-07-23)
+
+#### Runtime
++ Fix port allocation for existing containers
++ Fix containers restart on daemon restart
+
+#### Packaging
++ Fix /etc/init.d/docker issue on Debian
+
+## 1.1.1 (2014-07-09)
+
+#### Builder
+* Fix issue with ADD
+
+## 1.1.0 (2014-07-03)
+
+#### Notable features since 1.0.1
++ Add `.dockerignore` support
++ Pause containers during `docker commit`
++ Add `--tail` to `docker logs`
+
+#### Builder
++ Allow a tar file as context for `docker build`
+* Fix issue with white-spaces and multi-lines in `Dockerfiles`
+
+#### Runtime
+* Overall performance improvements
+* Allow `/` as source of `docker run -v`
+* Fix port allocation
+* Fix bug in `docker save`
+* Add links information to `docker inspect`
+
+#### Client
+* Improve command line parsing for `docker commit`
+
+#### Remote API
+* Improve status code for the `start` and `stop` endpoints
+
## 1.0.1 (2014-06-19)
#### Notable features since 1.0.0
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index cb5c806514..d07b972eb7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -9,7 +9,7 @@ feels wrong or incomplete.
When reporting [issues](https://github.com/dotcloud/docker/issues)
on GitHub please include your host OS (Ubuntu 12.04, Fedora 19, etc),
the output of `uname -a` and the output of `docker version` along with
-the output of `docker info`. Please include the steps required to reproduce
+the output of `docker -D info`. Please include the steps required to reproduce
the problem if possible and applicable.
This information will help us review and fix your issue faster.
@@ -17,7 +17,7 @@ This information will help us review and fix your issue faster.
For instructions on setting up your development environment, please
see our dedicated [dev environment setup
-docs](http://docs.docker.io/en/latest/contributing/devenvironment/).
+docs](http://docs.docker.com/contributing/devenvironment/).
## Contribution guidelines
@@ -190,7 +190,7 @@ There are several exceptions to the signing requirement. Currently these are:
* Your patch fixes Markdown formatting or syntax errors in the
documentation contained in the `docs` directory.
-If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io)
+If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.com)
### How can I become a maintainer?
diff --git a/MAINTAINERS b/MAINTAINERS
index 059ff79f09..2947eb355e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6,3 +6,4 @@ Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
AUTHORS: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Dockerfile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
Makefile: Tianon Gravi <admwiggin@gmail.com> (@tianon)
+.dockerignore: Tianon Gravi <admwiggin@gmail.com> (@tianon)
diff --git a/Makefile b/Makefile
index a8e4dc5ca1..2d07b39c3b 100644
--- a/Makefile
+++ b/Makefile
@@ -6,6 +6,7 @@ BINDDIR := bundles
DOCSPORT := 8000
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
+GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null)
DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)")
@@ -59,6 +60,7 @@ docs-build:
cp ./VERSION docs/VERSION
echo "$(GIT_BRANCH)" > docs/GIT_BRANCH
echo "$(AWS_S3_BUCKET)" > docs/AWS_S3_BUCKET
+ echo "$(GITCOMMIT)" > docs/GITCOMMIT
docker build -t "$(DOCKER_DOCS_IMAGE)" docs
bundles:
diff --git a/README.md b/README.md
index 608e638eab..3c378de6f4 100644
--- a/README.md
+++ b/README.md
@@ -160,7 +160,7 @@ Docker can be used to run short-lived commands, long-running daemons
(app servers, databases etc.), interactive shell sessions, etc.
You can find a [list of real-world
-examples](http://docs.docker.io/en/latest/examples/) in the
+examples](http://docs.docker.com/examples/) in the
documentation.
Under the hood
diff --git a/VERSION b/VERSION
index 7dea76edb3..45a1b3f445 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.0.1
+1.1.2
diff --git a/api/client/commands.go b/api/client/commands.go
index 0cdf3f1acb..df2125f5f3 100644
--- a/api/client/commands.go
+++ b/api/client/commands.go
@@ -13,6 +13,7 @@ import (
"os"
"os/exec"
"path"
+ "path/filepath"
"runtime"
"strconv"
"strings"
@@ -36,6 +37,10 @@ import (
"github.com/dotcloud/docker/utils/filters"
)
+const (
+ tarHeaderSize = 512
+)
+
func (cli *DockerCli) CmdHelp(args ...string) error {
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
@@ -51,7 +56,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
{"attach", "Attach to a running container"},
{"build", "Build an image from a Dockerfile"},
{"commit", "Create a new image from a container's changes"},
- {"cp", "Copy files/folders from the containers filesystem to the host path"},
+ {"cp", "Copy files/folders from a container's filesystem to the host path"},
{"diff", "Inspect changes on a container's filesystem"},
{"events", "Get real time events from the server"},
{"export", "Stream the contents of a container as a tar archive"},
@@ -62,25 +67,25 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
{"inspect", "Return low-level information on a container"},
{"kill", "Kill a running container"},
{"load", "Load an image from a tar archive"},
- {"login", "Register or Login to the docker registry server"},
+ {"login", "Register or log in to the Docker registry server"},
{"logs", "Fetch the logs of a container"},
- {"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
+ {"port", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT"},
{"pause", "Pause all processes within a container"},
{"ps", "List containers"},
- {"pull", "Pull an image or a repository from the docker registry server"},
- {"push", "Push an image or a repository to the docker registry server"},
+ {"pull", "Pull an image or a repository from a Docker registry server"},
+ {"push", "Push an image or a repository to a Docker registry server"},
{"restart", "Restart a running container"},
{"rm", "Remove one or more containers"},
{"rmi", "Remove one or more images"},
{"run", "Run a command in a new container"},
{"save", "Save an image to a tar archive"},
- {"search", "Search for an image in the docker index"},
+ {"search", "Search for an image on the Docker Hub"},
{"start", "Start a stopped container"},
{"stop", "Stop a running container"},
{"tag", "Tag an image into a repository"},
{"top", "Lookup the running processes of a container"},
{"unpause", "Unpause a paused container"},
- {"version", "Show the docker version information"},
+ {"version", "Show the Docker version information"},
{"wait", "Block until a container stops, then print its exit code"},
} {
help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1])
@@ -113,13 +118,22 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
_, err = exec.LookPath("git")
hasGit := err == nil
if cmd.Arg(0) == "-" {
- // As a special case, 'docker build -' will build from an empty context with the
- // contents of stdin as a Dockerfile
- dockerfile, err := ioutil.ReadAll(cli.in)
- if err != nil {
- return err
+ // As a special case, 'docker build -' will build from either an empty context with the
+ // contents of stdin as a Dockerfile, or a tar-ed context from stdin.
+ buf := bufio.NewReader(cli.in)
+ magic, err := buf.Peek(tarHeaderSize)
+ if err != nil && err != io.EOF {
+ return fmt.Errorf("failed to peek context header from STDIN: %v", err)
+ }
+ if !archive.IsArchive(magic) {
+ dockerfile, err := ioutil.ReadAll(buf)
+ if err != nil {
+ return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
+ }
+ context, err = archive.Generate("Dockerfile", string(dockerfile))
+ } else {
+ context = ioutil.NopCloser(buf)
}
- context, err = archive.Generate("Dockerfile", string(dockerfile))
} else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) {
isRemote = true
} else {
@@ -150,7 +164,25 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
if err = utils.ValidateContextDirectory(root); err != nil {
return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
}
- context, err = archive.Tar(root, archive.Uncompressed)
+ options := &archive.TarOptions{
+ Compression: archive.Uncompressed,
+ }
+ if ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")); err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("Error reading .dockerignore: '%s'", err)
+ } else if err == nil {
+ for _, pattern := range strings.Split(string(ignore), "\n") {
+ ok, err := filepath.Match(pattern, "Dockerfile")
+ if err != nil {
+ utils.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err)
+ continue
+ }
+ if ok {
+ return fmt.Errorf("Dockerfile was excluded by .dockerignore pattern '%s'", pattern)
+ }
+ options.Excludes = append(options.Excludes, pattern)
+ }
+ }
+ context, err = archive.TarWithOptions(root, options)
}
var body io.Reader
// Setup an upload progress bar
@@ -216,7 +248,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
// 'docker login': login / register a user to registry service.
func (cli *DockerCli) CmdLogin(args ...string) error {
- cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.")
+ cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or log in to a Docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.")
var username, password, email string
@@ -342,7 +374,7 @@ func (cli *DockerCli) CmdWait(args ...string) error {
// 'docker version': show version information
func (cli *DockerCli) CmdVersion(args ...string) error {
- cmd := cli.Subcmd("version", "", "Show the docker version information.")
+ cmd := cli.Subcmd("version", "", "Show the Docker version information.")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -439,6 +471,9 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
if initPath := remoteInfo.Get("InitPath"); initPath != "" {
fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
}
+ if len(remoteInfo.GetList("Sockets")) != 0 {
+ fmt.Fprintf(cli.out, "Sockets: %v\n", remoteInfo.GetList("Sockets"))
+ }
}
if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
@@ -462,8 +497,8 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
}
func (cli *DockerCli) CmdStop(args ...string) error {
- cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)")
- nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.")
+ cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container by sending SIGTERM and then SIGKILL after a grace period")
+ nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -490,7 +525,7 @@ func (cli *DockerCli) CmdStop(args ...string) error {
func (cli *DockerCli) CmdRestart(args ...string) error {
cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
- nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10")
+ nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -547,8 +582,8 @@ func (cli *DockerCli) CmdStart(args ...string) error {
tty bool
cmd = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
- attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process")
- openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin")
+ attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's STDOUT and STDERR and forward all signals to the process")
+ openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN")
)
if err := cmd.Parse(args); err != nil {
@@ -679,7 +714,7 @@ func (cli *DockerCli) CmdPause(args ...string) error {
}
func (cli *DockerCli) CmdInspect(args ...string) error {
- cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image")
+ cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image")
tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.")
if err := cmd.Parse(args); err != nil {
return nil
@@ -759,7 +794,7 @@ func (cli *DockerCli) CmdInspect(args ...string) error {
}
func (cli *DockerCli) CmdTop(args ...string) error {
- cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container")
+ cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Display the running processes of a container")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -794,7 +829,7 @@ func (cli *DockerCli) CmdTop(args ...string) error {
}
func (cli *DockerCli) CmdPort(args ...string) error {
- cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
+ cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port that is NAT-ed to PRIVATE_PORT")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -842,7 +877,7 @@ func (cli *DockerCli) CmdPort(args ...string) error {
func (cli *DockerCli) CmdRmi(args ...string) error {
var (
cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
- force = cmd.Bool([]string{"f", "-force"}, false, "Force")
+ force = cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image")
noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents")
)
if err := cmd.Parse(args); err != nil {
@@ -945,7 +980,7 @@ func (cli *DockerCli) CmdHistory(args ...string) error {
func (cli *DockerCli) CmdRm(args ...string) error {
cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
- v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated to the container")
+ v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container")
force := cmd.Bool([]string{"f", "-force"}, false, "Force removal of running container")
@@ -982,7 +1017,7 @@ func (cli *DockerCli) CmdRm(args ...string) error {
// 'docker kill NAME' kills a running container
func (cli *DockerCli) CmdKill(args ...string) error {
- cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)")
+ cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container using SIGKILL or a specified signal")
signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
if err := cmd.Parse(args); err != nil {
@@ -1114,7 +1149,7 @@ func (cli *DockerCli) CmdPush(args ...string) error {
func (cli *DockerCli) CmdPull(args ...string) error {
cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry")
- tag := cmd.String([]string{"#t", "#-tag"}, "", "Download tagged image in repository")
+ tag := cmd.String([]string{"#t", "#-tag"}, "", "Download tagged image in a repository")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -1503,25 +1538,21 @@ func (cli *DockerCli) CmdPs(args ...string) error {
func (cli *DockerCli) CmdCommit(args ...string) error {
cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
+ flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
- flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith <hannibal@a-team.com>\"")
+ flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <hannibal@a-team.com>\")")
// FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
- flConfig := cmd.String([]string{"#run", "#-run"}, "", "this option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
+ flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
if err := cmd.Parse(args); err != nil {
return nil
}
- var name, repository, tag string
-
- if cmd.NArg() == 3 {
- fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n")
- name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
- } else {
- name = cmd.Arg(0)
+ var (
+ name = cmd.Arg(0)
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
- }
+ )
- if name == "" {
+ if name == "" || len(cmd.Args()) > 2 {
cmd.Usage()
return nil
}
@@ -1539,6 +1570,11 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
v.Set("tag", tag)
v.Set("comment", *flComment)
v.Set("author", *flAuthor)
+
+ if *flPause != true {
+ v.Set("pause", "0")
+ }
+
var (
config *runconfig.Config
env engine.Env
@@ -1657,6 +1693,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
cmd = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
times = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
+ tail = cmd.String([]string{"-tail"}, "all", "Output the specified number of lines at the end of logs (defaults to all logs)")
)
if err := cmd.Parse(args); err != nil {
@@ -1690,6 +1727,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
if *follow {
v.Set("follow", "1")
}
+ v.Set("tail", *tail)
return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil)
}
@@ -1697,8 +1735,8 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
func (cli *DockerCli) CmdAttach(args ...string) error {
var (
cmd = cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
- noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin")
- proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)")
+ noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
+ proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied.")
)
if err := cmd.Parse(args); err != nil {
@@ -1769,11 +1807,11 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
}
func (cli *DockerCli) CmdSearch(args ...string) error {
- cmd := cli.Subcmd("search", "TERM", "Search the docker index for images")
+ cmd := cli.Subcmd("search", "TERM", "Search the Docker Hub for images")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds")
automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds")
- stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx stars")
+ stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only display images with at least x stars")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -1829,21 +1867,15 @@ func (cli *DockerCli) CmdTag(args ...string) error {
if err := cmd.Parse(args); err != nil {
return nil
}
- if cmd.NArg() != 2 && cmd.NArg() != 3 {
+ if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
- var repository, tag string
-
- if cmd.NArg() == 3 {
- fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]]\n")
- repository, tag = cmd.Arg(1), cmd.Arg(2)
- } else {
+ var (
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
- }
-
- v := url.Values{}
+ v = url.Values{}
+ )
//Check if the given image name can be resolved
if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
@@ -1906,7 +1938,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
}
if cidFileInfo.Size() == 0 {
if err := os.Remove(hostConfig.ContainerIDFile); err != nil {
- fmt.Printf("failed to remove CID file '%s': %s \n", hostConfig.ContainerIDFile, err)
+ fmt.Printf("failed to remove Container ID file '%s': %s \n", hostConfig.ContainerIDFile, err)
}
}
}()
@@ -2156,7 +2188,7 @@ func (cli *DockerCli) CmdCp(args ...string) error {
}
func (cli *DockerCli) CmdSave(args ...string) error {
- cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout by default)")
+ cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to STDOUT by default)")
outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT")
if err := cmd.Parse(args); err != nil {
diff --git a/api/common.go b/api/common.go
index a20c5d7d1c..e73705000c 100644
--- a/api/common.go
+++ b/api/common.go
@@ -11,7 +11,7 @@ import (
)
const (
- APIVERSION version.Version = "1.12"
+ APIVERSION version.Version = "1.13"
DEFAULTHTTPHOST = "127.0.0.1"
DEFAULTUNIXSOCKET = "/var/run/docker.sock"
)
diff --git a/api/server/MAINTAINERS b/api/server/MAINTAINERS
new file mode 100644
index 0000000000..c92a061143
--- /dev/null
+++ b/api/server/MAINTAINERS
@@ -0,0 +1,2 @@
+Victor Vieux <vieux@docker.com> (@vieux)
+Johan Euphrosine <proppy@google.com> (@proppy)
diff --git a/api/server/server.go b/api/server/server.go
index ce1bdbd39e..b3a0590fda 100644
--- a/api/server/server.go
+++ b/api/server/server.go
@@ -370,13 +370,24 @@ func getContainersLogs(eng *engine.Engine, version version.Version, w http.Respo
}
var (
- job = eng.Job("container_inspect", vars["name"])
- c, err = job.Stdout.AddEnv()
+ inspectJob = eng.Job("container_inspect", vars["name"])
+ logsJob = eng.Job("logs", vars["name"])
+ c, err = inspectJob.Stdout.AddEnv()
)
if err != nil {
return err
}
- if err = job.Run(); err != nil {
+ logsJob.Setenv("follow", r.Form.Get("follow"))
+ logsJob.Setenv("tail", r.Form.Get("tail"))
+ logsJob.Setenv("stdout", r.Form.Get("stdout"))
+ logsJob.Setenv("stderr", r.Form.Get("stderr"))
+ logsJob.Setenv("timestamps", r.Form.Get("timestamps"))
+ // Validate args here, because we can't return a non-StatusOK status code once job.Run() has been called
+ stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr")
+ if !(stdout || stderr) {
+ return fmt.Errorf("Bad parameters: you must choose at least one stream")
+ }
+ if err = inspectJob.Run(); err != nil {
return err
}
@@ -390,14 +401,9 @@ func getContainersLogs(eng *engine.Engine, version version.Version, w http.Respo
errStream = outStream
}
- job = eng.Job("logs", vars["name"])
- job.Setenv("follow", r.Form.Get("follow"))
- job.Setenv("stdout", r.Form.Get("stdout"))
- job.Setenv("stderr", r.Form.Get("stderr"))
- job.Setenv("timestamps", r.Form.Get("timestamps"))
- job.Stdout.Add(outStream)
- job.Stderr.Set(errStream)
- if err := job.Run(); err != nil {
+ logsJob.Stdout.Add(outStream)
+ logsJob.Stderr.Set(errStream)
+ if err := logsJob.Run(); err != nil {
fmt.Fprintf(outStream, "Error running logs job: %s\n", err)
}
return nil
@@ -434,6 +440,12 @@ func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWrit
utils.Errorf("%s", err)
}
+ if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") {
+ job.Setenv("pause", "1")
+ } else {
+ job.Setenv("pause", r.FormValue("pause"))
+ }
+
job.Setenv("repo", r.Form.Get("repo"))
job.Setenv("tag", r.Form.Get("tag"))
job.Setenv("author", r.Form.Get("author"))
@@ -688,8 +700,11 @@ func postContainersStart(eng *engine.Engine, version version.Version, w http.Res
if vars == nil {
return fmt.Errorf("Missing parameter")
}
- name := vars["name"]
- job := eng.Job("start", name)
+ var (
+ name = vars["name"]
+ job = eng.Job("start", name)
+ )
+
// allow a nil body for backwards compatibility
if r.Body != nil {
if api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
@@ -699,6 +714,10 @@ func postContainersStart(eng *engine.Engine, version version.Version, w http.Res
}
}
if err := job.Run(); err != nil {
+ if err.Error() == "Container already started" {
+ w.WriteHeader(http.StatusNotModified)
+ return nil
+ }
return err
}
w.WriteHeader(http.StatusNoContent)
@@ -715,6 +734,10 @@ func postContainersStop(eng *engine.Engine, version version.Version, w http.Resp
job := eng.Job("stop", vars["name"])
job.Setenv("t", r.Form.Get("t"))
if err := job.Run(); err != nil {
+ if err.Error() == "Container already stopped" {
+ w.WriteHeader(http.StatusNotModified)
+ return nil
+ }
return err
}
w.WriteHeader(http.StatusNoContent)
@@ -855,7 +878,7 @@ func getContainersByName(eng *engine.Engine, version version.Version, w http.Res
}
var job = eng.Job("container_inspect", vars["name"])
if version.LessThan("1.12") {
- job.SetenvBool("dirty", true)
+ job.SetenvBool("raw", true)
}
streamJSON(job, w, false)
return job.Run()
@@ -867,7 +890,7 @@ func getImagesByName(eng *engine.Engine, version version.Version, w http.Respons
}
var job = eng.Job("image_inspect", vars["name"])
if version.LessThan("1.12") {
- job.SetenvBool("dirty", true)
+ job.SetenvBool("raw", true)
}
streamJSON(job, w, false)
return job.Run()
diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go
index 32f8e42b18..2d14f89551 100644
--- a/api/server/server_unit_test.go
+++ b/api/server/server_unit_test.go
@@ -4,12 +4,14 @@ import (
"bytes"
"encoding/json"
"fmt"
- "github.com/dotcloud/docker/api"
- "github.com/dotcloud/docker/engine"
"io"
"net/http"
"net/http/httptest"
+ "strings"
"testing"
+
+ "github.com/dotcloud/docker/api"
+ "github.com/dotcloud/docker/engine"
)
func TestGetBoolParam(t *testing.T) {
@@ -151,6 +153,172 @@ func TestGetContainersByName(t *testing.T) {
}
}
+func TestGetEvents(t *testing.T) {
+ eng := engine.New()
+ var called bool
+ eng.Register("events", func(job *engine.Job) engine.Status {
+ called = true
+ since := job.Getenv("since")
+ if since != "1" {
+ t.Fatalf("'since' should be 1, found %#v instead", since)
+ }
+ until := job.Getenv("until")
+ if until != "0" {
+ t.Fatalf("'until' should be 0, found %#v instead", until)
+ }
+ v := &engine.Env{}
+ v.Set("since", since)
+ v.Set("until", until)
+ if _, err := v.WriteTo(job.Stdout); err != nil {
+ return job.Error(err)
+ }
+ return engine.StatusOK
+ })
+ r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t)
+ if !called {
+ t.Fatal("handler was not called")
+ }
+ if r.HeaderMap.Get("Content-Type") != "application/json" {
+ t.Fatalf("%#v\n", r)
+ }
+ var stdout_json struct {
+ Since int
+ Until int
+ }
+ if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil {
+ t.Fatalf("%#v", err)
+ }
+ if stdout_json.Since != 1 {
+ t.Fatalf("since != 1: %#v", stdout_json.Since)
+ }
+ if stdout_json.Until != 0 {
+ t.Fatalf("until != 0: %#v", stdout_json.Until)
+ }
+}
+
+func TestLogs(t *testing.T) {
+ eng := engine.New()
+ var inspect bool
+ var logs bool
+ eng.Register("container_inspect", func(job *engine.Job) engine.Status {
+ inspect = true
+ if len(job.Args) == 0 {
+ t.Fatal("Job arguments is empty")
+ }
+ if job.Args[0] != "test" {
+ t.Fatalf("Container name %s, must be test", job.Args[0])
+ }
+ return engine.StatusOK
+ })
+ expected := "logs"
+ eng.Register("logs", func(job *engine.Job) engine.Status {
+ logs = true
+ if len(job.Args) == 0 {
+ t.Fatal("Job arguments is empty")
+ }
+ if job.Args[0] != "test" {
+ t.Fatalf("Container name %s, must be test", job.Args[0])
+ }
+ follow := job.Getenv("follow")
+ if follow != "1" {
+ t.Fatalf("follow: %s, must be 1", follow)
+ }
+ stdout := job.Getenv("stdout")
+ if stdout != "1" {
+ t.Fatalf("stdout %s, must be 1", stdout)
+ }
+ stderr := job.Getenv("stderr")
+ if stderr != "" {
+ t.Fatalf("stderr %s, must be empty", stderr)
+ }
+ timestamps := job.Getenv("timestamps")
+ if timestamps != "1" {
+ t.Fatalf("timestamps %s, must be 1", timestamps)
+ }
+ job.Stdout.Write([]byte(expected))
+ return engine.StatusOK
+ })
+ r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1&timestamps=1", nil, eng, t)
+ if r.Code != http.StatusOK {
+ t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
+ }
+ if !inspect {
+ t.Fatal("container_inspect job was not called")
+ }
+ if !logs {
+ t.Fatal("logs job was not called")
+ }
+ res := r.Body.String()
+ if res != expected {
+ t.Fatalf("Output %s, expected %s", res, expected)
+ }
+}
+
+func TestLogsNoStreams(t *testing.T) {
+ eng := engine.New()
+ var inspect bool
+ var logs bool
+ eng.Register("container_inspect", func(job *engine.Job) engine.Status {
+ inspect = true
+ if len(job.Args) == 0 {
+ t.Fatal("Job arguments is empty")
+ }
+ if job.Args[0] != "test" {
+ t.Fatalf("Container name %s, must be test", job.Args[0])
+ }
+ return engine.StatusOK
+ })
+ eng.Register("logs", func(job *engine.Job) engine.Status {
+ logs = true
+ return engine.StatusOK
+ })
+ r := serveRequest("GET", "/containers/test/logs", nil, eng, t)
+ if r.Code != http.StatusBadRequest {
+ t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest)
+ }
+ if inspect {
+ t.Fatal("container_inspect job was called, but it shouldn't")
+ }
+ if logs {
+ t.Fatal("logs job was called, but it shouldn't")
+ }
+ res := strings.TrimSpace(r.Body.String())
+ expected := "Bad parameters: you must choose at least one stream"
+ if !strings.Contains(res, expected) {
+ t.Fatalf("Output %s, expected %s in it", res, expected)
+ }
+}
+
+func TestGetImagesHistory(t *testing.T) {
+ eng := engine.New()
+ imageName := "docker-test-image"
+ var called bool
+ eng.Register("history", func(job *engine.Job) engine.Status {
+ called = true
+ if len(job.Args) == 0 {
+ t.Fatal("Job arguments is empty")
+ }
+ if job.Args[0] != imageName {
+ t.Fatalf("name != '%s': %#v", imageName, job.Args[0])
+ }
+ v := &engine.Env{}
+ if _, err := v.WriteTo(job.Stdout); err != nil {
+ return job.Error(err)
+ }
+ return engine.StatusOK
+ })
+ r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t)
+ if !called {
+ t.Fatalf("handler was not called")
+ }
+ if r.Code != http.StatusOK {
+ t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK)
+ }
+ if r.HeaderMap.Get("Content-Type") != "application/json" {
+ t.Fatalf("%#v\n", r)
+ }
+}
+
func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder {
r := httptest.NewRecorder()
req, err := http.NewRequest(method, target, body)
diff --git a/archive/archive.go b/archive/archive.go
index 1982218b46..2ba62f5363 100644
--- a/archive/archive.go
+++ b/archive/archive.go
@@ -27,6 +27,7 @@ type (
Compression int
TarOptions struct {
Includes []string
+ Excludes []string
Compression Compression
NoLchown bool
}
@@ -43,6 +44,16 @@ const (
Xz
)
+func IsArchive(header []byte) bool {
+ compression := DetectCompression(header)
+ if compression != Uncompressed {
+ return true
+ }
+ r := tar.NewReader(bytes.NewBuffer(header))
+ _, err := r.Next()
+ return err == nil
+}
+
func DetectCompression(source []byte) Compression {
for compression, m := range map[Compression][]byte{
Bzip2: {0x42, 0x5A, 0x68},
@@ -276,7 +287,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
// Tar creates an archive from the directory at `path`, and returns it as a
// stream of bytes.
func Tar(path string, compression Compression) (io.ReadCloser, error) {
- return TarFilter(path, &TarOptions{Compression: compression})
+ return TarWithOptions(path, &TarOptions{Compression: compression})
}
func escapeName(name string) string {
@@ -295,12 +306,9 @@ func escapeName(name string) string {
return string(escaped)
}
-// TarFilter creates an archive from the directory at `srcPath` with `options`, and returns it as a
-// stream of bytes.
-//
-// Files are included according to `options.Includes`, default to including all files.
-// Stream is compressed according to `options.Compression', default to Uncompressed.
-func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.Includes` (if non-nil) or not in `options.Excludes`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
pipeReader, pipeWriter := io.Pipe()
compressWriter, err := CompressStream(pipeWriter, options.Compression)
@@ -332,6 +340,21 @@ func TarFilter(srcPath string, options *TarOptions) (io.ReadCloser, error) {
return nil
}
+ for _, exclude := range options.Excludes {
+ matched, err := filepath.Match(exclude, relFilePath)
+ if err != nil {
+ utils.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
+ return err
+ }
+ if matched {
+ utils.Debugf("Skipping excluded path: %s", relFilePath)
+ if f.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+ }
+
if err := addTarFile(filePath, relFilePath, tw); err != nil {
utils.Debugf("Can't add file %s to tar: %s\n", srcPath, err)
}
@@ -443,7 +466,7 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
// TarUntar aborts and returns the error.
func TarUntar(src string, dst string) error {
utils.Debugf("TarUntar(%s %s)", src, dst)
- archive, err := TarFilter(src, &TarOptions{Compression: Uncompressed})
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
if err != nil {
return err
}
diff --git a/archive/archive_test.go b/archive/archive_test.go
index ea34f0798b..61ee0af8e7 100644
--- a/archive/archive_test.go
+++ b/archive/archive_test.go
@@ -63,8 +63,8 @@ func TestCmdStreamGood(t *testing.T) {
}
}
-func tarUntar(t *testing.T, origin string, compression Compression) error {
- archive, err := Tar(origin, compression)
+func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) {
+ archive, err := TarWithOptions(origin, options)
if err != nil {
t.Fatal(err)
}
@@ -72,37 +72,29 @@ func tarUntar(t *testing.T, origin string, compression Compression) error {
buf := make([]byte, 10)
if _, err := archive.Read(buf); err != nil {
- return err
+ return nil, err
}
wrap := io.MultiReader(bytes.NewReader(buf), archive)
detectedCompression := DetectCompression(buf)
+ compression := options.Compression
if detectedCompression.Extension() != compression.Extension() {
- return fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
+ return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
}
tmp, err := ioutil.TempDir("", "docker-test-untar")
if err != nil {
- return err
+ return nil, err
}
defer os.RemoveAll(tmp)
if err := Untar(wrap, tmp, nil); err != nil {
- return err
+ return nil, err
}
if _, err := os.Stat(tmp); err != nil {
- return err
+ return nil, err
}
- changes, err := ChangesDirs(origin, tmp)
- if err != nil {
- return err
- }
-
- if len(changes) != 0 {
- t.Fatalf("Unexpected differences after tarUntar: %v", changes)
- }
-
- return nil
+ return ChangesDirs(origin, tmp)
}
func TestTarUntar(t *testing.T) {
@@ -122,9 +114,49 @@ func TestTarUntar(t *testing.T) {
Uncompressed,
Gzip,
} {
- if err := tarUntar(t, origin, c); err != nil {
+ changes, err := tarUntar(t, origin, &TarOptions{
+ Compression: c,
+ })
+
+ if err != nil {
t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
}
+
+ if len(changes) != 0 {
+ t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+ }
+ }
+}
+
+func TestTarWithOptions(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ cases := []struct {
+ opts *TarOptions
+ numChanges int
+ }{
+ {&TarOptions{Includes: []string{"1"}}, 1},
+ {&TarOptions{Excludes: []string{"2"}}, 1},
+ }
+ for _, testCase := range cases {
+ changes, err := tarUntar(t, origin, testCase.opts)
+ if err != nil {
+ t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err)
+ }
+ if len(changes) != testCase.numChanges {
+ t.Errorf("Expected %d changes, got %d for %+v:",
+ testCase.numChanges, len(changes), testCase.opts)
+ }
}
}
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
index f75b85ca8c..89395560f9 100755
--- a/contrib/completion/bash/docker
+++ b/contrib/completion/bash/docker
@@ -458,10 +458,21 @@ _docker_rm()
{
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "-v --volumes -l --link" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "-f --force -l --link -v --volumes" -- "$cur" ) )
+ return
;;
*)
+ local force=
+ for arg in "${COMP_WORDS[@]}"; do
+ case "$arg" in
+ -f|--force)
+ __docker_containers_all
+ return
+ ;;
+ esac
+ done
__docker_containers_stopped
+ return
;;
esac
}
diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish
index 00255bc0ae..a4a9365f92 100644
--- a/contrib/completion/fish/docker.fish
+++ b/contrib/completion/fish/docker.fish
@@ -79,13 +79,13 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d '
# commit
complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes"
-complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (eg. "John Hannibal Smith <hannibal@a-team.com>"'
+complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l run -d 'Config automatically applied when the image is run. (ex: -run=\'{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}\')'
complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container"
# cp
-complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d 'Copy files/folders from the containers filesystem to the host path'
+complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders from a container's filesystem to the host path"
# diff
complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem"
diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker
index 4578d1eda7..3f96f00ef7 100644
--- a/contrib/completion/zsh/_docker
+++ b/contrib/completion/zsh/_docker
@@ -1,6 +1,6 @@
#compdef docker
#
-# zsh completion for docker (http://docker.io)
+# zsh completion for docker (http://docker.com)
#
# version: 0.2.2
# author: Felix Riedel
diff --git a/contrib/init/openrc/docker.initd b/contrib/init/openrc/docker.initd
index 2d79a73973..a9d21b1708 100755
--- a/contrib/init/openrc/docker.initd
+++ b/contrib/init/openrc/docker.initd
@@ -11,6 +11,9 @@ DOCKER_OPTS=${DOCKER_OPTS:-}
start() {
checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE"
+ ulimit -n 1048576
+ ulimit -u 1048576
+
ebegin "Starting docker daemon"
start-stop-daemon --start --background \
--exec "$DOCKER_BINARY" \
diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service
index 1bc4d1f569..6f3cc33c36 100644
--- a/contrib/init/systemd/docker.service
+++ b/contrib/init/systemd/docker.service
@@ -1,6 +1,6 @@
[Unit]
Description=Docker Application Container Engine
-Documentation=http://docs.docker.io
+Documentation=http://docs.docker.com
After=network.target
[Service]
diff --git a/contrib/init/systemd/socket-activation/docker.service b/contrib/init/systemd/socket-activation/docker.service
index a3382ab414..4af71378c8 100644
--- a/contrib/init/systemd/socket-activation/docker.service
+++ b/contrib/init/systemd/socket-activation/docker.service
@@ -1,6 +1,6 @@
[Unit]
Description=Docker Application Container Engine
-Documentation=http://docs.docker.io
+Documentation=http://docs.docker.com
After=network.target
[Service]
diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker
index 9b50fad448..d79d9c6c07 100755
--- a/contrib/init/sysvinit-debian/docker
+++ b/contrib/init/sysvinit-debian/docker
@@ -22,7 +22,10 @@ BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/docker)
DOCKER=/usr/bin/$BASE
+# This is the pid file managed by docker itself
DOCKER_PIDFILE=/var/run/$BASE.pid
+# This is the pid file created/managed by start-stop-daemon
+DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid
DOCKER_LOGFILE=/var/log/$BASE.log
DOCKER_OPTS=
DOCKER_DESC="Docker"
@@ -85,11 +88,19 @@ case "$1" in
touch "$DOCKER_LOGFILE"
chgrp docker "$DOCKER_LOGFILE"
+ ulimit -n 1048576
+ if [ "$BASH" ]; then
+ ulimit -u 1048576
+ else
+ ulimit -p 1048576
+ fi
+
log_begin_msg "Starting $DOCKER_DESC: $BASE"
start-stop-daemon --start --background \
--no-close \
--exec "$DOCKER" \
- --pidfile "$DOCKER_PIDFILE" \
+ --pidfile "$DOCKER_SSD_PIDFILE" \
+ --make-pidfile \
-- \
-d -p "$DOCKER_PIDFILE" \
$DOCKER_OPTS \
@@ -100,13 +111,13 @@ case "$1" in
stop)
fail_unless_root
log_begin_msg "Stopping $DOCKER_DESC: $BASE"
- start-stop-daemon --stop --pidfile "$DOCKER_PIDFILE"
+ start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE"
log_end_msg $?
;;
restart)
fail_unless_root
- docker_pid=`cat "$DOCKER_PIDFILE" 2>/dev/null`
+ docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null`
[ -n "$docker_pid" ] \
&& ps -p $docker_pid > /dev/null 2>&1 \
&& $0 stop
@@ -119,7 +130,7 @@ case "$1" in
;;
status)
- status_of_proc -p "$DOCKER_PIDFILE" "$DOCKER" docker
+ status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKER" docker
;;
*)
diff --git a/contrib/init/sysvinit-redhat/docker b/contrib/init/sysvinit-redhat/docker
index 06699f6ab1..aa94c04811 100755
--- a/contrib/init/sysvinit-redhat/docker
+++ b/contrib/init/sysvinit-redhat/docker
@@ -2,10 +2,10 @@
#
# /etc/rc.d/init.d/docker
#
-# Daemon for docker.io
+# Daemon for docker.com
#
# chkconfig: 2345 95 95
-# description: Daemon for docker.io
+# description: Daemon for docker.com
### BEGIN INIT INFO
# Provides: docker
@@ -16,7 +16,7 @@
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: start and stop docker
-# Description: Daemon for docker.io
+# Description: Daemon for docker.com
### END INIT INFO
# Source function library.
diff --git a/contrib/man/.gitignore b/contrib/man/.gitignore
deleted file mode 100644
index c2c63b5d2e..0000000000
--- a/contrib/man/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-# these are generated by the md/md2man-all.sh script
-man*
diff --git a/contrib/man/md/docker-kill.1.md b/contrib/man/md/docker-kill.1.md
deleted file mode 100644
index 8175002d33..0000000000
--- a/contrib/man/md/docker-kill.1.md
+++ /dev/null
@@ -1,21 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
-# NAME
-docker-kill - Kill a running container (send SIGKILL, or specified signal)
-
-# SYNOPSIS
-**docker kill** **--signal**[=*"KILL"*] CONTAINER [CONTAINER...]
-
-# DESCRIPTION
-
-The main process inside each container specified will be sent SIGKILL,
- or any signal specified with option --signal.
-
-# OPTIONS
-**-s**, **--signal**=*"KILL"*
- Signal to send to the container
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
- based on docker.io source material and internal work.
diff --git a/contrib/man/md/docker-port.1.md b/contrib/man/md/docker-port.1.md
deleted file mode 100644
index 9773e4d80c..0000000000
--- a/contrib/man/md/docker-port.1.md
+++ /dev/null
@@ -1,15 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
-# NAME
-docker-port - Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
-
-# SYNOPSIS
-**docker port** CONTAINER PRIVATE_PORT
-
-# DESCRIPTION
-Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
diff --git a/contrib/man/md/docker-restart.1.md b/contrib/man/md/docker-restart.1.md
deleted file mode 100644
index 44634f6613..0000000000
--- a/contrib/man/md/docker-restart.1.md
+++ /dev/null
@@ -1,21 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
-# NAME
-docker-restart - Restart a running container
-
-# SYNOPSIS
-**docker restart** [**-t**|**--time**[=*10*]] CONTAINER [CONTAINER...]
-
-# DESCRIPTION
-Restart each container listed.
-
-# OPTIONS
-**-t**, **--time**=NUM
- Number of seconds to try to stop for before killing the container. Once
-killed it will then be restarted. Default=10
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
-
diff --git a/contrib/man/md/docker-start.1.md b/contrib/man/md/docker-start.1.md
deleted file mode 100644
index 2815f1b07f..0000000000
--- a/contrib/man/md/docker-start.1.md
+++ /dev/null
@@ -1,29 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
-# NAME
-docker-start - Restart a stopped container
-
-# SYNOPSIS
-**docker start** [**a**|**--attach**[=*false*]] [**-i**|**--interactive**
-[=*true*] CONTAINER [CONTAINER...]
-
-# DESCRIPTION
-
-Start a stopped container.
-
-# OPTION
-**-a**, **--attach**=*true*|*false*
- When true attach to container's stdout/stderr and forward all signals to
-the process
-
-**-i**, **--interactive**=*true*|*false*
- When true attach to container's stdin
-
-# NOTES
-If run on a started container, start takes no action and succeeds
-unconditionally.
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
diff --git a/contrib/man/md/docker-stop.1.md b/contrib/man/md/docker-stop.1.md
deleted file mode 100644
index 6ec81cd472..0000000000
--- a/contrib/man/md/docker-stop.1.md
+++ /dev/null
@@ -1,22 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
-# NAME
-docker-stop - Stop a running container
- grace period)
-
-# SYNOPSIS
-**docker stop** [**-t**|**--time**[=*10*]] CONTAINER [CONTAINER...]
-
-# DESCRIPTION
-Stop a running container (Send SIGTERM, and then SIGKILL after
- grace period)
-
-# OPTIONS
-**-t**, **--time**=NUM
- Wait NUM number of seconds for the container to stop before killing it.
-The default is 10 seconds.
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
diff --git a/contrib/man/old-man/docker-attach.1 b/contrib/man/old-man/docker-attach.1
deleted file mode 100644
index f0879d7507..0000000000
--- a/contrib/man/old-man/docker-attach.1
+++ /dev/null
@@ -1,56 +0,0 @@
-.\" Process this file with
-.\" nroff -man -Tascii docker-attach.1
-.\"
-.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
-.SH NAME
-docker-attach \- Attach to a running container
-.SH SYNOPSIS
-.B docker attach
-\fB--no-stdin\fR[=\fIfalse\fR]
-\fB--sig-proxy\fR[=\fItrue\fR]
-container
-.SH DESCRIPTION
-If you \fBdocker run\fR a container in detached mode (\fB-d\fR), you can reattach to the detached container with \fBdocker attach\fR using the container's ID or name.
-.sp
-You can detach from the container again (and leave it running) with CTRL-c (for a quiet exit) or CTRL-\ to get a stacktrace of the Docker client when it quits. When you detach from the container the exit code will be returned to the client.
-.SH "OPTIONS"
-.TP
-.B --no-stdin=\fItrue\fR|\fIfalse\fR:
-When set to true, do not attach to stdin. The default is \fIfalse\fR.
-.TP
-.B --sig-proxy=\fItrue\fR|\fIfalse\fR:
-When set to true, proxify all received signal to the process (even in non-tty mode). The default is \fItrue\fR.
-.sp
-.SH EXAMPLES
-.sp
-.PP
-.B Attaching to a container
-.TP
-In this example the top command is run inside a container, from an image called fedora, in detached mode. The ID from the container is passed into the \fBdocker attach\fR command:
-.sp
-.nf
-.RS
-# ID=$(sudo docker run -d fedora /usr/bin/top -b)
-# sudo docker attach $ID
-top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
-Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
-Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
-Mem: 373572k total, 355560k used, 18012k free, 27872k buffers
-Swap: 786428k total, 0k used, 786428k free, 221740k cached
-
-PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
-1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top
-
-top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
-Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
-Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
-Mem: 373572k total, 355244k used, 18328k free, 27872k buffers
-Swap: 786428k total, 0k used, 786428k free, 221776k cached
-
-PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
-1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top
-.RE
-.fi
-.sp
-.SH HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work.
diff --git a/contrib/man/old-man/docker-build.1 b/contrib/man/old-man/docker-build.1
deleted file mode 100644
index 2d189eb0e3..0000000000
--- a/contrib/man/old-man/docker-build.1
+++ /dev/null
@@ -1,65 +0,0 @@
-.\" Process this file with
-.\" nroff -man -Tascii docker-build.1
-.\"
-.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
-.SH NAME
-docker-build \- Build an image from a Dockerfile source at PATH
-.SH SYNOPSIS
-.B docker build
-[\fB--no-cache\fR[=\fIfalse\fR]
-[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR]
-[\fB--rm\fR[=\fitrue\fR]]
-[\fB-t\fR|\fB--tag\fR=\fItag\fR]
-PATH | URL | -
-.SH DESCRIPTION
-This will read the Dockerfile from the directory specified in \fBPATH\fR. It also sends any other files and directories found in the current directory to the Docker daemon. The contents of this directory would be used by ADD command found within the Dockerfile.
-Warning, this will send a lot of data to the Docker daemon if the current directory contains a lot of data.
-If the absolute path is provided instead of ‘.’, only the files and directories required by the ADD commands from the Dockerfile will be added to the context and transferred to the Docker daemon.
-.sp
-When a single Dockerfile is given as URL, then no context is set. When a Git repository is set as URL, the repository is used as context.
-.SH "OPTIONS"
-.TP
-.B -q, --quiet=\fItrue\fR|\fIfalse\fR:
-When set to true, suppress verbose build output. Default is \fIfalse\fR.
-.TP
-.B --rm=\fItrue\fr|\fIfalse\fR:
-When true, remove intermediate containers that are created during the build process. The default is true.
-.TP
-.B -t, --tag=\fItag\fR:
-Tag to be applied to the resulting image on successful completion of the build.
-.TP
-.B --no-cache=\fItrue\fR|\fIfalse\fR
-When set to true, do not use a cache when building the image. The default is \fIfalse\fR.
-.sp
-.SH EXAMPLES
-.sp
-.sp
-.B Building an image from current directory
-.TP
-USing a Dockerfile, Docker images are built using the build command:
-.sp
-.RS
-docker build .
-.RE
-.sp
-If, for some reasone, you do not what to remove the intermediate containers created during the build you must set--rm=false.
-.sp
-.RS
-docker build --rm=false .
-.sp
-.RE
-.sp
-A good practice is to make a subdirectory with a related name and create the Dockerfile in that directory. E.g. a directory called mongo may contain a Dockerfile for a MongoDB image, or a directory called httpd may contain an Dockerfile for an Apache web server.
-.sp
-It is also good practice to add the files required for the image to the subdirectory. These files will be then specified with the `ADD` instruction in the Dockerfile. Note: if you include a tar file, which is good practice, then Docker will automatically extract the contents of the tar file specified in the `ADD` instruction into the specified target.
-.sp
-.B Building an image container using a URL
-.TP
-This will clone the Github repository and use it as context. The Dockerfile at the root of the repository is used as Dockerfile. This only works if the Github repository is a dedicated repository. Note that you can specify an arbitrary Git repository by using the ‘git://’ schema.
-.sp
-.RS
-docker build github.com/scollier/Fedora-Dockerfiles/tree/master/apache
-.RE
-.sp
-.SH HISTORY
-March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work.
diff --git a/contrib/man/old-man/docker-images.1 b/contrib/man/old-man/docker-images.1
deleted file mode 100644
index e540ba2b79..0000000000
--- a/contrib/man/old-man/docker-images.1
+++ /dev/null
@@ -1,84 +0,0 @@
-.\" Process this file with
-.\" nroff -man -Tascii docker-images.1
-.\"
-.TH "DOCKER" "1" "April 2014" "0.1" "Docker"
-.SH NAME
-docker-images \- List the images in the local repository
-.SH SYNOPSIS
-.B docker images
-[\fB-a\fR|\fB--all\fR=\fIfalse\fR]
-[\fB--no-trunc\fR[=\fIfalse\fR]
-[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR]
-[\fB-t\fR|\fB--tree\fR=\fIfalse\fR]
-[\fB-v\fR|\fB--viz\fR=\fIfalse\fR]
-[NAME]
-.SH DESCRIPTION
-This command lists the images stored in the local Docker repository.
-.sp
-By default, intermediate images, used during builds, are not listed. Some of the output, e.g. image ID, is truncated, for space reasons. However the truncated image ID, and often the first few characters, are enough to be used in other Docker commands that use the image ID. The output includes repository, tag, image ID, date created and the virtual size.
-.sp
-The title REPOSITORY for the first title may seem confusing. It is essentially the image name. However, because you can tag a specific image, and multiple tags (image instances) can be associated with a single name, the name is really a repository for all tagged images of the same name.
-.SH "OPTIONS"
-.TP
-.B -a, --all=\fItrue\fR|\fIfalse\fR:
-When set to true, also include all intermediate images in the list. The default is false.
-.TP
-.B --no-trunc=\fItrue\fR|\fIfalse\fR:
-When set to true, list the full image ID and not the truncated ID. The default is false.
-.TP
-.B -q, --quiet=\fItrue\fR|\fIfalse\fR:
-When set to true, list the complete image ID as part of the output. The default is false.
-.TP
-.B -t, --tree=\fItrue\fR|\fIfalse\fR:
-When set to true, list the images in a tree dependency tree (hierarchy) format. The default is false.
-.TP
-.B -v, --viz=\fItrue\fR|\fIfalse\fR
-When set to true, list the graph in graphviz format. The default is \fIfalse\fR.
-.sp
-.SH EXAMPLES
-.sp
-.B Listing the images
-.TP
-To list the images in a local repository (not the registry) run:
-.sp
-.RS
-docker images
-.RE
-.sp
-The list will contain the image repository name, a tag for the image, and an image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG, IMAGE ID, CREATED, and VIRTUAL SIZE.
-.sp
-To get a verbose list of images which contains all the intermediate images used in builds use \fB-a\fR:
-.sp
-.RS
-docker images -a
-.RE
-.sp
-.B List images dependency tree hierarchy
-.TP
-To list the images in the local repository (not the registry) in a dependency tree format then use the \fB-t\fR|\fB--tree=true\fR option.
-.sp
-.RS
-docker images -t
-.RE
-.sp
-This displays a staggered hierarchy tree where the less indented image is the oldest with dependent image layers branching inward (to the right) on subsequent lines. The newest or top level image layer is listed last in any tree branch.
-.sp
-.B List images in GraphViz format
-.TP
-To display the list in a format consumable by a GraphViz tools run with \fB-v\fR|\fB--viz=true\fR. For example to produce a .png graph file of the hierarchy use:
-.sp
-.RS
-docker images --viz | dot -Tpng -o docker.png
-.sp
-.RE
-.sp
-.B Listing only the shortened image IDs
-.TP
-Listing just the shortened image IDs. This can be useful for some automated tools.
-.sp
-.RS
-docker images -q
-.RE
-.sp
-.SH HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work.
diff --git a/contrib/man/old-man/docker-info.1 b/contrib/man/old-man/docker-info.1
deleted file mode 100644
index dca2600af0..0000000000
--- a/contrib/man/old-man/docker-info.1
+++ /dev/null
@@ -1,39 +0,0 @@
-.\" Process this file with
-.\" nroff -man -Tascii docker-info.1
-.\"
-.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
-.SH NAME
-docker-info \- Display system wide information
-.SH SYNOPSIS
-.B docker info
-.SH DESCRIPTION
-This command displays system wide information regarding the Docker installation. Information displayed includes the number of containers and images, pool name, data file, metadata file, data space used, total data space, metadata space used, total metadata space, execution driver, and the kernel version.
-.sp
-The data file is where the images are stored and the metadata file is where the meta data regarding those images are stored. When run for the first time Docker allocates a certain amount of data space and meta data space from the space available on the volume where /var/lib/docker is mounted.
-.SH "OPTIONS"
-There are no available options.
-.sp
-.SH EXAMPLES
-.sp
-.B Display Docker system information
-.TP
-Here is a sample output:
-.sp
-.RS
- # docker info
- Containers: 18
- Images: 95
- Storage Driver: devicemapper
- Pool Name: docker-8:1-170408448-pool
- Data file: /var/lib/docker/devicemapper/devicemapper/data
- Metadata file: /var/lib/docker/devicemapper/devicemapper/metadata
- Data Space Used: 9946.3 Mb
- Data Space Total: 102400.0 Mb
- Metadata Space Used: 9.9 Mb
- Metadata Space Total: 2048.0 Mb
- Execution Driver: native-0.1
- Kernel Version: 3.10.0-116.el7.x86_64
-.RE
-.sp
-.SH HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work.
diff --git a/contrib/man/old-man/docker-inspect.1 b/contrib/man/old-man/docker-inspect.1
deleted file mode 100644
index 225125e564..0000000000
--- a/contrib/man/old-man/docker-inspect.1
+++ /dev/null
@@ -1,237 +0,0 @@
-.\" Process this file with
-.\" nroff -man -Tascii docker-inspect.1
-.\"
-.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
-.SH NAME
-docker-inspect \- Return low-level information on a container/image
-.SH SYNOPSIS
-.B docker inspect
-[\fB-f\fR|\fB--format\fR=""
-CONTAINER|IMAGE [CONTAINER|IMAGE...]
-.SH DESCRIPTION
-This displays all the information available in Docker for a given container or image. By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result.
-.SH "OPTIONS"
-.TP
-.B -f, --format="":
-The text/template package of Go describes all the details of the format. See examples section
-.SH EXAMPLES
-.sp
-.PP
-.B Getting information on a container
-.TP
-To get information on a container use it's ID or instance name
-.sp
-.fi
-.RS
-#docker inspect 1eb5fabf5a03
-
-[{
- "ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b",
- "Created": "2014-04-04T21:33:52.02361335Z",
- "Path": "/usr/sbin/nginx",
- "Args": [],
- "Config": {
- "Hostname": "1eb5fabf5a03",
- "Domainname": "",
- "User": "",
- "Memory": 0,
- "MemorySwap": 0,
- "CpuShares": 0,
- "AttachStdin": false,
- "AttachStdout": false,
- "AttachStderr": false,
- "PortSpecs": null,
- "ExposedPorts": {
- "80/tcp": {}
- },
- "Tty": true,
- "OpenStdin": false,
- "StdinOnce": false,
- "Env": [
- "HOME=/",
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- ],
- "Cmd": [
- "/usr/sbin/nginx"
- ],
- "Dns": null,
- "DnsSearch": null,
- "Image": "summit/nginx",
- "Volumes": null,
- "VolumesFrom": "",
- "WorkingDir": "",
- "Entrypoint": null,
- "NetworkDisabled": false,
- "OnBuild": null,
- "Context": {
- "mount_label": "system_u:object_r:svirt_sandbox_file_t:s0:c0,c650",
- "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c0,c650"
- }
- },
- "State": {
- "Running": true,
- "Pid": 858,
- "ExitCode": 0,
- "StartedAt": "2014-04-04T21:33:54.16259207Z",
- "FinishedAt": "0001-01-01T00:00:00Z",
- "Ghost": false
- },
- "Image": "df53773a4390e25936f9fd3739e0c0e60a62d024ea7b669282b27e65ae8458e6",
- "NetworkSettings": {
- "IPAddress": "172.17.0.2",
- "IPPrefixLen": 16,
- "Gateway": "172.17.42.1",
- "Bridge": "docker0",
- "PortMapping": null,
- "Ports": {
- "80/tcp": [
- {
- "HostIp": "0.0.0.0",
- "HostPort": "80"
- }
- ]
- }
- },
- "ResolvConfPath": "/etc/resolv.conf",
- "HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname",
- "HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts",
- "Name": "/ecstatic_ptolemy",
- "Driver": "devicemapper",
- "ExecDriver": "native-0.1",
- "Volumes": {},
- "VolumesRW": {},
- "HostConfig": {
- "Binds": null,
- "ContainerIDFile": "",
- "LxcConf": [],
- "Privileged": false,
- "PortBindings": {
- "80/tcp": [
- {
- "HostIp": "0.0.0.0",
- "HostPort": "80"
- }
- ]
- },
- "Links": null,
- "PublishAllPorts": false,
- "DriverOptions": {
- "lxc": null
- },
- "CliAddress": ""
- }
-.RE
-.nf
-.sp
-.B Getting the IP address of a container instance
-.TP
-To get the IP address of a container use:
-.sp
-.fi
-.RS
-# docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03
-
-172.17.0.2
-.RE
-.nf
-.sp
-.B Listing all port bindings
-.TP
-One can loop over arrays and maps in the results to produce simple text output:
-.sp
-.fi
-.RS
-# docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03
-
-80/tcp -> 80
-.RE
-.nf
-.sp
-.B Getting information on an image
-.TP
-Use an image's ID or name (e.g. repository/name[:tag]) to get information on it.
-.sp
-.fi
-.RS
-docker inspect 58394af37342
-[{
- "id": "58394af373423902a1b97f209a31e3777932d9321ef10e64feaaa7b4df609cf9",
- "parent": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
- "created": "2014-02-03T16:10:40.500814677Z",
- "container": "f718f19a28a5147da49313c54620306243734bafa63c76942ef6f8c4b4113bc5",
- "container_config": {
- "Hostname": "88807319f25e",
- "Domainname": "",
- "User": "",
- "Memory": 0,
- "MemorySwap": 0,
- "CpuShares": 0,
- "AttachStdin": false,
- "AttachStdout": false,
- "AttachStderr": false,
- "PortSpecs": null,
- "ExposedPorts": null,
- "Tty": false,
- "OpenStdin": false,
- "StdinOnce": false,
- "Env": [
- "HOME=/",
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- ],
- "Cmd": [
- "/bin/sh",
- "-c",
- "#(nop) ADD fedora-20-medium.tar.xz in /"
- ],
- "Dns": null,
- "DnsSearch": null,
- "Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
- "Volumes": null,
- "VolumesFrom": "",
- "WorkingDir": "",
- "Entrypoint": null,
- "NetworkDisabled": false,
- "OnBuild": null,
- "Context": null
- },
- "docker_version": "0.6.3",
- "author": "Lokesh Mandvekar \u003clsm5@redhat.com\u003e - ./buildcontainers.sh",
- "config": {
- "Hostname": "88807319f25e",
- "Domainname": "",
- "User": "",
- "Memory": 0,
- "MemorySwap": 0,
- "CpuShares": 0,
- "AttachStdin": false,
- "AttachStdout": false,
- "AttachStderr": false,
- "PortSpecs": null,
- "ExposedPorts": null,
- "Tty": false,
- "OpenStdin": false,
- "StdinOnce": false,
- "Env": [
- "HOME=/",
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- ],
- "Cmd": null,
- "Dns": null,
- "DnsSearch": null,
- "Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
- "Volumes": null,
- "VolumesFrom": "",
- "WorkingDir": "",
- "Entrypoint": null,
- "NetworkDisabled": false,
- "OnBuild": null,
- "Context": null
- },
- "architecture": "x86_64",
- "Size": 385520098
-}]
-.RE
-.nf
-.sp
-.SH HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work.
diff --git a/contrib/man/old-man/docker-rm.1 b/contrib/man/old-man/docker-rm.1
deleted file mode 100644
index b06e014d3b..0000000000
--- a/contrib/man/old-man/docker-rm.1
+++ /dev/null
@@ -1,45 +0,0 @@
-.\" Process this file with
-.\" nroff -man -Tascii docker-rm.1
-.\"
-.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
-.SH NAME
-docker-rm \- Remove one or more containers.
-.SH SYNOPSIS
-.B docker rm
-[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]
-[\fB-l\fR|\fB--link\fR[=\fIfalse\fR]
-[\fB-v\fR|\fB--volumes\fR[=\fIfalse\fR]
-CONTAINER [CONTAINER...]
-.SH DESCRIPTION
-This will remove one or more containers from the host node. The container name or ID can be used. This does not remove images. You cannot remove a running container unless you use the \fB-f\fR option. To see all containers on a host use the \fBdocker ps -a\fR command.
-.SH "OPTIONS"
-.TP
-.B -f, --force=\fItrue\fR|\fIfalse\fR:
-When set to true, force the removal of the container. The default is \fIfalse\fR.
-.TP
-.B -l, --link=\fItrue\fR|\fIfalse\fR:
-When set to true, remove the specified link and not the underlying container. The default is \fIfalse\fR.
-.TP
-.B -v, --volumes=\fItrue\fR|\fIfalse\fR:
-When set to true, remove the volumes associated to the container. The default is \fIfalse\fR.
-.SH EXAMPLES
-.sp
-.PP
-.B Removing a container using its ID
-.TP
-To remove a container using its ID, find either from a \fBdocker ps -a\fR command, or use the ID returned from the \fBdocker run\fR command, or retrieve it from a file used to store it using the \fBdocker run --cidfile\fR:
-.sp
-.RS
-docker rm abebf7571666
-.RE
-.sp
-.B Removing a container using the container name:
-.TP
-The name of the container can be found using the \fBdocker ps -a\fR command. The use that name as follows:
-.sp
-.RS
-docker rm hopeful_morse
-.RE
-.sp
-.SH HISTORY
-March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work.
diff --git a/contrib/man/old-man/docker-rm.md b/contrib/man/old-man/docker-rm.md
deleted file mode 100644
index a53aa77c98..0000000000
--- a/contrib/man/old-man/docker-rm.md
+++ /dev/null
@@ -1,50 +0,0 @@
-DOCKER "1" "APRIL 2014" "0.1" "Docker"
-=======================================
-
-NAME
-----
-
-docker-rm - Remove one or more containers.
-
-SYNOPSIS
---------
-
-`docker rm` [`-f`|`--force`[=*false*] [`-l`|`--link`[=*false*] [`-v`|`--volumes`[=*false*]
-CONTAINER [CONTAINER...]
-
-DESCRIPTION
------------
-
-`docker rm` will remove one or more containers from the host node. The container name or ID can be used. This does not remove images. You cannot remove a running container unless you use the \fB-f\fR option. To see all containers on a host use the `docker ps -a` command.
-
-OPTIONS
--------
-
-`-f`, `--force`=*true*|*false*:
- When set to true, force the removal of the container. The default is *false*.
-
-`-l`, `--link`=*true*|*false*:
- When set to true, remove the specified link and not the underlying container. The default is *false*.
-
-`-v`, `--volumes`=*true*|*false*:
- When set to true, remove the volumes associated to the container. The default is *false*.
-
-EXAMPLES
---------
-
-##Removing a container using its ID##
-
-To remove a container using its ID, find either from a `docker ps -a` command, or use the ID returned from the `docker run` command, or retrieve it from a file used to store it using the `docker run --cidfile`:
-
- docker rm abebf7571666
-
-##Removing a container using the container name##
-
-The name of the container can be found using the \fBdocker ps -a\fR command. The use that name as follows:
-
- docker rm hopeful_morse
-
-HISTORY
--------
-
-April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work.
diff --git a/contrib/man/old-man/docker-rmi.1 b/contrib/man/old-man/docker-rmi.1
deleted file mode 100644
index 6f33446ecd..0000000000
--- a/contrib/man/old-man/docker-rmi.1
+++ /dev/null
@@ -1,29 +0,0 @@
-.\" Process this file with
-.\" nroff -man -Tascii docker-run.1
-.\"
-.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
-.SH NAME
-docker-rmi \- Remove one or more images.
-.SH SYNOPSIS
-.B docker rmi
-[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]
-IMAGE [IMAGE...]
-.SH DESCRIPTION
-This will remove one or more images from the host node. This does not remove images from a registry. You cannot remove an image of a running container unless you use the \fB-f\fR option. To see all images on a host use the \fBdocker images\fR command.
-.SH "OPTIONS"
-.TP
-.B -f, --force=\fItrue\fR|\fIfalse\fR:
-When set to true, force the removal of the image. The default is \fIfalse\fR.
-.SH EXAMPLES
-.sp
-.PP
-.B Removing an image
-.TP
-Here is an example of removing and image:
-.sp
-.RS
-docker rmi fedora/httpd
-.RE
-.sp
-.SH HISTORY
-March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work.
diff --git a/contrib/man/old-man/docker-run.1 b/contrib/man/old-man/docker-run.1
deleted file mode 100644
index 0e06e8d682..0000000000
--- a/contrib/man/old-man/docker-run.1
+++ /dev/null
@@ -1,277 +0,0 @@
-.\" Process this file with
-.\" nroff -man -Tascii docker-run.1
-.\"
-.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
-.SH NAME
-docker-run \- Run a process in an isolated container
-.SH SYNOPSIS
-.B docker run
-[\fB-a\fR|\fB--attach\fR[=]] [\fB-c\fR|\fB--cpu-shares\fR[=0] [\fB-m\fR|\fB--memory\fR=\fImemory-limit\fR]
-[\fB--cidfile\fR=\fIfile\fR] [\fB-d\fR|\fB--detach\fR[=\fIfalse\fR]] [\fB--dns\fR=\fIIP-address\fR]
-[\fB--name\fR=\fIname\fR] [\fB-u\fR|\fB--user\fR=\fIusername\fR|\fIuid\fR]
-[\fB--link\fR=\fIname\fR:\fIalias\fR]
-[\fB-e\fR|\fB--env\fR=\fIenvironment\fR] [\fB--entrypoint\fR=\fIcommand\fR]
-[\fB--expose\fR=\fIport\fR] [\fB-P\fR|\fB--publish-all\fR[=\fIfalse\fR]]
-[\fB-p\fR|\fB--publish\fR=\fIport-mappping\fR] [\fB-h\fR|\fB--hostname\fR=\fIhostname\fR]
-[\fB--rm\fR[=\fIfalse\fR]] [\fB--priviledged\fR[=\fIfalse\fR]
-[\fB-i\fR|\fB--interactive\fR[=\fIfalse\fR]
-[\fB-t\fR|\fB--tty\fR[=\fIfalse\fR]] [\fB--lxc-conf\fR=\fIoptions\fR]
-[\fB-n\fR|\fB--networking\fR[=\fItrue\fR]]
-[\fB-v\fR|\fB--volume\fR=\fIvolume\fR] [\fB--volumes-from\fR=\fIcontainer-id\fR]
-[\fB-w\fR|\fB--workdir\fR=\fIdirectory\fR] [\fB--sig-proxy\fR[=\fItrue\fR]]
-IMAGE [COMMAND] [ARG...]
-.SH DESCRIPTION
-.PP
-Run a process in a new container. \fBdocker run\fR starts a process with its own file system, its own networking, and its own isolated process tree. The \fIIMAGE\fR which starts the process may define defaults related to the process that will be run in the container, the networking to expose, and more, but \fBdocker run\fR gives final control to the operator or administrator who starts the container from the image. For that reason \fBdocker run\fR has more options than any other docker command.
-
-If the \fIIMAGE\fR is not already loaded then \fBdocker run\fR will pull the \fIIMAGE\fR, and all image dependencies, from the repository in the same way running \fBdocker pull\fR \fIIMAGE\fR, before it starts the container from that image.
-
-
-.SH "OPTIONS"
-
-.TP
-.B -a, --attach=\fIstdin\fR|\fIstdout\fR|\fIstderr\fR:
-Attach to stdin, stdout or stderr. In foreground mode (the default when -d is not specified), \fBdocker run\fR can start the process in the container and attach the console to the process’s standard input, output, and standard error. It can even pretend to be a TTY (this is what most commandline executables expect) and pass along signals. The \fB-a\fR option can be set for each of stdin, stdout, and stderr.
-
-.TP
-.B -c, --cpu-shares=0:
-CPU shares in relative weight. You can increase the priority of a container with the -c option. By default, all containers run at the same priority and get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via \fBdocker run\fR.
-
-.TP
-.B -m, --memory=\fImemory-limit\fR:
-Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. If a limit of 0 is specified, the container's memory is not limited. The memory limit format: <number><optional unit>, where unit = b, k, m or g.
-
-.TP
-.B --cidfile=\fIfile\fR:
-Write the container ID to the file specified.
-
-.TP
-.B -d, --detach=\fItrue\fR|\fIfalse\fR:
-Detached mode. This runs the container in the background. It outputs the new container's id and and error messages. At any time you can run \fBdocker ps\fR in the other shell to view a list of the running containers. You can reattach to a detached container with \fBdocker attach\fR. If you choose to run a container in the detached mode, then you cannot use the -rm option.
-
-.TP
-.B --dns=\fIIP-address\fR:
-Set custom DNS servers. This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (eg. 127.0.0.1). When this is the case the \fB-dns\fR flags is necessary for every run.
-
-.TP
-.B -e, --env=\fIenvironment\fR:
-Set environment variables. This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container.
-
-.TP
-.B --entrypoint=\ficommand\fR:
-This option allows you to overwrite the default entrypoint of the image that is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND because it specifies what executable to run when the container starts, but it is (purposely) more difficult to override. The ENTRYPOINT gives a container its default nature or behavior, so that when you set an ENTRYPOINT you can run the container as if it were that binary, complete with default options, and you can pass in more options via the COMMAND. But, sometimes an operator may want to run something else inside the container, so you can override the default ENTRYPOINT at runtime by using a \fB--entrypoint\fR and a string to specify the new ENTRYPOINT.
-
-.TP
-.B --expose=\fIport\fR:
-Expose a port from the container without publishing it to your host. A containers port can be exposed to other containers in three ways: 1) The developer can expose the port using the EXPOSE parameter of the Dockerfile, 2) the operator can use the \fB--expose\fR option with \fBdocker run\fR, or 3) the container can be started with the \fB--link\fR.
-
-.TP
-.B -P, --publish-all=\fItrue\fR|\fIfalse\fR:
-When set to true publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the exposed port accessible on the host and the ports will be available to any client that can reach the host. To find the map between the host ports and the exposed ports, use \fBdocker port\fR.
-
-.TP
-.B -p, --publish=[]:
-Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)
-
-.TP
-.B -h , --hostname=\fIhostname\fR:
-Sets the container host name that is available inside the container.
-
-.TP
-.B -i , --interactive=\fItrue\fR|\fIfalse\fR:
-When set to true, keep stdin open even if not attached. The default is false.
-
-.TP
-.B --link=\fIname\fR:\fIalias\fR:
-Add link to another container. The format is name:alias. If the operator uses \fB--link\fR when starting the new client container, then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use.
-
-.TP
-.B -n, --networking=\fItrue\fR|\fIfalse\fR:
-By default, all containers have networking enabled (true) and can make outgoing connections. The operator can disable networking with \fB--networking\fR to false. This disables all incoming and outgoing networking. In cases like this, I/O can only be performed through files or by using STDIN/STDOUT.
-
-Also by default, the container will use the same DNS servers as the host. but you canThe operator may override this with \fB-dns\fR.
-
-.TP
-.B --name=\fIname\fR:
-Assign a name to the container. The operator can identify a container in three ways:
-.sp
-.nf
-UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”)
-UUID short identifier (“f78375b1c487”)
-Name (“jonah”)
-.fi
-.sp
-The UUID identifiers come from the Docker daemon, and if a name is not assigned to the container with \fB--name\fR then the daemon will also generate a random string name. The name is useful when defining links (see \fB--link\fR) (or any other place you need to identify a container). This works for both background and foreground Docker containers.
-
-.TP
-.B --privileged=\fItrue\fR|\fIfalse\fR:
-Give extended privileges to this container. By default, Docker containers are “unprivileged” (=false) and cannot, for example, run a Docker daemon inside the Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices.
-
-When the operator executes \fBdocker run -privileged\fR, Docker will enable access to all devices on the host as well as set some configuration in AppArmor (\fB???\fR) to allow the container nearly all the same access to the host as processes running outside of a container on the host.
-
-.TP
-.B --rm=\fItrue\fR|\fIfalse\fR:
-If set to \fItrue\fR the container is automatically removed when it exits. The default is \fIfalse\fR. This option is incompatible with \fB-d\fR.
-
-.TP
-.B --sig-proxy=\fItrue\fR|\fIfalse\fR:
-When set to true, proxify all received signals to the process (even in non-tty mode). The default is true.
-
-.TP
-.B -t, --tty=\fItrue\fR|\fIfalse\fR:
-When set to true Docker can allocate a pseudo-tty and attach to the standard input of any container. This can be used, for example, to run a throwaway interactive shell. The default is value is false.
-
-.TP
-.B -u, --user=\fIusername\fR,\fRuid\fR:
-Set a username or UID for the container.
-
-.TP
-.B -v, --volume=\fIvolume\fR:
-Bind mount a volume to the container. The \fB-v\fR option can be used one or more times to add one or more mounts to a container. These mounts can then be used in other containers using the \fB--volumes-from\fR option. See examples.
-
-.TP
-.B --volumes-from=\fIcontainer-id\fR:
-Will mount volumes from the specified container identified by container-id. Once a volume is mounted in a one container it can be shared with other containers using the \fB--volumes-from\fR option when running those other containers. The volumes can be shared even if the original container with the mount is not running.
-
-.TP
-.B -w, --workdir=\fIdirectory\fR:
-Working directory inside the container. The default working directory for running binaries within a container is the root directory (/). The developer can set a different default with the Dockerfile WORKDIR instruction. The operator can override the working directory by using the \fB-w\fR option.
-
-.TP
-.B IMAGE:
-The image name or ID.
-
-.TP
-.B COMMAND:
-The command or program to run inside the image.
-
-.TP
-.B ARG:
-The arguments for the command to be run in the container.
-
-.SH EXAMPLES
-.sp
-.sp
-.B Exposing log messages from the container to the host's log
-.TP
-If you want messages that are logged in your container to show up in the host's syslog/journal then you should bind mount the /var/log directory as follows.
-.sp
-.RS
-docker run -v /dev/log:/dev/log -i -t fedora /bin/bash
-.RE
-.sp
-From inside the container you can test this by sending a message to the log.
-.sp
-.RS
-logger "Hello from my container"
-.sp
-.RE
-Then exit and check the journal.
-.RS
-.sp
-exit
-.sp
-journalctl -b | grep hello
-.RE
-.sp
-This should list the message sent to logger.
-.sp
-.B Attaching to one or more from STDIN, STDOUT, STDERR
-.TP
-If you do not specify -a then Docker will attach everything (stdin,stdout,stderr). You can specify to which of the three standard streams (stdin, stdout, stderr) you’d like to connect instead, as in:
-.sp
-.RS
-docker run -a stdin -a stdout -i -t fedora /bin/bash
-.RE
-.sp
-.B Linking Containers
-.TP
-The link feature allows multiple containers to communicate with each other. For example, a container whose Dockerfile has exposed port 80 can be run and named as follows:
-.sp
-.RS
-docker run --name=link-test -d -i -t fedora/httpd
-.RE
-.sp
-.TP
-A second container, in this case called linker, can communicate with the httpd container, named link-test, by running with the \fB--link=<name>:<alias>\fR
-.sp
-.RS
-docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash
-.RE
-.sp
-.TP
-Now the container linker is linked to container link-test with the alias lt. Running the \fBenv\fR command in the linker container shows environment variables with the LT (alias) context (\fBLT_\fR)
-.sp
-.nf
-.RS
-# env
-HOSTNAME=668231cb0978
-TERM=xterm
-LT_PORT_80_TCP=tcp://172.17.0.3:80
-LT_PORT_80_TCP_PORT=80
-LT_PORT_80_TCP_PROTO=tcp
-LT_PORT=tcp://172.17.0.3:80
-PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-PWD=/
-LT_NAME=/linker/lt
-SHLVL=1
-HOME=/
-LT_PORT_80_TCP_ADDR=172.17.0.3
-_=/usr/bin/env
-.RE
-.fi
-.sp
-.TP
-When linking two containers Docker will use the exposed ports of the container to create a secure tunnel for the parent to access.
-.TP
-.sp
-.B Mapping Ports for External Usage
-.TP
-The exposed port of an application can be mapped to a host port using the \fB-p\fR flag. For example a httpd port 80 can be mapped to the host port 8080 using the following:
-.sp
-.RS
-docker run -p 8080:80 -d -i -t fedora/httpd
-.RE
-.sp
-.TP
-.B Creating and Mounting a Data Volume Container
-.TP
-Many applications require the sharing of persistent data across several containers. Docker allows you to create a Data Volume Container that other containers can mount from. For example, create a named container that contains directories /var/volume1 and /tmp/volume2. The image will need to contain these directories so a couple of RUN mkdir instructions might be required for you fedora-data image:
-.sp
-.RS
-docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true
-.sp
-docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash
-.RE
-.sp
-.TP
-Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers. And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermidiery container, allowing to abstract the actual data source from users of that data:
-.sp
-.RS
-docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash
-.RE
-.TP
-.sp
-.B Mounting External Volumes
-.TP
-To mount a host directory as a container volume, specify the absolute path to the directory and the absolute path for the container directory separated by a colon:
-.sp
-.RS
-docker run -v /var/db:/data1 -i -t fedora bash
-.RE
-.sp
-.TP
-When using SELinux, be aware that the host has no knowledge of container SELinux policy. Therefore, in the above example, if SELinux policy is enforced, the /var/db directory is not writable to the container. A "Permission Denied" message will occur and an avc: message in the host's syslog.
-.sp
-.TP
-To work around this, at time of writing this man page, the following command needs to be run in order for the proper SELinux policy type label to be attached to the host directory:
-.sp
-.RS
-chcon -Rt svirt_sandbox_file_t /var/db
-.RE
-.sp
-.TP
-Now, writing to the /data1 volume in the container will be allowed and the changes will also be reflected on the host in /var/db.
-.sp
-.SH HISTORY
-March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work.
diff --git a/contrib/man/old-man/docker-tag.1 b/contrib/man/old-man/docker-tag.1
deleted file mode 100644
index df85a1e8c1..0000000000
--- a/contrib/man/old-man/docker-tag.1
+++ /dev/null
@@ -1,49 +0,0 @@
-.\" Process this file with
-.\" nroff -man -Tascii docker-tag.1
-.\"
-.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
-.SH NAME
-docker-tag \- Tag an image in the repository
-.SH SYNOPSIS
-.B docker tag
-[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]
-\fBIMAGE\fR [REGISTRYHOST/][USERNAME/]NAME[:TAG]
-.SH DESCRIPTION
-This will tag an image in the repository.
-.SH "OPTIONS"
-.TP
-.B -f, --force=\fItrue\fR|\fIfalse\fR:
-When set to true, force the tag name. The default is \fIfalse\fR.
-.TP
-.B REGISTRYHOST:
-The hostname of the registry if required. This may also include the port separated by a ':'
-.TP
-.B USERNAME:
-The username or other qualifying identifier for the image.
-.TP
-.B NAME:
-The image name.
-.TP
-.B TAG:
-The tag you are assigning to the image.
-.SH EXAMPLES
-.sp
-.PP
-.B Tagging an image
-.TP
-Here is an example where an image is tagged with the tag 'Version-1.0' :
-.sp
-.RS
-docker tag 0e5574283393 fedora/httpd:Version-1.0
-.RE
-.sp
-.B Tagging an image for an internal repository
-.TP
-To push an image to an internal Registry and not the default docker.io based registry you must tag it with the registry hostname and port (if needed).
-.sp
-.RS
-docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0
-.RE
-.sp
-.SH HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work.
diff --git a/contrib/man/old-man/docker.1 b/contrib/man/old-man/docker.1
deleted file mode 100644
index 95f60891cb..0000000000
--- a/contrib/man/old-man/docker.1
+++ /dev/null
@@ -1,172 +0,0 @@
-.\" Process this file with
-.\" nroff -man -Tascii docker.1
-.\"
-.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
-.SH NAME
-docker \- Docker image and container command line interface
-.SH SYNOPSIS
-.B docker [OPTIONS] [COMMAND] [arg...]
-.SH DESCRIPTION
-\fBdocker\fR has two distinct functions. It is used for starting the Docker daemon and to run the CLI (i.e., to command the daemon to manage images, containers etc.) So \fBdocker\fR is both a server as deamon and a client to the daemon through the CLI.
-.sp
-To run the Docker deamon you do not specify any of the commands listed below but must specify the \fB-d\fR option. The other options listed below are for the daemon only.
-.sp
-The Docker CLI has over 30 commands. The commands are listed below and each has its own man page which explain usage and arguements.
-.sp
-To see the man page for a command run \fBman docker <command>\fR.
-.SH "OPTIONS"
-.B \-D=false:
-Enable debug mode
-.TP
-.B\-H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or unix://[/path/to/socket] to use.
-When host=[0.0.0.0], port=[2375] or path
-=[/var/run/docker.sock] is omitted, default values are used.
-.TP
-.B \-\-api-enable-cors=false
-Enable CORS headers in the remote API
-.TP
-.B \-b=""
-Attach containers to a pre\-existing network bridge; use 'none' to disable container networking
-.TP
-.B \-\-bip=""
-Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b
-.TP
-.B \-d=false
-Enable daemon mode
-.TP
-.B \-\-dns=""
-Force Docker to use specific DNS servers
-.TP
-.B \-g="/var/lib/docker"
-Path to use as the root of the Docker runtime
-.TP
-.B \-\-icc=true
-Enable inter\-container communication
-.TP
-.B \-\-ip="0.0.0.0"
-Default IP address to use when binding container ports
-.TP
-.B \-\-iptables=true
-Disable Docker's addition of iptables rules
-.TP
-.B \-\-mtu=1500
-Set the containers network mtu
-.TP
-.B \-p="/var/run/docker.pid"
-Path to use for daemon PID file
-.TP
-.B \-r=true
-Restart previously running containers
-.TP
-.B \-s=""
-Force the Docker runtime to use a specific storage driver
-.TP
-.B \-v=false
-Print version information and quit
-.SH "COMMANDS"
-.TP
-.B attach
-Attach to a running container
-.TP
-.B build
-Build an image from a Dockerfile
-.TP
-.B commit
-Create a new image from a container's changes
-.TP
-.B cp
-Copy files/folders from the containers filesystem to the host at path
-.TP
-.B diff
-Inspect changes on a container's filesystem
-
-.TP
-.B events
-Get real time events from the server
-.TP
-.B export
-Stream the contents of a container as a tar archive
-.TP
-.B history
-Show the history of an image
-.TP
-.B images
-List images
-.TP
-.B import
-Create a new filesystem image from the contents of a tarball
-.TP
-.B info
-Display system-wide information
-.TP
-.B insert
-Insert a file in an image
-.TP
-.B inspect
-Return low-level information on a container
-.TP
-.B kill
-Kill a running container (which includes the wrapper process and everything inside it)
-.TP
-.B load
-Load an image from a tar archive
-.TP
-.B login
-Register or Login to a Docker registry server
-.TP
-.B logs
-Fetch the logs of a container
-.TP
-.B port
-Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
-.TP
-.B ps
-List containers
-.TP
-.B pull
-Pull an image or a repository from a Docker registry server
-.TP
-.B push
-Push an image or a repository to a Docker registry server
-.TP
-.B restart
-Restart a running container
-.TP
-.B rm
-Remove one or more containers
-.TP
-.B rmi
-Remove one or more images
-.TP
-.B run
-Run a command in a new container
-.TP
-.B save
-Save an image to a tar archive
-.TP
-.B search
-Search for an image in the Docker index
-.TP
-.B start
-Start a stopped container
-.TP
-.B stop
-Stop a running container
-.TP
-.B tag
-Tag an image into a repository
-.TP
-.B top
-Lookup the running processes of a container
-.TP
-.B version
-Show the Docker version information
-.TP
-.B wait
-Block until a container stops, then print its exit code
-.SH EXAMPLES
-.sp
-For specific examples please see the man page for the specific Docker command.
-.sp
-.SH HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on dockier.io source material and internal work.
diff --git a/contrib/mkimage-alpine.sh b/contrib/mkimage-alpine.sh
index 7444ffafb9..0bf328efa9 100755
--- a/contrib/mkimage-alpine.sh
+++ b/contrib/mkimage-alpine.sh
@@ -13,8 +13,8 @@ usage() {
}
tmp() {
- TMP=$(mktemp -d /tmp/alpine-docker-XXXXXXXXXX)
- ROOTFS=$(mktemp -d /tmp/alpine-docker-rootfs-XXXXXXXXXX)
+ TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX)
+ ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX)
trap "rm -rf $TMP $ROOTFS" EXIT TERM INT
}
diff --git a/contrib/mkimage-arch.sh b/contrib/mkimage-arch.sh
index dc21067473..1f52cbc1a1 100755
--- a/contrib/mkimage-arch.sh
+++ b/contrib/mkimage-arch.sh
@@ -9,31 +9,13 @@ hash pacstrap &>/dev/null || {
exit 1
}
-hash expect &>/dev/null || {
- echo "Could not find expect. Run pacman -S expect"
- exit 1
-}
-
-ROOTFS=$(mktemp -d /tmp/rootfs-archlinux-XXXXXXXXXX)
+ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX)
chmod 755 $ROOTFS
# packages to ignore for space savings
PKGIGNORE=linux,jfsutils,lvm2,cryptsetup,groff,man-db,man-pages,mdadm,pciutils,pcmciautils,reiserfsprogs,s-nail,xfsprogs
-expect <<EOF
- set timeout 60
- set send_slow {1 1}
- spawn pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
- expect {
- "Install anyway?" { send n\r; exp_continue }
- "(default=all)" { send \r; exp_continue }
- "Proceed with installation?" { send "\r"; exp_continue }
- "skip the above package" {send "y\r"; exp_continue }
- "checking" { exp_continue }
- "loading" { exp_continue }
- "installing" { exp_continue }
- }
-EOF
+pacstrap -C ./mkimage-arch-pacman.conf -c -d -G -i $ROOTFS base haveged --ignore $PKGIGNORE
arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate archlinux"
arch-chroot $ROOTFS /bin/sh -c "ln -s /usr/share/zoneinfo/UTC /etc/localtime"
diff --git a/contrib/mkimage-busybox.sh b/contrib/mkimage-busybox.sh
index cbaa567834..b11a6bb265 100755
--- a/contrib/mkimage-busybox.sh
+++ b/contrib/mkimage-busybox.sh
@@ -14,7 +14,7 @@ BUSYBOX=$(which busybox)
}
set -e
-ROOTFS=/tmp/rootfs-busybox-$$-$RANDOM
+ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM
mkdir $ROOTFS
cd $ROOTFS
diff --git a/contrib/mkimage-crux.sh b/contrib/mkimage-crux.sh
index 074c334bba..3f0bdcae3c 100755
--- a/contrib/mkimage-crux.sh
+++ b/contrib/mkimage-crux.sh
@@ -14,9 +14,9 @@ die () {
ISO=${1}
-ROOTFS=$(mktemp -d /tmp/rootfs-crux-XXXXXXXXXX)
-CRUX=$(mktemp -d /tmp/crux-XXXXXXXXXX)
-TMP=$(mktemp -d /tmp/XXXXXXXXXX)
+ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX)
+CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX)
+TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX)
VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/')
diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh
index 808f393549..0a3df140db 100755
--- a/contrib/mkimage-debootstrap.sh
+++ b/contrib/mkimage-debootstrap.sh
@@ -118,7 +118,7 @@ fi
# will be filled in later, if [ -z "$skipDetection" ]
lsbDist=''
-target="/tmp/docker-rootfs-debootstrap-$suite-$$-$RANDOM"
+target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
returnTo="$(pwd -P)"
diff --git a/contrib/mkimage-rinse.sh b/contrib/mkimage-rinse.sh
index 0692ae1794..69a8bc8fe6 100755
--- a/contrib/mkimage-rinse.sh
+++ b/contrib/mkimage-rinse.sh
@@ -39,7 +39,7 @@ if [ ! "$repo" ] || [ ! "$distro" ]; then
exit 1
fi
-target="/tmp/docker-rootfs-rinse-$distro-$$-$RANDOM"
+target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM"
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
returnTo="$(pwd -P)"
diff --git a/contrib/mkimage-unittest.sh b/contrib/mkimage-unittest.sh
index a33f238845..feebb17b0e 100755
--- a/contrib/mkimage-unittest.sh
+++ b/contrib/mkimage-unittest.sh
@@ -15,7 +15,7 @@ SOCAT=$(which socat)
shopt -s extglob
set -ex
-ROOTFS=`mktemp -d /tmp/rootfs-busybox.XXXXXXXXXX`
+ROOTFS=`mktemp -d ${TMPDIR:-/var/tmp}/rootfs-busybox.XXXXXXXXXX`
trap "rm -rf $ROOTFS" INT QUIT TERM
cd $ROOTFS
diff --git a/contrib/mkimage.sh b/contrib/mkimage.sh
index db4815c204..803d1627df 100755
--- a/contrib/mkimage.sh
+++ b/contrib/mkimage.sh
@@ -6,9 +6,11 @@ mkimg="$(basename "$0")"
usage() {
echo >&2 "usage: $mkimg [-d dir] [-t tag] script [script-args]"
echo >&2 " ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie"
- echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal trusty"
+ echo >&2 " $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components main,universe trusty"
echo >&2 " $mkimg -t someuser/busybox busybox-static"
echo >&2 " $mkimg -t someuser/centos:5 rinse --distribution centos-5"
+ echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4"
+ echo >&2 " $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/"
exit 1
}
@@ -48,7 +50,7 @@ fi
delDir=
if [ -z "$dir" ]; then
- dir="$(mktemp -d ${TMPDIR:-/tmp}/docker-mkimage.XXXXXXXXXX)"
+ dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)"
delDir=1
fi
diff --git a/contrib/mkimage/.febootstrap-minimize b/contrib/mkimage/.febootstrap-minimize
index 7dab4eb8b5..8a71f5ed67 100755
--- a/contrib/mkimage/.febootstrap-minimize
+++ b/contrib/mkimage/.febootstrap-minimize
@@ -13,7 +13,7 @@ shift
# docs
rm -rf usr/share/{man,doc,info,gnome/help}
# cracklib
- #rm -rf usr/share/cracklib
+ rm -rf usr/share/cracklib
# i18n
rm -rf usr/share/i18n
# yum cache
diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap
index 4747a84d31..96d22ddddc 100755
--- a/contrib/mkimage/debootstrap
+++ b/contrib/mkimage/debootstrap
@@ -23,9 +23,14 @@ shift
# now for some Docker-specific tweaks
# prevent init scripts from running during install/update
-echo >&2 "+ cat > '$rootfsDir/usr/sbin/policy-rc.d'"
+echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'"
cat > "$rootfsDir/usr/sbin/policy-rc.d" <<'EOF'
#!/bin/sh
+
+# For most Docker users, "apt-get install" only happens during "docker build",
+# where starting services doesn't work and often fails in humorous ways. This
+# prevents those failures by stopping the services from attempting to start.
+
exit 101
EOF
chmod +x "$rootfsDir/usr/sbin/policy-rc.d"
@@ -34,17 +39,25 @@ chmod +x "$rootfsDir/usr/sbin/policy-rc.d"
(
set -x
chroot "$rootfsDir" dpkg-divert --local --rename --add /sbin/initctl
- ln -sf /bin/true "$rootfsDir/sbin/initctl"
+ cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl"
+ sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl"
)
-# shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
+# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB)
( set -x; chroot "$rootfsDir" apt-get clean )
# Ubuntu 10.04 sucks... :)
if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then
# force dpkg not to call sync() after package extraction (speeding up installs)
echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'"
- echo 'force-unsafe-io' > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup"
+ cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF'
+ # For most Docker users, package installs happen during "docker build", which
+ # doesn't survive power loss and gets restarted clean afterwards anyhow, so
+ # this minor tweak gives us a nice speedup (much nicer on spinning disks,
+ # obviously).
+
+ force-unsafe-io
+ EOF
fi
if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
@@ -52,16 +65,36 @@ if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'"
cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF
+ # Since for most Docker users, package installs happen in "docker build" steps,
+ # they essentially become individual layers due to the way Docker handles
+ # layering, especially using CoW filesystems. What this means for us is that
+ # the caches that APT keeps end up just wasting space in those layers, making
+ # our layers unnecessarily large (especially since we'll normally never use
+ # these caches again and will instead just "docker build" again and make a brand
+ # new image).
+
+ # Ideally, these would just be invoking "apt-get clean", but in our testing,
+ # that ended up being cyclic and we got stuck on APT's lock, so we get this fun
+ # creation that's essentially just "apt-get clean".
DPkg::Post-Invoke { ${aptGetClean} };
APT::Update::Post-Invoke { ${aptGetClean} };
Dir::Cache::pkgcache "";
Dir::Cache::srcpkgcache "";
+
+ # Note that we do realize this isn't the ideal way to do this, and are always
+ # open to better suggestions (https://github.com/dotcloud/docker/issues).
EOF
# remove apt-cache translations for fast "apt-get update"
- echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'"
- echo 'Acquire::Languages "none";' > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages"
+ echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'"
+ cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF'
+ # In Docker, we don't often need the "Translations" files, so we're just wasting
+ # time and space by downloading them, and this inhibits that. For users that do
+ # need them, it's a simple matter to delete this file and "apt-get update". :)
+
+ Acquire::Languages "none";
+ EOF
fi
if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
@@ -76,39 +109,53 @@ if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then
lsbDist='Debian'
fi
+ # normalize to lowercase for easier matching
+ lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')"
case "$lsbDist" in
- debian|Debian)
+ debian)
# updates and security!
if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then
(
set -x
- sed -i "p; s/ $suite main$/ ${suite}-updates main/" "$rootfsDir/etc/apt/sources.list"
+ sed -i "
+ p;
+ s/ $suite / ${suite}-updates /
+ " "$rootfsDir/etc/apt/sources.list"
echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list"
+ # LTS
+ if [ "$suite" = 'squeeze' ]; then
+ head -1 "$rootfsDir/etc/apt/sources.list" \
+ | sed "s/ $suite / ${suite}-lts /" \
+ >> "$rootfsDir/etc/apt/sources.list"
+ fi
)
fi
;;
- ubuntu|Ubuntu)
- # add the universe, updates, and security repositories
+ ubuntu)
+ # add the updates and security repositories
(
set -x
sed -i "
- s/ $suite main$/ $suite main universe/; p;
- s/ $suite main/ ${suite}-updates main/; p;
- s/ $suite-updates main/ ${suite}-security main/
+ p;
+ s/ $suite / ${suite}-updates /; p;
+ s/ $suite-updates / ${suite}-security /
" "$rootfsDir/etc/apt/sources.list"
)
;;
- tanglu|Tanglu)
+ tanglu)
# add the updates repository
if [ "$suite" != 'devel' ]; then
(
set -x
- sed -i "p; s/ $suite main$/ ${suite}-updates main/" "$rootfsDir/etc/apt/sources.list"
+ sed -i "
+ p;
+ s/ $suite / ${suite}-updates /
+ " "$rootfsDir/etc/apt/sources.list"
)
fi
;;
- steamos|SteamOS)
- # add contrib and non-free
+ steamos)
+ # add contrib and non-free if "main" is the only component
(
set -x
sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list"
@@ -117,9 +164,13 @@ if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then
esac
fi
-# make sure we're fully up-to-date, too
(
set -x
- chroot "$rootfsDir" apt-get update
- chroot "$rootfsDir" apt-get dist-upgrade -y
+
+ # make sure we're fully up-to-date
+ chroot "$rootfsDir" bash -c 'apt-get update && apt-get dist-upgrade -y'
+
+ # delete all the apt list files since they're big and get stale quickly
+ rm -rf "$rootfsDir/var/lib/apt/lists"/*
+ # this forces "apt-get update" in dependent images, which is also good
)
diff --git a/contrib/mkimage/mageia-urpmi b/contrib/mkimage/mageia-urpmi
new file mode 100755
index 0000000000..93fb289cac
--- /dev/null
+++ b/contrib/mkimage/mageia-urpmi
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+#
+# Needs to be run from Mageia 4 or greater for kernel support for docker.
+#
+# Mageia 4 does not have docker available in official repos, so please
+# install and run the docker binary manually.
+#
+# Tested working versions are for Mageia 2 onwards (inc. cauldron).
+#
+set -e
+
+rootfsDir="$1"
+shift
+
+optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@")
+eval set -- "$optTemp"
+unset optTemp
+
+installversion=
+mirror=
+while true; do
+ case "$1" in
+ -v|--version) installversion="$2" ; shift 2 ;;
+ -m|--mirror) mirror="$2" ; shift 2 ;;
+ --) shift ; break ;;
+ esac
+done
+
+if [ -z $installversion ]; then
+ # Attempt to match host version
+ if [ -r /etc/mageia-release ]; then
+ installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)"
+ else
+ echo "Error: no version supplied and unable to detect host mageia version"
+ exit 1
+ fi
+fi
+
+if [ -z $mirror ]; then
+ # No mirror provided, default to mirrorlist
+ mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list"
+fi
+
+(
+ set -x
+ urpmi.addmedia --distrib \
+ $mirror \
+ --urpmi-root "$rootfsDir"
+ urpmi basesystem-minimal urpmi \
+ --auto \
+ --no-suggests \
+ --urpmi-root "$rootfsDir" \
+ --root "$rootfsDir"
+)
+
+"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir"
+
+if [ -d "$rootfsDir/etc/sysconfig" ]; then
+ # allow networking init scripts inside the container to work without extra steps
+ echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network"
+fi
diff --git a/daemon/container.go b/daemon/container.go
index 2fd827eb9c..30337de6b5 100644
--- a/daemon/container.go
+++ b/daemon/container.go
@@ -53,7 +53,7 @@ type Container struct {
Args []string
Config *runconfig.Config
- State State
+ State *State
Image string
NetworkSettings *NetworkSettings
@@ -74,8 +74,7 @@ type Container struct {
daemon *Daemon
MountLabel, ProcessLabel string
- waitLock chan struct{}
- Volumes map[string]string
+ Volumes map[string]string
// Store rw/ro in a separate structure to preserve reverse-compatibility on-disk.
// Easier than migrating older container configs :)
VolumesRW map[string]bool
@@ -284,7 +283,6 @@ func (container *Container) Start() (err error) {
if err := container.startLoggingToDisk(); err != nil {
return err
}
- container.waitLock = make(chan struct{})
return container.waitForStart()
}
@@ -293,7 +291,7 @@ func (container *Container) Run() error {
if err := container.Start(); err != nil {
return err
}
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
return nil
}
@@ -307,7 +305,7 @@ func (container *Container) Output() (output []byte, err error) {
return nil, err
}
output, err = ioutil.ReadAll(pipe)
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
return output, err
}
@@ -424,8 +422,17 @@ func (container *Container) allocateNetwork() error {
if container.Config.ExposedPorts != nil {
portSpecs = container.Config.ExposedPorts
}
+
if container.hostConfig.PortBindings != nil {
- bindings = container.hostConfig.PortBindings
+ for p, b := range container.hostConfig.PortBindings {
+ bindings[p] = []nat.PortBinding{}
+ for _, bb := range b {
+ bindings[p] = append(bindings[p], nat.PortBinding{
+ HostIp: bb.HostIp,
+ HostPort: bb.HostPort,
+ })
+ }
+ }
}
container.NetworkSettings.PortMapping = nil
@@ -467,6 +474,7 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
if err != nil {
utils.Errorf("Error running container: %s", err)
}
+ container.State.SetStopped(exitCode)
// Cleanup
container.cleanup()
@@ -475,28 +483,17 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
if container.Config.OpenStdin {
container.stdin, container.stdinPipe = io.Pipe()
}
-
if container.daemon != nil && container.daemon.srv != nil {
container.daemon.srv.LogEvent("die", container.ID, container.daemon.repositories.ImageName(container.Image))
}
-
- close(container.waitLock)
-
if container.daemon != nil && container.daemon.srv != nil && container.daemon.srv.IsRunning() {
- container.State.SetStopped(exitCode)
-
- // FIXME: there is a race condition here which causes this to fail during the unit tests.
- // If another goroutine was waiting for Wait() to return before removing the container's root
- // from the filesystem... At this point it may already have done so.
- // This is because State.setStopped() has already been called, and has caused Wait()
- // to return.
- // FIXME: why are we serializing running state to disk in the first place?
- //log.Printf("%s: Failed to dump configuration to the disk: %s", container.ID, err)
+ // FIXME: here is race condition between two RUN instructions in Dockerfile
+ // because they share same runconfig and change image. Must be fixed
+ // in server/buildfile.go
if err := container.ToDisk(); err != nil {
- utils.Errorf("Error dumping container state to disk: %s\n", err)
+ utils.Errorf("Error dumping container %s state to disk: %s\n", container.ID, err)
}
}
-
return err
}
@@ -532,6 +529,7 @@ func (container *Container) cleanup() {
}
func (container *Container) KillSig(sig int) error {
+ utils.Debugf("Sending %d to %s", sig, container.ID)
container.Lock()
defer container.Unlock()
@@ -577,9 +575,9 @@ func (container *Container) Kill() error {
}
// 2. Wait for the process to die, in last resort, try to kill the process directly
- if err := container.WaitTimeout(10 * time.Second); err != nil {
+ if _, err := container.State.WaitStop(10 * time.Second); err != nil {
// Ensure that we don't kill ourselves
- if pid := container.State.Pid; pid != 0 {
+ if pid := container.State.GetPid(); pid != 0 {
log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
if err := syscall.Kill(pid, 9); err != nil {
return err
@@ -587,7 +585,7 @@ func (container *Container) Kill() error {
}
}
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
return nil
}
@@ -605,11 +603,11 @@ func (container *Container) Stop(seconds int) error {
}
// 2. Wait for the process to exit on its own
- if err := container.WaitTimeout(time.Duration(seconds) * time.Second); err != nil {
+ if _, err := container.State.WaitStop(time.Duration(seconds) * time.Second); err != nil {
log.Printf("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
// 3. If it doesn't, then send SIGKILL
if err := container.Kill(); err != nil {
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
return err
}
}
@@ -630,12 +628,6 @@ func (container *Container) Restart(seconds int) error {
return container.Start()
}
-// Wait blocks until the container stops running, then returns its exit code.
-func (container *Container) Wait() int {
- <-container.waitLock
- return container.State.GetExitCode()
-}
-
func (container *Container) Resize(h, w int) error {
return container.command.Terminal.Resize(h, w)
}
@@ -678,21 +670,6 @@ func (container *Container) Export() (archive.Archive, error) {
nil
}
-func (container *Container) WaitTimeout(timeout time.Duration) error {
- done := make(chan bool, 1)
- go func() {
- container.Wait()
- done <- true
- }()
-
- select {
- case <-time.After(timeout):
- return fmt.Errorf("Timed Out")
- case <-done:
- return nil
- }
-}
-
func (container *Container) Mount() error {
return container.daemon.Mount(container)
}
@@ -813,7 +790,7 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
basePath = path.Dir(basePath)
}
- archive, err := archive.TarFilter(basePath, &archive.TarOptions{
+ archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
Includes: filter,
})
@@ -1103,9 +1080,7 @@ func (container *Container) startLoggingToDisk() error {
}
func (container *Container) waitForStart() error {
- callbackLock := make(chan struct{})
callback := func(command *execdriver.Command) {
- container.State.SetRunning(command.Pid())
if command.Tty {
// The callback is called after the process Start()
// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlace
@@ -1114,19 +1089,26 @@ func (container *Container) waitForStart() error {
c.Close()
}
}
+ container.State.SetRunning(command.Pid())
if err := container.ToDisk(); err != nil {
utils.Debugf("%s", err)
}
- close(callbackLock)
}
// We use a callback here instead of a goroutine and an chan for
// syncronization purposes
cErr := utils.Go(func() error { return container.monitor(callback) })
+ waitStart := make(chan struct{})
+
+ go func() {
+ container.State.WaitRunning(-1 * time.Second)
+ close(waitStart)
+ }()
+
// Start should not return until the process is actually running
select {
- case <-callbackLock:
+ case <-waitStart:
case err := <-cErr:
return err
}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index c21ba3a38c..a94a4458ad 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -31,6 +31,7 @@ import (
"github.com/dotcloud/docker/pkg/namesgenerator"
"github.com/dotcloud/docker/pkg/networkfs/resolvconf"
"github.com/dotcloud/docker/pkg/sysinfo"
+ "github.com/dotcloud/docker/pkg/truncindex"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/utils"
)
@@ -87,7 +88,7 @@ type Daemon struct {
containers *contStore
graph *graph.Graph
repositories *graph.TagStore
- idIndex *utils.TruncIndex
+ idIndex *truncindex.TruncIndex
sysInfo *sysinfo.SysInfo
volumes *graph.Graph
srv Server
@@ -96,6 +97,7 @@ type Daemon struct {
containerGraph *graphdb.Database
driver graphdriver.Driver
execDriver execdriver.Driver
+ Sockets []string
}
// Install installs daemon capabilities to eng.
@@ -136,7 +138,7 @@ func (daemon *Daemon) containerRoot(id string) string {
// Load reads the contents of a container from disk
// This is typically done at startup.
func (daemon *Daemon) load(id string) (*Container, error) {
- container := &Container{root: daemon.containerRoot(id)}
+ container := &Container{root: daemon.containerRoot(id), State: NewState()}
if err := container.FromDisk(); err != nil {
return nil, err
}
@@ -180,11 +182,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
// don't update the Suffixarray if we're starting up
// we'll waste time if we update it for every container
- if updateSuffixarray {
- daemon.idIndex.Add(container.ID)
- } else {
- daemon.idIndex.AddWithoutSuffixarrayUpdate(container.ID)
- }
+ daemon.idIndex.Add(container.ID)
// FIXME: if the container is supposed to be running but is not, auto restart it?
// if so, then we need to restart monitor and init a new lock
@@ -211,6 +209,7 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
}
daemon.execDriver.Terminate(cmd)
}
+
if err := container.Unmount(); err != nil {
utils.Debugf("unmount error %s", err)
}
@@ -221,29 +220,22 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool, con
info := daemon.execDriver.Info(container.ID)
if !info.IsRunning() {
utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
+
+ utils.Debugf("Marking as stopped")
+
+ container.State.SetStopped(-127)
+ if err := container.ToDisk(); err != nil {
+ return err
+ }
+
if daemon.config.AutoRestart {
utils.Debugf("Marking as restarting")
- if err := container.Unmount(); err != nil {
- utils.Debugf("restart unmount error %s", err)
- }
if containersToStart != nil {
*containersToStart = append(*containersToStart, container)
}
- } else {
- utils.Debugf("Marking as stopped")
- container.State.SetStopped(-127)
- if err := container.ToDisk(); err != nil {
- return err
- }
}
}
- } else {
- // When the container is not running, we still initialize the waitLock
- // chan and close it. Receiving on nil chan blocks whereas receiving on a
- // closed chan does not. In this case we do not want to block.
- container.waitLock = make(chan struct{})
- close(container.waitLock)
}
return nil
}
@@ -375,8 +367,6 @@ func (daemon *Daemon) restore() error {
}
}
- daemon.idIndex.UpdateSuffixarray()
-
for _, container := range containersToStart {
utils.Debugf("Starting container %d", container.ID)
if err := container.Start(); err != nil {
@@ -592,6 +582,7 @@ func (daemon *Daemon) newContainer(name string, config *runconfig.Config, img *i
Name: name,
Driver: daemon.driver.String(),
ExecDriver: daemon.execDriver.Name(),
+ State: NewState(),
}
container.root = daemon.containerRoot(container.ID)
@@ -629,8 +620,12 @@ func (daemon *Daemon) createRootfs(container *Container, img *image.Image) error
// Commit creates a new filesystem image from the current state of a container.
// The image can optionally be tagged into a repository
-func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) {
- // FIXME: freeze the container before copying it to avoid data corruption?
+func (daemon *Daemon) Commit(container *Container, repository, tag, comment, author string, pause bool, config *runconfig.Config) (*image.Image, error) {
+ if pause {
+ container.Pause()
+ defer container.Unpause()
+ }
+
if err := container.Mount(); err != nil {
return nil, err
}
@@ -841,7 +836,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION))
sysInitPath := utils.DockerInitPath(localCopy)
if sysInitPath == "" {
- return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.io/en/latest/contributing/devenvironment for official build instructions.")
+ return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.com/contributing/devenvironment for official build instructions.")
}
if sysInitPath != localCopy {
@@ -869,7 +864,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
containers: &contStore{s: make(map[string]*Container)},
graph: g,
repositories: repositories,
- idIndex: utils.NewTruncIndex([]string{}),
+ idIndex: truncindex.NewTruncIndex([]string{}),
sysInfo: sysInfo,
volumes: volumes,
config: config,
@@ -878,6 +873,7 @@ func NewDaemonFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*D
sysInitPath: sysInitPath,
execDriver: ed,
eng: eng,
+ Sockets: config.Sockets,
}
if err := daemon.checkLocaldns(); err != nil {
@@ -903,7 +899,7 @@ func (daemon *Daemon) shutdown() error {
if err := c.KillSig(15); err != nil {
utils.Debugf("kill 15 error for %s - %s", c.ID, err)
}
- c.Wait()
+ c.State.WaitStop(-1 * time.Second)
utils.Debugf("container stopped %s", c.ID)
}()
}
diff --git a/daemon/execdriver/MAINTAINERS b/daemon/execdriver/MAINTAINERS
index 1e998f8ac1..68a97d2fc2 100644
--- a/daemon/execdriver/MAINTAINERS
+++ b/daemon/execdriver/MAINTAINERS
@@ -1 +1,2 @@
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
+Victor Vieux <vieux@docker.com> (@vieux)
diff --git a/daemon/execdriver/lxc/lxc_init_linux.go b/daemon/execdriver/lxc/lxc_init_linux.go
index 3b15d096af..1fd497e9aa 100644
--- a/daemon/execdriver/lxc/lxc_init_linux.go
+++ b/daemon/execdriver/lxc/lxc_init_linux.go
@@ -29,7 +29,7 @@ func finalizeNamespace(args *execdriver.InitArgs) error {
if !args.Privileged {
// drop capabilities in bounding set before changing user
- if err := capabilities.DropBoundingSet(container); err != nil {
+ if err := capabilities.DropBoundingSet(container.Capabilities); err != nil {
return fmt.Errorf("drop bounding set %s", err)
}
@@ -49,7 +49,7 @@ func finalizeNamespace(args *execdriver.InitArgs) error {
}
// drop all other capabilities
- if err := capabilities.DropCapabilities(container); err != nil {
+ if err := capabilities.DropCapabilities(container.Capabilities); err != nil {
return fmt.Errorf("drop capabilities %s", err)
}
}
diff --git a/daemon/execdriver/native/configuration/parse.go b/daemon/execdriver/native/configuration/parse.go
index 77d4b297cb..8fb1b452b9 100644
--- a/daemon/execdriver/native/configuration/parse.go
+++ b/daemon/execdriver/native/configuration/parse.go
@@ -11,7 +11,7 @@ import (
"github.com/dotcloud/docker/pkg/units"
)
-type Action func(*libcontainer.Container, interface{}, string) error
+type Action func(*libcontainer.Config, interface{}, string) error
var actions = map[string]Action{
"cap.add": addCap, // add a cap
@@ -35,7 +35,7 @@ var actions = map[string]Action{
"fs.readonly": readonlyFs, // make the rootfs of the container read only
}
-func cpusetCpus(container *libcontainer.Container, context interface{}, value string) error {
+func cpusetCpus(container *libcontainer.Config, context interface{}, value string) error {
if container.Cgroups == nil {
return fmt.Errorf("cannot set cgroups when they are disabled")
}
@@ -44,7 +44,7 @@ func cpusetCpus(container *libcontainer.Container, context interface{}, value st
return nil
}
-func systemdSlice(container *libcontainer.Container, context interface{}, value string) error {
+func systemdSlice(container *libcontainer.Config, context interface{}, value string) error {
if container.Cgroups == nil {
return fmt.Errorf("cannot set slice when cgroups are disabled")
}
@@ -53,12 +53,12 @@ func systemdSlice(container *libcontainer.Container, context interface{}, value
return nil
}
-func apparmorProfile(container *libcontainer.Container, context interface{}, value string) error {
- container.Context["apparmor_profile"] = value
+func apparmorProfile(container *libcontainer.Config, context interface{}, value string) error {
+ container.AppArmorProfile = value
return nil
}
-func cpuShares(container *libcontainer.Container, context interface{}, value string) error {
+func cpuShares(container *libcontainer.Config, context interface{}, value string) error {
if container.Cgroups == nil {
return fmt.Errorf("cannot set cgroups when they are disabled")
}
@@ -70,7 +70,7 @@ func cpuShares(container *libcontainer.Container, context interface{}, value str
return nil
}
-func memory(container *libcontainer.Container, context interface{}, value string) error {
+func memory(container *libcontainer.Config, context interface{}, value string) error {
if container.Cgroups == nil {
return fmt.Errorf("cannot set cgroups when they are disabled")
}
@@ -83,7 +83,7 @@ func memory(container *libcontainer.Container, context interface{}, value string
return nil
}
-func memoryReservation(container *libcontainer.Container, context interface{}, value string) error {
+func memoryReservation(container *libcontainer.Config, context interface{}, value string) error {
if container.Cgroups == nil {
return fmt.Errorf("cannot set cgroups when they are disabled")
}
@@ -96,7 +96,7 @@ func memoryReservation(container *libcontainer.Container, context interface{}, v
return nil
}
-func memorySwap(container *libcontainer.Container, context interface{}, value string) error {
+func memorySwap(container *libcontainer.Config, context interface{}, value string) error {
if container.Cgroups == nil {
return fmt.Errorf("cannot set cgroups when they are disabled")
}
@@ -108,12 +108,12 @@ func memorySwap(container *libcontainer.Container, context interface{}, value st
return nil
}
-func addCap(container *libcontainer.Container, context interface{}, value string) error {
+func addCap(container *libcontainer.Config, context interface{}, value string) error {
container.Capabilities = append(container.Capabilities, value)
return nil
}
-func dropCap(container *libcontainer.Container, context interface{}, value string) error {
+func dropCap(container *libcontainer.Config, context interface{}, value string) error {
// If the capability is specified multiple times, remove all instances.
for i, capability := range container.Capabilities {
if capability == value {
@@ -125,27 +125,27 @@ func dropCap(container *libcontainer.Container, context interface{}, value strin
return nil
}
-func addNamespace(container *libcontainer.Container, context interface{}, value string) error {
+func addNamespace(container *libcontainer.Config, context interface{}, value string) error {
container.Namespaces[value] = true
return nil
}
-func dropNamespace(container *libcontainer.Container, context interface{}, value string) error {
+func dropNamespace(container *libcontainer.Config, context interface{}, value string) error {
container.Namespaces[value] = false
return nil
}
-func readonlyFs(container *libcontainer.Container, context interface{}, value string) error {
+func readonlyFs(container *libcontainer.Config, context interface{}, value string) error {
switch value {
case "1", "true":
- container.ReadonlyFs = true
+ container.MountConfig.ReadonlyFs = true
default:
- container.ReadonlyFs = false
+ container.MountConfig.ReadonlyFs = false
}
return nil
}
-func joinNetNamespace(container *libcontainer.Container, context interface{}, value string) error {
+func joinNetNamespace(container *libcontainer.Config, context interface{}, value string) error {
var (
running = context.(map[string]*exec.Cmd)
cmd = running[value]
@@ -154,28 +154,13 @@ func joinNetNamespace(container *libcontainer.Container, context interface{}, va
if cmd == nil || cmd.Process == nil {
return fmt.Errorf("%s is not a valid running container to join", value)
}
+
nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
container.Networks = append(container.Networks, &libcontainer.Network{
- Type: "netns",
- Context: libcontainer.Context{
- "nspath": nspath,
- },
+ Type: "netns",
+ NsPath: nspath,
})
- return nil
-}
-func vethMacAddress(container *libcontainer.Container, context interface{}, value string) error {
- var veth *libcontainer.Network
- for _, network := range container.Networks {
- if network.Type == "veth" {
- veth = network
- break
- }
- }
- if veth == nil {
- return fmt.Errorf("not veth configured for container")
- }
- veth.Context["mac"] = value
return nil
}
@@ -183,7 +168,7 @@ func vethMacAddress(container *libcontainer.Container, context interface{}, valu
// container's default configuration.
//
// TODO: this can be moved to a general utils or parser in pkg
-func ParseConfiguration(container *libcontainer.Container, running map[string]*exec.Cmd, opts []string) error {
+func ParseConfiguration(container *libcontainer.Config, running map[string]*exec.Cmd, opts []string) error {
for _, opt := range opts {
kv := strings.SplitN(opt, "=", 2)
if len(kv) < 2 {
diff --git a/daemon/execdriver/native/configuration/parse_test.go b/daemon/execdriver/native/configuration/parse_test.go
index c561f5e2d3..0401d7b37e 100644
--- a/daemon/execdriver/native/configuration/parse_test.go
+++ b/daemon/execdriver/native/configuration/parse_test.go
@@ -3,7 +3,7 @@ package configuration
import (
"testing"
- "github.com/docker/libcontainer"
+ "github.com/docker/libcontainer/security/capabilities"
"github.com/dotcloud/docker/daemon/execdriver/native/template"
)
@@ -25,14 +25,14 @@ func TestSetReadonlyRootFs(t *testing.T) {
}
)
- if container.ReadonlyFs {
+ if container.MountConfig.ReadonlyFs {
t.Fatal("container should not have a readonly rootfs by default")
}
if err := ParseConfiguration(container, nil, opts); err != nil {
t.Fatal(err)
}
- if !container.ReadonlyFs {
+ if !container.MountConfig.ReadonlyFs {
t.Fatal("container should have a readonly rootfs")
}
}
@@ -84,8 +84,9 @@ func TestAppArmorProfile(t *testing.T) {
if err := ParseConfiguration(container, nil, opts); err != nil {
t.Fatal(err)
}
- if expected := "koye-the-protector"; container.Context["apparmor_profile"] != expected {
- t.Fatalf("expected profile %s got %s", expected, container.Context["apparmor_profile"])
+
+ if expected := "koye-the-protector"; container.AppArmorProfile != expected {
+ t.Fatalf("expected profile %s got %s", expected, container.AppArmorProfile)
}
}
@@ -165,7 +166,7 @@ func TestDropCap(t *testing.T) {
}
)
// enabled all caps like in privileged mode
- container.Capabilities = libcontainer.GetAllCapabilities()
+ container.Capabilities = capabilities.GetAllCapabilities()
if err := ParseConfiguration(container, nil, opts); err != nil {
t.Fatal(err)
}
diff --git a/daemon/execdriver/native/create.go b/daemon/execdriver/native/create.go
index b19620514e..f28507b046 100644
--- a/daemon/execdriver/native/create.go
+++ b/daemon/execdriver/native/create.go
@@ -9,6 +9,8 @@ import (
"github.com/docker/libcontainer"
"github.com/docker/libcontainer/apparmor"
"github.com/docker/libcontainer/devices"
+ "github.com/docker/libcontainer/mount"
+ "github.com/docker/libcontainer/security/capabilities"
"github.com/dotcloud/docker/daemon/execdriver"
"github.com/dotcloud/docker/daemon/execdriver/native/configuration"
"github.com/dotcloud/docker/daemon/execdriver/native/template"
@@ -16,7 +18,7 @@ import (
// createContainer populates and configures the container type with the
// data provided by the execdriver.Command
-func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container, error) {
+func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, error) {
container := template.New()
container.Hostname = getEnv("HOSTNAME", c.Env)
@@ -26,65 +28,71 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container
container.Env = c.Env
container.Cgroups.Name = c.ID
container.Cgroups.AllowedDevices = c.AllowedDevices
- container.DeviceNodes = c.AutoCreatedDevices
+ container.MountConfig.DeviceNodes = c.AutoCreatedDevices
+
// check to see if we are running in ramdisk to disable pivot root
- container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
- container.Context["restrictions"] = "true"
+ container.MountConfig.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
+ container.RestrictSys = true
if err := d.createNetwork(container, c); err != nil {
return nil, err
}
+
if c.Privileged {
if err := d.setPrivileged(container); err != nil {
return nil, err
}
}
+
if err := d.setupCgroups(container, c); err != nil {
return nil, err
}
+
if err := d.setupMounts(container, c); err != nil {
return nil, err
}
+
if err := d.setupLabels(container, c); err != nil {
return nil, err
}
+
cmds := make(map[string]*exec.Cmd)
d.Lock()
for k, v := range d.activeContainers {
cmds[k] = v.cmd
}
d.Unlock()
+
if err := configuration.ParseConfiguration(container, cmds, c.Config["native"]); err != nil {
return nil, err
}
+
return container, nil
}
-func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.Command) error {
+func (d *driver) createNetwork(container *libcontainer.Config, c *execdriver.Command) error {
if c.Network.HostNetworking {
container.Namespaces["NEWNET"] = false
return nil
}
+
container.Networks = []*libcontainer.Network{
{
Mtu: c.Network.Mtu,
Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0),
Gateway: "localhost",
Type: "loopback",
- Context: libcontainer.Context{},
},
}
if c.Network.Interface != nil {
vethNetwork := libcontainer.Network{
- Mtu: c.Network.Mtu,
- Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
- Gateway: c.Network.Interface.Gateway,
- Type: "veth",
- Context: libcontainer.Context{
- "prefix": "veth",
- "bridge": c.Network.Interface.Bridge,
- },
+ Mtu: c.Network.Mtu,
+ Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
+ Gateway: c.Network.Interface.Gateway,
+ Type: "veth",
+ Bridge: c.Network.Interface.Bridge,
+ VethPrefix: "veth",
}
container.Networks = append(container.Networks, &vethNetwork)
}
@@ -93,6 +101,7 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.
d.Lock()
active := d.activeContainers[c.Network.ContainerID]
d.Unlock()
+
if active == nil || active.cmd.Process == nil {
return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID)
}
@@ -100,34 +109,34 @@ func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.
nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
container.Networks = append(container.Networks, &libcontainer.Network{
- Type: "netns",
- Context: libcontainer.Context{
- "nspath": nspath,
- },
+ Type: "netns",
+ NsPath: nspath,
})
}
+
return nil
}
-func (d *driver) setPrivileged(container *libcontainer.Container) (err error) {
- container.Capabilities = libcontainer.GetAllCapabilities()
+func (d *driver) setPrivileged(container *libcontainer.Config) (err error) {
+ container.Capabilities = capabilities.GetAllCapabilities()
container.Cgroups.AllowAllDevices = true
hostDeviceNodes, err := devices.GetHostDeviceNodes()
if err != nil {
return err
}
- container.DeviceNodes = hostDeviceNodes
+ container.MountConfig.DeviceNodes = hostDeviceNodes
- delete(container.Context, "restrictions")
+ container.RestrictSys = false
if apparmor.IsEnabled() {
- container.Context["apparmor_profile"] = "unconfined"
+ container.AppArmorProfile = "unconfined"
}
+
return nil
}
-func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.Command) error {
+func (d *driver) setupCgroups(container *libcontainer.Config, c *execdriver.Command) error {
if c.Resources != nil {
container.Cgroups.CpuShares = c.Resources.CpuShares
container.Cgroups.Memory = c.Resources.Memory
@@ -135,12 +144,13 @@ func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.C
container.Cgroups.MemorySwap = c.Resources.MemorySwap
container.Cgroups.CpusetCpus = c.Resources.Cpuset
}
+
return nil
}
-func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Command) error {
+func (d *driver) setupMounts(container *libcontainer.Config, c *execdriver.Command) error {
for _, m := range c.Mounts {
- container.Mounts = append(container.Mounts, libcontainer.Mount{
+ container.MountConfig.Mounts = append(container.MountConfig.Mounts, mount.Mount{
Type: "bind",
Source: m.Source,
Destination: m.Destination,
@@ -148,11 +158,13 @@ func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Co
Private: m.Private,
})
}
+
return nil
}
-func (d *driver) setupLabels(container *libcontainer.Container, c *execdriver.Command) error {
- container.Context["process_label"] = c.Config["process_label"][0]
- container.Context["mount_label"] = c.Config["mount_label"][0]
+func (d *driver) setupLabels(container *libcontainer.Config, c *execdriver.Command) error {
+ container.ProcessLabel = c.Config["process_label"][0]
+ container.MountConfig.MountLabel = c.Config["mount_label"][0]
+
return nil
}
diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go
index 840d9fbc41..90333703c5 100644
--- a/daemon/execdriver/native/driver.go
+++ b/daemon/execdriver/native/driver.go
@@ -27,7 +27,7 @@ const (
func init() {
execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error {
- var container *libcontainer.Container
+ var container *libcontainer.Config
f, err := os.Open(filepath.Join(args.Root, "container.json"))
if err != nil {
return err
@@ -54,7 +54,7 @@ func init() {
}
type activeContainer struct {
- container *libcontainer.Container
+ container *libcontainer.Config
cmd *exec.Cmd
}
@@ -83,7 +83,7 @@ func NewDriver(root, initPath string) (*driver, error) {
}
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
- // take the Command and populate the libcontainer.Container from it
+ // take the Command and populate the libcontainer.Config from it
container, err := d.createContainer(c)
if err != nil {
return -1, err
@@ -110,7 +110,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
term := getTerminal(c, pipes)
- return namespaces.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Container, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
+ return namespaces.Exec(container, term, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
// we need to join the rootfs because namespaces will setup the rootfs and chroot
initPath := filepath.Join(c.Rootfs, c.InitPath)
@@ -171,21 +171,30 @@ func (d *driver) Unpause(c *execdriver.Command) error {
func (d *driver) Terminate(p *execdriver.Command) error {
// lets check the start time for the process
- started, err := d.readStartTime(p)
+ state, err := libcontainer.GetState(filepath.Join(d.root, p.ID))
if err != nil {
- // if we don't have the data on disk then we can assume the process is gone
- // because this is only removed after we know the process has stopped
- if os.IsNotExist(err) {
- return nil
+ if !os.IsNotExist(err) {
+ return err
}
- return err
+ // TODO: Remove this part for version 1.2.0
+ // This is added only to ensure smooth upgrades from pre 1.1.0 to 1.1.0
+ data, err := ioutil.ReadFile(filepath.Join(d.root, p.ID, "start"))
+ if err != nil {
+ // if we don't have the data on disk then we can assume the process is gone
+ // because this is only removed after we know the process has stopped
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+ state = &libcontainer.State{InitStartTime: string(data)}
}
currentStartTime, err := system.GetProcessStartTime(p.Process.Pid)
if err != nil {
return err
}
- if started == currentStartTime {
+ if state.InitStartTime == currentStartTime {
err = syscall.Kill(p.Process.Pid, 9)
syscall.Wait4(p.Process.Pid, nil, 0, nil)
}
@@ -194,14 +203,6 @@ func (d *driver) Terminate(p *execdriver.Command) error {
}
-func (d *driver) readStartTime(p *execdriver.Command) (string, error) {
- data, err := ioutil.ReadFile(filepath.Join(d.root, p.ID, "start"))
- if err != nil {
- return "", err
- }
- return string(data), nil
-}
-
func (d *driver) Info(id string) execdriver.Info {
return &info{
ID: id,
@@ -229,7 +230,7 @@ func (d *driver) GetPidsForContainer(id string) ([]int, error) {
return fs.GetPids(c)
}
-func (d *driver) writeContainerFile(container *libcontainer.Container, id string) error {
+func (d *driver) writeContainerFile(container *libcontainer.Config, id string) error {
data, err := json.Marshal(container)
if err != nil {
return err
diff --git a/daemon/execdriver/native/info.go b/daemon/execdriver/native/info.go
index aef2f85c6b..c34d0297b1 100644
--- a/daemon/execdriver/native/info.go
+++ b/daemon/execdriver/native/info.go
@@ -3,6 +3,8 @@ package native
import (
"os"
"path/filepath"
+
+ "github.com/docker/libcontainer"
)
type info struct {
@@ -14,6 +16,11 @@ type info struct {
// pid file for a container. If the file exists then the
// container is currently running
func (i *info) IsRunning() bool {
+ if _, err := libcontainer.GetState(filepath.Join(i.driver.root, i.ID)); err == nil {
+ return true
+ }
+ // TODO: Remove this part for version 1.2.0
+ // This is added only to ensure smooth upgrades from pre 1.1.0 to 1.1.0
if _, err := os.Stat(filepath.Join(i.driver.root, i.ID, "pid")); err == nil {
return true
}
diff --git a/daemon/execdriver/native/template/default_template.go b/daemon/execdriver/native/template/default_template.go
index e2f52f4445..d0894a0c9f 100644
--- a/daemon/execdriver/native/template/default_template.go
+++ b/daemon/execdriver/native/template/default_template.go
@@ -7,8 +7,8 @@ import (
)
// New returns the docker default configuration for libcontainer
-func New() *libcontainer.Container {
- container := &libcontainer.Container{
+func New() *libcontainer.Config {
+ container := &libcontainer.Config{
Capabilities: []string{
"CHOWN",
"DAC_OVERRIDE",
@@ -34,10 +34,12 @@ func New() *libcontainer.Container {
Parent: "docker",
AllowAllDevices: false,
},
- Context: libcontainer.Context{},
+ MountConfig: &libcontainer.MountConfig{},
}
+
if apparmor.IsEnabled() {
- container.Context["apparmor_profile"] = "docker-default"
+ container.AppArmorProfile = "docker-default"
}
+
return container
}
diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go
index eb8ff77cde..0206b92e17 100644
--- a/daemon/graphdriver/aufs/aufs.go
+++ b/daemon/graphdriver/aufs/aufs.go
@@ -295,7 +295,7 @@ func (a *Driver) Put(id string) {
// Returns an archive of the contents for the id
func (a *Driver) Diff(id string) (archive.Archive, error) {
- return archive.TarFilter(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
+ return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{
Compression: archive.Uncompressed,
})
}
diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go
index 992af0e149..7eaa22461f 100644
--- a/daemon/graphdriver/vfs/driver.go
+++ b/daemon/graphdriver/vfs/driver.go
@@ -1,6 +1,7 @@
package vfs
import (
+ "bytes"
"fmt"
"github.com/dotcloud/docker/daemon/graphdriver"
"os"
@@ -35,8 +36,24 @@ func (d *Driver) Cleanup() error {
return nil
}
+func isGNUcoreutils() bool {
+ if stdout, err := exec.Command("cp", "--version").Output(); err == nil {
+ return bytes.Contains(stdout, []byte("GNU coreutils"))
+ }
+
+ return false
+}
+
func copyDir(src, dst string) error {
- if output, err := exec.Command("cp", "-aT", "--reflink=auto", src, dst).CombinedOutput(); err != nil {
+ argv := make([]string, 0, 4)
+
+ if isGNUcoreutils() {
+ argv = append(argv, "-aT", "--reflink=auto", src, dst)
+ } else {
+ argv = append(argv, "-a", src+"/.", dst+"/.")
+ }
+
+ if output, err := exec.Command("cp", argv...).CombinedOutput(); err != nil {
return fmt.Errorf("Error VFS copying directory: %s (%s)", err, output)
}
return nil
diff --git a/daemon/inspect.go b/daemon/inspect.go
index af6d4520fb..b93aec5059 100644
--- a/daemon/inspect.go
+++ b/daemon/inspect.go
@@ -2,6 +2,7 @@ package daemon
import (
"encoding/json"
+ "fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/runconfig"
@@ -15,7 +16,7 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
if container := daemon.Get(name); container != nil {
container.Lock()
defer container.Unlock()
- if job.GetenvBool("dirty") {
+ if job.GetenvBool("raw") {
b, err := json.Marshal(&struct {
*Container
HostConfig *runconfig.HostConfig
@@ -46,7 +47,16 @@ func (daemon *Daemon) ContainerInspect(job *engine.Job) engine.Status {
out.Set("ProcessLabel", container.ProcessLabel)
out.SetJson("Volumes", container.Volumes)
out.SetJson("VolumesRW", container.VolumesRW)
+
+ if children, err := daemon.Children(container.Name); err == nil {
+ for linkAlias, child := range children {
+ container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
+ }
+ }
+
out.SetJson("HostConfig", container.hostConfig)
+
+ container.hostConfig.Links = nil
if _, err := out.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go
index 8c5db9f843..a843da0499 100644
--- a/daemon/networkdriver/bridge/driver.go
+++ b/daemon/networkdriver/bridge/driver.go
@@ -20,7 +20,8 @@ import (
)
const (
- DefaultNetworkBridge = "docker0"
+ DefaultNetworkBridge = "docker0"
+ MaxAllocatedPortAttempts = 10
)
// Network interface represents the networking stack of a container
@@ -354,9 +355,6 @@ func Release(job *engine.Job) engine.Status {
var (
id = job.Args[0]
containerInterface = currentInterfaces.Get(id)
- ip net.IP
- port int
- proto string
)
if containerInterface == nil {
@@ -367,22 +365,6 @@ func Release(job *engine.Job) engine.Status {
if err := portmapper.Unmap(nat); err != nil {
log.Printf("Unable to unmap port %s: %s", nat, err)
}
-
- // this is host mappings
- switch a := nat.(type) {
- case *net.TCPAddr:
- proto = "tcp"
- ip = a.IP
- port = a.Port
- case *net.UDPAddr:
- proto = "udp"
- ip = a.IP
- port = a.Port
- }
-
- if err := portallocator.ReleasePort(ip, proto, port); err != nil {
- log.Printf("Unable to release port %s", nat)
- }
}
if err := ipallocator.ReleaseIP(bridgeNetwork, &containerInterface.IP); err != nil {
@@ -399,7 +381,7 @@ func AllocatePort(job *engine.Job) engine.Status {
ip = defaultBindingIP
id = job.Args[0]
hostIP = job.Getenv("HostIP")
- origHostPort = job.GetenvInt("HostPort")
+ hostPort = job.GetenvInt("HostPort")
containerPort = job.GetenvInt("ContainerPort")
proto = job.Getenv("Proto")
network = currentInterfaces.Get(id)
@@ -409,39 +391,46 @@ func AllocatePort(job *engine.Job) engine.Status {
ip = net.ParseIP(hostIP)
}
- var (
- hostPort int
- container net.Addr
- host net.Addr
- )
-
- /*
- Try up to 10 times to get a port that's not already allocated.
+ // host ip, proto, and host port
+ var container net.Addr
+ switch proto {
+ case "tcp":
+ container = &net.TCPAddr{IP: network.IP, Port: containerPort}
+ case "udp":
+ container = &net.UDPAddr{IP: network.IP, Port: containerPort}
+ default:
+ return job.Errorf("unsupported address type %s", proto)
+ }
- In the event of failure to bind, return the error that portmapper.Map
- yields.
- */
- for i := 0; i < 10; i++ {
- // host ip, proto, and host port
- hostPort, err = portallocator.RequestPort(ip, proto, origHostPort)
+ //
+ // Try up to 10 times to get a port that's not already allocated.
+ //
+ // In the event of failure to bind, return the error that portmapper.Map
+ // yields.
+ //
- if err != nil {
- return job.Error(err)
+ var host net.Addr
+ for i := 0; i < MaxAllocatedPortAttempts; i++ {
+ if host, err = portmapper.Map(container, ip, hostPort); err == nil {
+ break
}
- if proto == "tcp" {
- host = &net.TCPAddr{IP: ip, Port: hostPort}
- container = &net.TCPAddr{IP: network.IP, Port: containerPort}
- } else {
- host = &net.UDPAddr{IP: ip, Port: hostPort}
- container = &net.UDPAddr{IP: network.IP, Port: containerPort}
- }
+ switch allocerr := err.(type) {
+ case portallocator.ErrPortAlreadyAllocated:
+ // There is no point in immediately retrying to map an explicitly
+ // chosen port.
+ if hostPort != 0 {
+ job.Logf("Failed to bind %s for container address %s: %s", allocerr.IPPort(), container.String(), allocerr.Error())
+ break
+ }
- if err = portmapper.Map(container, ip, hostPort); err == nil {
+ // Automatically chosen 'free' port failed to bind: move on the next.
+ job.Logf("Failed to bind %s for container address %s. Trying another port.", allocerr.IPPort(), container.String())
+ default:
+ // some other error during mapping
+ job.Logf("Received an unexpected error during port allocation: %s", err.Error())
break
}
-
- job.Logf("Failed to bind %s:%d for container address %s:%d. Trying another port.", ip.String(), hostPort, network.IP.String(), containerPort)
}
if err != nil {
@@ -451,12 +440,18 @@ func AllocatePort(job *engine.Job) engine.Status {
network.PortMappings = append(network.PortMappings, host)
out := engine.Env{}
- out.Set("HostIP", ip.String())
- out.SetInt("HostPort", hostPort)
-
+ switch netAddr := host.(type) {
+ case *net.TCPAddr:
+ out.Set("HostIP", netAddr.IP.String())
+ out.SetInt("HostPort", netAddr.Port)
+ case *net.UDPAddr:
+ out.Set("HostIP", netAddr.IP.String())
+ out.SetInt("HostPort", netAddr.Port)
+ }
if _, err := out.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
+
return engine.StatusOK
}
diff --git a/daemon/networkdriver/bridge/driver_test.go b/daemon/networkdriver/bridge/driver_test.go
new file mode 100644
index 0000000000..f8ddd4c64e
--- /dev/null
+++ b/daemon/networkdriver/bridge/driver_test.go
@@ -0,0 +1,106 @@
+package bridge
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "testing"
+
+ "github.com/dotcloud/docker/engine"
+)
+
+func findFreePort(t *testing.T) int {
+ l, err := net.Listen("tcp", ":0")
+ if err != nil {
+ t.Fatal("Failed to find a free port")
+ }
+ defer l.Close()
+
+ result, err := net.ResolveTCPAddr("tcp", l.Addr().String())
+ if err != nil {
+ t.Fatal("Failed to resolve address to identify free port")
+ }
+ return result.Port
+}
+
+func newPortAllocationJob(eng *engine.Engine, port int) (job *engine.Job) {
+ strPort := strconv.Itoa(port)
+
+ job = eng.Job("allocate_port", "container_id")
+ job.Setenv("HostIP", "127.0.0.1")
+ job.Setenv("HostPort", strPort)
+ job.Setenv("Proto", "tcp")
+ job.Setenv("ContainerPort", strPort)
+ return
+}
+
+func TestAllocatePortDetection(t *testing.T) {
+ eng := engine.New()
+ eng.Logging = false
+
+ freePort := findFreePort(t)
+
+ // Init driver
+ job := eng.Job("initdriver")
+ if res := InitDriver(job); res != engine.StatusOK {
+ t.Fatal("Failed to initialize network driver")
+ }
+
+ // Allocate interface
+ job = eng.Job("allocate_interface", "container_id")
+ if res := Allocate(job); res != engine.StatusOK {
+ t.Fatal("Failed to allocate network interface")
+ }
+
+ // Allocate same port twice, expect failure on second call
+ job = newPortAllocationJob(eng, freePort)
+ if res := AllocatePort(job); res != engine.StatusOK {
+ t.Fatal("Failed to find a free port to allocate")
+ }
+ if res := AllocatePort(job); res == engine.StatusOK {
+ t.Fatal("Duplicate port allocation granted by AllocatePort")
+ }
+}
+
+func TestAllocatePortReclaim(t *testing.T) {
+ eng := engine.New()
+ eng.Logging = false
+
+ freePort := findFreePort(t)
+
+ // Init driver
+ job := eng.Job("initdriver")
+ if res := InitDriver(job); res != engine.StatusOK {
+ t.Fatal("Failed to initialize network driver")
+ }
+
+ // Allocate interface
+ job = eng.Job("allocate_interface", "container_id")
+ if res := Allocate(job); res != engine.StatusOK {
+ t.Fatal("Failed to allocate network interface")
+ }
+
+ // Occupy port
+ listenAddr := fmt.Sprintf(":%d", freePort)
+ tcpListenAddr, err := net.ResolveTCPAddr("tcp", listenAddr)
+ if err != nil {
+ t.Fatalf("Failed to resolve TCP address '%s'", listenAddr)
+ }
+
+ l, err := net.ListenTCP("tcp", tcpListenAddr)
+ if err != nil {
+ t.Fatalf("Fail to listen on port %d", freePort)
+ }
+
+ // Allocate port, expect failure
+ job = newPortAllocationJob(eng, freePort)
+ if res := AllocatePort(job); res == engine.StatusOK {
+ t.Fatal("Successfully allocated currently used port")
+ }
+
+ // Reclaim port, retry allocation
+ l.Close()
+ if res := AllocatePort(job); res != engine.StatusOK {
+ t.Fatal("Failed to allocate previously reclaimed port")
+ }
+}
diff --git a/daemon/networkdriver/portallocator/portallocator.go b/daemon/networkdriver/portallocator/portallocator.go
index 251ab94473..c722ba98ba 100644
--- a/daemon/networkdriver/portallocator/portallocator.go
+++ b/daemon/networkdriver/portallocator/portallocator.go
@@ -2,13 +2,18 @@ package portallocator
import (
"errors"
+ "fmt"
"net"
"sync"
)
+type portMap struct {
+ p map[int]struct{}
+ last int
+}
+
type (
- portMap map[int]bool
- protocolMap map[string]portMap
+ protocolMap map[string]*portMap
ipMapping map[string]protocolMap
)
@@ -18,9 +23,8 @@ const (
)
var (
- ErrAllPortsAllocated = errors.New("all ports are allocated")
- ErrPortAlreadyAllocated = errors.New("port has already been allocated")
- ErrUnknownProtocol = errors.New("unknown protocol")
+ ErrAllPortsAllocated = errors.New("all ports are allocated")
+ ErrUnknownProtocol = errors.New("unknown protocol")
)
var (
@@ -30,6 +34,34 @@ var (
globalMap = ipMapping{}
)
+type ErrPortAlreadyAllocated struct {
+ ip string
+ port int
+}
+
+func NewErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated {
+ return ErrPortAlreadyAllocated{
+ ip: ip,
+ port: port,
+ }
+}
+
+func (e ErrPortAlreadyAllocated) IP() string {
+ return e.ip
+}
+
+func (e ErrPortAlreadyAllocated) Port() int {
+ return e.port
+}
+
+func (e ErrPortAlreadyAllocated) IPPort() string {
+ return fmt.Sprintf("%s:%d", e.ip, e.port)
+}
+
+func (e ErrPortAlreadyAllocated) Error() string {
+ return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port)
+}
+
func RequestPort(ip net.IP, proto string, port int) (int, error) {
mutex.Lock()
defer mutex.Unlock()
@@ -43,11 +75,11 @@ func RequestPort(ip net.IP, proto string, port int) (int, error) {
mapping := getOrCreate(ip)
if port > 0 {
- if !mapping[proto][port] {
- mapping[proto][port] = true
+ if _, ok := mapping[proto].p[port]; !ok {
+ mapping[proto].p[port] = struct{}{}
return port, nil
} else {
- return 0, ErrPortAlreadyAllocated
+ return 0, NewErrPortAlreadyAllocated(ip.String(), port)
}
} else {
port, err := findPort(ip, proto)
@@ -66,8 +98,8 @@ func ReleasePort(ip net.IP, proto string, port int) error {
ip = getDefault(ip)
- mapping := getOrCreate(ip)
- delete(mapping[proto], port)
+ mapping := getOrCreate(ip)[proto]
+ delete(mapping.p, port)
return nil
}
@@ -86,8 +118,8 @@ func getOrCreate(ip net.IP) protocolMap {
if _, ok := globalMap[ipstr]; !ok {
globalMap[ipstr] = protocolMap{
- "tcp": portMap{},
- "udp": portMap{},
+ "tcp": &portMap{p: map[int]struct{}{}, last: 0},
+ "udp": &portMap{p: map[int]struct{}{}, last: 0},
}
}
@@ -95,21 +127,28 @@ func getOrCreate(ip net.IP) protocolMap {
}
func findPort(ip net.IP, proto string) (int, error) {
- port := BeginPortRange
-
- mapping := getOrCreate(ip)
+ mapping := getOrCreate(ip)[proto]
- for mapping[proto][port] {
- port++
+ if mapping.last == 0 {
+ mapping.p[BeginPortRange] = struct{}{}
+ mapping.last = BeginPortRange
+ return BeginPortRange, nil
+ }
+ for port := mapping.last + 1; port != mapping.last; port++ {
if port > EndPortRange {
- return 0, ErrAllPortsAllocated
+ port = BeginPortRange
+ }
+
+ if _, ok := mapping.p[port]; !ok {
+ mapping.p[port] = struct{}{}
+ mapping.last = port
+ return port, nil
}
- }
- mapping[proto][port] = true
+ }
- return port, nil
+ return 0, ErrAllPortsAllocated
}
func getDefault(ip net.IP) net.IP {
diff --git a/daemon/networkdriver/portallocator/portallocator_test.go b/daemon/networkdriver/portallocator/portallocator_test.go
index 5a4765ddd4..9869c332e9 100644
--- a/daemon/networkdriver/portallocator/portallocator_test.go
+++ b/daemon/networkdriver/portallocator/portallocator_test.go
@@ -83,8 +83,11 @@ func TestReleaseUnreadledPort(t *testing.T) {
}
port, err = RequestPort(defaultIP, "tcp", 5000)
- if err != ErrPortAlreadyAllocated {
- t.Fatalf("Expected error %s got %s", ErrPortAlreadyAllocated, err)
+
+ switch err.(type) {
+ case ErrPortAlreadyAllocated:
+ default:
+ t.Fatalf("Expected port allocation error got %s", err)
}
}
diff --git a/daemon/networkdriver/portmapper/mapper.go b/daemon/networkdriver/portmapper/mapper.go
index e29959a245..1bd332271f 100644
--- a/daemon/networkdriver/portmapper/mapper.go
+++ b/daemon/networkdriver/portmapper/mapper.go
@@ -3,10 +3,12 @@ package portmapper
import (
"errors"
"fmt"
- "github.com/dotcloud/docker/pkg/iptables"
- "github.com/dotcloud/docker/pkg/proxy"
"net"
"sync"
+
+ "github.com/dotcloud/docker/daemon/networkdriver/portallocator"
+ "github.com/dotcloud/docker/pkg/iptables"
+ "github.com/dotcloud/docker/pkg/proxy"
)
type mapping struct {
@@ -35,43 +37,66 @@ func SetIptablesChain(c *iptables.Chain) {
chain = c
}
-func Map(container net.Addr, hostIP net.IP, hostPort int) error {
+func Map(container net.Addr, hostIP net.IP, hostPort int) (net.Addr, error) {
lock.Lock()
defer lock.Unlock()
- var m *mapping
+ var (
+ m *mapping
+ err error
+ proto string
+ allocatedHostPort int
+ )
+
+ // release the port on any error during return.
+ defer func() {
+ if err != nil {
+ portallocator.ReleasePort(hostIP, proto, allocatedHostPort)
+ }
+ }()
+
switch container.(type) {
case *net.TCPAddr:
+ proto = "tcp"
+ if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil {
+ return nil, err
+ }
m = &mapping{
- proto: "tcp",
- host: &net.TCPAddr{IP: hostIP, Port: hostPort},
+ proto: proto,
+ host: &net.TCPAddr{IP: hostIP, Port: allocatedHostPort},
container: container,
}
case *net.UDPAddr:
+ proto = "udp"
+ if allocatedHostPort, err = portallocator.RequestPort(hostIP, proto, hostPort); err != nil {
+ return nil, err
+ }
m = &mapping{
- proto: "udp",
- host: &net.UDPAddr{IP: hostIP, Port: hostPort},
+ proto: proto,
+ host: &net.UDPAddr{IP: hostIP, Port: allocatedHostPort},
container: container,
}
default:
- return ErrUnknownBackendAddressType
+ err = ErrUnknownBackendAddressType
+ return nil, err
}
key := getKey(m.host)
if _, exists := currentMappings[key]; exists {
- return ErrPortMappedForIP
+ err = ErrPortMappedForIP
+ return nil, err
}
containerIP, containerPort := getIPAndPort(m.container)
- if err := forward(iptables.Add, m.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
- return err
+ if err := forward(iptables.Add, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil {
+ return nil, err
}
p, err := newProxy(m.host, m.container)
if err != nil {
- // need to undo the iptables rules before we reutrn
- forward(iptables.Delete, m.proto, hostIP, hostPort, containerIP.String(), containerPort)
- return err
+ // need to undo the iptables rules before we return
+ forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
+ return nil, err
}
m.userlandProxy = p
@@ -79,7 +104,7 @@ func Map(container net.Addr, hostIP net.IP, hostPort int) error {
go p.Run()
- return nil
+ return m.host, nil
}
func Unmap(host net.Addr) error {
@@ -100,6 +125,18 @@ func Unmap(host net.Addr) error {
if err := forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
return err
}
+
+ switch a := host.(type) {
+ case *net.TCPAddr:
+ if err := portallocator.ReleasePort(a.IP, "tcp", a.Port); err != nil {
+ return err
+ }
+ case *net.UDPAddr:
+ if err := portallocator.ReleasePort(a.IP, "udp", a.Port); err != nil {
+ return err
+ }
+ }
+
return nil
}
diff --git a/daemon/networkdriver/portmapper/mapper_test.go b/daemon/networkdriver/portmapper/mapper_test.go
index 4c09f3c651..6affdc5445 100644
--- a/daemon/networkdriver/portmapper/mapper_test.go
+++ b/daemon/networkdriver/portmapper/mapper_test.go
@@ -1,6 +1,7 @@
package portmapper
import (
+ "github.com/dotcloud/docker/daemon/networkdriver/portallocator"
"github.com/dotcloud/docker/pkg/iptables"
"github.com/dotcloud/docker/pkg/proxy"
"net"
@@ -44,19 +45,26 @@ func TestMapPorts(t *testing.T) {
srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
srcAddr2 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.2")}
- if err := Map(srcAddr1, dstIp1, 80); err != nil {
+ addrEqual := func(addr1, addr2 net.Addr) bool {
+ return (addr1.Network() == addr2.Network()) && (addr1.String() == addr2.String())
+ }
+
+ if host, err := Map(srcAddr1, dstIp1, 80); err != nil {
t.Fatalf("Failed to allocate port: %s", err)
+ } else if !addrEqual(dstAddr1, host) {
+ t.Fatalf("Incorrect mapping result: expected %s:%s, got %s:%s",
+ dstAddr1.String(), dstAddr1.Network(), host.String(), host.Network())
}
- if Map(srcAddr1, dstIp1, 80) == nil {
+ if _, err := Map(srcAddr1, dstIp1, 80); err == nil {
t.Fatalf("Port is in use - mapping should have failed")
}
- if Map(srcAddr2, dstIp1, 80) == nil {
+ if _, err := Map(srcAddr2, dstIp1, 80); err == nil {
t.Fatalf("Port is in use - mapping should have failed")
}
- if err := Map(srcAddr2, dstIp2, 80); err != nil {
+ if _, err := Map(srcAddr2, dstIp2, 80); err != nil {
t.Fatalf("Failed to allocate port: %s", err)
}
@@ -105,3 +113,40 @@ func TestGetUDPIPAndPort(t *testing.T) {
t.Fatalf("expected port %d got %d", ep, port)
}
}
+
+func TestMapAllPortsSingleInterface(t *testing.T) {
+ dstIp1 := net.ParseIP("0.0.0.0")
+ srcAddr1 := &net.TCPAddr{Port: 1080, IP: net.ParseIP("172.16.0.1")}
+
+ hosts := []net.Addr{}
+ var host net.Addr
+ var err error
+
+ defer func() {
+ for _, val := range hosts {
+ Unmap(val)
+ }
+ }()
+
+ for i := 0; i < 10; i++ {
+ for i := portallocator.BeginPortRange; i < portallocator.EndPortRange; i++ {
+ if host, err = Map(srcAddr1, dstIp1, 0); err != nil {
+ t.Fatal(err)
+ }
+
+ hosts = append(hosts, host)
+ }
+
+ if _, err := Map(srcAddr1, dstIp1, portallocator.BeginPortRange); err == nil {
+ t.Fatal("Port %d should be bound but is not", portallocator.BeginPortRange)
+ }
+
+ for _, val := range hosts {
+ if err := Unmap(val); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ hosts = []net.Addr{}
+ }
+}
diff --git a/daemon/state.go b/daemon/state.go
index 7ee8fc48c3..3f904d7829 100644
--- a/daemon/state.go
+++ b/daemon/state.go
@@ -16,6 +16,13 @@ type State struct {
ExitCode int
StartedAt time.Time
FinishedAt time.Time
+ waitChan chan struct{}
+}
+
+func NewState() *State {
+ return &State{
+ waitChan: make(chan struct{}),
+ }
}
// String returns a human-readable description of the state
@@ -35,56 +42,118 @@ func (s *State) String() string {
return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
}
+func wait(waitChan <-chan struct{}, timeout time.Duration) error {
+ if timeout < 0 {
+ <-waitChan
+ return nil
+ }
+ select {
+ case <-time.After(timeout):
+ return fmt.Errorf("Timed out: %v", timeout)
+ case <-waitChan:
+ return nil
+ }
+}
+
+// WaitRunning waits until the state is running. If the state is already running it
+// returns immediately. To wait forever, supply a negative timeout.
+// Returns the pid that was passed to SetRunning.
+func (s *State) WaitRunning(timeout time.Duration) (int, error) {
+ s.RLock()
+ if s.IsRunning() {
+ pid := s.Pid
+ s.RUnlock()
+ return pid, nil
+ }
+ waitChan := s.waitChan
+ s.RUnlock()
+ if err := wait(waitChan, timeout); err != nil {
+ return -1, err
+ }
+ return s.GetPid(), nil
+}
+
+// WaitStop waits until the state is stopped. If the state is already stopped it
+// returns immediately. To wait forever, supply a negative timeout.
+// Returns the exit code that was passed to SetStopped.
+func (s *State) WaitStop(timeout time.Duration) (int, error) {
+ s.RLock()
+ if !s.Running {
+ exitCode := s.ExitCode
+ s.RUnlock()
+ return exitCode, nil
+ }
+ waitChan := s.waitChan
+ s.RUnlock()
+ if err := wait(waitChan, timeout); err != nil {
+ return -1, err
+ }
+ return s.GetExitCode(), nil
+}
+
func (s *State) IsRunning() bool {
s.RLock()
- defer s.RUnlock()
+ res := s.Running
+ s.RUnlock()
+ return res
+}
- return s.Running
+func (s *State) GetPid() int {
+ s.RLock()
+ res := s.Pid
+ s.RUnlock()
+ return res
}
func (s *State) GetExitCode() int {
s.RLock()
- defer s.RUnlock()
-
- return s.ExitCode
+ res := s.ExitCode
+ s.RUnlock()
+ return res
}
func (s *State) SetRunning(pid int) {
s.Lock()
- defer s.Unlock()
-
- s.Running = true
- s.Paused = false
- s.ExitCode = 0
- s.Pid = pid
- s.StartedAt = time.Now().UTC()
+ if !s.Running {
+ s.Running = true
+ s.Paused = false
+ s.ExitCode = 0
+ s.Pid = pid
+ s.StartedAt = time.Now().UTC()
+ close(s.waitChan) // fire waiters for start
+ s.waitChan = make(chan struct{})
+ }
+ s.Unlock()
}
func (s *State) SetStopped(exitCode int) {
s.Lock()
- defer s.Unlock()
-
- s.Running = false
- s.Pid = 0
- s.FinishedAt = time.Now().UTC()
- s.ExitCode = exitCode
+ if s.Running {
+ s.Running = false
+ s.Pid = 0
+ s.FinishedAt = time.Now().UTC()
+ s.ExitCode = exitCode
+ close(s.waitChan) // fire waiters for stop
+ s.waitChan = make(chan struct{})
+ }
+ s.Unlock()
}
func (s *State) SetPaused() {
s.Lock()
- defer s.Unlock()
s.Paused = true
+ s.Unlock()
}
func (s *State) SetUnpaused() {
s.Lock()
- defer s.Unlock()
s.Paused = false
+ s.Unlock()
}
func (s *State) IsPaused() bool {
s.RLock()
- defer s.RUnlock()
-
- return s.Paused
+ res := s.Paused
+ s.RUnlock()
+ return res
}
diff --git a/daemon/state_test.go b/daemon/state_test.go
new file mode 100644
index 0000000000..7b02f3aeac
--- /dev/null
+++ b/daemon/state_test.go
@@ -0,0 +1,102 @@
+package daemon
+
+import (
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+func TestStateRunStop(t *testing.T) {
+ s := NewState()
+ for i := 1; i < 3; i++ { // full lifecycle two times
+ started := make(chan struct{})
+ var pid int64
+ go func() {
+ runPid, _ := s.WaitRunning(-1 * time.Second)
+ atomic.StoreInt64(&pid, int64(runPid))
+ close(started)
+ }()
+ s.SetRunning(i + 100)
+ if !s.IsRunning() {
+ t.Fatal("State not running")
+ }
+ if s.Pid != i+100 {
+ t.Fatalf("Pid %v, expected %v", s.Pid, i+100)
+ }
+ if s.ExitCode != 0 {
+ t.Fatalf("ExitCode %v, expected 0", s.ExitCode)
+ }
+ select {
+ case <-time.After(100 * time.Millisecond):
+ t.Fatal("Start callback doesn't fire in 100 milliseconds")
+ case <-started:
+ t.Log("Start callback fired")
+ }
+ runPid := int(atomic.LoadInt64(&pid))
+ if runPid != i+100 {
+ t.Fatalf("Pid %v, expected %v", runPid, i+100)
+ }
+ if pid, err := s.WaitRunning(-1 * time.Second); err != nil || pid != i+100 {
+ t.Fatal("WaitRunning returned pid: %v, err: %v, expected pid: %v, err: %v", pid, err, i+100, nil)
+ }
+
+ stopped := make(chan struct{})
+ var exit int64
+ go func() {
+ exitCode, _ := s.WaitStop(-1 * time.Second)
+ atomic.StoreInt64(&exit, int64(exitCode))
+ close(stopped)
+ }()
+ s.SetStopped(i)
+ if s.IsRunning() {
+ t.Fatal("State is running")
+ }
+ if s.ExitCode != i {
+ t.Fatalf("ExitCode %v, expected %v", s.ExitCode, i)
+ }
+ if s.Pid != 0 {
+ t.Fatalf("Pid %v, expected 0", s.Pid)
+ }
+ select {
+ case <-time.After(100 * time.Millisecond):
+ t.Fatal("Stop callback doesn't fire in 100 milliseconds")
+ case <-stopped:
+ t.Log("Stop callback fired")
+ }
+ exitCode := int(atomic.LoadInt64(&exit))
+ if exitCode != i {
+ t.Fatalf("ExitCode %v, expected %v", exitCode, i)
+ }
+ if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i {
+ t.Fatal("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil)
+ }
+ }
+}
+
+func TestStateTimeoutWait(t *testing.T) {
+ s := NewState()
+ started := make(chan struct{})
+ go func() {
+ s.WaitRunning(100 * time.Millisecond)
+ close(started)
+ }()
+ select {
+ case <-time.After(200 * time.Millisecond):
+ t.Fatal("Start callback doesn't fire in 100 milliseconds")
+ case <-started:
+ t.Log("Start callback fired")
+ }
+ s.SetRunning(42)
+ stopped := make(chan struct{})
+ go func() {
+ s.WaitRunning(100 * time.Millisecond)
+ close(stopped)
+ }()
+ select {
+ case <-time.After(200 * time.Millisecond):
+ t.Fatal("Start callback doesn't fire in 100 milliseconds")
+ case <-stopped:
+ t.Log("Start callback fired")
+ }
+
+}
diff --git a/daemonconfig/config.go b/daemonconfig/config.go
index 9f77d84a58..1d2bb60dd6 100644
--- a/daemonconfig/config.go
+++ b/daemonconfig/config.go
@@ -31,6 +31,7 @@ type Config struct {
DisableNetwork bool
EnableSelinuxSupport bool
Context map[string][]string
+ Sockets []string
}
// ConfigFromJob creates and returns a new DaemonConfig object
@@ -66,6 +67,9 @@ func ConfigFromJob(job *engine.Job) *Config {
config.Mtu = GetDefaultNetworkMtu()
}
config.DisableNetwork = config.BridgeIface == DisableNetworkBridge
+ if sockets := job.GetenvList("Sockets"); sockets != nil {
+ config.Sockets = sockets
+ }
return config
}
diff --git a/docker/docker.go b/docker/docker.go
index 56bcb04e41..30d43bc6a8 100644
--- a/docker/docker.go
+++ b/docker/docker.go
@@ -6,6 +6,7 @@ import (
"fmt"
"io/ioutil"
"log"
+ "net"
"os"
"runtime"
"strings"
@@ -47,7 +48,7 @@ func main() {
bridgeName = flag.String([]string{"b", "-bridge"}, "", "Attach containers to a pre-existing network bridge\nuse 'none' to disable container networking")
bridgeIp = flag.String([]string{"#bip", "-bip"}, "", "Use this CIDR notation address for the network bridge's IP, not compatible with -b")
pidfile = flag.String([]string{"p", "-pidfile"}, "/var/run/docker.pid", "Path to use for daemon PID file")
- flRoot = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the docker runtime")
+ flRoot = flag.String([]string{"g", "-graph"}, "/var/lib/docker", "Path to use as the root of the Docker runtime")
flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode\nuse '' (the empty string) to disable setting of a group")
flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
flDns = opts.NewListOpts(opts.ValidateIp4Address)
@@ -56,8 +57,8 @@ func main() {
flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
flDefaultIp = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication")
- flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver")
- flExecDriver = flag.String([]string{"e", "-exec-driver"}, "native", "Force the docker runtime to use a specific exec driver")
+ flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the Docker runtime to use a specific storage driver")
+ flExecDriver = flag.String([]string{"e", "-exec-driver"}, "native", "Force the Docker runtime to use a specific exec driver")
flHosts = opts.NewListOpts(api.ValidateHost)
flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU\nif no value is provided: default to the default route MTU or 1500 if no default route is available")
flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags")
@@ -67,7 +68,7 @@ func main() {
flKey = flag.String([]string{"-tlskey"}, dockerConfDir+defaultKeyFile, "Path to TLS key file")
flSelinuxEnabled = flag.Bool([]string{"-selinux-enabled"}, false, "Enable selinux support")
)
- flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers")
+ flag.Var(&flDns, []string{"#dns", "-dns"}, "Force Docker to use specific DNS servers")
flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
flag.Var(&flHosts, []string{"H", "-host"}, "The socket(s) to bind to in daemon mode\nspecified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.")
flag.Var(&flGraphOpts, []string{"-storage-opt"}, "Set storage driver options")
@@ -95,6 +96,14 @@ func main() {
log.Fatal("You specified -b & --bip, mutually exclusive options. Please specify only one.")
}
+ if !*flEnableIptables && !*flInterContainerComm {
+ log.Fatal("You specified --iptables=false with --icc=false. ICC uses iptables to function. Please set --icc or --iptables to true.")
+ }
+
+ if net.ParseIP(*flDefaultIp) == nil {
+ log.Fatalf("Specified --ip=%s is not in correct format \"0.0.0.0\".", *flDefaultIp)
+ }
+
if *flDebug {
os.Setenv("DEBUG", "1")
}
@@ -162,6 +171,7 @@ func main() {
job.Setenv("ExecDriver", *flExecDriver)
job.SetenvInt("Mtu", *flMtu)
job.SetenvBool("EnableSelinuxSupport", *flSelinuxEnabled)
+ job.SetenvList("Sockets", flHosts.GetAll())
if err := job.Run(); err != nil {
log.Fatal(err)
}
@@ -259,7 +269,7 @@ func showVersion() {
func checkKernelAndArch() error {
// Check for unsupported architectures
if runtime.GOARCH != "amd64" {
- return fmt.Errorf("The docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
+ return fmt.Errorf("The Docker runtime currently only supports amd64 (not %s). This will change in the future. Aborting.", runtime.GOARCH)
}
// Check for unsupported kernel versions
// FIXME: it would be cleaner to not test for specific versions, but rather
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 0000000000..8da058a806
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1,5 @@
+# generated by man/man/md2man-all.sh
+man1/
+man5/
+# avoid committing the awsconfig file used for releases
+awsconfig
diff --git a/docs/Dockerfile b/docs/Dockerfile
index 68dbbec594..329646ed01 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -28,8 +28,12 @@ WORKDIR /docs
RUN VERSION=$(cat /docs/VERSION) &&\
GIT_BRANCH=$(cat /docs/GIT_BRANCH) &&\
+ GITCOMMIT=$(cat /docs/GITCOMMIT) &&\
AWS_S3_BUCKET=$(cat /docs/AWS_S3_BUCKET) &&\
- echo "{% set docker_version = \"${VERSION}\" %}{% set docker_branch = \"${GIT_BRANCH}\" %}{% set aws_bucket = \"${AWS_S3_BUCKET}\" %}{% include \"beta_warning.html\" %}" > /docs/theme/mkdocs/version.html
+ sed -i "s/\$VERSION/$VERSION/g" /docs/theme/mkdocs/base.html &&\
+ sed -i "s/\$GITCOMMIT/$GITCOMMIT/g" /docs/theme/mkdocs/base.html &&\
+ sed -i "s/\$GIT_BRANCH/$GIT_BRANCH/g" /docs/theme/mkdocs/base.html &&\
+ sed -i "s/\$AWS_S3_BUCKET/$AWS_S3_BUCKET/g" /docs/theme/mkdocs/base.html
# note, EXPOSE is only last because of https://github.com/dotcloud/docker/issues/3525
EXPOSE 8000
diff --git a/docs/MAINTAINERS b/docs/MAINTAINERS
index afbbde4099..55489fd5c2 100644
--- a/docs/MAINTAINERS
+++ b/docs/MAINTAINERS
@@ -1,3 +1,4 @@
James Turnbull <james@lovedthanlost.net> (@jamtur01)
Sven Dowideit <SvenDowideit@fosiki.com> (@SvenDowideit)
O.S. Tezer <ostezer@gmail.com> (@OSTezer)
+Fred Lifton <fred.lifton@docker.com> (@fredlf)
diff --git a/docs/README.md b/docs/README.md
index d74ec4ee87..17299401e7 100755
--- a/docs/README.md
+++ b/docs/README.md
@@ -3,7 +3,7 @@
The source for Docker documentation is here under `sources/` and uses extended
Markdown, as implemented by [MkDocs](http://mkdocs.org).
-The HTML files are built and hosted on `https://docs.docker.io`, and update
+The HTML files are built and hosted on `https://docs.docker.com`, and update
automatically after each change to the master or release branch of [Docker on
GitHub](https://github.com/dotcloud/docker) thanks to post-commit hooks. The
`docs` branch maps to the "latest" documentation and the `master` (unreleased
@@ -21,14 +21,14 @@ In the rare case where your change is not forward-compatible, you may need to
base your changes on the `docs` branch.
Also, now that we have a `docs` branch, we can keep the
-[http://docs.docker.io](http://docs.docker.io) docs up to date with any bugs
+[http://docs.docker.com](http://docs.docker.com) docs up to date with any bugs
found between Docker code releases.
**Warning**: When *reading* the docs, the
-[http://beta-docs.docker.io](http://beta-docs.docker.io) documentation may
+[http://docs-stage.docker.com](http://docs-stage.docker.com) documentation may
include features not yet part of any official Docker release. The `beta-docs`
site should be used only for understanding bleeding-edge development and
-`docs.docker.io` (which points to the `docs` branch`) should be used for the
+`docs.docker.com` (which points to the `docs` branch`) should be used for the
latest official release.
## Contributing
@@ -70,7 +70,7 @@ in their shell:
### Images
-When you need to add images, try to make them as small as possible (e.g. as
+When you need to add images, try to make them as small as possible (e.g., as
gifs). Usually images should go in the same directory as the `.md` file which
references them, or in a subdirectory if one already exists.
diff --git a/docs/docs-update.py b/docs/docs-update.py
new file mode 100755
index 0000000000..31bb47db3b
--- /dev/null
+++ b/docs/docs-update.py
@@ -0,0 +1,214 @@
+#!/usr/bin/env python
+
+#
+# Sven's quick hack script to update the documentation
+#
+# call with:
+# ./docs/update.py /usr/bin/docker
+#
+
+import re
+from sys import argv
+import subprocess
+import os
+import os.path
+
+script, docker_cmd = argv
+
+def print_usage(outtext, docker_cmd, command):
+ help = ""
+ try:
+ #print "RUN ", "".join((docker_cmd, " ", command, " --help"))
+ help = subprocess.check_output("".join((docker_cmd, " ", command, " --help")), stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError, e:
+ help = e.output
+ for l in str(help).strip().split("\n"):
+ l = l.rstrip()
+ if l == '':
+ outtext.write("\n")
+ else:
+ # `docker --help` tells the user the path they called it with
+ l = re.sub(docker_cmd, "docker", l)
+ outtext.write(" "+l+"\n")
+ outtext.write("\n")
+
+# TODO: look for and complain about any missing commands
+def update_cli_reference():
+ originalFile = "docs/sources/reference/commandline/cli.md"
+ os.rename(originalFile, originalFile+".bak")
+
+ intext = open(originalFile+".bak", "r")
+ outtext = open(originalFile, "w")
+
+ mode = 'p'
+ space = " "
+ command = ""
+ # 2 mode line-by line parser
+ for line in intext:
+ if mode=='p':
+ # Prose
+ match = re.match("( \s*)Usage: docker ([a-z]+)", line)
+ if match:
+ # the beginning of a Docker command usage block
+ space = match.group(1)
+ command = match.group(2)
+ mode = 'c'
+ else:
+ match = re.match("( \s*)Usage of .*docker.*:", line)
+ if match:
+ # the beginning of the Docker --help usage block
+ space = match.group(1)
+ command = ""
+ mode = 'c'
+ else:
+ outtext.write(line)
+ else:
+ # command usage block
+ match = re.match("("+space+")(.*)|^$", line)
+ #print "CMD ", command
+ if not match:
+ # The end of the current usage block - Shell out to run docker to see the new output
+ print_usage(outtext, docker_cmd, command)
+ outtext.write(line)
+ mode = 'p'
+ if mode == 'c':
+ print_usage(outtext, docker_cmd, command)
+
+def update_man_pages():
+ cmds = []
+ try:
+ help = subprocess.check_output("".join((docker_cmd)), stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError, e:
+ help = e.output
+ for l in str(help).strip().split("\n"):
+ l = l.rstrip()
+ if l != "":
+ match = re.match(" (.*?) .*", l)
+ if match:
+ cmds.append(match.group(1))
+
+ desc_re = re.compile(r".*# DESCRIPTION(.*?)# (OPTIONS|EXAMPLES?).*", re.MULTILINE|re.DOTALL)
+ example_re = re.compile(r".*# EXAMPLES?(.*)# HISTORY.*", re.MULTILINE|re.DOTALL)
+ history_re = re.compile(r".*# HISTORY(.*)", re.MULTILINE|re.DOTALL)
+
+ for command in cmds:
+ print "COMMAND: "+command
+ history = ""
+ description = ""
+ examples = ""
+ if os.path.isfile("docs/man/docker-"+command+".1.md"):
+ intext = open("docs/man/docker-"+command+".1.md", "r")
+ txt = intext.read()
+ intext.close()
+ match = desc_re.match(txt)
+ if match:
+ description = match.group(1)
+ match = example_re.match(txt)
+ if match:
+ examples = match.group(1)
+ match = history_re.match(txt)
+ if match:
+ history = match.group(1).strip()
+
+ usage = ""
+ usage_description = ""
+ params = {}
+ key_params = {}
+
+ help = ""
+ try:
+ help = subprocess.check_output("".join((docker_cmd, " ", command, " --help")), stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError, e:
+ help = e.output
+ last_key = ""
+ for l in str(help).split("\n"):
+ l = l.rstrip()
+ if l != "":
+ match = re.match("Usage: docker "+command+"(.*)", l)
+ if match:
+ usage = match.group(1).strip()
+ else:
+ #print ">>>>"+l
+ match = re.match(" (-+)(.*) \s+(.*)", l)
+ if match:
+ last_key = match.group(2).rstrip()
+ #print " found "+match.group(1)
+ key_params[last_key] = match.group(1)+last_key
+ params[last_key] = match.group(3)
+ else:
+ if last_key != "":
+ params[last_key] = params[last_key] + "\n" + l
+ else:
+ if usage_description != "":
+ usage_description = usage_description + "\n"
+ usage_description = usage_description + l
+
+ # replace [OPTIONS] with the list of params
+ options = ""
+ match = re.match("\[OPTIONS\](.*)", usage)
+ if match:
+ usage = match.group(1)
+
+ new_usage = ""
+ # TODO: sort without the `-`'s
+ for key in sorted(params.keys(), key=lambda s: s.lower()):
+ # split on commas, remove --?.*=.*, put in *'s mumble
+ ps = []
+ opts = []
+ for k in key_params[key].split(","):
+ #print "......"+k
+ match = re.match("(-+)([A-Za-z-0-9]*)(?:=(.*))?", k.lstrip())
+ if match:
+ p = "**"+match.group(1)+match.group(2)+"**"
+ o = "**"+match.group(1)+match.group(2)+"**"
+ if match.group(3):
+ # if ="" then use UPPERCASE(group(2))"
+ val = match.group(3)
+ if val == "\"\"":
+ val = match.group(2).upper()
+ p = p+"[=*"+val+"*]"
+ val = match.group(3)
+ if val in ("true", "false"):
+ params[key] = params[key].rstrip()
+ if not params[key].endswith('.'):
+ params[key] = params[key]+ "."
+ params[key] = params[key] + " The default is *"+val+"*."
+ val = "*true*|*false*"
+ o = o+"="+val
+ ps.append(p)
+ opts.append(o)
+ else:
+ print "nomatch:"+k
+ new_usage = new_usage+ "\n["+"|".join(ps)+"]"
+ options = options + ", ".join(opts) + "\n "+ params[key]+"\n\n"
+ if new_usage != "":
+ new_usage = new_usage.strip() + "\n"
+ usage = new_usage + usage
+
+
+ outtext = open("docs/man/docker-"+command+".1.md", "w")
+ outtext.write("""% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+""")
+ outtext.write("docker-"+command+" - "+usage_description+"\n\n")
+ outtext.write("# SYNOPSIS\n**docker "+command+"**\n"+usage+"\n\n")
+ if description != "":
+ outtext.write("# DESCRIPTION"+description)
+ if options == "":
+ options = "There are no available options.\n\n"
+ outtext.write("# OPTIONS\n"+options)
+ if examples != "":
+ outtext.write("# EXAMPLES"+examples)
+ outtext.write("# HISTORY\n")
+ if history != "":
+ outtext.write(history+"\n")
+ recent_history_re = re.compile(".*June 2014.*", re.MULTILINE|re.DOTALL)
+ if not recent_history_re.match(history):
+ outtext.write("June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>\n")
+ outtext.close()
+
+# main
+update_cli_reference()
+update_man_pages()
diff --git a/contrib/man/md/Dockerfile b/docs/man/Dockerfile
index 438227df89..438227df89 100644
--- a/contrib/man/md/Dockerfile
+++ b/docs/man/Dockerfile
diff --git a/contrib/man/md/Dockerfile.5.md b/docs/man/Dockerfile.5.md
index d669122107..b0a863f657 100644
--- a/contrib/man/md/Dockerfile.5.md
+++ b/docs/man/Dockerfile.5.md
@@ -93,7 +93,7 @@ or
they omit the executable, an ENTRYPOINT must be specified.
When used in the shell or exec formats, the CMD instruction sets the command to
be executed when running the image.
- If you use the shell form of of the CMD, the <command> executes in /bin/sh -c:
+ If you use the shell form of the CMD, the <command> executes in /bin/sh -c:
**FROM ubuntu**
**CMD echo "This is a test." | wc -**
If you run <command> wihtout a shell, then you must express the command as a
@@ -203,4 +203,4 @@ or
run later, during the next build stage.
# HISTORY
-*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.io Dockerfile documentation.
+*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation.
diff --git a/contrib/man/md/README.md b/docs/man/README.md
index d49b39b7a2..45f1a91c00 100644
--- a/contrib/man/md/README.md
+++ b/docs/man/README.md
@@ -51,7 +51,7 @@ saving you from dealing with Pandoc and dependencies on your own computer.
## Building the Fedora / Pandoc image
-There is a Dockerfile provided in the `docker/contrib/man/md` directory.
+There is a Dockerfile provided in the `docker/docs/man` directory.
Using this Dockerfile, create a Docker image tagged `fedora/pandoc`:
@@ -61,11 +61,11 @@ Using this Dockerfile, create a Docker image tagged `fedora/pandoc`:
Once the image is built, run a container using the image with *volumes*:
- docker run -v /<path-to-git-dir>/docker/contrib/man:/pandoc:rw \
- -w /pandoc -i fedora/pandoc /pandoc/md/md2man-all.sh
+ docker run -v /<path-to-git-dir>/docker/docs/man:/pandoc:rw \
+ -w /pandoc -i fedora/pandoc /pandoc/md2man-all.sh
The Pandoc Docker container will process the Markdown files and generate
-the man pages inside the `docker/contrib/man/man1` directory using
+the man pages inside the `docker/docs/man/man1` directory using
Docker volumes. For more information on Docker volumes see the man page for
`docker run` and also look at the article [Sharing Directories via Volumes]
-(http://docs.docker.io/use/working_with_volumes/).
+(http://docs.docker.com/use/working_with_volumes/).
diff --git a/contrib/man/md/docker-attach.1.md b/docs/man/docker-attach.1.md
index 5a3b7a2856..1b4e68b65f 100644
--- a/contrib/man/md/docker-attach.1.md
+++ b/docs/man/docker-attach.1.md
@@ -1,11 +1,14 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-attach - Attach to a running container
# SYNOPSIS
-**docker attach** **--no-stdin**[=*false*] **--sig-proxy**[=*true*] CONTAINER
+**docker attach**
+[**--no-stdin**[=*false*]]
+[**--sig-proxy**[=*true*]]
+ CONTAINER
# DESCRIPTION
If you **docker run** a container in detached mode (**-d**), you can reattach to
@@ -19,11 +22,10 @@ the client.
# OPTIONS
**--no-stdin**=*true*|*false*
-When set to true, do not attach to stdin. The default is *false*.
+ Do not attach STDIN. The default is *false*.
-**--sig-proxy**=*true*|*false*:
-When set to true, proxify all received signal to the process (even in non-tty
-mode). The default is *true*.
+**--sig-proxy**=*true*|*false*
+ Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied. The default is *true*.
# EXAMPLES
@@ -55,4 +57,5 @@ attach** command:
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-build.1.md b/docs/man/docker-build.1.md
index 3c031445aa..c562660b6f 100644
--- a/contrib/man/md/docker-build.1.md
+++ b/docs/man/docker-build.1.md
@@ -1,12 +1,17 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-build - Build an image from a Dockerfile source at PATH
+docker-build - Build a new image from the source code at PATH
# SYNOPSIS
-**docker build** [**--no-cache**[=*false*]] [**-q**|**--quiet**[=*false*]]
- [**--rm**] [**-t**|**--tag**=TAG] PATH | URL | -
+**docker build**
+[**--force-rm**[=*false*]]
+[**--no-cache**[=*false*]]
+[**-q**|**--quiet**[=*false*]]
+[**--rm**[=*true*]]
+[**-t**|**--tag**[=*TAG*]]
+ PATH | URL | -
# DESCRIPTION
This will read the Dockerfile from the directory specified in **PATH**.
@@ -25,22 +30,20 @@ When a Git repository is set as the **URL**, the repository is used
as context.
# OPTIONS
+**--force-rm**=*true*|*false*
+ Always remove intermediate containers, even after unsuccessful builds. The default is *false*.
+
+**--no-cache**=*true*|*false*
+ Do not use cache when building the image. The default is *false*.
**-q**, **--quiet**=*true*|*false*
- When set to true, suppress verbose build output. Default is *false*.
+ Suppress the verbose output generated by the containers. The default is *false*.
**--rm**=*true*|*false*
- When true, remove intermediate containers that are created during the
-build process. The default is true.
+ Remove intermediate containers after a successful build. The default is *true*.
-**-t**, **--tag**=*tag*
- The name to be applied to the resulting image on successful completion of
-the build. `tag` in this context means the entire image name including the
-optional TAG after the ':'.
-
-**--no-cache**=*true*|*false*
- When set to true, do not use a cache when building the image. The
-default is *false*.
+**-t**, **--tag**=""
+ Repository name (and optionally a tag) to be applied to the resulting image in case of success
# EXAMPLES
@@ -114,4 +117,5 @@ Note: You can set an arbitrary Git repository via the `git://` schema.
# HISTORY
March 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-commit.1.md b/docs/man/docker-commit.1.md
index 03bf17872d..bbd1db21b0 100644
--- a/contrib/man/md/docker-commit.1.md
+++ b/docs/man/docker-commit.1.md
@@ -1,24 +1,28 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-commit - Create a new image from the changes to an existing
-container
+docker-commit - Create a new image from a container's changes
# SYNOPSIS
-**docker commit** **-a**|**--author**[=""] **-m**|**--message**[=""]
-CONTAINER [REPOSITORY[:TAG]]
+**docker commit**
+[**-a**|**--author**[=*AUTHOR*]]
+[**-m**|**--message**[=*MESSAGE*]]
+ CONTAINER [REPOSITORY[:TAG]]
# DESCRIPTION
Using an existing container's name or ID you can create a new image.
# OPTIONS
-**-a, --author**=""
- Author name. (eg. "John Hannibal Smith <hannibal@a-team.com>"
+**-a**, **--author**=""
+ Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")
-**-m, --message**=""
+**-m**, **--message**=""
Commit message
+**-p, --pause**=true
+ Pause container during commit
+
# EXAMPLES
## Creating a new image from an existing container
@@ -31,4 +35,5 @@ create a new image run docker ps to find the container's ID and then run:
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and in
+based on docker.com source material and in
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-cp.1.md b/docs/man/docker-cp.1.md
index f787198669..dc8f295bbe 100644
--- a/contrib/man/md/docker-cp.1.md
+++ b/docs/man/docker-cp.1.md
@@ -1,18 +1,22 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-cp - Copy files/folders from the PATH to the HOSTPATH
# SYNOPSIS
-**docker cp** CONTAINER:PATH HOSTPATH
+**docker cp**
+CONTAINER:PATH HOSTPATH
# DESCRIPTION
-Copy files/folders from the containers filesystem to the host
+Copy files/folders from a container's filesystem to the host
path. Paths are relative to the root of the filesystem. Files
can be copied from a running or stopped container.
-# EXAMPLE
+# OPTIONS
+There are no available options.
+
+# EXAMPLES
An important shell script file, created in a bash shell, is copied from
the exited container to the current dir on the host:
@@ -20,5 +24,5 @@ the exited container to the current dir on the host:
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
-
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-diff.1.md b/docs/man/docker-diff.1.md
index 2053f2c3d2..acf0911b04 100644
--- a/contrib/man/md/docker-diff.1.md
+++ b/docs/man/docker-diff.1.md
@@ -1,18 +1,22 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-diff - Inspect changes on a container's filesystem
# SYNOPSIS
-**docker diff** CONTAINER
+**docker diff**
+CONTAINER
# DESCRIPTION
Inspect changes on a container's filesystem. You can use the full or
shortened container ID or the container name set using
**docker run --name** option.
-# EXAMPLE
+# OPTIONS
+There are no available options.
+
+# EXAMPLES
Inspect the changes to on a nginx container:
# docker diff 1fdfd1f54c1b
@@ -39,6 +43,5 @@ Inspect the changes to on a nginx container:
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
-
-
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-events.1.md b/docs/man/docker-events.1.md
index 2ebe9247d4..8fa85871a8 100644
--- a/contrib/man/md/docker-events.1.md
+++ b/docs/man/docker-events.1.md
@@ -1,10 +1,14 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-events - Get real time events from the server
-**docker events** **--since**=""|*epoch-time*
+# SYNOPSIS
+**docker events**
+[**--since**[=*SINCE*]]
+[**--until**[=*UNTIL*]]
+
# DESCRIPTION
Get event information from the Docker daemon. Information can include historical
@@ -12,8 +16,10 @@ information and real-time information.
# OPTIONS
**--since**=""
-Show previously created events and then stream. This can be in either
-seconds since epoch, or date string.
+ Show all events created since timestamp
+
+**--until**=""
+ Stream events until this timestamp
# EXAMPLES
@@ -43,4 +49,5 @@ Again the output container IDs have been shortened for the purposes of this docu
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-export.1.md b/docs/man/docker-export.1.md
index ab11aa1266..8fd7834a15 100644
--- a/contrib/man/md/docker-export.1.md
+++ b/docs/man/docker-export.1.md
@@ -1,19 +1,22 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-export - Export the contents of a filesystem as a tar archive to
-STDOUT.
+docker-export - Export the contents of a filesystem as a tar archive to STDOUT
# SYNOPSIS
-**docker export** CONTAINER
+**docker export**
+CONTAINER
# DESCRIPTION
Export the contents of a container's filesystem using the full or shortened
container ID or container name. The output is exported to STDOUT and can be
redirected to a tar file.
-# EXAMPLE
+# OPTIONS
+There are no available options.
+
+# EXAMPLES
Export the contents of the container called angry_bell to a tar file
called test.tar:
@@ -23,4 +26,5 @@ called test.tar:
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-history.1.md b/docs/man/docker-history.1.md
index 1b3a9858b5..ddb164e50b 100644
--- a/contrib/man/md/docker-history.1.md
+++ b/docs/man/docker-history.1.md
@@ -1,11 +1,13 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-history - Show the history of an image
# SYNOPSIS
-**docker history** **--no-trunc**[=*false*] [**-q**|**--quiet**[=*false*]]
+**docker history**
+[**--no-trunc**[=*false*]]
+[**-q**|**--quiet**[=*false*]]
IMAGE
# DESCRIPTION
@@ -13,14 +15,13 @@ docker-history - Show the history of an image
Show the history of when and how an image was created.
# OPTIONS
-
**--no-trunc**=*true*|*false*
- When true don't truncate output. Default is false
+ Don't truncate output. The default is *false*.
-**-q**, **--quiet=*true*|*false*
- When true only show numeric IDs. Default is false.
+**-q**, **--quiet**=*true*|*false*
+ Only show numeric IDs. The default is *false*.
-# EXAMPLE
+# EXAMPLES
$ sudo docker history fedora
IMAGE CREATED CREATED BY SIZE
105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB
@@ -29,4 +30,5 @@ Show the history of when and how an image was created.
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-images.1.md b/docs/man/docker-images.1.md
index a466798096..c572ee674b 100644
--- a/contrib/man/md/docker-images.1.md
+++ b/docs/man/docker-images.1.md
@@ -1,23 +1,22 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-images - List the images in the local repository
+docker-images - List images
# SYNOPSIS
**docker images**
-[**-a**|**--all**=*false*]
-[**--no-trunc**[=*false*]
-[**-q**|**--quiet**[=*false*]
-[**-t**|**--tree**=*false*]
-[**-v**|**--viz**=*false*]
-[NAME]
+[**-a**|**--all**[=*false*]]
+[**-f**|**--filter**[=*[]*]]
+[**--no-trunc**[=*false*]]
+[**-q**|**--quiet**[=*false*]]
+ [NAME]
# DESCRIPTION
This command lists the images stored in the local Docker repository.
By default, intermediate images, used during builds, are not listed. Some of the
-output, e.g. image ID, is truncated, for space reasons. However the truncated
+output, e.g., image ID, is truncated, for space reasons. However the truncated
image ID, and often the first few characters, are enough to be used in other
Docker commands that use the image ID. The output includes repository, tag, image
ID, date created and the virtual size.
@@ -30,26 +29,17 @@ called fedora. It may be tagged with 18, 19, or 20, etc. to manage different
versions.
# OPTIONS
-
**-a**, **--all**=*true*|*false*
- When set to true, also include all intermediate images in the list. The
-default is false.
+ Show all images (by default filter out the intermediate image layers). The default is *false*.
+
+**-f**, **--filter**=[]
+ Provide filter values (i.e. 'dangling=true')
**--no-trunc**=*true*|*false*
- When set to true, list the full image ID and not the truncated ID. The
-default is false.
+ Don't truncate output. The default is *false*.
**-q**, **--quiet**=*true*|*false*
- When set to true, list the complete image ID as part of the output. The
-default is false.
-
-**-t**, **--tree**=*true*|*false*
- When set to true, list the images in a tree dependency tree (hierarchy)
-format. The default is false.
-
-**-v**, **--viz**=*true*|*false*
- When set to true, list the graph in graphviz format. The default is
-*false*.
+ Only show numeric IDs. The default is *false*.
# EXAMPLES
@@ -96,4 +86,5 @@ tools.
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-import.1.md b/docs/man/docker-import.1.md
index a0db89eef4..2d67b8bc78 100644
--- a/contrib/man/md/docker-import.1.md
+++ b/docs/man/docker-import.1.md
@@ -1,16 +1,19 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-import - Create an empty filesystem image and import the contents
-of the tarball into it.
+docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
# SYNOPSIS
-**docker import** URL|- [REPOSITORY[:TAG]]
+**docker import**
+URL|- [REPOSITORY[:TAG]]
# DESCRIPTION
-Create a new filesystem image from the contents of a tarball (.tar,
-.tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
+Create a new filesystem image from the contents of a tarball (`.tar`,
+`.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it.
+
+# OPTIONS
+There are no available options.
# EXAMPLES
@@ -36,4 +39,5 @@ Import to docker via pipe and stdin:
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-info.1.md b/docs/man/docker-info.1.md
index 8c03945dbe..2945d61dfe 100644
--- a/contrib/man/md/docker-info.1.md
+++ b/docs/man/docker-info.1.md
@@ -1,12 +1,13 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-info - Display system wide information
+docker-info - Display system-wide information
# SYNOPSIS
**docker info**
+
# DESCRIPTION
This command displays system wide information regarding the Docker installation.
Information displayed includes the number of containers and images, pool name,
@@ -43,4 +44,5 @@ Here is a sample output:
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-inspect.1.md b/docs/man/docker-inspect.1.md
index a49e42138f..a52d57c974 100644
--- a/contrib/man/md/docker-inspect.1.md
+++ b/docs/man/docker-inspect.1.md
@@ -1,12 +1,13 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-inspect - Return low-level information on a container/image
+docker-inspect - Return low-level information on a container or image
# SYNOPSIS
-**docker inspect** [**-f**|**--format**="" CONTAINER|IMAGE
-[CONTAINER|IMAGE...]
+**docker inspect**
+[**-f**|**--format**[=*FORMAT*]]
+CONTAINER|IMAGE [CONTAINER|IMAGE...]
# DESCRIPTION
@@ -17,8 +18,7 @@ each result.
# OPTIONS
**-f**, **--format**=""
- The text/template package of Go describes all the details of the
-format. See examples section
+ Format the output using the given go template.
# EXAMPLES
@@ -142,7 +142,7 @@ output:
## Getting information on an image
-Use an image's ID or name (e.g. repository/name[:tag]) to get information
+Use an image's ID or name (e.g., repository/name[:tag]) to get information
on it.
# docker inspect 58394af37342
@@ -224,6 +224,6 @@ Use an image's ID or name (e.g. repository/name[:tag]) to get information
}]
# HISTORY
-
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-kill.1.md b/docs/man/docker-kill.1.md
new file mode 100644
index 0000000000..3c8d59e6d5
--- /dev/null
+++ b/docs/man/docker-kill.1.md
@@ -0,0 +1,24 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-kill - Kill a running container using SIGKILL or a specified signal
+
+# SYNOPSIS
+**docker kill**
+[**-s**|**--signal**[=*"KILL"*]]
+ CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+
+The main process inside each container specified will be sent SIGKILL,
+ or any signal specified with option --signal.
+
+# OPTIONS
+**-s**, **--signal**="KILL"
+ Signal to send to the container
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+ based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-load.1.md b/docs/man/docker-load.1.md
index 535b701cca..07dac46138 100644
--- a/contrib/man/md/docker-load.1.md
+++ b/docs/man/docker-load.1.md
@@ -1,11 +1,13 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-load - Load an image from a tar archive on STDIN
# SYNOPSIS
-**docker load** **--input**=""
+**docker load**
+[**-i**|**--input**[=*INPUT*]]
+
# DESCRIPTION
@@ -13,11 +15,10 @@ Loads a tarred repository from a file or the standard input stream.
Restores both images and tags.
# OPTIONS
-
**-i**, **--input**=""
Read from a tar archive file, instead of STDIN
-# EXAMPLE
+# EXAMPLES
$ sudo docker images
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
@@ -33,4 +34,5 @@ Restores both images and tags.
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-login.1.md b/docs/man/docker-login.1.md
index 0a9cb283dd..c269353079 100644
--- a/contrib/man/md/docker-login.1.md
+++ b/docs/man/docker-login.1.md
@@ -1,12 +1,15 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-login - Register or Login to a docker registry server.
+docker-login - Register or log in to a Docker registry server; if no server is specified "https://index.docker.io/v1/" is the default.
# SYNOPSIS
-**docker login** [**-e**|**-email**=""] [**-p**|**--password**=""]
- [**-u**|**--username**=""] [SERVER]
+**docker login**
+[**-e**|**--email**[=*EMAIL*]]
+[**-p**|**--password**[=*PASSWORD*]]
+[**-u**|**--username**[=*USERNAME*]]
+ [SERVER]
# DESCRIPTION
Register or Login to a docker registry server, if no server is
@@ -15,7 +18,7 @@ login to a private registry you can specify this by adding the server name.
# OPTIONS
**-e**, **--email**=""
- Email address
+ Email
**-p**, **--password**=""
Password
@@ -23,7 +26,7 @@ login to a private registry you can specify this by adding the server name.
**-u**, **--username**=""
Username
-# EXAMPLE
+# EXAMPLES
## Login to a local registry
@@ -31,5 +34,5 @@ login to a private registry you can specify this by adding the server name.
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
-
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-logs.1.md b/docs/man/docker-logs.1.md
index 0b9ce867e9..5c3df75b9e 100644
--- a/contrib/man/md/docker-logs.1.md
+++ b/docs/man/docker-logs.1.md
@@ -1,11 +1,14 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-logs - Fetch the logs of a container
# SYNOPSIS
-**docker logs** **--follow**[=*false*] CONTAINER
+**docker logs**
+[**-f**|**--follow**[=*false*]]
+[**-t**|**--timestamps**[=*false*]]
+CONTAINER
# DESCRIPTION
The **docker logs** command batch-retrieves whatever logs are present for
@@ -18,9 +21,13 @@ The **docker logs --follow** command combines commands **docker logs** and
then continue streaming new output from the container’s stdout and stderr.
# OPTIONS
-**-f, --follow**=*true*|*false*
- When *true*, follow log output. The default is false.
+**-f**, **--follow**=*true*|*false*
+ Follow log output. The default is *false*.
+
+**-t**, **--timestamps**=*true*|*false*
+ Show timestamps. The default is *false*.
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-pause.1.md b/docs/man/docker-pause.1.md
new file mode 100644
index 0000000000..e6c0c2455d
--- /dev/null
+++ b/docs/man/docker-pause.1.md
@@ -0,0 +1,15 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-pause - Pause all processes within a container
+
+# SYNOPSIS
+**docker pause**
+CONTAINER
+
+# OPTIONS
+There are no available options.
+
+# HISTORY
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-port.1.md b/docs/man/docker-port.1.md
new file mode 100644
index 0000000000..07b84b12d9
--- /dev/null
+++ b/docs/man/docker-port.1.md
@@ -0,0 +1,16 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-port - Look up the public-facing port that is NAT-ed to PRIVATE_PORT
+
+# SYNOPSIS
+**docker port**
+CONTAINER PRIVATE_PORT
+
+# OPTIONS
+There are no available options.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-ps.1.md b/docs/man/docker-ps.1.md
index 60fce0213a..9264d53a66 100644
--- a/contrib/man/md/docker-ps.1.md
+++ b/docs/man/docker-ps.1.md
@@ -1,14 +1,20 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-ps - List containers
# SYNOPSIS
-**docker ps** [**-a**|**--all**=*false*] [**--before**=""]
-[**-l**|**--latest**=*false*] [**-n**=*-1*] [**--no-trunc**=*false*]
-[**-q**|**--quiet**=*false*] [**-s**|**--size**=*false*]
-[**--since**=""]
+**docker ps**
+[**-a**|**--all**[=*false*]]
+[**--before**[=*BEFORE*]]
+[**-l**|**--latest**[=*false*]]
+[**-n**[=*-1*]]
+[**--no-trunc**[=*false*]]
+[**-q**|**--quiet**[=*false*]]
+[**-s**|**--size**[=*false*]]
+[**--since**[=*SINCE*]]
+
# DESCRIPTION
@@ -16,36 +22,31 @@ List the containers in the local repository. By default this show only
the running containers.
# OPTIONS
-
**-a**, **--all**=*true*|*false*
- When true show all containers. Only running containers are shown by
-default. Default is false.
+ Show all containers. Only running containers are shown by default. The default is *false*.
**--before**=""
- Show only container created before Id or Name, include non-running
-ones.
+   Show only containers created before Id or Name, include non-running ones.
**-l**, **--latest**=*true*|*false*
- When true show only the latest created container, include non-running
-ones. The default is false.
+ Show only the latest created container, include non-running ones. The default is *false*.
-**-n**=NUM
- Show NUM (integer) last created containers, include non-running ones.
-The default is -1 (none)
+**-n**=-1
+   Show the n last created containers, include non-running ones.
**--no-trunc**=*true*|*false*
- When true truncate output. Default is false.
+ Don't truncate output. The default is *false*.
**-q**, **--quiet**=*true*|*false*
- When false only display numeric IDs. Default is false.
+ Only display numeric IDs. The default is *false*.
**-s**, **--size**=*true*|*false*
- When true display container sizes. Default is false.
+ Display sizes. The default is *false*.
**--since**=""
Show only containers created since Id or Name, include non-running ones.
-# EXAMPLE
+# EXAMPLES
# Display all containers, including non-running
# docker ps -a
@@ -65,4 +66,5 @@ The default is -1 (none)
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-pull.1.md b/docs/man/docker-pull.1.md
index 40b7425f77..465c97aadd 100644
--- a/contrib/man/md/docker-pull.1.md
+++ b/docs/man/docker-pull.1.md
@@ -1,19 +1,23 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-pull - Pull an image or a repository from the registry
# SYNOPSIS
-**docker pull** [REGISTRY_PATH/]NAME[:TAG]
+**docker pull**
+NAME[:TAG]
# DESCRIPTION
This command pulls down an image or a repository from the registry. If
-there is more than one image for a repository (e.g. fedora) then all
+there is more than one image for a repository (e.g., fedora) then all
images for that repository name are pulled down including any tags.
It is also possible to specify a non-default registry to pull from.
+# OPTIONS
+There are no available options.
+
# EXAMPLES
# Pull a repository with multiple images
@@ -47,5 +51,5 @@ It is also possible to specify a non-default registry to pull from.
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
-
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-push.1.md b/docs/man/docker-push.1.md
index dbb6e7d1b1..8523cb539e 100644
--- a/contrib/man/md/docker-push.1.md
+++ b/docs/man/docker-push.1.md
@@ -1,19 +1,23 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-push - Push an image or a repository to the registry
# SYNOPSIS
-**docker push** NAME[:TAG]
+**docker push**
+NAME[:TAG]
# DESCRIPTION
Push an image or a repository to a registry. The default registry is the Docker
-Index located at [index.docker.io](https://index.docker.io/v1/). However the
+Hub located at [hub.docker.com](https://hub.docker.com/). However the
image can be pushed to another, perhaps private, registry as demonstrated in
the example below.
-# EXAMPLE
+# OPTIONS
+There are no available options.
+
+# EXAMPLES
# Pushing a new image to a registry
@@ -24,7 +28,7 @@ and then committing it to a new image name:
Now push the image to the registry using the image ID. In this example
the registry is on host named registry-host and listening on port 5000.
-Default Docker commands will push to the default `index.docker.io`
+Default Docker commands will push to the default `hub.docker.com`
registry. Instead, push to the local registry, which is on a host called
registry-host*. To do this, tag the image with the host name or IP
address, and the port of the registry:
@@ -41,4 +45,5 @@ listed.
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-restart.1.md b/docs/man/docker-restart.1.md
new file mode 100644
index 0000000000..2a08caa5e8
--- /dev/null
+++ b/docs/man/docker-restart.1.md
@@ -0,0 +1,22 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-restart - Restart a running container
+
+# SYNOPSIS
+**docker restart**
+[**-t**|**--time**[=*10*]]
+ CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+Restart each container listed.
+
+# OPTIONS
+**-t**, **--time**=10
+   Number of seconds to wait for the container to stop before killing it. Once killed it will then be restarted. The default is 10 seconds.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-rm.1.md b/docs/man/docker-rm.1.md
index ae85af5277..1b45376976 100644
--- a/contrib/man/md/docker-rm.1.md
+++ b/docs/man/docker-rm.1.md
@@ -1,16 +1,15 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
-
+% Docker Community
+% JUNE 2014
# NAME
-
-docker-rm - Remove one or more containers.
+docker-rm - Remove one or more containers
# SYNOPSIS
-
-**docker rm** [**-f**|**--force**[=*false*] [**-l**|**--link**[=*false*] [**-v**|
-**--volumes**[=*false*]
-CONTAINER [CONTAINER...]
+**docker rm**
+[**-f**|**--force**[=*false*]]
+[**-l**|**--link**[=*false*]]
+[**-v**|**--volumes**[=*false*]]
+ CONTAINER [CONTAINER...]
# DESCRIPTION
@@ -20,18 +19,14 @@ remove a running container unless you use the \fB-f\fR option. To see all
containers on a host use the **docker ps -a** command.
# OPTIONS
-
**-f**, **--force**=*true*|*false*
- When set to true, force the removal of the container. The default is
-*false*.
+ Force removal of running container. The default is *false*.
**-l**, **--link**=*true*|*false*
- When set to true, remove the specified link and not the underlying
-container. The default is *false*.
+ Remove the specified link and not the underlying container. The default is *false*.
**-v**, **--volumes**=*true*|*false*
- When set to true, remove the volumes associated to the container. The
-default is *false*.
+ Remove the volumes associated with the container. The default is *false*.
# EXAMPLES
@@ -51,6 +46,6 @@ command. The use that name as follows:
docker rm hopeful_morse
# HISTORY
-
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-rmi.1.md b/docs/man/docker-rmi.1.md
index b728dc16a9..08d740a3be 100644
--- a/contrib/man/md/docker-rmi.1.md
+++ b/docs/man/docker-rmi.1.md
@@ -1,12 +1,14 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-rmi \- Remove one or more images.
+docker-rmi - Remove one or more images
# SYNOPSIS
-
-**docker rmi** [**-f**|**--force**[=*false*] IMAGE [IMAGE...]
+**docker rmi**
+[**-f**|**--force**[=*false*]]
+[**--no-prune**[=*false*]]
+IMAGE [IMAGE...]
# DESCRIPTION
@@ -16,10 +18,11 @@ container unless you use the **-f** option. To see all images on a host
use the **docker images** command.
# OPTIONS
-
**-f**, **--force**=*true*|*false*
- When set to true, force the removal of the image. The default is
-*false*.
+ Force removal of the image. The default is *false*.
+
+**--no-prune**=*true*|*false*
+ Do not delete untagged parents. The default is *false*.
# EXAMPLES
@@ -30,6 +33,6 @@ Here is an example of removing and image:
docker rmi fedora/httpd
# HISTORY
-
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-run.1.md b/docs/man/docker-run.1.md
index 447d9e13c3..e7571ac21a 100644
--- a/contrib/man/md/docker-run.1.md
+++ b/docs/man/docker-run.1.md
@@ -1,26 +1,40 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-run - Run a process in an isolated container
+docker-run - Run a command in a new container
# SYNOPSIS
**docker run**
-[**-a**|**--attach**[=]] [**-c**|**--cpu-shares**[=0]
-[**-m**|**--memory**=*memory-limit*]
-[**--cidfile**=*file*] [**-d**|**--detach**[=*false*]] [**--dns**=*IP-address*]
-[**--name**=*name*] [**-u**|**--user**=*username*|*uid*]
-[**--link**=*name*:*alias*]
-[**-e**|**--env**=*environment*] [**--entrypoint**=*command*]
-[**--expose**=*port*] [**-P**|**--publish-all**[=*false*]]
-[**-p**|**--publish**=*port-mappping*] [**-h**|**--hostname**=*hostname*]
-[**--rm**[=*false*]] [**--privileged**[=*false*]]
+[**-a**|**--attach**[=*[]*]]
+[**-c**|**--cpu-shares**[=*0*]]
+[**--cidfile**[=*CIDFILE*]]
+[**--cpuset**[=*CPUSET*]]
+[**-d**|**--detach**[=*false*]]
+[**--dns-search**[=*[]*]]
+[**--dns**[=*[]*]]
+[**-e**|**--env**[=*[]*]]
+[**--entrypoint**[=*ENTRYPOINT*]]
+[**--env-file**[=*[]*]]
+[**--expose**[=*[]*]]
+[**-h**|**--hostname**[=*HOSTNAME*]]
[**-i**|**--interactive**[=*false*]]
-[**-t**|**--tty**[=*false*]] [**--lxc-conf**=*options*]
-[**-n**|**--networking**[=*true*]]
-[**-v**|**--volume**=*volume*] [**--volumes-from**=*container-id*]
-[**-w**|**--workdir**=*directory*] [**--sig-proxy**[=*true*]]
-IMAGE [COMMAND] [ARG...]
+[**--link**[=*[]*]]
+[**--lxc-conf**[=*[]*]]
+[**-m**|**--memory**[=*MEMORY*]]
+[**--name**[=*NAME*]]
+[**--net**[=*"bridge"*]]
+[**-P**|**--publish-all**[=*false*]]
+[**-p**|**--publish**[=*[]*]]
+[**--privileged**[=*false*]]
+[**--rm**[=*false*]]
+[**--sig-proxy**[=*true*]]
+[**-t**|**--tty**[=*false*]]
+[**-u**|**--user**[=*USER*]]
+[**-v**|**--volume**[=*[]*]]
+[**--volumes-from**[=*[]*]]
+[**-w**|**--workdir**[=*WORKDIR*]]
+ IMAGE [COMMAND] [ARG...]
# DESCRIPTION
@@ -56,6 +70,8 @@ run**.
**--cidfile**=*file*
Write the container ID to the file specified.
+**--cpuset**=""
+ CPUs in which to allow execution (0-3, 0,1)
**-d**, **-detach**=*true*|*false*
Detached mode. This runs the container in the background. It outputs the new
@@ -67,11 +83,13 @@ the detached mode, then you cannot use the **-rm** option.
When attached in the tty mode, you can detach from a running container without
stopping the process by pressing the keys CTRL-P CTRL-Q.
+**--dns-search**=[]
+ Set custom dns search domains
**--dns**=*IP-address*
Set custom DNS servers. This option can be used to override the DNS
configuration passed to the container. Typically this is necessary when the
-host DNS configuration is invalid for the container (eg. 127.0.0.1). When this
+host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this
is the case the **-dns** flags is necessary for every run.
@@ -92,6 +110,8 @@ pass in more options via the COMMAND. But, sometimes an operator may want to run
something else inside the container, so you can override the default ENTRYPOINT
at runtime by using a **--entrypoint** and a string to specify the new
ENTRYPOINT.
+**--env-file**=[]
+ Read in a line delimited file of ENV variables
**--expose**=*port*
Expose a port from the container without publishing it to your host. A
@@ -100,34 +120,12 @@ developer can expose the port using the EXPOSE parameter of the Dockerfile, 2)
the operator can use the **--expose** option with **docker run**, or 3) the
container can be started with the **--link**.
-**-m**, **-memory**=*memory-limit*
- Allows you to constrain the memory available to a container. If the host
-supports swap memory, then the -m memory setting can be larger than physical
-RAM. If a limit of 0 is specified, the container's memory is not limited. The
-memory limit format: <number><optional unit>, where unit = b, k, m or g.
-
-**-P**, **-publish-all**=*true*|*false*
- When set to true publish all exposed ports to the host interfaces. The
-default is false. If the operator uses -P (or -p) then Docker will make the
-exposed port accessible on the host and the ports will be available to any
-client that can reach the host. To find the map between the host ports and the
-exposed ports, use **docker port**.
-
-
-**-p**, **-publish**=[]
- Publish a container's port to the host (format: ip:hostPort:containerPort |
-ip::containerPort | hostPort:containerPort) (use **docker port** to see the
-actual mapping)
-
-
**-h**, **-hostname**=*hostname*
Sets the container host name that is available inside the container.
-
**-i**, **-interactive**=*true*|*false*
When set to true, keep stdin open even if not attached. The default is false.
-
**--link**=*name*:*alias*
Add link to another container. The format is name:alias. If the operator
uses **--link** when starting the new client container, then the client
@@ -135,16 +133,16 @@ container can access the exposed port via a private networking interface. Docker
will set some environment variables in the client container to help indicate
which interface and port to use.
+**--lxc-conf**=[]
+ (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
-**-n**, **-networking**=*true*|*false*
- By default, all containers have networking enabled (true) and can make
-outgoing connections. The operator can disable networking with **--networking**
-to false. This disables all incoming and outgoing networking. In cases like this
-, I/O can only be performed through files or by using STDIN/STDOUT.
-
-Also by default, the container will use the same DNS servers as the host. The
-operator may override this with **-dns**.
-
+**-m**, **-memory**=*memory-limit*
+ Allows you to constrain the memory available to a container. If the host
+supports swap memory, then the -m memory setting can be larger than physical
+RAM. If a limit of 0 is specified, the container's memory is not limited. The
+actual limit may be rounded up to a multiple of the operating system's page
+size, if it is not already. The memory limit should be formatted as follows:
+`<number><optional unit>`, where unit = b, k, m or g.
**--name**=*name*
Assign a name to the container. The operator can identify a container in
@@ -160,6 +158,24 @@ string name. The name is useful when defining links (see **--link**) (or any
other place you need to identify a container). This works for both background
and foreground Docker containers.
+**--net**="bridge"
+ Set the Network mode for the container
+ 'bridge': creates a new network stack for the container on the docker bridge
+ 'none': no networking for this container
+ 'container:<name|id>': reuses another container network stack
+ 'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
+
+**-P**, **-publish-all**=*true*|*false*
+ When set to true publish all exposed ports to the host interfaces. The
+default is false. If the operator uses -P (or -p) then Docker will make the
+exposed port accessible on the host and the ports will be available to any
+client that can reach the host. To find the map between the host ports and the
+exposed ports, use **docker port**.
+
+**-p**, **-publish**=[]
+ Publish a container's port to the host (format: ip:hostPort:containerPort |
+ip::containerPort | hostPort:containerPort) (use **docker port** to see the
+actual mapping)
**--privileged**=*true*|*false*
Give extended privileges to this container. By default, Docker containers are
@@ -179,8 +195,8 @@ default is *false*. This option is incompatible with **-d**.
**--sig-proxy**=*true*|*false*
- When set to true, proxify all received signals to the process (even in
-non-tty mode). The default is true.
+   When set to true, proxy received signals to the process (even in
+non-tty mode). SIGCHLD is not proxied. The default is *true*.
**-t**, **-tty**=*true*|*false*
@@ -353,4 +369,5 @@ changes will also be reflected on the host in /var/db.
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-save.1.md b/docs/man/docker-save.1.md
index 126af6b154..533b4c8435 100644
--- a/contrib/man/md/docker-save.1.md
+++ b/docs/man/docker-save.1.md
@@ -1,11 +1,13 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-save - Save an image to a tar archive (streamed to STDOUT by default)
# SYNOPSIS
-**docker save** [**-o**|**--output**=""] IMAGE
+**docker save**
+[**-o**|**--output**[=*OUTPUT*]]
+IMAGE
# DESCRIPTION
Produces a tarred repository to the standard output stream. Contains all
@@ -17,7 +19,7 @@ Stream to a file instead of STDOUT by using **-o**.
**-o**, **--output**=""
Write to an file, instead of STDOUT
-# EXAMPLE
+# EXAMPLES
Save all fedora repository images to a fedora-all.tar and save the latest
fedora image to a fedora-latest.tar:
@@ -31,5 +33,5 @@ fedora image to a fedora-latest.tar:
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
-
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-search.1.md b/docs/man/docker-search.1.md
index 945dd34e59..3937b870a3 100644
--- a/contrib/man/md/docker-search.1.md
+++ b/docs/man/docker-search.1.md
@@ -1,12 +1,15 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-search - Search the docker index for images
+docker-search - Search the Docker Hub for images
# SYNOPSIS
-**docker search** **--no-trunc**[=*false*] **--automated**[=*false*]
- **-s**|**--stars**[=*0*] TERM
+**docker search**
+[**--automated**[=*false*]]
+[**--no-trunc**[=*false*]]
+[**-s**|**--stars**[=*0*]]
+TERM
# DESCRIPTION
@@ -16,17 +19,16 @@ number of stars awarded, whether the image is official, and whether it
is automated.
# OPTIONS
-**--no-trunc**=*true*|*false*
- When true display the complete description. The default is false.
+**--automated**=*true*|*false*
+ Only show automated builds. The default is *false*.
-**-s**, **--stars**=NUM
- Only displays with at least NUM (integer) stars. I.e. only those images
-ranked >=NUM.
+**--no-trunc**=*true*|*false*
+ Don't truncate output. The default is *false*.
-**--automated**=*true*|*false*
- When true only show automated builds. The default is false.
+**-s**, **--stars**=0
+   Only display images with at least x stars
-# EXAMPLE
+# EXAMPLES
## Search the registry for ranked images
@@ -52,4 +54,5 @@ ranked 1 or higher:
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-start.1.md b/docs/man/docker-start.1.md
new file mode 100644
index 0000000000..e23fd70ab4
--- /dev/null
+++ b/docs/man/docker-start.1.md
@@ -0,0 +1,27 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-start - Start a stopped container
+
+# SYNOPSIS
+**docker start**
+[**-a**|**--attach**[=*false*]]
+[**-i**|**--interactive**[=*false*]]
+CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+
+Start a stopped container.
+
+# OPTIONS
+**-a**, **--attach**=*true*|*false*
+ Attach container's STDOUT and STDERR and forward all signals to the process. The default is *false*.
+
+**-i**, **--interactive**=*true*|*false*
+ Attach container's STDIN. The default is *false*.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-stop.1.md b/docs/man/docker-stop.1.md
new file mode 100644
index 0000000000..0cc19918c3
--- /dev/null
+++ b/docs/man/docker-stop.1.md
@@ -0,0 +1,23 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-stop - Stop a running container by sending SIGTERM and then SIGKILL after a grace period
+
+# SYNOPSIS
+**docker stop**
+[**-t**|**--time**[=*10*]]
+ CONTAINER [CONTAINER...]
+
+# DESCRIPTION
+Stop a running container (send SIGTERM, and then SIGKILL after a
+ grace period)
+
+# OPTIONS
+**-t**, **--time**=10
+ Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.
+
+# HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com)
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-tag.1.md b/docs/man/docker-tag.1.md
index 0c42769908..041c9e1cb5 100644
--- a/contrib/man/md/docker-tag.1.md
+++ b/docs/man/docker-tag.1.md
@@ -1,12 +1,13 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-tag - Tag an image in the repository
+docker-tag - Tag an image into a repository
# SYNOPSIS
-**docker tag** [**-f**|**--force**[=*false*]
-IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]
+**docker tag**
+[**-f**|**--force**[=*false*]]
+ IMAGE [REGISTRYHOST/][USERNAME/]NAME[:TAG]
# DESCRIPTION
This will give a new alias to an image in the repository. This refers to the
@@ -31,11 +32,15 @@ separated by a ':'
recommended to be used for a version to disinguish images with the same name.
Note that here TAG is a part of the overall name or "tag".
+# OPTIONS
+**-f**, **--force**=*true*|*false*
+ Force. The default is *false*.
+
# EXAMPLES
## Giving an image a new alias
-Here is an example of aliasing an image (e.g. 0e5574283393) as "httpd" and
+Here is an example of aliasing an image (e.g., 0e5574283393) as "httpd" and
tagging it into the "fedora" repository with "version1.0":
docker tag 0e5574283393 fedora/httpd:version1.0
@@ -49,4 +54,5 @@ registry you must tag it with the registry hostname and port (if needed).
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-top.1.md b/docs/man/docker-top.1.md
index 2c00c527a5..9781739cde 100644
--- a/contrib/man/md/docker-top.1.md
+++ b/docs/man/docker-top.1.md
@@ -1,18 +1,22 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
-docker-top - Lookup the running processes of a container
+docker-top - Display the running processes of a container
# SYNOPSIS
-**docker top** CONTAINER [ps-OPTION]
+**docker top**
+CONTAINER [ps OPTIONS]
# DESCRIPTION
Look up the running process of the container. ps-OPTION can be any of the
options you would pass to a Linux ps command.
-# EXAMPLE
+# OPTIONS
+There are no available options.
+
+# EXAMPLES
Run **docker top** with the ps option of -x:
@@ -23,5 +27,5 @@ Run **docker top** with the ps option of -x:
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
-
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-unpause.1.md b/docs/man/docker-unpause.1.md
new file mode 100644
index 0000000000..8949548b67
--- /dev/null
+++ b/docs/man/docker-unpause.1.md
@@ -0,0 +1,15 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-unpause - Unpause all processes within a container
+
+# SYNOPSIS
+**docker unpause**
+CONTAINER
+
+# OPTIONS
+There are no available options.
+
+# HISTORY
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/docs/man/docker-version.1.md b/docs/man/docker-version.1.md
new file mode 100644
index 0000000000..9c029b239d
--- /dev/null
+++ b/docs/man/docker-version.1.md
@@ -0,0 +1,15 @@
+% DOCKER(1) Docker User Manuals
+% Docker Community
+% JUNE 2014
+# NAME
+docker-version - Show the Docker version information.
+
+# SYNOPSIS
+**docker version**
+
+
+# OPTIONS
+There are no available options.
+
+# HISTORY
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker-wait.1.md b/docs/man/docker-wait.1.md
index 6754151f09..798f6d652c 100644
--- a/contrib/man/md/docker-wait.1.md
+++ b/docs/man/docker-wait.1.md
@@ -1,16 +1,21 @@
% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
+% Docker Community
+% JUNE 2014
# NAME
docker-wait - Block until a container stops, then print its exit code.
# SYNOPSIS
-**docker wait** CONTAINER [CONTAINER...]
+**docker wait**
+CONTAINER [CONTAINER...]
# DESCRIPTION
+
Block until a container stops, then print its exit code.
-#EXAMPLE
+# OPTIONS
+There are no available options.
+
+# EXAMPLES
$ sudo docker run -d fedora sleep 99
079b83f558a2bc52ecad6b2a5de13622d584e6bb1aea058c11b36511e85e7622
@@ -19,5 +24,5 @@ Block until a container stops, then print its exit code.
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.io source material and internal work.
-
+based on docker.com source material and internal work.
+June 2014, updated by Sven Dowideit <SvenDowideit@home.org.au>
diff --git a/contrib/man/md/docker.1.md b/docs/man/docker.1.md
index ab5b67d11f..a7a826ed9f 100644
--- a/contrib/man/md/docker.1.md
+++ b/docs/man/docker.1.md
@@ -87,7 +87,7 @@ unix://[/path/to/socket] to use.
Create a new image from a container's changes
**docker-cp(1)**
- Copy files/folders from the containers filesystem to the host at path
+ Copy files/folders from a container's filesystem to the host at path
**docker-diff(1)**
Inspect changes on a container's filesystem
@@ -127,6 +127,9 @@ inside it)
**docker-logs(1)**
Fetch the logs of a container
+**docker-pause(1)**
+ Pause all processes within a container
+
**docker-port(1)**
Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
@@ -169,7 +172,10 @@ inside it)
**docker-top(1)**
Lookup the running processes of a container
-**version**
+**docker-unpause(1)**
+ Unpause all processes within a container
+
+**docker-version(1)**
Show the Docker version information
**docker-wait(1)**
@@ -184,4 +190,4 @@ For example:
# HISTORY
April 2014, Originally compiled by William Henry (whenry at redhat dot com) based
- on docker.io source material and internal work.
+ on docker.com source material and internal work.
diff --git a/contrib/man/md/md2man-all.sh b/docs/man/md2man-all.sh
index def876f47a..12d84de232 100755
--- a/contrib/man/md/md2man-all.sh
+++ b/docs/man/md2man-all.sh
@@ -17,6 +17,6 @@ for FILE in *.md; do
# skip files that aren't of the format xxxx.N.md (like README.md)
continue
fi
- mkdir -p "../man${num}"
- pandoc -s -t man "$FILE" -o "../man${num}/${name}"
+ mkdir -p "./man${num}"
+ pandoc -s -t man "$FILE" -o "./man${num}/${name}"
done
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 5c3147a285..f4ebcb68fe 100755
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -1,5 +1,5 @@
site_name: Docker Documentation
-#site_url: http://docs.docker.io/
+#site_url: http://docs.docker.com/
site_url: /
site_description: Documentation for fast and lightweight Docker container based virtualization framework.
site_favicon: img/favicon.png
@@ -87,6 +87,7 @@ pages:
- ['articles/cfengine_process_management.md', 'Articles', 'Process management with CFEngine']
- ['articles/puppet.md', 'Articles', 'Using Puppet']
- ['articles/chef.md', 'Articles', 'Using Chef']
+- ['articles/dsc.md', 'Articles', 'Using PowerShell DSC']
- ['articles/ambassador_pattern_linking.md', 'Articles', 'Cross-Host linking using Ambassador Containers']
- ['articles/runmetrics.md', 'Articles', 'Runtime metrics']
- ['articles/baseimages.md', 'Articles', 'Creating a Base Image']
@@ -103,6 +104,7 @@ pages:
- ['reference/api/registry_api.md', 'Reference', 'Docker Registry API']
- ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry Spec']
- ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API']
+- ['reference/api/docker_remote_api_v1.13.md', 'Reference', 'Docker Remote API v1.13']
- ['reference/api/docker_remote_api_v1.12.md', 'Reference', 'Docker Remote API v1.12']
- ['reference/api/docker_remote_api_v1.11.md', 'Reference', 'Docker Remote API v1.11']
- ['reference/api/docker_remote_api_v1.10.md', '**HIDDEN**']
diff --git a/docs/release.sh b/docs/release.sh
index 2168dfe1ee..f6dc2ec59f 100755
--- a/docs/release.sh
+++ b/docs/release.sh
@@ -9,7 +9,7 @@ To publish the Docker documentation you need to set your access_key and secret_k
(with the keys in a [profile $AWS_S3_BUCKET] section - so you can have more than one set of keys in your file)
and set the AWS_S3_BUCKET env var to the name of your bucket.
-make AWS_S3_BUCKET=beta-docs.docker.io docs-release
+make AWS_S3_BUCKET=docs-stage.docker.com docs-release
will then push the documentation site to your s3 bucket.
EOF
@@ -18,7 +18,15 @@ EOF
[ "$AWS_S3_BUCKET" ] || usage
-#VERSION=$(cat VERSION)
+VERSION=$(cat VERSION)
+
+if [ "$AWS_S3_BUCKET" == "docs.docker.com" ]; then
+ if [ "${VERSION%-dev}" != "$VERSION" ]; then
+ echo "Please do not push '-dev' documentation to docs.docker.com ($VERSION)"
+ exit 1
+ fi
+fi
+
export BUCKET=$AWS_S3_BUCKET
export AWS_CONFIG_FILE=$(pwd)/awsconfig
@@ -50,7 +58,7 @@ build_current_documentation() {
upload_current_documentation() {
src=site/
- dst=s3://$BUCKET
+ dst=s3://$BUCKET$1
echo
echo "Uploading $src"
@@ -61,7 +69,7 @@ upload_current_documentation() {
# a really complicated way to send only the files we want
# if there are too many in any one set, aws s3 sync seems to fall over with 2 files to go
- endings=( json html xml css js gif png JPG )
+	endings=( json html xml css js gif png JPG ttf svg woff )
for i in ${endings[@]}; do
include=""
for j in ${endings[@]}; do
@@ -78,11 +86,8 @@ upload_current_documentation() {
--exclude *.DS_Store \
--exclude *.psd \
--exclude *.ai \
- --exclude *.svg \
--exclude *.eot \
--exclude *.otf \
- --exclude *.ttf \
- --exclude *.woff \
--exclude *.rej \
--exclude *.rst \
--exclude *.orig \
@@ -99,3 +104,10 @@ setup_s3
build_current_documentation
upload_current_documentation
+# Strip the patch level from the version - e.g., 1.0.2-dev -> 1.0
+MAJOR_MINOR="v${VERSION%.*}"
+
+#build again with /v1.0/ prefix
+sed -i "s/^site_url:.*/site_url: \/$MAJOR_MINOR\//" mkdocs.yml
+build_current_documentation
+upload_current_documentation "/$MAJOR_MINOR/"
diff --git a/docs/s3_website.json b/docs/s3_website.json
index 8a6f99beb7..224ba816e2 100644
--- a/docs/s3_website.json
+++ b/docs/s3_website.json
@@ -27,6 +27,7 @@
{ "Condition": { "KeyPrefixEquals": "docker-io/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "docker-hub/" } },
{ "Condition": { "KeyPrefixEquals": "examples/cfengine_process_management/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/cfengine_process_management/" } },
{ "Condition": { "KeyPrefixEquals": "examples/https/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/https/" } },
+ { "Condition": { "KeyPrefixEquals": "examples/ambassador_pattern_linking/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/ambassador_pattern_linking/" } },
{ "Condition": { "KeyPrefixEquals": "examples/using_supervisord/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "articles/using_supervisord/" } },
{ "Condition": { "KeyPrefixEquals": "reference/api/registry_index_spec/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "reference/api/hub_registry_spec/" } },
{ "Condition": { "KeyPrefixEquals": "use/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "examples/" } }
diff --git a/docs/sources/articles/cfengine_process_management.md b/docs/sources/articles/cfengine_process_management.md
index ee5ba238a0..6bb4df66ae 100644
--- a/docs/sources/articles/cfengine_process_management.md
+++ b/docs/sources/articles/cfengine_process_management.md
@@ -87,7 +87,7 @@ The first two steps can be done as part of a Dockerfile, as follows.
ENTRYPOINT ["/var/cfengine/bin/docker_processes_run.sh"]
By saving this file as Dockerfile to a working directory, you can then build
-your image with the docker build command, e.g.
+your image with the docker build command, e.g.,
`docker build -t managed_image`.
### Testing the container
diff --git a/docs/sources/articles/dsc.md b/docs/sources/articles/dsc.md
new file mode 100644
index 0000000000..94f5e9d4db
--- /dev/null
+++ b/docs/sources/articles/dsc.md
@@ -0,0 +1,117 @@
+page_title: PowerShell DSC Usage
+page_description: Using DSC to configure a new Docker host
+page_keywords: powershell, dsc, installation, usage, docker, documentation
+
+# Using PowerShell DSC
+
+Windows PowerShell Desired State Configuration (DSC) is a configuration
+management tool that extends the existing functionality of Windows PowerShell.
+DSC uses a declarative syntax to define the state in which a target should be
+configured. More information about PowerShell DSC can be found at
+http://technet.microsoft.com/en-us/library/dn249912.aspx.
+
+## Requirements
+
+To use this guide you'll need a Windows host with PowerShell v4.0 or newer.
+
+The included DSC configuration script also uses the official PPA so
+only an Ubuntu target is supported. The Ubuntu target must already have the
+required OMI Server and PowerShell DSC for Linux providers installed. More
+information can be found at https://github.com/MSFTOSSMgmt/WPSDSCLinux. The
+source repository listed below also includes PowerShell DSC for Linux
+installation and init scripts along with more detailed installation information.
+
+## Installation
+
+The DSC configuration example source is available in the following repository:
+https://github.com/anweiss/DockerClientDSC. It can be cloned with:
+
+ $ git clone https://github.com/anweiss/DockerClientDSC.git
+
+## Usage
+
+The DSC configuration utilizes a set of shell scripts to determine whether or
+not the specified Docker components are configured on the target node(s). The
+source repository also includes a script (`RunDockerClientConfig.ps1`) that can
+be used to establish the required CIM session(s) and execute the
+`Set-DscConfiguration` cmdlet.
+
+More detailed usage information can be found at
+https://github.com/anweiss/DockerClientDSC.
+
+### Run Configuration
+The Docker installation configuration is equivalent to running:
+
+```
+apt-get install docker.io
+ln -sf /usr/bin/docker.io /usr/local/bin/docker
+sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io
+```
+
+Ensure that your current working directory is set to the `DockerClientDSC`
+source and load the DockerClient configuration into the current PowerShell
+session
+
+```powershell
+. .\DockerClient.ps1
+```
+
+Generate the required DSC configuration .mof file for the targeted node
+
+```powershell
+DockerClient -Hostname "myhost"
+```
+
+A sample DSC configuration data file has also been included and can be modified
+and used in conjunction with or in place of the `Hostname` parameter:
+
+```powershell
+DockerClient -ConfigurationData .\DockerConfigData.psd1
+```
+
+Start the configuration application process on the targeted node
+
+```powershell
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
+```
+
+The `RunDockerClientConfig.ps1` script can also parse a DSC configuration data
+file and execute configurations against multiple nodes as such:
+
+```powershell
+.\RunDockerClientConfig.ps1 -ConfigurationData .\DockerConfigData.psd1
+```
+
+### Images
+Image configuration is equivalent to running: `docker pull [image]`.
+
+Using the same Run Configuration steps defined above, execute `DockerClient`
+with the `Image` parameter:
+
+```powershell
+DockerClient -Hostname "myhost" -Image node
+```
+
+The configuration process can be initiated as before:
+
+```powershell
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
+```
+
+### Containers
+Container configuration is equivalent to running:
+`docker run -d --name="[containername]" [image] '[command]'`.
+
+Using the same Run Configuration steps defined above, execute `DockerClient`
+with the `Image`, `ContainerName`, and `Command` parameters:
+
+```powershell
+DockerClient -Hostname "myhost" -Image node -ContainerName "helloworld" `
+-Command 'echo "Hello World!"'
+```
+
+The configuration process can be initiated as before:
+
+```powershell
+.\RunDockerClientConfig.ps1 -Hostname "myhost"
+```
diff --git a/docs/sources/articles/https.md b/docs/sources/articles/https.md
index cc8c6a9761..b6ae4ef37d 100644
--- a/docs/sources/articles/https.md
+++ b/docs/sources/articles/https.md
@@ -29,7 +29,7 @@ keys:
$ openssl req -new -x509 -days 365 -key ca-key.pem -out ca.pem
Now that we have a CA, you can create a server key and certificate
-signing request. Make sure that "Common Name (e.g. server FQDN or YOUR
+signing request. Make sure that "Common Name (e.g., server FQDN or YOUR
name)" matches the hostname you will use to connect to Docker or just
use `\*` for a certificate valid for any hostname:
diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md
index 927cd80875..bf46b90ea2 100644
--- a/docs/sources/articles/networking.md
+++ b/docs/sources/articles/networking.md
@@ -26,7 +26,7 @@ bridge* that automatically forwards packets between any other network
interfaces that are attached to it. This lets containers communicate
both with the host machine and with each other. Every time Docker
creates a container, it creates a pair of “peer” interfaces that are
-like opposite ends of a pipe — a packet send on one will be received on
+like opposite ends of a pipe — a packet sent on one will be received on
the other. It gives one of the peers to the container to become its
`eth0` interface and keeps the other peer, with a unique name like
`vethAQI2QT`, out in the namespace of the host machine. By binding
diff --git a/docs/sources/articles/runmetrics.md b/docs/sources/articles/runmetrics.md
index bf4fe21c4e..9c871a24f6 100644
--- a/docs/sources/articles/runmetrics.md
+++ b/docs/sources/articles/runmetrics.md
@@ -35,7 +35,7 @@ known to the system, the hierarchy they belong to, and how many groups they cont
You can also look at `/proc/<pid>/cgroup` to see which control groups a process
belongs to. The control group will be shown as a path relative to the root of
-the hierarchy mountpoint; e.g. `/` means “this process has not been assigned into
+the hierarchy mountpoint; e.g., `/` means “this process has not been assigned into
a particular group”, while `/lxc/pumpkin` means that the process is likely to be
a member of a container named `pumpkin`.
@@ -106,9 +106,9 @@ to the processes within the cgroup, excluding sub-cgroups. The second half
(with the `total_` prefix) includes sub-cgroups as well.
Some metrics are "gauges", i.e. values that can increase or decrease
-(e.g. swap, the amount of swap space used by the members of the cgroup).
+(e.g., swap, the amount of swap space used by the members of the cgroup).
Some others are "counters", i.e. values that can only go up, because
-they represent occurrences of a specific event (e.g. pgfault, which
+they represent occurrences of a specific event (e.g., pgfault, which
indicates the number of page faults which happened since the creation of
the cgroup; this number can never decrease).
@@ -410,7 +410,7 @@ used.
Docker makes this difficult because it relies on `lxc-start`, which
carefully cleans up after itself, but it is still possible. It is
-usually easier to collect metrics at regular intervals (e.g. every
+usually easier to collect metrics at regular intervals (e.g., every
minute, with the collectd LXC plugin) and rely on that instead.
But, if you'd still like to gather the stats when a container stops,
diff --git a/docs/sources/articles/security.md b/docs/sources/articles/security.md
index cdf5fdddd0..dcc61f386c 100644
--- a/docs/sources/articles/security.md
+++ b/docs/sources/articles/security.md
@@ -5,7 +5,7 @@ page_keywords: Docker, Docker documentation, security
# Docker Security
> *Adapted from* [Containers & Docker: How Secure are
-> They?](http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/)
+> They?](http://blog.docker.com/2013/08/containers-docker-how-secure-are-they/)
There are three major areas to consider when reviewing Docker security:
@@ -17,15 +17,10 @@ There are three major areas to consider when reviewing Docker security:
## Kernel Namespaces
-Docker containers are essentially LXC containers, and they come with the
-same security features. When you start a container with
-`docker run`, behind the scenes Docker uses
-`lxc-start` to execute the Docker container. This
-creates a set of namespaces and control groups for the container. Those
-namespaces and control groups are not created by Docker itself, but by
-`lxc-start`. This means that as the LXC userland
-tools evolve (and provide additional namespaces and isolation features),
-Docker will automatically make use of them.
+Docker containers are very similar to LXC containers, and they come with
+similar security features. When you start a container with `docker
+run`, behind the scenes Docker creates a set of namespaces and control
+groups for the container.
**Namespaces provide the first and most straightforward form of
isolation**: processes running within a container cannot see, and even
@@ -55,10 +50,9 @@ ago), namespace code has been exercised and scrutinized on a large
number of production systems. And there is more: the design and
inspiration for the namespaces code are even older. Namespaces are
actually an effort to reimplement the features of [OpenVZ](
-http://en.wikipedia.org/wiki/OpenVZ) in such a way that they
-could be merged within the mainstream kernel. And OpenVZ was initially
-released in 2005, so both the design and the implementation are pretty
-mature.
+http://en.wikipedia.org/wiki/OpenVZ) in such a way that they could be
+merged within the mainstream kernel. And OpenVZ was initially released
+in 2005, so both the design and the implementation are pretty mature.
## Control Groups
@@ -82,7 +76,7 @@ started in 2006, and initially merged in kernel 2.6.24.
## Docker Daemon Attack Surface
Running containers (and applications) with Docker implies running the
-Docker daemon. This daemon currently requires root privileges, and you
+Docker daemon. This daemon currently requires `root` privileges, and you
should therefore be aware of some important details.
First of all, **only trusted users should be allowed to control your
@@ -97,8 +91,8 @@ without any restriction. This sounds crazy? Well, you have to know that
same way**. Nothing prevents you from sharing your root filesystem (or
even your root block device) with a virtual machine.
-This has a strong security implication: if you instrument Docker from
-e.g. a web server to provision containers through an API, you should be
+This has a strong security implication: for example, if you instrument Docker
+from a web server to provision containers through an API, you should be
even more careful than usual with parameter checking, to make sure that
a malicious user cannot pass crafted parameters causing Docker to create
arbitrary containers.
@@ -114,8 +108,9 @@ socket.
You can also expose the REST API over HTTP if you explicitly decide so.
However, if you do that, being aware of the above mentioned security
implication, you should ensure that it will be reachable only from a
-trusted network or VPN; or protected with e.g. `stunnel`
-and client SSL certificates.
+trusted network or VPN; or protected with e.g., `stunnel` and client SSL
+certificates. You can also secure them with [HTTPS and
+certificates](/articles/https/).
Recent improvements in Linux namespaces will soon allow to run
full-featured containers without root privileges, thanks to the new user
@@ -141,7 +136,7 @@ Finally, if you run Docker on a server, it is recommended to run
exclusively Docker in the server, and move all other services within
containers controlled by Docker. Of course, it is fine to keep your
favorite admin tools (probably at least an SSH server), as well as
-existing monitoring/supervision processes (e.g. NRPE, collectd, etc).
+existing monitoring/supervision processes (e.g., NRPE, collectd, etc).
## Linux Kernel Capabilities
@@ -159,8 +154,8 @@ This means a lot for container security; let's see why!
Your average server (bare metal or virtual machine) needs to run a bunch
of processes as root. Those typically include SSH, cron, syslogd;
-hardware management tools (to e.g. load modules), network configuration
-tools (to handle e.g. DHCP, WPA, or VPNs), and much more. A container is
+hardware management tools (e.g., load modules), network configuration
+tools (e.g., to handle DHCP, WPA, or VPNs), and much more. A container is
very different, because almost all of those tasks are handled by the
infrastructure around the container:
@@ -199,15 +194,18 @@ container, it will be much harder to do serious damage, or to escalate
to the host.
This won't affect regular web apps; but malicious users will find that
-the arsenal at their disposal has shrunk considerably! You can see [the
-list of dropped capabilities in the Docker
-code](https://github.com/dotcloud/docker/blob/v0.5.0/lxc_template.go#L97),
-and a full list of available capabilities in [Linux
+the arsenal at their disposal has shrunk considerably! By default Docker
+drops all capabilities except [those
+needed](https://github.com/dotcloud/docker/blob/master/daemon/execdriver/native/template/default_template.go),
+a whitelist instead of a blacklist approach. You can see a full list of
+available capabilities in [Linux
manpages](http://man7.org/linux/man-pages/man7/capabilities.7.html).
Of course, you can always enable extra capabilities if you really need
them (for instance, if you want to use a FUSE-based filesystem), but by
-default, Docker containers will be locked down to ensure maximum safety.
+default, Docker containers use only a
+[whitelist](https://github.com/dotcloud/docker/blob/master/daemon/execdriver/native/template/default_template.go)
+of kernel capabilities.
## Other Kernel Security Features
@@ -222,20 +220,19 @@ harden a Docker host. Here are a few examples.
- You can run a kernel with GRSEC and PAX. This will add many safety
checks, both at compile-time and run-time; it will also defeat many
- exploits, thanks to techniques like address randomization. It
- doesn't require Docker-specific configuration, since those security
- features apply system-wide, independently of containers.
- - If your distribution comes with security model templates for LXC
- containers, you can use them out of the box. For instance, Ubuntu
- comes with AppArmor templates for LXC, and those templates provide
- an extra safety net (even though it overlaps greatly with
- capabilities).
+ exploits, thanks to techniques like address randomization. It doesn't
+ require Docker-specific configuration, since those security features
+ apply system-wide, independently of containers.
+ - If your distribution comes with security model templates for
+ Docker containers, you can use them out of the box. For instance, we
+ ship a template that works with AppArmor and Red Hat comes with SELinux
+ policies for Docker. These templates provide an extra safety net (even
+ though it overlaps greatly with capabilities).
- You can define your own policies using your favorite access control
- mechanism. Since Docker containers are standard LXC containers,
- there is nothing “magic” or specific to Docker.
+ mechanism.
Just like there are many third-party tools to augment Docker containers
-with e.g. special network topologies or shared filesystems, you can
+with e.g., special network topologies or shared filesystems, you can
expect to see tools to harden existing Docker containers without
affecting Docker's core.
@@ -243,7 +240,7 @@ affecting Docker's core.
Docker containers are, by default, quite secure; especially if you take
care of running your processes inside the containers as non-privileged
-users (i.e. non root).
+users (i.e. non-`root`).
You can add an extra layer of safety by enabling Apparmor, SELinux,
GRSEC, or your favorite hardening solution.
@@ -254,4 +251,4 @@ with Docker, since everything is provided by the kernel anyway.
For more context and especially for comparisons with VMs and other
container systems, please also see the [original blog post](
-http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/).
+http://blog.docker.com/2013/08/containers-docker-how-secure-are-they/).
diff --git a/docs/sources/articles/using_supervisord.md b/docs/sources/articles/using_supervisord.md
index fd7c07cabf..91b8976d78 100644
--- a/docs/sources/articles/using_supervisord.md
+++ b/docs/sources/articles/using_supervisord.md
@@ -27,7 +27,7 @@ Let's start by creating a basic `Dockerfile` for our
new image.
FROM ubuntu:13.04
- MAINTAINER examples@docker.io
+ MAINTAINER examples@docker.com
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
RUN apt-get update
RUN apt-get upgrade -y
diff --git a/docs/sources/contributing/devenvironment.md b/docs/sources/contributing/devenvironment.md
index 54b867cf40..606f9302fc 100644
--- a/docs/sources/contributing/devenvironment.md
+++ b/docs/sources/contributing/devenvironment.md
@@ -16,7 +16,7 @@ Docker's build environment itself is a Docker container, so the first
step is to install Docker on your system.
You can follow the [install instructions most relevant to your
-system](https://docs.docker.io/installation/). Make sure you
+system](https://docs.docker.com/installation/). Make sure you
have a working, up-to-date docker installation, then continue to the
next step.
@@ -113,7 +113,7 @@ something like this
ok github.com/dotcloud/docker/utils 0.017s
If $TESTFLAGS is set in the environment, it is passed as extra arguments
-to `go test`. You can use this to select certain tests to run, e.g.
+to `go test`. You can use this to select certain tests to run, e.g.,
$ TESTFLAGS=`-run \^TestBuild\$` make test
diff --git a/docs/sources/docker-hub/accounts.md b/docs/sources/docker-hub/accounts.md
index 7e951448d3..304010fb5a 100644
--- a/docs/sources/docker-hub/accounts.md
+++ b/docs/sources/docker-hub/accounts.md
@@ -36,8 +36,8 @@ page.
Also available on the Docker Hub are organizations and groups that allow
you to collaborate across your organization or team. You can see what
-organizations [you belong to and add new organizations](Sam Alba
-<sam@docker.com>) from the Account
+organizations [you belong to and add new organizations](
+https://hub.docker.com/account/organizations/) from the Account
tab.
![organizations](/docker-hub/orgs.png)
diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md
index cf00e88bed..a7b8eea7e3 100644
--- a/docs/sources/examples/nodejs_web_app.md
+++ b/docs/sources/examples/nodejs_web_app.md
@@ -24,7 +24,7 @@ describes your app and its dependencies:
"name": "docker-centos-hello",
"private": true,
"version": "0.0.1",
- "description": "Node.js Hello World app on CentOS using docker",
+ "description": "Node.js Hello world app on CentOS using docker",
"author": "Daniel Gasienica <daniel@gasienica.ch>",
"dependencies": {
"express": "3.2.4"
@@ -42,7 +42,7 @@ app using the [Express.js](http://expressjs.com/) framework:
// App
var app = express();
app.get('/', function (req, res) {
- res.send('Hello World\n');
+ res.send('Hello world\n');
});
app.listen(PORT);
@@ -137,9 +137,9 @@ Your image will now be listed by Docker:
$ sudo docker images
# Example
- REPOSITORY TAG ID CREATED
- centos 6.4 539c0211cd76 8 weeks ago
- gasi/centos-node-hello latest d64d3505b0d2 2 hours ago
+ REPOSITORY TAG ID CREATED
+ centos 6.4 539c0211cd76 8 weeks ago
+ <your username>/centos-node-hello latest d64d3505b0d2 2 hours ago
## Run the image
@@ -167,8 +167,8 @@ To test your app, get the the port of your app that Docker mapped:
$ sudo docker ps
# Example
- ID IMAGE COMMAND ... PORTS
- ecce33b30ebf gasi/centos-node-hello:latest node /src/index.js 49160->8080
+ ID IMAGE COMMAND ... PORTS
+ ecce33b30ebf <your username>/centos-node-hello:latest node /src/index.js 49160->8080
In the example above, Docker mapped the `8080` port of the container to `49160`.
@@ -184,7 +184,7 @@ Now you can call your app using `curl` (install if needed via:
Date: Sun, 02 Jun 2013 03:53:22 GMT
Connection: keep-alive
- Hello World
+ Hello world
We hope this tutorial helped you get up and running with Node.js and
CentOS on Docker. You can get the full source code at
diff --git a/docs/sources/examples/postgresql_service.Dockerfile b/docs/sources/examples/postgresql_service.Dockerfile
index 219a537882..364a18a81d 100644
--- a/docs/sources/examples/postgresql_service.Dockerfile
+++ b/docs/sources/examples/postgresql_service.Dockerfile
@@ -1,5 +1,5 @@
#
-# example Dockerfile for http://docs.docker.io/en/latest/examples/postgresql_service/
+# example Dockerfile for http://docs.docker.com/examples/postgresql_service/
#
FROM ubuntu
diff --git a/docs/sources/examples/postgresql_service.md b/docs/sources/examples/postgresql_service.md
index b9fae49d99..5265935e3d 100644
--- a/docs/sources/examples/postgresql_service.md
+++ b/docs/sources/examples/postgresql_service.md
@@ -21,7 +21,7 @@ Start by creating a new `Dockerfile`:
> suitably secure.
#
- # example Dockerfile for http://docs.docker.io/examples/postgresql_service/
+ # example Dockerfile for http://docs.docker.com/examples/postgresql_service/
#
FROM ubuntu
diff --git a/docs/sources/examples/running_ssh_service.Dockerfile b/docs/sources/examples/running_ssh_service.Dockerfile
index 978e610422..57baf88cef 100644
--- a/docs/sources/examples/running_ssh_service.Dockerfile
+++ b/docs/sources/examples/running_ssh_service.Dockerfile
@@ -2,7 +2,7 @@
#
# VERSION 0.0.1
-FROM debian
+FROM ubuntu:12.04
MAINTAINER Thatcher R. Peskens "thatcher@dotcloud.com"
# make sure the package repository is up to date
diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md
index 27439f998f..579d372ba7 100644
--- a/docs/sources/examples/running_ssh_service.md
+++ b/docs/sources/examples/running_ssh_service.md
@@ -12,7 +12,7 @@ quick access to a test container.
#
# VERSION 0.0.1
- FROM debian
+ FROM ubuntu:12.04
MAINTAINER Thatcher R. Peskens "thatcher@dotcloud.com"
# make sure the package repository is up to date
diff --git a/docs/sources/faq.md b/docs/sources/faq.md
index 2d38cf2ff8..667058c86f 100644
--- a/docs/sources/faq.md
+++ b/docs/sources/faq.md
@@ -178,15 +178,53 @@ Cloud:
### How do I report a security issue with Docker?
You can learn about the project's security policy
-[here](https://www.docker.io/security/) and report security issues to
+[here](https://www.docker.com/security/) and report security issues to
this [mailbox](mailto:security@docker.com).
### Why do I need to sign my commits to Docker with the DCO?
Please read [our blog post](
-http://blog.docker.io/2014/01/docker-code-contributions-require-developer-certificate-of-origin/)
+http://blog.docker.com/2014/01/docker-code-contributions-require-developer-certificate-of-origin/)
on the introduction of the DCO.
+### When building an image, should I prefer system libraries or bundled ones?
+
+*This is a summary of a discussion on the [docker-dev mailing list](
+https://groups.google.com/forum/#!topic/docker-dev/L2RBSPDu1L0).*
+
+Virtually all programs depend on third-party libraries. Most frequently,
+they will use dynamic linking and some kind of package dependency, so
+that when multiple programs need the same library, it is installed only once.
+
+Some programs, however, will bundle their third-party libraries, because
+they rely on very specific versions of those libraries. For instance,
+Node.js bundles OpenSSL; MongoDB bundles V8 and Boost (among others).
+
+When creating a Docker image, is it better to use the bundled libraries,
+or should you build those programs so that they use the default system
+libraries instead?
+
+The key point about system libraries is not about saving disk or memory
+space. It is about security. All major distributions handle security
+seriously, by having dedicated security teams, following up closely
+with published vulnerabilities, and disclosing advisories themselves.
+(Look at the [Debian Security Information](https://www.debian.org/security/)
+for an example of those procedures.) Upstream developers, however,
+do not always implement similar practices.
+
+Before setting up a Docker image to compile a program from source,
+if you want to use bundled libraries, you should check if the upstream
+authors provide a convenient way to announce security vulnerabilities,
+and if they update their bundled libraries in a timely manner. If they
+don't, you are exposing yourself (and the users of your image) to
+security vulnerabilities.
+
+Likewise, before using packages built by others, you should check if the
+channels providing those packages implement similar security best practices.
+Downloading and installing an "all-in-one" .deb or .rpm sounds great at first,
+except if you have no way to figure out that it contains a copy of the
+OpenSSL library vulnerable to the [Heartbleed](http://heartbleed.com/) bug.
+
### Can I help by adding some questions and answers?
Definitely! You can fork [the repo](https://github.com/dotcloud/docker) and
diff --git a/docs/sources/index.md b/docs/sources/index.md
index 06e1ac6d57..75414b4364 100644
--- a/docs/sources/index.md
+++ b/docs/sources/index.md
@@ -6,7 +6,7 @@ page_keywords: docker, introduction, documentation, about, technology, understan
**Develop, Ship and Run Any Application, Anywhere**
-[**Docker**](https://www.docker.io) is a platform for developers and sysadmins
+[**Docker**](https://www.docker.com) is a platform for developers and sysadmins
to develop, ship, and run applications. Docker lets you quickly assemble
applications from components and eliminates the friction that can come when
shipping code. Docker lets you get your code tested and deployed into production
@@ -22,8 +22,9 @@ Docker consists of:
## Why Docker?
-- **Faster delivery of your applications**
- * We want your environment to work better. Docker containers,
+*Faster delivery of your applications*
+
+* We want your environment to work better. Docker containers,
and the work flow that comes with them, help your developers,
sysadmins, QA folks, and release engineers work together to get your code
into production and make it useful. We've created a standard
@@ -31,40 +32,42 @@ Docker consists of:
inside containers while sysadmins and operators can work on running the
container in your deployment. This separation of duties streamlines and
simplifies the management and deployment of code.
- * We make it easy to build new containers, enable rapid iteration of
+* We make it easy to build new containers, enable rapid iteration of
your applications, and increase the visibility of changes. This
helps everyone in your organization understand how an application works
and how it is built.
- * Docker containers are lightweight and fast! Containers have
+* Docker containers are lightweight and fast! Containers have
sub-second launch times, reducing the cycle
time of development, testing, and deployment.
-- **Deploy and scale more easily**
- * Docker containers run (almost) everywhere. You can deploy
+*Deploy and scale more easily*
+
+* Docker containers run (almost) everywhere. You can deploy
containers on desktops, physical servers, virtual machines, into
data centers, and up to public and private clouds.
- * Since Docker runs on so many platforms, it's easy to move your
+* Since Docker runs on so many platforms, it's easy to move your
applications around. You can easily move an application from a
testing environment into the cloud and back whenever you need.
- * Docker's lightweight containers Docker also make scaling up and
+* Docker's lightweight containers also make scaling up and
down fast and easy. You can quickly launch more containers when
needed and then shut them down easily when they're no longer needed.
-- **Get higher density and run more workloads**
- * Docker containers don't need a hypervisor, so you can pack more of
+*Get higher density and run more workloads*
+
+* Docker containers don't need a hypervisor, so you can pack more of
them onto your hosts. This means you get more value out of every
server and can potentially reduce what you spend on equipment and
licenses.
-- **Faster deployment makes for easier management**
- * As Docker speeds up your work flow, it gets easier to make lots
+*Faster deployment makes for easier management*
+
+* As Docker speeds up your work flow, it gets easier to make lots
of small changes instead of huge, big bang updates. Smaller
changes mean reduced risk and more uptime.
## About this guide
-First, the [Understanding Docker
-section](introduction/understanding-docker.md) will help you:
+The [Understanding Docker section](introduction/understanding-docker.md) will help you:
- See how Docker works at a high level
- Understand the architecture of Docker
@@ -72,22 +75,59 @@ section](introduction/understanding-docker.md) will help you:
- See how Docker compares to virtual machines
- See some common use cases.
-> [Click here to go to the Understanding
-> Docker section](introduction/understanding-docker.md).
-
### Installation Guides
-Next, we'll show you how to install Docker on a variety of platforms in the
-[installation](/installation/#installation) section.
+The [installation section](/installation/#installation) will show you how to install
+Docker on a variety of platforms.
-> [Click here to go to the Installation
-> section](/installation/#installation).
### Docker User Guide
-Once you've gotten Docker installed we recommend you work through the
-[Docker User Guide](/userguide/), to learn about Docker in more detail and
-answer questions about usage and implementation.
+To learn about Docker in more detail and to answer questions about usage and implementation, check out the [Docker User Guide](/userguide/).
+
+## Release Notes
+
+<b>Version 1.1.0</b>
+
+### New Features
+
+*`.dockerignore` support*
+
+You can now add a `.dockerignore` file next to your `Dockerfile` and Docker will ignore files and directories specified in that file when sending the build context to the daemon.
+Example: https://github.com/dotcloud/docker/blob/master/.dockerignore
+
+*Pause containers during commit*
+
+Doing a commit on a running container was not recommended because you could end up with files in an inconsistent state (for example, if they were being written during the commit). Containers are now paused when a commit is made to them.
+You can disable this feature by doing a `docker commit --pause=false <container_id>`
+
+*Tailing logs*
+
+You can now tail the logs of a container. For example, you can get the last ten lines of a log by using `docker logs --tail 10 <container_id>`. You can also follow the logs of a container without having to read the whole log file with `docker logs --tail 0 -f <container_id>`.
+
+*Allow a tar file as context for docker build*
+
+You can now pass a tar archive to `docker build` as context. This can be used to automate docker builds, for example: `cat context.tar | docker build -` or `docker run builder_image | docker build -`
+
+*Bind mounting your whole filesystem in a container*
+
+`/` is now allowed as source of `--volumes`. This means you can bind-mount your whole system in a container if you need to. For example: `docker run -v /:/my_host:ro ubuntu ls /my_host`. However, it is now forbidden to mount to /.
+
+
+### Other Improvements & Changes
+
+* Port allocation has been improved. In the previous release, Docker could prevent you from starting a container with previously allocated ports which seemed to be in use when in fact they were not. This has been fixed.
+
+* A bug in `docker save` was introduced in the last release. The `docker save` command could produce images with invalid metadata. The command now produces images with correct metadata.
+
+* Running `docker inspect` in a container now returns which containers it is linked to.
+
+* Parsing of the `docker commit` flag has improved validation, to better prevent you from committing an image with a name such as `-m`. Image names with dashes in them potentially conflict with command line flags.
+
+* The API now has improved status codes for `start` and `stop`. Trying to start a running container will now return a 304 error.
+
+* Performance has been improved overall. Starting the daemon is faster than in previous releases. The daemon’s performance has also been improved when it is working with large numbers of images and containers.
+
+* Fixed an issue with white-spaces and multi-lines in Dockerfiles.
-> [Click here to go to the Docker User Guide](/userguide/).
diff --git a/docs/sources/installation/binaries.md b/docs/sources/installation/binaries.md
index 97e2f93c4e..f6eb44fa64 100644
--- a/docs/sources/installation/binaries.md
+++ b/docs/sources/installation/binaries.md
@@ -36,7 +36,7 @@ In general, a 3.8 Linux kernel (or higher) is preferred, as some of the
prior versions have known issues that are triggered by Docker.
Note that Docker also has a client mode, which can run on virtually any
-Linux kernel (it even builds on OSX!).
+Linux kernel (it even builds on OS X!).
## Get the docker binary:
diff --git a/docs/sources/installation/fedora.md b/docs/sources/installation/fedora.md
index bcd54e6bd6..a230aa6cf5 100644
--- a/docs/sources/installation/fedora.md
+++ b/docs/sources/installation/fedora.md
@@ -48,6 +48,44 @@ Now let's verify that Docker is working.
$ sudo docker run -i -t fedora /bin/bash
+## Granting rights to users to use Docker
+
+Fedora 19 and 20 shipped with Docker 0.11. The package has already been updated
+to 1.0 in Fedora 20. If you are still using the 0.11 version you will need to
+grant rights to users of Docker.
+
+The `docker` command line tool contacts the `docker` daemon process via a
+socket file `/var/run/docker.sock` owned by group `docker`. One must be a
+member of that group in order to contact the `docker -d` process.
+
+ $ usermod -a -G docker login_name
+
+Adding users to the `docker` group is *not* necessary for Docker versions 1.0
+and above.
+
+## HTTP Proxy
+
+If you are behind an HTTP proxy server, for example in corporate settings,
+you will need to add this configuration in the Docker *systemd service file*.
+
+Edit file `/lib/systemd/system/docker.service`. Add the following to
+section `[Service]` :
+
+ Environment="HTTP_PROXY=http://proxy.example.com:80/"
+
+If you have internal Docker registries that you need to contact without
+proxying you can specify them via the `NO_PROXY` environment variable:
+
+ Environment="HTTP_PROXY=http://proxy.example.com:80/" "NO_PROXY=localhost,127.0.0.0/8,docker-registry.somecorporation.com"
+
+Flush changes:
+
+ $ systemctl daemon-reload
+
+Restart Docker:
+
+ $ systemctl start docker
+
## What next?
Continue with the [User Guide](/userguide/).
diff --git a/docs/sources/installation/google.md b/docs/sources/installation/google.md
index c91d13612f..b6c1b3d275 100644
--- a/docs/sources/installation/google.md
+++ b/docs/sources/installation/google.md
@@ -12,16 +12,15 @@ page_keywords: Docker, Docker documentation, installation, google, Google Comput
2. Download and configure the [Google Cloud SDK][3] to use your
project with the following commands:
- $ curl https://dl.google.com/dl/cloudsdk/release/install_google_cloud_sdk.bash | bash
+ $ curl https://sdk.cloud.google.com | bash
$ gcloud auth login
- Enter a cloud project id (or leave blank to not set): <google-cloud-project-id>
- ...
+ $ gcloud config set project <google-cloud-project-id>
3. Start a new instance using the latest [Container-optimized image][4]:
(select a zone close to you and the desired instance size)
$ gcloud compute instances create docker-playground \
- --image projects/google-containers/global/images/container-vm-v20140522 \
+ --image https://www.googleapis.com/compute/v1/projects/google-containers/global/images/container-vm-v20140522 \
--zone us-central1-a \
--machine-type f1-micro
diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md
index a982c59845..2aff0e5b89 100644
--- a/docs/sources/installation/mac.md
+++ b/docs/sources/installation/mac.md
@@ -21,7 +21,7 @@ virtual machine and runs the Docker daemon.
## Installation
-1. Download the latest release of the [Docker for OSX Installer](
+1. Download the latest release of the [Docker for OS X Installer](
https://github.com/boot2docker/osx-installer/releases)
2. Run the installer, which will install VirtualBox and the Boot2Docker management
@@ -31,22 +31,18 @@ virtual machine and runs the Docker daemon.
3. Run the `Boot2Docker` app in the `Applications` folder:
![](/installation/images/osx-Boot2Docker-Start-app.png)
- Or, to initiate Boot2Docker manually, open a terminal and run:
+ Or, to initialize Boot2Docker manually, open a terminal and run:
$ boot2docker init
$ boot2docker start
$ export DOCKER_HOST=tcp://$(boot2docker ip 2>/dev/null):2375
- The `boot2docker init` command will ask you to enter an SSH key passphrase - the simplest
- (but least secure) is to just hit [Enter]. This passphrase is used by the
- `boot2docker ssh` command.
-
Once you have an initialized virtual machine, you can control it with `boot2docker stop`
and `boot2docker start`.
## Upgrading
-1. Download the latest release of the [Docker for OSX Installer](
+1. Download the latest release of the [Docker for OS X Installer](
https://github.com/boot2docker/osx-installer/releases)
2. Run the installer, which will update VirtualBox and the Boot2Docker management
@@ -78,7 +74,7 @@ If you run a container with an exposed port,
then you should be able to access that Nginx server using the IP address reported by:
- $ boot2docker ssh ip addr show dev eth1
+ $ boot2docker ip
Typically, it is 192.168.59.103, but it could get changed by Virtualbox's DHCP
implementation.
@@ -91,7 +87,7 @@ The Boot2Docker management tool provides several commands:
$ ./boot2docker
Usage: ./boot2docker [<options>]
- {help|init|up|ssh|save|down|poweroff|reset|restart|config|status|info|delete|download|version}
+ {help|init|up|ssh|save|down|poweroff|reset|restart|config|status|info|ip|delete|download|version} [<args>]
Continue with the [User Guide](/userguide/).
diff --git a/docs/sources/installation/openSUSE.md b/docs/sources/installation/openSUSE.md
index ce79de2699..c03c74a811 100644
--- a/docs/sources/installation/openSUSE.md
+++ b/docs/sources/installation/openSUSE.md
@@ -19,9 +19,11 @@ repository.
# openSUSE 12.3
$ sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/ Virtualization
+ $ sudo rpm --import http://download.opensuse.org/repositories/Virtualization/openSUSE_12.3/repodata/repomd.xml.key
# openSUSE 13.1
$ sudo zypper ar -f http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/ Virtualization
+ $ sudo rpm --import http://download.opensuse.org/repositories/Virtualization/openSUSE_13.1/repodata/repomd.xml.key
Install the Docker package.
@@ -43,9 +45,15 @@ If we want Docker to start at boot, we should also:
The docker package creates a new group named docker. Users, other than
root user, need to be part of this group in order to interact with the
-Docker daemon.
+Docker daemon. You can add users with:
- $ sudo usermod -G docker <username>
+ $ sudo usermod -a -G docker <username>
+
+To verify that everything has worked as expected:
+
+ $ sudo docker run --rm -i -t ubuntu /bin/bash
+
+This should download and import the `ubuntu` image, and then start `bash` in a container. To exit the container type `exit`.
**Done!**
diff --git a/docs/sources/installation/rackspace.md b/docs/sources/installation/rackspace.md
index 1aa969d1e5..9fddf5e450 100644
--- a/docs/sources/installation/rackspace.md
+++ b/docs/sources/installation/rackspace.md
@@ -15,7 +15,7 @@ will need to install it. And this is a little more difficult on
Rackspace.
Rackspace boots their servers using grub's `menu.lst`
-and does not like non `virtual` packages (e.g. Xen compatible)
+and does not like non `virtual` packages (e.g., Xen compatible)
kernels there, although they do work. This results in
`update-grub` not having the expected result, and
you will need to set the kernel manually.
diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md
index f1ba4971eb..5d1b6c3fbf 100644
--- a/docs/sources/installation/ubuntulinux.md
+++ b/docs/sources/installation/ubuntulinux.md
@@ -244,18 +244,18 @@ To install the latest version of docker, use the standard
If you want to enable memory and swap accounting, you must add the
following command-line parameters to your kernel:
- $ cgroup_enable=memory swapaccount=1
+ cgroup_enable=memory swapaccount=1
On systems using GRUB (which is the default for Ubuntu), you can add
those parameters by editing `/etc/default/grub` and
extending `GRUB_CMDLINE_LINUX`. Look for the
following line:
- $ GRUB_CMDLINE_LINUX=""
+ GRUB_CMDLINE_LINUX=""
And replace it by the following one:
- $ GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
+ GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"
Then run `sudo update-grub`, and reboot.
@@ -283,7 +283,7 @@ forwarding:
# Change:
# DEFAULT_FORWARD_POLICY="DROP"
# to
- $ DEFAULT_FORWARD_POLICY="ACCEPT"
+ DEFAULT_FORWARD_POLICY="ACCEPT"
Then reload UFW:
@@ -316,7 +316,7 @@ Docker daemon for the containers:
$ sudo nano /etc/default/docker
---
# Add:
- $ DOCKER_OPTS="--dns 8.8.8.8"
+ DOCKER_OPTS="--dns 8.8.8.8"
# 8.8.8.8 could be replaced with a local DNS server, such as 192.168.1.1
# multiple DNS servers can be specified: --dns 8.8.8.8 --dns 192.168.1.1
diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md
index 447d8b280f..9908c053d0 100644
--- a/docs/sources/installation/windows.md
+++ b/docs/sources/installation/windows.md
@@ -5,7 +5,7 @@ page_keywords: Docker, Docker documentation, Windows, requirements, virtualbox,
# Windows
> **Note:**
> Docker has been tested on Windows 7.1 and 8; it may also run on older versions.
-
+> Your processor needs to support hardware virtualization.
The Docker Engine uses Linux-specific kernel features, so to run it on Windows
we need to use a lightweight virtual machine (vm). You use the Windows Docker client to
@@ -25,7 +25,7 @@ virtual machine and runs the Docker daemon.
2. Run the installer, which will install VirtualBox, MSYS-git, the boot2docker Linux ISO,
and the Boot2Docker management tool.
![](/installation/images/windows-installer.png)
-3. Run the `Boot2Docker Start` shell script from your Desktop or Program Files > Docker.
+3. Run the `Boot2Docker Start` shell script from your Desktop or Program Files > Boot2Docker for Windows.
The Start script will ask you to enter an ssh key passphrase - the simplest
(but least secure) is to just hit [Enter].
@@ -63,7 +63,7 @@ This will download the small busybox image and print "hello world".
The Boot2Docker management tool provides several commands:
$ ./boot2docker
- Usage: ./boot2docker [<options>] {help|init|up|ssh|save|down|poweroff|reset|restart|config|status|info|delete|download|version} [<args>]
+ Usage: ./boot2docker [<options>] {help|init|up|ssh|save|down|poweroff|reset|restart|config|status|info|ip|delete|download|version} [<args>]
## Container port redirection
diff --git a/docs/sources/introduction/understanding-docker.md b/docs/sources/introduction/understanding-docker.md
index 3a7615ebc8..c79573a635 100644
--- a/docs/sources/introduction/understanding-docker.md
+++ b/docs/sources/introduction/understanding-docker.md
@@ -112,7 +112,7 @@ Docker images are the **build** component of Docker.
#### Docker Registries
Docker registries hold images. These are public or private stores from which you upload
or download images. The public Docker registry is called
-[Docker Hub](http://index.docker.io). It provides a huge collection of existing
+[Docker Hub](http://hub.docker.com). It provides a huge collection of existing
images for your use. These can be images you create yourself or you
can use images that others have previously created. Docker registries are the
**distribution** component of Docker.
@@ -156,7 +156,7 @@ basis for a new image, for example if you have a base Apache image you could use
this as the base of all your web application images.
> **Note:** Docker usually gets these base images from
-> [Docker Hub](https://index.docker.io).
+> [Docker Hub](https://hub.docker.com).
>
Docker images are then built from these base images using a simple, descriptive
set of steps we call *instructions*. Each instruction creates a new layer in our
@@ -173,17 +173,17 @@ returns a final image.
### How does a Docker registry work?
The Docker registry is the store for your Docker images. Once you build a Docker
-image you can *push* it to a public registry [Docker Hub](https://index.docker.io) or to
+image you can *push* it to a public registry [Docker Hub](https://hub.docker.com) or to
your own registry running behind your firewall.
Using the Docker client, you can search for already published images and then
pull them down to your Docker host to build containers from them.
-[Docker Hub](https://index.docker.io) provides both public and private storage
+[Docker Hub](https://hub.docker.com) provides both public and private storage
for images. Public storage is searchable and can be downloaded by anyone.
Private storage is excluded from search results and only you and your users can
pull images down and use them to build containers. You can [sign up for a storage plan
-here](https://index.docker.io/plans).
+here](https://hub.docker.com/plans).
### How does a container work?
A container consists of an operating system, user-added files, and meta-data. As
@@ -216,7 +216,7 @@ In order, Docker does the following:
- **Pulls the `ubuntu` image:** Docker checks for the presence of the `ubuntu`
image and, if it doesn't exist locally on the host, then Docker downloads it from
-[Docker Hub](https://index.docker.io). If the image already exists, then Docker
+[Docker Hub](https://hub.docker.com). If the image already exists, then Docker
uses it for the new container.
- **Creates a new container:** Once Docker has the image, it uses it to create a
container.
diff --git a/docs/sources/reference/api/docker_remote_api.md b/docs/sources/reference/api/docker_remote_api.md
index 38cfc244e1..36f35383e1 100644
--- a/docs/sources/reference/api/docker_remote_api.md
+++ b/docs/sources/reference/api/docker_remote_api.md
@@ -18,14 +18,42 @@ page_keywords: API, Docker, rcli, REST, documentation
encoded (JSON) string with credentials:
`{'username': string, 'password': string, 'email': string, 'serveraddress' : string}`
-The current version of the API is v1.12
+The current version of the API is v1.13
Calling `/images/<name>/insert` is the same as calling
-`/v1.12/images/<name>/insert`.
+`/v1.13/images/<name>/insert`.
You can still call an old version of the API using
`/v1.12/images/<name>/insert`.
+## v1.13
+
+### Full Documentation
+
+[*Docker Remote API v1.13*](/reference/api/docker_remote_api_v1.13/)
+
+### What's new
+
+`GET /containers/(name)/json`
+
+**New!**
+The `HostConfig.Links` field is now filled correctly
+
+**New!**
+`Sockets` parameter added to the `/info` endpoint listing all the sockets the
+daemon is configured to listen on.
+
+`POST /containers/(name)/start`
+`POST /containers/(name)/stop`
+
+**New!**
+`start` and `stop` will now return 304 if the container's status is not modified
+
+`POST /commit`
+
+**New!**
+Added a `pause` parameter (default `true`) to pause the container during commit
+
## v1.12
### Full Documentation
@@ -350,7 +378,7 @@ List containers (/containers/json):
Start containers (/containers/<id>/start):
- - You can now pass host-specific configuration (e.g. bind mounts) in
+ - You can now pass host-specific configuration (e.g., bind mounts) in
the POST body for start calls
## v1.2
diff --git a/docs/sources/reference/api/docker_remote_api_v1.0.md b/docs/sources/reference/api/docker_remote_api_v1.0.md
index 2f17b2a74d..b906298b85 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.0.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.0.md
@@ -605,8 +605,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"centos",
"Volumes":null,
"VolumesFrom":""
@@ -935,7 +935,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
Status Codes:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.1.md b/docs/sources/reference/api/docker_remote_api_v1.1.md
index e777901c6c..4e449bccec 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.1.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.1.md
@@ -612,8 +612,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"centos",
"Volumes":null,
"VolumesFrom":""
@@ -946,7 +946,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
Status Codes:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.md b/docs/sources/reference/api/docker_remote_api_v1.10.md
index 0292c1ab2f..264cdefc20 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.10.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.10.md
@@ -286,7 +286,7 @@ List processes running inside the container `id`
 
- - **ps\_args** – ps arguments to use (eg. aux)
+ - **ps\_args** – ps arguments to use (e.g., aux)
Status Codes:
@@ -530,7 +530,7 @@ Attach to the container `id`
`STREAM_TYPE` can be:
- - 0: stdin (will be writen on stdout)
+ - 0: stdin (will be written on stdout)
- 1: stdout
- 2: stderr
@@ -1181,7 +1181,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
Status Codes:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.11.md b/docs/sources/reference/api/docker_remote_api_v1.11.md
index 90a5e7f362..ae2daae407 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.11.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.11.md
@@ -290,7 +290,7 @@ List processes running inside the container `id`
 
- - **ps_args** – ps arguments to use (eg. aux)
+ - **ps_args** – ps arguments to use (e.g., aux)
Status Codes:
@@ -570,7 +570,7 @@ Attach to the container `id`
`STREAM_TYPE` can be:
- - 0: stdin (will be writen on stdout)
+ - 0: stdin (will be written on stdout)
- 1: stdout
- 2: stderr
@@ -791,8 +791,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"base",
"Volumes":null,
"VolumesFrom":"",
@@ -1099,9 +1099,15 @@ Display system-wide information
{
"Containers":11,
"Images":16,
+ "Driver":"btrfs",
+ "ExecutionDriver":"native-0.1",
+      "KernelVersion":"3.12.0-1-amd64",
"Debug":false,
"NFd": 11,
"NGoroutines":21,
+ "NEventsListener":0,
+ "InitPath":"/usr/bin/docker",
+ "IndexServerAddress":["https://index.docker.io/v1/"],
"MemoryLimit":true,
"SwapLimit":false,
"IPv4Forwarding":true
@@ -1217,7 +1223,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
Status Codes:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.12.md b/docs/sources/reference/api/docker_remote_api_v1.12.md
index 5b6d79d2fb..19fb24fe48 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.12.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.12.md
@@ -290,7 +290,7 @@ List processes running inside the container `id`
 
- - **ps_args** – ps arguments to use (eg. aux)
+ - **ps_args** – ps arguments to use (e.g., aux)
Status Codes:
@@ -509,6 +509,46 @@ Kill the container `id`
- **404** – no such container
- **500** – server error
+### Pause a container
+
+`POST /containers/(id)/pause`
+
+Pause the container `id`
+
+ **Example request**:
+
+ POST /containers/e90e34656806/pause HTTP/1.1
+
+ **Example response**:
+
+        HTTP/1.1 204 No Content
+
+ Status Codes:
+
+ - **204** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### Unpause a container
+
+`POST /containers/(id)/unpause`
+
+Unpause the container `id`
+
+ **Example request**:
+
+ POST /containers/e90e34656806/unpause HTTP/1.1
+
+ **Example response**:
+
+        HTTP/1.1 204 No Content
+
+ Status Codes:
+
+ - **204** – no error
+ - **404** – no such container
+ - **500** – server error
+
### Attach to a container
`POST /containers/(id)/attach`
@@ -571,7 +611,7 @@ Attach to the container `id`
`STREAM_TYPE` can be:
- - 0: stdin (will be writen on stdout)
+ - 0: stdin (will be written on stdout)
- 1: stdout
- 2: stderr
@@ -720,7 +760,7 @@ Copy files or folders of container `id`
- **all** – 1/True/true or 0/False/false, default false
- **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list.
-
+
### Create an image
@@ -825,8 +865,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"base",
"Volumes":null,
"VolumesFrom":"",
@@ -1136,9 +1176,15 @@ Display system-wide information
{
"Containers":11,
"Images":16,
+ "Driver":"btrfs",
+ "ExecutionDriver":"native-0.1",
+      "KernelVersion":"3.12.0-1-amd64",
"Debug":false,
"NFd": 11,
"NGoroutines":21,
+ "NEventsListener":0,
+ "InitPath":"/usr/bin/docker",
+ "IndexServerAddress":["https://index.docker.io/v1/"],
"MemoryLimit":true,
"SwapLimit":false,
"IPv4Forwarding":true
@@ -1255,7 +1301,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
Status Codes:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.13.md b/docs/sources/reference/api/docker_remote_api_v1.13.md
new file mode 100644
index 0000000000..e0ad957941
--- /dev/null
+++ b/docs/sources/reference/api/docker_remote_api_v1.13.md
@@ -0,0 +1,1422 @@
+page_title: Remote API v1.13
+page_description: API Documentation for Docker
+page_keywords: API, Docker, rcli, REST, documentation
+
+# Docker Remote API v1.13
+
+## 1. Brief introduction
+
+ - The Remote API has replaced `rcli`.
+ - The daemon listens on `unix:///var/run/docker.sock` but you can
+ [*Bind Docker to another host/port or a Unix socket*](
+ /use/basics/#bind-docker).
+ - The API tends to be REST, but for some complex commands, like `attach`
+ or `pull`, the HTTP connection is hijacked to transport `STDOUT`,
+ `STDIN` and `STDERR`.
+
+# 2. Endpoints
+
+## 2.1 Containers
+
+### List containers
+
+`GET /containers/json`
+
+List containers
+
+ **Example request**:
+
+ GET /containers/json?all=1&before=8dfafdbc3a40&size=1 HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ [
+ {
+ "Id": "8dfafdbc3a40",
+ "Image": "base:latest",
+ "Command": "echo 1",
+ "Created": 1367854155,
+ "Status": "Exit 0",
+ "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}],
+ "SizeRw":12288,
+ "SizeRootFs":0
+ },
+ {
+ "Id": "9cd87474be90",
+ "Image": "base:latest",
+ "Command": "echo 222222",
+ "Created": 1367854155,
+ "Status": "Exit 0",
+ "Ports":[],
+ "SizeRw":12288,
+ "SizeRootFs":0
+ },
+ {
+ "Id": "3176a2479c92",
+ "Image": "base:latest",
+ "Command": "echo 3333333333333333",
+ "Created": 1367854154,
+ "Status": "Exit 0",
+ "Ports":[],
+ "SizeRw":12288,
+ "SizeRootFs":0
+ },
+ {
+ "Id": "4cb07b47f9fb",
+ "Image": "base:latest",
+ "Command": "echo 444444444444444444444444444444444",
+ "Created": 1367854152,
+ "Status": "Exit 0",
+ "Ports":[],
+ "SizeRw":12288,
+ "SizeRootFs":0
+ }
+ ]
+
+ Query Parameters:
+
+  
+
+ - **all** – 1/True/true or 0/False/false, Show all containers.
+ Only running containers are shown by default
+ - **limit** – Show `limit` last created
+ containers, include non-running ones.
+ - **since** – Show only containers created since Id, include
+ non-running ones.
+ - **before** – Show only containers created before Id, include
+ non-running ones.
+ - **size** – 1/True/true or 0/False/false, Show the containers
+ sizes
+
+ Status Codes:
+
+ - **200** – no error
+ - **400** – bad parameter
+ - **500** – server error
+
+### Create a container
+
+`POST /containers/create`
+
+Create a container
+
+ **Example request**:
+
+ POST /containers/create HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "Hostname":"",
+ "User":"",
+ "Memory":0,
+ "MemorySwap":0,
+ "AttachStdin":false,
+ "AttachStdout":true,
+ "AttachStderr":true,
+ "PortSpecs":null,
+ "Tty":false,
+ "OpenStdin":false,
+ "StdinOnce":false,
+ "Env":null,
+ "Cmd":[
+ "date"
+ ],
+ "Image":"base",
+ "Volumes":{
+ "/tmp": {}
+ },
+ "WorkingDir":"",
+ "DisableNetwork": false,
+ "ExposedPorts":{
+ "22/tcp": {}
+ }
+ }
+
+ **Example response**:
+
+ HTTP/1.1 201 OK
+ Content-Type: application/json
+
+ {
+             "Id":"e90e34656806",
+ "Warnings":[]
+ }
+
+ Json Parameters:
+
+  
+
+ - **config** – the container's configuration
+
+ Query Parameters:
+
+  
+
+ - **name** – Assign the specified name to the container. Must
+ match `/?[a-zA-Z0-9_-]+`.
+
+ Status Codes:
+
+ - **201** – no error
+ - **404** – no such container
+ - **406** – impossible to attach (container not running)
+ - **500** – server error
+
+### Inspect a container
+
+`GET /containers/(id)/json`
+
+Return low-level information on the container `id`
+
+
+ **Example request**:
+
+ GET /containers/4fa6e0f0c678/json HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {
+ "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2",
+ "Created": "2013-05-07T14:51:42.041847+02:00",
+ "Path": "date",
+ "Args": [],
+ "Config": {
+ "Hostname": "4fa6e0f0c678",
+ "User": "",
+ "Memory": 0,
+ "MemorySwap": 0,
+ "AttachStdin": false,
+ "AttachStdout": true,
+ "AttachStderr": true,
+ "PortSpecs": null,
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": null,
+ "Cmd": [
+ "date"
+ ],
+ "Dns": null,
+ "Image": "base",
+ "Volumes": {},
+ "VolumesFrom": "",
+ "WorkingDir":""
+
+ },
+ "State": {
+ "Running": false,
+ "Pid": 0,
+ "ExitCode": 0,
+ "StartedAt": "2013-05-07T14:51:42.087658+02:01360",
+ "Ghost": false
+ },
+ "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+ "NetworkSettings": {
+ "IpAddress": "",
+ "IpPrefixLen": 0,
+ "Gateway": "",
+ "Bridge": "",
+ "PortMapping": null
+ },
+ "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker",
+ "ResolvConfPath": "/etc/resolv.conf",
+ "Volumes": {},
+ "HostConfig": {
+ "Binds": null,
+ "ContainerIDFile": "",
+ "LxcConf": [],
+ "Privileged": false,
+ "PortBindings": {
+ "80/tcp": [
+ {
+ "HostIp": "0.0.0.0",
+ "HostPort": "49153"
+ }
+ ]
+ },
+ "Links": ["/name:alias"],
+ "PublishAllPorts": false
+ }
+ }
+
+ Status Codes:
+
+ - **200** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### List processes running inside a container
+
+`GET /containers/(id)/top`
+
+List processes running inside the container `id`
+
+ **Example request**:
+
+ GET /containers/4fa6e0f0c678/top HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {
+ "Titles":[
+ "USER",
+ "PID",
+ "%CPU",
+ "%MEM",
+ "VSZ",
+ "RSS",
+ "TTY",
+ "STAT",
+ "START",
+ "TIME",
+ "COMMAND"
+ ],
+ "Processes":[
+ ["root","20147","0.0","0.1","18060","1864","pts/4","S","10:06","0:00","bash"],
+ ["root","20271","0.0","0.0","4312","352","pts/4","S+","10:07","0:00","sleep","10"]
+ ]
+ }
+
+ Query Parameters:
+
+  
+
+ - **ps_args** – ps arguments to use (e.g., aux)
+
+ Status Codes:
+
+ - **200** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### Get container logs
+
+`GET /containers/(id)/logs`
+
+Get stdout and stderr logs from the container ``id``
+
+ **Example request**:
+
+ GET /containers/4fa6e0f0c678/logs?stderr=1&stdout=1&timestamps=1&follow=1&tail=10 HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/vnd.docker.raw-stream
+
+ {{ STREAM }}
+
+ Query Parameters:
+
+  
+
+ - **follow** – 1/True/true or 0/False/false, return stream. Default false
+ - **stdout** – 1/True/true or 0/False/false, show stdout log. Default false
+ - **stderr** – 1/True/true or 0/False/false, show stderr log. Default false
+ - **timestamps** – 1/True/true or 0/False/false, print timestamps for
+ every log line. Default false
+ - **tail** – Output specified number of lines at the end of logs: `all` or `<number>`. Default all
+
+ Status Codes:
+
+ - **200** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### Inspect changes on a container's filesystem
+
+`GET /containers/(id)/changes`
+
+Inspect changes on container `id`'s filesystem
+
+ **Example request**:
+
+ GET /containers/4fa6e0f0c678/changes HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ [
+ {
+ "Path":"/dev",
+ "Kind":0
+ },
+ {
+ "Path":"/dev/kmsg",
+ "Kind":1
+ },
+ {
+ "Path":"/test",
+ "Kind":1
+ }
+ ]
+
+ Status Codes:
+
+ - **200** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### Export a container
+
+`GET /containers/(id)/export`
+
+Export the contents of container `id`
+
+ **Example request**:
+
+ GET /containers/4fa6e0f0c678/export HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/octet-stream
+
+ {{ STREAM }}
+
+ Status Codes:
+
+ - **200** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### Start a container
+
+`POST /containers/(id)/start`
+
+Start the container `id`
+
+ **Example request**:
+
+ POST /containers/(id)/start HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "Binds":["/tmp:/tmp"],
+ "Links":["redis3:redis"],
+ "LxcConf":{"lxc.utsname":"docker"},
+ "PortBindings":{ "22/tcp": [{ "HostPort": "11022" }] },
+ "PublishAllPorts":false,
+ "Privileged":false,
+ "Dns": ["8.8.8.8"],
+ "VolumesFrom": ["parent", "other:ro"]
+ }
+
+ **Example response**:
+
+ HTTP/1.1 204 No Content
+ Content-Type: text/plain
+
+ Json Parameters:
+
+  
+
+ - **hostConfig** – the container's host configuration (optional)
+
+ Status Codes:
+
+ - **204** – no error
+ - **304** – container already started
+ - **404** – no such container
+ - **500** – server error
+
+### Stop a container
+
+`POST /containers/(id)/stop`
+
+Stop the container `id`
+
+ **Example request**:
+
+ POST /containers/e90e34656806/stop?t=5 HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 204 OK
+
+ Query Parameters:
+
+  
+
+ - **t** – number of seconds to wait before killing the container
+
+ Status Codes:
+
+ - **204** – no error
+ - **304** – container already stopped
+ - **404** – no such container
+ - **500** – server error
+
+### Restart a container
+
+`POST /containers/(id)/restart`
+
+Restart the container `id`
+
+ **Example request**:
+
+ POST /containers/e90e34656806/restart?t=5 HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 204 OK
+
+ Query Parameters:
+
+  
+
+ - **t** – number of seconds to wait before killing the container
+
+ Status Codes:
+
+ - **204** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### Kill a container
+
+`POST /containers/(id)/kill`
+
+Kill the container `id`
+
+ **Example request**:
+
+ POST /containers/e90e34656806/kill HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 204 OK
+
+ Query Parameters
+
+ - **signal** - Signal to send to the container: integer or string like "SIGINT".
+        When not set, SIGKILL is assumed and the call will wait for the container to exit.
+
+ Status Codes:
+
+ - **204** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### Pause a container
+
+`POST /containers/(id)/pause`
+
+Pause the container `id`
+
+ **Example request**:
+
+ POST /containers/e90e34656806/pause HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 204 OK
+
+ Status Codes:
+
+ - **204** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### Unpause a container
+
+`POST /containers/(id)/unpause`
+
+Unpause the container `id`
+
+ **Example request**:
+
+ POST /containers/e90e34656806/unpause HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 204 OK
+
+ Status Codes:
+
+ - **204** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### Attach to a container
+
+`POST /containers/(id)/attach`
+
+Attach to the container `id`
+
+ **Example request**:
+
+ POST /containers/16253994b7c4/attach?logs=1&stream=0&stdout=1 HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/vnd.docker.raw-stream
+
+ {{ STREAM }}
+
+ Query Parameters:
+
+  
+
+ - **logs** – 1/True/true or 0/False/false, return logs. Default
+ false
+ - **stream** – 1/True/true or 0/False/false, return stream.
+ Default false
+ - **stdin** – 1/True/true or 0/False/false, if stream=true, attach
+ to stdin. Default false
+ - **stdout** – 1/True/true or 0/False/false, if logs=true, return
+ stdout log, if stream=true, attach to stdout. Default false
+ - **stderr** – 1/True/true or 0/False/false, if logs=true, return
+ stderr log, if stream=true, attach to stderr. Default false
+
+ Status Codes:
+
+ - **200** – no error
+ - **400** – bad parameter
+ - **404** – no such container
+ - **500** – server error
+
+ **Stream details**:
+
+    When the TTY setting is enabled in
+ [`POST /containers/create`
+ ](../docker_remote_api_v1.9/#post--containers-create "POST /containers/create"),
+ the stream is the raw data from the process PTY and client's stdin.
+ When the TTY is disabled, then the stream is multiplexed to separate
+ stdout and stderr.
+
+ The format is a **Header** and a **Payload** (frame).
+
+ **HEADER**
+
+ The header will contain the information on which stream write the
+ stream (stdout or stderr). It also contain the size of the
+ associated frame encoded on the last 4 bytes (uint32).
+
+ It is encoded on the first 8 bytes like this:
+
+ header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+
+ `STREAM_TYPE` can be:
+
+ - 0: stdin (will be written on stdout)
+ - 1: stdout
+ - 2: stderr
+
+ `SIZE1, SIZE2, SIZE3, SIZE4` are the 4 bytes of
+ the uint32 size encoded as big endian.
+
+ **PAYLOAD**
+
+ The payload is the raw stream.
+
+ **IMPLEMENTATION**
+
+ The simplest way to implement the Attach protocol is the following:
+
+ 1. Read 8 bytes
+    2. Choose stdout or stderr depending on the first byte
+    3. Extract the frame size from the last 4 bytes
+ 4. Read the extracted size and output it on the correct output
+ 5. Goto 1)
+
+### Wait a container
+
+`POST /containers/(id)/wait`
+
+Block until container `id` stops, then returns the exit code
+
+ **Example request**:
+
+ POST /containers/16253994b7c4/wait HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {"StatusCode":0}
+
+ Status Codes:
+
+ - **200** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### Remove a container
+
+`DELETE /containers/(id)`
+
+Remove the container `id` from the filesystem
+
+ **Example request**:
+
+ DELETE /containers/16253994b7c4?v=1 HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 204 OK
+
+ Query Parameters:
+
+  
+
+ - **v** – 1/True/true or 0/False/false, Remove the volumes
+ associated to the container. Default false
+ - **force** – 1/True/true or 0/False/false, Removes the container
+ even if it was running. Default false
+
+ Status Codes:
+
+ - **204** – no error
+ - **400** – bad parameter
+ - **404** – no such container
+ - **500** – server error
+
+### Copy files or folders from a container
+
+`POST /containers/(id)/copy`
+
+Copy files or folders of container `id`
+
+ **Example request**:
+
+ POST /containers/4fa6e0f0c678/copy HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "Resource":"test.txt"
+ }
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/octet-stream
+
+ {{ STREAM }}
+
+ Status Codes:
+
+ - **200** – no error
+ - **404** – no such container
+ - **500** – server error
+
+## 2.2 Images
+
+### List Images
+
+`GET /images/json`
+
+**Example request**:
+
+ GET /images/json?all=0 HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ [
+ {
+ "RepoTags": [
+ "ubuntu:12.04",
+ "ubuntu:precise",
+ "ubuntu:latest"
+ ],
+ "Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
+ "Created": 1365714795,
+ "Size": 131506275,
+ "VirtualSize": 131506275
+ },
+ {
+ "RepoTags": [
+ "ubuntu:12.10",
+ "ubuntu:quantal"
+ ],
+ "ParentId": "27cf784147099545",
+ "Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+ "Created": 1364102658,
+ "Size": 24653,
+ "VirtualSize": 180116135
+ }
+ ]
+
+
+ Query Parameters:
+
+  
+
+ - **all** – 1/True/true or 0/False/false, default false
+ - **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list.
+
+
+
+### Create an image
+
+`POST /images/create`
+
+Create an image, either by pulling it from the registry or by importing it
+
+ **Example request**:
+
+ POST /images/create?fromImage=base HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {"status":"Pulling..."}
+ {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}}
+ {"error":"Invalid..."}
+ ...
+
+ When using this endpoint to pull an image from the registry, the
+ `X-Registry-Auth` header can be used to include
+ a base64-encoded AuthConfig object.
+
+ Query Parameters:
+
+  
+
+ - **fromImage** – name of the image to pull
+ - **fromSrc** – source to import, - means stdin
+ - **repo** – repository
+ - **tag** – tag
+ - **registry** – the registry to pull from
+
+ Request Headers:
+
+  
+
+ - **X-Registry-Auth** – base64-encoded AuthConfig object
+
+ Status Codes:
+
+ - **200** – no error
+ - **500** – server error
+
+### Insert a file in an image
+
+`POST /images/(name)/insert`
+
+Insert a file from `url` in the image `name` at `path`
+
+ **Example request**:
+
+ POST /images/test/insert?path=/usr&url=myurl HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {"status":"Inserting..."}
+ {"status":"Inserting", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
+ {"error":"Invalid..."}
+ ...
+
+ Status Codes:
+
+ - **200** – no error
+ - **500** – server error
+
+### Inspect an image
+
+`GET /images/(name)/json`
+
+Return low-level information on the image `name`
+
+ **Example request**:
+
+ GET /images/base/json HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {
+ "Created":"2013-03-23T22:24:18.818426-07:00",
+ "Container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0",
+ "ContainerConfig":
+ {
+ "Hostname":"",
+ "User":"",
+ "Memory":0,
+ "MemorySwap":0,
+ "AttachStdin":false,
+ "AttachStdout":false,
+ "AttachStderr":false,
+ "PortSpecs":null,
+ "Tty":true,
+ "OpenStdin":true,
+ "StdinOnce":false,
+ "Env":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
+ "Image":"base",
+ "Volumes":null,
+ "VolumesFrom":"",
+ "WorkingDir":""
+ },
+ "Id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
+ "Parent":"27cf784147099545",
+ "Size": 6824592
+ }
+
+ Status Codes:
+
+ - **200** – no error
+ - **404** – no such image
+ - **500** – server error
+
+### Get the history of an image
+
+`GET /images/(name)/history`
+
+Return the history of the image `name`
+
+ **Example request**:
+
+ GET /images/base/history HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ [
+ {
+ "Id":"b750fe79269d",
+ "Created":1364102658,
+ "CreatedBy":"/bin/bash"
+ },
+ {
+ "Id":"27cf78414709",
+ "Created":1364068391,
+ "CreatedBy":""
+ }
+ ]
+
+ Status Codes:
+
+ - **200** – no error
+ - **404** – no such image
+ - **500** – server error
+
+### Push an image on the registry
+
+`POST /images/(name)/push`
+
+Push the image `name` on the registry
+
+ **Example request**:
+
+ POST /images/test/push HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {"status":"Pushing..."}
+        {"status":"Pushing", "progress":"1/? (n/a)", "progressDetail":{"current":1}}
+ {"error":"Invalid..."}
+ ...
+
+ Query Parameters:
+
+  
+
+    - **registry** – the registry you want to push to, optional
+
+ Request Headers:
+
+  
+
+ - **X-Registry-Auth** – include a base64-encoded AuthConfig
+ object.
+
+ Status Codes:
+
+ - **200** – no error
+ - **404** – no such image
+ - **500** – server error
+
+### Tag an image into a repository
+
+`POST /images/(name)/tag`
+
+Tag the image `name` into a repository
+
+ **Example request**:
+
+ POST /images/test/tag?repo=myrepo&force=0 HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 201 OK
+
+ Query Parameters:
+
+  
+
+ - **repo** – The repository to tag in
+ - **force** – 1/True/true or 0/False/false, default false
+
+ Status Codes:
+
+ - **201** – no error
+ - **400** – bad parameter
+ - **404** – no such image
+ - **409** – conflict
+ - **500** – server error
+
+### Remove an image
+
+`DELETE /images/(name)`
+
+Remove the image `name` from the filesystem
+
+ **Example request**:
+
+ DELETE /images/test HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-type: application/json
+
+ [
+ {"Untagged":"3e2f21a89f"},
+ {"Deleted":"3e2f21a89f"},
+ {"Deleted":"53b4f83ac9"}
+ ]
+
+ Query Parameters:
+
+  
+
+ - **force** – 1/True/true or 0/False/false, default false
+ - **noprune** – 1/True/true or 0/False/false, default false
+
+ Status Codes:
+
+ - **200** – no error
+ - **404** – no such image
+ - **409** – conflict
+ - **500** – server error
+
+### Search images
+
+`GET /images/search`
+
+Search for an image on [Docker Hub](https://hub.docker.com).
+
+> **Note**:
+> The response keys have changed from API v1.6 to reflect the JSON
+> sent by the registry server to the docker daemon's request.
+
+ **Example request**:
+
+ GET /images/search?term=sshd HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ [
+ {
+ "description": "",
+ "is_official": false,
+ "is_automated": false,
+ "name": "wma55/u1210sshd",
+ "star_count": 0
+ },
+ {
+ "description": "",
+ "is_official": false,
+ "is_automated": false,
+ "name": "jdswinbank/sshd",
+ "star_count": 0
+ },
+ {
+ "description": "",
+ "is_official": false,
+ "is_automated": false,
+ "name": "vgauthier/sshd",
+ "star_count": 0
+ }
+ ...
+ ]
+
+ Query Parameters:
+
+  
+
+ - **term** – term to search
+
+ Status Codes:
+
+ - **200** – no error
+ - **500** – server error
+
+## 2.3 Misc
+
+### Build an image from Dockerfile via stdin
+
+`POST /build`
+
+Build an image from Dockerfile via stdin
+
+ **Example request**:
+
+ POST /build HTTP/1.1
+
+ {{ STREAM }}
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {"stream":"Step 1..."}
+ {"stream":"..."}
+ {"error":"Error...", "errorDetail":{"code": 123, "message": "Error..."}}
+
+ The stream must be a tar archive compressed with one of the
+ following algorithms: identity (no compression), gzip, bzip2, xz.
+
+ The archive must include a file called `Dockerfile`
+ at its root. It may include any number of other files,
+ which will be accessible in the build context (See the [*ADD build
+ command*](/reference/builder/#dockerbuilder)).
+
+ Query Parameters:
+
+  
+
+ - **t** – repository name (and optionally a tag) to be applied to
+ the resulting image in case of success
+ - **q** – suppress verbose build output
+ - **nocache** – do not use the cache when building the image
+ - **rm** - remove intermediate containers after a successful build (default behavior)
+ - **forcerm - always remove intermediate containers (includes rm)
+
+ Request Headers:
+
+  
+
+ - **Content-type** – should be set to
+ `"application/tar"`.
+ - **X-Registry-Config** – base64-encoded ConfigFile object
+
+ Status Codes:
+
+ - **200** – no error
+ - **500** – server error
+
+### Check auth configuration
+
+`POST /auth`
+
+Get the default username and email
+
+ **Example request**:
+
+ POST /auth HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "username":"hannibal",
+        "password":"xxxx",
+ "email":"hannibal@a-team.com",
+ "serveraddress":"https://index.docker.io/v1/"
+ }
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+
+ Status Codes:
+
+ - **200** – no error
+ - **204** – no error
+ - **500** – server error
+
+### Display system-wide information
+
+`GET /info`
+
+Display system-wide information
+
+ **Example request**:
+
+ GET /info HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {
+ "Containers":11,
+ "Images":16,
+ "Driver":"btrfs",
+ "ExecutionDriver":"native-0.1",
+        "KernelVersion":"3.12.0-1-amd64",
+ "Debug":false,
+ "NFd": 11,
+ "NGoroutines":21,
+ "NEventsListener":0,
+ "InitPath":"/usr/bin/docker",
+ "Sockets":["unix:///var/run/docker.sock"],
+ "IndexServerAddress":["https://index.docker.io/v1/"],
+ "MemoryLimit":true,
+ "SwapLimit":false,
+ "IPv4Forwarding":true
+ }
+
+ Status Codes:
+
+ - **200** – no error
+ - **500** – server error
+
+### Show the docker version information
+
+`GET /version`
+
+Show the docker version information
+
+ **Example request**:
+
+ GET /version HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {
+ "ApiVersion":"1.12",
+ "Version":"0.2.2",
+ "GitCommit":"5a2a5cc+CHANGES",
+ "GoVersion":"go1.0.3"
+ }
+
+ Status Codes:
+
+ - **200** – no error
+ - **500** – server error
+
+### Ping the docker server
+
+`GET /_ping`
+
+Ping the docker server
+
+ **Example request**:
+
+ GET /_ping HTTP/1.1
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+
+ OK
+
+ Status Codes:
+
+ - **200** - no error
+ - **500** - server error
+
+### Create a new image from a container's changes
+
+`POST /commit`
+
+Create a new image from a container's changes
+
+ **Example request**:
+
+ POST /commit?container=44c004db4b17&m=message&repo=myrepo HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "Hostname":"",
+ "User":"",
+ "Memory":0,
+ "MemorySwap":0,
+ "AttachStdin":false,
+ "AttachStdout":true,
+ "AttachStderr":true,
+ "PortSpecs":null,
+ "Tty":false,
+ "OpenStdin":false,
+ "StdinOnce":false,
+ "Env":null,
+ "Cmd":[
+ "date"
+ ],
+ "Volumes":{
+ "/tmp": {}
+ },
+ "WorkingDir":"",
+ "DisableNetwork": false,
+ "ExposedPorts":{
+ "22/tcp": {}
+ }
+ }
+
+ **Example response**:
+
+ HTTP/1.1 201 OK
+ Content-Type: application/vnd.docker.raw-stream
+
+ {"Id":"596069db4bf5"}
+
+ Json Parameters:
+
+
+
+ - **config** - the container's configuration
+
+ Query Parameters:
+
+  
+
+ - **container** – source container
+ - **repo** – repository
+ - **tag** – tag
+ - **m** – commit message
+ - **author** – author (e.g., "John Hannibal Smith
+ <[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
+
+ Status Codes:
+
+ - **201** – no error
+ - **404** – no such container
+ - **500** – server error
+
+### Monitor Docker's events
+
+`GET /events`
+
+Get events from docker, either in real time via streaming, or
+via polling (using since)
+
+ **Example request**:
+
+ GET /events?since=1374067924
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/json
+
+ {"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+ {"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
+ {"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966}
+ {"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970}
+
+ Query Parameters:
+
+  
+
+ - **since** – timestamp used for polling
+ - **until** – timestamp used for polling
+
+ Status Codes:
+
+ - **200** – no error
+ - **500** – server error
+
+### Get a tarball containing all images and tags in a repository
+
+`GET /images/(name)/get`
+
+Get a tarball containing all images and metadata for the repository
+specified by `name`.
+
+ **Example request**
+
+ GET /images/ubuntu/get
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+ Content-Type: application/x-tar
+
+ Binary data stream
+
+ Status Codes:
+
+ - **200** – no error
+ - **500** – server error
+
+### Load a tarball with a set of images and tags into docker
+
+`POST /images/load`
+
+Load a set of images and tags into the docker repository.
+
+ **Example request**
+
+ POST /images/load
+
+ Tarball in body
+
+ **Example response**:
+
+ HTTP/1.1 200 OK
+
+ Status Codes:
+
+ - **200** – no error
+ - **500** – server error
+
+# 3. Going further
+
+## 3.1 Inside `docker run`
+
+Here are the steps of `docker run`:
+
+- Create the container
+
+- If the status code is 404, it means the image doesn't exist:
+ - Try to pull it
+ - Then retry to create the container
+
+- Start the container
+
+- If you are not in detached mode:
+ - Attach to the container, using logs=1 (to have stdout and
+ stderr from the container's start) and stream=1
+
+- If in detached mode or only stdin is attached:
+ - Display the container's id
+
+## 3.2 Hijacking
+
+In this version of the API, /attach, uses hijacking to transport stdin,
+stdout and stderr on the same socket. This might change in the future.
+
+## 3.3 CORS Requests
+
+To enable cross origin requests to the remote api add the flag
+"--api-enable-cors" when running docker in daemon mode.
+
+ $ docker -d -H="192.168.1.9:2375" --api-enable-cors
diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.md b/docs/sources/reference/api/docker_remote_api_v1.2.md
index cecab5bb4e..37a8e1c012 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.2.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.2.md
@@ -628,8 +628,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"centos",
"Volumes":null,
"VolumesFrom":""
@@ -959,7 +959,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
Status Codes:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.md b/docs/sources/reference/api/docker_remote_api_v1.3.md
index 1d60b4300d..b510f660fd 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.3.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.3.md
@@ -678,8 +678,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"centos",
"Volumes":null,
"VolumesFrom":""
@@ -1009,7 +1009,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
Status Codes:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.md b/docs/sources/reference/api/docker_remote_api_v1.4.md
index f7d6e82c19..0e49402621 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.4.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.4.md
@@ -264,7 +264,7 @@ List processes running inside the container `id`
 
- - **ps_args** – ps arguments to use (eg. aux)
+ - **ps_args** – ps arguments to use (e.g., aux)
Status Codes:
@@ -724,8 +724,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"centos",
"Volumes":null,
"VolumesFrom":"",
@@ -1055,7 +1055,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
Status Codes:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.md b/docs/sources/reference/api/docker_remote_api_v1.5.md
index 53d970accd..33c1aeca1e 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.5.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.5.md
@@ -261,7 +261,7 @@ List processes running inside the container `id`
 
- - **ps_args** – ps arguments to use (eg. aux)
+ - **ps_args** – ps arguments to use (e.g., aux)
Status Codes:
@@ -725,8 +725,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"centos",
"Volumes":null,
"VolumesFrom":"",
@@ -1067,7 +1067,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
Status Codes:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md
index 9b7cded33f..4500c1554c 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.6.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.6.md
@@ -311,7 +311,7 @@ List processes running inside the container `id`
 
- - **ps_args** – ps arguments to use (eg. aux)
+ - **ps_args** – ps arguments to use (e.g., aux)
Status Codes:
@@ -558,7 +558,7 @@ Attach to the container `id`
`STREAM_TYPE` can be:
- - 0: stdin (will be writen on stdout)
+ - 0: stdin (will be written on stdout)
- 1: stdout
- 2: stderr
@@ -832,8 +832,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"base",
"Volumes":null,
"VolumesFrom":"",
@@ -1163,7 +1163,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
Status Codes:
diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md
index 3432e9bb21..402efa4262 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.7.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.7.md
@@ -267,7 +267,7 @@ List processes running inside the container `id`
 
- - **ps_args** – ps arguments to use (eg. aux)
+ - **ps_args** – ps arguments to use (e.g., aux)
Status Codes:
@@ -507,7 +507,7 @@ Attach to the container `id`
`STREAM_TYPE` can be:
- - 0: stdin (will be writen on stdout)
+ - 0: stdin (will be written on stdout)
- 1: stdout
- 2: stderr
@@ -751,8 +751,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"base",
"Volumes":null,
"VolumesFrom":"",
@@ -1112,7 +1112,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
- **run** – config automatically applied when the image is run.
(ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md
index 184e107cdc..78fccaf281 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.8.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.8.md
@@ -303,7 +303,7 @@ List processes running inside the container `id`
 
- - **ps_args** – ps arguments to use (eg. aux)
+ - **ps_args** – ps arguments to use (e.g., aux)
Status Codes:
@@ -549,7 +549,7 @@ Attach to the container `id`
`STREAM_TYPE` can be:
- - 0: stdin (will be writen on stdout)
+ - 0: stdin (will be written on stdout)
- 1: stdout
- 2: stderr
@@ -793,8 +793,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"base",
"Volumes":null,
"VolumesFrom":"",
@@ -1157,7 +1157,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
- **run** – config automatically applied when the image is run.
(ex: {"Cmd": ["cat", "/world"], "PortSpecs":["22"]})
diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md
index fc9f9b8d5b..741a9ac955 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.9.md
+++ b/docs/sources/reference/api/docker_remote_api_v1.9.md
@@ -303,7 +303,7 @@ List processes running inside the container `id`
 
- - **ps_args** – ps arguments to use (eg. aux)
+ - **ps_args** – ps arguments to use (e.g., aux)
Status Codes:
@@ -553,7 +553,7 @@ Attach to the container `id`
`STREAM_TYPE` can be:
- - 0: stdin (will be writen on stdout)
+ - 0: stdin (will be written on stdout)
- 1: stdout
- 2: stderr
@@ -797,8 +797,8 @@ Return low-level information on the image `name`
"OpenStdin":true,
"StdinOnce":false,
"Env":null,
- "Cmd": ["/bin/bash"]
- ,"Dns":null,
+ "Cmd": ["/bin/bash"],
+ "Dns":null,
"Image":"base",
"Volumes":null,
"VolumesFrom":"",
@@ -1194,7 +1194,7 @@ Create a new image from a container's changes
- **repo** – repository
- **tag** – tag
- **m** – commit message
- - **author** – author (eg. "John Hannibal Smith
+ - **author** – author (e.g., "John Hannibal Smith
<[hannibal@a-team.com](mailto:hannibal%40a-team.com)>")
Status Codes:
diff --git a/docs/sources/reference/api/hub_registry_spec.md b/docs/sources/reference/api/hub_registry_spec.md
index bb0e4ec7e3..1a2cf9423d 100644
--- a/docs/sources/reference/api/hub_registry_spec.md
+++ b/docs/sources/reference/api/hub_registry_spec.md
@@ -77,11 +77,11 @@ grasp the context, here are some examples of registries:
> - local mount point;
> - remote docker addressed through SSH.
-The latter would only require two new commands in docker, e.g.
+The latter would only require two new commands in docker, e.g.,
`registryget` and `registryput`,
wrapping access to the local filesystem (and optionally doing
consistency checks). Authentication and authorization are then delegated
-to SSH (e.g. with public keys).
+to SSH (e.g., with public keys).
### Docker
diff --git a/docs/sources/reference/api/registry_api.md b/docs/sources/reference/api/registry_api.md
index f8bdd6657d..2840693fa8 100644
--- a/docs/sources/reference/api/registry_api.md
+++ b/docs/sources/reference/api/registry_api.md
@@ -62,10 +62,10 @@ grasp the context, here are some examples of registries:
> - local mount point;
> - remote docker addressed through SSH.
-The latter would only require two new commands in docker, e.g.
+The latter would only require two new commands in docker, e.g.,
`registryget` and `registryput`, wrapping access to the local filesystem
(and optionally doing consistency checks). Authentication and authorization
-are then delegated to SSH (e.g. with public keys).
+are then delegated to SSH (e.g., with public keys).
# Endpoints
diff --git a/docs/sources/reference/api/remote_api_client_libraries.md b/docs/sources/reference/api/remote_api_client_libraries.md
index e299e6ed81..d1d26a1ddf 100644
--- a/docs/sources/reference/api/remote_api_client_libraries.md
+++ b/docs/sources/reference/api/remote_api_client_libraries.md
@@ -124,11 +124,17 @@ will add the libraries here.
</tr>
<tr class="row-even">
<td>Scala</td>
+ <td>tugboat</td>
+ <td><a class="reference external" href="https://github.com/softprops/tugboat">https://github.com/softprops/tugboat</a></td>
+ <td>Active</td>
+ </tr>
+ <tr class="row-odd">
+ <td>Scala</td>
<td>reactive-docker</td>
<td><a class="reference external" href="https://github.com/almoehi/reactive-docker">https://github.com/almoehi/reactive-docker</a></td>
<td>Active</td>
</tr>
- <tr class="row-odd">
+ <tr class="row-even">
<td>Java</td>
<td>docker-client</td>
<td><a class="reference external" href="https://github.com/spotify/docker-client">https://github.com/spotify/docker-client</a></td>
diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md
index 8717eb7bfc..91190933c9 100644
--- a/docs/sources/reference/builder.md
+++ b/docs/sources/reference/builder.md
@@ -15,7 +15,7 @@ To [*build*](../commandline/cli/#cli-build) an image from a source repository,
create a description file called Dockerfile at the root of your repository.
This file will describe the steps to assemble the image.
-Then call `docker build` with the path of you source repository as argument
+Then call `docker build` with the path of your source repository as the argument
(for example, `.`):
$ sudo docker build .
@@ -83,6 +83,38 @@ be treated as an argument. This allows statements like:
Here is the set of instructions you can use in a Dockerfile
for building images.
+## .dockerignore
+
+If a file named `.dockerignore` exists in the source repository, then it
+is interpreted as a newline-separated list of exclusion patterns.
+Exclusion patterns match files or directories relative to the source repository
+that will be excluded from the context. Globbing is done using Go's
+[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules.
+
+The following example shows the use of the `.dockerignore` file to exclude the
+`.git` directory from the context. Its effect can be seen in the changed size of
+the uploaded context.
+
+ $ docker build .
+ Uploading context 18.829 MB
+ Uploading context
+ Step 0 : FROM busybox
+ ---> 769b9341d937
+ Step 1 : CMD echo Hello World
+ ---> Using cache
+ ---> 99cc1ad10469
+ Successfully built 99cc1ad10469
+ $ echo ".git" > .dockerignore
+ $ docker build .
+ Uploading context 6.76 MB
+ Uploading context
+ Step 0 : FROM busybox
+ ---> 769b9341d937
+ Step 1 : CMD echo Hello World
+ ---> Using cache
+ ---> 99cc1ad10469
+ Successfully built 99cc1ad10469
+
## FROM
FROM <image>
@@ -238,14 +270,19 @@ All new files and directories are created with a uid and gid of 0.
In the case where `<src>` is a remote file URL, the destination will have permissions 600.
> **Note**:
-> If you build using STDIN (`docker build - < somefile`), there is no
-> build context, so the Dockerfile can only contain an URL based ADD
-> statement.
-
+> If you build by passing a Dockerfile through STDIN (`docker build - < somefile`),
+> there is no build context, so the Dockerfile can only contain a URL
+> based ADD statement.
+
+> You can also pass a compressed archive through STDIN:
+> (`docker build - < archive.tar.gz`), the `Dockerfile` at the root of
+> the archive and the rest of the archive will get used as the context
+> of the build.
+>
> **Note**:
> If your URL files are protected using authentication, you will need to
-> use an `RUN wget` , `RUN curl`
-> or other tool from within the container as ADD does not support
+> use `RUN wget` , `RUN curl`
+> or use another tool from within the container as ADD does not support
> authentication.
The copy obeys the following rules:
@@ -361,7 +398,7 @@ execute in `/bin/sh -c`:
FROM ubuntu
ENTRYPOINT wc -l -
-For example, that Dockerfile's image will *always* take stdin as input
+For example, that Dockerfile's image will *always* take STDIN as input
("-") and print the number of lines ("-l"). If you wanted to make this
optional but default, you could use a CMD:
diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md
index e496ce425f..301593f2f1 100644
--- a/docs/sources/reference/commandline/cli.md
+++ b/docs/sources/reference/commandline/cli.md
@@ -54,14 +54,14 @@ expect an integer, and they can only be specified once.
-b, --bridge="" Attach containers to a pre-existing network bridge
use 'none' to disable container networking
--bip="" Use this CIDR notation address for the network bridge's IP, not compatible with -b
- -d, --daemon=false Enable daemon mode
-D, --debug=false Enable debug mode
- --dns=[] Force docker to use specific DNS servers
+ -d, --daemon=false Enable daemon mode
+ --dns=[] Force Docker to use specific DNS servers
--dns-search=[] Force Docker to use specific DNS search domains
- -e, --exec-driver="native" Force the docker runtime to use a specific exec driver
+ -e, --exec-driver="native" Force the Docker runtime to use a specific exec driver
-G, --group="docker" Group to assign the unix socket specified by -H when running in daemon mode
use '' (the empty string) to disable setting of a group
- -g, --graph="/var/lib/docker" Path to use as the root of the docker runtime
+ -g, --graph="/var/lib/docker" Path to use as the root of the Docker runtime
-H, --host=[] The socket(s) to bind to in daemon mode
specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.
--icc=true Enable inter-container communication
@@ -72,9 +72,9 @@ expect an integer, and they can only be specified once.
if no value is provided: default to the default route MTU or 1500 if no default route is available
-p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file
-r, --restart=true Restart previously running containers
- -s, --storage-driver="" Force the docker runtime to use a specific storage driver
- --storage-opt=[] Set storage driver options
+ -s, --storage-driver="" Force the Docker runtime to use a specific storage driver
--selinux-enabled=false Enable selinux support
+ --storage-opt=[] Set storage driver options
--tls=false Use TLS; implied by tls-verify flags
--tlscacert="/home/sven/.docker/ca.pem" Trust only remotes providing a certificate signed by the CA given here
--tlscert="/home/sven/.docker/cert.pem" Path to TLS certificate file
@@ -134,8 +134,8 @@ like this:
Attach to a running container
- --no-stdin=false Do not attach stdin
- --sig-proxy=true Proxify all received signal to the process (even in non-tty mode)
+ --no-stdin=false Do not attach STDIN
+ --sig-proxy=true Proxify all received signals to the process (even in non-TTY mode). SIGCHLD is not proxied.
The `attach` command will allow you to view or
interact with any running container, detached (`-d`)
@@ -199,25 +199,31 @@ To kill the container, use `docker kill`.
--rm=true Remove intermediate containers after a successful build
-t, --tag="" Repository name (and optionally a tag) to be applied to the resulting image in case of success
-Use this command to build Docker images from a Dockerfile
-and a "context".
+Use this command to build Docker images from a Dockerfile and a
+"context".
-The files at `PATH` or `URL` are called the "context" of the build. The build
-process may refer to any of the files in the context, for example when using an
-[*ADD*](/reference/builder/#dockerfile-add) instruction. When a single Dockerfile is
-given as `URL` or is piped through STDIN (`docker build - < Dockerfile`), then
-no context is set.
+The files at `PATH` or `URL` are called the "context" of the build. The
+build process may refer to any of the files in the context, for example
+when using an [*ADD*](/reference/builder/#dockerfile-add) instruction.
+When a single Dockerfile is given as `URL` or is piped through `STDIN`
+(`docker build - < Dockerfile`), then no context is set.
-When a Git repository is set as `URL`, then the
-repository is used as the context. The Git repository is cloned with its
-submodules (git clone –recursive). A fresh git clone occurs in a
-temporary directory on your local host, and then this is sent to the
-Docker daemon as the context. This way, your local user credentials and
-vpn's etc can be used to access private repositories.
+When a Git repository is set as `URL`, then the repository is used as
+the context. The Git repository is cloned with its submodules (`git
+clone --recursive`). A fresh `git clone` occurs in a temporary directory
+on your local host, and then this is sent to the Docker daemon as the
+context. This way, your local user credentials and VPNs, etc., can be
+used to access private repositories.
+
+If a file named `.dockerignore` exists in the root of `PATH` then it
+is interpreted as a newline-separated list of exclusion patterns.
+Exclusion patterns match files or directories relative to `PATH` that
+will be excluded from the context. Globbing is done using Go's
+[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules.
See also:
-[*Dockerfile Reference*](/reference/builder/#dockerbuilder).
+[*Dockerfile Reference*](/reference/builder).
### Examples:
@@ -240,7 +246,7 @@ See also:
drwxr-xr-x 2 root root 4.0K Mar 12 2013 tmp
drwxr-xr-x 2 root root 4.0K Nov 15 23:34 usr
---> b35f4035db3f
- Step 3 : CMD echo Hello World
+ Step 3 : CMD echo Hello world
---> Running in 02071fceb21b
---> f52f38b7823e
Successfully built f52f38b7823e
@@ -266,6 +272,30 @@ If you wish to keep the intermediate containers after the build is
complete, you must use `--rm=false`. This does not
affect the build cache.
+ $ docker build .
+ Uploading context 18.829 MB
+ Uploading context
+ Step 0 : FROM busybox
+ ---> 769b9341d937
+ Step 1 : CMD echo Hello world
+ ---> Using cache
+ ---> 99cc1ad10469
+ Successfully built 99cc1ad10469
+ $ echo ".git" > .dockerignore
+ $ docker build .
+ Uploading context 6.76 MB
+ Uploading context
+ Step 0 : FROM busybox
+ ---> 769b9341d937
+ Step 1 : CMD echo Hello world
+ ---> Using cache
+ ---> 99cc1ad10469
+ Successfully built 99cc1ad10469
+
+This example shows the use of the `.dockerignore` file to exclude the `.git`
+directory from the context. Its effect can be seen in the changed size of the
+uploaded context.
+
$ sudo docker build -t vieux/apache:2.0 .
This will build like the previous example, but it will then tag the
@@ -274,11 +304,15 @@ and the tag will be `2.0`
$ sudo docker build - < Dockerfile
-This will read a Dockerfile from *stdin* without
-context. Due to the lack of a context, no contents of any local
-directory will be sent to the `docker` daemon. Since
-there is no context, a Dockerfile `ADD`
-only works if it refers to a remote URL.
+This will read a Dockerfile from `STDIN` without context. Due to the
+lack of a context, no contents of any local directory will be sent to
+the Docker daemon. Since there is no context, a Dockerfile `ADD` only
+works if it refers to a remote URL.
+
+ $ sudo docker build - < context.tar.gz
+
+This will build an image for a compressed context read from `STDIN`.
+Supported formats are: bzip2, gzip and xz.
$ sudo docker build github.com/creack/docker-firefox
@@ -301,8 +335,9 @@ schema.
Create a new image from a container's changes
- -a, --author="" Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
+ -a, --author="" Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")
-m, --message="" Commit message
+ -p, --pause=true Pause container during commit
It can be useful to commit a container's file changes or settings into a
new image. This allows you debug a container by running an interactive
@@ -310,6 +345,11 @@ shell, or to export a working dataset to another server. Generally, it
is better to use Dockerfiles to manage your images in a documented and
maintainable way.
+By default, the container being committed and its processes will be paused
+while the image is committed. This reduces the likelihood of
+encountering data corruption during the process of creating the commit.
+If this behavior is undesired, set the 'p' option to false.
+
### Commit an existing container
$ sudo docker ps
@@ -324,7 +364,7 @@ maintainable way.
## cp
-Copy files/folders from the containers filesystem to the host
+Copy files/folders from a container's filesystem to the host
path. Paths are relative to the root of the filesystem.
Usage: docker cp CONTAINER:PATH HOSTPATH
@@ -441,7 +481,7 @@ To see how the `docker:latest` image was built:
List images
-a, --all=false Show all images (by default filter out the intermediate image layers)
- -f, --filter=[]: Provide filter values (i.e. 'dangling=true')
+ -f, --filter=[] Provide filter values (i.e. 'dangling=true')
--no-trunc=false Don't truncate output
-q, --quiet=false Only show numeric IDs
@@ -483,8 +523,8 @@ by default.
### Filtering
-The filtering flag (-f or --filter) format is of "key=value". If there are more
-than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`)
+The filtering flag (`-f` or `--filter`) format is of "key=value". If there are more
+than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`)
Current filters:
* dangling (boolean - true or false)
@@ -527,11 +567,10 @@ NOTE: Docker will warn you if any containers exist that are using these untagged
Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
-URLs must start with `http` and point to a single
-file archive (.tar, .tar.gz, .tgz, .bzip, .tar.xz, or .txz) containing a
-root filesystem. If you would like to import from a local directory or
-archive, you can use the `-` parameter to take the
-data from *stdin*.
+URLs must start with `http` and point to a single file archive (.tar,
+.tar.gz, .tgz, .bzip, .tar.xz, or .txz) containing a root filesystem. If
+you would like to import from a local directory or archive, you can use
+the `-` parameter to take the data from `STDIN`.
### Examples
@@ -543,7 +582,7 @@ This will create a new untagged image.
**Import from a local file:**
-Import to docker via pipe and *stdin*.
+Import to docker via pipe and `STDIN`.
$ cat exampleimage.tgz | sudo docker import - exampleimagelocal:new
@@ -558,32 +597,39 @@ tar, then the ownerships might not get preserved.
## info
+
Usage: docker info
Display system-wide information
For example:
- $ sudo docker info
- Containers: 292
- Images: 194
+ $ sudo docker -D info
+ Containers: 16
+ Images: 2138
+ Storage Driver: btrfs
+ Execution Driver: native-0.1
+ Kernel Version: 3.12.0-1-amd64
Debug mode (server): false
- Debug mode (client): false
- Fds: 22
- Goroutines: 67
- LXC Version: 0.9.0
- EventsListeners: 115
- Kernel Version: 3.8.0-33-generic
- WARNING: No swap limit support
-
-When sending issue reports, please use `docker version` and `docker info` to
+ Debug mode (client): true
+ Fds: 16
+ Goroutines: 104
+ EventsListeners: 0
+ Init Path: /usr/bin/docker
+ Sockets: [unix:///var/run/docker.sock tcp://0.0.0.0:4243]
+ Username: svendowideit
+ Registry: [https://index.docker.io/v1/]
+
+The global `-D` option tells all `docker` commands to output debug information.
+
+When sending issue reports, please use `docker version` and `docker -D info` to
ensure we know how your setup is configured.
## inspect
Usage: docker inspect CONTAINER|IMAGE [CONTAINER|IMAGE...]
- Return low-level information on a container/image
+ Return low-level information on a container or image
-f, --format="" Format the output using the given go template.
@@ -637,11 +683,11 @@ contains complex json object, so to grab it as JSON, you use
Usage: docker kill [OPTIONS] CONTAINER [CONTAINER...]
- Kill a running container (send SIGKILL, or specified signal)
+ Kill a running container using SIGKILL or a specified signal
-s, --signal="KILL" Signal to send to the container
-The main process inside the container will be sent SIGKILL, or any
+The main process inside the container will be sent `SIGKILL`, or any
signal specified with option `--signal`.
## load
@@ -674,7 +720,7 @@ Restores both images and tags.
Usage: docker login [OPTIONS] [SERVER]
- Register or Login to a docker registry server, if no server is specified "https://index.docker.io/v1/" is the default.
+ Register or log in to a Docker registry server, if no server is specified "https://index.docker.io/v1/" is the default.
-e, --email="" Email
-p, --password="" Password
@@ -694,19 +740,21 @@ specify this by adding the server name.
-f, --follow=false Follow log output
-t, --timestamps=false Show timestamps
+ --tail="all" Output the specified number of lines at the end of logs (defaults to all logs)
-The `docker logs` command batch-retrieves all logs
-present at the time of execution.
+The `docker logs` command batch-retrieves logs present at the time of execution.
-The ``docker logs --follow`` command will first return all logs from the
-beginning and then continue streaming new output from the container's stdout
-and stderr.
+The `docker logs --follow` command will continue streaming the new output from
+the container's `STDOUT` and `STDERR`.
+
+Passing a negative number or a non-integer to `--tail` is invalid and the
+value is set to `all` in that case. This behavior may change in the future.
## port
Usage: docker port CONTAINER PRIVATE_PORT
- Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
+ Lookup the public-facing port that is NAT-ed to PRIVATE_PORT
## ps
@@ -735,7 +783,7 @@ Running `docker ps` showing 2 linked containers.
## pull
- Usage: docker pull [REGISTRY_PATH/]NAME[:TAG]
+ Usage: docker pull NAME[:TAG]
Pull an image or a repository from the registry
@@ -778,7 +826,7 @@ registry or to a self-hosted one.
Restart a running container
- -t, --time=10 Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10
+ -t, --time=10 Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.
## rm
@@ -788,7 +836,7 @@ registry or to a self-hosted one.
-f, --force=false Force removal of running container
-l, --link=false Remove the specified link and not the underlying container
- -v, --volumes=false Remove the volumes associated to the container
+ -v, --volumes=false Remove the volumes associated with the container
### Known Issues (rm)
@@ -824,7 +872,7 @@ delete them. Any running containers will not be deleted.
Remove one or more images
- -f, --force=false Force
+ -f, --force=false Force removal of the image
--no-prune=false Do not delete untagged parents
### Removing tagged images
@@ -864,6 +912,7 @@ removed before the image is removed.
-a, --attach=[] Attach to stdin, stdout or stderr.
-c, --cpu-shares=0 CPU shares (relative weight)
--cidfile="" Write the container ID to the file
+ --cpuset="" CPUs in which to allow execution (0-3, 0,1)
-d, --detach=false Detached mode: Run container in the background, print new container id
--dns=[] Set custom dns servers
--dns-search=[] Set custom dns search domains
@@ -881,17 +930,17 @@ removed before the image is removed.
'bridge': creates a new network stack for the container on the docker bridge
'none': no networking for this container
'container:<name|id>': reuses another container network stack
- 'host': use the host network stack inside the container
+ 'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
+ -P, --publish-all=false Publish all exposed ports to the host interfaces
-p, --publish=[] Publish a container's port to the host
format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort
(use 'docker port' to see the actual mapping)
- -P, --publish-all=false Publish all exposed ports to the host interfaces
--privileged=false Give extended privileges to this container
--rm=false Automatically remove the container when it exits (incompatible with -d)
- --sig-proxy=true Proxify all received signal to the process (even in non-tty mode)
+ --sig-proxy=true Proxify received signals to the process (even in non-tty mode). SIGCHLD is not proxied.
-t, --tty=false Allocate a pseudo-tty
-u, --user="" Username or UID
- -v, --volume=[] Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)
+ -v, --volume=[] Bind mount a volume (e.g., from the host: -v /host:/container, from docker: -v /container)
--volumes-from=[] Mount volumes from the specified container(s)
-w, --workdir="" Working directory inside the container
@@ -1034,7 +1083,7 @@ This will create and run a new container with the container name being
The `--link` flag will link the container named `/redis` into the newly
created container with the alias `redis`. The new container can access the
-network and environment of the redis container via environment variables.
+network and environment of the `redis` container via environment variables.
The `--name` flag will assign the name `console` to the newly created
container.
@@ -1047,19 +1096,19 @@ optionally suffixed with `:ro` or `:rw` to mount the volumes in read-only
or read-write mode, respectively. By default, the volumes are mounted in
the same mode (read write or read only) as the reference container.
-The `-a` flag tells `docker run` to bind to the container's stdin, stdout or
-stderr. This makes it possible to manipulate the output and input as needed.
+The `-a` flag tells `docker run` to bind to the container's `STDIN`, `STDOUT` or
+`STDERR`. This makes it possible to manipulate the output and input as needed.
$ echo "test" | sudo docker run -i -a stdin ubuntu cat -
This pipes data into a container and prints the container's ID by attaching
-only to the container'sstdin.
+only to the container's `STDIN`.
$ sudo docker run -a stderr ubuntu echo test
-This isn't going to print anything unless there's an error because We've
-only attached to the stderr of the container. The container's logs still
- store what's been written to stderr and stdout.
+This isn't going to print anything unless there's an error because we've
+only attached to the `STDERR` of the container. The container's logs
+still store what's been written to `STDERR` and `STDOUT`.
$ cat somefile | sudo docker run -i -a stdin mybuilder dobuild
@@ -1104,7 +1153,7 @@ application change:
Usage: docker save IMAGE
- Save an image to a tar archive (streamed to stdout by default)
+ Save an image to a tar archive (streamed to STDOUT by default)
-o, --output="" Write to an file, instead of STDOUT
@@ -1129,11 +1178,11 @@ Search [Docker Hub](https://hub.docker.com) for images
Usage: docker search TERM
- Search the docker index for images
+ Search the Docker Hub for images
- --no-trunc=false Don't truncate output
- -s, --stars=0 Only displays with at least xxx stars
- --automated=false Only show automated builds
+ --automated=false Only show automated builds
+ --no-trunc=false Don't truncate output
+ -s, --stars=0 Only displays with at least x stars
See [*Find Public Images on Docker Hub*](
/userguide/dockerrepos/#find-public-images-on-docker-hub) for
@@ -1145,8 +1194,8 @@ more details on finding shared images from the command line.
Restart a stopped container
- -a, --attach=false Attach container's stdout/stderr and forward all signals to the process
- -i, --interactive=false Attach container's stdin
+ -a, --attach=false Attach container's STDOUT and STDERR and forward all signals to the process
+ -i, --interactive=false Attach container's STDIN
When run on a container that has already been started,
takes no action and succeeds unconditionally.
@@ -1155,9 +1204,9 @@ takes no action and succeeds unconditionally.
Usage: docker stop [OPTIONS] CONTAINER [CONTAINER...]
- Stop a running container (Send SIGTERM, and then SIGKILL after grace period)
+ Stop a running container by sending SIGTERM and then SIGKILL after a grace period
- -t, --time=10 Number of seconds to wait for the container to stop before killing it.
+ -t, --time=10 Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.
The main process inside the container will receive SIGTERM, and after a
grace period, SIGKILL
@@ -1178,13 +1227,13 @@ them to [*Share Images via Repositories*](
Usage: docker top CONTAINER [ps OPTIONS]
- Lookup the running processes of a container
+ Display the running processes of a container
## version
Usage: docker version
- Show the docker version information.
+ Show the Docker version information.
Show the Docker version, API version, Git commit, and Go version of
both Docker client and daemon.
@@ -1194,3 +1243,4 @@ both Docker client and daemon.
Usage: docker wait CONTAINER [CONTAINER...]
Block until a container stops, then print its exit code.
+
diff --git a/docs/sources/reference/run.md b/docs/sources/reference/run.md
index 5cb050c025..a539ab0d18 100644
--- a/docs/sources/reference/run.md
+++ b/docs/sources/reference/run.md
@@ -5,13 +5,13 @@ page_keywords: docker, run, configure, runtime
# Docker Run Reference
**Docker runs processes in isolated containers**. When an operator
-executes `docker run`, she starts a process with its
-own file system, its own networking, and its own isolated process tree.
-The [*Image*](/terms/image/#image-def) which starts the process may
-define defaults related to the binary to run, the networking to expose,
-and more, but `docker run` gives final control to
-the operator who starts the container from the image. That's the main
-reason [*run*](/reference/commandline/cli/#cli-run) has more options than any
+executes `docker run`, she starts a process with its own file system,
+its own networking, and its own isolated process tree. The
+[*Image*](/terms/image/#image-def) which starts the process may define
+defaults related to the binary to run, the networking to expose, and
+more, but `docker run` gives final control to the operator who starts
+the container from the image. That's the main reason
+[*run*](/reference/commandline/cli/#cli-run) has more options than any
other `docker` command.
## General Form
@@ -36,10 +36,10 @@ The list of `[OPTIONS]` breaks down into two groups:
2. Setting shared between operators and developers, where operators can
override defaults developers set in images at build time.
-Together, the `docker run [OPTIONS]` give complete
-control over runtime behavior to the operator, allowing them to override
-all defaults set by the developer during `docker build`
-and nearly all the defaults set by the Docker runtime itself.
+Together, the `docker run [OPTIONS]` give complete control over runtime
+behavior to the operator, allowing them to override all defaults set by
+the developer during `docker build` and nearly all the defaults set by
+the Docker runtime itself.
## Operator Exclusive Options
@@ -54,10 +54,8 @@ following options.
- [PID Equivalent](#pid-equivalent)
- [Network Settings](#network-settings)
- [Clean Up (--rm)](#clean-up-rm)
- - [Runtime Constraints on CPU and
- Memory](#runtime-constraints-on-cpu-and-memory)
- - [Runtime Privilege and LXC
- Configuration](#runtime-privilege-and-lxc-configuration)
+ - [Runtime Constraints on CPU and Memory](#runtime-constraints-on-cpu-and-memory)
+ - [Runtime Privilege and LXC Configuration](#runtime-privilege-and-lxc-configuration)
## Detached vs Foreground
@@ -78,32 +76,32 @@ container in the detached mode, then you cannot use the `--rm` option.
### Foreground
-In foreground mode (the default when `-d` is not specified), `docker run`
-can start the process in the container and attach the console to the process's
-standard input, output, and standard error. It can even pretend to be a TTY
-(this is what most command line executables expect) and pass along signals. All
-of that is configurable:
+In foreground mode (the default when `-d` is not specified), `docker
+run` can start the process in the container and attach the console to
+the process's standard input, output, and standard error. It can even
+pretend to be a TTY (this is what most command line executables expect)
+and pass along signals. All of that is configurable:
- -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr``
+ -a=[] : Attach to `STDIN`, `STDOUT` and/or `STDERR`
-t=false : Allocate a pseudo-tty
--sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
-i=false : Keep STDIN open even if not attached
-If you do not specify `-a` then Docker will [attach everything (stdin,stdout,stderr)](
-https://github.com/dotcloud/docker/blob/
-75a7f4d90cde0295bcfb7213004abce8d4779b75/commands.go#L1797). You can specify to which
-of the three standard streams (`stdin`, `stdout`, `stderr`) you'd like to connect
-instead, as in:
+If you do not specify `-a` then Docker will [attach all standard
+streams](https://github.com/dotcloud/docker/blob/75a7f4d90cde0295bcfb7213004abce8d4779b75/commands.go#L1797). You can
+specify to which of the three standard streams (`STDIN`, `STDOUT`,
+`STDERR`) you'd like to connect instead, as in:
$ docker run -a stdin -a stdout -i -t ubuntu /bin/bash
-For interactive processes (like a shell) you will typically want a tty as well as
-persistent standard input (`stdin`), so you'll use `-i -t` together in most
-interactive cases.
+For interactive processes (like a shell) you will typically want a tty
+as well as persistent standard input (`STDIN`), so you'll use `-i -t`
+together in most interactive cases.
## Container Identification
-### Name (–name)
+### Name (--name)
The operator can identify a container in three ways:
@@ -113,19 +111,18 @@ The operator can identify a container in three ways:
- Name ("evil_ptolemy")
The UUID identifiers come from the Docker daemon, and if you do not
-assign a name to the container with `--name` then
-the daemon will also generate a random string name too. The name can
-become a handy way to add meaning to a container since you can use this
-name when defining
-[*links*](/userguide/dockerlinks/#working-with-links-names)
-(or any other place you need to identify a container). This works for
-both background and foreground Docker containers.
+assign a name to the container with `--name` then the daemon will also
+generate a random string name too. The name can become a handy way to
+add meaning to a container since you can use this name when defining
+[*links*](/userguide/dockerlinks/#working-with-links-names) (or any
+other place you need to identify a container). This works for both
+background and foreground Docker containers.
-### PID Equivalent
+### PID Equivalent
-And finally, to help with automation, you can have Docker write the
+Finally, to help with automation, you can have Docker write the
container ID out to a file of your choosing. This is similar to how some
-programs might write out their process ID to a file (you`ve seen them as
+programs might write out their process ID to a file (you've seen them as
PID files):
--cidfile="": Write the container ID to the file
@@ -141,14 +138,14 @@ PID files):
By default, all containers have networking enabled and they can make any
outgoing connections. The operator can completely disable networking
-with `docker run --net none` which disables all incoming and
-outgoing networking. In cases like this, you would perform I/O through
-files or STDIN/STDOUT only.
+with `docker run --net none` which disables all incoming and outgoing
+networking. In cases like this, you would perform I/O through files or
+`STDIN` and `STDOUT` only.
Your container will use the same DNS servers as the host by default, but
you can override this with `--dns`.
-Supported networking modes are:
+Supported networking modes are:
* none - no networking in the container
* bridge - (default) connect the container to the bridge via veth interfaces
@@ -156,41 +153,46 @@ Supported networking modes are:
* container - use another container's network stack
#### Mode: none
-With the networking mode set to `none` a container will not have a access to
-any external routes. The container will still have a `loopback` interface
-enabled in the container but it does not have any routes to external traffic.
+
+With the networking mode set to `none` a container will not have
+access to any external routes. The container will still have a
+`loopback` interface enabled in the container but it does not have any
+routes to external traffic.
#### Mode: bridge
-With the networking mode set to `bridge` a container will use docker's default
-networking setup. A bridge is setup on the host, commonly named `docker0`,
-and a pair of veth interfaces will be created for the container. One side of
-the veth pair will remain on the host attached to the bridge while the other
-side of the pair will be placed inside the container's namespaces in addition
-to the `loopback` interface. An IP address will be allocated for containers
-on the bridge's network and trafic will be routed though this bridge to the
-container.
+
+With the networking mode set to `bridge` a container will use docker's
+default networking setup. A bridge is setup on the host, commonly named
+`docker0`, and a pair of `veth` interfaces will be created for the
+container. One side of the `veth` pair will remain on the host attached
+to the bridge while the other side of the pair will be placed inside the
+container's namespaces in addition to the `loopback` interface. An IP
+address will be allocated for containers on the bridge's network and
+traffic will be routed through this bridge to the container.
#### Mode: host
+
With the networking mode set to `host` a container will share the host's
-network stack and all interfaces from the host will be available to the
-container. The container's hostname will match the hostname on the host
-system. Publishing ports and linking to other containers will not work
-when sharing the host's network stack.
+network stack and all interfaces from the host will be available to the
+container. The container's hostname will match the hostname on the host
+system. Publishing ports and linking to other containers will not work
+when sharing the host's network stack.
#### Mode: container
-With the networking mode set to `container` a container will share the
-network stack of another container. The other container's name must be
+
+With the networking mode set to `container` a container will share the
+network stack of another container. The other container's name must be
provided in the format of `--net container:<name|id>`.
-Example running a redis container with redis binding to localhost then
-running the redis-cli and connecting to the redis server over the
-localhost interface.
+Example running a Redis container with Redis binding to `localhost` then
+running the `redis-cli` command and connecting to the Redis server over the
+`localhost` interface.
$ docker run -d --name redis example/redis --bind 127.0.0.1
$ # use the redis container's network stack to access localhost
$ docker run --rm -ti --net container:redis example/redis-cli -h 127.0.0.1
-## Clean Up (–rm)
+## Clean Up (--rm)
By default a container's file system persists even after the container
exits. This makes debugging a lot easier (since you can inspect the
@@ -211,15 +213,14 @@ container:
-c=0 : CPU shares (relative weight)
The operator can constrain the memory available to a container easily
-with `docker run -m`. If the host supports swap
-memory, then the `-m` memory setting can be larger
-than physical RAM.
+with `docker run -m`. If the host supports swap memory, then the `-m`
+memory setting can be larger than physical RAM.
Similarly the operator can increase the priority of this container with
-the `-c` option. By default, all containers run at
-the same priority and get the same proportion of CPU cycles, but you can
-tell the kernel to give more shares of CPU time to one or more
-containers when you start them via Docker.
+the `-c` option. By default, all containers run at the same priority and
+get the same proportion of CPU cycles, but you can tell the kernel to
+give more shares of CPU time to one or more containers when you start
+them via Docker.
## Runtime Privilege and LXC Configuration
@@ -239,7 +240,7 @@ to access to all devices on the host as well as set some configuration
in AppArmor to allow the container nearly all the same access to the
host as processes running outside containers on the host. Additional
information about running with `--privileged` is available on the
-[Docker Blog](http://blog.docker.io/2013/09/docker-can-now-run-within-docker/).
+[Docker Blog](http://blog.docker.com/2013/09/docker-can-now-run-within-docker/).
If the Docker daemon was started using the `lxc` exec-driver
(`docker -d --exec-driver=lxc`) then the operator can also specify LXC options
@@ -277,19 +278,20 @@ commandline:
$ docker run [OPTIONS] IMAGE[:TAG] [COMMAND] [ARG...]
-This command is optional because the person who created the `IMAGE` may have
-already provided a default `COMMAND` using the Dockerfile `CMD`. As the
-operator (the person running a container from the image), you can override that
-`CMD` just by specifying a new `COMMAND`.
+This command is optional because the person who created the `IMAGE` may
+have already provided a default `COMMAND` using the Dockerfile `CMD`
+instruction. As the operator (the person running a container from the
+image), you can override that `CMD` instruction just by specifying a new
+`COMMAND`.
-If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND` get
-appended as arguments to the `ENTRYPOINT`.
+If the image also specifies an `ENTRYPOINT` then the `CMD` or `COMMAND`
+get appended as arguments to the `ENTRYPOINT`.
## ENTRYPOINT (Default Command to Execute at Runtime)
--entrypoint="": Overwrite the default entrypoint set by the image
-The ENTRYPOINT of an image is similar to a `COMMAND` because it
+The `ENTRYPOINT` of an image is similar to a `COMMAND` because it
specifies what executable to run when the container starts, but it is
(purposely) more difficult to override. The `ENTRYPOINT` gives a
container its default nature or behavior, so that when you set an
@@ -310,10 +312,10 @@ or two examples of how to pass more parameters to that ENTRYPOINT:
## EXPOSE (Incoming Ports)
-The Dockerfile doesn't give much control over networking, only providing the
-`EXPOSE` instruction to give a hint to the operator about what incoming ports
-might provide services. The following options work with or override the
-Dockerfile's exposed defaults:
+The Dockerfile doesn't give much control over networking, only providing
+the `EXPOSE` instruction to give a hint to the operator about what
+incoming ports might provide services. The following options work with
+or override the Dockerfile's exposed defaults:
--expose=[]: Expose a port from the container
without publishing it to your host
@@ -324,34 +326,34 @@ Dockerfile's exposed defaults:
(use 'docker port' to see the actual mapping)
--link="" : Add link to another container (name:alias)
-As mentioned previously, `EXPOSE` (and `--expose`) make a port available **in**
-a container for incoming connections. The port number on the inside of the
-container (where the service listens) does not need to be the same number as the
-port exposed on the outside of the container (where clients connect), so inside
-the container you might have an HTTP service listening on port 80 (and so you
-`EXPOSE 80` in the Dockerfile), but outside the container the port might be
-42800.
+As mentioned previously, `EXPOSE` (and `--expose`) make a port available
+**in** a container for incoming connections. The port number on the
+inside of the container (where the service listens) does not need to be
+the same number as the port exposed on the outside of the container
+(where clients connect), so inside the container you might have an HTTP
+service listening on port 80 (and so you `EXPOSE 80` in the Dockerfile),
+but outside the container the port might be 42800.
-To help a new client container reach the server container's internal port
-operator `--expose`'d by the operator or `EXPOSE`'d by the developer, the
-operator has three choices: start the server container with `-P` or `-p,` or
-start the client container with `--link`.
+To help a new client container reach the server container's internal
+port `--expose`'d by the operator or `EXPOSE`'d by the
+developer, the operator has three choices: start the server container
+with `-P` or `-p,` or start the client container with `--link`.
If the operator uses `-P` or `-p` then Docker will make the exposed port
-accessible on the host and the ports will be available to any client that
-can reach the host. To find the map between the host ports and the exposed
-ports, use `docker port`)
+accessible on the host and the ports will be available to any client
+that can reach the host. To find the map between the host ports and the
+exposed ports, use `docker port`.
-If the operator uses `--link` when starting the new client container, then the
-client container can access the exposed port via a private networking interface.
-Docker will set some environment variables in the client container to help
-indicate which interface and port to use.
+If the operator uses `--link` when starting the new client container,
+then the client container can access the exposed port via a private
+networking interface. Docker will set some environment variables in the
+client container to help indicate which interface and port to use.
## ENV (Environment Variables)
-The operator can **set any environment variable** in the container by using one
-or more `-e` flags, even overriding those already defined by the developer with
-a Dockefile `ENV`:
+The operator can **set any environment variable** in the container by
+using one or more `-e` flags, even overriding those already defined by
+the developer with a Dockerfile `ENV`:
$ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export
declare -x HOME="/"
@@ -420,18 +422,19 @@ mechanism to communicate with a linked container by its alias:
If "container-dir" is missing, then docker creates a new volume.
--volumes-from="": Mount all volumes from the given container(s)
-The volumes commands are complex enough to have their own documentation in
-section [*Share Directories via Volumes*](/userguide/dockervolumes/#volume-def).
-A developer can define one or more `VOLUME's associated with an image, but only the
-operator can give access from one container to another (or from a container to a
+The volumes commands are complex enough to have their own documentation
+in section [*Share Directories via
+Volumes*](/userguide/dockervolumes/#volume-def). A developer can define
+one or more `VOLUME`'s associated with an image, but only the operator
+can give access from one container to another (or from a container to a
volume mounted on the host).
## USER
-The default user within a container is `root` (id = 0), but if the developer
-created additional users, those are accessible too. The developer can set a
-default user to run the first process with the `Dockerfile USER` command,
-but the operator can override it:
+The default user within a container is `root` (id = 0), but if the
+developer created additional users, those are accessible too. The
+developer can set a default user to run the first process with the
+Dockerfile `USER` instruction, but the operator can override it:
-u="": Username or UID
diff --git a/docs/sources/terms/repository.md b/docs/sources/terms/repository.md
index 52c83d45d8..c4d1d43539 100644
--- a/docs/sources/terms/repository.md
+++ b/docs/sources/terms/repository.md
@@ -13,10 +13,10 @@ server.
Images can be associated with a repository (or multiple) by giving them
an image name using one of three different commands:
-1. At build time (e.g. `sudo docker build -t IMAGENAME`),
-2. When committing a container (e.g.
+1. At build time (e.g., `sudo docker build -t IMAGENAME`),
+2. When committing a container (e.g.,
`sudo docker commit CONTAINERID IMAGENAME`) or
-3. When tagging an image id with an image name (e.g.
+3. When tagging an image id with an image name (e.g.,
`sudo docker tag IMAGEID IMAGENAME`).
A Fully Qualified Image Name (FQIN) can be made up of 3 parts:
diff --git a/docs/sources/userguide/dockerhub.md b/docs/sources/userguide/dockerhub.md
index 99e9a0a922..5bb1edec8a 100644
--- a/docs/sources/userguide/dockerhub.md
+++ b/docs/sources/userguide/dockerhub.md
@@ -65,7 +65,7 @@ Your Docker Hub account is now active and ready for you to use!
## Next steps
-Next, let's start learning how to Dockerize applications with our "Hello World!"
+Next, let's start learning how to Dockerize applications with our "Hello world"
exercise.
Go to [Dockerizing Applications](/userguide/dockerizing).
diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md
index b58be90449..c3f5461c2f 100644
--- a/docs/sources/userguide/dockerimages.md
+++ b/docs/sources/userguide/dockerimages.md
@@ -239,7 +239,7 @@ Let's create a directory and a `Dockerfile` first.
$ cd sinatra
$ touch Dockerfile
-Each instructions creates a new layer of the image. Let's look at a simple
+Each instruction creates a new layer of the image. Let's look at a simple
example now for building our own Sinatra image for our development team.
# This is a comment
@@ -380,7 +380,7 @@ containers](
Let's delete the `training/sinatra` image as we don't need it anymore.
- $ docker rmi training/sinatra
+ $ sudo docker rmi training/sinatra
Untagged: training/sinatra:latest
Deleted: 5bc342fa0b91cabf65246837015197eecfa24b2213ed6a51a8974ae250fedd8d
Deleted: ed0fffdcdae5eb2c3a55549857a8be7fc8bc4241fb19ad714364cbfd7a56b22f
diff --git a/docs/sources/userguide/dockerizing.md b/docs/sources/userguide/dockerizing.md
index afe18ce8df..02ac90306b 100644
--- a/docs/sources/userguide/dockerizing.md
+++ b/docs/sources/userguide/dockerizing.md
@@ -1,20 +1,20 @@
-page_title: Dockerizing Applications: A "Hello World!"
-page_description: A simple "Hello World!" exercise that introduced you to Docker.
+page_title: Dockerizing Applications: A "Hello world"
+page_description: A simple "Hello world" exercise that introduces you to Docker.
page_keywords: docker guide, docker, docker platform, virtualization framework, how to, dockerize, dockerizing apps, dockerizing applications, container, containers
-# Dockerizing Applications: A "Hello World!"
+# Dockerizing Applications: A "Hello world"
*So what's this Docker thing all about?*
Docker allows you to run applications inside containers. Running an
application inside a container takes a single command: `docker run`.
-## Hello World!
+## Hello world
Let's try it now.
- $ sudo docker run ubuntu:14.04 /bin/echo 'Hello World'
- Hello World!
+ $ sudo docker run ubuntu:14.04 /bin/echo 'Hello world'
+ Hello world
And you just launched your first container!
@@ -34,17 +34,17 @@ image registry: [Docker Hub](https://hub.docker.com).
Next we told Docker what command to run inside our new container:
- /bin/echo 'Hello World!'
+ /bin/echo 'Hello world'
When our container was launched Docker created a new Ubuntu 14.04
environment and then executed the `/bin/echo` command inside it. We saw
the result on the command line:
- Hello World!
+ Hello world
So what happened to our container after that? Well Docker containers
only run as long as the command you specify is active. Here, as soon as
-`Hello World!` was echoed, the container stopped.
+`Hello world` was echoed, the container stopped.
## An Interactive Container
@@ -88,7 +88,7 @@ use the `exit` command to finish.
As with our previous container, once the Bash shell process has
finished, the container is stopped.
-## A Daemonized Hello World!
+## A Daemonized Hello world
Now a container that runs a command and then exits has some uses but
it's not overly helpful. Let's create a container that runs as a daemon,
@@ -99,7 +99,7 @@ Again we can do this with the `docker run` command:
$ sudo docker run -d ubuntu:14.04 /bin/sh -c "while true; do echo hello world; sleep 1; done"
1e5535038e285177d5214659a068137486f96ee5c2e85a4ac52dc83f2ebe4147
-Wait what? Where's our "Hello World!" Let's look at what we've run here.
+Wait what? Where's our "Hello world"? Let's look at what we've run here.
It should look pretty familiar. We ran `docker run` but this time we
specified a flag: `-d`. The `-d` flag tells Docker to run the container
and put it in the background, to daemonize it.
@@ -131,7 +131,7 @@ world` daemon.
Firstly let's make sure our container is running. We can
do that with the `docker ps` command. The `docker ps` command queries
-the Docker daemon for information about all the container it knows
+the Docker daemon for information about all the containers it knows
about.
$ sudo docker ps
diff --git a/docs/sources/userguide/dockerlinks.md b/docs/sources/userguide/dockerlinks.md
index 833f4aed98..20a5c1a179 100644
--- a/docs/sources/userguide/dockerlinks.md
+++ b/docs/sources/userguide/dockerlinks.md
@@ -94,7 +94,7 @@ yourself. This naming provides two useful functions:
that makes it easier for you to remember them, for example naming a
container with a web application in it `web`.
-2. It provides Docker with reference point that allows it to refer to other
+2. It provides Docker with a reference point that allows it to refer to other
containers, for example link container `web` to container `db`.
You can name your container by using the `--name` flag, for example:
@@ -169,10 +169,12 @@ Docker exposes connectivity information for the parent container inside the
child container in two ways:
* Environment variables,
-* Updating the `/etc/host` file.
+* Updating the `/etc/hosts` file.
Let's look first at the environment variables Docker sets. Let's run the `env`
command to list the container's environment variables.
+
+```
$ sudo docker run --rm --name web2 --link db:db training/webapp env
. . .
DB_NAME=/web2/db
@@ -182,6 +184,7 @@ command to list the container's environment variables.
DB_PORT_5000_TCP_PORT=5432
DB_PORT_5000_TCP_ADDR=172.17.0.5
. . .
+```
> **Note**:
> These Environment variables are only set for the first process in the
@@ -189,8 +192,8 @@ command to list the container's environment variables.
> will scrub them when spawning shells for connection.
We can see that Docker has created a series of environment variables with
-useful information about our `db` container. Each variables is prefixed with
-`DB` which is populated from the `alias` we specified above. If our `alias`
+useful information about our `db` container. Each variable is prefixed with
+`DB_` which is populated from the `alias` we specified above. If our `alias`
were `db1` the variables would be prefixed with `DB1_`. You can use these
environment variables to configure your applications to connect to the database
on the `db` container. The connection will be secure, private and only the
diff --git a/docs/sources/userguide/dockerrepos.md b/docs/sources/userguide/dockerrepos.md
index 5babfc76f4..a73c4b7834 100644
--- a/docs/sources/userguide/dockerrepos.md
+++ b/docs/sources/userguide/dockerrepos.md
@@ -1,37 +1,49 @@
page_title: Working with Docker Hub
-page_description: Learning how to use Docker Hub to manage images and work flow
+page_description: Learn how to use the Docker Hub to manage Docker images and work flow
page_keywords: repo, Docker Hub, Docker Hub, registry, index, repositories, usage, pull image, push image, image, documentation
# Working with Docker Hub
-So far we've seen a lot about how to use Docker on the command line and
-your local host. We've seen [how to pull down
-images](/userguide/usingdocker/) that you can run your containers from
-and we've seen how to [create your own images](/userguide/dockerimages).
+So far you've learned how to use the command line to run Docker on your local host.
+You've learned how to [pull down images](/userguide/usingdocker/) to build containers
+from existing images and you've learned how to [create your own images](/userguide/dockerimages).
-Now we're going to learn a bit more about
-[Docker Hub](https://hub.docker.com) and how you can use it to enhance
-your Docker work flows.
+Next, you're going to learn how to use the [Docker Hub](https://hub.docker.com) to
+simplify and enhance your Docker workflows.
-[Docker Hub](https://hub.docker.com) is the public registry that Docker
-Inc maintains. It contains a huge collection of images, over 15,000,
-that you can download and use to build your containers. It also provides
-authentication, structure (you can setup teams and organizations), work
-flow tools like webhooks and build triggers as well as privacy features
-like private repositories for storing images you don't want to publicly
-share.
+The [Docker Hub](https://hub.docker.com) is a public registry maintained by Docker,
+Inc. It contains over 15,000 images you can download and use to build containers. It also
+provides authentication, work group structure, workflow tools like webhooks and build
+triggers, and privacy tools like private repositories for storing images you don't want
+to share publicly.
## Docker commands and Docker Hub
-Docker acts as a client for these services via the `docker search`,
-`pull`, `login` and `push` commands.
+Docker itself provides access to Docker Hub services via the `docker search`,
+`pull`, `login`, and `push` commands. This page will show you how these commands work.
+
+### Account creation and login
+Typically, you'll want to start by creating an account on Docker Hub (if you haven't
+already) and logging in. You can create your account directly on
+[Docker Hub](https://hub.docker.com/account/signup/), or by running:
+
+ $ sudo docker login
+
+This will prompt you for a user name, which will become the public namespace for your
+public repositories.
+If your user name is available, Docker will prompt you to enter a password and your
+e-mail address. It will then automatically log you in. You can now commit and
+push your own images up to your repos on Docker Hub.
+
+> **Note:**
+> Your authentication credentials will be stored in the [`.dockercfg`
+> authentication file](#authentication-file) in your home directory.
## Searching for images
-As we've already seen we can search the
-[Docker Hub](https://hub.docker.com) registry via it's search interface
-or using the command line interface. Searching can find images by name,
-user name or description:
+You can search the [Docker Hub](https://hub.docker.com) registry via its search
+interface or by using the command line interface. Searching can find images by image
+name, user name, or description:
$ sudo docker search centos
NAME DESCRIPTION STARS OFFICIAL TRUSTED
@@ -41,12 +53,12 @@ user name or description:
There you can see two example results: `centos` and
`tianon/centos`. The second result shows that it comes from
-the public repository of a user, `tianon/`, while the first result,
-`centos`, doesn't explicitly list a repository so it comes from the
+the public repository of a user, named `tianon/`, while the first result,
+`centos`, doesn't explicitly list a repository which means that it comes from the
trusted top-level namespace. The `/` character separates a user's
-repository and the image name.
+repository from the image name.
-Once you have found the image you want, you can download it:
+Once you've found the image you want, you can download it with `docker pull <imagename>`:
$ sudo docker pull centos
Pulling repository centos
@@ -55,84 +67,63 @@ Once you have found the image you want, you can download it:
511136ea3c5a: Download complete
7064731afe90: Download complete
-The image is now available to run a container from.
+You now have an image from which you can run containers.
## Contributing to Docker Hub
Anyone can pull public images from the [Docker Hub](https://hub.docker.com)
registry, but if you would like to share your own images, then you must
-register a user first as we saw in the [first section of the Docker User
+register first, as we saw in the [first section of the Docker User
Guide](/userguide/dockerhub/).
-To refresh your memory, you can create your user name and login to
-[Docker Hub](https://hub.docker.com/account/signup/), or by running:
-
- $ sudo docker login
-
-This will prompt you for a user name, which will become a public
-namespace for your public repositories, for example:
-
- training/webapp
-
-Here `training` is the user name and `webapp` is a repository owned by
-that user.
-
-If your user name is available then `docker` will also prompt you to
-enter a password and your e-mail address. It will then automatically log
-you in. Now you're ready to commit and push your own images!
-
-> **Note:**
-> Your authentication credentials will be stored in the [`.dockercfg`
-> authentication file](#authentication-file) in your home directory.
-
## Pushing a repository to Docker Hub
-In order to push an repository to its registry you need to have named an image,
+In order to push a repository to its registry, you need to have named an image
or committed your container to a named image as we saw
[here](/userguide/dockerimages).
-Now you can push this repository to the registry designated by its name
-or tag.
+Now you can push this repository to the registry designated by its name or tag.
$ sudo docker push yourname/newimage
-The image will then be uploaded and available for use.
+The image will then be uploaded and available for use by your team-mates and/or the
+community.
## Features of Docker Hub
-Now let's look at some of the features of Docker Hub. You can find more
-information [here](/docker-io/).
+Let's take a closer look at some of the features of Docker Hub. You can find more
+information [here](http://docs.docker.com/docker-hub/).
* Private repositories
* Organizations and teams
* Automated Builds
* Webhooks
-## Private Repositories
+### Private Repositories
Sometimes you have images you don't want to make public and share with
everyone. So Docker Hub allows you to have private repositories. You can
sign up for a plan [here](https://registry.hub.docker.com/plans/).
-## Organizations and teams
+### Organizations and teams
One of the useful aspects of private repositories is that you can share
them only with members of your organization or team. Docker Hub lets you
create organizations where you can collaborate with your colleagues and
-manage private repositories. You can create and manage an organization
+manage private repositories. You can learn how to create and manage an organization
[here](https://registry.hub.docker.com/account/organizations/).
-## Automated Builds
+### Automated Builds
-Automated Builds automate the building and updating of images from [GitHub](https://www.github.com)
-or [BitBucket](http://bitbucket.com), directly on Docker Hub. It works by adding a commit hook to
-your selected GitHub or BitBucket repository, triggering a build and update when you push a
-commit.
+Automated Builds automate the building and updating of images from
+[GitHub](https://www.github.com) or [BitBucket](http://bitbucket.com), directly on Docker
+Hub. It works by adding a commit hook to your selected GitHub or BitBucket repository,
+triggering a build and update when you push a commit.
-### To setup an Automated Build
+#### To set up an Automated Build
1. Create a [Docker Hub account](https://hub.docker.com/) and login.
-2. Link your GitHub or BitBucket account through the [`Link Accounts`](https://registry.hub.docker.com/account/accounts/) menu.
+2. Link your GitHub or BitBucket account through the ["Link Accounts"](https://registry.hub.docker.com/account/accounts/) menu.
3. [Configure an Automated Build](https://registry.hub.docker.com/builds/).
4. Pick a GitHub or BitBucket project that has a `Dockerfile` that you want to build.
5. Pick the branch you want to build (the default is the `master` branch).
@@ -141,33 +132,32 @@ commit.
8. Specify where the `Dockerfile` is located. The default is `/`.
Once the Automated Build is configured it will automatically trigger a
-build, and in a few minutes, if there are no errors, you will see your
-new Automated Build on the [Docker Hub](https://hub.docker.com) Registry.
-It will stay in sync with your GitHub and BitBucket repository until you
+build and, in a few minutes, you should see your new Automated Build on the [Docker Hub](https://hub.docker.com)
+Registry. It will stay in sync with your GitHub and BitBucket repository until you
deactivate the Automated Build.
-If you want to see the status of your Automated Builds you can go to your
+If you want to see the status of your Automated Builds, you can go to your
[Automated Builds page](https://registry.hub.docker.com/builds/) on the Docker Hub,
-and it will show you the status of your builds, and the build history.
+and it will show you the status of your builds and their build history.
Once you've created an Automated Build you can deactivate or delete it. You
-cannot however push to an Automated Build with the `docker push` command.
+cannot, however, push to an Automated Build with the `docker push` command.
You can only manage it by committing code to your GitHub or BitBucket
repository.
You can create multiple Automated Builds per repository and configure them
to point to specific `Dockerfile`'s or Git branches.
-### Build Triggers
+#### Build Triggers
Automated Builds can also be triggered via a URL on Docker Hub. This
allows you to rebuild an Automated build image on demand.
-## Webhooks
+### Webhooks
Webhooks are attached to your repositories and allow you to trigger an
event when an image or updated image is pushed to the repository. With
-a webhook you can specify a target URL and a JSON payload will be
+a webhook you can specify a target URL and a JSON payload that will be
delivered when the image is pushed.
## Next steps
diff --git a/docs/sources/userguide/dockervolumes.md b/docs/sources/userguide/dockervolumes.md
index 0c2f6cfac6..93ac37b1cc 100644
--- a/docs/sources/userguide/dockervolumes.md
+++ b/docs/sources/userguide/dockervolumes.md
@@ -80,23 +80,23 @@ it.
Let's create a new named container with a volume to share.
- $ docker run -d -v /dbdata --name dbdata training/postgres
+ $ sudo docker run -d -v /dbdata --name dbdata training/postgres
You can then use the `--volumes-from` flag to mount the `/dbdata` volume in another container.
- $ docker run -d --volumes-from dbdata --name db1 training/postgres
+ $ sudo docker run -d --volumes-from dbdata --name db1 training/postgres
And another:
- $ docker run -d --volumes-from dbdata --name db2 training/postgres
+ $ sudo docker run -d --volumes-from dbdata --name db2 training/postgres
-You can use multiple `-volumes-from` parameters to bring together multiple data
+You can use multiple `--volumes-from` parameters to bring together multiple data
volumes from multiple containers.
You can also extend the chain by mounting the volume that came from the
`dbdata` container in yet another container via the `db1` or `db2` containers.
- $ docker run -d --name db3 --volumes-from db1 training/postgres
+ $ sudo docker run -d --name db3 --volumes-from db1 training/postgres
If you remove containers that mount volumes, including the initial `dbdata`
container, or the subsequent containers `db1` and `db2`, the volumes will not
@@ -122,7 +122,7 @@ we'll be left with a backup of our `dbdata` volume.
You could then to restore to the same container, or another that you've made
elsewhere. Create a new container.
- $ sudo docker run -v /dbdata --name dbdata2 ubuntu
+ $ sudo docker run -v /dbdata --name dbdata2 ubuntu /bin/bash
Then un-tar the backup file in the new container's data volume.
diff --git a/docs/sources/userguide/index.md b/docs/sources/userguide/index.md
index 87dab67cca..eef59c000b 100644
--- a/docs/sources/userguide/index.md
+++ b/docs/sources/userguide/index.md
@@ -29,7 +29,7 @@ environment. To learn more;
Go to [Using Docker Hub](/userguide/dockerhub).
-## Dockerizing Applications: A "Hello World!"
+## Dockerizing Applications: A "Hello world"
*How do I run applications inside containers?*
@@ -82,11 +82,11 @@ Go to [Working with Docker Hub](/userguide/dockerrepos).
## Getting help
-* [Docker homepage](http://www.docker.io/)
+* [Docker homepage](http://www.docker.com/)
* [Docker Hub](https://hub.docker.com)
-* [Docker blog](http://blog.docker.io/)
-* [Docker documentation](http://docs.docker.io/)
-* [Docker Getting Started Guide](http://www.docker.io/gettingstarted/)
+* [Docker blog](http://blog.docker.com/)
+* [Docker documentation](http://docs.docker.com/)
+* [Docker Getting Started Guide](http://www.docker.com/gettingstarted/)
* [Docker code on GitHub](https://github.com/dotcloud/docker)
* [Docker mailing
list](https://groups.google.com/forum/#!forum/docker-user)
diff --git a/docs/sources/userguide/usingdocker.md b/docs/sources/userguide/usingdocker.md
index 54c094bfa9..857eac5e56 100644
--- a/docs/sources/userguide/usingdocker.md
+++ b/docs/sources/userguide/usingdocker.md
@@ -19,7 +19,7 @@ In the process we learned about several Docker commands:
> **Tip:**
> Another way to learn about `docker` commands is our
-> [interactive tutorial](https://www.docker.io/gettingstarted).
+> [interactive tutorial](https://www.docker.com/tryit/).
The `docker` client is pretty simple. Each action you can take
with Docker is a command and each command can take a series of
@@ -87,11 +87,6 @@ This will display the help text and all available flags:
--no-stdin=false: Do not attach stdin
--sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
-
-None of the containers we've run did anything particularly useful
-though. So let's build on that experience by running an example web
-application in Docker.
-
> **Note:**
> You can see a full list of Docker's commands
> [here](/reference/commandline/cli/).
@@ -140,8 +135,8 @@ command. This tells the `docker ps` command to return the details of the
*last* container started.
> **Note:**
-> The `docker ps` command only shows running containers. If you want to
-> see stopped containers too use the `-a` flag.
+> By default, the `docker ps` command only shows information about running
+> containers. If you want to see stopped containers too use the `-a` flag.
We can see the same details we saw [when we first Dockerized a
container](/userguide/dockerizing) with one important addition in the `PORTS`
@@ -184,8 +179,9 @@ see the application.
Our Python application is live!
> **Note:**
-> If you have used boot2docker on OSX you'll need to get the IP of the virtual
-> host instead of using localhost. You can do this by running the following in
+> If you have used the boot2docker virtual machine on OS X, Windows or Linux,
+> you'll need to get the IP of the virtual host instead of using localhost.
+> You can do this by running the following in
> the boot2docker shell.
>
> $ boot2docker ip
diff --git a/docs/theme/mkdocs/base.html b/docs/theme/mkdocs/base.html
index f931be2c90..8f2bd0603a 100644
--- a/docs/theme/mkdocs/base.html
+++ b/docs/theme/mkdocs/base.html
@@ -4,6 +4,11 @@
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
+{% set docker_version = "$VERSION" %}{% set docker_commit = "$GITCOMMIT" %}{% set docker_branch = "$GIT_BRANCH" %}{% set aws_bucket = "$AWS_S3_BUCKET" %}
+ <meta name="docker_version" content="{{ docker_version }}">
+ <meta name="docker_git_branch" content="{{ docker_branch }}">
+ <meta name="docker_git_commit" content="{{ docker_commit }}">
+
{% if meta.page_description %}<meta name="description" content="{{ meta.page_description[0] }}">{% endif %}
{% if meta.page_keywords %}<meta name="keywords" content="{{ meta.page_keywords[0] }}">{% endif %}
{% if site_author %}<meta name="author" content="{{ site_author }}">{% endif %}
@@ -60,7 +65,7 @@
</div>
</div>
<div class="span9 content-body">
- {% include "version.html" %}
+ {% include "beta_warning.html" %}
{{ content }}
</div>
</div>
diff --git a/docs/theme/mkdocs/css/main.css b/docs/theme/mkdocs/css/main.css
index 42a7a18a56..18e65ebd3f 100644
--- a/docs/theme/mkdocs/css/main.css
+++ b/docs/theme/mkdocs/css/main.css
@@ -4,7 +4,7 @@
Core Docker style file
used on
- www.docker.io
+ www.docker.com
docker-index
****************************** */
/* this is about 10% darker, but slightly different */
@@ -2146,4 +2146,4 @@ a:hover {
background: url("../img/homepage/docker-whale-home-logo+@2x.png");
background-size: 459px 261px;
}
-} \ No newline at end of file
+}
diff --git a/docs/theme/mkdocs/header.html b/docs/theme/mkdocs/header.html
index 785797f0dc..3560929cac 100644
--- a/docs/theme/mkdocs/header.html
+++ b/docs/theme/mkdocs/header.html
@@ -25,14 +25,14 @@
<ul class="nav">
<li><a href="https://registry.hub.docker.com" title="Browse Repos">Browse Repos</a></li>
<li><a href="http://docs.docker.com" title="Documentation">Documentation</a></li>
- <li><a href="http://docker.com/community" title="Community">Community</a></li>
- <li><a href="http://docker.com/resources/help/" title="Help">Help</a></li>
+ <li><a href="http://www.docker.com/community/participate/" title="Community">Community</a></li>
+ <li><a href="http://www.docker.com/resources/help/" title="Help">Help</a></li>
</ul>
<div id="usernav" class="pull-right">
<ul class="nav user">
<li class="dropdown">
<a id="logged-in-header-username" class="dropdown-toggle" data-toggle="dropdown" href="#">
- <img class="profile" src="https://secure.gravatar.com/avatar/26dc2b32b9e753823aef55e89687a9fc.jpg?s=30&amp;r=g&amp;d=mm" alt="profile picture">ostezer
+ <img class="profile" src="" alt="profile picture">
</a>
<ul class="dropdown-menu pull-right">
<li><a href="https://hub.docker.com/">View Profile</a></li>
diff --git a/engine/env.go b/engine/env.go
index f63f29e10f..3e292107a7 100644
--- a/engine/env.go
+++ b/engine/env.go
@@ -199,6 +199,22 @@ func (env *Env) SetAuto(k string, v interface{}) {
}
}
+func changeFloats(v interface{}) interface{} {
+ switch v := v.(type) {
+ case float64:
+ return int(v)
+ case map[string]interface{}:
+ for key, val := range v {
+ v[key] = changeFloats(val)
+ }
+ case []interface{}:
+ for idx, val := range v {
+ v[idx] = changeFloats(val)
+ }
+ }
+ return v
+}
+
func (env *Env) Encode(dst io.Writer) error {
m := make(map[string]interface{})
for k, v := range env.Map() {
@@ -207,10 +223,7 @@ func (env *Env) Encode(dst io.Writer) error {
// FIXME: we fix-convert float values to int, because
// encoding/json decodes integers to float64, but cannot encode them back.
// (See http://golang.org/src/pkg/encoding/json/decode.go#L46)
- if fval, isFloat := val.(float64); isFloat {
- val = int(fval)
- }
- m[k] = val
+ m[k] = changeFloats(val)
} else {
m[k] = v
}
diff --git a/engine/env_test.go b/engine/env_test.go
index 39669d6780..f76d879114 100644
--- a/engine/env_test.go
+++ b/engine/env_test.go
@@ -1,7 +1,11 @@
package engine
import (
+ "bytes"
+ "encoding/json"
"testing"
+
+ "github.com/dotcloud/docker/pkg/testutils"
)
func TestEnvLenZero(t *testing.T) {
@@ -143,3 +147,166 @@ func TestMultiMap(t *testing.T) {
t.Fatalf("%#v", v)
}
}
+
+func testMap(l int) [][2]string {
+ res := make([][2]string, l)
+ for i := 0; i < l; i++ {
+ t := [2]string{testutils.RandomString(5), testutils.RandomString(20)}
+ res[i] = t
+ }
+ return res
+}
+
+func BenchmarkSet(b *testing.B) {
+ fix := testMap(100)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ env := &Env{}
+ for _, kv := range fix {
+ env.Set(kv[0], kv[1])
+ }
+ }
+}
+
+func BenchmarkSetJson(b *testing.B) {
+ fix := testMap(100)
+ type X struct {
+ f string
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ env := &Env{}
+ for _, kv := range fix {
+ if err := env.SetJson(kv[0], X{kv[1]}); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkGet(b *testing.B) {
+ fix := testMap(100)
+ env := &Env{}
+ for _, kv := range fix {
+ env.Set(kv[0], kv[1])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, kv := range fix {
+ env.Get(kv[0])
+ }
+ }
+}
+
+func BenchmarkGetJson(b *testing.B) {
+ fix := testMap(100)
+ env := &Env{}
+ type X struct {
+ f string
+ }
+ for _, kv := range fix {
+ env.SetJson(kv[0], X{kv[1]})
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, kv := range fix {
+ if err := env.GetJson(kv[0], &X{}); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkEncode(b *testing.B) {
+ fix := testMap(100)
+ env := &Env{}
+ type X struct {
+ f string
+ }
+ // half a json
+ for i, kv := range fix {
+ if i%2 != 0 {
+ if err := env.SetJson(kv[0], X{kv[1]}); err != nil {
+ b.Fatal(err)
+ }
+ continue
+ }
+ env.Set(kv[0], kv[1])
+ }
+ var writer bytes.Buffer
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ env.Encode(&writer)
+ writer.Reset()
+ }
+}
+
+func BenchmarkDecode(b *testing.B) {
+ fix := testMap(100)
+ env := &Env{}
+ type X struct {
+ f string
+ }
+ // half a json
+ for i, kv := range fix {
+ if i%2 != 0 {
+ if err := env.SetJson(kv[0], X{kv[1]}); err != nil {
+ b.Fatal(err)
+ }
+ continue
+ }
+ env.Set(kv[0], kv[1])
+ }
+ var writer bytes.Buffer
+ env.Encode(&writer)
+ denv := &Env{}
+ reader := bytes.NewReader(writer.Bytes())
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := denv.Decode(reader)
+ if err != nil {
+ b.Fatal(err)
+ }
+ reader.Seek(0, 0)
+ }
+}
+
+func TestLongNumbers(t *testing.T) {
+ type T struct {
+ TestNum int64
+ }
+ v := T{67108864}
+ var buf bytes.Buffer
+ e := &Env{}
+ e.SetJson("Test", v)
+ if err := e.Encode(&buf); err != nil {
+ t.Fatal(err)
+ }
+ res := make(map[string]T)
+ if err := json.Unmarshal(buf.Bytes(), &res); err != nil {
+ t.Fatal(err)
+ }
+ if res["Test"].TestNum != v.TestNum {
+ t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum)
+ }
+}
+
+func TestLongNumbersArray(t *testing.T) {
+ type T struct {
+ TestNum []int64
+ }
+ v := T{[]int64{67108864}}
+ var buf bytes.Buffer
+ e := &Env{}
+ e.SetJson("Test", v)
+ if err := e.Encode(&buf); err != nil {
+ t.Fatal(err)
+ }
+ res := make(map[string]T)
+ if err := json.Unmarshal(buf.Bytes(), &res); err != nil {
+ t.Fatal(err)
+ }
+ if res["Test"].TestNum[0] != v.TestNum[0] {
+ t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum)
+ }
+}
diff --git a/graph/graph.go b/graph/graph.go
index c9e9e6a949..0badd8dcde 100644
--- a/graph/graph.go
+++ b/graph/graph.go
@@ -16,6 +16,7 @@ import (
"github.com/dotcloud/docker/daemon/graphdriver"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/image"
+ "github.com/dotcloud/docker/pkg/truncindex"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/utils"
)
@@ -23,7 +24,7 @@ import (
// A Graph is a store for versioned filesystem images and the relationship between them.
type Graph struct {
Root string
- idIndex *utils.TruncIndex
+ idIndex *truncindex.TruncIndex
driver graphdriver.Driver
}
@@ -41,7 +42,7 @@ func NewGraph(root string, driver graphdriver.Driver) (*Graph, error) {
graph := &Graph{
Root: abspath,
- idIndex: utils.NewTruncIndex([]string{}),
+ idIndex: truncindex.NewTruncIndex([]string{}),
driver: driver,
}
if err := graph.restore(); err != nil {
@@ -62,7 +63,7 @@ func (graph *Graph) restore() error {
ids = append(ids, id)
}
}
- graph.idIndex = utils.NewTruncIndex(ids)
+ graph.idIndex = truncindex.NewTruncIndex(ids)
utils.Debugf("Restored %d elements", len(dir))
return nil
}
diff --git a/graph/service.go b/graph/service.go
index 4bce6b5645..3201d6b994 100644
--- a/graph/service.go
+++ b/graph/service.go
@@ -1,7 +1,6 @@
package graph
import (
- "encoding/json"
"io"
"github.com/dotcloud/docker/engine"
@@ -135,8 +134,8 @@ func (s *TagStore) CmdLookup(job *engine.Job) engine.Status {
}
name := job.Args[0]
if image, err := s.LookupImage(name); err == nil && image != nil {
- if job.GetenvBool("dirty") {
- b, err := json.Marshal(image)
+ if job.GetenvBool("raw") {
+ b, err := image.RawJson()
if err != nil {
return job.Error(err)
}
diff --git a/hack/RELEASE-CHECKLIST.md b/hack/RELEASE-CHECKLIST.md
index 6d7ed15297..2fe1a3ce96 100644
--- a/hack/RELEASE-CHECKLIST.md
+++ b/hack/RELEASE-CHECKLIST.md
@@ -138,11 +138,10 @@ make AWS_S3_BUCKET=beta-docs.docker.io docs-release
### 5. Commit and create a pull request to the "release" branch
```bash
-export GITHUBUSER="YOUR_GITHUB_USER"
git add VERSION CHANGELOG.md
git commit -m "Bump version to $VERSION"
git push $GITHUBUSER bump_$VERSION
-echo "https://github.com/$GITHUBUSER/docker/compare/dotcloud:master...$GITHUBUSER:bump_$VERSION?expand=1"
+echo "https://github.com/$GITHUBUSER/docker/compare/dotcloud:release...$GITHUBUSER:bump_$VERSION?expand=1"
```
That last command will give you the proper link to visit to ensure that you
diff --git a/hack/dind b/hack/dind
index a9de03e4ff..77629ad0a5 100755
--- a/hack/dind
+++ b/hack/dind
@@ -3,7 +3,7 @@ set -e
# DinD: a wrapper script which allows docker to be run inside a docker container.
# Original version by Jerome Petazzoni <jerome@dotcloud.com>
-# See the blog post: http://blog.docker.io/2013/09/docker-can-now-run-within-docker/
+# See the blog post: http://blog.docker.com/2013/09/docker-can-now-run-within-docker/
#
# This script should be executed inside a docker container in privilieged mode
# ('docker run --privileged', introduced in docker 0.6).
diff --git a/hack/infrastructure/README.md b/hack/infrastructure/README.md
index 9b2ffda956..d12fc4c63e 100644
--- a/hack/infrastructure/README.md
+++ b/hack/infrastructure/README.md
@@ -20,7 +20,7 @@ AWS | packages (S3 bucket), dotCloud PAAS, dev-env, ci
CloudFlare | cdn
Digital Ocean | ci
dotCloud PAAS | website, index, registry, ssl, blog
-DynECT | dns (docker.io)
+DynECT | dns (docker.com)
GitHub | repository
Linode | stackbrew
Mailgun | outgoing e-mail
@@ -36,20 +36,19 @@ and which service is handling them.
URL | Service
---------------------------------------------|---------------------------------
- http://blog.docker.io/ | blog
+ http://blog.docker.com/ | blog
*http://cdn-registry-1.docker.io/ | registry (pull)
http://debug.docker.io/ | debug tool
- http://docs.docker.io/ | docsproxy (proxy to readthedocs)
+ http://docs.docker.com/ | documentation served from an S3 bucket
http://docker-ci.dotcloud.com/ | ci
- http://docker.io/ | redirect to www.docker.io (dynect)
- http://docker.readthedocs.org/ | docs
+ http://docker.com/ | redirect to www.docker.com (dynect)
*http://get.docker.io/ | packages
https://github.com/dotcloud/docker | repository
-*https://index.docker.io/ | index
+*https://hub.docker.com/ | Docker Hub
http://registry-1.docker.io/ | registry (push)
http://staging-docker-ci.dotcloud.com/ | ci
*http://test.docker.io/ | packages
-*http://www.docker.io/ | website
+*http://www.docker.com/ | website
http://? (internal URL, not for public use) | stackbrew
*Ordered-by: lexicographic*
diff --git a/hack/make/dynbinary b/hack/make/dynbinary
index 426b9cb566..74bb0dd36e 100644
--- a/hack/make/dynbinary
+++ b/hack/make/dynbinary
@@ -40,5 +40,6 @@ fi
(
export LDFLAGS_STATIC_DOCKER="-X github.com/dotcloud/docker/dockerversion.INITSHA1 \"$DOCKER_INITSHA1\" -X github.com/dotcloud/docker/dockerversion.INITPATH \"$DOCKER_INITPATH\""
+ export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
source "$(dirname "$BASH_SOURCE")/binary"
)
diff --git a/hack/make/ubuntu b/hack/make/ubuntu
index 751eacf868..0d19d7528b 100644
--- a/hack/make/ubuntu
+++ b/hack/make/ubuntu
@@ -8,8 +8,8 @@ if [ -n "$(git status --porcelain)" ]; then
fi
PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)"
-PACKAGE_URL="http://www.docker.io/"
-PACKAGE_MAINTAINER="docker@dotcloud.com"
+PACKAGE_URL="http://www.docker.com/"
+PACKAGE_MAINTAINER="support@docker.com"
PACKAGE_DESCRIPTION="Linux container runtime
Docker complements LXC with a high-level API which operates at the process
level. It runs unix processes with strong guarantees of isolation and
@@ -47,10 +47,10 @@ bundle_ubuntu() {
cp contrib/completion/fish/docker.fish $DIR/etc/fish/completions/
# Include contributed man pages
- contrib/man/md/md2man-all.sh -q
+ docs/man/md2man-all.sh -q
manRoot="$DIR/usr/share/man"
mkdir -p "$manRoot"
- for manDir in contrib/man/man*; do
+ for manDir in docs/man/man?; do
manBase="$(basename "$manDir")" # "man1"
for manFile in "$manDir"/*; do
manName="$(basename "$manFile")" # "docker-build.1"
diff --git a/hack/vendor.sh b/hack/vendor.sh
index e5158b1d8a..2ee530a736 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -41,12 +41,14 @@ clone() {
clone git github.com/kr/pty 67e2db24c8
-clone git github.com/gorilla/context b06ed15e1c
+clone git github.com/gorilla/context 14f550f51a
clone git github.com/gorilla/mux 136d54f81f
clone git github.com/syndtr/gocapability 3c85049eae
+clone git github.com/tchap/go-patricia v1.0.1
+
clone hg code.google.com/p/go.net 84a4013f96e0
clone hg code.google.com/p/gosqlite 74691fb6f837
@@ -61,4 +63,4 @@ mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar
clone git github.com/godbus/dbus v1
clone git github.com/coreos/go-systemd v2
-clone git github.com/docker/libcontainer v1.0.1
+clone git github.com/docker/libcontainer 53cfe0a1eba9145bf5329abbb52b0072ccab8a00
diff --git a/image/image.go b/image/image.go
index b56cbf08ee..5c250947ce 100644
--- a/image/image.go
+++ b/image/image.go
@@ -149,6 +149,22 @@ func jsonPath(root string) string {
return path.Join(root, "json")
}
+func (img *Image) RawJson() ([]byte, error) {
+ root, err := img.root()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to get root for image %s: %s", img.ID, err)
+ }
+ fh, err := os.Open(jsonPath(root))
+ if err != nil {
+ return nil, fmt.Errorf("Failed to open json for image %s: %s", img.ID, err)
+ }
+ buf, err := ioutil.ReadAll(fh)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to read json for image %s: %s", img.ID, err)
+ }
+ return buf, nil
+}
+
// TarLayer returns a tar archive of the image's filesystem layer.
func (img *Image) TarLayer() (arch archive.Archive, err error) {
if img.graph == nil {
diff --git a/integration-cli/build_tests/TestBuildAddTar/1/Dockerfile b/integration-cli/build_tests/TestBuildAddTar/1/Dockerfile
new file mode 100644
index 0000000000..2091b0e4d9
--- /dev/null
+++ b/integration-cli/build_tests/TestBuildAddTar/1/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox
+ADD test.tar /test.tar
+RUN cat /test.tar/test/foo
diff --git a/integration-cli/build_tests/TestBuildAddTar/1/test.tar b/integration-cli/build_tests/TestBuildAddTar/1/test.tar
new file mode 100644
index 0000000000..33639c6476
--- /dev/null
+++ b/integration-cli/build_tests/TestBuildAddTar/1/test.tar
Binary files differ
diff --git a/integration-cli/build_tests/TestBuildAddTar/2/Dockerfile b/integration-cli/build_tests/TestBuildAddTar/2/Dockerfile
new file mode 100644
index 0000000000..830e9ddbee
--- /dev/null
+++ b/integration-cli/build_tests/TestBuildAddTar/2/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox
+ADD test.tar /
+RUN cat /test/foo
diff --git a/integration-cli/build_tests/TestBuildAddTar/2/test.tar b/integration-cli/build_tests/TestBuildAddTar/2/test.tar
new file mode 100644
index 0000000000..33639c6476
--- /dev/null
+++ b/integration-cli/build_tests/TestBuildAddTar/2/test.tar
Binary files differ
diff --git a/integration-cli/build_tests/TestContextTar/Dockerfile b/integration-cli/build_tests/TestContextTar/Dockerfile
new file mode 100644
index 0000000000..41380570c1
--- /dev/null
+++ b/integration-cli/build_tests/TestContextTar/Dockerfile
@@ -0,0 +1,3 @@
+FROM busybox
+ADD foo /foo
+CMD ["cat", "/foo"]
diff --git a/integration-cli/build_tests/TestContextTar/foo b/integration-cli/build_tests/TestContextTar/foo
new file mode 100644
index 0000000000..257cc5642c
--- /dev/null
+++ b/integration-cli/build_tests/TestContextTar/foo
@@ -0,0 +1 @@
+foo
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 9a360c1964..039423e06c 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -9,6 +9,8 @@ import (
"strings"
"testing"
"time"
+
+ "github.com/dotcloud/docker/archive"
)
func TestBuildCacheADD(t *testing.T) {
@@ -1130,6 +1132,50 @@ func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) {
logDone("build - add local and remote file with cache")
}
+func testContextTar(t *testing.T, compression archive.Compression) {
+ contextDirectory := filepath.Join(workingDirectory, "build_tests", "TestContextTar")
+ context, err := archive.Tar(contextDirectory, compression)
+
+ if err != nil {
+ t.Fatalf("failed to build context tar: %v", err)
+ }
+ buildCmd := exec.Command(dockerBinary, "build", "-t", "contexttar", "-")
+ buildCmd.Stdin = context
+
+ out, exitCode, err := runCommandWithOutput(buildCmd)
+ if err != nil || exitCode != 0 {
+ t.Fatalf("build failed to complete: %v %v", out, err)
+ }
+ deleteImages("contexttar")
+ logDone(fmt.Sprintf("build - build an image with a context tar, compression: %v", compression))
+}
+
+func TestContextTarGzip(t *testing.T) {
+ testContextTar(t, archive.Gzip)
+}
+
+func TestContextTarNoCompression(t *testing.T) {
+ testContextTar(t, archive.Uncompressed)
+}
+
+func TestNoContext(t *testing.T) {
+ buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-")
+ buildCmd.Stdin = strings.NewReader("FROM busybox\nCMD echo ok\n")
+
+ out, exitCode, err := runCommandWithOutput(buildCmd)
+ if err != nil || exitCode != 0 {
+ t.Fatalf("build failed to complete: %v %v", out, err)
+ }
+
+ out, exitCode, err = cmd(t, "run", "nocontext")
+ if out != "ok\n" {
+ t.Fatalf("run produced invalid output: %q, expected %q", out, "ok")
+ }
+
+ deleteImages("nocontext")
+ logDone("build - build an image with no context")
+}
+
// TODO: TestCaching
func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) {
name := "testbuildaddlocalandremotefilewithoutcache"
@@ -1444,3 +1490,276 @@ func TestBuildAddToSymlinkDest(t *testing.T) {
}
logDone("build - add to symlink destination")
}
+
+func TestBuildEscapeWhitespace(t *testing.T) {
+ name := "testbuildescaping"
+ defer deleteImages(name)
+
+ _, err := buildImage(name, `
+ FROM busybox
+ MAINTAINER "Docker \
+IO <io@\
+docker.com>"
+ `, true)
+
+ res, err := inspectField(name, "Author")
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res != "Docker IO <io@docker.com>" {
+ t.Fatal("Parsed string did not match the escaped string")
+ }
+
+ logDone("build - validate escaping whitespace")
+}
+
+func TestDockerignore(t *testing.T) {
+ name := "testbuilddockerignore"
+ defer deleteImages(name)
+ dockerfile := `
+ FROM busybox
+ ADD . /bla
+ RUN [[ -f /bla/src/x.go ]]
+ RUN [[ -f /bla/Makefile ]]
+ RUN [[ ! -e /bla/src/_vendor ]]
+ RUN [[ ! -e /bla/.gitignore ]]
+ RUN [[ ! -e /bla/README.md ]]
+ RUN [[ ! -e /bla/.git ]]`
+ ctx, err := fakeContext(dockerfile, map[string]string{
+ "Makefile": "all:",
+ ".git/HEAD": "ref: foo",
+ "src/x.go": "package main",
+ "src/_vendor/v.go": "package main",
+ ".gitignore": "",
+ "README.md": "readme",
+ ".dockerignore": ".git\npkg\n.gitignore\nsrc/_vendor\n*.md",
+ })
+ defer ctx.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := buildImageFromContext(name, ctx, true); err != nil {
+ t.Fatal(err)
+ }
+ logDone("build - test .dockerignore")
+}
+
+func TestDockerignoringDockerfile(t *testing.T) {
+ name := "testbuilddockerignoredockerfile"
+ defer deleteImages(name)
+ dockerfile := `
+ FROM scratch`
+ ctx, err := fakeContext(dockerfile, map[string]string{
+ "Dockerfile": "FROM scratch",
+ ".dockerignore": "Dockerfile\n",
+ })
+ defer ctx.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err = buildImageFromContext(name, ctx, true); err == nil {
+ t.Fatalf("Didn't get expected error from ignoring Dockerfile")
+ }
+ logDone("build - test .dockerignore of Dockerfile")
+}
+
+func TestBuildLineBreak(t *testing.T) {
+ name := "testbuildlinebreak"
+ defer deleteImages(name)
+ _, err := buildImage(name,
+ `FROM busybox
+RUN sh -c 'echo root:testpass \
+ > /tmp/passwd'
+RUN mkdir -p /var/run/sshd
+RUN [ "$(cat /tmp/passwd)" = "root:testpass" ]
+RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`,
+ true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ logDone("build - line break with \\")
+}
+
+func TestBuildEOLInLine(t *testing.T) {
+ name := "testbuildeolinline"
+ defer deleteImages(name)
+ _, err := buildImage(name,
+ `FROM busybox
+RUN sh -c 'echo root:testpass > /tmp/passwd'
+RUN echo "foo \n bar"; echo "baz"
+RUN mkdir -p /var/run/sshd
+RUN [ "$(cat /tmp/passwd)" = "root:testpass" ]
+RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`,
+ true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ logDone("build - end of line in dockerfile instruction")
+}
+
+func TestBuildCommentsShebangs(t *testing.T) {
+ name := "testbuildcomments"
+ defer deleteImages(name)
+ _, err := buildImage(name,
+ `FROM busybox
+# This is an ordinary comment.
+RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh
+RUN [ ! -x /hello.sh ]
+# comment with line break \
+RUN chmod +x /hello.sh
+RUN [ -x /hello.sh ]
+RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ]
+RUN [ "$(/hello.sh)" = "hello world" ]`,
+ true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ logDone("build - comments and shebangs")
+}
+
+func TestBuildUsersAndGroups(t *testing.T) {
+ name := "testbuildusers"
+ defer deleteImages(name)
+ _, err := buildImage(name,
+ `FROM busybox
+
+# Make sure our defaults work
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ]
+
+# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0)
+USER root
+RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ]
+
+# Setup dockerio user and group
+RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
+RUN echo 'dockerio:x:1001:' >> /etc/group
+
+# Make sure we can switch to our user and all the information is exactly as we expect it to be
+USER dockerio
+RUN id -G
+RUN id -Gn
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
+
+# Switch back to root and double check that worked exactly as we might expect it to
+USER root
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ]
+
+# Add a "supplementary" group for our dockerio user
+RUN echo 'supplementary:x:1002:dockerio' >> /etc/group
+
+# ... and then go verify that we get it like we expect
+USER dockerio
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
+USER 1001
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
+
+# super test the new "user:group" syntax
+USER dockerio:dockerio
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
+USER 1001:dockerio
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
+USER dockerio:1001
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
+USER 1001:1001
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
+USER dockerio:supplementary
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
+USER dockerio:1002
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
+USER 1001:supplementary
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
+USER 1001:1002
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
+
+# make sure unknown uid/gid still works properly
+USER 1042:1043
+RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`,
+ true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ logDone("build - users and groups")
+}
+
+func TestBuildEnvUsage(t *testing.T) {
+ name := "testbuildenvusage"
+ defer deleteImages(name)
+ dockerfile := `FROM busybox
+ENV FOO /foo/baz
+ENV BAR /bar
+ENV BAZ $BAR
+ENV FOOPATH $PATH:$FOO
+RUN [ "$BAR" = "$BAZ" ]
+RUN [ "$FOOPATH" = "$PATH:/foo/baz" ]
+ENV FROM hello/docker/world
+ENV TO /docker/world/hello
+ADD $FROM $TO
+RUN [ "$(cat $TO)" = "hello" ]`
+ ctx, err := fakeContext(dockerfile, map[string]string{
+ "hello/docker/world": "hello",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = buildImageFromContext(name, ctx, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ logDone("build - environment variables usage")
+}
+
+func TestBuildAddScript(t *testing.T) {
+ name := "testbuildaddscript"
+ defer deleteImages(name)
+ dockerfile := `
+FROM busybox
+ADD test /test
+RUN ["chmod","+x","/test"]
+RUN ["/test"]
+RUN [ "$(cat /testfile)" = 'test!' ]`
+ ctx, err := fakeContext(dockerfile, map[string]string{
+ "test": "#!/bin/sh\necho 'test!' > /testfile",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = buildImageFromContext(name, ctx, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ logDone("build - add and run script")
+}
+
+func TestBuildAddTar(t *testing.T) {
+
+ checkOutput := func(out string) {
+ n := -1
+ x := ""
+ for i, line := range strings.Split(out, "\n") {
+ if strings.HasPrefix(line, "Step 2") {
+ n = i + 2
+ x = line[strings.Index(line, "cat ")+4:]
+ }
+ if i == n {
+ if line != "Hi" {
+ t.Fatalf("Could not find contents of %s (expected 'Hi' got '%s'", x, line)
+ }
+ n = -2
+ }
+ }
+ if n > -2 {
+ t.Fatalf("Could not find contents of %s in build output", x)
+ }
+ }
+
+ for _, n := range []string{"1", "2"} {
+ buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildAddTar", n)
+ buildCmd := exec.Command(dockerBinary, "build", "-t", "testbuildaddtar", ".")
+ buildCmd.Dir = buildDirectory
+ out, _, err := runCommandWithOutput(buildCmd)
+ errorOut(err, t, fmt.Sprintf("build failed to complete for TestBuildAddTar/%s: %v", n, err))
+ checkOutput(out)
+ }
+}
diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go
index c02c89cd30..7caf3588ce 100644
--- a/integration-cli/docker_cli_commit_test.go
+++ b/integration-cli/docker_cli_commit_test.go
@@ -34,6 +34,33 @@ func TestCommitAfterContainerIsDone(t *testing.T) {
logDone("commit - echo foo and commit the image")
}
+func TestCommitWithoutPause(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo")
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+
+ waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID)
+ _, _, err = runCommandWithOutput(waitCmd)
+ errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out))
+
+ commitCmd := exec.Command(dockerBinary, "commit", "-p=false", cleanedContainerID)
+ out, _, err = runCommandWithOutput(commitCmd)
+ errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err))
+
+ cleanedImageID := stripTrailingCharacters(out)
+
+ inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID)
+ out, _, err = runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err))
+
+ deleteContainer(cleanedContainerID)
+ deleteImages(cleanedImageID)
+
+ logDone("commit - echo foo and commit the image with --pause=false")
+}
+
func TestCommitNewFile(t *testing.T) {
cmd := exec.Command(dockerBinary, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo")
if _, err := runCommand(cmd); err != nil {
diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go
index 478ebd2df1..416f9421b7 100644
--- a/integration-cli/docker_cli_diff_test.go
+++ b/integration-cli/docker_cli_diff_test.go
@@ -66,7 +66,7 @@ func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) {
}
func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) {
- runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep 0")
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep", "0")
cid, _, err := runCommandWithOutput(runCmd)
errorOut(err, t, fmt.Sprintf("%s", err))
cleanCID := stripTrailingCharacters(cid)
diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go
new file mode 100644
index 0000000000..b9d184c09e
--- /dev/null
+++ b/integration-cli/docker_cli_events_test.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func TestCLIGetEvents(t *testing.T) {
+ out, _, _ := cmd(t, "images", "-q")
+ image := strings.Split(out, "\n")[0]
+ cmd(t, "tag", image, "utest:tag1")
+ cmd(t, "tag", image, "utest:tag2")
+ cmd(t, "rmi", "utest:tag1")
+ cmd(t, "rmi", "utest:tag2")
+ eventsCmd := exec.Command("timeout", "0.2", dockerBinary, "events", "--since=1")
+ out, _, _ = runCommandWithOutput(eventsCmd)
+ events := strings.Split(out, "\n")
+ n_events := len(events)
+ // The last element after the split above will be an empty string, so we
+ // get the two elements before the last, which are the untags we're
+ // looking for.
+ for _, v := range events[n_events-3 : n_events-1] {
+ if !strings.Contains(v, "untag") {
+ t.Fatalf("event should be untag, not %#v", v)
+ }
+ }
+ logDone("events - untags are logged")
+}
diff --git a/integration-cli/docker_cli_history_test.go b/integration-cli/docker_cli_history_test.go
index 42edec2fd6..59ad117128 100644
--- a/integration-cli/docker_cli_history_test.go
+++ b/integration-cli/docker_cli_history_test.go
@@ -41,3 +41,21 @@ func TestBuildHistory(t *testing.T) {
deleteImages("testbuildhistory")
}
+
+func TestHistoryExistentImage(t *testing.T) {
+ historyCmd := exec.Command(dockerBinary, "history", "busybox")
+ _, exitCode, err := runCommandWithOutput(historyCmd)
+ if err != nil || exitCode != 0 {
+ t.Fatal("failed to get image history")
+ }
+ logDone("history - history on existent image must not fail")
+}
+
+func TestHistoryNonExistentImage(t *testing.T) {
+ historyCmd := exec.Command(dockerBinary, "history", "testHistoryNonExistentImage")
+ _, exitCode, err := runCommandWithOutput(historyCmd)
+ if err == nil || exitCode == 0 {
+ t.Fatal("history on a non-existent image didn't result in a non-zero exit status")
+ }
+ logDone("history - history on non-existent image must fail")
+}
diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go
index b27bc870fe..2cf2bbfcf5 100644
--- a/integration-cli/docker_cli_images_test.go
+++ b/integration-cli/docker_cli_images_test.go
@@ -5,6 +5,7 @@ import (
"os/exec"
"strings"
"testing"
+ "time"
)
func TestImagesEnsureImageIsListed(t *testing.T) {
@@ -56,3 +57,44 @@ func TestCLIImageTagRemove(t *testing.T) {
}
logDone("tag,rmi- tagging the same images multiple times then removing tags")
}
+
+func TestImagesOrderedByCreationDate(t *testing.T) {
+ defer deleteImages("order:test_a")
+ defer deleteImages("order:test_c")
+ defer deleteImages("order:test_b")
+ id1, err := buildImage("order:test_a",
+ `FROM scratch
+ MAINTAINER dockerio1`, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(time.Second)
+ id2, err := buildImage("order:test_c",
+ `FROM scratch
+ MAINTAINER dockerio2`, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ time.Sleep(time.Second)
+ id3, err := buildImage("order:test_b",
+ `FROM scratch
+ MAINTAINER dockerio3`, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc"))
+ errorOut(err, t, fmt.Sprintf("listing images failed with errors: %v", err))
+ imgs := strings.Split(out, "\n")
+ if imgs[0] != id3 {
+ t.Fatalf("First image must be %s, got %s", id3, imgs[0])
+ }
+ if imgs[1] != id2 {
+ t.Fatalf("Second image must be %s, got %s", id2, imgs[1])
+ }
+ if imgs[2] != id1 {
+ t.Fatalf("Third image must be %s, got %s", id1, imgs[2])
+ }
+
+ logDone("images - ordering by creation date")
+}
diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go
index 0480183bc7..d2616475c9 100644
--- a/integration-cli/docker_cli_links_test.go
+++ b/integration-cli/docker_cli_links_test.go
@@ -2,12 +2,13 @@ package main
import (
"fmt"
- "github.com/dotcloud/docker/pkg/iptables"
"io/ioutil"
"os"
"os/exec"
"strings"
"testing"
+
+ "github.com/dotcloud/docker/pkg/iptables"
)
func TestEtcHostsRegularFile(t *testing.T) {
@@ -90,3 +91,33 @@ func TestIpTablesRulesWhenLinkAndUnlink(t *testing.T) {
logDone("link - verify iptables when link and unlink")
}
+
+func TestInspectLinksStarted(t *testing.T) {
+ defer deleteAllContainers()
+ cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
+ cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
+ cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sleep", "10")
+ links, err := inspectField("testinspectlink", "HostConfig.Links")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if expected := "[/container1:/testinspectlink/alias1 /container2:/testinspectlink/alias2]"; links != expected {
+ t.Fatalf("Links %s, but expected %s", links, expected)
+ }
+ logDone("link - links in started container inspect")
+}
+
+func TestInspectLinksStopped(t *testing.T) {
+ defer deleteAllContainers()
+ cmd(t, "run", "-d", "--name", "container1", "busybox", "sleep", "10")
+ cmd(t, "run", "-d", "--name", "container2", "busybox", "sleep", "10")
+ cmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true")
+ links, err := inspectField("testinspectlink", "HostConfig.Links")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if expected := "[/container1:/testinspectlink/alias1 /container2:/testinspectlink/alias2]"; links != expected {
+ t.Fatalf("Links %s, but expected %s", links, expected)
+ }
+ logDone("link - links in stopped container inspect")
+}
diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go
index 75235b6bb8..8b1d006626 100644
--- a/integration-cli/docker_cli_logs_test.go
+++ b/integration-cli/docker_cli_logs_test.go
@@ -169,3 +169,47 @@ func TestLogsStderrInStdout(t *testing.T) {
logDone("logs - stderr in stdout (with pseudo-tty)")
}
+
+func TestLogsTail(t *testing.T) {
+ testLen := 100
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen))
+
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+ exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
+
+ logsCmd := exec.Command(dockerBinary, "logs", "--tail", "5", cleanedContainerID)
+ out, _, _, err = runCommandWithStdoutStderr(logsCmd)
+ errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err))
+
+ lines := strings.Split(out, "\n")
+
+ if len(lines) != 6 {
+ t.Fatalf("Expected log %d lines, received %d\n", 6, len(lines))
+ }
+
+ logsCmd = exec.Command(dockerBinary, "logs", "--tail", "all", cleanedContainerID)
+ out, _, _, err = runCommandWithStdoutStderr(logsCmd)
+ errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err))
+
+ lines = strings.Split(out, "\n")
+
+ if len(lines) != testLen+1 {
+ t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines))
+ }
+
+ logsCmd = exec.Command(dockerBinary, "logs", "--tail", "random", cleanedContainerID)
+ out, _, _, err = runCommandWithStdoutStderr(logsCmd)
+ errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err))
+
+ lines = strings.Split(out, "\n")
+
+ if len(lines) != testLen+1 {
+ t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines))
+ }
+
+ deleteContainer(cleanedContainerID)
+ logDone("logs - logs tail")
+}
diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go
index e25c9991de..34b5df0338 100644
--- a/integration-cli/docker_cli_rm_test.go
+++ b/integration-cli/docker_cli_rm_test.go
@@ -43,7 +43,7 @@ func TestRemoveContainerWithVolume(t *testing.T) {
}
func TestRemoveContainerRunning(t *testing.T) {
- cmd := exec.Command(dockerBinary, "run", "-d", "--name", "foo", "busybox", "sleep", "300")
+ cmd := exec.Command(dockerBinary, "run", "-dt", "--name", "foo", "busybox", "top")
if _, err := runCommand(cmd); err != nil {
t.Fatal(err)
}
diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go
index fc71f01820..eda1b3f0a5 100644
--- a/integration-cli/docker_cli_run_test.go
+++ b/integration-cli/docker_cli_run_test.go
@@ -946,3 +946,40 @@ func TestModeHostname(t *testing.T) {
logDone("run - hostname and several network modes")
}
+
+func TestRootWorkdir(t *testing.T) {
+ s, _, err := cmd(t, "run", "--workdir", "/", "busybox", "pwd")
+ if err != nil {
+ t.Fatal(s, err)
+ }
+ if s != "/\n" {
+ t.Fatalf("pwd returned '%s' (expected /\\n)", s)
+ }
+
+ deleteAllContainers()
+
+ logDone("run - workdir /")
+}
+
+func TestAllowBindMountingRoot(t *testing.T) {
+ s, _, err := cmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host")
+ if err != nil {
+ t.Fatal(s, err)
+ }
+
+ deleteAllContainers()
+
+ logDone("run - bind mount / as volume")
+}
+
+func TestDisallowBindMountingRootToRoot(t *testing.T) {
+ cmd := exec.Command(dockerBinary, "run", "-v", "/:/", "busybox", "ls", "/host")
+ out, _, err := runCommandWithOutput(cmd)
+ if err == nil {
+ t.Fatal(out, err)
+ }
+
+ deleteAllContainers()
+
+ logDone("run - bind mount /:/ as volume should fail")
+}
diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go
index d728c7de95..fb94cad9d8 100644
--- a/integration-cli/docker_cli_save_load_test.go
+++ b/integration-cli/docker_cli_save_load_test.go
@@ -7,8 +7,8 @@ import (
"testing"
)
-// save a repo and try to load it
-func TestSaveAndLoadRepo(t *testing.T) {
+// save a repo and try to load it using stdout
+func TestSaveAndLoadRepoStdout(t *testing.T) {
runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
out, _, err := runCommandWithOutput(runCmd)
errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err))
@@ -25,6 +25,10 @@ func TestSaveAndLoadRepo(t *testing.T) {
out, _, err = runCommandWithOutput(commitCmd)
errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err))
+ inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ before, _, err := runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("the repo should exist before saving it: %v %v", before, err))
+
saveCmdTemplate := `%v save %v > /tmp/foobar-save-load-test.tar`
saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName)
saveCmd := exec.Command("bash", "-c", saveCmdFinal)
@@ -39,14 +43,70 @@ func TestSaveAndLoadRepo(t *testing.T) {
errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err))
inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ after, _, err := runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", after, err))
+
+ if before != after {
+ t.Fatalf("inspect is not the same after a save / load")
+ }
+
+ deleteContainer(cleanedContainerID)
+ deleteImages(repoName)
+
+ os.Remove("/tmp/foobar-save-load-test.tar")
+
+ logDone("save - save a repo using stdout")
+ logDone("load - load a repo using stdout")
+}
+
+// save a repo and try to load it using flags
+func TestSaveAndLoadRepoFlags(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+ out, _, err := runCommandWithOutput(runCmd)
+ errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+
+ repoName := "foobar-save-load-test"
+
+ inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
out, _, err = runCommandWithOutput(inspectCmd)
- errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", out, err))
+ errorOut(err, t, fmt.Sprintf("output should've been a container id: %v %v", cleanedContainerID, err))
+
+ commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+ out, _, err = runCommandWithOutput(commitCmd)
+ errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err))
+
+ inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ before, _, err := runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("the repo should exist before saving it: %v %v", before, err))
+
+ saveCmdTemplate := `%v save -o /tmp/foobar-save-load-test.tar %v`
+ saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName)
+ saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+ out, _, err = runCommandWithOutput(saveCmd)
+ errorOut(err, t, fmt.Sprintf("failed to save repo: %v %v", out, err))
+
+ deleteImages(repoName)
+
+ loadCmdFinal := `docker load -i /tmp/foobar-save-load-test.tar`
+ loadCmd := exec.Command("bash", "-c", loadCmdFinal)
+ out, _, err = runCommandWithOutput(loadCmd)
+ errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err))
+
+ inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ after, _, err := runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", after, err))
+
+ if before != after {
+ t.Fatalf("inspect is not the same after a save / load")
+ }
deleteContainer(cleanedContainerID)
deleteImages(repoName)
os.Remove("/tmp/foobar-save-load-test.tar")
- logDone("save - save a repo")
- logDone("load - load a repo")
+ logDone("save - save a repo using -o")
+ logDone("load - load a repo using -i")
}
diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go
index 050aec51a6..16523ff468 100644
--- a/integration-cli/docker_cli_search_test.go
+++ b/integration-cli/docker_cli_search_test.go
@@ -9,7 +9,7 @@ import (
// search for repos named "registry" on the central registry
func TestSearchOnCentralRegistry(t *testing.T) {
- searchCmd := exec.Command(dockerBinary)
+ searchCmd := exec.Command(dockerBinary, "search", "stackbrew/busybox")
out, exitCode, err := runCommandWithOutput(searchCmd)
errorOut(err, t, fmt.Sprintf("encountered error while searching: %v", err))
@@ -17,9 +17,9 @@ func TestSearchOnCentralRegistry(t *testing.T) {
t.Fatal("failed to search on the central registry")
}
- if !strings.Contains(out, "registry") {
- t.Fatal("couldn't find any repository named (or containing) 'registry'")
+ if !strings.Contains(out, "Busybox base image.") {
+ t.Fatal("couldn't find any repository named (or containing) 'Busybox base image.'")
}
- logDone("search - search for repositories named (or containing) 'registry'")
+ logDone("search - search for repositories named (or containing) 'Busybox base image.'")
}
diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go
index ef51f64644..032927e221 100644
--- a/integration-cli/docker_cli_tag_test.go
+++ b/integration-cli/docker_cli_tag_test.go
@@ -8,16 +8,12 @@ import (
// tagging a named image in a new unprefixed repo should work
func TestTagUnprefixedRepoByName(t *testing.T) {
- pullCmd := exec.Command(dockerBinary, "pull", "busybox")
- out, exitCode, err := runCommandWithOutput(pullCmd)
- errorOut(err, t, fmt.Sprintf("%s %s", out, err))
-
- if err != nil || exitCode != 0 {
- t.Fatal("pulling the busybox image from the registry has failed")
+ if err := pullImageIfNotExist("busybox:latest"); err != nil {
+ t.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
}
- tagCmd := exec.Command(dockerBinary, "tag", "busybox", "testfoobarbaz")
- out, _, err = runCommandWithOutput(tagCmd)
+ tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "testfoobarbaz")
+ out, _, err := runCommandWithOutput(tagCmd)
errorOut(err, t, fmt.Sprintf("%v %v", out, err))
deleteImages("testfoobarbaz")
@@ -62,18 +58,14 @@ func TestTagInvalidUnprefixedRepo(t *testing.T) {
// ensure we allow the use of valid tags
func TestTagValidPrefixedRepo(t *testing.T) {
- pullCmd := exec.Command(dockerBinary, "pull", "busybox")
- out, exitCode, err := runCommandWithOutput(pullCmd)
- errorOut(err, t, fmt.Sprintf("%s %s", out, err))
-
- if err != nil || exitCode != 0 {
- t.Fatal("pulling the busybox image from the registry has failed")
+ if err := pullImageIfNotExist("busybox:latest"); err != nil {
+ t.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
}
validRepos := []string{"fooo/bar", "fooaa/test"}
for _, repo := range validRepos {
- tagCmd := exec.Command(dockerBinary, "tag", "busybox", repo)
+ tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", repo)
_, _, err := runCommandWithOutput(tagCmd)
if err != nil {
t.Errorf("tag busybox %v should have worked: %s", repo, err)
diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go
index c1e306f2ee..74576ba489 100644
--- a/integration-cli/docker_utils.go
+++ b/integration-cli/docker_utils.go
@@ -16,6 +16,10 @@ import (
func deleteContainer(container string) error {
container = strings.Replace(container, "\n", " ", -1)
container = strings.Trim(container, " ")
+ killArgs := fmt.Sprintf("kill %v", container)
+ killSplitArgs := strings.Split(killArgs, " ")
+ killCmd := exec.Command(dockerBinary, killSplitArgs...)
+ runCommand(killCmd)
rmArgs := fmt.Sprintf("rm %v", container)
rmSplitArgs := strings.Split(rmArgs, " ")
rmCmd := exec.Command(dockerBinary, rmSplitArgs...)
@@ -62,6 +66,27 @@ func deleteImages(images string) error {
return err
}
+func imageExists(image string) error {
+ inspectCmd := exec.Command(dockerBinary, "inspect", image)
+ exitCode, err := runCommand(inspectCmd)
+ if exitCode != 0 && err == nil {
+ err = fmt.Errorf("couldn't find image '%s'", image)
+ }
+ return err
+}
+
+func pullImageIfNotExist(image string) (err error) {
+ if err := imageExists(image); err != nil {
+ pullCmd := exec.Command(dockerBinary, "pull", image)
+ _, exitCode, err := runCommandWithOutput(pullCmd)
+
+ if err != nil || exitCode != 0 {
+ err = fmt.Errorf("image '%s' wasn't found locally and it couldn't be pulled: %s", image, err)
+ }
+ }
+ return
+}
+
func cmd(t *testing.T, args ...string) (string, int, error) {
out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...))
errorOut(err, t, fmt.Sprintf("'%s' failed with errors: %v (%v)", strings.Join(args, " "), err, out))
diff --git a/integration/api_test.go b/integration/api_test.go
index 103df949e6..b2e44717a5 100644
--- a/integration/api_test.go
+++ b/integration/api_test.go
@@ -4,7 +4,6 @@ import (
"bufio"
"bytes"
"encoding/json"
- "fmt"
"io"
"io/ioutil"
"net"
@@ -19,56 +18,9 @@ import (
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/runconfig"
- "github.com/dotcloud/docker/utils"
"github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
)
-func TestGetEvents(t *testing.T) {
- eng := NewTestEngine(t)
- srv := mkServerFromEngine(eng, t)
- // FIXME: we might not need daemon, why not simply nuke
- // the engine?
- daemon := mkDaemonFromEngine(eng, t)
- defer nuke(daemon)
-
- var events []*utils.JSONMessage
- for _, parts := range [][3]string{
- {"fakeaction", "fakeid", "fakeimage"},
- {"fakeaction2", "fakeid", "fakeimage"},
- } {
- action, id, from := parts[0], parts[1], parts[2]
- ev := srv.LogEvent(action, id, from)
- events = append(events, ev)
- }
-
- req, err := http.NewRequest("GET", "/events?since=1", nil)
- if err != nil {
- t.Fatal(err)
- }
-
- r := httptest.NewRecorder()
- setTimeout(t, "", 500*time.Millisecond, func() {
- if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
- t.Fatal(err)
- }
- assertHttpNotError(r, t)
- })
-
- dec := json.NewDecoder(r.Body)
- for i := 0; i < 2; i++ {
- var jm utils.JSONMessage
- if err := dec.Decode(&jm); err == io.EOF {
- break
- } else if err != nil {
- t.Fatal(err)
- }
- if jm != *events[i] {
- t.Fatalf("Event received it different than expected")
- }
- }
-
-}
-
func TestGetImagesJSON(t *testing.T) {
eng := NewTestEngine(t)
defer mkDaemonFromEngine(eng, t).Nuke()
@@ -172,30 +124,6 @@ func TestGetImagesJSON(t *testing.T) {
}
}
-func TestGetImagesHistory(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- r := httptest.NewRecorder()
-
- req, err := http.NewRequest("GET", fmt.Sprintf("/images/%s/history", unitTestImageName), nil)
- if err != nil {
- t.Fatal(err)
- }
- if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
- t.Fatal(err)
- }
- assertHttpNotError(r, t)
-
- outs := engine.NewTable("Created", 0)
- if _, err := outs.ReadListFrom(r.Body.Bytes()); err != nil {
- t.Fatal(err)
- }
- if len(outs.Data) != 1 {
- t.Errorf("Expected 1 line, %d found", len(outs.Data))
- }
-}
-
func TestGetImagesByName(t *testing.T) {
eng := NewTestEngine(t)
defer mkDaemonFromEngine(eng, t).Nuke()
@@ -697,59 +625,26 @@ func TestPostContainersStart(t *testing.T) {
}
containerAssertExists(eng, containerID, t)
- // Give some time to the process to start
- // FIXME: use Wait once it's available as a job
- containerWaitTimeout(eng, containerID, t)
- if !containerRunning(eng, containerID, t) {
- t.Errorf("Container should be running")
- }
-
- r = httptest.NewRecorder()
- if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
- t.Fatal(err)
- }
- // Starting an already started container should return an error
- // FIXME: verify a precise error code. There is a possible bug here
- // which causes this to return 404 even though the container exists.
- assertHttpError(r, t)
- containerAssertExists(eng, containerID, t)
- containerKill(eng, containerID, t)
-}
-
-// Expected behaviour: using / as a bind mount source should throw an error
-func TestRunErrorBindMountRootSource(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- containerID := createTestContainer(
- eng,
- &runconfig.Config{
- Image: unitTestImageID,
- Cmd: []string{"/bin/cat"},
- OpenStdin: true,
- },
- t,
- )
- hostConfigJSON, err := json.Marshal(&runconfig.HostConfig{
- Binds: []string{"/:/tmp"},
- })
-
- req, err := http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON))
+ req, err = http.NewRequest("POST", "/containers/"+containerID+"/start", bytes.NewReader(hostConfigJSON))
if err != nil {
t.Fatal(err)
}
req.Header.Set("Content-Type", "application/json")
- r := httptest.NewRecorder()
+ r = httptest.NewRecorder()
if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
- if r.Code != http.StatusInternalServerError {
- containerKill(eng, containerID, t)
- t.Fatal("should have failed to run when using / as a source for the bind mount")
+
+ // Starting an already started container should return a 304
+ assertHttpNotError(r, t)
+ if r.Code != http.StatusNotModified {
+ t.Fatalf("%d NOT MODIFIER expected, received %d\n", http.StatusNotModified, r.Code)
}
+ containerAssertExists(eng, containerID, t)
+ containerKill(eng, containerID, t)
}
func TestPostContainersStop(t *testing.T) {
@@ -790,6 +685,22 @@ func TestPostContainersStop(t *testing.T) {
if containerRunning(eng, containerID, t) {
t.Fatalf("The container hasn't been stopped")
}
+
+ req, err = http.NewRequest("POST", "/containers/"+containerID+"/stop?t=1", bytes.NewReader([]byte{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r = httptest.NewRecorder()
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+
+	// Stopping an already stopped container should return a 304
+ assertHttpNotError(r, t)
+ if r.Code != http.StatusNotModified {
+ t.Fatalf("%d NOT MODIFIER expected, received %d\n", http.StatusNotModified, r.Code)
+ }
}
func TestPostContainersWait(t *testing.T) {
diff --git a/integration/commands_test.go b/integration/commands_test.go
index 4ad225bb43..47e9860052 100644
--- a/integration/commands_test.go
+++ b/integration/commands_test.go
@@ -8,9 +8,7 @@ import (
"os"
"path"
"regexp"
- "strconv"
"strings"
- "syscall"
"testing"
"time"
@@ -118,141 +116,6 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error
return nil
}
-// TestRunHostname checks that 'docker run -h' correctly sets a custom hostname
-func TestRunHostname(t *testing.T) {
- stdout, stdoutPipe := io.Pipe()
-
- cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
- defer cleanup(globalEngine, t)
-
- c := make(chan struct{})
- go func() {
- defer close(c)
- if err := cli.CmdRun("-h", "foobar", unitTestImageID, "hostname"); err != nil {
- t.Fatal(err)
- }
- }()
-
- setTimeout(t, "Reading command output time out", 2*time.Second, func() {
- cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
- if err != nil {
- t.Fatal(err)
- }
- if cmdOutput != "foobar\n" {
- t.Fatalf("'hostname' should display '%s', not '%s'", "foobar\n", cmdOutput)
- }
- })
-
- container := globalDaemon.List()[0]
-
- setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
- <-c
-
- go func() {
- cli.CmdWait(container.ID)
- }()
-
- if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
- t.Fatal(err)
- }
- })
-
- // Cleanup pipes
- if err := closeWrap(stdout, stdoutPipe); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestRunWorkdir checks that 'docker run -w' correctly sets a custom working directory
-func TestRunWorkdir(t *testing.T) {
- stdout, stdoutPipe := io.Pipe()
-
- cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
- defer cleanup(globalEngine, t)
-
- c := make(chan struct{})
- go func() {
- defer close(c)
- if err := cli.CmdRun("-w", "/foo/bar", unitTestImageID, "pwd"); err != nil {
- t.Fatal(err)
- }
- }()
-
- setTimeout(t, "Reading command output time out", 2*time.Second, func() {
- cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
- if err != nil {
- t.Fatal(err)
- }
- if cmdOutput != "/foo/bar\n" {
- t.Fatalf("'pwd' should display '%s', not '%s'", "/foo/bar\n", cmdOutput)
- }
- })
-
- container := globalDaemon.List()[0]
-
- setTimeout(t, "CmdRun timed out", 10*time.Second, func() {
- <-c
-
- go func() {
- cli.CmdWait(container.ID)
- }()
-
- if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
- t.Fatal(err)
- }
- })
-
- // Cleanup pipes
- if err := closeWrap(stdout, stdoutPipe); err != nil {
- t.Fatal(err)
- }
-}
-
-// TestRunWorkdirExists checks that 'docker run -w' correctly sets a custom working directory, even if it exists
-func TestRunWorkdirExists(t *testing.T) {
- stdout, stdoutPipe := io.Pipe()
-
- cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
- defer cleanup(globalEngine, t)
-
- c := make(chan struct{})
- go func() {
- defer close(c)
- if err := cli.CmdRun("-w", "/proc", unitTestImageID, "pwd"); err != nil {
- t.Fatal(err)
- }
- }()
-
- setTimeout(t, "Reading command output time out", 2*time.Second, func() {
- cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
- if err != nil {
- t.Fatal(err)
- }
- if cmdOutput != "/proc\n" {
- t.Fatalf("'pwd' should display '%s', not '%s'", "/proc\n", cmdOutput)
- }
- })
-
- container := globalDaemon.List()[0]
-
- setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
- <-c
-
- go func() {
- cli.CmdWait(container.ID)
- }()
-
- if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil {
- t.Fatal(err)
- }
- })
-
- // Cleanup pipes
- if err := closeWrap(stdout, stdoutPipe); err != nil {
- t.Fatal(err)
- }
-}
-
// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected
func TestRunWorkdirExistsAndIsFile(t *testing.T) {
@@ -361,7 +224,7 @@ func TestRunDisconnect(t *testing.T) {
// cause /bin/cat to exit.
setTimeout(t, "Waiting for /bin/cat to exit timed out", 2*time.Second, func() {
container := globalDaemon.List()[0]
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
if container.State.IsRunning() {
t.Fatalf("/bin/cat is still running after closing stdin")
}
@@ -413,78 +276,12 @@ func TestRunDisconnectTty(t *testing.T) {
// In tty mode, we expect the process to stay alive even after client's stdin closes.
// Give some time to monitor to do his thing
- container.WaitTimeout(500 * time.Millisecond)
+ container.State.WaitStop(500 * time.Millisecond)
if !container.State.IsRunning() {
t.Fatalf("/bin/cat should still be running after closing stdin (tty mode)")
}
}
-// TestAttachStdin checks attaching to stdin without stdout and stderr.
-// 'docker run -i -a stdin' should sends the client's stdin to the command,
-// then detach from it and print the container id.
-func TestRunAttachStdin(t *testing.T) {
-
- stdin, stdinPipe := io.Pipe()
- stdout, stdoutPipe := io.Pipe()
-
- cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
- defer cleanup(globalEngine, t)
-
- ch := make(chan struct{})
- go func() {
- defer close(ch)
- cli.CmdRun("-i", "-a", "stdin", unitTestImageID, "sh", "-c", "echo hello && cat && sleep 5")
- }()
-
- // Send input to the command, close stdin
- setTimeout(t, "Write timed out", 10*time.Second, func() {
- if _, err := stdinPipe.Write([]byte("hi there\n")); err != nil {
- t.Fatal(err)
- }
- if err := stdinPipe.Close(); err != nil {
- t.Fatal(err)
- }
- })
-
- container := globalDaemon.List()[0]
-
- // Check output
- setTimeout(t, "Reading command output time out", 10*time.Second, func() {
- cmdOutput, err := bufio.NewReader(stdout).ReadString('\n')
- if err != nil {
- t.Fatal(err)
- }
- if cmdOutput != container.ID+"\n" {
- t.Fatalf("Wrong output: should be '%s', not '%s'\n", container.ID+"\n", cmdOutput)
- }
- })
-
- // wait for CmdRun to return
- setTimeout(t, "Waiting for CmdRun timed out", 5*time.Second, func() {
- <-ch
- })
-
- setTimeout(t, "Waiting for command to exit timed out", 10*time.Second, func() {
- container.Wait()
- })
-
- // Check logs
- if cmdLogs, err := container.ReadLog("json"); err != nil {
- t.Fatal(err)
- } else {
- if output, err := ioutil.ReadAll(cmdLogs); err != nil {
- t.Fatal(err)
- } else {
- expectedLogs := []string{"{\"log\":\"hello\\n\",\"stream\":\"stdout\"", "{\"log\":\"hi there\\n\",\"stream\":\"stdout\""}
- for _, expectedLog := range expectedLogs {
- if !strings.Contains(string(output), expectedLog) {
- t.Fatalf("Unexpected logs: should contains '%s', it is not '%s'\n", expectedLog, output)
- }
- }
- }
- }
-}
-
// TestRunDetach checks attaching and detaching with the escape sequence.
func TestRunDetach(t *testing.T) {
@@ -738,7 +535,7 @@ func TestAttachDisconnect(t *testing.T) {
// We closed stdin, expect /bin/cat to still be running
// Wait a little bit to make sure container.monitor() did his thing
- err := container.WaitTimeout(500 * time.Millisecond)
+ _, err := container.State.WaitStop(500 * time.Millisecond)
if err == nil || !container.State.IsRunning() {
t.Fatalf("/bin/cat is not running after closing stdin")
}
@@ -746,7 +543,7 @@ func TestAttachDisconnect(t *testing.T) {
// Try to avoid the timeout in destroy. Best effort, don't check error
cStdin, _ := container.StdinPipe()
cStdin.Close()
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
}
// Expected behaviour: container gets deleted automatically after exit
@@ -787,23 +584,6 @@ func TestRunAutoRemove(t *testing.T) {
}
}
-func TestCmdLogs(t *testing.T) {
- t.Skip("Test not impemented")
- cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
- defer cleanup(globalEngine, t)
-
- if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil {
- t.Fatal(err)
- }
- if err := cli.CmdRun("-t", unitTestImageID, "sh", "-c", "ls -l"); err != nil {
- t.Fatal(err)
- }
-
- if err := cli.CmdLogs(globalDaemon.List()[0].ID); err != nil {
- t.Fatal(err)
- }
-}
-
// Expected behaviour: error out when attempting to bind mount non-existing source paths
func TestRunErrorBindNonExistingSource(t *testing.T) {
@@ -825,6 +605,7 @@ func TestRunErrorBindNonExistingSource(t *testing.T) {
}
func TestImagesViz(t *testing.T) {
+ t.Skip("Image viz is deprecated")
stdout, stdoutPipe := io.Pipe()
cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
@@ -875,6 +656,7 @@ func TestImagesViz(t *testing.T) {
}
func TestImagesTree(t *testing.T) {
+ t.Skip("Image tree is deprecated")
stdout, stdoutPipe := io.Pipe()
cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
@@ -1095,73 +877,3 @@ func TestContainerOrphaning(t *testing.T) {
}
}
-
-func TestCmdKill(t *testing.T) {
- var (
- stdin, stdinPipe = io.Pipe()
- stdout, stdoutPipe = io.Pipe()
- cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
- cli2 = client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
- )
- defer cleanup(globalEngine, t)
-
- ch := make(chan struct{})
- go func() {
- defer close(ch)
- cli.CmdRun("-i", "-t", unitTestImageID, "sh", "-c", "trap 'echo SIGUSR1' USR1; trap 'echo SIGUSR2' USR2; echo Ready; while true; do read; done")
- }()
-
- container := waitContainerStart(t, 10*time.Second)
-
- setTimeout(t, "Read Ready timed out", 3*time.Second, func() {
- if err := expectPipe("Ready", stdout); err != nil {
- t.Fatal(err)
- }
- })
-
- setTimeout(t, "SIGUSR1 timed out", 2*time.Second, func() {
- for i := 0; i < 10; i++ {
- if err := cli2.CmdKill("-s", strconv.Itoa(int(syscall.SIGUSR1)), container.ID); err != nil {
- t.Fatal(err)
- }
- if err := expectPipe("SIGUSR1", stdout); err != nil {
- t.Fatal(err)
- }
- }
- })
-
- setTimeout(t, "SIGUSR2 timed out", 2*time.Second, func() {
- for i := 0; i < 20; i++ {
- sig := "USR2"
- if i%2 != 0 {
- // Swap to testing "SIGUSR2" for every odd iteration
- sig = "SIGUSR2"
- }
- if err := cli2.CmdKill("--signal="+sig, container.ID); err != nil {
- t.Fatal(err)
- }
- if err := expectPipe("SIGUSR2", stdout); err != nil {
- t.Fatal(err)
- }
- }
- })
-
- stdout.Close()
- time.Sleep(500 * time.Millisecond)
- if !container.State.IsRunning() {
- t.Fatal("The container should be still running")
- }
-
- setTimeout(t, "Waiting for container timedout", 5*time.Second, func() {
- if err := cli2.CmdKill(container.ID); err != nil {
- t.Fatal(err)
- }
-
- <-ch
- if err := cli2.CmdWait(container.ID); err != nil {
- t.Fatal(err)
- }
- })
-
- closeWrap(stdin, stdinPipe, stdout, stdoutPipe)
-}
diff --git a/integration/container_test.go b/integration/container_test.go
index 8fe52a3cd6..48b3321a50 100644
--- a/integration/container_test.go
+++ b/integration/container_test.go
@@ -2,7 +2,6 @@ package docker
import (
"fmt"
- "github.com/dotcloud/docker/runconfig"
"io"
"io/ioutil"
"os"
@@ -10,6 +9,8 @@ import (
"strings"
"testing"
"time"
+
+ "github.com/dotcloud/docker/runconfig"
)
func TestKillDifferentUser(t *testing.T) {
@@ -60,7 +61,7 @@ func TestKillDifferentUser(t *testing.T) {
if container.State.IsRunning() {
t.Errorf("Container shouldn't be running")
}
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
if container.State.IsRunning() {
t.Errorf("Container shouldn't be running")
}
@@ -134,7 +135,7 @@ func TestRestartStdin(t *testing.T) {
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
@@ -164,7 +165,7 @@ func TestRestartStdin(t *testing.T) {
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
output, err = ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
@@ -212,7 +213,7 @@ func TestStdin(t *testing.T) {
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
@@ -257,7 +258,7 @@ func TestTty(t *testing.T) {
if err := stdin.Close(); err != nil {
t.Fatal(err)
}
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
output, err := ioutil.ReadAll(stdout)
if err != nil {
t.Fatal(err)
@@ -366,7 +367,7 @@ func BenchmarkRunParallel(b *testing.B) {
complete <- err
return
}
- if err := container.WaitTimeout(15 * time.Second); err != nil {
+ if _, err := container.State.WaitStop(15 * time.Second); err != nil {
complete <- err
return
}
@@ -420,7 +421,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
- img, err := r.Commit(container1, "", "", "unit test commited image", "", nil)
+ img, err := r.Commit(container1, "", "", "unit test commited image", "", true, nil)
if err != nil {
t.Error(err)
}
@@ -446,7 +447,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
- img2, err := r.Commit(container2, "", "", "unit test commited image", "", nil)
+ img2, err := r.Commit(container2, "", "", "unit test commited image", "", true, nil)
if err != nil {
t.Error(err)
}
@@ -480,7 +481,7 @@ func TestCopyVolumeContent(t *testing.T) {
t.Errorf("Container shouldn't be running")
}
- img, err := r.Commit(container1, "", "", "unit test commited image", "", nil)
+ img, err := r.Commit(container1, "", "", "unit test commited image", "", true, nil)
if err != nil {
t.Error(err)
}
diff --git a/integration/runtime_test.go b/integration/runtime_test.go
index 96df15be60..4c0f636d60 100644
--- a/integration/runtime_test.go
+++ b/integration/runtime_test.go
@@ -334,7 +334,7 @@ func TestDaemonCreate(t *testing.T) {
}
container, _, err = daemon.Create(config, "")
- _, err = daemon.Commit(container, "testrepo", "testtag", "", "", config)
+ _, err = daemon.Commit(container, "testrepo", "testtag", "", "", true, config)
if err != nil {
t.Error(err)
}
@@ -496,7 +496,7 @@ func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daem
})
// Even if the state is running, lets give some time to lxc to spawn the process
- container.WaitTimeout(500 * time.Millisecond)
+ container.State.WaitStop(500 * time.Millisecond)
strPort = container.NetworkSettings.Ports[p][0].HostPort
return daemon, container, strPort
@@ -611,7 +611,7 @@ func TestRestore(t *testing.T) {
// Simulate a crash/manual quit of dockerd: process dies, states stays 'Running'
cStdin, _ := container2.StdinPipe()
cStdin.Close()
- if err := container2.WaitTimeout(2 * time.Second); err != nil {
+ if _, err := container2.State.WaitStop(2 * time.Second); err != nil {
t.Fatal(err)
}
container2.State.SetRunning(42)
diff --git a/integration/sorter_test.go b/integration/sorter_test.go
deleted file mode 100644
index 610fe9b3ab..0000000000
--- a/integration/sorter_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package docker
-
-import (
- "github.com/dotcloud/docker/engine"
- "testing"
- "time"
-)
-
-func TestServerListOrderedImagesByCreationDate(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- if err := generateImage("", eng); err != nil {
- t.Fatal(err)
- }
-
- images := getImages(eng, t, true, "")
-
- if images.Data[0].GetInt("Created") < images.Data[1].GetInt("Created") {
- t.Error("Expected images to be ordered by most recent creation date.")
- }
-}
-
-func TestServerListOrderedImagesByCreationDateAndTag(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkDaemonFromEngine(eng, t).Nuke()
-
- err := generateImage("bar", eng)
- if err != nil {
- t.Fatal(err)
- }
-
- time.Sleep(time.Second)
-
- err = generateImage("zed", eng)
- if err != nil {
- t.Fatal(err)
- }
-
- images := getImages(eng, t, true, "")
-
- if repoTags := images.Data[0].GetList("RepoTags"); repoTags[0] != "repo:zed" && repoTags[0] != "repo:bar" {
- t.Errorf("Expected Images to be ordered by most recent creation date.")
- }
-}
-
-func generateImage(name string, eng *engine.Engine) error {
- archive, err := fakeTar()
- if err != nil {
- return err
- }
- job := eng.Job("import", "-", "repo", name)
- job.Stdin.Add(archive)
- job.SetenvBool("json", true)
- return job.Run()
-}
diff --git a/integration/utils_test.go b/integration/utils_test.go
index d8101dfb1d..7be7f13eee 100644
--- a/integration/utils_test.go
+++ b/integration/utils_test.go
@@ -96,11 +96,13 @@ func containerAttach(eng *engine.Engine, id string, t utils.Fataler) (io.WriteCl
}
func containerWait(eng *engine.Engine, id string, t utils.Fataler) int {
- return getContainer(eng, id, t).Wait()
+ ex, _ := getContainer(eng, id, t).State.WaitStop(-1 * time.Second)
+ return ex
}
func containerWaitTimeout(eng *engine.Engine, id string, t utils.Fataler) error {
- return getContainer(eng, id, t).WaitTimeout(500 * time.Millisecond)
+ _, err := getContainer(eng, id, t).State.WaitStop(500 * time.Millisecond)
+ return err
}
func containerKill(eng *engine.Engine, id string, t utils.Fataler) {
@@ -307,7 +309,7 @@ func runContainer(eng *engine.Engine, r *daemon.Daemon, args []string, t *testin
return "", err
}
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
data, err := ioutil.ReadAll(stdout)
if err != nil {
return "", err
diff --git a/pkg/dockerscript/MAINTAINERS b/pkg/dockerscript/MAINTAINERS
deleted file mode 100644
index aee10c8421..0000000000
--- a/pkg/dockerscript/MAINTAINERS
+++ /dev/null
@@ -1 +0,0 @@
-Solomon Hykes <solomon@docker.com> (@shykes)
diff --git a/pkg/dockerscript/dockerscript.go b/pkg/dockerscript/dockerscript.go
deleted file mode 100644
index e7ec5d1286..0000000000
--- a/pkg/dockerscript/dockerscript.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package dockerscript
-
-import (
- "fmt"
- "github.com/dotcloud/docker/pkg/dockerscript/scanner"
- "io"
- "strings"
-)
-
-type Command struct {
- Args []string
- Children []*Command
- Background bool
-}
-
-type Scanner struct {
- scanner.Scanner
- commentLine bool
-}
-
-func Parse(src io.Reader) ([]*Command, error) {
- s := &Scanner{}
- s.Init(src)
- s.Whitespace = 1<<'\t' | 1<<' '
- s.Mode = scanner.ScanStrings | scanner.ScanRawStrings | scanner.ScanIdents
- expr, err := parse(s, "")
- if err != nil {
- return nil, fmt.Errorf("line %d:%d: %v\n", s.Pos().Line, s.Pos().Column, err)
- }
- return expr, nil
-}
-
-func (cmd *Command) subString(depth int) string {
- var prefix string
- for i := 0; i < depth; i++ {
- prefix += " "
- }
- s := prefix + strings.Join(cmd.Args, ", ")
- if len(cmd.Children) > 0 {
- s += " {\n"
- for _, subcmd := range cmd.Children {
- s += subcmd.subString(depth + 1)
- }
- s += prefix + "}"
- }
- s += "\n"
- return s
-}
-
-func (cmd *Command) String() string {
- return cmd.subString(0)
-}
-
-func parseArgs(s *Scanner) ([]string, rune, error) {
- var parseError error
- // FIXME: we overwrite previously set error
- s.Error = func(s *scanner.Scanner, msg string) {
- parseError = fmt.Errorf(msg)
- // parseError = fmt.Errorf("line %d:%d: %s\n", s.Pos().Line, s.Pos().Column, msg)
- }
- var args []string
- tok := s.Scan()
- for tok != scanner.EOF {
- if parseError != nil {
- return args, tok, parseError
- }
- text := s.TokenText()
- // Toggle line comment
- if strings.HasPrefix(text, "#") {
- s.commentLine = true
- } else if text == "\n" || text == "\r" {
- s.commentLine = false
- return args, tok, nil
- }
- if !s.commentLine {
- if text == "{" || text == "}" || text == "\n" || text == "\r" || text == ";" || text == "&" {
- return args, tok, nil
- }
- args = append(args, text)
- }
- tok = s.Scan()
- }
- return args, tok, nil
-}
-
-func parse(s *Scanner, opener string) (expr []*Command, err error) {
- /*
- defer func() {
- fmt.Printf("parse() returned %d commands:\n", len(expr))
- for _, c := range expr {
- fmt.Printf("\t----> %s\n", c)
- }
- }()
- */
- for {
- args, tok, err := parseArgs(s)
- if err != nil {
- return nil, err
- }
- cmd := &Command{Args: args}
- afterArgs := s.TokenText()
- if afterArgs == "{" {
- children, err := parse(s, "{")
- if err != nil {
- return nil, err
- }
- cmd.Children = children
- } else if afterArgs == "}" && opener != "{" {
- return nil, fmt.Errorf("unexpected end of block '}'")
- } else if afterArgs == "&" {
- cmd.Background = true
- }
- if len(cmd.Args) > 0 || len(cmd.Children) > 0 {
- expr = append(expr, cmd)
- }
- if tok == scanner.EOF || afterArgs == "}" {
- break
- }
- }
- return expr, nil
-}
diff --git a/pkg/dockerscript/scanner/extra.go b/pkg/dockerscript/scanner/extra.go
deleted file mode 100644
index 05c17e247e..0000000000
--- a/pkg/dockerscript/scanner/extra.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package scanner
-
-import (
- "strings"
- "unicode"
-)
-
-// extra functions used to hijack the upstream text/scanner
-
-func detectIdent(ch rune) bool {
- if unicode.IsLetter(ch) {
- return true
- }
- if unicode.IsDigit(ch) {
- return true
- }
- if strings.ContainsRune("_:/+-@%^.!=", ch) {
- return true
- }
- return false
-}
diff --git a/pkg/dockerscript/scanner/scanner.go b/pkg/dockerscript/scanner/scanner.go
deleted file mode 100644
index b208fc7810..0000000000
--- a/pkg/dockerscript/scanner/scanner.go
+++ /dev/null
@@ -1,673 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package scanner provides a scanner and tokenizer for UTF-8-encoded text.
-// It takes an io.Reader providing the source, which then can be tokenized
-// through repeated calls to the Scan function. For compatibility with
-// existing tools, the NUL character is not allowed. If the first character
-// in the source is a UTF-8 encoded byte order mark (BOM), it is discarded.
-//
-// By default, a Scanner skips white space and Go comments and recognizes all
-// literals as defined by the Go language specification. It may be
-// customized to recognize only a subset of those literals and to recognize
-// different white space characters.
-//
-// Basic usage pattern:
-//
-// var s scanner.Scanner
-// s.Init(src)
-// tok := s.Scan()
-// for tok != scanner.EOF {
-// // do something with tok
-// tok = s.Scan()
-// }
-//
-package scanner
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "unicode/utf8"
-)
-
-// TODO(gri): Consider changing this to use the new (token) Position package.
-
-// A source position is represented by a Position value.
-// A position is valid if Line > 0.
-type Position struct {
- Filename string // filename, if any
- Offset int // byte offset, starting at 0
- Line int // line number, starting at 1
- Column int // column number, starting at 1 (character count per line)
-}
-
-// IsValid returns true if the position is valid.
-func (pos *Position) IsValid() bool { return pos.Line > 0 }
-
-func (pos Position) String() string {
- s := pos.Filename
- if pos.IsValid() {
- if s != "" {
- s += ":"
- }
- s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
- }
- if s == "" {
- s = "???"
- }
- return s
-}
-
-// Predefined mode bits to control recognition of tokens. For instance,
-// to configure a Scanner such that it only recognizes (Go) identifiers,
-// integers, and skips comments, set the Scanner's Mode field to:
-//
-// ScanIdents | ScanInts | SkipComments
-//
-const (
- ScanIdents = 1 << -Ident
- ScanInts = 1 << -Int
- ScanFloats = 1 << -Float // includes Ints
- ScanChars = 1 << -Char
- ScanStrings = 1 << -String
- ScanRawStrings = 1 << -RawString
- ScanComments = 1 << -Comment
- SkipComments = 1 << -skipComment // if set with ScanComments, comments become white space
- GoTokens = ScanIdents | ScanFloats | ScanChars | ScanStrings | ScanRawStrings | ScanComments | SkipComments
-)
-
-// The result of Scan is one of the following tokens or a Unicode character.
-const (
- EOF = -(iota + 1)
- Ident
- Int
- Float
- Char
- String
- RawString
- Comment
- skipComment
-)
-
-var tokenString = map[rune]string{
- EOF: "EOF",
- Ident: "Ident",
- Int: "Int",
- Float: "Float",
- Char: "Char",
- String: "String",
- RawString: "RawString",
- Comment: "Comment",
-}
-
-// TokenString returns a printable string for a token or Unicode character.
-func TokenString(tok rune) string {
- if s, found := tokenString[tok]; found {
- return s
- }
- return fmt.Sprintf("%q", string(tok))
-}
-
-// GoWhitespace is the default value for the Scanner's Whitespace field.
-// Its value selects Go's white space characters.
-const GoWhitespace = 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' '
-
-const bufLen = 1024 // at least utf8.UTFMax
-
-// A Scanner implements reading of Unicode characters and tokens from an io.Reader.
-type Scanner struct {
- // Input
- src io.Reader
-
- // Source buffer
- srcBuf [bufLen + 1]byte // +1 for sentinel for common case of s.next()
- srcPos int // reading position (srcBuf index)
- srcEnd int // source end (srcBuf index)
-
- // Source position
- srcBufOffset int // byte offset of srcBuf[0] in source
- line int // line count
- column int // character count
- lastLineLen int // length of last line in characters (for correct column reporting)
- lastCharLen int // length of last character in bytes
-
- // Token text buffer
- // Typically, token text is stored completely in srcBuf, but in general
- // the token text's head may be buffered in tokBuf while the token text's
- // tail is stored in srcBuf.
- tokBuf bytes.Buffer // token text head that is not in srcBuf anymore
- tokPos int // token text tail position (srcBuf index); valid if >= 0
- tokEnd int // token text tail end (srcBuf index)
-
- // One character look-ahead
- ch rune // character before current srcPos
-
- // Error is called for each error encountered. If no Error
- // function is set, the error is reported to os.Stderr.
- Error func(s *Scanner, msg string)
-
- // ErrorCount is incremented by one for each error encountered.
- ErrorCount int
-
- // The Mode field controls which tokens are recognized. For instance,
- // to recognize Ints, set the ScanInts bit in Mode. The field may be
- // changed at any time.
- Mode uint
-
- // The Whitespace field controls which characters are recognized
- // as white space. To recognize a character ch <= ' ' as white space,
- // set the ch'th bit in Whitespace (the Scanner's behavior is undefined
- // for values ch > ' '). The field may be changed at any time.
- Whitespace uint64
-
- // Start position of most recently scanned token; set by Scan.
- // Calling Init or Next invalidates the position (Line == 0).
- // The Filename field is always left untouched by the Scanner.
- // If an error is reported (via Error) and Position is invalid,
- // the scanner is not inside a token. Call Pos to obtain an error
- // position in that case.
- Position
-}
-
-// Init initializes a Scanner with a new source and returns s.
-// Error is set to nil, ErrorCount is set to 0, Mode is set to GoTokens,
-// and Whitespace is set to GoWhitespace.
-func (s *Scanner) Init(src io.Reader) *Scanner {
- s.src = src
-
- // initialize source buffer
- // (the first call to next() will fill it by calling src.Read)
- s.srcBuf[0] = utf8.RuneSelf // sentinel
- s.srcPos = 0
- s.srcEnd = 0
-
- // initialize source position
- s.srcBufOffset = 0
- s.line = 1
- s.column = 0
- s.lastLineLen = 0
- s.lastCharLen = 0
-
- // initialize token text buffer
- // (required for first call to next()).
- s.tokPos = -1
-
- // initialize one character look-ahead
- s.ch = -1 // no char read yet
-
- // initialize public fields
- s.Error = nil
- s.ErrorCount = 0
- s.Mode = GoTokens
- s.Whitespace = GoWhitespace
- s.Line = 0 // invalidate token position
-
- return s
-}
-
-// next reads and returns the next Unicode character. It is designed such
-// that only a minimal amount of work needs to be done in the common ASCII
-// case (one test to check for both ASCII and end-of-buffer, and one test
-// to check for newlines).
-func (s *Scanner) next() rune {
- ch, width := rune(s.srcBuf[s.srcPos]), 1
-
- if ch >= utf8.RuneSelf {
- // uncommon case: not ASCII or not enough bytes
- for s.srcPos+utf8.UTFMax > s.srcEnd && !utf8.FullRune(s.srcBuf[s.srcPos:s.srcEnd]) {
- // not enough bytes: read some more, but first
- // save away token text if any
- if s.tokPos >= 0 {
- s.tokBuf.Write(s.srcBuf[s.tokPos:s.srcPos])
- s.tokPos = 0
- // s.tokEnd is set by Scan()
- }
- // move unread bytes to beginning of buffer
- copy(s.srcBuf[0:], s.srcBuf[s.srcPos:s.srcEnd])
- s.srcBufOffset += s.srcPos
- // read more bytes
- // (an io.Reader must return io.EOF when it reaches
- // the end of what it is reading - simply returning
- // n == 0 will make this loop retry forever; but the
- // error is in the reader implementation in that case)
- i := s.srcEnd - s.srcPos
- n, err := s.src.Read(s.srcBuf[i:bufLen])
- s.srcPos = 0
- s.srcEnd = i + n
- s.srcBuf[s.srcEnd] = utf8.RuneSelf // sentinel
- if err != nil {
- if s.srcEnd == 0 {
- if s.lastCharLen > 0 {
- // previous character was not EOF
- s.column++
- }
- s.lastCharLen = 0
- return EOF
- }
- if err != io.EOF {
- s.error(err.Error())
- }
- // If err == EOF, we won't be getting more
- // bytes; break to avoid infinite loop. If
- // err is something else, we don't know if
- // we can get more bytes; thus also break.
- break
- }
- }
- // at least one byte
- ch = rune(s.srcBuf[s.srcPos])
- if ch >= utf8.RuneSelf {
- // uncommon case: not ASCII
- ch, width = utf8.DecodeRune(s.srcBuf[s.srcPos:s.srcEnd])
- if ch == utf8.RuneError && width == 1 {
- // advance for correct error position
- s.srcPos += width
- s.lastCharLen = width
- s.column++
- s.error("illegal UTF-8 encoding")
- return ch
- }
- }
- }
-
- // advance
- s.srcPos += width
- s.lastCharLen = width
- s.column++
-
- // special situations
- switch ch {
- case 0:
- // for compatibility with other tools
- s.error("illegal character NUL")
- case '\n':
- s.line++
- s.lastLineLen = s.column
- s.column = 0
- }
-
- return ch
-}
-
-// Next reads and returns the next Unicode character.
-// It returns EOF at the end of the source. It reports
-// a read error by calling s.Error, if not nil; otherwise
-// it prints an error message to os.Stderr. Next does not
-// update the Scanner's Position field; use Pos() to
-// get the current position.
-func (s *Scanner) Next() rune {
- s.tokPos = -1 // don't collect token text
- s.Line = 0 // invalidate token position
- ch := s.Peek()
- s.ch = s.next()
- return ch
-}
-
-// Peek returns the next Unicode character in the source without advancing
-// the scanner. It returns EOF if the scanner's position is at the last
-// character of the source.
-func (s *Scanner) Peek() rune {
- if s.ch < 0 {
- // this code is only run for the very first character
- s.ch = s.next()
- if s.ch == '\uFEFF' {
- s.ch = s.next() // ignore BOM
- }
- }
- return s.ch
-}
-
-func (s *Scanner) error(msg string) {
- s.ErrorCount++
- if s.Error != nil {
- s.Error(s, msg)
- return
- }
- pos := s.Position
- if !pos.IsValid() {
- pos = s.Pos()
- }
- fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
-}
-
-func (s *Scanner) scanIdentifier() rune {
- ch := s.next() // read character after first '_' or letter
- for detectIdent(ch) {
- ch = s.next()
- }
- return ch
-}
-
-func digitVal(ch rune) int {
- switch {
- case '0' <= ch && ch <= '9':
- return int(ch - '0')
- case 'a' <= ch && ch <= 'f':
- return int(ch - 'a' + 10)
- case 'A' <= ch && ch <= 'F':
- return int(ch - 'A' + 10)
- }
- return 16 // larger than any legal digit val
-}
-
-func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
-
-func (s *Scanner) scanMantissa(ch rune) rune {
- for isDecimal(ch) {
- ch = s.next()
- }
- return ch
-}
-
-func (s *Scanner) scanFraction(ch rune) rune {
- if ch == '.' {
- ch = s.scanMantissa(s.next())
- }
- return ch
-}
-
-func (s *Scanner) scanExponent(ch rune) rune {
- if ch == 'e' || ch == 'E' {
- ch = s.next()
- if ch == '-' || ch == '+' {
- ch = s.next()
- }
- ch = s.scanMantissa(ch)
- }
- return ch
-}
-
-func (s *Scanner) scanNumber(ch rune) (rune, rune) {
- // isDecimal(ch)
- if ch == '0' {
- // int or float
- ch = s.next()
- if ch == 'x' || ch == 'X' {
- // hexadecimal int
- ch = s.next()
- hasMantissa := false
- for digitVal(ch) < 16 {
- ch = s.next()
- hasMantissa = true
- }
- if !hasMantissa {
- s.error("illegal hexadecimal number")
- }
- } else {
- // octal int or float
- has8or9 := false
- for isDecimal(ch) {
- if ch > '7' {
- has8or9 = true
- }
- ch = s.next()
- }
- if s.Mode&ScanFloats != 0 && (ch == '.' || ch == 'e' || ch == 'E') {
- // float
- ch = s.scanFraction(ch)
- ch = s.scanExponent(ch)
- return Float, ch
- }
- // octal int
- if has8or9 {
- s.error("illegal octal number")
- }
- }
- return Int, ch
- }
- // decimal int or float
- ch = s.scanMantissa(ch)
- if s.Mode&ScanFloats != 0 && (ch == '.' || ch == 'e' || ch == 'E') {
- // float
- ch = s.scanFraction(ch)
- ch = s.scanExponent(ch)
- return Float, ch
- }
- return Int, ch
-}
-
-func (s *Scanner) scanDigits(ch rune, base, n int) rune {
- for n > 0 && digitVal(ch) < base {
- ch = s.next()
- n--
- }
- if n > 0 {
- s.error("illegal char escape")
- }
- return ch
-}
-
-func (s *Scanner) scanEscape(quote rune) rune {
- ch := s.next() // read character after '/'
- switch ch {
- case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
- // nothing to do
- ch = s.next()
- case '0', '1', '2', '3', '4', '5', '6', '7':
- ch = s.scanDigits(ch, 8, 3)
- case 'x':
- ch = s.scanDigits(s.next(), 16, 2)
- case 'u':
- ch = s.scanDigits(s.next(), 16, 4)
- case 'U':
- ch = s.scanDigits(s.next(), 16, 8)
- default:
- s.error("illegal char escape")
- }
- return ch
-}
-
-func (s *Scanner) scanString(quote rune) (n int) {
- ch := s.next() // read character after quote
- for ch != quote {
- if ch == '\n' || ch < 0 {
- s.error("literal not terminated")
- return
- }
- if ch == '\\' {
- ch = s.scanEscape(quote)
- } else {
- ch = s.next()
- }
- n++
- }
- return
-}
-
-func (s *Scanner) scanRawString() {
- ch := s.next() // read character after '`'
- for ch != '`' {
- if ch < 0 {
- s.error("literal not terminated")
- return
- }
- ch = s.next()
- }
-}
-
-func (s *Scanner) scanChar() {
- if s.scanString('\'') != 1 {
- s.error("illegal char literal")
- }
-}
-
-func (s *Scanner) scanComment(ch rune) rune {
- // ch == '/' || ch == '*'
- if ch == '/' {
- // line comment
- ch = s.next() // read character after "//"
- for ch != '\n' && ch >= 0 {
- ch = s.next()
- }
- return ch
- }
-
- // general comment
- ch = s.next() // read character after "/*"
- for {
- if ch < 0 {
- s.error("comment not terminated")
- break
- }
- ch0 := ch
- ch = s.next()
- if ch0 == '*' && ch == '/' {
- ch = s.next()
- break
- }
- }
- return ch
-}
-
-// Scan reads the next token or Unicode character from source and returns it.
-// It only recognizes tokens t for which the respective Mode bit (1<<-t) is set.
-// It returns EOF at the end of the source. It reports scanner errors (read and
-// token errors) by calling s.Error, if not nil; otherwise it prints an error
-// message to os.Stderr.
-func (s *Scanner) Scan() rune {
- ch := s.Peek()
-
- // reset token text position
- s.tokPos = -1
- s.Line = 0
-
-redo:
- // skip white space
- for s.Whitespace&(1<<uint(ch)) != 0 {
- ch = s.next()
- }
-
- // start collecting token text
- s.tokBuf.Reset()
- s.tokPos = s.srcPos - s.lastCharLen
-
- // set token position
- // (this is a slightly optimized version of the code in Pos())
- s.Offset = s.srcBufOffset + s.tokPos
- if s.column > 0 {
- // common case: last character was not a '\n'
- s.Line = s.line
- s.Column = s.column
- } else {
- // last character was a '\n'
- // (we cannot be at the beginning of the source
- // since we have called next() at least once)
- s.Line = s.line - 1
- s.Column = s.lastLineLen
- }
-
- // determine token value
- tok := ch
- switch {
- case detectIdent(ch):
- if s.Mode&ScanIdents != 0 {
- tok = Ident
- ch = s.scanIdentifier()
- } else {
- ch = s.next()
- }
- case isDecimal(ch):
- if s.Mode&(ScanInts|ScanFloats) != 0 {
- tok, ch = s.scanNumber(ch)
- } else {
- ch = s.next()
- }
- default:
- switch ch {
- case '"':
- if s.Mode&ScanStrings != 0 {
- s.scanString('"')
- tok = String
- }
- ch = s.next()
- case '\'':
- if s.Mode&ScanChars != 0 {
- s.scanChar()
- tok = Char
- }
- ch = s.next()
- case '.':
- ch = s.next()
- if isDecimal(ch) && s.Mode&ScanFloats != 0 {
- tok = Float
- ch = s.scanMantissa(ch)
- ch = s.scanExponent(ch)
- }
- case '/':
- ch = s.next()
- if (ch == '/' || ch == '*') && s.Mode&ScanComments != 0 {
- if s.Mode&SkipComments != 0 {
- s.tokPos = -1 // don't collect token text
- ch = s.scanComment(ch)
- goto redo
- }
- ch = s.scanComment(ch)
- tok = Comment
- }
- case '`':
- if s.Mode&ScanRawStrings != 0 {
- s.scanRawString()
- tok = String
- }
- ch = s.next()
- default:
- ch = s.next()
- }
- }
-
- // end of token text
- s.tokEnd = s.srcPos - s.lastCharLen
-
- s.ch = ch
- return tok
-}
-
-// Pos returns the position of the character immediately after
-// the character or token returned by the last call to Next or Scan.
-func (s *Scanner) Pos() (pos Position) {
- pos.Filename = s.Filename
- pos.Offset = s.srcBufOffset + s.srcPos - s.lastCharLen
- switch {
- case s.column > 0:
- // common case: last character was not a '\n'
- pos.Line = s.line
- pos.Column = s.column
- case s.lastLineLen > 0:
- // last character was a '\n'
- pos.Line = s.line - 1
- pos.Column = s.lastLineLen
- default:
- // at the beginning of the source
- pos.Line = 1
- pos.Column = 1
- }
- return
-}
-
-// TokenText returns the string corresponding to the most recently scanned token.
-// Valid after calling Scan().
-func (s *Scanner) TokenText() string {
- if s.tokPos < 0 {
- // no token text
- return ""
- }
-
- if s.tokEnd < 0 {
- // if EOF was reached, s.tokEnd is set to -1 (s.srcPos == 0)
- s.tokEnd = s.tokPos
- }
-
- if s.tokBuf.Len() == 0 {
- // common case: the entire token text is still in srcBuf
- return string(s.srcBuf[s.tokPos:s.tokEnd])
- }
-
- // part of the token text was saved in tokBuf: save the rest in
- // tokBuf as well and return its content
- s.tokBuf.Write(s.srcBuf[s.tokPos:s.tokEnd])
- s.tokPos = s.tokEnd // ensure idempotency of TokenText() call
- return s.tokBuf.String()
-}
diff --git a/pkg/mflag/MAINTAINERS b/pkg/mflag/MAINTAINERS
index ceeb0cfd18..e0f18f14f1 100644
--- a/pkg/mflag/MAINTAINERS
+++ b/pkg/mflag/MAINTAINERS
@@ -1 +1 @@
-Victor Vieux <victor.vieux@docker.com> (@vieux)
+Victor Vieux <vieux@docker.com> (@vieux)
diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go
index ad23e540fb..52f786e451 100644
--- a/pkg/mflag/flag.go
+++ b/pkg/mflag/flag.go
@@ -305,12 +305,10 @@ type flagSlice []string
func (p flagSlice) Len() int { return len(p) }
func (p flagSlice) Less(i, j int) bool {
- pi, pj := strings.ToLower(p[i]), strings.ToLower(p[j])
- if pi[0] == '-' {
- pi = pi[1:]
- }
- if pj[0] == '-' {
- pj = pj[1:]
+ pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-")
+ lpi, lpj := strings.ToLower(pi), strings.ToLower(pj)
+ if lpi != lpj {
+ return lpi < lpj
}
return pi < pj
}
@@ -443,8 +441,6 @@ func (f *FlagSet) PrintDefaults() {
}
fmt.Fprintln(writer, "\t", line)
}
- // start := fmt.Sprintf(format, strings.Join(names, ", -"), flag.DefValue)
- // fmt.Fprintln(f.out(), start, strings.Replace(flag.Usage, "\n", "\n"+strings.Repeat(" ", len(start)+1), -1))
}
})
writer.Flush()
@@ -833,14 +829,12 @@ func (f *FlagSet) parseOne() (bool, string, error) {
f.args = f.args[1:]
has_value := false
value := ""
- for i := 1; i < len(name); i++ { // equals cannot be first
- if name[i] == '=' {
- value = trimQuotes(name[i+1:])
- has_value = true
- name = name[0:i]
- break
- }
+ if i := strings.Index(name, "="); i != -1 {
+ value = trimQuotes(name[i+1:])
+ has_value = true
+ name = name[:i]
}
+
m := f.formal
flag, alreadythere := m[name] // BUG
if !alreadythere {
diff --git a/pkg/mount/flags.go b/pkg/mount/flags.go
new file mode 100644
index 0000000000..742698e8d3
--- /dev/null
+++ b/pkg/mount/flags.go
@@ -0,0 +1,62 @@
+package mount
+
+import (
+ "strings"
+)
+
+// Parse fstab type mount options into mount() flags
+// and device specific data
+func parseOptions(options string) (int, string) {
+ var (
+ flag int
+ data []string
+ )
+
+ flags := map[string]struct {
+ clear bool
+ flag int
+ }{
+ "defaults": {false, 0},
+ "ro": {false, RDONLY},
+ "rw": {true, RDONLY},
+ "suid": {true, NOSUID},
+ "nosuid": {false, NOSUID},
+ "dev": {true, NODEV},
+ "nodev": {false, NODEV},
+ "exec": {true, NOEXEC},
+ "noexec": {false, NOEXEC},
+ "sync": {false, SYNCHRONOUS},
+ "async": {true, SYNCHRONOUS},
+ "dirsync": {false, DIRSYNC},
+ "remount": {false, REMOUNT},
+ "mand": {false, MANDLOCK},
+ "nomand": {true, MANDLOCK},
+ "atime": {true, NOATIME},
+ "noatime": {false, NOATIME},
+ "diratime": {true, NODIRATIME},
+ "nodiratime": {false, NODIRATIME},
+ "bind": {false, BIND},
+ "rbind": {false, RBIND},
+ "private": {false, PRIVATE},
+ "relatime": {false, RELATIME},
+ "norelatime": {true, RELATIME},
+ "strictatime": {false, STRICTATIME},
+ "nostrictatime": {true, STRICTATIME},
+ }
+
+ for _, o := range strings.Split(options, ",") {
+ // If the option does not exist in the flags table or the flag
+ // is not supported on the platform,
+ // then it is a data value for a specific fs type
+ if f, exists := flags[o]; exists && f.flag != 0 {
+ if f.clear {
+ flag &= ^f.flag
+ } else {
+ flag |= f.flag
+ }
+ } else {
+ data = append(data, o)
+ }
+ }
+ return flag, strings.Join(data, ",")
+}
diff --git a/pkg/mount/flags_freebsd.go b/pkg/mount/flags_freebsd.go
new file mode 100644
index 0000000000..4ddf4d7090
--- /dev/null
+++ b/pkg/mount/flags_freebsd.go
@@ -0,0 +1,28 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+ RDONLY = C.MNT_RDONLY
+ NOSUID = C.MNT_NOSUID
+ NOEXEC = C.MNT_NOEXEC
+ SYNCHRONOUS = C.MNT_SYNCHRONOUS
+ NOATIME = C.MNT_NOATIME
+
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NODEV = 0
+ NODIRATIME = 0
+ PRIVATE = 0
+ RBIND = 0
+ RELATIVE = 0
+ RELATIME = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+)
diff --git a/pkg/mount/flags_linux.go b/pkg/mount/flags_linux.go
index b7124b1dfa..19c882fcd8 100644
--- a/pkg/mount/flags_linux.go
+++ b/pkg/mount/flags_linux.go
@@ -3,62 +3,23 @@
package mount
import (
- "strings"
"syscall"
)
-// Parse fstab type mount options into mount() flags
-// and device specific data
-func parseOptions(options string) (int, string) {
- var (
- flag int
- data []string
- )
-
- flags := map[string]struct {
- clear bool
- flag int
- }{
- "defaults": {false, 0},
- "ro": {false, syscall.MS_RDONLY},
- "rw": {true, syscall.MS_RDONLY},
- "suid": {true, syscall.MS_NOSUID},
- "nosuid": {false, syscall.MS_NOSUID},
- "dev": {true, syscall.MS_NODEV},
- "nodev": {false, syscall.MS_NODEV},
- "exec": {true, syscall.MS_NOEXEC},
- "noexec": {false, syscall.MS_NOEXEC},
- "sync": {false, syscall.MS_SYNCHRONOUS},
- "async": {true, syscall.MS_SYNCHRONOUS},
- "dirsync": {false, syscall.MS_DIRSYNC},
- "remount": {false, syscall.MS_REMOUNT},
- "mand": {false, syscall.MS_MANDLOCK},
- "nomand": {true, syscall.MS_MANDLOCK},
- "atime": {true, syscall.MS_NOATIME},
- "noatime": {false, syscall.MS_NOATIME},
- "diratime": {true, syscall.MS_NODIRATIME},
- "nodiratime": {false, syscall.MS_NODIRATIME},
- "bind": {false, syscall.MS_BIND},
- "rbind": {false, syscall.MS_BIND | syscall.MS_REC},
- "private": {false, syscall.MS_PRIVATE},
- "relatime": {false, syscall.MS_RELATIME},
- "norelatime": {true, syscall.MS_RELATIME},
- "strictatime": {false, syscall.MS_STRICTATIME},
- "nostrictatime": {true, syscall.MS_STRICTATIME},
- }
-
- for _, o := range strings.Split(options, ",") {
- // If the option does not exist in the flags table then it is a
- // data value for a specific fs type
- if f, exists := flags[o]; exists {
- if f.clear {
- flag &= ^f.flag
- } else {
- flag |= f.flag
- }
- } else {
- data = append(data, o)
- }
- }
- return flag, strings.Join(data, ",")
-}
+const (
+ RDONLY = syscall.MS_RDONLY
+ NOSUID = syscall.MS_NOSUID
+ NODEV = syscall.MS_NODEV
+ NOEXEC = syscall.MS_NOEXEC
+ SYNCHRONOUS = syscall.MS_SYNCHRONOUS
+ DIRSYNC = syscall.MS_DIRSYNC
+ REMOUNT = syscall.MS_REMOUNT
+ MANDLOCK = syscall.MS_MANDLOCK
+ NOATIME = syscall.MS_NOATIME
+ NODIRATIME = syscall.MS_NODIRATIME
+ BIND = syscall.MS_BIND
+ RBIND = syscall.MS_BIND | syscall.MS_REC
+ PRIVATE = syscall.MS_PRIVATE
+ RELATIME = syscall.MS_RELATIME
+ STRICTATIME = syscall.MS_STRICTATIME
+)
diff --git a/pkg/mount/flags_unsupported.go b/pkg/mount/flags_unsupported.go
index c894efe5b1..e598354151 100644
--- a/pkg/mount/flags_unsupported.go
+++ b/pkg/mount/flags_unsupported.go
@@ -1,7 +1,22 @@
-// +build !linux !amd64
+// +build !linux,!freebsd linux,!amd64 freebsd,!cgo
package mount
-func parseOptions(options string) (int, string) {
- panic("Not implemented")
-}
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NOATIME = 0
+ NODEV = 0
+ NODIRATIME = 0
+ NOEXEC = 0
+ NOSUID = 0
+ PRIVATE = 0
+ RBIND = 0
+ RELATIME = 0
+ RELATIVE = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ SYNCHRONOUS = 0
+ RDONLY = 0
+)
diff --git a/pkg/mount/mount.go b/pkg/mount/mount.go
index 3860b975bd..5ca731601f 100644
--- a/pkg/mount/mount.go
+++ b/pkg/mount/mount.go
@@ -29,8 +29,11 @@ func Mounted(mountpoint string) (bool, error) {
// the target is not mounted
// Options must be specified as fstab style
func Mount(device, target, mType, options string) error {
- if mounted, err := Mounted(target); err != nil || mounted {
- return err
+ flag, _ := parseOptions(options)
+ if flag&REMOUNT != REMOUNT {
+ if mounted, err := Mounted(target); err != nil || mounted {
+ return err
+ }
}
return ForceMount(device, target, mType, options)
}
diff --git a/pkg/mount/mount_test.go b/pkg/mount/mount_test.go
index 6edc31d410..5c7f1b86a0 100644
--- a/pkg/mount/mount_test.go
+++ b/pkg/mount/mount_test.go
@@ -3,12 +3,11 @@ package mount
import (
"os"
"path"
- "syscall"
"testing"
)
func TestMountOptionsParsing(t *testing.T) {
- options := "bind,ro,size=10k"
+ options := "noatime,ro,size=10k"
flag, data := parseOptions(options)
@@ -16,7 +15,7 @@ func TestMountOptionsParsing(t *testing.T) {
t.Fatalf("Expected size=10 got %s", data)
}
- expectedFlag := syscall.MS_BIND | syscall.MS_RDONLY
+ expectedFlag := NOATIME | RDONLY
if flag != expectedFlag {
t.Fatalf("Expected %d got %d", expectedFlag, flag)
@@ -31,10 +30,15 @@ func TestMounted(t *testing.T) {
defer os.RemoveAll(tmp)
var (
- sourcePath = path.Join(tmp, "sourcefile.txt")
- targetPath = path.Join(tmp, "targetfile.txt")
+ sourceDir = path.Join(tmp, "source")
+ targetDir = path.Join(tmp, "target")
+ sourcePath = path.Join(sourceDir, "file.txt")
+ targetPath = path.Join(targetDir, "file.txt")
)
+ os.Mkdir(sourceDir, 0777)
+ os.Mkdir(targetDir, 0777)
+
f, err := os.Create(sourcePath)
if err != nil {
t.Fatal(err)
@@ -48,23 +52,23 @@ func TestMounted(t *testing.T) {
}
f.Close()
- if err := Mount(sourcePath, targetPath, "none", "bind,rw"); err != nil {
+ if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
t.Fatal(err)
}
defer func() {
- if err := Unmount(targetPath); err != nil {
+ if err := Unmount(targetDir); err != nil {
t.Fatal(err)
}
}()
- mounted, err := Mounted(targetPath)
+ mounted, err := Mounted(targetDir)
if err != nil {
t.Fatal(err)
}
if !mounted {
- t.Fatalf("Expected %s to be mounted", targetPath)
+ t.Fatalf("Expected %s to be mounted", targetDir)
}
- if _, err := os.Stat(targetPath); err != nil {
+ if _, err := os.Stat(targetDir); err != nil {
t.Fatal(err)
}
}
@@ -77,10 +81,15 @@ func TestMountReadonly(t *testing.T) {
defer os.RemoveAll(tmp)
var (
- sourcePath = path.Join(tmp, "sourcefile.txt")
- targetPath = path.Join(tmp, "targetfile.txt")
+ sourceDir = path.Join(tmp, "source")
+ targetDir = path.Join(tmp, "target")
+ sourcePath = path.Join(sourceDir, "file.txt")
+ targetPath = path.Join(targetDir, "file.txt")
)
+ os.Mkdir(sourceDir, 0777)
+ os.Mkdir(targetDir, 0777)
+
f, err := os.Create(sourcePath)
if err != nil {
t.Fatal(err)
@@ -94,11 +103,11 @@ func TestMountReadonly(t *testing.T) {
}
f.Close()
- if err := Mount(sourcePath, targetPath, "none", "bind,ro"); err != nil {
+ if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil {
t.Fatal(err)
}
defer func() {
- if err := Unmount(targetPath); err != nil {
+ if err := Unmount(targetDir); err != nil {
t.Fatal(err)
}
}()
@@ -108,3 +117,21 @@ func TestMountReadonly(t *testing.T) {
t.Fatal("Should not be able to open a ro file as rw")
}
}
+
+func TestGetMounts(t *testing.T) {
+ mounts, err := GetMounts()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ root := false
+ for _, entry := range mounts {
+ if entry.Mountpoint == "/" {
+ root = true
+ }
+ }
+
+ if !root {
+ t.Fatal("/ should be mounted at least")
+ }
+}
diff --git a/pkg/mount/mounter_freebsd.go b/pkg/mount/mounter_freebsd.go
new file mode 100644
index 0000000000..bb870e6f59
--- /dev/null
+++ b/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,59 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+ out := make([]C.struct_iovec, len(options))
+ for i, option := range options {
+ out[i].iov_base = unsafe.Pointer(C.CString(option))
+ out[i].iov_len = C.size_t(len(option) + 1)
+ }
+ return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ isNullFS := false
+
+ xs := strings.Split(data, ",")
+ for _, x := range xs {
+ if x == "bind" {
+ isNullFS = true
+ }
+ }
+
+ options := []string{"fspath", target}
+ if isNullFS {
+ options = append(options, "fstype", "nullfs", "target", device)
+ } else {
+ options = append(options, "fstype", mType, "from", device)
+ }
+ rawOptions := allocateIOVecs(options)
+ for _, rawOption := range rawOptions {
+ defer C.free(rawOption.iov_base)
+ }
+
+ if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+ reason := C.GoString(C.strerror(*C.__error()))
+ return fmt.Errorf("Failed to call nmount: %s", reason)
+ }
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return syscall.Unmount(target, flag)
+}
diff --git a/pkg/mount/mounter_unsupported.go b/pkg/mount/mounter_unsupported.go
index ee27b35f89..06f2ac00b2 100644
--- a/pkg/mount/mounter_unsupported.go
+++ b/pkg/mount/mounter_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux !amd64
+// +build !linux,!freebsd linux,!amd64 freebsd,!cgo
package mount
diff --git a/pkg/mount/mountinfo.go b/pkg/mount/mountinfo.go
index 32996f05c8..78b83ced4a 100644
--- a/pkg/mount/mountinfo.go
+++ b/pkg/mount/mountinfo.go
@@ -1,79 +1,7 @@
package mount
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strings"
-)
-
-const (
- /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
- (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
-
- (1) mount ID: unique identifier of the mount (may be reused after umount)
- (2) parent ID: ID of parent (or of self for the top of the mount tree)
- (3) major:minor: value of st_dev for files on filesystem
- (4) root: root of the mount within the filesystem
- (5) mount point: mount point relative to the process's root
- (6) mount options: per mount options
- (7) optional fields: zero or more fields of the form "tag[:value]"
- (8) separator: marks the end of the optional fields
- (9) filesystem type: name of filesystem of the form "type[.subtype]"
- (10) mount source: filesystem specific information or "none"
- (11) super options: per super block options*/
- mountinfoFormat = "%d %d %d:%d %s %s %s "
-)
-
type MountInfo struct {
Id, Parent, Major, Minor int
Root, Mountpoint, Opts string
Fstype, Source, VfsOpts string
}
-
-// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts
-func parseMountTable() ([]*MountInfo, error) {
- f, err := os.Open("/proc/self/mountinfo")
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- return parseInfoFile(f)
-}
-
-func parseInfoFile(r io.Reader) ([]*MountInfo, error) {
- var (
- s = bufio.NewScanner(r)
- out = []*MountInfo{}
- )
-
- for s.Scan() {
- if err := s.Err(); err != nil {
- return nil, err
- }
-
- var (
- p = &MountInfo{}
- text = s.Text()
- )
-
- if _, err := fmt.Sscanf(text, mountinfoFormat,
- &p.Id, &p.Parent, &p.Major, &p.Minor,
- &p.Root, &p.Mountpoint, &p.Opts); err != nil {
- return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
- }
- // Safe as mountinfo encodes mountpoints with spaces as \040.
- index := strings.Index(text, " - ")
- postSeparatorFields := strings.Fields(text[index+3:])
- if len(postSeparatorFields) != 3 {
- return nil, fmt.Errorf("Error did not find 3 fields post '-' in '%s'", text)
- }
- p.Fstype = postSeparatorFields[0]
- p.Source = postSeparatorFields[1]
- p.VfsOpts = postSeparatorFields[2]
- out = append(out, p)
- }
- return out, nil
-}
diff --git a/pkg/mount/mountinfo_freebsd.go b/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 0000000000..a16bdb84f8
--- /dev/null
+++ b/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,38 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts
+func parseMountTable() ([]*MountInfo, error) {
+ var rawEntries *C.struct_statfs
+
+ count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+ if count == 0 {
+ return nil, fmt.Errorf("Failed to call getmntinfo")
+ }
+
+ var entries []C.struct_statfs
+ header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+ header.Cap = count
+ header.Len = count
+ header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+ var out []*MountInfo
+ for _, entry := range entries {
+ var mountinfo MountInfo
+ mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+ out = append(out, &mountinfo)
+ }
+ return out, nil
+}
diff --git a/pkg/mount/mountinfo_linux.go b/pkg/mount/mountinfo_linux.go
new file mode 100644
index 0000000000..01c954fff3
--- /dev/null
+++ b/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,73 @@
+package mount
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+const (
+ /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+ (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
+
+ (1) mount ID: unique identifier of the mount (may be reused after umount)
+ (2) parent ID: ID of parent (or of self for the top of the mount tree)
+ (3) major:minor: value of st_dev for files on filesystem
+ (4) root: root of the mount within the filesystem
+ (5) mount point: mount point relative to the process's root
+ (6) mount options: per mount options
+ (7) optional fields: zero or more fields of the form "tag[:value]"
+ (8) separator: marks the end of the optional fields
+ (9) filesystem type: name of filesystem of the form "type[.subtype]"
+ (10) mount source: filesystem specific information or "none"
+ (11) super options: per super block options*/
+ mountinfoFormat = "%d %d %d:%d %s %s %s "
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts
+func parseMountTable() ([]*MountInfo, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*MountInfo, error) {
+ var (
+ s = bufio.NewScanner(r)
+ out = []*MountInfo{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ var (
+ p = &MountInfo{}
+ text = s.Text()
+ )
+
+ if _, err := fmt.Sscanf(text, mountinfoFormat,
+ &p.Id, &p.Parent, &p.Major, &p.Minor,
+ &p.Root, &p.Mountpoint, &p.Opts); err != nil {
+ return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+ }
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ if len(postSeparatorFields) != 3 {
+ return nil, fmt.Errorf("Error did not find 3 fields post '-' in '%s'", text)
+ }
+ p.Fstype = postSeparatorFields[0]
+ p.Source = postSeparatorFields[1]
+ p.VfsOpts = postSeparatorFields[2]
+ out = append(out, p)
+ }
+ return out, nil
+}
diff --git a/pkg/mount/mountinfo_test.go b/pkg/mount/mountinfo_test_linux.go
index f2e3daa8b3..f2e3daa8b3 100644
--- a/pkg/mount/mountinfo_test.go
+++ b/pkg/mount/mountinfo_test_linux.go
diff --git a/pkg/mount/mountinfo_unsupported.go b/pkg/mount/mountinfo_unsupported.go
new file mode 100644
index 0000000000..352336b9a3
--- /dev/null
+++ b/pkg/mount/mountinfo_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux,!freebsd freebsd,!cgo
+
+package mount
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func parseMountTable() ([]*MountInfo, error) {
+ return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/pkg/networkfs/MAINTAINERS b/pkg/networkfs/MAINTAINERS
index ceeb0cfd18..e0f18f14f1 100644
--- a/pkg/networkfs/MAINTAINERS
+++ b/pkg/networkfs/MAINTAINERS
@@ -1 +1 @@
-Victor Vieux <victor.vieux@docker.com> (@vieux)
+Victor Vieux <vieux@docker.com> (@vieux)
diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go
index 257491f91b..da9c590675 100644
--- a/pkg/symlink/fs.go
+++ b/pkg/symlink/fs.go
@@ -13,8 +13,6 @@ const maxLoopCounter = 100
// FollowSymlink will follow an existing link and scope it to the root
// path provided.
func FollowSymlinkInScope(link, root string) (string, error) {
- prev := "/"
-
root, err := filepath.Abs(root)
if err != nil {
return "", err
@@ -25,10 +23,16 @@ func FollowSymlinkInScope(link, root string) (string, error) {
return "", err
}
+ if link == root {
+ return root, nil
+ }
+
if !strings.HasPrefix(filepath.Dir(link), root) {
return "", fmt.Errorf("%s is not within %s", link, root)
}
+ prev := "/"
+
for _, p := range strings.Split(link, "/") {
prev = filepath.Join(prev, p)
prev = filepath.Clean(prev)
diff --git a/pkg/sysinfo/MAINTAINERS b/pkg/sysinfo/MAINTAINERS
index 1e998f8ac1..68a97d2fc2 100644
--- a/pkg/sysinfo/MAINTAINERS
+++ b/pkg/sysinfo/MAINTAINERS
@@ -1 +1,2 @@
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
+Victor Vieux <vieux@docker.com> (@vieux)
diff --git a/pkg/system/MAINTAINERS b/pkg/system/MAINTAINERS
index 1e998f8ac1..68a97d2fc2 100644
--- a/pkg/system/MAINTAINERS
+++ b/pkg/system/MAINTAINERS
@@ -1 +1,2 @@
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
+Victor Vieux <vieux@docker.com> (@vieux)
diff --git a/pkg/tailfile/tailfile.go b/pkg/tailfile/tailfile.go
new file mode 100644
index 0000000000..2ffd36d258
--- /dev/null
+++ b/pkg/tailfile/tailfile.go
@@ -0,0 +1,61 @@
+package tailfile
+
+import (
+ "bytes"
+ "errors"
+ "os"
+)
+
+const blockSize = 1024
+
+var eol = []byte("\n")
+var ErrNonPositiveLinesNumber = errors.New("Lines number must be positive")
+
+//TailFile returns last n lines of file f
+func TailFile(f *os.File, n int) ([][]byte, error) {
+ if n <= 0 {
+ return nil, ErrNonPositiveLinesNumber
+ }
+ size, err := f.Seek(0, os.SEEK_END)
+ if err != nil {
+ return nil, err
+ }
+ block := -1
+ var data []byte
+ var cnt int
+ for {
+ var b []byte
+ step := int64(block * blockSize)
+ left := size + step // how many bytes to beginning
+ if left < 0 {
+ if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ b = make([]byte, blockSize+left)
+ if _, err := f.Read(b); err != nil {
+ return nil, err
+ }
+ data = append(b, data...)
+ break
+ } else {
+ b = make([]byte, blockSize)
+ if _, err := f.Seek(step, os.SEEK_END); err != nil {
+ return nil, err
+ }
+ if _, err := f.Read(b); err != nil {
+ return nil, err
+ }
+ data = append(b, data...)
+ }
+ cnt += bytes.Count(b, eol)
+ if cnt > n {
+ break
+ }
+ block--
+ }
+ lines := bytes.Split(data, eol)
+ if n < len(lines) {
+ return lines[len(lines)-n-1 : len(lines)-1], nil
+ }
+ return lines[:len(lines)-1], nil
+}
diff --git a/pkg/tailfile/tailfile_test.go b/pkg/tailfile/tailfile_test.go
new file mode 100644
index 0000000000..31217c036c
--- /dev/null
+++ b/pkg/tailfile/tailfile_test.go
@@ -0,0 +1,148 @@
+package tailfile
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+func TestTailFile(t *testing.T) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ testFile := []byte(`first line
+second line
+third line
+fourth line
+fifth line
+next first line
+next second line
+next third line
+next fourth line
+next fifth line
+last first line
+next first line
+next second line
+next third line
+next fourth line
+next fifth line
+next first line
+next second line
+next third line
+next fourth line
+next fifth line
+last second line
+last third line
+last fourth line
+last fifth line
+truncated line`)
+ if _, err := f.Write(testFile); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ t.Fatal(err)
+ }
+ expected := []string{"last fourth line", "last fifth line"}
+ res, err := TailFile(f, 2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, l := range res {
+ t.Logf("%s", l)
+ if expected[i] != string(l) {
+ t.Fatalf("Expected line %s, got %s", expected[i], l)
+ }
+ }
+}
+
+func TestTailFileManyLines(t *testing.T) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ testFile := []byte(`first line
+second line
+truncated line`)
+ if _, err := f.Write(testFile); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ t.Fatal(err)
+ }
+ expected := []string{"first line", "second line"}
+ res, err := TailFile(f, 10000)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, l := range res {
+ t.Logf("%s", l)
+ if expected[i] != string(l) {
+ t.Fatalf("Expected line %s, got %s", expected[i], l)
+ }
+ }
+}
+
+func TestTailEmptyFile(t *testing.T) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ res, err := TailFile(f, 10000)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(res) != 0 {
+ t.Fatal("Must be empty slice from empty file")
+ }
+}
+
+func TestTailNegativeN(t *testing.T) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ testFile := []byte(`first line
+second line
+truncated line`)
+ if _, err := f.Write(testFile); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber {
+ t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err)
+ }
+ if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber {
+ t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err)
+ }
+}
+
+func BenchmarkTail(b *testing.B) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ for i := 0; i < 10000; i++ {
+ if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if _, err := TailFile(f, 1000); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/pkg/testutils/testutils.go b/pkg/testutils/testutils.go
index 4655e5844d..9c664ff253 100644
--- a/pkg/testutils/testutils.go
+++ b/pkg/testutils/testutils.go
@@ -1,10 +1,15 @@
package testutils
import (
+ "math/rand"
"testing"
"time"
)
+const chars = "abcdefghijklmnopqrstuvwxyz" +
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+ "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
+
// Timeout calls f and waits for 100ms for it to complete.
// If it doesn't, it causes the tests to fail.
// t must be a valid testing context.
@@ -21,3 +26,12 @@ func Timeout(t *testing.T, f func()) {
case <-onDone:
}
}
+
+// RandomString returns random string of specified length
+func RandomString(length int) string {
+ res := make([]byte, length)
+ for i := 0; i < length; i++ {
+ res[i] = chars[rand.Intn(len(chars))]
+ }
+ return string(res)
+}
diff --git a/pkg/truncindex/truncindex.go b/pkg/truncindex/truncindex.go
new file mode 100644
index 0000000000..89aa88d6b7
--- /dev/null
+++ b/pkg/truncindex/truncindex.go
@@ -0,0 +1,106 @@
+package truncindex
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/tchap/go-patricia/patricia"
+)
+
+var (
+ ErrNoID = errors.New("prefix can't be empty")
+)
+
+func init() {
+ // Change patricia max prefix per node length,
+ // because our len(ID) always 64
+ patricia.MaxPrefixPerNode = 64
+}
+
+// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
+// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
+type TruncIndex struct {
+ sync.RWMutex
+ trie *patricia.Trie
+ ids map[string]struct{}
+}
+
+func NewTruncIndex(ids []string) (idx *TruncIndex) {
+ idx = &TruncIndex{
+ ids: make(map[string]struct{}),
+ trie: patricia.NewTrie(),
+ }
+ for _, id := range ids {
+ idx.addId(id)
+ }
+ return
+}
+
+func (idx *TruncIndex) addId(id string) error {
+ if strings.Contains(id, " ") {
+ return fmt.Errorf("Illegal character: ' '")
+ }
+ if id == "" {
+ return ErrNoID
+ }
+ if _, exists := idx.ids[id]; exists {
+ return fmt.Errorf("Id already exists: '%s'", id)
+ }
+ idx.ids[id] = struct{}{}
+ if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
+ return fmt.Errorf("Failed to insert id: %s", id)
+ }
+ return nil
+}
+
+func (idx *TruncIndex) Add(id string) error {
+ idx.Lock()
+ defer idx.Unlock()
+ if err := idx.addId(id); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (idx *TruncIndex) Delete(id string) error {
+ idx.Lock()
+ defer idx.Unlock()
+ if _, exists := idx.ids[id]; !exists || id == "" {
+ return fmt.Errorf("No such id: '%s'", id)
+ }
+ delete(idx.ids, id)
+ if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
+ return fmt.Errorf("No such id: '%s'", id)
+ }
+ return nil
+}
+
+func (idx *TruncIndex) Get(s string) (string, error) {
+ idx.RLock()
+ defer idx.RUnlock()
+ var (
+ id string
+ )
+ if s == "" {
+ return "", ErrNoID
+ }
+ subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
+ if id != "" {
+ // we haven't found the ID if there are two or more IDs
+ id = ""
+ return fmt.Errorf("we've found two entries")
+ }
+ id = string(prefix)
+ return nil
+ }
+
+ if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
+ return "", fmt.Errorf("No such id: %s", s)
+ }
+ if id != "" {
+ return id, nil
+ }
+ return "", fmt.Errorf("No such id: %s", s)
+}
diff --git a/pkg/truncindex/truncindex_test.go b/pkg/truncindex/truncindex_test.go
new file mode 100644
index 0000000000..f88d667d5a
--- /dev/null
+++ b/pkg/truncindex/truncindex_test.go
@@ -0,0 +1,401 @@
+package truncindex
+
+import (
+ "math/rand"
+ "testing"
+
+ "github.com/dotcloud/docker/utils"
+)
+
// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
// NOTE: the assertions below are strictly order-dependent — each Add/Delete
// changes which prefixes are ambiguous for the assertions that follow.
func TestTruncIndex(t *testing.T) {
	ids := []string{}
	index := NewTruncIndex(ids)
	// Get on an empty index
	if _, err := index.Get("foobar"); err == nil {
		t.Fatal("Get on an empty index should return an error")
	}

	// Spaces should be illegal in an id
	if err := index.Add("I have a space"); err == nil {
		t.Fatalf("Adding an id with ' ' should return an error")
	}

	id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96"
	// Add an id
	if err := index.Add(id); err != nil {
		t.Fatal(err)
	}

	// Add an empty id (should fail)
	if err := index.Add(""); err == nil {
		t.Fatalf("Adding an empty id should return an error")
	}

	// Get a non-existing id
	assertIndexGet(t, index, "abracadabra", "", true)
	// Get an empty id
	assertIndexGet(t, index, "", "", true)
	// Get the exact id
	assertIndexGet(t, index, id, id, false)
	// The first letter should match
	assertIndexGet(t, index, id[:1], id, false)
	// The first half should match
	assertIndexGet(t, index, id[:len(id)/2], id, false)
	// The second half should NOT match
	assertIndexGet(t, index, id[len(id)/2:], "", true)

	// id2 shares its first 6 characters with id, so prefixes of length <= 6
	// become ambiguous once both are present.
	id2 := id[:6] + "blabla"
	// Add an id
	if err := index.Add(id2); err != nil {
		t.Fatal(err)
	}
	// Both exact IDs should work
	assertIndexGet(t, index, id, id, false)
	assertIndexGet(t, index, id2, id2, false)

	// 6 characters or less should conflict
	assertIndexGet(t, index, id[:6], "", true)
	assertIndexGet(t, index, id[:4], "", true)
	assertIndexGet(t, index, id[:1], "", true)

	// 7 characters should NOT conflict
	assertIndexGet(t, index, id[:7], id, false)
	assertIndexGet(t, index, id2[:7], id2, false)

	// Deleting a non-existing id should return an error
	if err := index.Delete("non-existing"); err == nil {
		t.Fatalf("Deleting a non-existing id should return an error")
	}

	// Deleting an empty id should return an error
	if err := index.Delete(""); err == nil {
		t.Fatal("Deleting an empty id should return an error")
	}

	// Deleting id2 should remove conflicts
	if err := index.Delete(id2); err != nil {
		t.Fatal(err)
	}
	// id2 should no longer work
	assertIndexGet(t, index, id2, "", true)
	assertIndexGet(t, index, id2[:7], "", true)
	assertIndexGet(t, index, id2[:11], "", true)

	// conflicts between id and id2 should be gone
	assertIndexGet(t, index, id[:6], id, false)
	assertIndexGet(t, index, id[:4], id, false)
	assertIndexGet(t, index, id[:1], id, false)

	// non-conflicting substrings should still not conflict
	assertIndexGet(t, index, id[:7], id, false)
	assertIndexGet(t, index, id[:15], id, false)
	assertIndexGet(t, index, id, id, false)
}
+
+func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) {
+ if result, err := index.Get(input); err != nil && !expectError {
+ t.Fatalf("Unexpected error getting '%s': %s", input, err)
+ } else if err == nil && expectError {
+ t.Fatalf("Getting '%s' should return an error, not '%s'", input, result)
+ } else if result != expectedResult {
+ t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult)
+ }
+}
+
+func BenchmarkTruncIndexAdd100(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 100; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexAdd250(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 250; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexAdd500(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 500; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexGet100(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 100; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexGet250(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 250; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexGet500(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 500; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexDelete100(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 100; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.StartTimer()
+ for _, id := range testSet {
+ if err := index.Delete(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexDelete250(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 250; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.StartTimer()
+ for _, id := range testSet {
+ if err := index.Delete(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexDelete500(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 500; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.StartTimer()
+ for _, id := range testSet {
+ if err := index.Delete(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexNew100(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 100; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ NewTruncIndex(testSet)
+ }
+}
+
+func BenchmarkTruncIndexNew250(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 250; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ NewTruncIndex(testSet)
+ }
+}
+
+func BenchmarkTruncIndexNew500(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 500; i++ {
+ testSet = append(testSet, utils.GenerateRandomID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ NewTruncIndex(testSet)
+ }
+}
+
+func BenchmarkTruncIndexAddGet100(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 500; i++ {
+ id := utils.GenerateRandomID()
+ testSet = append(testSet, id)
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexAddGet250(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 500; i++ {
+ id := utils.GenerateRandomID()
+ testSet = append(testSet, id)
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexAddGet500(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 500; i++ {
+ id := utils.GenerateRandomID()
+ testSet = append(testSet, id)
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
diff --git a/runconfig/config_test.go b/runconfig/config_test.go
index b426253b9e..3b57b0a603 100644
--- a/runconfig/config_test.go
+++ b/runconfig/config_test.go
@@ -1,6 +1,7 @@
package runconfig
import (
+ "fmt"
"strings"
"testing"
@@ -20,6 +21,18 @@ func mustParse(t *testing.T, args string) (*Config, *HostConfig) {
return config, hostConfig
}
// compareRandomizedStrings reports whether the pair (a, b) matches (c, d)
// in either order, i.e. (a == c && b == d) || (a == d && b == c).
// Map iteration order is randomized, so callers cannot rely on a fixed
// ordering of the two produced values.
// Fix: the failure error now includes the actual and expected values so a
// failing test says what differed instead of just "strings don't match".
func compareRandomizedStrings(a, b, c, d string) error {
	if a == c && b == d {
		return nil
	}
	if a == d && b == c {
		return nil
	}
	return fmt.Errorf("strings don't match: got (%q, %q), want (%q, %q) in either order", a, b, c, d)
}
+
func TestParseRunLinks(t *testing.T) {
if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
@@ -98,11 +111,11 @@ func TestParseRunVolumes(t *testing.T) {
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
}
- if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" {
+ if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp", "/hostVar:/containerVar") != nil {
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
}
- if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" {
+ if _, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], "/hostTmp:/containerTmp:ro", "/hostVar:/containerVar:rw") != nil {
t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
}
diff --git a/runconfig/parse.go b/runconfig/parse.go
index 5bb065421c..dfd9f4ddd3 100644
--- a/runconfig/parse.go
+++ b/runconfig/parse.go
@@ -65,14 +65,14 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container")
flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
flCpuset = cmd.String([]string{"-cpuset"}, "", "CPUs in which to allow execution (0-3, 0,1)")
- flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:<name|id>': reuses another container network stack\n'host': use the host network stack inside the contaner. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.")
+ flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container\n'bridge': creates a new network stack for the container on the docker bridge\n'none': no networking for this container\n'container:<name|id>': reuses another container network stack\n'host': use the host network stack inside the container. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.")
// For documentation purpose
- _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify all received signal to the process (even in non-tty mode)")
+ _ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxify received signals to the process (even in non-tty mode). SIGCHLD is not proxied.")
_ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
)
cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to stdin, stdout or stderr.")
- cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
+ cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g., from the host: -v /host:/container, from docker: -v /container)")
cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)")
cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables")
cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a line delimited file of ENV variables")
@@ -132,8 +132,8 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
// add any bind targets to the list of container volumes
for bind := range flVolumes.GetMap() {
if arr := strings.Split(bind, ":"); len(arr) > 1 {
- if arr[0] == "/" {
- return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
+ if arr[1] == "/" {
+ return nil, nil, cmd, fmt.Errorf("Invalid bind mount: destination can't be '/'")
}
// after creating the bind mount we want to delete it from the flVolumes values because
// we do not want bind mounts being committed to image configs
diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go
index 88fa0dd542..94cea493f0 100644
--- a/runconfig/parse_test.go
+++ b/runconfig/parse_test.go
@@ -25,26 +25,26 @@ func TestParseLxcConfOpt(t *testing.T) {
func TestNetHostname(t *testing.T) {
if _, _, _, err := Parse([]string{"-h=name", "img", "cmd"}, nil); err != nil {
- t.Fatal("Unexpected error: %s", err)
+ t.Fatalf("Unexpected error: %s", err)
}
if _, _, _, err := Parse([]string{"--net=host", "img", "cmd"}, nil); err != nil {
- t.Fatal("Unexpected error: %s", err)
+ t.Fatalf("Unexpected error: %s", err)
}
if _, _, _, err := Parse([]string{"-h=name", "--net=bridge", "img", "cmd"}, nil); err != nil {
- t.Fatal("Unexpected error: %s", err)
+ t.Fatalf("Unexpected error: %s", err)
}
if _, _, _, err := Parse([]string{"-h=name", "--net=none", "img", "cmd"}, nil); err != nil {
- t.Fatal("Unexpected error: %s", err)
+ t.Fatalf("Unexpected error: %s", err)
}
if _, _, _, err := Parse([]string{"-h=name", "--net=host", "img", "cmd"}, nil); err != ErrConflictNetworkHostname {
- t.Fatal("Expected error ErrConflictNetworkHostname, got: %s", err)
+ t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err)
}
if _, _, _, err := Parse([]string{"-h=name", "--net=container:other", "img", "cmd"}, nil); err != ErrConflictNetworkHostname {
- t.Fatal("Expected error ErrConflictNetworkHostname, got: %s", err)
+ t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err)
}
}
diff --git a/server/MAINTAINERS b/server/MAINTAINERS
index 3564d3db47..e35518a6de 100644
--- a/server/MAINTAINERS
+++ b/server/MAINTAINERS
@@ -1,2 +1,2 @@
Solomon Hykes <solomon@docker.com> (@shykes)
-Victor Vieux <victor.vieux@docker.com> (@vieux) \ No newline at end of file
+Victor Vieux <vieux@docker.com> (@vieux)
diff --git a/server/buildfile.go b/server/buildfile.go
index 5b94c9423a..71fed660b2 100644
--- a/server/buildfile.go
+++ b/server/buildfile.go
@@ -17,6 +17,7 @@ import (
"sort"
"strings"
"syscall"
+ "time"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/daemon"
@@ -696,7 +697,7 @@ func (b *buildFile) run(c *daemon.Container) error {
}
// Wait for it to finish
- if ret := c.Wait(); ret != 0 {
+ if ret, _ := c.State.WaitStop(-1 * time.Second); ret != 0 {
err := &utils.JSONError{
Message: fmt.Sprintf("The command %v returned a non-zero code: %d", b.config.Cmd, ret),
Code: ret,
@@ -751,7 +752,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
autoConfig := *b.config
autoConfig.Cmd = autoCmd
// Commit the container
- image, err := b.daemon.Commit(container, "", "", "", b.maintainer, &autoConfig)
+ image, err := b.daemon.Commit(container, "", "", "", b.maintainer, true, &autoConfig)
if err != nil {
return err
}
@@ -761,7 +762,7 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
}
// Long lines can be split with a backslash
-var lineContinuation = regexp.MustCompile(`\s*\\\s*\n`)
+var lineContinuation = regexp.MustCompile(`\\\s*\n`)
func (b *buildFile) Build(context io.Reader) (string, error) {
tmpdirPath, err := ioutil.TempDir("", "docker-build")
diff --git a/server/server.go b/server/server.go
index d28059fe56..3e6de00c1e 100644
--- a/server/server.go
+++ b/server/server.go
@@ -22,6 +22,7 @@
package server
import (
+ "bytes"
"encoding/json"
"fmt"
"io"
@@ -52,6 +53,7 @@ import (
"github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/pkg/graphdb"
"github.com/dotcloud/docker/pkg/signal"
+ "github.com/dotcloud/docker/pkg/tailfile"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/runconfig"
"github.com/dotcloud/docker/utils"
@@ -410,6 +412,7 @@ func (srv *Server) exportImage(eng *engine.Engine, name, tempdir string) error {
return err
}
job := eng.Job("image_inspect", n)
+ job.SetenvBool("raw", true)
job.Stdout.Add(json)
if err := job.Run(); err != nil {
return err
@@ -779,6 +782,7 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
v.Set("IndexServerAddress", registry.IndexServerAddress())
v.Set("InitSha1", dockerversion.INITSHA1)
v.Set("InitPath", initPath)
+ v.SetList("Sockets", srv.daemon.Sockets)
if _, err := v.WriteTo(job.Stdout); err != nil {
return job.Error(err)
}
@@ -1036,7 +1040,7 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
return job.Error(err)
}
- img, err := srv.daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
+ img, err := srv.daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig)
if err != nil {
return job.Error(err)
}
@@ -1236,9 +1240,10 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName
break
}
if !success {
- out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, %s", img.Tag, localName, lastErr), nil))
+ err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, localName, lastErr)
+ out.Write(sf.FormatProgress(utils.TruncateID(img.ID), err.Error(), nil))
if parallel {
- errors <- fmt.Errorf("Could not find repository on any of the indexed registries.")
+ errors <- err
return
}
}
@@ -2043,23 +2048,20 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
if container == nil {
return job.Errorf("No such container: %s", name)
}
+
+ if container.State.IsRunning() {
+ return job.Errorf("Container already started")
+ }
+
// If no environment was set, then no hostconfig was passed.
if len(job.Environ()) > 0 {
hostConfig := runconfig.ContainerHostConfigFromJob(job)
// Validate the HostConfig binds. Make sure that:
- // 1) the source of a bind mount isn't /
- // The bind mount "/:/foo" isn't allowed.
- // 2) Check that the source exists
- // The source to be bind mounted must exist.
+ // the source exists
for _, bind := range hostConfig.Binds {
splitBind := strings.Split(bind, ":")
source := splitBind[0]
- // refuse to bind mount "/" to the container
- if source == "/" {
- return job.Errorf("Invalid bind mount '%s' : source can't be '/'", bind)
- }
-
// ensure the source exists on the host
_, err := os.Stat(source)
if err != nil && os.IsNotExist(err) {
@@ -2096,6 +2098,9 @@ func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
t = job.GetenvInt("t")
}
if container := srv.daemon.Get(name); container != nil {
+ if !container.State.IsRunning() {
+ return job.Errorf("Container already stopped")
+ }
if err := container.Stop(int(t)); err != nil {
return job.Errorf("Cannot stop container %s: %s\n", name, err)
}
@@ -2112,7 +2117,7 @@ func (srv *Server) ContainerWait(job *engine.Job) engine.Status {
}
name := job.Args[0]
if container := srv.daemon.Get(name); container != nil {
- status := container.Wait()
+ status, _ := container.State.WaitStop(-1 * time.Second)
job.Printf("%d\n", status)
return engine.StatusOK
}
@@ -2150,8 +2155,10 @@ func (srv *Server) ContainerLogs(job *engine.Job) engine.Status {
name = job.Args[0]
stdout = job.GetenvBool("stdout")
stderr = job.GetenvBool("stderr")
+ tail = job.Getenv("tail")
follow = job.GetenvBool("follow")
times = job.GetenvBool("timestamps")
+ lines = -1
format string
)
if !(stdout || stderr) {
@@ -2160,6 +2167,9 @@ func (srv *Server) ContainerLogs(job *engine.Job) engine.Status {
if times {
format = time.StampMilli
}
+ if tail == "" {
+ tail = "all"
+ }
container := srv.daemon.Get(name)
if container == nil {
return job.Errorf("No such container: %s", name)
@@ -2187,25 +2197,47 @@ func (srv *Server) ContainerLogs(job *engine.Job) engine.Status {
} else if err != nil {
utils.Errorf("Error reading logs (json): %s", err)
} else {
- dec := json.NewDecoder(cLog)
- for {
- l := &utils.JSONLog{}
-
- if err := dec.Decode(l); err == io.EOF {
- break
- } else if err != nil {
- utils.Errorf("Error streaming logs: %s", err)
- break
- }
- logLine := l.Log
- if times {
- logLine = fmt.Sprintf("[%s] %s", l.Created.Format(format), logLine)
+ if tail != "all" {
+ var err error
+ lines, err = strconv.Atoi(tail)
+ if err != nil {
+ utils.Errorf("Failed to parse tail %s, error: %v, show all logs", err)
+ lines = -1
}
- if l.Stream == "stdout" && stdout {
- fmt.Fprintf(job.Stdout, "%s", logLine)
+ }
+ if lines != 0 {
+ if lines > 0 {
+ f := cLog.(*os.File)
+ ls, err := tailfile.TailFile(f, lines)
+ if err != nil {
+ return job.Error(err)
+ }
+ tmp := bytes.NewBuffer([]byte{})
+ for _, l := range ls {
+ fmt.Fprintf(tmp, "%s\n", l)
+ }
+ cLog = tmp
}
- if l.Stream == "stderr" && stderr {
- fmt.Fprintf(job.Stderr, "%s", logLine)
+ dec := json.NewDecoder(cLog)
+ for {
+ l := &utils.JSONLog{}
+
+ if err := dec.Decode(l); err == io.EOF {
+ break
+ } else if err != nil {
+ utils.Errorf("Error streaming logs: %s", err)
+ break
+ }
+ logLine := l.Log
+ if times {
+ logLine = fmt.Sprintf("[%s] %s", l.Created.Format(format), logLine)
+ }
+ if l.Stream == "stdout" && stdout {
+ fmt.Fprintf(job.Stdout, "%s", logLine)
+ }
+ if l.Stream == "stderr" && stderr {
+ fmt.Fprintf(job.Stderr, "%s", logLine)
+ }
}
}
}
@@ -2325,7 +2357,7 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
// If we are in stdinonce mode, wait for the process to end
// otherwise, simply return
if container.Config.StdinOnce && !container.Config.Tty {
- container.Wait()
+ container.State.WaitStop(-1 * time.Second)
}
}
return engine.StatusOK
diff --git a/utils/stdcopy.go b/utils/stdcopy.go
index ab8759f4ee..bb9d632661 100644
--- a/utils/stdcopy.go
+++ b/utils/stdcopy.go
@@ -82,13 +82,17 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
for nr < StdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
- // Don't exit on EOF, because we can have some more input
- if er != nil && er != io.EOF {
- return 0, er
- }
nr += nr2
- if nr == 0 {
- return written, nil
+ if er == io.EOF {
+ if nr < StdWriterPrefixLen {
+ Debugf("Corrupted prefix: %v", buf[:nr])
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ Debugf("Error reading header: %s", er)
+ return 0, er
}
}
@@ -123,21 +127,22 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
for nr < frameSize+StdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
+ nr += nr2
if er == io.EOF {
- return written, nil
+ if nr < frameSize+StdWriterPrefixLen {
+ Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
+ return written, nil
+ }
+ break
}
if er != nil {
Debugf("Error reading frame: %s", er)
return 0, er
}
- nr += nr2
}
// Write the retrieved frame (without header)
nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
- if nw > 0 {
- written += int64(nw)
- }
if ew != nil {
Debugf("Error writing frame: %s", ew)
return 0, ew
@@ -147,6 +152,7 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
Debugf("Error Short Write: (%d on %d)", nw, frameSize)
return 0, io.ErrShortWrite
}
+ written += int64(nw)
// Move the rest of the buffer to the beginning
copy(buf, buf[frameSize+StdWriterPrefixLen:])
diff --git a/utils/utils.go b/utils/utils.go
index 0495dc6fa8..333468d361 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -9,7 +9,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "index/suffixarray"
"io"
"io/ioutil"
"net/http"
@@ -397,100 +396,6 @@ func GetTotalUsedFds() int {
return -1
}
-// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
-// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
-type TruncIndex struct {
- sync.RWMutex
- index *suffixarray.Index
- ids map[string]bool
- bytes []byte
-}
-
-func NewTruncIndex(ids []string) (idx *TruncIndex) {
- idx = &TruncIndex{
- ids: make(map[string]bool),
- bytes: []byte{' '},
- }
- for _, id := range ids {
- idx.ids[id] = true
- idx.bytes = append(idx.bytes, []byte(id+" ")...)
- }
- idx.index = suffixarray.New(idx.bytes)
- return
-}
-
-func (idx *TruncIndex) addId(id string) error {
- if strings.Contains(id, " ") {
- return fmt.Errorf("Illegal character: ' '")
- }
- if _, exists := idx.ids[id]; exists {
- return fmt.Errorf("Id already exists: %s", id)
- }
- idx.ids[id] = true
- idx.bytes = append(idx.bytes, []byte(id+" ")...)
- return nil
-}
-
-func (idx *TruncIndex) Add(id string) error {
- idx.Lock()
- defer idx.Unlock()
- if err := idx.addId(id); err != nil {
- return err
- }
- idx.index = suffixarray.New(idx.bytes)
- return nil
-}
-
-func (idx *TruncIndex) AddWithoutSuffixarrayUpdate(id string) error {
- idx.Lock()
- defer idx.Unlock()
- return idx.addId(id)
-}
-
-func (idx *TruncIndex) UpdateSuffixarray() {
- idx.Lock()
- defer idx.Unlock()
- idx.index = suffixarray.New(idx.bytes)
-}
-
-func (idx *TruncIndex) Delete(id string) error {
- idx.Lock()
- defer idx.Unlock()
- if _, exists := idx.ids[id]; !exists {
- return fmt.Errorf("No such id: %s", id)
- }
- before, after, err := idx.lookup(id)
- if err != nil {
- return err
- }
- delete(idx.ids, id)
- idx.bytes = append(idx.bytes[:before], idx.bytes[after:]...)
- idx.index = suffixarray.New(idx.bytes)
- return nil
-}
-
-func (idx *TruncIndex) lookup(s string) (int, int, error) {
- offsets := idx.index.Lookup([]byte(" "+s), -1)
- //log.Printf("lookup(%s): %v (index bytes: '%s')\n", s, offsets, idx.index.Bytes())
- if offsets == nil || len(offsets) == 0 || len(offsets) > 1 {
- return -1, -1, fmt.Errorf("No such id: %s", s)
- }
- offsetBefore := offsets[0] + 1
- offsetAfter := offsetBefore + strings.Index(string(idx.bytes[offsetBefore:]), " ")
- return offsetBefore, offsetAfter, nil
-}
-
-func (idx *TruncIndex) Get(s string) (string, error) {
- idx.RLock()
- defer idx.RUnlock()
- before, after, err := idx.lookup(s)
- //log.Printf("Get(%s) bytes=|%s| before=|%d| after=|%d|\n", s, idx.bytes, before, after)
- if err != nil {
- return "", err
- }
- return string(idx.bytes[before:after]), err
-}
-
// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
diff --git a/utils/utils_test.go b/utils/utils_test.go
index 63d722ed07..049c0e30a2 100644
--- a/utils/utils_test.go
+++ b/utils/utils_test.go
@@ -135,108 +135,6 @@ func TestRaceWriteBroadcaster(t *testing.T) {
<-c
}
-// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
-func TestTruncIndex(t *testing.T) {
- ids := []string{}
- index := NewTruncIndex(ids)
- // Get on an empty index
- if _, err := index.Get("foobar"); err == nil {
- t.Fatal("Get on an empty index should return an error")
- }
-
- // Spaces should be illegal in an id
- if err := index.Add("I have a space"); err == nil {
- t.Fatalf("Adding an id with ' ' should return an error")
- }
-
- id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96"
- // Add an id
- if err := index.Add(id); err != nil {
- t.Fatal(err)
- }
- // Get a non-existing id
- assertIndexGet(t, index, "abracadabra", "", true)
- // Get the exact id
- assertIndexGet(t, index, id, id, false)
- // The first letter should match
- assertIndexGet(t, index, id[:1], id, false)
- // The first half should match
- assertIndexGet(t, index, id[:len(id)/2], id, false)
- // The second half should NOT match
- assertIndexGet(t, index, id[len(id)/2:], "", true)
-
- id2 := id[:6] + "blabla"
- // Add an id
- if err := index.Add(id2); err != nil {
- t.Fatal(err)
- }
- // Both exact IDs should work
- assertIndexGet(t, index, id, id, false)
- assertIndexGet(t, index, id2, id2, false)
-
- // 6 characters or less should conflict
- assertIndexGet(t, index, id[:6], "", true)
- assertIndexGet(t, index, id[:4], "", true)
- assertIndexGet(t, index, id[:1], "", true)
-
- // 7 characters should NOT conflict
- assertIndexGet(t, index, id[:7], id, false)
- assertIndexGet(t, index, id2[:7], id2, false)
-
- // Deleting a non-existing id should return an error
- if err := index.Delete("non-existing"); err == nil {
- t.Fatalf("Deleting a non-existing id should return an error")
- }
-
- // Deleting id2 should remove conflicts
- if err := index.Delete(id2); err != nil {
- t.Fatal(err)
- }
- // id2 should no longer work
- assertIndexGet(t, index, id2, "", true)
- assertIndexGet(t, index, id2[:7], "", true)
- assertIndexGet(t, index, id2[:11], "", true)
-
- // conflicts between id and id2 should be gone
- assertIndexGet(t, index, id[:6], id, false)
- assertIndexGet(t, index, id[:4], id, false)
- assertIndexGet(t, index, id[:1], id, false)
-
- // non-conflicting substrings should still not conflict
- assertIndexGet(t, index, id[:7], id, false)
- assertIndexGet(t, index, id[:15], id, false)
- assertIndexGet(t, index, id, id, false)
-}
-
-func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) {
- if result, err := index.Get(input); err != nil && !expectError {
- t.Fatalf("Unexpected error getting '%s': %s", input, err)
- } else if err == nil && expectError {
- t.Fatalf("Getting '%s' should return an error", input)
- } else if result != expectedResult {
- t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult)
- }
-}
-
-func BenchmarkTruncIndexAdd(b *testing.B) {
- ids := []string{"banana", "bananaa", "bananab"}
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- index := NewTruncIndex([]string{})
- for _, id := range ids {
- index.Add(id)
- }
- }
-}
-
-func BenchmarkTruncIndexNew(b *testing.B) {
- ids := []string{"banana", "bananaa", "bananab"}
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- NewTruncIndex(ids)
- }
-}
-
func assertKernelVersion(t *testing.T, a, b *KernelVersionInfo, result int) {
if r := CompareKernelVersion(a, b); r != result {
t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
diff --git a/vendor/src/github.com/docker/libcontainer/README.md b/vendor/src/github.com/docker/libcontainer/README.md
index 068c66e6cf..ee14a57ce1 100644
--- a/vendor/src/github.com/docker/libcontainer/README.md
+++ b/vendor/src/github.com/docker/libcontainer/README.md
@@ -1,40 +1,49 @@
## libcontainer - reference implementation for containers
+### Note on API changes:
+
+Please bear with us while we work on making the libcontainer API stable and something that we can support long term. We are currently discussing the API with the community, therefore, if you currently depend on libcontainer please pin your dependency at a specific tag or commit id. Please join the discussion and help shape the API.
+
#### Background
-libcontainer specifies configuration options for what a container is. It provides a native Go implementation
-for using Linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management.
+libcontainer specifies configuration options for what a container is. It provides a native Go implementation for using Linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management.
#### Container
-A container is a self contained directory that is able to run one or more processes without
-affecting the host system. The directory is usually a full system tree. Inside the directory
-a `container.json` file is placed with the runtime configuration for how the processes
-should be contained and run. Environment, networking, and different capabilities for the
-process are specified in this file. The configuration is used for each process executed inside the container.
+A container is a self contained execution environment that shares the kernel of the host system and which is (optionally) isolated from other containers in the system.
+
+libcontainer may be used to execute a process in a container. If a user tries to run a new process inside an existing container, the new process is added to the processes executing in the container.
+
+
+#### Root file system
-See the `container.json` file for what the configuration should look like.
+A container runs with a directory known as its *root file system*, or *rootfs*, mounted as the file system root. The rootfs is usually a full system tree.
-Using this configuration and the current directory holding the rootfs for a process, one can use libcontainer to exec the container. Running the life of the namespace, a `pid` file
-is written to the current directory with the pid of the namespaced process to the external world. A client can use this pid to wait, kill, or perform other operation with the container. If a user tries to run a new process inside an existing container with a live namespace, the namespace will be joined by the new process.
-You may also specify an alternate root place where the `container.json` file is read and where the `pid` file will be saved.
+#### Configuration
+
+A container is initially configured by supplying configuration data when the container is created.
+
#### nsinit
-`nsinit` is a cli application used as the reference implementation of libcontainer. It is able to
-spawn or join new containers giving the current directory. To use `nsinit` cd into a Linux
-rootfs and copy a `container.json` file into the directory with your specified configuration.
+`nsinit` is a cli application which demonstrates the use of libcontainer. It is able to spawn new containers or join existing containers, based on the current directory.
-To execute `/bin/bash` in the current directory as a container just run:
+To use `nsinit`, cd into a Linux rootfs and copy a `container.json` file into the directory with your specified configuration. Environment, networking, and different capabilities for the container are specified in this file. The configuration is used for each process executed inside the container.
+
+See the `sample_configs` folder for examples of what the container configuration should look like.
+
+To execute `/bin/bash` in the current directory as a container just run the following **as root**:
```bash
nsinit exec /bin/bash
```
-If you wish to spawn another process inside the container while your current bash session is
-running just run the exact same command again to get another bash shell or change the command. If the original process dies, PID 1, all other processes spawned inside the container will also be killed and the namespace will be removed.
+If you wish to spawn another process inside the container while your current bash session is running, run the same command again to get another bash shell (or change the command). If the original process (PID 1) dies, all other processes spawned inside the container will be killed and the namespace will be removed.
+
+You can identify if a process is running in a container by looking to see if `state.json` is in the root of the directory.
+
+You may also specify an alternate root place where the `container.json` file is read and where the `state.json` file will be saved.
-You can identify if a process is running in a container by looking to see if `pid` is in the root of the directory.
#### Future
See the [roadmap](ROADMAP.md).
diff --git a/vendor/src/github.com/docker/libcontainer/api.go b/vendor/src/github.com/docker/libcontainer/api.go
new file mode 100644
index 0000000000..310f06e810
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/api.go
@@ -0,0 +1,23 @@
+package libcontainer
+
+import (
+ "github.com/docker/libcontainer/cgroups/fs"
+ "github.com/docker/libcontainer/network"
+)
+
+// Returns all available stats for the given container.
+func GetStats(container *Config, state *State) (*ContainerStats, error) {
+ var containerStats ContainerStats
+ stats, err := fs.GetStats(container.Cgroups)
+ if err != nil {
+ return &containerStats, err
+ }
+ containerStats.CgroupStats = stats
+ networkStats, err := network.GetStats(&state.NetworkState)
+ if err != nil {
+ return &containerStats, err
+ }
+ containerStats.NetworkStats = networkStats
+
+ return &containerStats, nil
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
index 1a0323f442..8fa34c21c2 100644
--- a/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/apply_raw.go
@@ -57,12 +57,13 @@ func GetStats(c *cgroups.Cgroup) (*cgroups.Stats, error) {
d, err := getCgroupData(c, 0)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("getting CgroupData %s", err)
}
- for _, sys := range subsystems {
- if err := sys.GetStats(d, stats); err != nil {
- return nil, err
+ for sysName, sys := range subsystems {
+ // Don't fail if a cgroup hierarchy was not found.
+ if err := sys.GetStats(d, stats); err != nil && err != cgroups.ErrNotFound {
+ return nil, fmt.Errorf("getting stats for system %q %s", sysName, err)
}
}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go
new file mode 100644
index 0000000000..d92063bade
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/notify_linux.go
@@ -0,0 +1,82 @@
+// +build linux
+
+package fs
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/docker/libcontainer/cgroups"
+)
+
+// NotifyOnOOM sends signals on the returned channel when the cgroup reaches
+// its memory limit. The channel is closed when the cgroup is removed.
+func NotifyOnOOM(c *cgroups.Cgroup) (<-chan struct{}, error) {
+ d, err := getCgroupData(c, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ return notifyOnOOM(d)
+}
+
+func notifyOnOOM(d *data) (<-chan struct{}, error) {
+ dir, err := d.path("memory")
+ if err != nil {
+ return nil, err
+ }
+
+ fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
+ if syserr != 0 {
+ return nil, syserr
+ }
+
+ eventfd := os.NewFile(fd, "eventfd")
+
+ oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control"))
+ if err != nil {
+ eventfd.Close()
+ return nil, err
+ }
+
+ var (
+ eventControlPath = filepath.Join(dir, "cgroup.event_control")
+ data = fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd())
+ )
+
+ if err := writeFile(dir, "cgroup.event_control", data); err != nil {
+ eventfd.Close()
+ oomControl.Close()
+ return nil, err
+ }
+
+ ch := make(chan struct{})
+
+ go func() {
+ defer func() {
+ close(ch)
+ eventfd.Close()
+ oomControl.Close()
+ }()
+
+ buf := make([]byte, 8)
+
+ for {
+ if _, err := eventfd.Read(buf); err != nil {
+ return
+ }
+
+ // When a cgroup is destroyed, an event is sent to eventfd.
+ // So if the control path is gone, return instead of notifying.
+ if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) {
+ return
+ }
+
+ ch <- struct{}{}
+ }
+ }()
+
+ return ch, nil
+}
diff --git a/vendor/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go b/vendor/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go
new file mode 100644
index 0000000000..a11880cb66
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/cgroups/fs/notify_linux_test.go
@@ -0,0 +1,86 @@
+// +build linux
+
+package fs
+
+import (
+ "encoding/binary"
+ "fmt"
+ "syscall"
+ "testing"
+ "time"
+)
+
+func TestNotifyOnOOM(t *testing.T) {
+ helper := NewCgroupTestUtil("memory", t)
+ defer helper.cleanup()
+
+ helper.writeFileContents(map[string]string{
+ "memory.oom_control": "",
+ "cgroup.event_control": "",
+ })
+
+ var eventFd, oomControlFd int
+
+ ooms, err := notifyOnOOM(helper.CgroupData)
+ if err != nil {
+ t.Fatal("expected no error, got:", err)
+ }
+
+ memoryPath, _ := helper.CgroupData.path("memory")
+ data, err := readFile(memoryPath, "cgroup.event_control")
+ if err != nil {
+ t.Fatal("couldn't read event control file:", err)
+ }
+
+ if _, err := fmt.Sscanf(data, "%d %d", &eventFd, &oomControlFd); err != nil {
+ t.Fatalf("invalid control data %q: %s", data, err)
+ }
+
+ // re-open the eventfd
+ efd, err := syscall.Dup(eventFd)
+ if err != nil {
+ t.Fatal("unable to reopen eventfd:", err)
+ }
+ defer syscall.Close(efd)
+
+ if err != nil {
+ t.Fatal("unable to dup event fd:", err)
+ }
+
+ buf := make([]byte, 8)
+ binary.LittleEndian.PutUint64(buf, 1)
+
+ if _, err := syscall.Write(efd, buf); err != nil {
+ t.Fatal("unable to write to eventfd:", err)
+ }
+
+ select {
+ case <-ooms:
+ case <-time.After(100 * time.Millisecond):
+ t.Fatal("no notification on oom channel after 100ms")
+ }
+
+ // simulate what happens when a cgroup is destroyed by cleaning up and then
+ // writing to the eventfd.
+ helper.cleanup()
+ if _, err := syscall.Write(efd, buf); err != nil {
+ t.Fatal("unable to write to eventfd:", err)
+ }
+
+ // give things a moment to shut down
+ select {
+ case _, ok := <-ooms:
+ if ok {
+ t.Fatal("expected no oom to be triggered")
+ }
+ case <-time.After(100 * time.Millisecond):
+ }
+
+ if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(oomControlFd), syscall.F_GETFD, 0); err != syscall.EBADF {
+ t.Error("expected oom control to be closed")
+ }
+
+ if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(eventFd), syscall.F_GETFD, 0); err != syscall.EBADF {
+ t.Error("expected event fd to be closed")
+ }
+}
diff --git a/vendor/src/github.com/docker/libcontainer/container.go b/vendor/src/github.com/docker/libcontainer/container.go
index be72d92eee..8fe95c24f7 100644
--- a/vendor/src/github.com/docker/libcontainer/container.go
+++ b/vendor/src/github.com/docker/libcontainer/container.go
@@ -2,24 +2,21 @@ package libcontainer
import (
"github.com/docker/libcontainer/cgroups"
- "github.com/docker/libcontainer/devices"
+ "github.com/docker/libcontainer/mount"
+ "github.com/docker/libcontainer/network"
)
-// Context is a generic key value pair that allows arbatrary data to be sent
-type Context map[string]string
+type MountConfig mount.MountConfig
-// Container defines configuration options for executing a process inside a contained environment
-type Container struct {
- // Hostname optionally sets the container's hostname if provided
- Hostname string `json:"hostname,omitempty"`
+type Network network.Network
- // ReadonlyFs will remount the container's rootfs as readonly where only externally mounted
- // bind mounts are writtable
- ReadonlyFs bool `json:"readonly_fs,omitempty"`
+// Config defines configuration options for executing a process inside a contained environment.
+type Config struct {
+ // Mount specific options.
+ MountConfig *MountConfig `json:"mount_config,omitempty"`
- // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
- // This is a common option when the container is running in ramdisk
- NoPivotRoot bool `json:"no_pivot_root,omitempty"`
+ // Hostname optionally sets the container's hostname if provided
+ Hostname string `json:"hostname,omitempty"`
// User will set the uid and gid of the executing process running inside the container
User string `json:"user,omitempty"`
@@ -54,41 +51,17 @@ type Container struct {
// placed into to limit the resources the container has available
Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"`
- // Context is a generic key value format that allows for additional settings to be passed
- // on the container's creation
- // This is commonly used to specify apparmor profiles, selinux labels, and different restrictions
- // placed on the container's processes
- Context Context `json:"context,omitempty"`
-
- // Mounts specify additional source and destination paths that will be mounted inside the container's
- // rootfs and mount namespace if specified
- Mounts Mounts `json:"mounts,omitempty"`
-
- // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
- DeviceNodes []*devices.Device `json:"device_nodes,omitempty"`
-}
-
-// Network defines configuration for a container's networking stack
-//
-// The network configuration can be omited from a container causing the
-// container to be setup with the host's networking stack
-type Network struct {
- // Type sets the networks type, commonly veth and loopback
- Type string `json:"type,omitempty"`
+ // AppArmorProfile specifies the profile to apply to the process running in the container and is
+ // change at the time the process is execed
+ AppArmorProfile string `json:"apparmor_profile,omitempty"`
- // Context is a generic key value format for setting additional options that are specific to
- // the network type
- Context Context `json:"context,omitempty"`
-
- // Address contains the IP and mask to set on the network interface
- Address string `json:"address,omitempty"`
-
- // Gateway sets the gateway address that is used as the default for the interface
- Gateway string `json:"gateway,omitempty"`
+ // ProcessLabel specifies the label to apply to the process running in the container. It is
+ // commonly used by selinux
+ ProcessLabel string `json:"process_label,omitempty"`
- // Mtu sets the mtu value for the interface and will be mirrored on both the host and
- // container's interfaces if a pair is created, specifically in the case of type veth
- Mtu int `json:"mtu,omitempty"`
+ // RestrictSys will remount /proc/sys, /sys, and mask over sysrq-trigger as well as /proc/irq and
+ // /proc/bus
+ RestrictSys bool `json:"restrict_sys,omitempty"`
}
// Routes can be specified to create entries in the route table as the container is started
diff --git a/vendor/src/github.com/docker/libcontainer/container_test.go b/vendor/src/github.com/docker/libcontainer/container_test.go
index deb65aa835..5981281153 100644
--- a/vendor/src/github.com/docker/libcontainer/container_test.go
+++ b/vendor/src/github.com/docker/libcontainer/container_test.go
@@ -3,7 +3,10 @@ package libcontainer
import (
"encoding/json"
"os"
+ "path/filepath"
"testing"
+
+ "github.com/docker/libcontainer/devices"
)
// Checks whether the expected capability is specified in the capabilities.
@@ -16,17 +19,41 @@ func contains(expected string, values []string) bool {
return false
}
-func TestContainerJsonFormat(t *testing.T) {
- f, err := os.Open("container.json")
+func containsDevice(expected *devices.Device, values []*devices.Device) bool {
+ for _, d := range values {
+ if d.Path == expected.Path &&
+ d.CgroupPermissions == expected.CgroupPermissions &&
+ d.FileMode == expected.FileMode &&
+ d.MajorNumber == expected.MajorNumber &&
+ d.MinorNumber == expected.MinorNumber &&
+ d.Type == expected.Type {
+ return true
+ }
+ }
+ return false
+}
+
+func loadConfig(name string) (*Config, error) {
+ f, err := os.Open(filepath.Join("sample_configs", name))
if err != nil {
- t.Fatal("Unable to open container.json")
+ return nil, err
}
defer f.Close()
- var container *Container
+ var container *Config
if err := json.NewDecoder(f).Decode(&container); err != nil {
- t.Fatalf("failed to decode container config: %s", err)
+ return nil, err
+ }
+
+ return container, nil
+}
+
+func TestConfigJsonFormat(t *testing.T) {
+ container, err := loadConfig("attach_to_bridge.json")
+ if err != nil {
+ t.Fatal(err)
}
+
if container.Hostname != "koye" {
t.Log("hostname is not set")
t.Fail()
@@ -61,4 +88,73 @@ func TestContainerJsonFormat(t *testing.T) {
t.Log("capabilities mask should contain SYS_CHROOT")
t.Fail()
}
+
+ for _, n := range container.Networks {
+ if n.Type == "veth" {
+ if n.Bridge != "docker0" {
+ t.Logf("veth bridge should be docker0 but received %q", n.Bridge)
+ t.Fail()
+ }
+
+ if n.Address != "172.17.0.101/16" {
+ t.Logf("veth address should be 172.17.0.101/61 but received %q", n.Address)
+ t.Fail()
+ }
+
+ if n.VethPrefix != "veth" {
+ t.Logf("veth prefix should be veth but received %q", n.VethPrefix)
+ t.Fail()
+ }
+
+ if n.Gateway != "172.17.42.1" {
+ t.Logf("veth gateway should be 172.17.42.1 but received %q", n.Gateway)
+ t.Fail()
+ }
+
+ if n.Mtu != 1500 {
+ t.Logf("veth mtu should be 1500 but received %d", n.Mtu)
+ t.Fail()
+ }
+
+ break
+ }
+ }
+
+ for _, d := range devices.DefaultSimpleDevices {
+ if !containsDevice(d, container.MountConfig.DeviceNodes) {
+ t.Logf("expected device configuration for %s", d.Path)
+ t.Fail()
+ }
+ }
+
+ if !container.RestrictSys {
+ t.Log("expected restrict sys to be true")
+ t.Fail()
+ }
+}
+
+func TestApparmorProfile(t *testing.T) {
+ container, err := loadConfig("apparmor.json")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if container.AppArmorProfile != "docker-default" {
+ t.Fatalf("expected apparmor profile to be docker-default but received %q", container.AppArmorProfile)
+ }
+}
+
+func TestSelinuxLabels(t *testing.T) {
+ container, err := loadConfig("selinux.json")
+ if err != nil {
+ t.Fatal(err)
+ }
+ label := "system_u:system_r:svirt_lxc_net_t:s0:c164,c475"
+
+ if container.ProcessLabel != label {
+ t.Fatalf("expected process label %q but received %q", label, container.ProcessLabel)
+ }
+ if container.MountConfig.MountLabel != label {
+ t.Fatalf("expected mount label %q but received %q", label, container.MountConfig.MountLabel)
+ }
}
diff --git a/vendor/src/github.com/docker/libcontainer/mount/init.go b/vendor/src/github.com/docker/libcontainer/mount/init.go
index 4e913ad1e4..34fad6dd19 100644
--- a/vendor/src/github.com/docker/libcontainer/mount/init.go
+++ b/vendor/src/github.com/docker/libcontainer/mount/init.go
@@ -8,7 +8,6 @@ import (
"path/filepath"
"syscall"
- "github.com/docker/libcontainer"
"github.com/docker/libcontainer/label"
"github.com/docker/libcontainer/mount/nodes"
"github.com/dotcloud/docker/pkg/symlink"
@@ -26,14 +25,14 @@ type mount struct {
data string
}
-// InitializeMountNamespace setups up the devices, mount points, and filesystems for use inside a
-// new mount namepsace
-func InitializeMountNamespace(rootfs, console string, container *libcontainer.Container) error {
+// InitializeMountNamespace sets up the devices, mount points, and filesystems for use inside a
+// new mount namespace.
+func InitializeMountNamespace(rootfs, console string, mountConfig *MountConfig) error {
var (
err error
flag = syscall.MS_PRIVATE
)
- if container.NoPivotRoot {
+ if mountConfig.NoPivotRoot {
flag = syscall.MS_SLAVE
}
if err := system.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil {
@@ -42,16 +41,16 @@ func InitializeMountNamespace(rootfs, console string, container *libcontainer.Co
if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
return fmt.Errorf("mouting %s as bind %s", rootfs, err)
}
- if err := mountSystem(rootfs, container); err != nil {
+ if err := mountSystem(rootfs, mountConfig); err != nil {
return fmt.Errorf("mount system %s", err)
}
- if err := setupBindmounts(rootfs, container.Mounts); err != nil {
+ if err := setupBindmounts(rootfs, mountConfig.Mounts); err != nil {
return fmt.Errorf("bind mounts %s", err)
}
- if err := nodes.CreateDeviceNodes(rootfs, container.DeviceNodes); err != nil {
+ if err := nodes.CreateDeviceNodes(rootfs, mountConfig.DeviceNodes); err != nil {
return fmt.Errorf("create device nodes %s", err)
}
- if err := SetupPtmx(rootfs, console, container.Context["mount_label"]); err != nil {
+ if err := SetupPtmx(rootfs, console, mountConfig.MountLabel); err != nil {
return err
}
if err := setupDevSymlinks(rootfs); err != nil {
@@ -61,7 +60,7 @@ func InitializeMountNamespace(rootfs, console string, container *libcontainer.Co
return fmt.Errorf("chdir into %s %s", rootfs, err)
}
- if container.NoPivotRoot {
+ if mountConfig.NoPivotRoot {
err = MsMoveRoot(rootfs)
} else {
err = PivotRoot(rootfs)
@@ -70,7 +69,7 @@ func InitializeMountNamespace(rootfs, console string, container *libcontainer.Co
return err
}
- if container.ReadonlyFs {
+ if mountConfig.ReadonlyFs {
if err := SetReadonly(); err != nil {
return fmt.Errorf("set readonly %s", err)
}
@@ -83,8 +82,8 @@ func InitializeMountNamespace(rootfs, console string, container *libcontainer.Co
// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts
// inside the mount namespace
-func mountSystem(rootfs string, container *libcontainer.Container) error {
- for _, m := range newSystemMounts(rootfs, container.Context["mount_label"], container.Mounts) {
+func mountSystem(rootfs string, mountConfig *MountConfig) error {
+ for _, m := range newSystemMounts(rootfs, mountConfig.MountLabel, mountConfig.Mounts) {
if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {
return fmt.Errorf("mkdirall %s %s", m.path, err)
}
@@ -145,7 +144,7 @@ func setupDevSymlinks(rootfs string) error {
return nil
}
-func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error {
+func setupBindmounts(rootfs string, bindMounts Mounts) error {
for _, m := range bindMounts.OfType("bind") {
var (
flags = syscall.MS_BIND | syscall.MS_REC
@@ -188,7 +187,7 @@ func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error {
// TODO: this is crappy right now and should be cleaned up with a better way of handling system and
// standard bind mounts allowing them to be more dynamic
-func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mount {
+func newSystemMounts(rootfs, mountLabel string, mounts Mounts) []mount {
systemMounts := []mount{
{source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags},
{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags},
diff --git a/vendor/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go b/vendor/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go
index 3811a1d739..83660715d4 100644
--- a/vendor/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go
+++ b/vendor/src/github.com/docker/libcontainer/mount/nodes/nodes_unsupported.go
@@ -3,10 +3,11 @@
package nodes
import (
- "github.com/docker/libcontainer"
+ "errors"
+
"github.com/docker/libcontainer/devices"
)
func CreateDeviceNodes(rootfs string, nodesToCreate []*devices.Device) error {
- return libcontainer.ErrUnsupported
+ return errors.New("Unsupported method")
}
diff --git a/vendor/src/github.com/docker/libcontainer/mount/types.go b/vendor/src/github.com/docker/libcontainer/mount/types.go
new file mode 100644
index 0000000000..a2659e582e
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/mount/types.go
@@ -0,0 +1,48 @@
+package mount
+
+import (
+ "errors"
+
+ "github.com/docker/libcontainer/devices"
+)
+
+type MountConfig struct {
+ // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
+ // This is a common option when the container is running in ramdisk
+ NoPivotRoot bool `json:"no_pivot_root,omitempty"`
+
+ // ReadonlyFs will remount the container's rootfs as readonly where only externally mounted
+ // bind mounts are writtable
+ ReadonlyFs bool `json:"readonly_fs,omitempty"`
+
+ // Mounts specify additional source and destination paths that will be mounted inside the container's
+ // rootfs and mount namespace if specified
+ Mounts Mounts `json:"mounts,omitempty"`
+
+ // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
+ DeviceNodes []*devices.Device `json:"device_nodes,omitempty"`
+
+ MountLabel string `json:"mount_label,omitempty"`
+}
+
+type Mount struct {
+ Type string `json:"type,omitempty"`
+ Source string `json:"source,omitempty"` // Source path, in the host namespace
+ Destination string `json:"destination,omitempty"` // Destination path, in the container
+ Writable bool `json:"writable,omitempty"`
+ Private bool `json:"private,omitempty"`
+}
+
+type Mounts []Mount
+
+var ErrUnsupported = errors.New("Unsupported method")
+
+func (s Mounts) OfType(t string) Mounts {
+ out := Mounts{}
+ for _, m := range s {
+ if m.Type == t {
+ out = append(out, m)
+ }
+ }
+ return out
+}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/create.go b/vendor/src/github.com/docker/libcontainer/namespaces/create.go
index 20bef20459..15a844bc0b 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/create.go
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/create.go
@@ -7,4 +7,4 @@ import (
"github.com/docker/libcontainer"
)
-type CreateCommand func(container *libcontainer.Container, console, rootfs, dataPath, init string, childPipe *os.File, args []string) *exec.Cmd
+type CreateCommand func(container *libcontainer.Config, console, rootfs, dataPath, init string, childPipe *os.File, args []string) *exec.Cmd
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go
index 31d5ba21d8..0aa2bb9c7a 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/exec.go
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/exec.go
@@ -15,9 +15,11 @@ import (
"github.com/dotcloud/docker/pkg/system"
)
-// Exec performes setup outside of a namespace so that a container can be
+// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work.
+// Move this to libcontainer package.
+// Exec performs setup outside of a namespace so that a container can be
// executed. Exec is a high level function for working with container namespaces.
-func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) {
+func Exec(container *libcontainer.Config, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) {
var (
master *os.File
console string
@@ -30,6 +32,7 @@ func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath str
if err != nil {
return -1, err
}
+ defer syncPipe.Close()
if container.Tty {
master, console, err = system.CreateMasterAndConsole()
@@ -50,16 +53,13 @@ func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath str
return -1, err
}
+ // Now we passed the pipe to the child, close our side
+ syncPipe.CloseChild()
+
started, err := system.GetProcessStartTime(command.Process.Pid)
if err != nil {
return -1, err
}
- if err := WritePid(dataPath, command.Process.Pid, started); err != nil {
- command.Process.Kill()
- command.Wait()
- return -1, err
- }
- defer DeletePid(dataPath)
// Do this before syncing with child so that no children
// can escape the cgroup
@@ -73,14 +73,32 @@ func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath str
defer cleaner.Cleanup()
}
- if err := InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil {
+ var networkState network.NetworkState
+ if err := InitializeNetworking(container, command.Process.Pid, syncPipe, &networkState); err != nil {
+ command.Process.Kill()
+ command.Wait()
+ return -1, err
+ }
+
+ state := &libcontainer.State{
+ InitPid: command.Process.Pid,
+ InitStartTime: started,
+ NetworkState: networkState,
+ }
+
+ if err := libcontainer.SaveState(dataPath, state); err != nil {
command.Process.Kill()
command.Wait()
return -1, err
}
+ defer libcontainer.DeleteState(dataPath)
// Sync with child
- syncPipe.Close()
+ if err := syncPipe.ReadFromChild(); err != nil {
+ command.Process.Kill()
+ command.Wait()
+ return -1, err
+ }
if startCallback != nil {
startCallback()
@@ -99,11 +117,11 @@ func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath str
// args provided
//
// console: the /dev/console to setup inside the container
-// init: the progam executed inside the namespaces
+// init: the program executed inside the namespaces
// root: the path to the container json file and information
-// pipe: sync pipe to syncronize the parent and child processes
-// args: the arguemnts to pass to the container to run as the user's program
-func DefaultCreateCommand(container *libcontainer.Container, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd {
+// pipe: sync pipe to synchronize the parent and child processes
+// args: the arguments to pass to the container to run as the user's program
+func DefaultCreateCommand(container *libcontainer.Config, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd {
// get our binary name from arg0 so we can always reexec ourself
env := []string{
"console=" + console,
@@ -133,9 +151,9 @@ func DefaultCreateCommand(container *libcontainer.Container, console, rootfs, da
return command
}
-// SetupCgroups applies the cgroup restrictions to the process running in the contaienr based
+// SetupCgroups applies the cgroup restrictions to the process running in the container based
// on the container's configuration
-func SetupCgroups(container *libcontainer.Container, nspid int) (cgroups.ActiveCgroup, error) {
+func SetupCgroups(container *libcontainer.Config, nspid int) (cgroups.ActiveCgroup, error) {
if container.Cgroups != nil {
c := container.Cgroups
if systemd.UseSystemd() {
@@ -148,18 +166,17 @@ func SetupCgroups(container *libcontainer.Container, nspid int) (cgroups.ActiveC
// InitializeNetworking creates the container's network stack outside of the namespace and moves
// interfaces into the container's net namespaces if necessary
-func InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error {
- context := libcontainer.Context{}
+func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *SyncPipe, networkState *network.NetworkState) error {
for _, config := range container.Networks {
strategy, err := network.GetStrategy(config.Type)
if err != nil {
return err
}
- if err := strategy.Create(config, nspid, context); err != nil {
+ if err := strategy.Create((*network.Network)(config), nspid, networkState); err != nil {
return err
}
}
- return pipe.SendToChild(context)
+ return pipe.SendToChild(networkState)
}
// GetNamespaceFlags parses the container's Namespaces options to set the correct
@@ -167,7 +184,7 @@ func InitializeNetworking(container *libcontainer.Container, nspid int, pipe *Sy
func GetNamespaceFlags(namespaces map[string]bool) (flag int) {
for key, enabled := range namespaces {
if enabled {
- if ns := libcontainer.GetNamespace(key); ns != nil {
+ if ns := GetNamespace(key); ns != nil {
flag |= ns.Value
}
}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/execin.go b/vendor/src/github.com/docker/libcontainer/namespaces/execin.go
index f44e92abe5..d349282e84 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/execin.go
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/execin.go
@@ -13,7 +13,7 @@ import (
)
// ExecIn uses an existing pid and joins the pid's namespaces with the new command.
-func ExecIn(container *libcontainer.Container, nspid int, args []string) error {
+func ExecIn(container *libcontainer.Config, state *libcontainer.State, args []string) error {
// TODO(vmarmol): If this gets too long, send it over a pipe to the child.
// Marshall the container into JSON since it won't be available in the namespace.
containerJson, err := json.Marshal(container)
@@ -22,7 +22,7 @@ func ExecIn(container *libcontainer.Container, nspid int, args []string) error {
}
// Enter the namespace and then finish setup
- finalArgs := []string{os.Args[0], "nsenter", "--nspid", strconv.Itoa(nspid), "--containerjson", string(containerJson), "--"}
+ finalArgs := []string{os.Args[0], "nsenter", "--nspid", strconv.Itoa(state.InitPid), "--containerjson", string(containerJson), "--"}
finalArgs = append(finalArgs, args...)
if err := system.Execv(finalArgs[0], finalArgs[0:], os.Environ()); err != nil {
return err
@@ -31,7 +31,7 @@ func ExecIn(container *libcontainer.Container, nspid int, args []string) error {
}
// NsEnter is run after entering the namespace.
-func NsEnter(container *libcontainer.Container, nspid int, args []string) error {
+func NsEnter(container *libcontainer.Config, nspid int, args []string) error {
// clear the current processes env and replace it with the environment
// defined on the container
if err := LoadContainerEnvironment(container); err != nil {
@@ -41,8 +41,8 @@ func NsEnter(container *libcontainer.Container, nspid int, args []string) error
return err
}
- if process_label, ok := container.Context["process_label"]; ok {
- if err := label.SetProcessLabel(process_label); err != nil {
+ if container.ProcessLabel != "" {
+ if err := label.SetProcessLabel(container.ProcessLabel); err != nil {
return err
}
}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/init.go b/vendor/src/github.com/docker/libcontainer/namespaces/init.go
index e89afdb47e..53d2611b89 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/init.go
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/init.go
@@ -23,9 +23,17 @@ import (
"github.com/dotcloud/docker/pkg/user"
)
+// TODO(vishh): This is part of the libcontainer API and it does much more than just namespaces related work.
+// Move this to libcontainer package.
// Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
// and other options required for the new container.
-func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error {
+func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) (err error) {
+ defer func() {
+ if err != nil {
+ syncPipe.ReportChildError(err)
+ }
+ }()
+
rootfs, err := utils.ResolveRootfs(uncleanRootfs)
if err != nil {
return err
@@ -38,12 +46,10 @@ func Init(container *libcontainer.Container, uncleanRootfs, consolePath string,
}
// We always read this as it is a way to sync with the parent as well
- context, err := syncPipe.ReadFromParent()
+ networkState, err := syncPipe.ReadFromParent()
if err != nil {
- syncPipe.Close()
return err
}
- syncPipe.Close()
if consolePath != "" {
if err := console.OpenAndDup(consolePath); err != nil {
@@ -58,7 +64,7 @@ func Init(container *libcontainer.Container, uncleanRootfs, consolePath string,
return fmt.Errorf("setctty %s", err)
}
}
- if err := setupNetwork(container, context); err != nil {
+ if err := setupNetwork(container, networkState); err != nil {
return fmt.Errorf("setup networking %s", err)
}
if err := setupRoute(container); err != nil {
@@ -67,9 +73,12 @@ func Init(container *libcontainer.Container, uncleanRootfs, consolePath string,
label.Init()
- if err := mount.InitializeMountNamespace(rootfs, consolePath, container); err != nil {
+ if err := mount.InitializeMountNamespace(rootfs,
+ consolePath,
+ (*mount.MountConfig)(container.MountConfig)); err != nil {
return fmt.Errorf("setup mount namespace %s", err)
}
+
if container.Hostname != "" {
if err := system.Sethostname(container.Hostname); err != nil {
return fmt.Errorf("sethostname %s", err)
@@ -78,13 +87,16 @@ func Init(container *libcontainer.Container, uncleanRootfs, consolePath string,
runtime.LockOSThread()
- if err := apparmor.ApplyProfile(container.Context["apparmor_profile"]); err != nil {
- return fmt.Errorf("set apparmor profile %s: %s", container.Context["apparmor_profile"], err)
+ if err := apparmor.ApplyProfile(container.AppArmorProfile); err != nil {
+ return fmt.Errorf("set apparmor profile %s: %s", container.AppArmorProfile, err)
}
- if err := label.SetProcessLabel(container.Context["process_label"]); err != nil {
+
+ if err := label.SetProcessLabel(container.ProcessLabel); err != nil {
return fmt.Errorf("set process label %s", err)
}
- if container.Context["restrictions"] != "" {
+
+ // TODO: (crosbymichael) make this configurable at the Config level
+ if container.RestrictSys {
if err := restrict.Restrict("proc/sys", "proc/sysrq-trigger", "proc/irq", "proc/bus", "sys"); err != nil {
return err
}
@@ -157,14 +169,14 @@ func SetupUser(u string) error {
// setupVethNetwork uses the Network config if it is not nil to initialize
// the new veth interface inside the container for use by changing the name to eth0
// setting the MTU and IP address along with the default gateway
-func setupNetwork(container *libcontainer.Container, context libcontainer.Context) error {
+func setupNetwork(container *libcontainer.Config, networkState *network.NetworkState) error {
for _, config := range container.Networks {
strategy, err := network.GetStrategy(config.Type)
if err != nil {
return err
}
- err1 := strategy.Initialize(config, context)
+ err1 := strategy.Initialize((*network.Network)(config), networkState)
if err1 != nil {
return err1
}
@@ -172,7 +184,7 @@ func setupNetwork(container *libcontainer.Container, context libcontainer.Contex
return nil
}
-func setupRoute(container *libcontainer.Container) error {
+func setupRoute(container *libcontainer.Config) error {
for _, config := range container.Routes {
if err := netlink.AddRoute(config.Destination, config.Source, config.Gateway, config.InterfaceName); err != nil {
return err
@@ -184,7 +196,7 @@ func setupRoute(container *libcontainer.Container) error {
// FinalizeNamespace drops the caps, sets the correct user
// and working dir, and closes any leaky file descriptors
// before execing the command inside the namespace
-func FinalizeNamespace(container *libcontainer.Container) error {
+func FinalizeNamespace(container *libcontainer.Config) error {
// Ensure that all non-standard fds we may have accidentally
// inherited are marked close-on-exec so they stay out of the
// container
@@ -193,7 +205,7 @@ func FinalizeNamespace(container *libcontainer.Container) error {
}
// drop capabilities in bounding set before changing user
- if err := capabilities.DropBoundingSet(container); err != nil {
+ if err := capabilities.DropBoundingSet(container.Capabilities); err != nil {
return fmt.Errorf("drop bounding set %s", err)
}
@@ -211,7 +223,7 @@ func FinalizeNamespace(container *libcontainer.Container) error {
}
// drop all other capabilities
- if err := capabilities.DropCapabilities(container); err != nil {
+ if err := capabilities.DropCapabilities(container.Capabilities); err != nil {
return fmt.Errorf("drop capabilities %s", err)
}
@@ -224,7 +236,7 @@ func FinalizeNamespace(container *libcontainer.Container) error {
return nil
}
-func LoadContainerEnvironment(container *libcontainer.Container) error {
+func LoadContainerEnvironment(container *libcontainer.Config) error {
os.Clearenv()
for _, pair := range container.Env {
p := strings.SplitN(pair, "=", 2)
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/pid.go b/vendor/src/github.com/docker/libcontainer/namespaces/pid.go
deleted file mode 100644
index 8d97ec1463..0000000000
--- a/vendor/src/github.com/docker/libcontainer/namespaces/pid.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package namespaces
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
-)
-
-// WritePid writes the namespaced processes pid to pid and it's start time
-// to the path specified
-func WritePid(path string, pid int, startTime string) error {
- err := ioutil.WriteFile(filepath.Join(path, "pid"), []byte(fmt.Sprint(pid)), 0655)
- if err != nil {
- return err
- }
- return ioutil.WriteFile(filepath.Join(path, "start"), []byte(startTime), 0655)
-}
-
-// DeletePid removes the pid and started file from disk when the container's process
-// dies and the container is cleanly removed
-func DeletePid(path string) error {
- err := os.Remove(filepath.Join(path, "pid"))
- if serr := os.Remove(filepath.Join(path, "start")); err == nil {
- err = serr
- }
- return err
-}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe.go b/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe.go
index 6fa8465790..dcb5d9749d 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe.go
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe.go
@@ -5,8 +5,9 @@ import (
"fmt"
"io/ioutil"
"os"
+ "syscall"
- "github.com/docker/libcontainer"
+ "github.com/docker/libcontainer/network"
)
// SyncPipe allows communication to and from the child processes
@@ -16,24 +17,17 @@ type SyncPipe struct {
parent, child *os.File
}
-func NewSyncPipe() (s *SyncPipe, err error) {
- s = &SyncPipe{}
- s.child, s.parent, err = os.Pipe()
- if err != nil {
- return nil, err
- }
- return s, nil
-}
-
-func NewSyncPipeFromFd(parendFd, childFd uintptr) (*SyncPipe, error) {
+func NewSyncPipeFromFd(parentFd, childFd uintptr) (*SyncPipe, error) {
s := &SyncPipe{}
- if parendFd > 0 {
- s.parent = os.NewFile(parendFd, "parendPipe")
+
+ if parentFd > 0 {
+ s.parent = os.NewFile(parentFd, "parentPipe")
} else if childFd > 0 {
s.child = os.NewFile(childFd, "childPipe")
} else {
return nil, fmt.Errorf("no valid sync pipe fd specified")
}
+
return s, nil
}
@@ -45,36 +39,64 @@ func (s *SyncPipe) Parent() *os.File {
return s.parent
}
-func (s *SyncPipe) SendToChild(context libcontainer.Context) error {
- data, err := json.Marshal(context)
+func (s *SyncPipe) SendToChild(networkState *network.NetworkState) error {
+ data, err := json.Marshal(networkState)
if err != nil {
return err
}
+
s.parent.Write(data)
+
+ return syscall.Shutdown(int(s.parent.Fd()), syscall.SHUT_WR)
+}
+
+func (s *SyncPipe) ReadFromChild() error {
+ data, err := ioutil.ReadAll(s.parent)
+ if err != nil {
+ return err
+ }
+
+ if len(data) > 0 {
+ return fmt.Errorf("%s", data)
+ }
+
return nil
}
-func (s *SyncPipe) ReadFromParent() (libcontainer.Context, error) {
+func (s *SyncPipe) ReadFromParent() (*network.NetworkState, error) {
data, err := ioutil.ReadAll(s.child)
if err != nil {
return nil, fmt.Errorf("error reading from sync pipe %s", err)
}
- var context libcontainer.Context
+ var networkState *network.NetworkState
if len(data) > 0 {
- if err := json.Unmarshal(data, &context); err != nil {
+ if err := json.Unmarshal(data, &networkState); err != nil {
return nil, err
}
}
- return context, nil
+ return networkState, nil
+}
+func (s *SyncPipe) ReportChildError(err error) {
+ s.child.Write([]byte(err.Error()))
+ s.CloseChild()
}
func (s *SyncPipe) Close() error {
if s.parent != nil {
s.parent.Close()
}
+
if s.child != nil {
s.child.Close()
}
+
return nil
}
+
+func (s *SyncPipe) CloseChild() {
+ if s.child != nil {
+ s.child.Close()
+ s.child = nil
+ }
+}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_linux.go b/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_linux.go
new file mode 100644
index 0000000000..ad61e75d29
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_linux.go
@@ -0,0 +1,20 @@
+package namespaces
+
+import (
+ "os"
+ "syscall"
+)
+
+func NewSyncPipe() (s *SyncPipe, err error) {
+ s = &SyncPipe{}
+
+ fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ s.child = os.NewFile(uintptr(fds[0]), "child syncpipe")
+ s.parent = os.NewFile(uintptr(fds[1]), "parent syncpipe")
+
+ return s, nil
+}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_test.go b/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_test.go
new file mode 100644
index 0000000000..69bd0abbfb
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/sync_pipe_test.go
@@ -0,0 +1,61 @@
+package namespaces
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/docker/libcontainer/network"
+)
+
+func TestSendErrorFromChild(t *testing.T) {
+ pipe, err := NewSyncPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := pipe.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ expected := "something bad happened"
+
+ pipe.ReportChildError(fmt.Errorf(expected))
+
+ childError := pipe.ReadFromChild()
+ if childError == nil {
+ t.Fatal("expected an error to be returned but did not receive anything")
+ }
+
+ if childError.Error() != expected {
+ t.Fatalf("expected %q but received error message %q", expected, childError.Error())
+ }
+}
+
+func TestSendPayloadToChild(t *testing.T) {
+ pipe, err := NewSyncPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer func() {
+ if err := pipe.Close(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ expected := "libcontainer"
+
+ if err := pipe.SendToChild(&network.NetworkState{VethHost: expected}); err != nil {
+ t.Fatal(err)
+ }
+
+ payload, err := pipe.ReadFromParent()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if payload.VethHost != expected {
+ t.Fatalf("expected veth host %q but received %q", expected, payload.VethHost)
+ }
+}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/types.go b/vendor/src/github.com/docker/libcontainer/namespaces/types.go
new file mode 100644
index 0000000000..16ce981e85
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/types.go
@@ -0,0 +1,50 @@
+package namespaces
+
+import "errors"
+
+type (
+ Namespace struct {
+ Key string `json:"key,omitempty"`
+ Value int `json:"value,omitempty"`
+ File string `json:"file,omitempty"`
+ }
+ Namespaces []*Namespace
+)
+
+// namespaceList is used to convert the libcontainer types
+// into the names of the files located in /proc/<pid>/ns/* for
+// each namespace
+var (
+ namespaceList = Namespaces{}
+ ErrUnkownNamespace = errors.New("Unknown namespace")
+ ErrUnsupported = errors.New("Unsupported method")
+)
+
+func (ns *Namespace) String() string {
+ return ns.Key
+}
+
+func GetNamespace(key string) *Namespace {
+ for _, ns := range namespaceList {
+ if ns.Key == key {
+ cpy := *ns
+ return &cpy
+ }
+ }
+ return nil
+}
+
+// Contains returns true if the specified Namespace is
+// in the slice
+func (n Namespaces) Contains(ns string) bool {
+ return n.Get(ns) != nil
+}
+
+func (n Namespaces) Get(ns string) *Namespace {
+ for _, nsp := range n {
+ if nsp != nil && nsp.Key == ns {
+ return nsp
+ }
+ }
+ return nil
+}
diff --git a/vendor/src/github.com/docker/libcontainer/types_linux.go b/vendor/src/github.com/docker/libcontainer/namespaces/types_linux.go
index c14531df20..d3079944c7 100644
--- a/vendor/src/github.com/docker/libcontainer/types_linux.go
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/types_linux.go
@@ -1,4 +1,4 @@
-package libcontainer
+package namespaces
import (
"syscall"
diff --git a/vendor/src/github.com/docker/libcontainer/types_test.go b/vendor/src/github.com/docker/libcontainer/namespaces/types_test.go
index dd31298fdf..4d0a72c9b2 100644
--- a/vendor/src/github.com/docker/libcontainer/types_test.go
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/types_test.go
@@ -1,4 +1,4 @@
-package libcontainer
+package namespaces
import (
"testing"
@@ -28,17 +28,3 @@ func TestNamespacesContains(t *testing.T) {
t.Fatal("namespaces should contain NEWPID but does not")
}
}
-
-func TestCapabilitiesContains(t *testing.T) {
- caps := Capabilities{
- GetCapability("MKNOD"),
- GetCapability("SETPCAP"),
- }
-
- if caps.Contains("SYS_ADMIN") {
- t.Fatal("capabilities should not contain SYS_ADMIN")
- }
- if !caps.Contains("MKNOD") {
- t.Fatal("capabilities should contain MKNOD but does not")
- }
-}
diff --git a/vendor/src/github.com/docker/libcontainer/namespaces/unsupported.go b/vendor/src/github.com/docker/libcontainer/namespaces/unsupported.go
index a0653ee8ad..8398b94d7d 100644
--- a/vendor/src/github.com/docker/libcontainer/namespaces/unsupported.go
+++ b/vendor/src/github.com/docker/libcontainer/namespaces/unsupported.go
@@ -7,20 +7,20 @@ import (
"github.com/docker/libcontainer/cgroups"
)
-func Exec(container *libcontainer.Container, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) {
- return -1, libcontainer.ErrUnsupported
+func Exec(container *libcontainer.Config, term Terminal, rootfs, dataPath string, args []string, createCommand CreateCommand, startCallback func()) (int, error) {
+ return -1, ErrUnsupported
}
-func Init(container *libcontainer.Container, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error {
- return libcontainer.ErrUnsupported
+func Init(container *libcontainer.Config, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error {
+ return ErrUnsupported
}
-func InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error {
- return libcontainer.ErrUnsupported
+func InitializeNetworking(container *libcontainer.Config, nspid int, pipe *SyncPipe) error {
+ return ErrUnsupported
}
-func SetupCgroups(container *libcontainer.Container, nspid int) (cgroups.ActiveCgroup, error) {
- return nil, libcontainer.ErrUnsupported
+func SetupCgroups(container *libcontainer.Config, nspid int) (cgroups.ActiveCgroup, error) {
+ return nil, ErrUnsupported
}
func GetNamespaceFlags(namespaces map[string]bool) (flag int) {
diff --git a/vendor/src/github.com/docker/libcontainer/network/loopback.go b/vendor/src/github.com/docker/libcontainer/network/loopback.go
index 218d1959be..46a1fa8c86 100644
--- a/vendor/src/github.com/docker/libcontainer/network/loopback.go
+++ b/vendor/src/github.com/docker/libcontainer/network/loopback.go
@@ -1,19 +1,20 @@
+// +build linux
+
package network
import (
"fmt"
- "github.com/docker/libcontainer"
)
// Loopback is a network strategy that provides a basic loopback device
type Loopback struct {
}
-func (l *Loopback) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error {
+func (l *Loopback) Create(n *Network, nspid int, networkState *NetworkState) error {
return nil
}
-func (l *Loopback) Initialize(config *libcontainer.Network, context libcontainer.Context) error {
+func (l *Loopback) Initialize(config *Network, networkState *NetworkState) error {
if err := SetMtu("lo", config.Mtu); err != nil {
return fmt.Errorf("set lo mtu to %d %s", config.Mtu, err)
}
diff --git a/vendor/src/github.com/docker/libcontainer/network/netns.go b/vendor/src/github.com/docker/libcontainer/network/netns.go
index e8a9188ddb..64544476b8 100644
--- a/vendor/src/github.com/docker/libcontainer/network/netns.go
+++ b/vendor/src/github.com/docker/libcontainer/network/netns.go
@@ -1,3 +1,5 @@
+// +build linux
+
package network
import (
@@ -5,7 +7,6 @@ import (
"os"
"syscall"
- "github.com/docker/libcontainer"
"github.com/dotcloud/docker/pkg/system"
)
@@ -13,17 +14,16 @@ import (
type NetNS struct {
}
-func (v *NetNS) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error {
- context["nspath"] = n.Context["nspath"]
+func (v *NetNS) Create(n *Network, nspid int, networkState *NetworkState) error {
+ networkState.NsPath = n.NsPath
return nil
}
-func (v *NetNS) Initialize(config *libcontainer.Network, context libcontainer.Context) error {
- nspath, exists := context["nspath"]
- if !exists {
- return fmt.Errorf("nspath does not exist in network context")
+func (v *NetNS) Initialize(config *Network, networkState *NetworkState) error {
+ if networkState.NsPath == "" {
+ return fmt.Errorf("nspath is not specified in NetworkState")
}
- f, err := os.OpenFile(nspath, os.O_RDONLY, 0)
+ f, err := os.OpenFile(networkState.NsPath, os.O_RDONLY, 0)
if err != nil {
return fmt.Errorf("failed get network namespace fd: %v", err)
}
diff --git a/vendor/src/github.com/docker/libcontainer/network/network.go b/vendor/src/github.com/docker/libcontainer/network/network.go
index 94cd711d2f..48eeec6047 100644
--- a/vendor/src/github.com/docker/libcontainer/network/network.go
+++ b/vendor/src/github.com/docker/libcontainer/network/network.go
@@ -1,8 +1,11 @@
+// +build linux
+
package network
import (
- "github.com/docker/libcontainer/netlink"
"net"
+
+ "github.com/docker/libcontainer/netlink"
)
func InterfaceUp(name string) error {
diff --git a/vendor/src/github.com/docker/libcontainer/network/stats.go b/vendor/src/github.com/docker/libcontainer/network/stats.go
new file mode 100644
index 0000000000..b69fa91851
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/network/stats.go
@@ -0,0 +1,68 @@
+package network
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+type NetworkStats struct {
+ RxBytes uint64 `json:"rx_bytes,omitempty"`
+ RxPackets uint64 `json:"rx_packets,omitempty"`
+ RxErrors uint64 `json:"rx_errors,omitempty"`
+ RxDropped uint64 `json:"rx_dropped,omitempty"`
+ TxBytes uint64 `json:"tx_bytes,omitempty"`
+ TxPackets uint64 `json:"tx_packets,omitempty"`
+ TxErrors uint64 `json:"tx_errors,omitempty"`
+ TxDropped uint64 `json:"tx_dropped,omitempty"`
+}
+
+// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
+func GetStats(networkState *NetworkState) (NetworkStats, error) {
+ // This can happen if the network runtime information is missing - possible if the container was created by an old version of libcontainer.
+ if networkState.VethHost == "" {
+ return NetworkStats{}, nil
+ }
+ data, err := readSysfsNetworkStats(networkState.VethHost)
+ if err != nil {
+ return NetworkStats{}, err
+ }
+
+ return NetworkStats{
+ RxBytes: data["rx_bytes"],
+ RxPackets: data["rx_packets"],
+ RxErrors: data["rx_errors"],
+ RxDropped: data["rx_dropped"],
+ TxBytes: data["tx_bytes"],
+ TxPackets: data["tx_packets"],
+ TxErrors: data["tx_errors"],
+ TxDropped: data["tx_dropped"],
+ }, nil
+}
+
+// Reads all the statistics available under /sys/class/net/<EthInterface>/statistics as a map with file name as key and data as integers.
+func readSysfsNetworkStats(ethInterface string) (map[string]uint64, error) {
+ out := make(map[string]uint64)
+
+ fullPath := filepath.Join("/sys/class/net", ethInterface, "statistics/")
+ err := filepath.Walk(fullPath, func(path string, _ os.FileInfo, _ error) error {
+ // skip fullPath.
+ if path == fullPath {
+ return nil
+ }
+ base := filepath.Base(path)
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ value, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+ if err != nil {
+ return err
+ }
+ out[base] = value
+ return nil
+ })
+ return out, err
+}
diff --git a/vendor/src/github.com/docker/libcontainer/network/strategy.go b/vendor/src/github.com/docker/libcontainer/network/strategy.go
index 321cf58e3d..be5ec93b71 100644
--- a/vendor/src/github.com/docker/libcontainer/network/strategy.go
+++ b/vendor/src/github.com/docker/libcontainer/network/strategy.go
@@ -1,9 +1,9 @@
+// +build linux
+
package network
import (
"errors"
-
- "github.com/docker/libcontainer"
)
var (
@@ -19,8 +19,8 @@ var strategies = map[string]NetworkStrategy{
// NetworkStrategy represents a specific network configuration for
// a container's networking stack
type NetworkStrategy interface {
- Create(*libcontainer.Network, int, libcontainer.Context) error
- Initialize(*libcontainer.Network, libcontainer.Context) error
+ Create(*Network, int, *NetworkState) error
+ Initialize(*Network, *NetworkState) error
}
// GetStrategy returns the specific network strategy for the
diff --git a/vendor/src/github.com/docker/libcontainer/network/types.go b/vendor/src/github.com/docker/libcontainer/network/types.go
new file mode 100644
index 0000000000..0f1df30e85
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/network/types.go
@@ -0,0 +1,40 @@
+package network
+
+// Network defines configuration for a container's networking stack
+//
+// The network configuration can be omitted from a container causing the
+// container to be setup with the host's networking stack
+type Network struct {
+ // Type sets the networks type, commonly veth and loopback
+ Type string `json:"type,omitempty"`
+
+ // Path to network namespace
+ NsPath string `json:"ns_path,omitempty"`
+
+ // The bridge to use.
+ Bridge string `json:"bridge,omitempty"`
+
+ // Prefix for the veth interfaces.
+ VethPrefix string `json:"veth_prefix,omitempty"`
+
+ // Address contains the IP and mask to set on the network interface
+ Address string `json:"address,omitempty"`
+
+ // Gateway sets the gateway address that is used as the default for the interface
+ Gateway string `json:"gateway,omitempty"`
+
+ // Mtu sets the mtu value for the interface and will be mirrored on both the host and
+ // container's interfaces if a pair is created, specifically in the case of type veth
+ Mtu int `json:"mtu,omitempty"`
+}
+
+// Struct describing the network specific runtime state that will be maintained by libcontainer for all running containers
+// Do not depend on it outside of libcontainer.
+type NetworkState struct {
+ // The name of the veth interface on the Host.
+ VethHost string `json:"veth_host,omitempty"`
+ // The name of the veth interface created inside the container for the child.
+ VethChild string `json:"veth_child,omitempty"`
+ // Net namespace path.
+ NsPath string `json:"ns_path,omitempty"`
+}
diff --git a/vendor/src/github.com/docker/libcontainer/network/veth.go b/vendor/src/github.com/docker/libcontainer/network/veth.go
index 4dad4aa20a..fcafd85ccf 100644
--- a/vendor/src/github.com/docker/libcontainer/network/veth.go
+++ b/vendor/src/github.com/docker/libcontainer/network/veth.go
@@ -1,8 +1,10 @@
+// +build linux
+
package network
import (
"fmt"
- "github.com/docker/libcontainer"
+
"github.com/docker/libcontainer/utils"
)
@@ -14,24 +16,21 @@ type Veth struct {
const defaultDevice = "eth0"
-func (v *Veth) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error {
+func (v *Veth) Create(n *Network, nspid int, networkState *NetworkState) error {
var (
- bridge string
- prefix string
- exists bool
+ bridge = n.Bridge
+ prefix = n.VethPrefix
)
- if bridge, exists = n.Context["bridge"]; !exists {
- return fmt.Errorf("bridge does not exist in network context")
+ if bridge == "" {
+ return fmt.Errorf("bridge is not specified")
}
- if prefix, exists = n.Context["prefix"]; !exists {
- return fmt.Errorf("veth prefix does not exist in network context")
+ if prefix == "" {
+ return fmt.Errorf("veth prefix is not specified")
}
name1, name2, err := createVethPair(prefix)
if err != nil {
return err
}
- context["veth-host"] = name1
- context["veth-child"] = name2
if err := SetInterfaceMaster(name1, bridge); err != nil {
return err
}
@@ -44,16 +43,16 @@ func (v *Veth) Create(n *libcontainer.Network, nspid int, context libcontainer.C
if err := SetInterfaceInNamespacePid(name2, nspid); err != nil {
return err
}
+ networkState.VethHost = name1
+ networkState.VethChild = name2
+
return nil
}
-func (v *Veth) Initialize(config *libcontainer.Network, context libcontainer.Context) error {
- var (
- vethChild string
- exists bool
- )
- if vethChild, exists = context["veth-child"]; !exists {
- return fmt.Errorf("vethChild does not exist in network context")
+func (v *Veth) Initialize(config *Network, networkState *NetworkState) error {
+ var vethChild = networkState.VethChild
+ if vethChild == "" {
+ return fmt.Errorf("vethChild is not specified")
}
if err := InterfaceDown(vethChild); err != nil {
return fmt.Errorf("interface down %s %s", vethChild, err)
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/exec.go b/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
index 5bbfd088d4..c58c30664e 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/exec.go
@@ -19,19 +19,20 @@ var execCommand = cli.Command{
}
func execAction(context *cli.Context) {
- var nspid, exitCode int
+ var exitCode int
container, err := loadContainer()
if err != nil {
log.Fatal(err)
}
- if nspid, err = readPid(); err != nil && !os.IsNotExist(err) {
- log.Fatalf("unable to read pid: %s", err)
+ state, err := libcontainer.GetState(dataPath)
+ if err != nil && !os.IsNotExist(err) {
+ log.Fatalf("unable to read state.json: %s", err)
}
- if nspid > 0 {
- err = namespaces.ExecIn(container, nspid, []string(context.Args()))
+ if state != nil {
+ err = namespaces.ExecIn(container, state, []string(context.Args()))
} else {
term := namespaces.NewTerminal(os.Stdin, os.Stdout, os.Stderr, container.Tty)
exitCode, err = startContainer(container, term, dataPath, []string(context.Args()))
@@ -48,7 +49,7 @@ func execAction(context *cli.Context) {
// error.
//
// Signals sent to the current process will be forwarded to container.
-func startContainer(container *libcontainer.Container, term namespaces.Terminal, dataPath string, args []string) (int, error) {
+func startContainer(container *libcontainer.Config, term namespaces.Terminal, dataPath string, args []string) (int, error) {
var (
cmd *exec.Cmd
sigc = make(chan os.Signal, 10)
@@ -56,7 +57,7 @@ func startContainer(container *libcontainer.Container, term namespaces.Terminal,
signal.Notify(sigc)
- createCommand := func(container *libcontainer.Container, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd {
+ createCommand := func(container *libcontainer.Config, console, rootfs, dataPath, init string, pipe *os.File, args []string) *exec.Cmd {
cmd = namespaces.DefaultCreateCommand(container, console, rootfs, dataPath, init, pipe, args)
if logPath != "" {
cmd.Env = append(cmd.Env, fmt.Sprintf("log=%s", logPath))
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/spec.go b/vendor/src/github.com/docker/libcontainer/nsinit/spec.go
index 24294ff378..beadc9d87a 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/spec.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/spec.go
@@ -30,7 +30,7 @@ func specAction(context *cli.Context) {
}
// returns the container spec in json format.
-func getContainerSpec(container *libcontainer.Container) (string, error) {
+func getContainerSpec(container *libcontainer.Config) (string, error) {
spec, err := json.MarshalIndent(container, "", "\t")
if err != nil {
return "", err
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/stats.go b/vendor/src/github.com/docker/libcontainer/nsinit/stats.go
index ff6a1ce535..eae9833808 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/stats.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/stats.go
@@ -7,7 +7,6 @@ import (
"github.com/codegangsta/cli"
"github.com/docker/libcontainer"
- "github.com/docker/libcontainer/cgroups/fs"
)
var statsCommand = cli.Command{
@@ -22,7 +21,12 @@ func statsAction(context *cli.Context) {
log.Fatal(err)
}
- stats, err := getContainerStats(container)
+ runtimeCkpt, err := libcontainer.GetState(dataPath)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ stats, err := getStats(container, runtimeCkpt)
if err != nil {
log.Fatalf("Failed to get stats - %v\n", err)
}
@@ -31,8 +35,8 @@ func statsAction(context *cli.Context) {
}
// returns the container stats in json format.
-func getContainerStats(container *libcontainer.Container) (string, error) {
- stats, err := fs.GetStats(container.Cgroups)
+func getStats(container *libcontainer.Config, state *libcontainer.State) (string, error) {
+ stats, err := libcontainer.GetStats(container, state)
if err != nil {
return "", err
}
diff --git a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
index bd49434e44..44194d885b 100644
--- a/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
+++ b/vendor/src/github.com/docker/libcontainer/nsinit/utils.go
@@ -2,23 +2,21 @@ package main
import (
"encoding/json"
- "io/ioutil"
"log"
"os"
"path/filepath"
- "strconv"
"github.com/docker/libcontainer"
)
-func loadContainer() (*libcontainer.Container, error) {
+func loadContainer() (*libcontainer.Config, error) {
f, err := os.Open(filepath.Join(dataPath, "container.json"))
if err != nil {
return nil, err
}
defer f.Close()
- var container *libcontainer.Container
+ var container *libcontainer.Config
if err := json.NewDecoder(f).Decode(&container); err != nil {
return nil, err
}
@@ -26,20 +24,6 @@ func loadContainer() (*libcontainer.Container, error) {
return container, nil
}
-func readPid() (int, error) {
- data, err := ioutil.ReadFile(filepath.Join(dataPath, "pid"))
- if err != nil {
- return -1, err
- }
-
- pid, err := strconv.Atoi(string(data))
- if err != nil {
- return -1, err
- }
-
- return pid, nil
-}
-
func openLog(name string) error {
f, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755)
if err != nil {
@@ -51,8 +35,8 @@ func openLog(name string) error {
return nil
}
-func loadContainerFromJson(rawData string) (*libcontainer.Container, error) {
- var container *libcontainer.Container
+func loadContainerFromJson(rawData string) (*libcontainer.Config, error) {
+ var container *libcontainer.Config
if err := json.Unmarshal([]byte(rawData), &container); err != nil {
return nil, err
diff --git a/vendor/src/github.com/docker/libcontainer/sample_configs/README.md b/vendor/src/github.com/docker/libcontainer/sample_configs/README.md
new file mode 100644
index 0000000000..4ccc6cde94
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/sample_configs/README.md
@@ -0,0 +1,5 @@
+These configuration files can be used with `nsinit` to quickly develop, test,
+and experiment with features of libcontainer.
+
+To use one of these configuration files, copy it into your container's rootfs
+and rename it to `container.json` before running `nsinit`.
diff --git a/vendor/src/github.com/docker/libcontainer/sample_configs/apparmor.json b/vendor/src/github.com/docker/libcontainer/sample_configs/apparmor.json
new file mode 100644
index 0000000000..f739df1006
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/sample_configs/apparmor.json
@@ -0,0 +1,196 @@
+{
+ "capabilities": [
+ "CHOWN",
+ "DAC_OVERRIDE",
+ "FOWNER",
+ "MKNOD",
+ "NET_RAW",
+ "SETGID",
+ "SETUID",
+ "SETFCAP",
+ "SETPCAP",
+ "NET_BIND_SERVICE",
+ "SYS_CHROOT",
+ "KILL"
+ ],
+ "cgroups": {
+ "allowed_devices": [
+ {
+ "cgroup_permissions": "m",
+ "major_number": -1,
+ "minor_number": -1,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "m",
+ "major_number": -1,
+ "minor_number": -1,
+ "type": 98
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 5,
+ "minor_number": 1,
+ "path": "/dev/console",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 4,
+ "path": "/dev/tty0",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 4,
+ "minor_number": 1,
+ "path": "/dev/tty1",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 136,
+ "minor_number": -1,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 5,
+ "minor_number": 2,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 10,
+ "minor_number": 200,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 3,
+ "path": "/dev/null",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 5,
+ "path": "/dev/zero",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 7,
+ "path": "/dev/full",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 5,
+ "path": "/dev/tty",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 9,
+ "path": "/dev/urandom",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 8,
+ "path": "/dev/random",
+ "type": 99
+ }
+ ],
+ "name": "docker-koye",
+ "parent": "docker"
+ },
+ "restrict_sys": true,
+ "apparmor_profile": "docker-default",
+ "mount_config": {
+ "device_nodes": [
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 3,
+ "path": "/dev/null",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 5,
+ "path": "/dev/zero",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 7,
+ "path": "/dev/full",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 5,
+ "path": "/dev/tty",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 9,
+ "path": "/dev/urandom",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 8,
+ "path": "/dev/random",
+ "type": 99
+ }
+ ]
+ },
+ "environment": [
+ "HOME=/",
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HOSTNAME=koye",
+ "TERM=xterm"
+ ],
+ "hostname": "koye",
+ "namespaces": {
+ "NEWIPC": true,
+ "NEWNET": true,
+ "NEWNS": true,
+ "NEWPID": true,
+ "NEWUTS": true
+ },
+ "networks": [
+ {
+ "address": "127.0.0.1/0",
+ "gateway": "localhost",
+ "mtu": 1500,
+ "type": "loopback"
+ }
+ ],
+ "tty": true,
+ "user": "daemon"
+}
diff --git a/vendor/src/github.com/docker/libcontainer/container.json b/vendor/src/github.com/docker/libcontainer/sample_configs/attach_to_bridge.json
index da138d173f..0795e6c143 100644
--- a/vendor/src/github.com/docker/libcontainer/container.json
+++ b/vendor/src/github.com/docker/libcontainer/sample_configs/attach_to_bridge.json
@@ -116,71 +116,62 @@
"name": "docker-koye",
"parent": "docker"
},
- "context": {
- "mount_label": "",
- "process_label": "",
- "restrictions": "true"
+ "restrict_sys": true,
+ "mount_config": {
+ "device_nodes": [
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 3,
+ "path": "/dev/null",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 5,
+ "path": "/dev/zero",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 7,
+ "path": "/dev/full",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 5,
+ "path": "/dev/tty",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 9,
+ "path": "/dev/urandom",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 8,
+ "path": "/dev/random",
+ "type": 99
+ }
+ ]
},
- "device_nodes": [
- {
- "cgroup_permissions": "rwm",
- "major_number": 10,
- "minor_number": 229,
- "path": "/dev/fuse",
- "type": 99
- },
- {
- "cgroup_permissions": "rwm",
- "file_mode": 438,
- "major_number": 1,
- "minor_number": 3,
- "path": "/dev/null",
- "type": 99
- },
- {
- "cgroup_permissions": "rwm",
- "file_mode": 438,
- "major_number": 1,
- "minor_number": 5,
- "path": "/dev/zero",
- "type": 99
- },
- {
- "cgroup_permissions": "rwm",
- "file_mode": 438,
- "major_number": 1,
- "minor_number": 7,
- "path": "/dev/full",
- "type": 99
- },
- {
- "cgroup_permissions": "rwm",
- "file_mode": 438,
- "major_number": 5,
- "path": "/dev/tty",
- "type": 99
- },
- {
- "cgroup_permissions": "rwm",
- "file_mode": 438,
- "major_number": 1,
- "minor_number": 9,
- "path": "/dev/urandom",
- "type": 99
- },
- {
- "cgroup_permissions": "rwm",
- "file_mode": 438,
- "major_number": 1,
- "minor_number": 8,
- "path": "/dev/random",
- "type": 99
- }
- ],
"environment": [
"HOME=/",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
- "HOSTNAME=2d388ea3bd92",
+ "HOSTNAME=koye",
"TERM=xterm"
],
"hostname": "koye",
@@ -200,10 +191,8 @@
},
{
"address": "172.17.0.101/16",
- "context": {
- "bridge": "docker0",
- "prefix": "veth"
- },
+ "bridge": "docker0",
+ "veth_prefix": "veth",
"gateway": "172.17.42.1",
"mtu": 1500,
"type": "veth"
diff --git a/vendor/src/github.com/docker/libcontainer/sample_configs/minimal.json b/vendor/src/github.com/docker/libcontainer/sample_configs/minimal.json
new file mode 100644
index 0000000000..c08c996797
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/sample_configs/minimal.json
@@ -0,0 +1,195 @@
+{
+ "capabilities": [
+ "CHOWN",
+ "DAC_OVERRIDE",
+ "FOWNER",
+ "MKNOD",
+ "NET_RAW",
+ "SETGID",
+ "SETUID",
+ "SETFCAP",
+ "SETPCAP",
+ "NET_BIND_SERVICE",
+ "SYS_CHROOT",
+ "KILL"
+ ],
+ "cgroups": {
+ "allowed_devices": [
+ {
+ "cgroup_permissions": "m",
+ "major_number": -1,
+ "minor_number": -1,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "m",
+ "major_number": -1,
+ "minor_number": -1,
+ "type": 98
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 5,
+ "minor_number": 1,
+ "path": "/dev/console",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 4,
+ "path": "/dev/tty0",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 4,
+ "minor_number": 1,
+ "path": "/dev/tty1",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 136,
+ "minor_number": -1,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 5,
+ "minor_number": 2,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 10,
+ "minor_number": 200,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 3,
+ "path": "/dev/null",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 5,
+ "path": "/dev/zero",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 7,
+ "path": "/dev/full",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 5,
+ "path": "/dev/tty",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 9,
+ "path": "/dev/urandom",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 8,
+ "path": "/dev/random",
+ "type": 99
+ }
+ ],
+ "name": "docker-koye",
+ "parent": "docker"
+ },
+ "restrict_sys": true,
+ "mount_config": {
+ "device_nodes": [
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 3,
+ "path": "/dev/null",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 5,
+ "path": "/dev/zero",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 7,
+ "path": "/dev/full",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 5,
+ "path": "/dev/tty",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 9,
+ "path": "/dev/urandom",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 8,
+ "path": "/dev/random",
+ "type": 99
+ }
+ ]
+ },
+ "environment": [
+ "HOME=/",
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HOSTNAME=koye",
+ "TERM=xterm"
+ ],
+ "hostname": "koye",
+ "namespaces": {
+ "NEWIPC": true,
+ "NEWNET": true,
+ "NEWNS": true,
+ "NEWPID": true,
+ "NEWUTS": true
+ },
+ "networks": [
+ {
+ "address": "127.0.0.1/0",
+ "gateway": "localhost",
+ "mtu": 1500,
+ "type": "loopback"
+ }
+ ],
+ "tty": true,
+ "user": "daemon"
+}
diff --git a/vendor/src/github.com/docker/libcontainer/sample_configs/selinux.json b/vendor/src/github.com/docker/libcontainer/sample_configs/selinux.json
new file mode 100644
index 0000000000..ce383e2cc2
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/sample_configs/selinux.json
@@ -0,0 +1,197 @@
+{
+ "capabilities": [
+ "CHOWN",
+ "DAC_OVERRIDE",
+ "FOWNER",
+ "MKNOD",
+ "NET_RAW",
+ "SETGID",
+ "SETUID",
+ "SETFCAP",
+ "SETPCAP",
+ "NET_BIND_SERVICE",
+ "SYS_CHROOT",
+ "KILL"
+ ],
+ "cgroups": {
+ "allowed_devices": [
+ {
+ "cgroup_permissions": "m",
+ "major_number": -1,
+ "minor_number": -1,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "m",
+ "major_number": -1,
+ "minor_number": -1,
+ "type": 98
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 5,
+ "minor_number": 1,
+ "path": "/dev/console",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 4,
+ "path": "/dev/tty0",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 4,
+ "minor_number": 1,
+ "path": "/dev/tty1",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 136,
+ "minor_number": -1,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 5,
+ "minor_number": 2,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "major_number": 10,
+ "minor_number": 200,
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 3,
+ "path": "/dev/null",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 5,
+ "path": "/dev/zero",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 7,
+ "path": "/dev/full",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 5,
+ "path": "/dev/tty",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 9,
+ "path": "/dev/urandom",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 8,
+ "path": "/dev/random",
+ "type": 99
+ }
+ ],
+ "name": "docker-koye",
+ "parent": "docker"
+ },
+ "restrict_sys": true,
+ "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c164,c475",
+ "mount_config": {
+ "mount_label": "system_u:system_r:svirt_lxc_net_t:s0:c164,c475",
+ "device_nodes": [
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 3,
+ "path": "/dev/null",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 5,
+ "path": "/dev/zero",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 7,
+ "path": "/dev/full",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 5,
+ "path": "/dev/tty",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 9,
+ "path": "/dev/urandom",
+ "type": 99
+ },
+ {
+ "cgroup_permissions": "rwm",
+ "file_mode": 438,
+ "major_number": 1,
+ "minor_number": 8,
+ "path": "/dev/random",
+ "type": 99
+ }
+ ]
+ },
+ "environment": [
+ "HOME=/",
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HOSTNAME=koye",
+ "TERM=xterm"
+ ],
+ "hostname": "koye",
+ "namespaces": {
+ "NEWIPC": true,
+ "NEWNET": true,
+ "NEWNS": true,
+ "NEWPID": true,
+ "NEWUTS": true
+ },
+ "networks": [
+ {
+ "address": "127.0.0.1/0",
+ "gateway": "localhost",
+ "mtu": 1500,
+ "type": "loopback"
+ }
+ ],
+ "tty": true,
+ "user": "daemon"
+}
diff --git a/vendor/src/github.com/docker/libcontainer/security/capabilities/capabilities.go b/vendor/src/github.com/docker/libcontainer/security/capabilities/capabilities.go
index ef872178f6..21e4de26e0 100644
--- a/vendor/src/github.com/docker/libcontainer/security/capabilities/capabilities.go
+++ b/vendor/src/github.com/docker/libcontainer/security/capabilities/capabilities.go
@@ -3,7 +3,6 @@ package capabilities
import (
"os"
- "github.com/docker/libcontainer"
"github.com/syndtr/gocapability/capability"
)
@@ -11,13 +10,13 @@ const allCapabilityTypes = capability.CAPS | capability.BOUNDS
// DropBoundingSet drops the capability bounding set to those specified in the
// container configuration.
-func DropBoundingSet(container *libcontainer.Container) error {
+func DropBoundingSet(capabilities []string) error {
c, err := capability.NewPid(os.Getpid())
if err != nil {
return err
}
- keep := getEnabledCapabilities(container)
+ keep := getEnabledCapabilities(capabilities)
c.Clear(capability.BOUNDS)
c.Set(capability.BOUNDS, keep...)
@@ -29,13 +28,13 @@ func DropBoundingSet(container *libcontainer.Container) error {
}
// DropCapabilities drops all capabilities for the current process expect those specified in the container configuration.
-func DropCapabilities(container *libcontainer.Container) error {
+func DropCapabilities(capList []string) error {
c, err := capability.NewPid(os.Getpid())
if err != nil {
return err
}
- keep := getEnabledCapabilities(container)
+ keep := getEnabledCapabilities(capList)
c.Clear(allCapabilityTypes)
c.Set(allCapabilityTypes, keep...)
@@ -46,10 +45,10 @@ func DropCapabilities(container *libcontainer.Container) error {
}
// getEnabledCapabilities returns the capabilities that should not be dropped by the container.
-func getEnabledCapabilities(container *libcontainer.Container) []capability.Cap {
+func getEnabledCapabilities(capList []string) []capability.Cap {
keep := []capability.Cap{}
- for _, capability := range container.Capabilities {
- if c := libcontainer.GetCapability(capability); c != nil {
+ for _, capability := range capList {
+ if c := GetCapability(capability); c != nil {
keep = append(keep, c.Value)
}
}
diff --git a/vendor/src/github.com/docker/libcontainer/security/capabilities/types.go b/vendor/src/github.com/docker/libcontainer/security/capabilities/types.go
new file mode 100644
index 0000000000..feb38e3338
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/security/capabilities/types.go
@@ -0,0 +1,90 @@
+package capabilities
+
+import "github.com/syndtr/gocapability/capability"
+
+type (
+ CapabilityMapping struct {
+ Key string `json:"key,omitempty"`
+ Value capability.Cap `json:"value,omitempty"`
+ }
+ Capabilities []*CapabilityMapping
+)
+
+func (c *CapabilityMapping) String() string {
+ return c.Key
+}
+
+func GetCapability(key string) *CapabilityMapping {
+ for _, capp := range capabilityList {
+ if capp.Key == key {
+ cpy := *capp
+ return &cpy
+ }
+ }
+ return nil
+}
+
+func GetAllCapabilities() []string {
+ output := make([]string, len(capabilityList))
+ for i, capability := range capabilityList {
+ output[i] = capability.String()
+ }
+ return output
+}
+
+// contains reports whether the specified capability key is
+// present in the slice.
+func (c Capabilities) contains(capp string) bool {
+ return c.get(capp) != nil
+}
+
+func (c Capabilities) get(capp string) *CapabilityMapping {
+ for _, cap := range c {
+ if cap.Key == capp {
+ return cap
+ }
+ }
+ return nil
+}
+
+var capabilityList = Capabilities{
+ {Key: "SETPCAP", Value: capability.CAP_SETPCAP},
+ {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE},
+ {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO},
+ {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT},
+ {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN},
+ {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE},
+ {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE},
+ {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME},
+ {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG},
+ {Key: "MKNOD", Value: capability.CAP_MKNOD},
+ {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE},
+ {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL},
+ {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE},
+ {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN},
+ {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN},
+ {Key: "SYSLOG", Value: capability.CAP_SYSLOG},
+ {Key: "SETUID", Value: capability.CAP_SETUID},
+ {Key: "SETGID", Value: capability.CAP_SETGID},
+ {Key: "CHOWN", Value: capability.CAP_CHOWN},
+ {Key: "NET_RAW", Value: capability.CAP_NET_RAW},
+ {Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE},
+ {Key: "FOWNER", Value: capability.CAP_FOWNER},
+ {Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH},
+ {Key: "FSETID", Value: capability.CAP_FSETID},
+ {Key: "KILL", Value: capability.CAP_KILL},
+ {Key: "SETGID", Value: capability.CAP_SETGID},
+ {Key: "SETUID", Value: capability.CAP_SETUID},
+ {Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE},
+ {Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE},
+ {Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST},
+ {Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK},
+ {Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER},
+ {Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT},
+ {Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE},
+ {Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT},
+ {Key: "LEASE", Value: capability.CAP_LEASE},
+ {Key: "SETFCAP", Value: capability.CAP_SETFCAP},
+ {Key: "WAKE_ALARM", Value: capability.CAP_WAKE_ALARM},
+ {Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND},
+}
diff --git a/vendor/src/github.com/docker/libcontainer/security/capabilities/types_test.go b/vendor/src/github.com/docker/libcontainer/security/capabilities/types_test.go
new file mode 100644
index 0000000000..06e8a2b01c
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/security/capabilities/types_test.go
@@ -0,0 +1,19 @@
+package capabilities
+
+import (
+ "testing"
+)
+
+func TestCapabilitiesContains(t *testing.T) {
+ caps := Capabilities{
+ GetCapability("MKNOD"),
+ GetCapability("SETPCAP"),
+ }
+
+ if caps.contains("SYS_ADMIN") {
+ t.Fatal("capabilities should not contain SYS_ADMIN")
+ }
+ if !caps.contains("MKNOD") {
+ t.Fatal("capabilities should contain MKNOD but does not")
+ }
+}
diff --git a/vendor/src/github.com/docker/libcontainer/state.go b/vendor/src/github.com/docker/libcontainer/state.go
new file mode 100644
index 0000000000..a055bb0ffe
--- /dev/null
+++ b/vendor/src/github.com/docker/libcontainer/state.go
@@ -0,0 +1,55 @@
+package libcontainer
+
+import (
+ "encoding/json"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/libcontainer/network"
+)
+
+// State represents a running container's state
+type State struct {
+ // InitPid is the init process id in the parent namespace
+ InitPid int `json:"init_pid,omitempty"`
+ // InitStartTime is the init process start time
+ InitStartTime string `json:"init_start_time,omitempty"`
+ // Network runtime state.
+ NetworkState network.NetworkState `json:"network_state,omitempty"`
+}
+
+// The name of the runtime state file
+const stateFile = "state.json"
+
+// SaveState writes the container's runtime state to a state.json file
+// in the specified path
+func SaveState(basePath string, state *State) error {
+ f, err := os.Create(filepath.Join(basePath, stateFile))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return json.NewEncoder(f).Encode(state)
+}
+
+// GetState reads the state.json file for a running container
+func GetState(basePath string) (*State, error) {
+ f, err := os.Open(filepath.Join(basePath, stateFile))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var state *State
+ if err := json.NewDecoder(f).Decode(&state); err != nil {
+ return nil, err
+ }
+
+ return state, nil
+}
+
+// DeleteState deletes the state.json file
+func DeleteState(basePath string) error {
+ return os.Remove(filepath.Join(basePath, stateFile))
+}
diff --git a/vendor/src/github.com/docker/libcontainer/types.go b/vendor/src/github.com/docker/libcontainer/types.go
index 834201036f..5095dca66a 100644
--- a/vendor/src/github.com/docker/libcontainer/types.go
+++ b/vendor/src/github.com/docker/libcontainer/types.go
@@ -1,165 +1,11 @@
package libcontainer
import (
- "errors"
-
- "github.com/syndtr/gocapability/capability"
-)
-
-var (
- ErrUnkownNamespace = errors.New("Unknown namespace")
- ErrUnkownCapability = errors.New("Unknown capability")
- ErrUnsupported = errors.New("Unsupported method")
-)
-
-type Mounts []Mount
-
-func (s Mounts) OfType(t string) Mounts {
- out := Mounts{}
- for _, m := range s {
- if m.Type == t {
- out = append(out, m)
- }
- }
- return out
-}
-
-type Mount struct {
- Type string `json:"type,omitempty"`
- Source string `json:"source,omitempty"` // Source path, in the host namespace
- Destination string `json:"destination,omitempty"` // Destination path, in the container
- Writable bool `json:"writable,omitempty"`
- Private bool `json:"private,omitempty"`
-}
-
-// namespaceList is used to convert the libcontainer types
-// into the names of the files located in /proc/<pid>/ns/* for
-// each namespace
-var (
- namespaceList = Namespaces{}
-
- capabilityList = Capabilities{
- {Key: "SETPCAP", Value: capability.CAP_SETPCAP},
- {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE},
- {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO},
- {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT},
- {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN},
- {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE},
- {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE},
- {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME},
- {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG},
- {Key: "MKNOD", Value: capability.CAP_MKNOD},
- {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE},
- {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL},
- {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE},
- {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN},
- {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN},
- {Key: "SYSLOG", Value: capability.CAP_SYSLOG},
- {Key: "SETUID", Value: capability.CAP_SETUID},
- {Key: "SETGID", Value: capability.CAP_SETGID},
- {Key: "CHOWN", Value: capability.CAP_CHOWN},
- {Key: "NET_RAW", Value: capability.CAP_NET_RAW},
- {Key: "DAC_OVERRIDE", Value: capability.CAP_DAC_OVERRIDE},
- {Key: "FOWNER", Value: capability.CAP_FOWNER},
- {Key: "DAC_READ_SEARCH", Value: capability.CAP_DAC_READ_SEARCH},
- {Key: "FSETID", Value: capability.CAP_FSETID},
- {Key: "KILL", Value: capability.CAP_KILL},
- {Key: "SETGID", Value: capability.CAP_SETGID},
- {Key: "SETUID", Value: capability.CAP_SETUID},
- {Key: "LINUX_IMMUTABLE", Value: capability.CAP_LINUX_IMMUTABLE},
- {Key: "NET_BIND_SERVICE", Value: capability.CAP_NET_BIND_SERVICE},
- {Key: "NET_BROADCAST", Value: capability.CAP_NET_BROADCAST},
- {Key: "IPC_LOCK", Value: capability.CAP_IPC_LOCK},
- {Key: "IPC_OWNER", Value: capability.CAP_IPC_OWNER},
- {Key: "SYS_CHROOT", Value: capability.CAP_SYS_CHROOT},
- {Key: "SYS_PTRACE", Value: capability.CAP_SYS_PTRACE},
- {Key: "SYS_BOOT", Value: capability.CAP_SYS_BOOT},
- {Key: "LEASE", Value: capability.CAP_LEASE},
- {Key: "SETFCAP", Value: capability.CAP_SETFCAP},
- {Key: "WAKE_ALARM", Value: capability.CAP_WAKE_ALARM},
- {Key: "BLOCK_SUSPEND", Value: capability.CAP_BLOCK_SUSPEND},
- }
-)
-
-type (
- Namespace struct {
- Key string `json:"key,omitempty"`
- Value int `json:"value,omitempty"`
- File string `json:"file,omitempty"`
- }
- Namespaces []*Namespace
+ "github.com/docker/libcontainer/cgroups"
+ "github.com/docker/libcontainer/network"
)
-func (ns *Namespace) String() string {
- return ns.Key
-}
-
-func GetNamespace(key string) *Namespace {
- for _, ns := range namespaceList {
- if ns.Key == key {
- cpy := *ns
- return &cpy
- }
- }
- return nil
-}
-
-// Contains returns true if the specified Namespace is
-// in the slice
-func (n Namespaces) Contains(ns string) bool {
- return n.Get(ns) != nil
-}
-
-func (n Namespaces) Get(ns string) *Namespace {
- for _, nsp := range n {
- if nsp != nil && nsp.Key == ns {
- return nsp
- }
- }
- return nil
-}
-
-type (
- Capability struct {
- Key string `json:"key,omitempty"`
- Value capability.Cap `json:"value,omitempty"`
- }
- Capabilities []*Capability
-)
-
-func (c *Capability) String() string {
- return c.Key
-}
-
-func GetCapability(key string) *Capability {
- for _, capp := range capabilityList {
- if capp.Key == key {
- cpy := *capp
- return &cpy
- }
- }
- return nil
-}
-
-func GetAllCapabilities() []string {
- output := make([]string, len(capabilityList))
- for i, capability := range capabilityList {
- output[i] = capability.String()
- }
- return output
-}
-
-// Contains returns true if the specified Capability is
-// in the slice
-func (c Capabilities) Contains(capp string) bool {
- return c.Get(capp) != nil
-}
-
-func (c Capabilities) Get(capp string) *Capability {
- for _, cap := range c {
- if cap.Key == capp {
- return cap
- }
- }
- return nil
+type ContainerStats struct {
+	NetworkStats network.NetworkStats `json:"network_stats,omitempty"`
+	CgroupStats  *cgroups.Stats       `json:"cgroup_stats,omitempty"`
}
diff --git a/vendor/src/github.com/gorilla/context/context.go b/vendor/src/github.com/gorilla/context/context.go
index a7f7d85bb4..81cb128b19 100644
--- a/vendor/src/github.com/gorilla/context/context.go
+++ b/vendor/src/github.com/gorilla/context/context.go
@@ -30,9 +30,10 @@ func Set(r *http.Request, key, val interface{}) {
// Get returns a value stored for a given key in a given request.
func Get(r *http.Request, key interface{}) interface{} {
mutex.RLock()
- if data[r] != nil {
+ if ctx := data[r]; ctx != nil {
+ value := ctx[key]
mutex.RUnlock()
- return data[r][key]
+ return value
}
mutex.RUnlock()
return nil
@@ -54,20 +55,28 @@ func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
func GetAll(r *http.Request) map[interface{}]interface{} {
mutex.RLock()
if context, ok := data[r]; ok {
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
mutex.RUnlock()
- return context
+ return result
}
mutex.RUnlock()
return nil
}
-// GetAllOk returns all stored values for the request as a map. It returns not
-// ok if the request was never registered.
+// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
+// the request was registered.
func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
mutex.RLock()
context, ok := data[r]
+ result := make(map[interface{}]interface{}, len(context))
+ for k, v := range context {
+ result[k] = v
+ }
mutex.RUnlock()
- return context, ok
+ return result, ok
}
// Delete removes a value stored for a given key in a given request.
diff --git a/vendor/src/github.com/tchap/go-patricia/.gitignore b/vendor/src/github.com/tchap/go-patricia/.gitignore
new file mode 100644
index 0000000000..b3971c067a
--- /dev/null
+++ b/vendor/src/github.com/tchap/go-patricia/.gitignore
@@ -0,0 +1,25 @@
+# Swap files.
+*.swp
+
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/src/github.com/tchap/go-patricia/AUTHORS b/vendor/src/github.com/tchap/go-patricia/AUTHORS
new file mode 100644
index 0000000000..e640b0bf51
--- /dev/null
+++ b/vendor/src/github.com/tchap/go-patricia/AUTHORS
@@ -0,0 +1,3 @@
+This is the complete list of go-patricia copyright holders:
+
+Ondřej Kupka <ondra.cap@gmail.com>
diff --git a/vendor/src/github.com/tchap/go-patricia/LICENSE b/vendor/src/github.com/tchap/go-patricia/LICENSE
new file mode 100644
index 0000000000..e50d398e98
--- /dev/null
+++ b/vendor/src/github.com/tchap/go-patricia/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 The AUTHORS
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/src/github.com/tchap/go-patricia/README.md b/vendor/src/github.com/tchap/go-patricia/README.md
new file mode 100644
index 0000000000..11ee4612d3
--- /dev/null
+++ b/vendor/src/github.com/tchap/go-patricia/README.md
@@ -0,0 +1,120 @@
+# go-patricia #
+
+**Documentation**: [GoDoc](http://godoc.org/github.com/tchap/go-patricia/patricia)<br />
+**Build Status**: [![Build
+Status](https://drone.io/github.com/tchap/go-patricia/status.png)](https://drone.io/github.com/tchap/go-patricia/latest)<br />
+**Test Coverage**: [![Coverage
+Status](https://coveralls.io/repos/tchap/go-patricia/badge.png)](https://coveralls.io/r/tchap/go-patricia)
+
+## About ##
+
+A generic patricia trie (also called radix tree) implemented in Go (Golang).
+
+The patricia trie as implemented in this library enables fast visiting of items
+in some particular ways:
+
+1. visit all items saved in the tree,
+2. visit all items matching particular prefix (visit subtree), or
+3. given a string, visit all items matching some prefix of that string.
+
+`[]byte` type is used for keys, `interface{}` for values.
+
+`Trie` is not thread safe. Synchronize the access yourself.
+
+### State of the Project ###
+
+Apparently some people are using this, so the API should not change often.
+Any ideas on how to make the library better are still welcome.
+
+More (unit) testing would be cool as well...
+
+## Usage ##
+
+Import the package from GitHub first.
+
+```go
+import "github.com/tchap/go-patricia/patricia"
+```
+
+You can as well use gopkg.in thingie:
+
+```go
+import "gopkg.in/tchap/go-patricia.v1/patricia"
+```
+
+Then you can start having fun.
+
+```go
+printItem := func(prefix patricia.Prefix, item patricia.Item) error {
+ fmt.Printf("%q: %v\n", prefix, item)
+ return nil
+}
+
+// Create a new tree.
+trie := NewTrie()
+
+// Insert some items.
+trie.Insert(Prefix("Pepa Novak"), 1)
+trie.Insert(Prefix("Pepa Sindelar"), 2)
+trie.Insert(Prefix("Karel Macha"), 3)
+trie.Insert(Prefix("Karel Hynek Macha"), 4)
+
+// Just check if some things are present in the tree.
+key := Prefix("Pepa Novak")
+fmt.Printf("%q present? %v\n", key, trie.Match(key))
+// "Pepa Novak" present? true
+key = Prefix("Karel")
+fmt.Printf("Anybody called %q here? %v\n", key, trie.MatchSubtree(key))
+// Anybody called "Karel" here? true
+
+// Walk the tree.
+trie.Visit(printItem)
+// "Pepa Novak": 1
+// "Pepa Sindelar": 2
+// "Karel Macha": 3
+// "Karel Hynek Macha": 4
+
+// Walk a subtree.
+trie.VisitSubtree(Prefix("Pepa"), printItem)
+// "Pepa Novak": 1
+// "Pepa Sindelar": 2
+
+// Modify an item, then fetch it from the tree.
+trie.Set(Prefix("Karel Hynek Macha"), 10)
+key = Prefix("Karel Hynek Macha")
+fmt.Printf("%q: %v\n", key, trie.Get(key))
+// "Karel Hynek Macha": 10
+
+// Walk prefixes.
+prefix := Prefix("Karel Hynek Macha je kouzelnik")
+trie.VisitPrefixes(prefix, printItem)
+// "Karel Hynek Macha": 10
+
+// Delete some items.
+trie.Delete(Prefix("Pepa Novak"))
+trie.Delete(Prefix("Karel Macha"))
+
+// Walk again.
+trie.Visit(printItem)
+// "Pepa Sindelar": 2
+// "Karel Hynek Macha": 10
+
+// Delete a subtree.
+trie.DeleteSubtree(Prefix("Pepa"))
+
+// Print what is left.
+trie.Visit(printItem)
+// "Karel Hynek Macha": 10
+```
+
+## License ##
+
+MIT, check the `LICENSE` file.
+
+[![Gittip
+Badge](http://img.shields.io/gittip/alanhamlett.png)](https://www.gittip.com/tchap/
+"Gittip Badge")
+
+[![Bitdeli
+Badge](https://d2weczhvl823v0.cloudfront.net/tchap/go-patricia/trend.png)](https://bitdeli.com/free
+"Bitdeli Badge")
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/children.go b/vendor/src/github.com/tchap/go-patricia/patricia/children.go
new file mode 100644
index 0000000000..07d3326335
--- /dev/null
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/children.go
@@ -0,0 +1,230 @@
+// Copyright (c) 2014 The go-patricia AUTHORS
+//
+// Use of this source code is governed by The MIT License
+// that can be found in the LICENSE file.
+
+package patricia
+
+// Max prefix length that is kept in a single trie node.
+var MaxPrefixPerNode = 10
+
+// Max children to keep in a node in the sparse mode.
+const MaxChildrenPerSparseNode = 8
+
+type childList interface {
+ length() int
+ head() *Trie
+ add(child *Trie) childList
+ replace(b byte, child *Trie)
+ remove(child *Trie)
+ next(b byte) *Trie
+ walk(prefix *Prefix, visitor VisitorFunc) error
+}
+
+type sparseChildList struct {
+ children []*Trie
+}
+
+func newSparseChildList() childList {
+ return &sparseChildList{
+ children: make([]*Trie, 0, MaxChildrenPerSparseNode),
+ }
+}
+
+func (list *sparseChildList) length() int {
+ return len(list.children)
+}
+
+func (list *sparseChildList) head() *Trie {
+ return list.children[0]
+}
+
+func (list *sparseChildList) add(child *Trie) childList {
+ // Search for an empty spot and insert the child if possible.
+ if len(list.children) != cap(list.children) {
+ list.children = append(list.children, child)
+ return list
+ }
+
+ // Otherwise we have to transform to the dense list type.
+ return newDenseChildList(list, child)
+}
+
+func (list *sparseChildList) replace(b byte, child *Trie) {
+ // Seek the child and replace it.
+ for i, node := range list.children {
+ if node.prefix[0] == b {
+ list.children[i] = child
+ return
+ }
+ }
+}
+
+func (list *sparseChildList) remove(child *Trie) {
+ for i, node := range list.children {
+ if node.prefix[0] == child.prefix[0] {
+ list.children = append(list.children[:i], list.children[i+1:]...)
+ return
+ }
+ }
+
+ // This is not supposed to be reached.
+ panic("removing non-existent child")
+}
+
+func (list *sparseChildList) next(b byte) *Trie {
+ for _, child := range list.children {
+ if child.prefix[0] == b {
+ return child
+ }
+ }
+ return nil
+}
+
+func (list *sparseChildList) walk(prefix *Prefix, visitor VisitorFunc) error {
+ for _, child := range list.children {
+ *prefix = append(*prefix, child.prefix...)
+ if child.item != nil {
+ err := visitor(*prefix, child.item)
+ if err != nil {
+ if err == SkipSubtree {
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ continue
+ }
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ return err
+ }
+ }
+
+ err := child.children.walk(prefix, visitor)
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type denseChildList struct {
+ min int
+ max int
+ children []*Trie
+}
+
+func newDenseChildList(list *sparseChildList, child *Trie) childList {
+ var (
+ min int = 255
+ max int = 0
+ )
+ for _, child := range list.children {
+ b := int(child.prefix[0])
+ if b < min {
+ min = b
+ }
+ if b > max {
+ max = b
+ }
+ }
+
+ b := int(child.prefix[0])
+ if b < min {
+ min = b
+ }
+ if b > max {
+ max = b
+ }
+
+ children := make([]*Trie, max-min+1)
+ for _, child := range list.children {
+ children[int(child.prefix[0])-min] = child
+ }
+ children[int(child.prefix[0])-min] = child
+
+ return &denseChildList{min, max, children}
+}
+
+func (list *denseChildList) length() int {
+ return list.max - list.min + 1
+}
+
+func (list *denseChildList) head() *Trie {
+ return list.children[0]
+}
+
+func (list *denseChildList) add(child *Trie) childList {
+ b := int(child.prefix[0])
+
+ switch {
+ case list.min <= b && b <= list.max:
+ if list.children[b-list.min] != nil {
+ panic("dense child list collision detected")
+ }
+ list.children[b-list.min] = child
+
+ case b < list.min:
+ children := make([]*Trie, list.max-b+1)
+ children[0] = child
+ copy(children[list.min-b:], list.children)
+ list.children = children
+ list.min = b
+
+ default: // b > list.max
+ children := make([]*Trie, b-list.min+1)
+ children[b-list.min] = child
+ copy(children, list.children)
+ list.children = children
+ list.max = b
+ }
+
+ return list
+}
+
+func (list *denseChildList) replace(b byte, child *Trie) {
+ list.children[int(b)-list.min] = nil
+ list.children[int(child.prefix[0])-list.min] = child
+}
+
+func (list *denseChildList) remove(child *Trie) {
+ i := int(child.prefix[0]) - list.min
+ if list.children[i] == nil {
+ // This is not supposed to be reached.
+ panic("removing non-existent child")
+ }
+ list.children[i] = nil
+}
+
+func (list *denseChildList) next(b byte) *Trie {
+ i := int(b)
+ if i < list.min || list.max < i {
+ return nil
+ }
+ return list.children[i-list.min]
+}
+
+func (list *denseChildList) walk(prefix *Prefix, visitor VisitorFunc) error {
+ for _, child := range list.children {
+ if child == nil {
+ continue
+ }
+ *prefix = append(*prefix, child.prefix...)
+ if child.item != nil {
+ if err := visitor(*prefix, child.item); err != nil {
+ if err == SkipSubtree {
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ continue
+ }
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ return err
+ }
+ }
+
+ err := child.children.walk(prefix, visitor)
+ *prefix = (*prefix)[:len(*prefix)-len(child.prefix)]
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia.go
new file mode 100644
index 0000000000..8fcbcdf426
--- /dev/null
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia.go
@@ -0,0 +1,432 @@
+// Copyright (c) 2014 The go-patricia AUTHORS
+//
+// Use of this source code is governed by The MIT License
+// that can be found in the LICENSE file.
+
+package patricia
+
+import (
+ "errors"
+)
+
+//------------------------------------------------------------------------------
+// Trie
+//------------------------------------------------------------------------------
+
+type (
+ Prefix []byte
+ Item interface{}
+ VisitorFunc func(prefix Prefix, item Item) error
+)
+
+// Trie is a generic patricia trie that allows fast retrieval of items by prefix
+// and other funky stuff.
+//
+// Trie is not thread-safe.
+type Trie struct {
+ prefix Prefix
+ item Item
+
+ children childList
+}
+
+// Public API ------------------------------------------------------------------
+
+// Trie constructor.
+func NewTrie() *Trie {
+ return &Trie{
+ children: newSparseChildList(),
+ }
+}
+
+// Item returns the item stored in the root of this trie.
+func (trie *Trie) Item() Item {
+ return trie.item
+}
+
+// Insert inserts a new item into the trie using the given prefix. Insert does
+// not replace existing items. It returns false if an item was already in place.
+func (trie *Trie) Insert(key Prefix, item Item) (inserted bool) {
+ return trie.put(key, item, false)
+}
+
+// Set works much like Insert, but it always sets the item, possibly replacing
+// the item previously inserted.
+func (trie *Trie) Set(key Prefix, item Item) {
+ trie.put(key, item, true)
+}
+
+// Get returns the item located at key.
+//
+// This method is a bit dangerous, because Get can as well end up in an internal
+// node that is not really representing any user-defined value. So when nil is
+// a valid value being used, it is not possible to tell if the value was inserted
+// into the tree by the user or not. A possible workaround for this is not to use
+// nil interface as a valid value, even using zero value of any type is enough
+// to prevent this bad behaviour.
+func (trie *Trie) Get(key Prefix) (item Item) {
+ _, node, found, leftover := trie.findSubtree(key)
+ if !found || len(leftover) != 0 {
+ return nil
+ }
+ return node.item
+}
+
+// Match returns what Get(prefix) != nil would return. The same warning as for
+// Get applies here as well.
+func (trie *Trie) Match(prefix Prefix) (matchedExactly bool) {
+ return trie.Get(prefix) != nil
+}
+
+// MatchSubtree returns true when there is a subtree representing extensions
+// to key, that is if there are any keys in the tree which have key as prefix.
+func (trie *Trie) MatchSubtree(key Prefix) (matched bool) {
+ _, _, matched, _ = trie.findSubtree(key)
+ return
+}
+
+// Visit calls visitor on every node containing a non-nil item.
+//
+// If an error is returned from visitor, the function stops visiting the tree
+// and returns that error, unless it is a special error - SkipSubtree. In that
+// case Visit skips the subtree represented by the current node and continues
+// elsewhere.
+func (trie *Trie) Visit(visitor VisitorFunc) error {
+ return trie.walk(nil, visitor)
+}
+
+// VisitSubtree works much like Visit, but it only visits nodes matching prefix.
+func (trie *Trie) VisitSubtree(prefix Prefix, visitor VisitorFunc) error {
+ // Nil prefix not allowed.
+ if prefix == nil {
+ panic(ErrNilPrefix)
+ }
+
+ // Empty trie must be handled explicitly.
+ if trie.prefix == nil {
+ return nil
+ }
+
+ // Locate the relevant subtree.
+ _, root, found, leftover := trie.findSubtree(prefix)
+ if !found {
+ return nil
+ }
+ prefix = append(prefix, leftover...)
+
+ // Visit it.
+ return root.walk(prefix, visitor)
+}
+
+// VisitPrefixes visits only nodes that represent prefixes of key.
+// To say the obvious, returning SkipSubtree from visitor makes no sense here.
+func (trie *Trie) VisitPrefixes(key Prefix, visitor VisitorFunc) error {
+ // Nil key not allowed.
+ if key == nil {
+ panic(ErrNilPrefix)
+ }
+
+ // Empty trie must be handled explicitly.
+ if trie.prefix == nil {
+ return nil
+ }
+
+ // Walk the path matching key prefixes.
+ node := trie
+ prefix := key
+ offset := 0
+ for {
+ // Compute what part of prefix matches.
+ common := node.longestCommonPrefixLength(key)
+ key = key[common:]
+ offset += common
+
+ // Partial match means that there is no subtree matching prefix.
+ if common < len(node.prefix) {
+ return nil
+ }
+
+ // Call the visitor.
+ if item := node.item; item != nil {
+ if err := visitor(prefix[:offset], item); err != nil {
+ return err
+ }
+ }
+
+ if len(key) == 0 {
+ // This node represents key, we are finished.
+ return nil
+ }
+
+ // There is some key suffix left, move to the children.
+ child := node.children.next(key[0])
+ if child == nil {
+ // There is nowhere to continue, return.
+ return nil
+ }
+
+ node = child
+ }
+}
+
+// Delete deletes the item represented by the given prefix.
+//
+// True is returned if the matching node was found and deleted.
+func (trie *Trie) Delete(key Prefix) (deleted bool) {
+ // Nil prefix not allowed.
+ if key == nil {
+ panic(ErrNilPrefix)
+ }
+
+ // Empty trie must be handled explicitly.
+ if trie.prefix == nil {
+ return false
+ }
+
+ // Find the relevant node.
+ parent, node, _, leftover := trie.findSubtree(key)
+ if len(leftover) != 0 {
+ return false
+ }
+
+ // If the item is already set to nil, there is nothing to do.
+ if node.item == nil {
+ return false
+ }
+
+ // Delete the item.
+ node.item = nil
+
+ // Compact since that might be possible now.
+ if compacted := node.compact(); compacted != node {
+ if parent == nil {
+ *node = *compacted
+ } else {
+ parent.children.replace(node.prefix[0], compacted)
+ *parent = *parent.compact()
+ }
+ }
+
+ return true
+}
+
+// DeleteSubtree finds the subtree exactly matching prefix and deletes it.
+//
+// True is returned if the subtree was found and deleted.
+func (trie *Trie) DeleteSubtree(prefix Prefix) (deleted bool) {
+ // Nil prefix not allowed.
+ if prefix == nil {
+ panic(ErrNilPrefix)
+ }
+
+ // Empty trie must be handled explicitly.
+ if trie.prefix == nil {
+ return false
+ }
+
+ // Locate the relevant subtree.
+ parent, root, found, _ := trie.findSubtree(prefix)
+ if !found {
+ return false
+ }
+
+ // If we are in the root of the trie, reset the trie.
+ if parent == nil {
+ root.prefix = nil
+ root.children = newSparseChildList()
+ return true
+ }
+
+ // Otherwise remove the root node from its parent.
+ parent.children.remove(root)
+ return true
+}
+
+// Internal helper methods -----------------------------------------------------
+
+func (trie *Trie) put(key Prefix, item Item, replace bool) (inserted bool) {
+ // Nil prefix not allowed.
+ if key == nil {
+ panic(ErrNilPrefix)
+ }
+
+ var (
+ common int
+ node *Trie = trie
+ child *Trie
+ )
+
+ if node.prefix == nil {
+ if len(key) <= MaxPrefixPerNode {
+ node.prefix = key
+ goto InsertItem
+ }
+ node.prefix = key[:MaxPrefixPerNode]
+ key = key[MaxPrefixPerNode:]
+ goto AppendChild
+ }
+
+ for {
+ // Compute the longest common prefix length.
+ common = node.longestCommonPrefixLength(key)
+ key = key[common:]
+
+ // Only a part matches, split.
+ if common < len(node.prefix) {
+ goto SplitPrefix
+ }
+
+ // common == len(node.prefix) since never (common > len(node.prefix))
+ // common == len(former key) <-> 0 == len(key)
+ // -> former key == node.prefix
+ if len(key) == 0 {
+ goto InsertItem
+ }
+
+ // Check children for matching prefix.
+ child = node.children.next(key[0])
+ if child == nil {
+ goto AppendChild
+ }
+ node = child
+ }
+
+SplitPrefix:
+ // Split the prefix if necessary.
+ child = new(Trie)
+ *child = *node
+ *node = *NewTrie()
+ node.prefix = child.prefix[:common]
+ child.prefix = child.prefix[common:]
+ child = child.compact()
+ node.children = node.children.add(child)
+
+AppendChild:
+ // Keep appending children until whole prefix is inserted.
+ // This loop starts with empty node.prefix that needs to be filled.
+ for len(key) != 0 {
+ child := NewTrie()
+ if len(key) <= MaxPrefixPerNode {
+ child.prefix = key
+ node.children = node.children.add(child)
+ node = child
+ goto InsertItem
+ } else {
+ child.prefix = key[:MaxPrefixPerNode]
+ key = key[MaxPrefixPerNode:]
+ node.children = node.children.add(child)
+ node = child
+ }
+ }
+
+InsertItem:
+ // Try to insert the item if possible.
+ if replace || node.item == nil {
+ node.item = item
+ return true
+ }
+ return false
+}
+
+func (trie *Trie) compact() *Trie {
+ // Only a node with a single child can be compacted.
+ if trie.children.length() != 1 {
+ return trie
+ }
+
+ child := trie.children.head()
+
+ // If any item is set, we cannot compact since we want to retain
+ // the ability to do searching by key. This makes compaction less usable,
+ // but that simply cannot be avoided.
+ if trie.item != nil || child.item != nil {
+ return trie
+ }
+
+ // Make sure the combined prefixes fit into a single node.
+ if len(trie.prefix)+len(child.prefix) > MaxPrefixPerNode {
+ return trie
+ }
+
+ // Concatenate the prefixes, move the items.
+ child.prefix = append(trie.prefix, child.prefix...)
+ if trie.item != nil {
+ child.item = trie.item
+ }
+
+ return child
+}
+
+func (trie *Trie) findSubtree(prefix Prefix) (parent *Trie, root *Trie, found bool, leftover Prefix) {
+ // Find the subtree matching prefix.
+ root = trie
+ for {
+ // Compute what part of prefix matches.
+ common := root.longestCommonPrefixLength(prefix)
+ prefix = prefix[common:]
+
+ // We used up the whole prefix, subtree found.
+ if len(prefix) == 0 {
+ found = true
+ leftover = root.prefix[common:]
+ return
+ }
+
+ // Partial match means that there is no subtree matching prefix.
+ if common < len(root.prefix) {
+ leftover = root.prefix[common:]
+ return
+ }
+
+ // There is some prefix left, move to the children.
+ child := root.children.next(prefix[0])
+ if child == nil {
+ // There is nowhere to continue, there is no subtree matching prefix.
+ return
+ }
+
+ parent = root
+ root = child
+ }
+}
+
+func (trie *Trie) walk(actualRootPrefix Prefix, visitor VisitorFunc) error {
+ var prefix Prefix
+ // Allocate a bit more space for prefix at the beginning.
+ if actualRootPrefix == nil {
+ prefix = make(Prefix, 32+len(trie.prefix))
+ copy(prefix, trie.prefix)
+ prefix = prefix[:len(trie.prefix)]
+ } else {
+ prefix = make(Prefix, 32+len(actualRootPrefix))
+ copy(prefix, actualRootPrefix)
+ prefix = prefix[:len(actualRootPrefix)]
+ }
+
+ // Visit the root first. Note that this works for empty trie as well since
+ // in that case item == nil && len(children) == 0.
+ if trie.item != nil {
+ if err := visitor(prefix, trie.item); err != nil {
+ if err == SkipSubtree {
+ return nil
+ }
+ return err
+ }
+ }
+
+ // Then continue to the children.
+ return trie.children.walk(&prefix, visitor)
+}
+
+func (trie *Trie) longestCommonPrefixLength(prefix Prefix) (i int) {
+ for ; i < len(prefix) && i < len(trie.prefix) && prefix[i] == trie.prefix[i]; i++ {
+ }
+ return
+}
+
+// Errors ----------------------------------------------------------------------
+
+var (
+ SkipSubtree = errors.New("Skip this subtree")
+ ErrNilPrefix = errors.New("Nil prefix passed into a method call")
+)
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go
new file mode 100644
index 0000000000..346e9a66cb
--- /dev/null
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_dense_test.go
@@ -0,0 +1,161 @@
+// Copyright (c) 2014 The go-patricia AUTHORS
+//
+// Use of this source code is governed by The MIT License
+// that can be found in the LICENSE file.
+
+package patricia
+
+import (
+ "testing"
+)
+
+// Tests -----------------------------------------------------------------------
+
+func TestTrie_InsertDense(t *testing.T) {
+ trie := NewTrie()
+
+ data := []testData{
+ {"aba", 0, success},
+ {"abb", 1, success},
+ {"abc", 2, success},
+ {"abd", 3, success},
+ {"abe", 4, success},
+ {"abf", 5, success},
+ {"abg", 6, success},
+ {"abh", 7, success},
+ {"abi", 8, success},
+ {"abj", 9, success},
+ {"abk", 0, success},
+ {"abl", 1, success},
+ {"abm", 2, success},
+ {"abn", 3, success},
+ {"abo", 4, success},
+ {"abp", 5, success},
+ {"abq", 6, success},
+ {"abr", 7, success},
+ {"abs", 8, success},
+ {"abt", 9, success},
+ {"abu", 0, success},
+ {"abv", 1, success},
+ {"abw", 2, success},
+ {"abx", 3, success},
+ {"aby", 4, success},
+ {"abz", 5, success},
+ }
+
+ for _, v := range data {
+ t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+ if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal {
+ t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+ }
+ }
+}
+
+func TestTrie_InsertDensePreceeding(t *testing.T) {
+ trie := NewTrie()
+ start := byte(70)
+ // create a dense node
+ for i := byte(0); i <= MaxChildrenPerSparseNode; i++ {
+ if !trie.Insert(Prefix([]byte{start + i}), true) {
+ t.Errorf("insert failed, prefix=%v", start+i)
+ }
+ }
+ // insert some preceding keys
+ for i := byte(1); i < start; i *= i + 1 {
+ if !trie.Insert(Prefix([]byte{start - i}), true) {
+ t.Errorf("insert failed, prefix=%v", start-i)
+ }
+ }
+}
+
+func TestTrie_InsertDenseDuplicatePrefixes(t *testing.T) {
+ trie := NewTrie()
+
+ data := []testData{
+ {"aba", 0, success},
+ {"abb", 1, success},
+ {"abc", 2, success},
+ {"abd", 3, success},
+ {"abe", 4, success},
+ {"abf", 5, success},
+ {"abg", 6, success},
+ {"abh", 7, success},
+ {"abi", 8, success},
+ {"abj", 9, success},
+ {"abk", 0, success},
+ {"abl", 1, success},
+ {"abm", 2, success},
+ {"abn", 3, success},
+ {"abo", 4, success},
+ {"abp", 5, success},
+ {"abq", 6, success},
+ {"abr", 7, success},
+ {"abs", 8, success},
+ {"abt", 9, success},
+ {"abu", 0, success},
+ {"abv", 1, success},
+ {"abw", 2, success},
+ {"abx", 3, success},
+ {"aby", 4, success},
+ {"abz", 5, success},
+ {"aba", 0, failure},
+ {"abb", 1, failure},
+ {"abc", 2, failure},
+ {"abd", 3, failure},
+ {"abe", 4, failure},
+ }
+
+ for _, v := range data {
+ t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+ if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal {
+ t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+ }
+ }
+}
+
+func TestTrie_DeleteDense(t *testing.T) {
+ trie := NewTrie()
+
+ data := []testData{
+ {"aba", 0, success},
+ {"abb", 1, success},
+ {"abc", 2, success},
+ {"abd", 3, success},
+ {"abe", 4, success},
+ {"abf", 5, success},
+ {"abg", 6, success},
+ {"abh", 7, success},
+ {"abi", 8, success},
+ {"abj", 9, success},
+ {"abk", 0, success},
+ {"abl", 1, success},
+ {"abm", 2, success},
+ {"abn", 3, success},
+ {"abo", 4, success},
+ {"abp", 5, success},
+ {"abq", 6, success},
+ {"abr", 7, success},
+ {"abs", 8, success},
+ {"abt", 9, success},
+ {"abu", 0, success},
+ {"abv", 1, success},
+ {"abw", 2, success},
+ {"abx", 3, success},
+ {"aby", 4, success},
+ {"abz", 5, success},
+ }
+
+ for _, v := range data {
+ t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+ if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal {
+ t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+ }
+ }
+
+ for _, v := range data {
+ t.Logf("DELETE word=%v, success=%v", v.key, v.retVal)
+ if ok := trie.Delete([]byte(v.key)); ok != v.retVal {
+ t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+ }
+ }
+}
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go
new file mode 100644
index 0000000000..27f3c878b5
--- /dev/null
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_sparse_test.go
@@ -0,0 +1,659 @@
+// Copyright (c) 2014 The go-patricia AUTHORS
+//
+// Use of this source code is governed by The MIT License
+// that can be found in the LICENSE file.
+
+package patricia
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+// Readable aliases for the boolean results expected from trie operations
+// in the test tables below.
+const (
+	success = true
+	failure = false
+)
+
+// testData describes one trie operation in a table-driven test: the key to
+// act on, the item stored under it, and the boolean result (success/failure)
+// the operation is expected to return.
+type testData struct {
+	key    string
+	value  interface{}
+	retVal bool
+}
+
+// Tests -----------------------------------------------------------------------
+
+// TestTrie_InsertDifferentPrefixes verifies that inserting keys with no
+// common prefix all succeed.
+func TestTrie_InsertDifferentPrefixes(t *testing.T) {
+	trie := NewTrie()
+
+	cases := []testData{
+		{"Pepaneeeeeeeeeeeeee", "Pepan Zdepan", success},
+		{"Honzooooooooooooooo", "Honza Novak", success},
+		{"Jenikuuuuuuuuuuuuuu", "Jenik Poustevnicek", success},
+	}
+
+	for _, tc := range cases {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", tc.key, tc.value, tc.retVal)
+		got := trie.Insert(Prefix(tc.key), tc.value)
+		if got != tc.retVal {
+			t.Errorf("Unexpected return value, expected=%v, got=%v", tc.retVal, got)
+		}
+	}
+}
+
+// TestTrie_InsertDuplicatePrefixes verifies that inserting the same key a
+// second time is rejected.
+func TestTrie_InsertDuplicatePrefixes(t *testing.T) {
+	trie := NewTrie()
+
+	cases := []testData{
+		{"Pepan", "Pepan Zdepan", success},
+		{"Pepan", "Pepan Zdepan", failure},
+	}
+
+	for _, tc := range cases {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", tc.key, tc.value, tc.retVal)
+		got := trie.Insert(Prefix(tc.key), tc.value)
+		if got != tc.retVal {
+			t.Errorf("Unexpected return value, expected=%v, got=%v", tc.retVal, got)
+		}
+	}
+}
+
+// TestTrie_InsertVariousPrefixes mixes fresh keys, duplicate keys (expected
+// to fail), and keys that extend an existing key ("Pepanek" after "Pepan"),
+// checking Insert's return value for each.
+func TestTrie_InsertVariousPrefixes(t *testing.T) {
+	trie := NewTrie()
+
+	data := []testData{
+		{"Pepan", "Pepan Zdepan", success},
+		{"Pepin", "Pepin Omacka", success},
+		{"Honza", "Honza Novak", success},
+		{"Jenik", "Jenik Poustevnicek", success},
+		{"Pepan", "Pepan Dupan", failure},
+		{"Karel", "Karel Pekar", success},
+		{"Jenik", "Jenik Poustevnicek", failure},
+		{"Pepanek", "Pepanek Zemlicka", success},
+	}
+
+	for _, v := range data {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+		if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal {
+			t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+}
+
+// TestTrie_InsertAndMatchPrefix inserts a longer key before its own prefix
+// and verifies the short key is still matched afterwards.
+func TestTrie_InsertAndMatchPrefix(t *testing.T) {
+	trie := NewTrie()
+	t.Log("INSERT prefix=by week")
+	trie.Insert(Prefix("by week"), 2)
+	t.Log("INSERT prefix=by")
+	trie.Insert(Prefix("by"), 1)
+
+	if matched := trie.Match(Prefix("by")); !matched {
+		t.Error("MATCH prefix=by, expected=true, got=false")
+	}
+}
+
+// TestTrie_SetGet inserts a key set, overwrites every stored item to 10 via
+// Set, then verifies Get returns the updated value for each key and nil for
+// a key that was never inserted.
+func TestTrie_SetGet(t *testing.T) {
+	trie := NewTrie()
+
+	data := []testData{
+		{"Pepan", "Pepan Zdepan", success},
+		{"Pepin", "Pepin Omacka", success},
+		{"Honza", "Honza Novak", success},
+		{"Jenik", "Jenik Poustevnicek", success},
+		{"Pepan", "Pepan Dupan", failure},
+		{"Karel", "Karel Pekar", success},
+		{"Jenik", "Jenik Poustevnicek", failure},
+		{"Pepanek", "Pepanek Zemlicka", success},
+	}
+
+	for _, v := range data {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+		if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal {
+			t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+
+	for _, v := range data {
+		t.Logf("SET %q to 10", v.key)
+		trie.Set(Prefix(v.key), 10)
+	}
+
+	for _, v := range data {
+		value := trie.Get(Prefix(v.key))
+		t.Logf("GET %q => %v", v.key, value)
+		// Fixed: the original format string had no verb for value
+		// ("Unexpected return value, != 10"), which go vet flags and
+		// which produced a garbled failure message.
+		if value.(int) != 10 {
+			t.Errorf("Unexpected return value, %v != 10", value)
+		}
+	}
+
+	if value := trie.Get(Prefix("random crap")); value != nil {
+		t.Errorf("Unexpected return value, %v != <nil>", value)
+	}
+}
+
+// TestTrie_Match inserts a mixed key set (duplicates expected to fail), then
+// verifies Match reports true for every inserted key and false for a key
+// that was never inserted.
+func TestTrie_Match(t *testing.T) {
+	trie := NewTrie()
+
+	data := []testData{
+		{"Pepan", "Pepan Zdepan", success},
+		{"Pepin", "Pepin Omacka", success},
+		{"Honza", "Honza Novak", success},
+		{"Jenik", "Jenik Poustevnicek", success},
+		{"Pepan", "Pepan Dupan", failure},
+		{"Karel", "Karel Pekar", success},
+		{"Jenik", "Jenik Poustevnicek", failure},
+		{"Pepanek", "Pepanek Zemlicka", success},
+	}
+
+	for _, v := range data {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+		if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal {
+			t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+
+	for _, v := range data {
+		matched := trie.Match(Prefix(v.key))
+		t.Logf("MATCH %q => %v", v.key, matched)
+		if !matched {
+			t.Errorf("Inserted key %q was not matched", v.key)
+		}
+	}
+
+	if trie.Match(Prefix("random crap")) {
+		t.Errorf("Key that was not inserted matched: %q", "random crap")
+	}
+}
+
+// TestTrie_MatchFalsePositive ensures that a key which merely starts with a
+// stored prefix ("A extra" vs "A") is not reported as present by either
+// Match or MatchSubtree.
+func TestTrie_MatchFalsePositive(t *testing.T) {
+	trie := NewTrie()
+
+	if ok := trie.Insert(Prefix("A"), 1); !ok {
+		t.Fatal("INSERT prefix=A, item=1 not ok")
+	}
+
+	subtreeHit := trie.MatchSubtree(Prefix("A extra"))
+	exactHit := trie.Match(Prefix("A extra"))
+
+	if subtreeHit {
+		t.Error("MatchSubtree returned false positive")
+	}
+
+	if exactHit {
+		t.Error("Match returned false positive")
+	}
+}
+
+// TestTrie_MatchSubtree inserts full keys and then checks that the first
+// three bytes of each key are matched as a subtree prefix.
+func TestTrie_MatchSubtree(t *testing.T) {
+	trie := NewTrie()
+
+	data := []testData{
+		{"Pepan", "Pepan Zdepan", success},
+		{"Pepin", "Pepin Omacka", success},
+		{"Honza", "Honza Novak", success},
+		{"Jenik", "Jenik Poustevnicek", success},
+		{"Pepan", "Pepan Dupan", failure},
+		{"Karel", "Karel Pekar", success},
+		{"Jenik", "Jenik Poustevnicek", failure},
+		{"Pepanek", "Pepanek Zemlicka", success},
+	}
+
+	for _, v := range data {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+		if ok := trie.Insert(Prefix(v.key), v.value); ok != v.retVal {
+			t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+
+	for _, v := range data {
+		// A 3-byte prefix of any inserted key must match some subtree.
+		key := Prefix(v.key[:3])
+		matched := trie.MatchSubtree(key)
+		t.Logf("MATCH_SUBTREE %q => %v", key, matched)
+		if !matched {
+			t.Errorf("Subtree %q was not matched", v.key)
+		}
+	}
+}
+
+// TestTrie_Visit walks the whole trie and checks that every visited prefix
+// extends the key whose index is stored as the item (items double as
+// indices into data).
+func TestTrie_Visit(t *testing.T) {
+	trie := NewTrie()
+
+	data := []testData{
+		{"Pepa", 0, success},
+		{"Pepa Zdepa", 1, success},
+		{"Pepa Kuchar", 2, success},
+		{"Honza", 3, success},
+		{"Jenik", 4, success},
+	}
+
+	for _, v := range data {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+		if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal {
+			t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+
+	if err := trie.Visit(func(prefix Prefix, item Item) error {
+		// Recover the original key via the stored index.
+		name := data[item.(int)].key
+		t.Logf("VISITING prefix=%q, item=%v", prefix, item)
+		if !strings.HasPrefix(string(prefix), name) {
+			t.Errorf("Unexpected prefix encountered, %q not a prefix of %q", prefix, name)
+		}
+		return nil
+	}); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestTrie_VisitSkipSubtree verifies that returning SkipSubtree from the
+// visitor at "Pepa" (item 0) prevents any key under the "Pepa" subtree from
+// being visited afterwards.
+func TestTrie_VisitSkipSubtree(t *testing.T) {
+	trie := NewTrie()
+
+	data := []testData{
+		{"Pepa", 0, success},
+		{"Pepa Zdepa", 1, success},
+		{"Pepa Kuchar", 2, success},
+		{"Honza", 3, success},
+		{"Jenik", 4, success},
+	}
+
+	for _, v := range data {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+		if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal {
+			t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+
+	if err := trie.Visit(func(prefix Prefix, item Item) error {
+		t.Logf("VISITING prefix=%q, item=%v", prefix, item)
+		if item.(int) == 0 {
+			t.Logf("SKIP %q", prefix)
+			return SkipSubtree
+		}
+		// After skipping, no other "Pepa"-prefixed key may appear.
+		if strings.HasPrefix(string(prefix), "Pepa") {
+			t.Errorf("Unexpected prefix encountered, %q", prefix)
+		}
+		return nil
+	}); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestTrie_VisitReturnError checks that an error returned by the visitor
+// function aborts the walk and is propagated out of Visit unchanged.
+func TestTrie_VisitReturnError(t *testing.T) {
+	trie := NewTrie()
+
+	data := []testData{
+		{"Pepa", 0, success},
+		{"Pepa Zdepa", 1, success},
+		{"Pepa Kuchar", 2, success},
+		{"Honza", 3, success},
+		{"Jenik", 4, success},
+	}
+
+	for _, v := range data {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+		if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal {
+			t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+
+	someErr := errors.New("Something exploded")
+	err := trie.Visit(func(prefix Prefix, item Item) error {
+		t.Logf("VISITING prefix=%q, item=%v", prefix, item)
+		if item.(int) == 0 {
+			return someErr
+		}
+		if item.(int) != 0 {
+			t.Errorf("Unexpected prefix encountered, %q", prefix)
+		}
+		return nil
+	})
+	// Fixed: the original check (err != nil && err != someErr) passed
+	// silently when Visit swallowed the error and returned nil. Visit
+	// must surface exactly someErr.
+	if err != someErr {
+		t.Fatalf("Unexpected error returned from Visit: %v", err)
+	}
+}
+
+// TestTrie_VisitSubtree walks only the subtree rooted at "Pep" and checks
+// that exactly the three "Pepa*" keys are visited, each extending the
+// subtree prefix.
+func TestTrie_VisitSubtree(t *testing.T) {
+	trie := NewTrie()
+
+	data := []testData{
+		{"Pepa", 0, success},
+		{"Pepa Zdepa", 1, success},
+		{"Pepa Kuchar", 2, success},
+		{"Honza", 3, success},
+		{"Jenik", 4, success},
+	}
+
+	for _, v := range data {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+		if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal {
+			t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+
+	var counter int
+	subtreePrefix := []byte("Pep")
+	t.Log("VISIT Pep")
+	if err := trie.VisitSubtree(subtreePrefix, func(prefix Prefix, item Item) error {
+		t.Logf("VISITING prefix=%q, item=%v", prefix, item)
+		if !bytes.HasPrefix(prefix, subtreePrefix) {
+			t.Errorf("Unexpected prefix encountered, %q does not extend %q",
+				prefix, subtreePrefix)
+		}
+		// Items are indices into data; a visited prefix can never be
+		// longer than the key it was stored under.
+		if len(prefix) > len(data[item.(int)].key) {
+			t.Fatalf("Something is rather fishy here, prefix=%q", prefix)
+		}
+		counter++
+		return nil
+	}); err != nil {
+		t.Fatal(err)
+	}
+
+	// Exactly Pepa, Pepa Zdepa, and Pepa Kuchar live under "Pep".
+	if counter != 3 {
+		t.Error("Unexpected number of nodes visited")
+	}
+}
+
+// TestTrie_VisitPrefixes visits all stored keys that are prefixes of the
+// word "Pepa" and checks that exactly four of them (P, Pe, Pep, Pepa) are
+// reported.
+func TestTrie_VisitPrefixes(t *testing.T) {
+	trie := NewTrie()
+
+	data := []testData{
+		{"P", 0, success},
+		{"Pe", 1, success},
+		{"Pep", 2, success},
+		{"Pepa", 3, success},
+		{"Pepa Zdepa", 4, success},
+		{"Pepa Kuchar", 5, success},
+		{"Honza", 6, success},
+		{"Jenik", 7, success},
+	}
+
+	for _, v := range data {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+		if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal {
+			t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+
+	var counter int
+	word := []byte("Pepa")
+	if err := trie.VisitPrefixes(word, func(prefix Prefix, item Item) error {
+		t.Logf("VISITING prefix=%q, item=%v", prefix, item)
+		if !bytes.HasPrefix(word, prefix) {
+			t.Errorf("Unexpected prefix encountered, %q is not a prefix of %q",
+				prefix, word)
+		}
+		counter++
+		return nil
+	}); err != nil {
+		t.Fatal(err)
+	}
+
+	// P, Pe, Pep, and Pepa are the stored prefixes of "Pepa".
+	if counter != 4 {
+		t.Error("Unexpected number of nodes visited")
+	}
+}
+
+// TestParticiaTrie_Delete inserts three independent keys and then deletes
+// each of them once, expecting every delete to succeed. (The "Particia"
+// typo is kept — the test name is an exported identifier.)
+func TestParticiaTrie_Delete(t *testing.T) {
+	trie := NewTrie()
+
+	cases := []testData{
+		{"Pepan", "Pepan Zdepan", success},
+		{"Honza", "Honza Novak", success},
+		{"Jenik", "Jenik Poustevnicek", success},
+	}
+
+	for _, tc := range cases {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", tc.key, tc.value, tc.retVal)
+		if ok := trie.Insert([]byte(tc.key), tc.value); ok != tc.retVal {
+			t.Fatalf("Unexpected return value, expected=%v, got=%v", tc.retVal, ok)
+		}
+	}
+
+	for _, tc := range cases {
+		t.Logf("DELETE word=%v, success=%v", tc.key, tc.retVal)
+		if ok := trie.Delete([]byte(tc.key)); ok != tc.retVal {
+			t.Errorf("Unexpected return value, expected=%v, got=%v", tc.retVal, ok)
+		}
+	}
+}
+
+// TestParticiaTrie_DeleteNonExistent verifies that deleting a key a second
+// time, after it was already removed, fails.
+func TestParticiaTrie_DeleteNonExistent(t *testing.T) {
+	trie := NewTrie()
+
+	insertData := []testData{
+		{"Pepan", "Pepan Zdepan", success},
+		{"Honza", "Honza Novak", success},
+		{"Jenik", "Jenik Poustevnicek", success},
+	}
+	// Repeated deletes of "Pepan" and "Honza" must fail the second time.
+	deleteData := []testData{
+		{"Pepan", "Pepan Zdepan", success},
+		{"Honza", "Honza Novak", success},
+		{"Pepan", "Pepan Zdepan", failure},
+		{"Jenik", "Jenik Poustevnicek", success},
+		{"Honza", "Honza Novak", failure},
+	}
+
+	for _, v := range insertData {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+		if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal {
+			t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+
+	for _, v := range deleteData {
+		t.Logf("DELETE word=%v, success=%v", v.key, v.retVal)
+		if ok := trie.Delete([]byte(v.key)); ok != v.retVal {
+			t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+}
+
+// TestParticiaTrie_DeleteSubtree removes whole subtrees and checks the
+// success/failure of each DeleteSubtree call, including repeated deletes,
+// prefixes emptied by earlier deletes, and the empty prefix (whole trie).
+func TestParticiaTrie_DeleteSubtree(t *testing.T) {
+	trie := NewTrie()
+
+	insertData := []testData{
+		{"P", 0, success},
+		{"Pe", 1, success},
+		{"Pep", 2, success},
+		{"Pepa", 3, success},
+		{"Pepa Zdepa", 4, success},
+		{"Pepa Kuchar", 5, success},
+		{"Honza", 6, success},
+		{"Jenik", 7, success},
+	}
+	// Expectations depend on earlier deletes in this table: e.g. "Pep"
+	// fails because the "Pe" subtree (which contained it) is already gone.
+	deleteData := []testData{
+		{"Pe", -1, success},
+		{"Pe", -1, failure},
+		{"Honzik", -1, failure},
+		{"Honza", -1, success},
+		{"Honza", -1, failure},
+		{"Pep", -1, failure},
+		{"P", -1, success},
+		{"Nobody", -1, failure},
+		{"", -1, success},
+	}
+
+	for _, v := range insertData {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", v.key, v.value, v.retVal)
+		if ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal {
+			t.Fatalf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+
+	for _, v := range deleteData {
+		t.Logf("DELETE_SUBTREE prefix=%v, success=%v", v.key, v.retVal)
+		if ok := trie.DeleteSubtree([]byte(v.key)); ok != v.retVal {
+			t.Errorf("Unexpected return value, expected=%v, got=%v", v.retVal, ok)
+		}
+	}
+}
+
+// NOTE(review): TestTrie_Dump is disabled. As written it would not compile:
+// Insert returns a single bool (not two values), bytes.Equal cannot compare
+// []byte with a string, and trie.Dump(buf) passes a bytes.Buffer by value.
+// Kept commented out for reference until the Dump output format settles.
+/*
+func TestTrie_Dump(t *testing.T) {
+	trie := NewTrie()
+
+	data := []testData{
+		{"Honda", nil, success},
+		{"Honza", nil, success},
+		{"Jenik", nil, success},
+		{"Pepan", nil, success},
+		{"Pepin", nil, success},
+	}
+
+	for i, v := range data {
+		if _, ok := trie.Insert([]byte(v.key), v.value); ok != v.retVal {
+			t.Logf("INSERT %v %v", v.key, v.value)
+			t.Fatalf("Unexpected return value, expected=%v, got=%v", i, ok)
+		}
+	}
+
+	dump := `
++--+--+ Hon +--+--+ da
+ |           |
+ |           +--+ za
+ |
+ +--+ Jenik
+ |
+ +--+ Pep +--+--+ an
+           |
+           +--+ in
+`
+
+	var buf bytes.Buffer
+	trie.Dump(buf)
+
+	if !bytes.Equal(buf.Bytes(), dump) {
+		t.Logf("DUMP")
+		t.Fatalf("Unexpected dump generated, expected\n\n%v\ngot\n\n%v", dump, buf.String())
+	}
+}
+*/
+
+// TestTrie_compact deletes interior keys of a long chain and then walks the
+// trie, failing if any byte 'a'..'k' occurs more than once in a visited
+// prefix — which would indicate the edges were not compacted after the
+// deletes.
+func TestTrie_compact(t *testing.T) {
+	trie := NewTrie()
+
+	trie.Insert(Prefix("a"), 0)
+	trie.Insert(Prefix("ab"), 0)
+	trie.Insert(Prefix("abc"), 0)
+	trie.Insert(Prefix("abcd"), 0)
+	trie.Insert(Prefix("abcde"), 0)
+	trie.Insert(Prefix("abcdef"), 0)
+	trie.Insert(Prefix("abcdefg"), 0)
+	trie.Insert(Prefix("abcdefgi"), 0)
+	trie.Insert(Prefix("abcdefgij"), 0)
+	trie.Insert(Prefix("abcdefgijk"), 0)
+
+	trie.Delete(Prefix("abcdef"))
+	trie.Delete(Prefix("abcde"))
+	trie.Delete(Prefix("abcdefg"))
+
+	trie.Delete(Prefix("a"))
+	trie.Delete(Prefix("abc"))
+	trie.Delete(Prefix("ab"))
+
+	trie.Visit(func(prefix Prefix, item Item) error {
+		// 97 ~~ 'a',
+		for ch := byte(97); ch <= 107; ch++ {
+			if c := bytes.Count(prefix, []byte{ch}); c > 1 {
+				t.Errorf("%q appeared in %q %v times", ch, prefix, c)
+			}
+		}
+		return nil
+	})
+}
+
+// TestTrie_longestCommonPrefixLenght exercises the internal
+// longestCommonPrefixLength helper against a fixed node prefix, covering
+// the empty key, a full prefix match, a partial match, and a key longer
+// than the node's prefix. (The "Lenght" typo in the name is preserved —
+// it is the test's exported identifier.)
+func TestTrie_longestCommonPrefixLenght(t *testing.T) {
+	trie := NewTrie()
+	trie.prefix = []byte("1234567890")
+
+	switch {
+	case trie.longestCommonPrefixLength([]byte("")) != 0:
+		t.Fail()
+	case trie.longestCommonPrefixLength([]byte("12345")) != 5:
+		t.Fail()
+	case trie.longestCommonPrefixLength([]byte("123789")) != 3:
+		t.Fail()
+	case trie.longestCommonPrefixLength([]byte("12345678901")) != 10:
+		t.Fail()
+	}
+}
+
+// Examples --------------------------------------------------------------------
+
+// ExampleTrie demonstrates the full Trie API: Insert, Match, MatchSubtree,
+// Visit, VisitSubtree, Set/Get, VisitPrefixes, Delete, and DeleteSubtree.
+// The "// Output:" block at the end is verified by the go test runner, so
+// the printed lines must match exactly.
+func ExampleTrie() {
+	// Create a new tree.
+	trie := NewTrie()
+
+	// Insert some items.
+	trie.Insert(Prefix("Pepa Novak"), 1)
+	trie.Insert(Prefix("Pepa Sindelar"), 2)
+	trie.Insert(Prefix("Karel Macha"), 3)
+	trie.Insert(Prefix("Karel Hynek Macha"), 4)
+
+	// Just check if some things are present in the tree.
+	key := Prefix("Pepa Novak")
+	fmt.Printf("%q present? %v\n", key, trie.Match(key))
+	key = Prefix("Karel")
+	fmt.Printf("Anybody called %q here? %v\n", key, trie.MatchSubtree(key))
+
+	// Walk the tree.
+	trie.Visit(printItem)
+	// "Pepa Novak": 1
+	// "Pepa Sindelar": 2
+	// "Karel Macha": 3
+	// "Karel Hynek Macha": 4
+
+	// Walk a subtree.
+	trie.VisitSubtree(Prefix("Pepa"), printItem)
+	// "Pepa Novak": 1
+	// "Pepa Sindelar": 2
+
+	// Modify an item, then fetch it from the tree.
+	trie.Set(Prefix("Karel Hynek Macha"), 10)
+	key = Prefix("Karel Hynek Macha")
+	fmt.Printf("%q: %v\n", key, trie.Get(key))
+	// "Karel Hynek Macha": 10
+
+	// Walk prefixes.
+	prefix := Prefix("Karel Hynek Macha je kouzelnik")
+	trie.VisitPrefixes(prefix, printItem)
+	// "Karel Hynek Macha": 10
+
+	// Delete some items.
+	trie.Delete(Prefix("Pepa Novak"))
+	trie.Delete(Prefix("Karel Macha"))
+
+	// Walk again.
+	trie.Visit(printItem)
+	// "Pepa Sindelar": 2
+	// "Karel Hynek Macha": 10
+
+	// Delete a subtree.
+	trie.DeleteSubtree(Prefix("Pepa"))
+
+	// Print what is left.
+	trie.Visit(printItem)
+	// "Karel Hynek Macha": 10
+
+	// Output:
+	// "Pepa Novak" present? true
+	// Anybody called "Karel" here? true
+	// "Pepa Novak": 1
+	// "Pepa Sindelar": 2
+	// "Karel Macha": 3
+	// "Karel Hynek Macha": 4
+	// "Pepa Novak": 1
+	// "Pepa Sindelar": 2
+	// "Karel Hynek Macha": 10
+	// "Karel Hynek Macha": 10
+	// "Pepa Sindelar": 2
+	// "Karel Hynek Macha": 10
+	// "Karel Hynek Macha": 10
+}
+
+// Helpers ---------------------------------------------------------------------
+
+// printItem is a visitor callback that prints each visited prefix/item pair
+// to stdout; ExampleTrie relies on its output for the verified Output block.
+func printItem(prefix Prefix, item Item) error {
+	fmt.Printf("%q: %v\n", prefix, item)
+	return nil
+}
diff --git a/vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go
new file mode 100644
index 0000000000..ce5ae378fa
--- /dev/null
+++ b/vendor/src/github.com/tchap/go-patricia/patricia/patricia_test.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2014 The go-patricia AUTHORS
+//
+// Use of this source code is governed by The MIT License
+// that can be found in the LICENSE file.
+
+package patricia
+
+import (
+ "crypto/rand"
+ "reflect"
+ "testing"
+)
+
+// Tests -----------------------------------------------------------------------
+
+// TestTrie_GetNonexistentPrefix confirms Get returns nil for a key that
+// was never inserted.
+func TestTrie_GetNonexistentPrefix(t *testing.T) {
+	trie := NewTrie()
+
+	cases := []testData{
+		{"aba", 0, success},
+	}
+
+	for _, tc := range cases {
+		t.Logf("INSERT prefix=%v, item=%v, success=%v", tc.key, tc.value, tc.retVal)
+		got := trie.Insert(Prefix(tc.key), tc.value)
+		if got != tc.retVal {
+			t.Errorf("Unexpected return value, expected=%v, got=%v", tc.retVal, got)
+		}
+	}
+
+	t.Logf("GET prefix=baa, expect item=nil")
+	if item := trie.Get(Prefix("baa")); item != nil {
+		t.Errorf("Unexpected return value, expected=<nil>, got=%v", item)
+	}
+}
+
+// TestTrie_RandomKitchenSink stress-tests Insert/Get/Delete with 750k
+// overlapping 16-byte random keys. Roughly half the keys (selected by one
+// random byte) are verified and deleted while inserts are still in flight;
+// the rest are verified and deleted afterwards. Skipped under -short.
+func TestTrie_RandomKitchenSink(t *testing.T) {
+	if testing.Short() {
+		t.Skip()
+	}
+	const count, size = 750000, 16
+	// Overlapping windows of one random buffer produce keys that share
+	// long common prefixes, exercising node splitting heavily.
+	b := make([]byte, count+size+1)
+	if _, err := rand.Read(b); err != nil {
+		t.Fatal("error generating random bytes", err)
+	}
+	m := make(map[string]string)
+	for i := 0; i < count; i++ {
+		m[string(b[i:i+size])] = string(b[i+1 : i+size+1])
+	}
+	trie := NewTrie()
+	// getAndDelete checks that k maps to v, deletes it, and verifies the
+	// key is gone and a second delete fails.
+	getAndDelete := func(k, v string) {
+		i := trie.Get(Prefix(k))
+		if i == nil {
+			t.Fatalf("item not found, prefix=%v", []byte(k))
+		} else if s, ok := i.(string); !ok {
+			t.Fatalf("unexpected item type, expecting=%v, got=%v", reflect.TypeOf(k), reflect.TypeOf(i))
+		} else if s != v {
+			t.Fatalf("unexpected item, expecting=%v, got=%v", []byte(k), []byte(s))
+		} else if !trie.Delete(Prefix(k)) {
+			t.Fatalf("delete failed, prefix=%v", []byte(k))
+		} else if i = trie.Get(Prefix(k)); i != nil {
+			t.Fatalf("unexpected item, expecting=<nil>, got=%v", i)
+		} else if trie.Delete(Prefix(k)) {
+			t.Fatalf("extra delete succeeded, prefix=%v", []byte(k))
+		}
+	}
+	for k, v := range m {
+		if !trie.Insert(Prefix(k), v) {
+			t.Fatalf("insert failed, prefix=%v", []byte(k))
+		}
+		// Interleave deletes for ~half the keys (middle byte < 128).
+		if byte(k[size/2]) < 128 {
+			getAndDelete(k, v)
+			delete(m, k)
+		}
+	}
+	for k, v := range m {
+		getAndDelete(k, v)
+	}
+}