summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorunclejack <unclejack@users.noreply.github.com>2014-04-09 01:56:01 +0300
committerunclejack <unclejack@users.noreply.github.com>2014-04-09 01:56:01 +0300
commite128a606e39fa63c6b4fd6e53a1d88cf00aad868 (patch)
tree199ee7eb6678ffecd2ddad95fce794c795ad5183
parent143c9707a9fafc39e1d9747f528db97b2564f01e (diff)
parentdc9c28f51d669d6b09e81c2381f800f1a33bb659 (diff)
downloaddocker-0.10.1-hotfixes.tar.gz
Merge pull request #5079 from unclejack/bump_v0.10.0release-0.100.10.1-hotfixes
Bump version to v0.10.0
-rw-r--r--.travis.yml2
-rw-r--r--AUTHORS1
-rw-r--r--CHANGELOG.md177
-rw-r--r--CONTRIBUTING.md63
-rw-r--r--Dockerfile13
-rw-r--r--LICENSE13
-rw-r--r--MAINTAINERS2
-rw-r--r--Makefile27
-rw-r--r--VERSION2
-rw-r--r--api/api_unit_test.go46
-rw-r--r--api/client/cli.go102
-rw-r--r--api/client/commands.go (renamed from api/client.go)693
-rw-r--r--api/client/utils.go390
-rw-r--r--api/common.go11
-rw-r--r--api/server/server.go (renamed from api/server.go)104
-rw-r--r--api/server/server_unit_test.go180
-rw-r--r--archive/archive.go5
-rw-r--r--archive/changes_test.go8
-rw-r--r--auth/MAINTAINERS3
-rw-r--r--builtins/builtins.go12
-rw-r--r--commands_unit_test.go160
-rwxr-xr-xcontrib/check-config.sh146
-rwxr-xr-xcontrib/completion/bash/docker9
-rw-r--r--contrib/completion/fish/docker.fish40
-rwxr-xr-xcontrib/completion/zsh/_docker2
-rw-r--r--contrib/desktop-integration/data/Dockerfile6
-rw-r--r--contrib/desktop-integration/iceweasel/Dockerfile8
-rw-r--r--contrib/docker-device-tool/device_tool.go2
-rw-r--r--contrib/host-integration/Dockerfile.dev2
-rw-r--r--contrib/host-integration/Dockerfile.min2
-rw-r--r--contrib/host-integration/manager.go2
-rwxr-xr-xcontrib/init/sysvinit-debian/docker41
-rw-r--r--contrib/init/sysvinit-debian/docker.default2
-rw-r--r--contrib/init/upstart/docker.conf37
-rw-r--r--contrib/man/man1/docker-attach.156
-rw-r--r--contrib/man/man1/docker-build.165
-rw-r--r--contrib/man/man1/docker-images.184
-rw-r--r--contrib/man/man1/docker-info.139
-rw-r--r--contrib/man/man1/docker-inspect.1237
-rw-r--r--contrib/man/man1/docker-rm.145
-rw-r--r--contrib/man/man1/docker-rmi.129
-rw-r--r--contrib/man/man1/docker-run.1277
-rw-r--r--contrib/man/man1/docker-tag.149
-rw-r--r--contrib/man/man1/docker.1172
-rwxr-xr-xcontrib/mkimage-debootstrap.sh1
-rwxr-xr-xcontrib/mkseccomp.pl2
-rw-r--r--daemonconfig/config.go (renamed from config.go)19
-rw-r--r--docker/docker.go77
-rw-r--r--docs/MAINTAINERS1
-rw-r--r--docs/sources/articles/runmetrics.rst2
-rw-r--r--docs/sources/articles/security.rst6
-rw-r--r--docs/sources/examples/apt-cacher-ng.Dockerfile15
-rw-r--r--docs/sources/examples/apt-cacher-ng.rst102
-rw-r--r--docs/sources/examples/example_header.inc1
-rw-r--r--docs/sources/examples/hello_world.rst34
-rw-r--r--docs/sources/examples/https.rst126
-rw-r--r--docs/sources/examples/index.rst2
-rw-r--r--docs/sources/examples/mongodb.rst6
-rw-r--r--docs/sources/examples/nodejs_web_app.rst6
-rw-r--r--docs/sources/examples/postgresql_service.Dockerfile2
-rw-r--r--docs/sources/examples/postgresql_service.rst10
-rw-r--r--docs/sources/examples/python_web_app.rst2
-rw-r--r--docs/sources/examples/running_redis_service.rst8
-rw-r--r--docs/sources/examples/running_riak_service.rst2
-rw-r--r--docs/sources/examples/running_ssh_service.rst4
-rw-r--r--docs/sources/installation/amazon.rst32
-rw-r--r--docs/sources/installation/binaries.rst15
-rw-r--r--docs/sources/installation/fedora.rst8
-rw-r--r--docs/sources/installation/index.rst1
-rw-r--r--docs/sources/installation/mac.rst11
-rw-r--r--docs/sources/installation/rhel.rst4
-rw-r--r--docs/sources/installation/softlayer.rst25
-rw-r--r--docs/sources/installation/ubuntulinux.rst31
-rw-r--r--docs/sources/reference/api/docker_io_accounts_api.rst8
-rw-r--r--docs/sources/reference/api/docker_remote_api.rst5
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.10.rst6
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.2.rst4
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.3.rst4
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.4.rst4
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.5.rst4
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.6.rst4
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.7.rst4
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.8.rst4
-rw-r--r--docs/sources/reference/api/docker_remote_api_v1.9.rst4
-rw-r--r--docs/sources/reference/api/remote_api_client_libraries.rst6
-rw-r--r--docs/sources/reference/builder.rst89
-rw-r--r--docs/sources/reference/commandline/cli.rst411
-rw-r--r--docs/sources/reference/run.rst97
-rw-r--r--docs/sources/terms/images/docker-filesystems-busyboxrw.pngbin121141 -> 113106 bytes
-rw-r--r--docs/sources/terms/images/docker-filesystems-debian.pngbin77822 -> 64585 bytes
-rw-r--r--docs/sources/terms/images/docker-filesystems-debianrw.pngbin94218 -> 80992 bytes
-rw-r--r--docs/sources/terms/images/docker-filesystems-generic.pngbin78384 -> 67894 bytes
-rw-r--r--docs/sources/terms/images/docker-filesystems-multilayer.pngbin127744 -> 118391 bytes
-rw-r--r--docs/sources/terms/images/docker-filesystems-multiroot.pngbin72247 -> 63920 bytes
-rw-r--r--docs/sources/terms/images/docker-filesystems.svg131
-rw-r--r--docs/sources/use/ambassador_pattern_linking.rst28
-rw-r--r--docs/sources/use/basics.rst4
-rw-r--r--docs/sources/use/chef.rst95
-rw-r--r--docs/sources/use/host_integration.rst5
-rw-r--r--docs/sources/use/index.rst1
-rw-r--r--docs/sources/use/networking.rst4
-rw-r--r--docs/sources/use/port_redirection.rst6
-rw-r--r--docs/sources/use/working_with_links_names.rst26
-rw-r--r--docs/sources/use/working_with_volumes.rst22
-rw-r--r--docs/sources/use/workingwithrepository.rst2
-rw-r--r--execdriver/native/default_template.go90
-rw-r--r--graph/graph.go (renamed from graph.go)80
-rw-r--r--graph/tags.go (renamed from tags.go)7
-rw-r--r--graph/tags_unit_test.go (renamed from tags_unit_test.go)28
-rw-r--r--hack/PACKAGERS.md33
-rw-r--r--hack/RELEASE-CHECKLIST.md65
-rwxr-xr-xhack/dind4
-rw-r--r--hack/infrastructure/docker-ci/Dockerfile29
-rw-r--r--hack/infrastructure/docker-ci/MAINTAINERS1
-rw-r--r--hack/infrastructure/docker-ci/README.rst65
-rw-r--r--hack/infrastructure/docker-ci/VERSION1
-rw-r--r--hack/infrastructure/docker-ci/buildbot/github.py176
-rw-r--r--hack/infrastructure/docker-ci/buildbot/master.cfg161
-rw-r--r--hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml22
-rw-r--r--hack/infrastructure/docker-ci/dcr/prod/settings.yml5
-rw-r--r--hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml22
-rw-r--r--hack/infrastructure/docker-ci/dcr/stage/settings.yml5
-rwxr-xr-xhack/infrastructure/docker-ci/docker-coverage/gocoverage.sh52
l---------hack/infrastructure/docker-ci/dockertest/docker1
l---------hack/infrastructure/docker-ci/dockertest/docker-registry1
-rwxr-xr-xhack/infrastructure/docker-ci/dockertest/nightlyrelease13
-rwxr-xr-xhack/infrastructure/docker-ci/dockertest/project8
-rwxr-xr-xhack/infrastructure/docker-ci/functionaltests/test_index.py61
-rwxr-xr-xhack/infrastructure/docker-ci/functionaltests/test_registry.sh27
-rw-r--r--hack/infrastructure/docker-ci/nginx/nginx.conf12
-rw-r--r--hack/infrastructure/docker-ci/report/Dockerfile28
-rwxr-xr-xhack/infrastructure/docker-ci/report/deployment.py130
-rwxr-xr-xhack/infrastructure/docker-ci/report/report.py145
-rwxr-xr-xhack/infrastructure/docker-ci/setup.sh54
-rw-r--r--hack/infrastructure/docker-ci/testbuilder/Dockerfile12
-rwxr-xr-xhack/infrastructure/docker-ci/testbuilder/docker-registry.sh12
-rwxr-xr-xhack/infrastructure/docker-ci/testbuilder/docker.sh18
-rwxr-xr-xhack/infrastructure/docker-ci/testbuilder/testbuilder.sh40
-rwxr-xr-xhack/infrastructure/docker-ci/tool/backup.py47
-rwxr-xr-xhack/install.sh47
-rwxr-xr-xhack/make.sh37
-rwxr-xr-x[-rw-r--r--]hack/make/binary5
-rw-r--r--hack/make/dynbinary6
-rw-r--r--hack/make/test-integration-cli40
-rw-r--r--hack/make/tgz2
-rw-r--r--hack/make/ubuntu8
-rwxr-xr-xhack/release.sh205
-rwxr-xr-xhack/vendor.sh5
-rw-r--r--image/graph.go11
-rw-r--r--image/image.go (renamed from image.go)54
-rw-r--r--integration-cli/build_tests/TestBuildSixtySteps/Dockerfile60
-rw-r--r--integration-cli/docker_cli_build_test.go28
-rw-r--r--integration-cli/docker_cli_commit_test.go34
-rw-r--r--integration-cli/docker_cli_diff_test.go91
-rw-r--r--integration-cli/docker_cli_export_import_test.go50
-rw-r--r--integration-cli/docker_cli_images_test.go58
-rw-r--r--integration-cli/docker_cli_import_test.go20
-rw-r--r--integration-cli/docker_cli_info_test.go29
-rw-r--r--integration-cli/docker_cli_kill_test.go36
-rw-r--r--integration-cli/docker_cli_logs_test.go76
-rw-r--r--integration-cli/docker_cli_pull_test.go30
-rw-r--r--integration-cli/docker_cli_push_test.go48
-rw-r--r--integration-cli/docker_cli_run_test.go386
-rw-r--r--integration-cli/docker_cli_save_load_test.go52
-rw-r--r--integration-cli/docker_cli_search_test.go25
-rw-r--r--integration-cli/docker_cli_tag_test.go86
-rw-r--r--integration-cli/docker_cli_top_test.go32
-rw-r--r--integration-cli/docker_cli_version_test.go39
-rw-r--r--integration-cli/docker_test_vars.go29
-rw-r--r--integration-cli/docker_utils.go63
-rw-r--r--integration-cli/utils.go113
-rw-r--r--integration/api_test.go165
-rw-r--r--integration/auth_test.go14
-rw-r--r--integration/buildfile_test.go78
-rw-r--r--integration/commands_test.go122
-rw-r--r--integration/container_test.go331
-rw-r--r--integration/fixtures/https/ca.pem23
-rw-r--r--integration/fixtures/https/client-cert.pem73
-rw-r--r--integration/fixtures/https/client-key.pem16
-rw-r--r--integration/fixtures/https/client-rogue-cert.pem73
-rw-r--r--integration/fixtures/https/client-rogue-key.pem16
-rw-r--r--integration/fixtures/https/server-cert.pem76
-rw-r--r--integration/fixtures/https/server-key.pem16
-rw-r--r--integration/fixtures/https/server-rogue-cert.pem76
-rw-r--r--integration/fixtures/https/server-rogue-key.pem16
-rw-r--r--integration/graph_test.go61
-rw-r--r--integration/https_test.go82
-rw-r--r--integration/runtime_test.go99
-rw-r--r--integration/server_test.go157
-rw-r--r--integration/utils_test.go25
-rw-r--r--opts/envfile.go54
-rw-r--r--opts/opts.go (renamed from pkg/opts/opts.go)25
-rw-r--r--opts/opts_test.go78
-rw-r--r--pkg/cgroups/apply_nosystemd.go15
-rw-r--r--pkg/cgroups/apply_raw.go216
-rw-r--r--pkg/cgroups/apply_systemd.go158
-rw-r--r--pkg/cgroups/cgroups.go179
-rw-r--r--pkg/iptables/iptables.go1
-rw-r--r--pkg/label/label.go26
-rw-r--r--pkg/label/label_selinux.go77
-rw-r--r--pkg/libcontainer/MAINTAINERS2
-rw-r--r--pkg/libcontainer/README.md125
-rw-r--r--pkg/libcontainer/TODO.md8
-rw-r--r--pkg/libcontainer/apparmor/setup.go49
-rw-r--r--pkg/libcontainer/capabilities/capabilities.go12
-rw-r--r--pkg/libcontainer/container.go34
-rw-r--r--pkg/libcontainer/container.json2
-rw-r--r--pkg/libcontainer/network/loopback.go24
-rw-r--r--pkg/libcontainer/network/netns.go34
-rw-r--r--pkg/libcontainer/network/strategy.go5
-rw-r--r--pkg/libcontainer/network/veth.go6
-rw-r--r--pkg/libcontainer/nsinit/command.go4
-rw-r--r--pkg/libcontainer/nsinit/exec.go47
-rw-r--r--pkg/libcontainer/nsinit/execin.go22
-rw-r--r--pkg/libcontainer/nsinit/init.go40
-rw-r--r--pkg/libcontainer/nsinit/mount.go106
-rw-r--r--pkg/libcontainer/nsinit/nsinit.go5
-rw-r--r--pkg/libcontainer/nsinit/nsinit/main.go55
-rw-r--r--pkg/libcontainer/nsinit/state.go16
-rw-r--r--pkg/libcontainer/types.go99
-rw-r--r--pkg/libcontainer/types_linux.go12
-rw-r--r--pkg/libcontainer/types_test.go2
-rw-r--r--pkg/listenbuffer/buffer.go27
-rw-r--r--pkg/mflag/MAINTAINERS1
-rw-r--r--pkg/mflag/example/example.go3
-rw-r--r--pkg/mflag/flag.go48
-rw-r--r--pkg/namesgenerator/names-generator.go31
-rw-r--r--pkg/netlink/MAINTAINERS2
-rw-r--r--pkg/netlink/netlink_linux.go51
-rw-r--r--pkg/netlink/netlink_unsupported.go4
-rw-r--r--pkg/opts/opts_test.go24
-rw-r--r--pkg/selinux/selinux.go378
-rw-r--r--pkg/selinux/selinux_test.go59
-rw-r--r--pkg/signal/MAINTAINERS2
-rw-r--r--pkg/signal/signal.go19
-rw-r--r--pkg/signal/signal_darwin.go40
-rw-r--r--pkg/signal/signal_freebsd.go42
-rw-r--r--pkg/signal/signal_linux.go43
-rw-r--r--pkg/signal/signal_unsupported.go9
-rw-r--r--pkg/system/calls_linux.go8
-rw-r--r--pkg/system/proc.go26
-rw-r--r--pkg/system/unsupported.go4
-rw-r--r--pkg/systemd/booted.go15
-rw-r--r--pkg/systemd/listendfd.go2
-rw-r--r--pkg/term/MAINTAINERS2
-rw-r--r--pkg/term/termios_darwin.go39
-rw-r--r--pkg/term/termios_freebsd.go65
-rw-r--r--pkg/version/version.go14
-rw-r--r--pkg/version/version_test.go2
-rw-r--r--registry/auth.go (renamed from auth/auth.go)2
-rw-r--r--registry/auth_test.go (renamed from auth/auth_test.go)2
-rw-r--r--registry/registry.go67
-rw-r--r--registry/registry_mock_test.go7
-rw-r--r--registry/registry_test.go15
-rw-r--r--runconfig/compare.go9
-rw-r--r--runconfig/config.go7
-rw-r--r--runconfig/config_test.go220
-rw-r--r--runconfig/hostconfig.go21
-rw-r--r--runconfig/merge.go10
-rw-r--r--runconfig/parse.go68
-rw-r--r--runconfig/parse_test.go3
-rw-r--r--runtime/container.go (renamed from container.go)199
-rw-r--r--runtime/container_unit_test.go (renamed from container_unit_test.go)6
-rw-r--r--runtime/execdriver/MAINTAINERS (renamed from execdriver/MAINTAINERS)2
-rw-r--r--runtime/execdriver/driver.go (renamed from execdriver/driver.go)45
-rw-r--r--runtime/execdriver/execdrivers/execdrivers.go23
-rw-r--r--runtime/execdriver/lxc/driver.go (renamed from execdriver/lxc/driver.go)47
-rw-r--r--runtime/execdriver/lxc/info.go (renamed from execdriver/lxc/info.go)2
-rw-r--r--runtime/execdriver/lxc/info_test.go (renamed from execdriver/lxc/info_test.go)0
-rw-r--r--runtime/execdriver/lxc/init.go (renamed from execdriver/lxc/init.go)33
-rw-r--r--runtime/execdriver/lxc/lxc_init_linux.go (renamed from execdriver/lxc/lxc_init_linux.go)0
-rw-r--r--runtime/execdriver/lxc/lxc_init_unsupported.go (renamed from execdriver/lxc/lxc_init_unsupported.go)0
-rw-r--r--runtime/execdriver/lxc/lxc_template.go (renamed from execdriver/lxc/lxc_template.go)47
-rw-r--r--runtime/execdriver/lxc/lxc_template_unit_test.go (renamed from execdriver/lxc/lxc_template_unit_test.go)18
-rw-r--r--runtime/execdriver/native/configuration/parse.go186
-rw-r--r--runtime/execdriver/native/configuration/parse_test.go166
-rw-r--r--runtime/execdriver/native/create.go114
-rw-r--r--runtime/execdriver/native/driver.go (renamed from execdriver/native/driver.go)93
-rw-r--r--runtime/execdriver/native/info.go (renamed from execdriver/native/info.go)0
-rw-r--r--runtime/execdriver/native/template/default_template.go45
-rw-r--r--runtime/execdriver/native/term.go (renamed from execdriver/native/term.go)2
-rw-r--r--runtime/execdriver/pipes.go (renamed from execdriver/pipes.go)0
-rw-r--r--runtime/execdriver/termconsole.go (renamed from execdriver/termconsole.go)0
-rw-r--r--runtime/graphdriver/aufs/aufs.go (renamed from graphdriver/aufs/aufs.go)4
-rw-r--r--runtime/graphdriver/aufs/aufs_test.go (renamed from graphdriver/aufs/aufs_test.go)54
-rw-r--r--runtime/graphdriver/aufs/dirs.go (renamed from graphdriver/aufs/dirs.go)0
-rw-r--r--runtime/graphdriver/aufs/migrate.go (renamed from graphdriver/aufs/migrate.go)6
-rw-r--r--runtime/graphdriver/aufs/mount.go (renamed from graphdriver/aufs/mount.go)0
-rw-r--r--runtime/graphdriver/aufs/mount_linux.go (renamed from graphdriver/aufs/mount_linux.go)0
-rw-r--r--runtime/graphdriver/aufs/mount_unsupported.go (renamed from graphdriver/aufs/mount_unsupported.go)0
-rw-r--r--runtime/graphdriver/btrfs/btrfs.go (renamed from graphdriver/btrfs/btrfs.go)8
-rw-r--r--runtime/graphdriver/btrfs/dummy_unsupported.go (renamed from graphdriver/btrfs/dummy_unsupported.go)0
-rw-r--r--runtime/graphdriver/devmapper/attach_loopback.go (renamed from graphdriver/devmapper/attach_loopback.go)0
-rw-r--r--runtime/graphdriver/devmapper/deviceset.go (renamed from graphdriver/devmapper/deviceset.go)257
-rw-r--r--runtime/graphdriver/devmapper/devmapper.go (renamed from graphdriver/devmapper/devmapper.go)0
-rw-r--r--runtime/graphdriver/devmapper/devmapper_doc.go (renamed from graphdriver/devmapper/devmapper_doc.go)0
-rw-r--r--runtime/graphdriver/devmapper/devmapper_log.go (renamed from graphdriver/devmapper/devmapper_log.go)0
-rw-r--r--runtime/graphdriver/devmapper/devmapper_test.go (renamed from graphdriver/devmapper/devmapper_test.go)0
-rw-r--r--runtime/graphdriver/devmapper/devmapper_wrapper.go (renamed from graphdriver/devmapper/devmapper_wrapper.go)0
-rw-r--r--runtime/graphdriver/devmapper/driver.go (renamed from graphdriver/devmapper/driver.go)14
-rw-r--r--runtime/graphdriver/devmapper/driver_test.go (renamed from graphdriver/devmapper/driver_test.go)22
-rw-r--r--runtime/graphdriver/devmapper/ioctl.go (renamed from graphdriver/devmapper/ioctl.go)0
-rw-r--r--runtime/graphdriver/devmapper/mount.go (renamed from graphdriver/devmapper/mount.go)0
-rw-r--r--runtime/graphdriver/devmapper/sys.go (renamed from graphdriver/devmapper/sys.go)0
-rw-r--r--runtime/graphdriver/driver.go (renamed from graphdriver/driver.go)5
-rw-r--r--runtime/graphdriver/vfs/driver.go (renamed from graphdriver/vfs/driver.go)4
-rw-r--r--runtime/history.go30
-rw-r--r--runtime/networkdriver/bridge/driver.go (renamed from networkdriver/lxc/driver.go)46
-rw-r--r--runtime/networkdriver/ipallocator/allocator.go (renamed from networkdriver/ipallocator/allocator.go)2
-rw-r--r--runtime/networkdriver/ipallocator/allocator_test.go (renamed from networkdriver/ipallocator/allocator_test.go)0
-rw-r--r--runtime/networkdriver/network.go (renamed from networkdriver/network.go)0
-rw-r--r--runtime/networkdriver/network_test.go (renamed from networkdriver/network_test.go)0
-rw-r--r--runtime/networkdriver/portallocator/portallocator.go (renamed from networkdriver/portallocator/portallocator.go)39
-rw-r--r--runtime/networkdriver/portallocator/portallocator_test.go (renamed from networkdriver/portallocator/portallocator_test.go)33
-rw-r--r--runtime/networkdriver/portmapper/mapper.go (renamed from networkdriver/portmapper/mapper.go)0
-rw-r--r--runtime/networkdriver/portmapper/mapper_test.go (renamed from networkdriver/portmapper/mapper_test.go)0
-rw-r--r--runtime/networkdriver/utils.go (renamed from networkdriver/utils.go)0
-rw-r--r--runtime/runtime.go (renamed from runtime.go)395
-rw-r--r--runtime/runtime_aufs.go22
-rw-r--r--runtime/runtime_btrfs.go7
-rw-r--r--runtime/runtime_devicemapper.go7
-rw-r--r--runtime/runtime_no_aufs.go11
-rw-r--r--runtime/server.go10
-rw-r--r--runtime/sorter.go (renamed from sorter.go)2
-rw-r--r--runtime/state.go (renamed from state.go)7
-rw-r--r--runtime/utils.go (renamed from utils.go)30
-rw-r--r--runtime/utils_test.go29
-rw-r--r--runtime/volumes.go (renamed from volumes.go)163
-rw-r--r--server/buildfile.go (renamed from buildfile.go)148
-rw-r--r--server/server.go (renamed from server.go)528
-rw-r--r--server/server_unit_test.go (renamed from server_unit_test.go)2
-rw-r--r--sysinit/sysinit.go36
-rw-r--r--utils/jsonmessage.go14
-rw-r--r--utils/signal.go11
-rw-r--r--utils/signal_freebsd.go (renamed from utils/signal_darwin.go)2
-rw-r--r--utils/signal_linux.go47
-rw-r--r--utils/stdcopy.go5
-rw-r--r--utils/streamformatter.go29
-rw-r--r--utils/utils.go116
-rw-r--r--utils/utils_test.go51
-rw-r--r--utils_test.go24
-rw-r--r--vendor/src/github.com/coreos/go-systemd/.travis.yml8
-rw-r--r--vendor/src/github.com/coreos/go-systemd/LICENSE191
-rw-r--r--vendor/src/github.com/coreos/go-systemd/README.md44
-rw-r--r--vendor/src/github.com/coreos/go-systemd/activation/files.go (renamed from pkg/systemd/activation/files.go)1
-rw-r--r--vendor/src/github.com/coreos/go-systemd/activation/files_test.go84
-rw-r--r--vendor/src/github.com/coreos/go-systemd/activation/listeners.go (renamed from pkg/systemd/activation/listeners.go)1
-rw-r--r--vendor/src/github.com/coreos/go-systemd/activation/listeners_test.go88
-rw-r--r--vendor/src/github.com/coreos/go-systemd/dbus/dbus.go104
-rw-r--r--vendor/src/github.com/coreos/go-systemd/dbus/dbus_test.go41
-rw-r--r--vendor/src/github.com/coreos/go-systemd/dbus/methods.go354
-rw-r--r--vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go314
-rw-r--r--vendor/src/github.com/coreos/go-systemd/dbus/properties.go220
-rw-r--r--vendor/src/github.com/coreos/go-systemd/dbus/set.go26
-rw-r--r--vendor/src/github.com/coreos/go-systemd/dbus/set_test.go26
-rw-r--r--vendor/src/github.com/coreos/go-systemd/dbus/subscription.go249
-rw-r--r--vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go32
-rw-r--r--vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go67
-rw-r--r--vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go90
-rw-r--r--vendor/src/github.com/coreos/go-systemd/examples/activation/activation.go44
-rw-r--r--vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/README.md19
-rw-r--r--vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service11
-rw-r--r--vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket5
-rw-r--r--vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go26
-rw-r--r--vendor/src/github.com/coreos/go-systemd/examples/activation/listen.go50
-rw-r--r--vendor/src/github.com/coreos/go-systemd/fixtures/start-stop.service5
-rw-r--r--vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service5
-rw-r--r--vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events.service5
-rw-r--r--vendor/src/github.com/coreos/go-systemd/journal/send.go168
-rwxr-xr-xvendor/src/github.com/coreos/go-systemd/test3
-rw-r--r--vendor/src/github.com/godbus/dbus/LICENSE25
-rw-r--r--vendor/src/github.com/godbus/dbus/README.markdown38
-rw-r--r--vendor/src/github.com/godbus/dbus/_examples/eavesdrop.go30
-rw-r--r--vendor/src/github.com/godbus/dbus/_examples/introspect.go21
-rw-r--r--vendor/src/github.com/godbus/dbus/_examples/list-names.go27
-rw-r--r--vendor/src/github.com/godbus/dbus/_examples/notification.go17
-rw-r--r--vendor/src/github.com/godbus/dbus/_examples/prop.go68
-rw-r--r--vendor/src/github.com/godbus/dbus/_examples/server.go45
-rw-r--r--vendor/src/github.com/godbus/dbus/_examples/signal.go24
-rw-r--r--vendor/src/github.com/godbus/dbus/auth.go253
-rw-r--r--vendor/src/github.com/godbus/dbus/auth_external.go26
-rw-r--r--vendor/src/github.com/godbus/dbus/auth_sha1.go102
-rw-r--r--vendor/src/github.com/godbus/dbus/call.go147
-rw-r--r--vendor/src/github.com/godbus/dbus/conn.go601
-rw-r--r--vendor/src/github.com/godbus/dbus/conn_darwin.go21
-rw-r--r--vendor/src/github.com/godbus/dbus/conn_other.go27
-rw-r--r--vendor/src/github.com/godbus/dbus/conn_test.go199
-rw-r--r--vendor/src/github.com/godbus/dbus/dbus.go258
-rw-r--r--vendor/src/github.com/godbus/dbus/decoder.go228
-rw-r--r--vendor/src/github.com/godbus/dbus/doc.go63
-rw-r--r--vendor/src/github.com/godbus/dbus/encoder.go179
-rw-r--r--vendor/src/github.com/godbus/dbus/examples_test.go50
-rw-r--r--vendor/src/github.com/godbus/dbus/export.go302
-rw-r--r--vendor/src/github.com/godbus/dbus/homedir.go28
-rw-r--r--vendor/src/github.com/godbus/dbus/homedir_dynamic.go15
-rw-r--r--vendor/src/github.com/godbus/dbus/homedir_static.go45
-rw-r--r--vendor/src/github.com/godbus/dbus/introspect/call.go27
-rw-r--r--vendor/src/github.com/godbus/dbus/introspect/introspect.go80
-rw-r--r--vendor/src/github.com/godbus/dbus/introspect/introspectable.go74
-rw-r--r--vendor/src/github.com/godbus/dbus/message.go346
-rw-r--r--vendor/src/github.com/godbus/dbus/prop/prop.go264
-rw-r--r--vendor/src/github.com/godbus/dbus/proto_test.go369
-rw-r--r--vendor/src/github.com/godbus/dbus/sig.go257
-rw-r--r--vendor/src/github.com/godbus/dbus/sig_test.go70
-rw-r--r--vendor/src/github.com/godbus/dbus/transport_darwin.go6
-rw-r--r--vendor/src/github.com/godbus/dbus/transport_generic.go35
-rw-r--r--vendor/src/github.com/godbus/dbus/transport_unix.go190
-rw-r--r--vendor/src/github.com/godbus/dbus/transport_unix_test.go49
-rw-r--r--vendor/src/github.com/godbus/dbus/transport_unixcred.go22
-rw-r--r--vendor/src/github.com/godbus/dbus/variant.go129
-rw-r--r--vendor/src/github.com/godbus/dbus/variant_lexer.go284
-rw-r--r--vendor/src/github.com/godbus/dbus/variant_parser.go817
-rw-r--r--vendor/src/github.com/godbus/dbus/variant_test.go78
-rw-r--r--vendor/src/github.com/kr/pty/doc.go5
-rw-r--r--vendor/src/github.com/kr/pty/pty_freebsd.go53
-rw-r--r--vendor/src/github.com/kr/pty/pty_unsupported.go27
-rw-r--r--version.go32
417 files changed, 19054 insertions, 5135 deletions
diff --git a/.travis.yml b/.travis.yml
index 8a43d9a462..b8e4d43fcc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -25,6 +25,6 @@ before_script:
script:
- hack/travis/dco.py
- hack/travis/gofmt.py
- - make -sC docs SPHINXOPTS=-q docs man
+ - make -sC docs SPHINXOPTS=-qW docs man
# vim:set sw=2 ts=2:
diff --git a/AUTHORS b/AUTHORS
index df091d5950..6e34065266 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -177,6 +177,7 @@ Keli Hu <dev@keli.hu>
Ken Cochrane <kencochrane@gmail.com>
Kevin Clark <kevin.clark@gmail.com>
Kevin J. Lynagh <kevin@keminglabs.com>
+Kevin Wallace <kevin@pentabarf.net>
Keyvan Fatehi <keyvanfatehi@gmail.com>
kim0 <email.ahmedkamal@googlemail.com>
Kim BKC Carlbacker <kim.carlbacker@gmail.com>
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 40ba3d32ac..8743d3a7db 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,182 @@
# Changelog
+## 0.10.0 (2014-04-08)
+
+#### Builder
+- Fix printing multiple messages on a single line. Fixes broken output during builds.
+- Follow symlinks inside container's root for ADD build instructions.
+- Fix EXPOSE caching.
+
+#### Documentation
+- Add the new options of `docker ps` to the documentation.
+- Add the options of `docker restart` to the documentation.
+- Update daemon docs and help messages for --iptables and --ip-forward.
+- Updated apt-cacher-ng docs example.
+- Remove duplicate description of --mtu from docs.
+- Add missing -t and -v for `docker images` to the docs.
+- Add fixes to the cli docs.
+- Update libcontainer docs.
+- Update images in docs to remove references to AUFS and LXC.
+- Update the nodejs_web_app in the docs to use the new epel RPM address.
+- Fix external link on security of containers.
+- Update remote API docs.
+- Add image size to history docs.
+- Be explicit about binding to all interfaces in redis example.
+- Document DisableNetwork flag in the 1.10 remote api.
+- Document that `--lxc-conf` is lxc only.
+- Add chef usage documentation.
+- Add an example for an image with multiple tags for `docker load`.
+- Explain what `docker run -a` does in the docs.
+
+#### Contrib
+- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
+- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more.
+- Add check-config script to contrib.
+- Fix fish shell completion.
+
+#### Hack
+* Clean up "go test" output from "make test" to be much more readable/scannable.
+* Exclude more "definitely not unit tested Go source code" directories from hack/make/test.
++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
+- Include contributed completions in Ubuntu PPA.
++ Add cli integration tests.
+* Add tweaks to the hack scripts to make them simpler.
+
+#### Remote API
++ Add TLS auth support for API.
+* Move git clone from daemon to client.
+- Fix content-type detection in docker cp.
+* Split API into 2 go packages.
+
+#### Runtime
+* Support hairpin NAT without going through Docker server.
+- devicemapper: succeed immediately when removing non-existing devices.
+- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping).
+- devicemapper: increase timeout in waitClose to 10 seconds.
+- devicemapper: ensure we shut down thin pool cleanly.
+- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice.
+- devicemapper: avoid AB-BA deadlock.
+- devicemapper: make shutdown better/faster.
+- improve alpha sorting in mflag.
+- Remove manual http cookie management because the cookiejar is being used.
+- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
+- Add FreeBSD support for the client.
+- Merge auth package into registry.
+- Add deprecation warning for -t on `docker pull`.
+- Remove goroutine leak on error.
+- Update parseLxcInfo to comply with new lxc1.0 format.
+- Fix attach exit on darwin.
+- Improve deprecation message.
+- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
+- Only unshare the mount namespace for execin.
+- Merge existing config when committing.
+- Disable daemon startup timeout.
+- Fix issue #4681: add loopback interface when networking is disabled.
+- Add failing test case for issue #4681.
+- Send SIGTERM to child, instead of SIGKILL.
+- Show the driver and the kernel version in `docker info` even when not in debug mode.
+- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
+- Fix issue caused by the absence of /etc/apparmor.d.
+- Don't leave empty cidFile behind when failing to create the container.
+- Mount cgroups automatically if they're not mounted already.
+- Use mock for search tests.
+- Update to double-dash everywhere.
+- Move .dockerenv parsing to lxc driver.
+- Move all bind-mounts in the container inside the namespace.
+- Don't use separate bind mount for container.
+- Always symlink /dev/ptmx for libcontainer.
+- Don't kill by pid for other drivers.
+- Add initial logging to libcontainer.
+* Sort by port in `docker ps`.
+- Move networking drivers into runtime top level package.
++ Add --no-prune to `docker rmi`.
++ Add time since exit in `docker ps`.
+- graphdriver: add build tags.
+- Prevent allocation of previously allocated ports & improve port allocation.
+* Add support for --since/--before in `docker ps`.
+- Clean up container stop.
++ Add support for configurable dns search domains.
+- Add support for relative WORKDIR instructions.
+- Add --output flag for docker save.
+- Remove duplication of DNS entries in config merging.
+- Add cpuset.cpus to cgroups and native driver options.
+- Remove docker-ci.
+- Promote btrfs. btrfs is no longer considered experimental.
+- Add --input flag to `docker load`.
+- Return error when existing bridge doesn't match IP address.
+- Strip comments before parsing line continuations to avoid interpreting instructions as comments.
+- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces.
+- Add systemd implementation of cgroups and make containers show up as systemd units.
+- Fix commit and import when no repository is specified.
+- Remount /var/lib/docker as --private to fix scaling issue.
+- Use the environment's proxy when pinging the remote registry.
+- Reduce error level from harmless errors.
+* Allow --volumes-from to be individual files.
+- Fix expanding buffer in StdCopy.
+- Set error regardless of attach or stdin. This fixes #3364.
+- Add support for --env-file to load environment variables from files.
+- Symlink /etc/mtab and /proc/mounts.
+- Allow pushing a single tag.
+- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM.
+- Don't throw error when starting an already running container.
+- Fix dynamic port allocation limit.
+- remove setupDev from libcontainer.
+- Add API version to `docker version`.
+- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup.
+- Fix --volumes-from mount failure.
+- Allow non-privileged containers to create device nodes.
+- Skip login tests because of external dependency on a hosted service.
+- Deprecate `docker images --tree` and `docker images --viz`.
+- Deprecate `docker insert`.
+- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04.
+- Add specific error message when hitting 401 over HTTP on push.
+- Fix absolute volume check.
+- Remove volumes-from from the config.
+- Move DNS options to hostconfig.
+- Update the apparmor profile for libcontainer.
+- Add deprecation notice for `docker commit -run`.
+
+## 0.9.1 (2014-03-24)
+
+#### Builder
+- Fix printing multiple messages on a single line. Fixes broken output during builds.
+
+#### Documentation
+- Fix external link on security of containers.
+
+#### Contrib
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
+- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
+
+#### Hack
+- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
+
+#### Remote API
+- Fix content-type detection in `docker cp`.
+
+#### Runtime
+- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
+- Only unshare the mount namespace for execin.
+- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
+- Merge existing config when committing.
+- Fix panic in monitor.
+- Disable daemon startup timeout.
+- Fix issue #4681: add loopback interface when networking is disabled.
+- Add failing test case for issue #4681.
+- Send SIGTERM to child, instead of SIGKILL.
+- Show the driver and the kernel version in `docker info` even when not in debug mode.
+- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
+- Fix issue caused by the absence of /etc/apparmor.d.
+- Don't leave empty cidFile behind when failing to create the container.
+- Improve deprecation message.
+- Fix attach exit on darwin.
+- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping).
+- devicemapper: succeed immediately when removing non-existing devices.
+- devicemapper: increase timeout in waitClose to 10 seconds.
+- Remove goroutine leak on error.
+- Update parseLxcInfo to comply with new lxc1.0 format.
+
## 0.9.0 (2014-03-10)
#### Builder
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c03c5d0d9c..0e8b98122f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -126,33 +126,46 @@ For more details see [MAINTAINERS.md](hack/MAINTAINERS.md)
The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
-can certify the below:
+can certify the below (from
+[developercertificate.org](http://developercertificate.org/)):
```
-Docker Developer Certificate of Origin 1.1
-
-By making a contribution to the Docker Project ("Project"), I represent and
-warrant that:
-
-a. The contribution was created in whole or in part by me and I have the right
-to submit the contribution on my own behalf or on behalf of a third party who
-has authorized me to submit this contribution to the Project; or
-
-b. The contribution is based upon previous work that, to the best of my
-knowledge, is covered under an appropriate open source license and I have the
-right and authorization to submit that work with modifications, whether
-created in whole or in part by me, under the same open source license (unless
-I am permitted to submit under a different license) that I have identified in
-the contribution; or
-
-c. The contribution was provided directly to me by some other person who
-represented and warranted (a) or (b) and I have not modified it.
-
-d. I understand and agree that this Project and the contribution are publicly
-known and that a record of the contribution (including all personal
-information I submit with it, including my sign-off record) is maintained
-indefinitely and may be redistributed consistent with this Project or the open
-source license(s) involved.
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
```
then you just add a line to every git commit message:
diff --git a/Dockerfile b/Dockerfile
index 9929a10f3c..ec95bad293 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -6,13 +6,13 @@
# docker build -t docker .
#
# # Mount your source in an interactive container for quick testing:
-# docker run -v `pwd`:/go/src/github.com/dotcloud/docker -privileged -i -t docker bash
+# docker run -v `pwd`:/go/src/github.com/dotcloud/docker --privileged -i -t docker bash
#
# # Run the test suite:
-# docker run -privileged docker hack/make.sh test
+# docker run --privileged docker hack/make.sh test
#
# # Publish a release:
-# docker run -privileged \
+# docker run --privileged \
# -e AWS_S3_BUCKET=baz \
# -e AWS_ACCESS_KEY=foo \
# -e AWS_SECRET_KEY=bar \
@@ -68,7 +68,10 @@ ENV GOPATH /go:/go/src/github.com/dotcloud/docker/vendor
RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
# Compile Go for cross compilation
-ENV DOCKER_CROSSPLATFORMS linux/386 linux/arm darwin/amd64 darwin/386
+ENV DOCKER_CROSSPLATFORMS \
+ linux/386 linux/arm \
+ darwin/amd64 darwin/386 \
+ freebsd/amd64 freebsd/386 freebsd/arm
# (set an explicit GOARM of 5 for maximum compatibility)
ENV GOARM 5
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
@@ -87,7 +90,7 @@ RUN git config --global user.email 'docker-dummy@example.com'
VOLUME /var/lib/docker
WORKDIR /go/src/github.com/dotcloud/docker
-ENV DOCKER_BUILDTAGS apparmor
+ENV DOCKER_BUILDTAGS apparmor selinux
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
diff --git a/LICENSE b/LICENSE
index d645695673..27448585ad 100644
--- a/LICENSE
+++ b/LICENSE
@@ -176,18 +176,7 @@
END OF TERMS AND CONDITIONS
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
+ Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/MAINTAINERS b/MAINTAINERS
index 49d14ba0bd..d1f4d15491 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1,5 +1,5 @@
Solomon Hykes <solomon@dotcloud.com> (@shykes)
-Guillaume Charmes <guillaume@dotcloud.com> (@creack)
+Guillaume J. Charmes <guillaume@docker.com> (@creack)
Victor Vieux <vieux@docker.com> (@vieux)
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
.travis.yml: Tianon Gravi <admwiggin@gmail.com> (@tianon)
diff --git a/Makefile b/Makefile
index b3bea8a31f..d49aa3b667 100644
--- a/Makefile
+++ b/Makefile
@@ -1,9 +1,17 @@
-.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration
+.PHONY: all binary build cross default docs docs-build docs-shell shell test test-integration test-integration-cli
-GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
-DOCKER_IMAGE := docker:$(GIT_BRANCH)
-DOCKER_DOCS_IMAGE := docker-docs:$(GIT_BRANCH)
-DOCKER_RUN_DOCKER := docker run --rm -i -t --privileged -e TESTFLAGS -v "$(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles" "$(DOCKER_IMAGE)"
+# to allow `make BINDDIR=. shell` or `make BINDDIR= test`
+BINDDIR := bundles
+# to allow `make DOCSPORT=9000 docs`
+DOCSPORT := 8000
+
+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
+DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+DOCKER_MOUNT := $(if $(BINDDIR),-v "$(CURDIR)/$(BINDDIR):/go/src/github.com/dotcloud/docker/$(BINDDIR)")
+
+DOCKER_RUN_DOCKER := docker run --rm -it --privileged -e TESTFLAGS -e DOCKER_GRAPHDRIVER -e DOCKER_EXECDRIVER $(DOCKER_MOUNT) "$(DOCKER_IMAGE)"
+DOCKER_RUN_DOCS := docker run --rm -it -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)"
default: binary
@@ -17,17 +25,20 @@ cross: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary cross
docs: docs-build
- docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)"
+ $(DOCKER_RUN_DOCS)
docs-shell: docs-build
- docker run --rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)" bash
+ $(DOCKER_RUN_DOCS) bash
test: build
- $(DOCKER_RUN_DOCKER) hack/make.sh test test-integration
+ $(DOCKER_RUN_DOCKER) hack/make.sh binary test test-integration test-integration-cli
test-integration: build
$(DOCKER_RUN_DOCKER) hack/make.sh test-integration
+test-integration-cli: build
+ $(DOCKER_RUN_DOCKER) hack/make.sh binary test-integration-cli
+
shell: build
$(DOCKER_RUN_DOCKER) bash
diff --git a/VERSION b/VERSION
index ac39a106c4..78bc1abd14 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.9.0
+0.10.0
diff --git a/api/api_unit_test.go b/api/api_unit_test.go
index 2b3e76e75c..678331d369 100644
--- a/api/api_unit_test.go
+++ b/api/api_unit_test.go
@@ -1,9 +1,6 @@
package api
import (
- "fmt"
- "net/http"
- "net/http/httptest"
"testing"
)
@@ -20,46 +17,3 @@ func TestJsonContentType(t *testing.T) {
t.Fail()
}
}
-
-func TestGetBoolParam(t *testing.T) {
- if ret, err := getBoolParam("true"); err != nil || !ret {
- t.Fatalf("true -> true, nil | got %t %s", ret, err)
- }
- if ret, err := getBoolParam("True"); err != nil || !ret {
- t.Fatalf("True -> true, nil | got %t %s", ret, err)
- }
- if ret, err := getBoolParam("1"); err != nil || !ret {
- t.Fatalf("1 -> true, nil | got %t %s", ret, err)
- }
- if ret, err := getBoolParam(""); err != nil || ret {
- t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
- }
- if ret, err := getBoolParam("false"); err != nil || ret {
- t.Fatalf("false -> false, nil | got %t %s", ret, err)
- }
- if ret, err := getBoolParam("0"); err != nil || ret {
- t.Fatalf("0 -> false, nil | got %t %s", ret, err)
- }
- if ret, err := getBoolParam("faux"); err == nil || ret {
- t.Fatalf("faux -> false, err | got %t %s", ret, err)
- }
-}
-
-func TesthttpError(t *testing.T) {
- r := httptest.NewRecorder()
-
- httpError(r, fmt.Errorf("No such method"))
- if r.Code != http.StatusNotFound {
- t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
- }
-
- httpError(r, fmt.Errorf("This accound hasn't been activated"))
- if r.Code != http.StatusForbidden {
- t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
- }
-
- httpError(r, fmt.Errorf("Some error"))
- if r.Code != http.StatusInternalServerError {
- t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
- }
-}
diff --git a/api/client/cli.go b/api/client/cli.go
new file mode 100644
index 0000000000..b58d3c3c75
--- /dev/null
+++ b/api/client/cli.go
@@ -0,0 +1,102 @@
+package client
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "text/template"
+
+ flag "github.com/dotcloud/docker/pkg/mflag"
+ "github.com/dotcloud/docker/pkg/term"
+ "github.com/dotcloud/docker/registry"
+)
+
+var funcMap = template.FuncMap{
+ "json": func(v interface{}) string {
+ a, _ := json.Marshal(v)
+ return string(a)
+ },
+}
+
+func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
+ methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
+ method := reflect.ValueOf(cli).MethodByName(methodName)
+ if !method.IsValid() {
+ return nil, false
+ }
+ return method.Interface().(func(...string) error), true
+}
+
+func (cli *DockerCli) ParseCommands(args ...string) error {
+ if len(args) > 0 {
+ method, exists := cli.getMethod(args[0])
+ if !exists {
+ fmt.Println("Error: Command not found:", args[0])
+ return cli.CmdHelp(args[1:]...)
+ }
+ return method(args[1:]...)
+ }
+ return cli.CmdHelp(args...)
+}
+
+func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
+ flags := flag.NewFlagSet(name, flag.ContinueOnError)
+ flags.Usage = func() {
+ fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description)
+ flags.PrintDefaults()
+ os.Exit(2)
+ }
+ return flags
+}
+
+func (cli *DockerCli) LoadConfigFile() (err error) {
+ cli.configFile, err = registry.LoadConfig(os.Getenv("HOME"))
+ if err != nil {
+ fmt.Fprintf(cli.err, "WARNING: %s\n", err)
+ }
+ return err
+}
+
+func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string, tlsConfig *tls.Config) *DockerCli {
+ var (
+ isTerminal = false
+ terminalFd uintptr
+ )
+
+ if in != nil {
+ if file, ok := in.(*os.File); ok {
+ terminalFd = file.Fd()
+ isTerminal = term.IsTerminal(terminalFd)
+ }
+ }
+
+ if err == nil {
+ err = out
+ }
+ return &DockerCli{
+ proto: proto,
+ addr: addr,
+ in: in,
+ out: out,
+ err: err,
+ isTerminal: isTerminal,
+ terminalFd: terminalFd,
+ tlsConfig: tlsConfig,
+ }
+}
+
+type DockerCli struct {
+ proto string
+ addr string
+ configFile *registry.ConfigFile
+ in io.ReadCloser
+ out io.Writer
+ err io.Writer
+ isTerminal bool
+ terminalFd uintptr
+ tlsConfig *tls.Config
+}
diff --git a/api/client.go b/api/client/commands.go
index 10075ae613..443917d3fb 100644
--- a/api/client.go
+++ b/api/client/commands.go
@@ -1,76 +1,38 @@
-package api
+package client
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/json"
- "errors"
"fmt"
- "github.com/dotcloud/docker/archive"
- "github.com/dotcloud/docker/auth"
- "github.com/dotcloud/docker/dockerversion"
- "github.com/dotcloud/docker/engine"
- "github.com/dotcloud/docker/nat"
- flag "github.com/dotcloud/docker/pkg/mflag"
- "github.com/dotcloud/docker/pkg/term"
- "github.com/dotcloud/docker/registry"
- "github.com/dotcloud/docker/runconfig"
- "github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
- "net"
"net/http"
- "net/http/httputil"
"net/url"
"os"
- "os/signal"
+ "os/exec"
"path"
- "reflect"
- "regexp"
- "runtime"
+ goruntime "runtime"
"strconv"
"strings"
"syscall"
"text/tabwriter"
"text/template"
"time"
-)
-var funcMap = template.FuncMap{
- "json": func(v interface{}) string {
- a, _ := json.Marshal(v)
- return string(a)
- },
-}
-
-var (
- ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+ "github.com/dotcloud/docker/api"
+ "github.com/dotcloud/docker/archive"
+ "github.com/dotcloud/docker/dockerversion"
+ "github.com/dotcloud/docker/engine"
+ "github.com/dotcloud/docker/nat"
+ "github.com/dotcloud/docker/pkg/signal"
+ "github.com/dotcloud/docker/pkg/term"
+ "github.com/dotcloud/docker/registry"
+ "github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/utils"
)
-func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
- methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
- method := reflect.ValueOf(cli).MethodByName(methodName)
- if !method.IsValid() {
- return nil, false
- }
- return method.Interface().(func(...string) error), true
-}
-
-func ParseCommands(proto, addr string, args ...string) error {
- cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, proto, addr)
-
- if len(args) > 0 {
- method, exists := cli.getMethod(args[0])
- if !exists {
- fmt.Println("Error: Command not found:", args[0])
- return cli.CmdHelp(args[1:]...)
- }
- return method(args[1:]...)
- }
- return cli.CmdHelp(args...)
-}
-
func (cli *DockerCli) CmdHelp(args ...string) error {
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
@@ -81,7 +43,7 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
return nil
}
}
- help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET)
+ help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", api.DEFAULTUNIXSOCKET)
for _, command := range [][]string{
{"attach", "Attach to a running container"},
{"build", "Build a container from a Dockerfile"},
@@ -94,7 +56,6 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
{"images", "List images"},
{"import", "Create a new filesystem image from the contents of a tarball"},
{"info", "Display system-wide information"},
- {"insert", "Insert a file in an image"},
{"inspect", "Return low-level information on a container"},
{"kill", "Kill a running container"},
{"load", "Load an image from a tar archive"},
@@ -123,7 +84,9 @@ func (cli *DockerCli) CmdHelp(args ...string) error {
return nil
}
+// FIXME: 'insert' is deprecated.
func (cli *DockerCli) CmdInsert(args ...string) error {
+	fmt.Fprintf(os.Stderr, "Warning: '%s' is deprecated and will be removed in a future version. Please use 'docker build' and 'ADD' instead.\n", "insert")
cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH")
if err := cmd.Parse(args); err != nil {
return nil
@@ -160,6 +123,8 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
err error
)
+ _, err = exec.LookPath("git")
+ hasGit := err == nil
if cmd.Arg(0) == "-" {
// As a special case, 'docker build -' will build from an empty context with the
// contents of stdin as a Dockerfile
@@ -168,17 +133,34 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
return err
}
context, err = archive.Generate("Dockerfile", string(dockerfile))
- } else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) {
+ } else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) {
isRemote = true
} else {
- if _, err := os.Stat(cmd.Arg(0)); err != nil {
+ root := cmd.Arg(0)
+ if utils.IsGIT(root) {
+ remoteURL := cmd.Arg(0)
+ if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) {
+ remoteURL = "https://" + remoteURL
+ }
+
+ root, err = ioutil.TempDir("", "docker-build-git")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(root)
+
+ if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
+ return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
+ }
+ }
+ if _, err := os.Stat(root); err != nil {
return err
}
- filename := path.Join(cmd.Arg(0), "Dockerfile")
+ filename := path.Join(root, "Dockerfile")
if _, err = os.Stat(filename); os.IsNotExist(err) {
return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0))
}
- context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed)
+ context, err = archive.Tar(root, archive.Uncompressed)
}
var body io.Reader
// Setup an upload progress bar
@@ -189,6 +171,15 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
}
// Upload the build context
v := &url.Values{}
+
+ //Check if the given image name can be resolved
+ if *tag != "" {
+ repository, _ := utils.ParseRepositoryTag(*tag)
+ if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
+ return err
+ }
+ }
+
v.Set("t", *tag)
if *suppressOutput {
@@ -229,7 +220,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error {
// 'docker login': login / register a user to registry service.
func (cli *DockerCli) CmdLogin(args ...string) error {
- cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+auth.IndexServerAddress()+"\" is the default.")
+ cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.")
var username, password, email string
@@ -240,7 +231,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
if err != nil {
return nil
}
- serverAddress := auth.IndexServerAddress()
+ serverAddress := registry.IndexServerAddress()
if len(cmd.Args()) > 0 {
serverAddress = cmd.Arg(0)
}
@@ -266,7 +257,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
cli.LoadConfigFile()
authconfig, ok := cli.configFile.Configs[serverAddress]
if !ok {
- authconfig = auth.AuthConfig{}
+ authconfig = registry.AuthConfig{}
}
if username == "" {
@@ -311,7 +302,7 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false)
if statusCode == 401 {
delete(cli.configFile.Configs, serverAddress)
- auth.SaveConfig(cli.configFile)
+ registry.SaveConfig(cli.configFile)
return err
}
if err != nil {
@@ -320,10 +311,10 @@ func (cli *DockerCli) CmdLogin(args ...string) error {
var out2 engine.Env
err = out2.Decode(stream)
if err != nil {
- cli.configFile, _ = auth.LoadConfig(os.Getenv("HOME"))
+ cli.configFile, _ = registry.LoadConfig(os.Getenv("HOME"))
return err
}
- auth.SaveConfig(cli.configFile)
+ registry.SaveConfig(cli.configFile)
if out2.Get("Status") != "" {
fmt.Fprintf(cli.out, "%s\n", out2.Get("Status"))
}
@@ -367,7 +358,8 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
if dockerversion.VERSION != "" {
fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION)
}
- fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
+ fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION)
+ fmt.Fprintf(cli.out, "Go version (client): %s\n", goruntime.Version())
if dockerversion.GITCOMMIT != "" {
fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT)
}
@@ -389,6 +381,9 @@ func (cli *DockerCli) CmdVersion(args ...string) error {
}
out.Close()
fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
+ if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" {
+ fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion)
+ }
fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
release := utils.GetReleaseVersion()
@@ -432,7 +427,7 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
- fmt.Fprintf(cli.out, "Driver: %s\n", remoteInfo.Get("Driver"))
+ fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver"))
var driverStatus [][2]string
if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
return err
@@ -440,14 +435,15 @@ func (cli *DockerCli) CmdInfo(args ...string) error {
for _, pair := range driverStatus {
fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
}
+ fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
+ fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
+
if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
- fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
- fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" {
fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1)
@@ -533,13 +529,23 @@ func (cli *DockerCli) CmdRestart(args ...string) error {
func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
sigc := make(chan os.Signal, 1)
- utils.CatchAll(sigc)
+ signal.CatchAll(sigc)
go func() {
for s := range sigc {
if s == syscall.SIGCHLD {
continue
}
- if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%d", cid, s), nil, false)); err != nil {
+ var sig string
+ for sigStr, sigN := range signal.SignalMap {
+ if sigN == s {
+ sig = sigStr
+ break
+ }
+ }
+ if sig == "" {
+ utils.Errorf("Unsupported signal: %d. Discarding.", s)
+ }
+ if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil {
utils.Debugf("Error sending signal: %s", err)
}
}
@@ -548,9 +554,11 @@ func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
}
func (cli *DockerCli) CmdStart(args ...string) error {
- cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
- attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process")
- openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin")
+ var (
+ cmd = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
+ attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process")
+ openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin")
+ )
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -559,8 +567,10 @@ func (cli *DockerCli) CmdStart(args ...string) error {
return nil
}
- var cErr chan error
- var tty bool
+ var (
+ cErr chan error
+ tty bool
+ )
if *attach || *openStdin {
if cmd.NArg() > 1 {
return fmt.Errorf("You cannot start and attach multiple containers at once.")
@@ -571,7 +581,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
return err
}
- container := &Container{}
+ container := &api.Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
@@ -581,7 +591,7 @@ func (cli *DockerCli) CmdStart(args ...string) error {
if !container.Config.Tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
- defer utils.StopCatch(sigc)
+ defer signal.StopCatch(sigc)
}
var in io.ReadCloser
@@ -606,8 +616,8 @@ func (cli *DockerCli) CmdStart(args ...string) error {
if err != nil {
if !*attach || !*openStdin {
fmt.Fprintf(cli.err, "%s\n", err)
- encounteredError = fmt.Errorf("Error: failed to start one or more containers")
}
+ encounteredError = fmt.Errorf("Error: failed to start one or more containers")
} else {
if !*attach || !*openStdin {
fmt.Fprintf(cli.out, "%s\n", name)
@@ -758,9 +768,13 @@ func (cli *DockerCli) CmdPort(args ...string) error {
return nil
}
- port := cmd.Arg(1)
- proto := "tcp"
- parts := strings.SplitN(port, "/", 2)
+ var (
+ port = cmd.Arg(1)
+ proto = "tcp"
+ parts = strings.SplitN(port, "/", 2)
+ container api.Container
+ )
+
if len(parts) == 2 && len(parts[1]) != 0 {
port = parts[0]
proto = parts[1]
@@ -769,13 +783,13 @@ func (cli *DockerCli) CmdPort(args ...string) error {
if err != nil {
return err
}
- var out Container
- err = json.Unmarshal(body, &out)
+
+ err = json.Unmarshal(body, &container)
if err != nil {
return err
}
- if frontends, exists := out.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil {
+ if frontends, exists := container.NetworkSettings.Ports[nat.Port(port+"/"+proto)]; exists && frontends != nil {
for _, frontend := range frontends {
fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
}
@@ -788,8 +802,9 @@ func (cli *DockerCli) CmdPort(args ...string) error {
// 'docker rmi IMAGE' removes all images with the name IMAGE
func (cli *DockerCli) CmdRmi(args ...string) error {
var (
- cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
- force = cmd.Bool([]string{"f", "-force"}, false, "Force")
+ cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
+ force = cmd.Bool([]string{"f", "-force"}, false, "Force")
+ noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents")
)
if err := cmd.Parse(args); err != nil {
return nil
@@ -803,6 +818,9 @@ func (cli *DockerCli) CmdRmi(args ...string) error {
if *force {
v.Set("force", "1")
}
+ if *noprune {
+ v.Set("noprune", "1")
+ }
var encounteredError error
for _, name := range cmd.Args() {
@@ -969,6 +987,14 @@ func (cli *DockerCli) CmdImport(args ...string) error {
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
v := url.Values{}
+
+ if repository != "" {
+ //Check if the given image name can be resolved
+ if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
+ return err
+ }
+ }
+
v.Set("repo", repository)
v.Set("tag", tag)
v.Set("fromSrc", src)
@@ -983,7 +1009,7 @@ func (cli *DockerCli) CmdImport(args ...string) error {
}
func (cli *DockerCli) CmdPush(args ...string) error {
- cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry")
+ cmd := cli.Subcmd("push", "NAME[:TAG]", "Push an image or a repository to the registry")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -996,8 +1022,10 @@ func (cli *DockerCli) CmdPush(args ...string) error {
cli.LoadConfigFile()
+ remote, tag := utils.ParseRepositoryTag(name)
+
// Resolve the Repository name from fqn to hostname + name
- hostname, _, err := registry.ResolveRepositoryName(name)
+ hostname, _, err := registry.ResolveRepositoryName(remote)
if err != nil {
return err
}
@@ -1008,7 +1036,7 @@ func (cli *DockerCli) CmdPush(args ...string) error {
// Custom repositories can have different rules, and we must also
// allow pushing by image ID.
if len(strings.SplitN(name, "/", 2)) == 1 {
- username := cli.configFile.Configs[auth.IndexServerAddress()].Username
+ username := cli.configFile.Configs[registry.IndexServerAddress()].Username
if username == "" {
username = "<user>"
}
@@ -1016,7 +1044,8 @@ func (cli *DockerCli) CmdPush(args ...string) error {
}
v := url.Values{}
- push := func(authConfig auth.AuthConfig) error {
+ v.Set("tag", tag)
+ push := func(authConfig registry.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
@@ -1025,7 +1054,7 @@ func (cli *DockerCli) CmdPush(args ...string) error {
base64.URLEncoding.EncodeToString(buf),
}
- return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{
+ return cli.stream("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, map[string][]string{
"X-Registry-Auth": registryAuthHeader,
})
}
@@ -1045,8 +1074,8 @@ func (cli *DockerCli) CmdPush(args ...string) error {
}
func (cli *DockerCli) CmdPull(args ...string) error {
- cmd := cli.Subcmd("pull", "NAME", "Pull an image or a repository from the registry")
- tag := cmd.String([]string{"t", "-tag"}, "", "Download tagged image in repository")
+ cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry")
+ tag := cmd.String([]string{"#t", "#-tag"}, "", "Download tagged image in repository")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -1075,7 +1104,7 @@ func (cli *DockerCli) CmdPull(args ...string) error {
v.Set("fromImage", remote)
v.Set("tag", *tag)
- pull := func(authConfig auth.AuthConfig) error {
+ pull := func(authConfig registry.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
@@ -1107,10 +1136,11 @@ func (cli *DockerCli) CmdPull(args ...string) error {
func (cli *DockerCli) CmdImages(args ...string) error {
cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
- all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate images used to build)")
+ all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate image layers)")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
- flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "Output graph in graphviz format")
- flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "Output graph in tree format")
+ // FIXME: --viz and --tree are deprecated. Remove them in a future version.
+ flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format")
+ flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format")
if err := cmd.Parse(args); err != nil {
return nil
@@ -1122,6 +1152,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
filter := cmd.Arg(0)
+ // FIXME: --viz and --tree are deprecated. Remove them in a future version.
if *flViz || *flTree {
body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false))
if err != nil {
@@ -1232,6 +1263,7 @@ func (cli *DockerCli) CmdImages(args ...string) error {
return nil
}
+// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) {
length := images.Len()
if length > 1 {
@@ -1258,6 +1290,7 @@ func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[
}
}
+// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) {
var (
imageID string
@@ -1281,6 +1314,7 @@ func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix strin
}
}
+// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) {
var imageID string
if noTrunc {
@@ -1304,8 +1338,8 @@ func (cli *DockerCli) CmdPs(args ...string) error {
all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.")
- since := cmd.String([]string{"#sinceId", "-since-id"}, "", "Show only containers created since Id, include non-running ones.")
- before := cmd.String([]string{"#beforeId", "-before-id"}, "", "Show only container created before Id, include non-running ones.")
+ since := cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show only containers created since Id or Name, include non-running ones.")
+ before := cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only containers created before Id or Name, include non-running ones.")
last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.")
if err := cmd.Parse(args); err != nil {
@@ -1374,7 +1408,7 @@ func (cli *DockerCli) CmdPs(args ...string) error {
outCommand = utils.Trunc(outCommand, 20)
}
ports.ReadListFrom([]byte(out.Get("Ports")))
- fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), displayablePorts(ports), strings.Join(outNames, ","))
+ fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ","))
if *size {
if out.GetInt("SizeRootFs") > 0 {
fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs")))
@@ -1399,7 +1433,8 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith <hannibal@a-team.com>\"")
- flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`)
+ // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
+ flConfig := cmd.String([]string{"#run", "#-run"}, "", "this option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
if err := cmd.Parse(args); err != nil {
return nil
}
@@ -1419,6 +1454,13 @@ func (cli *DockerCli) CmdCommit(args ...string) error {
return nil
}
+ // Check if the given image name can be resolved
+ if repository != "" {
+ if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
+ return err
+ }
+ }
+
v := url.Values{}
v.Set("container", name)
v.Set("repo", repository)
@@ -1548,7 +1590,7 @@ func (cli *DockerCli) CmdLogs(args ...string) error {
return err
}
- container := &Container{}
+ container := &api.Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
@@ -1585,7 +1627,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
return err
}
- container := &Container{}
+ container := &api.Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
@@ -1614,7 +1656,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error {
if *proxy && !container.Config.Tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
- defer utils.StopCatch(sigc)
+ defer signal.StopCatch(sigc)
}
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil {
@@ -1707,6 +1749,11 @@ func (cli *DockerCli) CmdTag(args ...string) error {
}
v := url.Values{}
+
+ // Check if the given image name can be resolved
+ if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
+ return err
+ }
v.Set("repo", repository)
v.Set("tag", tag)
@@ -1753,7 +1800,21 @@ func (cli *DockerCli) CmdRun(args ...string) error {
if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil {
return fmt.Errorf("Failed to create the container ID file: %s", err)
}
- defer containerIDFile.Close()
+ defer func() {
+ containerIDFile.Close()
+ var (
+ cidFileInfo os.FileInfo
+ err error
+ )
+ if cidFileInfo, err = os.Stat(hostConfig.ContainerIDFile); err != nil {
+ return
+ }
+ if cidFileInfo.Size() == 0 {
+ if err := os.Remove(hostConfig.ContainerIDFile); err != nil {
+ fmt.Printf("failed to remove CID file '%s': %s \n", hostConfig.ContainerIDFile, err)
+ }
+ }
+ }()
}
containerValues := url.Values{}
@@ -1818,7 +1879,7 @@ func (cli *DockerCli) CmdRun(args ...string) error {
if sigProxy {
sigc := cli.forwardAllSignals(runResult.Get("Id"))
- defer utils.StopCatch(sigc)
+ defer signal.StopCatch(sigc)
}
var (
@@ -1996,7 +2057,9 @@ func (cli *DockerCli) CmdCp(args ...string) error {
}
func (cli *DockerCli) CmdSave(args ...string) error {
- cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)")
+ cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout by default)")
+ outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT")
+
if err := cmd.Parse(args); err != nil {
return err
}
@@ -2006,8 +2069,18 @@ func (cli *DockerCli) CmdSave(args ...string) error {
return nil
}
+ var (
+ output io.Writer = cli.out
+ err error
+ )
+ if *outfile != "" {
+ output, err = os.Create(*outfile)
+ if err != nil {
+ return err
+ }
+ }
image := cmd.Arg(0)
- if err := cli.stream("GET", "/images/"+image+"/get", nil, cli.out, nil); err != nil {
+ if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil {
return err
}
return nil
@@ -2015,6 +2088,8 @@ func (cli *DockerCli) CmdSave(args ...string) error {
func (cli *DockerCli) CmdLoad(args ...string) error {
cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN")
+ infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN")
+
if err := cmd.Parse(args); err != nil {
return err
}
@@ -2024,408 +2099,18 @@ func (cli *DockerCli) CmdLoad(args ...string) error {
return nil
}
- if err := cli.stream("POST", "/images/load", cli.in, cli.out, nil); err != nil {
- return err
- }
- return nil
-}
-
-func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
- params := bytes.NewBuffer(nil)
- if data != nil {
- if env, ok := data.(engine.Env); ok {
- if err := env.Encode(params); err != nil {
- return nil, -1, err
- }
- } else {
- buf, err := json.Marshal(data)
- if err != nil {
- return nil, -1, err
- }
- if _, err := params.Write(buf); err != nil {
- return nil, -1, err
- }
- }
- }
- // fixme: refactor client to support redirect
- re := regexp.MustCompile("/+")
- path = re.ReplaceAllString(path, "/")
-
- req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), params)
- if err != nil {
- return nil, -1, err
- }
- if passAuthInfo {
- cli.LoadConfigFile()
- // Resolve the Auth config relevant for this server
- authConfig := cli.configFile.ResolveAuthConfig(auth.IndexServerAddress())
- getHeaders := func(authConfig auth.AuthConfig) (map[string][]string, error) {
- buf, err := json.Marshal(authConfig)
- if err != nil {
- return nil, err
- }
- registryAuthHeader := []string{
- base64.URLEncoding.EncodeToString(buf),
- }
- return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil
- }
- if headers, err := getHeaders(authConfig); err == nil && headers != nil {
- for k, v := range headers {
- req.Header[k] = v
- }
- }
- }
- req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
- req.Host = cli.addr
- if data != nil {
- req.Header.Set("Content-Type", "application/json")
- } else if method == "POST" {
- req.Header.Set("Content-Type", "plain/text")
- }
- dial, err := net.Dial(cli.proto, cli.addr)
- if err != nil {
- if strings.Contains(err.Error(), "connection refused") {
- return nil, -1, ErrConnectionRefused
- }
- return nil, -1, err
- }
- clientconn := httputil.NewClientConn(dial, nil)
- resp, err := clientconn.Do(req)
- if err != nil {
- clientconn.Close()
- if strings.Contains(err.Error(), "connection refused") {
- return nil, -1, ErrConnectionRefused
- }
- return nil, -1, err
- }
-
- if resp.StatusCode < 200 || resp.StatusCode >= 400 {
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, -1, err
- }
- if len(body) == 0 {
- return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL)
- }
- return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
- }
-
- wrapper := utils.NewReadCloserWrapper(resp.Body, func() error {
- if resp != nil && resp.Body != nil {
- resp.Body.Close()
- }
- return clientconn.Close()
- })
- return wrapper, resp.StatusCode, nil
-}
-
-func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
- if (method == "POST" || method == "PUT") && in == nil {
- in = bytes.NewReader([]byte{})
- }
-
- // fixme: refactor client to support redirect
- re := regexp.MustCompile("/+")
- path = re.ReplaceAllString(path, "/")
-
- req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), in)
- if err != nil {
- return err
- }
- req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
- req.Host = cli.addr
- if method == "POST" {
- req.Header.Set("Content-Type", "plain/text")
- }
-
- if headers != nil {
- for k, v := range headers {
- req.Header[k] = v
- }
- }
-
- dial, err := net.Dial(cli.proto, cli.addr)
- if err != nil {
- if strings.Contains(err.Error(), "connection refused") {
- return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
- }
- return err
- }
- clientconn := httputil.NewClientConn(dial, nil)
- resp, err := clientconn.Do(req)
- defer clientconn.Close()
- if err != nil {
- if strings.Contains(err.Error(), "connection refused") {
- return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
- }
- return err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode < 200 || resp.StatusCode >= 400 {
- body, err := ioutil.ReadAll(resp.Body)
+ var (
+ input io.Reader = cli.in
+ err error
+ )
+ if *infile != "" {
+ input, err = os.Open(*infile)
if err != nil {
return err
}
- if len(body) == 0 {
- return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
- }
- return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
- }
-
- if MatchesContentType(resp.Header.Get("Content-Type"), "application/json") {
- return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
}
- if _, err := io.Copy(out, resp.Body); err != nil {
- return err
- }
- return nil
-}
-
-func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
- defer func() {
- if started != nil {
- close(started)
- }
- }()
- // fixme: refactor client to support redirect
- re := regexp.MustCompile("/+")
- path = re.ReplaceAllString(path, "/")
-
- req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", APIVERSION, path), nil)
- if err != nil {
+ if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil {
return err
}
- req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
- req.Header.Set("Content-Type", "plain/text")
- req.Host = cli.addr
-
- dial, err := net.Dial(cli.proto, cli.addr)
- if err != nil {
- if strings.Contains(err.Error(), "connection refused") {
- return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
- }
- return err
- }
- clientconn := httputil.NewClientConn(dial, nil)
- defer clientconn.Close()
-
- // Server hijacks the connection, error 'connection closed' expected
- clientconn.Do(req)
-
- rwc, br := clientconn.Hijack()
- defer rwc.Close()
-
- if started != nil {
- started <- rwc
- }
-
- var receiveStdout chan error
-
- var oldState *term.State
-
- if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
- oldState, err = term.SetRawTerminal(cli.terminalFd)
- if err != nil {
- return err
- }
- defer term.RestoreTerminal(cli.terminalFd, oldState)
- }
-
- if stdout != nil || stderr != nil {
- receiveStdout = utils.Go(func() (err error) {
- defer func() {
- if in != nil {
- if setRawTerminal && cli.isTerminal {
- term.RestoreTerminal(cli.terminalFd, oldState)
- }
- in.Close()
- }
- }()
-
- // When TTY is ON, use regular copy
- if setRawTerminal {
- _, err = io.Copy(stdout, br)
- } else {
- _, err = utils.StdCopy(stdout, stderr, br)
- }
- utils.Debugf("[hijack] End of stdout")
- return err
- })
- }
-
- sendStdin := utils.Go(func() error {
- if in != nil {
- io.Copy(rwc, in)
- utils.Debugf("[hijack] End of stdin")
- }
- if tcpc, ok := rwc.(*net.TCPConn); ok {
- if err := tcpc.CloseWrite(); err != nil {
- utils.Errorf("Couldn't send EOF: %s\n", err)
- }
- } else if unixc, ok := rwc.(*net.UnixConn); ok {
- if err := unixc.CloseWrite(); err != nil {
- utils.Errorf("Couldn't send EOF: %s\n", err)
- }
- }
- // Discard errors due to pipe interruption
- return nil
- })
-
- if stdout != nil || stderr != nil {
- if err := <-receiveStdout; err != nil {
- utils.Errorf("Error receiveStdout: %s", err)
- return err
- }
- }
-
- if !cli.isTerminal {
- if err := <-sendStdin; err != nil {
- utils.Errorf("Error sendStdin: %s", err)
- return err
- }
- }
return nil
-
-}
-
-func (cli *DockerCli) getTtySize() (int, int) {
- if !cli.isTerminal {
- return 0, 0
- }
- ws, err := term.GetWinsize(cli.terminalFd)
- if err != nil {
- utils.Errorf("Error getting size: %s", err)
- if ws == nil {
- return 0, 0
- }
- }
- return int(ws.Height), int(ws.Width)
-}
-
-func (cli *DockerCli) resizeTty(id string) {
- height, width := cli.getTtySize()
- if height == 0 && width == 0 {
- return
- }
- v := url.Values{}
- v.Set("h", strconv.Itoa(height))
- v.Set("w", strconv.Itoa(width))
- if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
- utils.Errorf("Error resize: %s", err)
- }
-}
-
-func (cli *DockerCli) monitorTtySize(id string) error {
- cli.resizeTty(id)
-
- sigchan := make(chan os.Signal, 1)
- signal.Notify(sigchan, syscall.SIGWINCH)
- go func() {
- for _ = range sigchan {
- cli.resizeTty(id)
- }
- }()
- return nil
-}
-
-func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
- flags := flag.NewFlagSet(name, flag.ContinueOnError)
- flags.Usage = func() {
- fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description)
- flags.PrintDefaults()
- os.Exit(2)
- }
- return flags
-}
-
-func (cli *DockerCli) LoadConfigFile() (err error) {
- cli.configFile, err = auth.LoadConfig(os.Getenv("HOME"))
- if err != nil {
- fmt.Fprintf(cli.err, "WARNING: %s\n", err)
- }
- return err
-}
-
-func waitForExit(cli *DockerCli, containerId string) (int, error) {
- stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false)
- if err != nil {
- return -1, err
- }
-
- var out engine.Env
- if err := out.Decode(stream); err != nil {
- return -1, err
- }
- return out.GetInt("StatusCode"), nil
-}
-
-// getExitCode perform an inspect on the container. It returns
-// the running state and the exit code.
-func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
- body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false))
- if err != nil {
- // If we can't connect, then the daemon probably died.
- if err != ErrConnectionRefused {
- return false, -1, err
- }
- return false, -1, nil
- }
- c := &Container{}
- if err := json.Unmarshal(body, c); err != nil {
- return false, -1, err
- }
- return c.State.Running, c.State.ExitCode, nil
-}
-
-func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
- if stream != nil {
- defer stream.Close()
- }
- if err != nil {
- return nil, statusCode, err
- }
- body, err := ioutil.ReadAll(stream)
- if err != nil {
- return nil, -1, err
- }
- return body, statusCode, nil
-}
-
-func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
- var (
- isTerminal = false
- terminalFd uintptr
- )
-
- if in != nil {
- if file, ok := in.(*os.File); ok {
- terminalFd = file.Fd()
- isTerminal = term.IsTerminal(terminalFd)
- }
- }
-
- if err == nil {
- err = out
- }
- return &DockerCli{
- proto: proto,
- addr: addr,
- in: in,
- out: out,
- err: err,
- isTerminal: isTerminal,
- terminalFd: terminalFd,
- }
-}
-
-type DockerCli struct {
- proto string
- addr string
- configFile *auth.ConfigFile
- in io.ReadCloser
- out io.Writer
- err io.Writer
- isTerminal bool
- terminalFd uintptr
}
diff --git a/api/client/utils.go b/api/client/utils.go
new file mode 100644
index 0000000000..4ef09ba783
--- /dev/null
+++ b/api/client/utils.go
@@ -0,0 +1,390 @@
+package client
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ gosignal "os/signal"
+ "regexp"
+ goruntime "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/dotcloud/docker/api"
+ "github.com/dotcloud/docker/dockerversion"
+ "github.com/dotcloud/docker/engine"
+ "github.com/dotcloud/docker/pkg/term"
+ "github.com/dotcloud/docker/registry"
+ "github.com/dotcloud/docker/utils"
+)
+
+var (
+ ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+)
+
+func (cli *DockerCli) dial() (net.Conn, error) {
+ if cli.tlsConfig != nil && cli.proto != "unix" {
+ return tls.Dial(cli.proto, cli.addr, cli.tlsConfig)
+ }
+ return net.Dial(cli.proto, cli.addr)
+}
+
+func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
+ params := bytes.NewBuffer(nil)
+ if data != nil {
+ if env, ok := data.(engine.Env); ok {
+ if err := env.Encode(params); err != nil {
+ return nil, -1, err
+ }
+ } else {
+ buf, err := json.Marshal(data)
+ if err != nil {
+ return nil, -1, err
+ }
+ if _, err := params.Write(buf); err != nil {
+ return nil, -1, err
+ }
+ }
+ }
+ // fixme: refactor client to support redirect
+ re := regexp.MustCompile("/+")
+ path = re.ReplaceAllString(path, "/")
+
+ req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params)
+ if err != nil {
+ return nil, -1, err
+ }
+ if passAuthInfo {
+ cli.LoadConfigFile()
+ // Resolve the Auth config relevant for this server
+ authConfig := cli.configFile.ResolveAuthConfig(registry.IndexServerAddress())
+ getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) {
+ buf, err := json.Marshal(authConfig)
+ if err != nil {
+ return nil, err
+ }
+ registryAuthHeader := []string{
+ base64.URLEncoding.EncodeToString(buf),
+ }
+ return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil
+ }
+ if headers, err := getHeaders(authConfig); err == nil && headers != nil {
+ for k, v := range headers {
+ req.Header[k] = v
+ }
+ }
+ }
+ req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
+ req.Host = cli.addr
+ if data != nil {
+ req.Header.Set("Content-Type", "application/json")
+ } else if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+ dial, err := cli.dial()
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return nil, -1, ErrConnectionRefused
+ }
+ return nil, -1, err
+ }
+ clientconn := httputil.NewClientConn(dial, nil)
+ resp, err := clientconn.Do(req)
+ if err != nil {
+ clientconn.Close()
+ if strings.Contains(err.Error(), "connection refused") {
+ return nil, -1, ErrConnectionRefused
+ }
+ return nil, -1, err
+ }
+
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, -1, err
+ }
+ if len(body) == 0 {
+ return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL)
+ }
+ return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
+ }
+
+ wrapper := utils.NewReadCloserWrapper(resp.Body, func() error {
+ if resp != nil && resp.Body != nil {
+ resp.Body.Close()
+ }
+ return clientconn.Close()
+ })
+ return wrapper, resp.StatusCode, nil
+}
+
+func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
+ if (method == "POST" || method == "PUT") && in == nil {
+ in = bytes.NewReader([]byte{})
+ }
+
+ // fixme: refactor client to support redirect
+ re := regexp.MustCompile("/+")
+ path = re.ReplaceAllString(path, "/")
+
+ req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
+ req.Host = cli.addr
+ if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+
+ if headers != nil {
+ for k, v := range headers {
+ req.Header[k] = v
+ }
+ }
+
+ dial, err := cli.dial()
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+ }
+ return err
+ }
+ clientconn := httputil.NewClientConn(dial, nil)
+ resp, err := clientconn.Do(req)
+ defer clientconn.Close()
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+ }
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+ if len(body) == 0 {
+ return fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode))
+ }
+ return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
+ }
+
+ if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") {
+ return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
+ }
+ if _, err := io.Copy(out, resp.Body); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
+ defer func() {
+ if started != nil {
+ close(started)
+ }
+ }()
+ // fixme: refactor client to support redirect
+ re := regexp.MustCompile("/+")
+ path = re.ReplaceAllString(path, "/")
+
+ req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION)
+ req.Header.Set("Content-Type", "plain/text")
+ req.Host = cli.addr
+
+ dial, err := cli.dial()
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?")
+ }
+ return err
+ }
+ clientconn := httputil.NewClientConn(dial, nil)
+ defer clientconn.Close()
+
+ // Server hijacks the connection, error 'connection closed' expected
+ clientconn.Do(req)
+
+ rwc, br := clientconn.Hijack()
+ defer rwc.Close()
+
+ if started != nil {
+ started <- rwc
+ }
+
+ var receiveStdout chan error
+
+ var oldState *term.State
+
+ if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
+ oldState, err = term.SetRawTerminal(cli.terminalFd)
+ if err != nil {
+ return err
+ }
+ defer term.RestoreTerminal(cli.terminalFd, oldState)
+ }
+
+ if stdout != nil || stderr != nil {
+ receiveStdout = utils.Go(func() (err error) {
+ defer func() {
+ if in != nil {
+ if setRawTerminal && cli.isTerminal {
+ term.RestoreTerminal(cli.terminalFd, oldState)
+ }
+ // For some reason this Close call blocks on darwin..
+ // As the client exits right after, simply discard the close
+ // until we find a better solution.
+ if goruntime.GOOS != "darwin" {
+ in.Close()
+ }
+ }
+ }()
+
+ // When TTY is ON, use regular copy
+ if setRawTerminal {
+ _, err = io.Copy(stdout, br)
+ } else {
+ _, err = utils.StdCopy(stdout, stderr, br)
+ }
+ utils.Debugf("[hijack] End of stdout")
+ return err
+ })
+ }
+
+ sendStdin := utils.Go(func() error {
+ if in != nil {
+ io.Copy(rwc, in)
+ utils.Debugf("[hijack] End of stdin")
+ }
+ if tcpc, ok := rwc.(*net.TCPConn); ok {
+ if err := tcpc.CloseWrite(); err != nil {
+ utils.Debugf("Couldn't send EOF: %s\n", err)
+ }
+ } else if unixc, ok := rwc.(*net.UnixConn); ok {
+ if err := unixc.CloseWrite(); err != nil {
+ utils.Debugf("Couldn't send EOF: %s\n", err)
+ }
+ }
+ // Discard errors due to pipe interruption
+ return nil
+ })
+
+ if stdout != nil || stderr != nil {
+ if err := <-receiveStdout; err != nil {
+ utils.Debugf("Error receiveStdout: %s", err)
+ return err
+ }
+ }
+
+ if !cli.isTerminal {
+ if err := <-sendStdin; err != nil {
+ utils.Debugf("Error sendStdin: %s", err)
+ return err
+ }
+ }
+ return nil
+
+}
+
+func (cli *DockerCli) resizeTty(id string) {
+ height, width := cli.getTtySize()
+ if height == 0 && width == 0 {
+ return
+ }
+ v := url.Values{}
+ v.Set("h", strconv.Itoa(height))
+ v.Set("w", strconv.Itoa(width))
+ if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
+ utils.Debugf("Error resize: %s", err)
+ }
+}
+
+func waitForExit(cli *DockerCli, containerId string) (int, error) {
+ stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false)
+ if err != nil {
+ return -1, err
+ }
+
+ var out engine.Env
+ if err := out.Decode(stream); err != nil {
+ return -1, err
+ }
+ return out.GetInt("StatusCode"), nil
+}
+
+// getExitCode performs an inspect on the container. It returns
+// the running state and the exit code.
+func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
+ body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false))
+ if err != nil {
+ // If we can't connect, then the daemon probably died.
+ if err != ErrConnectionRefused {
+ return false, -1, err
+ }
+ return false, -1, nil
+ }
+ c := &api.Container{}
+ if err := json.Unmarshal(body, c); err != nil {
+ return false, -1, err
+ }
+ return c.State.Running, c.State.ExitCode, nil
+}
+
+func (cli *DockerCli) monitorTtySize(id string) error {
+ cli.resizeTty(id)
+
+ sigchan := make(chan os.Signal, 1)
+ gosignal.Notify(sigchan, syscall.SIGWINCH)
+ go func() {
+ for _ = range sigchan {
+ cli.resizeTty(id)
+ }
+ }()
+ return nil
+}
+
+func (cli *DockerCli) getTtySize() (int, int) {
+ if !cli.isTerminal {
+ return 0, 0
+ }
+ ws, err := term.GetWinsize(cli.terminalFd)
+ if err != nil {
+ utils.Debugf("Error getting size: %s", err)
+ if ws == nil {
+ return 0, 0
+ }
+ }
+ return int(ws.Height), int(ws.Width)
+}
+
+func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
+ if stream != nil {
+ defer stream.Close()
+ }
+ if err != nil {
+ return nil, statusCode, err
+ }
+ body, err := ioutil.ReadAll(stream)
+ if err != nil {
+ return nil, -1, err
+ }
+ return body, statusCode, nil
+}
diff --git a/api/common.go b/api/common.go
index 10e7ddb4ae..44bd901379 100644
--- a/api/common.go
+++ b/api/common.go
@@ -3,15 +3,16 @@ package api
import (
"fmt"
"github.com/dotcloud/docker/engine"
+ "github.com/dotcloud/docker/pkg/version"
"github.com/dotcloud/docker/utils"
"mime"
"strings"
)
const (
- APIVERSION = "1.10"
- DEFAULTHTTPHOST = "127.0.0.1"
- DEFAULTUNIXSOCKET = "/var/run/docker.sock"
+ APIVERSION version.Version = "1.10"
+ DEFAULTHTTPHOST = "127.0.0.1"
+ DEFAULTUNIXSOCKET = "/var/run/docker.sock"
)
func ValidateHost(val string) (string, error) {
@@ -23,8 +24,10 @@ func ValidateHost(val string) (string, error) {
}
//TODO remove, used on < 1.5 in getContainersJSON
-func displayablePorts(ports *engine.Table) string {
+func DisplayablePorts(ports *engine.Table) string {
result := []string{}
+ ports.SetKey("PublicPort")
+ ports.Sort()
for _, port := range ports.Data {
if port.Get("IP") == "" {
result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type")))
diff --git a/api/server.go b/api/server/server.go
index 6fafe60f9f..c6eafaf265 100644
--- a/api/server.go
+++ b/api/server/server.go
@@ -1,21 +1,15 @@
-package api
+package server
import (
"bufio"
"bytes"
"code.google.com/p/go.net/websocket"
+ "crypto/tls"
+ "crypto/x509"
"encoding/base64"
"encoding/json"
"expvar"
"fmt"
- "github.com/dotcloud/docker/auth"
- "github.com/dotcloud/docker/engine"
- "github.com/dotcloud/docker/pkg/listenbuffer"
- "github.com/dotcloud/docker/pkg/systemd"
- "github.com/dotcloud/docker/pkg/user"
- "github.com/dotcloud/docker/pkg/version"
- "github.com/dotcloud/docker/utils"
- "github.com/gorilla/mux"
"io"
"io/ioutil"
"log"
@@ -26,7 +20,16 @@ import (
"strconv"
"strings"
"syscall"
- "time"
+
+ "github.com/dotcloud/docker/api"
+ "github.com/dotcloud/docker/engine"
+ "github.com/dotcloud/docker/pkg/listenbuffer"
+ "github.com/dotcloud/docker/pkg/systemd"
+ "github.com/dotcloud/docker/pkg/user"
+ "github.com/dotcloud/docker/pkg/version"
+ "github.com/dotcloud/docker/registry"
+ "github.com/dotcloud/docker/utils"
+ "github.com/gorilla/mux"
)
var (
@@ -314,7 +317,7 @@ func getContainersJSON(eng *engine.Engine, version version.Version, w http.Respo
for _, out := range outs.Data {
ports := engine.NewTable("", 0)
ports.ReadListFrom([]byte(out.Get("Ports")))
- out.Set("Ports", displayablePorts(ports))
+ out.Set("Ports", api.DisplayablePorts(ports))
}
w.Header().Set("Content-Type", "application/json")
if _, err = outs.WriteListTo(w); err != nil {
@@ -381,13 +384,13 @@ func postImagesCreate(eng *engine.Engine, version version.Version, w http.Respon
job *engine.Job
)
authEncoded := r.Header.Get("X-Registry-Auth")
- authConfig := &auth.AuthConfig{}
+ authConfig := &registry.AuthConfig{}
if authEncoded != "" {
authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
- authConfig = &auth.AuthConfig{}
+ authConfig = &registry.AuthConfig{}
}
}
if image != "" { //pull
@@ -429,7 +432,7 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons
}
var (
authEncoded = r.Header.Get("X-Registry-Auth")
- authConfig = &auth.AuthConfig{}
+ authConfig = &registry.AuthConfig{}
metaHeaders = map[string][]string{}
)
@@ -438,7 +441,7 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
// for a search it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
- authConfig = &auth.AuthConfig{}
+ authConfig = &registry.AuthConfig{}
}
}
for k, v := range r.Header {
@@ -455,6 +458,7 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons
return job.Run()
}
+// FIXME: 'insert' is deprecated as of 0.10, and should be removed in a future version.
func postImagesInsert(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
if err := parseForm(r); err != nil {
return err
@@ -494,7 +498,7 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response
if err := parseForm(r); err != nil {
return err
}
- authConfig := &auth.AuthConfig{}
+ authConfig := &registry.AuthConfig{}
authEncoded := r.Header.Get("X-Registry-Auth")
if authEncoded != "" {
@@ -502,7 +506,7 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response
authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
// to increase compatibility to existing api it is defaulting to be empty
- authConfig = &auth.AuthConfig{}
+ authConfig = &registry.AuthConfig{}
}
} else {
// the old format is supported for compatibility if there was no authConfig header
@@ -514,6 +518,7 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response
job := eng.Job("push", vars["name"])
job.SetenvJson("metaHeaders", metaHeaders)
job.SetenvJson("authConfig", authConfig)
+ job.Setenv("tag", r.Form.Get("tag"))
if version.GreaterThan("1.0") {
job.SetenvBool("json", true)
streamJSON(job, w, true)
@@ -624,6 +629,7 @@ func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWr
var job = eng.Job("image_delete", vars["name"])
streamJSON(job, w, false)
job.Setenv("force", r.Form.Get("force"))
+ job.Setenv("noprune", r.Form.Get("noprune"))
return job.Run()
}
@@ -636,7 +642,7 @@ func postContainersStart(eng *engine.Engine, version version.Version, w http.Res
job := eng.Job("start", name)
// allow a nil body for backwards compatibility
if r.Body != nil {
- if MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
+ if api.MatchesContentType(r.Header.Get("Content-Type"), "application/json") {
if err := job.DecodeEnv(r.Body); err != nil {
return err
}
@@ -823,9 +829,9 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
}
var (
authEncoded = r.Header.Get("X-Registry-Auth")
- authConfig = &auth.AuthConfig{}
+ authConfig = &registry.AuthConfig{}
configFileEncoded = r.Header.Get("X-Registry-Config")
- configFile = &auth.ConfigFile{}
+ configFile = &registry.ConfigFile{}
job = eng.Job("build")
)
@@ -838,7 +844,7 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
- authConfig = &auth.AuthConfig{}
+ authConfig = &registry.AuthConfig{}
}
}
@@ -847,7 +853,7 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite
if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil {
// for a pull it is not an error if no auth was given
// to increase compatibility with the existing api it is defaulting to be empty
- configFile = &auth.ConfigFile{}
+ configFile = &registry.ConfigFile{}
}
}
@@ -883,7 +889,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
var copyData engine.Env
- if contentType := r.Header.Get("Content-Type"); contentType == "application/json" {
+ if contentType := r.Header.Get("Content-Type"); api.MatchesContentType(contentType, "application/json") {
if err := copyData.Decode(r.Body); err != nil {
return err
}
@@ -894,6 +900,9 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
if copyData.Get("Resource") == "" {
return fmt.Errorf("Path cannot be empty")
}
+
+ origResource := copyData.Get("Resource")
+
if copyData.Get("Resource")[0] == '/' {
copyData.Set("Resource", copyData.Get("Resource")[1:])
}
@@ -904,6 +913,8 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp
utils.Errorf("%s", err.Error())
if strings.Contains(err.Error(), "No such container") {
w.WriteHeader(http.StatusNotFound)
+ } else if strings.Contains(err.Error(), "no such file or directory") {
+ return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"])
}
}
return nil
@@ -930,20 +941,20 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local
if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") {
userAgent := strings.Split(r.Header.Get("User-Agent"), "/")
- if len(userAgent) == 2 && !dockerVersion.Equal(userAgent[1]) {
+ if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) {
utils.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion)
}
}
version := version.Version(mux.Vars(r)["version"])
if version == "" {
- version = APIVERSION
+ version = api.APIVERSION
}
if enableCors {
writeCorsHeaders(w, r)
}
- if version.GreaterThan(APIVERSION) {
- http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, APIVERSION).Error(), http.StatusNotFound)
+ if version.GreaterThan(api.APIVERSION) {
+ http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound)
return
}
@@ -1130,9 +1141,8 @@ func changeGroup(addr string, nameOrGid string) error {
// ListenAndServe sets up the required http.Server and gets it listening for
// each addr passed in and does protocol specific checking.
-func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors bool, dockerVersion string, socketGroup string) error {
- r, err := createRouter(eng, logging, enableCors, dockerVersion)
-
+func ListenAndServe(proto, addr string, job *engine.Job) error {
+ r, err := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"))
if err != nil {
return err
}
@@ -1147,22 +1157,48 @@ func ListenAndServe(proto, addr string, eng *engine.Engine, logging, enableCors
}
}
- l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock, 15*time.Minute)
+ l, err := listenbuffer.NewListenBuffer(proto, addr, activationLock)
if err != nil {
return err
}
+ if proto != "unix" && (job.GetenvBool("Tls") || job.GetenvBool("TlsVerify")) {
+ tlsCert := job.Getenv("TlsCert")
+ tlsKey := job.Getenv("TlsKey")
+ cert, err := tls.LoadX509KeyPair(tlsCert, tlsKey)
+ if err != nil {
+ return fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?",
+ tlsCert, tlsKey, err)
+ }
+ tlsConfig := &tls.Config{
+ NextProtos: []string{"http/1.1"},
+ Certificates: []tls.Certificate{cert},
+ }
+ if job.GetenvBool("TlsVerify") {
+ certPool := x509.NewCertPool()
+ file, err := ioutil.ReadFile(job.Getenv("TlsCa"))
+ if err != nil {
+ return fmt.Errorf("Couldn't read CA certificate: %s", err)
+ }
+ certPool.AppendCertsFromPEM(file)
+
+ tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ tlsConfig.ClientCAs = certPool
+ }
+ l = tls.NewListener(l, tlsConfig)
+ }
+
// Basic error and sanity checking
switch proto {
case "tcp":
- if !strings.HasPrefix(addr, "127.0.0.1") {
+ if !strings.HasPrefix(addr, "127.0.0.1") && !job.GetenvBool("TlsVerify") {
log.Println("/!\\ DON'T BIND ON ANOTHER IP ADDRESS THAN 127.0.0.1 IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\")
}
case "unix":
if err := os.Chmod(addr, 0660); err != nil {
return err
}
-
+ socketGroup := job.Getenv("SocketGroup")
if socketGroup != "" {
if err := changeGroup(addr, socketGroup); err != nil {
if socketGroup == "docker" {
@@ -1198,7 +1234,7 @@ func ServeApi(job *engine.Job) engine.Status {
protoAddrParts := strings.SplitN(protoAddr, "://", 2)
go func() {
log.Printf("Listening for HTTP on %s (%s)\n", protoAddrParts[0], protoAddrParts[1])
- chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("Version"), job.Getenv("SocketGroup"))
+ chErrors <- ListenAndServe(protoAddrParts[0], protoAddrParts[1], job)
}()
}
diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go
new file mode 100644
index 0000000000..3dbba640ff
--- /dev/null
+++ b/api/server/server_unit_test.go
@@ -0,0 +1,180 @@
+package server
+
+import (
+ "fmt"
+ "github.com/dotcloud/docker/api"
+ "github.com/dotcloud/docker/engine"
+ "github.com/dotcloud/docker/utils"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+)
+
+func TestGetBoolParam(t *testing.T) {
+ if ret, err := getBoolParam("true"); err != nil || !ret {
+ t.Fatalf("true -> true, nil | got %t %s", ret, err)
+ }
+ if ret, err := getBoolParam("True"); err != nil || !ret {
+ t.Fatalf("True -> true, nil | got %t %s", ret, err)
+ }
+ if ret, err := getBoolParam("1"); err != nil || !ret {
+ t.Fatalf("1 -> true, nil | got %t %s", ret, err)
+ }
+ if ret, err := getBoolParam(""); err != nil || ret {
+ t.Fatalf("\"\" -> false, nil | got %t %s", ret, err)
+ }
+ if ret, err := getBoolParam("false"); err != nil || ret {
+ t.Fatalf("false -> false, nil | got %t %s", ret, err)
+ }
+ if ret, err := getBoolParam("0"); err != nil || ret {
+ t.Fatalf("0 -> false, nil | got %t %s", ret, err)
+ }
+ if ret, err := getBoolParam("faux"); err == nil || ret {
+ t.Fatalf("faux -> false, err | got %t %s", ret, err)
+
+ }
+}
+
+func TesthttpError(t *testing.T) {
+ r := httptest.NewRecorder()
+
+ httpError(r, fmt.Errorf("No such method"))
+ if r.Code != http.StatusNotFound {
+ t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code)
+ }
+
+ httpError(r, fmt.Errorf("This accound hasn't been activated"))
+ if r.Code != http.StatusForbidden {
+ t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code)
+ }
+
+ httpError(r, fmt.Errorf("Some error"))
+ if r.Code != http.StatusInternalServerError {
+ t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code)
+ }
+}
+
+func TestGetVersion(t *testing.T) {
+ tmp, err := utils.TestDirectory("")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+ eng, err := engine.New(tmp)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var called bool
+ eng.Register("version", func(job *engine.Job) engine.Status {
+ called = true
+ v := &engine.Env{}
+ v.SetJson("Version", "42.1")
+ v.Set("ApiVersion", "1.1.1.1.1")
+ v.Set("GoVersion", "2.42")
+ v.Set("Os", "Linux")
+ v.Set("Arch", "x86_64")
+ if _, err := v.WriteTo(job.Stdout); err != nil {
+ return job.Error(err)
+ }
+ return engine.StatusOK
+ })
+
+ r := httptest.NewRecorder()
+ req, err := http.NewRequest("GET", "/version", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // FIXME getting the version should require an actual running Server
+ if err := ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if !called {
+ t.Fatalf("handler was not called")
+ }
+ out := engine.NewOutput()
+ v, err := out.AddEnv()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := io.Copy(out, r.Body); err != nil {
+ t.Fatal(err)
+ }
+ out.Close()
+ expected := "42.1"
+ if result := v.Get("Version"); result != expected {
+ t.Errorf("Expected version %s, %s found", expected, result)
+ }
+ expected = "application/json"
+ if result := r.HeaderMap.Get("Content-Type"); result != expected {
+ t.Errorf("Expected Content-Type %s, %s found", expected, result)
+ }
+}
+
+func TestGetInfo(t *testing.T) {
+ tmp, err := utils.TestDirectory("")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+ eng, err := engine.New(tmp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var called bool
+ eng.Register("info", func(job *engine.Job) engine.Status {
+ called = true
+ v := &engine.Env{}
+ v.SetInt("Containers", 1)
+ v.SetInt("Images", 42000)
+ if _, err := v.WriteTo(job.Stdout); err != nil {
+ return job.Error(err)
+ }
+ return engine.StatusOK
+ })
+
+ r := httptest.NewRecorder()
+ req, err := http.NewRequest("GET", "/info", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // FIXME getting the version should require an actual running Server
+ if err := ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ t.Fatal(err)
+ }
+ if !called {
+ t.Fatalf("handler was not called")
+ }
+
+ out := engine.NewOutput()
+ i, err := out.AddEnv()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := io.Copy(out, r.Body); err != nil {
+ t.Fatal(err)
+ }
+ out.Close()
+ {
+ expected := 42000
+ result := i.GetInt("Images")
+ if expected != result {
+ t.Fatalf("%#v\n", result)
+ }
+ }
+ {
+ expected := 1
+ result := i.GetInt("Containers")
+ if expected != result {
+ t.Fatalf("%#v\n", result)
+ }
+ }
+ {
+ expected := "application/json"
+ if result := r.HeaderMap.Get("Content-Type"); result != expected {
+ t.Fatalf("%#v\n", result)
+ }
+ }
+}
diff --git a/archive/archive.go b/archive/archive.go
index 5d6c020438..2fac18e99f 100644
--- a/archive/archive.go
+++ b/archive/archive.go
@@ -404,7 +404,7 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = os.MkdirAll(parentPath, 600)
+ err = os.MkdirAll(parentPath, 0777)
if err != nil {
return err
}
@@ -617,6 +617,9 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
if _, err := io.Copy(f, src); err != nil {
return nil, err
}
+ if err = f.Sync(); err != nil {
+ return nil, err
+ }
if _, err := f.Seek(0, 0); err != nil {
return nil, err
}
diff --git a/archive/changes_test.go b/archive/changes_test.go
index 1302b76f47..34c0f0da64 100644
--- a/archive/changes_test.go
+++ b/archive/changes_test.go
@@ -138,7 +138,7 @@ func mutateSampleDir(t *testing.T, root string) {
}
// Rewrite a file
- if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileN\n"), 0777); err != nil {
+ if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
t.Fatal(err)
}
@@ -146,12 +146,12 @@ func mutateSampleDir(t *testing.T, root string) {
if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
t.Fatal(err)
}
- if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileM\n"), 0404); err != nil {
+ if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
t.Fatal(err)
}
// Touch file
- if err := os.Chtimes(path.Join(root, "file4"), time.Now(), time.Now()); err != nil {
+ if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
t.Fatal(err)
}
@@ -195,7 +195,7 @@ func mutateSampleDir(t *testing.T, root string) {
}
// Touch dir
- if err := os.Chtimes(path.Join(root, "dir3"), time.Now(), time.Now()); err != nil {
+ if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
t.Fatal(err)
}
}
diff --git a/auth/MAINTAINERS b/auth/MAINTAINERS
deleted file mode 100644
index bf3984f5f9..0000000000
--- a/auth/MAINTAINERS
+++ /dev/null
@@ -1,3 +0,0 @@
-Sam Alba <sam@dotcloud.com> (@samalba)
-Joffrey Fuhrer <joffrey@dotcloud.com> (@shin-)
-Ken Cochrane <ken@dotcloud.com> (@kencochrane)
diff --git a/builtins/builtins.go b/builtins/builtins.go
index 5b146cd20f..109bc5b913 100644
--- a/builtins/builtins.go
+++ b/builtins/builtins.go
@@ -1,11 +1,10 @@
package builtins
import (
+ api "github.com/dotcloud/docker/api/server"
"github.com/dotcloud/docker/engine"
-
- "github.com/dotcloud/docker"
- "github.com/dotcloud/docker/api"
- "github.com/dotcloud/docker/networkdriver/lxc"
+ "github.com/dotcloud/docker/runtime/networkdriver/bridge"
+ "github.com/dotcloud/docker/server"
)
func Register(eng *engine.Engine) {
@@ -34,7 +33,6 @@ func remote(eng *engine.Engine) {
// These components should be broken off into plugins of their own.
//
func daemon(eng *engine.Engine) {
- eng.Register("initserver", docker.InitServer)
- eng.Register("init_networkdriver", lxc.InitDriver)
- eng.Register("version", docker.GetVersion)
+ eng.Register("initserver", server.InitServer)
+ eng.Register("init_networkdriver", bridge.InitDriver)
}
diff --git a/commands_unit_test.go b/commands_unit_test.go
deleted file mode 100644
index 60d8d60398..0000000000
--- a/commands_unit_test.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package docker
-
-import (
- "github.com/dotcloud/docker/runconfig"
- "strings"
- "testing"
-)
-
-func parse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig, error) {
- config, hostConfig, _, err := runconfig.Parse(strings.Split(args+" ubuntu bash", " "), nil)
- return config, hostConfig, err
-}
-
-func mustParse(t *testing.T, args string) (*runconfig.Config, *runconfig.HostConfig) {
- config, hostConfig, err := parse(t, args)
- if err != nil {
- t.Fatal(err)
- }
- return config, hostConfig
-}
-
-func TestParseRunLinks(t *testing.T) {
- if _, hostConfig := mustParse(t, "-link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
- t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
- }
- if _, hostConfig := mustParse(t, "-link a:b -link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
- t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
- }
- if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
- t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
- }
-
- if _, _, err := parse(t, "-link a"); err == nil {
- t.Fatalf("Error parsing links. `-link a` should be an error but is not")
- }
- if _, _, err := parse(t, "-link"); err == nil {
- t.Fatalf("Error parsing links. `-link` should be an error but is not")
- }
-}
-
-func TestParseRunAttach(t *testing.T) {
- if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr {
- t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
- }
- if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr {
- t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
- }
- if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
- t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
- }
- if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
- t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
- }
-
- if _, _, err := parse(t, "-a"); err == nil {
- t.Fatalf("Error parsing attach flags, `-a` should be an error but is not")
- }
- if _, _, err := parse(t, "-a invalid"); err == nil {
- t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not")
- }
- if _, _, err := parse(t, "-a invalid -a stdout"); err == nil {
- t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not")
- }
- if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil {
- t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not")
- }
- if _, _, err := parse(t, "-a stdin -d"); err == nil {
- t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not")
- }
- if _, _, err := parse(t, "-a stdout -d"); err == nil {
- t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not")
- }
- if _, _, err := parse(t, "-a stderr -d"); err == nil {
- t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not")
- }
- if _, _, err := parse(t, "-d -rm"); err == nil {
- t.Fatalf("Error parsing attach flags, `-d -rm` should be an error but is not")
- }
-}
-
-func TestParseRunVolumes(t *testing.T) {
- if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil {
- t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds)
- } else if _, exists := config.Volumes["/tmp"]; !exists {
- t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
- }
-
- if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil {
- t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds)
- } else if _, exists := config.Volumes["/tmp"]; !exists {
- t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes)
- } else if _, exists := config.Volumes["/var"]; !exists {
- t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes)
- }
-
- if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
- t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
- } else if _, exists := config.Volumes["/containerTmp"]; !exists {
- t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
- }
-
- if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" {
- t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
- } else if _, exists := config.Volumes["/containerTmp"]; !exists {
- t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
- } else if _, exists := config.Volumes["/containerVar"]; !exists {
- t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
- }
-
- if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" {
- t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containeTmp and /hostVar into /hostContainer. Received %v", hostConfig.Binds)
- } else if _, exists := config.Volumes["/containerTmp"]; !exists {
- t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
- } else if _, exists := config.Volumes["/containerVar"]; !exists {
- t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
- }
-
- if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
- t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containeTmp. Received %v", hostConfig.Binds)
- } else if _, exists := config.Volumes["/containerTmp"]; !exists {
- t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
- } else if _, exists := config.Volumes["/containerVar"]; !exists {
- t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
- }
-
- if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil {
- t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds)
- } else if len(config.Volumes) != 0 {
- t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes)
- }
-
- if _, _, err := parse(t, "-v /"); err == nil {
- t.Fatalf("Expected error, but got none")
- }
-
- if _, _, err := parse(t, "-v /:/"); err == nil {
- t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't")
- }
- if _, _, err := parse(t, "-v"); err == nil {
- t.Fatalf("Error parsing volume flags, `-v` should fail but didn't")
- }
- if _, _, err := parse(t, "-v /tmp:"); err == nil {
- t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't")
- }
- if _, _, err := parse(t, "-v /tmp:ro"); err == nil {
- t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't")
- }
- if _, _, err := parse(t, "-v /tmp::"); err == nil {
- t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't")
- }
- if _, _, err := parse(t, "-v :"); err == nil {
- t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't")
- }
- if _, _, err := parse(t, "-v ::"); err == nil {
- t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't")
- }
- if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil {
- t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't")
- }
-}
diff --git a/contrib/check-config.sh b/contrib/check-config.sh
new file mode 100755
index 0000000000..53bf708404
--- /dev/null
+++ b/contrib/check-config.sh
@@ -0,0 +1,146 @@
+#!/usr/bin/env bash
+set -e
+
+# bits of this were adapted from lxc-checkconfig
+# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
+
+: ${CONFIG:=/proc/config.gz}
+
+if ! command -v zgrep &> /dev/null; then
+ zgrep() {
+ zcat "$2" | grep "$1"
+ }
+fi
+
+is_set() {
+ zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
+}
+
+# see http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
+declare -A colors=(
+ [black]=30
+ [red]=31
+ [green]=32
+ [yellow]=33
+ [blue]=34
+ [magenta]=35
+ [cyan]=36
+ [white]=37
+)
+color() {
+ color=()
+ if [ "$1" = 'bold' ]; then
+ color+=( '1' )
+ shift
+ fi
+ if [ $# -gt 0 ] && [ "${colors[$1]}" ]; then
+ color+=( "${colors[$1]}" )
+ fi
+ local IFS=';'
+ echo -en '\033['"${color[*]}"m
+}
+wrap_color() {
+ text="$1"
+ shift
+ color "$@"
+ echo -n "$text"
+ color reset
+ echo
+}
+
+wrap_good() {
+ echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
+}
+wrap_bad() {
+ echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
+}
+wrap_warning() {
+ wrap_color >&2 "$*" red
+}
+
+check_flag() {
+ if is_set "$1"; then
+ wrap_good "CONFIG_$1" 'enabled'
+ else
+ wrap_bad "CONFIG_$1" 'missing'
+ fi
+}
+
+check_flags() {
+ for flag in "$@"; do
+ echo "- $(check_flag "$flag")"
+ done
+}
+
+if [ ! -e "$CONFIG" ]; then
+ wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..."
+ for tryConfig in \
+ '/proc/config.gz' \
+ "/boot/config-$(uname -r)" \
+ '/usr/src/linux/.config' \
+ ; do
+ if [ -e "$tryConfig" ]; then
+ CONFIG="$tryConfig"
+ break
+ fi
+ done
+ if [ ! -e "$CONFIG" ]; then
+ wrap_warning "error: cannot find kernel config"
+ wrap_warning " try running this script again, specifying the kernel config:"
+ wrap_warning " CONFIG=/path/to/kernel/.config $0"
+ exit 1
+ fi
+fi
+
+wrap_color "info: reading kernel config from $CONFIG ..." white
+echo
+
+echo 'Generally Necessary:'
+
+echo -n '- '
+cgroupCpuDir="$(awk '/[, ]cpu([, ]|$)/ && $8 == "cgroup" { print $5 }' /proc/$$/mountinfo | head -n1)"
+cgroupDir="$(dirname "$cgroupCpuDir")"
+if [ -d "$cgroupDir/cpu" ]; then
+ echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
+else
+ echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupCpuDir]"
+ echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
+fi
+
+flags=(
+ NAMESPACES {NET,PID,IPC,UTS}_NS
+ DEVPTS_MULTIPLE_INSTANCES
+ CGROUPS CGROUP_DEVICE
+ MACVLAN VETH BRIDGE
+ IP_NF_TARGET_MASQUERADE NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
+ NF_NAT NF_NAT_NEEDED
+)
+check_flags "${flags[@]}"
+echo
+
+echo 'Optional Features:'
+flags=(
+ MEMCG_SWAP
+ RESOURCE_COUNTERS
+)
+check_flags "${flags[@]}"
+
+echo '- Storage Drivers:'
+{
+ echo '- "'$(wrap_color 'aufs' blue)'":'
+ check_flags AUFS_FS | sed 's/^/ /'
+ if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
+ echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
+ fi
+
+ echo '- "'$(wrap_color 'btrfs' blue)'":'
+ check_flags BTRFS_FS | sed 's/^/ /'
+
+ echo '- "'$(wrap_color 'devicemapper' blue)'":'
+ check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS | sed 's/^/ /'
+} | sed 's/^/ /'
+echo
+
+#echo 'Potential Future Features:'
+#check_flags USER_NS
+#echo
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
index 1449330986..e6a191d32b 100755
--- a/contrib/completion/bash/docker
+++ b/contrib/completion/bash/docker
@@ -392,11 +392,8 @@ _docker_port()
_docker_ps()
{
case "$prev" in
- --since-id|--before-id)
- COMPREPLY=( $( compgen -W "$( __docker_q ps -a -q )" -- "$cur" ) )
- # TODO replace this with __docker_containers_all
- # see https://github.com/dotcloud/docker/issues/3565
- return
+ --since|--before)
+ __docker_containers_all
;;
-n)
return
@@ -407,7 +404,7 @@ _docker_ps()
case "$cur" in
-*)
- COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since-id --before-id -n" -- "$cur" ) )
+ COMPREPLY=( $( compgen -W "-q --quiet -s --size -a --all --no-trunc -l --latest --since --before -n" -- "$cur" ) )
;;
*)
;;
diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish
index 2629533aac..e3bb72aebe 100644
--- a/contrib/completion/fish/docker.fish
+++ b/contrib/completion/fish/docker.fish
@@ -26,36 +26,38 @@ end
function __fish_print_docker_containers --description 'Print a list of docker containers' -a select
switch $select
case running
- docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
+ docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Up" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
case stopped
- docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
+ docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; $5 ~ "^Exit" {print $1 "\n" $(NF-1)}' | tr ',' '\n'
case all
- docker ps -a --no-trunc | awk 'NR>1' | awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n'
+ docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS=" +"}; {print $1 "\n" $(NF-1)}' | tr ',' '\n'
end
end
function __fish_print_docker_images --description 'Print a list of docker images'
- docker images | awk 'NR>1' | grep -v '<none>' | awk '{print $1":"$2}'
+ docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1":"$2}'
end
function __fish_print_docker_repositories --description 'Print a list of docker repositories'
- docker images | awk 'NR>1' | grep -v '<none>' | awk '{print $1}' | sort | uniq
+ docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1}' | command sort | command uniq
end
# common options
complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group"
complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified'
complete -c docker -f -n '__fish_docker_no_subcommand' -l api-enable-cors -d 'Enable CORS headers in the remote API'
complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d "Attach containers to a pre-existing network bridge; use 'none' to disable container networking"
complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode'
complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force docker to use specific DNS servers'
+complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the docker runtime to use a specific exec driver'
complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the docker runtime'
complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Enable inter-container communication'
complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports'
complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Disable enabling of net.ipv4.ip_forward'
complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Disable docker's addition of iptables rules"
-complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if not default route is available'
+complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available'
complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file'
complete -c docker -f -n '__fish_docker_no_subcommand' -s r -l restart -d 'Restart previously running containers'
complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the docker runtime to use a specific storage driver'
@@ -71,7 +73,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_pri
# build
complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build a container from a Dockerfile'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image'
-complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress verbose build output'
+complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the verbose output generated by the containers'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build'
complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success'
@@ -100,16 +102,16 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_pri
# history
complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image'
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output"
-complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'only show numeric IDs'
+complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs'
complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image"
# images
complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images'
-complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'show all images (by default filter out the intermediate images used to build)'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)'
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output"
-complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'only show numeric IDs'
-complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'output graph in tree format'
-complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'output graph in graphviz format'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s t -l tree -d 'Output graph in tree format'
+complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s v -l viz -d 'Output graph in graphviz format'
complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository"
# import
@@ -126,7 +128,7 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from insert' -a '(__fish_pri
complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container'
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.'
complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image"
-complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers running)' -d "Container"
+complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container"
# kill
complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container'
@@ -138,9 +140,9 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image
# login
complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Register or Login to the docker registry server'
-complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'email'
-complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'password'
-complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'username'
+complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s e -l email -d 'Email'
+complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password'
+complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username'
# logs
complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container'
@@ -154,13 +156,13 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print
# ps
complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.'
-complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before-id -d 'Show only container created before Id, include non-running ones.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only containers created before Id or Name, include non-running ones.'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, include non-running ones.'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show n last created containers, include non-running ones.'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output"
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs'
complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display sizes'
-complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since-id -d 'Show only containers created since Id, include non-running ones.'
+complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, include non-running ones.'
# pull
complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from the docker registry server'
@@ -180,12 +182,14 @@ complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_pr
# rm
complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers'
+complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force removal of running container'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated to the container'
complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container"
# rmi
complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images'
+complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force'
complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image"
# run
diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker
index 8b50bac01b..a379fd40f8 100755
--- a/contrib/completion/zsh/_docker
+++ b/contrib/completion/zsh/_docker
@@ -174,7 +174,7 @@ __docker_subcommand () {
(ps)
_arguments '-a[Show all containers. Only running containers are shown by default]' \
'-h[Show help]' \
- '-before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
+ '--before-id=-[Show only container created before Id, include non-running one]:containers:__docker_containers' \
'-n=-[Show n last created containers, include non-running one]:n:(1 5 10 25 50)'
;;
(tag)
diff --git a/contrib/desktop-integration/data/Dockerfile b/contrib/desktop-integration/data/Dockerfile
index a9843a52ad..76846af912 100644
--- a/contrib/desktop-integration/data/Dockerfile
+++ b/contrib/desktop-integration/data/Dockerfile
@@ -9,13 +9,13 @@
# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/data/Dockerfile
#
# # Build data image
-# docker build -t data -rm .
+# docker build -t data .
#
# # Create a data container. (eg: iceweasel-data)
-# docker run -name iceweasel-data data true
+# docker run --name iceweasel-data data true
#
# # List data from it
-# docker run -volumes-from iceweasel-data busybox ls -al /data
+# docker run --volumes-from iceweasel-data busybox ls -al /data
docker-version 0.6.5
diff --git a/contrib/desktop-integration/iceweasel/Dockerfile b/contrib/desktop-integration/iceweasel/Dockerfile
index 721cc6d2cf..f9f58c9ca5 100644
--- a/contrib/desktop-integration/iceweasel/Dockerfile
+++ b/contrib/desktop-integration/iceweasel/Dockerfile
@@ -10,16 +10,16 @@
# wget http://raw.github.com/dotcloud/docker/master/contrib/desktop-integration/iceweasel/Dockerfile
#
# # Build iceweasel image
-# docker build -t iceweasel -rm .
+# docker build -t iceweasel .
#
# # Run stateful data-on-host iceweasel. For ephemeral, remove -v /data/iceweasel:/data
# docker run -v /data/iceweasel:/data -v /tmp/.X11-unix:/tmp/.X11-unix \
-# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
+# -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
# -e DISPLAY=unix$DISPLAY iceweasel
#
# # To run stateful dockerized data containers
-# docker run -volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \
-# -v /dev/snd:/dev/snd -lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
+# docker run --volumes-from iceweasel-data -v /tmp/.X11-unix:/tmp/.X11-unix \
+# -v /dev/snd:/dev/snd --lxc-conf='lxc.cgroup.devices.allow = c 116:* rwm' \
# -e DISPLAY=unix$DISPLAY iceweasel
docker-version 0.6.5
diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go
index 4d1ee0cea5..12c762a7f3 100644
--- a/contrib/docker-device-tool/device_tool.go
+++ b/contrib/docker-device-tool/device_tool.go
@@ -3,7 +3,7 @@ package main
import (
"flag"
"fmt"
- "github.com/dotcloud/docker/graphdriver/devmapper"
+ "github.com/dotcloud/docker/runtime/graphdriver/devmapper"
"os"
"path"
"sort"
diff --git a/contrib/host-integration/Dockerfile.dev b/contrib/host-integration/Dockerfile.dev
index 161416e750..800216532f 100644
--- a/contrib/host-integration/Dockerfile.dev
+++ b/contrib/host-integration/Dockerfile.dev
@@ -6,7 +6,7 @@
#
FROM ubuntu:12.10
-MAINTAINER Guillaume J. Charmes <guillaume@dotcloud.com>
+MAINTAINER Guillaume J. Charmes <guillaume@docker.com>
RUN apt-get update && apt-get install -y wget git mercurial
diff --git a/contrib/host-integration/Dockerfile.min b/contrib/host-integration/Dockerfile.min
index 1a7b3a9d82..60bb89b986 100644
--- a/contrib/host-integration/Dockerfile.min
+++ b/contrib/host-integration/Dockerfile.min
@@ -1,4 +1,4 @@
FROM busybox
-MAINTAINER Guillaume J. Charmes <guillaume@dotcloud.com>
+MAINTAINER Guillaume J. Charmes <guillaume@docker.com>
ADD manager /usr/bin/
ENTRYPOINT ["/usr/bin/manager"]
diff --git a/contrib/host-integration/manager.go b/contrib/host-integration/manager.go
index 6742ee4d7c..2798a5d06f 100644
--- a/contrib/host-integration/manager.go
+++ b/contrib/host-integration/manager.go
@@ -70,7 +70,7 @@ func main() {
bufErr := bytes.NewBuffer(nil)
// Instanciate the Docker CLI
- cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock")
+ cli := docker.NewDockerCli(nil, bufOut, bufErr, "unix", "/var/run/docker.sock", false, nil)
// Retrieve the container info
if err := cli.CmdInspect(flag.Arg(0)); err != nil {
// As of docker v0.6.3, CmdInspect always returns nil
diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker
index 510683a459..67f0d2807f 100755
--- a/contrib/init/sysvinit-debian/docker
+++ b/contrib/init/sysvinit-debian/docker
@@ -21,6 +21,7 @@ BASE=$(basename $0)
# modify these in /etc/default/$BASE (/etc/default/docker)
DOCKER=/usr/bin/$BASE
DOCKER_PIDFILE=/var/run/$BASE.pid
+DOCKER_LOGFILE=/var/log/$BASE.log
DOCKER_OPTS=
DOCKER_DESC="Docker"
@@ -50,23 +51,37 @@ fail_unless_root() {
fi
}
+cgroupfs_mount() {
+ # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
+ if grep -v '^#' /etc/fstab | grep -q cgroup \
+ || [ ! -e /proc/cgroups ] \
+ || [ ! -d /sys/fs/cgroup ]; then
+ return
+ fi
+ if ! mountpoint -q /sys/fs/cgroup; then
+ mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
+ fi
+ (
+ cd /sys/fs/cgroup
+ for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
+ mkdir -p $sys
+ if ! mountpoint -q $sys; then
+ if ! mount -n -t cgroup -o $sys cgroup $sys; then
+ rmdir $sys || true
+ fi
+ fi
+ done
+ )
+}
+
case "$1" in
start)
fail_unless_root
- if ! grep -q cgroup /proc/mounts; then
- # rough approximation of cgroupfs-mount
- mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
- for sys in $(cut -d' ' -f1 /proc/cgroups); do
- mkdir -p /sys/fs/cgroup/$sys
- if ! mount -n -t cgroup -o $sys cgroup /sys/fs/cgroup/$sys 2>/dev/null; then
- rmdir /sys/fs/cgroup/$sys 2>/dev/null || true
- fi
- done
- fi
+ cgroupfs_mount
- touch /var/log/docker.log
- chgrp docker /var/log/docker.log
+ touch "$DOCKER_LOGFILE"
+ chgrp docker "$DOCKER_LOGFILE"
log_begin_msg "Starting $DOCKER_DESC: $BASE"
start-stop-daemon --start --background \
@@ -76,7 +91,7 @@ case "$1" in
-- \
-d -p "$DOCKER_PIDFILE" \
$DOCKER_OPTS \
- > /var/log/docker.log 2>&1
+ >> "$DOCKER_LOGFILE" 2>&1
log_end_msg $?
;;
diff --git a/contrib/init/sysvinit-debian/docker.default b/contrib/init/sysvinit-debian/docker.default
index d5110b5e2f..14e660175b 100644
--- a/contrib/init/sysvinit-debian/docker.default
+++ b/contrib/init/sysvinit-debian/docker.default
@@ -4,7 +4,7 @@
#DOCKER="/usr/local/bin/docker"
# Use DOCKER_OPTS to modify the daemon startup options.
-#DOCKER_OPTS="-dns 8.8.8.8 -dns 8.8.4.4"
+#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4"
# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"
diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf
index e2cc4536e1..e27d77e145 100644
--- a/contrib/init/upstart/docker.conf
+++ b/contrib/init/upstart/docker.conf
@@ -2,9 +2,34 @@ description "Docker daemon"
start on filesystem
stop on runlevel [!2345]
+limit nofile 524288 1048576
+limit nproc 524288 1048576
respawn
+pre-start script
+ # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
+ if grep -v '^#' /etc/fstab | grep -q cgroup \
+ || [ ! -e /proc/cgroups ] \
+ || [ ! -d /sys/fs/cgroup ]; then
+ exit 0
+ fi
+ if ! mountpoint -q /sys/fs/cgroup; then
+ mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
+ fi
+ (
+ cd /sys/fs/cgroup
+ for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
+ mkdir -p $sys
+ if ! mountpoint -q $sys; then
+ if ! mount -n -t cgroup -o $sys cgroup $sys; then
+ rmdir $sys || true
+ fi
+ fi
+ done
+ )
+end script
+
script
# modify these in /etc/default/$UPSTART_JOB (/etc/default/docker)
DOCKER=/usr/bin/$UPSTART_JOB
@@ -12,15 +37,5 @@ script
if [ -f /etc/default/$UPSTART_JOB ]; then
. /etc/default/$UPSTART_JOB
fi
- if ! grep -q cgroup /proc/mounts; then
- # rough approximation of cgroupfs-mount
- mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
- for sys in $(cut -d' ' -f1 /proc/cgroups); do
- mkdir -p /sys/fs/cgroup/$sys
- if ! mount -n -t cgroup -o $sys cgroup /sys/fs/cgroup/$sys 2>/dev/null; then
- rmdir /sys/fs/cgroup/$sys 2>/dev/null || true
- fi
- done
- fi
- "$DOCKER" -d $DOCKER_OPTS
+ exec "$DOCKER" -d $DOCKER_OPTS
end script
diff --git a/contrib/man/man1/docker-attach.1 b/contrib/man/man1/docker-attach.1
new file mode 100644
index 0000000000..f0879d7507
--- /dev/null
+++ b/contrib/man/man1/docker-attach.1
@@ -0,0 +1,56 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-attach.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker-attach \- Attach to a running container
+.SH SYNOPSIS
+.B docker attach
+\fB--no-stdin\fR[=\fIfalse\fR]
+\fB--sig-proxy\fR[=\fItrue\fR]
+container
+.SH DESCRIPTION
+If you \fBdocker run\fR a container in detached mode (\fB-d\fR), you can reattach to the detached container with \fBdocker attach\fR using the container's ID or name.
+.sp
+You can detach from the container again (and leave it running) with CTRL-c (for a quiet exit) or CTRL-\ to get a stacktrace of the Docker client when it quits. When you detach from the container the exit code will be returned to the client.
+.SH "OPTIONS"
+.TP
+.B --no-stdin=\fItrue\fR|\fIfalse\fR:
+When set to true, do not attach to stdin. The default is \fIfalse\fR.
+.TP
+.B --sig-proxy=\fItrue\fR|\fIfalse\fR:
+When set to true, proxy all received signals to the process (even in non-tty mode). The default is \fItrue\fR.
+.sp
+.SH EXAMPLES
+.sp
+.PP
+.B Attaching to a container
+.TP
+In this example the top command is run inside a container, from an image called fedora, in detached mode. The ID from the container is passed into the \fBdocker attach\fR command:
+.sp
+.nf
+.RS
+# ID=$(sudo docker run -d fedora /usr/bin/top -b)
+# sudo docker attach $ID
+top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
+Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
+Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
+Mem: 373572k total, 355560k used, 18012k free, 27872k buffers
+Swap: 786428k total, 0k used, 786428k free, 221740k cached
+
+PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top
+
+top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05
+Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie
+Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
+Mem: 373572k total, 355244k used, 18328k free, 27872k buffers
+Swap: 786428k total, 0k used, 786428k free, 221776k cached
+
+PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top
+.RE
+.fi
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/man/man1/docker-build.1 b/contrib/man/man1/docker-build.1
new file mode 100644
index 0000000000..6546b7be2a
--- /dev/null
+++ b/contrib/man/man1/docker-build.1
@@ -0,0 +1,65 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-build.1
+.\"
+.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
+.SH NAME
+docker-build \- Build a container image from a Dockerfile source at PATH
+.SH SYNOPSIS
+.B docker build
+[\fB--no-cache\fR[=\fIfalse\fR]
+[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR]
+[\fB--rm\fR[=\fItrue\fR]]
+[\fB-t\fR|\fB--tag\fR=\fItag\fR]
+PATH | URL | -
+.SH DESCRIPTION
+This will read the Dockerfile from the directory specified in \fBPATH\fR. It also sends any other files and directories found in the current directory to the Docker daemon. The contents of this directory would be used by ADD command found within the Dockerfile.
+Warning, this will send a lot of data to the Docker daemon if the current directory contains a lot of data.
+If the absolute path is provided instead of ‘.’, only the files and directories required by the ADD commands from the Dockerfile will be added to the context and transferred to the Docker daemon.
+.sp
+When a single Dockerfile is given as URL, then no context is set. When a Git repository is set as URL, the repository is used as context.
+.SH "OPTIONS"
+.TP
+.B -q, --quiet=\fItrue\fR|\fIfalse\fR:
+When set to true, suppress verbose build output. Default is \fIfalse\fR.
+.TP
+.B --rm=\fItrue\fR|\fIfalse\fR:
+When true, remove intermediate containers that are created during the build process. The default is true.
+.TP
+.B -t, --tag=\fItag\fR:
+Tag to be applied to the resulting image on successful completion of the build.
+.TP
+.B --no-cache=\fItrue\fR|\fIfalse\fR
+When set to true, do not use a cache when building the image. The default is \fIfalse\fR.
+.sp
+.SH EXAMPLES
+.sp
+.sp
+.B Building an image from current directory
+.TP
+Using a Dockerfile, Docker images are built using the build command:
+.sp
+.RS
+docker build .
+.RE
+.sp
+If, for some reason, you do not want to remove the intermediate containers created during the build you must set --rm=false.
+.sp
+.RS
+docker build --rm=false .
+.sp
+.RE
+.sp
+A good practice is to make a subdirectory with a related name and create the Dockerfile in that directory. E.g. a directory called mongo may contain a Dockerfile for a MongoDB image, or a directory called httpd may contain a Dockerfile for an Apache web server.
+.sp
+It is also good practice to add the files required for the image to the subdirectory. These files will be then specified with the `ADD` instruction in the Dockerfile. Note: if you include a tar file, which is good practice, then Docker will automatically extract the contents of the tar file specified in the `ADD` instruction into the specified target.
+.sp
+.B Building an image container using a URL
+.TP
+This will clone the Github repository and use it as context. The Dockerfile at the root of the repository is used as Dockerfile. This only works if the Github repository is a dedicated repository. Note that you can specify an arbitrary Git repository by using the ‘git://’ scheme.
+.sp
+.RS
+docker build github.com/scollier/Fedora-Dockerfiles/tree/master/apache
+.RE
+.sp
+.SH HISTORY
+March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/man/man1/docker-images.1 b/contrib/man/man1/docker-images.1
new file mode 100644
index 0000000000..e540ba2b79
--- /dev/null
+++ b/contrib/man/man1/docker-images.1
@@ -0,0 +1,84 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-images.1
+.\"
+.TH "DOCKER" "1" "April 2014" "0.1" "Docker"
+.SH NAME
+docker-images \- List the images in the local repository
+.SH SYNOPSIS
+.B docker images
+[\fB-a\fR|\fB--all\fR=\fIfalse\fR]
+[\fB--no-trunc\fR[=\fIfalse\fR]
+[\fB-q\fR|\fB--quiet\fR[=\fIfalse\fR]
+[\fB-t\fR|\fB--tree\fR=\fIfalse\fR]
+[\fB-v\fR|\fB--viz\fR=\fIfalse\fR]
+[NAME]
+.SH DESCRIPTION
+This command lists the images stored in the local Docker repository.
+.sp
+By default, intermediate images, used during builds, are not listed. Some of the output, e.g. image ID, is truncated, for space reasons. However the truncated image ID, and often the first few characters, are enough to be used in other Docker commands that use the image ID. The output includes repository, tag, image ID, date created and the virtual size.
+.sp
+The title REPOSITORY for the first title may seem confusing. It is essentially the image name. However, because you can tag a specific image, and multiple tags (image instances) can be associated with a single name, the name is really a repository for all tagged images of the same name.
+.SH "OPTIONS"
+.TP
+.B -a, --all=\fItrue\fR|\fIfalse\fR:
+When set to true, also include all intermediate images in the list. The default is false.
+.TP
+.B --no-trunc=\fItrue\fR|\fIfalse\fR:
+When set to true, list the full image ID and not the truncated ID. The default is false.
+.TP
+.B -q, --quiet=\fItrue\fR|\fIfalse\fR:
+When set to true, only list the numeric IDs of the images. The default is false.
+.TP
+.B -t, --tree=\fItrue\fR|\fIfalse\fR:
+When set to true, list the images in a dependency tree (hierarchy) format. The default is false.
+.TP
+.B -v, --viz=\fItrue\fR|\fIfalse\fR
+When set to true, list the graph in graphviz format. The default is \fIfalse\fR.
+.sp
+.SH EXAMPLES
+.sp
+.B Listing the images
+.TP
+To list the images in a local repository (not the registry) run:
+.sp
+.RS
+docker images
+.RE
+.sp
+The list will contain the image repository name, a tag for the image, and an image ID, when it was created and its virtual size. Columns: REPOSITORY, TAG, IMAGE ID, CREATED, and VIRTUAL SIZE.
+.sp
+To get a verbose list of images which contains all the intermediate images used in builds use \fB-a\fR:
+.sp
+.RS
+docker images -a
+.RE
+.sp
+.B List images dependency tree hierarchy
+.TP
+To list the images in the local repository (not the registry) in a dependency tree format then use the \fB-t\fR|\fB--tree=true\fR option.
+.sp
+.RS
+docker images -t
+.RE
+.sp
+This displays a staggered hierarchy tree where the less indented image is the oldest with dependent image layers branching inward (to the right) on subsequent lines. The newest or top level image layer is listed last in any tree branch.
+.sp
+.B List images in GraphViz format
+.TP
+To display the list in a format consumable by a GraphViz tools run with \fB-v\fR|\fB--viz=true\fR. For example to produce a .png graph file of the hierarchy use:
+.sp
+.RS
+docker images --viz | dot -Tpng -o docker.png
+.sp
+.RE
+.sp
+.B Listing only the shortened image IDs
+.TP
+Listing just the shortened image IDs. This can be useful for some automated tools.
+.sp
+.RS
+docker images -q
+.RE
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/man/man1/docker-info.1 b/contrib/man/man1/docker-info.1
new file mode 100644
index 0000000000..dca2600af0
--- /dev/null
+++ b/contrib/man/man1/docker-info.1
@@ -0,0 +1,39 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-info.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker-info \- Display system wide information
+.SH SYNOPSIS
+.B docker info
+.SH DESCRIPTION
+This command displays system wide information regarding the Docker installation. Information displayed includes the number of containers and images, pool name, data file, metadata file, data space used, total data space, metadata space used, total metadata space, execution driver, and the kernel version.
+.sp
+The data file is where the images are stored and the metadata file is where the meta data regarding those images are stored. When run for the first time Docker allocates a certain amount of data space and meta data space from the space available on the volume where /var/lib/docker is mounted.
+.SH "OPTIONS"
+There are no available options.
+.sp
+.SH EXAMPLES
+.sp
+.B Display Docker system information
+.TP
+Here is a sample output:
+.sp
+.RS
+ # docker info
+ Containers: 18
+ Images: 95
+ Storage Driver: devicemapper
+ Pool Name: docker-8:1-170408448-pool
+ Data file: /var/lib/docker/devicemapper/devicemapper/data
+ Metadata file: /var/lib/docker/devicemapper/devicemapper/metadata
+ Data Space Used: 9946.3 Mb
+ Data Space Total: 102400.0 Mb
+ Metadata Space Used: 9.9 Mb
+ Metadata Space Total: 2048.0 Mb
+ Execution Driver: native-0.1
+ Kernel Version: 3.10.0-116.el7.x86_64
+.RE
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/man/man1/docker-inspect.1 b/contrib/man/man1/docker-inspect.1
new file mode 100644
index 0000000000..225125e564
--- /dev/null
+++ b/contrib/man/man1/docker-inspect.1
@@ -0,0 +1,237 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-inspect.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker-inspect \- Return low-level information on a container/image
+.SH SYNOPSIS
+.B docker inspect
+[\fB-f\fR|\fB--format\fR=""]
+CONTAINER|IMAGE [CONTAINER|IMAGE...]
+.SH DESCRIPTION
+This displays all the information available in Docker for a given container or image. By default, this will render all results in a JSON array. If a format is specified, the given template will be executed for each result.
+.SH "OPTIONS"
+.TP
+.B -f, --format="":
+The text/template package of Go describes all the details of the format. See examples section
+.SH EXAMPLES
+.sp
+.PP
+.B Getting information on a container
+.TP
+To get information on a container use its ID or instance name
+.sp
+.fi
+.RS
+#docker inspect 1eb5fabf5a03
+
+[{
+ "ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b",
+ "Created": "2014-04-04T21:33:52.02361335Z",
+ "Path": "/usr/sbin/nginx",
+ "Args": [],
+ "Config": {
+ "Hostname": "1eb5fabf5a03",
+ "Domainname": "",
+ "User": "",
+ "Memory": 0,
+ "MemorySwap": 0,
+ "CpuShares": 0,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "PortSpecs": null,
+ "ExposedPorts": {
+ "80/tcp": {}
+ },
+ "Tty": true,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "HOME=/",
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "Cmd": [
+ "/usr/sbin/nginx"
+ ],
+ "Dns": null,
+ "DnsSearch": null,
+ "Image": "summit/nginx",
+ "Volumes": null,
+ "VolumesFrom": "",
+ "WorkingDir": "",
+ "Entrypoint": null,
+ "NetworkDisabled": false,
+ "OnBuild": null,
+ "Context": {
+ "mount_label": "system_u:object_r:svirt_sandbox_file_t:s0:c0,c650",
+ "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c0,c650"
+ }
+ },
+ "State": {
+ "Running": true,
+ "Pid": 858,
+ "ExitCode": 0,
+ "StartedAt": "2014-04-04T21:33:54.16259207Z",
+ "FinishedAt": "0001-01-01T00:00:00Z",
+ "Ghost": false
+ },
+ "Image": "df53773a4390e25936f9fd3739e0c0e60a62d024ea7b669282b27e65ae8458e6",
+ "NetworkSettings": {
+ "IPAddress": "172.17.0.2",
+ "IPPrefixLen": 16,
+ "Gateway": "172.17.42.1",
+ "Bridge": "docker0",
+ "PortMapping": null,
+ "Ports": {
+ "80/tcp": [
+ {
+ "HostIp": "0.0.0.0",
+ "HostPort": "80"
+ }
+ ]
+ }
+ },
+ "ResolvConfPath": "/etc/resolv.conf",
+ "HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname",
+ "HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts",
+ "Name": "/ecstatic_ptolemy",
+ "Driver": "devicemapper",
+ "ExecDriver": "native-0.1",
+ "Volumes": {},
+ "VolumesRW": {},
+ "HostConfig": {
+ "Binds": null,
+ "ContainerIDFile": "",
+ "LxcConf": [],
+ "Privileged": false,
+ "PortBindings": {
+ "80/tcp": [
+ {
+ "HostIp": "0.0.0.0",
+ "HostPort": "80"
+ }
+ ]
+ },
+ "Links": null,
+ "PublishAllPorts": false,
+ "DriverOptions": {
+ "lxc": null
+ },
+ "CliAddress": ""
+ }
+.RE
+.nf
+.sp
+.B Getting the IP address of a container instance
+.TP
+To get the IP address of a container use:
+.sp
+.fi
+.RS
+# docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03
+
+172.17.0.2
+.RE
+.nf
+.sp
+.B Listing all port bindings
+.TP
+One can loop over arrays and maps in the results to produce simple text output:
+.sp
+.fi
+.RS
+# docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03
+
+80/tcp -> 80
+.RE
+.nf
+.sp
+.B Getting information on an image
+.TP
+Use an image's ID or name (e.g. repository/name[:tag]) to get information on it.
+.sp
+.fi
+.RS
+docker inspect 58394af37342
+[{
+ "id": "58394af373423902a1b97f209a31e3777932d9321ef10e64feaaa7b4df609cf9",
+ "parent": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
+ "created": "2014-02-03T16:10:40.500814677Z",
+ "container": "f718f19a28a5147da49313c54620306243734bafa63c76942ef6f8c4b4113bc5",
+ "container_config": {
+ "Hostname": "88807319f25e",
+ "Domainname": "",
+ "User": "",
+ "Memory": 0,
+ "MemorySwap": 0,
+ "CpuShares": 0,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "PortSpecs": null,
+ "ExposedPorts": null,
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "HOME=/",
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ "#(nop) ADD fedora-20-medium.tar.xz in /"
+ ],
+ "Dns": null,
+ "DnsSearch": null,
+ "Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
+ "Volumes": null,
+ "VolumesFrom": "",
+ "WorkingDir": "",
+ "Entrypoint": null,
+ "NetworkDisabled": false,
+ "OnBuild": null,
+ "Context": null
+ },
+ "docker_version": "0.6.3",
+ "author": "Lokesh Mandvekar \u003clsm5@redhat.com\u003e - ./buildcontainers.sh",
+ "config": {
+ "Hostname": "88807319f25e",
+ "Domainname": "",
+ "User": "",
+ "Memory": 0,
+ "MemorySwap": 0,
+ "CpuShares": 0,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "PortSpecs": null,
+ "ExposedPorts": null,
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "HOME=/",
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "Cmd": null,
+ "Dns": null,
+ "DnsSearch": null,
+ "Image": "8abc22fbb04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db",
+ "Volumes": null,
+ "VolumesFrom": "",
+ "WorkingDir": "",
+ "Entrypoint": null,
+ "NetworkDisabled": false,
+ "OnBuild": null,
+ "Context": null
+ },
+ "architecture": "x86_64",
+ "Size": 385520098
+}]
+.RE
+.nf
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/man/man1/docker-rm.1 b/contrib/man/man1/docker-rm.1
new file mode 100644
index 0000000000..b06e014d3b
--- /dev/null
+++ b/contrib/man/man1/docker-rm.1
@@ -0,0 +1,45 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-rm.1
+.\"
+.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
+.SH NAME
+docker-rm \- Remove one or more containers.
+.SH SYNOPSIS
+.B docker rm
+[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]
+[\fB-l\fR|\fB--link\fR[=\fIfalse\fR]
+[\fB-v\fR|\fB--volumes\fR[=\fIfalse\fR]
+CONTAINER [CONTAINER...]
+.SH DESCRIPTION
+This will remove one or more containers from the host node. The container name or ID can be used. This does not remove images. You cannot remove a running container unless you use the \fB-f\fR option. To see all containers on a host use the \fBdocker ps -a\fR command.
+.SH "OPTIONS"
+.TP
+.B -f, --force=\fItrue\fR|\fIfalse\fR:
+When set to true, force the removal of the container. The default is \fIfalse\fR.
+.TP
+.B -l, --link=\fItrue\fR|\fIfalse\fR:
+When set to true, remove the specified link and not the underlying container. The default is \fIfalse\fR.
+.TP
+.B -v, --volumes=\fItrue\fR|\fIfalse\fR:
+When set to true, remove the volumes associated to the container. The default is \fIfalse\fR.
+.SH EXAMPLES
+.sp
+.PP
+.B Removing a container using its ID
+.TP
+To remove a container using its ID, find either from a \fBdocker ps -a\fR command, or use the ID returned from the \fBdocker run\fR command, or retrieve it from a file used to store it using the \fBdocker run --cidfile\fR:
+.sp
+.RS
+docker rm abebf7571666
+.RE
+.sp
+.B Removing a container using the container name:
+.TP
+The name of the container can be found using the \fBdocker ps -a\fR command. Then use that name as follows:
+.sp
+.RS
+docker rm hopeful_morse
+.RE
+.sp
+.SH HISTORY
+March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/man/man1/docker-rmi.1 b/contrib/man/man1/docker-rmi.1
new file mode 100644
index 0000000000..6f33446ecd
--- /dev/null
+++ b/contrib/man/man1/docker-rmi.1
@@ -0,0 +1,29 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-rmi.1
+.\"
+.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
+.SH NAME
+docker-rmi \- Remove one or more images.
+.SH SYNOPSIS
+.B docker rmi
+[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]
+IMAGE [IMAGE...]
+.SH DESCRIPTION
+This will remove one or more images from the host node. This does not remove images from a registry. You cannot remove an image of a running container unless you use the \fB-f\fR option. To see all images on a host use the \fBdocker images\fR command.
+.SH "OPTIONS"
+.TP
+.B -f, --force=\fItrue\fR|\fIfalse\fR:
+When set to true, force the removal of the image. The default is \fIfalse\fR.
+.SH EXAMPLES
+.sp
+.PP
+.B Removing an image
+.TP
+Here is an example of removing an image:
+.sp
+.RS
+docker rmi fedora/httpd
+.RE
+.sp
+.SH HISTORY
+March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/man/man1/docker-run.1 b/contrib/man/man1/docker-run.1
new file mode 100644
index 0000000000..fd449374e3
--- /dev/null
+++ b/contrib/man/man1/docker-run.1
@@ -0,0 +1,277 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-run.1
+.\"
+.TH "DOCKER" "1" "MARCH 2014" "0.1" "Docker"
+.SH NAME
+docker-run \- Run a process in an isolated container
+.SH SYNOPSIS
+.B docker run
+[\fB-a\fR|\fB--attach\fR[=]] [\fB-c\fR|\fB--cpu-shares\fR[=0] [\fB-m\fR|\fB--memory\fR=\fImemory-limit\fR]
+[\fB--cidfile\fR=\fIfile\fR] [\fB-d\fR|\fB--detach\fR[=\fIfalse\fR]] [\fB--dns\fR=\fIIP-address\fR]
+[\fB--name\fR=\fIname\fR] [\fB-u\fR|\fB--user\fR=\fIusername\fR|\fIuid\fR]
+[\fB--link\fR=\fIname\fR:\fIalias\fR]
+[\fB-e\fR|\fB--env\fR=\fIenvironment\fR] [\fB--entrypoint\fR=\fIcommand\fR]
+[\fB--expose\fR=\fIport\fR] [\fB-P\fR|\fB--publish-all\fR[=\fIfalse\fR]]
+[\fB-p\fR|\fB--publish\fR=\fIport-mapping\fR] [\fB-h\fR|\fB--hostname\fR=\fIhostname\fR]
+[\fB--rm\fR[=\fIfalse\fR]] [\fB--privileged\fR[=\fIfalse\fR]]
+[\fB-i\fR|\fB--interactive\fR[=\fIfalse\fR]
+[\fB-t\fR|\fB--tty\fR[=\fIfalse\fR]] [\fB--lxc-conf\fR=\fIoptions\fR]
+[\fB-n\fR|\fB--networking\fR[=\fItrue\fR]]
+[\fB-v\fR|\fB--volume\fR=\fIvolume\fR] [\fB--volumes-from\fR=\fIcontainer-id\fR]
+[\fB-w\fR|\fB--workdir\fR=\fIdirectory\fR] [\fB--sig-proxy\fR[=\fItrue\fR]]
+IMAGE [COMMAND] [ARG...]
+.SH DESCRIPTION
+.PP
+Run a process in a new container. \fBdocker run\fR starts a process with its own file system, its own networking, and its own isolated process tree. The \fIIMAGE\fR which starts the process may define defaults related to the process that will be run in the container, the networking to expose, and more, but \fBdocker run\fR gives final control to the operator or administrator who starts the container from the image. For that reason \fBdocker run\fR has more options than any other docker command.
+
+If the \fIIMAGE\fR is not already loaded then \fBdocker run\fR will pull the \fIIMAGE\fR, and all image dependencies, from the repository in the same way running \fBdocker pull\fR \fIIMAGE\fR, before it starts the container from that image.
+
+
+.SH "OPTIONS"
+
+.TP
+.B -a, --attach=\fIstdin\fR|\fIstdout\fR|\fIstderr\fR:
+Attach to stdin, stdout or stderr. In foreground mode (the default when -d is not specified), \fBdocker run\fR can start the process in the container and attach the console to the process’s standard input, output, and standard error. It can even pretend to be a TTY (this is what most commandline executables expect) and pass along signals. The \fB-a\fR option can be set for each of stdin, stdout, and stderr.
+
+.TP
+.B -c, --cpu-shares=0:
+CPU shares in relative weight. You can increase the priority of a container with the -c option. By default, all containers run at the same priority and get the same proportion of CPU cycles, but you can tell the kernel to give more shares of CPU time to one or more containers when you start them via \fBdocker run\fR.
+
+.TP
+.B -m, --memory=\fImemory-limit\fR:
+Allows you to constrain the memory available to a container. If the host supports swap memory, then the -m memory setting can be larger than physical RAM. The memory limit format: <number><optional unit>, where unit = b, k, m or g.
+
+.TP
+.B --cidfile=\fIfile\fR:
+Write the container ID to the file specified.
+
+.TP
+.B -d, --detach=\fItrue\fR|\fIfalse\fR:
+Detached mode. This runs the container in the background. It outputs the new container's ID and error messages. At any time you can run \fBdocker ps\fR in the other shell to view a list of the running containers. You can reattach to a detached container with \fBdocker attach\fR. If you choose to run a container in the detached mode, then you cannot use the \fB--rm\fR option.
+
+.TP
+.B --dns=\fIIP-address\fR:
+Set custom DNS servers. This option can be used to override the DNS configuration passed to the container. Typically this is necessary when the host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this is the case the \fB--dns\fR flag is necessary for every run.
+
+.TP
+.B -e, --env=\fIenvironment\fR:
+Set environment variables. This option allows you to specify arbitrary environment variables that are available for the process that will be launched inside of the container.
+
+.TP
+.B --entrypoint=\fIcommand\fR:
+This option allows you to overwrite the default entrypoint of the image that is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND because it specifies what executable to run when the container starts, but it is (purposely) more difficult to override. The ENTRYPOINT gives a container its default nature or behavior, so that when you set an ENTRYPOINT you can run the container as if it were that binary, complete with default options, and you can pass in more options via the COMMAND. But, sometimes an operator may want to run something else inside the container, so you can override the default ENTRYPOINT at runtime by using a \fB--entrypoint\fR and a string to specify the new ENTRYPOINT.
+
+.TP
+.B --expose=\fIport\fR:
+Expose a port from the container without publishing it to your host. A containers port can be exposed to other containers in three ways: 1) The developer can expose the port using the EXPOSE parameter of the Dockerfile, 2) the operator can use the \fB--expose\fR option with \fBdocker run\fR, or 3) the container can be started with the \fB--link\fR.
+
+.TP
+.B -P, --publish-all=\fItrue\fR|\fIfalse\fR:
+When set to true publish all exposed ports to the host interfaces. The default is false. If the operator uses -P (or -p) then Docker will make the exposed port accessible on the host and the ports will be available to any client that can reach the host. To find the map between the host ports and the exposed ports, use \fBdocker port\fR.
+
+.TP
+.B -p, --publish=[]:
+Publish a container's port to the host (format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort) (use 'docker port' to see the actual mapping)
+
+.TP
+.B -h , --hostname=\fIhostname\fR:
+Sets the container host name that is available inside the container.
+
+.TP
+.B -i , --interactive=\fItrue\fR|\fIfalse\fR:
+When set to true, keep stdin open even if not attached. The default is false.
+
+.TP
+.B --link=\fIname\fR:\fIalias\fR:
+Add link to another container. The format is name:alias. If the operator uses \fB--link\fR when starting the new client container, then the client container can access the exposed port via a private networking interface. Docker will set some environment variables in the client container to help indicate which interface and port to use.
+
+.TP
+.B -n, --networking=\fItrue\fR|\fIfalse\fR:
+By default, all containers have networking enabled (true) and can make outgoing connections. The operator can disable networking with \fB--networking\fR to false. This disables all incoming and outgoing networking. In cases like this, I/O can only be performed through files or by using STDIN/STDOUT.
+
+Also by default, the container will use the same DNS servers as the host, but the operator may override this with \fB--dns\fR.
+
+.TP
+.B --name=\fIname\fR:
+Assign a name to the container. The operator can identify a container in three ways:
+.sp
+.nf
+UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”)
+UUID short identifier (“f78375b1c487”)
+Name (“jonah”)
+.fi
+.sp
+The UUID identifiers come from the Docker daemon, and if a name is not assigned to the container with \fB--name\fR then the daemon will also generate a random string name. The name is useful when defining links (see \fB--link\fR) (or any other place you need to identify a container). This works for both background and foreground Docker containers.
+
+.TP
+.B --privileged=\fItrue\fR|\fIfalse\fR:
+Give extended privileges to this container. By default, Docker containers are “unprivileged” (=false) and cannot, for example, run a Docker daemon inside the Docker container. This is because by default a container is not allowed to access any devices. A “privileged” container is given access to all devices.
+
+When the operator executes \fBdocker run --privileged\fR, Docker will enable access to all devices on the host as well as set some configuration in AppArmor (\fB???\fR) to allow the container nearly all the same access to the host as processes running outside of a container on the host.
+
+.TP
+.B --rm=\fItrue\fR|\fIfalse\fR:
+If set to \fItrue\fR the container is automatically removed when it exits. The default is \fIfalse\fR. This option is incompatible with \fB-d\fR.
+
+.TP
+.B --sig-proxy=\fItrue\fR|\fIfalse\fR:
+When set to true, proxify all received signals to the process (even in non-tty mode). The default is true.
+
+.TP
+.B -t, --tty=\fItrue\fR|\fIfalse\fR:
+When set to true Docker can allocate a pseudo-tty and attach to the standard input of any container. This can be used, for example, to run a throwaway interactive shell. The default value is false.
+
+.TP
+.B -u, --user=\fIusername\fR|\fIuid\fR:
+Set a username or UID for the container.
+
+.TP
+.B -v, --volume=\fIvolume\fR:
+Bind mount a volume to the container. The \fB-v\fR option can be used one or more times to add one or more mounts to a container. These mounts can then be used in other containers using the \fB--volumes-from\fR option. See examples.
+
+.TP
+.B --volumes-from=\fIcontainer-id\fR:
+Will mount volumes from the specified container identified by container-id. Once a volume is mounted in a one container it can be shared with other containers using the \fB--volumes-from\fR option when running those other containers. The volumes can be shared even if the original container with the mount is not running.
+
+.TP
+.B -w, --workdir=\fIdirectory\fR:
+Working directory inside the container. The default working directory for running binaries within a container is the root directory (/). The developer can set a different default with the Dockerfile WORKDIR instruction. The operator can override the working directory by using the \fB-w\fR option.
+
+.TP
+.B IMAGE:
+The image name or ID.
+
+.TP
+.B COMMAND:
+The command or program to run inside the image.
+
+.TP
+.B ARG:
+The arguments for the command to be run in the container.
+
+.SH EXAMPLES
+.sp
+.sp
+.B Exposing log messages from the container to the host's log
+.TP
+If you want messages that are logged in your container to show up in the host's syslog/journal then you should bind mount the /var/log directory as follows.
+.sp
+.RS
+docker run -v /dev/log:/dev/log -i -t fedora /bin/bash
+.RE
+.sp
+From inside the container you can test this by sending a message to the log.
+.sp
+.RS
+logger "Hello from my container"
+.sp
+.RE
+Then exit and check the journal.
+.RS
+.sp
+exit
+.sp
+journalctl -b | grep hello
+.RE
+.sp
+This should list the message sent to logger.
+.sp
+.B Attaching to one or more from STDIN, STDOUT, STDERR
+.TP
+If you do not specify -a then Docker will attach everything (stdin,stdout,stderr). You can specify to which of the three standard streams (stdin, stdout, stderr) you’d like to connect instead, as in:
+.sp
+.RS
+docker run -a stdin -a stdout -i -t fedora /bin/bash
+.RE
+.sp
+.B Linking Containers
+.TP
+The link feature allows multiple containers to communicate with each other. For example, a container whose Dockerfile has exposed port 80 can be run and named as follows:
+.sp
+.RS
+docker run --name=link-test -d -i -t fedora/httpd
+.RE
+.sp
+.TP
+A second container, in this case called linker, can communicate with the httpd container, named link-test, by running with the \fB--link=<name>:<alias>\fR
+.sp
+.RS
+docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash
+.RE
+.sp
+.TP
+Now the container linker is linked to container link-test with the alias lt. Running the \fBenv\fR command in the linker container shows environment variables with the LT (alias) context (\fBLT_\fR)
+.sp
+.nf
+.RS
+# env
+HOSTNAME=668231cb0978
+TERM=xterm
+LT_PORT_80_TCP=tcp://172.17.0.3:80
+LT_PORT_80_TCP_PORT=80
+LT_PORT_80_TCP_PROTO=tcp
+LT_PORT=tcp://172.17.0.3:80
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+PWD=/
+LT_NAME=/linker/lt
+SHLVL=1
+HOME=/
+LT_PORT_80_TCP_ADDR=172.17.0.3
+_=/usr/bin/env
+.RE
+.fi
+.sp
+.TP
+When linking two containers Docker will use the exposed ports of the container to create a secure tunnel for the parent to access.
+.TP
+.sp
+.B Mapping Ports for External Usage
+.TP
+The exposed port of an application can be mapped to a host port using the \fB-p\fR flag. For example a httpd port 80 can be mapped to the host port 8080 using the following:
+.sp
+.RS
+docker run -p 8080:80 -d -i -t fedora/httpd
+.RE
+.sp
+.TP
+.B Creating and Mounting a Data Volume Container
+.TP
+Many applications require the sharing of persistent data across several containers. Docker allows you to create a Data Volume Container that other containers can mount from. For example, create a named container that contains directories /var/volume1 and /tmp/volume2. The image will need to contain these directories so a couple of RUN mkdir instructions might be required for your fedora-data image:
+.sp
+.RS
+docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true
+.sp
+docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash
+.RE
+.sp
+.TP
+Multiple --volumes-from parameters will bring together multiple data volumes from multiple containers. And it's possible to mount the volumes that came from the DATA container in yet another container via the fedora-container1 intermediary container, allowing to abstract the actual data source from users of that data:
+.sp
+.RS
+docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash
+.RE
+.TP
+.sp
+.B Mounting External Volumes
+.TP
+To mount a host directory as a container volume, specify the absolute path to the directory and the absolute path for the container directory separated by a colon:
+.sp
+.RS
+docker run -v /var/db:/data1 -i -t fedora bash
+.RE
+.sp
+.TP
+When using SELinux, be aware that the host has no knowledge of container SELinux policy. Therefore, in the above example, if SELinux policy is enforced, the /var/db directory is not writable to the container. A "Permission Denied" message will occur and an avc: message in the host's syslog.
+.sp
+.TP
+To work around this, at time of writing this man page, the following command needs to be run in order for the proper SELinux policy type label to be attached to the host directory:
+.sp
+.RS
+chcon -Rt svirt_sandbox_file_t /var/db
+.RE
+.sp
+.TP
+Now, writing to the /data1 volume in the container will be allowed and the changes will also be reflected on the host in /var/db.
+.sp
+.SH HISTORY
+March 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/man/man1/docker-tag.1 b/contrib/man/man1/docker-tag.1
new file mode 100644
index 0000000000..df85a1e8c1
--- /dev/null
+++ b/contrib/man/man1/docker-tag.1
@@ -0,0 +1,49 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker-tag.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker-tag \- Tag an image in the repository
+.SH SYNOPSIS
+.B docker tag
+[\fB-f\fR|\fB--force\fR[=\fIfalse\fR]
+\fBIMAGE\fR [REGISTRYHOST/][USERNAME/]NAME[:TAG]
+.SH DESCRIPTION
+This will tag an image in the repository.
+.SH "OPTIONS"
+.TP
+.B -f, --force=\fItrue\fR|\fIfalse\fR:
+When set to true, force the tag name. The default is \fIfalse\fR.
+.TP
+.B REGISTRYHOST:
+The hostname of the registry if required. This may also include the port separated by a ':'
+.TP
+.B USERNAME:
+The username or other qualifying identifier for the image.
+.TP
+.B NAME:
+The image name.
+.TP
+.B TAG:
+The tag you are assigning to the image.
+.SH EXAMPLES
+.sp
+.PP
+.B Tagging an image
+.TP
+Here is an example where an image is tagged with the tag 'Version-1.0' :
+.sp
+.RS
+docker tag 0e5574283393 fedora/httpd:Version-1.0
+.RE
+.sp
+.B Tagging an image for an internal repository
+.TP
+To push an image to an internal Registry and not the default docker.io based registry you must tag it with the registry hostname and port (if needed).
+.sp
+.RS
+docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0
+.RE
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/man/man1/docker.1 b/contrib/man/man1/docker.1
new file mode 100644
index 0000000000..4a36e5baf5
--- /dev/null
+++ b/contrib/man/man1/docker.1
@@ -0,0 +1,172 @@
+.\" Process this file with
+.\" nroff -man -Tascii docker.1
+.\"
+.TH "DOCKER" "1" "APRIL 2014" "0.1" "Docker"
+.SH NAME
+docker \- Docker image and container command line interface
+.SH SYNOPSIS
+.B docker [OPTIONS] [COMMAND] [arg...]
+.SH DESCRIPTION
+\fBdocker\fR has two distinct functions. It is used for starting the Docker daemon and to run the CLI (i.e., to command the daemon to manage images, containers etc.) So \fBdocker\fR is both a server, as a daemon, and a client to the daemon through the CLI.
+.sp
+To run the Docker daemon you do not specify any of the commands listed below but must specify the \fB-d\fR option. The other options listed below are for the daemon only.
+.sp
+The Docker CLI has over 30 commands. The commands are listed below and each has its own man page which explains usage and arguments.
+.sp
+To see the man page for a command run \fBman docker <command>\fR.
+.SH "OPTIONS"
+.B \-D=false:
+Enable debug mode
+.TP
+.B \-H=[unix:///var/run/docker.sock]: tcp://[host[:port]] to bind or unix://[/path/to/socket] to use.
+When host=[0.0.0.0], port=[4243] or path
+=[/var/run/docker.sock] is omitted, default values are used.
+.TP
+.B \-\-api-enable-cors=false
+Enable CORS headers in the remote API
+.TP
+.B \-b=""
+Attach containers to a pre\-existing network bridge; use 'none' to disable container networking
+.TP
+.B \-\-bip=""
+Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b
+.TP
+.B \-d=false
+Enable daemon mode
+.TP
+.B \-\-dns=""
+Force Docker to use specific DNS servers
+.TP
+.B \-g="/var/lib/docker"
+Path to use as the root of the Docker runtime
+.TP
+.B \-\-icc=true
+Enable inter\-container communication
+.TP
+.B \-\-ip="0.0.0.0"
+Default IP address to use when binding container ports
+.TP
+.B \-\-iptables=true
+Disable Docker's addition of iptables rules
+.TP
+.B \-\-mtu=1500
+Set the containers network mtu
+.TP
+.B \-p="/var/run/docker.pid"
+Path to use for daemon PID file
+.TP
+.B \-r=true
+Restart previously running containers
+.TP
+.B \-s=""
+Force the Docker runtime to use a specific storage driver
+.TP
+.B \-v=false
+Print version information and quit
+.SH "COMMANDS"
+.TP
+.B attach
+Attach to a running container
+.TP
+.B build
+Build a container from a Dockerfile
+.TP
+.B commit
+Create a new image from a container's changes
+.TP
+.B cp
+Copy files/folders from the containers filesystem to the host at path
+.TP
+.B diff
+Inspect changes on a container's filesystem
+
+.TP
+.B events
+Get real time events from the server
+.TP
+.B export
+Stream the contents of a container as a tar archive
+.TP
+.B history
+Show the history of an image
+.TP
+.B images
+List images
+.TP
+.B import
+Create a new filesystem image from the contents of a tarball
+.TP
+.B info
+Display system-wide information
+.TP
+.B insert
+Insert a file in an image
+.TP
+.B inspect
+Return low-level information on a container
+.TP
+.B kill
+Kill a running container (which includes the wrapper process and everything inside it)
+.TP
+.B load
+Load an image from a tar archive
+.TP
+.B login
+Register or Login to a Docker registry server
+.TP
+.B logs
+Fetch the logs of a container
+.TP
+.B port
+Lookup the public-facing port which is NAT-ed to PRIVATE_PORT
+.TP
+.B ps
+List containers
+.TP
+.B pull
+Pull an image or a repository from a Docker registry server
+.TP
+.B push
+Push an image or a repository to a Docker registry server
+.TP
+.B restart
+Restart a running container
+.TP
+.B rm
+Remove one or more containers
+.TP
+.B rmi
+Remove one or more images
+.TP
+.B run
+Run a command in a new container
+.TP
+.B save
+Save an image to a tar archive
+.TP
+.B search
+Search for an image in the Docker index
+.TP
+.B start
+Start a stopped container
+.TP
+.B stop
+Stop a running container
+.TP
+.B tag
+Tag an image into a repository
+.TP
+.B top
+Lookup the running processes of a container
+.TP
+.B version
+Show the Docker version information
+.TP
+.B wait
+Block until a container stops, then print its exit code
+.SH EXAMPLES
+.sp
+For specific examples please see the man page for the specific Docker command.
+.sp
+.SH HISTORY
+April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.io source material and internal work.
diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh
index bf89600973..33ba7b07cb 100755
--- a/contrib/mkimage-debootstrap.sh
+++ b/contrib/mkimage-debootstrap.sh
@@ -219,6 +219,7 @@ if [ -z "$strictDebootstrap" ]; then
# make sure our packages lists are as up to date as we can get them
sudo chroot . apt-get update
+ sudo chroot . apt-get dist-upgrade -y
fi
if [ "$justTar" ]; then
diff --git a/contrib/mkseccomp.pl b/contrib/mkseccomp.pl
index 5c583cc3d3..28d0645af0 100755
--- a/contrib/mkseccomp.pl
+++ b/contrib/mkseccomp.pl
@@ -10,7 +10,7 @@
# can configure the list of syscalls. When run, this script produces output
# which, when stored in a file, can be passed to docker as follows:
#
-# docker run -lxc-conf="lxc.seccomp=$file" <rest of arguments>
+# docker run --lxc-conf="lxc.seccomp=$file" <rest of arguments>
#
# The included sample file shows how to cut about a quarter of all syscalls,
# which affecting most applications.
diff --git a/config.go b/daemonconfig/config.go
index 19aad9ed4a..146916d79a 100644
--- a/config.go
+++ b/daemonconfig/config.go
@@ -1,10 +1,9 @@
-package docker
+package daemonconfig
import (
- "net"
-
"github.com/dotcloud/docker/engine"
- "github.com/dotcloud/docker/networkdriver"
+ "github.com/dotcloud/docker/runtime/networkdriver"
+ "net"
)
const (
@@ -13,11 +12,12 @@ const (
)
// FIXME: separate runtime configuration from http api configuration
-type DaemonConfig struct {
+type Config struct {
Pidfile string
Root string
AutoRestart bool
Dns []string
+ DnsSearch []string
EnableIptables bool
EnableIpForward bool
DefaultIp net.IP
@@ -28,12 +28,13 @@ type DaemonConfig struct {
ExecDriver string
Mtu int
DisableNetwork bool
+ EnableSelinuxSupport bool
}
// ConfigFromJob creates and returns a new DaemonConfig object
// by parsing the contents of a job's environment.
-func DaemonConfigFromJob(job *engine.Job) *DaemonConfig {
- config := &DaemonConfig{
+func ConfigFromJob(job *engine.Job) *Config {
+ config := &Config{
Pidfile: job.Getenv("Pidfile"),
Root: job.Getenv("Root"),
AutoRestart: job.GetenvBool("AutoRestart"),
@@ -45,10 +46,14 @@ func DaemonConfigFromJob(job *engine.Job) *DaemonConfig {
InterContainerCommunication: job.GetenvBool("InterContainerCommunication"),
GraphDriver: job.Getenv("GraphDriver"),
ExecDriver: job.Getenv("ExecDriver"),
+ EnableSelinuxSupport: false, // FIXME: hardcoded default to disable selinux for .10 release
}
if dns := job.GetenvList("Dns"); dns != nil {
config.Dns = dns
}
+ if dnsSearch := job.GetenvList("DnsSearch"); dnsSearch != nil {
+ config.DnsSearch = dnsSearch
+ }
if mtu := job.GetenvInt("Mtu"); mtu != 0 {
config.Mtu = mtu
} else {
diff --git a/docker/docker.go b/docker/docker.go
index 2aa10dbe54..e96c173d30 100644
--- a/docker/docker.go
+++ b/docker/docker.go
@@ -1,21 +1,35 @@
package main
import (
+ "crypto/tls"
+ "crypto/x509"
"fmt"
+ "io/ioutil"
"log"
"os"
"strings"
"github.com/dotcloud/docker/api"
+ "github.com/dotcloud/docker/api/client"
"github.com/dotcloud/docker/builtins"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/engine"
+ "github.com/dotcloud/docker/opts"
flag "github.com/dotcloud/docker/pkg/mflag"
- "github.com/dotcloud/docker/pkg/opts"
"github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils"
)
+const (
+ defaultCaFile = "ca.pem"
+ defaultKeyFile = "key.pem"
+ defaultCertFile = "cert.pem"
+)
+
+var (
+ dockerConfDir = os.Getenv("HOME") + "/.docker/"
+)
+
func main() {
if selfPath := utils.SelfPath(); strings.Contains(selfPath, ".dockerinit") {
// Running in init mode
@@ -35,16 +49,23 @@ func main() {
flSocketGroup = flag.String([]string{"G", "-group"}, "docker", "Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group")
flEnableCors = flag.Bool([]string{"#api-enable-cors", "-api-enable-cors"}, false, "Enable CORS headers in the remote API")
flDns = opts.NewListOpts(opts.ValidateIp4Address)
- flEnableIptables = flag.Bool([]string{"#iptables", "-iptables"}, true, "Disable docker's addition of iptables rules")
- flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Disable enabling of net.ipv4.ip_forward")
+ flDnsSearch = opts.NewListOpts(opts.ValidateDomain)
+ flEnableIptables = flag.Bool([]string{"#iptables", "-iptables"}, true, "Enable Docker's addition of iptables rules")
+ flEnableIpForward = flag.Bool([]string{"#ip-forward", "-ip-forward"}, true, "Enable net.ipv4.ip_forward")
flDefaultIp = flag.String([]string{"#ip", "-ip"}, "0.0.0.0", "Default IP address to use when binding container ports")
flInterContainerComm = flag.Bool([]string{"#icc", "-icc"}, true, "Enable inter-container communication")
flGraphDriver = flag.String([]string{"s", "-storage-driver"}, "", "Force the docker runtime to use a specific storage driver")
flExecDriver = flag.String([]string{"e", "-exec-driver"}, "native", "Force the docker runtime to use a specific exec driver")
flHosts = opts.NewListOpts(api.ValidateHost)
flMtu = flag.Int([]string{"#mtu", "-mtu"}, 0, "Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available")
+ flTls = flag.Bool([]string{"-tls"}, false, "Use TLS; implied by tls-verify flags")
+ flTlsVerify = flag.Bool([]string{"-tlsverify"}, false, "Use TLS and verify the remote (daemon: verify client, client: verify daemon)")
+ flCa = flag.String([]string{"-tlscacert"}, dockerConfDir+defaultCaFile, "Trust only remotes providing a certificate signed by the CA given here")
+ flCert = flag.String([]string{"-tlscert"}, dockerConfDir+defaultCertFile, "Path to TLS certificate file")
+ flKey = flag.String([]string{"-tlskey"}, dockerConfDir+defaultKeyFile, "Path to TLS key file")
)
flag.Var(&flDns, []string{"#dns", "-dns"}, "Force docker to use specific DNS servers")
+ flag.Var(&flDnsSearch, []string{"-dns-search"}, "Force Docker to use specific DNS search domains")
flag.Var(&flHosts, []string{"H", "-host"}, "tcp://host:port, unix://path/to/socket, fd://* or fd://socketfd to use in daemon mode. Multiple sockets can be specified")
flag.Parse()
@@ -73,6 +94,7 @@ func main() {
if *flDebug {
os.Setenv("DEBUG", "1")
}
+
if *flDaemon {
if flag.NArg() != 0 {
flag.Usage()
@@ -115,6 +137,7 @@ func main() {
job.Setenv("Root", realRoot)
job.SetenvBool("AutoRestart", *flAutoRestart)
job.SetenvList("Dns", flDns.GetAll())
+ job.SetenvList("DnsSearch", flDnsSearch.GetAll())
job.SetenvBool("EnableIptables", *flEnableIptables)
job.SetenvBool("EnableIpForward", *flEnableIpForward)
job.Setenv("BridgeIface", *bridgeName)
@@ -140,6 +163,12 @@ func main() {
job.SetenvBool("EnableCors", *flEnableCors)
job.Setenv("Version", dockerversion.VERSION)
job.Setenv("SocketGroup", *flSocketGroup)
+
+ job.SetenvBool("Tls", *flTls)
+ job.SetenvBool("TlsVerify", *flTlsVerify)
+ job.Setenv("TlsCa", *flCa)
+ job.Setenv("TlsCert", *flCert)
+ job.Setenv("TlsKey", *flKey)
if err := job.Run(); err != nil {
log.Fatal(err)
}
@@ -148,7 +177,47 @@ func main() {
log.Fatal("Please specify only one -H")
}
protoAddrParts := strings.SplitN(flHosts.GetAll()[0], "://", 2)
- if err := api.ParseCommands(protoAddrParts[0], protoAddrParts[1], flag.Args()...); err != nil {
+
+ var (
+ cli *client.DockerCli
+ tlsConfig tls.Config
+ )
+ tlsConfig.InsecureSkipVerify = true
+
+ // If we should verify the server, we need to load a trusted ca
+ if *flTlsVerify {
+ *flTls = true
+ certPool := x509.NewCertPool()
+ file, err := ioutil.ReadFile(*flCa)
+ if err != nil {
+ log.Fatalf("Couldn't read ca cert %s: %s", *flCa, err)
+ }
+ certPool.AppendCertsFromPEM(file)
+ tlsConfig.RootCAs = certPool
+ tlsConfig.InsecureSkipVerify = false
+ }
+
+ // If tls is enabled, try to load and send client certificates
+ if *flTls || *flTlsVerify {
+ _, errCert := os.Stat(*flCert)
+ _, errKey := os.Stat(*flKey)
+ if errCert == nil && errKey == nil {
+ *flTls = true
+ cert, err := tls.LoadX509KeyPair(*flCert, *flKey)
+ if err != nil {
+ log.Fatalf("Couldn't load X509 key pair: %s. Key encrypted?", err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ }
+ }
+
+ if *flTls || *flTlsVerify {
+ cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], &tlsConfig)
+ } else {
+ cli = client.NewDockerCli(os.Stdin, os.Stdout, os.Stderr, protoAddrParts[0], protoAddrParts[1], nil)
+ }
+
+ if err := cli.ParseCommands(flag.Args()...); err != nil {
if sterr, ok := err.(*utils.StatusError); ok {
if sterr.Status != "" {
log.Println(sterr.Status)
diff --git a/docs/MAINTAINERS b/docs/MAINTAINERS
index e816670419..52505fab00 100644
--- a/docs/MAINTAINERS
+++ b/docs/MAINTAINERS
@@ -1,3 +1,2 @@
-Andy Rothfusz <andy@dotcloud.com> (@metalivedev)
James Turnbull <james@lovedthanlost.net> (@jamtur01)
Sven Dowideit <SvenDowideit@fosiki.com> (@SvenDowideit)
diff --git a/docs/sources/articles/runmetrics.rst b/docs/sources/articles/runmetrics.rst
index afb7f82e39..6b705fb737 100644
--- a/docs/sources/articles/runmetrics.rst
+++ b/docs/sources/articles/runmetrics.rst
@@ -63,7 +63,7 @@ For Docker containers using cgroups, the container name will be the
full ID or long ID of the container. If a container shows up as
ae836c95b4c3 in ``docker ps``, its long ID might be something like
``ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79``. You
-can look it up with ``docker inspect`` or ``docker ps -notrunc``.
+can look it up with ``docker inspect`` or ``docker ps --no-trunc``.
Putting everything together to look at the memory metrics for a Docker
container, take a look at ``/sys/fs/cgroup/memory/lxc/<longid>/``.
diff --git a/docs/sources/articles/security.rst b/docs/sources/articles/security.rst
index 3dc5780e85..ec2ab9bffd 100644
--- a/docs/sources/articles/security.rst
+++ b/docs/sources/articles/security.rst
@@ -7,7 +7,7 @@
Docker Security
===============
- *Adapted from* `Containers & Docker: How Secure are They? <blogsecurity>`_
+ *Adapted from* `Containers & Docker: How Secure are They? <blogsecurity_>`_
There are three major areas to consider when reviewing Docker security:
@@ -82,6 +82,8 @@ when some applications start to misbehave.
Control Groups have been around for a while as well: the code was
started in 2006, and initially merged in kernel 2.6.24.
+.. _dockersecurity_daemon:
+
Docker Daemon Attack Surface
----------------------------
@@ -261,7 +263,7 @@ with Docker, since everything is provided by the kernel anyway.
For more context and especially for comparisons with VMs and other
container systems, please also see the `original blog post
-<blogsecurity>`_.
+<blogsecurity_>`_.
.. _blogsecurity: http://blog.docker.io/2013/08/containers-docker-how-secure-are-they/
diff --git a/docs/sources/examples/apt-cacher-ng.Dockerfile b/docs/sources/examples/apt-cacher-ng.Dockerfile
new file mode 100644
index 0000000000..3b7862bb58
--- /dev/null
+++ b/docs/sources/examples/apt-cacher-ng.Dockerfile
@@ -0,0 +1,15 @@
+#
+# Build: docker build -t apt-cacher .
+# Run: docker run -d -p 3142:3142 --name apt-cacher-run apt-cacher
+#
+# and then you can run containers with:
+# docker run -t -i --rm -e http_proxy=http://dockerhost:3142/ debian bash
+#
+FROM ubuntu
+MAINTAINER SvenDowideit@docker.com
+
+VOLUME ["/var/cache/apt-cacher-ng"]
+RUN apt-get update ; apt-get install -yq apt-cacher-ng
+
+EXPOSE 3142
+CMD chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*
diff --git a/docs/sources/examples/apt-cacher-ng.rst b/docs/sources/examples/apt-cacher-ng.rst
new file mode 100644
index 0000000000..dd844d4ef1
--- /dev/null
+++ b/docs/sources/examples/apt-cacher-ng.rst
@@ -0,0 +1,102 @@
+:title: Running an apt-cacher-ng service
+:description: Installing and running an apt-cacher-ng service
+:keywords: docker, example, package installation, networking, debian, ubuntu
+
+.. _running_apt-cacher-ng_service:
+
+Apt-Cacher-ng Service
+=====================
+
+.. include:: example_header.inc
+
+
+When you have multiple Docker servers, or build unrelated Docker containers
+which can't make use of the Docker build cache, it can be useful to have a
+caching proxy for your packages. This container makes the second download of
+any package almost instant.
+
+Use the following Dockerfile:
+
+.. literalinclude:: apt-cacher-ng.Dockerfile
+
+Build the image using:
+
+.. code-block:: bash
+
+ $ sudo docker build -t eg_apt_cacher_ng .
+
+Then run it, mapping the exposed port to one on the host
+
+.. code-block:: bash
+
+ $ sudo docker run -d -p 3142:3142 --name test_apt_cacher_ng eg_apt_cacher_ng
+
+To see the logfiles that are 'tailed' in the default command, you can use:
+
+.. code-block:: bash
+
+ $ sudo docker logs -f test_apt_cacher_ng
+
+To get your Debian-based containers to use the proxy, you can do one of three things
+
+1. Add an apt Proxy setting ``echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy``
+2. Set an environment variable: ``http_proxy=http://dockerhost:3142/``
+3. Change your ``sources.list`` entries to start with ``http://dockerhost:3142/``
+
+**Option 1** injects the settings safely into your apt configuration in a local
+version of a common base:
+
+.. code-block:: bash
+
+ FROM ubuntu
+ RUN echo 'Acquire::http { Proxy "http://dockerhost:3142"; };' >> /etc/apt/apt.conf.d/01proxy
+ RUN apt-get update ; apt-get install vim git
+
+ # docker build -t my_ubuntu .
+
+**Option 2** is good for testing, but will
+break other HTTP clients which obey ``http_proxy``, such as ``curl``, ``wget`` and others:
+
+.. code-block:: bash
+
+ $ sudo docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash
+
+**Option 3** is the least portable, but there will be times when you might need to
+do it and you can do it from your ``Dockerfile`` too.
+
+Apt-cacher-ng has some tools that allow you to manage the repository, and they
+can be used by leveraging the ``VOLUME`` instruction, and the image we built to run the
+service:
+
+.. code-block:: bash
+
+ $ sudo docker run --rm -t -i --volumes-from test_apt_cacher_ng eg_apt_cacher_ng bash
+
+ $$ /usr/lib/apt-cacher-ng/distkill.pl
+ Scanning /var/cache/apt-cacher-ng, please wait...
+ Found distributions:
+ bla, taggedcount: 0
+ 1. precise-security (36 index files)
+ 2. wheezy (25 index files)
+ 3. precise-updates (36 index files)
+ 4. precise (36 index files)
+ 5. wheezy-updates (18 index files)
+
+ Found architectures:
+ 6. amd64 (36 index files)
+ 7. i386 (24 index files)
+
+ WARNING: The removal action may wipe out whole directories containing
+ index files. Select d to see detailed list.
+
+ (Number nn: tag distribution or architecture nn; 0: exit; d: show details; r: remove tagged; q: quit): q
+
+
+Finally, clean up after your test by stopping and removing the container, and
+then removing the image.
+
+.. code-block:: bash
+
+ $ sudo docker stop test_apt_cacher_ng
+ $ sudo docker rm test_apt_cacher_ng
+ $ sudo docker rmi eg_apt_cacher_ng
diff --git a/docs/sources/examples/example_header.inc b/docs/sources/examples/example_header.inc
index 0621b39794..5841141e59 100644
--- a/docs/sources/examples/example_header.inc
+++ b/docs/sources/examples/example_header.inc
@@ -4,4 +4,5 @@
* This example assumes you have Docker running in daemon mode. For
more information please see :ref:`running_examples`.
* **If you don't like sudo** then see :ref:`dockergroup`
+ * **If you're using OS X or docker via TCP** then you shouldn't use `sudo`
diff --git a/docs/sources/examples/hello_world.rst b/docs/sources/examples/hello_world.rst
index 63362e7d7b..39d7abea2c 100644
--- a/docs/sources/examples/hello_world.rst
+++ b/docs/sources/examples/hello_world.rst
@@ -52,8 +52,8 @@ This command will run a simple ``echo`` command, that will echo ``hello world``
**Explanation:**
-- **"sudo"** execute the following commands as user *root*
-- **"docker run"** run a command in a new container
+- **"sudo"** execute the following commands as user *root*
+- **"docker run"** run a command in a new container
- **"busybox"** is the image we are running the command in.
- **"/bin/echo"** is the command we want to run in the container
- **"hello world"** is the input for the echo command
@@ -67,9 +67,9 @@ See the example in action
.. raw:: html
<iframe width="560" height="400" frameborder="0"
- sandbox="allow-same-origin allow-scripts"
- srcdoc="<body><script type=&quot;text/javascript&quot;
- src=&quot;https://asciinema.org/a/7658.js&quot;
+ sandbox="allow-same-origin allow-scripts"
+ srcdoc="<body><script type=&quot;text/javascript&quot;
+ src=&quot;https://asciinema.org/a/7658.js&quot;
id=&quot;asciicast-7658&quot; async></script></body>">
</iframe>
@@ -92,7 +92,7 @@ we stop it.
.. code-block:: bash
- CONTAINER_ID=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")
+ container_id=$(sudo docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done")
We are going to run a simple hello world daemon in a new container
made from the ``ubuntu`` image.
@@ -104,30 +104,30 @@ made from the ``ubuntu`` image.
- **"while true; do echo hello world; sleep 1; done"** is the mini
script we want to run, that will just print hello world once a
second until we stop it.
-- **$CONTAINER_ID** the output of the run command will return a
+- **$container_id** the output of the run command will return a
container id, we can use in future commands to see what is going on
with this process.
.. code-block:: bash
- sudo docker logs $CONTAINER_ID
+ sudo docker logs $container_id
Check the logs make sure it is working correctly.
- **"docker logs**" This will return the logs for a container
-- **$CONTAINER_ID** The Id of the container we want the logs for.
+- **$container_id** The Id of the container we want the logs for.
.. code-block:: bash
- sudo docker attach -sig-proxy=false $CONTAINER_ID
+ sudo docker attach --sig-proxy=false $container_id
Attach to the container to see the results in real-time.
- **"docker attach**" This will allow us to attach to a background
process to see what is going on.
-- **"-sig-proxy=false"** Do not forward signals to the container; allows
+- **"--sig-proxy=false"** Do not forward signals to the container; allows
us to exit the attachment using Control-C without stopping the container.
-- **$CONTAINER_ID** The Id of the container we want to attach too.
+- **$container_id** The Id of the container we want to attach to.
Exit from the container attachment by pressing Control-C.
@@ -141,12 +141,12 @@ Check the process list to make sure it is running.
.. code-block:: bash
- sudo docker stop $CONTAINER_ID
+ sudo docker stop $container_id
Stop the container, since we don't need it anymore.
- **"docker stop"** This stops a container
-- **$CONTAINER_ID** The Id of the container we want to stop.
+- **$container_id** The Id of the container we want to stop.
.. code-block:: bash
@@ -162,9 +162,9 @@ See the example in action
.. raw:: html
<iframe width="560" height="400" frameborder="0"
- sandbox="allow-same-origin allow-scripts"
- srcdoc="<body><script type=&quot;text/javascript&quot;
- src=&quot;https://asciinema.org/a/2562.js&quot;
+ sandbox="allow-same-origin allow-scripts"
+ srcdoc="<body><script type=&quot;text/javascript&quot;
+ src=&quot;https://asciinema.org/a/2562.js&quot;
id=&quot;asciicast-2562&quot; async></script></body>">
</iframe>
diff --git a/docs/sources/examples/https.rst b/docs/sources/examples/https.rst
new file mode 100644
index 0000000000..7a221ed951
--- /dev/null
+++ b/docs/sources/examples/https.rst
@@ -0,0 +1,126 @@
+:title: Docker HTTPS Setup
+:description: How to setup docker with https
+:keywords: docker, example, https, daemon
+
+.. _running_docker_https:
+
+Running Docker with https
+=========================
+
+By default, Docker runs via a non-networked Unix socket. It can also optionally
+communicate using an HTTP socket.
+
+If you need Docker reachable via the network in a safe manner, you can enable
+TLS by specifying the `tlsverify` flag and pointing Docker's `tlscacert` flag to a
+trusted CA certificate.
+
+In daemon mode, it will only allow connections from clients authenticated by a
+certificate signed by that CA. In client mode, it will only connect to servers
+with a certificate signed by that CA.
+
+.. warning::
+
+ Using TLS and managing a CA is an advanced topic. Please make yourself familiar
+ with openssl, x509 and tls before using it in production.
+
+Create a CA, server and client keys with OpenSSL
+------------------------------------------------
+
+First, initialize the CA serial file and generate CA private and public keys:
+
+.. code-block:: bash
+
+ $ echo 01 > ca.srl
+ $ openssl genrsa -des3 -out ca-key.pem
+ $ openssl req -new -x509 -days 365 -key ca-key.pem -out ca.pem
+
+Now that we have a CA, you can create a server key and certificate signing request.
+Make sure that `"Common Name (e.g. server FQDN or YOUR name)"` matches the hostname you will use
+to connect to Docker or just use '*' for a certificate valid for any hostname:
+
+.. code-block:: bash
+
+ $ openssl genrsa -des3 -out server-key.pem
+ $ openssl req -new -key server-key.pem -out server.csr
+
+Next we're going to sign the key with our CA:
+
+.. code-block:: bash
+
+ $ openssl x509 -req -days 365 -in server.csr -CA ca.pem -CAkey ca-key.pem \
+ -out server-cert.pem
+
+For client authentication, create a client key and certificate signing request:
+
+.. code-block:: bash
+
+ $ openssl genrsa -des3 -out client-key.pem
+ $ openssl req -new -key client-key.pem -out client.csr
+
+
+To make the key suitable for client authentication, create an extensions config file:
+
+.. code-block:: bash
+
+ $ echo extendedKeyUsage = clientAuth > extfile.cnf
+
+Now sign the key:
+
+.. code-block:: bash
+
+ $ openssl x509 -req -days 365 -in client.csr -CA ca.pem -CAkey ca-key.pem \
+ -out client-cert.pem -extfile extfile.cnf
+
+Finally you need to remove the passphrase from the client and server key:
+
+.. code-block:: bash
+
+ $ openssl rsa -in server-key.pem -out server-key.pem
+ $ openssl rsa -in client-key.pem -out client-key.pem
+
+Now you can make the Docker daemon only accept connections from clients providing
+a certificate trusted by our CA:
+
+.. code-block:: bash
+
+ $ sudo docker -d --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \
+ -H=0.0.0.0:4243
+
+To be able to connect to Docker and validate its certificate, you now need to provide your client keys,
+certificates and trusted CA:
+
+.. code-block:: bash
+
+ $ docker --tlsverify --tlscacert=ca.pem --tlscert=client-cert.pem --tlskey=client-key.pem \
+ -H=dns-name-of-docker-host:4243
+
+.. warning::
+
+ As shown in the example above, you don't have to run the ``docker``
+ client with ``sudo`` or the ``docker`` group when you use
+ certificate authentication. That means anyone with the keys can
+ give any instructions to your Docker daemon, giving them root
+ access to the machine hosting the daemon. Guard these keys as you
+ would a root password!
+
+Other modes
+-----------
+If you don't want to have complete two-way authentication, you can run Docker in
+various other modes by mixing the flags.
+
+Daemon modes
+~~~~~~~~~~~~
+- tlsverify, tlscacert, tlscert, tlskey set: Authenticate clients
+- tls, tlscert, tlskey: Do not authenticate clients
+
+Client modes
+~~~~~~~~~~~~
+- tls: Authenticate server based on public/default CA pool
+- tlsverify, tlscacert: Authenticate server based on given CA
+- tls, tlscert, tlskey: Authenticate with client certificate, do not authenticate
+ server based on given CA
+- tlsverify, tlscacert, tlscert, tlskey: Authenticate with client certificate,
+ authenticate server based on given CA
+
+The client will send its client certificate if found, so you just need to drop
+your keys into `~/.docker/<ca, cert or key>.pem`
diff --git a/docs/sources/examples/index.rst b/docs/sources/examples/index.rst
index cf9ed9340a..94e2d917bb 100644
--- a/docs/sources/examples/index.rst
+++ b/docs/sources/examples/index.rst
@@ -26,3 +26,5 @@ to more substantial services like those which you might find in production.
using_supervisord
cfengine_process_management
python_web_app
+ apt-cacher-ng
+ https
diff --git a/docs/sources/examples/mongodb.rst b/docs/sources/examples/mongodb.rst
index 3e37d74c30..913dc2699a 100644
--- a/docs/sources/examples/mongodb.rst
+++ b/docs/sources/examples/mongodb.rst
@@ -47,7 +47,7 @@ divert ``/sbin/initctl`` to ``/bin/true`` so it thinks everything is working.
# Hack for initctl not being available in Ubuntu
RUN dpkg-divert --local --rename --add /sbin/initctl
- RUN ln -s /bin/true /sbin/initctl
+ RUN ln -sf /bin/true /sbin/initctl
Afterwards we'll be able to update our apt repositories and install MongoDB
@@ -86,10 +86,10 @@ the local port!
.. code-block:: bash
# Regular style
- MONGO_ID=$(sudo docker run -d <yourname>/mongodb)
+ MONGO_ID=$(sudo docker run -P -d <yourname>/mongodb)
# Lean and mean
- MONGO_ID=$(sudo docker run -d <yourname>/mongodb --noprealloc --smallfiles)
+ MONGO_ID=$(sudo docker run -P -d <yourname>/mongodb --noprealloc --smallfiles)
# Check the logs out
sudo docker logs $MONGO_ID
diff --git a/docs/sources/examples/nodejs_web_app.rst b/docs/sources/examples/nodejs_web_app.rst
index 68c073da7b..55bd76db89 100644
--- a/docs/sources/examples/nodejs_web_app.rst
+++ b/docs/sources/examples/nodejs_web_app.rst
@@ -18,7 +18,7 @@ https://github.com/gasi/docker-node-hello.
Create Node.js app
++++++++++++++++++
-First, create a ``package.json`` file that describes your app and its
+First, create a directory ``src`` where all the files will live. Then create a ``package.json`` file that describes your app and its
dependencies:
.. code-block:: json
@@ -50,7 +50,7 @@ Then, create an ``index.js`` file that defines a web app using the
res.send('Hello World\n');
});
- app.listen(PORT)
+ app.listen(PORT);
console.log('Running on http://localhost:' + PORT);
@@ -91,7 +91,7 @@ To install the right package for CentOS, we’ll use the instructions from the
.. code-block:: bash
# Enable EPEL for Node.js
- RUN rpm -Uvh http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
+ RUN rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
# Install Node.js and npm
RUN yum install -y npm
diff --git a/docs/sources/examples/postgresql_service.Dockerfile b/docs/sources/examples/postgresql_service.Dockerfile
index af1423f258..219a537882 100644
--- a/docs/sources/examples/postgresql_service.Dockerfile
+++ b/docs/sources/examples/postgresql_service.Dockerfile
@@ -7,7 +7,7 @@ MAINTAINER SvenDowideit@docker.com
# Add the PostgreSQL PGP key to verify their Debian packages.
# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc
-RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
+RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
# Add PostgreSQL's repository. It contains the most recent stable release
# of PostgreSQL, ``9.3``.
diff --git a/docs/sources/examples/postgresql_service.rst b/docs/sources/examples/postgresql_service.rst
index 5a2323471b..488e1530b2 100644
--- a/docs/sources/examples/postgresql_service.rst
+++ b/docs/sources/examples/postgresql_service.rst
@@ -37,24 +37,24 @@ And run the PostgreSQL server container (in the foreground):
.. code-block:: bash
- $ sudo docker run -rm -P -name pg_test eg_postgresql
+ $ sudo docker run --rm -P --name pg_test eg_postgresql
There are 2 ways to connect to the PostgreSQL server. We can use
:ref:`working_with_links_names`, or we can access it from our host (or the network).
-.. note:: The ``-rm`` removes the container and its image when the container
+.. note:: The ``--rm`` removes the container and its image when the container
exits successfully.
Using container linking
^^^^^^^^^^^^^^^^^^^^^^^
Containers can be linked to another container's ports directly using
-``-link remote_name:local_alias`` in the client's ``docker run``. This will
+``--link remote_name:local_alias`` in the client's ``docker run``. This will
set a number of environment variables that can then be used to connect:
.. code-block:: bash
- $ sudo docker run -rm -t -i -link pg_test:pg eg_postgresql bash
+ $ sudo docker run --rm -t -i --link pg_test:pg eg_postgresql bash
postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password
@@ -104,7 +104,7 @@ configuration and data:
.. code-block:: bash
- docker run -rm --volumes-from pg_test -t -i busybox sh
+ docker run --rm --volumes-from pg_test -t -i busybox sh
/ # ls
bin etc lib linuxrc mnt proc run sys usr
diff --git a/docs/sources/examples/python_web_app.rst b/docs/sources/examples/python_web_app.rst
index 5b8e3f6b4b..33c038f9ab 100644
--- a/docs/sources/examples/python_web_app.rst
+++ b/docs/sources/examples/python_web_app.rst
@@ -51,7 +51,7 @@ try things out, and then exit when you're done.
.. code-block:: bash
- $ sudo docker run -i -t -name pybuilder_run shykes/pybuilder bash
+ $ sudo docker run -i -t --name pybuilder_run shykes/pybuilder bash
$$ URL=http://github.com/shykes/helloflask/archive/master.tar.gz
$$ /usr/local/bin/buildapp $URL
diff --git a/docs/sources/examples/running_redis_service.rst b/docs/sources/examples/running_redis_service.rst
index c9424867a4..5a5a1b003f 100644
--- a/docs/sources/examples/running_redis_service.rst
+++ b/docs/sources/examples/running_redis_service.rst
@@ -18,11 +18,11 @@ Firstly, we create a ``Dockerfile`` for our new Redis image.
.. code-block:: bash
- FROM ubuntu:12.10
- RUN apt-get update
- RUN apt-get -y install redis-server
+ FROM debian:jessie
+ RUN apt-get update && apt-get install -y redis-server
EXPOSE 6379
ENTRYPOINT ["/usr/bin/redis-server"]
+ CMD ["--bind", "0.0.0.0"]
Next we build an image from our ``Dockerfile``. Replace ``<your username>``
with your own user name.
@@ -49,7 +49,7 @@ use a container link to provide access to our Redis database.
Create your web application container
-------------------------------------
-Next we can create a container for our application. We're going to use the ``-link``
+Next we can create a container for our application. We're going to use the ``--link``
flag to create a link to the ``redis`` container we've just created with an alias of
``db``. This will create a secure tunnel to the ``redis`` container and expose the
Redis instance running inside that container to only this container.
diff --git a/docs/sources/examples/running_riak_service.rst b/docs/sources/examples/running_riak_service.rst
index ae08a4b7f0..55e5e405c9 100644
--- a/docs/sources/examples/running_riak_service.rst
+++ b/docs/sources/examples/running_riak_service.rst
@@ -88,7 +88,7 @@ Almost there. Next, we add a hack to get us by the lack of ``initctl``:
# Hack for initctl
# See: https://github.com/dotcloud/docker/issues/1024
RUN dpkg-divert --local --rename --add /sbin/initctl
- RUN ln -s /bin/true /sbin/initctl
+ RUN ln -sf /bin/true /sbin/initctl
Then, we expose the Riak Protocol Buffers and HTTP interfaces, along with SSH:
diff --git a/docs/sources/examples/running_ssh_service.rst b/docs/sources/examples/running_ssh_service.rst
index d27799bee7..4161275019 100644
--- a/docs/sources/examples/running_ssh_service.rst
+++ b/docs/sources/examples/running_ssh_service.rst
@@ -19,14 +19,14 @@ Build the image using:
.. code-block:: bash
- $ sudo docker build -rm -t eg_sshd .
+ $ sudo docker build -t eg_sshd .
Then run it. You can then use ``docker port`` to find out what host port the container's
port 22 is mapped to:
.. code-block:: bash
- $ sudo docker run -d -P -name test_sshd eg_sshd
+ $ sudo docker run -d -P --name test_sshd eg_sshd
$ sudo docker port test_sshd 22
0.0.0.0:49154
diff --git a/docs/sources/installation/amazon.rst b/docs/sources/installation/amazon.rst
index b5465e25f8..b062a15e1e 100644
--- a/docs/sources/installation/amazon.rst
+++ b/docs/sources/installation/amazon.rst
@@ -9,6 +9,7 @@ Amazon EC2
There are several ways to install Docker on AWS EC2:
+* :ref:`amazonquickstart_new` or
* :ref:`amazonquickstart` or
* :ref:`amazonstandard`
@@ -61,6 +62,37 @@ for every Docker command.
Once you've got Docker installed, you're ready to try it out -- head
on over to the :doc:`../use/basics` or :doc:`../examples/index` section.
+.. _amazonquickstart_new:
+
+Amazon QuickStart (Release Candidate - March 2014)
+--------------------------------------------------
+
+Amazon just published new Docker-ready AMIs (2014.03 Release Candidate). Docker packages
+can now be installed from Amazon's provided Software Repository.
+
+1. **Choose an image:**
+
+ * Launch the `Create Instance Wizard
+ <https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:>`_ menu
+ on your AWS Console.
+
+ * Click the ``Community AMI`` menu option on the left side
+
+ * Search for '2014.03' and select one of the Amazon provided AMI, for example ``amzn-ami-pv-2014.03.rc-0.x86_64-ebs``
+
+ * For testing you can use the default (possibly free)
+ ``t1.micro`` instance (more info on `pricing
+ <http://aws.amazon.com/en/ec2/pricing/>`_).
+
+ * Click the ``Next: Configure Instance Details`` button at the bottom right.
+
+2. After a few more standard choices where defaults are probably ok, your Amazon
+ Linux instance should be running!
+
+3. SSH to your instance to install Docker: ``ssh -i <path to your private key> ec2-user@<your public IP address>``
+
+4. Once connected to the instance, type ``sudo yum install -y docker ; sudo service docker start`` to install and start Docker
+
.. _amazonstandard:
Standard Ubuntu Installation
diff --git a/docs/sources/installation/binaries.rst b/docs/sources/installation/binaries.rst
index bfdfbe211f..c31e19acc4 100644
--- a/docs/sources/installation/binaries.rst
+++ b/docs/sources/installation/binaries.rst
@@ -29,6 +29,12 @@ To run properly, docker needs the following software to be installed at runtime:
- iptables version 1.4 or later
- Git version 1.7 or later
- XZ Utils 4.9 or later
+- a `properly mounted
+ <https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount>`_
+ cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point `is
+ <https://github.com/dotcloud/docker/issues/2683>`_ `not
+ <https://github.com/dotcloud/docker/issues/3485>`_ `sufficient
+ <https://github.com/dotcloud/docker/issues/4568>`_)
Check kernel dependencies
@@ -37,6 +43,9 @@ Check kernel dependencies
Docker in daemon mode has specific kernel requirements. For details,
check your distribution in :ref:`installation_list`.
+In general, a 3.8 Linux kernel (or higher) is preferred, as some of the
+prior versions have known issues that are triggered by Docker.
+
Note that Docker also has a client mode, which can run on virtually
any Linux kernel (it even builds on OSX!).
@@ -49,6 +58,9 @@ Get the docker binary:
wget https://get.docker.io/builds/Linux/x86_64/docker-latest -O docker
chmod +x docker
+.. note::
+ If you have trouble downloading the binary, you can also get the smaller
+ compressed release file: https://get.docker.io/builds/Linux/x86_64/docker-latest.tgz
Run the docker daemon
---------------------
@@ -77,7 +89,8 @@ always run as the root user, but if you run the ``docker`` client as a
user in the *docker* group then you don't need to add ``sudo`` to all
the client commands.
-.. warning:: The *docker* group is root-equivalent.
+.. warning:: The *docker* group (or the group specified with ``-G``) is
+   root-equivalent; see :ref:`dockersecurity_daemon` for details.
Upgrades
diff --git a/docs/sources/installation/fedora.rst b/docs/sources/installation/fedora.rst
index 7e0aee78fd..3b95f04f7f 100644
--- a/docs/sources/installation/fedora.rst
+++ b/docs/sources/installation/fedora.rst
@@ -23,15 +23,15 @@ The ``docker-io`` package provides Docker on Fedora.
If you have the (unrelated) ``docker`` package installed already, it will
conflict with ``docker-io``. There's a `bug report`_ filed for it.
-To proceed with ``docker-io`` installation on Fedora 19, please remove
-``docker`` first.
+To proceed with ``docker-io`` installation on Fedora 19 or Fedora 20, please
+remove ``docker`` first.
.. code-block:: bash
sudo yum -y remove docker
-For Fedora 20 and later, the ``wmdocker`` package will provide the same
-functionality as ``docker`` and will also not conflict with ``docker-io``.
+For Fedora 21 and later, the ``wmdocker`` package will provide the same
+functionality as the old ``docker`` and will also not conflict with ``docker-io``.
.. code-block:: bash
diff --git a/docs/sources/installation/index.rst b/docs/sources/installation/index.rst
index 39c1f6a292..ae0e9196fa 100644
--- a/docs/sources/installation/index.rst
+++ b/docs/sources/installation/index.rst
@@ -30,4 +30,5 @@ Contents:
amazon
rackspace
google
+ softlayer
binaries
diff --git a/docs/sources/installation/mac.rst b/docs/sources/installation/mac.rst
index 5139324d0b..9ce3961f7e 100644
--- a/docs/sources/installation/mac.rst
+++ b/docs/sources/installation/mac.rst
@@ -65,11 +65,12 @@ Run the following commands to get it downloaded and set up:
.. code-block:: bash
- # Get the file
- curl -o docker https://get.docker.io/builds/Darwin/x86_64/docker-latest
-
- # Mark it executable
- chmod +x docker
+ # Get the docker client file
+ DIR=$(mktemp -d ${TMPDIR:-/tmp}/dockerdl.XXXXXXX) && \
+ curl -f -o $DIR/ld.tgz https://get.docker.io/builds/Darwin/x86_64/docker-latest.tgz && \
+ gunzip $DIR/ld.tgz && \
+ tar xvf $DIR/ld.tar -C $DIR/ && \
+ cp $DIR/usr/local/bin/docker ./docker
# Set the environment variable for the docker daemon
export DOCKER_HOST=tcp://127.0.0.1:4243
diff --git a/docs/sources/installation/rhel.rst b/docs/sources/installation/rhel.rst
index 7930da6309..151fba6f1f 100644
--- a/docs/sources/installation/rhel.rst
+++ b/docs/sources/installation/rhel.rst
@@ -22,6 +22,9 @@ for the RHEL distribution.
Also note that due to the current Docker limitations, Docker is able to run
only on the **64 bit** architecture.
+You will need `RHEL 6.5`_ or higher, with a RHEL 6 kernel version 2.6.32-431 or higher
+as this has specific kernel fixes to allow Docker to work.
+
Installation
------------
@@ -78,4 +81,5 @@ If you have any issues - please report them directly in the `Red Hat Bugzilla fo
.. _EPEL installation instructions: https://fedoraproject.org/wiki/EPEL#How_can_I_use_these_extra_packages.3F
.. _Red Hat Bugzilla for docker-io component : https://bugzilla.redhat.com/enter_bug.cgi?product=Fedora%20EPEL&component=docker-io
.. _bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1043676
+.. _RHEL 6.5: https://access.redhat.com/site/articles/3078#RHEL6
diff --git a/docs/sources/installation/softlayer.rst b/docs/sources/installation/softlayer.rst
new file mode 100644
index 0000000000..0fe3d6df5a
--- /dev/null
+++ b/docs/sources/installation/softlayer.rst
@@ -0,0 +1,25 @@
+:title: Installation on IBM SoftLayer
+:description: Please note this project is currently under heavy development. It should not be used in production.
+:keywords: IBM SoftLayer, virtualization, cloud, docker, documentation, installation
+
+IBM SoftLayer
+=============
+
+.. include:: install_header.inc
+
+IBM SoftLayer QuickStart
+-------------------------
+
+1. Create an `IBM SoftLayer account <https://www.softlayer.com/cloudlayer/>`_.
+2. Log in to the `SoftLayer Console <https://control.softlayer.com/devices/>`_.
+3. Go to `Order Hourly Computing Instance Wizard <https://manage.softlayer.com/Sales/orderHourlyComputingInstance>`_ on your SoftLayer Console.
+4. Create a new *CloudLayer Computing Instance* (CCI) using the default values for all the fields and choose:
+
+- *First Available* as ``Datacenter`` and
+- *Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)* as ``Operating System``.
+
+5. Click the *Continue Your Order* button at the bottom right and select *Go to checkout*.
+6. Insert the required *User Metadata* and place the order.
+7. Then continue with the :ref:`ubuntu_linux` instructions.
+
+Continue with the :ref:`hello_world` example. \ No newline at end of file
diff --git a/docs/sources/installation/ubuntulinux.rst b/docs/sources/installation/ubuntulinux.rst
index 416d56765e..51f303e88a 100644
--- a/docs/sources/installation/ubuntulinux.rst
+++ b/docs/sources/installation/ubuntulinux.rst
@@ -64,15 +64,26 @@ Installation
an earlier version, you will need to follow them again.
Docker is available as a Debian package, which makes installation
-easy. **See the :ref:`installmirrors` section below if you are not in
+easy. **See the** :ref:`installmirrors` **section below if you are not in
the United States.** Other sources of the Debian packages may be
faster for you to install.
-First add the Docker repository key to your local keychain.
+First, check that your APT system can deal with ``https`` URLs:
+the file ``/usr/lib/apt/methods/https`` should exist. If it doesn't,
+you need to install the package ``apt-transport-https``.
+
+.. code-block:: bash
+
+ [ -e /usr/lib/apt/methods/https ] || {
+ apt-get update
+ apt-get install apt-transport-https
+ }
+
+Then, add the Docker repository key to your local keychain.
.. code-block:: bash
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.
@@ -82,7 +93,7 @@ continue installation.*
.. code-block:: bash
- sudo sh -c "echo deb http://get.docker.io/ubuntu docker main\
+ sudo sh -c "echo deb https://get.docker.io/ubuntu docker main\
> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install lxc-docker
@@ -144,7 +155,7 @@ First add the Docker repository key to your local keychain.
.. code-block:: bash
- sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.
@@ -186,7 +197,7 @@ client commands. As of 0.9.0, you can specify that a group other than ``docker``
should own the Unix socket with the ``-G`` option.
.. warning:: The *docker* group (or the group specified with ``-G``) is
- root-equivalent.
+   root-equivalent; see :ref:`dockersecurity_daemon` for details.
**Example:**
@@ -282,8 +293,6 @@ incoming connections on the Docker port (default 4243):
sudo ufw allow 4243/tcp
-.. _installmirrors:
-
Docker and local DNS server warnings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -309,9 +318,9 @@ daemon for the containers:
sudo nano /etc/default/docker
---
# Add:
- DOCKER_OPTS="-dns 8.8.8.8"
+ DOCKER_OPTS="--dns 8.8.8.8"
# 8.8.8.8 could be replaced with a local DNS server, such as 192.168.1.1
- # multiple DNS servers can be specified: -dns 8.8.8.8 -dns 192.168.1.1
+ # multiple DNS servers can be specified: --dns 8.8.8.8 --dns 192.168.1.1
The Docker daemon has to be restarted:
@@ -342,6 +351,8 @@ NetworkManager and Docker need to be restarted afterwards:
.. warning:: This might make DNS resolution slower on some networks.
+.. _installmirrors:
+
Mirrors
^^^^^^^
diff --git a/docs/sources/reference/api/docker_io_accounts_api.rst b/docs/sources/reference/api/docker_io_accounts_api.rst
index 7976f1fddf..dc5c44d4a8 100644
--- a/docs/sources/reference/api/docker_io_accounts_api.rst
+++ b/docs/sources/reference/api/docker_io_accounts_api.rst
@@ -49,14 +49,14 @@ docker.io Accounts API
{
"id": 2,
"username": "janedoe",
- "url": "",
+ "url": "https://www.docker.io/api/v1.1/users/janedoe/",
"date_joined": "2014-02-12T17:58:01.431312Z",
"type": "User",
"full_name": "Jane Doe",
"location": "San Francisco, CA",
"company": "Success, Inc.",
"profile_url": "https://docker.io/",
- "gravatar_email": "jane.doe+gravatar@example.com",
+          "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm",
"email": "jane.doe@example.com",
"is_active": true
}
@@ -111,14 +111,14 @@ docker.io Accounts API
{
"id": 2,
"username": "janedoe",
- "url": "",
+ "url": "https://www.docker.io/api/v1.1/users/janedoe/",
"date_joined": "2014-02-12T17:58:01.431312Z",
"type": "User",
"full_name": "Jane Doe",
"location": "Private Island",
"company": "Retired",
"profile_url": "http://janedoe.com/",
- "gravatar_email": "jane.doe+gravatar@example.com",
+          "gravatar_url": "https://secure.gravatar.com/avatar/0212b397124be4acd4e7dea9aa357.jpg?s=80&r=g&d=mm",
"email": "jane.doe@example.com",
"is_active": true
}
diff --git a/docs/sources/reference/api/docker_remote_api.rst b/docs/sources/reference/api/docker_remote_api.rst
index e1071bf085..7fa8468f3c 100644
--- a/docs/sources/reference/api/docker_remote_api.rst
+++ b/docs/sources/reference/api/docker_remote_api.rst
@@ -22,6 +22,8 @@ Docker Remote API
- Since API version 1.2, the auth configuration is now handled client
side, so the client has to send the authConfig as POST in
/images/(name)/push
+- authConfig, set as the ``X-Registry-Auth`` header, is currently a Base64 encoded (json) string with credentials:
+ ``{'username': string, 'password': string, 'email': string, 'serveraddress' : string}``
2. Versions
===========
@@ -50,6 +52,7 @@ What's new
**New!** You can now use the force parameter to force delete of an image, even if it's
tagged in multiple repositories.
+   **New!** You can now use the noprune parameter to prevent the deletion of parent images.
.. http:delete:: /containers/(id)
@@ -203,7 +206,7 @@ What's new
.. http:get:: /images/viz
- This URI no longer exists. The ``images -viz`` output is now generated in
+ This URI no longer exists. The ``images --viz`` output is now generated in
the client, using the ``/images/json`` data.
v1.6
diff --git a/docs/sources/reference/api/docker_remote_api_v1.10.rst b/docs/sources/reference/api/docker_remote_api_v1.10.rst
index ed63525e7e..98827c9eb2 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.10.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.10.rst
@@ -136,6 +136,7 @@ Create a container
},
"VolumesFrom":"",
"WorkingDir":"",
+ "DisableNetwork": false,
"ExposedPorts":{
"22/tcp": {}
}
@@ -931,6 +932,7 @@ Remove an image
]
:query force: 1/True/true or 0/False/false, default false
+ :query noprune: 1/True/true or 0/False/false, default false
:statuscode 200: no error
:statuscode 404: no such image
:statuscode 409: conflict
@@ -1276,8 +1278,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
- docker -d -H="192.168.1.9:4243" -api-enable-cors
+ docker -d -H="192.168.1.9:4243" --api-enable-cors
diff --git a/docs/sources/reference/api/docker_remote_api_v1.2.rst b/docs/sources/reference/api/docker_remote_api_v1.2.rst
index 1ae2db696f..80f76a3de9 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.2.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.2.rst
@@ -1045,7 +1045,7 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
- docker -d -H="tcp://192.168.1.9:4243" -api-enable-cors
+ docker -d -H="tcp://192.168.1.9:4243" --api-enable-cors
diff --git a/docs/sources/reference/api/docker_remote_api_v1.3.rst b/docs/sources/reference/api/docker_remote_api_v1.3.rst
index cb4c54642d..2b17a37a4d 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.3.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.3.rst
@@ -1124,7 +1124,7 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
- docker -d -H="192.168.1.9:4243" -api-enable-cors
+ docker -d -H="192.168.1.9:4243" --api-enable-cors
diff --git a/docs/sources/reference/api/docker_remote_api_v1.4.rst b/docs/sources/reference/api/docker_remote_api_v1.4.rst
index 39c8839653..ff5aaa7a74 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.4.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.4.rst
@@ -1168,9 +1168,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
- docker -d -H="192.168.1.9:4243" -api-enable-cors
+ docker -d -H="192.168.1.9:4243" --api-enable-cors
diff --git a/docs/sources/reference/api/docker_remote_api_v1.5.rst b/docs/sources/reference/api/docker_remote_api_v1.5.rst
index 0cdbaf747a..d4440e4423 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.5.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.5.rst
@@ -1137,8 +1137,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
- docker -d -H="192.168.1.9:4243" -api-enable-cors
+ docker -d -H="192.168.1.9:4243" --api-enable-cors
diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.rst b/docs/sources/reference/api/docker_remote_api_v1.6.rst
index a9ddfb2c13..cfc37084b8 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.6.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.6.rst
@@ -1274,9 +1274,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
- docker -d -H="192.168.1.9:4243" -api-enable-cors
+ docker -d -H="192.168.1.9:4243" --api-enable-cors
diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.rst b/docs/sources/reference/api/docker_remote_api_v1.7.rst
index cacd7ab6f7..1bafaddfc5 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.7.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.7.rst
@@ -1254,9 +1254,9 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
- docker -d -H="192.168.1.9:4243" -api-enable-cors
+ docker -d -H="192.168.1.9:4243" --api-enable-cors
diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.rst b/docs/sources/reference/api/docker_remote_api_v1.8.rst
index b752f2f8a4..16492dde76 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.8.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.8.rst
@@ -1287,8 +1287,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
- docker -d -H="192.168.1.9:4243" -api-enable-cors
+ docker -d -H="192.168.1.9:4243" --api-enable-cors
diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.rst b/docs/sources/reference/api/docker_remote_api_v1.9.rst
index 9430ff370d..27812457bb 100644
--- a/docs/sources/reference/api/docker_remote_api_v1.9.rst
+++ b/docs/sources/reference/api/docker_remote_api_v1.9.rst
@@ -1288,8 +1288,8 @@ In this version of the API, /attach, uses hijacking to transport stdin, stdout a
3.3 CORS Requests
-----------------
-To enable cross origin requests to the remote api add the flag "-api-enable-cors" when running docker in daemon mode.
+To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode.
.. code-block:: bash
- docker -d -H="192.168.1.9:4243" -api-enable-cors
+ docker -d -H="192.168.1.9:4243" --api-enable-cors
diff --git a/docs/sources/reference/api/remote_api_client_libraries.rst b/docs/sources/reference/api/remote_api_client_libraries.rst
index 9bab343bf5..4a445db36f 100644
--- a/docs/sources/reference/api/remote_api_client_libraries.rst
+++ b/docs/sources/reference/api/remote_api_client_libraries.rst
@@ -41,7 +41,13 @@ and we will add the libraries here.
+----------------------+----------------+--------------------------------------------+----------+
| Go | go-dockerclient| https://github.com/fsouza/go-dockerclient | Active |
+----------------------+----------------+--------------------------------------------+----------+
+| Go | dockerclient | https://github.com/samalba/dockerclient | Active |
++----------------------+----------------+--------------------------------------------+----------+
| PHP | Alvine | http://pear.alvine.io/ (alpha) | Active |
+----------------------+----------------+--------------------------------------------+----------+
| PHP | Docker-PHP | http://stage1.github.io/docker-php/ | Active |
+----------------------+----------------+--------------------------------------------+----------+
+| Perl | Net::Docker | https://metacpan.org/pod/Net::Docker | Active |
++----------------------+----------------+--------------------------------------------+----------+
+| Perl | Eixo::Docker | https://github.com/alambike/eixo-docker | Active |
++----------------------+----------------+--------------------------------------------+----------+
diff --git a/docs/sources/reference/builder.rst b/docs/sources/reference/builder.rst
index 9f7a816801..e8897d1b09 100644
--- a/docs/sources/reference/builder.rst
+++ b/docs/sources/reference/builder.rst
@@ -13,12 +13,10 @@ Dockerfile Reference
to create an image. Executing ``docker build`` will run your steps and
commit them along the way, giving you a final image.
-.. contents:: Table of Contents
-
.. _dockerfile_usage:
-1. Usage
-========
+Usage
+=====
To :ref:`build <cli_build>` an image from a source repository, create
a description file called ``Dockerfile`` at the root of your
@@ -49,7 +47,7 @@ to be created - so ``RUN cd /tmp`` will not have any effect on the next
instructions.
Whenever possible, Docker will re-use the intermediate images,
-accelerating ``docker build`` significantly (indicated by ``Using cache``:
+accelerating ``docker build`` significantly (indicated by ``Using cache``):
.. code-block:: bash
@@ -71,8 +69,8 @@ When you're done with your build, you're ready to look into
.. _dockerfile_format:
-2. Format
-=========
+Format
+======
Here is the format of the Dockerfile:
@@ -99,16 +97,14 @@ allows statements like:
.. _dockerfile_instructions:
-3. Instructions
-===============
Here is the set of instructions you can use in a ``Dockerfile`` for
building images.
.. _dockerfile_from:
-3.1 FROM
---------
+``FROM``
+========
``FROM <image>``
@@ -134,8 +130,8 @@ assumed. If the used tag does not exist, an error will be returned.
.. _dockerfile_maintainer:
-3.2 MAINTAINER
---------------
+``MAINTAINER``
+==============
``MAINTAINER <name>``
@@ -144,8 +140,8 @@ the generated images.
.. _dockerfile_run:
-3.3 RUN
--------
+``RUN``
+=======
RUN has 2 forms:
@@ -174,8 +170,8 @@ Known Issues (RUN)
.. _dockerfile_cmd:
-3.4 CMD
--------
+``CMD``
+=======
CMD has three forms:
@@ -192,9 +188,7 @@ omit the executable, in which case you must specify an ENTRYPOINT as
well.
When used in the shell or exec formats, the ``CMD`` instruction sets
-the command to be executed when running the image. This is
-functionally equivalent to running ``docker commit -run '{"Cmd":
-<command>}'`` outside the builder.
+the command to be executed when running the image.
If you use the *shell* form of the CMD, then the ``<command>`` will
execute in ``/bin/sh -c``:
@@ -229,20 +223,20 @@ override the default specified in CMD.
.. _dockerfile_expose:
-3.5 EXPOSE
-----------
+``EXPOSE``
+==========
``EXPOSE <port> [<port>...]``
-The ``EXPOSE`` instruction exposes ports for use within links. This is
-functionally equivalent to running ``docker commit -run '{"PortSpecs":
-["<port>", "<port2>"]}'`` outside the builder. Refer to
-:ref:`port_redirection` for detailed information.
+The ``EXPOSE`` instruction informs Docker that the container will listen
+on the specified network ports at runtime. Docker uses this information
+to interconnect containers using links (see :ref:`links <working_with_links_names>`),
+and to setup port redirection on the host system (see :ref:`port_redirection`).
.. _dockerfile_env:
-3.6 ENV
--------
+``ENV``
+=======
``ENV <key> <value>``
@@ -262,8 +256,8 @@ from the resulting image. You can view the values using ``docker inspect``, and
.. _dockerfile_add:
-3.7 ADD
--------
+``ADD``
+=======
``ADD <src> <dest>``
@@ -329,8 +323,8 @@ The copy obeys the following rules:
.. _dockerfile_entrypoint:
-3.8 ENTRYPOINT
---------------
+``ENTRYPOINT``
+==============
ENTRYPOINT has two forms:
@@ -378,8 +372,8 @@ this optional but default, you could use a CMD:
.. _dockerfile_volume:
-3.9 VOLUME
-----------
+``VOLUME``
+==========
``VOLUME ["/data"]``
@@ -389,8 +383,8 @@ and mounting instructions via docker client, refer to :ref:`volume_def` document
.. _dockerfile_user:
-3.10 USER
----------
+``USER``
+========
``USER daemon``
@@ -399,18 +393,27 @@ the image.
.. _dockerfile_workdir:
-3.11 WORKDIR
-------------
+``WORKDIR``
+===========
``WORKDIR /path/to/workdir``
The ``WORKDIR`` instruction sets the working directory for the ``RUN``, ``CMD`` and
``ENTRYPOINT`` Dockerfile commands that follow it.
-It can be used multiple times in the one Dockerfile.
+It can be used multiple times in the one Dockerfile. If a relative path is
+provided, it will be relative to the path of the previous ``WORKDIR``
+instruction. For example:
+
+ WORKDIR /a
+ WORKDIR b
+ WORKDIR c
+ RUN pwd
+
+The output of the final ``pwd`` command in this Dockerfile would be ``/a/b/c``.
-3.11 ONBUILD
-------------
+``ONBUILD``
+===========
``ONBUILD [INSTRUCTION]``
@@ -471,7 +474,7 @@ For example you might add something like this:
.. _dockerfile_examples:
-4. Dockerfile Examples
+Dockerfile Examples
======================
.. code-block:: bash
@@ -481,7 +484,7 @@ For example you might add something like this:
# VERSION 0.0.1
FROM ubuntu
- MAINTAINER Guillaume J. Charmes <guillaume@dotcloud.com>
+ MAINTAINER Guillaume J. Charmes <guillaume@docker.com>
# make sure the package repository is up to date
RUN echo "deb http://archive.ubuntu.com/ubuntu precise main universe" > /etc/apt/sources.list
diff --git a/docs/sources/reference/commandline/cli.rst b/docs/sources/reference/commandline/cli.rst
index 2e49cd5ca5..c0df5f8175 100644
--- a/docs/sources/reference/commandline/cli.rst
+++ b/docs/sources/reference/commandline/cli.rst
@@ -52,7 +52,7 @@ Sometimes this can use a more complex value string, as for ``-v``::
Strings and Integers
~~~~~~~~~~~~~~~~~~~~
-Options like ``-name=""`` expect a string, and they can only be
+Options like ``--name=""`` expect a string, and they can only be
specified once. Options like ``-c=0`` expect an integer, and they can
only be specified once.
@@ -74,36 +74,45 @@ Commands
-G, --group="docker": Group to assign the unix socket specified by -H when running in daemon mode; use '' (the empty string) to disable setting of a group
--api-enable-cors=false: Enable CORS headers in the remote API
-b, --bridge="": Attach containers to a pre-existing network bridge; use 'none' to disable container networking
- --bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b
+ -bip="": Use this CIDR notation address for the network bridge's IP, not compatible with -b
-d, --daemon=false: Enable daemon mode
--dns=[]: Force docker to use specific DNS servers
+ --dns-search=[]: Force Docker to use specific DNS search domains
-g, --graph="/var/lib/docker": Path to use as the root of the docker runtime
--icc=true: Enable inter-container communication
--ip="0.0.0.0": Default IP address to use when binding container ports
- --iptables=true: Disable docker's addition of iptables rules
+ --ip-forward=true: Enable net.ipv4.ip_forward
+ --iptables=true: Enable Docker's addition of iptables rules
-p, --pidfile="/var/run/docker.pid": Path to use for daemon PID file
-r, --restart=true: Restart previously running containers
-s, --storage-driver="": Force the docker runtime to use a specific storage driver
-e, --exec-driver="native": Force the docker runtime to use a specific exec driver
-v, --version=false: Print version information and quit
+ --tls=false: Use TLS; implied by tls-verify flags
+ --tlscacert="~/.docker/ca.pem": Trust only remotes providing a certificate signed by the CA given here
+ --tlscert="~/.docker/cert.pem": Path to TLS certificate file
+ --tlskey="~/.docker/key.pem": Path to TLS key file
+ --tlsverify=false: Use TLS and verify the remote (daemon: verify client, client: verify daemon)
--mtu=0: Set the containers network MTU; if no value is provided: default to the default route MTU or 1500 if no default route is available
-The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the
+The Docker daemon is the persistent process that manages containers. Docker uses the same binary for both the
daemon and client. To run the daemon you provide the ``-d`` flag.
To force Docker to use devicemapper as the storage driver, use ``docker -d -s devicemapper``.
-To set the DNS server for all Docker containers, use ``docker -d -dns 8.8.8.8``.
+To set the DNS server for all Docker containers, use ``docker -d --dns 8.8.8.8``.
+
+To set the DNS search domain for all Docker containers, use ``docker -d --dns-search example.com``.
To run the daemon with debug output, use ``docker -d -D``.
To use lxc as the execution driver, use ``docker -d -e lxc``.
The docker client will also honor the ``DOCKER_HOST`` environment variable to set
-the ``-H`` flag for the client.
+the ``-H`` flag for the client.
::
-
+
docker -H tcp://0.0.0.0:4243 ps
# or
export DOCKER_HOST="tcp://0.0.0.0:4243"
@@ -141,7 +150,7 @@ TMPDIR and the data directory can be set like this:
You can detach from the container again (and leave it running) with
``CTRL-c`` (for a quiet exit) or ``CTRL-\`` to get a stacktrace of
-the Docker client when it quits. When you detach from the container's
+the Docker client when it quits. When you detach from the container's
process the exit code will be returned to the client.
To stop a container, use ``docker stop``.
@@ -202,12 +211,16 @@ Examples:
--no-cache: Do not use the cache when building the image.
--rm=true: Remove intermediate containers after a successful build
-The files at ``PATH`` or ``URL`` are called the "context" of the build. The
-build process may refer to any of the files in the context, for example when
-using an :ref:`ADD <dockerfile_add>` instruction. When a single ``Dockerfile``
-is given as ``URL``, then no context is set. When a Git repository is set as
-``URL``, then the repository is used as the context. Git repositories are
-cloned with their submodules (`git clone --recursive`).
+The files at ``PATH`` or ``URL`` are called the "context" of the build.
+The build process may refer to any of the files in the context, for example when
+using an :ref:`ADD <dockerfile_add>` instruction.
+When a single ``Dockerfile`` is given as ``URL``, then no context is set.
+
+When a Git repository is set as ``URL``, then the repository is used as the context.
+The Git repository is cloned with its submodules (`git clone --recursive`).
+A fresh git clone occurs in a temporary directory on your local host, and then this
+is sent to the Docker daemon as the context.
+This way, your local user credentials, VPNs, etc. can be used to access private repositories.
.. _cli_build_examples:
@@ -303,8 +316,6 @@ by using the ``git://`` schema.
-m, --message="": Commit message
-a, --author="": Author (eg. "John Hannibal Smith <hannibal@a-team.com>"
- --run="": Configuration to be applied when the image is launched with `docker run`.
- (ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')
.. _cli_commit_examples:
@@ -315,74 +326,14 @@ Commit an existing container
$ sudo docker ps
ID IMAGE COMMAND CREATED STATUS PORTS
- c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours
- 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours
+ c3f279d17e0a ubuntu:12.04 /bin/bash 7 days ago Up 25 hours
+ 197387f1b436 ubuntu:12.04 /bin/bash 7 days ago Up 25 hours
$ docker commit c3f279d17e0a SvenDowideit/testimage:version3
f5283438590d
$ docker images | head
REPOSITORY TAG ID CREATED VIRTUAL SIZE
SvenDowideit/testimage version3 f5283438590d 16 seconds ago 335.7 MB
-
-Change the command that a container runs
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Sometimes you have an application container running just a service and you need
-to make a quick change and then change it back.
-
-In this example, we run a container with ``ls`` and then change the image to
-run ``ls /etc``.
-
-.. code-block:: bash
-
- $ docker run -t -name test ubuntu ls
- bin boot dev etc home lib lib64 media mnt opt proc root run sbin selinux srv sys tmp usr var
- $ docker commit -run='{"Cmd": ["ls","/etc"]}' test test2
- 933d16de9e70005304c1717b5c6f2f39d6fd50752834c6f34a155c70790011eb
- $ docker run -t test2
- adduser.conf gshadow login.defs rc0.d
- alternatives gshadow- logrotate.d rc1.d
- apt host.conf lsb-base rc2.d
- ...
-
-Full -run example
-.................
-
-The ``--run`` JSON hash changes the ``Config`` section when running ``docker inspect CONTAINERID``
-or ``config`` when running ``docker inspect IMAGEID``.
-(Multiline is okay within a single quote ``'``)
-
-.. code-block:: bash
-
- $ sudo docker commit -run='
- {
- "Entrypoint" : null,
- "Privileged" : false,
- "User" : "",
- "VolumesFrom" : "",
- "Cmd" : ["cat", "-e", "/etc/resolv.conf"],
- "Dns" : ["8.8.8.8", "8.8.4.4"],
- "MemorySwap" : 0,
- "AttachStdin" : false,
- "AttachStderr" : false,
- "CpuShares" : 0,
- "OpenStdin" : false,
- "Volumes" : null,
- "Hostname" : "122612f45831",
- "PortSpecs" : ["22", "80", "443"],
- "Image" : "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
- "Tty" : false,
- "Env" : [
- "HOME=/",
- "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- ],
- "StdinOnce" : false,
- "Domainname" : "",
- "WorkingDir" : "/",
- "NetworkDisabled" : false,
- "Memory" : 0,
- "AttachStdout" : false
- }' $CONTAINER_ID
.. _cli_cp:
@@ -486,16 +437,16 @@ Show events in the past from a specified time
.. code-block:: bash
- $ sudo docker events -since 1378216169
+ $ sudo docker events --since 1378216169
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
- $ sudo docker events -since '2013-09-03'
+ $ sudo docker events --since '2013-09-03'
[2013-09-03 15:49:26 +0200 CEST] 4386fb97867d: (from 12de384bfb10) start
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
- $ sudo docker events -since '2013-09-03 15:49:29 +0200 CEST'
+ $ sudo docker events --since '2013-09-03 15:49:29 +0200 CEST'
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) die
[2013-09-03 15:49:29 +0200 CEST] 4386fb97867d: (from 12de384bfb10) stop
@@ -535,35 +486,14 @@ To see how the ``docker:latest`` image was built:
.. code-block:: bash
$ docker history docker
- ID CREATED CREATED BY
- docker:latest 19 hours ago /bin/sh -c #(nop) ADD . in /go/src/github.com/dotcloud/docker
- cf5f2467662d 2 weeks ago /bin/sh -c #(nop) ENTRYPOINT ["hack/dind"]
- 3538fbe372bf 2 weeks ago /bin/sh -c #(nop) WORKDIR /go/src/github.com/dotcloud/docker
- 7450f65072e5 2 weeks ago /bin/sh -c #(nop) VOLUME /var/lib/docker
- b79d62b97328 2 weeks ago /bin/sh -c apt-get install -y -q lxc
- 36714852a550 2 weeks ago /bin/sh -c apt-get install -y -q iptables
- 8c4c706df1d6 2 weeks ago /bin/sh -c /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEYn' > /.s3cfg
- b89989433c48 2 weeks ago /bin/sh -c pip install python-magic
- a23e640d85b5 2 weeks ago /bin/sh -c pip install s3cmd
- 41f54fec7e79 2 weeks ago /bin/sh -c apt-get install -y -q python-pip
- d9bc04add907 2 weeks ago /bin/sh -c apt-get install -y -q reprepro dpkg-sig
- e74f4760fa70 2 weeks ago /bin/sh -c gem install --no-rdoc --no-ri fpm
- 1e43224726eb 2 weeks ago /bin/sh -c apt-get install -y -q ruby1.9.3 rubygems libffi-dev
- 460953ae9d7f 2 weeks ago /bin/sh -c #(nop) ENV GOPATH=/go:/go/src/github.com/dotcloud/docker/vendor
- 8b63eb1d666b 2 weeks ago /bin/sh -c #(nop) ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/goroot/bin
- 3087f3bcedf2 2 weeks ago /bin/sh -c #(nop) ENV GOROOT=/goroot
- 635840d198e5 2 weeks ago /bin/sh -c cd /goroot/src && ./make.bash
- 439f4a0592ba 2 weeks ago /bin/sh -c curl -s https://go.googlecode.com/files/go1.1.2.src.tar.gz | tar -v -C / -xz && mv /go /goroot
- 13967ed36e93 2 weeks ago /bin/sh -c #(nop) ENV CGO_ENABLED=0
- bf7424458437 2 weeks ago /bin/sh -c apt-get install -y -q build-essential
- a89ec997c3bf 2 weeks ago /bin/sh -c apt-get install -y -q mercurial
- b9f165c6e749 2 weeks ago /bin/sh -c apt-get install -y -q git
- 17a64374afa7 2 weeks ago /bin/sh -c apt-get install -y -q curl
- d5e85dc5b1d8 2 weeks ago /bin/sh -c apt-get update
- 13e642467c11 2 weeks ago /bin/sh -c echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > /etc/apt/sources.list
- ae6dde92a94e 2 weeks ago /bin/sh -c #(nop) MAINTAINER Solomon Hykes <solomon@dotcloud.com>
- ubuntu:12.04 6 months ago
-
+ IMAGE CREATED CREATED BY SIZE
+ 3e23a5875458790b7a806f95f7ec0d0b2a5c1659bfc899c89f939f6d5b8f7094 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B
+ 8578938dd17054dce7993d21de79e96a037400e8d28e15e7290fea4f65128a36 8 days ago /bin/sh -c dpkg-reconfigure locales && locale-gen C.UTF-8 && /usr/sbin/update-locale LANG=C.UTF-8 1.245 MB
+ be51b77efb42f67a5e96437b3e102f81e0a1399038f77bf28cea0ed23a65cf60 8 days ago /bin/sh -c apt-get update && apt-get install -y git libxml2-dev python build-essential make gcc python-dev locales python-pip 338.3 MB
+ 4b137612be55ca69776c7f30c2d2dd0aa2e7d72059820abf3e25b629f887a084 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB
+ 750d58736b4b6cc0f9a9abe8f258cef269e3e9dceced1146503522be9f985ada 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -t jessie.tar.xz jessie http://http.debian.net/debian 0 B
+ 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 9 months ago 0 B
+
.. _cli_images:
``images``
@@ -575,11 +505,16 @@ To see how the ``docker:latest`` image was built:
List images
- -a, --all=false: Show all images (by default filter out the intermediate images used to build)
+ -a, --all=false: Show all images (by default filter out the intermediate image layers)
--no-trunc=false: Don't truncate output
-q, --quiet=false: Only show numeric IDs
- --tree=false: Output graph in tree format
- --viz=false: Output graph in graphviz format
+
+The default ``docker images`` will show all top level images, their repository
+and tags, and their virtual size.
+
+Docker images have intermediate layers that increase reusability, decrease
+disk usage, and speed up ``docker build`` by allowing each step to be cached.
+These intermediate layers are not shown by default.
Listing the most recently created images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -615,46 +550,6 @@ Listing the full length image IDs
tryout latest 2629d1fa0b81b222fca63371ca16cbf6a0772d07759ff80e8d1369b926940074 23 hours ago 131.5 MB
<none> <none> 5ed6274db6ceb2397844896966ea239290555e74ef307030ebb01ff91b1914df 24 hours ago 1.089 GB
-Displaying images visually
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. code-block:: bash
-
- $ sudo docker images --viz | dot -Tpng -o docker.png
-
-.. image:: docker_images.gif
- :alt: Example inheritance graph of Docker images.
-
-
-Displaying image hierarchy
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. code-block:: bash
-
- $ sudo docker images --tree
-
- ├─8dbd9e392a96 Size: 131.5 MB (virtual 131.5 MB) Tags: ubuntu:12.04,ubuntu:latest,ubuntu:precise
- └─27cf78414709 Size: 180.1 MB (virtual 180.1 MB)
- └─b750fe79269d Size: 24.65 kB (virtual 180.1 MB) Tags: ubuntu:12.10,ubuntu:quantal
- ├─f98de3b610d5 Size: 12.29 kB (virtual 180.1 MB)
- │ └─7da80deb7dbf Size: 16.38 kB (virtual 180.1 MB)
- │ └─65ed2fee0a34 Size: 20.66 kB (virtual 180.2 MB)
- │ └─a2b9ea53dddc Size: 819.7 MB (virtual 999.8 MB)
- │ └─a29b932eaba8 Size: 28.67 kB (virtual 999.9 MB)
- │ └─e270a44f124d Size: 12.29 kB (virtual 999.9 MB) Tags: progrium/buildstep:latest
- └─17e74ac162d8 Size: 53.93 kB (virtual 180.2 MB)
- └─339a3f56b760 Size: 24.65 kB (virtual 180.2 MB)
- └─904fcc40e34d Size: 96.7 MB (virtual 276.9 MB)
- └─b1b0235328dd Size: 363.3 MB (virtual 640.2 MB)
- └─7cb05d1acb3b Size: 20.48 kB (virtual 640.2 MB)
- └─47bf6f34832d Size: 20.48 kB (virtual 640.2 MB)
- └─f165104e82ed Size: 12.29 kB (virtual 640.2 MB)
- └─d9cf85a47b7e Size: 1.911 MB (virtual 642.2 MB)
- └─3ee562df86ca Size: 17.07 kB (virtual 642.2 MB)
- └─b05fc2d00e4a Size: 24.96 kB (virtual 642.2 MB)
- └─c96a99614930 Size: 12.29 kB (virtual 642.2 MB)
- └─a6a357a48c49 Size: 12.29 kB (virtual 642.2 MB) Tags: ndj/mongodb:latest
-
.. _cli_import:
``import``
@@ -664,7 +559,7 @@ Displaying image hierarchy
Usage: docker import URL|- [REPOSITORY[:TAG]]
- Create an empty filesystem image and import the contents of the tarball
+ Create an empty filesystem image and import the contents of the tarball
(.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.
At this time, the URL must start with ``http`` and point to a single
@@ -731,34 +626,6 @@ preserved.
WARNING: No swap limit support
-.. _cli_insert:
-
-``insert``
-----------
-
-::
-
- Usage: docker insert IMAGE URL PATH
-
- Insert a file from URL in the IMAGE at PATH
-
-Use the specified ``IMAGE`` as the parent for a new image which adds a
-:ref:`layer <layer_def>` containing the new file. The ``insert`` command does
-not modify the original image, and the new image has the contents of the parent
-image, plus the new file.
-
-
-Examples
-~~~~~~~~
-
-Insert file from GitHub
-.......................
-
-.. code-block:: bash
-
- $ sudo docker insert 8283e18b24bc https://raw.github.com/metalivedev/django/master/postinstall /tmp/postinstall.sh
- 06fd35556d7b
-
.. _cli_inspect:
``inspect``
@@ -799,7 +666,7 @@ text output:
.. code-block:: bash
- $ sudo docker inspect -format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
+ $ sudo docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' $INSTANCE_ID
Find a Specific Port Mapping
............................
@@ -814,7 +681,7 @@ we ask for the ``HostPort`` field to get the public address.
.. code-block:: bash
- $ sudo docker inspect -format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
+ $ sudo docker inspect --format='{{(index (index .NetworkSettings.Ports "8787/tcp") 0).HostPort}}' $INSTANCE_ID
Get config
..........
@@ -826,7 +693,7 @@ to convert config object into JSON
.. code-block:: bash
- $ sudo docker inspect -format='{{json .config}}' $INSTANCE_ID
+ $ sudo docker inspect --format='{{json .config}}' $INSTANCE_ID
.. _cli_kill:
@@ -859,10 +726,32 @@ Known Issues (kill)
::
- Usage: docker load < repository.tar
+ Usage: docker load
+
+ Load an image from a tar archive on STDIN
+
+ -i, --input="": Read from a tar archive file, instead of STDIN
+
+Loads a tarred repository from a file or the standard input stream.
+Restores both images and tags.
+
+.. code-block:: bash
+
+ $ sudo docker images
+ REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
+ $ sudo docker load < busybox.tar
+ $ sudo docker images
+ REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
+ busybox latest 769b9341d937 7 weeks ago 2.489 MB
+ $ sudo docker load --input fedora.tar
+ $ sudo docker images
+ REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
+ busybox latest 769b9341d937 7 weeks ago 2.489 MB
+ fedora rawhide 0d20aec6529d 7 weeks ago 387 MB
+ fedora 20 58394af37342 7 weeks ago 385.5 MB
+ fedora heisenbug 58394af37342 7 weeks ago 385.5 MB
+ fedora latest 58394af37342 7 weeks ago 385.5 MB
- Loads a tarred repository from the standard input stream.
- Restores both images and tags.
.. _cli_login:
@@ -933,8 +822,14 @@ new output from the container's stdout and stderr.
List containers
-a, --all=false: Show all containers. Only running containers are shown by default.
+ --before="": Show only containers created before Id or Name, include non-running ones.
+ -l, --latest=false: Show only the latest created container, include non-running ones.
+ -n=-1: Show n last created containers, include non-running ones.
--no-trunc=false: Don't truncate output
-q, --quiet=false: Only display numeric IDs
+ -s, --size=false: Display sizes, not to be used with -q
+ --since="": Show only containers created since Id or Name, include non-running ones.
+
Running ``docker ps`` showing 2 linked containers.
@@ -942,7 +837,7 @@ Running ``docker ps`` showing 2 linked containers.
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
- 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp
+ 4c01db0b339c ubuntu:12.04 bash 17 seconds ago Up 16 seconds webapp
d7886598dbe2 crosbymichael/redis:latest /redis-server --dir 33 minutes ago Up 33 minutes 6379/tcp redis,webapp/db
fd2645e2e2b5 busybox:latest top 10 days ago Ghost insane_ptolemy
@@ -957,7 +852,7 @@ The last container is marked as a ``Ghost`` container. It is a container that wa
::
- Usage: docker pull NAME
+ Usage: docker pull NAME[:TAG]
Pull an image or a repository from the registry
@@ -969,7 +864,7 @@ The last container is marked as a ``Ghost`` container. It is a container that wa
::
- Usage: docker push NAME
+ Usage: docker push NAME[:TAG]
Push an image or a repository to the registry
@@ -985,6 +880,8 @@ The last container is marked as a ``Ghost`` container. It is a container that wa
Restart a running container
+ -t, --time=10: Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10
+
.. _cli_rm:
``rm``
@@ -997,6 +894,7 @@ The last container is marked as a ``Ghost`` container. It is a container that wa
Remove one or more containers
-l, --link="": Remove the link instead of the actual container
-f, --force=false: Force removal of running container
+ -v, --volumes=false: Remove the volumes associated with the container
Known Issues (rm)
~~~~~~~~~~~~~~~~~
@@ -1047,7 +945,8 @@ containers will not be deleted.
Remove one or more images
-f, --force=false: Force
-
+ --no-prune=false: Do not delete untagged parents
+
Removing tagged images
~~~~~~~~~~~~~~~~~~~~~~
@@ -1096,7 +995,8 @@ image is removed.
--cidfile="": Write the container ID to the file
-d, --detach=false: Detached mode: Run container in the background, print new container id
-e, --env=[]: Set environment variables
- -h, --host="": Container host name
+ --env-file="": Read in a line delimited file of ENV variables
+ -h, --hostname="": Container host name
-i, --interactive=false: Keep stdin open even if not attached
--privileged=false: Give extended privileges to this container
-m, --memory="": Memory limit (format: <number><optional unit>, where unit = b, k, m or g)
@@ -1106,11 +1006,12 @@ image is removed.
-t, --tty=false: Allocate a pseudo-tty
-u, --user="": Username or UID
--dns=[]: Set custom dns servers for the container
+ --dns-search=[]: Set custom DNS search domains for the container
-v, --volume=[]: Create a bind mount to a directory or file with: [host-path]:[container-path]:[rw|ro]. If a directory "container-path" is missing, then docker creates a new volume.
--volumes-from="": Mount all volumes from the given container(s)
--entrypoint="": Overwrite the default entrypoint set by the image
-w, --workdir="": Working directory inside the container
- --lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
+ --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
--sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
--expose=[]: Expose a port from the container without publishing it to your host
--link="": Add link to another container (name:alias)
@@ -1126,12 +1027,12 @@ Once the container is stopped it still exists and can be started back up. See `
The ``docker run`` command can be used in combination with ``docker commit`` to
:ref:`change the command that a container runs <cli_commit_examples>`.
-See :ref:`port_redirection` for more detailed information about the ``--expose``,
-``-p``, ``-P`` and ``--link`` parameters, and :ref:`working_with_links_names` for
+See :ref:`port_redirection` for more detailed information about the ``--expose``,
+``-p``, ``-P`` and ``--link`` parameters, and :ref:`working_with_links_names` for
specific examples using ``--link``.
-Known Issues (run -volumes-from)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Known Issues (run --volumes-from)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* :issue:`2702`: "lxc-start: Permission denied - failed to mount"
could indicate a permissions problem with AppArmor. Please see the
@@ -1158,7 +1059,7 @@ error. Docker will close this file when ``docker run`` exits.
This will *not* work, because by default, most potentially dangerous
kernel capabilities are dropped; including ``cap_sys_admin`` (which is
-required to mount filesystems). However, the ``-privileged`` flag will
+required to mount filesystems). However, the ``--privileged`` flag will
allow it to run:
.. code-block:: bash
@@ -1170,7 +1071,7 @@ allow it to run:
none 1.9G 0 1.9G 0% /mnt
-The ``-privileged`` flag gives *all* capabilities to the container,
+The ``--privileged`` flag gives *all* capabilities to the container,
and it also lifts all the limitations enforced by the ``device``
cgroup controller. In other words, the container can then do almost
everything that the host can do. This flag exists to allow special
@@ -1207,8 +1108,8 @@ starting your container.
$ sudo docker run -t -i -v /var/run/docker.sock:/var/run/docker.sock -v ./static-docker:/usr/bin/docker busybox sh
-By bind-mounting the docker unix socket and statically linked docker binary
-(such as that provided by https://get.docker.io), you give the container
+By bind-mounting the docker unix socket and statically linked docker binary
+(such as that provided by https://get.docker.io), you give the container
the full access to create and manipulate the host's docker daemon.
.. code-block:: bash
@@ -1229,6 +1130,54 @@ explains in detail how to manipulate ports in Docker.
.. code-block:: bash
+ $ sudo docker run -e MYVAR1 --env MYVAR2=foo --env-file ./env.list ubuntu bash
+
+This sets environment variables in the container. For illustration all three
+flags are shown here. Where ``-e``, ``--env`` take an environment variable and
+value, or if no "=" is provided, then that variable's current value is passed
+through (i.e. $MYVAR1 from the host is set to $MYVAR1 in the container). All
+three flags, ``-e``, ``--env`` and ``--env-file`` can be repeated.
+
+Regardless of the order of these three flags, the ``--env-file`` are processed
+first, and then ``-e``/``--env`` flags. This way, the ``-e`` or ``--env`` will
+override variables as needed.
+
+.. code-block:: bash
+
+ $ cat ./env.list
+ TEST_FOO=BAR
+ $ sudo docker run --env TEST_FOO="This is a test" --env-file ./env.list busybox env | grep TEST_FOO
+ TEST_FOO=This is a test
+
+The ``--env-file`` flag takes a filename as an argument and expects each line
+to be in the VAR=VAL format, mimicking the argument passed to ``--env``.
+Comment lines need only be prefixed with ``#``.
+
+An example of a file passed with ``--env-file``
+
+.. code-block:: bash
+
+ $ cat ./env.list
+ TEST_FOO=BAR
+
+ # this is a comment
+ TEST_APP_DEST_HOST=10.10.0.127
+ TEST_APP_DEST_PORT=8888
+
+ # pass through this variable from the caller
+ TEST_PASSTHROUGH
+ $ sudo TEST_PASSTHROUGH=howdy docker run --env-file ./env.list busybox env
+ HOME=/
+ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ HOSTNAME=5198e0745561
+ TEST_FOO=BAR
+ TEST_APP_DEST_HOST=10.10.0.127
+ TEST_APP_DEST_PORT=8888
+ TEST_PASSTHROUGH=howdy
+
+
+.. code-block:: bash
+
$ sudo docker run --name console -t -i ubuntu bash
This will create and run a new container with the container name
@@ -1255,6 +1204,35 @@ ID may be optionally suffixed with ``:ro`` or ``:rw`` to mount the volumes in
read-only or read-write mode, respectively. By default, the volumes are mounted
in the same mode (read write or read only) as the reference container.
+The ``-a`` flag tells ``docker run`` to bind to the container's stdin, stdout
+or stderr. This makes it possible to manipulate the output and input as needed.
+
+.. code-block:: bash
+
+ $ sudo echo "test" | docker run -i -a stdin ubuntu cat -
+
+This pipes data into a container and prints the container's ID by attaching
+only to the container's stdin.
+
+.. code-block:: bash
+
+ $ sudo docker run -a stderr ubuntu echo test
+
+This isn't going to print anything unless there's an error because we've only
+attached to the stderr of the container. The container's logs still store
+what's been written to stderr and stdout.
+
+.. code-block:: bash
+
+ $ sudo cat somefile | docker run -i -a stdin mybuilder dobuild
+
+This is how piping a file into a container could be done for a build.
+The container's ID will be printed after the build is done and the build logs
+could be retrieved using ``docker logs``. This is useful if you need to pipe
+a file or something else into a container and retrieve the container's ID once
+the container has finished running.
+
+
A complete example
..................
@@ -1263,7 +1241,7 @@ A complete example
$ sudo docker run -d --name static static-web-files sh
$ sudo docker run -d --expose=8098 --name riak riakserver
$ sudo docker run -d -m 100m -e DEVELOPMENT=1 -e BRANCH=example-code -v $(pwd):/app/bin:ro --name app appserver
- $ sudo docker run -d -p 1443:443 --dns=dns.dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver
+ $ sudo docker run -d -p 1443:443 --dns=dns.dev.org --dns-search=dev.org -v /var/log/httpd --volumes-from static --link riak --link app -h www.sven.dev.org --name web webserver
$ sudo docker run -t -i --rm --volumes-from web -w /var/log/httpd busybox tail -f access.log
This example shows 5 containers that might be set up to test a web application change:
@@ -1271,8 +1249,8 @@ This example shows 5 containers that might be set up to test a web application c
1. Start a pre-prepared volume image ``static-web-files`` (in the background) that has CSS, image and static HTML in it, (with a ``VOLUME`` instruction in the ``Dockerfile`` to allow the web server to use those files);
2. Start a pre-prepared ``riakserver`` image, give the container name ``riak`` and expose port ``8098`` to any containers that link to it;
3. Start the ``appserver`` image, restricting its memory usage to 100MB, setting two environment variables ``DEVELOPMENT`` and ``BRANCH`` and bind-mounting the current directory (``$(pwd)``) in the container in read-only mode as ``/app/bin``;
-4. Start the ``webserver``, mapping port ``443`` in the container to port ``1443`` on the Docker server, setting the DNS server to ``dns.dev.org``, creating a volume to put the log files into (so we can access it from another container), then importing the files from the volume exposed by the ``static`` container, and linking to all exposed ports from ``riak`` and ``app``. Lastly, we set the hostname to ``web.sven.dev.org`` so its consistent with the pre-generated SSL certificate;
-5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``-rm`` option means that when the container exits, the container's layer is removed.
+4. Start the ``webserver``, mapping port ``443`` in the container to port ``1443`` on the Docker server, setting the DNS server to ``dns.dev.org`` and DNS search domain to ``dev.org``, creating a volume to put the log files into (so we can access it from another container), then importing the files from the volume exposed by the ``static`` container, and linking to all exposed ports from ``riak`` and ``app``. Lastly, we set the hostname to ``web.sven.dev.org`` so it's consistent with the pre-generated SSL certificate;
+5. Finally, we create a container that runs ``tail -f access.log`` using the logs volume from the ``web`` container, setting the workdir to ``/var/log/httpd``. The ``--rm`` option means that when the container exits, the container's layer is removed.
.. _cli_save:
@@ -1282,10 +1260,27 @@ This example shows 5 containers that might be set up to test a web application c
::
- Usage: docker save image > repository.tar
+ Usage: docker save IMAGE
+
+ Save an image to a tar archive (streamed to stdout by default)
+
+ -o, --output="": Write to a file, instead of STDOUT
+
+
+Produces a tarred repository to the standard output stream.
+Contains all parent layers, and all tags + versions, or specified repo:tag.
+
+.. code-block:: bash
+
+ $ sudo docker save busybox > busybox.tar
+ $ ls -sh b.tar
+ 2.7M b.tar
+ $ sudo docker save --output busybox.tar busybox
+ $ ls -sh b.tar
+ 2.7M b.tar
+ $ sudo docker save -o fedora-all.tar fedora
+ $ sudo docker save -o fedora-latest.tar fedora:latest
- Streams a tarred repository to the standard output stream.
- Contains all parent layers, and all tags + versions.
.. _cli_search:
diff --git a/docs/sources/reference/run.rst b/docs/sources/reference/run.rst
index d8de280671..d2fe449c22 100644
--- a/docs/sources/reference/run.rst
+++ b/docs/sources/reference/run.rst
@@ -80,7 +80,7 @@ through network connections or shared volumes because the container is
no longer listening to the commandline where you executed ``docker
run``. You can reattach to a detached container with ``docker``
:ref:`cli_attach`. If you choose to run a container in the detached
-mode, then you cannot use the ``-rm`` option.
+mode, then you cannot use the ``--rm`` option.
Foreground
..........
@@ -92,10 +92,10 @@ error. It can even pretend to be a TTY (this is what most commandline
executables expect) and pass along signals. All of that is
configurable::
- -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr``
- -t=false : Allocate a pseudo-tty
- -sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
- -i=false : Keep STDIN open even if not attached
+ -a=[] : Attach to ``stdin``, ``stdout`` and/or ``stderr``
+ -t=false : Allocate a pseudo-tty
+ --sig-proxy=true: Proxify all received signal to the process (even in non-tty mode)
+ -i=false : Keep STDIN open even if not attached
If you do not specify ``-a`` then Docker will `attach everything
(stdin,stdout,stderr)
@@ -112,8 +112,8 @@ as well as persistent standard input (``stdin``), so you'll use ``-i
Container Identification
------------------------
-Name (-name)
-............
+Name (--name)
+.............
The operator can identify a container in three ways:
@@ -122,7 +122,7 @@ The operator can identify a container in three ways:
* Name ("evil_ptolemy")
The UUID identifiers come from the Docker daemon, and if you do not
-assign a name to the container with ``-name`` then the daemon will
+assign a name to the container with ``--name`` then the daemon will
also generate a random string name too. The name can become a handy
way to add meaning to a container since you can use this name when
defining :ref:`links <working_with_links_names>` (or any other place
@@ -137,7 +137,7 @@ container ID out to a file of your choosing. This is similar to how
some programs might write out their process ID to a file (you've seen
them as PID files)::
- -cidfile="": Write the container ID to the file
+ --cidfile="": Write the container ID to the file
Network Settings
----------------
@@ -145,7 +145,7 @@ Network Settings
::
-n=true : Enable networking for this container
- -dns=[] : Set custom dns servers for the container
+ --dns=[] : Set custom dns servers for the container
By default, all containers have networking enabled and they can make
any outgoing connections. The operator can completely disable
@@ -154,10 +154,10 @@ networking. In cases like this, you would perform I/O through files or
STDIN/STDOUT only.
Your container will use the same DNS servers as the host by default,
-but you can override this with ``-dns``.
+but you can override this with ``--dns``.
-Clean Up (-rm)
---------------
+Clean Up (--rm)
+---------------
By default a container's file system persists even after the container
exits. This makes debugging a lot easier (since you can inspect the
@@ -165,9 +165,9 @@ final state) and you retain all your data by default. But if you are
running short-term **foreground** processes, these container file
systems can really pile up. If instead you'd like Docker to
**automatically clean up the container and remove the file system when
-the container exits**, you can add the ``-rm`` flag::
+the container exits**, you can add the ``--rm`` flag::
- -rm=false: Automatically remove the container when it exits (incompatible with -d)
+ --rm=false: Automatically remove the container when it exits (incompatible with -d)
Runtime Constraints on CPU and Memory
@@ -193,8 +193,8 @@ Runtime Privilege and LXC Configuration
::
- -privileged=false: Give extended privileges to this container
- -lxc-conf=[]: Add custom lxc options -lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
+ --privileged=false: Give extended privileges to this container
+ --lxc-conf=[]: (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1"
By default, Docker containers are "unprivileged" and cannot, for
example, run a Docker daemon inside a Docker container. This is
@@ -203,20 +203,21 @@ but a "privileged" container is given access to all devices (see
lxc-template.go_ and documentation on `cgroups devices
<https://www.kernel.org/doc/Documentation/cgroups/devices.txt>`_).
-When the operator executes ``docker run -privileged``, Docker will
+When the operator executes ``docker run --privileged``, Docker will
enable to access to all devices on the host as well as set some
configuration in AppArmor to allow the container nearly all the same
access to the host as processes running outside containers on the
-host. Additional information about running with ``-privileged`` is
+host. Additional information about running with ``--privileged`` is
available on the `Docker Blog
<http://blog.docker.io/2013/09/docker-can-now-run-within-docker/>`_.
-An operator can also specify LXC options using one or more
-``-lxc-conf`` parameters. These can be new parameters or override
-existing parameters from the lxc-template.go_. Note that in the
-future, a given host's Docker daemon may not use LXC, so this is an
-implementation-specific configuration meant for operators already
-familiar with using LXC directly.
+If the Docker daemon was started using the ``lxc`` exec-driver
+(``docker -d --exec-driver=lxc``) then the operator can also specify
+LXC options using one or more ``--lxc-conf`` parameters. These can be
+new parameters or override existing parameters from the lxc-template.go_.
+Note that in the future, a given host's Docker daemon may not use LXC,
+so this is an implementation-specific configuration meant for operators
+already familiar with using LXC directly.
.. _lxc-template.go: https://github.com/dotcloud/docker/blob/master/execdriver/lxc/lxc_template.go
@@ -260,7 +261,7 @@ ENTRYPOINT (Default Command to Execute at Runtime
::
- -entrypoint="": Overwrite the default entrypoint set by the image
+ --entrypoint="": Overwrite the default entrypoint set by the image
The ENTRYPOINT of an image is similar to a ``COMMAND`` because it
specifies what executable to run when the container starts, but it is
@@ -274,12 +275,12 @@ runtime by using a string to specify the new ``ENTRYPOINT``. Here is an
example of how to run a shell in a container that has been set up to
automatically run something else (like ``/usr/bin/redis-server``)::
- docker run -i -t -entrypoint /bin/bash example/redis
+ docker run -i -t --entrypoint /bin/bash example/redis
or two examples of how to pass more parameters to that ENTRYPOINT::
- docker run -i -t -entrypoint /bin/bash example/redis -c ls -l
- docker run -i -t -entrypoint /usr/bin/redis-cli example/redis --help
+ docker run -i -t --entrypoint /bin/bash example/redis -c ls -l
+ docker run -i -t --entrypoint /usr/bin/redis-cli example/redis --help
EXPOSE (Incoming Ports)
@@ -290,16 +291,16 @@ providing the ``EXPOSE`` instruction to give a hint to the operator
about what incoming ports might provide services. The following
options work with or override the ``Dockerfile``'s exposed defaults::
- -expose=[]: Expose a port from the container
+ --expose=[]: Expose a port from the container
without publishing it to your host
- -P=false : Publish all exposed ports to the host interfaces
- -p=[] : Publish a container's port to the host (format:
- ip:hostPort:containerPort | ip::containerPort |
- hostPort:containerPort)
- (use 'docker port' to see the actual mapping)
- -link="" : Add link to another container (name:alias)
-
-As mentioned previously, ``EXPOSE`` (and ``-expose``) make a port
+ -P=false : Publish all exposed ports to the host interfaces
+ -p=[] : Publish a container's port to the host (format:
+ ip:hostPort:containerPort | ip::containerPort |
+ hostPort:containerPort)
+ (use 'docker port' to see the actual mapping)
+ --link="" : Add link to another container (name:alias)
+
+As mentioned previously, ``EXPOSE`` (and ``--expose``) make a port
available **in** a container for incoming connections. The port number
on the inside of the container (where the service listens) does not
need to be the same number as the port exposed on the outside of the
@@ -308,16 +309,16 @@ have an HTTP service listening on port 80 (and so you ``EXPOSE 80`` in
the ``Dockerfile``), but outside the container the port might be 42800.
To help a new client container reach the server container's internal
-port operator ``-expose``'d by the operator or ``EXPOSE``'d by the
+port ``--expose``'d by the operator or ``EXPOSE``'d by the
developer, the operator has three choices: start the server container
-with ``-P`` or ``-p,`` or start the client container with ``-link``.
+with ``-P`` or ``-p``, or start the client container with ``--link``.
If the operator uses ``-P`` or ``-p`` then Docker will make the
exposed port accessible on the host and the ports will be available to
any client that can reach the host. To find the map between the host
ports and the exposed ports, use ``docker port``.
-If the operator uses ``-link`` when starting the new client container,
+If the operator uses ``--link`` when starting the new client container,
then the client container can access the exposed port via a private
networking interface. Docker will set some environment variables in
the client container to help indicate which interface and port to use.
@@ -329,7 +330,7 @@ The operator can **set any environment variable** in the container by
using one or more ``-e`` flags, even overriding those already defined by the
developer with a Dockerfile ``ENV``::
- $ docker run -e "deep=purple" -rm ubuntu /bin/bash -c export
+ $ docker run -e "deep=purple" --rm ubuntu /bin/bash -c export
declare -x HOME="/"
declare -x HOSTNAME="85bc26a0e200"
declare -x OLDPWD
@@ -341,13 +342,13 @@ developer with a Dockerfile ``ENV``::
Similarly the operator can set the **hostname** with ``-h``.
-``-link name:alias`` also sets environment variables, using the
+``--link name:alias`` also sets environment variables, using the
*alias* string to define environment variables within the container
that give the IP and PORT information for connecting to the service
container. Let's imagine we have a container running Redis::
# Start the service container, named redis-name
- $ docker run -d -name redis-name dockerfiles/redis
+ $ docker run -d --name redis-name dockerfiles/redis
4241164edf6f5aca5b0e9e4c9eccd899b0b8080c64c0cd26efe02166c73208f3
# The redis-name container exposed port 6379
@@ -361,12 +362,12 @@ container. Let's imagine we have a container running Redis::
Yet we can get information about the Redis container's exposed ports
-with ``-link``. Choose an alias that will form a valid environment
+with ``--link``. Choose an alias that will form a valid environment
variable!
::
- $ docker run -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c export
+ $ docker run --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c export
declare -x HOME="/"
declare -x HOSTNAME="acda7f7b1cdc"
declare -x OLDPWD
@@ -383,7 +384,7 @@ variable!
And we can use that information to connect from another container as a client::
- $ docker run -i -t -rm -link redis-name:redis_alias -entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT'
+ $ docker run -i -t --rm --link redis-name:redis_alias --entrypoint /bin/bash dockerfiles/redis -c '/redis-stable/src/redis-cli -h $REDIS_ALIAS_PORT_6379_TCP_ADDR -p $REDIS_ALIAS_PORT_6379_TCP_PORT'
172.17.0.32:6379>
VOLUME (Shared Filesystems)
@@ -393,7 +394,7 @@ VOLUME (Shared Filesystems)
-v=[]: Create a bind mount with: [host-dir]:[container-dir]:[rw|ro].
If "container-dir" is missing, then docker creates a new volume.
- -volumes-from="": Mount all volumes from the given container(s)
+ --volumes-from="": Mount all volumes from the given container(s)
The volumes commands are complex enough to have their own
documentation in section :ref:`volume_def`. A developer can define one
diff --git a/docs/sources/terms/images/docker-filesystems-busyboxrw.png b/docs/sources/terms/images/docker-filesystems-busyboxrw.png
index ad41c940e4..9ff8487b89 100644
--- a/docs/sources/terms/images/docker-filesystems-busyboxrw.png
+++ b/docs/sources/terms/images/docker-filesystems-busyboxrw.png
Binary files differ
diff --git a/docs/sources/terms/images/docker-filesystems-debian.png b/docs/sources/terms/images/docker-filesystems-debian.png
index 823a215d3e..61e5ddb2e3 100644
--- a/docs/sources/terms/images/docker-filesystems-debian.png
+++ b/docs/sources/terms/images/docker-filesystems-debian.png
Binary files differ
diff --git a/docs/sources/terms/images/docker-filesystems-debianrw.png b/docs/sources/terms/images/docker-filesystems-debianrw.png
index 97c69a9944..cacba4947b 100644
--- a/docs/sources/terms/images/docker-filesystems-debianrw.png
+++ b/docs/sources/terms/images/docker-filesystems-debianrw.png
Binary files differ
diff --git a/docs/sources/terms/images/docker-filesystems-generic.png b/docs/sources/terms/images/docker-filesystems-generic.png
index fb734b75c6..ae54b72e88 100644
--- a/docs/sources/terms/images/docker-filesystems-generic.png
+++ b/docs/sources/terms/images/docker-filesystems-generic.png
Binary files differ
diff --git a/docs/sources/terms/images/docker-filesystems-multilayer.png b/docs/sources/terms/images/docker-filesystems-multilayer.png
index 0b3ae19c2c..daedebe9c1 100644
--- a/docs/sources/terms/images/docker-filesystems-multilayer.png
+++ b/docs/sources/terms/images/docker-filesystems-multilayer.png
Binary files differ
diff --git a/docs/sources/terms/images/docker-filesystems-multiroot.png b/docs/sources/terms/images/docker-filesystems-multiroot.png
index 5e864273f3..65b61d94f1 100644
--- a/docs/sources/terms/images/docker-filesystems-multiroot.png
+++ b/docs/sources/terms/images/docker-filesystems-multiroot.png
Binary files differ
diff --git a/docs/sources/terms/images/docker-filesystems.svg b/docs/sources/terms/images/docker-filesystems.svg
index d41aff2522..054402db4c 100644
--- a/docs/sources/terms/images/docker-filesystems.svg
+++ b/docs/sources/terms/images/docker-filesystems.svg
@@ -11,7 +11,7 @@
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
inkscape:export-ydpi="90"
inkscape:export-xdpi="90"
- inkscape:export-filename="/Users/arothfusz/src/metalivedev/docker/docs/sources/terms/images/docker-filesystems-multiroot.png"
+ inkscape:export-filename="/Users/arothfusz/src/metalivedev/dockerclone/docs/sources/terms/images/docker-filesystems-multilayer.png"
sodipodi:docname="docker-filesystems.svg"
width="800"
height="600"
@@ -26,10 +26,10 @@
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="0.82666667"
- inkscape:cx="236.08871"
+ inkscape:cx="495.95588"
inkscape:cy="300"
inkscape:document-units="px"
- inkscape:current-layer="layer2"
+ inkscape:current-layer="layer13"
showgrid="false"
width="800px"
inkscape:window-width="1327"
@@ -98,6 +98,32 @@
</sodipodi:namedview>
<defs
id="defs4">
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Mend"
+ style="overflow:visible;">
+ <path
+ id="path4054"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none;"
+ transform="scale(0.4) rotate(180) translate(10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Lend"
+ style="overflow:visible;">
+ <path
+ id="path4048"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;marker-start:none;"
+ transform="scale(0.8) rotate(180) translate(12.5,0)" />
+ </marker>
<inkscape:perspective
sodipodi:type="inkscape:persp3d"
inkscape:vp_x="-406.34117 : 522.93291 : 1"
@@ -149,7 +175,7 @@
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title />
+ <dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
@@ -294,70 +320,9 @@
d="m 514.91047,422.62215 c 0,0 -1.06434,42.27288 -1.06434,42.27288 0,0 4.45362,-2.8241 4.45362,-2.8241 0.2761,-0.17507 0.46813,-0.15759 0.57629,0.0523 0.10868,0.18619 0.15712,0.50328 0.14534,0.95133 -0.0112,0.42443 -0.0782,0.81493 -0.20113,1.17164 -0.12299,0.35687 -0.32235,0.62363 -0.59831,0.80035 0,0 -10.15763,6.50487 -10.15763,6.50487 -0.27917,0.17878 -0.476,0.16246 -0.5903,-0.0494 -0.11437,-0.21191 -0.16642,-0.53506 -0.15609,-0.96944 0.0109,-0.45857 0.0801,-0.85922 0.20776,-1.20182 0.12814,-0.36656 0.33197,-0.63844 0.61129,-0.81556 0,0 4.56188,-2.89274 4.56188,-2.89274 0,0 0.97884,-39.26779 0.97884,-39.26779 0,0 -3.35907,1.85407 -3.35907,1.85407 -0.27977,0.15447 -0.48159,0.1208 -0.60529,-0.10124 -0.11445,-0.22726 -0.16609,-0.57399 -0.15489,-1.04015 0.0106,-0.44163 0.0802,-0.843 0.20889,-1.204 0.12859,-0.36073 0.33761,-0.62003 0.62686,-0.77784 0,0 4.51628,-2.46343 4.51628,-2.46343"
inkscape:connector-curvature="0" />
</g>
- <g
- id="text3655"
- style="font-size:40px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Arial;-inkscape-font-specification:Arial"
- transform="matrix(0.67123869,0,0,0.67123869,53.68199,126.56876)">
- <path
- id="path3662"
- d="m 132.8684,367.78607 c 0,0 0.71572,-54.35962 0.71572,-54.35962 0,0 2.66242,1.51122 2.66242,1.51122 0,0 -0.71153,54.62187 -0.71153,54.62187 0,0 -2.66661,-1.77347 -2.66661,-1.77347"
- inkscape:connector-curvature="0" />
- <path
- id="path3664"
- d="m 137.92667,371.15014 c 0,0 6.14809,-16.99741 6.14809,-16.99741 0,0 -5.19986,-22.51479 -5.19986,-22.51479 0,0 3.39897,2.02031 3.39897,2.02031 0,0 2.36954,10.8944 2.36954,10.8944 0.44814,2.07993 0.80843,3.81608 1.08051,5.20679 0.47284,-1.39022 0.90795,-2.61465 1.30519,-3.67276 0,0 2.89882,-7.87895 2.89882,-7.87895 0,0 3.37501,2.00607 3.37501,2.00607 0,0 -5.97372,15.60005 -5.97372,15.60005 0,0 5.92178,25.52797 5.92178,25.52797 0,0 -3.4783,-2.3133 -3.4783,-2.3133 0,0 -3.23409,-14.8189 -3.23409,-14.8189 0,0 -0.8528,-3.95585 -0.8528,-3.95585 0,0 -4.46772,13.08538 -4.46772,13.08538 0,0 -3.29142,-2.18901 -3.29142,-2.18901"
- inkscape:connector-curvature="0" />
- <path
- id="path3666"
- d="m 166.82131,374.91047 c 0,0 2.93572,2.79373 2.93572,2.79373 -0.37761,4.62343 -1.24922,7.86985 -2.61073,9.73548 -1.34456,1.83887 -2.96947,2.11901 -4.86973,0.85217 -2.3637,-1.5758 -4.23108,-4.67579 -5.61088,-9.29124 -1.36166,-4.61024 -1.99867,-10.32878 -1.91636,-17.16995 0.0532,-4.42099 0.40174,-8.10179 1.04648,-11.0477 0.64585,-2.95094 1.59765,-4.88106 2.85839,-5.78928 1.27692,-0.93132 2.65738,-0.95975 4.14303,-0.0791 1.88674,1.11849 3.42575,3.18947 4.61182,6.21733 1.19146,3.01472 1.93755,6.74983 2.23475,11.20086 0,0 -2.92082,-0.72724 -2.92082,-0.72724 -0.24353,-2.97398 -0.70922,-5.3811 -1.39599,-7.22057 -0.67412,-1.8282 -1.50208,-3.03683 -2.48268,-3.62779 -1.47568,-0.88924 -2.68418,-0.33926 -3.629,1.6424 -0.94184,1.95024 -1.44412,5.64763 -1.50886,11.09862 -0.0657,5.53171 0.32577,9.83698 1.17652,12.92095 0.85352,3.09406 1.99526,5.11378 3.42833,6.05501 1.15583,0.75914 2.13411,0.54393 2.93293,-0.65009 0.80075,-1.19694 1.32691,-3.50191 1.57708,-6.91359"
- inkscape:connector-curvature="0" />
- <path
- id="path3668"
- d="m 172.97661,394.46064 c 0,0 0.0905,-8.17492 0.0905,-8.17492 0,0 3.48861,2.27245 3.48861,2.27245 0,0 -0.0895,8.22327 -0.0895,8.22327 -0.0329,3.02363 -0.28765,5.30542 -0.76375,6.84314 -0.47577,1.56243 -1.21303,2.51325 -2.20987,2.85324 0,0 -0.81311,-3.65386 -0.81311,-3.65386 0.65091,-0.22881 1.13685,-0.89297 1.45702,-1.99285 0.32015,-1.07418 0.51068,-2.8142 0.57137,-5.21909 0,0 -1.73124,-1.15138 -1.73124,-1.15138"
- inkscape:connector-curvature="0" />
- <path
- id="path3670"
- d="m 204.77784,410.06983 c -1.27022,1.55778 -2.48568,2.44071 -3.64678,2.65261 -1.1447,0.21934 -2.36657,-0.10529 -3.66459,-0.97064 -2.13127,-1.42084 -3.74779,-3.67649 -4.85717,-6.76514 -1.10483,-3.1041 -1.63719,-6.47275 -1.60031,-10.11391 0.0216,-2.13477 0.25062,-3.94364 0.6874,-5.42825 0.44957,-1.50612 1.02226,-2.57799 1.71876,-3.21526 0.71002,-0.63098 1.50367,-0.94896 2.38159,-0.95288 0.64759,0.017 1.6255,0.25355 2.93681,0.71095 2.68835,0.95136 4.68535,1.32634 5.97773,1.11825 0.0222,-1.02578 0.0346,-1.67832 0.0372,-1.95765 0.0289,-3.07178 -0.26872,-5.42898 -0.8919,-7.06976 -0.84101,-2.21749 -2.10184,-3.83086 -3.77761,-4.84085 -1.55688,-0.93829 -2.71034,-1.00947 -3.46489,-0.21839 -0.74047,0.76925 -1.30109,2.5996 -1.68287,5.49061 0,0 -3.16708,-2.94172 -3.16708,-2.94172 0.31864,-2.91383 0.81734,-5.11515 1.49696,-6.60484 0.6812,-1.51989 1.65517,-2.41342 2.92464,-2.67921 1.27473,-0.29431 2.75127,0.0544 4.43259,1.05105 1.67794,0.99472 3.04366,2.25211 4.09313,3.7721 1.05306,1.52531 1.82526,3.12483 2.31452,4.79681 0.49033,1.64692 0.82696,3.5698 1.00937,5.76792 0.10151,1.36012 0.13673,3.72492 0.1056,7.09479 0,0 -0.0935,10.11679 -0.0935,10.11679 -0.0653,7.05995 -0.0372,11.58025 0.0844,13.55797 0.13448,1.95911 0.40887,3.94126 0.8236,5.94773 0,0 -3.55349,-2.3633 -3.55349,-2.3633 -0.33594,-1.80359 -0.5439,-3.78856 -0.62416,-5.9558 m -0.12224,-17.05427 c -1.23154,0.34731 -3.06331,0.14247 -5.48491,-0.60924 -1.36335,-0.41924 -2.32581,-0.53009 -2.89103,-0.33412 -0.56424,0.19568 -1.00286,0.73389 -1.31639,1.61435 -0.31298,0.85222 -0.4758,1.92485 -0.48867,3.21853 -0.0197,1.98221 0.29058,3.84732 0.93197,5.59804 0.65498,1.76261 1.62279,3.0659 2.90625,3.90947 1.27641,0.83893 2.42209,0.96176 3.43544,0.36456 1.01669,-0.62694 1.7731,-1.89094 2.26739,-3.79238 0.3778,-1.47261 0.58252,-3.87376 0.61388,-7.20158 0,0 0.0261,-2.76763 0.0261,-2.76763"
- inkscape:connector-curvature="0" />
- <path
- id="path3672"
- d="m 226.91498,430.33317 c 0,0 0.056,-6.79135 0.056,-6.79135 -1.69979,4.12585 -3.95958,5.23997 -6.76691,3.36841 -1.23125,-0.82083 -2.37518,-2.1017 -3.4326,-3.84047 -1.04088,-1.72429 -1.81148,-3.52427 -2.31374,-5.40182 -0.48827,-1.89422 -0.82487,-4.02954 -1.01034,-6.40682 -0.12775,-1.59592 -0.17698,-4.02489 -0.14772,-7.28678 0,0 0.25063,-27.95019 0.25063,-27.95019 0,0 3.47921,2.068 3.47921,2.068 0,0 -0.22098,25.15376 -0.22098,25.15376 -0.0353,4.02044 0.0122,6.77614 0.14272,8.26649 0.20297,2.17003 0.65699,4.07445 1.36316,5.71471 0.70804,1.61546 1.59303,2.77268 2.65633,3.47053 1.06676,0.70016 2.07587,0.76801 3.02668,0.20066 0.95364,-0.59783 1.63329,-1.79901 2.03728,-3.60358 0.41794,-1.82668 0.64337,-4.71043 0.67595,-8.64861 0,0 0.20406,-24.67831 0.20406,-24.67831 0,0 3.62583,2.15515 3.62583,2.15515 0,0 -0.37466,46.37229 -0.37466,46.37229 0,0 -3.25092,-2.16207 -3.25092,-2.16207"
- inkscape:connector-curvature="0" />
- <path
- id="path3674"
- d="m 236.84818,436.9394 c 0,0 0.31458,-40.68866 0.31458,-40.68866 0,0 -3.27066,-1.97443 -3.27066,-1.97443 0,0 0.0485,-6.13244 0.0485,-6.13244 0,0 3.26986,1.94357 3.26986,1.94357 0,0 0.0384,-4.9718 0.0384,-4.9718 0.0242,-3.13718 0.17313,-5.39171 0.44675,-6.76504 0.37445,-1.8466 1.0157,-3.14492 1.92523,-3.8952 0.92597,-0.77365 2.21207,-0.69593 3.86256,0.23811 1.06731,0.60412 2.24898,1.54093 3.54628,2.81271 0,0 -0.62418,6.66996 -0.62418,6.66996 -0.78934,-0.75385 -1.53564,-1.33338 -2.23919,-1.73932 -1.15067,-0.66373 -1.96603,-0.6152 -2.44858,0.14318 -0.48194,0.75751 -0.73333,2.55103 -0.75467,5.38196 0,0 -0.0327,4.33654 -0.0327,4.33654 0,0 4.35398,2.58795 4.35398,2.58795 0,0 -0.0456,6.23957 -0.0456,6.23957 0,0 -4.35509,-2.62908 -4.35509,-2.62908 0,0 -0.30843,40.92114 -0.30843,40.92114 0,0 -3.72704,-2.47872 -3.72704,-2.47872"
- inkscape:connector-curvature="0" />
- <path
- id="path3676"
- d="m 246.46465,429.05307 c 0,0 3.81968,1.1922 3.81968,1.1922 0.19276,3.35392 0.7721,6.20708 1.74012,8.56243 0.98544,2.37207 2.3721,4.14723 4.16469,5.32459 1.81668,1.19318 3.17579,1.3205 4.07171,0.37548 0.89826,-0.97786 1.35491,-2.50699 1.36833,-4.58524 0.012,-1.86394 -0.37148,-3.58214 -1.14903,-5.15206 -0.54183,-1.08052 -1.89103,-2.87259 -4.03793,-5.36553 -2.87017,-3.33767 -4.84719,-5.88768 -5.94667,-7.66691 -1.08128,-1.7942 -1.8993,-3.82568 -2.45597,-6.09572 -0.54119,-2.28674 -0.80303,-4.59245 -0.78627,-6.91984 0.0153,-2.11796 0.25669,-3.93345 0.72469,-5.44816 0.48302,-1.53765 1.12853,-2.66509 1.93745,-3.38209 0.60808,-0.56866 1.4316,-0.86027 2.47213,-0.87408 1.05827,-0.0353 2.19002,0.30354 3.396,1.01839 1.82428,1.08147 3.42677,2.57943 4.80442,4.49544 1.39816,1.9329 2.42778,4.04798 3.08549,6.34283 0.65928,2.26923 1.10658,5.05898 1.34104,8.36831 0,0 -3.93498,-1.30965 -3.93498,-1.30965 -0.1613,-2.60573 -0.66572,-4.86818 -1.51169,-6.78511 -0.82908,-1.90296 -2.01211,-3.31622 -3.54556,-4.24034 -1.80214,-1.08596 -3.08681,-1.24118 -3.85989,-0.47117 -0.77146,0.76845 -1.16235,1.97686 -1.17391,3.62665 -0.007,1.05006 0.14407,2.09235 0.45452,3.12753 0.31055,1.06635 0.80269,2.09487 1.47721,3.08626 0.38829,0.54294 1.53561,1.95069 3.44979,4.23261 2.78949,3.29205 4.7444,5.79841 5.85003,7.50277 1.12436,1.68881 2.00304,3.68747 2.63416,5.99522 0.63237,2.3125 0.94024,4.88426 0.92265,7.71231 -0.0173,2.76736 -0.43134,5.12235 -1.24099,7.06139 -0.79291,1.91427 -1.93089,3.05649 -3.41056,3.42835 -1.47342,0.33983 -3.12755,-0.1039 -4.95957,-1.32524 -3.01245,-2.00831 -5.28496,-4.82452 -6.83171,-8.44857 -1.52498,-3.59708 -2.47979,-8.05614 -2.86938,-13.38305"
- inkscape:connector-curvature="0" />
- <path
- id="path3678"
- d="m 267.46509,458.46409 c 0,0 10.16276,-64.44628 10.16276,-64.44628 0,0 3.35985,1.90154 3.35985,1.90154 0,0 -10.22211,64.7453 -10.22211,64.7453 0,0 -3.3005,-2.20056 -3.3005,-2.20056"
- inkscape:connector-curvature="0" />
- <path
- id="path3680"
- d="m 287.73074,470.77961 c 0,0 -3.98413,-2.64971 -3.98413,-2.64971 0,0 0.36657,-69.26132 0.36657,-69.26132 0,0 4.28286,2.431 4.28286,2.431 0,0 -0.12574,24.80354 -0.12574,24.80354 1.84841,-3.43804 4.20286,-4.3171 7.07399,-2.61515 1.5995,0.94822 3.11282,2.48894 4.53901,4.62548 1.44866,2.12297 2.63509,4.62828 3.55675,7.51533 0.94101,2.87289 1.67339,6.11301 2.19582,9.71903 0.52331,3.61258 0.77764,7.29172 0.76223,11.03361 -0.0367,8.8888 -1.19889,15.02735 -3.47692,18.39523 -2.26525,3.34891 -4.9514,3.97742 -8.04813,1.91293 -3.05429,-2.0362 -5.42013,-6.12345 -7.11007,-12.2502 0,0 -0.0322,6.34023 -0.0322,6.34023 m 0.0826,-25.6991 c -0.0308,6.05748 0.36263,10.70405 1.18198,13.94323 1.3439,5.31484 3.18967,8.7503 5.54452,10.29694 1.92772,1.26611 3.60983,0.72174 5.04245,-1.64447 1.43781,-2.407 2.17299,-6.89882 2.20167,-13.46572 0.0293,-6.72399 -0.63702,-12.10528 -1.99483,-16.13506 -1.33586,-4.00333 -2.96003,-6.57643 -4.86901,-7.72687 -1.91517,-1.15407 -3.57055,-0.50907 -4.97003,1.92406 -1.39445,2.39298 -2.10547,6.6592 -2.13675,12.80789"
- inkscape:connector-curvature="0" />
- <path
- id="path3682"
- d="m 322.12463,485.58433 c 0,0 0.65936,8.40758 0.65936,8.40758 -1.33673,-0.35442 -2.52804,-0.88064 -3.57528,-1.5781 -1.70425,-1.13503 -3.01872,-2.52454 -3.94739,-4.16917 -0.92628,-1.6404 -1.57435,-3.40805 -1.9457,-5.30454 -0.37079,-1.92713 -0.54592,-5.5546 -0.52573,-10.88197 0,0 0.114,-30.08386 0.114,-30.08386 0,0 -3.36894,-2.03377 -3.36894,-2.03377 0,0 0.0272,-6.84805 0.0272,-6.84805 0,0 3.36786,2.00182 3.36786,2.00182 0,0 0.0489,-12.91135 0.0489,-12.91135 0,0 4.63253,-2.66881 4.63253,-2.66881 0,0 -0.065,18.3241 -0.065,18.3241 0,0 4.72675,2.80952 4.72675,2.80952 0,0 -0.023,6.96866 -0.023,6.96866 0,0 -4.72829,-2.85438 -4.72829,-2.85438 0,0 -0.10923,30.77205 -0.10923,30.77205 -0.009,2.54809 0.0632,4.23726 0.21665,5.06728 0.17091,0.8418 0.43796,1.59732 0.80137,2.26677 0.38115,0.6815 0.92028,1.25067 1.61806,1.70755 0.52419,0.34326 1.21588,0.67931 2.07599,1.00867"
- inkscape:connector-curvature="0" />
- <path
- id="path3684"
- d="m 326.68371,496.68588 c 0,0 0.16352,-53.31935 0.16352,-53.31935 0,0 4.33405,2.57612 4.33405,2.57612 0,0 -0.0231,8.11168 -0.0231,8.11168 1.12479,-3.12783 2.15869,-5.02087 3.10122,-5.67423 0.96285,-0.64401 2.01732,-0.62746 3.16426,0.0524 1.66273,0.98571 3.35799,2.97819 5.08643,5.98483 0,0 -1.73463,7.50163 -1.73463,7.50163 -1.20956,-2.06252 -2.41678,-3.45673 -3.62177,-4.18598 -1.07402,-0.64988 -2.03784,-0.62407 -2.89238,0.075 -0.85268,0.66393 -1.46157,1.94671 -1.82782,3.84834 -0.54904,2.90043 -0.82874,6.26858 -0.83955,10.10792 0,0 -0.0793,28.13461 -0.0793,28.13461 0,0 -4.83103,-3.21295 -4.83103,-3.21295"
- inkscape:connector-curvature="0" />
- <path
- id="path3686"
- d="m 346.63844,509.95707 c 0,0 0.0968,-47.55946 0.0968,-47.55946 0,0 -4.43131,-2.6751 -4.43131,-2.6751 0,0 0.0162,-7.15908 0.0162,-7.15908 0,0 4.42975,2.633 4.42975,2.633 0,0 0.0118,-5.80848 0.0118,-5.80848 0.007,-3.66486 0.19039,-6.28429 0.54899,-7.86025 0.49107,-2.11858 1.34725,-3.56796 2.57091,-4.34826 1.24623,-0.8062 2.9874,-0.57829 5.2303,0.69102 1.45137,0.82149 3.06136,2.04536 4.83196,3.67489 0,0 -0.79224,7.74699 -0.79224,7.74699 -1.07705,-0.96968 -2.09389,-1.73012 -3.05099,-2.28234 -1.56464,-0.90254 -2.66858,-0.93449 -3.31577,-0.0995 -0.64623,0.83385 -0.9719,2.90502 -0.97777,6.21534 0,0 -0.009,5.07119 -0.009,5.07119 0,0 5.92043,3.51903 5.92043,3.51903 0,0 -0.0107,7.30549 -0.0107,7.30549 0,0 -5.92257,-3.57534 -5.92257,-3.57534 0,0 -0.0849,47.87735 -0.0849,47.87735 0,0 -5.0619,-3.36649 -5.0619,-3.36649"
- inkscape:connector-curvature="0" />
- <path
- id="path3688"
- d="m 359.60073,501.90418 c 0,0 5.20059,1.86777 5.20059,1.86777 0.29001,3.96114 1.10193,7.38322 2.43911,10.27061 1.36176,2.91073 3.2661,5.17238 5.72054,6.78444 2.48967,1.63519 4.34728,1.95881 5.56379,0.96109 1.21993,-1.0365 1.83154,-2.77869 1.83229,-5.22389 6.2e-4,-2.19296 -0.5384,-4.26389 -1.61481,-6.20909 -0.7497,-1.33918 -2.60804,-3.61528 -5.55946,-6.8122 -3.94075,-4.27425 -6.65395,-7.50944 -8.16465,-9.73106 -1.48522,-2.23573 -2.61386,-4.7171 -3.38893,-7.44614 -0.75395,-2.74593 -1.12852,-5.48045 -1.12491,-8.2074 0.003,-2.48146 0.31617,-4.58205 0.93929,-6.30404 0.64345,-1.7475 1.51123,-2.99566 2.60481,-3.74404 0.82208,-0.59757 1.93976,-0.84564 3.35554,-0.74295 1.44048,0.0796 2.98492,0.60687 4.63457,1.58472 2.49729,1.48044 4.69744,3.42626 6.59564,5.83924 1.92772,2.43694 3.35406,5.04673 4.27363,7.82559 0.92183,2.74989 1.55812,6.08842 1.90744,10.01415 0,0 -5.39591,-2.01583 -5.39591,-2.01583 -0.24253,-3.08522 -0.95109,-5.80694 -2.12313,-8.16184 -1.14834,-2.33544 -2.7751,-4.13563 -4.87465,-5.40091 -2.46541,-1.48565 -4.2164,-1.81727 -5.26239,-1.00324 -1.04343,0.8121 -1.56519,2.18465 -1.56724,4.11944 -10e-4,1.23148 0.21335,2.47259 0.64434,3.72428 0.43146,1.28852 1.10985,2.55443 2.03645,3.7988 0.53331,0.68393 2.10812,2.47474 4.73703,5.38635 3.83534,4.20888 6.52812,7.39657 8.05468,9.53851 1.55295,2.12718 2.77297,4.59004 3.65706,7.38727 0.88613,2.80397 1.33003,5.87348 1.33006,9.20426 -3e-5,3.25947 -0.54743,5.98195 -1.64026,8.16269 -1.06972,2.15296 -2.61798,3.35081 -4.63932,3.59644 -2.01164,0.20856 -4.27524,-0.52848 -6.78627,-2.2025 -4.12399,-2.74933 -7.24172,-6.34882 -9.37583,-10.80056 -2.10254,-4.4137 -3.43626,-9.76409 -4.0091,-16.05996"
- inkscape:connector-curvature="0" />
- </g>
</g>
<g
- style="display:inline"
+ style="display:none"
inkscape:groupmode="layer"
id="layer9"
inkscape:label="GenericRootfs">
@@ -553,7 +518,7 @@
</g>
</g>
<g
- style="display:none"
+ style="display:inline"
inkscape:label="Debian"
id="layer5"
inkscape:groupmode="layer">
@@ -1178,7 +1143,7 @@
inkscape:groupmode="layer"
id="layer10"
inkscape:label="Base Image"
- style="display:none">
+ style="display:inline">
<g
transform="matrix(0.71864924,0,0,0.71864924,102.10269,88.99025)"
style="font-size:40px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Courier New;-inkscape-font-specification:Courier New Bold"
@@ -1251,6 +1216,36 @@
inkscape:connector-curvature="0" />
</g>
</g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer13"
+ inkscape:label="Parent Pointer">
+ <path
+ style="fill:none;stroke:#000000;stroke-width:4.80000019;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ d="m 546.77419,134.2742 c 110.08065,13.30645 1.20968,53.2258 1.20968,53.2258"
+ id="path3272"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="cc" />
+ <text
+ xml:space="preserve"
+ style="font-size:32px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;line-height:103.99999619%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Helvetica"
+ x="601.20972"
+ y="125.80645"
+ id="text4672"
+ sodipodi:linespacing="104%"><tspan
+ sodipodi:role="line"
+ id="tspan4674"
+ x="601.20972"
+ y="125.80645">references</tspan><tspan
+ sodipodi:role="line"
+ x="601.20972"
+ y="159.08644"
+ id="tspan4676">parent</tspan><tspan
+ sodipodi:role="line"
+ x="601.20972"
+ y="192.36644"
+ id="tspan4678">image</tspan></text>
+ </g>
</g>
<g
style="display:none"
diff --git a/docs/sources/use/ambassador_pattern_linking.rst b/docs/sources/use/ambassador_pattern_linking.rst
index e7cdbd7c96..bbd5816768 100644
--- a/docs/sources/use/ambassador_pattern_linking.rst
+++ b/docs/sources/use/ambassador_pattern_linking.rst
@@ -43,26 +43,26 @@ Start actual redis server on one Docker host
.. code-block:: bash
- big-server $ docker run -d -name redis crosbymichael/redis
+ big-server $ docker run -d --name redis crosbymichael/redis
Then add an ambassador linked to the redis server, mapping a port to the outside world
.. code-block:: bash
- big-server $ docker run -d -link redis:redis -name redis_ambassador -p 6379:6379 svendowideit/ambassador
+ big-server $ docker run -d --link redis:redis --name redis_ambassador -p 6379:6379 svendowideit/ambassador
On the other host, you can set up another ambassador setting environment variables for each remote port we want to proxy to the ``big-server``
.. code-block:: bash
- client-server $ docker run -d -name redis_ambassador -expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
+ client-server $ docker run -d --name redis_ambassador --expose 6379 -e REDIS_PORT_6379_TCP=tcp://192.168.1.52:6379 svendowideit/ambassador
Then on the ``client-server`` host, you can use a redis client container to talk
to the remote redis server, just by linking to the local redis ambassador.
.. code-block:: bash
- client-server $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
+ client-server $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
@@ -79,19 +79,19 @@ On the docker host (192.168.1.52) that redis will run on:
.. code-block:: bash
# start actual redis server
- $ docker run -d -name redis crosbymichael/redis
+ $ docker run -d --name redis crosbymichael/redis
# get a redis-cli container for connection testing
$ docker pull relateiq/redis-cli
# test the redis server by talking to it directly
- $ docker run -t -i -rm -link redis:redis relateiq/redis-cli
+ $ docker run -t -i --rm --link redis:redis relateiq/redis-cli
redis 172.17.0.136:6379> ping
PONG
^D
# add redis ambassador
- $ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 busybox sh
+ $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 busybox sh
in the redis_ambassador container, you can see the linked redis container's env
@@ -119,7 +119,7 @@ This environment is used by the ambassador socat script to expose redis to the w
$ docker rm redis_ambassador
$ sudo ./contrib/mkimage-unittest.sh
- $ docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 docker-ut sh
+ $ docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 docker-ut sh
$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:172.17.0.136:6379
@@ -127,7 +127,7 @@ then ping the redis server via the ambassador
.. code-block:: bash
- $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
+ $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
@@ -136,7 +136,7 @@ Now go to a different server
.. code-block:: bash
$ sudo ./contrib/mkimage-unittest.sh
- $ docker run -t -i -expose 6379 -name redis_ambassador docker-ut sh
+ $ docker run -t -i --expose 6379 --name redis_ambassador docker-ut sh
$ socat TCP4-LISTEN:6379,fork,reuseaddr TCP4:192.168.1.52:6379
@@ -145,7 +145,7 @@ and get the redis-cli image so we can talk over the ambassador bridge
.. code-block:: bash
$ docker pull relateiq/redis-cli
- $ docker run -i -t -rm -link redis_ambassador:redis relateiq/redis-cli
+ $ docker run -i -t --rm --link redis_ambassador:redis relateiq/redis-cli
redis 172.17.0.160:6379> ping
PONG
@@ -157,7 +157,7 @@ When you start the container, it uses a small ``sed`` script to parse out the (p
link environment variables to set up the port forwarding. On the remote host, you need to set the
variable using the ``-e`` command line option.
-``-expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the
+``--expose 1234 -e REDIS_PORT_1234_TCP=tcp://192.168.1.52:6379`` will forward the
local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379``.
@@ -171,9 +171,9 @@ local ``1234`` port to the remote IP and port - in this case ``192.168.1.52:6379
# docker build -t SvenDowideit/ambassador .
# docker tag SvenDowideit/ambassador ambassador
# then to run it (on the host that has the real backend on it)
- # docker run -t -i -link redis:redis -name redis_ambassador -p 6379:6379 ambassador
+ # docker run -t -i --link redis:redis --name redis_ambassador -p 6379:6379 ambassador
# on the remote host, you can set up another ambassador
- # docker run -t -i -name redis_ambassador -expose 6379 sh
+ # docker run -t -i --name redis_ambassador --expose 6379 sh
FROM docker-ut
MAINTAINER SvenDowideit@home.org.au
diff --git a/docs/sources/use/basics.rst b/docs/sources/use/basics.rst
index 24c22bba39..4164e706f7 100644
--- a/docs/sources/use/basics.rst
+++ b/docs/sources/use/basics.rst
@@ -39,7 +39,9 @@ Repository to a local image cache.
12 character hash ``539c0211cd76: Download complete`` which is the
short form of the image ID. These short image IDs are the first 12
characters of the full image ID - which can be found using ``docker
- inspect`` or ``docker images -notrunc=true``
+ inspect`` or ``docker images --no-trunc=true``
+
+ **If you're using OS X** then you shouldn't use ``sudo``
Running an interactive shell
----------------------------
diff --git a/docs/sources/use/chef.rst b/docs/sources/use/chef.rst
new file mode 100644
index 0000000000..919eba7a8f
--- /dev/null
+++ b/docs/sources/use/chef.rst
@@ -0,0 +1,95 @@
+:title: Chef Usage
+:description: Installation and using Docker via Chef
+:keywords: chef, installation, usage, docker, documentation
+
+.. _install_using_chef:
+
+Using Chef
+=============
+
+.. note::
+
+ Please note this is a community contributed installation path. The
+ only 'official' installation is using the :ref:`ubuntu_linux`
+ installation path. This version may sometimes be out of date.
+
+Requirements
+------------
+
+To use this guide you'll need a working installation of
+`Chef <http://www.getchef.com/>`_. This cookbook supports a variety of
+operating systems.
+
+Installation
+------------
+
+The cookbook is available on the `Chef Community Site
+<http://community.opscode.com/cookbooks/docker>`_ and can be installed
+using your favorite cookbook dependency manager.
+
+The source can be found on `GitHub
+<https://github.com/bflad/chef-docker>`_.
+
+Usage
+-----
+
+The cookbook provides recipes for installing Docker, configuring init
+for Docker, and resources for managing images and containers.
+It supports almost all Docker functionality.
+
+Installation
+~~~~~~~~~~~~
+
+.. code-block:: ruby
+
+ include_recipe 'docker'
+
+Images
+~~~~~~
+
+The next step is to pull a Docker image. For this, we have a resource:
+
+.. code-block:: ruby
+
+ docker_image 'samalba/docker-registry'
+
+This is equivalent to running:
+
+.. code-block:: bash
+
+ docker pull samalba/docker-registry
+
+There are attributes available to control how long the cookbook
+will wait for the image download to complete (default: 5 minutes).
+
+To remove images you no longer need:
+
+.. code-block:: ruby
+
+ docker_image 'samalba/docker-registry' do
+ action :remove
+ end
+
+Containers
+~~~~~~~~~~
+
+Now you have an image where you can run commands within a container
+managed by Docker.
+
+.. code-block:: ruby
+
+ docker_container 'samalba/docker-registry' do
+ detach true
+ port '5000:5000'
+ env 'SETTINGS_FLAVOR=local'
+ volume '/mnt/docker:/docker-storage'
+ end
+
+This is equivalent to running the following command, but under upstart:
+
+.. code-block:: bash
+
+ docker run --detach=true --publish='5000:5000' --env='SETTINGS_FLAVOR=local' --volume='/mnt/docker:/docker-storage' samalba/docker-registry
+
+The resources will accept a single string or an array of values
+for any docker flags that allow multiple values.
diff --git a/docs/sources/use/host_integration.rst b/docs/sources/use/host_integration.rst
index ed341cd4bc..cb920a5908 100644
--- a/docs/sources/use/host_integration.rst
+++ b/docs/sources/use/host_integration.rst
@@ -43,11 +43,6 @@ into it:
stop on runlevel [!2345]
respawn
script
- # Wait for docker to finish starting up first.
- FILE=/var/run/docker.sock
- while [ ! -e $FILE ] ; do
- inotifywait -t 2 -e create $(dirname $FILE)
- done
/usr/bin/docker start -a redis_server
end script
diff --git a/docs/sources/use/index.rst b/docs/sources/use/index.rst
index c1b7691cca..dcf6289b41 100644
--- a/docs/sources/use/index.rst
+++ b/docs/sources/use/index.rst
@@ -20,4 +20,5 @@ Contents:
working_with_volumes
working_with_links_names
ambassador_pattern_linking
+ chef
puppet
diff --git a/docs/sources/use/networking.rst b/docs/sources/use/networking.rst
index c00c608550..59c63ed674 100644
--- a/docs/sources/use/networking.rst
+++ b/docs/sources/use/networking.rst
@@ -121,8 +121,8 @@ Container intercommunication
The value of the Docker daemon's ``icc`` parameter determines whether
containers can communicate with each other over the bridge network.
-- The default, ``-icc=true`` allows containers to communicate with each other.
-- ``-icc=false`` means containers are isolated from each other.
+- The default, ``--icc=true`` allows containers to communicate with each other.
+- ``--icc=false`` means containers are isolated from each other.
Docker uses ``iptables`` under the hood to either accept or
drop communication between containers.
diff --git a/docs/sources/use/port_redirection.rst b/docs/sources/use/port_redirection.rst
index 38d6b98841..cf5c2100a9 100644
--- a/docs/sources/use/port_redirection.rst
+++ b/docs/sources/use/port_redirection.rst
@@ -114,21 +114,21 @@ exposure, is possible because ``client`` is started after ``server``
has been started.
Here is a full example. On ``server``, the port of interest is
-exposed. The exposure is done either through the ``-expose`` parameter
+exposed. The exposure is done either through the ``--expose`` parameter
to the ``docker run`` command, or the ``EXPOSE`` build command in a
Dockerfile:
.. code-block:: bash
# Expose port 80
- docker run -expose 80 --name server <image> <cmd>
+ docker run --expose 80 --name server <image> <cmd>
The ``client`` then links to the ``server``:
.. code-block:: bash
# Link
- docker run -name client -link server:linked-server <image> <cmd>
+ docker run --name client --link server:linked-server <image> <cmd>
``client`` locally refers to ``server`` as ``linked-server``. The
following environment variables, among others, are available on
diff --git a/docs/sources/use/working_with_links_names.rst b/docs/sources/use/working_with_links_names.rst
index 1b0e9f6914..4acb6079c1 100644
--- a/docs/sources/use/working_with_links_names.rst
+++ b/docs/sources/use/working_with_links_names.rst
@@ -19,14 +19,14 @@ Container Naming
.. versionadded:: v0.6.5
-You can now name your container by using the ``-name`` flag. If no
+You can now name your container by using the ``--name`` flag. If no
name is provided, Docker will automatically generate a name. You can
see this name using the ``docker ps`` command.
.. code-block:: bash
- # format is "sudo docker run -name <container_name> <image_name> <command>"
- $ sudo docker run -name test ubuntu /bin/bash
+ # format is "sudo docker run --name <container_name> <image_name> <command>"
+ $ sudo docker run --name test ubuntu /bin/bash
# the flag "-a" Show all containers. Only running containers are shown by default.
$ sudo docker ps -a
@@ -41,9 +41,9 @@ Links: service discovery for docker
.. versionadded:: v0.6.5
Links allow containers to discover and securely communicate with each
-other by using the flag ``-link name:alias``. Inter-container
+other by using the flag ``--link name:alias``. Inter-container
communication can be disabled with the daemon flag
-``-icc=false``. With this flag set to ``false``, Container A cannot
+``--icc=false``. With this flag set to ``false``, Container A cannot
access Container B unless explicitly allowed via a link. This is a
huge win for securing your containers. When two containers are linked
together Docker creates a parent child relationship between the
@@ -63,7 +63,7 @@ based on that image and run it as a daemon.
.. code-block:: bash
- $ sudo docker run -d -name redis crosbymichael/redis
+ $ sudo docker run -d --name redis crosbymichael/redis
We can issue all the commands that you would expect using the name
``redis``; start, stop, attach, using the name for our container. The
@@ -77,9 +77,9 @@ we need to establish a link.
.. code-block:: bash
- $ sudo docker run -t -i -link redis:db -name webapp ubuntu bash
+ $ sudo docker run -t -i --link redis:db --name webapp ubuntu bash
-When you specified ``-link redis:db`` you are telling Docker to link
+When you specified ``--link redis:db`` you are telling Docker to link
the container named ``redis`` into this new container with the alias
``db``. Environment variables are prefixed with the alias so that the
parent container can access network and environment information from
@@ -112,8 +112,16 @@ Accessing the network information along with the environment of the
child container allows us to easily connect to the Redis service on
the specific IP and port in the environment.
+.. note::
+    These environment variables are only set for the first process in
+ the container. Similarly, some daemons (such as ``sshd``) will
+ scrub them when spawning shells for connection.
+
+ You can work around this by storing the initial ``env`` in a file,
+ or looking at ``/proc/1/environ``.
+
Running ``docker ps`` shows the 2 containers, and the ``webapp/db``
-alias name for the redis container.
+alias name for the Redis container.
.. code-block:: bash
diff --git a/docs/sources/use/working_with_volumes.rst b/docs/sources/use/working_with_volumes.rst
index 755be009e3..d2f035dc84 100644
--- a/docs/sources/use/working_with_volumes.rst
+++ b/docs/sources/use/working_with_volumes.rst
@@ -42,14 +42,14 @@ two new volumes::
This command will create the new container with two new volumes that
exits instantly (``true`` is pretty much the smallest, simplest program
that you can run). Once created you can mount its volumes in any other
-container using the ``-volumes-from`` option; irrespective of whether the
+container using the ``--volumes-from`` option; irrespective of whether the
container is running or not.
Or, you can use the VOLUME instruction in a Dockerfile to add one or more new
volumes to any container created from that image::
# BUILD-USING: docker build -t data .
- # RUN-USING: docker run -name DATA data
+ # RUN-USING: docker run --name DATA data
FROM busybox
VOLUME ["/var/volume1", "/var/volume2"]
CMD ["/bin/true"]
@@ -63,19 +63,19 @@ Data Volume Container, and then to mount the data from it.
Create a named container with volumes to share (``/var/volume1`` and ``/var/volume2``)::
- $ docker run -v /var/volume1 -v /var/volume2 -name DATA busybox true
+ $ docker run -v /var/volume1 -v /var/volume2 --name DATA busybox true
Then mount those data volumes into your application containers::
- $ docker run -t -i -rm -volumes-from DATA -name client1 ubuntu bash
+ $ docker run -t -i --rm --volumes-from DATA --name client1 ubuntu bash
-You can use multiple ``-volumes-from`` parameters to bring together multiple
+You can use multiple ``--volumes-from`` parameters to bring together multiple
data volumes from multiple containers.
Interestingly, you can mount the volumes that came from the ``DATA`` container in
yet another container via the ``client1`` middleman container::
- $ docker run -t -i -rm -volumes-from client1 -name client2 ubuntu bash
+ $ docker run -t -i --rm --volumes-from client1 --name client2 ubuntu bash
This allows you to abstract the actual data source from users of that data,
similar to :ref:`ambassador_pattern_linking <ambassador_pattern_linking>`.
@@ -129,9 +129,9 @@ because they are external to images.
Instead you can use ``--volumes-from`` to start a new container that can access the
data-container's volume. For example::
- $ sudo docker run -rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data
+ $ sudo docker run --rm --volumes-from DATA -v $(pwd):/backup busybox tar cvf /backup/backup.tar /data
-* ``-rm`` - remove the container when it exits
+* ``--rm`` - remove the container when it exits
* ``--volumes-from DATA`` - attach to the volumes shared by the ``DATA`` container
* ``-v $(pwd):/backup`` - bind mount the current directory into the container; to write the tar file to
* ``busybox`` - a small simpler image - good for quick maintenance
@@ -140,13 +140,13 @@ data-container's volume. For example::
Then to restore to the same container, or another that you've made elsewhere::
# create a new data container
- $ sudo docker run -v /data -name DATA2 busybox true
+ $ sudo docker run -v /data --name DATA2 busybox true
# untar the backup files into the new container's data volume
- $ sudo docker run -rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar
+ $ sudo docker run --rm --volumes-from DATA2 -v $(pwd):/backup busybox tar xvf /backup/backup.tar
data/
data/sven.txt
# compare to the original container
- $ sudo docker run -rm --volumes-from DATA -v `pwd`:/backup busybox ls /data
+ $ sudo docker run --rm --volumes-from DATA -v `pwd`:/backup busybox ls /data
sven.txt
diff --git a/docs/sources/use/workingwithrepository.rst b/docs/sources/use/workingwithrepository.rst
index cbde932cde..c126361f8c 100644
--- a/docs/sources/use/workingwithrepository.rst
+++ b/docs/sources/use/workingwithrepository.rst
@@ -74,7 +74,7 @@ name or description:
Search the docker index for images
- -notrunc=false: Don't truncate output
+ --no-trunc=false: Don't truncate output
$ sudo docker search centos
Found 25 results matching your query ("centos")
NAME DESCRIPTION
diff --git a/execdriver/native/default_template.go b/execdriver/native/default_template.go
deleted file mode 100644
index 6e7d597b7b..0000000000
--- a/execdriver/native/default_template.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package native
-
-import (
- "fmt"
- "github.com/dotcloud/docker/execdriver"
- "github.com/dotcloud/docker/pkg/cgroups"
- "github.com/dotcloud/docker/pkg/libcontainer"
- "os"
-)
-
-// createContainer populates and configures the container type with the
-// data provided by the execdriver.Command
-func createContainer(c *execdriver.Command) *libcontainer.Container {
- container := getDefaultTemplate()
-
- container.Hostname = getEnv("HOSTNAME", c.Env)
- container.Tty = c.Tty
- container.User = c.User
- container.WorkingDir = c.WorkingDir
- container.Env = c.Env
-
- if c.Network != nil {
- container.Networks = []*libcontainer.Network{
- {
- Mtu: c.Network.Mtu,
- Address: fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen),
- Gateway: c.Network.Gateway,
- Type: "veth",
- Context: libcontainer.Context{
- "prefix": "veth",
- "bridge": c.Network.Bridge,
- },
- },
- }
- }
-
- container.Cgroups.Name = c.ID
- if c.Privileged {
- container.Capabilities = nil
- container.Cgroups.DeviceAccess = true
- container.Context["apparmor_profile"] = "unconfined"
- }
- if c.Resources != nil {
- container.Cgroups.CpuShares = c.Resources.CpuShares
- container.Cgroups.Memory = c.Resources.Memory
- container.Cgroups.MemorySwap = c.Resources.MemorySwap
- }
- // check to see if we are running in ramdisk to disable pivot root
- container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
-
- return container
-}
-
-// getDefaultTemplate returns the docker default for
-// the libcontainer configuration file
-func getDefaultTemplate() *libcontainer.Container {
- return &libcontainer.Container{
- Capabilities: libcontainer.Capabilities{
- libcontainer.GetCapability("SETPCAP"),
- libcontainer.GetCapability("SYS_MODULE"),
- libcontainer.GetCapability("SYS_RAWIO"),
- libcontainer.GetCapability("SYS_PACCT"),
- libcontainer.GetCapability("SYS_ADMIN"),
- libcontainer.GetCapability("SYS_NICE"),
- libcontainer.GetCapability("SYS_RESOURCE"),
- libcontainer.GetCapability("SYS_TIME"),
- libcontainer.GetCapability("SYS_TTY_CONFIG"),
- libcontainer.GetCapability("MKNOD"),
- libcontainer.GetCapability("AUDIT_WRITE"),
- libcontainer.GetCapability("AUDIT_CONTROL"),
- libcontainer.GetCapability("MAC_OVERRIDE"),
- libcontainer.GetCapability("MAC_ADMIN"),
- libcontainer.GetCapability("NET_ADMIN"),
- },
- Namespaces: libcontainer.Namespaces{
- libcontainer.GetNamespace("NEWNS"),
- libcontainer.GetNamespace("NEWUTS"),
- libcontainer.GetNamespace("NEWIPC"),
- libcontainer.GetNamespace("NEWPID"),
- libcontainer.GetNamespace("NEWNET"),
- },
- Cgroups: &cgroups.Cgroup{
- Parent: "docker",
- DeviceAccess: false,
- },
- Context: libcontainer.Context{
- "apparmor_profile": "docker-default",
- },
- }
-}
diff --git a/graph.go b/graph/graph.go
index 43af2c278a..5b08ce3cf1 100644
--- a/graph.go
+++ b/graph/graph.go
@@ -1,11 +1,12 @@
-package docker
+package graph
import (
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/dockerversion"
- "github.com/dotcloud/docker/graphdriver"
+ "github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@@ -79,20 +80,20 @@ func (graph *Graph) Exists(id string) bool {
}
// Get returns the image with the given id, or an error if the image doesn't exist.
-func (graph *Graph) Get(name string) (*Image, error) {
+func (graph *Graph) Get(name string) (*image.Image, error) {
id, err := graph.idIndex.Get(name)
if err != nil {
return nil, err
}
// FIXME: return nil when the image doesn't exist, instead of an error
- img, err := LoadImage(graph.imageRoot(id))
+ img, err := image.LoadImage(graph.ImageRoot(id))
if err != nil {
return nil, err
}
if img.ID != id {
return nil, fmt.Errorf("Image stored at '%s' has wrong id '%s'", id, img.ID)
}
- img.graph = graph
+ img.SetGraph(graph)
if img.Size < 0 {
rootfs, err := graph.driver.Get(img.ID)
@@ -119,7 +120,7 @@ func (graph *Graph) Get(name string) (*Image, error) {
}
img.Size = size
- if err := img.SaveSize(graph.imageRoot(id)); err != nil {
+ if err := img.SaveSize(graph.ImageRoot(id)); err != nil {
return nil, err
}
}
@@ -127,9 +128,9 @@ func (graph *Graph) Get(name string) (*Image, error) {
}
// Create creates a new image and registers it in the graph.
-func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container, comment, author string, config *runconfig.Config) (*Image, error) {
- img := &Image{
- ID: GenerateID(),
+func (graph *Graph) Create(layerData archive.ArchiveReader, containerID, containerImage, comment, author string, containerConfig, config *runconfig.Config) (*image.Image, error) {
+ img := &image.Image{
+ ID: utils.GenerateRandomID(),
Comment: comment,
Created: time.Now().UTC(),
DockerVersion: dockerversion.VERSION,
@@ -138,10 +139,10 @@ func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container
Architecture: runtime.GOARCH,
OS: runtime.GOOS,
}
- if container != nil {
- img.Parent = container.Image
- img.Container = container.ID
- img.ContainerConfig = *container.Config
+ if containerID != "" {
+ img.Parent = containerImage
+ img.Container = containerID
+ img.ContainerConfig = *containerConfig
}
if err := graph.Register(nil, layerData, img); err != nil {
return nil, err
@@ -151,7 +152,7 @@ func (graph *Graph) Create(layerData archive.ArchiveReader, container *Container
// Register imports a pre-existing image into the graph.
// FIXME: pass img as first argument
-func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, img *Image) (err error) {
+func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, img *image.Image) (err error) {
defer func() {
// If any error occurs, remove the new dir from the driver.
// Don't check for errors since the dir might not have been created.
@@ -160,7 +161,7 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, i
graph.driver.Remove(img.ID)
}
}()
- if err := ValidateID(img.ID); err != nil {
+ if err := utils.ValidateID(img.ID); err != nil {
return err
}
// (This is a convenience to save time. Race conditions are taken care of by os.Rename)
@@ -171,7 +172,7 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, i
// Ensure that the image root does not exist on the filesystem
// when it is not registered in the graph.
// This is common when you switch from one graph driver to another
- if err := os.RemoveAll(graph.imageRoot(img.ID)); err != nil && !os.IsNotExist(err) {
+ if err := os.RemoveAll(graph.ImageRoot(img.ID)); err != nil && !os.IsNotExist(err) {
return err
}
@@ -188,7 +189,7 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, i
}
// Create root filesystem in the driver
- if err := graph.driver.Create(img.ID, img.Parent); err != nil {
+ if err := graph.driver.Create(img.ID, img.Parent, ""); err != nil {
return fmt.Errorf("Driver %s failed to create image rootfs %s: %s", graph.driver, img.ID, err)
}
// Mount the root filesystem so we can apply the diff/layer
@@ -197,12 +198,12 @@ func (graph *Graph) Register(jsonData []byte, layerData archive.ArchiveReader, i
return fmt.Errorf("Driver %s failed to get image rootfs %s: %s", graph.driver, img.ID, err)
}
defer graph.driver.Put(img.ID)
- img.graph = graph
- if err := StoreImage(img, jsonData, layerData, tmp, rootfs); err != nil {
+ img.SetGraph(graph)
+ if err := image.StoreImage(img, jsonData, layerData, tmp, rootfs); err != nil {
return err
}
// Commit
- if err := os.Rename(tmp, graph.imageRoot(img.ID)); err != nil {
+ if err := os.Rename(tmp, graph.ImageRoot(img.ID)); err != nil {
return err
}
graph.idIndex.Add(img.ID)
@@ -233,7 +234,7 @@ func (graph *Graph) TempLayerArchive(id string, compression archive.Compression,
// Mktemp creates a temporary sub-directory inside the graph's filesystem.
func (graph *Graph) Mktemp(id string) (string, error) {
- dir := path.Join(graph.Root, "_tmp", GenerateID())
+ dir := path.Join(graph.Root, "_tmp", utils.GenerateRandomID())
if err := os.MkdirAll(dir, 0700); err != nil {
return "", err
}
@@ -246,7 +247,7 @@ func (graph *Graph) Mktemp(id string) (string, error) {
//
// This extra layer is used by all containers as the top-most ro layer. It protects
// the container from unwanted side-effects on the rw layer.
-func setupInitLayer(initLayer string) error {
+func SetupInitLayer(initLayer string) error {
for pth, typ := range map[string]string{
"/dev/pts": "dir",
"/dev/shm": "dir",
@@ -258,6 +259,7 @@ func setupInitLayer(initLayer string) error {
"/etc/hosts": "file",
"/etc/hostname": "file",
"/dev/console": "file",
+ "/etc/mtab": "/proc/mounts",
// "var/run": "dir",
// "var/lock": "dir",
} {
@@ -284,6 +286,10 @@ func setupInitLayer(initLayer string) error {
return err
}
f.Close()
+ default:
+ if err := os.Symlink(typ, path.Join(initLayer, pth)); err != nil {
+ return err
+ }
}
} else {
return err
@@ -320,7 +326,7 @@ func (graph *Graph) Delete(name string) error {
return err
}
graph.idIndex.Delete(id)
- err = os.Rename(graph.imageRoot(id), tmp)
+ err = os.Rename(graph.ImageRoot(id), tmp)
if err != nil {
return err
}
@@ -331,9 +337,9 @@ func (graph *Graph) Delete(name string) error {
}
// Map returns a list of all images in the graph, addressable by ID.
-func (graph *Graph) Map() (map[string]*Image, error) {
- images := make(map[string]*Image)
- err := graph.walkAll(func(image *Image) {
+func (graph *Graph) Map() (map[string]*image.Image, error) {
+ images := make(map[string]*image.Image)
+ err := graph.walkAll(func(image *image.Image) {
images[image.ID] = image
})
if err != nil {
@@ -344,7 +350,7 @@ func (graph *Graph) Map() (map[string]*Image, error) {
// walkAll iterates over each image in the graph, and passes it to a handler.
// The walking order is undetermined.
-func (graph *Graph) walkAll(handler func(*Image)) error {
+func (graph *Graph) walkAll(handler func(*image.Image)) error {
files, err := ioutil.ReadDir(graph.Root)
if err != nil {
return err
@@ -364,17 +370,17 @@ func (graph *Graph) walkAll(handler func(*Image)) error {
// If an image of id ID has 3 children images, then the value for key ID
// will be a list of 3 images.
// If an image has no children, it will not have an entry in the table.
-func (graph *Graph) ByParent() (map[string][]*Image, error) {
- byParent := make(map[string][]*Image)
- err := graph.walkAll(func(image *Image) {
- parent, err := graph.Get(image.Parent)
+func (graph *Graph) ByParent() (map[string][]*image.Image, error) {
+ byParent := make(map[string][]*image.Image)
+ err := graph.walkAll(func(img *image.Image) {
+ parent, err := graph.Get(img.Parent)
if err != nil {
return
}
if children, exists := byParent[parent.ID]; exists {
- byParent[parent.ID] = append(children, image)
+ byParent[parent.ID] = append(children, img)
} else {
- byParent[parent.ID] = []*Image{image}
+ byParent[parent.ID] = []*image.Image{img}
}
})
return byParent, err
@@ -382,13 +388,13 @@ func (graph *Graph) ByParent() (map[string][]*Image, error) {
// Heads returns all heads in the graph, keyed by id.
// A head is an image which is not the parent of another image in the graph.
-func (graph *Graph) Heads() (map[string]*Image, error) {
- heads := make(map[string]*Image)
+func (graph *Graph) Heads() (map[string]*image.Image, error) {
+ heads := make(map[string]*image.Image)
byParent, err := graph.ByParent()
if err != nil {
return nil, err
}
- err = graph.walkAll(func(image *Image) {
+ err = graph.walkAll(func(image *image.Image) {
// If it's not in the byParent lookup table, then
// it's not a parent -> so it's a head!
if _, exists := byParent[image.ID]; !exists {
@@ -398,7 +404,7 @@ func (graph *Graph) Heads() (map[string]*Image, error) {
return heads, err
}
-func (graph *Graph) imageRoot(id string) string {
+func (graph *Graph) ImageRoot(id string) string {
return path.Join(graph.Root, id)
}
diff --git a/tags.go b/graph/tags.go
index 92c32b1ff5..524e1a1f9d 100644
--- a/tags.go
+++ b/graph/tags.go
@@ -1,8 +1,9 @@
-package docker
+package graph
import (
"encoding/json"
"fmt"
+ "github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"os"
@@ -65,7 +66,7 @@ func (store *TagStore) Reload() error {
return nil
}
-func (store *TagStore) LookupImage(name string) (*Image, error) {
+func (store *TagStore) LookupImage(name string) (*image.Image, error) {
// FIXME: standardize on returning nil when the image doesn't exist, and err for everything else
// (so we can pass all errors here)
repos, tag := utils.ParseRepositoryTag(name)
@@ -195,7 +196,7 @@ func (store *TagStore) Get(repoName string) (Repository, error) {
return nil, nil
}
-func (store *TagStore) GetImage(repoName, tagOrID string) (*Image, error) {
+func (store *TagStore) GetImage(repoName, tagOrID string) (*image.Image, error) {
repo, err := store.Get(repoName)
if err != nil {
return nil, err
diff --git a/tags_unit_test.go b/graph/tags_unit_test.go
index b6236280a8..17773912cf 100644
--- a/tags_unit_test.go
+++ b/graph/tags_unit_test.go
@@ -1,8 +1,13 @@
-package docker
+package graph
import (
- "github.com/dotcloud/docker/graphdriver"
+ "bytes"
+ "github.com/dotcloud/docker/image"
+ "github.com/dotcloud/docker/runtime/graphdriver"
+ _ "github.com/dotcloud/docker/runtime/graphdriver/vfs" // import the vfs driver so it is used in the tests
"github.com/dotcloud/docker/utils"
+ "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+ "io"
"os"
"path"
"testing"
@@ -13,6 +18,23 @@ const (
testImageID = "foo"
)
+func fakeTar() (io.Reader, error) {
+ content := []byte("Hello world!\n")
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
+ hdr := new(tar.Header)
+ hdr.Size = int64(len(content))
+ hdr.Name = name
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ tw.Write([]byte(content))
+ }
+ tw.Close()
+ return buf, nil
+}
+
func mkTestTagStore(root string, t *testing.T) *TagStore {
driver, err := graphdriver.New(root)
if err != nil {
@@ -30,7 +52,7 @@ func mkTestTagStore(root string, t *testing.T) *TagStore {
if err != nil {
t.Fatal(err)
}
- img := &Image{ID: testImageID}
+ img := &image.Image{ID: testImageID}
// FIXME: this fails on Darwin with:
// tags_unit_test.go:36: mkdir /var/folders/7g/b3ydb5gx4t94ndr_cljffbt80000gq/T/docker-test569b-tRunner-075013689/vfs/dir/foo/etc/postgres: permission denied
if err := graph.Register(nil, archive, img); err != nil {
diff --git a/hack/PACKAGERS.md b/hack/PACKAGERS.md
index 5dcb120689..7170c5ad25 100644
--- a/hack/PACKAGERS.md
+++ b/hack/PACKAGERS.md
@@ -157,6 +157,33 @@ AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
export DOCKER_BUILDTAGS='apparmor'
```
+There are build tags for disabling graphdrivers as well. By default, support
+for all graphdrivers is built in.
+
+To disable btrfs:
+```bash
+export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
+```
+
+To disable devicemapper:
+```bash
+export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
+```
+
+To disable aufs:
+```bash
+export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
+```
+
+NOTE: if you need to set more than one build tag, space separate them.
+
+If you're building a binary that may need to be used on platforms that include
+SELinux, you will need to set `DOCKER_BUILDTAGS` as follows:
+
+```bash
+export DOCKER_BUILDTAGS='selinux'
+```
+
### Static Daemon
If it is feasible within the constraints of your distribution, you should
@@ -239,6 +266,12 @@ installed and available at runtime:
* iptables version 1.4 or later
* XZ Utils version 4.9 or later
+* a [properly
+ mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
+ cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point
+ [is](https://github.com/dotcloud/docker/issues/2683)
+ [not](https://github.com/dotcloud/docker/issues/3485)
+ [sufficient](https://github.com/dotcloud/docker/issues/4568))
Additionally, the Docker client needs the following software to be installed and
available at runtime:
diff --git a/hack/RELEASE-CHECKLIST.md b/hack/RELEASE-CHECKLIST.md
index 84a0ff70e1..31f0adb757 100644
--- a/hack/RELEASE-CHECKLIST.md
+++ b/hack/RELEASE-CHECKLIST.md
@@ -6,6 +6,21 @@ So you're in charge of a Docker release? Cool. Here's what to do.
If your experience deviates from this document, please document the changes
to keep it up-to-date.
+It is important to note that this document assumes that the git remote in your
+repository that corresponds to "https://github.com/dotcloud/docker" is named
+"origin". If yours is not (for example, if you've chosen to name it "upstream"
+or something similar instead), be sure to adjust the listed snippets for your
+local environment accordingly. If you are not sure what your upstream remote is
+named, use a command like `git remote -v` to find out.
+
+If you don't have an upstream remote, you can add one easily using something
+like:
+
+```bash
+git remote add origin https://github.com/dotcloud/docker.git
+git remote add YOURUSER git@github.com:YOURUSER/docker.git
+```
+
### 1. Pull from master and create a release branch
```bash
@@ -124,7 +139,7 @@ docker run \
-e AWS_ACCESS_KEY \
-e AWS_SECRET_KEY \
-e GPG_PASSPHRASE \
- -i -t -privileged \
+ -i -t --privileged \
docker \
hack/release.sh
```
@@ -158,12 +173,28 @@ docker run \
-e AWS_ACCESS_KEY \
-e AWS_SECRET_KEY \
-e GPG_PASSPHRASE \
- -i -t -privileged \
+ -i -t --privileged \
docker \
hack/release.sh
```
-### 9. Apply tag
+### 9. Breakathon
+
+Spend several days along with the community explicitly investing time and
+resources to try and break Docker in every possible way, documenting any
+findings pertinent to the release. This time should be spent testing and
+finding ways in which the release might have caused various features or upgrade
+environments to have issues, not coding. During this time, the release is in
+code freeze, and any additional code changes will be pushed out to the next
+release.
+
+It should include various levels of breaking Docker, beyond just using Docker
+by the book.
+
+Any issues found may still remain issues for this release, but they should be
+documented and give appropriate warnings.
+
+### 10. Apply tag
```bash
git tag -a $VERSION -m $VERSION bump_$VERSION
@@ -173,22 +204,26 @@ git push origin $VERSION
It's very important that we don't make the tag until after the official
release is uploaded to get.docker.io!
-### 10. Go to github to merge the `bump_$VERSION` branch into release
+### 11. Go to github to merge the `bump_$VERSION` branch into release
-Don't delete the leftover branch just yet, as we will need it for the next step.
+Don't forget to push that pretty blue button to delete the leftover
+branch afterwards!
-### 11. Go to github to merge the `bump_$VERSION` branch into docs
+### 12. Update the docs branch
-Merging the pull request to the docs branch will automatically
-update the documentation on the "latest" revision of the docs. You
-should see the updated docs 5-10 minutes after the merge. The docs
-will appear on http://docs.docker.io/. For more information about
-documentation releases, see `docs/README.md`.
+```bash
+git checkout docs
+git fetch
+git reset --hard origin/release
+git push -f origin docs
+```
-Don't forget to push that pretty blue button to delete the leftover
-branch afterwards!
+Updating the docs branch will automatically update the documentation on the
+"latest" revision of the docs. You should see the updated docs 5-10 minutes
+after the merge. The docs will appear on http://docs.docker.io/. For more
+information about documentation releases, see `docs/README.md`.
-### 12. Create a new pull request to merge release back into master
+### 13. Create a new pull request to merge release back into master
```bash
git checkout master
@@ -206,7 +241,7 @@ echo "https://github.com/dotcloud/docker/compare/master...merge_release_$VERSION
Again, get two maintainers to validate, then merge, then push that pretty
blue button to delete your branch.
-### 13. Rejoice and Evangelize!
+### 14. Rejoice and Evangelize!
Congratulations! You're done.
diff --git a/hack/dind b/hack/dind
index eff656b0e0..94147f5324 100755
--- a/hack/dind
+++ b/hack/dind
@@ -5,7 +5,7 @@
# See the blog post: http://blog.docker.io/2013/09/docker-can-now-run-within-docker/
#
# This script should be executed inside a docker container in privilieged mode
-# ('docker run -privileged', introduced in docker 0.6).
+# ('docker run --privileged', introduced in docker 0.6).
# Usage: dind CMD [ARG...]
@@ -17,7 +17,7 @@ CGROUP=/sys/fs/cgroup
mountpoint -q $CGROUP ||
mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
- echo "Could not make a tmpfs mount. Did you use -privileged?"
+ echo "Could not make a tmpfs mount. Did you use --privileged?"
exit 1
}
diff --git a/hack/infrastructure/docker-ci/Dockerfile b/hack/infrastructure/docker-ci/Dockerfile
deleted file mode 100644
index fd795f4d45..0000000000
--- a/hack/infrastructure/docker-ci/Dockerfile
+++ /dev/null
@@ -1,29 +0,0 @@
-# DOCKER-VERSION: 0.7.6
-# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
-# DESCRIPTION: docker-ci continuous integration service
-# TO_BUILD: docker build -rm -t docker-ci/docker-ci .
-# TO_RUN: docker run -rm -i -t -p 8000:80 -p 2222:22 -v /run:/var/socket \
-# -v /data/docker-ci:/data/docker-ci docker-ci/docker-ci
-
-from ubuntu:12.04
-maintainer Daniel Mizyrycki <daniel@dotcloud.com>
-
-ENV DEBIAN_FRONTEND noninteractive
-RUN echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \
- /etc/apt/sources.list; apt-get update
-RUN apt-get install -y --no-install-recommends python2.7 python-dev \
- libevent-dev git supervisor ssh rsync less vim sudo gcc wget nginx
-RUN cd /tmp; wget http://python-distribute.org/distribute_setup.py
-RUN cd /tmp; python distribute_setup.py; easy_install pip; rm distribute_setup.py
-
-RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
-RUN echo 'deb http://get.docker.io/ubuntu docker main' > \
- /etc/apt/sources.list.d/docker.list; apt-get update
-RUN apt-get install -y lxc-docker-0.8.0
-RUN pip install SQLAlchemy==0.7.10 buildbot buildbot-slave pyopenssl boto
-RUN ln -s /var/socket/docker.sock /run/docker.sock
-
-ADD . /docker-ci
-RUN /docker-ci/setup.sh
-
-ENTRYPOINT ["supervisord", "-n"]
diff --git a/hack/infrastructure/docker-ci/MAINTAINERS b/hack/infrastructure/docker-ci/MAINTAINERS
deleted file mode 100644
index 5dfc881420..0000000000
--- a/hack/infrastructure/docker-ci/MAINTAINERS
+++ /dev/null
@@ -1 +0,0 @@
-Daniel Mizyrycki <daniel@dotcloud.com> (@mzdaniel)
diff --git a/hack/infrastructure/docker-ci/README.rst b/hack/infrastructure/docker-ci/README.rst
deleted file mode 100644
index 3e429ffdd5..0000000000
--- a/hack/infrastructure/docker-ci/README.rst
+++ /dev/null
@@ -1,65 +0,0 @@
-=========
-docker-ci
-=========
-
-This directory contains docker-ci continuous integration system.
-As expected, it is a fully dockerized and deployed using
-docker-container-runner.
-docker-ci is based on Buildbot, a continuous integration system designed
-to automate the build/test cycle. By automatically rebuilding and testing
-the tree each time something has changed, build problems are pinpointed
-quickly, before other developers are inconvenienced by the failure.
-We are running buildbot at Rackspace to verify docker and docker-registry
-pass tests, and check for coverage code details.
-
-docker-ci instance is at https://docker-ci.docker.io/waterfall
-
-Inside docker-ci container we have the following directory structure:
-
-/docker-ci source code of docker-ci
-/data/backup/docker-ci/ daily backup (replicated over S3)
-/data/docker-ci/coverage/{docker,docker-registry}/ mapped to host volumes
-/data/buildbot/{master,slave}/ main docker-ci buildbot config and database
-/var/socket/{docker.sock} host volume access to docker socket
-
-
-Production deployment
-=====================
-
-::
-
- # Clone docker-ci repository
- git clone https://github.com/dotcloud/docker
- cd docker/hack/infrastructure/docker-ci
-
- export DOCKER_PROD=[PRODUCTION_SERVER_IP]
-
- # Create data host volume. (only once)
- docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \
- mkdir -p /data/docker-ci/coverage/docker
- docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \
- mkdir -p /data/docker-ci/coverage/docker-registry
- docker -H $DOCKER_PROD run -v /home:/data ubuntu:12.04 \
- chown -R 1000.1000 /data/docker-ci
-
- # dcr deployment. Define credentials and special environment dcr variables
- # ( retrieved at /hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml )
- export WEB_USER=[DOCKER-CI-WEBSITE-USERNAME]
- export WEB_IRC_PWD=[DOCKER-CI-WEBSITE-PASSWORD]
- export BUILDBOT_PWD=[BUILDSLAVE_PASSWORD]
- export AWS_ACCESS_KEY=[DOCKER_RELEASE_S3_ACCESS]
- export AWS_SECRET_KEY=[DOCKER_RELEASE_S3_SECRET]
- export GPG_PASSPHRASE=[DOCKER_RELEASE_PASSPHRASE]
- export BACKUP_AWS_ID=[S3_BUCKET_CREDENTIAL_ACCESS]
- export BACKUP_AWS_SECRET=[S3_BUCKET_CREDENTIAL_SECRET]
- export SMTP_USER=[MAILGUN_SMTP_USERNAME]
- export SMTP_PWD=[MAILGUN_SMTP_PASSWORD]
- export EMAIL_RCP=[EMAIL_FOR_BUILD_ERRORS]
-
- # Build docker-ci and testbuilder docker images
- docker -H $DOCKER_PROD build -rm -t docker-ci/docker-ci .
- (cd testbuilder; docker -H $DOCKER_PROD build -rm -t docker-ci/testbuilder .)
-
- # Run docker-ci container ( assuming no previous container running )
- (cd dcr/prod; dcr docker-ci.yml start)
- (cd dcr/prod; dcr docker-ci.yml register docker-ci.docker.io)
diff --git a/hack/infrastructure/docker-ci/VERSION b/hack/infrastructure/docker-ci/VERSION
deleted file mode 100644
index b49b25336d..0000000000
--- a/hack/infrastructure/docker-ci/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-0.5.6
diff --git a/hack/infrastructure/docker-ci/buildbot/github.py b/hack/infrastructure/docker-ci/buildbot/github.py
deleted file mode 100644
index 5316e13282..0000000000
--- a/hack/infrastructure/docker-ci/buildbot/github.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# This file is part of Buildbot. Buildbot is free software: you can
-# redistribute it and/or modify it under the terms of the GNU General Public
-# License as published by the Free Software Foundation, version 2.
-#
-# This program is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc., 51
-# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Copyright Buildbot Team Members
-
-#!/usr/bin/env python
-"""
-github_buildbot.py is based on git_buildbot.py
-
-github_buildbot.py will determine the repository information from the JSON
-HTTP POST it receives from github.com and build the appropriate repository.
-If your github repository is private, you must add a ssh key to the github
-repository for the user who initiated the build on the buildslave.
-
-"""
-
-import re
-import datetime
-from twisted.python import log
-import calendar
-
-try:
- import json
- assert json
-except ImportError:
- import simplejson as json
-
-# python is silly about how it handles timezones
-class fixedOffset(datetime.tzinfo):
- """
- fixed offset timezone
- """
- def __init__(self, minutes, hours, offsetSign = 1):
- self.minutes = int(minutes) * offsetSign
- self.hours = int(hours) * offsetSign
- self.offset = datetime.timedelta(minutes = self.minutes,
- hours = self.hours)
-
- def utcoffset(self, dt):
- return self.offset
-
- def dst(self, dt):
- return datetime.timedelta(0)
-
-def convertTime(myTestTimestamp):
- #"1970-01-01T00:00:00+00:00"
- # Normalize myTestTimestamp
- if myTestTimestamp[-1] == 'Z':
- myTestTimestamp = myTestTimestamp[:-1] + '-00:00'
- matcher = re.compile(r'(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)([-+])(\d\d):(\d\d)')
- result = matcher.match(myTestTimestamp)
- (year, month, day, hour, minute, second, offsetsign, houroffset, minoffset) = \
- result.groups()
- if offsetsign == '+':
- offsetsign = 1
- else:
- offsetsign = -1
-
- offsetTimezone = fixedOffset( minoffset, houroffset, offsetsign )
- myDatetime = datetime.datetime( int(year),
- int(month),
- int(day),
- int(hour),
- int(minute),
- int(second),
- 0,
- offsetTimezone)
- return calendar.timegm( myDatetime.utctimetuple() )
-
-def getChanges(request, options = None):
- """
- Reponds only to POST events and starts the build process
-
- :arguments:
- request
- the http request object
- """
- payload = json.loads(request.args['payload'][0])
- import urllib,datetime
- fname = str(datetime.datetime.now()).replace(' ','_').replace(':','-')[:19]
- # Github event debug
- # open('github_{0}.json'.format(fname),'w').write(json.dumps(json.loads(urllib.unquote(request.args['payload'][0])), sort_keys = True, indent = 2))
-
- if 'pull_request' in payload:
- user = payload['pull_request']['user']['login']
- repo = payload['pull_request']['head']['repo']['name']
- repo_url = payload['pull_request']['head']['repo']['html_url']
- else:
- user = payload['repository']['owner']['name']
- repo = payload['repository']['name']
- repo_url = payload['repository']['url']
- project = request.args.get('project', None)
- if project:
- project = project[0]
- elif project is None:
- project = ''
- # This field is unused:
- #private = payload['repository']['private']
- changes = process_change(payload, user, repo, repo_url, project)
- log.msg("Received %s changes from github" % len(changes))
- return (changes, 'git')
-
-def process_change(payload, user, repo, repo_url, project):
- """
- Consumes the JSON as a python object and actually starts the build.
-
- :arguments:
- payload
- Python Object that represents the JSON sent by GitHub Service
- Hook.
- """
- changes = []
-
- newrev = payload['after'] if 'after' in payload else payload['pull_request']['head']['sha']
- refname = payload['ref'] if 'ref' in payload else payload['pull_request']['head']['ref']
-
- # We only care about regular heads, i.e. branches
- match = re.match(r"^(refs\/heads\/|)([^/]+)$", refname)
- if not match:
- log.msg("Ignoring refname `%s': Not a branch" % refname)
- return []
-
- branch = match.groups()[1]
- if re.match(r"^0*$", newrev):
- log.msg("Branch `%s' deleted, ignoring" % branch)
- return []
- else:
- if 'pull_request' in payload:
- if payload['action'] == 'closed':
- log.msg("PR#{} closed, ignoring".format(payload['number']))
- return []
- changes = [{
- 'category' : 'github_pullrequest',
- 'who' : '{0} - PR#{1}'.format(user,payload['number']),
- 'files' : [],
- 'comments' : payload['pull_request']['title'],
- 'revision' : newrev,
- 'when' : convertTime(payload['pull_request']['updated_at']),
- 'branch' : branch,
- 'revlink' : '{0}/commit/{1}'.format(repo_url,newrev),
- 'repository' : repo_url,
- 'project' : project }]
- return changes
- for commit in payload['commits']:
- files = []
- if 'added' in commit:
- files.extend(commit['added'])
- if 'modified' in commit:
- files.extend(commit['modified'])
- if 'removed' in commit:
- files.extend(commit['removed'])
- when = convertTime( commit['timestamp'])
- log.msg("New revision: %s" % commit['id'][:8])
- chdict = dict(
- who = commit['author']['name']
- + " <" + commit['author']['email'] + ">",
- files = files,
- comments = commit['message'],
- revision = commit['id'],
- when = when,
- branch = branch,
- revlink = commit['url'],
- repository = repo_url,
- project = project)
- changes.append(chdict)
- return changes
diff --git a/hack/infrastructure/docker-ci/buildbot/master.cfg b/hack/infrastructure/docker-ci/buildbot/master.cfg
deleted file mode 100644
index 75605da8ab..0000000000
--- a/hack/infrastructure/docker-ci/buildbot/master.cfg
+++ /dev/null
@@ -1,161 +0,0 @@
-import os, re
-from buildbot.buildslave import BuildSlave
-from buildbot.schedulers.forcesched import ForceScheduler
-from buildbot.schedulers.basic import SingleBranchScheduler
-from buildbot.schedulers.timed import Nightly
-from buildbot.changes import filter
-from buildbot.config import BuilderConfig
-from buildbot.process.factory import BuildFactory
-from buildbot.process.properties import Property
-from buildbot.steps.shell import ShellCommand
-from buildbot.status import html, words
-from buildbot.status.web import authz, auth
-from buildbot.status.mail import MailNotifier
-
-
-def ENV(x):
- '''Promote an environment variable for global use returning its value'''
- retval = os.environ.get(x, '')
- globals()[x] = retval
- return retval
-
-
-class TestCommand(ShellCommand):
- '''Extend ShellCommand with optional summary logs'''
- def __init__(self, *args, **kwargs):
- super(TestCommand, self).__init__(*args, **kwargs)
-
- def createSummary(self, log):
- exit_status = re.sub(r'.+\n\+ exit (\d+).+',
- r'\1', log.getText()[-100:], flags=re.DOTALL)
- if exit_status != '0':
- return
- # Infer coverage path from log
- if '+ COVERAGE_PATH' in log.getText():
- path = re.sub(r'.+\+ COVERAGE_PATH=((.+?)-\d+).+',
- r'\2/\1', log.getText(), flags=re.DOTALL)
- url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path)
- self.addURL('coverage', url)
- elif 'COVERAGE_FILE' in log.getText():
- path = re.sub(r'.+\+ COVERAGE_FILE=((.+?)-\d+).+',
- r'\2/\1', log.getText(), flags=re.DOTALL)
- url = '{}coverage/{}/index.html'.format(c['buildbotURL'], path)
- self.addURL('coverage', url)
-
-
-PORT_WEB = 8000 # Buildbot webserver port
-PORT_GITHUB = 8011 # Buildbot github hook port
-PORT_MASTER = 9989 # Port where buildbot master listen buildworkers
-
-BUILDBOT_URL = '//localhost:{}/'.format(PORT_WEB)
-DOCKER_REPO = 'https://github.com/docker-test/docker'
-DOCKER_TEST_ARGV = 'HEAD {}'.format(DOCKER_REPO)
-REGISTRY_REPO = 'https://github.com/docker-test/docker-registry'
-REGISTRY_TEST_ARGV = 'HEAD {}'.format(REGISTRY_REPO)
-if ENV('DEPLOYMENT') == 'staging':
- BUILDBOT_URL = "//docker-ci-stage.docker.io/"
-if ENV('DEPLOYMENT') == 'production':
- BUILDBOT_URL = '//docker-ci.docker.io/'
- DOCKER_REPO = 'https://github.com/dotcloud/docker'
- DOCKER_TEST_ARGV = ''
- REGISTRY_REPO = 'https://github.com/dotcloud/docker-registry'
- REGISTRY_TEST_ARGV = ''
-
-# Credentials set by setup.sh from deployment.py
-ENV('WEB_USER')
-ENV('WEB_IRC_PWD')
-ENV('BUILDBOT_PWD')
-ENV('SMTP_USER')
-ENV('SMTP_PWD')
-ENV('EMAIL_RCP')
-ENV('IRC_CHANNEL')
-
-
-c = BuildmasterConfig = {}
-
-c['title'] = "docker-ci"
-c['titleURL'] = "waterfall"
-c['buildbotURL'] = BUILDBOT_URL
-c['db'] = {'db_url':"sqlite:///state.sqlite"}
-c['slaves'] = [BuildSlave('buildworker', BUILDBOT_PWD)]
-c['slavePortnum'] = PORT_MASTER
-
-
-# Schedulers
-c['schedulers'] = [ForceScheduler(name='trigger', builderNames=[
- 'docker', 'docker-registry', 'nightlyrelease', 'backup'])]
-c['schedulers'] += [SingleBranchScheduler(name="docker", treeStableTimer=None,
- change_filter=filter.ChangeFilter(branch='master',
- repository=DOCKER_REPO), builderNames=['docker'])]
-c['schedulers'] += [SingleBranchScheduler(name="registry", treeStableTimer=None,
- change_filter=filter.ChangeFilter(branch='master',
- repository=REGISTRY_REPO), builderNames=['docker-registry'])]
-c['schedulers'] += [SingleBranchScheduler(name='docker-pr', treeStableTimer=None,
- change_filter=filter.ChangeFilter(category='github_pullrequest',
- project='docker'), builderNames=['docker-pr'])]
-c['schedulers'] += [SingleBranchScheduler(name='docker-registry-pr', treeStableTimer=None,
- change_filter=filter.ChangeFilter(category='github_pullrequest',
- project='docker-registry'), builderNames=['docker-registry-pr'])]
-c['schedulers'] += [Nightly(name='daily', branch=None, builderNames=[
- 'nightlyrelease', 'backup'], hour=7, minute=00)]
-
-
-# Builders
-
-# Backup
-factory = BuildFactory()
-factory.addStep(TestCommand(description='backup', logEnviron=False,
- usePTY=True, command='/docker-ci/tool/backup.py'))
-c['builders'] = [BuilderConfig(name='backup',slavenames=['buildworker'],
- factory=factory)]
-
-# Docker test
-factory = BuildFactory()
-factory.addStep(TestCommand(description='docker', logEnviron=False,
- usePTY=True, command='/docker-ci/dockertest/docker {}'.format(DOCKER_TEST_ARGV)))
-c['builders'] += [BuilderConfig(name='docker',slavenames=['buildworker'],
- factory=factory)]
-
-# Docker pull request test
-factory = BuildFactory()
-factory.addStep(TestCommand(description='docker-pr', logEnviron=False,
- usePTY=True, command=['/docker-ci/dockertest/docker',
- Property('revision'), Property('repository'), Property('branch')]))
-c['builders'] += [BuilderConfig(name='docker-pr',slavenames=['buildworker'],
- factory=factory)]
-
-# docker-registry test
-factory = BuildFactory()
-factory.addStep(TestCommand(description='docker-registry', logEnviron=False,
- usePTY=True, command='/docker-ci/dockertest/docker-registry {}'.format(REGISTRY_TEST_ARGV)))
-c['builders'] += [BuilderConfig(name='docker-registry',slavenames=['buildworker'],
- factory=factory)]
-
-# Docker registry pull request test
-factory = BuildFactory()
-factory.addStep(TestCommand(description='docker-registry-pr', logEnviron=False,
- usePTY=True, command=['/docker-ci/dockertest/docker-registry',
- Property('revision'), Property('repository'), Property('branch')]))
-c['builders'] += [BuilderConfig(name='docker-registry-pr',slavenames=['buildworker'],
- factory=factory)]
-
-# Docker nightly release
-factory = BuildFactory()
-factory.addStep(ShellCommand(description='NightlyRelease',logEnviron=False,
- usePTY=True, command=['/docker-ci/dockertest/nightlyrelease']))
-c['builders'] += [BuilderConfig(name='nightlyrelease',slavenames=['buildworker'],
- factory=factory)]
-
-# Status
-authz_cfg = authz.Authz(auth=auth.BasicAuth([(WEB_USER, WEB_IRC_PWD)]),
- forceBuild='auth')
-c['status'] = [html.WebStatus(http_port=PORT_WEB, authz=authz_cfg)]
-c['status'].append(html.WebStatus(http_port=PORT_GITHUB, allowForce=True,
- change_hook_dialects={ 'github': True }))
-c['status'].append(MailNotifier(fromaddr='docker-test@docker.io',
- sendToInterestedUsers=False, extraRecipients=[EMAIL_RCP],
- mode='failing', relayhost='smtp.mailgun.org', smtpPort=587, useTls=True,
- smtpUser=SMTP_USER, smtpPassword=SMTP_PWD))
-c['status'].append(words.IRC("irc.freenode.net", "dockerqabot",
- channels=[IRC_CHANNEL], password=WEB_IRC_PWD, allowForce=True,
- notify_events={'exception':1, 'successToFailure':1, 'failureToSuccess':1}))
diff --git a/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml b/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml
deleted file mode 100644
index 523535446a..0000000000
--- a/hack/infrastructure/docker-ci/dcr/prod/docker-ci.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-docker-ci:
- image: "docker-ci/docker-ci"
- release_name: "docker-ci-0.5.6"
- ports: ["80","2222:22","8011:8011"]
- register: "80"
- volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"]
- command: []
- env:
- - "DEPLOYMENT=production"
- - "IRC_CHANNEL=docker-testing"
- - "BACKUP_BUCKET=backup-ci"
- - "$WEB_USER"
- - "$WEB_IRC_PWD"
- - "$BUILDBOT_PWD"
- - "$AWS_ACCESS_KEY"
- - "$AWS_SECRET_KEY"
- - "$GPG_PASSPHRASE"
- - "$BACKUP_AWS_ID"
- - "$BACKUP_AWS_SECRET"
- - "$SMTP_USER"
- - "$SMTP_PWD"
- - "$EMAIL_RCP"
diff --git a/hack/infrastructure/docker-ci/dcr/prod/settings.yml b/hack/infrastructure/docker-ci/dcr/prod/settings.yml
deleted file mode 100644
index 9831afa6dd..0000000000
--- a/hack/infrastructure/docker-ci/dcr/prod/settings.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-default:
- hipaches: ['192.168.100.67:6379']
- daemons: ['192.168.100.67:4243']
- use_ssh: False
-
diff --git a/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml b/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml
deleted file mode 100644
index 8eba84825c..0000000000
--- a/hack/infrastructure/docker-ci/dcr/stage/docker-ci.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-docker-ci:
- image: "docker-ci/docker-ci"
- release_name: "docker-ci-stage"
- ports: ["80","2222:22","8011:8011"]
- register: "80"
- volumes: ["/run:/var/socket","/home/docker-ci:/data/docker-ci"]
- command: []
- env:
- - "DEPLOYMENT=staging"
- - "IRC_CHANNEL=docker-testing-staging"
- - "BACKUP_BUCKET=ci-backup-stage"
- - "$BACKUP_AWS_ID"
- - "$BACKUP_AWS_SECRET"
- - "$WEB_USER"
- - "$WEB_IRC_PWD"
- - "$BUILDBOT_PWD"
- - "$AWS_ACCESS_KEY"
- - "$AWS_SECRET_KEY"
- - "$GPG_PASSPHRASE"
- - "$SMTP_USER"
- - "$SMTP_PWD"
- - "$EMAIL_RCP"
diff --git a/hack/infrastructure/docker-ci/dcr/stage/settings.yml b/hack/infrastructure/docker-ci/dcr/stage/settings.yml
deleted file mode 100644
index a7d37acff3..0000000000
--- a/hack/infrastructure/docker-ci/dcr/stage/settings.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-default:
- hipaches: ['192.168.100.65:6379']
- daemons: ['192.168.100.65:4243']
- use_ssh: False
-
diff --git a/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh b/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh
deleted file mode 100755
index fdacc290b4..0000000000
--- a/hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-
-export PATH='/go/bin':$PATH
-export DOCKER_PATH='/go/src/github.com/dotcloud/docker'
-
-# Signal coverage report name, parsed by docker-ci
-set -x
-COVERAGE_PATH=$(date +"docker-%Y%m%d%H%M%S")
-set +x
-
-REPORTS="/data/$COVERAGE_PATH"
-INDEX="$REPORTS/index.html"
-
-# Test docker
-cd $DOCKER_PATH
-./hack/make.sh test; exit_status=$?
-PROFILE_PATH="$(ls -d $DOCKER_PATH/bundles/* | sed -n '$ p')/test/coverprofiles"
-
-if [ "$exit_status" -eq "0" ]; then
- # Download coverage dependencies
- go get github.com/axw/gocov/gocov
- go get -u github.com/matm/gocov-html
-
- # Create coverage report
- mkdir -p $REPORTS
- cd $PROFILE_PATH
- cat > $INDEX << "EOF"
-<!DOCTYPE html><head><meta charset="utf-8">
-<script type="text/javascript" src="//tablesorter.com/jquery-latest.js"></script>
-<script type="text/javascript" src="//tablesorter.com/__jquery.tablesorter.min.js"></script>
-<script type="text/javascript">$(document).ready(function() {
-$("table").tablesorter({ sortForce: [[1,0]] }); });</script>
-<style>table,th,td{border:1px solid black;}</style>
-<title>Docker Coverage Report</title>
-</head><body>
-<h1><strong>Docker Coverage Report</strong></h1>
-<table class="tablesorter">
-<thead><tr><th>package</th><th>pct</th></tr></thead><tbody>
-EOF
- for profile in *; do
- gocov convert $profile | gocov-html >$REPORTS/$profile.html
- echo "<tr><td><a href=\"${profile}.html\">$profile</a></td><td>" >> $INDEX
- go tool cover -func=$profile | sed -En '$ s/.+\t(.+)/\1/p' >> $INDEX
- echo "</td></tr>" >> $INDEX
- done
- echo "</tbody></table></body></html>" >> $INDEX
-fi
-
-# Signal test and coverage result, parsed by docker-ci
-set -x
-exit $exit_status
-
diff --git a/hack/infrastructure/docker-ci/dockertest/docker b/hack/infrastructure/docker-ci/dockertest/docker
deleted file mode 120000
index e3f094ee63..0000000000
--- a/hack/infrastructure/docker-ci/dockertest/docker
+++ /dev/null
@@ -1 +0,0 @@
-project \ No newline at end of file
diff --git a/hack/infrastructure/docker-ci/dockertest/docker-registry b/hack/infrastructure/docker-ci/dockertest/docker-registry
deleted file mode 120000
index e3f094ee63..0000000000
--- a/hack/infrastructure/docker-ci/dockertest/docker-registry
+++ /dev/null
@@ -1 +0,0 @@
-project \ No newline at end of file
diff --git a/hack/infrastructure/docker-ci/dockertest/nightlyrelease b/hack/infrastructure/docker-ci/dockertest/nightlyrelease
deleted file mode 100755
index 475b088065..0000000000
--- a/hack/infrastructure/docker-ci/dockertest/nightlyrelease
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-if [ "$DEPLOYMENT" == "production" ]; then
- AWS_S3_BUCKET='test.docker.io'
-else
- AWS_S3_BUCKET='get-staging.docker.io'
-fi
-
-docker run -rm -privileged -v /run:/var/socket \
- -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY \
- -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE \
- -e DOCKER_RELEASE=1 -e DEPLOYMENT=$DEPLOYMENT docker-ci/testbuilder docker
-
diff --git a/hack/infrastructure/docker-ci/dockertest/project b/hack/infrastructure/docker-ci/dockertest/project
deleted file mode 100755
index 160f2d5d59..0000000000
--- a/hack/infrastructure/docker-ci/dockertest/project
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/env bash
-set -x
-
-PROJECT_NAME=$(basename $0)
-
-docker run -rm -u sysadmin -e DEPLOYMENT=$DEPLOYMENT -v /run:/var/socket \
- -v /home/docker-ci/coverage/$PROJECT_NAME:/data docker-ci/testbuilder $PROJECT_NAME $1 $2 $3
-
diff --git a/hack/infrastructure/docker-ci/functionaltests/test_index.py b/hack/infrastructure/docker-ci/functionaltests/test_index.py
deleted file mode 100755
index fd002c81e8..0000000000
--- a/hack/infrastructure/docker-ci/functionaltests/test_index.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/python
-
-import os
-username, password = os.environ['DOCKER_CREDS'].split(':')
-
-from selenium import webdriver
-from selenium.webdriver.common.by import By
-from selenium.webdriver.common.keys import Keys
-from selenium.webdriver.support.ui import Select
-from selenium.common.exceptions import NoSuchElementException
-import unittest, time, re
-
-class Docker(unittest.TestCase):
- def setUp(self):
- self.driver = webdriver.PhantomJS()
- self.driver.implicitly_wait(30)
- self.base_url = "http://www.docker.io/"
- self.verificationErrors = []
- self.accept_next_alert = True
-
- def test_docker(self):
- driver = self.driver
- print "Login into {0} as login user {1} ...".format(self.base_url,username)
- driver.get(self.base_url + "/")
- driver.find_element_by_link_text("INDEX").click()
- driver.find_element_by_link_text("login").click()
- driver.find_element_by_id("id_username").send_keys(username)
- driver.find_element_by_id("id_password").send_keys(password)
- print "Checking login user ..."
- driver.find_element_by_css_selector("input[type=\"submit\"]").click()
- try: self.assertEqual("test", driver.find_element_by_css_selector("h3").text)
- except AssertionError as e: self.verificationErrors.append(str(e))
- print "Login user {0} found".format(username)
-
- def is_element_present(self, how, what):
- try: self.driver.find_element(by=how, value=what)
- except NoSuchElementException, e: return False
- return True
-
- def is_alert_present(self):
- try: self.driver.switch_to_alert()
- except NoAlertPresentException, e: return False
- return True
-
- def close_alert_and_get_its_text(self):
- try:
- alert = self.driver.switch_to_alert()
- alert_text = alert.text
- if self.accept_next_alert:
- alert.accept()
- else:
- alert.dismiss()
- return alert_text
- finally: self.accept_next_alert = True
-
- def tearDown(self):
- self.driver.quit()
- self.assertEqual([], self.verificationErrors)
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/hack/infrastructure/docker-ci/functionaltests/test_registry.sh b/hack/infrastructure/docker-ci/functionaltests/test_registry.sh
deleted file mode 100755
index 58642529cc..0000000000
--- a/hack/infrastructure/docker-ci/functionaltests/test_registry.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-
-set -x
-
-# Cleanup
-rm -rf docker-registry
-
-# Setup the environment
-export SETTINGS_FLAVOR=test
-export DOCKER_REGISTRY_CONFIG=config_test.yml
-export PYTHONPATH=$(pwd)/docker-registry/test
-
-# Get latest docker registry
-git clone -q https://github.com/dotcloud/docker-registry.git
-cd docker-registry
-sed -Ei "s#(boto_bucket: ).+#\1_env:S3_BUCKET#" config_test.yml
-
-# Get dependencies
-pip install -q -r requirements.txt
-pip install -q -r test-requirements.txt
-pip install -q tox
-
-# Run registry tests
-tox || exit 1
-python -m unittest discover -p s3.py -s test || exit 1
-python -m unittest discover -p workflow.py -s test
-
diff --git a/hack/infrastructure/docker-ci/nginx/nginx.conf b/hack/infrastructure/docker-ci/nginx/nginx.conf
deleted file mode 100644
index 6649741134..0000000000
--- a/hack/infrastructure/docker-ci/nginx/nginx.conf
+++ /dev/null
@@ -1,12 +0,0 @@
-server {
- listen 80;
- root /data/docker-ci;
-
- location / {
- proxy_pass http://localhost:8000/;
- }
-
- location /coverage {
- root /data/docker-ci;
- }
-}
diff --git a/hack/infrastructure/docker-ci/report/Dockerfile b/hack/infrastructure/docker-ci/report/Dockerfile
deleted file mode 100644
index 32600c4c58..0000000000
--- a/hack/infrastructure/docker-ci/report/Dockerfile
+++ /dev/null
@@ -1,28 +0,0 @@
-# VERSION: 0.22
-# DOCKER-VERSION 0.6.3
-# AUTHOR: Daniel Mizyrycki <daniel@dotcloud.com>
-# DESCRIPTION: Generate docker-ci daily report
-# COMMENTS: The build process is initiated by deployment.py
- Report configuration is passed through ./credentials.json at
-# deployment time.
-# TO_BUILD: docker build -t report .
-# TO_DEPLOY: docker run report
-
-from ubuntu:12.04
-maintainer Daniel Mizyrycki <daniel@dotcloud.com>
-
-env PYTHONPATH /report
-
-
-# Add report dependencies
-run echo 'deb http://archive.ubuntu.com/ubuntu precise main universe' > \
- /etc/apt/sources.list
-run apt-get update; apt-get install -y python2.7 python-pip ssh rsync
-
-# Set San Francisco timezone
-run echo "America/Los_Angeles" >/etc/timezone
-run dpkg-reconfigure --frontend noninteractive tzdata
-
-# Add report code and set default container command
-add . /report
-cmd "/report/report.py"
diff --git a/hack/infrastructure/docker-ci/report/deployment.py b/hack/infrastructure/docker-ci/report/deployment.py
deleted file mode 100755
index 5b2eaf3cab..0000000000
--- a/hack/infrastructure/docker-ci/report/deployment.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/env python
-
-'''Deploy docker-ci report container on Digital Ocean.
-Usage:
- export CONFIG_JSON='
- { "DROPLET_NAME": "Digital_Ocean_dropplet_name",
- "DO_CLIENT_ID": "Digital_Ocean_client_id",
- "DO_API_KEY": "Digital_Ocean_api_key",
- "DOCKER_KEY_ID": "Digital_Ocean_ssh_key_id",
- "DOCKER_CI_KEY_PATH": "docker-ci_private_key_path",
- "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)",
- "DOCKER_CI_ADDRESS" "user@docker-ci_fqdn_server",
- "SMTP_USER": "SMTP_server_user",
- "SMTP_PWD": "SMTP_server_password",
- "EMAIL_SENDER": "Buildbot_mailing_sender",
- "EMAIL_RCP": "Buildbot_mailing_receipient" }'
- python deployment.py
-'''
-
-import re, json, requests, base64
-from fabric import api
-from fabric.api import cd, run, put, sudo
-from os import environ as env
-from time import sleep
-from datetime import datetime
-
-# Populate environment variables
-CONFIG = json.loads(env['CONFIG_JSON'])
-for key in CONFIG:
- env[key] = CONFIG[key]
-
-# Load DOCKER_CI_KEY
-env['DOCKER_CI_KEY'] = open(env['DOCKER_CI_KEY_PATH']).read()
-
-DROPLET_NAME = env.get('DROPLET_NAME','report')
-TIMEOUT = 120 # Seconds before timeout droplet creation
-IMAGE_ID = 1004145 # Docker on Ubuntu 13.04
-REGION_ID = 4 # New York 2
-SIZE_ID = 66 # memory 512MB
-DO_IMAGE_USER = 'root' # Image user on Digital Ocean
-API_URL = 'https://api.digitalocean.com/'
-
-
-class digital_ocean():
-
- def __init__(self, key, client):
- '''Set default API parameters'''
- self.key = key
- self.client = client
- self.api_url = API_URL
-
- def api(self, cmd_path, api_arg={}):
- '''Make api call'''
- api_arg.update({'api_key':self.key, 'client_id':self.client})
- resp = requests.get(self.api_url + cmd_path, params=api_arg).text
- resp = json.loads(resp)
- if resp['status'] != 'OK':
- raise Exception(resp['error_message'])
- return resp
-
- def droplet_data(self, name):
- '''Get droplet data'''
- data = self.api('droplets')
- data = [droplet for droplet in data['droplets']
- if droplet['name'] == name]
- return data[0] if data else {}
-
-def json_fmt(data):
- '''Format json output'''
- return json.dumps(data, sort_keys = True, indent = 2)
-
-
-do = digital_ocean(env['DO_API_KEY'], env['DO_CLIENT_ID'])
-
-# Get DROPLET_NAME data
-data = do.droplet_data(DROPLET_NAME)
-
-# Stop processing if DROPLET_NAME exists on Digital Ocean
-if data:
- print ('Droplet: {} already deployed. Not further processing.'
- .format(DROPLET_NAME))
- exit(1)
-
-# Create droplet
-do.api('droplets/new', {'name':DROPLET_NAME, 'region_id':REGION_ID,
- 'image_id':IMAGE_ID, 'size_id':SIZE_ID,
- 'ssh_key_ids':[env['DOCKER_KEY_ID']]})
-
-# Wait for droplet to be created.
-start_time = datetime.now()
-while (data.get('status','') != 'active' and (
- datetime.now()-start_time).seconds < TIMEOUT):
- data = do.droplet_data(DROPLET_NAME)
- print data['status']
- sleep(3)
-
-# Wait for the machine to boot
-sleep(15)
-
-# Get droplet IP
-ip = str(data['ip_address'])
-print 'droplet: {} ip: {}'.format(DROPLET_NAME, ip)
-
-api.env.host_string = ip
-api.env.user = DO_IMAGE_USER
-api.env.key_filename = env['DOCKER_CI_KEY_PATH']
-
-# Correct timezone
-sudo('echo "America/Los_Angeles" >/etc/timezone')
-sudo('dpkg-reconfigure --frontend noninteractive tzdata')
-
-# Load JSON_CONFIG environment for Dockerfile
-CONFIG_JSON= base64.b64encode(
- '{{"DOCKER_CI_PUB": "{DOCKER_CI_PUB}",'
- ' "DOCKER_CI_KEY": "{DOCKER_CI_KEY}",'
- ' "DOCKER_CI_ADDRESS": "{DOCKER_CI_ADDRESS}",'
- ' "SMTP_USER": "{SMTP_USER}",'
- ' "SMTP_PWD": "{SMTP_PWD}",'
- ' "EMAIL_SENDER": "{EMAIL_SENDER}",'
- ' "EMAIL_RCP": "{EMAIL_RCP}"}}'.format(**env))
-
-run('mkdir -p /data/report')
-put('./', '/data/report')
-with cd('/data/report'):
- run('chmod 700 report.py')
- run('echo "{}" > credentials.json'.format(CONFIG_JSON))
- run('docker build -t report .')
- run('rm credentials.json')
- run("echo -e '30 09 * * * /usr/bin/docker run report\n' |"
- " /usr/bin/crontab -")
diff --git a/hack/infrastructure/docker-ci/report/report.py b/hack/infrastructure/docker-ci/report/report.py
deleted file mode 100755
index 7018cabc27..0000000000
--- a/hack/infrastructure/docker-ci/report/report.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/python
-
-'''CONFIG_JSON is a json encoded string base64 environment variable. It is used
-to clone docker-ci database, generate docker-ci report and submit it by email.
-CONFIG_JSON data comes from the file /report/credentials.json inserted in this
-container by deployment.py:
-
-{ "DOCKER_CI_PUB": "$(cat docker-ci_ssh_public_key.pub)",
- "DOCKER_CI_KEY": "$(cat docker-ci_ssh_private_key.key)",
- "DOCKER_CI_ADDRESS": "user@docker-ci_fqdn_server",
- "SMTP_USER": "SMTP_server_user",
- "SMTP_PWD": "SMTP_server_password",
- "EMAIL_SENDER": "Buildbot_mailing_sender",
- "EMAIL_RCP": "Buildbot_mailing_receipient" } '''
-
-import os, re, json, sqlite3, datetime, base64
-import smtplib
-from datetime import timedelta
-from subprocess import call
-from os import environ as env
-
-TODAY = datetime.date.today()
-
-# Load credentials to the environment
-env['CONFIG_JSON'] = base64.b64decode(open('/report/credentials.json').read())
-
-# Remove SSH private key as it needs more processing
-CONFIG = json.loads(re.sub(r'("DOCKER_CI_KEY".+?"(.+?)",)','',
- env['CONFIG_JSON'], flags=re.DOTALL))
-
-# Populate environment variables
-for key in CONFIG:
- env[key] = CONFIG[key]
-
-# Load SSH private key
-env['DOCKER_CI_KEY'] = re.sub('^.+"DOCKER_CI_KEY".+?"(.+?)".+','\\1',
- env['CONFIG_JSON'],flags=re.DOTALL)
-
-# Prevent rsync to validate host on first connection to docker-ci
-os.makedirs('/root/.ssh')
-open('/root/.ssh/id_rsa','w').write(env['DOCKER_CI_KEY'])
-os.chmod('/root/.ssh/id_rsa',0600)
-open('/root/.ssh/config','w').write('StrictHostKeyChecking no\n')
-
-
-# Sync buildbot database from docker-ci
-call('rsync {}:/data/buildbot/master/state.sqlite .'.format(
- env['DOCKER_CI_ADDRESS']), shell=True)
-
-class SQL:
- def __init__(self, database_name):
- sql = sqlite3.connect(database_name)
- # Use column names as keys for fetchall rows
- sql.row_factory = sqlite3.Row
- sql = sql.cursor()
- self.sql = sql
-
- def query(self,query_statement):
- return self.sql.execute(query_statement).fetchall()
-
-sql = SQL("state.sqlite")
-
-
-class Report():
-
- def __init__(self,period='',date=''):
- self.data = []
- self.period = 'date' if not period else period
- self.date = str(TODAY) if not date else date
- self.compute()
-
- def compute(self):
- '''Compute report'''
- if self.period == 'week':
- self.week_report(self.date)
- else:
- self.date_report(self.date)
-
-
- def date_report(self,date):
- '''Create a date test report'''
- builds = []
- # Get a queryset with all builds from date
- rows = sql.query('SELECT * FROM builds JOIN buildrequests'
- ' WHERE builds.brid=buildrequests.id and'
- ' date(start_time, "unixepoch", "localtime") = "{0}"'
- ' GROUP BY number'.format(date))
- build_names = sorted(set([row['buildername'] for row in rows]))
- # Create a report build line for a given build
- for build_name in build_names:
- tried = len([row['buildername']
- for row in rows if row['buildername'] == build_name])
- fail_tests = [row['buildername'] for row in rows if (
- row['buildername'] == build_name and row['results'] != 0)]
- fail = len(fail_tests)
- fail_details = ''
- fail_pct = int(100.0*fail/tried) if tried != 0 else 100
- builds.append({'name': build_name, 'tried': tried, 'fail': fail,
- 'fail_pct': fail_pct, 'fail_details':fail_details})
- if builds:
- self.data.append({'date': date, 'builds': builds})
-
-
- def week_report(self,date):
- '''Add the week's date test reports to report.data'''
- date = datetime.datetime.strptime(date,'%Y-%m-%d').date()
- last_monday = date - datetime.timedelta(days=date.weekday())
- week_dates = [last_monday + timedelta(days=x) for x in range(7,-1,-1)]
- for date in week_dates:
- self.date_report(str(date))
-
- def render_text(self):
- '''Return rendered report in text format'''
- retval = ''
- fail_tests = {}
- for builds in self.data:
- retval += 'Test date: {0}\n'.format(builds['date'],retval)
- table = ''
- for build in builds['builds']:
- table += ('Build {name:15} Tried: {tried:4} '
- ' Failures: {fail:4} ({fail_pct}%)\n'.format(**build))
- if build['name'] in fail_tests:
- fail_tests[build['name']] += build['fail_details']
- else:
- fail_tests[build['name']] = build['fail_details']
- retval += '{0}\n'.format(table)
- retval += '\n Builds failing'
- for fail_name in fail_tests:
- retval += '\n' + fail_name + '\n'
- for (fail_id,fail_url,rn_tests,nr_errors,log_errors,
- tracelog_errors) in fail_tests[fail_name]:
- retval += fail_url + '\n'
- retval += '\n\n'
- return retval
-
-
-# Send email
-smtp_from = env['EMAIL_SENDER']
-subject = '[docker-ci] Daily report for {}'.format(str(TODAY))
-msg = "From: {}\r\nTo: {}\r\nSubject: {}\r\n\r\n".format(
- smtp_from, env['EMAIL_RCP'], subject)
-msg = msg + Report('week').render_text()
-server = smtplib.SMTP_SSL('smtp.mailgun.org')
-server.login(env['SMTP_USER'], env['SMTP_PWD'])
-server.sendmail(smtp_from, env['EMAIL_RCP'], msg)
diff --git a/hack/infrastructure/docker-ci/setup.sh b/hack/infrastructure/docker-ci/setup.sh
deleted file mode 100755
index 65a00f6dd0..0000000000
--- a/hack/infrastructure/docker-ci/setup.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bash
-
-# Set timezone
-echo "GMT" >/etc/timezone
-dpkg-reconfigure --frontend noninteractive tzdata
-
-# Set ssh superuser
-mkdir -p /data/buildbot /var/run/sshd /run
-useradd -m -d /home/sysadmin -s /bin/bash -G sudo,docker -p '*' sysadmin
-sed -Ei 's/(\%sudo.*) ALL/\1 NOPASSWD:ALL/' /etc/sudoers
-cd /home/sysadmin
-mkdir .ssh
-chmod 700 .ssh
-cat > .ssh/authorized_keys << 'EOF'
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC7ALVhwQ68q1SjrKaAduOuOEAcWmb8kDZf5qA7T1fM8AP07EDC7nSKRJ8PXUBGTOQfxm89coJDuSJsTAZ+1PvglXhA0Mq6+knc6ZrZY+SuZlDIDAk4TOdVPoDZnmR1YW2McxHkhcGIOKeC8MMig5NeEjtgQwXzauUSPqeh8HMlLZRMooFYyyluIpn7NaCLzyWjwAQz2s3KyI7VE7hl+ncCrW86v+dciEdwqtzNoUMFb3iDpPxaiCl3rv+SB7co/5eUDTs1FZvUcYMXKQuf8R+2ZKzXOpwr0Zs8sKQXvXavCeWykwGgXLBjVkvrDcHuDD6UXCW63UKgmRECpLZaMBVIIRWLEEgTS5OSQTcxpMVe5zUW6sDvXHTcdPwWrcn1dE9F/0vLC0HJ4ADKelLX5zyTpmXGbuZuntIf1JO67D/K/P++uV1rmVIH+zgtOf23w5rX2zKb4BSTqP0sv61pmWV7MEVoEz6yXswcTjS92tb775v7XLU9vKAkt042ORFdE4/++hejhL/Lj52IRgjt1CJZHZsR9JywJZrz3kYuf8eU2J2FYh0Cpz5gmf0f+12Rt4HztnZxGPP4KuMa66e4+hpx1jynjMZ7D5QUnNYEmuvJByopn8HSluuY/kS5MMyZCZtJLEPGX4+yECX0Di/S0vCRl2NyqfCBqS+yXXT5SA1nFw== docker-test@docker.io
-EOF
-chmod 600 .ssh/authorized_keys
-chown -R sysadmin .ssh
-
-# Fix docker group id for use of host dockerd by sysadmin
-sed -Ei 's/(docker:x:)[^:]+/\1999/' /etc/group
-
-# Create buildbot configuration
-cd /data/buildbot; buildbot create-master master
-cp -a /data/buildbot/master/master.cfg.sample \
- /data/buildbot/master/master.cfg
-cd /data/buildbot; \
- buildslave create-slave slave localhost:9989 buildworker pass
-cp /docker-ci/buildbot/master.cfg /data/buildbot/master
-
-# Patch github webstatus to capture pull requests
-cp /docker-ci/buildbot/github.py /usr/local/lib/python2.7/dist-packages/buildbot/status/web/hooks
-chown -R sysadmin.sysadmin /data
-
-# Create nginx configuration
-rm /etc/nginx/sites-enabled/default
-cp /docker-ci/nginx/nginx.conf /etc/nginx/conf.d/buildbot.conf
-/bin/echo -e '\ndaemon off;\n' >> /etc/nginx/nginx.conf
-
-# Set supervisord buildbot, nginx and sshd processes
-/bin/echo -e "\
-[program:buildmaster]\n\
-command=twistd --nodaemon --no_save -y buildbot.tac\n\
-directory=/data/buildbot/master\n\
-user=sysadmin\n\n\
-[program:buildworker]\n\
-command=twistd --nodaemon --no_save -y buildbot.tac\n\
-directory=/data/buildbot/slave\n\
-user=sysadmin\n" > \
- /etc/supervisor/conf.d/buildbot.conf
-/bin/echo -e "[program:nginx]\ncommand=/usr/sbin/nginx\n" > \
- /etc/supervisor/conf.d/nginx.conf
-/bin/echo -e "[program:sshd]\ncommand=/usr/sbin/sshd -D\n" > \
- /etc/supervisor/conf.d/sshd.conf
diff --git a/hack/infrastructure/docker-ci/testbuilder/Dockerfile b/hack/infrastructure/docker-ci/testbuilder/Dockerfile
deleted file mode 100644
index a008da6843..0000000000
--- a/hack/infrastructure/docker-ci/testbuilder/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-# TO_BUILD: docker build -rm -no-cache -t docker-ci/testbuilder .
-# TO_RUN: docker run -rm -u sysadmin \
-# -v /run:/var/socket docker-ci/testbuilder docker-registry
-#
-
-FROM docker-ci/docker-ci
-ENV HOME /home/sysadmin
-
-RUN mkdir /testbuilder
-ADD . /testbuilder
-
-ENTRYPOINT ["/testbuilder/testbuilder.sh"]
diff --git a/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh b/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh
deleted file mode 100755
index 72087462ad..0000000000
--- a/hack/infrastructure/docker-ci/testbuilder/docker-registry.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-set -x
-set -e
-PROJECT_PATH=$1
-
-# Build the docker project
-cd /data/$PROJECT_PATH
-sg docker -c "docker build -q -rm -t registry ."
-cd test; sg docker -c "docker build -q -rm -t docker-registry-test ."
-
-# Run the tests
-sg docker -c "docker run -rm -v /home/docker-ci/coverage/docker-registry:/data docker-registry-test"
diff --git a/hack/infrastructure/docker-ci/testbuilder/docker.sh b/hack/infrastructure/docker-ci/testbuilder/docker.sh
deleted file mode 100755
index b365dd7eaf..0000000000
--- a/hack/infrastructure/docker-ci/testbuilder/docker.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env bash
-set -x
-set -e
-PROJECT_PATH=$1
-
-# Build the docker project
-cd /data/$PROJECT_PATH
-sg docker -c "docker build -q -rm -t docker ."
-
-if [ "$DOCKER_RELEASE" == "1" ]; then
- # Do nightly release
- echo sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY= -e AWS_SECRET_KEY= -e GPG_PASSPHRASE= docker hack/release.sh"
- set +x
- sg docker -c "docker run -rm -privileged -v /run:/var/socket -e AWS_S3_BUCKET=$AWS_S3_BUCKET -e AWS_ACCESS_KEY=$AWS_ACCESS_KEY -e AWS_SECRET_KEY=$AWS_SECRET_KEY -e GPG_PASSPHRASE=$GPG_PASSPHRASE docker hack/release.sh"
-else
- # Run the tests
- sg docker -c "docker run -rm -privileged -v /home/docker-ci/coverage/docker:/data docker ./hack/infrastructure/docker-ci/docker-coverage/gocoverage.sh"
-fi
diff --git a/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh b/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh
deleted file mode 100755
index 70701343c2..0000000000
--- a/hack/infrastructure/docker-ci/testbuilder/testbuilder.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env bash
-# Download, build and run a docker project tests
-# Environment variables: DEPLOYMENT
-
-cat $0
-set -e
-set -x
-
-PROJECT=$1
-COMMIT=${2-HEAD}
-REPO=${3-https://github.com/dotcloud/$PROJECT}
-BRANCH=${4-master}
-REPO_PROJ="https://github.com/docker-test/$PROJECT"
-if [ "$DEPLOYMENT" == "production" ]; then
- REPO_PROJ="https://github.com/dotcloud/$PROJECT"
-fi
-set +x
-
-# Generate a random string of $1 characters
-function random {
- cat /dev/urandom | tr -cd 'a-f0-9' | head -c $1
-}
-
-PROJECT_PATH="$PROJECT-tmp-$(random 12)"
-
-# Set docker-test git user
-set -x
-git config --global user.email "docker-test@docker.io"
-git config --global user.name "docker-test"
-
-# Fetch project
-git clone -q $REPO_PROJ -b master /data/$PROJECT_PATH
-cd /data/$PROJECT_PATH
-echo "Git commit: $(git rev-parse HEAD)"
-git fetch -q $REPO $BRANCH
-git merge --no-edit $COMMIT
-
-# Build the project dockertest
-/testbuilder/$PROJECT.sh $PROJECT_PATH
-rm -rf /data/$PROJECT_PATH
diff --git a/hack/infrastructure/docker-ci/tool/backup.py b/hack/infrastructure/docker-ci/tool/backup.py
deleted file mode 100755
index 2db633e526..0000000000
--- a/hack/infrastructure/docker-ci/tool/backup.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-
-import os,sys,json
-from datetime import datetime
-from filecmp import cmp
-from subprocess import check_call
-from boto.s3.key import Key
-from boto.s3.connection import S3Connection
-
-def ENV(x):
- '''Promote an environment variable for global use returning its value'''
- retval = os.environ.get(x, '')
- globals()[x] = retval
- return retval
-
-ROOT_PATH = '/data/backup/docker-ci'
-TODAY = str(datetime.today())[:10]
-BACKUP_FILE = '{}/docker-ci_{}.tgz'.format(ROOT_PATH, TODAY)
-BACKUP_LINK = '{}/docker-ci.tgz'.format(ROOT_PATH)
-ENV('BACKUP_BUCKET')
-ENV('BACKUP_AWS_ID')
-ENV('BACKUP_AWS_SECRET')
-
-'''Create full master buildbot backup, avoiding duplicates'''
-# Ensure backup path exist
-if not os.path.exists(ROOT_PATH):
- os.makedirs(ROOT_PATH)
-# Make actual backups
-check_call('/bin/tar czf {} -C /data --exclude=backup --exclude=buildbot/slave'
- ' . 1>/dev/null 2>&1'.format(BACKUP_FILE),shell=True)
-# remove previous dump if it is the same as the latest
-if (os.path.exists(BACKUP_LINK) and cmp(BACKUP_FILE, BACKUP_LINK) and
- os.path._resolve_link(BACKUP_LINK) != BACKUP_FILE):
- os.unlink(os.path._resolve_link(BACKUP_LINK))
-# Recreate backup link pointing to latest backup
-try:
- os.unlink(BACKUP_LINK)
-except:
- pass
-os.symlink(BACKUP_FILE, BACKUP_LINK)
-
-# Make backup on S3
-bucket = S3Connection(BACKUP_AWS_ID,BACKUP_AWS_SECRET).get_bucket(BACKUP_BUCKET)
-k = Key(bucket)
-k.key = BACKUP_FILE
-k.set_contents_from_filename(BACKUP_FILE)
-bucket.copy_key(os.path.basename(BACKUP_LINK),BACKUP_BUCKET,BACKUP_FILE[1:])
diff --git a/hack/install.sh b/hack/install.sh
index 65e34f9659..43248cf2c0 100755
--- a/hack/install.sh
+++ b/hack/install.sh
@@ -72,11 +72,38 @@ fi
if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
lsb_dist='Debian'
fi
+if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
+ lsb_dist='Fedora'
+fi
case "$lsb_dist" in
+ Fedora)
+ (
+ set -x
+ $sh_c 'sleep 3; yum -y -q install docker-io'
+ )
+ if command_exists docker && [ -e /var/run/docker.sock ]; then
+ (
+ set -x
+ $sh_c 'docker run busybox echo "Docker has been successfully installed!"'
+ ) || true
+ fi
+ your_user=your-user
+ [ "$user" != 'root' ] && your_user="$user"
+ echo
+ echo 'If you would like to use Docker as a non-root user, you should now consider'
+ echo 'adding your user to the "docker" group with something like:'
+ echo
+ echo ' sudo usermod -aG docker' $your_user
+ echo
+ echo 'Remember that you will have to log out and back in for this to take effect!'
+ echo
+ exit 0
+ ;;
+
Ubuntu|Debian)
export DEBIAN_FRONTEND=noninteractive
-
+
did_apt_get_update=
apt_get_update() {
if [ -z "$did_apt_get_update" ]; then
@@ -84,21 +111,21 @@ case "$lsb_dist" in
did_apt_get_update=1
fi
}
-
- # TODO remove this section once device-mapper lands
+
+ # aufs is preferred over devicemapper; try to ensure the driver is available.
if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
kern_extras="linux-image-extra-$(uname -r)"
-
+
apt_get_update
( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true
-
+
if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)'
echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!'
( set -x; sleep 10 )
fi
fi
-
+
if [ ! -e /usr/lib/apt/methods/https ]; then
apt_get_update
( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https' )
@@ -111,9 +138,9 @@ case "$lsb_dist" in
(
set -x
if [ "https://get.docker.io/" = "$url" ]; then
- $sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9"
+ $sh_c "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9"
elif [ "https://test.docker.io/" = "$url" ]; then
- $sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6"
+ $sh_c "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6"
else
$sh_c "$curl ${url}gpg | apt-key add -"
fi
@@ -138,7 +165,7 @@ case "$lsb_dist" in
echo
exit 0
;;
-
+
Gentoo)
if [ "$url" = "https://test.docker.io/" ]; then
echo >&2
@@ -153,7 +180,7 @@ case "$lsb_dist" in
echo >&2
exit 1
fi
-
+
(
set -x
$sh_c 'sleep 3; emerge app-emulation/docker'
diff --git a/hack/make.sh b/hack/make.sh
index 63edca4d4c..e81271370d 100755
--- a/hack/make.sh
+++ b/hack/make.sh
@@ -43,6 +43,7 @@ DEFAULT_BUNDLES=(
binary
test
test-integration
+ test-integration-cli
dynbinary
dyntest
dyntest-integration
@@ -89,7 +90,7 @@ LDFLAGS='
'
LDFLAGS_STATIC='-linkmode external'
EXTLDFLAGS_STATIC='-static'
-BUILDFLAGS=( -a -tags "netgo $DOCKER_BUILDTAGS" )
+BUILDFLAGS=( -a -tags "netgo static_build $DOCKER_BUILDTAGS" )
# A few more flags that are specific just to building a completely-static binary (see hack/make/binary)
# PLEASE do not use these anywhere else.
@@ -125,7 +126,7 @@ go_test_dir() {
testcover=( -cover -coverprofile "$coverprofile" $coverpkg )
fi
(
- set -x
+ echo '+ go test' $TESTFLAGS "github.com/dotcloud/docker${dir#.}"
cd "$dir"
go test ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS
)
@@ -136,11 +137,41 @@ go_test_dir() {
# output, one per line.
find_dirs() {
find -not \( \
- \( -wholename './vendor' -o -wholename './integration' -o -wholename './contrib' -o -wholename './pkg/mflag/example' \) \
+ \( \
+ -wholename './vendor' \
+ -o -wholename './integration' \
+ -o -wholename './integration-cli' \
+ -o -wholename './contrib' \
+ -o -wholename './pkg/mflag/example' \
+ -o -wholename './.git' \
+ -o -wholename './bundles' \
+ -o -wholename './docs' \
+ \) \
-prune \
\) -name "$1" -print0 | xargs -0n1 dirname | sort -u
}
+hash_files() {
+ while [ $# -gt 0 ]; do
+ f="$1"
+ shift
+ dir="$(dirname "$f")"
+ base="$(basename "$f")"
+ for hashAlgo in md5 sha256; do
+ if command -v "${hashAlgo}sum" &> /dev/null; then
+ (
+ # subshell and cd so that we get output files like:
+ # $HASH docker-$VERSION
+ # instead of:
+ # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION
+ cd "$dir"
+ "${hashAlgo}sum" "$base" > "$base.$hashAlgo"
+ )
+ fi
+ done
+ done
+}
+
bundle() {
bundlescript=$1
bundle=$(basename $bundlescript)
diff --git a/hack/make/binary b/hack/make/binary
index 7272b1ede0..041e4d1ee8 100644..100755
--- a/hack/make/binary
+++ b/hack/make/binary
@@ -3,7 +3,7 @@
DEST=$1
go build \
- -o $DEST/docker-$VERSION \
+ -o "$DEST/docker-$VERSION" \
"${BUILDFLAGS[@]}" \
-ldflags "
$LDFLAGS
@@ -11,3 +11,6 @@ go build \
" \
./docker
echo "Created binary: $DEST/docker-$VERSION"
+ln -sf "docker-$VERSION" "$DEST/docker"
+
+hash_files "$DEST/docker-$VERSION"
diff --git a/hack/make/dynbinary b/hack/make/dynbinary
index d4f583fb62..75cffe3dcc 100644
--- a/hack/make/dynbinary
+++ b/hack/make/dynbinary
@@ -5,7 +5,7 @@ DEST=$1
if [ -z "$DOCKER_CLIENTONLY" ]; then
# dockerinit still needs to be a static binary, even if docker is dynamic
go build \
- -o $DEST/dockerinit-$VERSION \
+ -o "$DEST/dockerinit-$VERSION" \
"${BUILDFLAGS[@]}" \
-ldflags "
$LDFLAGS
@@ -14,7 +14,9 @@ if [ -z "$DOCKER_CLIENTONLY" ]; then
" \
./dockerinit
echo "Created binary: $DEST/dockerinit-$VERSION"
- ln -sf dockerinit-$VERSION $DEST/dockerinit
+ ln -sf "dockerinit-$VERSION" "$DEST/dockerinit"
+
+ hash_files "$DEST/dockerinit-$VERSION"
sha1sum=
if command -v sha1sum &> /dev/null; then
diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli
new file mode 100644
index 0000000000..b0506d261a
--- /dev/null
+++ b/hack/make/test-integration-cli
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+DEST=$1
+
+set -e
+
+DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs}
+DOCKER_EXECDRIVER=${DOCKER_EXECDRIVER:-native}
+
+bundle_test_integration_cli() {
+ go_test_dir ./integration-cli
+}
+
+# subshell so that we can export PATH without breaking other things
+(
+ export PATH="$DEST/../binary:$DEST/../dynbinary:$PATH"
+
+ if ! command -v docker &> /dev/null; then
+ echo >&2 'error: binary or dynbinary must be run before test-integration-cli'
+ false
+ fi
+
+ ( set -x; exec \
+ docker --daemon --debug \
+ --storage-driver "$DOCKER_GRAPHDRIVER" \
+ --exec-driver "$DOCKER_EXECDRIVER" \
+ --pidfile "$DEST/docker.pid" \
+ &> "$DEST/docker.log"
+ ) &
+
+ # pull the busybox image before running the tests
+ sleep 2
+ ( set -x; docker pull busybox )
+
+ bundle_test_integration_cli
+
+ DOCKERD_PID=$(set -x; cat $DEST/docker.pid)
+ ( set -x; kill $DOCKERD_PID )
+ wait $DOCKERD_PID || true
+) 2>&1 | tee $DEST/test.log
diff --git a/hack/make/tgz b/hack/make/tgz
index 5d03306322..120339976b 100644
--- a/hack/make/tgz
+++ b/hack/make/tgz
@@ -23,6 +23,8 @@ for d in "$CROSS/"*/*; do
tar --numeric-owner --owner 0 -C "$DEST/build" -czf "$TGZ" usr
+ hash_files "$TGZ"
+
rm -rf "$DEST/build"
echo "Created tgz: $TGZ"
diff --git a/hack/make/ubuntu b/hack/make/ubuntu
index ebc12f27ec..403a6c7652 100644
--- a/hack/make/ubuntu
+++ b/hack/make/ubuntu
@@ -38,6 +38,14 @@ bundle_ubuntu() {
mkdir -p $DIR/lib/systemd/system
cp contrib/init/systemd/docker.service $DIR/lib/systemd/system/
+ # Include contributed completions
+ mkdir -p $DIR/etc/bash_completion.d
+ cp contrib/completion/bash/docker $DIR/etc/bash_completion.d/
+ mkdir -p $DIR/usr/share/zsh/vendor-completions
+ cp contrib/completion/zsh/_docker $DIR/usr/share/zsh/vendor-completions/
+ mkdir -p $DIR/etc/fish/completions
+ cp contrib/completion/fish/docker.fish $DIR/etc/fish/completions/
+
# Copy the binary
# This will fail if the binary bundle hasn't been built
mkdir -p $DIR/usr/bin
diff --git a/hack/release.sh b/hack/release.sh
index 50913dd395..d77d454e27 100755
--- a/hack/release.sh
+++ b/hack/release.sh
@@ -31,7 +31,7 @@ docker run -e AWS_S3_BUCKET=get-staging.docker.io \
-e AWS_ACCESS_KEY=AKI1234... \
-e AWS_SECRET_KEY=sEs4mE... \
-e GPG_PASSPHRASE=m0resEs4mE... \
- -i -t -privileged \
+ -i -t --privileged \
docker ./hack/release.sh
EOF
exit 1
@@ -53,35 +53,22 @@ RELEASE_BUNDLES=(
)
if [ "$1" != '--release-regardless-of-test-failure' ]; then
- RELEASE_BUNDLES=( test "${RELEASE_BUNDLES[@]}" )
-fi
-
-if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
- echo >&2
- echo >&2 'The build or tests appear to have failed.'
- echo >&2
- echo >&2 'You, as the release maintainer, now have a couple options:'
- echo >&2 '- delay release and fix issues'
- echo >&2 '- delay release and fix issues'
- echo >&2 '- did we mention how important this is? issues need fixing :)'
- echo >&2
- echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
- echo >&2 ' really knows all the hairy problems at hand with the current release'
- echo >&2 ' issues) may bypass this checking by running this script again with the'
- echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
- echo >&2 ' running the test suite, and will only build the binaries and packages. Please'
- echo >&2 ' avoid using this if at all possible.'
- echo >&2
- echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
- echo >&2 ' should be used. If there are release issues, we should always err on the'
- echo >&2 ' side of caution.'
- echo >&2
- exit 1
+ RELEASE_BUNDLES=(
+ test test-integration
+ "${RELEASE_BUNDLES[@]}"
+ test-integration-cli
+ )
fi
VERSION=$(cat VERSION)
BUCKET=$AWS_S3_BUCKET
+# These are the 2 keys we've used to sign the deb's
+# release (get.docker.io)
+# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
+# test (test.docker.io)
+# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
+
setup_s3() {
# Try creating the bucket. Ignore errors (it might already exist).
s3cmd mb s3://$BUCKET 2>/dev/null || true
@@ -114,76 +101,138 @@ s3_url() {
esac
}
+build_all() {
+ if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
+ echo >&2
+ echo >&2 'The build or tests appear to have failed.'
+ echo >&2
+ echo >&2 'You, as the release maintainer, now have a couple options:'
+ echo >&2 '- delay release and fix issues'
+ echo >&2 '- delay release and fix issues'
+ echo >&2 '- did we mention how important this is? issues need fixing :)'
+ echo >&2
+ echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
+ echo >&2 ' really knows all the hairy problems at hand with the current release'
+ echo >&2 ' issues) may bypass this checking by running this script again with the'
+ echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
+ echo >&2 ' running the test suite, and will only build the binaries and packages. Please'
+ echo >&2 ' avoid using this if at all possible.'
+ echo >&2
+ echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
+ echo >&2 ' should be used. If there are release issues, we should always err on the'
+ echo >&2 ' side of caution.'
+ echo >&2
+ exit 1
+ fi
+}
+
+upload_release_build() {
+ src="$1"
+ dst="$2"
+ latest="$3"
+
+ echo
+ echo "Uploading $src"
+ echo " to $dst"
+ echo
+ s3cmd --follow-symlinks --preserve --acl-public put "$src" "$dst"
+ if [ "$latest" ]; then
+ echo
+ echo "Copying to $latest"
+ echo
+ s3cmd --acl-public cp "$dst" "$latest"
+ fi
+
+ # get hash files too (see hash_files() in hack/make.sh)
+ for hashAlgo in md5 sha256; do
+ if [ -e "$src.$hashAlgo" ]; then
+ echo
+ echo "Uploading $src.$hashAlgo"
+ echo " to $dst.$hashAlgo"
+ echo
+ s3cmd --follow-symlinks --preserve --acl-public --mime-type='text/plain' put "$src.$hashAlgo" "$dst.$hashAlgo"
+ if [ "$latest" ]; then
+ echo
+ echo "Copying to $latest.$hashAlgo"
+ echo
+ s3cmd --acl-public cp "$dst.$hashAlgo" "$latest.$hashAlgo"
+ fi
+ fi
+ done
+}
+
release_build() {
GOOS=$1
GOARCH=$2
- BINARY=bundles/$VERSION/cross/$GOOS/$GOARCH/docker-$VERSION
- TGZ=bundles/$VERSION/tgz/$GOOS/$GOARCH/docker-$VERSION.tgz
+ binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
+ tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
+ binary=docker-$VERSION
+ tgz=docker-$VERSION.tgz
+
+ latestBase=
+ if [ -z "$NOLATEST" ]; then
+ latestBase=docker-latest
+ fi
# we need to map our GOOS and GOARCH to uname values
# see https://en.wikipedia.org/wiki/Uname
# ie, GOOS=linux -> "uname -s"=Linux
- S3OS=$GOOS
- case "$S3OS" in
+ s3Os=$GOOS
+ case "$s3Os" in
darwin)
- S3OS=Darwin
+ s3Os=Darwin
;;
freebsd)
- S3OS=FreeBSD
+ s3Os=FreeBSD
;;
linux)
- S3OS=Linux
+ s3Os=Linux
;;
*)
- echo >&2 "error: can't convert $S3OS to an appropriate value for 'uname -s'"
+ echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
exit 1
;;
esac
- S3ARCH=$GOARCH
- case "$S3ARCH" in
+ s3Arch=$GOARCH
+ case "$s3Arch" in
amd64)
- S3ARCH=x86_64
+ s3Arch=x86_64
;;
386)
- S3ARCH=i386
+ s3Arch=i386
;;
arm)
- S3ARCH=armel
+ s3Arch=armel
# someday, we might potentially support mutliple GOARM values, in which case we might get armhf here too
;;
*)
- echo >&2 "error: can't convert $S3ARCH to an appropriate value for 'uname -m'"
+ echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
exit 1
;;
esac
- S3DIR=s3://$BUCKET/builds/$S3OS/$S3ARCH
+ s3Dir=s3://$BUCKET/builds/$s3Os/$s3Arch
+ latest=
+ latestTgz=
+ if [ "$latestBase" ]; then
+ latest="$s3Dir/$latestBase"
+ latestTgz="$s3Dir/$latestBase.tgz"
+ fi
- if [ ! -x "$BINARY" ]; then
- echo >&2 "error: can't find $BINARY - was it compiled properly?"
+ if [ ! -x "$binDir/$binary" ]; then
+ echo >&2 "error: can't find $binDir/$binary - was it compiled properly?"
exit 1
fi
- if [ ! -f "$TGZ" ]; then
- echo >&2 "error: can't find $TGZ - was it packaged properly?"
+ if [ ! -f "$tgzDir/$tgz" ]; then
+ echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
exit 1
fi
- echo "Uploading $BINARY to $S3OS/$S3ARCH/docker-$VERSION"
- s3cmd --follow-symlinks --preserve --acl-public put $BINARY $S3DIR/docker-$VERSION
-
- echo "Uploading $TGZ to $S3OS/$S3ARCH/docker-$VERSION.tgz"
- s3cmd --follow-symlinks --preserve --acl-public put $TGZ $S3DIR/docker-$VERSION.tgz
-
- if [ -z "$NOLATEST" ]; then
- echo "Copying $S3OS/$S3ARCH/docker-$VERSION to $S3OS/$S3ARCH/docker-latest"
- s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest
-
- echo "Copying $S3OS/$S3ARCH/docker-$VERSION.tgz to $S3OS/$S3ARCH/docker-latest.tgz"
- s3cmd --acl-public cp $S3DIR/docker-$VERSION.tgz $S3DIR/docker-latest.tgz
- fi
+ upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
+ upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
}
# Upload the 'ubuntu' bundle to S3:
@@ -194,21 +243,6 @@ release_ubuntu() {
echo >&2 './hack/make.sh must be run before release_ubuntu'
exit 1
}
- # Make sure that we have our keys
- mkdir -p /.gnupg/
- s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true
- gpg --list-keys releasedocker >/dev/null || {
- gpg --gen-key --batch <<EOF
-Key-Type: RSA
-Key-Length: 2048
-Passphrase: $GPG_PASSPHRASE
-Name-Real: Docker Release Tool
-Name-Email: docker@dotcloud.com
-Name-Comment: releasedocker
-Expire-Date: 0
-%commit
-EOF
- }
# Sign our packages
dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \
@@ -243,10 +277,15 @@ EOF
# Upload repo
s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/
cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index
+# Check that HTTPS transport is available to APT
+if [ ! -e /usr/lib/apt/methods/https ]; then
+ apt-get update
+ apt-get install -y apt-transport-https
+fi
# Add the repository to your APT sources
echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
# Then import the repository key
-apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
+apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
# Install docker
apt-get update ; apt-get install -y lxc-docker
@@ -305,8 +344,28 @@ release_test() {
fi
}
+setup_gpg() {
+ # Make sure that we have our keys
+ mkdir -p /.gnupg/
+ s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true
+ gpg --list-keys releasedocker >/dev/null || {
+ gpg --gen-key --batch <<EOF
+Key-Type: RSA
+Key-Length: 4096
+Passphrase: $GPG_PASSPHRASE
+Name-Real: Docker Release Tool
+Name-Email: docker@dotcloud.com
+Name-Comment: releasedocker
+Expire-Date: 0
+%commit
+EOF
+ }
+}
+
main() {
+ build_all
setup_s3
+ setup_gpg
release_binaries
release_ubuntu
release_index
diff --git a/hack/vendor.sh b/hack/vendor.sh
index 184cb750a5..4200d90867 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -39,7 +39,7 @@ clone() {
echo done
}
-clone git github.com/kr/pty 3b1f6487b
+clone git github.com/kr/pty 98c7b80083
clone git github.com/gorilla/context 708054d61e5
@@ -58,3 +58,6 @@ mv src/code.google.com/p/go/src/pkg/archive/tar tmp-tar
rm -rf src/code.google.com/p/go
mkdir -p src/code.google.com/p/go/src/pkg/archive
mv tmp-tar src/code.google.com/p/go/src/pkg/archive/tar
+
+clone git github.com/godbus/dbus cb98efbb933d8389ab549a060e880ea3c375d213
+clone git github.com/coreos/go-systemd 4c14ed39b8a643ac44b4f95b5a53c00e94261475
diff --git a/image/graph.go b/image/graph.go
new file mode 100644
index 0000000000..dd0136b00e
--- /dev/null
+++ b/image/graph.go
@@ -0,0 +1,11 @@
+package image
+
+import (
+ "github.com/dotcloud/docker/runtime/graphdriver"
+)
+
+type Graph interface {
+ Get(id string) (*Image, error)
+ ImageRoot(id string) string
+ Driver() graphdriver.Driver
+}
diff --git a/image.go b/image/image.go
index fa5b65787c..33503bad5a 100644
--- a/image.go
+++ b/image/image.go
@@ -1,20 +1,16 @@
-package docker
+package image
import (
- "crypto/rand"
- "encoding/hex"
"encoding/json"
"fmt"
"github.com/dotcloud/docker/archive"
- "github.com/dotcloud/docker/graphdriver"
"github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/utils"
- "io"
"io/ioutil"
"os"
"path"
"strconv"
- "strings"
"time"
)
@@ -30,8 +26,9 @@ type Image struct {
Config *runconfig.Config `json:"config,omitempty"`
Architecture string `json:"architecture,omitempty"`
OS string `json:"os,omitempty"`
- graph *Graph
Size int64
+
+ graph Graph
}
func LoadImage(root string) (*Image, error) {
@@ -45,7 +42,7 @@ func LoadImage(root string) (*Image, error) {
if err := json.Unmarshal(jsonData, img); err != nil {
return nil, err
}
- if err := ValidateID(img.ID); err != nil {
+ if err := utils.ValidateID(img.ID); err != nil {
return nil, err
}
@@ -72,7 +69,7 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, ro
var (
size int64
err error
- driver = img.graph.driver
+ driver = img.graph.Driver()
)
if err := os.MkdirAll(layer, 0755); err != nil {
return err
@@ -136,6 +133,10 @@ func StoreImage(img *Image, jsonData []byte, layerData archive.ArchiveReader, ro
return nil
}
+func (img *Image) SetGraph(graph Graph) {
+ img.graph = graph
+}
+
// SaveSize stores the current `size` value of `img` in the directory `root`.
func (img *Image) SaveSize(root string) error {
if err := ioutil.WriteFile(path.Join(root, "layersize"), []byte(strconv.Itoa(int(img.Size))), 0600); err != nil {
@@ -153,7 +154,7 @@ func (img *Image) TarLayer() (arch archive.Archive, err error) {
if img.graph == nil {
return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID)
}
- driver := img.graph.driver
+ driver := img.graph.Driver()
if differ, ok := driver.(graphdriver.Differ); ok {
return differ.Diff(img.ID)
}
@@ -201,33 +202,6 @@ func (img *Image) TarLayer() (arch archive.Archive, err error) {
}), nil
}
-func ValidateID(id string) error {
- if id == "" {
- return fmt.Errorf("Image id can't be empty")
- }
- if strings.Contains(id, ":") {
- return fmt.Errorf("Invalid character in image id: ':'")
- }
- return nil
-}
-
-func GenerateID() string {
- for {
- id := make([]byte, 32)
- if _, err := io.ReadFull(rand.Reader, id); err != nil {
- panic(err) // This shouldn't happen
- }
- value := hex.EncodeToString(id)
- // if we try to parse the truncated for as an int and we don't have
- // an error then the value is all numberic and causes issues when
- // used as a hostname. ref #3869
- if _, err := strconv.Atoi(utils.TruncateID(value)); err == nil {
- continue
- }
- return value
- }
-}
-
// Image includes convenience proxy functions to its graph
// These functions will return an error if the image is not registered
// (ie. if image.graph == nil)
@@ -274,16 +248,16 @@ func (img *Image) root() (string, error) {
if img.graph == nil {
return "", fmt.Errorf("Can't lookup root of unregistered image")
}
- return img.graph.imageRoot(img.ID), nil
+ return img.graph.ImageRoot(img.ID), nil
}
-func (img *Image) getParentsSize(size int64) int64 {
+func (img *Image) GetParentsSize(size int64) int64 {
parentImage, err := img.GetParent()
if err != nil || parentImage == nil {
return size
}
size += parentImage.Size
- return parentImage.getParentsSize(size)
+ return parentImage.GetParentsSize(size)
}
// Depth returns the number of parents for a
diff --git a/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile b/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile
new file mode 100644
index 0000000000..89b66f4f1d
--- /dev/null
+++ b/integration-cli/build_tests/TestBuildSixtySteps/Dockerfile
@@ -0,0 +1,60 @@
+FROM busybox
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
+RUN echo "foo"
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
new file mode 100644
index 0000000000..7cd42dc69c
--- /dev/null
+++ b/integration-cli/docker_cli_build_test.go
@@ -0,0 +1,28 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "testing"
+)
+
+func TestBuildSixtySteps(t *testing.T) {
+ buildDirectory := filepath.Join(workingDirectory, "build_tests", "TestBuildSixtySteps")
+ buildCmd := exec.Command(dockerBinary, "build", "-t", "foobuildsixtysteps", ".")
+ buildCmd.Dir = buildDirectory
+ out, exitCode, err := runCommandWithOutput(buildCmd)
+ errorOut(err, t, fmt.Sprintf("build failed to complete: %v %v", out, err))
+
+ if err != nil || exitCode != 0 {
+ t.Fatal("failed to build the image")
+ }
+
+ deleteImages("foobuildsixtysteps")
+
+ logDone("build - build an image with sixty build steps")
+}
+
+// TODO: TestCaching
+
+// TODO: TestADDCacheInvalidation
diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go
new file mode 100644
index 0000000000..51adaac9df
--- /dev/null
+++ b/integration-cli/docker_cli_commit_test.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "testing"
+)
+
+func TestCommitAfterContainerIsDone(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo")
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, fmt.Sprintf("failed to run container: %v %v", out, err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+
+ waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID)
+ _, _, err = runCommandWithOutput(waitCmd)
+ errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out))
+
+ commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID)
+ out, _, err = runCommandWithOutput(commitCmd)
+ errorOut(err, t, fmt.Sprintf("failed to commit container to image: %v %v", out, err))
+
+ cleanedImageID := stripTrailingCharacters(out)
+
+ inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID)
+ out, _, err = runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("failed to inspect image: %v %v", out, err))
+
+ deleteContainer(cleanedContainerID)
+ deleteImages(cleanedImageID)
+
+ logDone("commit - echo foo and commit the image")
+}
diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go
new file mode 100644
index 0000000000..478ebd2df1
--- /dev/null
+++ b/integration-cli/docker_cli_diff_test.go
@@ -0,0 +1,91 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+// ensure that an added file shows up in docker diff
+func TestDiffFilenameShownInOutput(t *testing.T) {
+ containerCmd := `echo foo > /root/bar`
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd)
+ cid, _, err := runCommandWithOutput(runCmd)
+ errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err))
+
+ cleanCID := stripTrailingCharacters(cid)
+
+ diffCmd := exec.Command(dockerBinary, "diff", cleanCID)
+ out, _, err := runCommandWithOutput(diffCmd)
+ errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err))
+
+ found := false
+ for _, line := range strings.Split(out, "\n") {
+ if strings.Contains("A /root/bar", line) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("couldn't find the new file in docker diff's output: %v", out)
+ }
+ deleteContainer(cleanCID)
+
+ logDone("diff - check if created file shows up")
+}
+
+// test to ensure GH #3840 doesn't occur any more
+func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) {
+ // this is a list of files which shouldn't show up in `docker diff`
+ dockerinitFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerinit", "/.dockerenv"}
+
+ // we might not run into this problem from the first run, so start a few containers
+ for i := 0; i < 20; i++ {
+ containerCmd := `echo foo > /root/bar`
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd)
+ cid, _, err := runCommandWithOutput(runCmd)
+ errorOut(err, t, fmt.Sprintf("%s", err))
+
+ cleanCID := stripTrailingCharacters(cid)
+
+ diffCmd := exec.Command(dockerBinary, "diff", cleanCID)
+ out, _, err := runCommandWithOutput(diffCmd)
+ errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err))
+
+ deleteContainer(cleanCID)
+
+ for _, filename := range dockerinitFiles {
+ if strings.Contains(out, filename) {
+ t.Errorf("found file which should've been ignored %v in diff output", filename)
+ }
+ }
+ }
+
+ logDone("diff - check if ignored files show up in diff")
+}
+
+func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep 0")
+ cid, _, err := runCommandWithOutput(runCmd)
+ errorOut(err, t, fmt.Sprintf("%s", err))
+ cleanCID := stripTrailingCharacters(cid)
+
+ diffCmd := exec.Command(dockerBinary, "diff", cleanCID)
+ out, _, err := runCommandWithOutput(diffCmd)
+ errorOut(err, t, fmt.Sprintf("failed to run diff: %v %v", out, err))
+ deleteContainer(cleanCID)
+
+ expected := map[string]bool{
+ "C /dev": true,
+ "A /dev/full": true, // busybox
+ "C /dev/ptmx": true, // libcontainer
+ "A /dev/kmsg": true, // lxc
+ }
+
+ for _, line := range strings.Split(out, "\n") {
+ if line != "" && !expected[line] {
+ t.Errorf("'%s' is shown in the diff but shouldn't", line)
+ }
+ }
+}
diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go
new file mode 100644
index 0000000000..2e443cd39e
--- /dev/null
+++ b/integration-cli/docker_cli_export_import_test.go
@@ -0,0 +1,50 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "testing"
+)
+
+// export an image and try to import it into a new one
+func TestExportContainerAndImportImage(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+ out, _, err := runCommandWithOutput(runCmd)
+ if err != nil {
+ t.Fatal("failed to create a container", out, err)
+ }
+
+ cleanedContainerID := stripTrailingCharacters(out)
+
+ inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
+ out, _, err = runCommandWithOutput(inspectCmd)
+ if err != nil {
+ t.Fatalf("output should've been a container id: %s %s ", cleanedContainerID, err)
+ }
+
+ exportCmdTemplate := `%v export %v > /tmp/testexp.tar`
+ exportCmdFinal := fmt.Sprintf(exportCmdTemplate, dockerBinary, cleanedContainerID)
+ exportCmd := exec.Command("bash", "-c", exportCmdFinal)
+ out, _, err = runCommandWithOutput(exportCmd)
+ errorOut(err, t, fmt.Sprintf("failed to export container: %v %v", out, err))
+
+ importCmdFinal := `cat /tmp/testexp.tar | docker import - testexp`
+ importCmd := exec.Command("bash", "-c", importCmdFinal)
+ out, _, err = runCommandWithOutput(importCmd)
+ errorOut(err, t, fmt.Sprintf("failed to import image: %v %v", out, err))
+
+ cleanedImageID := stripTrailingCharacters(out)
+
+ inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID)
+ out, _, err = runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("output should've been an image id: %v %v", out, err))
+
+ deleteContainer(cleanedContainerID)
+ deleteImages("testexp")
+
+ os.Remove("/tmp/testexp.tar")
+
+ logDone("export - export a container")
+ logDone("import - import an image")
+}
diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go
new file mode 100644
index 0000000000..82b70bab40
--- /dev/null
+++ b/integration-cli/docker_cli_images_test.go
@@ -0,0 +1,58 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func TestImagesEnsureImageIsListed(t *testing.T) {
+ imagesCmd := exec.Command(dockerBinary, "images")
+ out, _, err := runCommandWithOutput(imagesCmd)
+ errorOut(err, t, fmt.Sprintf("listing images failed with errors: %v", err))
+
+ if !strings.Contains(out, "busybox") {
+ t.Fatal("images should've listed busybox")
+ }
+
+ logDone("images - busybox should be listed")
+}
+
+func TestCLIImageTagRemove(t *testing.T) {
+ imagesBefore, _, _ := cmd(t, "images", "-a")
+ cmd(t, "tag", "busybox", "utest:tag1")
+ cmd(t, "tag", "busybox", "utest/docker:tag2")
+ cmd(t, "tag", "busybox", "utest:5000/docker:tag3")
+ {
+ imagesAfter, _, _ := cmd(t, "images", "-a")
+ if nLines(imagesAfter) != nLines(imagesBefore)+3 {
+ t.Fatalf("before: %#s\n\nafter: %#s\n", imagesBefore, imagesAfter)
+ }
+ }
+ cmd(t, "rmi", "utest/docker:tag2")
+ {
+ imagesAfter, _, _ := cmd(t, "images", "-a")
+ if nLines(imagesAfter) != nLines(imagesBefore)+2 {
+ t.Fatalf("before: %#s\n\nafter: %#s\n", imagesBefore, imagesAfter)
+ }
+
+ }
+ cmd(t, "rmi", "utest:5000/docker:tag3")
+ {
+ imagesAfter, _, _ := cmd(t, "images", "-a")
+ if nLines(imagesAfter) != nLines(imagesBefore)+1 {
+ t.Fatalf("before: %#s\n\nafter: %#s\n", imagesBefore, imagesAfter)
+ }
+
+ }
+ cmd(t, "rmi", "utest:tag1")
+ {
+ imagesAfter, _, _ := cmd(t, "images", "-a")
+ if nLines(imagesAfter) != nLines(imagesBefore)+0 {
+ t.Fatalf("before: %#s\n\nafter: %#s\n", imagesBefore, imagesAfter)
+ }
+
+ }
+ logDone("tag,rmi- tagging the same images multiple times then removing tags")
+}
diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go
new file mode 100644
index 0000000000..9b36aa9ce1
--- /dev/null
+++ b/integration-cli/docker_cli_import_test.go
@@ -0,0 +1,20 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func TestImportDisplay(t *testing.T) {
+ importCmd := exec.Command(dockerBinary, "import", "https://github.com/ewindisch/docker-cirros/raw/master/cirros-0.3.0-x86_64-lxc.tar.gz")
+ out, _, err := runCommandWithOutput(importCmd)
+ errorOut(err, t, fmt.Sprintf("import failed with errors: %v", err))
+
+ if n := len(strings.Split(out, "\n")); n != 3 {
+ t.Fatalf("display is messed up: %d '\\n' instead of 3", n)
+ }
+
+ logDone("import - cirros was imported and display is fine")
+}
diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go
new file mode 100644
index 0000000000..32aa3a2125
--- /dev/null
+++ b/integration-cli/docker_cli_info_test.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+// ensure docker info succeeds
+func TestInfoEnsureSucceeds(t *testing.T) {
+ versionCmd := exec.Command(dockerBinary, "info")
+ out, exitCode, err := runCommandWithOutput(versionCmd)
+ errorOut(err, t, fmt.Sprintf("encountered error while running docker info: %v", err))
+
+ if err != nil || exitCode != 0 {
+ t.Fatal("failed to execute docker info")
+ }
+
+ stringsToCheck := []string{"Containers:", "Execution Driver:", "Kernel Version:"}
+
+ for _, linePrefix := range stringsToCheck {
+ if !strings.Contains(out, linePrefix) {
+ t.Errorf("couldn't find string %v in output", linePrefix)
+ }
+ }
+
+ logDone("info - verify that it works")
+}
diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go
new file mode 100644
index 0000000000..b8265d8cfb
--- /dev/null
+++ b/integration-cli/docker_cli_kill_test.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func TestKillContainer(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 10")
+ out, _, err := runCommandWithOutput(runCmd)
+ errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+
+ inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
+ inspectOut, _, err := runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("out should've been a container id: %v %v", inspectOut, err))
+
+ killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
+ out, _, err = runCommandWithOutput(killCmd)
+ errorOut(err, t, fmt.Sprintf("failed to kill container: %v %v", out, err))
+
+ listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q")
+ out, _, err = runCommandWithOutput(listRunningContainersCmd)
+ errorOut(err, t, fmt.Sprintf("failed to list running containers: %v", err))
+
+ if strings.Contains(out, cleanedContainerID) {
+ t.Fatal("killed container is still running")
+ }
+
+ deleteContainer(cleanedContainerID)
+
+ logDone("kill - kill container running sleep 10")
+}
diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go
new file mode 100644
index 0000000000..8fcf4d7333
--- /dev/null
+++ b/integration-cli/docker_cli_logs_test.go
@@ -0,0 +1,76 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "testing"
+)
+
+// This used to work; it tests a log of PageSize-1 (gh#4851)
+func TestLogsContainerSmallerThanPage(t *testing.T) {
+ testLen := 32767
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen))
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+ exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
+
+ logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID)
+ out, _, _, err = runCommandWithStdoutStderr(logsCmd)
+ errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err))
+
+ if len(out) != testLen+1 {
+ t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
+ }
+
+ deleteContainer(cleanedContainerID)
+
+ logDone("logs - logs container running echo smaller than page size")
+}
+
+// Regression test: When going over the PageSize, it used to panic (gh#4851)
+func TestLogsContainerBiggerThanPage(t *testing.T) {
+ testLen := 32768
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen))
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+ exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
+
+ logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID)
+ out, _, _, err = runCommandWithStdoutStderr(logsCmd)
+ errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err))
+
+ if len(out) != testLen+1 {
+ t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
+ }
+
+ deleteContainer(cleanedContainerID)
+
+ logDone("logs - logs container running echo bigger than page size")
+}
+
+// Regression test: When going much over the PageSize, it used to block (gh#4851)
+func TestLogsContainerMuchBiggerThanPage(t *testing.T) {
+ testLen := 33000
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen))
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, fmt.Sprintf("run failed with errors: %v", err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+ exec.Command(dockerBinary, "wait", cleanedContainerID).Run()
+
+ logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID)
+ out, _, _, err = runCommandWithStdoutStderr(logsCmd)
+ errorOut(err, t, fmt.Sprintf("failed to log container: %v %v", out, err))
+
+ if len(out) != testLen+1 {
+ t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out))
+ }
+
+ deleteContainer(cleanedContainerID)
+
+ logDone("logs - logs container running echo much bigger than page size")
+}
diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go
new file mode 100644
index 0000000000..13b443f3d6
--- /dev/null
+++ b/integration-cli/docker_cli_pull_test.go
@@ -0,0 +1,30 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "testing"
+)
+
+// pulling an image from the central registry should work
+func TestPullImageFromCentralRegistry(t *testing.T) {
+ pullCmd := exec.Command(dockerBinary, "pull", "busybox")
+ out, exitCode, err := runCommandWithOutput(pullCmd)
+ errorOut(err, t, fmt.Sprintf("%s %s", out, err))
+
+ if err != nil || exitCode != 0 {
+ t.Fatal("pulling the busybox image from the registry has failed")
+ }
+ logDone("pull - pull busybox")
+}
+
+// pulling a non-existing image from the central registry should return a non-zero exit code
+func TestPullNonExistingImage(t *testing.T) {
+ pullCmd := exec.Command(dockerBinary, "pull", "fooblahblah1234")
+ _, exitCode, err := runCommandWithOutput(pullCmd)
+
+ if err == nil || exitCode == 0 {
+ t.Fatal("expected non-zero exit status when pulling non-existing image")
+ }
+ logDone("pull - pull fooblahblah1234 (non-existing image)")
+}
diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go
new file mode 100644
index 0000000000..160bb9e286
--- /dev/null
+++ b/integration-cli/docker_cli_push_test.go
@@ -0,0 +1,48 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "testing"
+)
+
+// these tests need a freshly started empty private docker registry
+
+// pulling an image from the central registry should work
+func TestPushBusyboxImage(t *testing.T) {
+ // skip this test until we're able to use a registry
+ t.Skip()
+	// tag the image to upload it to the private registry
+ repoName := fmt.Sprintf("%v/busybox", privateRegistryURL)
+ tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName)
+ out, exitCode, err := runCommandWithOutput(tagCmd)
+ errorOut(err, t, fmt.Sprintf("%v %v", out, err))
+
+ if err != nil || exitCode != 0 {
+ t.Fatal("image tagging failed")
+ }
+
+ pushCmd := exec.Command(dockerBinary, "push", repoName)
+ out, exitCode, err = runCommandWithOutput(pushCmd)
+ errorOut(err, t, fmt.Sprintf("%v %v", out, err))
+
+ deleteImages(repoName)
+
+ if err != nil || exitCode != 0 {
+ t.Fatal("pushing the image to the private registry has failed")
+ }
+ logDone("push - push busybox to private registry")
+}
+
+// pushing an image without a prefix should throw an error
+func TestPushUnprefixedRepo(t *testing.T) {
+ // skip this test until we're able to use a registry
+ t.Skip()
+ pushCmd := exec.Command(dockerBinary, "push", "busybox")
+ _, exitCode, err := runCommandWithOutput(pushCmd)
+
+ if err == nil || exitCode == 0 {
+ t.Fatal("pushing an unprefixed repo didn't result in a non-zero exit status")
+ }
+ logDone("push - push unprefixed busybox repo --> must fail")
+}
diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go
new file mode 100644
index 0000000000..b0805dd35c
--- /dev/null
+++ b/integration-cli/docker_cli_run_test.go
@@ -0,0 +1,386 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+// "test123" should be printed by docker run
+func TestDockerRunEchoStdout(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "busybox", "echo", "test123")
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, out)
+
+ if out != "test123\n" {
+ t.Errorf("container should've printed 'test123'")
+ }
+
+ deleteAllContainers()
+
+ logDone("run - echo test123")
+}
+
+// "test" should be printed
+func TestDockerRunEchoStdoutWithMemoryLimit(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-m", "2786432", "busybox", "echo", "test")
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, out)
+
+ if out != "test\n" {
+ t.Errorf("container should've printed 'test'")
+
+ }
+
+ deleteAllContainers()
+
+ logDone("run - echo with memory limit")
+}
+
+// "test" should be printed
+func TestDockerRunEchoStdoutWitCPULimit(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "busybox", "echo", "test")
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, out)
+
+ if out != "test\n" {
+ t.Errorf("container should've printed 'test'")
+ }
+
+ deleteAllContainers()
+
+ logDone("run - echo with CPU limit")
+}
+
+// "test" should be printed
+func TestDockerRunEchoStdoutWithCPUAndMemoryLimit(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "2786432", "busybox", "echo", "test")
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, out)
+
+ if out != "test\n" {
+ t.Errorf("container should've printed 'test'")
+ }
+
+ deleteAllContainers()
+
+ logDone("run - echo with CPU and memory limit")
+}
+
+// "test" should be printed
+func TestDockerRunEchoNamedContainer(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test")
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, out)
+
+ if out != "test\n" {
+ t.Errorf("container should've printed 'test'")
+ }
+
+ if err := deleteContainer("testfoonamedcontainer"); err != nil {
+ t.Errorf("failed to remove the named container: %v", err)
+ }
+
+ deleteAllContainers()
+
+ logDone("run - echo with named container")
+}
+
+// it should be possible to ping Google DNS resolver
+// this will fail when Internet access is unavailable
+func TestDockerRunPingGoogle(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "busybox", "ping", "-c", "1", "8.8.8.8")
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, out)
+
+ errorOut(err, t, "container should've been able to ping 8.8.8.8")
+
+ deleteAllContainers()
+
+ logDone("run - ping 8.8.8.8")
+}
+
+// the exit code should be 0
+// some versions of lxc might make this test fail
+func TestDockerRunExitCodeZero(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "busybox", "true")
+ exitCode, err := runCommand(runCmd)
+ errorOut(err, t, fmt.Sprintf("%s", err))
+
+ if exitCode != 0 {
+ t.Errorf("container should've exited with exit code 0")
+ }
+
+ deleteAllContainers()
+
+ logDone("run - exit with 0")
+}
+
+// the exit code should be 1
+// some versions of lxc might make this test fail
+func TestDockerRunExitCodeOne(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "busybox", "false")
+ exitCode, err := runCommand(runCmd)
+ if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) {
+ t.Fatal(err)
+ }
+ if exitCode != 1 {
+ t.Errorf("container should've exited with exit code 1")
+ }
+
+ deleteAllContainers()
+
+ logDone("run - exit with 1")
+}
+
+// it should be possible to pipe in data via stdin to a process running in a container
+// some versions of lxc might make this test fail
+func TestRunStdinPipe(t *testing.T) {
+ runCmd := exec.Command("bash", "-c", `echo "blahblah" | docker run -i -a stdin busybox cat`)
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, out)
+
+ out = stripTrailingCharacters(out)
+
+ inspectCmd := exec.Command(dockerBinary, "inspect", out)
+ inspectOut, _, err := runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut))
+
+ waitCmd := exec.Command(dockerBinary, "wait", out)
+ _, _, err = runCommandWithOutput(waitCmd)
+ errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out))
+
+ logsCmd := exec.Command(dockerBinary, "logs", out)
+ containerLogs, _, err := runCommandWithOutput(logsCmd)
+ errorOut(err, t, fmt.Sprintf("error thrown while trying to get container logs: %s", err))
+
+ containerLogs = stripTrailingCharacters(containerLogs)
+
+ if containerLogs != "blahblah" {
+ t.Errorf("logs didn't print the container's logs %s", containerLogs)
+ }
+
+ rmCmd := exec.Command(dockerBinary, "rm", out)
+ _, _, err = runCommandWithOutput(rmCmd)
+ errorOut(err, t, fmt.Sprintf("rm failed to remove container %s", err))
+
+ deleteAllContainers()
+
+ logDone("run - pipe in with -i -a stdin")
+}
+
+// the container's ID should be printed when starting a container in detached mode
+func TestDockerRunDetachedContainerIDPrinting(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, out)
+
+ out = stripTrailingCharacters(out)
+
+ inspectCmd := exec.Command(dockerBinary, "inspect", out)
+ inspectOut, _, err := runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("out should've been a container id: %s %s", out, inspectOut))
+
+ waitCmd := exec.Command(dockerBinary, "wait", out)
+ _, _, err = runCommandWithOutput(waitCmd)
+ errorOut(err, t, fmt.Sprintf("error thrown while waiting for container: %s", out))
+
+ rmCmd := exec.Command(dockerBinary, "rm", out)
+ rmOut, _, err := runCommandWithOutput(rmCmd)
+ errorOut(err, t, "rm failed to remove container")
+
+ rmOut = stripTrailingCharacters(rmOut)
+ if rmOut != out {
+ t.Errorf("rm didn't print the container ID %s %s", out, rmOut)
+ }
+
+ deleteAllContainers()
+
+ logDone("run - print container ID in detached mode")
+}
+
+// the working directory should be set correctly
+func TestDockerRunWorkingDirectory(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-w", "/root", "busybox", "pwd")
+ out, _, _, err := runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, out)
+
+ out = stripTrailingCharacters(out)
+
+ if out != "/root" {
+ t.Errorf("-w failed to set working directory")
+ }
+
+ runCmd = exec.Command(dockerBinary, "run", "--workdir", "/root", "busybox", "pwd")
+ out, _, _, err = runCommandWithStdoutStderr(runCmd)
+ errorOut(err, t, out)
+
+ out = stripTrailingCharacters(out)
+
+ if out != "/root" {
+ t.Errorf("--workdir failed to set working directory")
+ }
+
+ deleteAllContainers()
+
+ logDone("run - run with working directory set by -w")
+ logDone("run - run with working directory set by --workdir")
+}
+
+// pinging Google's DNS resolver should fail when we disable the networking
+func TestDockerRunWithoutNetworking(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "--networking=false", "busybox", "ping", "-c", "1", "8.8.8.8")
+ out, _, exitCode, err := runCommandWithStdoutStderr(runCmd)
+ if err != nil && exitCode != 1 {
+ t.Fatal(out, err)
+ }
+ if exitCode != 1 {
+ t.Errorf("--networking=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8")
+ }
+
+ runCmd = exec.Command(dockerBinary, "run", "-n=false", "busybox", "ping", "-c", "1", "8.8.8.8")
+ out, _, exitCode, err = runCommandWithStdoutStderr(runCmd)
+ if err != nil && exitCode != 1 {
+ t.Fatal(out, err)
+ }
+ if exitCode != 1 {
+ t.Errorf("-n=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8")
+ }
+
+ deleteAllContainers()
+
+ logDone("run - disable networking with --networking=false")
+ logDone("run - disable networking with -n=false")
+}
+
+// Regression test for #4741
+func TestDockerRunWithVolumesAsFiles(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true")
+ out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd)
+ if err != nil && exitCode != 0 {
+ t.Fatal("1", out, stderr, err)
+ }
+
+ runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/target-file")
+ out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd)
+ if err != nil && exitCode != 0 {
+ t.Fatal("2", out, stderr, err)
+ }
+ deleteAllContainers()
+
+ logDone("run - regression test for #4741 - volumes from as files")
+}
+
+// Regression test for #4979
+func TestDockerRunWithVolumesFromExited(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file")
+ out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd)
+ if err != nil && exitCode != 0 {
+ t.Fatal("1", out, stderr, err)
+ }
+
+ runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file")
+ out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd)
+ if err != nil && exitCode != 0 {
+ t.Fatal("2", out, stderr, err)
+ }
+ deleteAllContainers()
+
+ logDone("run - regression test for #4979 - volumes-from on exited container")
+}
+
+// Regression test for #4830
+func TestDockerRunWithRelativePath(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-v", "tmp:/other-tmp", "busybox", "true")
+ if _, _, _, err := runCommandWithStdoutStderr(runCmd); err == nil {
+ t.Fatalf("relative path should result in an error")
+ }
+
+ deleteAllContainers()
+
+ logDone("run - volume with relative path")
+}
+
+func TestVolumesMountedAsReadonly(t *testing.T) {
+ cmd := exec.Command(dockerBinary, "run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile")
+ if code, err := runCommand(cmd); err == nil || code == 0 {
+ t.Fatalf("run should fail because volume is ro: exit code %d", code)
+ }
+
+ deleteAllContainers()
+
+ logDone("run - volumes as readonly mount")
+}
+
+func TestVolumesFromInReadonlyMode(t *testing.T) {
+ cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true")
+ if _, err := runCommand(cmd); err != nil {
+ t.Fatal(err)
+ }
+
+ cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:ro", "busybox", "touch", "/test/file")
+ if code, err := runCommand(cmd); err == nil || code == 0 {
+ t.Fatalf("run should fail because volume is ro: exit code %d", code)
+ }
+
+ deleteAllContainers()
+
+ logDone("run - volumes from as readonly mount")
+}
+
+// Regression test for #1201
+func TestVolumesFromInReadWriteMode(t *testing.T) {
+ cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true")
+ if _, err := runCommand(cmd); err != nil {
+ t.Fatal(err)
+ }
+
+ cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "busybox", "touch", "/test/file")
+ if _, err := runCommand(cmd); err != nil {
+ t.Fatal(err)
+ }
+
+ deleteAllContainers()
+
+ logDone("run - volumes from as read write mount")
+}
+
+// Test for #1351
+func TestApplyVolumesFromBeforeVolumes(t *testing.T) {
+ cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "touch", "/test/foo")
+ if _, err := runCommand(cmd); err != nil {
+ t.Fatal(err)
+ }
+
+ cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "-v", "/test", "busybox", "cat", "/test/foo")
+ if _, err := runCommand(cmd); err != nil {
+ t.Fatal(err)
+ }
+
+ deleteAllContainers()
+
+ logDone("run - volumes from mounted first")
+}
+
+func TestMultipleVolumesFrom(t *testing.T) {
+ cmd := exec.Command(dockerBinary, "run", "--name", "parent1", "-v", "/test", "busybox", "touch", "/test/foo")
+ if _, err := runCommand(cmd); err != nil {
+ t.Fatal(err)
+ }
+
+ cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/other", "busybox", "touch", "/other/bar")
+ if _, err := runCommand(cmd); err != nil {
+ t.Fatal(err)
+ }
+
+ cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent1", "--volumes-from", "parent2",
+ "busybox", "sh", "-c", "cat /test/foo && cat /other/bar")
+ if _, err := runCommand(cmd); err != nil {
+ t.Fatal(err)
+ }
+
+ deleteAllContainers()
+
+ logDone("run - multiple volumes from")
+}
diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go
new file mode 100644
index 0000000000..d728c7de95
--- /dev/null
+++ b/integration-cli/docker_cli_save_load_test.go
@@ -0,0 +1,52 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "testing"
+)
+
+// save a repo and try to load it
+func TestSaveAndLoadRepo(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+ out, _, err := runCommandWithOutput(runCmd)
+ errorOut(err, t, fmt.Sprintf("failed to create a container: %v %v", out, err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+
+ repoName := "foobar-save-load-test"
+
+ inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
+ out, _, err = runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("output should've been a container id: %v %v", cleanedContainerID, err))
+
+ commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+ out, _, err = runCommandWithOutput(commitCmd)
+ errorOut(err, t, fmt.Sprintf("failed to commit container: %v %v", out, err))
+
+ saveCmdTemplate := `%v save %v > /tmp/foobar-save-load-test.tar`
+ saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName)
+ saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+ out, _, err = runCommandWithOutput(saveCmd)
+ errorOut(err, t, fmt.Sprintf("failed to save repo: %v %v", out, err))
+
+ deleteImages(repoName)
+
+ loadCmdFinal := `cat /tmp/foobar-save-load-test.tar | docker load`
+ loadCmd := exec.Command("bash", "-c", loadCmdFinal)
+ out, _, err = runCommandWithOutput(loadCmd)
+ errorOut(err, t, fmt.Sprintf("failed to load repo: %v %v", out, err))
+
+ inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ out, _, err = runCommandWithOutput(inspectCmd)
+ errorOut(err, t, fmt.Sprintf("the repo should exist after loading it: %v %v", out, err))
+
+ deleteContainer(cleanedContainerID)
+ deleteImages(repoName)
+
+ os.Remove("/tmp/foobar-save-load-test.tar")
+
+ logDone("save - save a repo")
+ logDone("load - load a repo")
+}
diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go
new file mode 100644
index 0000000000..050aec51a6
--- /dev/null
+++ b/integration-cli/docker_cli_search_test.go
@@ -0,0 +1,25 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+// search for repos named "registry" on the central registry
+func TestSearchOnCentralRegistry(t *testing.T) {
+ searchCmd := exec.Command(dockerBinary)
+ out, exitCode, err := runCommandWithOutput(searchCmd)
+ errorOut(err, t, fmt.Sprintf("encountered error while searching: %v", err))
+
+ if err != nil || exitCode != 0 {
+ t.Fatal("failed to search on the central registry")
+ }
+
+ if !strings.Contains(out, "registry") {
+ t.Fatal("couldn't find any repository named (or containing) 'registry'")
+ }
+
+ logDone("search - search for repositories named (or containing) 'registry'")
+}
diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go
new file mode 100644
index 0000000000..d75b7db385
--- /dev/null
+++ b/integration-cli/docker_cli_tag_test.go
@@ -0,0 +1,86 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "testing"
+)
+
+// tagging a named image in a new unprefixed repo should work
+func TestTagUnprefixedRepoByName(t *testing.T) {
+ pullCmd := exec.Command(dockerBinary, "pull", "busybox")
+ out, exitCode, err := runCommandWithOutput(pullCmd)
+ errorOut(err, t, fmt.Sprintf("%s %s", out, err))
+
+ if err != nil || exitCode != 0 {
+ t.Fatal("pulling the busybox image from the registry has failed")
+ }
+
+ tagCmd := exec.Command(dockerBinary, "tag", "busybox", "testfoobarbaz")
+ out, _, err = runCommandWithOutput(tagCmd)
+ errorOut(err, t, fmt.Sprintf("%v %v", out, err))
+
+ deleteImages("testfoobarbaz")
+
+ logDone("tag - busybox -> testfoobarbaz")
+}
+
+// tagging an image by ID in a new unprefixed repo should work
+func TestTagUnprefixedRepoByID(t *testing.T) {
+ getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.id}}", "busybox")
+ out, _, err := runCommandWithOutput(getIDCmd)
+ errorOut(err, t, fmt.Sprintf("failed to get the image ID of busybox: %v", err))
+
+ cleanedImageID := stripTrailingCharacters(out)
+ tagCmd := exec.Command(dockerBinary, "tag", cleanedImageID, "testfoobarbaz")
+ out, _, err = runCommandWithOutput(tagCmd)
+ errorOut(err, t, fmt.Sprintf("%s %s", out, err))
+
+ deleteImages("testfoobarbaz")
+
+ logDone("tag - busybox's image ID -> testfoobarbaz")
+}
+
+// ensure we don't allow the use of invalid tags; these tag operations should fail
+func TestTagInvalidUnprefixedRepo(t *testing.T) {
+ // skip this until we start blocking bad tags
+ t.Skip()
+
+ invalidRepos := []string{"-foo", "fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo", "f"}
+
+ for _, repo := range invalidRepos {
+ tagCmd := exec.Command(dockerBinary, "tag", "busybox", repo)
+ _, _, err := runCommandWithOutput(tagCmd)
+ if err == nil {
+ t.Errorf("tag busybox %v should have failed", repo)
+ continue
+ }
+ logMessage := fmt.Sprintf("tag - busybox %v --> must fail", repo)
+ logDone(logMessage)
+ }
+}
+
+// ensure we allow the use of valid tags
+func TestTagValidPrefixedRepo(t *testing.T) {
+ pullCmd := exec.Command(dockerBinary, "pull", "busybox")
+ out, exitCode, err := runCommandWithOutput(pullCmd)
+ errorOut(err, t, fmt.Sprintf("%s %s", out, err))
+
+ if err != nil || exitCode != 0 {
+ t.Fatal("pulling the busybox image from the registry has failed")
+ }
+
+ validRepos := []string{"fooo/bar", "fooaa/test"}
+
+ for _, repo := range validRepos {
+ tagCmd := exec.Command(dockerBinary, "tag", "busybox", repo)
+ _, _, err := runCommandWithOutput(tagCmd)
+ if err != nil {
+ t.Errorf("tag busybox %v should have worked: %s", repo, err)
+ continue
+ }
+ deleteImages(repo)
+ logMessage := fmt.Sprintf("tag - busybox %v", repo)
+ logDone(logMessage)
+ }
+}
diff --git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go
new file mode 100644
index 0000000000..73d590cf06
--- /dev/null
+++ b/integration-cli/docker_cli_top_test.go
@@ -0,0 +1,32 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func TestTop(t *testing.T) {
+ runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "sleep", "20")
+ out, _, err := runCommandWithOutput(runCmd)
+ errorOut(err, t, fmt.Sprintf("failed to start the container: %v", err))
+
+ cleanedContainerID := stripTrailingCharacters(out)
+
+ topCmd := exec.Command(dockerBinary, "top", cleanedContainerID)
+ out, _, err = runCommandWithOutput(topCmd)
+ errorOut(err, t, fmt.Sprintf("failed to run top: %v %v", out, err))
+
+ killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID)
+ _, err = runCommand(killCmd)
+ errorOut(err, t, fmt.Sprintf("failed to kill container: %v", err))
+
+ deleteContainer(cleanedContainerID)
+
+ if !strings.Contains(out, "sleep 20") {
+ t.Fatal("top should've listed sleep 20 in the process list")
+ }
+
+ logDone("top - sleep process should be listed")
+}
diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go
new file mode 100644
index 0000000000..f18d5bede6
--- /dev/null
+++ b/integration-cli/docker_cli_version_test.go
@@ -0,0 +1,39 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+// ensure docker version works
+func TestVersionEnsureSucceeds(t *testing.T) {
+ versionCmd := exec.Command(dockerBinary, "version")
+ out, exitCode, err := runCommandWithOutput(versionCmd)
+ errorOut(err, t, fmt.Sprintf("encountered error while running docker version: %v", err))
+
+ if err != nil || exitCode != 0 {
+ t.Fatal("failed to execute docker version")
+ }
+
+ stringsToCheck := []string{
+ "Client version:",
+ "Client API version:",
+ "Go version (client):",
+ "Git commit (client):",
+ "Server version:",
+ "Server API version:",
+ "Git commit (server):",
+ "Go version (server):",
+ "Last stable version:",
+ }
+
+ for _, linePrefix := range stringsToCheck {
+ if !strings.Contains(out, linePrefix) {
+ t.Errorf("couldn't find string %v in output", linePrefix)
+ }
+ }
+
+ logDone("version - verify that it works and that the output is properly formatted")
+}
diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go
new file mode 100644
index 0000000000..f8bd5c116b
--- /dev/null
+++ b/integration-cli/docker_test_vars.go
@@ -0,0 +1,29 @@
+package main
+
+import (
+ "os"
+)
+
+// the docker binary to use
+var dockerBinary = "docker"
+
+// the private registry image to use for tests involving the registry
+var registryImageName = "registry"
+
+// the private registry to use for tests
+var privateRegistryURL = "127.0.0.1:5000"
+
+var workingDirectory string
+
+func init() {
+ if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" {
+ dockerBinary = dockerBin
+ }
+ if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" {
+ registryImageName = registryImage
+ }
+ if registry := os.Getenv("REGISTRY_URL"); registry != "" {
+ privateRegistryURL = registry
+ }
+ workingDirectory, _ = os.Getwd()
+}
diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go
new file mode 100644
index 0000000000..6da86c9753
--- /dev/null
+++ b/integration-cli/docker_utils.go
@@ -0,0 +1,63 @@
+package main
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func deleteContainer(container string) error {
+ container = strings.Replace(container, "\n", " ", -1)
+ container = strings.Trim(container, " ")
+ rmArgs := fmt.Sprintf("rm %v", container)
+ rmSplitArgs := strings.Split(rmArgs, " ")
+ rmCmd := exec.Command(dockerBinary, rmSplitArgs...)
+ exitCode, err := runCommand(rmCmd)
+ // set error manually if not set
+ if exitCode != 0 && err == nil {
+ err = fmt.Errorf("failed to remove container: `docker rm` exit is non-zero")
+ }
+
+ return err
+}
+
+func getAllContainers() (string, error) {
+ getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a")
+ out, exitCode, err := runCommandWithOutput(getContainersCmd)
+ if exitCode != 0 && err == nil {
+ err = fmt.Errorf("failed to get a list of containers: %v\n", out)
+ }
+
+ return out, err
+}
+
+func deleteAllContainers() error {
+ containers, err := getAllContainers()
+ if err != nil {
+ fmt.Println(containers)
+ return err
+ }
+
+ if err = deleteContainer(containers); err != nil {
+ return err
+ }
+ return nil
+}
+
+func deleteImages(images string) error {
+ rmiCmd := exec.Command(dockerBinary, "rmi", images)
+ exitCode, err := runCommand(rmiCmd)
+ // set error manually if not set
+ if exitCode != 0 && err == nil {
+ err = fmt.Errorf("failed to remove image: `docker rmi` exit is non-zero")
+ }
+
+ return err
+}
+
+func cmd(t *testing.T, args ...string) (string, int, error) {
+ out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...))
+ errorOut(err, t, fmt.Sprintf("'%s' failed with errors: %v (%v)", strings.Join(args, " "), err, out))
+ return out, status, err
+}
diff --git a/integration-cli/utils.go b/integration-cli/utils.go
new file mode 100644
index 0000000000..ae7af52687
--- /dev/null
+++ b/integration-cli/utils.go
@@ -0,0 +1,113 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os/exec"
+ "strings"
+ "syscall"
+ "testing"
+)
+
+func getExitCode(err error) (int, error) {
+ exitCode := 0
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ if procExit := exiterr.Sys().(syscall.WaitStatus); ok {
+ return procExit.ExitStatus(), nil
+ }
+ }
+ return exitCode, fmt.Errorf("failed to get exit code")
+}
+
+func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) {
+ exitCode = 0
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ var exiterr error
+ if exitCode, exiterr = getExitCode(err); exiterr != nil {
+ // TODO: Fix this so we check the error's text.
+ // we've failed to retrieve exit code, so we set it to 127
+ exitCode = 127
+ }
+ }
+ output = string(out)
+ return
+}
+
+func runCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) {
+ exitCode = 0
+ var stderrBuffer bytes.Buffer
+ stderrPipe, err := cmd.StderrPipe()
+ if err != nil {
+ return "", "", -1, err
+ }
+ go io.Copy(&stderrBuffer, stderrPipe)
+ out, err := cmd.Output()
+
+ if err != nil {
+ var exiterr error
+ if exitCode, exiterr = getExitCode(err); exiterr != nil {
+ // TODO: Fix this so we check the error's text.
+ // we've failed to retrieve exit code, so we set it to 127
+ exitCode = 127
+ }
+ }
+ stdout = string(out)
+ stderr = string(stderrBuffer.Bytes())
+ return
+}
+
+func runCommand(cmd *exec.Cmd) (exitCode int, err error) {
+ exitCode = 0
+ err = cmd.Run()
+ if err != nil {
+ var exiterr error
+ if exitCode, exiterr = getExitCode(err); exiterr != nil {
+ // TODO: Fix this so we check the error's text.
+ // we've failed to retrieve exit code, so we set it to 127
+ exitCode = 127
+ }
+ }
+ return
+}
+
+func startCommand(cmd *exec.Cmd) (exitCode int, err error) {
+ exitCode = 0
+ err = cmd.Start()
+ if err != nil {
+ var exiterr error
+ if exitCode, exiterr = getExitCode(err); exiterr != nil {
+ // TODO: Fix this so we check the error's text.
+ // we've failed to retrieve exit code, so we set it to 127
+ exitCode = 127
+ }
+ }
+ return
+}
+
+func logDone(message string) {
+ fmt.Printf("[PASSED]: %s\n", message)
+}
+
+func stripTrailingCharacters(target string) string {
+ target = strings.Trim(target, "\n")
+ target = strings.Trim(target, " ")
+ return target
+}
+
+func errorOut(err error, t *testing.T, message string) {
+ if err != nil {
+ t.Fatal(message)
+ }
+}
+
+func errorOutOnNonNilError(err error, t *testing.T, message string) {
+ if err == nil {
+ t.Fatalf(message)
+ }
+}
+
+func nLines(s string) int {
+ return strings.Count(s, "\n")
+}
diff --git a/integration/api_test.go b/integration/api_test.go
index cb92d89858..26441a2668 100644
--- a/integration/api_test.go
+++ b/integration/api_test.go
@@ -5,13 +5,6 @@ import (
"bytes"
"encoding/json"
"fmt"
- "github.com/dotcloud/docker"
- "github.com/dotcloud/docker/api"
- "github.com/dotcloud/docker/dockerversion"
- "github.com/dotcloud/docker/engine"
- "github.com/dotcloud/docker/runconfig"
- "github.com/dotcloud/docker/utils"
- "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
"io"
"io/ioutil"
"net"
@@ -20,84 +13,16 @@ import (
"strings"
"testing"
"time"
-)
-
-func TestGetVersion(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkRuntimeFromEngine(eng, t).Nuke()
-
- var err error
- r := httptest.NewRecorder()
-
- req, err := http.NewRequest("GET", "/version", nil)
- if err != nil {
- t.Fatal(err)
- }
- // FIXME getting the version should require an actual running Server
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
- t.Fatal(err)
- }
- assertHttpNotError(r, t)
-
- out := engine.NewOutput()
- v, err := out.AddEnv()
- if err != nil {
- t.Fatal(err)
- }
- if _, err := io.Copy(out, r.Body); err != nil {
- t.Fatal(err)
- }
- out.Close()
- expected := dockerversion.VERSION
- if result := v.Get("Version"); result != expected {
- t.Errorf("Expected version %s, %s found", expected, result)
- }
- expected = "application/json"
- if result := r.HeaderMap.Get("Content-Type"); result != expected {
- t.Errorf("Expected Content-Type %s, %s found", expected, result)
- }
-}
-
-func TestGetInfo(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkRuntimeFromEngine(eng, t).Nuke()
- job := eng.Job("images")
- initialImages, err := job.Stdout.AddListTable()
- if err != nil {
- t.Fatal(err)
- }
- if err := job.Run(); err != nil {
- t.Fatal(err)
- }
- req, err := http.NewRequest("GET", "/info", nil)
- if err != nil {
- t.Fatal(err)
- }
- r := httptest.NewRecorder()
-
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
- t.Fatal(err)
- }
- assertHttpNotError(r, t)
-
- out := engine.NewOutput()
- i, err := out.AddEnv()
- if err != nil {
- t.Fatal(err)
- }
- if _, err := io.Copy(out, r.Body); err != nil {
- t.Fatal(err)
- }
- out.Close()
- if images := i.GetInt("Images"); images != initialImages.Len() {
- t.Errorf("Expected images: %d, %d found", initialImages.Len(), images)
- }
- expected := "application/json"
- if result := r.HeaderMap.Get("Content-Type"); result != expected {
- t.Errorf("Expected Content-Type %s, %s found", expected, result)
- }
-}
+ "github.com/dotcloud/docker/api"
+ "github.com/dotcloud/docker/api/server"
+ "github.com/dotcloud/docker/engine"
+ "github.com/dotcloud/docker/image"
+ "github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/runtime"
+ "github.com/dotcloud/docker/utils"
+ "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
+)
func TestGetEvents(t *testing.T) {
eng := NewTestEngine(t)
@@ -124,7 +49,7 @@ func TestGetEvents(t *testing.T) {
r := httptest.NewRecorder()
setTimeout(t, "", 500*time.Millisecond, func() {
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -165,7 +90,7 @@ func TestGetImagesJSON(t *testing.T) {
r := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -200,7 +125,7 @@ func TestGetImagesJSON(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil {
t.Fatal(err)
}
assertHttpNotError(r2, t)
@@ -233,7 +158,7 @@ func TestGetImagesJSON(t *testing.T) {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r3, req3); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r3, req3); err != nil {
t.Fatal(err)
}
assertHttpNotError(r3, t)
@@ -258,7 +183,7 @@ func TestGetImagesHistory(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -282,12 +207,12 @@ func TestGetImagesByName(t *testing.T) {
}
r := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
- img := &docker.Image{}
+ img := &image.Image{}
if err := json.Unmarshal(r.Body.Bytes(), img); err != nil {
t.Fatal(err)
}
@@ -326,7 +251,7 @@ func TestGetContainersJSON(t *testing.T) {
}
r := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -362,7 +287,7 @@ func TestGetContainersExport(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -400,7 +325,7 @@ func TestSaveImageAndThenLoad(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusOK {
@@ -414,7 +339,7 @@ func TestSaveImageAndThenLoad(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusOK {
@@ -427,7 +352,7 @@ func TestSaveImageAndThenLoad(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusNotFound {
@@ -440,7 +365,7 @@ func TestSaveImageAndThenLoad(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusOK {
@@ -453,7 +378,7 @@ func TestSaveImageAndThenLoad(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusOK {
@@ -480,7 +405,7 @@ func TestGetContainersChanges(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -547,7 +472,7 @@ func TestGetContainersTop(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -595,11 +520,11 @@ func TestGetContainersByName(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
- outContainer := &docker.Container{}
+ outContainer := &runtime.Container{}
if err := json.Unmarshal(r.Body.Bytes(), outContainer); err != nil {
t.Fatal(err)
}
@@ -630,7 +555,7 @@ func TestPostCommit(t *testing.T) {
}
r := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -666,7 +591,7 @@ func TestPostContainersCreate(t *testing.T) {
}
r := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -715,7 +640,7 @@ func TestPostContainersKill(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -754,7 +679,7 @@ func TestPostContainersRestart(t *testing.T) {
t.Fatal(err)
}
r := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -796,7 +721,7 @@ func TestPostContainersStart(t *testing.T) {
req.Header.Set("Content-Type", "application/json")
r := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -813,7 +738,7 @@ func TestPostContainersStart(t *testing.T) {
}
r = httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
// Starting an already started container should return an error
@@ -851,7 +776,7 @@ func TestRunErrorBindMountRootSource(t *testing.T) {
req.Header.Set("Content-Type", "application/json")
r := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusInternalServerError {
@@ -888,7 +813,7 @@ func TestPostContainersStop(t *testing.T) {
t.Fatal(err)
}
r := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -920,7 +845,7 @@ func TestPostContainersWait(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -978,7 +903,7 @@ func TestPostContainersAttach(t *testing.T) {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r.ResponseRecorder, t)
@@ -1056,7 +981,7 @@ func TestPostContainersAttachStderr(t *testing.T) {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r.ResponseRecorder, t)
@@ -1113,7 +1038,7 @@ func TestDeleteContainers(t *testing.T) {
t.Fatal(err)
}
r := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -1132,7 +1057,7 @@ func TestOptionsRoute(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -1151,7 +1076,7 @@ func TestGetEnabledCors(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -1198,7 +1123,7 @@ func TestDeleteImages(t *testing.T) {
}
r := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusConflict {
@@ -1211,7 +1136,7 @@ func TestDeleteImages(t *testing.T) {
}
r2 := httptest.NewRecorder()
- if err := api.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r2, req2); err != nil {
t.Fatal(err)
}
assertHttpNotError(r2, t)
@@ -1263,7 +1188,7 @@ func TestPostContainersCopy(t *testing.T) {
t.Fatal(err)
}
req.Header.Add("Content-Type", "application/json")
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
assertHttpNotError(r, t)
@@ -1311,7 +1236,7 @@ func TestPostContainersCopyWhenContainerNotFound(t *testing.T) {
t.Fatal(err)
}
req.Header.Add("Content-Type", "application/json")
- if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
+ if err := server.ServeRequest(eng, api.APIVERSION, r, req); err != nil {
t.Fatal(err)
}
if r.Code != http.StatusNotFound {
diff --git a/integration/auth_test.go b/integration/auth_test.go
index c5bdabace2..8109bbb130 100644
--- a/integration/auth_test.go
+++ b/integration/auth_test.go
@@ -4,7 +4,7 @@ import (
"crypto/rand"
"encoding/hex"
"fmt"
- "github.com/dotcloud/docker/auth"
+ "github.com/dotcloud/docker/registry"
"os"
"strings"
"testing"
@@ -16,15 +16,16 @@ import (
// - Integration tests should have side-effects limited to the host environment being tested.
func TestLogin(t *testing.T) {
+ t.Skip("FIXME: please remove dependency on external services")
os.Setenv("DOCKER_INDEX_URL", "https://indexstaging-docker.dotcloud.com")
defer os.Setenv("DOCKER_INDEX_URL", "")
- authConfig := &auth.AuthConfig{
+ authConfig := &registry.AuthConfig{
Username: "unittester",
Password: "surlautrerivejetattendrai",
Email: "noise+unittester@docker.com",
ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/",
}
- status, err := auth.Login(authConfig, nil)
+ status, err := registry.Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
@@ -34,6 +35,7 @@ func TestLogin(t *testing.T) {
}
func TestCreateAccount(t *testing.T) {
+ t.Skip("FIXME: please remove dependency on external services")
tokenBuffer := make([]byte, 16)
_, err := rand.Read(tokenBuffer)
if err != nil {
@@ -41,13 +43,13 @@ func TestCreateAccount(t *testing.T) {
}
token := hex.EncodeToString(tokenBuffer)[:12]
username := "ut" + token
- authConfig := &auth.AuthConfig{
+ authConfig := &registry.AuthConfig{
Username: username,
Password: "test42",
Email: fmt.Sprintf("docker-ut+%s@example.com", token),
ServerAddress: "https://indexstaging-docker.dotcloud.com/v1/",
}
- status, err := auth.Login(authConfig, nil)
+ status, err := registry.Login(authConfig, nil)
if err != nil {
t.Fatal(err)
}
@@ -59,7 +61,7 @@ func TestCreateAccount(t *testing.T) {
t.Fatalf("Expected status: \"%s\", found \"%s\" instead.", expectedStatus, status)
}
- status, err = auth.Login(authConfig, nil)
+ status, err = registry.Login(authConfig, nil)
if err == nil {
t.Fatalf("Expected error but found nil instead")
}
diff --git a/integration/buildfile_test.go b/integration/buildfile_test.go
index efab9707ec..bb864a5a12 100644
--- a/integration/buildfile_test.go
+++ b/integration/buildfile_test.go
@@ -2,9 +2,11 @@ package docker
import (
"fmt"
- "github.com/dotcloud/docker"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/engine"
+ "github.com/dotcloud/docker/image"
+ "github.com/dotcloud/docker/nat"
+ "github.com/dotcloud/docker/server"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"net"
@@ -309,6 +311,16 @@ RUN [ "$(cat /testfile)" = 'test!' ]
},
nil,
},
+ {
+ `
+FROM {IMAGE}
+# what \
+RUN mkdir /testing
+RUN touch /testing/other
+`,
+ nil,
+ nil,
+ },
}
// FIXME: test building with 2 successive overlapping ADD commands
@@ -350,7 +362,7 @@ func TestBuild(t *testing.T) {
}
}
-func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) (*docker.Image, error) {
+func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, useCache bool) (*image.Image, error) {
if eng == nil {
eng = NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
@@ -382,7 +394,7 @@ func buildImage(context testContextTemplate, t *testing.T, eng *engine.Engine, u
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
- buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil)
+ buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, useCache, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil)
id, err := buildfile.Build(context.Archive(dockerfile, t))
if err != nil {
return nil, err
@@ -439,6 +451,25 @@ func TestBuildUser(t *testing.T) {
}
}
+func TestBuildRelativeWorkdir(t *testing.T) {
+ img, err := buildImage(testContextTemplate{`
+ FROM {IMAGE}
+ RUN [ "$PWD" = '/' ]
+ WORKDIR test1
+ RUN [ "$PWD" = '/test1' ]
+ WORKDIR /test2
+ RUN [ "$PWD" = '/test2' ]
+ WORKDIR test3
+ RUN [ "$PWD" = '/test2/test3' ]
+ `, nil, nil}, t, nil, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if img.Config.WorkingDir != "/test2/test3" {
+ t.Fatalf("Expected workdir to be '/test2/test3', received '%s'", img.Config.WorkingDir)
+ }
+}
+
func TestBuildEnv(t *testing.T) {
img, err := buildImage(testContextTemplate{`
from {IMAGE}
@@ -491,7 +522,7 @@ func TestBuildExpose(t *testing.T) {
t.Fatal(err)
}
- if img.Config.PortSpecs[0] != "4243" {
+ if _, exists := img.Config.ExposedPorts[nat.NewPort("tcp", "4243")]; !exists {
t.Fail()
}
}
@@ -593,6 +624,17 @@ func TestBuildImageWithCache(t *testing.T) {
checkCacheBehavior(t, template, true)
}
+func TestBuildExposeWithCache(t *testing.T) {
+ template := testContextTemplate{`
+ from {IMAGE}
+ maintainer dockerio
+ expose 80
+ run echo hello
+ `,
+ nil, nil}
+ checkCacheBehavior(t, template, true)
+}
+
func TestBuildImageWithoutCache(t *testing.T) {
template := testContextTemplate{`
from {IMAGE}
@@ -786,7 +828,7 @@ func TestForbiddenContextPath(t *testing.T) {
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
- buildfile := docker.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil)
+ buildfile := server.NewBuildFile(srv, ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil)
_, err = buildfile.Build(context.Archive(dockerfile, t))
if err == nil {
@@ -832,7 +874,7 @@ func TestBuildADDFileNotFound(t *testing.T) {
}
dockerfile := constructDockerfile(context.dockerfile, ip, port)
- buildfile := docker.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil)
+ buildfile := server.NewBuildFile(mkServerFromEngine(eng, t), ioutil.Discard, ioutil.Discard, false, true, false, ioutil.Discard, utils.NewStreamFormatter(false), nil, nil)
_, err = buildfile.Build(context.Archive(dockerfile, t))
if err == nil {
@@ -876,7 +918,7 @@ func TestBuildInheritance(t *testing.T) {
}
// from parent
- if img.Config.PortSpecs[0] != "4243" {
+ if _, exists := img.Config.ExposedPorts[nat.NewPort("tcp", "4243")]; !exists {
t.Fail()
}
}
@@ -904,8 +946,8 @@ func TestBuildFails(t *testing.T) {
func TestBuildFailsDockerfileEmpty(t *testing.T) {
_, err := buildImage(testContextTemplate{``, nil, nil}, t, nil, true)
- if err != docker.ErrDockerfileEmpty {
- t.Fatal("Expected: %v, got: %v", docker.ErrDockerfileEmpty, err)
+ if err != server.ErrDockerfileEmpty {
+ t.Fatalf("Expected: %v, got: %v", server.ErrDockerfileEmpty, err)
}
}
@@ -966,3 +1008,21 @@ func TestBuildOnBuildForbiddenMaintainerTrigger(t *testing.T) {
t.Fatal("Error should not be nil")
}
}
+
+// gh #2446
+func TestBuildAddToSymlinkDest(t *testing.T) {
+ eng := NewTestEngine(t)
+ defer nuke(mkRuntimeFromEngine(eng, t))
+
+ _, err := buildImage(testContextTemplate{`
+ from {IMAGE}
+ run mkdir /foo
+ run ln -s /foo /bar
+ add foo /bar/
+ run stat /bar/foo
+ `,
+ [][2]string{{"foo", "HEYO"}}, nil}, t, eng, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/integration/commands_test.go b/integration/commands_test.go
index 9f7a41384c..15bb61b49c 100644
--- a/integration/commands_test.go
+++ b/integration/commands_test.go
@@ -3,10 +3,11 @@ package docker
import (
"bufio"
"fmt"
- "github.com/dotcloud/docker"
- "github.com/dotcloud/docker/api"
+ "github.com/dotcloud/docker/api/client"
"github.com/dotcloud/docker/engine"
+ "github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/pkg/term"
+ "github.com/dotcloud/docker/runtime"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@@ -35,7 +36,7 @@ func closeWrap(args ...io.Closer) error {
return nil
}
-func setRaw(t *testing.T, c *docker.Container) *term.State {
+func setRaw(t *testing.T, c *runtime.Container) *term.State {
pty, err := c.GetPtyMaster()
if err != nil {
t.Fatal(err)
@@ -47,7 +48,7 @@ func setRaw(t *testing.T, c *docker.Container) *term.State {
return state
}
-func unsetRaw(t *testing.T, c *docker.Container, state *term.State) {
+func unsetRaw(t *testing.T, c *runtime.Container, state *term.State) {
pty, err := c.GetPtyMaster()
if err != nil {
t.Fatal(err)
@@ -55,8 +56,8 @@ func unsetRaw(t *testing.T, c *docker.Container, state *term.State) {
term.RestoreTerminal(pty.Fd(), state)
}
-func waitContainerStart(t *testing.T, timeout time.Duration) *docker.Container {
- var container *docker.Container
+func waitContainerStart(t *testing.T, timeout time.Duration) *runtime.Container {
+ var container *runtime.Container
setTimeout(t, "Waiting for the container to be started timed out", timeout, func() {
for {
@@ -120,7 +121,7 @@ func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error
func TestRunHostname(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
c := make(chan struct{})
@@ -165,7 +166,7 @@ func TestRunHostname(t *testing.T) {
func TestRunWorkdir(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
c := make(chan struct{})
@@ -210,7 +211,7 @@ func TestRunWorkdir(t *testing.T) {
func TestRunWorkdirExists(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
c := make(chan struct{})
@@ -251,11 +252,30 @@ func TestRunWorkdirExists(t *testing.T) {
}
}
+// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected
+func TestRunWorkdirExistsAndIsFile(t *testing.T) {
+
+ cli := client.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
+ defer cleanup(globalEngine, t)
+
+ c := make(chan struct{})
+ go func() {
+ defer close(c)
+ if err := cli.CmdRun("-w", "/bin/cat", unitTestImageID, "pwd"); err == nil {
+ t.Fatal("should have failed to run when using /bin/cat as working dir.")
+ }
+ }()
+
+ setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+ <-c
+ })
+}
+
func TestRunExit(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
@@ -308,7 +328,7 @@ func TestRunDisconnect(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
@@ -354,7 +374,7 @@ func TestRunDisconnectTty(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
c1 := make(chan struct{})
@@ -406,7 +426,7 @@ func TestRunAttachStdin(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
@@ -470,7 +490,7 @@ func TestRunDetach(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
@@ -517,7 +537,7 @@ func TestAttachDetach(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
ch := make(chan struct{})
@@ -550,7 +570,7 @@ func TestAttachDetach(t *testing.T) {
stdin, stdinPipe = io.Pipe()
stdout, stdoutPipe = io.Pipe()
- cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
ch = make(chan struct{})
go func() {
@@ -598,7 +618,7 @@ func TestAttachDetachTruncatedID(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
// Discard the CmdRun output
@@ -616,7 +636,7 @@ func TestAttachDetachTruncatedID(t *testing.T) {
stdin, stdinPipe = io.Pipe()
stdout, stdoutPipe = io.Pipe()
- cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
ch := make(chan struct{})
go func() {
@@ -663,7 +683,7 @@ func TestAttachDisconnect(t *testing.T) {
stdin, stdinPipe := io.Pipe()
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
go func() {
@@ -732,13 +752,13 @@ func TestAttachDisconnect(t *testing.T) {
func TestRunAutoRemove(t *testing.T) {
t.Skip("Fixme. Skipping test for now, race condition")
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
c := make(chan struct{})
go func() {
defer close(c)
- if err := cli.CmdRun("-rm", unitTestImageID, "hostname"); err != nil {
+ if err := cli.CmdRun("--rm", unitTestImageID, "hostname"); err != nil {
t.Fatal(err)
}
}()
@@ -768,7 +788,7 @@ func TestRunAutoRemove(t *testing.T) {
func TestCmdLogs(t *testing.T) {
t.Skip("Test not impemented")
- cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
if err := cli.CmdRun(unitTestImageID, "sh", "-c", "ls -l"); err != nil {
@@ -786,7 +806,7 @@ func TestCmdLogs(t *testing.T) {
// Expected behaviour: error out when attempting to bind mount non-existing source paths
func TestRunErrorBindNonExistingSource(t *testing.T) {
- cli := api.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(nil, nil, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
c := make(chan struct{})
@@ -806,7 +826,7 @@ func TestRunErrorBindNonExistingSource(t *testing.T) {
func TestImagesViz(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
image := buildTestImages(t, globalEngine)
@@ -856,7 +876,7 @@ func TestImagesViz(t *testing.T) {
func TestImagesTree(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
- cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
image := buildTestImages(t, globalEngine)
@@ -902,7 +922,7 @@ func TestImagesTree(t *testing.T) {
})
}
-func buildTestImages(t *testing.T, eng *engine.Engine) *docker.Image {
+func buildTestImages(t *testing.T, eng *engine.Engine) *image.Image {
var testBuilder = testContextTemplate{
`
@@ -930,7 +950,7 @@ run [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]
// #2098 - Docker cidFiles only contain short version of the containerId
//sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test"
// TestRunCidFile tests that run --cidfile returns the longid
-func TestRunCidFile(t *testing.T) {
+func TestRunCidFileCheckIDLength(t *testing.T) {
stdout, stdoutPipe := io.Pipe()
tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
@@ -939,7 +959,7 @@ func TestRunCidFile(t *testing.T) {
}
tmpCidFile := path.Join(tmpDir, "cid")
- cli := api.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
c := make(chan struct{})
@@ -979,6 +999,35 @@ func TestRunCidFile(t *testing.T) {
}
+// Ensure that CIDFile gets deleted if it's empty
+// Perform this test by making `docker run` fail
+func TestRunCidFileCleanupIfEmpty(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "TestRunCidFile")
+ if err != nil {
+ t.Fatal(err)
+ }
+ tmpCidFile := path.Join(tmpDir, "cid")
+
+ cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
+ defer cleanup(globalEngine, t)
+
+ c := make(chan struct{})
+ go func() {
+ defer close(c)
+ if err := cli.CmdRun("--cidfile", tmpCidFile, unitTestImageID); err == nil {
+ t.Fatal("running without a command should have failed")
+ }
+ if _, err := os.Stat(tmpCidFile); err == nil {
+ t.Fatalf("empty CIDFile '%s' should've been deleted", tmpCidFile)
+ }
+ }()
+ defer os.RemoveAll(tmpDir)
+
+ setTimeout(t, "CmdRun timed out", 5*time.Second, func() {
+ <-c
+ })
+}
+
func TestContainerOrphaning(t *testing.T) {
// setup a temporary directory
@@ -989,7 +1038,7 @@ func TestContainerOrphaning(t *testing.T) {
defer os.RemoveAll(tmpDir)
// setup a CLI and server
- cli := api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
defer cleanup(globalEngine, t)
srv := mkServerFromEngine(globalEngine, t)
@@ -1032,7 +1081,7 @@ func TestContainerOrphaning(t *testing.T) {
// remove the second image by name
resp := engine.NewTable("", 0)
- if err := srv.DeleteImage(imageName, resp, true, false); err == nil {
+ if err := srv.DeleteImage(imageName, resp, true, false, false); err == nil {
t.Fatal("Expected error, got none")
}
@@ -1049,8 +1098,8 @@ func TestCmdKill(t *testing.T) {
var (
stdin, stdinPipe = io.Pipe()
stdout, stdoutPipe = io.Pipe()
- cli = api.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr)
- cli2 = api.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr)
+ cli = client.NewDockerCli(stdin, stdoutPipe, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
+ cli2 = client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto, testDaemonAddr, nil)
)
defer cleanup(globalEngine, t)
@@ -1080,8 +1129,13 @@ func TestCmdKill(t *testing.T) {
})
setTimeout(t, "SIGUSR2 timed out", 2*time.Second, func() {
- for i := 0; i < 10; i++ {
- if err := cli2.CmdKill("--signal=USR2", container.ID); err != nil {
+ for i := 0; i < 20; i++ {
+ sig := "USR2"
+ if i%2 != 0 {
+ // Swap to testing "SIGUSR2" for every odd iteration
+ sig = "SIGUSR2"
+ }
+ if err := cli2.CmdKill("--signal="+sig, container.ID); err != nil {
t.Fatal(err)
}
if err := expectPipe("SIGUSR2", stdout); err != nil {
diff --git a/integration/container_test.go b/integration/container_test.go
index 4efb95a2a1..43f51c1e5f 100644
--- a/integration/container_test.go
+++ b/integration/container_test.go
@@ -350,7 +350,7 @@ func TestStart(t *testing.T) {
if !container.State.IsRunning() {
t.Errorf("Container should be running")
}
- if err := container.Start(); err == nil {
+ if err := container.Start(); err != nil {
t.Fatalf("A running container should be able to be started")
}
@@ -385,7 +385,7 @@ func TestCpuShares(t *testing.T) {
if !container.State.IsRunning() {
t.Errorf("Container should be running")
}
- if err := container.Start(); err == nil {
+ if err := container.Start(); err != nil {
t.Fatalf("A running container should be able to be started")
}
@@ -434,28 +434,6 @@ func TestOutput(t *testing.T) {
}
}
-func TestContainerNetwork(t *testing.T) {
- runtime := mkRuntime(t)
- defer nuke(runtime)
- container, _, err := runtime.Create(
- &runconfig.Config{
- Image: GetTestImage(runtime).ID,
- Cmd: []string{"ping", "-c", "1", "127.0.0.1"},
- },
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
- defer runtime.Destroy(container)
- if err := container.Run(); err != nil {
- t.Fatal(err)
- }
- if code := container.State.GetExitCode(); code != 0 {
- t.Fatalf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", code)
- }
-}
-
func TestKillDifferentUser(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
@@ -1109,7 +1087,7 @@ func TestEntrypointNoCmd(t *testing.T) {
}
}
-func BenchmarkRunSequencial(b *testing.B) {
+func BenchmarkRunSequential(b *testing.B) {
runtime := mkRuntime(b)
defer nuke(runtime)
for i := 0; i < b.N; i++ {
@@ -1295,123 +1273,6 @@ func TestBindMounts(t *testing.T) {
}
}
-// Test that -volumes-from supports both read-only mounts
-func TestFromVolumesInReadonlyMode(t *testing.T) {
- runtime := mkRuntime(t)
- defer nuke(runtime)
- container, _, err := runtime.Create(
- &runconfig.Config{
- Image: GetTestImage(runtime).ID,
- Cmd: []string{"/bin/echo", "-n", "foobar"},
- Volumes: map[string]struct{}{"/test": {}},
- },
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
- defer runtime.Destroy(container)
- _, err = container.Output()
- if err != nil {
- t.Fatal(err)
- }
- if !container.VolumesRW["/test"] {
- t.Fail()
- }
-
- container2, _, err := runtime.Create(
- &runconfig.Config{
- Image: GetTestImage(runtime).ID,
- Cmd: []string{"/bin/echo", "-n", "foobar"},
- VolumesFrom: container.ID + ":ro",
- },
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
- defer runtime.Destroy(container2)
-
- _, err = container2.Output()
- if err != nil {
- t.Fatal(err)
- }
-
- if container.Volumes["/test"] != container2.Volumes["/test"] {
- t.Logf("container volumes do not match: %s | %s ",
- container.Volumes["/test"],
- container2.Volumes["/test"])
- t.Fail()
- }
-
- _, exists := container2.VolumesRW["/test"]
- if !exists {
- t.Logf("container2 is missing '/test' volume: %s", container2.VolumesRW)
- t.Fail()
- }
-
- if container2.VolumesRW["/test"] != false {
- t.Log("'/test' volume mounted in read-write mode, expected read-only")
- t.Fail()
- }
-}
-
-// Test that VolumesRW values are copied to the new container. Regression test for #1201
-func TestVolumesFromReadonlyMount(t *testing.T) {
- runtime := mkRuntime(t)
- defer nuke(runtime)
- container, _, err := runtime.Create(
- &runconfig.Config{
- Image: GetTestImage(runtime).ID,
- Cmd: []string{"/bin/echo", "-n", "foobar"},
- Volumes: map[string]struct{}{"/test": {}},
- },
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
- defer runtime.Destroy(container)
- _, err = container.Output()
- if err != nil {
- t.Fatal(err)
- }
- if !container.VolumesRW["/test"] {
- t.Fail()
- }
-
- container2, _, err := runtime.Create(
- &runconfig.Config{
- Image: GetTestImage(runtime).ID,
- Cmd: []string{"/bin/echo", "-n", "foobar"},
- VolumesFrom: container.ID,
- },
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
- defer runtime.Destroy(container2)
-
- _, err = container2.Output()
- if err != nil {
- t.Fatal(err)
- }
-
- if container.Volumes["/test"] != container2.Volumes["/test"] {
- t.Fail()
- }
-
- actual, exists := container2.VolumesRW["/test"]
- if !exists {
- t.Fail()
- }
-
- if container.VolumesRW["/test"] != actual {
- t.Fail()
- }
-}
-
// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819.
func TestRestartWithVolumes(t *testing.T) {
runtime := mkRuntime(t)
@@ -1456,70 +1317,50 @@ func TestRestartWithVolumes(t *testing.T) {
}
}
-// Test for #1351
-func TestVolumesFromWithVolumes(t *testing.T) {
+func TestContainerNetwork(t *testing.T) {
runtime := mkRuntime(t)
defer nuke(runtime)
-
- container, _, err := runtime.Create(&runconfig.Config{
- Image: GetTestImage(runtime).ID,
- Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
- Volumes: map[string]struct{}{"/test": {}},
- },
+ container, _, err := runtime.Create(
+ &runconfig.Config{
+ Image: GetTestImage(runtime).ID,
+ // If I change this to ping 8.8.8.8 it fails. Any idea why? - timthelion
+ Cmd: []string{"ping", "-c", "1", "127.0.0.1"},
+ },
"",
)
if err != nil {
t.Fatal(err)
}
defer runtime.Destroy(container)
-
- for key := range container.Config.Volumes {
- if key != "/test" {
- t.Fail()
- }
- }
-
- _, err = container.Output()
- if err != nil {
+ if err := container.Run(); err != nil {
t.Fatal(err)
}
-
- expected := container.Volumes["/test"]
- if expected == "" {
- t.Fail()
+ if code := container.State.GetExitCode(); code != 0 {
+ t.Fatalf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", code)
}
+}
- container2, _, err := runtime.Create(
+// Issue #4681
+func TestLoopbackFunctionsWhenNetworkingIsDissabled(t *testing.T) {
+ runtime := mkRuntime(t)
+ defer nuke(runtime)
+ container, _, err := runtime.Create(
&runconfig.Config{
- Image: GetTestImage(runtime).ID,
- Cmd: []string{"cat", "/test/foo"},
- VolumesFrom: container.ID,
- Volumes: map[string]struct{}{"/test": {}},
+ Image: GetTestImage(runtime).ID,
+ Cmd: []string{"ping", "-c", "1", "127.0.0.1"},
+ NetworkDisabled: true,
},
"",
)
if err != nil {
t.Fatal(err)
}
- defer runtime.Destroy(container2)
-
- output, err := container2.Output()
- if err != nil {
+ defer runtime.Destroy(container)
+ if err := container.Run(); err != nil {
t.Fatal(err)
}
-
- if string(output) != "bar" {
- t.Fail()
- }
-
- if container.Volumes["/test"] != container2.Volumes["/test"] {
- t.Fail()
- }
-
- // Ensure it restarts successfully
- _, err = container2.Output()
- if err != nil {
- t.Fatal(err)
+ if code := container.State.GetExitCode(); code != 0 {
+ t.Fatalf("Unexpected ping 127.0.0.1 exit code %d (expected 0)", code)
}
}
@@ -1528,7 +1369,7 @@ func TestOnlyLoopbackExistsWhenUsingDisableNetworkOption(t *testing.T) {
runtime := mkRuntimeFromEngine(eng, t)
defer nuke(runtime)
- config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show"}, nil)
+ config, hc, _, err := runconfig.Parse([]string{"-n=false", GetTestImage(runtime).ID, "ip", "addr", "show", "up"}, nil)
if err != nil {
t.Fatal(err)
}
@@ -1580,7 +1421,7 @@ func TestPrivilegedCanMknod(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
- if output, err := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
+ if output, err := runContainer(eng, runtime, []string{"--privileged", "_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
t.Fatalf("Could not mknod into privileged container %s %v", output, err)
}
}
@@ -1589,21 +1430,21 @@ func TestPrivilegedCanMount(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
- if output, _ := runContainer(eng, runtime, []string{"-privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
+ if output, _ := runContainer(eng, runtime, []string{"--privileged", "_", "sh", "-c", "mount -t tmpfs none /tmp && echo ok"}, t); output != "ok\n" {
t.Fatal("Could not mount into privileged container")
}
}
-func TestPrivilegedCannotMknod(t *testing.T) {
+func TestUnprivilegedCanMknod(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
- if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 || echo ok"}, t); output != "ok\n" {
- t.Fatal("Could mknod into secure container")
+ if output, _ := runContainer(eng, runtime, []string{"_", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok"}, t); output != "ok\n" {
+ t.Fatal("Couldn't mknod into secure container")
}
}
-func TestPrivilegedCannotMount(t *testing.T) {
+func TestUnprivilegedCannotMount(t *testing.T) {
eng := NewTestEngine(t)
runtime := mkRuntimeFromEngine(eng, t)
defer runtime.Nuke()
@@ -1611,109 +1452,3 @@ func TestPrivilegedCannotMount(t *testing.T) {
t.Fatal("Could mount into secure container")
}
}
-
-func TestMultipleVolumesFrom(t *testing.T) {
- runtime := mkRuntime(t)
- defer nuke(runtime)
-
- container, _, err := runtime.Create(&runconfig.Config{
- Image: GetTestImage(runtime).ID,
- Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
- Volumes: map[string]struct{}{"/test": {}},
- },
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
- defer runtime.Destroy(container)
-
- for key := range container.Config.Volumes {
- if key != "/test" {
- t.Fail()
- }
- }
-
- _, err = container.Output()
- if err != nil {
- t.Fatal(err)
- }
-
- expected := container.Volumes["/test"]
- if expected == "" {
- t.Fail()
- }
-
- container2, _, err := runtime.Create(
- &runconfig.Config{
- Image: GetTestImage(runtime).ID,
- Cmd: []string{"sh", "-c", "echo -n bar > /other/foo"},
- Volumes: map[string]struct{}{"/other": {}},
- },
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
- defer runtime.Destroy(container2)
-
- for key := range container2.Config.Volumes {
- if key != "/other" {
- t.FailNow()
- }
- }
- if _, err := container2.Output(); err != nil {
- t.Fatal(err)
- }
-
- container3, _, err := runtime.Create(
- &runconfig.Config{
- Image: GetTestImage(runtime).ID,
- Cmd: []string{"/bin/echo", "-n", "foobar"},
- VolumesFrom: strings.Join([]string{container.ID, container2.ID}, ","),
- }, "")
-
- if err != nil {
- t.Fatal(err)
- }
- defer runtime.Destroy(container3)
-
- if _, err := container3.Output(); err != nil {
- t.Fatal(err)
- }
-
- if container3.Volumes["/test"] != container.Volumes["/test"] {
- t.Fail()
- }
- if container3.Volumes["/other"] != container2.Volumes["/other"] {
- t.Fail()
- }
-}
-
-func TestRestartGhost(t *testing.T) {
- runtime := mkRuntime(t)
- defer nuke(runtime)
-
- container, _, err := runtime.Create(
- &runconfig.Config{
- Image: GetTestImage(runtime).ID,
- Cmd: []string{"sh", "-c", "echo -n bar > /test/foo"},
- Volumes: map[string]struct{}{"/test": {}},
- },
- "",
- )
- if err != nil {
- t.Fatal(err)
- }
-
- if err := container.Kill(); err != nil {
- t.Fatal(err)
- }
-
- container.State.SetGhost(true)
-
- _, err = container.Output()
- if err != nil {
- t.Fatal(err)
- }
-}
diff --git a/integration/fixtures/https/ca.pem b/integration/fixtures/https/ca.pem
new file mode 100644
index 0000000000..6825d6d1bd
--- /dev/null
+++ b/integration/fixtures/https/ca.pem
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIID0TCCAzqgAwIBAgIJAP2r7GqEJwSnMA0GCSqGSIb3DQEBBQUAMIGiMQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG
+A1UEChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMI
+Y2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWls
+QGhvc3QuZG9tYWluMB4XDTEzMTIwMzE2NTYzMFoXDTIzMTIwMTE2NTYzMFowgaIx
+CzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2Nv
+MRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYD
+VQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEW
+EG1haWxAaG9zdC5kb21haW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALAn
+0xDw+5y7ZptQacq66pUhRu82JP2WU6IDgo5QUtNU6/CX5PwQATe/OnYTZQFbksxp
+AU9boG0FCkgxfsgPYXEuZxVEGKI2fxfKHOZZI8mrkWmj6eWU/0cvCjGVc9rTITP5
+sNQvg+hORyVDdNp2IdsbMJayiB3AQYMFx3vSDOMTAgMBAAGjggELMIIBBzAdBgNV
+HQ4EFgQUZu7DFz09q0QBa2+ymRm9qgK1NPswgdcGA1UdIwSBzzCBzIAUZu7DFz09
+q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD
+QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x
+ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI
+Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq
+hCcEpzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAF8fJKKM+/oOdnNi
+zEd0M1+PmZOyqvjYQn/2ZR8UHH6Imgc/OPQKZXf0bVE1Txc/DaUNn9Isd1SuCuaE
+ic3vAIYYU7PmgeNN6vwec48V96T7jr+GAi6AVMhQEc2hHCfVtx11Xx+x6aHDZzJt
+Zxtf5lL6KSO9Y+EFwM+rju6hm5hW
+-----END CERTIFICATE-----
diff --git a/integration/fixtures/https/client-cert.pem b/integration/fixtures/https/client-cert.pem
new file mode 100644
index 0000000000..c05ed47c2c
--- /dev/null
+++ b/integration/fixtures/https/client-cert.pem
@@ -0,0 +1,73 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 3 (0x3)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain
+ Validity
+ Not Before: Dec 4 14:17:54 2013 GMT
+ Not After : Dec 2 14:17:54 2023 GMT
+ Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (1024 bit)
+ Modulus:
+ 00:ca:c9:05:d0:09:4e:3e:a4:fc:d5:14:f4:a5:e8:
+ 34:d3:6b:51:e3:f3:62:ea:a1:f0:e8:ed:c4:2a:bc:
+ f0:4f:ca:07:df:e3:88:fa:f4:21:99:35:0e:3d:ea:
+ b0:86:e7:c4:d2:8a:83:2b:42:b8:ec:a3:99:62:70:
+ 81:46:cc:fc:a5:1d:d2:63:e8:eb:07:25:9a:e2:25:
+ 6d:11:56:f2:1a:51:a1:b6:3e:1c:57:32:e9:7b:2c:
+ aa:1b:cc:97:2d:89:2d:b1:c9:5e:35:28:4d:7c:fa:
+ 65:31:3e:f7:70:dd:6e:0b:3c:58:af:a8:2e:24:c0:
+ 7e:4e:78:7d:0a:9e:8f:42:43
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ Easy-RSA Generated Certificate
+ X509v3 Subject Key Identifier:
+ DE:42:EF:2D:98:A3:6C:A8:AA:E0:8C:71:2C:9D:64:23:A9:E2:7E:81
+ X509v3 Authority Key Identifier:
+ keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB
+ DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain
+ serial:FD:AB:EC:6A:84:27:04:A7
+
+ X509v3 Extended Key Usage:
+ TLS Web Client Authentication
+ X509v3 Key Usage:
+ Digital Signature
+ Signature Algorithm: sha1WithRSAEncryption
+ 1c:44:26:ea:e1:66:25:cb:e4:8e:57:1c:f6:b9:17:22:62:40:
+ 12:90:8f:3b:b2:61:7a:54:94:8f:b1:20:0b:bf:a3:51:e3:fa:
+ 1c:a1:be:92:3a:d0:76:44:c0:57:83:ab:6a:e4:1a:45:49:a4:
+ af:39:0d:60:32:fc:3a:be:d7:fb:5d:99:7a:1f:87:e7:d5:ab:
+ 84:a2:5e:90:d8:bf:fa:89:6d:32:26:02:5e:31:35:68:7f:31:
+ f5:6b:51:46:bc:af:70:ed:5a:09:7d:ec:b2:48:4f:fe:c5:2f:
+ 56:04:ad:f6:c1:d2:2a:e4:6a:c4:87:fe:08:35:c5:38:cb:5e:
+ 4a:c4
+-----BEGIN CERTIFICATE-----
+MIIEFTCCA36gAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv
+cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l
+MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv
+bWFpbjAeFw0xMzEyMDQxNDE3NTRaFw0yMzEyMDIxNDE3NTRaMIGgMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE
+ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEPMA0GA1UEAxMGY2xp
+ZW50MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0
+LmRvbWFpbjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAyskF0AlOPqT81RT0
+peg002tR4/Ni6qHw6O3EKrzwT8oH3+OI+vQhmTUOPeqwhufE0oqDK0K47KOZYnCB
+Rsz8pR3SY+jrByWa4iVtEVbyGlGhtj4cVzLpeyyqG8yXLYktscleNShNfPplMT73
+cN1uCzxYr6guJMB+Tnh9Cp6PQkMCAwEAAaOCAVkwggFVMAkGA1UdEwQCMAAwLQYJ
+YIZIAYb4QgENBCAWHkVhc3ktUlNBIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNV
+HQ4EFgQU3kLvLZijbKiq4IxxLJ1kI6nifoEwgdcGA1UdIwSBzzCBzIAUZu7DFz09
+q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJD
+QTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24x
+ETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQDEwhjaGFuZ2VtZTERMA8GA1UEKRMI
+Y2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21haW6CCQD9q+xq
+hCcEpzATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZIhvcN
+AQEFBQADgYEAHEQm6uFmJcvkjlcc9rkXImJAEpCPO7JhelSUj7EgC7+jUeP6HKG+
+kjrQdkTAV4OrauQaRUmkrzkNYDL8Or7X+12Zeh+H59WrhKJekNi/+oltMiYCXjE1
+aH8x9WtRRryvcO1aCX3sskhP/sUvVgSt9sHSKuRqxIf+CDXFOMteSsQ=
+-----END CERTIFICATE-----
diff --git a/integration/fixtures/https/client-key.pem b/integration/fixtures/https/client-key.pem
new file mode 100644
index 0000000000..b5c15f8dc7
--- /dev/null
+++ b/integration/fixtures/https/client-key.pem
@@ -0,0 +1,16 @@
+-----BEGIN PRIVATE KEY-----
+MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAMrJBdAJTj6k/NUU
+9KXoNNNrUePzYuqh8OjtxCq88E/KB9/jiPr0IZk1Dj3qsIbnxNKKgytCuOyjmWJw
+gUbM/KUd0mPo6wclmuIlbRFW8hpRobY+HFcy6XssqhvMly2JLbHJXjUoTXz6ZTE+
+93Ddbgs8WK+oLiTAfk54fQqej0JDAgMBAAECgYBOFEzKp2qbMEexe9ofL2N3rDDh
+xkrl8OijpzkLA6i78BxMFn4dsnZlWUpciMrjhsYAExkiRRSS+QMMJimAq1jzQqc3
+FAQV2XGYwkd0cUn7iZGvfNnEPysjsfyYQM+m+sT0ATj4BZjVShC6kkSjTdm1leLN
+OSvcHdcu3Xxg9ufF0QJBAPYdnNt5sIndt2WECePuRVi+uF4mlxTobFY0fjn26yhC
+4RsnhhD3Vldygo9gvnkwrAZYaALGSPBewes2InxvjA8CQQDS7erKiNXpwoqz5XiU
+SVEsIIVTdWzBjGbIqMOu/hUwM5FK4j6JTBks0aTGMyh0YV9L1EzM0X79J29JahCe
+iQKNAkBKNMOGqTpBV0hko1sYDk96YobUXG5RL4L6uvkUIQ7mJMQam+AgXXL7Ctuy
+v0iu4a38e8tgisiTMP7nHHtpaXihAkAOiN54/lzfMsykANgCP9scE1GcoqbP34Dl
+qttxH4kOPT9xzY1JoLjLYdbc4YGUI3GRpBt2sajygNkmUey7P+2xAkBBsVCZFvTw
+qHvOpPS2kX5ml5xoc/QAHK9N7kR+X7XFYx82RTVSqJEK4lPb+aEWn+CjiIewO4Q5
+ksDFuNxAzbhl
+-----END PRIVATE KEY-----
diff --git a/integration/fixtures/https/client-rogue-cert.pem b/integration/fixtures/https/client-rogue-cert.pem
new file mode 100644
index 0000000000..21ae4bd579
--- /dev/null
+++ b/integration/fixtures/https/client-rogue-cert.pem
@@ -0,0 +1,73 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 2 (0x2)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain
+ Validity
+ Not Before: Feb 24 17:54:59 2014 GMT
+ Not After : Feb 22 17:54:59 2024 GMT
+ Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=client/name=changeme/emailAddress=mail@host.domain
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (1024 bit)
+ Modulus:
+ 00:e8:e2:2c:b8:d4:db:89:50:4f:47:1e:68:db:f7:
+ e4:cc:47:41:63:75:03:37:50:7a:a8:4d:27:36:d5:
+ 15:01:08:b6:cf:56:f7:56:6d:3d:f9:e2:8d:1a:5d:
+ bf:a0:24:5e:07:55:8e:d0:dc:f1:fa:19:87:1d:d6:
+ b6:58:82:2e:ba:69:6d:e9:d9:c8:16:0d:1d:59:7f:
+ f4:8e:58:10:01:3d:21:14:16:3c:ec:cd:8c:b7:0e:
+ e6:7b:77:b4:f9:90:a5:17:01:bb:84:c6:b2:12:87:
+ 70:eb:9f:6d:4f:d0:68:8b:96:c0:e7:0b:51:b4:9d:
+ 1d:7b:6c:7b:be:89:6b:88:8b
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ Easy-RSA Generated Certificate
+ X509v3 Subject Key Identifier:
+ 9E:F8:49:D0:A2:76:30:5C:AB:2B:8A:B5:8D:C6:45:1F:A7:F8:CF:85
+ X509v3 Authority Key Identifier:
+ keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F
+ DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain
+ serial:E7:21:1E:18:41:1B:96:83
+
+ X509v3 Extended Key Usage:
+ TLS Web Client Authentication
+ X509v3 Key Usage:
+ Digital Signature
+ Signature Algorithm: sha1WithRSAEncryption
+ 48:76:c0:18:fa:0a:ee:4e:1a:ec:02:9d:d4:83:ca:94:54:a1:
+ 3f:51:2f:3e:4b:95:c3:42:9b:71:a0:4b:d9:af:47:23:b9:1c:
+ fb:85:ba:76:e2:09:cb:65:bb:d2:7d:44:3d:4b:67:ba:80:83:
+ be:a8:ed:c4:b9:ea:1a:1b:c7:59:3b:d9:5c:0d:46:d8:c9:92:
+ cb:10:c5:f2:1a:38:a4:aa:07:2c:e3:84:16:79:c7:95:09:e3:
+ 01:d2:15:a2:77:0b:8b:bf:94:04:e9:7f:c0:cd:e6:2e:64:cd:
+ 1e:a3:32:ec:11:cc:62:ce:c7:4e:cd:ad:48:5c:b1:b8:e9:76:
+ b3:f9
+-----BEGIN CERTIFICATE-----
+MIIEDTCCA3agAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2
+aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP
+BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu
+MB4XDTE0MDIyNDE3NTQ1OVoXDTI0MDIyMjE3NTQ1OVowgaAxCzAJBgNVBAYTAlVT
+MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG
+b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMQ8wDQYDVQQDEwZjbGllbnQx
+ETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9t
+YWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDo4iy41NuJUE9HHmjb9+TM
+R0FjdQM3UHqoTSc21RUBCLbPVvdWbT354o0aXb+gJF4HVY7Q3PH6GYcd1rZYgi66
+aW3p2cgWDR1Zf/SOWBABPSEUFjzszYy3DuZ7d7T5kKUXAbuExrISh3Drn21P0GiL
+lsDnC1G0nR17bHu+iWuIiwIDAQABo4IBVTCCAVEwCQYDVR0TBAIwADAtBglghkgB
+hvhCAQ0EIBYeRWFzeS1SU0EgR2VuZXJhdGVkIENlcnRpZmljYXRlMB0GA1UdDgQW
+BBSe+EnQonYwXKsrirWNxkUfp/jPhTCB0wYDVR0jBIHLMIHIgBTcpfF2207Nju+x
+I1YdkoCZdDvqb6GBpKSBoTCBnjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRUw
+EwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2aWwgSW5jMREwDwYDVQQL
+EwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAPBgNVBCkTCGNoYW5nZW1l
+MR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWluggkA5yEeGEEbloMwEwYD
+VR0lBAwwCgYIKwYBBQUHAwIwCwYDVR0PBAQDAgeAMA0GCSqGSIb3DQEBBQUAA4GB
+AEh2wBj6Cu5OGuwCndSDypRUoT9RLz5LlcNCm3GgS9mvRyO5HPuFunbiCctlu9J9
+RD1LZ7qAg76o7cS56hobx1k72VwNRtjJkssQxfIaOKSqByzjhBZ5x5UJ4wHSFaJ3
+C4u/lATpf8DN5i5kzR6jMuwRzGLOx07NrUhcsbjpdrP5
+-----END CERTIFICATE-----
diff --git a/integration/fixtures/https/client-rogue-key.pem b/integration/fixtures/https/client-rogue-key.pem
new file mode 100644
index 0000000000..53c122ab70
--- /dev/null
+++ b/integration/fixtures/https/client-rogue-key.pem
@@ -0,0 +1,16 @@
+-----BEGIN PRIVATE KEY-----
+MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOjiLLjU24lQT0ce
+aNv35MxHQWN1AzdQeqhNJzbVFQEIts9W91ZtPfnijRpdv6AkXgdVjtDc8foZhx3W
+tliCLrppbenZyBYNHVl/9I5YEAE9IRQWPOzNjLcO5nt3tPmQpRcBu4TGshKHcOuf
+bU/QaIuWwOcLUbSdHXtse76Ja4iLAgMBAAECgYADs+TmI2xCKKa6CL++D5jxrohZ
+nnionnz0xBVFh+nHlG3jqgxQsXf0yydXLfpn/2wHTdLxezHVuiYt0UYg7iD0CglW
++IjcgMebzyjLeYqYOE5llPlMvhp2HoEMYJNb+7bRrZ1WCITbu+Su0w1cgA7Cs+Ej
+VlfvGzN+qqnDThRUYQJBAPY0sMWZJKly8QhUmUvmcXdPczzSOf6Mm7gc5LR6wzxd
+vW7syuqk50qjqVqFpN81vCV7GoDxRUWbTM9ftf7JGFkCQQDyJc/1RMygE2o+enU1
+6UBxJyclXITEYtDn8aoEpLNc7RakP1WoPUKjZOnjkcoKcIkFNkSPeCfQujrb5f3F
+MkuDAkByAI/hzzmkpK5rFxEsjfX4Mve/L/DepyjrpaVY1IdWimlO1aJX6CeY7hNa
+8QsYt/74s/nfvtg+lNyKIV1aLq9xAkB+WSSNgfyTeg3x08vc+Xxajmdqoz/TiQwg
+OoTQL3A3iK5LvZBgXLasszcnOycFE3srcQmNItEDpGiZ3QPxJTEpAkEA45EE9NMJ
+SA7EGWSFlbz4f4u4oBeiDiJRJbGGfAyVxZlpCWUjPpg9+swsWoFEOjnGYaChAMk5
+nrOdMf15T6QF7Q==
+-----END PRIVATE KEY-----
diff --git a/integration/fixtures/https/server-cert.pem b/integration/fixtures/https/server-cert.pem
new file mode 100644
index 0000000000..08abfd1a3b
--- /dev/null
+++ b/integration/fixtures/https/server-cert.pem
@@ -0,0 +1,76 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 4 (0x4)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain
+ Validity
+ Not Before: Dec 4 15:01:20 2013 GMT
+ Not After : Dec 2 15:01:20 2023 GMT
+ Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=*/name=changeme/emailAddress=mail@host.domain
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (1024 bit)
+ Modulus:
+ 00:c1:ff:7d:30:6f:64:4a:b1:92:b1:71:d1:c1:74:
+ e2:1d:db:2d:11:24:e1:00:d4:00:ae:6f:c8:9e:ae:
+ 67:b3:4a:bd:f7:e6:9e:57:6d:19:4c:3c:23:94:2d:
+ 3d:d6:63:84:d8:fa:76:2b:38:12:c1:ed:20:9d:32:
+ e0:e8:c2:bf:9a:77:70:04:3f:7f:ca:8c:2c:82:d6:
+ 3d:25:5c:02:1a:4f:64:93:03:dd:9c:42:97:5e:09:
+ 49:af:f0:c2:e1:30:08:0e:21:46:95:d1:13:59:c0:
+ c8:76:be:94:0d:8b:43:67:21:33:b2:08:60:9d:76:
+ a8:05:32:1e:f9:95:09:14:75
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Cert Type:
+ SSL Server
+ Netscape Comment:
+ Easy-RSA Generated Server Certificate
+ X509v3 Subject Key Identifier:
+ 14:02:FD:FD:DD:13:38:E0:71:EA:D1:BE:C0:0E:89:1A:2D:B6:19:06
+ X509v3 Authority Key Identifier:
+ keyid:66:EE:C3:17:3D:3D:AB:44:01:6B:6F:B2:99:19:BD:AA:02:B5:34:FB
+ DirName:/C=US/ST=CA/L=SanFrancisco/O=Fort-Funston/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain
+ serial:FD:AB:EC:6A:84:27:04:A7
+
+ X509v3 Extended Key Usage:
+ TLS Web Server Authentication
+ X509v3 Key Usage:
+ Digital Signature, Key Encipherment
+ Signature Algorithm: sha1WithRSAEncryption
+ 40:0f:10:39:c4:b7:0f:0d:2f:bf:d2:16:cc:8e:d3:9a:fb:8b:
+ ce:4b:7b:0d:48:77:ce:f1:fe:d5:8f:ea:b1:71:ed:49:1d:9f:
+ 23:3a:16:d4:70:7c:c5:29:bf:e4:90:34:d0:f0:00:24:f4:e4:
+ df:2c:c3:83:01:66:61:c9:a8:ab:29:e7:98:6d:27:89:4a:76:
+ c9:2e:19:8e:fe:6e:d5:f8:99:11:0e:97:67:4b:34:e3:1e:e3:
+ 9f:35:00:a5:32:f9:b5:2c:f2:e0:c5:2e:cc:81:bd:18:dd:5c:
+ 12:c8:6b:fa:0c:17:74:30:55:f6:6e:20:9a:6c:1e:09:b4:0c:
+ 15:42
+-----BEGIN CERTIFICATE-----
+MIIEKjCCA5OgAwIBAgIBBDANBgkqhkiG9w0BAQUFADCBojELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xFTATBgNVBAoTDEZv
+cnQtRnVuc3RvbjERMA8GA1UECxMIY2hhbmdlbWUxETAPBgNVBAMTCGNoYW5nZW1l
+MREwDwYDVQQpEwhjaGFuZ2VtZTEfMB0GCSqGSIb3DQEJARYQbWFpbEBob3N0LmRv
+bWFpbjAeFw0xMzEyMDQxNTAxMjBaFw0yMzEyMDIxNTAxMjBaMIGbMQswCQYDVQQG
+EwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UE
+ChMMRm9ydC1GdW5zdG9uMREwDwYDVQQLEwhjaGFuZ2VtZTEKMAgGA1UEAxQBKjER
+MA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1haWxAaG9zdC5kb21h
+aW4wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMH/fTBvZEqxkrFx0cF04h3b
+LREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y4OjCv5p3
+cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+lA2LQ2ch
+M7IIYJ12qAUyHvmVCRR1AgMBAAGjggFzMIIBbzAJBgNVHRMEAjAAMBEGCWCGSAGG
++EIBAQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNl
+cnZlciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUFAL9/d0TOOBx6tG+wA6JGi22GQYw
+gdcGA1UdIwSBzzCBzIAUZu7DFz09q0QBa2+ymRm9qgK1NPuhgaikgaUwgaIxCzAJ
+BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUw
+EwYDVQQKEwxGb3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD
+EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h
+aWxAaG9zdC5kb21haW6CCQD9q+xqhCcEpzATBgNVHSUEDDAKBggrBgEFBQcDATAL
+BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEAQA8QOcS3Dw0vv9IWzI7TmvuL
+zkt7DUh3zvH+1Y/qsXHtSR2fIzoW1HB8xSm/5JA00PAAJPTk3yzDgwFmYcmoqynn
+mG0niUp2yS4Zjv5u1fiZEQ6XZ0s04x7jnzUApTL5tSzy4MUuzIG9GN1cEshr+gwX
+dDBV9m4gmmweCbQMFUI=
+-----END CERTIFICATE-----
diff --git a/integration/fixtures/https/server-key.pem b/integration/fixtures/https/server-key.pem
new file mode 100644
index 0000000000..c269320ef0
--- /dev/null
+++ b/integration/fixtures/https/server-key.pem
@@ -0,0 +1,16 @@
+-----BEGIN PRIVATE KEY-----
+MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMH/fTBvZEqxkrFx
+0cF04h3bLREk4QDUAK5vyJ6uZ7NKvffmnldtGUw8I5QtPdZjhNj6dis4EsHtIJ0y
+4OjCv5p3cAQ/f8qMLILWPSVcAhpPZJMD3ZxCl14JSa/wwuEwCA4hRpXRE1nAyHa+
+lA2LQ2chM7IIYJ12qAUyHvmVCRR1AgMBAAECgYAmwckb9RUfSwyYgLm8IYLPHiuJ
+wkllZfVg5Bo7gXJcQnFjZmJ56uTj8xvUjZlODIHM63TSO5ibv6kFXtXKCqZGd2M+
+wGbhZ0f+2GvKcwMmJERnIQjuoNaYSQLT0tM0VB9Iz0rJlZC+tzPZ+5pPqEumRdsS
+IzWNXfF42AhcbwAQYQJBAPVXtMYIJc9EZsz86ZcQiMPWUpCX5vnRmtwL8kKyR8D5
+4KfYeiowyFffSRMMcclwNHq7TgSXN+nIXM9WyzyzwikCQQDKbNA28AgZp9aT54HP
+WnbeE2pmt+uk/zl/BtxJSoK6H+69Jec+lf7EgL7HgOWYRSNot4uQWu8IhsHLTiUq
++0FtAkEAqwlRxRy4/x24bP+D+QRV0/D97j93joFJbE4Hved7jlSlAV4xDGilwlyv
+HNB4Iu5OJ6Gcaibhm+FKkmD3noHSwQJBAIpu3fokLzX0bS+bDFBU6qO3HXX/47xj
++tsfQvkwZrSI8AkU6c8IX0HdVhsz0FBRQAT2ORDQz1XCarfxykNZrwUCQQCGCBIc
+BBCWzhHlswlGidWJg3HqqO6hPPClEr3B5G87oCsdeYwiO23XT6rUnoJXfJHp6oCW
+5nCwDu5ZTP+khltg
+-----END PRIVATE KEY-----
diff --git a/integration/fixtures/https/server-rogue-cert.pem b/integration/fixtures/https/server-rogue-cert.pem
new file mode 100644
index 0000000000..28feba6656
--- /dev/null
+++ b/integration/fixtures/https/server-rogue-cert.pem
@@ -0,0 +1,76 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 3 (0x3)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=CA, L=SanFrancisco, O=Evil Inc, OU=changeme, CN=changeme/name=changeme/emailAddress=mail@host.domain
+ Validity
+ Not Before: Feb 28 18:49:31 2014 GMT
+ Not After : Feb 26 18:49:31 2024 GMT
+ Subject: C=US, ST=CA, L=SanFrancisco, O=Fort-Funston, OU=changeme, CN=localhost/name=changeme/emailAddress=mail@host.domain
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (1024 bit)
+ Modulus:
+ 00:d1:08:58:24:60:a1:69:65:4b:76:46:8f:88:75:
+ 7c:49:3a:d8:03:cc:5b:58:c5:d1:bb:e5:f9:54:b9:
+ 75:65:df:7e:bb:fb:54:d4:b2:e9:6f:58:a2:a4:84:
+ 43:94:77:24:81:38:36:36:f0:66:65:26:e5:5b:2a:
+ 14:1c:a9:ae:57:7f:75:00:23:14:4b:61:58:e4:82:
+ aa:15:97:94:bd:50:35:0d:5d:18:18:ed:10:6a:bb:
+ d3:64:5a:eb:36:98:5b:58:a7:fe:67:48:c1:6c:3f:
+ 51:2f:02:65:96:54:77:9b:34:f9:a7:d2:63:54:6a:
+ 9e:02:5c:be:65:98:a4:b4:b5
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Cert Type:
+ SSL Server
+ Netscape Comment:
+ Easy-RSA Generated Server Certificate
+ X509v3 Subject Key Identifier:
+ 1F:E0:57:CA:CB:76:C9:C4:86:B9:EA:69:17:C0:F3:51:CE:95:40:EC
+ X509v3 Authority Key Identifier:
+ keyid:DC:A5:F1:76:DB:4E:CD:8E:EF:B1:23:56:1D:92:80:99:74:3B:EA:6F
+ DirName:/C=US/ST=CA/L=SanFrancisco/O=Evil Inc/OU=changeme/CN=changeme/name=changeme/emailAddress=mail@host.domain
+ serial:E7:21:1E:18:41:1B:96:83
+
+ X509v3 Extended Key Usage:
+ TLS Web Server Authentication
+ X509v3 Key Usage:
+ Digital Signature, Key Encipherment
+ Signature Algorithm: sha1WithRSAEncryption
+ 04:93:0e:28:01:94:18:f0:8c:7c:d3:0c:ad:e9:b7:46:b1:30:
+ 65:ed:68:7c:8c:91:cd:1a:86:66:87:4a:4f:c0:97:bc:f7:85:
+ 4b:38:79:31:b2:65:88:b1:76:16:9e:80:93:38:f4:b9:eb:65:
+ 00:6d:bb:89:e0:a1:bf:95:5e:80:13:8e:01:73:d3:f1:08:73:
+ 85:a5:33:75:0b:42:8a:a3:07:09:35:ef:d7:c6:58:eb:60:a3:
+ 06:89:a0:53:99:e2:aa:41:90:e0:1a:d2:12:4b:48:7d:c3:9c:
+ ad:bd:0e:5e:5f:f7:09:0c:5d:7c:86:24:dd:92:d5:b3:14:06:
+ c7:9f
+-----BEGIN CERTIFICATE-----
+MIIEKjCCA5OgAwIBAgIBAzANBgkqhkiG9w0BAQUFADCBnjELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRUwEwYDVQQHEwxTYW5GcmFuY2lzY28xETAPBgNVBAoTCEV2
+aWwgSW5jMREwDwYDVQQLEwhjaGFuZ2VtZTERMA8GA1UEAxMIY2hhbmdlbWUxETAP
+BgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3QuZG9tYWlu
+MB4XDTE0MDIyODE4NDkzMVoXDTI0MDIyNjE4NDkzMVowgaMxCzAJBgNVBAYTAlVT
+MQswCQYDVQQIEwJDQTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG
+b3J0LUZ1bnN0b24xETAPBgNVBAsTCGNoYW5nZW1lMRIwEAYDVQQDEwlsb2NhbGhv
+c3QxETAPBgNVBCkTCGNoYW5nZW1lMR8wHQYJKoZIhvcNAQkBFhBtYWlsQGhvc3Qu
+ZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRCFgkYKFpZUt2Ro+I
+dXxJOtgDzFtYxdG75flUuXVl3367+1TUsulvWKKkhEOUdySBODY28GZlJuVbKhQc
+qa5Xf3UAIxRLYVjkgqoVl5S9UDUNXRgY7RBqu9NkWus2mFtYp/5nSMFsP1EvAmWW
+VHebNPmn0mNUap4CXL5lmKS0tQIDAQABo4IBbzCCAWswCQYDVR0TBAIwADARBglg
+hkgBhvhCAQEEBAMCBkAwNAYJYIZIAYb4QgENBCcWJUVhc3ktUlNBIEdlbmVyYXRl
+ZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFB/gV8rLdsnEhrnqaRfA81HO
+lUDsMIHTBgNVHSMEgcswgciAFNyl8XbbTs2O77EjVh2SgJl0O+pvoYGkpIGhMIGe
+MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFTATBgNVBAcTDFNhbkZyYW5jaXNj
+bzERMA8GA1UEChMIRXZpbCBJbmMxETAPBgNVBAsTCGNoYW5nZW1lMREwDwYDVQQD
+EwhjaGFuZ2VtZTERMA8GA1UEKRMIY2hhbmdlbWUxHzAdBgkqhkiG9w0BCQEWEG1h
+aWxAaG9zdC5kb21haW6CCQDnIR4YQRuWgzATBgNVHSUEDDAKBggrBgEFBQcDATAL
+BgNVHQ8EBAMCBaAwDQYJKoZIhvcNAQEFBQADgYEABJMOKAGUGPCMfNMMrem3RrEw
+Ze1ofIyRzRqGZodKT8CXvPeFSzh5MbJliLF2Fp6Akzj0uetlAG27ieChv5VegBOO
+AXPT8QhzhaUzdQtCiqMHCTXv18ZY62CjBomgU5niqkGQ4BrSEktIfcOcrb0OXl/3
+CQxdfIYk3ZLVsxQGx58=
+-----END CERTIFICATE-----
diff --git a/integration/fixtures/https/server-rogue-key.pem b/integration/fixtures/https/server-rogue-key.pem
new file mode 100644
index 0000000000..10f7c65001
--- /dev/null
+++ b/integration/fixtures/https/server-rogue-key.pem
@@ -0,0 +1,16 @@
+-----BEGIN PRIVATE KEY-----
+MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBANEIWCRgoWllS3ZG
+j4h1fEk62APMW1jF0bvl+VS5dWXffrv7VNSy6W9YoqSEQ5R3JIE4NjbwZmUm5Vsq
+FByprld/dQAjFEthWOSCqhWXlL1QNQ1dGBjtEGq702Ra6zaYW1in/mdIwWw/US8C
+ZZZUd5s0+afSY1RqngJcvmWYpLS1AgMBAAECgYAJXh9dGfuB1qlIFqduDR3RxlJR
+8UGSu+LHUeoXkuwg8aAjWoMVuSLe+5DmYIsKx0AajmNXmPRtyg1zRXJ7SltmubJ8
+6qQVDsRk6biMdkpkl6a9Gk2av40psD9/VPGxagEoop7IKYhf3AeKPvPiwVB2qFrl
+1aYMZm0aMR55pgRajQJBAOk8IsJDf0beooDZXVdv/oe4hcbM9fxO8Cn3qzoGImqD
+37LL+PCzDP7AEV3fk43SsZDeSk+LDX+h0o9nPyhzHasCQQDlb3aDgcQY9NaGLUWO
+moOCB3148eBVcAwCocu+OSkf7sbQdvXxgThBOrZl11wwRIMQqh99c2yeUwj+tELl
+3VcfAkBZTiNpCvtDIaBLge9RuZpWUXs3wec2cutWxnSTxSGMc25GQf/R+l0xdk2w
+ChmvpktDUzpU9sN2aXn8WuY+EMX9AkEApbLpUbKPUELLB958RLA819TW/lkZXjrs
+wZ3eSoR3ufM1rOqtVvyvBxUDE+wETWu9iHSFB5Ir2PA5J9JCGkbPmwJAFI1ndfBj
+iuyU93nFX0p+JE2wVHKx4dMzKCearNKiJh/lGDtUq3REGgamTNUnG8RAITUbxFs+
+Z1hrIq8xYl2LOQ==
+-----END PRIVATE KEY-----
diff --git a/integration/graph_test.go b/integration/graph_test.go
index ff1c0d9361..5602b3938d 100644
--- a/integration/graph_test.go
+++ b/integration/graph_test.go
@@ -2,10 +2,11 @@ package docker
import (
"errors"
- "github.com/dotcloud/docker"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/dockerversion"
- "github.com/dotcloud/docker/graphdriver"
+ "github.com/dotcloud/docker/graph"
+ "github.com/dotcloud/docker/image"
+ "github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@@ -24,7 +25,7 @@ func TestMount(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- image, err := graph.Create(archive, nil, "Testing", "", nil)
+ image, err := graph.Create(archive, "", "", "Testing", "", nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -67,8 +68,8 @@ func TestInterruptedRegister(t *testing.T) {
graph, _ := tempGraph(t)
defer nukeGraph(graph)
badArchive, w := io.Pipe() // Use a pipe reader as a fake archive which never yields data
- image := &docker.Image{
- ID: docker.GenerateID(),
+ image := &image.Image{
+ ID: utils.GenerateRandomID(),
Comment: "testing",
Created: time.Now(),
}
@@ -96,18 +97,18 @@ func TestGraphCreate(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- image, err := graph.Create(archive, nil, "Testing", "", nil)
+ img, err := graph.Create(archive, "", "", "Testing", "", nil, nil)
if err != nil {
t.Fatal(err)
}
- if err := docker.ValidateID(image.ID); err != nil {
+ if err := utils.ValidateID(img.ID); err != nil {
t.Fatal(err)
}
- if image.Comment != "Testing" {
- t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", image.Comment)
+ if img.Comment != "Testing" {
+ t.Fatalf("Wrong comment: should be '%s', not '%s'", "Testing", img.Comment)
}
- if image.DockerVersion != dockerversion.VERSION {
- t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, image.DockerVersion)
+ if img.DockerVersion != dockerversion.VERSION {
+ t.Fatalf("Wrong docker_version: should be '%s', not '%s'", dockerversion.VERSION, img.DockerVersion)
}
images, err := graph.Map()
if err != nil {
@@ -115,8 +116,8 @@ func TestGraphCreate(t *testing.T) {
} else if l := len(images); l != 1 {
t.Fatalf("Wrong number of images. Should be %d, not %d", 1, l)
}
- if images[image.ID] == nil {
- t.Fatalf("Could not find image with id %s", image.ID)
+ if images[img.ID] == nil {
+ t.Fatalf("Could not find image with id %s", img.ID)
}
}
@@ -127,8 +128,8 @@ func TestRegister(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- image := &docker.Image{
- ID: docker.GenerateID(),
+ image := &image.Image{
+ ID: utils.GenerateRandomID(),
Comment: "testing",
Created: time.Now(),
}
@@ -164,12 +165,12 @@ func TestDeletePrefix(t *testing.T) {
assertNImages(graph, t, 0)
}
-func createTestImage(graph *docker.Graph, t *testing.T) *docker.Image {
+func createTestImage(graph *graph.Graph, t *testing.T) *image.Image {
archive, err := fakeTar()
if err != nil {
t.Fatal(err)
}
- img, err := graph.Create(archive, nil, "Test image", "", nil)
+ img, err := graph.Create(archive, "", "", "Test image", "", nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -184,7 +185,7 @@ func TestDelete(t *testing.T) {
t.Fatal(err)
}
assertNImages(graph, t, 0)
- img, err := graph.Create(archive, nil, "Bla bla", "", nil)
+ img, err := graph.Create(archive, "", "", "Bla bla", "", nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -199,7 +200,7 @@ func TestDelete(t *testing.T) {
t.Fatal(err)
}
// Test 2 create (same name) / 1 delete
- img1, err := graph.Create(archive, nil, "Testing", "", nil)
+ img1, err := graph.Create(archive, "", "", "Testing", "", nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -207,7 +208,7 @@ func TestDelete(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if _, err = graph.Create(archive, nil, "Testing", "", nil); err != nil {
+ if _, err = graph.Create(archive, "", "", "Testing", "", nil, nil); err != nil {
t.Fatal(err)
}
assertNImages(graph, t, 2)
@@ -243,20 +244,20 @@ func TestByParent(t *testing.T) {
graph, _ := tempGraph(t)
defer nukeGraph(graph)
- parentImage := &docker.Image{
- ID: docker.GenerateID(),
+ parentImage := &image.Image{
+ ID: utils.GenerateRandomID(),
Comment: "parent",
Created: time.Now(),
Parent: "",
}
- childImage1 := &docker.Image{
- ID: docker.GenerateID(),
+ childImage1 := &image.Image{
+ ID: utils.GenerateRandomID(),
Comment: "child1",
Created: time.Now(),
Parent: parentImage.ID,
}
- childImage2 := &docker.Image{
- ID: docker.GenerateID(),
+ childImage2 := &image.Image{
+ ID: utils.GenerateRandomID(),
Comment: "child2",
Created: time.Now(),
Parent: parentImage.ID,
@@ -279,7 +280,7 @@ func TestByParent(t *testing.T) {
* HELPER FUNCTIONS
*/
-func assertNImages(graph *docker.Graph, t *testing.T, n int) {
+func assertNImages(graph *graph.Graph, t *testing.T, n int) {
if images, err := graph.Map(); err != nil {
t.Fatal(err)
} else if actualN := len(images); actualN != n {
@@ -287,7 +288,7 @@ func assertNImages(graph *docker.Graph, t *testing.T, n int) {
}
}
-func tempGraph(t *testing.T) (*docker.Graph, graphdriver.Driver) {
+func tempGraph(t *testing.T) (*graph.Graph, graphdriver.Driver) {
tmp, err := ioutil.TempDir("", "docker-graph-")
if err != nil {
t.Fatal(err)
@@ -296,14 +297,14 @@ func tempGraph(t *testing.T) (*docker.Graph, graphdriver.Driver) {
if err != nil {
t.Fatal(err)
}
- graph, err := docker.NewGraph(tmp, driver)
+ graph, err := graph.NewGraph(tmp, driver)
if err != nil {
t.Fatal(err)
}
return graph, driver
}
-func nukeGraph(graph *docker.Graph) {
+func nukeGraph(graph *graph.Graph) {
graph.Driver().Cleanup()
os.RemoveAll(graph.Root)
}
diff --git a/integration/https_test.go b/integration/https_test.go
new file mode 100644
index 0000000000..0b4abea881
--- /dev/null
+++ b/integration/https_test.go
@@ -0,0 +1,82 @@
+package docker
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "github.com/dotcloud/docker/api/client"
+ "io/ioutil"
+ "testing"
+ "time"
+)
+
+const (
+ errBadCertificate = "remote error: bad certificate"
+ errCaUnknown = "x509: certificate signed by unknown authority"
+)
+
+func getTlsConfig(certFile, keyFile string, t *testing.T) *tls.Config {
+ certPool := x509.NewCertPool()
+ file, err := ioutil.ReadFile("fixtures/https/ca.pem")
+ if err != nil {
+ t.Fatal(err)
+ }
+ certPool.AppendCertsFromPEM(file)
+
+ cert, err := tls.LoadX509KeyPair("fixtures/https/"+certFile, "fixtures/https/"+keyFile)
+ if err != nil {
+ t.Fatalf("Couldn't load X509 key pair: %s", err)
+ }
+ tlsConfig := &tls.Config{
+ RootCAs: certPool,
+ Certificates: []tls.Certificate{cert},
+ }
+ return tlsConfig
+}
+
+// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint
+func TestHttpsInfo(t *testing.T) {
+ cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto,
+ testDaemonHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t))
+
+ setTimeout(t, "Reading command output time out", 10*time.Second, func() {
+ if err := cli.CmdInfo(); err != nil {
+ t.Fatal(err)
+ }
+ })
+}
+
+// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint
+// by using a rogue client certificate and checks that it fails with the expected error.
+func TestHttpsInfoRogueCert(t *testing.T) {
+ cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto,
+ testDaemonHttpsAddr, getTlsConfig("client-rogue-cert.pem", "client-rogue-key.pem", t))
+
+ setTimeout(t, "Reading command output time out", 10*time.Second, func() {
+ err := cli.CmdInfo()
+ if err == nil {
+ t.Fatal("Expected error but got nil")
+ }
+ if err.Error() != errBadCertificate {
+ t.Fatalf("Expected error: %s, got instead: %s", errBadCertificate, err)
+ }
+ })
+}
+
+// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint
+// which provides a rogue server certificate and checks that it fails with the expected error
+func TestHttpsInfoRogueServerCert(t *testing.T) {
+ cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, testDaemonProto,
+ testDaemonRogueHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t))
+
+ setTimeout(t, "Reading command output time out", 10*time.Second, func() {
+ err := cli.CmdInfo()
+ if err == nil {
+ t.Fatal("Expected error but got nil")
+ }
+
+ if err.Error() != errCaUnknown {
+			t.Fatalf("Expected error: %s, got instead: %s", errCaUnknown, err)
+ }
+
+ })
+}
diff --git a/integration/runtime_test.go b/integration/runtime_test.go
index 1e912c1bb4..6058d8f3e8 100644
--- a/integration/runtime_test.go
+++ b/integration/runtime_test.go
@@ -3,10 +3,11 @@ package docker
import (
"bytes"
"fmt"
- "github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
+ "github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/runtime"
"github.com/dotcloud/docker/sysinit"
"github.com/dotcloud/docker/utils"
"io"
@@ -15,7 +16,7 @@ import (
"net/url"
"os"
"path/filepath"
- "runtime"
+ goruntime "runtime"
"strconv"
"strings"
"syscall"
@@ -24,25 +25,30 @@ import (
)
const (
- unitTestImageName = "docker-test-image"
- unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
- unitTestImageIDShort = "83599e29c455"
- unitTestNetworkBridge = "testdockbr0"
- unitTestStoreBase = "/var/lib/docker/unit-tests"
- testDaemonAddr = "127.0.0.1:4270"
- testDaemonProto = "tcp"
+ unitTestImageName = "docker-test-image"
+ unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
+ unitTestImageIDShort = "83599e29c455"
+ unitTestNetworkBridge = "testdockbr0"
+ unitTestStoreBase = "/var/lib/docker/unit-tests"
+ testDaemonAddr = "127.0.0.1:4270"
+ testDaemonProto = "tcp"
+ testDaemonHttpsProto = "tcp"
+ testDaemonHttpsAddr = "localhost:4271"
+ testDaemonRogueHttpsAddr = "localhost:4272"
)
var (
// FIXME: globalRuntime is deprecated by globalEngine. All tests should be converted.
- globalRuntime *docker.Runtime
- globalEngine *engine.Engine
- startFds int
- startGoroutines int
+ globalRuntime *runtime.Runtime
+ globalEngine *engine.Engine
+ globalHttpsEngine *engine.Engine
+ globalRogueHttpsEngine *engine.Engine
+ startFds int
+ startGoroutines int
)
// FIXME: nuke() is deprecated by Runtime.Nuke()
-func nuke(runtime *docker.Runtime) error {
+func nuke(runtime *runtime.Runtime) error {
return runtime.Nuke()
}
@@ -117,9 +123,11 @@ func init() {
// (no tests are run directly in the base)
setupBaseImage()
- // Create the "global runtime" with a long-running daemon for integration tests
+	// Create the "global runtime" with long-running daemons for integration tests
spawnGlobalDaemon()
- startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine()
+ spawnLegitHttpsDaemon()
+ spawnRogueHttpsDaemon()
+ startFds, startGoroutines = utils.GetTotalUsedFds(), goruntime.NumGoroutine()
}
func setupBaseImage() {
@@ -170,9 +178,64 @@ func spawnGlobalDaemon() {
}
}
+func spawnLegitHttpsDaemon() {
+ if globalHttpsEngine != nil {
+ return
+ }
+ globalHttpsEngine = spawnHttpsDaemon(testDaemonHttpsAddr, "fixtures/https/ca.pem",
+ "fixtures/https/server-cert.pem", "fixtures/https/server-key.pem")
+}
+
+func spawnRogueHttpsDaemon() {
+ if globalRogueHttpsEngine != nil {
+ return
+ }
+ globalRogueHttpsEngine = spawnHttpsDaemon(testDaemonRogueHttpsAddr, "fixtures/https/ca.pem",
+ "fixtures/https/server-rogue-cert.pem", "fixtures/https/server-rogue-key.pem")
+}
+
+func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine {
+ t := log.New(os.Stderr, "", 0)
+ root, err := newTestDirectory(unitTestStoreBase)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // FIXME: here we don't use NewTestEngine because it calls initserver with Autorestart=false,
+ // and we want to set it to true.
+
+ eng := newTestEngine(t, true, root)
+
+ // Spawn a Daemon
+ go func() {
+ utils.Debugf("Spawning https daemon for integration tests")
+ listenURL := &url.URL{
+ Scheme: testDaemonHttpsProto,
+ Host: addr,
+ }
+ job := eng.Job("serveapi", listenURL.String())
+ job.SetenvBool("Logging", true)
+ job.SetenvBool("Tls", true)
+ job.SetenvBool("TlsVerify", true)
+ job.Setenv("TlsCa", cacert)
+ job.Setenv("TlsCert", cert)
+ job.Setenv("TlsKey", key)
+ if err := job.Run(); err != nil {
+ log.Fatalf("Unable to spawn the test daemon: %s", err)
+ }
+ }()
+
+	// Give some time to ListenAndServe to actually start
+ time.Sleep(time.Second)
+
+ if err := eng.Job("acceptconnections").Run(); err != nil {
+ log.Fatalf("Unable to accept connections for test api: %s", err)
+ }
+ return eng
+}
+
// FIXME: test that ImagePull(json=true) send correct json output
-func GetTestImage(runtime *docker.Runtime) *docker.Image {
+func GetTestImage(runtime *runtime.Runtime) *image.Image {
imgs, err := runtime.Graph().Map()
if err != nil {
log.Fatalf("Unable to get the test image: %s", err)
@@ -356,7 +419,7 @@ func TestGet(t *testing.T) {
}
-func startEchoServerContainer(t *testing.T, proto string) (*docker.Runtime, *docker.Container, string) {
+func startEchoServerContainer(t *testing.T, proto string) (*runtime.Runtime, *runtime.Container, string) {
var (
err error
id string
diff --git a/integration/server_test.go b/integration/server_test.go
index 69a90527bf..9137e8031b 100644
--- a/integration/server_test.go
+++ b/integration/server_test.go
@@ -1,73 +1,14 @@
package docker
import (
- "github.com/dotcloud/docker"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/server"
"strings"
"testing"
"time"
)
-func TestImageTagImageDelete(t *testing.T) {
- eng := NewTestEngine(t)
- defer mkRuntimeFromEngine(eng, t).Nuke()
-
- srv := mkServerFromEngine(eng, t)
-
- initialImages := getAllImages(eng, t)
- if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil {
- t.Fatal(err)
- }
-
- if err := eng.Job("tag", unitTestImageName, "utest/docker", "tag2").Run(); err != nil {
- t.Fatal(err)
- }
-
- if err := eng.Job("tag", unitTestImageName, "utest:5000/docker", "tag3").Run(); err != nil {
- t.Fatal(err)
- }
-
- images := getAllImages(eng, t)
-
- nExpected := len(initialImages.Data[0].GetList("RepoTags")) + 3
- nActual := len(images.Data[0].GetList("RepoTags"))
- if nExpected != nActual {
- t.Errorf("Expected %d images, %d found", nExpected, nActual)
- }
-
- if err := srv.DeleteImage("utest/docker:tag2", engine.NewTable("", 0), true, false); err != nil {
- t.Fatal(err)
- }
-
- images = getAllImages(eng, t)
-
- nExpected = len(initialImages.Data[0].GetList("RepoTags")) + 2
- nActual = len(images.Data[0].GetList("RepoTags"))
- if nExpected != nActual {
- t.Errorf("Expected %d images, %d found", nExpected, nActual)
- }
-
- if err := srv.DeleteImage("utest:5000/docker:tag3", engine.NewTable("", 0), true, false); err != nil {
- t.Fatal(err)
- }
-
- images = getAllImages(eng, t)
-
- nExpected = len(initialImages.Data[0].GetList("RepoTags")) + 1
- nActual = len(images.Data[0].GetList("RepoTags"))
-
- if err := srv.DeleteImage("utest:tag1", engine.NewTable("", 0), true, false); err != nil {
- t.Fatal(err)
- }
-
- images = getAllImages(eng, t)
-
- if images.Len() != initialImages.Len() {
- t.Errorf("Expected %d image, %d found", initialImages.Len(), images.Len())
- }
-}
-
func TestCreateRm(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
@@ -203,15 +144,22 @@ func TestCreateRmRunning(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
- config, hostConfig, _, err := runconfig.Parse([]string{"-name", "foo", unitTestImageID, "sleep 300"}, nil)
+ config, hostConfig, _, err := runconfig.Parse([]string{"--name", "foo", unitTestImageID, "sleep 300"}, nil)
if err != nil {
t.Fatal(err)
}
id := createTestContainer(eng, config, t)
- job := eng.Job("containers")
- job.SetenvBool("all", true)
+ job := eng.Job("start", id)
+ if err := job.ImportEnv(hostConfig); err != nil {
+ t.Fatal(err)
+ }
+ if err := job.Run(); err != nil {
+ t.Fatal(err)
+ }
+
+ job = eng.Job("containers")
outs, err := job.Stdout.AddListTable()
if err != nil {
t.Fatal(err)
@@ -224,19 +172,24 @@ func TestCreateRmRunning(t *testing.T) {
t.Errorf("Expected 1 container, %v found", len(outs.Data))
}
- job = eng.Job("start", id)
- if err := job.ImportEnv(hostConfig); err != nil {
+ // Test cannot remove running container
+ job = eng.Job("container_delete", id)
+ job.SetenvBool("forceRemove", false)
+ if err := job.Run(); err == nil {
+ t.Fatal("Expected container delete to fail")
+ }
+
+ job = eng.Job("containers")
+ outs, err = job.Stdout.AddListTable()
+ if err != nil {
t.Fatal(err)
}
if err := job.Run(); err != nil {
t.Fatal(err)
}
- // Test cannot remove running container
- job = eng.Job("container_delete", id)
- job.SetenvBool("forceRemove", false)
- if err := job.Run(); err == nil {
- t.Fatal("Expected container delete to fail")
+ if len(outs.Data) != 1 {
+ t.Errorf("Expected 1 container, %v found", len(outs.Data))
}
// Test can force removal of running container
@@ -281,6 +234,63 @@ func TestCommit(t *testing.T) {
}
}
+func TestMergeConfigOnCommit(t *testing.T) {
+ eng := NewTestEngine(t)
+ runtime := mkRuntimeFromEngine(eng, t)
+ defer runtime.Nuke()
+
+ container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t)
+ defer runtime.Destroy(container1)
+
+ config, _, _, err := runconfig.Parse([]string{container1.ID, "cat /tmp/foo"}, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ job := eng.Job("commit", container1.ID)
+ job.Setenv("repo", "testrepo")
+ job.Setenv("tag", "testtag")
+ job.SetenvJson("config", config)
+ var newId string
+ job.Stdout.AddString(&newId)
+ if err := job.Run(); err != nil {
+ t.Error(err)
+ }
+
+ container2, _, _ := mkContainer(runtime, []string{newId}, t)
+ defer runtime.Destroy(container2)
+
+ job = eng.Job("inspect", container1.Name, "container")
+ baseContainer, _ := job.Stdout.AddEnv()
+ if err := job.Run(); err != nil {
+ t.Error(err)
+ }
+
+ job = eng.Job("inspect", container2.Name, "container")
+ commitContainer, _ := job.Stdout.AddEnv()
+ if err := job.Run(); err != nil {
+ t.Error(err)
+ }
+
+ baseConfig := baseContainer.GetSubEnv("Config")
+ commitConfig := commitContainer.GetSubEnv("Config")
+
+ if commitConfig.Get("Env") != baseConfig.Get("Env") {
+ t.Fatalf("Env config in committed container should be %v, was %v",
+ baseConfig.Get("Env"), commitConfig.Get("Env"))
+ }
+
+ if baseConfig.Get("Cmd") != "[\"echo test \\u003e /tmp/foo\"]" {
+ t.Fatalf("Cmd in base container should be [\"echo test \\u003e /tmp/foo\"], was %s",
+ baseConfig.Get("Cmd"))
+ }
+
+ if commitConfig.Get("Cmd") != "[\"cat /tmp/foo\"]" {
+ t.Fatalf("Cmd in committed container should be [\"cat /tmp/foo\"], was %s",
+ commitConfig.Get("Cmd"))
+ }
+}
+
func TestRestartKillWait(t *testing.T) {
eng := NewTestEngine(t)
srv := mkServerFromEngine(eng, t)
@@ -510,7 +520,7 @@ func TestRmi(t *testing.T) {
t.Fatalf("Expected 2 new images, found %d.", images.Len()-initialImages.Len())
}
- if err = srv.DeleteImage(imageID, engine.NewTable("", 0), true, false); err != nil {
+ if err = srv.DeleteImage(imageID, engine.NewTable("", 0), true, false, false); err != nil {
t.Fatal(err)
}
@@ -571,6 +581,7 @@ func TestImagesFilter(t *testing.T) {
}
}
+// FIXME: 'insert' is deprecated and should be removed in a future version.
func TestImageInsert(t *testing.T) {
eng := NewTestEngine(t)
defer mkRuntimeFromEngine(eng, t).Nuke()
@@ -682,7 +693,7 @@ func TestListContainers(t *testing.T) {
}
}
-func assertContainerList(srv *docker.Server, all bool, limit int, since, before string, expected []string) bool {
+func assertContainerList(srv *server.Server, all bool, limit int, since, before string, expected []string) bool {
job := srv.Eng.Job("containers")
job.SetenvBool("all", all)
job.SetenvInt("limit", limit)
@@ -746,7 +757,7 @@ func TestDeleteTagWithExistingContainers(t *testing.T) {
// Try to remove the tag
imgs := engine.NewTable("", 0)
- if err := srv.DeleteImage("utest:tag1", imgs, true, false); err != nil {
+ if err := srv.DeleteImage("utest:tag1", imgs, true, false, false); err != nil {
t.Fatal(err)
}
diff --git a/integration/utils_test.go b/integration/utils_test.go
index 05d73df52a..8ad6ccb123 100644
--- a/integration/utils_test.go
+++ b/integration/utils_test.go
@@ -14,10 +14,11 @@ import (
"testing"
"time"
- "github.com/dotcloud/docker"
"github.com/dotcloud/docker/builtins"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/runtime"
+ "github.com/dotcloud/docker/server"
"github.com/dotcloud/docker/utils"
)
@@ -27,7 +28,7 @@ import (
// Create a temporary runtime suitable for unit testing.
// Call t.Fatal() at the first error.
-func mkRuntime(f utils.Fataler) *docker.Runtime {
+func mkRuntime(f utils.Fataler) *runtime.Runtime {
eng := newTestEngine(f, false, "")
return mkRuntimeFromEngine(eng, f)
// FIXME:
@@ -70,7 +71,7 @@ func containerFileExists(eng *engine.Engine, id, dir string, t utils.Fataler) bo
t.Fatal(err)
}
defer c.Unmount()
- if _, err := os.Stat(path.Join(c.BasefsPath(), dir)); err != nil {
+ if _, err := os.Stat(path.Join(c.RootfsPath(), dir)); err != nil {
if os.IsNotExist(err) {
return false
}
@@ -139,7 +140,7 @@ func assertHttpError(r *httptest.ResponseRecorder, t utils.Fataler) {
}
}
-func getContainer(eng *engine.Engine, id string, t utils.Fataler) *docker.Container {
+func getContainer(eng *engine.Engine, id string, t utils.Fataler) *runtime.Container {
runtime := mkRuntimeFromEngine(eng, t)
c := runtime.Get(id)
if c == nil {
@@ -148,26 +149,26 @@ func getContainer(eng *engine.Engine, id string, t utils.Fataler) *docker.Contai
return c
}
-func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Server {
+func mkServerFromEngine(eng *engine.Engine, t utils.Fataler) *server.Server {
iSrv := eng.Hack_GetGlobalVar("httpapi.server")
if iSrv == nil {
panic("Legacy server field not set in engine")
}
- srv, ok := iSrv.(*docker.Server)
+ srv, ok := iSrv.(*server.Server)
if !ok {
- panic("Legacy server field in engine does not cast to *docker.Server")
+ panic("Legacy server field in engine does not cast to *server.Server")
}
return srv
}
-func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *docker.Runtime {
+func mkRuntimeFromEngine(eng *engine.Engine, t utils.Fataler) *runtime.Runtime {
iRuntime := eng.Hack_GetGlobalVar("httpapi.runtime")
if iRuntime == nil {
panic("Legacy runtime field not set in engine")
}
- runtime, ok := iRuntime.(*docker.Runtime)
+ runtime, ok := iRuntime.(*runtime.Runtime)
if !ok {
- panic("Legacy runtime field in engine does not cast to *docker.Runtime")
+ panic("Legacy runtime field in engine does not cast to *runtime.Runtime")
}
return runtime
}
@@ -249,7 +250,7 @@ func readFile(src string, t *testing.T) (content string) {
// dynamically replaced by the current test image.
// The caller is responsible for destroying the container.
// Call t.Fatal() at the first error.
-func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Container, *runconfig.HostConfig, error) {
+func mkContainer(r *runtime.Runtime, args []string, t *testing.T) (*runtime.Container, *runconfig.HostConfig, error) {
config, hc, _, err := runconfig.Parse(args, nil)
defer func() {
if err != nil && t != nil {
@@ -280,7 +281,7 @@ func mkContainer(r *docker.Runtime, args []string, t *testing.T) (*docker.Contai
// and return its standard output as a string.
// The image name (eg. the XXX in []string{"-i", "-t", "XXX", "bash"}, is dynamically replaced by the current test image.
// If t is not nil, call t.Fatal() at the first error. Otherwise return errors normally.
-func runContainer(eng *engine.Engine, r *docker.Runtime, args []string, t *testing.T) (output string, err error) {
+func runContainer(eng *engine.Engine, r *runtime.Runtime, args []string, t *testing.T) (output string, err error) {
defer func() {
if err != nil && t != nil {
t.Fatal(err)
diff --git a/opts/envfile.go b/opts/envfile.go
new file mode 100644
index 0000000000..19ee8955f9
--- /dev/null
+++ b/opts/envfile.go
@@ -0,0 +1,54 @@
+package opts
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strings"
+)
+
+/*
+Read in a line delimited file with environment variables enumerated
+*/
+func ParseEnvFile(filename string) ([]string, error) {
+ fh, err := os.Open(filename)
+ if err != nil {
+ return []string{}, err
+ }
+ defer fh.Close()
+
+ lines := []string{}
+ scanner := bufio.NewScanner(fh)
+ for scanner.Scan() {
+ line := scanner.Text()
+ // line is not empty, and not starting with '#'
+ if len(line) > 0 && !strings.HasPrefix(line, "#") {
+ if strings.Contains(line, "=") {
+ data := strings.SplitN(line, "=", 2)
+
+ // trim the front of a variable, but nothing else
+ variable := strings.TrimLeft(data[0], whiteSpaces)
+ if strings.ContainsAny(variable, whiteSpaces) {
+ return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)}
+ }
+
+ // pass the value through, no trimming
+ lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1]))
+ } else {
+ // if only a pass-through variable is given, clean it up.
+ lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line)))
+ }
+ }
+ }
+ return lines, nil
+}
+
+var whiteSpaces = " \t"
+
+type ErrBadEnvVariable struct {
+ msg string
+}
+
+func (e ErrBadEnvVariable) Error() string {
+ return fmt.Sprintf("poorly formatted environment: %s", e.msg)
+}
diff --git a/pkg/opts/opts.go b/opts/opts.go
index a1b8752bad..67f1c8fd48 100644
--- a/pkg/opts/opts.go
+++ b/opts/opts.go
@@ -92,22 +92,12 @@ func ValidateAttach(val string) (string, error) {
}
func ValidateLink(val string) (string, error) {
- if _, err := parseLink(val); err != nil {
+ if _, err := utils.PartParser("name:alias", val); err != nil {
return val, err
}
return val, nil
}
-// FIXME: this is a duplicate of docker.utils.parseLink.
-// it can't be moved to a separate links/ package because
-// links depends on Container which is defined in the core.
-//
-// Links come in the format of
-// name:alias
-func parseLink(rawLink string) (map[string]string, error) {
- return utils.PartParser("name:alias", rawLink)
-}
-
func ValidatePath(val string) (string, error) {
var containerPath string
@@ -146,3 +136,16 @@ func ValidateIp4Address(val string) (string, error) {
}
return "", fmt.Errorf("%s is not an ip4 address", val)
}
+
+func ValidateDomain(val string) (string, error) {
+ alpha := regexp.MustCompile(`[a-zA-Z]`)
+ if alpha.FindString(val) == "" {
+ return "", fmt.Errorf("%s is not a valid domain", val)
+ }
+ re := regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+ ns := re.FindSubmatch([]byte(val))
+ if len(ns) > 0 {
+ return string(ns[1]), nil
+ }
+ return "", fmt.Errorf("%s is not a valid domain", val)
+}
diff --git a/opts/opts_test.go b/opts/opts_test.go
new file mode 100644
index 0000000000..299cbfe503
--- /dev/null
+++ b/opts/opts_test.go
@@ -0,0 +1,78 @@
+package opts
+
+import (
+ "testing"
+)
+
+func TestValidateIP4(t *testing.T) {
+ if ret, err := ValidateIp4Address(`1.2.3.4`); err != nil || ret == "" {
+ t.Fatalf("ValidateIp4Address(`1.2.3.4`) got %s %s", ret, err)
+ }
+
+ if ret, err := ValidateIp4Address(`127.0.0.1`); err != nil || ret == "" {
+ t.Fatalf("ValidateIp4Address(`127.0.0.1`) got %s %s", ret, err)
+ }
+
+ if ret, err := ValidateIp4Address(`127`); err == nil || ret != "" {
+ t.Fatalf("ValidateIp4Address(`127`) got %s %s", ret, err)
+ }
+
+ if ret, err := ValidateIp4Address(`random invalid string`); err == nil || ret != "" {
+ t.Fatalf("ValidateIp4Address(`random invalid string`) got %s %s", ret, err)
+ }
+
+}
+
+func TestValidateDomain(t *testing.T) {
+ valid := []string{
+ `a`,
+ `a.`,
+ `1.foo`,
+ `17.foo`,
+ `foo.bar`,
+ `foo.bar.baz`,
+ `foo.bar.`,
+ `foo.bar.baz`,
+ `foo1.bar2`,
+ `foo1.bar2.baz`,
+ `1foo.2bar.`,
+ `1foo.2bar.baz`,
+ `foo-1.bar-2`,
+ `foo-1.bar-2.baz`,
+ `foo-1.bar-2.`,
+ `foo-1.bar-2.baz`,
+ `1-foo.2-bar`,
+ `1-foo.2-bar.baz`,
+ `1-foo.2-bar.`,
+ `1-foo.2-bar.baz`,
+ }
+
+ invalid := []string{
+ ``,
+ `.`,
+ `17`,
+ `17.`,
+ `.17`,
+ `17-.`,
+ `17-.foo`,
+ `.foo`,
+ `foo-.bar`,
+ `-foo.bar`,
+ `foo.bar-`,
+ `foo.bar-.baz`,
+ `foo.-bar`,
+ `foo.-bar.baz`,
+ }
+
+ for _, domain := range valid {
+ if ret, err := ValidateDomain(domain); err != nil || ret == "" {
+ t.Fatalf("ValidateDomain(`"+domain+"`) got %s %s", ret, err)
+ }
+ }
+
+ for _, domain := range invalid {
+ if ret, err := ValidateDomain(domain); err == nil || ret != "" {
+ t.Fatalf("ValidateDomain(`"+domain+"`) got %s %s", ret, err)
+ }
+ }
+}
diff --git a/pkg/cgroups/apply_nosystemd.go b/pkg/cgroups/apply_nosystemd.go
new file mode 100644
index 0000000000..f94d475907
--- /dev/null
+++ b/pkg/cgroups/apply_nosystemd.go
@@ -0,0 +1,15 @@
+// +build !linux
+
+package cgroups
+
+import (
+ "fmt"
+)
+
+func useSystemd() bool {
+ return false
+}
+
+func systemdApply(c *Cgroup, pid int) (ActiveCgroup, error) {
+ return nil, fmt.Errorf("Systemd not supported")
+}
diff --git a/pkg/cgroups/apply_raw.go b/pkg/cgroups/apply_raw.go
new file mode 100644
index 0000000000..220f08f1dc
--- /dev/null
+++ b/pkg/cgroups/apply_raw.go
@@ -0,0 +1,216 @@
+package cgroups
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+type rawCgroup struct {
+ root string
+ cgroup string
+}
+
+func rawApply(c *Cgroup, pid int) (ActiveCgroup, error) {
+ // We have two implementation of cgroups support, one is based on
+ // systemd and the dbus api, and one is based on raw cgroup fs operations
+ // following the pre-single-writer model docs at:
+ // http://www.freedesktop.org/wiki/Software/systemd/PaxControlGroups/
+ //
+ // we can pick any subsystem to find the root
+
+ cgroupRoot, err := FindCgroupMountpoint("cpu")
+ if err != nil {
+ return nil, err
+ }
+ cgroupRoot = filepath.Dir(cgroupRoot)
+
+ if _, err := os.Stat(cgroupRoot); err != nil {
+ return nil, fmt.Errorf("cgroups fs not found")
+ }
+
+ cgroup := c.Name
+ if c.Parent != "" {
+ cgroup = filepath.Join(c.Parent, cgroup)
+ }
+
+ raw := &rawCgroup{
+ root: cgroupRoot,
+ cgroup: cgroup,
+ }
+
+ if err := raw.setupDevices(c, pid); err != nil {
+ return nil, err
+ }
+ if err := raw.setupMemory(c, pid); err != nil {
+ return nil, err
+ }
+ if err := raw.setupCpu(c, pid); err != nil {
+ return nil, err
+ }
+ if err := raw.setupCpuset(c, pid); err != nil {
+ return nil, err
+ }
+ return raw, nil
+}
+
+func (raw *rawCgroup) path(subsystem string) (string, error) {
+ initPath, err := GetInitCgroupDir(subsystem)
+ if err != nil {
+ return "", err
+ }
+ return filepath.Join(raw.root, subsystem, initPath, raw.cgroup), nil
+}
+
+func (raw *rawCgroup) join(subsystem string, pid int) (string, error) {
+ path, err := raw.path(subsystem)
+ if err != nil {
+ return "", err
+ }
+ if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
+ return "", err
+ }
+ if err := writeFile(path, "cgroup.procs", strconv.Itoa(pid)); err != nil {
+ return "", err
+ }
+ return path, nil
+}
+
+func (raw *rawCgroup) setupDevices(c *Cgroup, pid int) (err error) {
+ if !c.DeviceAccess {
+ dir, err := raw.join("devices", pid)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ if err != nil {
+ os.RemoveAll(dir)
+ }
+ }()
+
+ if err := writeFile(dir, "devices.deny", "a"); err != nil {
+ return err
+ }
+
+ allow := []string{
+ // allow mknod for any device
+ "c *:* m",
+ "b *:* m",
+
+ // /dev/null, zero, full
+ "c 1:3 rwm",
+ "c 1:5 rwm",
+ "c 1:7 rwm",
+
+ // consoles
+ "c 5:1 rwm",
+ "c 5:0 rwm",
+ "c 4:0 rwm",
+ "c 4:1 rwm",
+
+ // /dev/urandom,/dev/random
+ "c 1:9 rwm",
+ "c 1:8 rwm",
+
+ // /dev/pts/ - pts namespaces are "coming soon"
+ "c 136:* rwm",
+ "c 5:2 rwm",
+
+ // tuntap
+ "c 10:200 rwm",
+ }
+
+ for _, val := range allow {
+ if err := writeFile(dir, "devices.allow", val); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (raw *rawCgroup) setupMemory(c *Cgroup, pid int) (err error) {
+ if c.Memory != 0 || c.MemorySwap != 0 {
+ dir, err := raw.join("memory", pid)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ os.RemoveAll(dir)
+ }
+ }()
+
+ if c.Memory != 0 {
+ if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil {
+ return err
+ }
+ if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil {
+ return err
+ }
+ }
+ // By default, MemorySwap is set to twice the size of RAM.
+ // If you want to omit MemorySwap, set it to `-1'.
+ if c.MemorySwap != -1 {
+ if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(c.Memory*2, 10)); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (raw *rawCgroup) setupCpu(c *Cgroup, pid int) (err error) {
+ // We always want to join the cpu group, to allow fair cpu scheduling
+ // on a container basis
+ dir, err := raw.join("cpu", pid)
+ if err != nil {
+ return err
+ }
+ if c.CpuShares != 0 {
+ if err := writeFile(dir, "cpu.shares", strconv.FormatInt(c.CpuShares, 10)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (raw *rawCgroup) setupCpuset(c *Cgroup, pid int) (err error) {
+ if c.CpusetCpus != "" {
+ dir, err := raw.join("cpuset", pid)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ os.RemoveAll(dir)
+ }
+ }()
+
+ if err := writeFile(dir, "cpuset.cpus", c.CpusetCpus); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (raw *rawCgroup) Cleanup() error {
+ get := func(subsystem string) string {
+ path, _ := raw.path(subsystem)
+ return path
+ }
+
+ for _, path := range []string{
+ get("memory"),
+ get("devices"),
+ get("cpu"),
+ get("cpuset"),
+ } {
+ if path != "" {
+ os.RemoveAll(path)
+ }
+ }
+ return nil
+}
diff --git a/pkg/cgroups/apply_systemd.go b/pkg/cgroups/apply_systemd.go
new file mode 100644
index 0000000000..c689d5753e
--- /dev/null
+++ b/pkg/cgroups/apply_systemd.go
@@ -0,0 +1,158 @@
+// +build linux
+
+package cgroups
+
+import (
+ "fmt"
+ systemd1 "github.com/coreos/go-systemd/dbus"
+ "github.com/dotcloud/docker/pkg/systemd"
+ "github.com/godbus/dbus"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+type systemdCgroup struct {
+}
+
+var (
+ connLock sync.Mutex
+ theConn *systemd1.Conn
+ hasStartTransientUnit bool
+)
+
+func useSystemd() bool {
+ if !systemd.SdBooted() {
+ return false
+ }
+
+ connLock.Lock()
+ defer connLock.Unlock()
+
+ if theConn == nil {
+ var err error
+ theConn, err = systemd1.New()
+ if err != nil {
+ return false
+ }
+
+ // Assume we have StartTransientUnit
+ hasStartTransientUnit = true
+
+ // But if we get UnknownMethod error we don't
+ if _, err := theConn.StartTransientUnit("test.scope", "invalid"); err != nil {
+ if dbusError, ok := err.(dbus.Error); ok {
+ if dbusError.Name == "org.freedesktop.DBus.Error.UnknownMethod" {
+ hasStartTransientUnit = false
+ }
+ }
+ }
+ }
+
+ return hasStartTransientUnit
+}
+
+type DeviceAllow struct {
+ Node string
+ Permissions string
+}
+
+func getIfaceForUnit(unitName string) string {
+ if strings.HasSuffix(unitName, ".scope") {
+ return "Scope"
+ }
+ if strings.HasSuffix(unitName, ".service") {
+ return "Service"
+ }
+ return "Unit"
+}
+
+func systemdApply(c *Cgroup, pid int) (ActiveCgroup, error) {
+ unitName := c.Parent + "-" + c.Name + ".scope"
+ slice := "system.slice"
+
+ var properties []systemd1.Property
+
+ for _, v := range c.UnitProperties {
+ switch v[0] {
+ case "Slice":
+ slice = v[1]
+ default:
+			return nil, fmt.Errorf("Unknown unit property %s", v[0])
+ }
+ }
+
+ properties = append(properties,
+ systemd1.Property{"Slice", dbus.MakeVariant(slice)},
+ systemd1.Property{"Description", dbus.MakeVariant("docker container " + c.Name)},
+ systemd1.Property{"PIDs", dbus.MakeVariant([]uint32{uint32(pid)})})
+
+ if !c.DeviceAccess {
+ properties = append(properties,
+ systemd1.Property{"DevicePolicy", dbus.MakeVariant("strict")},
+ systemd1.Property{"DeviceAllow", dbus.MakeVariant([]DeviceAllow{
+ {"/dev/null", "rwm"},
+ {"/dev/zero", "rwm"},
+ {"/dev/full", "rwm"},
+ {"/dev/random", "rwm"},
+ {"/dev/urandom", "rwm"},
+ {"/dev/tty", "rwm"},
+ {"/dev/console", "rwm"},
+ {"/dev/tty0", "rwm"},
+ {"/dev/tty1", "rwm"},
+ {"/dev/pts/ptmx", "rwm"},
+ // There is no way to add /dev/pts/* here atm, so we hack this manually below
+ // /dev/pts/* (how to add this?)
+ // Same with tuntap, which doesn't exist as a node most of the time
+ })})
+ }
+
+ if c.Memory != 0 {
+ properties = append(properties,
+ systemd1.Property{"MemoryLimit", dbus.MakeVariant(uint64(c.Memory))})
+ }
+ // TODO: MemorySwap not available in systemd
+
+ if c.CpuShares != 0 {
+ properties = append(properties,
+ systemd1.Property{"CPUShares", dbus.MakeVariant(uint64(c.CpuShares))})
+ }
+
+ if _, err := theConn.StartTransientUnit(unitName, "replace", properties...); err != nil {
+ return nil, err
+ }
+
+ // To work around the lack of /dev/pts/* support above we need to manually add these
+ // so, ask systemd for the cgroup used
+ props, err := theConn.GetUnitTypeProperties(unitName, getIfaceForUnit(unitName))
+ if err != nil {
+ return nil, err
+ }
+
+ cgroup := props["ControlGroup"].(string)
+
+ if !c.DeviceAccess {
+ mountpoint, err := FindCgroupMountpoint("devices")
+ if err != nil {
+ return nil, err
+ }
+
+ path := filepath.Join(mountpoint, cgroup)
+
+ // /dev/pts/*
+ if err := writeFile(path, "devices.allow", "c 136:* rwm"); err != nil {
+ return nil, err
+ }
+ // tuntap
+ if err := writeFile(path, "devices.allow", "c 10:200 rwm"); err != nil {
+ return nil, err
+ }
+ }
+
+ return &systemdCgroup{}, nil
+}
+
+func (c *systemdCgroup) Cleanup() error {
+ // systemd cleans up, we don't need to do anything
+ return nil
+}
diff --git a/pkg/cgroups/cgroups.go b/pkg/cgroups/cgroups.go
index b40e1a31fa..5fe10346df 100644
--- a/pkg/cgroups/cgroups.go
+++ b/pkg/cgroups/cgroups.go
@@ -8,7 +8,6 @@ import (
"io/ioutil"
"os"
"path/filepath"
- "strconv"
"strings"
)
@@ -16,10 +15,17 @@ type Cgroup struct {
Name string `json:"name,omitempty"`
Parent string `json:"parent,omitempty"`
- DeviceAccess bool `json:"device_access,omitempty"` // name of parent cgroup or slice
- Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes)
- MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap
- CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers)
+ DeviceAccess bool `json:"device_access,omitempty"` // allow full host device access (skip the device whitelist)
+ Memory int64 `json:"memory,omitempty"` // Memory limit (in bytes)
+ MemorySwap int64 `json:"memory_swap,omitempty"` // Total memory usage (memory + swap); set `-1' to disable swap
+ CpuShares int64 `json:"cpu_shares,omitempty"` // CPU shares (relative weight vs. other containers)
+ CpusetCpus string `json:"cpuset_cpus,omitempty"` // CPU to use
+
+ UnitProperties [][2]string `json:"unit_properties,omitempty"` // systemd unit properties
+}
+
+type ActiveCgroup interface {
+ Cleanup() error
}
// https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt
@@ -62,48 +68,6 @@ func GetInitCgroupDir(subsystem string) (string, error) {
return parseCgroupFile(subsystem, f)
}
-func (c *Cgroup) Path(root, subsystem string) (string, error) {
- cgroup := c.Name
- if c.Parent != "" {
- cgroup = filepath.Join(c.Parent, cgroup)
- }
- initPath, err := GetInitCgroupDir(subsystem)
- if err != nil {
- return "", err
- }
- return filepath.Join(root, subsystem, initPath, cgroup), nil
-}
-
-func (c *Cgroup) Join(root, subsystem string, pid int) (string, error) {
- path, err := c.Path(root, subsystem)
- if err != nil {
- return "", err
- }
- if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
- return "", err
- }
- if err := writeFile(path, "tasks", strconv.Itoa(pid)); err != nil {
- return "", err
- }
- return path, nil
-}
-
-func (c *Cgroup) Cleanup(root string) error {
- get := func(subsystem string) string {
- path, _ := c.Path(root, subsystem)
- return path
- }
-
- for _, path := range []string{
- get("memory"),
- get("devices"),
- get("cpu"),
- } {
- os.RemoveAll(path)
- }
- return nil
-}
-
func parseCgroupFile(subsystem string, r io.Reader) (string, error) {
s := bufio.NewScanner(r)
for s.Scan() {
@@ -125,126 +89,15 @@ func writeFile(dir, file, data string) error {
return ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700)
}
-func (c *Cgroup) Apply(pid int) error {
+func (c *Cgroup) Apply(pid int) (ActiveCgroup, error) {
// We have two implementation of cgroups support, one is based on
// systemd and the dbus api, and one is based on raw cgroup fs operations
// following the pre-single-writer model docs at:
// http://www.freedesktop.org/wiki/Software/systemd/PaxControlGroups/
- //
- // we can pick any subsystem to find the root
- cgroupRoot, err := FindCgroupMountpoint("cpu")
- if err != nil {
- return err
- }
- cgroupRoot = filepath.Dir(cgroupRoot)
-
- if _, err := os.Stat(cgroupRoot); err != nil {
- return fmt.Errorf("cgroups fs not found")
- }
- if err := c.setupDevices(cgroupRoot, pid); err != nil {
- return err
- }
- if err := c.setupMemory(cgroupRoot, pid); err != nil {
- return err
- }
- if err := c.setupCpu(cgroupRoot, pid); err != nil {
- return err
- }
- return nil
-}
-func (c *Cgroup) setupDevices(cgroupRoot string, pid int) (err error) {
- if !c.DeviceAccess {
- dir, err := c.Join(cgroupRoot, "devices", pid)
- if err != nil {
- return err
- }
-
- defer func() {
- if err != nil {
- os.RemoveAll(dir)
- }
- }()
-
- if err := writeFile(dir, "devices.deny", "a"); err != nil {
- return err
- }
-
- allow := []string{
- // /dev/null, zero, full
- "c 1:3 rwm",
- "c 1:5 rwm",
- "c 1:7 rwm",
-
- // consoles
- "c 5:1 rwm",
- "c 5:0 rwm",
- "c 4:0 rwm",
- "c 4:1 rwm",
-
- // /dev/urandom,/dev/random
- "c 1:9 rwm",
- "c 1:8 rwm",
-
- // /dev/pts/ - pts namespaces are "coming soon"
- "c 136:* rwm",
- "c 5:2 rwm",
-
- // tuntap
- "c 10:200 rwm",
- }
-
- for _, val := range allow {
- if err := writeFile(dir, "devices.allow", val); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (c *Cgroup) setupMemory(cgroupRoot string, pid int) (err error) {
- if c.Memory != 0 || c.MemorySwap != 0 {
- dir, err := c.Join(cgroupRoot, "memory", pid)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- os.RemoveAll(dir)
- }
- }()
-
- if c.Memory != 0 {
- if err := writeFile(dir, "memory.limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil {
- return err
- }
- if err := writeFile(dir, "memory.soft_limit_in_bytes", strconv.FormatInt(c.Memory, 10)); err != nil {
- return err
- }
- }
- // By default, MemorySwap is set to twice the size of RAM.
- // If you want to omit MemorySwap, set it to `-1'.
- if c.MemorySwap != -1 {
- if err := writeFile(dir, "memory.memsw.limit_in_bytes", strconv.FormatInt(c.Memory*2, 10)); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (c *Cgroup) setupCpu(cgroupRoot string, pid int) (err error) {
- // We always want to join the cpu group, to allow fair cpu scheduling
- // on a container basis
- dir, err := c.Join(cgroupRoot, "cpu", pid)
- if err != nil {
- return err
- }
- if c.CpuShares != 0 {
- if err := writeFile(dir, "cpu.shares", strconv.FormatInt(c.CpuShares, 10)); err != nil {
- return err
- }
+ if useSystemd() {
+ return systemdApply(c, pid)
+ } else {
+ return rawApply(c, pid)
}
- return nil
}
diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go
index 4cdd67ef7c..1f25952bd9 100644
--- a/pkg/iptables/iptables.go
+++ b/pkg/iptables/iptables.go
@@ -66,7 +66,6 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, dest_addr str
"-p", proto,
"-d", daddr,
"--dport", strconv.Itoa(port),
- "!", "-i", c.Bridge,
"-j", "DNAT",
"--to-destination", net.JoinHostPort(dest_addr, strconv.Itoa(dest_port))); err != nil {
return err
diff --git a/pkg/label/label.go b/pkg/label/label.go
new file mode 100644
index 0000000000..38f026bc5a
--- /dev/null
+++ b/pkg/label/label.go
@@ -0,0 +1,26 @@
+// +build !selinux !linux
+
+package label
+
+func GenLabels(options string) (string, string, error) {
+ return "", "", nil
+}
+
+func FormatMountLabel(src string, mountLabel string) string {
+ return src
+}
+
+func SetProcessLabel(processLabel string) error {
+ return nil
+}
+
+func SetFileLabel(path string, fileLabel string) error {
+ return nil
+}
+
+func GetPidCon(pid int) (string, error) {
+ return "", nil
+}
+
+func Init() {
+}
diff --git a/pkg/label/label_selinux.go b/pkg/label/label_selinux.go
new file mode 100644
index 0000000000..9f7463f79b
--- /dev/null
+++ b/pkg/label/label_selinux.go
@@ -0,0 +1,77 @@
+// +build selinux,linux
+
+package label
+
+import (
+ "fmt"
+ "github.com/dotcloud/docker/pkg/selinux"
+ "strings"
+)
+
+func GenLabels(options string) (string, string, error) {
+ if !selinux.SelinuxEnabled() {
+ return "", "", nil
+ }
+ var err error
+ processLabel, mountLabel := selinux.GetLxcContexts()
+ if processLabel != "" {
+ var (
+ s = strings.Fields(options)
+ l = len(s)
+ )
+ if l > 0 {
+ pcon := selinux.NewContext(processLabel)
+ for i := 0; i < l; i++ {
+ o := strings.Split(s[i], "=")
+ pcon[o[0]] = o[1]
+ }
+ processLabel = pcon.Get()
+ mountLabel, err = selinux.CopyLevel(processLabel, mountLabel)
+ }
+ }
+ return processLabel, mountLabel, err
+}
+
+func FormatMountLabel(src string, mountLabel string) string {
+ if selinux.SelinuxEnabled() && mountLabel != "" {
+ switch src {
+ case "":
+ src = fmt.Sprintf("%s,context=%s", src, mountLabel)
+ default:
+ src = fmt.Sprintf("context=%s", mountLabel)
+ }
+ }
+ return src
+}
+
+func SetProcessLabel(processLabel string) error {
+ if selinux.SelinuxEnabled() {
+ return selinux.Setexeccon(processLabel)
+ }
+ return nil
+}
+
+func GetProcessLabel() (string, error) {
+ if selinux.SelinuxEnabled() {
+ return selinux.Getexeccon()
+ }
+ return "", nil
+}
+
+func SetFileLabel(path string, fileLabel string) error {
+ if selinux.SelinuxEnabled() && fileLabel != "" {
+ return selinux.Setfilecon(path, fileLabel)
+ }
+ return nil
+}
+
+func GetPidCon(pid int) (string, error) {
+ if !selinux.SelinuxEnabled() {
+ return "", nil
+ }
+ return selinux.Getpidcon(pid)
+}
+
+func Init() {
+ selinux.SelinuxEnabled()
+}
diff --git a/pkg/libcontainer/MAINTAINERS b/pkg/libcontainer/MAINTAINERS
index e53d933d47..1cb551364d 100644
--- a/pkg/libcontainer/MAINTAINERS
+++ b/pkg/libcontainer/MAINTAINERS
@@ -1,2 +1,2 @@
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
-Guillaume Charmes <guillaume@dotcloud.com> (@creack)
+Guillaume J. Charmes <guillaume@docker.com> (@creack)
diff --git a/pkg/libcontainer/README.md b/pkg/libcontainer/README.md
index d6e4dedd63..d6d0fbae44 100644
--- a/pkg/libcontainer/README.md
+++ b/pkg/libcontainer/README.md
@@ -3,7 +3,7 @@
#### background
libcontainer specifies configuration options for what a container is. It provides a native Go implementation
-for using linux namespaces with no external dependencies. libcontainer provides many convience functions for working with namespaces, networking, and management.
+for using Linux namespaces with no external dependencies. libcontainer provides many convenience functions for working with namespaces, networking, and management.
#### container
@@ -16,59 +16,82 @@ process are specified in this file. The configuration is used for each process
Sample `container.json` file:
```json
{
- "hostname": "koye",
- "tty": true,
- "environment": [
- "HOME=/",
- "PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin",
- "container=docker",
- "TERM=xterm-256color"
- ],
- "namespaces": [
- "NEWIPC",
- "NEWNS",
- "NEWPID",
- "NEWUTS",
- "NEWNET"
- ],
- "capabilities": [
- "SETPCAP",
- "SYS_MODULE",
- "SYS_RAWIO",
- "SYS_PACCT",
- "SYS_ADMIN",
- "SYS_NICE",
- "SYS_RESOURCE",
- "SYS_TIME",
- "SYS_TTY_CONFIG",
- "MKNOD",
- "AUDIT_WRITE",
- "AUDIT_CONTROL",
- "MAC_OVERRIDE",
- "MAC_ADMIN",
- "NET_ADMIN"
- ],
- "networks": [{
- "type": "veth",
- "context": {
- "bridge": "docker0",
- "prefix": "dock"
- },
- "address": "172.17.0.100/16",
- "gateway": "172.17.42.1",
- "mtu": 1500
- }
- ],
- "cgroups": {
- "name": "docker-koye",
- "parent": "docker",
- "memory": 5248000
- }
+ "hostname" : "koye",
+ "networks" : [
+ {
+ "gateway" : "172.17.42.1",
+ "context" : {
+ "bridge" : "docker0",
+ "prefix" : "veth"
+ },
+ "address" : "172.17.0.2/16",
+ "type" : "veth",
+ "mtu" : 1500
+ }
+ ],
+ "cgroups" : {
+ "parent" : "docker",
+ "name" : "11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620"
+ },
+ "tty" : true,
+ "environment" : [
+ "HOME=/",
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "HOSTNAME=11bb30683fb0",
+ "TERM=xterm"
+ ],
+ "capabilities_mask" : [
+ "SETPCAP",
+ "SYS_MODULE",
+ "SYS_RAWIO",
+ "SYS_PACCT",
+ "SYS_ADMIN",
+ "SYS_NICE",
+ "SYS_RESOURCE",
+ "SYS_TIME",
+ "SYS_TTY_CONFIG",
+ "MKNOD",
+ "AUDIT_WRITE",
+ "AUDIT_CONTROL",
+ "MAC_OVERRIDE",
+ "MAC_ADMIN",
+ "NET_ADMIN"
+ ],
+ "context" : {
+ "apparmor_profile" : "docker-default"
+ },
+ "mounts" : [
+ {
+ "source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/resolv.conf",
+ "writable" : false,
+ "destination" : "/etc/resolv.conf",
+ "private" : true
+ },
+ {
+ "source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/hostname",
+ "writable" : false,
+ "destination" : "/etc/hostname",
+ "private" : true
+ },
+ {
+ "source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/hosts",
+ "writable" : false,
+ "destination" : "/etc/hosts",
+ "private" : true
+ }
+ ],
+ "namespaces" : [
+ "NEWNS",
+ "NEWUTS",
+ "NEWIPC",
+ "NEWPID",
+ "NEWNET"
+ ]
}
```
Using this configuration and the current directory holding the rootfs for a process, one can use libcontainer to exec the container. Running the life of the namespace, a `pid` file
-is written to the current directory with the pid of the namespaced process to the external world. A client can use this pid to wait, kill, or perform other operation with the container. If a user tries to run an new process inside an existing container with a live namespace the namespace will be joined by the new process.
+is written to the current directory with the pid of the namespaced process to the external world. A client can use this pid to wait, kill, or perform other operations with the container. If a user tries to run a new process inside an existing container with a live namespace, the namespace will be joined by the new process.
You may also specify an alternate root place where the `container.json` file is read and where the `pid` file will be saved.
@@ -76,7 +99,7 @@ You may also specify an alternate root place where the `container.json` file is
#### nsinit
`nsinit` is a cli application used as the reference implementation of libcontainer. It is able to
-spawn or join new containers giving the current directory. To use `nsinit` cd into a linux
+spawn or join new containers given the current directory. To use `nsinit` cd into a Linux
rootfs and copy a `container.json` file into the directory with your specified configuration.
To execute `/bin/bash` in the current directory as a container just run:
diff --git a/pkg/libcontainer/TODO.md b/pkg/libcontainer/TODO.md
index f18c0b4c51..87224db85d 100644
--- a/pkg/libcontainer/TODO.md
+++ b/pkg/libcontainer/TODO.md
@@ -1,17 +1,11 @@
#### goals
* small and simple - line count is not everything but less code is better
-* clean lines between what we do in the pkg
* provide primitives for working with namespaces not cater to every option
* extend via configuration not by features - host networking, no networking, veth network can be accomplished via adjusting the container.json, nothing to do with code
#### tasks
-* proper tty for a new process in an existing container
-* use exec or raw syscalls for new process in existing container
-* setup proper user in namespace if specified
-* implement hook or clean interface for cgroups
+* reexec or raw syscalls for new process in existing container
* example configs for different setups (host networking, boot init)
* improve pkg documentation with comments
* testing - this is hard in a low level pkg but we could do some, maybe
-* pivot root
* selinux
-* apparmor
diff --git a/pkg/libcontainer/apparmor/setup.go b/pkg/libcontainer/apparmor/setup.go
index e07759cc64..548e72f550 100644
--- a/pkg/libcontainer/apparmor/setup.go
+++ b/pkg/libcontainer/apparmor/setup.go
@@ -2,21 +2,23 @@ package apparmor
import (
"fmt"
+ "io"
"io/ioutil"
"os"
"os/exec"
+ "path"
+)
+
+const (
+ DefaultProfilePath = "/etc/apparmor.d/docker"
)
-const DefaultProfilePath = "/etc/apparmor.d/docker"
const DefaultProfile = `
# AppArmor profile from lxc for containers.
-@{HOME}=@{HOMEDIRS}/*/ /root/
-@{HOMEDIRS}=/home/
-#@{HOMEDIRS}+=
-@{multiarch}=*-linux-gnu*
-@{PROC}=/proc/
+#include <tunables/global>
profile docker-default flags=(attach_disconnected,mediate_deleted) {
+ #include <abstractions/base>
network,
capability,
file,
@@ -75,14 +77,43 @@ profile docker-default flags=(attach_disconnected,mediate_deleted) {
}
`
-func InstallDefaultProfile() error {
+func InstallDefaultProfile(backupPath string) error {
if !IsEnabled() {
return nil
}
- // If the profile already exists, let it be.
+ // If the profile already exists, check if we already have a backup
+ // if not, do the backup and override it. (docker 0.10 upgrade changed the apparmor profile)
+ // see gh#5049, apparmor blocks signals in ubuntu 14.04
if _, err := os.Stat(DefaultProfilePath); err == nil {
- return nil
+ if _, err := os.Stat(backupPath); err == nil {
+ // If both the profile and the backup are present, do nothing
+ return nil
+ }
+ // Make sure the directory exists
+ if err := os.MkdirAll(path.Dir(backupPath), 0755); err != nil {
+ return err
+ }
+
+ // Create the backup file
+ f, err := os.Create(backupPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ src, err := os.Open(DefaultProfilePath)
+ if err != nil {
+ return err
+ }
+ defer src.Close()
+ if _, err := io.Copy(f, src); err != nil {
+ return err
+ }
+ }
+
+ // Make sure /etc/apparmor.d exists
+ if err := os.MkdirAll(path.Dir(DefaultProfilePath), 0755); err != nil {
+ return err
}
if err := ioutil.WriteFile(DefaultProfilePath, []byte(DefaultProfile), 0644); err != nil {
diff --git a/pkg/libcontainer/capabilities/capabilities.go b/pkg/libcontainer/capabilities/capabilities.go
index 3c6d752496..4b81e708c7 100644
--- a/pkg/libcontainer/capabilities/capabilities.go
+++ b/pkg/libcontainer/capabilities/capabilities.go
@@ -9,7 +9,7 @@ import (
// DropCapabilities drops capabilities for the current process based
// on the container's configuration.
func DropCapabilities(container *libcontainer.Container) error {
- if drop := getCapabilities(container); len(drop) > 0 {
+ if drop := getCapabilitiesMask(container); len(drop) > 0 {
c, err := capability.NewPid(os.Getpid())
if err != nil {
return err
@@ -23,11 +23,13 @@ func DropCapabilities(container *libcontainer.Container) error {
return nil
}
-// getCapabilities returns the specific cap values for the libcontainer types
-func getCapabilities(container *libcontainer.Container) []capability.Cap {
+// getCapabilitiesMask returns the specific cap mask values for the libcontainer types
+func getCapabilitiesMask(container *libcontainer.Container) []capability.Cap {
drop := []capability.Cap{}
- for _, c := range container.Capabilities {
- drop = append(drop, c.Value)
+ for _, c := range container.CapabilitiesMask {
+ if !c.Enabled {
+ drop = append(drop, c.Value)
+ }
}
return drop
}
diff --git a/pkg/libcontainer/container.go b/pkg/libcontainer/container.go
index a777da58a4..c7cac35428 100644
--- a/pkg/libcontainer/container.go
+++ b/pkg/libcontainer/container.go
@@ -11,18 +11,19 @@ type Context map[string]string
// Container defines configuration options for how a
// container is setup inside a directory and how a process should be executed
type Container struct {
- Hostname string `json:"hostname,omitempty"` // hostname
- ReadonlyFs bool `json:"readonly_fs,omitempty"` // set the containers rootfs as readonly
- NoPivotRoot bool `json:"no_pivot_root,omitempty"` // this can be enabled if you are running in ramdisk
- User string `json:"user,omitempty"` // user to execute the process as
- WorkingDir string `json:"working_dir,omitempty"` // current working directory
- Env []string `json:"environment,omitempty"` // environment to set
- Tty bool `json:"tty,omitempty"` // setup a proper tty or not
- Namespaces Namespaces `json:"namespaces,omitempty"` // namespaces to apply
- Capabilities Capabilities `json:"capabilities,omitempty"` // capabilities to drop
- Networks []*Network `json:"networks,omitempty"` // nil for host's network stack
- Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups
- Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux)
+ Hostname string `json:"hostname,omitempty"` // hostname
+ ReadonlyFs bool `json:"readonly_fs,omitempty"` // set the containers rootfs as readonly
+ NoPivotRoot bool `json:"no_pivot_root,omitempty"` // this can be enabled if you are running in ramdisk
+ User string `json:"user,omitempty"` // user to execute the process as
+ WorkingDir string `json:"working_dir,omitempty"` // current working directory
+ Env []string `json:"environment,omitempty"` // environment to set
+ Tty bool `json:"tty,omitempty"` // setup a proper tty or not
+ Namespaces Namespaces `json:"namespaces,omitempty"` // namespaces to apply
+ CapabilitiesMask Capabilities `json:"capabilities_mask,omitempty"` // capabilities to drop
+ Networks []*Network `json:"networks,omitempty"` // nil for host's network stack
+ Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups
+ Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux)
+ Mounts []Mount `json:"mounts,omitempty"`
}
// Network defines configuration for a container's networking stack
@@ -36,3 +37,12 @@ type Network struct {
Gateway string `json:"gateway,omitempty"`
Mtu int `json:"mtu,omitempty"`
}
+
+// Bind mounts from the host system to the container
+//
+type Mount struct {
+ Source string `json:"source"` // Source path, in the host namespace
+ Destination string `json:"destination"` // Destination path, in the container
+ Writable bool `json:"writable"`
+ Private bool `json:"private"`
+}
diff --git a/pkg/libcontainer/container.json b/pkg/libcontainer/container.json
index 83e407467c..f045315a41 100644
--- a/pkg/libcontainer/container.json
+++ b/pkg/libcontainer/container.json
@@ -14,7 +14,7 @@
"NEWUTS",
"NEWNET"
],
- "capabilities": [
+ "capabilities_mask": [
"SETPCAP",
"SYS_MODULE",
"SYS_RAWIO",
diff --git a/pkg/libcontainer/network/loopback.go b/pkg/libcontainer/network/loopback.go
new file mode 100644
index 0000000000..6215061dc2
--- /dev/null
+++ b/pkg/libcontainer/network/loopback.go
@@ -0,0 +1,24 @@
+package network
+
+import (
+ "fmt"
+ "github.com/dotcloud/docker/pkg/libcontainer"
+)
+
+// Loopback is a network strategy that provides a basic loopback device
+type Loopback struct {
+}
+
+func (l *Loopback) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error {
+ return nil
+}
+
+func (l *Loopback) Initialize(config *libcontainer.Network, context libcontainer.Context) error {
+ if err := SetMtu("lo", config.Mtu); err != nil {
+ return fmt.Errorf("set lo mtu to %d %s", config.Mtu, err)
+ }
+ if err := InterfaceUp("lo"); err != nil {
+ return fmt.Errorf("lo up %s", err)
+ }
+ return nil
+}
diff --git a/pkg/libcontainer/network/netns.go b/pkg/libcontainer/network/netns.go
new file mode 100644
index 0000000000..7e311f22d8
--- /dev/null
+++ b/pkg/libcontainer/network/netns.go
@@ -0,0 +1,34 @@
+package network
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+
+ "github.com/dotcloud/docker/pkg/libcontainer"
+ "github.com/dotcloud/docker/pkg/system"
+)
+
+// crosbymichael: could make a network strategy that instead of returning veth pair names it returns a pid to an existing network namespace
+type NetNS struct {
+}
+
+func (v *NetNS) Create(n *libcontainer.Network, nspid int, context libcontainer.Context) error {
+ context["nspath"] = n.Context["nspath"]
+ return nil
+}
+
+func (v *NetNS) Initialize(config *libcontainer.Network, context libcontainer.Context) error {
+ nspath, exists := context["nspath"]
+ if !exists {
+ return fmt.Errorf("nspath does not exist in network context")
+ }
+ f, err := os.OpenFile(nspath, os.O_RDONLY, 0)
+ if err != nil {
+ return fmt.Errorf("failed get network namespace fd: %v", err)
+ }
+ if err := system.Setns(f.Fd(), syscall.CLONE_NEWNET); err != nil {
+ return fmt.Errorf("failed to setns current network namespace: %v", err)
+ }
+ return nil
+}
diff --git a/pkg/libcontainer/network/strategy.go b/pkg/libcontainer/network/strategy.go
index 234fcc0aa2..e41ecc3ea6 100644
--- a/pkg/libcontainer/network/strategy.go
+++ b/pkg/libcontainer/network/strategy.go
@@ -2,6 +2,7 @@ package network
import (
"errors"
+
"github.com/dotcloud/docker/pkg/libcontainer"
)
@@ -10,7 +11,9 @@ var (
)
var strategies = map[string]NetworkStrategy{
- "veth": &Veth{},
+ "veth": &Veth{},
+ "loopback": &Loopback{},
+ "netns": &NetNS{},
}
// NetworkStrategy represents a specific network configuration for
diff --git a/pkg/libcontainer/network/veth.go b/pkg/libcontainer/network/veth.go
index 3ab1b2393b..3df0cd61ee 100644
--- a/pkg/libcontainer/network/veth.go
+++ b/pkg/libcontainer/network/veth.go
@@ -68,12 +68,6 @@ func (v *Veth) Initialize(config *libcontainer.Network, context libcontainer.Con
if err := InterfaceUp("eth0"); err != nil {
return fmt.Errorf("eth0 up %s", err)
}
- if err := SetMtu("lo", config.Mtu); err != nil {
- return fmt.Errorf("set lo mtu to %d %s", config.Mtu, err)
- }
- if err := InterfaceUp("lo"); err != nil {
- return fmt.Errorf("lo up %s", err)
- }
if config.Gateway != "" {
if err := SetDefaultGateway(config.Gateway); err != nil {
return fmt.Errorf("set gateway to %s %s", config.Gateway, err)
diff --git a/pkg/libcontainer/nsinit/command.go b/pkg/libcontainer/nsinit/command.go
index 5546065b6d..153a48ab59 100644
--- a/pkg/libcontainer/nsinit/command.go
+++ b/pkg/libcontainer/nsinit/command.go
@@ -39,7 +39,9 @@ func (c *DefaultCommandFactory) Create(container *libcontainer.Container, consol
// flags on clone, unshare, and setns
func GetNamespaceFlags(namespaces libcontainer.Namespaces) (flag int) {
for _, ns := range namespaces {
- flag |= ns.Value
+ if ns.Enabled {
+ flag |= ns.Value
+ }
}
return flag
}
diff --git a/pkg/libcontainer/nsinit/exec.go b/pkg/libcontainer/nsinit/exec.go
index 4963f126e9..c07c45de3c 100644
--- a/pkg/libcontainer/nsinit/exec.go
+++ b/pkg/libcontainer/nsinit/exec.go
@@ -3,12 +3,14 @@
package nsinit
import (
- "github.com/dotcloud/docker/pkg/libcontainer"
- "github.com/dotcloud/docker/pkg/libcontainer/network"
- "github.com/dotcloud/docker/pkg/system"
"os"
"os/exec"
"syscall"
+
+ "github.com/dotcloud/docker/pkg/cgroups"
+ "github.com/dotcloud/docker/pkg/libcontainer"
+ "github.com/dotcloud/docker/pkg/libcontainer/network"
+ "github.com/dotcloud/docker/pkg/system"
)
// Exec performes setup outside of a namespace so that a container can be
@@ -26,8 +28,10 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [
if err != nil {
return -1, err
}
+ ns.logger.Printf("created sync pipe parent fd %d child fd %d\n", syncPipe.parent.Fd(), syncPipe.child.Fd())
if container.Tty {
+ ns.logger.Println("creating master and console")
master, console, err = system.CreateMasterAndConsole()
if err != nil {
return -1, err
@@ -36,31 +40,50 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [
}
command := ns.commandFactory.Create(container, console, syncPipe.child, args)
+ ns.logger.Println("attach terminal to command")
if err := term.Attach(command); err != nil {
return -1, err
}
defer term.Close()
+ ns.logger.Println("starting command")
if err := command.Start(); err != nil {
return -1, err
}
- if err := ns.stateWriter.WritePid(command.Process.Pid); err != nil {
+
+ started, err := system.GetProcessStartTime(command.Process.Pid)
+ if err != nil {
+ return -1, err
+ }
+ ns.logger.Printf("writting pid %d to file\n", command.Process.Pid)
+ if err := ns.stateWriter.WritePid(command.Process.Pid, started); err != nil {
command.Process.Kill()
return -1, err
}
- defer ns.stateWriter.DeletePid()
+ defer func() {
+ ns.logger.Println("removing pid file")
+ ns.stateWriter.DeletePid()
+ }()
// Do this before syncing with child so that no children
// can escape the cgroup
- if err := ns.SetupCgroups(container, command.Process.Pid); err != nil {
+ ns.logger.Println("setting cgroups")
+ activeCgroup, err := ns.SetupCgroups(container, command.Process.Pid)
+ if err != nil {
command.Process.Kill()
return -1, err
}
+ if activeCgroup != nil {
+ defer activeCgroup.Cleanup()
+ }
+
+ ns.logger.Println("setting up network")
if err := ns.InitializeNetworking(container, command.Process.Pid, syncPipe); err != nil {
command.Process.Kill()
return -1, err
}
+ ns.logger.Println("closing sync pipe with child")
// Sync with child
syncPipe.Close()
@@ -69,16 +92,16 @@ func (ns *linuxNs) Exec(container *libcontainer.Container, term Terminal, args [
return -1, err
}
}
- return command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus(), nil
+ status := command.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+ ns.logger.Printf("process exited with status %d\n", status)
+ return status, err
}
-func (ns *linuxNs) SetupCgroups(container *libcontainer.Container, nspid int) error {
+func (ns *linuxNs) SetupCgroups(container *libcontainer.Container, nspid int) (cgroups.ActiveCgroup, error) {
if container.Cgroups != nil {
- if err := container.Cgroups.Apply(nspid); err != nil {
- return err
- }
+ return container.Cgroups.Apply(nspid)
}
- return nil
+ return nil, nil
}
func (ns *linuxNs) InitializeNetworking(container *libcontainer.Container, nspid int, pipe *SyncPipe) error {
diff --git a/pkg/libcontainer/nsinit/execin.go b/pkg/libcontainer/nsinit/execin.go
index 488fe0e248..9017af06e9 100644
--- a/pkg/libcontainer/nsinit/execin.go
+++ b/pkg/libcontainer/nsinit/execin.go
@@ -4,6 +4,7 @@ package nsinit
import (
"fmt"
+ "github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/system"
"os"
@@ -14,9 +15,12 @@ import (
// ExecIn uses an existing pid and joins the pid's namespaces with the new command.
func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []string) (int, error) {
- for _, ns := range container.Namespaces {
- if err := system.Unshare(ns.Value); err != nil {
- return -1, err
+ for _, nsv := range container.Namespaces {
+ // skip the PID namespace on unshare because it is not supported
+ if nsv.Key != "NEWPID" {
+ if err := system.Unshare(nsv.Value); err != nil {
+ return -1, err
+ }
}
}
fds, err := ns.getNsFds(nspid, container)
@@ -29,10 +33,15 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s
closeFds()
return -1, err
}
-
+ processLabel, err := label.GetPidCon(nspid)
+ if err != nil {
+ closeFds()
+ return -1, err
+ }
// foreach namespace fd, use setns to join an existing container's namespaces
for _, fd := range fds {
if fd > 0 {
+ ns.logger.Printf("setns on %d\n", fd)
if err := system.Setns(fd, 0); err != nil {
closeFds()
return -1, fmt.Errorf("setns %s", err)
@@ -44,6 +53,7 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s
// if the container has a new pid and mount namespace we need to
// remount proc and sys to pick up the changes
if container.Namespaces.Contains("NEWNS") && container.Namespaces.Contains("NEWPID") {
+ ns.logger.Println("forking to remount /proc and /sys")
pid, err := system.Fork()
if err != nil {
return -1, err
@@ -75,6 +85,10 @@ dropAndExec:
if err := finalizeNamespace(container); err != nil {
return -1, err
}
+ err = label.SetProcessLabel(processLabel)
+ if err != nil {
+ return -1, err
+ }
if err := system.Execv(args[0], args[0:], container.Env); err != nil {
return -1, err
}
diff --git a/pkg/libcontainer/nsinit/init.go b/pkg/libcontainer/nsinit/init.go
index 336fc1eaaf..b6c02eafd5 100644
--- a/pkg/libcontainer/nsinit/init.go
+++ b/pkg/libcontainer/nsinit/init.go
@@ -4,6 +4,11 @@ package nsinit
import (
"fmt"
+ "os"
+ "runtime"
+ "syscall"
+
+ "github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/apparmor"
"github.com/dotcloud/docker/pkg/libcontainer/capabilities"
@@ -11,8 +16,6 @@ import (
"github.com/dotcloud/docker/pkg/libcontainer/utils"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/pkg/user"
- "os"
- "syscall"
)
// Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
@@ -24,14 +27,17 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol
}
// We always read this as it is a way to sync with the parent as well
+ ns.logger.Printf("reading from sync pipe fd %d\n", syncPipe.child.Fd())
context, err := syncPipe.ReadFromParent()
if err != nil {
syncPipe.Close()
return err
}
+ ns.logger.Println("received context from parent")
syncPipe.Close()
if console != "" {
+ ns.logger.Printf("setting up %s as console\n", console)
slave, err := system.OpenTerminal(console, syscall.O_RDWR)
if err != nil {
return fmt.Errorf("open terminal %s", err)
@@ -48,15 +54,15 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol
return fmt.Errorf("setctty %s", err)
}
}
- if err := system.ParentDeathSignal(); err != nil {
- return fmt.Errorf("parent death signal %s", err)
- }
- if err := setupNewMountNamespace(rootfs, console, container.ReadonlyFs, container.NoPivotRoot); err != nil {
- return fmt.Errorf("setup mount namespace %s", err)
- }
if err := setupNetwork(container, context); err != nil {
return fmt.Errorf("setup networking %s", err)
}
+
+ label.Init()
+ ns.logger.Println("setup mount namespace")
+ if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot, container.Context["mount_label"]); err != nil {
+ return fmt.Errorf("setup mount namespace %s", err)
+ }
if err := system.Sethostname(container.Hostname); err != nil {
return fmt.Errorf("sethostname %s", err)
}
@@ -64,9 +70,17 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol
return fmt.Errorf("finalize namespace %s", err)
}
- if err := apparmor.ApplyProfile(os.Getpid(), container.Context["apparmor_profile"]); err != nil {
- return err
+ if profile := container.Context["apparmor_profile"]; profile != "" {
+ ns.logger.Printf("setting apparmor profile %s\n", profile)
+ if err := apparmor.ApplyProfile(os.Getpid(), profile); err != nil {
+ return err
+ }
+ }
+ runtime.LockOSThread()
+ if err := label.SetProcessLabel(container.Context["process_label"]); err != nil {
+ return fmt.Errorf("SetProcessLabel label %s", err)
}
+ ns.logger.Printf("execing %s\n", args[0])
return system.Execv(args[0], args[0:], container.Env)
}
@@ -124,7 +138,11 @@ func setupNetwork(container *libcontainer.Container, context libcontainer.Contex
if err != nil {
return err
}
- return strategy.Initialize(config, context)
+
+ err1 := strategy.Initialize(config, context)
+ if err1 != nil {
+ return err1
+ }
}
return nil
}
diff --git a/pkg/libcontainer/nsinit/mount.go b/pkg/libcontainer/nsinit/mount.go
index 83577cfa8c..3b0cf13bc9 100644
--- a/pkg/libcontainer/nsinit/mount.go
+++ b/pkg/libcontainer/nsinit/mount.go
@@ -4,6 +4,8 @@ package nsinit
import (
"fmt"
+ "github.com/dotcloud/docker/pkg/label"
+ "github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/system"
"io/ioutil"
"os"
@@ -19,7 +21,7 @@ const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NOD
//
// There is no need to unmount the new mounts because as soon as the mount namespace
// is no longer in use, the mounts will be removed automatically
-func setupNewMountNamespace(rootfs, console string, readonly, noPivotRoot bool) error {
+func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, console string, readonly, noPivotRoot bool, mountLabel string) error {
flag := syscall.MS_PRIVATE
if noPivotRoot {
flag = syscall.MS_SLAVE
@@ -30,27 +32,39 @@ func setupNewMountNamespace(rootfs, console string, readonly, noPivotRoot bool)
if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
return fmt.Errorf("mouting %s as bind %s", rootfs, err)
}
- if readonly {
- if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, ""); err != nil {
- return fmt.Errorf("mounting %s as readonly %s", rootfs, err)
- }
- }
- if err := mountSystem(rootfs); err != nil {
+ if err := mountSystem(rootfs, mountLabel); err != nil {
return fmt.Errorf("mount system %s", err)
}
+
+ for _, m := range bindMounts {
+ var (
+ flags = syscall.MS_BIND | syscall.MS_REC
+ dest = filepath.Join(rootfs, m.Destination)
+ )
+ if !m.Writable {
+ flags = flags | syscall.MS_RDONLY
+ }
+ if err := system.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil {
+ return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err)
+ }
+ if !m.Writable {
+ if err := system.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil {
+ return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err)
+ }
+ }
+ if m.Private {
+ if err := system.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil {
+ return fmt.Errorf("mounting %s private %s", dest, err)
+ }
+ }
+ }
+
if err := copyDevNodes(rootfs); err != nil {
return fmt.Errorf("copy dev nodes %s", err)
}
- // In non-privileged mode, this fails. Discard the error.
- setupLoopbackDevices(rootfs)
- if err := setupDev(rootfs); err != nil {
+ if err := setupPtmx(rootfs, console, mountLabel); err != nil {
return err
}
- if console != "" {
- if err := setupPtmx(rootfs, console); err != nil {
- return err
- }
- }
if err := system.Chdir(rootfs); err != nil {
return fmt.Errorf("chdir into %s %s", rootfs, err)
}
@@ -65,6 +79,12 @@ func setupNewMountNamespace(rootfs, console string, readonly, noPivotRoot bool)
}
}
+ if readonly {
+ if err := system.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, ""); err != nil {
+ return fmt.Errorf("mounting %s as readonly %s", rootfs, err)
+ }
+ }
+
system.Umask(0022)
return nil
@@ -127,19 +147,6 @@ func copyDevNodes(rootfs string) error {
return nil
}
-func setupLoopbackDevices(rootfs string) error {
- for i := 0; ; i++ {
- if err := copyDevNode(rootfs, fmt.Sprintf("loop%d", i)); err != nil {
- if !os.IsNotExist(err) {
- return err
- }
- break
- }
-
- }
- return nil
-}
-
func copyDevNode(rootfs, node string) error {
stat, err := os.Stat(filepath.Join("/dev", node))
if err != nil {
@@ -155,32 +162,8 @@ func copyDevNode(rootfs, node string) error {
return nil
}
-// setupDev symlinks the current processes pipes into the
-// appropriate destination on the containers rootfs
-func setupDev(rootfs string) error {
- for _, link := range []struct {
- from string
- to string
- }{
- {"/proc/kcore", "/dev/core"},
- {"/proc/self/fd", "/dev/fd"},
- {"/proc/self/fd/0", "/dev/stdin"},
- {"/proc/self/fd/1", "/dev/stdout"},
- {"/proc/self/fd/2", "/dev/stderr"},
- } {
- dest := filepath.Join(rootfs, link.to)
- if err := os.Remove(dest); err != nil && !os.IsNotExist(err) {
- return fmt.Errorf("remove %s %s", dest, err)
- }
- if err := os.Symlink(link.from, dest); err != nil {
- return fmt.Errorf("symlink %s %s", dest, err)
- }
- }
- return nil
-}
-
// setupConsole ensures that the container has a proper /dev/console setup
-func setupConsole(rootfs, console string) error {
+func setupConsole(rootfs, console string, mountLabel string) error {
oldMask := system.Umask(0000)
defer system.Umask(oldMask)
@@ -204,6 +187,9 @@ func setupConsole(rootfs, console string) error {
if err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil {
return fmt.Errorf("mknod %s %s", dest, err)
}
+ if err := label.SetFileLabel(console, mountLabel); err != nil {
+ return fmt.Errorf("SetFileLabel Failed %s %s", dest, err)
+ }
if err := system.Mount(console, dest, "bind", syscall.MS_BIND, ""); err != nil {
return fmt.Errorf("bind %s to %s %s", console, dest, err)
}
@@ -212,7 +198,7 @@ func setupConsole(rootfs, console string) error {
// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts
// inside the mount namespace
-func mountSystem(rootfs string) error {
+func mountSystem(rootfs string, mountLabel string) error {
for _, m := range []struct {
source string
path string
@@ -222,8 +208,8 @@ func mountSystem(rootfs string) error {
}{
{source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags},
{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags},
- {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: "mode=1777,size=65536k"},
- {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: "newinstance,ptmxmode=0666,mode=620,gid=5"},
+ {source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1755,size=65536k", mountLabel)},
+ {source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)},
} {
if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {
return fmt.Errorf("mkdirall %s %s", m.path, err)
@@ -237,7 +223,7 @@ func mountSystem(rootfs string) error {
// setupPtmx adds a symlink to pts/ptmx for /dev/ptmx and
// finishes setting up /dev/console
-func setupPtmx(rootfs, console string) error {
+func setupPtmx(rootfs, console string, mountLabel string) error {
ptmx := filepath.Join(rootfs, "dev/ptmx")
if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {
return err
@@ -245,8 +231,10 @@ func setupPtmx(rootfs, console string) error {
if err := os.Symlink("pts/ptmx", ptmx); err != nil {
return fmt.Errorf("symlink dev ptmx %s", err)
}
- if err := setupConsole(rootfs, console); err != nil {
- return err
+ if console != "" {
+ if err := setupConsole(rootfs, console, mountLabel); err != nil {
+ return err
+ }
}
return nil
}
diff --git a/pkg/libcontainer/nsinit/nsinit.go b/pkg/libcontainer/nsinit/nsinit.go
index f09a130aa2..c308692af6 100644
--- a/pkg/libcontainer/nsinit/nsinit.go
+++ b/pkg/libcontainer/nsinit/nsinit.go
@@ -2,6 +2,7 @@ package nsinit
import (
"github.com/dotcloud/docker/pkg/libcontainer"
+ "log"
)
// NsInit is an interface with the public facing methods to provide high level
@@ -16,11 +17,13 @@ type linuxNs struct {
root string
commandFactory CommandFactory
stateWriter StateWriter
+ logger *log.Logger
}
-func NewNsInit(command CommandFactory, state StateWriter) NsInit {
+func NewNsInit(command CommandFactory, state StateWriter, logger *log.Logger) NsInit {
return &linuxNs{
commandFactory: command,
stateWriter: state,
+ logger: logger,
}
}
diff --git a/pkg/libcontainer/nsinit/nsinit/main.go b/pkg/libcontainer/nsinit/nsinit/main.go
index 61921c59a3..37aa784981 100644
--- a/pkg/libcontainer/nsinit/nsinit/main.go
+++ b/pkg/libcontainer/nsinit/nsinit/main.go
@@ -3,24 +3,27 @@ package main
import (
"encoding/json"
"flag"
- "github.com/dotcloud/docker/pkg/libcontainer"
- "github.com/dotcloud/docker/pkg/libcontainer/nsinit"
+ "io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
+
+ "github.com/dotcloud/docker/pkg/libcontainer"
+ "github.com/dotcloud/docker/pkg/libcontainer/nsinit"
)
var (
- root, console string
- pipeFd int
+ root, console, logs string
+ pipeFd int
)
func registerFlags() {
flag.StringVar(&console, "console", "", "console (pty slave) path")
flag.IntVar(&pipeFd, "pipe", 0, "sync pipe fd")
flag.StringVar(&root, "root", ".", "root for storing configuration data")
+ flag.StringVar(&logs, "log", "none", "set stderr or a filepath to enable logging")
flag.Parse()
}
@@ -33,20 +36,25 @@ func main() {
}
container, err := loadContainer()
if err != nil {
- log.Fatal(err)
+ log.Fatalf("Unable to load container: %s", err)
}
- ns, err := newNsInit()
+ l, err := getLogger("[exec] ")
if err != nil {
log.Fatal(err)
}
+ ns, err := newNsInit(l)
+ if err != nil {
+ log.Fatalf("Unable to initialize nsinit: %s", err)
+ }
+
switch flag.Arg(0) {
case "exec": // this is executed outside of the namespace in the cwd
var exitCode int
nspid, err := readPid()
if err != nil {
if !os.IsNotExist(err) {
- log.Fatal(err)
+ l.Fatalf("Unable to read pid: %s", err)
}
}
if nspid > 0 {
@@ -56,26 +64,26 @@ func main() {
exitCode, err = ns.Exec(container, term, flag.Args()[1:])
}
if err != nil {
- log.Fatal(err)
+ l.Fatalf("Failed to exec: %s", err)
}
os.Exit(exitCode)
case "init": // this is executed inside of the namespace to setup the container
cwd, err := os.Getwd()
if err != nil {
- log.Fatal(err)
+ l.Fatal(err)
}
if flag.NArg() < 2 {
- log.Fatalf("wrong number of argments %d", flag.NArg())
+ l.Fatalf("wrong number of argments %d", flag.NArg())
}
syncPipe, err := nsinit.NewSyncPipeFromFd(0, uintptr(pipeFd))
if err != nil {
- log.Fatal(err)
+ l.Fatalf("Unable to create sync pipe: %s", err)
}
if err := ns.Init(container, cwd, console, syncPipe, flag.Args()[1:]); err != nil {
- log.Fatal(err)
+ l.Fatalf("Unable to initialize for container: %s", err)
}
default:
- log.Fatalf("command not supported for nsinit %s", flag.Arg(0))
+ l.Fatalf("command not supported for nsinit %s", flag.Arg(0))
}
}
@@ -105,6 +113,23 @@ func readPid() (int, error) {
return pid, nil
}
-func newNsInit() (nsinit.NsInit, error) {
- return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}, &nsinit.DefaultStateWriter{root}), nil
+func newNsInit(l *log.Logger) (nsinit.NsInit, error) {
+ return nsinit.NewNsInit(&nsinit.DefaultCommandFactory{root}, &nsinit.DefaultStateWriter{root}, l), nil
+}
+
+func getLogger(prefix string) (*log.Logger, error) {
+ var w io.Writer
+ switch logs {
+ case "", "none":
+ w = ioutil.Discard
+ case "stderr":
+ w = os.Stderr
+ default: // we have a filepath
+ f, err := os.OpenFile(logs, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0755)
+ if err != nil {
+ return nil, err
+ }
+ w = f
+ }
+ return log.New(w, prefix, log.LstdFlags), nil
}
diff --git a/pkg/libcontainer/nsinit/state.go b/pkg/libcontainer/nsinit/state.go
index af38008c03..26d7fa4230 100644
--- a/pkg/libcontainer/nsinit/state.go
+++ b/pkg/libcontainer/nsinit/state.go
@@ -10,7 +10,7 @@ import (
// StateWriter handles writing and deleting the pid file
// on disk
type StateWriter interface {
- WritePid(pid int) error
+ WritePid(pid int, startTime string) error
DeletePid() error
}
@@ -19,10 +19,18 @@ type DefaultStateWriter struct {
}
// writePidFile writes the namespaced processes pid to pid in the rootfs for the container
-func (d *DefaultStateWriter) WritePid(pid int) error {
- return ioutil.WriteFile(filepath.Join(d.Root, "pid"), []byte(fmt.Sprint(pid)), 0655)
+func (d *DefaultStateWriter) WritePid(pid int, startTime string) error {
+ err := ioutil.WriteFile(filepath.Join(d.Root, "pid"), []byte(fmt.Sprint(pid)), 0655)
+ if err != nil {
+ return err
+ }
+ return ioutil.WriteFile(filepath.Join(d.Root, "start"), []byte(startTime), 0655)
}
func (d *DefaultStateWriter) DeletePid() error {
- return os.Remove(filepath.Join(d.Root, "pid"))
+ err := os.Remove(filepath.Join(d.Root, "pid"))
+ if serr := os.Remove(filepath.Join(d.Root, "start")); err == nil {
+ err = serr
+ }
+ return err
}
diff --git a/pkg/libcontainer/types.go b/pkg/libcontainer/types.go
index 94fe876554..ffeb55a022 100644
--- a/pkg/libcontainer/types.go
+++ b/pkg/libcontainer/types.go
@@ -1,7 +1,6 @@
package libcontainer
import (
- "encoding/json"
"errors"
"github.com/syndtr/gocapability/capability"
)
@@ -19,29 +18,30 @@ var (
namespaceList = Namespaces{}
capabilityList = Capabilities{
- {Key: "SETPCAP", Value: capability.CAP_SETPCAP},
- {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE},
- {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO},
- {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT},
- {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN},
- {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE},
- {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE},
- {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME},
- {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG},
- {Key: "MKNOD", Value: capability.CAP_MKNOD},
- {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE},
- {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL},
- {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE},
- {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN},
- {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN},
+ {Key: "SETPCAP", Value: capability.CAP_SETPCAP, Enabled: false},
+ {Key: "SYS_MODULE", Value: capability.CAP_SYS_MODULE, Enabled: false},
+ {Key: "SYS_RAWIO", Value: capability.CAP_SYS_RAWIO, Enabled: false},
+ {Key: "SYS_PACCT", Value: capability.CAP_SYS_PACCT, Enabled: false},
+ {Key: "SYS_ADMIN", Value: capability.CAP_SYS_ADMIN, Enabled: false},
+ {Key: "SYS_NICE", Value: capability.CAP_SYS_NICE, Enabled: false},
+ {Key: "SYS_RESOURCE", Value: capability.CAP_SYS_RESOURCE, Enabled: false},
+ {Key: "SYS_TIME", Value: capability.CAP_SYS_TIME, Enabled: false},
+ {Key: "SYS_TTY_CONFIG", Value: capability.CAP_SYS_TTY_CONFIG, Enabled: false},
+ {Key: "MKNOD", Value: capability.CAP_MKNOD, Enabled: false},
+ {Key: "AUDIT_WRITE", Value: capability.CAP_AUDIT_WRITE, Enabled: false},
+ {Key: "AUDIT_CONTROL", Value: capability.CAP_AUDIT_CONTROL, Enabled: false},
+ {Key: "MAC_OVERRIDE", Value: capability.CAP_MAC_OVERRIDE, Enabled: false},
+ {Key: "MAC_ADMIN", Value: capability.CAP_MAC_ADMIN, Enabled: false},
+ {Key: "NET_ADMIN", Value: capability.CAP_NET_ADMIN, Enabled: false},
}
)
type (
Namespace struct {
- Key string
- Value int
- File string
+ Key string `json:"key,omitempty"`
+ Enabled bool `json:"enabled,omitempty"`
+ Value int `json:"value,omitempty"`
+ File string `json:"file,omitempty"`
}
Namespaces []*Namespace
)
@@ -50,27 +50,11 @@ func (ns *Namespace) String() string {
return ns.Key
}
-func (ns *Namespace) MarshalJSON() ([]byte, error) {
- return json.Marshal(ns.Key)
-}
-
-func (ns *Namespace) UnmarshalJSON(src []byte) error {
- var nsName string
- if err := json.Unmarshal(src, &nsName); err != nil {
- return err
- }
- ret := GetNamespace(nsName)
- if ret == nil {
- return ErrUnkownNamespace
- }
- *ns = *ret
- return nil
-}
-
func GetNamespace(key string) *Namespace {
for _, ns := range namespaceList {
if ns.Key == key {
- return ns
+ cpy := *ns
+ return &cpy
}
}
return nil
@@ -79,18 +63,23 @@ func GetNamespace(key string) *Namespace {
// Contains returns true if the specified Namespace is
// in the slice
func (n Namespaces) Contains(ns string) bool {
+ return n.Get(ns) != nil
+}
+
+func (n Namespaces) Get(ns string) *Namespace {
for _, nsp := range n {
if nsp.Key == ns {
- return true
+ return nsp
}
}
- return false
+ return nil
}
type (
Capability struct {
- Key string
- Value capability.Cap
+ Key string `json:"key,omitempty"`
+ Enabled bool `json:"enabled"`
+ Value capability.Cap `json:"value,omitempty"`
}
Capabilities []*Capability
)
@@ -99,27 +88,11 @@ func (c *Capability) String() string {
return c.Key
}
-func (c *Capability) MarshalJSON() ([]byte, error) {
- return json.Marshal(c.Key)
-}
-
-func (c *Capability) UnmarshalJSON(src []byte) error {
- var capName string
- if err := json.Unmarshal(src, &capName); err != nil {
- return err
- }
- ret := GetCapability(capName)
- if ret == nil {
- return ErrUnkownCapability
- }
- *c = *ret
- return nil
-}
-
func GetCapability(key string) *Capability {
for _, capp := range capabilityList {
if capp.Key == key {
- return capp
+ cpy := *capp
+ return &cpy
}
}
return nil
@@ -128,10 +101,14 @@ func GetCapability(key string) *Capability {
// Contains returns true if the specified Capability is
// in the slice
func (c Capabilities) Contains(capp string) bool {
+ return c.Get(capp) != nil
+}
+
+func (c Capabilities) Get(capp string) *Capability {
for _, cap := range c {
if cap.Key == capp {
- return true
+ return cap
}
}
- return false
+ return nil
}
diff --git a/pkg/libcontainer/types_linux.go b/pkg/libcontainer/types_linux.go
index c14531df20..1f937e0c97 100644
--- a/pkg/libcontainer/types_linux.go
+++ b/pkg/libcontainer/types_linux.go
@@ -6,11 +6,11 @@ import (
func init() {
namespaceList = Namespaces{
- {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt"},
- {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts"},
- {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc"},
- {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user"},
- {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid"},
- {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net"},
+ {Key: "NEWNS", Value: syscall.CLONE_NEWNS, File: "mnt", Enabled: true},
+ {Key: "NEWUTS", Value: syscall.CLONE_NEWUTS, File: "uts", Enabled: true},
+ {Key: "NEWIPC", Value: syscall.CLONE_NEWIPC, File: "ipc", Enabled: true},
+ {Key: "NEWUSER", Value: syscall.CLONE_NEWUSER, File: "user", Enabled: true},
+ {Key: "NEWPID", Value: syscall.CLONE_NEWPID, File: "pid", Enabled: true},
+ {Key: "NEWNET", Value: syscall.CLONE_NEWNET, File: "net", Enabled: true},
}
}
diff --git a/pkg/libcontainer/types_test.go b/pkg/libcontainer/types_test.go
index 52b85a4db9..9735937b76 100644
--- a/pkg/libcontainer/types_test.go
+++ b/pkg/libcontainer/types_test.go
@@ -30,6 +30,6 @@ func TestCapabilitiesContains(t *testing.T) {
t.Fatal("capabilities should not contain SYS_ADMIN")
}
if !caps.Contains("MKNOD") {
- t.Fatal("capabilities should container MKNOD but does not")
+ t.Fatal("capabilities should contain MKNOD but does not")
}
}
diff --git a/pkg/listenbuffer/buffer.go b/pkg/listenbuffer/buffer.go
index c350805a7d..17572c8a0e 100644
--- a/pkg/listenbuffer/buffer.go
+++ b/pkg/listenbuffer/buffer.go
@@ -5,15 +5,10 @@
*/
package listenbuffer
-import (
- "fmt"
- "net"
- "time"
-)
+import "net"
-// NewListenBuffer returns a listener listening on addr with the protocol. It sets the
-// timeout to wait on first connection before an error is returned
-func NewListenBuffer(proto, addr string, activate chan struct{}, timeout time.Duration) (net.Listener, error) {
+// NewListenBuffer returns a listener listening on addr with the protocol.
+func NewListenBuffer(proto, addr string, activate chan struct{}) (net.Listener, error) {
wrapped, err := net.Listen(proto, addr)
if err != nil {
return nil, err
@@ -22,7 +17,6 @@ func NewListenBuffer(proto, addr string, activate chan struct{}, timeout time.Du
return &defaultListener{
wrapped: wrapped,
activate: activate,
- timeout: timeout,
}, nil
}
@@ -30,7 +24,6 @@ type defaultListener struct {
wrapped net.Listener // the real listener to wrap
ready bool // is the listner ready to start accpeting connections
activate chan struct{}
- timeout time.Duration // how long to wait before we consider this an error
}
func (l *defaultListener) Close() error {
@@ -47,15 +40,7 @@ func (l *defaultListener) Accept() (net.Conn, error) {
if l.ready {
return l.wrapped.Accept()
}
-
- select {
- case <-time.After(l.timeout):
- // close the connection so any clients are disconnected
- l.Close()
- return nil, fmt.Errorf("timeout (%s) reached waiting for listener to become ready", l.timeout.String())
- case <-l.activate:
- l.ready = true
- return l.Accept()
- }
- panic("unreachable")
+ <-l.activate
+ l.ready = true
+ return l.Accept()
}
diff --git a/pkg/mflag/MAINTAINERS b/pkg/mflag/MAINTAINERS
new file mode 100644
index 0000000000..ceeb0cfd18
--- /dev/null
+++ b/pkg/mflag/MAINTAINERS
@@ -0,0 +1 @@
+Victor Vieux <victor.vieux@docker.com> (@vieux)
diff --git a/pkg/mflag/example/example.go b/pkg/mflag/example/example.go
index ed940e8d70..ce9dd30e4c 100644
--- a/pkg/mflag/example/example.go
+++ b/pkg/mflag/example/example.go
@@ -13,7 +13,8 @@ var (
func init() {
flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp")
- flag.BoolVar(&b, []string{"b"}, false, "a simple bool")
+ flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool")
+ flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool")
flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool")
flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer")
flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage
diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go
index 7125c030ed..ed6fad3b46 100644
--- a/pkg/mflag/flag.go
+++ b/pkg/mflag/flag.go
@@ -10,7 +10,7 @@
Define flags using flag.String(), Bool(), Int(), etc.
This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int.
- import "flag"
+	import flag "github.com/dotcloud/docker/pkg/mflag"
var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname")
If you like, you can bind the flag to a variable using the Var() functions.
var flagvar int
@@ -23,6 +23,18 @@
flag.Var(&flagVal, []string{"name"}, "help message for flagname")
For such flags, the default value is just the initial value of the variable.
+	You can also add "deprecated" flags, they are still usable, but are not shown
+ in the usage and will display a warning when you try to use them:
+ var ip = flag.Int([]string{"f", "#flagname", "-flagname"}, 1234, "help message for flagname")
+ this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` and
+ var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname")
+	will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.`
+
+	You can also group one letter flags, if you declare
+ var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose")
+ var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow")
+ you will be able to use the -vs or -sv
+
After all flags are defined, call
flag.Parse()
to parse the command line into the defined flags.
@@ -286,9 +298,24 @@ type Flag struct {
DefValue string // default value (as text); for usage message
}
+type flagSlice []string
+
+func (p flagSlice) Len() int { return len(p) }
+func (p flagSlice) Less(i, j int) bool {
+ pi, pj := strings.ToLower(p[i]), strings.ToLower(p[j])
+ if pi[0] == '-' {
+ pi = pi[1:]
+ }
+ if pj[0] == '-' {
+ pj = pj[1:]
+ }
+ return pi < pj
+}
+func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
// sortFlags returns the flags as a slice in lexicographical sorted order.
func sortFlags(flags map[string]*Flag) []*Flag {
- var list sort.StringSlice
+ var list flagSlice
for _, f := range flags {
fName := strings.TrimPrefix(f.Names[0], "#")
if len(f.Names) == 1 {
@@ -307,7 +334,7 @@ func sortFlags(flags map[string]*Flag) []*Flag {
list = append(list, fName)
}
}
- list.Sort()
+ sort.Sort(list)
result := make([]*Flag, len(list))
for i, name := range list {
result[i] = flags[name]
@@ -805,9 +832,20 @@ func (f *FlagSet) parseOne() (bool, string, error) {
f.actual = make(map[string]*Flag)
}
f.actual[name] = flag
- for _, n := range flag.Names {
+ for i, n := range flag.Names {
if n == fmt.Sprintf("#%s", name) {
- fmt.Fprintf(f.out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name)
+ replacement := ""
+ for j := i; j < len(flag.Names); j++ {
+ if flag.Names[j][0] != '#' {
+ replacement = flag.Names[j]
+ break
+ }
+ }
+ if replacement != "" {
+ fmt.Fprintf(f.out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement)
+ } else {
+ fmt.Fprintf(f.out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name)
+ }
}
}
return true, "", nil
diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go
index dfece5d611..07fadf8171 100644
--- a/pkg/namesgenerator/names-generator.go
+++ b/pkg/namesgenerator/names-generator.go
@@ -15,33 +15,60 @@ var (
// Docker 0.7.x generates names from notable scientists and hackers.
//
// Ada Lovelace invented the first algorithm. http://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull)
+ // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. http://en.wikipedia.org/wiki/Ada_Yonath
+ // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. http://en.wikipedia.org/wiki/Adele_Goldstine
// Alan Turing was a founding father of computer science. http://en.wikipedia.org/wiki/Alan_Turing.
// Albert Einstein invented the general theory of relativity. http://en.wikipedia.org/wiki/Albert_Einstein
// Ambroise Pare invented modern surgery. http://en.wikipedia.org/wiki/Ambroise_Par%C3%A9
// Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. http://en.wikipedia.org/wiki/Archimedes
+ // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. http://en.wikipedia.org/wiki/Barbara_McClintock
// Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod.
// Charles Babbage invented the concept of a programmable computer. http://en.wikipedia.org/wiki/Charles_Babbage.
// Charles Darwin established the principles of natural evolution. http://en.wikipedia.org/wiki/Charles_Darwin.
// Dennis Ritchie and Ken Thompson created UNIX and the C programming language. http://en.wikipedia.org/wiki/Dennis_Ritchie http://en.wikipedia.org/wiki/Ken_Thompson
// Douglas Engelbart gave the mother of all demos: http://en.wikipedia.org/wiki/Douglas_Engelbart
+ // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - http://en.wikipedia.org/wiki/Elizabeth_Blackwell
// Emmett Brown invented time travel. http://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff)
// Enrico Fermi invented the first nuclear reactor. http://en.wikipedia.org/wiki/Enrico_Fermi.
+	// Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. http://en.wikipedia.org/wiki/Erna_Schneider_Hoover
// Euclid invented geometry. http://en.wikipedia.org/wiki/Euclid
+ // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. http://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi
// Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. http://en.wikipedia.org/wiki/Galileo_Galilei
+ // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - http://en.wikipedia.org/wiki/Gertrude_Elion
+ // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. http://en.wikipedia.org/wiki/Grace_Hopper
// Henry Poincare made fundamental contributions in several fields of mathematics. http://en.wikipedia.org/wiki/Henri_Poincar%C3%A9
+ // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - http://en.wikipedia.org/wiki/Hypatia
// Isaac Newton invented classic mechanics and modern optics. http://en.wikipedia.org/wiki/Isaac_Newton
+ // Jane Colden - American botanist widely considered the first female American botanist - http://en.wikipedia.org/wiki/Jane_Colden
+ // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - http://en.wikipedia.org/wiki/Jane_Goodall
+ // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. http://en.wikipedia.org/wiki/Jean_Bartik
+ // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. http://en.wikipedia.org/wiki/Jean_E._Sammet
+ // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - http://en.wikipedia.org/wiki/Johanna_Mestorf
// John McCarthy invented LISP: http://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist)
+ // June Almeida - Scottish virologist who took the first pictures of the rubella virus - http://en.wikipedia.org/wiki/June_Almeida
+ // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. http://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones
// Leonardo Da Vinci invented too many things to list here. http://en.wikipedia.org/wiki/Leonardo_da_Vinci.
// Linus Torvalds invented Linux and Git. http://en.wikipedia.org/wiki/Linus_Torvalds
+ // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - http://en.wikipedia.org/wiki/Lise_Meitner
// Louis Pasteur discovered vaccination, fermentation and pasteurization. http://en.wikipedia.org/wiki/Louis_Pasteur.
// Malcolm McLean invented the modern shipping container: http://en.wikipedia.org/wiki/Malcom_McLean
+ // Maria Ardinghelli - Italian translator, mathematician and physicist - http://en.wikipedia.org/wiki/Maria_Ardinghelli
+ // Maria Kirch - German astronomer and first woman to discover a comet - http://en.wikipedia.org/wiki/Maria_Margarethe_Kirch
+ // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - http://en.wikipedia.org/wiki/Maria_Mayer
// Marie Curie discovered radioactivity. http://en.wikipedia.org/wiki/Marie_Curie.
+ // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - http://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande
+ // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - http://en.wikipedia.org/wiki/Mary_Leakey
// Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. http://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB
// Niels Bohr is the father of quantum theory. http://en.wikipedia.org/wiki/Niels_Bohr.
// Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla
// Pierre de Fermat pioneered several aspects of modern mathematics. http://en.wikipedia.org/wiki/Pierre_de_Fermat
+ // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. http://en.wikipedia.org/wiki/Rachel_Carson
+ // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). http://en.wikipedia.org/wiki/Radia_Perlman
// Richard Feynman was a key contributor to quantum mechanics and particle physics. http://en.wikipedia.org/wiki/Richard_Feynman
// Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. http://en.wikipedia.org/wiki/Rob_Pike
+ // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - http://en.wikipedia.org/wiki/Rosalind_Franklin
+ // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - http://en.wikipedia.org/wiki/Sofia_Kovalevskaya
+ // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. http://en.wikipedia.org/wiki/Sophie_Wilson
// Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. http://en.wikipedia.org/wiki/Stephen_Hawking
// Steve Wozniak invented the Apple I and Apple II. http://en.wikipedia.org/wiki/Steve_Wozniak
// Werner Heisenberg was a founding father of quantum mechanics. http://en.wikipedia.org/wiki/Werner_Heisenberg
@@ -49,14 +76,14 @@ var (
// http://en.wikipedia.org/wiki/John_Bardeen
// http://en.wikipedia.org/wiki/Walter_Houser_Brattain
// http://en.wikipedia.org/wiki/William_Shockley
- right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley"}
+ right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley", "goldstine", "hoover", "hopper", "bartik", "sammet", "jones", "perlman", "wilson", "kowalevski", "hypatia", "goodall", "mayer", "elion", "blackwell", "lalande", "kirch", "ardinghelli", "colden", "almeida", "leakey", "meitner", "mestorf", "rosalind", "sinoussi", "carson", "mcclintock", "yonath"}
)
func GenerateRandomName(checker NameChecker) (string, error) {
retry := 5
rand.Seed(time.Now().UnixNano())
name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))])
- for checker != nil && checker.Exists(name) && retry > 0 {
+ for checker != nil && checker.Exists(name) && retry > 0 || name == "boring_wozniak" /* Steve Wozniak is not boring */ {
name = fmt.Sprintf("%s%d", name, rand.Intn(10))
retry = retry - 1
}
diff --git a/pkg/netlink/MAINTAINERS b/pkg/netlink/MAINTAINERS
index e53d933d47..1cb551364d 100644
--- a/pkg/netlink/MAINTAINERS
+++ b/pkg/netlink/MAINTAINERS
@@ -1,2 +1,2 @@
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
-Guillaume Charmes <guillaume@dotcloud.com> (@creack)
+Guillaume J. Charmes <guillaume@docker.com> (@creack)
diff --git a/pkg/netlink/netlink_linux.go b/pkg/netlink/netlink_linux.go
index f8bb6bac3c..f4aa92ed34 100644
--- a/pkg/netlink/netlink_linux.go
+++ b/pkg/netlink/netlink_linux.go
@@ -5,6 +5,7 @@ package netlink
import (
"encoding/binary"
"fmt"
+ "math/rand"
"net"
"syscall"
"unsafe"
@@ -17,10 +18,16 @@ const (
IFLA_INFO_DATA = 2
VETH_INFO_PEER = 1
IFLA_NET_NS_FD = 28
+ SIOC_BRADDBR = 0x89a0
)
var nextSeqNr int
+type ifreqHwaddr struct {
+ IfrnName [16]byte
+ IfruHwaddr syscall.RawSockaddr
+}
+
func nativeEndian() binary.ByteOrder {
var x uint32 = 0x01020304
if *(*byte)(unsafe.Pointer(&x)) == 0x01 {
@@ -808,3 +815,47 @@ func NetworkCreateVethPair(name1, name2 string) error {
}
return s.HandleAck(wb.Seq)
}
+
+// Create the actual bridge device. This is more backward-compatible than
+// netlink.NetworkLinkAdd and works on RHEL 6.
+func CreateBridge(name string, setMacAddr bool) error {
+ s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
+ if err != nil {
+ // ipv6 issue, creating with ipv4
+ s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
+ if err != nil {
+ return err
+ }
+ }
+ defer syscall.Close(s)
+
+ nameBytePtr, err := syscall.BytePtrFromString(name)
+ if err != nil {
+ return err
+ }
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), SIOC_BRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 {
+ return err
+ }
+ if setMacAddr {
+ return setBridgeMacAddress(s, name)
+ }
+ return nil
+}
+
+func setBridgeMacAddress(s int, name string) error {
+ ifr := ifreqHwaddr{}
+ ifr.IfruHwaddr.Family = syscall.ARPHRD_ETHER
+ copy(ifr.IfrnName[:], name)
+
+ for i := 0; i < 6; i++ {
+ ifr.IfruHwaddr.Data[i] = int8(rand.Intn(255))
+ }
+
+ ifr.IfruHwaddr.Data[0] &^= 0x1 // clear multicast bit
+ ifr.IfruHwaddr.Data[0] |= 0x2 // set local assignment bit (IEEE802)
+
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), syscall.SIOCSIFHWADDR, uintptr(unsafe.Pointer(&ifr))); err != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/pkg/netlink/netlink_unsupported.go b/pkg/netlink/netlink_unsupported.go
index bd9e962d35..00a3b3fae8 100644
--- a/pkg/netlink/netlink_unsupported.go
+++ b/pkg/netlink/netlink_unsupported.go
@@ -59,3 +59,7 @@ func NetworkSetMaster(iface, master *net.Interface) error {
func NetworkLinkDown(iface *net.Interface) error {
return ErrNotImplemented
}
+
+func CreateBridge(name string, setMacAddr bool) error {
+ return ErrNotImplemented
+}
diff --git a/pkg/opts/opts_test.go b/pkg/opts/opts_test.go
deleted file mode 100644
index a5c1fac9ca..0000000000
--- a/pkg/opts/opts_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package opts
-
-import (
- "testing"
-)
-
-func TestValidateIP4(t *testing.T) {
- if ret, err := ValidateIp4Address(`1.2.3.4`); err != nil || ret == "" {
- t.Fatalf("ValidateIp4Address(`1.2.3.4`) got %s %s", ret, err)
- }
-
- if ret, err := ValidateIp4Address(`127.0.0.1`); err != nil || ret == "" {
- t.Fatalf("ValidateIp4Address(`127.0.0.1`) got %s %s", ret, err)
- }
-
- if ret, err := ValidateIp4Address(`127`); err == nil || ret != "" {
- t.Fatalf("ValidateIp4Address(`127`) got %s %s", ret, err)
- }
-
- if ret, err := ValidateIp4Address(`random invalid string`); err == nil || ret != "" {
- t.Fatalf("ValidateIp4Address(`random invalid string`) got %s %s", ret, err)
- }
-
-}
diff --git a/pkg/selinux/selinux.go b/pkg/selinux/selinux.go
new file mode 100644
index 0000000000..edabc4f7dd
--- /dev/null
+++ b/pkg/selinux/selinux.go
@@ -0,0 +1,378 @@
+package selinux
+
+import (
+ "bufio"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ "github.com/dotcloud/docker/pkg/mount"
+ "github.com/dotcloud/docker/pkg/system"
+ "io"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+const (
+ Enforcing = 1
+ Permissive = 0
+ Disabled = -1
+ selinuxDir = "/etc/selinux/"
+ selinuxConfig = selinuxDir + "config"
+ selinuxTypeTag = "SELINUXTYPE"
+ selinuxTag = "SELINUX"
+ selinuxPath = "/sys/fs/selinux"
+ xattrNameSelinux = "security.selinux"
+ stRdOnly = 0x01
+)
+
+var (
+ assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
+ spaceRegex = regexp.MustCompile(`^([^=]+) (.*)$`)
+ mcsList = make(map[string]bool)
+ selinuxfs = "unknown"
+ selinuxEnabled = false
+ selinuxEnabledChecked = false
+)
+
+type SELinuxContext map[string]string
+
+// SetDisabled disables selinux support for the package
+func SetDisabled() {
+ selinuxEnabled, selinuxEnabledChecked = false, true
+}
+
+func getSelinuxMountPoint() string {
+ if selinuxfs != "unknown" {
+ return selinuxfs
+ }
+ selinuxfs = ""
+
+ mounts, err := mount.GetMounts()
+ if err != nil {
+ return selinuxfs
+ }
+ for _, mount := range mounts {
+ if mount.Fstype == "selinuxfs" {
+ selinuxfs = mount.Mountpoint
+ break
+ }
+ }
+ if selinuxfs != "" {
+ var buf syscall.Statfs_t
+ syscall.Statfs(selinuxfs, &buf)
+ if (buf.Flags & stRdOnly) == 1 {
+ selinuxfs = ""
+ }
+ }
+ return selinuxfs
+}
+
+func SelinuxEnabled() bool {
+ if selinuxEnabledChecked {
+ return selinuxEnabled
+ }
+ selinuxEnabledChecked = true
+ if fs := getSelinuxMountPoint(); fs != "" {
+ if con, _ := getcon(); con != "kernel" {
+ selinuxEnabled = true
+ }
+ }
+ return selinuxEnabled
+}
+
+func readConfig(target string) (value string) {
+ var (
+ val, key string
+ bufin *bufio.Reader
+ )
+
+ in, err := os.Open(selinuxConfig)
+ if err != nil {
+ return ""
+ }
+ defer in.Close()
+
+ bufin = bufio.NewReader(in)
+
+ for done := false; !done; {
+ var line string
+ if line, err = bufin.ReadString('\n'); err != nil {
+ if err != io.EOF {
+ return ""
+ }
+ done = true
+ }
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ // Skip blank lines
+ continue
+ }
+ if line[0] == ';' || line[0] == '#' {
+ // Skip comments
+ continue
+ }
+ if groups := assignRegex.FindStringSubmatch(line); groups != nil {
+ key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
+ if key == target {
+ return strings.Trim(val, "\"")
+ }
+ }
+ }
+ return ""
+}
+
+func getSELinuxPolicyRoot() string {
+ return selinuxDir + readConfig(selinuxTypeTag)
+}
+
+func readCon(name string) (string, error) {
+ var val string
+
+ in, err := os.Open(name)
+ if err != nil {
+ return "", err
+ }
+ defer in.Close()
+
+ _, err = fmt.Fscanf(in, "%s", &val)
+ return val, err
+}
+
+func Setfilecon(path string, scon string) error {
+ return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0)
+}
+
+func Setfscreatecon(scon string) error {
+ return writeCon("/proc/self/attr/fscreate", scon)
+}
+
+func Getfscreatecon() (string, error) {
+ return readCon("/proc/self/attr/fscreate")
+}
+
+func getcon() (string, error) {
+ return readCon("/proc/self/attr/current")
+}
+
+func Getpidcon(pid int) (string, error) {
+ return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
+}
+
+func Getexeccon() (string, error) {
+ return readCon("/proc/self/attr/exec")
+}
+
+func writeCon(name string, val string) error {
+ if !SelinuxEnabled() {
+ return nil
+ }
+ out, err := os.OpenFile(name, os.O_WRONLY, 0)
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+
+ if val != "" {
+ _, err = out.Write([]byte(val))
+ } else {
+ _, err = out.Write(nil)
+ }
+ return err
+}
+
+func Setexeccon(scon string) error {
+ return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", system.Gettid()), scon)
+}
+
+func (c SELinuxContext) Get() string {
+ return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
+}
+
+func NewContext(scon string) SELinuxContext {
+ c := make(SELinuxContext)
+
+ if len(scon) != 0 {
+ con := strings.SplitN(scon, ":", 4)
+ c["user"] = con[0]
+ c["role"] = con[1]
+ c["type"] = con[2]
+ c["level"] = con[3]
+ }
+ return c
+}
+
+func SelinuxGetEnforce() int {
+ var enforce int
+
+ enforceS, err := readCon(fmt.Sprintf("%s/enforce", selinuxPath))
+ if err != nil {
+ return -1
+ }
+
+ enforce, err = strconv.Atoi(string(enforceS))
+ if err != nil {
+ return -1
+ }
+ return enforce
+}
+
+func SelinuxGetEnforceMode() int {
+ switch readConfig(selinuxTag) {
+ case "enforcing":
+ return Enforcing
+ case "permissive":
+ return Permissive
+ }
+ return Disabled
+}
+
+func mcsAdd(mcs string) {
+ mcsList[mcs] = true
+}
+
+func mcsDelete(mcs string) {
+ mcsList[mcs] = false
+}
+
+func mcsExists(mcs string) bool {
+ return mcsList[mcs]
+}
+
+func IntToMcs(id int, catRange uint32) string {
+ var (
+ SETSIZE = int(catRange)
+ TIER = SETSIZE
+ ORD = id
+ )
+
+ if id < 1 || id > 523776 {
+ return ""
+ }
+
+ for ORD > TIER {
+ ORD = ORD - TIER
+ TIER -= 1
+ }
+ TIER = SETSIZE - TIER
+ ORD = ORD + TIER
+ return fmt.Sprintf("s0:c%d,c%d", TIER, ORD)
+}
+
+func uniqMcs(catRange uint32) string {
+ var (
+ n uint32
+ c1, c2 uint32
+ mcs string
+ )
+
+ for {
+ binary.Read(rand.Reader, binary.LittleEndian, &n)
+ c1 = n % catRange
+ binary.Read(rand.Reader, binary.LittleEndian, &n)
+ c2 = n % catRange
+ if c1 == c2 {
+ continue
+ } else {
+ if c1 > c2 {
+ t := c1
+ c1 = c2
+ c2 = t
+ }
+ }
+ mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
+ if mcsExists(mcs) {
+ continue
+ }
+ mcsAdd(mcs)
+ break
+ }
+ return mcs
+}
+
+func GetLxcContexts() (processLabel string, fileLabel string) {
+ var (
+ val, key string
+ bufin *bufio.Reader
+ )
+
+ if !SelinuxEnabled() {
+ return "", ""
+ }
+ lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot())
+ in, err := os.Open(lxcPath)
+ if err != nil {
+ return "", ""
+ }
+ defer in.Close()
+
+ bufin = bufio.NewReader(in)
+
+ for done := false; !done; {
+ var line string
+ if line, err = bufin.ReadString('\n'); err != nil {
+ if err == io.EOF {
+ done = true
+ } else {
+ goto exit
+ }
+ }
+ line = strings.TrimSpace(line)
+ if len(line) == 0 {
+ // Skip blank lines
+ continue
+ }
+ if line[0] == ';' || line[0] == '#' {
+ // Skip comments
+ continue
+ }
+ if groups := assignRegex.FindStringSubmatch(line); groups != nil {
+ key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
+ if key == "process" {
+ processLabel = strings.Trim(val, "\"")
+ }
+ if key == "file" {
+ fileLabel = strings.Trim(val, "\"")
+ }
+ }
+ }
+
+ if processLabel == "" || fileLabel == "" {
+ return "", ""
+ }
+
+exit:
+ mcs := IntToMcs(os.Getpid(), 1024)
+ scon := NewContext(processLabel)
+ scon["level"] = mcs
+ processLabel = scon.Get()
+ scon = NewContext(fileLabel)
+ scon["level"] = mcs
+ fileLabel = scon.Get()
+ return processLabel, fileLabel
+}
+
+func SecurityCheckContext(val string) error {
+ return writeCon(fmt.Sprintf("%s.context", selinuxPath), val)
+}
+
+func CopyLevel(src, dest string) (string, error) {
+ if !SelinuxEnabled() {
+ return "", nil
+ }
+ if src == "" {
+ return "", nil
+ }
+ if err := SecurityCheckContext(src); err != nil {
+ return "", err
+ }
+ if err := SecurityCheckContext(dest); err != nil {
+ return "", err
+ }
+ scon := NewContext(src)
+ tcon := NewContext(dest)
+ tcon["level"] = scon["level"]
+ return tcon.Get(), nil
+}
diff --git a/pkg/selinux/selinux_test.go b/pkg/selinux/selinux_test.go
new file mode 100644
index 0000000000..fde6ab147d
--- /dev/null
+++ b/pkg/selinux/selinux_test.go
@@ -0,0 +1,59 @@
+package selinux_test
+
+import (
+ "github.com/dotcloud/docker/pkg/selinux"
+ "os"
+ "testing"
+)
+
+func testSetfilecon(t *testing.T) {
+ if selinux.SelinuxEnabled() {
+ tmp := "selinux_test"
+ out, _ := os.OpenFile(tmp, os.O_WRONLY, 0)
+ out.Close()
+ err := selinux.Setfilecon(tmp, "system_u:object_r:bin_t:s0")
+ if err != nil {
+ t.Log("Setfilecon failed")
+ t.Fatal(err)
+ }
+ os.Remove(tmp)
+ }
+}
+
+func TestSELinux(t *testing.T) {
+ var (
+ err error
+ plabel, flabel string
+ )
+
+ if selinux.SelinuxEnabled() {
+ t.Log("Enabled")
+ plabel, flabel = selinux.GetLxcContexts()
+ t.Log(plabel)
+ t.Log(flabel)
+ plabel, flabel = selinux.GetLxcContexts()
+ t.Log(plabel)
+ t.Log(flabel)
+ t.Log("getenforce ", selinux.SelinuxGetEnforce())
+ t.Log("getenforcemode ", selinux.SelinuxGetEnforceMode())
+ pid := os.Getpid()
+ t.Log("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023))
+ err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0")
+ if err == nil {
+ t.Log(selinux.Getfscreatecon())
+ } else {
+ t.Log("setfscreatecon failed", err)
+ t.Fatal(err)
+ }
+ err = selinux.Setfscreatecon("")
+ if err == nil {
+ t.Log(selinux.Getfscreatecon())
+ } else {
+ t.Log("setfscreatecon failed", err)
+ t.Fatal(err)
+ }
+ t.Log(selinux.Getpidcon(1))
+ } else {
+ t.Log("Disabled")
+ }
+}
diff --git a/pkg/signal/MAINTAINERS b/pkg/signal/MAINTAINERS
new file mode 100644
index 0000000000..3300331598
--- /dev/null
+++ b/pkg/signal/MAINTAINERS
@@ -0,0 +1,2 @@
+Guillaume J. Charmes <guillaume@docker.com> (@creack)
+
diff --git a/pkg/signal/signal.go b/pkg/signal/signal.go
new file mode 100644
index 0000000000..63337542d7
--- /dev/null
+++ b/pkg/signal/signal.go
@@ -0,0 +1,19 @@
+package signal
+
+import (
+ "os"
+ "os/signal"
+)
+
+func CatchAll(sigc chan os.Signal) {
+ handledSigs := []os.Signal{}
+ for _, s := range SignalMap {
+ handledSigs = append(handledSigs, s)
+ }
+ signal.Notify(sigc, handledSigs...)
+}
+
+func StopCatch(sigc chan os.Signal) {
+ signal.Stop(sigc)
+ close(sigc)
+}
diff --git a/pkg/signal/signal_darwin.go b/pkg/signal/signal_darwin.go
new file mode 100644
index 0000000000..fcd3a8f2c9
--- /dev/null
+++ b/pkg/signal/signal_darwin.go
@@ -0,0 +1,40 @@
+package signal
+
+import (
+ "syscall"
+)
+
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+ "BUG": syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CONT": syscall.SIGCONT,
+ "EMT": syscall.SIGEMT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INFO": syscall.SIGINFO,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "PIPE": syscall.SIGPIPE,
+ "PROF": syscall.SIGPROF,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/pkg/signal/signal_freebsd.go b/pkg/signal/signal_freebsd.go
new file mode 100644
index 0000000000..102e918486
--- /dev/null
+++ b/pkg/signal/signal_freebsd.go
@@ -0,0 +1,42 @@
+package signal
+
+import (
+ "syscall"
+)
+
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+ "BUF": syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CONT": syscall.SIGCONT,
+ "EMT": syscall.SIGEMT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INFO": syscall.SIGINFO,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "LWP": syscall.SIGLWP,
+ "PIPE": syscall.SIGPIPE,
+ "PROF": syscall.SIGPROF,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "THR": syscall.SIGTHR,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go
new file mode 100644
index 0000000000..a62f79d4af
--- /dev/null
+++ b/pkg/signal/signal_linux.go
@@ -0,0 +1,43 @@
+package signal
+
+import (
+ "syscall"
+)
+
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+ "BUS": syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CLD": syscall.SIGCLD,
+ "CONT": syscall.SIGCONT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "PIPE": syscall.SIGPIPE,
+ "POLL": syscall.SIGPOLL,
+ "PROF": syscall.SIGPROF,
+ "PWR": syscall.SIGPWR,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STKFLT": syscall.SIGSTKFLT,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "UNUSED": syscall.SIGUNUSED,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go
new file mode 100644
index 0000000000..99f9465970
--- /dev/null
+++ b/pkg/signal/signal_unsupported.go
@@ -0,0 +1,9 @@
+// +build !linux,!darwin,!freebsd
+
+package signal
+
+import (
+ "syscall"
+)
+
+var SignalMap = map[string]syscall.Signal{}
diff --git a/pkg/system/calls_linux.go b/pkg/system/calls_linux.go
index bf667c535b..cc4727aaa2 100644
--- a/pkg/system/calls_linux.go
+++ b/pkg/system/calls_linux.go
@@ -115,8 +115,8 @@ func Mknod(path string, mode uint32, dev int) error {
return syscall.Mknod(path, mode, dev)
}
-func ParentDeathSignal() error {
- if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, uintptr(syscall.SIGKILL), 0); err != 0 {
+func ParentDeathSignal(sig uintptr) error {
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 {
return err
}
return nil
@@ -143,3 +143,7 @@ func SetCloneFlags(cmd *exec.Cmd, flag uintptr) {
}
cmd.SysProcAttr.Cloneflags = flag
}
+
+func Gettid() int {
+ return syscall.Gettid()
+}
diff --git a/pkg/system/proc.go b/pkg/system/proc.go
new file mode 100644
index 0000000000..a492346c7f
--- /dev/null
+++ b/pkg/system/proc.go
@@ -0,0 +1,26 @@
+package system
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// look in /proc to find the process start time so that we can verify
+// that this pid has started after ourselves
+func GetProcessStartTime(pid int) (string, error) {
+ data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat"))
+ if err != nil {
+ return "", err
+ }
+ parts := strings.Split(string(data), " ")
+ // the starttime is located at pos 22
+ // from the man page
+ //
+ // starttime %llu (was %lu before Linux 2.6)
+ // (22) The time the process started after system boot. In kernels before Linux 2.6, this
+ // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks
+ // (divide by sysconf(_SC_CLK_TCK)).
+ return parts[22-1], nil // starts at 1
+}
diff --git a/pkg/system/unsupported.go b/pkg/system/unsupported.go
index eb3ec7ee92..c52a1e5d00 100644
--- a/pkg/system/unsupported.go
+++ b/pkg/system/unsupported.go
@@ -13,3 +13,7 @@ func SetCloneFlags(cmd *exec.Cmd, flag uintptr) {
func UsetCloseOnExec(fd uintptr) error {
return ErrNotSupportedPlatform
}
+
+func Gettid() int {
+ return 0
+}
diff --git a/pkg/systemd/booted.go b/pkg/systemd/booted.go
new file mode 100644
index 0000000000..2aae931ec1
--- /dev/null
+++ b/pkg/systemd/booted.go
@@ -0,0 +1,15 @@
+package systemd
+
+import (
+ "os"
+)
+
+// Conversion to Go of systemd's sd_booted()
+func SdBooted() bool {
+ s, err := os.Stat("/run/systemd/system")
+ if err != nil {
+ return false
+ }
+
+ return s.IsDir()
+}
diff --git a/pkg/systemd/listendfd.go b/pkg/systemd/listendfd.go
index f6044328c2..0fbc0a6ab6 100644
--- a/pkg/systemd/listendfd.go
+++ b/pkg/systemd/listendfd.go
@@ -5,7 +5,7 @@ import (
"net"
"strconv"
- "github.com/dotcloud/docker/pkg/systemd/activation"
+ "github.com/coreos/go-systemd/activation"
)
// ListenFD returns the specified socket activated files as a slice of
diff --git a/pkg/term/MAINTAINERS b/pkg/term/MAINTAINERS
index 48d4d91b2a..15b8ac3729 100644
--- a/pkg/term/MAINTAINERS
+++ b/pkg/term/MAINTAINERS
@@ -1,2 +1,2 @@
-Guillaume Charmes <guillaume@dotcloud.com> (@creack)
+Guillaume J. Charmes <guillaume@docker.com> (@creack)
Solomon Hykes <solomon@dotcloud.com> (@shykes)
diff --git a/pkg/term/termios_darwin.go b/pkg/term/termios_darwin.go
index 24e79de4b2..11cd70d10b 100644
--- a/pkg/term/termios_darwin.go
+++ b/pkg/term/termios_darwin.go
@@ -9,16 +9,24 @@ const (
getTermios = syscall.TIOCGETA
setTermios = syscall.TIOCSETA
- ECHO = 0x00000008
- ONLCR = 0x2
- ISTRIP = 0x20
- INLCR = 0x40
- ISIG = 0x80
- IGNCR = 0x80
- ICANON = 0x100
- ICRNL = 0x100
- IXOFF = 0x400
- IXON = 0x200
+ IGNBRK = syscall.IGNBRK
+ PARMRK = syscall.PARMRK
+ INLCR = syscall.INLCR
+ IGNCR = syscall.IGNCR
+ ECHONL = syscall.ECHONL
+ CSIZE = syscall.CSIZE
+ ICRNL = syscall.ICRNL
+ ISTRIP = syscall.ISTRIP
+ PARENB = syscall.PARENB
+ ECHO = syscall.ECHO
+ ICANON = syscall.ICANON
+ ISIG = syscall.ISIG
+ IXON = syscall.IXON
+ BRKINT = syscall.BRKINT
+ INPCK = syscall.INPCK
+ OPOST = syscall.OPOST
+ CS8 = syscall.CS8
+ IEXTEN = syscall.IEXTEN
)
type Termios struct {
@@ -41,10 +49,13 @@ func MakeRaw(fd uintptr) (*State, error) {
}
newState := oldState.termios
- newState.Iflag &^= (ISTRIP | INLCR | IGNCR | IXON | IXOFF)
- newState.Iflag |= ICRNL
- newState.Oflag |= ONLCR
- newState.Lflag &^= (ECHO | ICANON | ISIG)
+ newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+ newState.Oflag &^= OPOST
+ newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+ newState.Cflag &^= (CSIZE | PARENB)
+ newState.Cflag |= CS8
+ newState.Cc[syscall.VMIN] = 1
+ newState.Cc[syscall.VTIME] = 0
if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
return nil, err
diff --git a/pkg/term/termios_freebsd.go b/pkg/term/termios_freebsd.go
new file mode 100644
index 0000000000..ed3659572c
--- /dev/null
+++ b/pkg/term/termios_freebsd.go
@@ -0,0 +1,65 @@
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ getTermios = syscall.TIOCGETA
+ setTermios = syscall.TIOCSETA
+
+ IGNBRK = syscall.IGNBRK
+ PARMRK = syscall.PARMRK
+ INLCR = syscall.INLCR
+ IGNCR = syscall.IGNCR
+ ECHONL = syscall.ECHONL
+ CSIZE = syscall.CSIZE
+ ICRNL = syscall.ICRNL
+ ISTRIP = syscall.ISTRIP
+ PARENB = syscall.PARENB
+ ECHO = syscall.ECHO
+ ICANON = syscall.ICANON
+ ISIG = syscall.ISIG
+ IXON = syscall.IXON
+ BRKINT = syscall.BRKINT
+ INPCK = syscall.INPCK
+ OPOST = syscall.OPOST
+ CS8 = syscall.CS8
+ IEXTEN = syscall.IEXTEN
+)
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]byte
+ Ispeed uint32
+ Ospeed uint32
+}
+
+// MakeRaw put the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ var oldState State
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+ newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+ newState.Oflag &^= OPOST
+ newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+ newState.Cflag &^= (CSIZE | PARENB)
+ newState.Cflag |= CS8
+ newState.Cc[syscall.VMIN] = 1
+ newState.Cc[syscall.VTIME] = 0
+
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
diff --git a/pkg/version/version.go b/pkg/version/version.go
index 3721d64aa8..5ff9d2ed2a 100644
--- a/pkg/version/version.go
+++ b/pkg/version/version.go
@@ -7,10 +7,10 @@ import (
type Version string
-func (me Version) compareTo(other string) int {
+func (me Version) compareTo(other Version) int {
var (
meTab = strings.Split(string(me), ".")
- otherTab = strings.Split(other, ".")
+ otherTab = strings.Split(string(other), ".")
)
for i, s := range meTab {
var meInt, otherInt int
@@ -31,22 +31,22 @@ func (me Version) compareTo(other string) int {
return 0
}
-func (me Version) LessThan(other string) bool {
+func (me Version) LessThan(other Version) bool {
return me.compareTo(other) == -1
}
-func (me Version) LessThanOrEqualTo(other string) bool {
+func (me Version) LessThanOrEqualTo(other Version) bool {
return me.compareTo(other) <= 0
}
-func (me Version) GreaterThan(other string) bool {
+func (me Version) GreaterThan(other Version) bool {
return me.compareTo(other) == 1
}
-func (me Version) GreaterThanOrEqualTo(other string) bool {
+func (me Version) GreaterThanOrEqualTo(other Version) bool {
return me.compareTo(other) >= 0
}
-func (me Version) Equal(other string) bool {
+func (me Version) Equal(other Version) bool {
return me.compareTo(other) == 0
}
diff --git a/pkg/version/version_test.go b/pkg/version/version_test.go
index 4bebd0c434..27c0536c2f 100644
--- a/pkg/version/version_test.go
+++ b/pkg/version/version_test.go
@@ -5,7 +5,7 @@ import (
)
func assertVersion(t *testing.T, a, b string, result int) {
- if r := Version(a).compareTo(b); r != result {
+ if r := Version(a).compareTo(Version(b)); r != result {
t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result)
}
}
diff --git a/auth/auth.go b/registry/auth.go
index 4417dd0f7a..4fdd51fda4 100644
--- a/auth/auth.go
+++ b/registry/auth.go
@@ -1,4 +1,4 @@
-package auth
+package registry
import (
"encoding/base64"
diff --git a/auth/auth_test.go b/registry/auth_test.go
index 2335072609..3cb1a9ac4b 100644
--- a/auth/auth_test.go
+++ b/registry/auth_test.go
@@ -1,4 +1,4 @@
-package auth
+package registry
import (
"io/ioutil"
diff --git a/registry/registry.go b/registry/registry.go
index 543dcea383..817c08afa9 100644
--- a/registry/registry.go
+++ b/registry/registry.go
@@ -6,7 +6,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@@ -27,7 +26,7 @@ var (
)
func pingRegistryEndpoint(endpoint string) (bool, error) {
- if endpoint == auth.IndexServerAddress() {
+ if endpoint == IndexServerAddress() {
		// Skip the check, we know this one is valid
		// (and we never want to fall back to http in case of error)
return false, nil
@@ -42,7 +41,10 @@ func pingRegistryEndpoint(endpoint string) (bool, error) {
conn.SetDeadline(time.Now().Add(time.Duration(10) * time.Second))
return conn, nil
}
- httpTransport := &http.Transport{Dial: httpDial}
+ httpTransport := &http.Transport{
+ Dial: httpDial,
+ Proxy: http.ProxyFromEnvironment,
+ }
client := &http.Client{Transport: httpTransport}
resp, err := client.Get(endpoint + "_ping")
if err != nil {
@@ -103,7 +105,7 @@ func ResolveRepositoryName(reposName string) (string, string, error) {
nameParts[0] != "localhost" {
// This is a Docker Index repos (ex: samalba/hipache or ubuntu)
err := validateRepositoryName(reposName)
- return auth.IndexServerAddress(), reposName, err
+ return IndexServerAddress(), reposName, err
}
if len(nameParts) < 2 {
// There is a dot in repos name (and no registry address)
@@ -149,20 +151,6 @@ func ExpandAndVerifyRegistryUrl(hostname string) (string, error) {
return endpoint, nil
}
-func doWithCookies(c *http.Client, req *http.Request) (*http.Response, error) {
- for _, cookie := range c.Jar.Cookies(req.URL) {
- req.AddCookie(cookie)
- }
- res, err := c.Do(req)
- if err != nil {
- return nil, err
- }
- if len(res.Cookies()) > 0 {
- c.Jar.SetCookies(req.URL, res.Cookies())
- }
- return res, err
-}
-
func setTokenAuth(req *http.Request, token []string) {
if req.Header.Get("Authorization") == "" { // Don't override
req.Header.Set("Authorization", "Token "+strings.Join(token, ","))
@@ -177,7 +165,7 @@ func (r *Registry) GetRemoteHistory(imgID, registry string, token []string) ([]s
return nil, err
}
setTokenAuth(req, token)
- res, err := doWithCookies(r.client, req)
+ res, err := r.client.Do(req)
if err != nil {
return nil, err
}
@@ -212,7 +200,7 @@ func (r *Registry) LookupRemoteImage(imgID, registry string, token []string) boo
return false
}
setTokenAuth(req, token)
- res, err := doWithCookies(r.client, req)
+ res, err := r.client.Do(req)
if err != nil {
utils.Errorf("Error in LookupRemoteImage %s", err)
return false
@@ -229,7 +217,7 @@ func (r *Registry) GetRemoteImageJSON(imgID, registry string, token []string) ([
return nil, -1, fmt.Errorf("Failed to download json: %s", err)
}
setTokenAuth(req, token)
- res, err := doWithCookies(r.client, req)
+ res, err := r.client.Do(req)
if err != nil {
return nil, -1, fmt.Errorf("Failed to download json: %s", err)
}
@@ -256,7 +244,7 @@ func (r *Registry) GetRemoteImageLayer(imgID, registry string, token []string) (
return nil, fmt.Errorf("Error while getting from the server: %s\n", err)
}
setTokenAuth(req, token)
- res, err := doWithCookies(r.client, req)
+ res, err := r.client.Do(req)
if err != nil {
return nil, err
}
@@ -282,7 +270,7 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
return nil, err
}
setTokenAuth(req, token)
- res, err := doWithCookies(r.client, req)
+ res, err := r.client.Do(req)
if err != nil {
return nil, err
}
@@ -388,7 +376,7 @@ func (r *Registry) PushImageChecksumRegistry(imgData *ImgData, registry string,
req.Header.Set("X-Docker-Checksum", imgData.Checksum)
req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload)
- res, err := doWithCookies(r.client, req)
+ res, err := r.client.Do(req)
if err != nil {
return fmt.Errorf("Failed to upload metadata: %s", err)
}
@@ -424,11 +412,14 @@ func (r *Registry) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regis
req.Header.Add("Content-type", "application/json")
setTokenAuth(req, token)
- res, err := doWithCookies(r.client, req)
+ res, err := r.client.Do(req)
if err != nil {
return fmt.Errorf("Failed to upload metadata: %s", err)
}
defer res.Body.Close()
+ if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") {
+ return utils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res)
+ }
if res.StatusCode != 200 {
errBody, err := ioutil.ReadAll(res.Body)
if err != nil {
@@ -449,18 +440,20 @@ func (r *Registry) PushImageLayerRegistry(imgID string, layer io.Reader, registr
utils.Debugf("[registry] Calling PUT %s", registry+"images/"+imgID+"/layer")
+ tarsumLayer := &utils.TarSum{Reader: layer}
h := sha256.New()
- checksumLayer := &utils.CheckSum{Reader: layer, Hash: h}
- tarsumLayer := &utils.TarSum{Reader: checksumLayer}
+ h.Write(jsonRaw)
+ h.Write([]byte{'\n'})
+ checksumLayer := &utils.CheckSum{Reader: tarsumLayer, Hash: h}
- req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", tarsumLayer)
+ req, err := r.reqFactory.NewRequest("PUT", registry+"images/"+imgID+"/layer", checksumLayer)
if err != nil {
return "", "", err
}
req.ContentLength = -1
req.TransferEncoding = []string{"chunked"}
setTokenAuth(req, token)
- res, err := doWithCookies(r.client, req)
+ res, err := r.client.Do(req)
if err != nil {
return "", "", fmt.Errorf("Failed to upload layer: %s", err)
}
@@ -497,7 +490,7 @@ func (r *Registry) PushRegistryTag(remote, revision, tag, registry string, token
req.Header.Add("Content-type", "application/json")
setTokenAuth(req, token)
req.ContentLength = int64(len(revision))
- res, err := doWithCookies(r.client, req)
+ res, err := r.client.Do(req)
if err != nil {
return err
}
@@ -615,7 +608,7 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat
func (r *Registry) SearchRepositories(term string) (*SearchResults, error) {
utils.Debugf("Index server: %s", r.indexEndpoint)
- u := auth.IndexServerAddress() + "search?q=" + url.QueryEscape(term)
+ u := r.indexEndpoint + "search?q=" + url.QueryEscape(term)
req, err := r.reqFactory.NewRequest("GET", u, nil)
if err != nil {
return nil, err
@@ -641,12 +634,12 @@ func (r *Registry) SearchRepositories(term string) (*SearchResults, error) {
return result, err
}
-func (r *Registry) GetAuthConfig(withPasswd bool) *auth.AuthConfig {
+func (r *Registry) GetAuthConfig(withPasswd bool) *AuthConfig {
password := ""
if withPasswd {
password = r.authConfig.Password
}
- return &auth.AuthConfig{
+ return &AuthConfig{
Username: r.authConfig.Username,
Password: password,
Email: r.authConfig.Email,
@@ -682,12 +675,12 @@ type ImgData struct {
type Registry struct {
client *http.Client
- authConfig *auth.AuthConfig
+ authConfig *AuthConfig
reqFactory *utils.HTTPRequestFactory
indexEndpoint string
}
-func NewRegistry(authConfig *auth.AuthConfig, factory *utils.HTTPRequestFactory, indexEndpoint string) (r *Registry, err error) {
+func NewRegistry(authConfig *AuthConfig, factory *utils.HTTPRequestFactory, indexEndpoint string) (r *Registry, err error) {
httpTransport := &http.Transport{
DisableKeepAlives: true,
Proxy: http.ProxyFromEnvironment,
@@ -707,13 +700,13 @@ func NewRegistry(authConfig *auth.AuthConfig, factory *utils.HTTPRequestFactory,
// If we're working with a standalone private registry over HTTPS, send Basic Auth headers
// alongside our requests.
- if indexEndpoint != auth.IndexServerAddress() && strings.HasPrefix(indexEndpoint, "https://") {
+ if indexEndpoint != IndexServerAddress() && strings.HasPrefix(indexEndpoint, "https://") {
standalone, err := pingRegistryEndpoint(indexEndpoint)
if err != nil {
return nil, err
}
if standalone {
- utils.Debugf("Endpoint %s is eligible for private registry auth. Enabling decorator.", indexEndpoint)
+ utils.Debugf("Endpoint %s is eligible for private registry auth. Enabling decorator.", indexEndpoint)
dec := utils.NewHTTPAuthDecorator(authConfig.Username, authConfig.Password)
factory.AddDecorator(dec)
}
diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go
index 6eb94b63cc..dd5da6bd50 100644
--- a/registry/registry_mock_test.go
+++ b/registry/registry_mock_test.go
@@ -321,7 +321,12 @@ func handlerAuth(w http.ResponseWriter, r *http.Request) {
}
func handlerSearch(w http.ResponseWriter, r *http.Request) {
- writeResponse(w, "{}", 200)
+ result := &SearchResults{
+ Query: "fakequery",
+ NumResults: 1,
+ Results: []SearchResult{{Name: "fakeimage", StarCount: 42}},
+ }
+ writeResponse(w, result, 200)
}
func TestPing(t *testing.T) {
diff --git a/registry/registry_test.go b/registry/registry_test.go
index 82a27a166f..c072da41c5 100644
--- a/registry/registry_test.go
+++ b/registry/registry_test.go
@@ -1,7 +1,6 @@
package registry
import (
- "github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/utils"
"strings"
"testing"
@@ -14,7 +13,7 @@ var (
)
func spawnTestRegistry(t *testing.T) *Registry {
- authConfig := &auth.AuthConfig{}
+ authConfig := &AuthConfig{}
r, err := NewRegistry(authConfig, utils.NewHTTPRequestFactory(), makeURL("/v1/"))
if err != nil {
t.Fatal(err)
@@ -137,7 +136,7 @@ func TestResolveRepositoryName(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- assertEqual(t, ep, auth.IndexServerAddress(), "Expected endpoint to be index server address")
+ assertEqual(t, ep, IndexServerAddress(), "Expected endpoint to be index server address")
assertEqual(t, repo, "fooo/bar", "Expected resolved repo to be foo/bar")
u := makeURL("")[7:]
@@ -187,14 +186,16 @@ func TestPushImageJSONIndex(t *testing.T) {
func TestSearchRepositories(t *testing.T) {
r := spawnTestRegistry(t)
- results, err := r.SearchRepositories("supercalifragilisticepsialidocious")
+ results, err := r.SearchRepositories("fakequery")
if err != nil {
t.Fatal(err)
}
if results == nil {
t.Fatal("Expected non-nil SearchResults object")
}
- assertEqual(t, results.NumResults, 0, "Expected 0 search results")
+ assertEqual(t, results.NumResults, 1, "Expected 1 search result")
+ assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query")
+ assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars")
}
func TestValidRepositoryName(t *testing.T) {
@@ -205,4 +206,8 @@ func TestValidRepositoryName(t *testing.T) {
t.Log("Repository name should be invalid")
t.Fail()
}
+ if err := validateRepositoryName("docker///docker"); err == nil {
+ t.Log("Repository name should be invalid")
+ t.Fail()
+ }
}
diff --git a/runconfig/compare.go b/runconfig/compare.go
index c09f897716..5c1bf46575 100644
--- a/runconfig/compare.go
+++ b/runconfig/compare.go
@@ -14,12 +14,10 @@ func Compare(a, b *Config) bool {
a.MemorySwap != b.MemorySwap ||
a.CpuShares != b.CpuShares ||
a.OpenStdin != b.OpenStdin ||
- a.Tty != b.Tty ||
- a.VolumesFrom != b.VolumesFrom {
+ a.Tty != b.Tty {
return false
}
if len(a.Cmd) != len(b.Cmd) ||
- len(a.Dns) != len(b.Dns) ||
len(a.Env) != len(b.Env) ||
len(a.PortSpecs) != len(b.PortSpecs) ||
len(a.ExposedPorts) != len(b.ExposedPorts) ||
@@ -33,11 +31,6 @@ func Compare(a, b *Config) bool {
return false
}
}
- for i := 0; i < len(a.Dns); i++ {
- if a.Dns[i] != b.Dns[i] {
- return false
- }
- }
for i := 0; i < len(a.Env); i++ {
if a.Env[i] != b.Env[i] {
return false
diff --git a/runconfig/config.go b/runconfig/config.go
index 9faa823a57..33a7882b6f 100644
--- a/runconfig/config.go
+++ b/runconfig/config.go
@@ -25,10 +25,8 @@ type Config struct {
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
Env []string
Cmd []string
- Dns []string
Image string // Name of the image as it was passed by the operator (eg. could be symbolic)
Volumes map[string]struct{}
- VolumesFrom string
WorkingDir string
Entrypoint []string
NetworkDisabled bool
@@ -50,7 +48,6 @@ func ContainerConfigFromJob(job *engine.Job) *Config {
OpenStdin: job.GetenvBool("OpenStdin"),
StdinOnce: job.GetenvBool("StdinOnce"),
Image: job.Getenv("Image"),
- VolumesFrom: job.Getenv("VolumesFrom"),
WorkingDir: job.Getenv("WorkingDir"),
NetworkDisabled: job.GetenvBool("NetworkDisabled"),
}
@@ -65,12 +62,8 @@ func ContainerConfigFromJob(job *engine.Job) *Config {
if Cmd := job.GetenvList("Cmd"); Cmd != nil {
config.Cmd = Cmd
}
- if Dns := job.GetenvList("Dns"); Dns != nil {
- config.Dns = Dns
- }
if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil {
config.Entrypoint = Entrypoint
}
-
return config
}
diff --git a/runconfig/config_test.go b/runconfig/config_test.go
index 3ef31491fc..f71528ff8e 100644
--- a/runconfig/config_test.go
+++ b/runconfig/config_test.go
@@ -2,58 +2,186 @@ package runconfig
import (
"github.com/dotcloud/docker/nat"
+ "strings"
"testing"
)
+func parse(t *testing.T, args string) (*Config, *HostConfig, error) {
+ config, hostConfig, _, err := Parse(strings.Split(args+" ubuntu bash", " "), nil)
+ return config, hostConfig, err
+}
+
+func mustParse(t *testing.T, args string) (*Config, *HostConfig) {
+ config, hostConfig, err := parse(t, args)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return config, hostConfig
+}
+
+func TestParseRunLinks(t *testing.T) {
+ if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
+ t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
+ }
+ if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
+ t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
+ }
+ if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
+ t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
+ }
+
+ if _, _, err := parse(t, "--link a"); err == nil {
+ t.Fatalf("Error parsing links. `--link a` should be an error but is not")
+ }
+ if _, _, err := parse(t, "--link"); err == nil {
+ t.Fatalf("Error parsing links. `--link` should be an error but is not")
+ }
+}
+
+func TestParseRunAttach(t *testing.T) {
+ if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr {
+ t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
+ }
+ if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr {
+ t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
+ }
+ if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
+ t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
+ }
+ if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
+ t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
+ }
+
+ if _, _, err := parse(t, "-a"); err == nil {
+ t.Fatalf("Error parsing attach flags, `-a` should be an error but is not")
+ }
+ if _, _, err := parse(t, "-a invalid"); err == nil {
+ t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not")
+ }
+ if _, _, err := parse(t, "-a invalid -a stdout"); err == nil {
+ t.Fatalf("Error parsing attach flags, `-a invalid -a stdout` should be an error but is not")
+ }
+ if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil {
+ t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not")
+ }
+ if _, _, err := parse(t, "-a stdin -d"); err == nil {
+ t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not")
+ }
+ if _, _, err := parse(t, "-a stdout -d"); err == nil {
+ t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not")
+ }
+ if _, _, err := parse(t, "-a stderr -d"); err == nil {
+ t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not")
+ }
+ if _, _, err := parse(t, "-d --rm"); err == nil {
+ t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not")
+ }
+}
+
+func TestParseRunVolumes(t *testing.T) {
+ if config, hostConfig := mustParse(t, "-v /tmp"); hostConfig.Binds != nil {
+ t.Fatalf("Error parsing volume flags, `-v /tmp` should not mount-bind anything. Received %v", hostConfig.Binds)
+ } else if _, exists := config.Volumes["/tmp"]; !exists {
+ t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
+ }
+
+ if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil {
+ t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds)
+ } else if _, exists := config.Volumes["/tmp"]; !exists {
+ t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
+ } else if _, exists := config.Volumes["/var"]; !exists {
+ t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. Received %v", config.Volumes)
+ }
+
+ if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
+ t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp` should mount-bind /hostTmp into /containerTmp. Received %v", hostConfig.Binds)
+ } else if _, exists := config.Volumes["/containerTmp"]; !exists {
+ t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes)
+ }
+
+ if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /hostVar:/containerVar"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp" || hostConfig.Binds[1] != "/hostVar:/containerVar" {
+ t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /hostVar:/containerVar` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
+ } else if _, exists := config.Volumes["/containerTmp"]; !exists {
+ t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
+ } else if _, exists := config.Volumes["/containerVar"]; !exists {
+ t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
+ }
+
+ if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw"); hostConfig.Binds == nil || hostConfig.Binds[0] != "/hostTmp:/containerTmp:ro" || hostConfig.Binds[1] != "/hostVar:/containerVar:rw" {
+ t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp:ro -v /hostVar:/containerVar:rw` should mount-bind /hostTmp into /containerTmp and /hostVar into /containerVar. Received %v", hostConfig.Binds)
+ } else if _, exists := config.Volumes["/containerTmp"]; !exists {
+ t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
+ } else if _, exists := config.Volumes["/containerVar"]; !exists {
+ t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
+ }
+
+ if config, hostConfig := mustParse(t, "-v /hostTmp:/containerTmp -v /containerVar"); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != "/hostTmp:/containerTmp" {
+ t.Fatalf("Error parsing volume flags, `-v /hostTmp:/containerTmp -v /containerVar` should mount-bind only /hostTmp into /containerTmp. Received %v", hostConfig.Binds)
+ } else if _, exists := config.Volumes["/containerTmp"]; !exists {
+ t.Fatalf("Error parsing volume flags, `-v /containerTmp` is missing from volumes. Received %v", config.Volumes)
+ } else if _, exists := config.Volumes["/containerVar"]; !exists {
+ t.Fatalf("Error parsing volume flags, `-v /containerVar` is missing from volumes. Received %v", config.Volumes)
+ }
+
+ if config, hostConfig := mustParse(t, ""); hostConfig.Binds != nil {
+ t.Fatalf("Error parsing volume flags, without volume, nothing should be mount-binded. Received %v", hostConfig.Binds)
+ } else if len(config.Volumes) != 0 {
+ t.Fatalf("Error parsing volume flags, without volume, no volume should be present. Received %v", config.Volumes)
+ }
+
+ if _, _, err := parse(t, "-v /"); err == nil {
+ t.Fatalf("Expected error, but got none")
+ }
+
+ if _, _, err := parse(t, "-v /:/"); err == nil {
+ t.Fatalf("Error parsing volume flags, `-v /:/` should fail but didn't")
+ }
+ if _, _, err := parse(t, "-v"); err == nil {
+ t.Fatalf("Error parsing volume flags, `-v` should fail but didn't")
+ }
+ if _, _, err := parse(t, "-v /tmp:"); err == nil {
+ t.Fatalf("Error parsing volume flags, `-v /tmp:` should fail but didn't")
+ }
+ if _, _, err := parse(t, "-v /tmp:ro"); err == nil {
+ t.Fatalf("Error parsing volume flags, `-v /tmp:ro` should fail but didn't")
+ }
+ if _, _, err := parse(t, "-v /tmp::"); err == nil {
+ t.Fatalf("Error parsing volume flags, `-v /tmp::` should fail but didn't")
+ }
+ if _, _, err := parse(t, "-v :"); err == nil {
+ t.Fatalf("Error parsing volume flags, `-v :` should fail but didn't")
+ }
+ if _, _, err := parse(t, "-v ::"); err == nil {
+ t.Fatalf("Error parsing volume flags, `-v ::` should fail but didn't")
+ }
+ if _, _, err := parse(t, "-v /tmp:/tmp:/tmp:/tmp"); err == nil {
+ t.Fatalf("Error parsing volume flags, `-v /tmp:/tmp:/tmp:/tmp` should fail but didn't")
+ }
+}
+
func TestCompare(t *testing.T) {
volumes1 := make(map[string]struct{})
volumes1["/test1"] = struct{}{}
config1 := Config{
- Dns: []string{"1.1.1.1", "2.2.2.2"},
- PortSpecs: []string{"1111:1111", "2222:2222"},
- Env: []string{"VAR1=1", "VAR2=2"},
- VolumesFrom: "11111111",
- Volumes: volumes1,
- }
- config2 := Config{
- Dns: []string{"0.0.0.0", "2.2.2.2"},
- PortSpecs: []string{"1111:1111", "2222:2222"},
- Env: []string{"VAR1=1", "VAR2=2"},
- VolumesFrom: "11111111",
- Volumes: volumes1,
+ PortSpecs: []string{"1111:1111", "2222:2222"},
+ Env: []string{"VAR1=1", "VAR2=2"},
+ Volumes: volumes1,
}
config3 := Config{
- Dns: []string{"1.1.1.1", "2.2.2.2"},
- PortSpecs: []string{"0000:0000", "2222:2222"},
- Env: []string{"VAR1=1", "VAR2=2"},
- VolumesFrom: "11111111",
- Volumes: volumes1,
- }
- config4 := Config{
- Dns: []string{"1.1.1.1", "2.2.2.2"},
- PortSpecs: []string{"0000:0000", "2222:2222"},
- Env: []string{"VAR1=1", "VAR2=2"},
- VolumesFrom: "22222222",
- Volumes: volumes1,
+ PortSpecs: []string{"0000:0000", "2222:2222"},
+ Env: []string{"VAR1=1", "VAR2=2"},
+ Volumes: volumes1,
}
volumes2 := make(map[string]struct{})
volumes2["/test2"] = struct{}{}
config5 := Config{
- Dns: []string{"1.1.1.1", "2.2.2.2"},
- PortSpecs: []string{"0000:0000", "2222:2222"},
- Env: []string{"VAR1=1", "VAR2=2"},
- VolumesFrom: "11111111",
- Volumes: volumes2,
- }
- if Compare(&config1, &config2) {
- t.Fatalf("Compare should return false, Dns are different")
+ PortSpecs: []string{"0000:0000", "2222:2222"},
+ Env: []string{"VAR1=1", "VAR2=2"},
+ Volumes: volumes2,
}
if Compare(&config1, &config3) {
t.Fatalf("Compare should return false, PortSpecs are different")
}
- if Compare(&config1, &config4) {
- t.Fatalf("Compare should return false, VolumesFrom are different")
- }
if Compare(&config1, &config5) {
t.Fatalf("Compare should return false, Volumes are different")
}
@@ -67,17 +195,14 @@ func TestMerge(t *testing.T) {
volumesImage["/test1"] = struct{}{}
volumesImage["/test2"] = struct{}{}
configImage := &Config{
- Dns: []string{"1.1.1.1", "2.2.2.2"},
- PortSpecs: []string{"1111:1111", "2222:2222"},
- Env: []string{"VAR1=1", "VAR2=2"},
- VolumesFrom: "1111",
- Volumes: volumesImage,
+ PortSpecs: []string{"1111:1111", "2222:2222"},
+ Env: []string{"VAR1=1", "VAR2=2"},
+ Volumes: volumesImage,
}
volumesUser := make(map[string]struct{})
volumesUser["/test3"] = struct{}{}
configUser := &Config{
- Dns: []string{"3.3.3.3"},
PortSpecs: []string{"3333:2222", "3333:3333"},
Env: []string{"VAR2=3", "VAR3=3"},
Volumes: volumesUser,
@@ -87,15 +212,6 @@ func TestMerge(t *testing.T) {
t.Error(err)
}
- if len(configUser.Dns) != 3 {
- t.Fatalf("Expected 3 dns, 1.1.1.1, 2.2.2.2 and 3.3.3.3, found %d", len(configUser.Dns))
- }
- for _, dns := range configUser.Dns {
- if dns != "1.1.1.1" && dns != "2.2.2.2" && dns != "3.3.3.3" {
- t.Fatalf("Expected 1.1.1.1 or 2.2.2.2 or 3.3.3.3, found %s", dns)
- }
- }
-
if len(configUser.ExposedPorts) != 3 {
t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts))
}
@@ -122,10 +238,6 @@ func TestMerge(t *testing.T) {
}
}
- if configUser.VolumesFrom != "1111" {
- t.Fatalf("Expected VolumesFrom to be 1111, found %s", configUser.VolumesFrom)
- }
-
ports, _, err := nat.ParsePortSpecs([]string{"0000"})
if err != nil {
t.Error(err)
diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go
index 6c8618ee81..3235bf1f4e 100644
--- a/runconfig/hostconfig.go
+++ b/runconfig/hostconfig.go
@@ -3,21 +3,20 @@ package runconfig
import (
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/nat"
+ "github.com/dotcloud/docker/utils"
)
type HostConfig struct {
Binds []string
ContainerIDFile string
- LxcConf []KeyValuePair
+ LxcConf []utils.KeyValuePair
Privileged bool
PortBindings nat.PortMap
Links []string
PublishAllPorts bool
-}
-
-type KeyValuePair struct {
- Key string
- Value string
+ Dns []string
+ DnsSearch []string
+ VolumesFrom []string
}
func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
@@ -34,6 +33,14 @@ func ContainerHostConfigFromJob(job *engine.Job) *HostConfig {
if Links := job.GetenvList("Links"); Links != nil {
hostConfig.Links = Links
}
-
+ if Dns := job.GetenvList("Dns"); Dns != nil {
+ hostConfig.Dns = Dns
+ }
+ if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil {
+ hostConfig.DnsSearch = DnsSearch
+ }
+ if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil {
+ hostConfig.VolumesFrom = VolumesFrom
+ }
return hostConfig
}
diff --git a/runconfig/merge.go b/runconfig/merge.go
index a8d677baa8..1240dbcacd 100644
--- a/runconfig/merge.go
+++ b/runconfig/merge.go
@@ -64,6 +64,7 @@ func Merge(userConf, imageConf *Config) error {
}
}
}
+
if !userConf.Tty {
userConf.Tty = imageConf.Tty
}
@@ -93,21 +94,12 @@ func Merge(userConf, imageConf *Config) error {
if userConf.Cmd == nil || len(userConf.Cmd) == 0 {
userConf.Cmd = imageConf.Cmd
}
- if userConf.Dns == nil || len(userConf.Dns) == 0 {
- userConf.Dns = imageConf.Dns
- } else {
- //duplicates aren't an issue here
- userConf.Dns = append(userConf.Dns, imageConf.Dns...)
- }
if userConf.Entrypoint == nil || len(userConf.Entrypoint) == 0 {
userConf.Entrypoint = imageConf.Entrypoint
}
if userConf.WorkingDir == "" {
userConf.WorkingDir = imageConf.WorkingDir
}
- if userConf.VolumesFrom == "" {
- userConf.VolumesFrom = imageConf.VolumesFrom
- }
if userConf.Volumes == nil || len(userConf.Volumes) == 0 {
userConf.Volumes = imageConf.Volumes
} else {
diff --git a/runconfig/parse.go b/runconfig/parse.go
index fb08c068b2..d395b49e80 100644
--- a/runconfig/parse.go
+++ b/runconfig/parse.go
@@ -3,8 +3,8 @@ package runconfig
import (
"fmt"
"github.com/dotcloud/docker/nat"
+ "github.com/dotcloud/docker/opts"
flag "github.com/dotcloud/docker/pkg/mflag"
- "github.com/dotcloud/docker/pkg/opts"
"github.com/dotcloud/docker/pkg/sysinfo"
"github.com/dotcloud/docker/utils"
"io/ioutil"
@@ -15,7 +15,7 @@ import (
var (
ErrInvalidWorikingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.")
ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d")
- ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: -rm and -d")
+ ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d")
)
//FIXME Only used in tests
@@ -42,8 +42,10 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
flPublish opts.ListOpts
flExpose opts.ListOpts
flDns opts.ListOpts
+ flDnsSearch = opts.NewListOpts(opts.ValidateDomain)
flVolumesFrom opts.ListOpts
flLxcOpts opts.ListOpts
+ flEnvFile opts.ListOpts
flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)")
flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id")
@@ -69,12 +71,14 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)")
cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables")
+ cmd.Var(&flEnvFile, []string{"-env-file"}, "Read in a line delimited file of ENV variables")
cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", nat.PortSpecTemplateFormat))
cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host")
cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers")
+ cmd.Var(&flDnsSearch, []string{"-dns-search"}, "Set custom dns search domains")
cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)")
- cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
+ cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "(lxc exec-driver only) Add custom lxc options --lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
if err := cmd.Parse(args); err != nil {
return nil, nil, cmd, err
@@ -148,7 +152,7 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
entrypoint = []string{*flEntrypoint}
}
- lxcConf, err := parseLxcConfOpts(flLxcOpts)
+ lxcConf, err := parseKeyValueOpts(flLxcOpts)
if err != nil {
return nil, nil, cmd, err
}
@@ -179,6 +183,20 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
}
}
+ // collect all the environment variables for the container
+ envVariables := []string{}
+ for _, ef := range flEnvFile.GetAll() {
+ parsedVars, err := opts.ParseEnvFile(ef)
+ if err != nil {
+ return nil, nil, cmd, err
+ }
+ envVariables = append(envVariables, parsedVars...)
+ }
+ // parse the '-e' and '--env' after, to allow override
+ envVariables = append(envVariables, flEnv.GetAll()...)
+ // boo, there's no debug output for docker run
+ //utils.Debugf("Environment variables for the container: %#v", envVariables)
+
config := &Config{
Hostname: hostname,
Domainname: domainname,
@@ -193,12 +211,10 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
- Env: flEnv.GetAll(),
+ Env: envVariables,
Cmd: runCmd,
- Dns: flDns.GetAll(),
Image: image,
Volumes: flVolumes.GetMap(),
- VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","),
Entrypoint: entrypoint,
WorkingDir: *flWorkingDir,
}
@@ -211,6 +227,9 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
PortBindings: portBindings,
Links: flLinks.GetAll(),
PublishAllPorts: *flPublishAll,
+ Dns: flDns.GetAll(),
+ DnsSearch: flDnsSearch.GetAll(),
+ VolumesFrom: flVolumesFrom.GetAll(),
}
if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit {
@@ -225,22 +244,33 @@ func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Conf
return config, hostConfig, cmd, nil
}
-func parseLxcConfOpts(opts opts.ListOpts) ([]KeyValuePair, error) {
- out := make([]KeyValuePair, opts.Len())
- for i, o := range opts.GetAll() {
- k, v, err := parseLxcOpt(o)
- if err != nil {
- return nil, err
+// options will come in the format of name.key=value or name.option
+func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) {
+ out := make(map[string][]string, len(opts.GetAll()))
+ for _, o := range opts.GetAll() {
+ parts := strings.SplitN(o, ".", 2)
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid opt format %s", o)
+ } else if strings.TrimSpace(parts[0]) == "" {
+ return nil, fmt.Errorf("key cannot be empty %s", o)
}
- out[i] = KeyValuePair{Key: k, Value: v}
+ values, exists := out[parts[0]]
+ if !exists {
+ values = []string{}
+ }
+ out[parts[0]] = append(values, parts[1])
}
return out, nil
}
-func parseLxcOpt(opt string) (string, string, error) {
- parts := strings.SplitN(opt, "=", 2)
- if len(parts) != 2 {
- return "", "", fmt.Errorf("Unable to parse lxc conf option: %s", opt)
+func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) {
+ out := make([]utils.KeyValuePair, opts.Len())
+ for i, o := range opts.GetAll() {
+ k, v, err := utils.ParseKeyValueOpt(o)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = utils.KeyValuePair{Key: k, Value: v}
}
- return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+ return out, nil
}
diff --git a/runconfig/parse_test.go b/runconfig/parse_test.go
index 2b89e88ec3..fd28c4593e 100644
--- a/runconfig/parse_test.go
+++ b/runconfig/parse_test.go
@@ -1,6 +1,7 @@
package runconfig
import (
+ "github.com/dotcloud/docker/utils"
"testing"
)
@@ -8,7 +9,7 @@ func TestParseLxcConfOpt(t *testing.T) {
opts := []string{"lxc.utsname=docker", "lxc.utsname = docker "}
for _, o := range opts {
- k, v, err := parseLxcOpt(o)
+ k, v, err := utils.ParseKeyValueOpt(o)
if err != nil {
t.FailNow()
}
diff --git a/container.go b/runtime/container.go
index 50332f27de..c8053b146c 100644
--- a/container.go
+++ b/runtime/container.go
@@ -1,4 +1,4 @@
-package docker
+package runtime
import (
"encoding/json"
@@ -6,11 +6,12 @@ import (
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/engine"
- "github.com/dotcloud/docker/execdriver"
- "github.com/dotcloud/docker/graphdriver"
+ "github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/links"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/runtime/execdriver"
+ "github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@@ -23,7 +24,7 @@ import (
"time"
)
-const defaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
var (
ErrNotATTY = errors.New("The PTY is not a file")
@@ -173,7 +174,7 @@ func (container *Container) ToDisk() (err error) {
if err != nil {
return
}
- return container.writeHostConfig()
+ return container.WriteHostConfig()
}
func (container *Container) readHostConfig() error {
@@ -192,7 +193,7 @@ func (container *Container) readHostConfig() error {
return json.Unmarshal(data, container.hostConfig)
}
-func (container *Container) writeHostConfig() (err error) {
+func (container *Container) WriteHostConfig() (err error) {
data, err := json.Marshal(container.hostConfig)
if err != nil {
return
@@ -360,25 +361,27 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
func populateCommand(c *Container) {
var (
en *execdriver.Network
- driverConfig []string
+ driverConfig = make(map[string][]string)
)
+ en = &execdriver.Network{
+ Mtu: c.runtime.config.Mtu,
+ Interface: nil,
+ }
+
if !c.Config.NetworkDisabled {
network := c.NetworkSettings
- en = &execdriver.Network{
+ en.Interface = &execdriver.NetworkInterface{
Gateway: network.Gateway,
Bridge: network.Bridge,
IPAddress: network.IPAddress,
IPPrefixLen: network.IPPrefixLen,
- Mtu: c.runtime.config.Mtu,
}
}
- if lxcConf := c.hostConfig.LxcConf; lxcConf != nil {
- for _, pair := range lxcConf {
- driverConfig = append(driverConfig, fmt.Sprintf("%s = %s", pair.Key, pair.Value))
- }
- }
+ // TODO: this can be removed after lxc-conf is fully deprecated
+ mergeLxcConfIntoOptions(c.hostConfig, driverConfig)
+
resources := &execdriver.Resources{
Memory: c.Config.Memory,
MemorySwap: c.Config.MemorySwap,
@@ -401,12 +404,24 @@ func populateCommand(c *Container) {
c.command.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
}
+func (container *Container) ArgsAsString() string {
+ var args []string
+ for _, arg := range container.Args {
+ if strings.Contains(arg, " ") {
+ args = append(args, fmt.Sprintf("'%s'", arg))
+ } else {
+ args = append(args, arg)
+ }
+ }
+ return strings.Join(args, " ")
+}
+
func (container *Container) Start() (err error) {
container.Lock()
defer container.Unlock()
if container.State.IsRunning() {
- return fmt.Errorf("The container %s is already running.", container.ID)
+ return nil
}
defer func() {
@@ -415,6 +430,12 @@ func (container *Container) Start() (err error) {
}
}()
+ if container.ResolvConfPath == "" {
+ if err := container.setupContainerDns(); err != nil {
+ return err
+ }
+ }
+
if err := container.Mount(); err != nil {
return err
}
@@ -450,7 +471,7 @@ func (container *Container) Start() (err error) {
// Setup environment
env := []string{
"HOME=/",
- "PATH=" + defaultPathEnv,
+ "PATH=" + DefaultPathEnv,
"HOSTNAME=" + container.Config.Hostname,
}
@@ -518,8 +539,18 @@ func (container *Container) Start() (err error) {
if container.Config.WorkingDir != "" {
container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
- if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil {
- return nil
+
+ pthInfo, err := os.Stat(path.Join(container.basefs, container.Config.WorkingDir))
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return err
+ }
+ if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil {
+ return err
+ }
+ }
+ if pthInfo != nil && !pthInfo.IsDir() {
+ return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir)
}
}
@@ -528,13 +559,13 @@ func (container *Container) Start() (err error) {
return err
}
- if err := mountVolumesForContainer(container, envPath); err != nil {
- return err
- }
-
populateCommand(container)
container.command.Env = env
+ if err := setupMountsForContainer(container, envPath); err != nil {
+ return err
+ }
+
// Setup logging of stdout and stderr to disk
if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
return err
@@ -692,7 +723,7 @@ func (container *Container) allocateNetwork() error {
return err
}
container.Config.PortSpecs = nil
- if err := container.writeHostConfig(); err != nil {
+ if err := container.WriteHostConfig(); err != nil {
return err
}
}
@@ -750,7 +781,7 @@ func (container *Container) allocateNetwork() error {
}
bindings[port] = binding
}
- container.writeHostConfig()
+ container.WriteHostConfig()
container.NetworkSettings.Ports = bindings
@@ -784,7 +815,7 @@ func (container *Container) monitor(callback execdriver.StartCallback) error {
utils.Errorf("Error running container: %s", err)
}
- if container.runtime.srv.IsRunning() {
+ if container.runtime != nil && container.runtime.srv != nil && container.runtime.srv.IsRunning() {
container.State.SetStopped(exitCode)
// FIXME: there is a race condition here which causes this to fail during the unit tests.
@@ -842,14 +873,12 @@ func (container *Container) cleanup() {
}
}
- unmountVolumesForContainer(container)
-
if err := container.Unmount(); err != nil {
log.Printf("%v: Failed to umount filesystem: %v", container.ID, err)
}
}
-func (container *Container) kill(sig int) error {
+func (container *Container) KillSig(sig int) error {
container.Lock()
defer container.Unlock()
@@ -865,17 +894,14 @@ func (container *Container) Kill() error {
}
// 1. Send SIGKILL
- if err := container.kill(9); err != nil {
+ if err := container.KillSig(9); err != nil {
return err
}
// 2. Wait for the process to die, in last resort, try to kill the process directly
if err := container.WaitTimeout(10 * time.Second); err != nil {
- if container.command == nil {
- return fmt.Errorf("lxc-kill failed, impossible to kill the container %s", utils.TruncateID(container.ID))
- }
- log.Printf("Container %s failed to exit within 10 seconds of lxc-kill %s - trying direct SIGKILL", "SIGKILL", utils.TruncateID(container.ID))
- if err := container.runtime.Kill(container, 9); err != nil {
+ log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
+ if err := syscall.Kill(container.State.Pid, 9); err != nil {
return err
}
}
@@ -890,10 +916,9 @@ func (container *Container) Stop(seconds int) error {
}
// 1. Send a SIGTERM
- if err := container.kill(15); err != nil {
- utils.Debugf("Error sending kill SIGTERM: %s", err)
+ if err := container.KillSig(15); err != nil {
log.Print("Failed to send SIGTERM to the process, force killing")
- if err := container.kill(9); err != nil {
+ if err := container.KillSig(9); err != nil {
return err
}
}
@@ -946,10 +971,11 @@ func (container *Container) ExportRw() (archive.Archive, error) {
return nil, err
}
return utils.NewReadCloserWrapper(archive, func() error {
- err := archive.Close()
- container.Unmount()
- return err
- }), nil
+ err := archive.Close()
+ container.Unmount()
+ return err
+ }),
+ nil
}
func (container *Container) Export() (archive.Archive, error) {
@@ -963,10 +989,11 @@ func (container *Container) Export() (archive.Archive, error) {
return nil, err
}
return utils.NewReadCloserWrapper(archive, func() error {
- err := archive.Close()
- container.Unmount()
- return err
- }), nil
+ err := archive.Close()
+ container.Unmount()
+ return err
+ }),
+ nil
}
func (container *Container) WaitTimeout(timeout time.Duration) error {
@@ -992,7 +1019,7 @@ func (container *Container) Changes() ([]archive.Change, error) {
return container.runtime.Changes(container)
}
-func (container *Container) GetImage() (*Image, error) {
+func (container *Container) GetImage() (*image.Image, error) {
if container.runtime == nil {
return nil, fmt.Errorf("Can't get image of unregistered container")
}
@@ -1038,12 +1065,6 @@ func (container *Container) EnvConfigPath() (string, error) {
// This method must be exported to be used from the lxc template
// This directory is only usable when the container is running
func (container *Container) RootfsPath() string {
- return path.Join(container.root, "root")
-}
-
-// This is the stand-alone version of the root fs, without any additional mounts.
-// This directory is usable whenever the container is mounted (and not unmounted)
-func (container *Container) BasefsPath() string {
return container.basefs
}
@@ -1121,10 +1142,11 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) {
return nil, err
}
return utils.NewReadCloserWrapper(archive, func() error {
- err := archive.Close()
- container.Unmount()
- return err
- }), nil
+ err := archive.Close()
+ container.Unmount()
+ return err
+ }),
+ nil
}
// Returns true if the container exposes a certain port
@@ -1140,3 +1162,68 @@ func (container *Container) GetPtyMaster() (*os.File, error) {
}
return ttyConsole.Master(), nil
}
+
+func (container *Container) HostConfig() *runconfig.HostConfig {
+ return container.hostConfig
+}
+
+func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) {
+ container.hostConfig = hostConfig
+}
+
+func (container *Container) DisableLink(name string) {
+ if container.activeLinks != nil {
+ if link, exists := container.activeLinks[name]; exists {
+ link.Disable()
+ } else {
+ utils.Debugf("Could not find active link for %s", name)
+ }
+ }
+}
+
+func (container *Container) setupContainerDns() error {
+ var (
+ config = container.hostConfig
+ runtime = container.runtime
+ )
+ resolvConf, err := utils.GetResolvConf()
+ if err != nil {
+ return err
+ }
+ // If custom dns exists, then create a resolv.conf for the container
+ if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 || len(config.DnsSearch) > 0 || len(runtime.config.DnsSearch) > 0 {
+ var (
+ dns = utils.GetNameservers(resolvConf)
+ dnsSearch = utils.GetSearchDomains(resolvConf)
+ )
+ if len(config.Dns) > 0 {
+ dns = config.Dns
+ } else if len(runtime.config.Dns) > 0 {
+ dns = runtime.config.Dns
+ }
+ if len(config.DnsSearch) > 0 {
+ dnsSearch = config.DnsSearch
+ } else if len(runtime.config.DnsSearch) > 0 {
+ dnsSearch = runtime.config.DnsSearch
+ }
+ container.ResolvConfPath = path.Join(container.root, "resolv.conf")
+ f, err := os.Create(container.ResolvConfPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ for _, dns := range dns {
+ if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil {
+ return err
+ }
+ }
+ if len(dnsSearch) > 0 {
+ if _, err := f.Write([]byte("search " + strings.Join(dnsSearch, " ") + "\n")); err != nil {
+ return err
+ }
+ }
+ } else {
+ container.ResolvConfPath = "/etc/resolv.conf"
+ }
+ return nil
+}
diff --git a/container_unit_test.go b/runtime/container_unit_test.go
index 3877b7f0da..fba036ca50 100644
--- a/container_unit_test.go
+++ b/runtime/container_unit_test.go
@@ -1,4 +1,4 @@
-package docker
+package runtime
import (
"github.com/dotcloud/docker/nat"
@@ -132,14 +132,14 @@ func TestParseNetworkOptsUdp(t *testing.T) {
}
func TestGetFullName(t *testing.T) {
- name, err := getFullName("testing")
+ name, err := GetFullContainerName("testing")
if err != nil {
t.Fatal(err)
}
if name != "/testing" {
t.Fatalf("Expected /testing got %s", name)
}
- if _, err := getFullName(""); err == nil {
+ if _, err := GetFullContainerName(""); err == nil {
t.Fatal("Error should not be nil")
}
}
diff --git a/execdriver/MAINTAINERS b/runtime/execdriver/MAINTAINERS
index e53d933d47..1cb551364d 100644
--- a/execdriver/MAINTAINERS
+++ b/runtime/execdriver/MAINTAINERS
@@ -1,2 +1,2 @@
Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
-Guillaume Charmes <guillaume@dotcloud.com> (@creack)
+Guillaume J. Charmes <guillaume@docker.com> (@creack)
diff --git a/execdriver/driver.go b/runtime/execdriver/driver.go
index ec8f48f52d..27a575cb3a 100644
--- a/execdriver/driver.go
+++ b/runtime/execdriver/driver.go
@@ -7,6 +7,10 @@ import (
"os/exec"
)
+// Context is a generic key value pair that allows
+// arbitrary data to be sent
+type Context map[string]string
+
var (
ErrNotRunning = errors.New("Process could not be started")
ErrWaitTimeoutReached = errors.New("Wait timeout reached")
@@ -80,15 +84,20 @@ type Driver interface {
Name() string // Driver name
Info(id string) Info // "temporary" hack (until we move state from core to plugins)
GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container.
+ Terminate(c *Command) error // kill it with fire
}
// Network settings of the container
type Network struct {
+ Interface *NetworkInterface `json:"interface"` // if interface is nil then networking is disabled
+ Mtu int `json:"mtu"`
+}
+
+type NetworkInterface struct {
Gateway string `json:"gateway"`
IPAddress string `json:"ip"`
Bridge string `json:"bridge"`
IPPrefixLen int `json:"ip_prefix_len"`
- Mtu int `json:"mtu"`
}
type Resources struct {
@@ -97,23 +106,31 @@ type Resources struct {
CpuShares int64 `json:"cpu_shares"`
}
+type Mount struct {
+ Source string `json:"source"`
+ Destination string `json:"destination"`
+ Writable bool `json:"writable"`
+ Private bool `json:"private"`
+}
+
// Process wrapps an os/exec.Cmd to add more metadata
type Command struct {
exec.Cmd `json:"-"`
- ID string `json:"id"`
- Privileged bool `json:"privileged"`
- User string `json:"user"`
- Rootfs string `json:"rootfs"` // root fs of the container
- InitPath string `json:"initpath"` // dockerinit
- Entrypoint string `json:"entrypoint"`
- Arguments []string `json:"arguments"`
- WorkingDir string `json:"working_dir"`
- ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
- Tty bool `json:"tty"`
- Network *Network `json:"network"` // if network is nil then networking is disabled
- Config []string `json:"config"` // generic values that specific drivers can consume
- Resources *Resources `json:"resources"`
+ ID string `json:"id"`
+ Privileged bool `json:"privileged"`
+ User string `json:"user"`
+ Rootfs string `json:"rootfs"` // root fs of the container
+ InitPath string `json:"initpath"` // dockerinit
+ Entrypoint string `json:"entrypoint"`
+ Arguments []string `json:"arguments"`
+ WorkingDir string `json:"working_dir"`
+ ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
+ Tty bool `json:"tty"`
+ Network *Network `json:"network"`
+ Config map[string][]string `json:"config"` // generic values that specific drivers can consume
+ Resources *Resources `json:"resources"`
+ Mounts []Mount `json:"mounts"`
Terminal Terminal `json:"-"` // standard or tty terminal
Console string `json:"-"` // dev/console path
diff --git a/runtime/execdriver/execdrivers/execdrivers.go b/runtime/execdriver/execdrivers/execdrivers.go
new file mode 100644
index 0000000000..9e277c86df
--- /dev/null
+++ b/runtime/execdriver/execdrivers/execdrivers.go
@@ -0,0 +1,23 @@
+package execdrivers
+
+import (
+ "fmt"
+ "github.com/dotcloud/docker/pkg/sysinfo"
+ "github.com/dotcloud/docker/runtime/execdriver"
+ "github.com/dotcloud/docker/runtime/execdriver/lxc"
+ "github.com/dotcloud/docker/runtime/execdriver/native"
+ "path"
+)
+
+func NewDriver(name, root, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) {
+ switch name {
+ case "lxc":
+ // we want to give the lxc driver the full docker root because it needs
+ // to access and write config and template files in /var/lib/docker/containers/*
+ // to be backwards compatible
+ return lxc.NewDriver(root, sysInfo.AppArmor)
+ case "native":
+ return native.NewDriver(path.Join(root, "execdriver", "native"), initPath)
+ }
+ return nil, fmt.Errorf("unknown exec driver %s", name)
+}
diff --git a/execdriver/lxc/driver.go b/runtime/execdriver/lxc/driver.go
index 765a52ee43..ef16dcc380 100644
--- a/execdriver/lxc/driver.go
+++ b/runtime/execdriver/lxc/driver.go
@@ -2,8 +2,9 @@ package lxc
import (
"fmt"
- "github.com/dotcloud/docker/execdriver"
"github.com/dotcloud/docker/pkg/cgroups"
+ "github.com/dotcloud/docker/pkg/label"
+ "github.com/dotcloud/docker/runtime/execdriver"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"log"
@@ -21,6 +22,10 @@ const DriverName = "lxc"
func init() {
execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error {
+ if err := setupEnv(args); err != nil {
+ return err
+ }
+
if err := setupHostname(args); err != nil {
return err
}
@@ -94,13 +99,15 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
DriverName,
}
- if c.Network != nil {
+ if c.Network.Interface != nil {
params = append(params,
- "-g", c.Network.Gateway,
- "-i", fmt.Sprintf("%s/%d", c.Network.IPAddress, c.Network.IPPrefixLen),
- "-mtu", strconv.Itoa(c.Network.Mtu),
+ "-g", c.Network.Interface.Gateway,
+ "-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
)
}
+ params = append(params,
+ "-mtu", strconv.Itoa(c.Network.Mtu),
+ )
if c.User != "" {
params = append(params, "-u", c.User)
@@ -168,6 +175,9 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
// Poll lxc for RUNNING status
pid, err := d.waitForStart(c, waitLock)
if err != nil {
+ if c.Process != nil {
+ c.Process.Kill()
+ }
return -1, err
}
c.ContainerPid = pid
@@ -194,6 +204,10 @@ func (d *driver) Kill(c *execdriver.Command, sig int) error {
return KillLxc(c.ID, sig)
}
+func (d *driver) Terminate(c *execdriver.Command) error {
+ return KillLxc(c.ID, 9)
+}
+
func (d *driver) version() string {
var (
version string
@@ -369,19 +383,34 @@ func rootIsShared() bool {
}
func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) {
- root := path.Join(d.root, "containers", c.ID, "config.lxc")
+ var (
+ process, mount string
+ root = path.Join(d.root, "containers", c.ID, "config.lxc")
+ labels = c.Config["label"]
+ )
fo, err := os.Create(root)
if err != nil {
return "", err
}
defer fo.Close()
+ if len(labels) > 0 {
+ process, mount, err = label.GenLabels(labels[0])
+ if err != nil {
+ return "", err
+ }
+ }
+
if err := LxcTemplateCompiled.Execute(fo, struct {
*execdriver.Command
- AppArmor bool
+ AppArmor bool
+ ProcessLabel string
+ MountLabel string
}{
- Command: c,
- AppArmor: d.apparmor,
+ Command: c,
+ AppArmor: d.apparmor,
+ ProcessLabel: process,
+ MountLabel: mount,
}); err != nil {
return "", err
}
diff --git a/execdriver/lxc/info.go b/runtime/execdriver/lxc/info.go
index 3b2ea0d07f..27b4c58604 100644
--- a/execdriver/lxc/info.go
+++ b/runtime/execdriver/lxc/info.go
@@ -36,7 +36,7 @@ func parseLxcInfo(raw string) (*lxcInfo, error) {
if len(parts) < 2 {
continue
}
- switch strings.TrimSpace(parts[0]) {
+ switch strings.ToLower(strings.TrimSpace(parts[0])) {
case "state":
info.Running = strings.TrimSpace(parts[1]) == "RUNNING"
case "pid":
diff --git a/execdriver/lxc/info_test.go b/runtime/execdriver/lxc/info_test.go
index edafc02511..edafc02511 100644
--- a/execdriver/lxc/info_test.go
+++ b/runtime/execdriver/lxc/info_test.go
diff --git a/execdriver/lxc/init.go b/runtime/execdriver/lxc/init.go
index e138915212..c1933a5e43 100644
--- a/execdriver/lxc/init.go
+++ b/runtime/execdriver/lxc/init.go
@@ -1,17 +1,47 @@
package lxc
import (
+ "encoding/json"
"fmt"
- "github.com/dotcloud/docker/execdriver"
"github.com/dotcloud/docker/pkg/netlink"
"github.com/dotcloud/docker/pkg/user"
+ "github.com/dotcloud/docker/runtime/execdriver"
"github.com/syndtr/gocapability/capability"
+ "io/ioutil"
"net"
"os"
"strings"
"syscall"
)
+// Clear environment pollution introduced by lxc-start
+func setupEnv(args *execdriver.InitArgs) error {
+ // Get env
+ var env []string
+ content, err := ioutil.ReadFile(".dockerenv")
+ if err != nil {
+ return fmt.Errorf("Unable to load environment variables: %v", err)
+ }
+ if err := json.Unmarshal(content, &env); err != nil {
+ return fmt.Errorf("Unable to unmarshal environment variables: %v", err)
+ }
+ // Propagate the plugin-specific container env variable
+ env = append(env, "container="+os.Getenv("container"))
+
+ args.Env = env
+
+ os.Clearenv()
+ for _, kv := range args.Env {
+ parts := strings.SplitN(kv, "=", 2)
+ if len(parts) == 1 {
+ parts = append(parts, "")
+ }
+ os.Setenv(parts[0], parts[1])
+ }
+
+ return nil
+}
+
func setupHostname(args *execdriver.InitArgs) error {
hostname := getEnv(args, "HOSTNAME")
if hostname == "" {
@@ -114,7 +144,6 @@ func setupCapabilities(args *execdriver.InitArgs) error {
capability.CAP_SYS_RESOURCE,
capability.CAP_SYS_TIME,
capability.CAP_SYS_TTY_CONFIG,
- capability.CAP_MKNOD,
capability.CAP_AUDIT_WRITE,
capability.CAP_AUDIT_CONTROL,
capability.CAP_MAC_OVERRIDE,
diff --git a/execdriver/lxc/lxc_init_linux.go b/runtime/execdriver/lxc/lxc_init_linux.go
index 7288f5877b..7288f5877b 100644
--- a/execdriver/lxc/lxc_init_linux.go
+++ b/runtime/execdriver/lxc/lxc_init_linux.go
diff --git a/execdriver/lxc/lxc_init_unsupported.go b/runtime/execdriver/lxc/lxc_init_unsupported.go
index d68cb91a1e..d68cb91a1e 100644
--- a/execdriver/lxc/lxc_init_unsupported.go
+++ b/runtime/execdriver/lxc/lxc_init_unsupported.go
diff --git a/execdriver/lxc/lxc_template.go b/runtime/execdriver/lxc/lxc_template.go
index 1181396a18..c49753c6aa 100644
--- a/execdriver/lxc/lxc_template.go
+++ b/runtime/execdriver/lxc/lxc_template.go
@@ -1,23 +1,24 @@
package lxc
import (
- "github.com/dotcloud/docker/execdriver"
+ "github.com/dotcloud/docker/pkg/label"
+ "github.com/dotcloud/docker/runtime/execdriver"
"strings"
"text/template"
)
const LxcTemplate = `
-{{if .Network}}
+{{if .Network.Interface}}
# network configuration
lxc.network.type = veth
-lxc.network.link = {{.Network.Bridge}}
+lxc.network.link = {{.Network.Interface.Bridge}}
lxc.network.name = eth0
-lxc.network.mtu = {{.Network.Mtu}}
{{else}}
# network is disabled (-n=false)
lxc.network.type = empty
lxc.network.flags = up
{{end}}
+lxc.network.mtu = {{.Network.Mtu}}
# root filesystem
{{$ROOTFS := .Rootfs}}
@@ -29,6 +30,10 @@ lxc.pts = 1024
# disable the main console
lxc.console = none
+{{if .ProcessLabel}}
+lxc.se_context = {{ .ProcessLabel}}
+{{end}}
+{{$MOUNTLABEL := .MountLabel}}
# no controlling tty at all
lxc.tty = 1
@@ -39,6 +44,10 @@ lxc.cgroup.devices.allow = a
# no implicit access to devices
lxc.cgroup.devices.deny = a
+# but allow mknod for any device
+lxc.cgroup.devices.allow = c *:* m
+lxc.cgroup.devices.allow = b *:* m
+
# /dev/null and zero
lxc.cgroup.devices.allow = c 1:3 rwm
lxc.cgroup.devices.allow = c 1:5 rwm
@@ -85,8 +94,16 @@ lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noe
lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0
{{end}}
-lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts newinstance,ptmxmode=0666,nosuid,noexec 0 0
-lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs size=65536k,nosuid,nodev,noexec 0 0
+lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMountLabel "newinstance,ptmxmode=0666,nosuid,noexec" $MOUNTLABEL}} 0 0
+lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" $MOUNTLABEL}} 0 0
+
+{{range $value := .Mounts}}
+{{if $value.Writable}}
+lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,rw 0 0
+{{else}}
+lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabSpaces $value.Destination}} none bind,ro 0 0
+{{end}}
+{{end}}
{{if .Privileged}}
{{if .AppArmor}}
@@ -110,9 +127,9 @@ lxc.cgroup.cpu.shares = {{.Resources.CpuShares}}
{{end}}
{{end}}
-{{if .Config}}
-{{range $value := .Config}}
-{{$value}}
+{{if .Config.lxc}}
+{{range $value := .Config.lxc}}
+lxc.{{$value}}
{{end}}
{{end}}
`
@@ -134,11 +151,23 @@ func getMemorySwap(v *execdriver.Resources) int64 {
return v.Memory * 2
}
+func getLabel(c map[string][]string, name string) string {
+ label := c["label"]
+ for _, l := range label {
+ parts := strings.SplitN(l, "=", 2)
+ if strings.TrimSpace(parts[0]) == name {
+ return strings.TrimSpace(parts[1])
+ }
+ }
+ return ""
+}
+
func init() {
var err error
funcMap := template.FuncMap{
"getMemorySwap": getMemorySwap,
"escapeFstabSpaces": escapeFstabSpaces,
+ "formatMountLabel": label.FormatMountLabel,
}
LxcTemplateCompiled, err = template.New("lxc").Funcs(funcMap).Parse(LxcTemplate)
if err != nil {
diff --git a/execdriver/lxc/lxc_template_unit_test.go b/runtime/execdriver/lxc/lxc_template_unit_test.go
index 99d6e636f5..7f473a0502 100644
--- a/execdriver/lxc/lxc_template_unit_test.go
+++ b/runtime/execdriver/lxc/lxc_template_unit_test.go
@@ -3,7 +3,7 @@ package lxc
import (
"bufio"
"fmt"
- "github.com/dotcloud/docker/execdriver"
+ "github.com/dotcloud/docker/runtime/execdriver"
"io/ioutil"
"math/rand"
"os"
@@ -43,6 +43,10 @@ func TestLXCConfig(t *testing.T) {
Memory: int64(mem),
CpuShares: int64(cpu),
},
+ Network: &execdriver.Network{
+ Mtu: 1500,
+ Interface: nil,
+ },
}
p, err := driver.generateLXCConfig(command)
if err != nil {
@@ -71,9 +75,15 @@ func TestCustomLxcConfig(t *testing.T) {
command := &execdriver.Command{
ID: "1",
Privileged: false,
- Config: []string{
- "lxc.utsname = docker",
- "lxc.cgroup.cpuset.cpus = 0,1",
+ Config: map[string][]string{
+ "lxc": {
+ "lxc.utsname = docker",
+ "lxc.cgroup.cpuset.cpus = 0,1",
+ },
+ },
+ Network: &execdriver.Network{
+ Mtu: 1500,
+ Interface: nil,
},
}
diff --git a/runtime/execdriver/native/configuration/parse.go b/runtime/execdriver/native/configuration/parse.go
new file mode 100644
index 0000000000..6d6c643919
--- /dev/null
+++ b/runtime/execdriver/native/configuration/parse.go
@@ -0,0 +1,186 @@
+package configuration
+
+import (
+ "fmt"
+ "github.com/dotcloud/docker/pkg/libcontainer"
+ "github.com/dotcloud/docker/utils"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+type Action func(*libcontainer.Container, interface{}, string) error
+
+var actions = map[string]Action{
+ "cap.add": addCap, // add a cap
+ "cap.drop": dropCap, // drop a cap
+
+ "ns.add": addNamespace, // add a namespace
+ "ns.drop": dropNamespace, // drop a namespace when cloning
+
+ "net.join": joinNetNamespace, // join another containers net namespace
+
+ "cgroups.cpu_shares": cpuShares, // set the cpu shares
+ "cgroups.memory": memory, // set the memory limit
+ "cgroups.memory_swap": memorySwap, // set the memory swap limit
+ "cgroups.cpuset.cpus": cpusetCpus, // set the cpus used
+
+ "apparmor_profile": apparmorProfile, // set the apparmor profile to apply
+
+ "fs.readonly": readonlyFs, // make the rootfs of the container read only
+}
+
+func cpusetCpus(container *libcontainer.Container, context interface{}, value string) error {
+ if container.Cgroups == nil {
+ return fmt.Errorf("cannot set cgroups when they are disabled")
+ }
+ container.Cgroups.CpusetCpus = value
+
+ return nil
+}
+
+func apparmorProfile(container *libcontainer.Container, context interface{}, value string) error {
+ container.Context["apparmor_profile"] = value
+ return nil
+}
+
+func cpuShares(container *libcontainer.Container, context interface{}, value string) error {
+ if container.Cgroups == nil {
+ return fmt.Errorf("cannot set cgroups when they are disabled")
+ }
+ v, err := strconv.ParseInt(value, 10, 0)
+ if err != nil {
+ return err
+ }
+ container.Cgroups.CpuShares = v
+ return nil
+}
+
+func memory(container *libcontainer.Container, context interface{}, value string) error {
+ if container.Cgroups == nil {
+ return fmt.Errorf("cannot set cgroups when they are disabled")
+ }
+
+ v, err := utils.RAMInBytes(value)
+ if err != nil {
+ return err
+ }
+ container.Cgroups.Memory = v
+ return nil
+}
+
+func memorySwap(container *libcontainer.Container, context interface{}, value string) error {
+ if container.Cgroups == nil {
+ return fmt.Errorf("cannot set cgroups when they are disabled")
+ }
+ v, err := strconv.ParseInt(value, 0, 64)
+ if err != nil {
+ return err
+ }
+ container.Cgroups.MemorySwap = v
+ return nil
+}
+
+func addCap(container *libcontainer.Container, context interface{}, value string) error {
+ c := container.CapabilitiesMask.Get(value)
+ if c == nil {
+ return fmt.Errorf("%s is not a valid capability", value)
+ }
+ c.Enabled = true
+ return nil
+}
+
+func dropCap(container *libcontainer.Container, context interface{}, value string) error {
+ c := container.CapabilitiesMask.Get(value)
+ if c == nil {
+ return fmt.Errorf("%s is not a valid capability", value)
+ }
+ c.Enabled = false
+ return nil
+}
+
+func addNamespace(container *libcontainer.Container, context interface{}, value string) error {
+ ns := container.Namespaces.Get(value)
+ if ns == nil {
+ return fmt.Errorf("%s is not a valid namespace", value[1:])
+ }
+ ns.Enabled = true
+ return nil
+}
+
+func dropNamespace(container *libcontainer.Container, context interface{}, value string) error {
+ ns := container.Namespaces.Get(value)
+ if ns == nil {
+ return fmt.Errorf("%s is not a valid namespace", value[1:])
+ }
+ ns.Enabled = false
+ return nil
+}
+
+func readonlyFs(container *libcontainer.Container, context interface{}, value string) error {
+ switch value {
+ case "1", "true":
+ container.ReadonlyFs = true
+ default:
+ container.ReadonlyFs = false
+ }
+ return nil
+}
+
+func joinNetNamespace(container *libcontainer.Container, context interface{}, value string) error {
+ var (
+ running = context.(map[string]*exec.Cmd)
+ cmd = running[value]
+ )
+
+ if cmd == nil || cmd.Process == nil {
+ return fmt.Errorf("%s is not a valid running container to join", value)
+ }
+ nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
+ container.Networks = append(container.Networks, &libcontainer.Network{
+ Type: "netns",
+ Context: libcontainer.Context{
+ "nspath": nspath,
+ },
+ })
+ return nil
+}
+
+func vethMacAddress(container *libcontainer.Container, context interface{}, value string) error {
+ var veth *libcontainer.Network
+ for _, network := range container.Networks {
+ if network.Type == "veth" {
+ veth = network
+ break
+ }
+ }
+ if veth == nil {
+ return fmt.Errorf("not veth configured for container")
+ }
+ veth.Context["mac"] = value
+ return nil
+}
+
+// ParseConfiguration takes string commands from the user and allows modification of the
+// container's default configuration.
+//
+// TODO: this can be moved to a general utils or parser in pkg
+func ParseConfiguration(container *libcontainer.Container, running map[string]*exec.Cmd, opts []string) error {
+ for _, opt := range opts {
+ kv := strings.SplitN(opt, "=", 2)
+ if len(kv) < 2 {
+ return fmt.Errorf("invalid format for %s", opt)
+ }
+
+ action, exists := actions[kv[0]]
+ if !exists {
+ return fmt.Errorf("%s is not a valid option for the native driver", kv[0])
+ }
+
+ if err := action(container, running, kv[1]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/runtime/execdriver/native/configuration/parse_test.go b/runtime/execdriver/native/configuration/parse_test.go
new file mode 100644
index 0000000000..8001358766
--- /dev/null
+++ b/runtime/execdriver/native/configuration/parse_test.go
@@ -0,0 +1,166 @@
+package configuration
+
+import (
+ "github.com/dotcloud/docker/runtime/execdriver/native/template"
+ "testing"
+)
+
+func TestSetReadonlyRootFs(t *testing.T) {
+ var (
+ container = template.New()
+ opts = []string{
+ "fs.readonly=true",
+ }
+ )
+
+ if container.ReadonlyFs {
+ t.Fatal("container should not have a readonly rootfs by default")
+ }
+ if err := ParseConfiguration(container, nil, opts); err != nil {
+ t.Fatal(err)
+ }
+
+ if !container.ReadonlyFs {
+ t.Fatal("container should have a readonly rootfs")
+ }
+}
+
+func TestConfigurationsDoNotConflict(t *testing.T) {
+ var (
+ container1 = template.New()
+ container2 = template.New()
+ opts = []string{
+ "cap.add=NET_ADMIN",
+ }
+ )
+
+ if err := ParseConfiguration(container1, nil, opts); err != nil {
+ t.Fatal(err)
+ }
+
+ if !container1.CapabilitiesMask.Get("NET_ADMIN").Enabled {
+ t.Fatal("container one should have NET_ADMIN enabled")
+ }
+ if container2.CapabilitiesMask.Get("NET_ADMIN").Enabled {
+ t.Fatal("container two should not have NET_ADMIN enabled")
+ }
+}
+
+func TestCpusetCpus(t *testing.T) {
+ var (
+ container = template.New()
+ opts = []string{
+ "cgroups.cpuset.cpus=1,2",
+ }
+ )
+ if err := ParseConfiguration(container, nil, opts); err != nil {
+ t.Fatal(err)
+ }
+
+ if expected := "1,2"; container.Cgroups.CpusetCpus != expected {
+ t.Fatalf("expected %s got %s for cpuset.cpus", expected, container.Cgroups.CpusetCpus)
+ }
+}
+
+func TestAppArmorProfile(t *testing.T) {
+ var (
+ container = template.New()
+ opts = []string{
+ "apparmor_profile=koye-the-protector",
+ }
+ )
+ if err := ParseConfiguration(container, nil, opts); err != nil {
+ t.Fatal(err)
+ }
+ if expected := "koye-the-protector"; container.Context["apparmor_profile"] != expected {
+ t.Fatalf("expected profile %s got %s", expected, container.Context["apparmor_profile"])
+ }
+}
+
+func TestCpuShares(t *testing.T) {
+ var (
+ container = template.New()
+ opts = []string{
+ "cgroups.cpu_shares=1048",
+ }
+ )
+ if err := ParseConfiguration(container, nil, opts); err != nil {
+ t.Fatal(err)
+ }
+
+ if expected := int64(1048); container.Cgroups.CpuShares != expected {
+ t.Fatalf("expected cpu shares %d got %d", expected, container.Cgroups.CpuShares)
+ }
+}
+
+func TestCgroupMemory(t *testing.T) {
+ var (
+ container = template.New()
+ opts = []string{
+ "cgroups.memory=500m",
+ }
+ )
+ if err := ParseConfiguration(container, nil, opts); err != nil {
+ t.Fatal(err)
+ }
+
+ if expected := int64(500 * 1024 * 1024); container.Cgroups.Memory != expected {
+ t.Fatalf("expected memory %d got %d", expected, container.Cgroups.Memory)
+ }
+}
+
+func TestAddCap(t *testing.T) {
+ var (
+ container = template.New()
+ opts = []string{
+ "cap.add=MKNOD",
+ "cap.add=SYS_ADMIN",
+ }
+ )
+ if err := ParseConfiguration(container, nil, opts); err != nil {
+ t.Fatal(err)
+ }
+
+ if !container.CapabilitiesMask.Get("MKNOD").Enabled {
+ t.Fatal("container should have MKNOD enabled")
+ }
+ if !container.CapabilitiesMask.Get("SYS_ADMIN").Enabled {
+ t.Fatal("container should have SYS_ADMIN enabled")
+ }
+}
+
+func TestDropCap(t *testing.T) {
+ var (
+ container = template.New()
+ opts = []string{
+ "cap.drop=MKNOD",
+ }
+ )
+ // enabled all caps like in privileged mode
+ for _, c := range container.CapabilitiesMask {
+ c.Enabled = true
+ }
+ if err := ParseConfiguration(container, nil, opts); err != nil {
+ t.Fatal(err)
+ }
+
+ if container.CapabilitiesMask.Get("MKNOD").Enabled {
+ t.Fatal("container should not have MKNOD enabled")
+ }
+}
+
+func TestDropNamespace(t *testing.T) {
+ var (
+ container = template.New()
+ opts = []string{
+ "ns.drop=NEWNET",
+ }
+ )
+ if err := ParseConfiguration(container, nil, opts); err != nil {
+ t.Fatal(err)
+ }
+
+ if container.Namespaces.Get("NEWNET").Enabled {
+ t.Fatal("container should not have NEWNET enabled")
+ }
+}
diff --git a/runtime/execdriver/native/create.go b/runtime/execdriver/native/create.go
new file mode 100644
index 0000000000..71fab3e064
--- /dev/null
+++ b/runtime/execdriver/native/create.go
@@ -0,0 +1,114 @@
+package native
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/dotcloud/docker/pkg/label"
+ "github.com/dotcloud/docker/pkg/libcontainer"
+ "github.com/dotcloud/docker/runtime/execdriver"
+ "github.com/dotcloud/docker/runtime/execdriver/native/configuration"
+ "github.com/dotcloud/docker/runtime/execdriver/native/template"
+)
+
// createContainer populates and configures the container type with the
// data provided by the execdriver.Command
func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container, error) {
	// Start from the docker default template and layer the command's
	// settings on top of it.
	container := template.New()

	container.Hostname = getEnv("HOSTNAME", c.Env)
	container.Tty = c.Tty
	container.User = c.User
	container.WorkingDir = c.WorkingDir
	container.Env = c.Env
	container.Cgroups.Name = c.ID
	// check to see if we are running in ramdisk to disable pivot root
	container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""

	if err := d.createNetwork(container, c); err != nil {
		return nil, err
	}
	// Privileged containers get all capabilities, device access, and an
	// unconfined apparmor profile (see setPrivileged).
	if c.Privileged {
		if err := d.setPrivileged(container); err != nil {
			return nil, err
		}
	}
	if err := d.setupCgroups(container, c); err != nil {
		return nil, err
	}
	if err := d.setupMounts(container, c); err != nil {
		return nil, err
	}
	if err := d.setupLabels(container, c); err != nil {
		return nil, err
	}
	// User-supplied "native" options run last so they can override the
	// defaults configured above.
	if err := configuration.ParseConfiguration(container, d.activeContainers, c.Config["native"]); err != nil {
		return nil, err
	}
	return container, nil
}
+
+func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.Command) error {
+ container.Networks = []*libcontainer.Network{
+ {
+ Mtu: c.Network.Mtu,
+ Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0),
+ Gateway: "localhost",
+ Type: "loopback",
+ Context: libcontainer.Context{},
+ },
+ }
+
+ if c.Network.Interface != nil {
+ vethNetwork := libcontainer.Network{
+ Mtu: c.Network.Mtu,
+ Address: fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
+ Gateway: c.Network.Interface.Gateway,
+ Type: "veth",
+ Context: libcontainer.Context{
+ "prefix": "veth",
+ "bridge": c.Network.Interface.Bridge,
+ },
+ }
+ container.Networks = append(container.Networks, &vethNetwork)
+ }
+ return nil
+}
+
+func (d *driver) setPrivileged(container *libcontainer.Container) error {
+ for _, c := range container.CapabilitiesMask {
+ c.Enabled = true
+ }
+ container.Cgroups.DeviceAccess = true
+ container.Context["apparmor_profile"] = "unconfined"
+ return nil
+}
+
+func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.Command) error {
+ if c.Resources != nil {
+ container.Cgroups.CpuShares = c.Resources.CpuShares
+ container.Cgroups.Memory = c.Resources.Memory
+ container.Cgroups.MemorySwap = c.Resources.MemorySwap
+ }
+ return nil
+}
+
+func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Command) error {
+ for _, m := range c.Mounts {
+ container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private})
+ }
+ return nil
+}
+
+func (d *driver) setupLabels(container *libcontainer.Container, c *execdriver.Command) error {
+ labels := c.Config["label"]
+ if len(labels) > 0 {
+ process, mount, err := label.GenLabels(labels[0])
+ if err != nil {
+ return err
+ }
+ container.Context["mount_label"] = mount
+ container.Context["process_label"] = process
+ }
+ return nil
+}
diff --git a/execdriver/native/driver.go b/runtime/execdriver/native/driver.go
index 452e802523..d18865e508 100644
--- a/execdriver/native/driver.go
+++ b/runtime/execdriver/native/driver.go
@@ -3,13 +3,15 @@ package native
import (
"encoding/json"
"fmt"
- "github.com/dotcloud/docker/execdriver"
"github.com/dotcloud/docker/pkg/cgroups"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/apparmor"
"github.com/dotcloud/docker/pkg/libcontainer/nsinit"
"github.com/dotcloud/docker/pkg/system"
+ "github.com/dotcloud/docker/runtime/execdriver"
+ "io"
"io/ioutil"
+ "log"
"os"
"os/exec"
"path/filepath"
@@ -19,15 +21,16 @@ import (
)
const (
- DriverName = "native"
- Version = "0.1"
+ DriverName = "native"
+ Version = "0.1"
+ BackupApparmorProfilePath = "apparmor/docker.back" // relative to docker root
)
func init() {
execdriver.RegisterInitFunc(DriverName, func(args *execdriver.InitArgs) error {
var (
container *libcontainer.Container
- ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root})
+ ns = nsinit.NewNsInit(&nsinit.DefaultCommandFactory{}, &nsinit.DefaultStateWriter{args.Root}, createLogger(""))
)
f, err := os.Open(filepath.Join(args.Root, "container.json"))
if err != nil {
@@ -55,35 +58,43 @@ func init() {
}
type driver struct {
- root string
+ root string
+ initPath string
+ activeContainers map[string]*exec.Cmd
}
-func NewDriver(root string) (*driver, error) {
+func NewDriver(root, initPath string) (*driver, error) {
if err := os.MkdirAll(root, 0700); err != nil {
return nil, err
}
- if err := apparmor.InstallDefaultProfile(); err != nil {
+ // native driver root is at docker_root/execdriver/native. Put apparmor at docker_root
+ if err := apparmor.InstallDefaultProfile(filepath.Join(root, "../..", BackupApparmorProfilePath)); err != nil {
return nil, err
}
return &driver{
- root: root,
+ root: root,
+ initPath: initPath,
+ activeContainers: make(map[string]*exec.Cmd),
}, nil
}
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
- if err := d.validateCommand(c); err != nil {
+ // take the Command and populate the libcontainer.Container from it
+ container, err := d.createContainer(c)
+ if err != nil {
return -1, err
}
+ d.activeContainers[c.ID] = &c.Cmd
+
var (
term nsinit.Terminal
- container = createContainer(c)
factory = &dockerCommandFactory{c: c, driver: d}
stateWriter = &dockerStateWriter{
callback: startCallback,
c: c,
dsw: &nsinit.DefaultStateWriter{filepath.Join(d.root, c.ID)},
}
- ns = nsinit.NewNsInit(factory, stateWriter)
+ ns = nsinit.NewNsInit(factory, stateWriter, createLogger(os.Getenv("DEBUG")))
args = append([]string{c.Entrypoint}, c.Arguments...)
)
if err := d.createContainerRoot(c.ID); err != nil {
@@ -108,9 +119,39 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba
}
func (d *driver) Kill(p *execdriver.Command, sig int) error {
- err := syscall.Kill(p.Process.Pid, syscall.Signal(sig))
+ return syscall.Kill(p.Process.Pid, syscall.Signal(sig))
+}
+
// Terminate force-kills the container's process and removes its state
// directory, guarding against pid reuse via the recorded start time.
func (d *driver) Terminate(p *execdriver.Command) error {
	// lets check the start time for the process
	started, err := d.readStartTime(p)
	if err != nil {
		// if we don't have the data on disk then we can assume the process is gone
		// because this is only removed after we know the process has stopped
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	currentStartTime, err := system.GetProcessStartTime(p.Process.Pid)
	if err != nil {
		return err
	}
	// Only SIGKILL when the recorded start time matches the live process;
	// otherwise the pid has been recycled by an unrelated process.
	if started == currentStartTime {
		err = syscall.Kill(p.Process.Pid, 9)
	}
	d.removeContainerRoot(p.ID)
	return err

}
+
+func (d *driver) readStartTime(p *execdriver.Command) (string, error) {
+ data, err := ioutil.ReadFile(filepath.Join(d.root, p.ID, "start"))
+ if err != nil {
+ return "", err
+ }
+ return string(data), nil
}
func (d *driver) Info(id string) execdriver.Info {
@@ -177,17 +218,6 @@ func (d *driver) removeContainerRoot(id string) error {
return os.RemoveAll(filepath.Join(d.root, id))
}
-func (d *driver) validateCommand(c *execdriver.Command) error {
- // we need to check the Config of the command to make sure that we
- // do not have any of the lxc-conf variables
- for _, conf := range c.Config {
- if strings.Contains(conf, "lxc") {
- return fmt.Errorf("%s is not supported by the native driver", conf)
- }
- }
- return nil
-}
-
func getEnv(key string, env []string) string {
for _, pair := range env {
parts := strings.Split(pair, "=")
@@ -210,7 +240,7 @@ func (d *dockerCommandFactory) Create(container *libcontainer.Container, console
// we need to join the rootfs because nsinit will setup the rootfs and chroot
initPath := filepath.Join(d.c.Rootfs, d.c.InitPath)
- d.c.Path = initPath
+ d.c.Path = d.driver.initPath
d.c.Args = append([]string{
initPath,
"-driver", DriverName,
@@ -237,9 +267,9 @@ type dockerStateWriter struct {
callback execdriver.StartCallback
}
-func (d *dockerStateWriter) WritePid(pid int) error {
+func (d *dockerStateWriter) WritePid(pid int, started string) error {
d.c.ContainerPid = pid
- err := d.dsw.WritePid(pid)
+ err := d.dsw.WritePid(pid, started)
if d.callback != nil {
d.callback(d.c)
}
@@ -249,3 +279,14 @@ func (d *dockerStateWriter) WritePid(pid int) error {
func (d *dockerStateWriter) DeletePid() error {
return d.dsw.DeletePid()
}
+
+func createLogger(debug string) *log.Logger {
+ var w io.Writer
+ // if we are in debug mode set the logger to stderr
+ if debug != "" {
+ w = os.Stderr
+ } else {
+ w = ioutil.Discard
+ }
+ return log.New(w, "[libcontainer] ", log.LstdFlags)
+}
diff --git a/execdriver/native/info.go b/runtime/execdriver/native/info.go
index aef2f85c6b..aef2f85c6b 100644
--- a/execdriver/native/info.go
+++ b/runtime/execdriver/native/info.go
diff --git a/runtime/execdriver/native/template/default_template.go b/runtime/execdriver/native/template/default_template.go
new file mode 100644
index 0000000000..a1ecb04d76
--- /dev/null
+++ b/runtime/execdriver/native/template/default_template.go
@@ -0,0 +1,45 @@
+package template
+
+import (
+ "github.com/dotcloud/docker/pkg/cgroups"
+ "github.com/dotcloud/docker/pkg/libcontainer"
+)
+
+// New returns the docker default configuration for libcontainer
+func New() *libcontainer.Container {
+ container := &libcontainer.Container{
+ CapabilitiesMask: libcontainer.Capabilities{
+ libcontainer.GetCapability("SETPCAP"),
+ libcontainer.GetCapability("SYS_MODULE"),
+ libcontainer.GetCapability("SYS_RAWIO"),
+ libcontainer.GetCapability("SYS_PACCT"),
+ libcontainer.GetCapability("SYS_ADMIN"),
+ libcontainer.GetCapability("SYS_NICE"),
+ libcontainer.GetCapability("SYS_RESOURCE"),
+ libcontainer.GetCapability("SYS_TIME"),
+ libcontainer.GetCapability("SYS_TTY_CONFIG"),
+ libcontainer.GetCapability("AUDIT_WRITE"),
+ libcontainer.GetCapability("AUDIT_CONTROL"),
+ libcontainer.GetCapability("MAC_OVERRIDE"),
+ libcontainer.GetCapability("MAC_ADMIN"),
+ libcontainer.GetCapability("NET_ADMIN"),
+ libcontainer.GetCapability("MKNOD"),
+ },
+ Namespaces: libcontainer.Namespaces{
+ libcontainer.GetNamespace("NEWNS"),
+ libcontainer.GetNamespace("NEWUTS"),
+ libcontainer.GetNamespace("NEWIPC"),
+ libcontainer.GetNamespace("NEWPID"),
+ libcontainer.GetNamespace("NEWNET"),
+ },
+ Cgroups: &cgroups.Cgroup{
+ Parent: "docker",
+ DeviceAccess: false,
+ },
+ Context: libcontainer.Context{
+ "apparmor_profile": "docker-default",
+ },
+ }
+ container.CapabilitiesMask.Get("MKNOD").Enabled = true
+ return container
+}
diff --git a/execdriver/native/term.go b/runtime/execdriver/native/term.go
index ec69820f75..0d5298d388 100644
--- a/execdriver/native/term.go
+++ b/runtime/execdriver/native/term.go
@@ -5,7 +5,7 @@
package native
import (
- "github.com/dotcloud/docker/execdriver"
+ "github.com/dotcloud/docker/runtime/execdriver"
"io"
"os"
"os/exec"
diff --git a/execdriver/pipes.go b/runtime/execdriver/pipes.go
index 158219f0c5..158219f0c5 100644
--- a/execdriver/pipes.go
+++ b/runtime/execdriver/pipes.go
diff --git a/execdriver/termconsole.go b/runtime/execdriver/termconsole.go
index af6b88d3d1..af6b88d3d1 100644
--- a/execdriver/termconsole.go
+++ b/runtime/execdriver/termconsole.go
diff --git a/graphdriver/aufs/aufs.go b/runtime/graphdriver/aufs/aufs.go
index a15cf6b273..401bbd8c86 100644
--- a/graphdriver/aufs/aufs.go
+++ b/runtime/graphdriver/aufs/aufs.go
@@ -24,8 +24,8 @@ import (
"bufio"
"fmt"
"github.com/dotcloud/docker/archive"
- "github.com/dotcloud/docker/graphdriver"
mountpk "github.com/dotcloud/docker/pkg/mount"
+ "github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/utils"
"os"
"os/exec"
@@ -134,7 +134,7 @@ func (a Driver) Exists(id string) bool {
// Three folders are created for each id
// mnt, layers, and diff
-func (a *Driver) Create(id, parent string) error {
+func (a *Driver) Create(id, parent string, mountLabel string) error {
if err := a.createDirsFor(id); err != nil {
return err
}
diff --git a/graphdriver/aufs/aufs_test.go b/runtime/graphdriver/aufs/aufs_test.go
index 6002bec5a1..9cfdebd160 100644
--- a/graphdriver/aufs/aufs_test.go
+++ b/runtime/graphdriver/aufs/aufs_test.go
@@ -5,7 +5,7 @@ import (
"encoding/hex"
"fmt"
"github.com/dotcloud/docker/archive"
- "github.com/dotcloud/docker/graphdriver"
+ "github.com/dotcloud/docker/runtime/graphdriver"
"io/ioutil"
"os"
"path"
@@ -90,7 +90,7 @@ func TestCreateNewDir(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
}
@@ -99,7 +99,7 @@ func TestCreateNewDirStructure(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -120,7 +120,7 @@ func TestRemoveImage(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -145,7 +145,7 @@ func TestGetWithoutParent(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -172,7 +172,7 @@ func TestCleanupWithDir(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -185,7 +185,7 @@ func TestMountedFalseResponse(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -204,10 +204,10 @@ func TestMountedTrueReponse(t *testing.T) {
defer os.RemoveAll(tmp)
defer d.Cleanup()
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
- if err := d.Create("2", "1"); err != nil {
+ if err := d.Create("2", "1", ""); err != nil {
t.Fatal(err)
}
@@ -230,10 +230,10 @@ func TestMountWithParent(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
- if err := d.Create("2", "1"); err != nil {
+ if err := d.Create("2", "1", ""); err != nil {
t.Fatal(err)
}
@@ -261,10 +261,10 @@ func TestRemoveMountedDir(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
- if err := d.Create("2", "1"); err != nil {
+ if err := d.Create("2", "1", ""); err != nil {
t.Fatal(err)
}
@@ -300,7 +300,7 @@ func TestCreateWithInvalidParent(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", "docker"); err == nil {
+ if err := d.Create("1", "docker", ""); err == nil {
t.Fatalf("Error should not be nil with parent does not exist")
}
}
@@ -309,7 +309,7 @@ func TestGetDiff(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -343,10 +343,10 @@ func TestChanges(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
- if err := d.Create("2", "1"); err != nil {
+ if err := d.Create("2", "1", ""); err != nil {
t.Fatal(err)
}
@@ -392,7 +392,7 @@ func TestChanges(t *testing.T) {
t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind)
}
- if err := d.Create("3", "2"); err != nil {
+ if err := d.Create("3", "2", ""); err != nil {
t.Fatal(err)
}
mntPoint, err = d.Get("3")
@@ -437,7 +437,7 @@ func TestDiffSize(t *testing.T) {
d := newDriver(t)
defer os.RemoveAll(tmp)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -479,7 +479,7 @@ func TestChildDiffSize(t *testing.T) {
defer os.RemoveAll(tmp)
defer d.Cleanup()
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -515,7 +515,7 @@ func TestChildDiffSize(t *testing.T) {
t.Fatalf("Expected size to be %d got %d", size, diffSize)
}
- if err := d.Create("2", "1"); err != nil {
+ if err := d.Create("2", "1", ""); err != nil {
t.Fatal(err)
}
@@ -534,7 +534,7 @@ func TestExists(t *testing.T) {
defer os.RemoveAll(tmp)
defer d.Cleanup()
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -552,7 +552,7 @@ func TestStatus(t *testing.T) {
defer os.RemoveAll(tmp)
defer d.Cleanup()
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -581,7 +581,7 @@ func TestApplyDiff(t *testing.T) {
defer os.RemoveAll(tmp)
defer d.Cleanup()
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -607,10 +607,10 @@ func TestApplyDiff(t *testing.T) {
t.Fatal(err)
}
- if err := d.Create("2", ""); err != nil {
+ if err := d.Create("2", "", ""); err != nil {
t.Fatal(err)
}
- if err := d.Create("3", "2"); err != nil {
+ if err := d.Create("3", "2", ""); err != nil {
t.Fatal(err)
}
@@ -656,7 +656,7 @@ func TestMountMoreThan42Layers(t *testing.T) {
}
current = hash(current)
- if err := d.Create(current, parent); err != nil {
+ if err := d.Create(current, parent, ""); err != nil {
t.Logf("Current layer %d", i)
t.Fatal(err)
}
diff --git a/graphdriver/aufs/dirs.go b/runtime/graphdriver/aufs/dirs.go
index fb9b81edd2..fb9b81edd2 100644
--- a/graphdriver/aufs/dirs.go
+++ b/runtime/graphdriver/aufs/dirs.go
diff --git a/graphdriver/aufs/migrate.go b/runtime/graphdriver/aufs/migrate.go
index 6018342d6c..400e260797 100644
--- a/graphdriver/aufs/migrate.go
+++ b/runtime/graphdriver/aufs/migrate.go
@@ -77,7 +77,7 @@ func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) e
}
initID := fmt.Sprintf("%s-init", id)
- if err := a.Create(initID, metadata.Image); err != nil {
+ if err := a.Create(initID, metadata.Image, ""); err != nil {
return err
}
@@ -90,7 +90,7 @@ func (a *Driver) migrateContainers(pth string, setupInit func(p string) error) e
return err
}
- if err := a.Create(id, initID); err != nil {
+ if err := a.Create(id, initID, ""); err != nil {
return err
}
}
@@ -144,7 +144,7 @@ func (a *Driver) migrateImage(m *metadata, pth string, migrated map[string]bool)
return err
}
if !a.Exists(m.ID) {
- if err := a.Create(m.ID, m.ParentID); err != nil {
+ if err := a.Create(m.ID, m.ParentID, ""); err != nil {
return err
}
}
diff --git a/graphdriver/aufs/mount.go b/runtime/graphdriver/aufs/mount.go
index 1f1d98f809..1f1d98f809 100644
--- a/graphdriver/aufs/mount.go
+++ b/runtime/graphdriver/aufs/mount.go
diff --git a/graphdriver/aufs/mount_linux.go b/runtime/graphdriver/aufs/mount_linux.go
index 6082d9f240..6082d9f240 100644
--- a/graphdriver/aufs/mount_linux.go
+++ b/runtime/graphdriver/aufs/mount_linux.go
diff --git a/graphdriver/aufs/mount_unsupported.go b/runtime/graphdriver/aufs/mount_unsupported.go
index 2735624112..2735624112 100644
--- a/graphdriver/aufs/mount_unsupported.go
+++ b/runtime/graphdriver/aufs/mount_unsupported.go
diff --git a/graphdriver/btrfs/btrfs.go b/runtime/graphdriver/btrfs/btrfs.go
index 592e058458..2a94a4089f 100644
--- a/graphdriver/btrfs/btrfs.go
+++ b/runtime/graphdriver/btrfs/btrfs.go
@@ -11,7 +11,7 @@ import "C"
import (
"fmt"
- "github.com/dotcloud/docker/graphdriver"
+ "github.com/dotcloud/docker/runtime/graphdriver"
"os"
"path"
"syscall"
@@ -80,7 +80,7 @@ func getDirFd(dir *C.DIR) uintptr {
return uintptr(C.dirfd(dir))
}
-func subvolCreate(path, name string) error {
+func subvolCreate(path, name string, mountLabel string) error {
dir, err := openDir(path)
if err != nil {
return err
@@ -155,13 +155,13 @@ func (d *Driver) subvolumesDirId(id string) string {
return path.Join(d.subvolumesDir(), id)
}
-func (d *Driver) Create(id string, parent string) error {
+func (d *Driver) Create(id string, parent string, mountLabel string) error {
subvolumes := path.Join(d.home, "subvolumes")
if err := os.MkdirAll(subvolumes, 0700); err != nil {
return err
}
if parent == "" {
- if err := subvolCreate(subvolumes, id); err != nil {
+ if err := subvolCreate(subvolumes, id, mountLabel); err != nil {
return err
}
} else {
diff --git a/graphdriver/btrfs/dummy_unsupported.go b/runtime/graphdriver/btrfs/dummy_unsupported.go
index 6c44615763..6c44615763 100644
--- a/graphdriver/btrfs/dummy_unsupported.go
+++ b/runtime/graphdriver/btrfs/dummy_unsupported.go
diff --git a/graphdriver/devmapper/attach_loopback.go b/runtime/graphdriver/devmapper/attach_loopback.go
index 23339076e8..23339076e8 100644
--- a/graphdriver/devmapper/attach_loopback.go
+++ b/runtime/graphdriver/devmapper/attach_loopback.go
diff --git a/graphdriver/devmapper/deviceset.go b/runtime/graphdriver/devmapper/deviceset.go
index 303e363e92..97d670a3d9 100644
--- a/graphdriver/devmapper/deviceset.go
+++ b/runtime/graphdriver/devmapper/deviceset.go
@@ -6,6 +6,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@@ -14,6 +15,7 @@ import (
"strconv"
"strings"
"sync"
+ "syscall"
"time"
)
@@ -39,15 +41,28 @@ type DevInfo struct {
// first get (since we need to mount to set up the device
// a bit first).
floating bool `json:"-"`
+
+ // The global DeviceSet lock guarantees that we serialize all
+ // the calls to libdevmapper (which is not threadsafe), but we
+ // sometimes release that lock while sleeping. In that case
+ // this per-device lock is still held, protecting against
+ // other accesses to the device that we're doing the wait on.
+ //
+ // WARNING: In order to avoid AB-BA deadlocks when releasing
+ // the global lock while holding the per-device locks all
+ // device locks must be aquired *before* the device lock, and
+ // multiple device locks should be aquired parent before child.
+ lock sync.Mutex `json:"-"`
}
type MetaData struct {
- Devices map[string]*DevInfo `json:devices`
+ Devices map[string]*DevInfo `json:devices`
+ devicesLock sync.Mutex `json:"-"` // Protects all read/writes to Devices map
}
type DeviceSet struct {
MetaData
- sync.Mutex
+ sync.Mutex // Protects Devices map and serializes calls into libdevmapper
root string
devicePrefix string
TransactionId uint64
@@ -170,7 +185,9 @@ func (devices *DeviceSet) allocateTransactionId() uint64 {
}
func (devices *DeviceSet) saveMetadata() error {
+ devices.devicesLock.Lock()
jsonData, err := json.Marshal(devices.MetaData)
+ devices.devicesLock.Unlock()
if err != nil {
return fmt.Errorf("Error encoding metadata to json: %s", err)
}
@@ -205,6 +222,16 @@ func (devices *DeviceSet) saveMetadata() error {
return nil
}
+func (devices *DeviceSet) lookupDevice(hash string) (*DevInfo, error) {
+ devices.devicesLock.Lock()
+ defer devices.devicesLock.Unlock()
+ info := devices.Devices[hash]
+ if info == nil {
+ return nil, fmt.Errorf("Unknown device %s", hash)
+ }
+ return info, nil
+}
+
func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*DevInfo, error) {
utils.Debugf("registerDevice(%v, %v)", id, hash)
info := &DevInfo{
@@ -216,22 +243,23 @@ func (devices *DeviceSet) registerDevice(id int, hash string, size uint64) (*Dev
devices: devices,
}
+ devices.devicesLock.Lock()
devices.Devices[hash] = info
+ devices.devicesLock.Unlock()
+
if err := devices.saveMetadata(); err != nil {
// Try to remove unused device
+ devices.devicesLock.Lock()
delete(devices.Devices, hash)
+ devices.devicesLock.Unlock()
return nil, err
}
return info, nil
}
-func (devices *DeviceSet) activateDeviceIfNeeded(hash string) error {
- utils.Debugf("activateDeviceIfNeeded(%v)", hash)
- info := devices.Devices[hash]
- if info == nil {
- return fmt.Errorf("Unknown device %s", hash)
- }
+func (devices *DeviceSet) activateDeviceIfNeeded(info *DevInfo) error {
+ utils.Debugf("activateDeviceIfNeeded(%v)", info.Hash)
if devinfo, _ := getInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 {
return nil
@@ -301,14 +329,14 @@ func (devices *DeviceSet) loadMetaData() error {
}
func (devices *DeviceSet) setupBaseImage() error {
- oldInfo := devices.Devices[""]
+ oldInfo, _ := devices.lookupDevice("")
if oldInfo != nil && oldInfo.Initialized {
return nil
}
if oldInfo != nil && !oldInfo.Initialized {
utils.Debugf("Removing uninitialized base image")
- if err := devices.deleteDevice(""); err != nil {
+ if err := devices.deleteDevice(oldInfo); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
@@ -334,7 +362,7 @@ func (devices *DeviceSet) setupBaseImage() error {
utils.Debugf("Creating filesystem on base device-manager snapshot")
- if err = devices.activateDeviceIfNeeded(""); err != nil {
+ if err = devices.activateDeviceIfNeeded(info); err != nil {
utils.Debugf("\n--->Err: %s\n", err)
return err
}
@@ -557,16 +585,19 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
}
func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
+ baseInfo, err := devices.lookupDevice(baseHash)
+ if err != nil {
+ return err
+ }
+
+ baseInfo.lock.Lock()
+ defer baseInfo.lock.Unlock()
+
devices.Lock()
defer devices.Unlock()
- if devices.Devices[hash] != nil {
- return fmt.Errorf("hash %s already exists", hash)
- }
-
- baseInfo := devices.Devices[baseHash]
- if baseInfo == nil {
- return fmt.Errorf("Error adding device for '%s': can't find device for parent '%s'", hash, baseHash)
+ if info, _ := devices.lookupDevice(hash); info != nil {
+ return fmt.Errorf("device %s already exists", hash)
}
deviceId := devices.allocateDeviceId()
@@ -584,16 +615,11 @@ func (devices *DeviceSet) AddDevice(hash, baseHash string) error {
return nil
}
-func (devices *DeviceSet) deleteDevice(hash string) error {
- info := devices.Devices[hash]
- if info == nil {
- return fmt.Errorf("hash %s doesn't exists", hash)
- }
-
+func (devices *DeviceSet) deleteDevice(info *DevInfo) error {
// This is a workaround for the kernel not discarding block so
// on the thin pool when we remove a thinp device, so we do it
// manually
- if err := devices.activateDeviceIfNeeded(hash); err == nil {
+ if err := devices.activateDeviceIfNeeded(info); err == nil {
if err := BlockDeviceDiscard(info.DevName()); err != nil {
utils.Debugf("Error discarding block on device: %s (ignoring)\n", err)
}
@@ -621,10 +647,14 @@ func (devices *DeviceSet) deleteDevice(hash string) error {
}
devices.allocateTransactionId()
+ devices.devicesLock.Lock()
delete(devices.Devices, info.Hash)
+ devices.devicesLock.Unlock()
if err := devices.saveMetadata(); err != nil {
+ devices.devicesLock.Lock()
devices.Devices[info.Hash] = info
+ devices.devicesLock.Unlock()
utils.Debugf("Error saving meta data: %s\n", err)
return err
}
@@ -633,10 +663,18 @@ func (devices *DeviceSet) deleteDevice(hash string) error {
}
func (devices *DeviceSet) DeleteDevice(hash string) error {
+ info, err := devices.lookupDevice(hash)
+ if err != nil {
+ return err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
devices.Lock()
defer devices.Unlock()
- return devices.deleteDevice(hash)
+ return devices.deleteDevice(info)
}
func (devices *DeviceSet) deactivatePool() error {
@@ -655,14 +693,16 @@ func (devices *DeviceSet) deactivatePool() error {
return nil
}
-func (devices *DeviceSet) deactivateDevice(hash string) error {
- utils.Debugf("[devmapper] deactivateDevice(%s)", hash)
+func (devices *DeviceSet) deactivateDevice(info *DevInfo) error {
+ utils.Debugf("[devmapper] deactivateDevice(%s)", info.Hash)
defer utils.Debugf("[devmapper] deactivateDevice END")
- info := devices.Devices[hash]
- if info == nil {
- return fmt.Errorf("Unknown device %s", hash)
+ // Wait for the unmount to be effective,
+ // by watching the value of Info.OpenCount for the device
+ if err := devices.waitClose(info); err != nil {
+ utils.Errorf("Warning: error waiting for device %s to close: %s\n", info.Hash, err)
}
+
devinfo, err := getInfo(info.Name())
if err != nil {
utils.Debugf("\n--->Err: %s\n", err)
@@ -683,7 +723,7 @@ func (devices *DeviceSet) deactivateDevice(hash string) error {
func (devices *DeviceSet) removeDeviceAndWait(devname string) error {
var err error
- for i := 0; i < 10; i++ {
+ for i := 0; i < 1000; i++ {
devices.sawBusy = false
err = removeDevice(devname)
if err == nil {
@@ -695,7 +735,9 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error {
// If we see EBUSY it may be a transient error,
// sleep a bit a retry a few times.
- time.Sleep(5 * time.Millisecond)
+ devices.Unlock()
+ time.Sleep(10 * time.Millisecond)
+ devices.Lock()
}
if err != nil {
return err
@@ -709,7 +751,7 @@ func (devices *DeviceSet) removeDeviceAndWait(devname string) error {
// waitRemove blocks until either:
// a) the device registered at <device_set_prefix>-<hash> is removed,
-// or b) the 1 second timeout expires.
+// or b) the 10 second timeout expires.
func (devices *DeviceSet) waitRemove(devname string) error {
utils.Debugf("[deviceset %s] waitRemove(%s)", devices.devicePrefix, devname)
defer utils.Debugf("[deviceset %s] waitRemove(%s) END", devices.devicePrefix, devname)
@@ -728,7 +770,9 @@ func (devices *DeviceSet) waitRemove(devname string) error {
break
}
- time.Sleep(1 * time.Millisecond)
+ devices.Unlock()
+ time.Sleep(10 * time.Millisecond)
+ devices.Lock()
}
if i == 1000 {
return fmt.Errorf("Timeout while waiting for device %s to be removed", devname)
@@ -738,12 +782,8 @@ func (devices *DeviceSet) waitRemove(devname string) error {
// waitClose blocks until either:
// a) the device registered at <device_set_prefix>-<hash> is closed,
-// or b) the 1 second timeout expires.
-func (devices *DeviceSet) waitClose(hash string) error {
- info := devices.Devices[hash]
- if info == nil {
- return fmt.Errorf("Unknown device %s", hash)
- }
+// or b) the 10 second timeout expires.
+func (devices *DeviceSet) waitClose(info *DevInfo) error {
i := 0
for ; i < 1000; i += 1 {
devinfo, err := getInfo(info.Name())
@@ -751,60 +791,86 @@ func (devices *DeviceSet) waitClose(hash string) error {
return err
}
if i%100 == 0 {
- utils.Debugf("Waiting for unmount of %s: opencount=%d", hash, devinfo.OpenCount)
+ utils.Debugf("Waiting for unmount of %s: opencount=%d", info.Hash, devinfo.OpenCount)
}
if devinfo.OpenCount == 0 {
break
}
- time.Sleep(1 * time.Millisecond)
+ devices.Unlock()
+ time.Sleep(10 * time.Millisecond)
+ devices.Lock()
}
if i == 1000 {
- return fmt.Errorf("Timeout while waiting for device %s to close", hash)
+ return fmt.Errorf("Timeout while waiting for device %s to close", info.Hash)
}
return nil
}
func (devices *DeviceSet) Shutdown() error {
- devices.Lock()
- defer devices.Unlock()
utils.Debugf("[deviceset %s] shutdown()", devices.devicePrefix)
utils.Debugf("[devmapper] Shutting down DeviceSet: %s", devices.root)
defer utils.Debugf("[deviceset %s] shutdown END", devices.devicePrefix)
+ var devs []*DevInfo
+
+ devices.devicesLock.Lock()
for _, info := range devices.Devices {
+ devs = append(devs, info)
+ }
+ devices.devicesLock.Unlock()
+
+ for _, info := range devs {
+ info.lock.Lock()
if info.mountCount > 0 {
- if err := sysUnmount(info.mountPath, 0); err != nil {
+ // We use MNT_DETACH here in case it is still busy in some running
+ // container. This means it'll go away from the global scope directly,
+ // and the device will be released when that container dies.
+ if err := sysUnmount(info.mountPath, syscall.MNT_DETACH); err != nil {
utils.Debugf("Shutdown unmounting %s, error: %s\n", info.mountPath, err)
}
+
+ devices.Lock()
+ if err := devices.deactivateDevice(info); err != nil {
+ utils.Debugf("Shutdown deactivate %s , error: %s\n", info.Hash, err)
+ }
+ devices.Unlock()
}
+ info.lock.Unlock()
}
- for _, d := range devices.Devices {
- if err := devices.waitClose(d.Hash); err != nil {
- utils.Errorf("Warning: error waiting for device %s to unmount: %s\n", d.Hash, err)
- }
- if err := devices.deactivateDevice(d.Hash); err != nil {
- utils.Debugf("Shutdown deactivate %s , error: %s\n", d.Hash, err)
+ info, _ := devices.lookupDevice("")
+ if info != nil {
+ info.lock.Lock()
+ devices.Lock()
+ if err := devices.deactivateDevice(info); err != nil {
+ utils.Debugf("Shutdown deactivate base , error: %s\n", err)
}
+ devices.Unlock()
+ info.lock.Unlock()
}
+ devices.Lock()
if err := devices.deactivatePool(); err != nil {
utils.Debugf("Shutdown deactivate pool , error: %s\n", err)
}
+ devices.Unlock()
return nil
}
-func (devices *DeviceSet) MountDevice(hash, path string) error {
+func (devices *DeviceSet) MountDevice(hash, path string, mountLabel string) error {
+ info, err := devices.lookupDevice(hash)
+ if err != nil {
+ return err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
devices.Lock()
defer devices.Unlock()
- info := devices.Devices[hash]
- if info == nil {
- return fmt.Errorf("Unknown device %s", hash)
- }
-
if info.mountCount > 0 {
if path != info.mountPath {
return fmt.Errorf("Trying to mount devmapper device in multple places (%s, %s)", info.mountPath, path)
@@ -819,15 +885,17 @@ func (devices *DeviceSet) MountDevice(hash, path string) error {
return nil
}
- if err := devices.activateDeviceIfNeeded(hash); err != nil {
+ if err := devices.activateDeviceIfNeeded(info); err != nil {
return fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err)
}
var flags uintptr = sysMsMgcVal
- err := sysMount(info.DevName(), path, "ext4", flags, "discard")
+ mountOptions := label.FormatMountLabel("discard", mountLabel)
+ err = sysMount(info.DevName(), path, "ext4", flags, mountOptions)
if err != nil && err == sysEInval {
- err = sysMount(info.DevName(), path, "ext4", flags, "")
+ mountOptions = label.FormatMountLabel(mountLabel, "")
+ err = sysMount(info.DevName(), path, "ext4", flags, mountOptions)
}
if err != nil {
return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), path, err)
@@ -837,20 +905,24 @@ func (devices *DeviceSet) MountDevice(hash, path string) error {
info.mountPath = path
info.floating = false
- return devices.setInitialized(hash)
+ return devices.setInitialized(info)
}
func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error {
utils.Debugf("[devmapper] UnmountDevice(hash=%s, mode=%d)", hash, mode)
defer utils.Debugf("[devmapper] UnmountDevice END")
- devices.Lock()
- defer devices.Unlock()
- info := devices.Devices[hash]
- if info == nil {
- return fmt.Errorf("UnmountDevice: no such device %s\n", hash)
+ info, err := devices.lookupDevice(hash)
+ if err != nil {
+ return err
}
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
if mode == UnmountFloat {
if info.floating {
return fmt.Errorf("UnmountDevice: can't float floating reference %s\n", hash)
@@ -884,14 +956,11 @@ func (devices *DeviceSet) UnmountDevice(hash string, mode UnmountMode) error {
return err
}
utils.Debugf("[devmapper] Unmount done")
- // Wait for the unmount to be effective,
- // by watching the value of Info.OpenCount for the device
- if err := devices.waitClose(hash); err != nil {
+
+ if err := devices.deactivateDevice(info); err != nil {
return err
}
- devices.deactivateDevice(hash)
-
info.mountPath = ""
return nil
@@ -901,35 +970,35 @@ func (devices *DeviceSet) HasDevice(hash string) bool {
devices.Lock()
defer devices.Unlock()
- return devices.Devices[hash] != nil
+ info, _ := devices.lookupDevice(hash)
+ return info != nil
}
func (devices *DeviceSet) HasInitializedDevice(hash string) bool {
devices.Lock()
defer devices.Unlock()
- info := devices.Devices[hash]
+ info, _ := devices.lookupDevice(hash)
return info != nil && info.Initialized
}
func (devices *DeviceSet) HasActivatedDevice(hash string) bool {
- devices.Lock()
- defer devices.Unlock()
-
- info := devices.Devices[hash]
+ info, _ := devices.lookupDevice(hash)
if info == nil {
return false
}
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
+ devices.Lock()
+ defer devices.Unlock()
+
devinfo, _ := getInfo(info.Name())
return devinfo != nil && devinfo.Exists != 0
}
-func (devices *DeviceSet) setInitialized(hash string) error {
- info := devices.Devices[hash]
- if info == nil {
- return fmt.Errorf("Unknown device %s", hash)
- }
-
+func (devices *DeviceSet) setInitialized(info *DevInfo) error {
info.Initialized = true
if err := devices.saveMetadata(); err != nil {
info.Initialized = false
@@ -944,12 +1013,15 @@ func (devices *DeviceSet) List() []string {
devices.Lock()
defer devices.Unlock()
+ devices.devicesLock.Lock()
ids := make([]string, len(devices.Devices))
i := 0
for k := range devices.Devices {
ids[i] = k
i++
}
+ devices.devicesLock.Unlock()
+
return ids
}
@@ -966,21 +1038,24 @@ func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSec
}
func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) {
+ info, err := devices.lookupDevice(hash)
+ if err != nil {
+ return nil, err
+ }
+
+ info.lock.Lock()
+ defer info.lock.Unlock()
+
devices.Lock()
defer devices.Unlock()
- info := devices.Devices[hash]
- if info == nil {
- return nil, fmt.Errorf("No device %s", hash)
- }
-
status := &DevStatus{
DeviceId: info.DeviceId,
Size: info.Size,
TransactionId: info.TransactionId,
}
- if err := devices.activateDeviceIfNeeded(hash); err != nil {
+ if err := devices.activateDeviceIfNeeded(info); err != nil {
return nil, fmt.Errorf("Error activating devmapper device for '%s': %s", hash, err)
}
diff --git a/graphdriver/devmapper/devmapper.go b/runtime/graphdriver/devmapper/devmapper.go
index 7317118dcf..7317118dcf 100644
--- a/graphdriver/devmapper/devmapper.go
+++ b/runtime/graphdriver/devmapper/devmapper.go
diff --git a/graphdriver/devmapper/devmapper_doc.go b/runtime/graphdriver/devmapper/devmapper_doc.go
index c1c3e3891b..c1c3e3891b 100644
--- a/graphdriver/devmapper/devmapper_doc.go
+++ b/runtime/graphdriver/devmapper/devmapper_doc.go
diff --git a/graphdriver/devmapper/devmapper_log.go b/runtime/graphdriver/devmapper/devmapper_log.go
index 18dde7cca5..18dde7cca5 100644
--- a/graphdriver/devmapper/devmapper_log.go
+++ b/runtime/graphdriver/devmapper/devmapper_log.go
diff --git a/graphdriver/devmapper/devmapper_test.go b/runtime/graphdriver/devmapper/devmapper_test.go
index 3ffa163ceb..3ffa163ceb 100644
--- a/graphdriver/devmapper/devmapper_test.go
+++ b/runtime/graphdriver/devmapper/devmapper_test.go
diff --git a/graphdriver/devmapper/devmapper_wrapper.go b/runtime/graphdriver/devmapper/devmapper_wrapper.go
index bf558affc8..bf558affc8 100644
--- a/graphdriver/devmapper/devmapper_wrapper.go
+++ b/runtime/graphdriver/devmapper/devmapper_wrapper.go
diff --git a/graphdriver/devmapper/driver.go b/runtime/graphdriver/devmapper/driver.go
index 4d414f9a75..35fe883f26 100644
--- a/graphdriver/devmapper/driver.go
+++ b/runtime/graphdriver/devmapper/driver.go
@@ -4,7 +4,7 @@ package devmapper
import (
"fmt"
- "github.com/dotcloud/docker/graphdriver"
+ "github.com/dotcloud/docker/runtime/graphdriver"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"os"
@@ -60,11 +60,10 @@ func (d *Driver) Cleanup() error {
return d.DeviceSet.Shutdown()
}
-func (d *Driver) Create(id, parent string) error {
+func (d *Driver) Create(id, parent string, mountLabel string) error {
if err := d.DeviceSet.AddDevice(id, parent); err != nil {
return err
}
-
mp := path.Join(d.home, "mnt", id)
if err := d.mount(id, mp); err != nil {
return err
@@ -90,6 +89,13 @@ func (d *Driver) Create(id, parent string) error {
}
func (d *Driver) Remove(id string) error {
+ if !d.DeviceSet.HasDevice(id) {
+ // Consider removing a non-existing device a no-op
+ // This is useful to be able to progress on container removal
+ // if the underlying device has gone away due to earlier errors
+ return nil
+ }
+
// Sink the float from create in case no Get() call was made
if err := d.DeviceSet.UnmountDevice(id, UnmountSink); err != nil {
return err
@@ -128,7 +134,7 @@ func (d *Driver) mount(id, mountPoint string) error {
return err
}
// Mount the device
- return d.DeviceSet.MountDevice(id, mountPoint)
+ return d.DeviceSet.MountDevice(id, mountPoint, "")
}
func (d *Driver) Exists(id string) bool {
diff --git a/graphdriver/devmapper/driver_test.go b/runtime/graphdriver/devmapper/driver_test.go
index 68699f208e..4ca72db0ca 100644
--- a/graphdriver/devmapper/driver_test.go
+++ b/runtime/graphdriver/devmapper/driver_test.go
@@ -4,7 +4,7 @@ package devmapper
import (
"fmt"
- "github.com/dotcloud/docker/graphdriver"
+ "github.com/dotcloud/docker/runtime/graphdriver"
"io/ioutil"
"path"
"runtime"
@@ -494,7 +494,7 @@ func TestDriverCreate(t *testing.T) {
"?ioctl.loopctlgetfree",
)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
calls.Assert(t,
@@ -612,7 +612,7 @@ func TestDriverRemove(t *testing.T) {
"?ioctl.loopctlgetfree",
)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -668,7 +668,7 @@ func TestCleanup(t *testing.T) {
mountPoints := make([]string, 2)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
// Mount the id
@@ -678,7 +678,7 @@ func TestCleanup(t *testing.T) {
}
mountPoints[0] = p
- if err := d.Create("2", "1"); err != nil {
+ if err := d.Create("2", "1", ""); err != nil {
t.Fatal(err)
}
@@ -731,7 +731,7 @@ func TestNotMounted(t *testing.T) {
d := newDriver(t)
defer cleanup(d)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -749,7 +749,7 @@ func TestMounted(t *testing.T) {
d := newDriver(t)
defer cleanup(d)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
if _, err := d.Get("1"); err != nil {
@@ -769,7 +769,7 @@ func TestInitCleanedDriver(t *testing.T) {
t.Skip("FIXME: not a unit test")
d := newDriver(t)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
if _, err := d.Get("1"); err != nil {
@@ -797,7 +797,7 @@ func TestMountMountedDriver(t *testing.T) {
d := newDriver(t)
defer cleanup(d)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -816,7 +816,7 @@ func TestGetReturnsValidDevice(t *testing.T) {
d := newDriver(t)
defer cleanup(d)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
@@ -844,7 +844,7 @@ func TestDriverGetSize(t *testing.T) {
d := newDriver(t)
defer cleanup(d)
- if err := d.Create("1", ""); err != nil {
+ if err := d.Create("1", "", ""); err != nil {
t.Fatal(err)
}
diff --git a/graphdriver/devmapper/ioctl.go b/runtime/graphdriver/devmapper/ioctl.go
index 30bafff943..30bafff943 100644
--- a/graphdriver/devmapper/ioctl.go
+++ b/runtime/graphdriver/devmapper/ioctl.go
diff --git a/graphdriver/devmapper/mount.go b/runtime/graphdriver/devmapper/mount.go
index 4f19109bf8..4f19109bf8 100644
--- a/graphdriver/devmapper/mount.go
+++ b/runtime/graphdriver/devmapper/mount.go
diff --git a/graphdriver/devmapper/sys.go b/runtime/graphdriver/devmapper/sys.go
index 5a9ab4d74b..5a9ab4d74b 100644
--- a/graphdriver/devmapper/sys.go
+++ b/runtime/graphdriver/devmapper/sys.go
diff --git a/graphdriver/driver.go b/runtime/graphdriver/driver.go
index 89fd03a624..bd4c2faaca 100644
--- a/graphdriver/driver.go
+++ b/runtime/graphdriver/driver.go
@@ -13,7 +13,7 @@ type InitFunc func(root string) (Driver, error)
type Driver interface {
String() string
- Create(id, parent string) error
+ Create(id, parent string, mountLabel string) error
Remove(id string) error
Get(id string) (dir string, err error)
@@ -39,10 +39,9 @@ var (
// Slice of drivers that should be used in an order
priority = []string{
"aufs",
+ "btrfs",
"devicemapper",
"vfs",
- // experimental, has to be enabled manually for now
- "btrfs",
}
)
diff --git a/graphdriver/vfs/driver.go b/runtime/graphdriver/vfs/driver.go
index 21da63878a..fe09560f24 100644
--- a/graphdriver/vfs/driver.go
+++ b/runtime/graphdriver/vfs/driver.go
@@ -2,7 +2,7 @@ package vfs
import (
"fmt"
- "github.com/dotcloud/docker/graphdriver"
+ "github.com/dotcloud/docker/runtime/graphdriver"
"os"
"os/exec"
"path"
@@ -42,7 +42,7 @@ func copyDir(src, dst string) error {
return nil
}
-func (d *Driver) Create(id string, parent string) error {
+func (d *Driver) Create(id string, parent string, mountLabel string) error {
dir := d.dir(id)
if err := os.MkdirAll(path.Dir(dir), 0700); err != nil {
return err
diff --git a/runtime/history.go b/runtime/history.go
new file mode 100644
index 0000000000..835ac9c11e
--- /dev/null
+++ b/runtime/history.go
@@ -0,0 +1,30 @@
+package runtime
+
+import (
+ "sort"
+)
+
+// History is a convenience type for storing a list of containers,
+// ordered by creation date.
+type History []*Container
+
+func (history *History) Len() int {
+ return len(*history)
+}
+
+func (history *History) Less(i, j int) bool {
+ containers := *history
+ return containers[j].When().Before(containers[i].When())
+}
+
+func (history *History) Swap(i, j int) {
+ containers := *history
+ tmp := containers[i]
+ containers[i] = containers[j]
+ containers[j] = tmp
+}
+
+func (history *History) Add(container *Container) {
+ *history = append(*history, container)
+ sort.Sort(history)
+}
diff --git a/networkdriver/lxc/driver.go b/runtime/networkdriver/bridge/driver.go
index 6185c42752..f7c3bc6b01 100644
--- a/networkdriver/lxc/driver.go
+++ b/runtime/networkdriver/bridge/driver.go
@@ -1,26 +1,23 @@
-package lxc
+package bridge
import (
"fmt"
"github.com/dotcloud/docker/engine"
- "github.com/dotcloud/docker/networkdriver"
- "github.com/dotcloud/docker/networkdriver/ipallocator"
- "github.com/dotcloud/docker/networkdriver/portallocator"
- "github.com/dotcloud/docker/networkdriver/portmapper"
"github.com/dotcloud/docker/pkg/iptables"
"github.com/dotcloud/docker/pkg/netlink"
+ "github.com/dotcloud/docker/runtime/networkdriver"
+ "github.com/dotcloud/docker/runtime/networkdriver/ipallocator"
+ "github.com/dotcloud/docker/runtime/networkdriver/portallocator"
+ "github.com/dotcloud/docker/runtime/networkdriver/portmapper"
"github.com/dotcloud/docker/utils"
"io/ioutil"
"log"
"net"
"strings"
- "syscall"
- "unsafe"
)
const (
DefaultNetworkBridge = "docker0"
- siocBRADDBR = 0x89a0
)
// Network interface represents the networking stack of a container
@@ -93,6 +90,12 @@ func InitDriver(job *engine.Job) engine.Status {
network = addr.(*net.IPNet)
} else {
network = addr.(*net.IPNet)
+ // validate that the bridge ip matches the ip specified by BridgeIP
+ if bridgeIP != "" {
+ if !network.IP.Equal(net.ParseIP(bridgeIP)) {
+ return job.Errorf("bridge ip (%s) does not match existing bridge configuration %s", network.IP, bridgeIP)
+ }
+ }
}
// Configure iptables for link support
@@ -275,28 +278,13 @@ func createBridge(bridgeIP string) error {
return nil
}
-// Create the actual bridge device. This is more backward-compatible than
-// netlink.NetworkLinkAdd and works on RHEL 6.
func createBridgeIface(name string) error {
- s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
- if err != nil {
- utils.Debugf("Bridge socket creation failed IPv6 probably not enabled: %v", err)
- s, err = syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_IP)
- if err != nil {
- return fmt.Errorf("Error creating bridge creation socket: %s", err)
- }
- }
- defer syscall.Close(s)
-
- nameBytePtr, err := syscall.BytePtrFromString(name)
- if err != nil {
- return fmt.Errorf("Error converting bridge name %s to byte array: %s", name, err)
- }
-
- if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), siocBRADDBR, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 {
- return fmt.Errorf("Error creating bridge: %s", err)
- }
- return nil
+ kv, err := utils.GetKernelVersion()
+ // only set the bridge's mac address if the kernel version is > 3.3
+ // before that it was not supported
+ setBridgeMacAddr := err == nil && (kv.Kernel >= 3 && kv.Major >= 3)
+ utils.Debugf("setting bridge mac address = %v", setBridgeMacAddr)
+ return netlink.CreateBridge(name, setBridgeMacAddr)
}
// Allocate a network interface
diff --git a/networkdriver/ipallocator/allocator.go b/runtime/networkdriver/ipallocator/allocator.go
index 1c5a7b4cc2..70a7028bbe 100644
--- a/networkdriver/ipallocator/allocator.go
+++ b/runtime/networkdriver/ipallocator/allocator.go
@@ -3,8 +3,8 @@ package ipallocator
import (
"encoding/binary"
"errors"
- "github.com/dotcloud/docker/networkdriver"
"github.com/dotcloud/docker/pkg/collections"
+ "github.com/dotcloud/docker/runtime/networkdriver"
"net"
"sync"
)
diff --git a/networkdriver/ipallocator/allocator_test.go b/runtime/networkdriver/ipallocator/allocator_test.go
index 5e9fcfc983..5e9fcfc983 100644
--- a/networkdriver/ipallocator/allocator_test.go
+++ b/runtime/networkdriver/ipallocator/allocator_test.go
diff --git a/networkdriver/network.go b/runtime/networkdriver/network.go
index 8dda789d2f..8dda789d2f 100644
--- a/networkdriver/network.go
+++ b/runtime/networkdriver/network.go
diff --git a/networkdriver/network_test.go b/runtime/networkdriver/network_test.go
index 6224c2dffb..6224c2dffb 100644
--- a/networkdriver/network_test.go
+++ b/runtime/networkdriver/network_test.go
diff --git a/networkdriver/portallocator/portallocator.go b/runtime/networkdriver/portallocator/portallocator.go
index 71cac82703..9ecd447116 100644
--- a/networkdriver/portallocator/portallocator.go
+++ b/runtime/networkdriver/portallocator/portallocator.go
@@ -18,8 +18,8 @@ type (
)
var (
+ ErrAllPortsAllocated = errors.New("all ports are allocated")
ErrPortAlreadyAllocated = errors.New("port has already been allocated")
- ErrPortExceedsRange = errors.New("port exceeds upper range")
ErrUnknownProtocol = errors.New("unknown protocol")
)
@@ -100,22 +100,30 @@ func ReleaseAll() error {
}
func registerDynamicPort(ip net.IP, proto string) (int, error) {
- allocated := defaultAllocatedPorts[proto]
-
- port := nextPort(proto)
- if port > EndPortRange {
- return 0, ErrPortExceedsRange
- }
if !equalsDefault(ip) {
registerIP(ip)
ipAllocated := otherAllocatedPorts[ip.String()][proto]
+
+ port, err := findNextPort(proto, ipAllocated)
+ if err != nil {
+ return 0, err
+ }
ipAllocated.Push(port)
+ return port, nil
+
} else {
+
+ allocated := defaultAllocatedPorts[proto]
+
+ port, err := findNextPort(proto, allocated)
+ if err != nil {
+ return 0, err
+ }
allocated.Push(port)
+ return port, nil
}
- return port, nil
}
func registerSetPort(ip net.IP, proto string, port int) error {
@@ -142,8 +150,23 @@ func equalsDefault(ip net.IP) bool {
return ip == nil || ip.Equal(defaultIP)
}
+func findNextPort(proto string, allocated *collections.OrderedIntSet) (int, error) {
+ port := nextPort(proto)
+ startSearchPort := port
+ for allocated.Exists(port) {
+ port = nextPort(proto)
+ if startSearchPort == port {
+ return 0, ErrAllPortsAllocated
+ }
+ }
+ return port, nil
+}
+
func nextPort(proto string) int {
c := currentDynamicPort[proto] + 1
+ if c > EndPortRange {
+ c = BeginPortRange
+ }
currentDynamicPort[proto] = c
return c
}
diff --git a/networkdriver/portallocator/portallocator_test.go b/runtime/networkdriver/portallocator/portallocator_test.go
index 603bd03bd7..5a4765ddd4 100644
--- a/networkdriver/portallocator/portallocator_test.go
+++ b/runtime/networkdriver/portallocator/portallocator_test.go
@@ -110,14 +110,27 @@ func TestAllocateAllPorts(t *testing.T) {
}
}
- if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrPortExceedsRange {
- t.Fatalf("Expected error %s got %s", ErrPortExceedsRange, err)
+ if _, err := RequestPort(defaultIP, "tcp", 0); err != ErrAllPortsAllocated {
+ t.Fatalf("Expected error %s got %s", ErrAllPortsAllocated, err)
}
_, err := RequestPort(defaultIP, "udp", 0)
if err != nil {
t.Fatal(err)
}
+
+ // release a port in the middle and ensure we get another tcp port
+ port := BeginPortRange + 5
+ if err := ReleasePort(defaultIP, "tcp", port); err != nil {
+ t.Fatal(err)
+ }
+ newPort, err := RequestPort(defaultIP, "tcp", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if newPort != port {
+ t.Fatalf("Expected port %d got %d", port, newPort)
+ }
}
func BenchmarkAllocatePorts(b *testing.B) {
@@ -181,4 +194,20 @@ func TestPortAllocation(t *testing.T) {
if _, err := RequestPort(ip, "tcp", 80); err != nil {
t.Fatal(err)
}
+
+ port, err = RequestPort(ip, "tcp", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ port2, err := RequestPort(ip, "tcp", port+1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ port3, err := RequestPort(ip, "tcp", 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if port3 == port2 {
+ t.Fatal("Requesting a dynamic port should never allocate a used port")
+ }
}
diff --git a/networkdriver/portmapper/mapper.go b/runtime/networkdriver/portmapper/mapper.go
index e29959a245..e29959a245 100644
--- a/networkdriver/portmapper/mapper.go
+++ b/runtime/networkdriver/portmapper/mapper.go
diff --git a/networkdriver/portmapper/mapper_test.go b/runtime/networkdriver/portmapper/mapper_test.go
index 4c09f3c651..4c09f3c651 100644
--- a/networkdriver/portmapper/mapper_test.go
+++ b/runtime/networkdriver/portmapper/mapper_test.go
diff --git a/networkdriver/utils.go b/runtime/networkdriver/utils.go
index 0a4ef70c95..0a4ef70c95 100644
--- a/networkdriver/utils.go
+++ b/runtime/networkdriver/utils.go
diff --git a/runtime.go b/runtime/runtime.go
index 84f11e87b2..98903cfa08 100644
--- a/runtime.go
+++ b/runtime/runtime.go
@@ -1,31 +1,33 @@
-package docker
+package runtime
import (
"container/list"
"fmt"
"github.com/dotcloud/docker/archive"
+ "github.com/dotcloud/docker/daemonconfig"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/engine"
- "github.com/dotcloud/docker/execdriver"
- "github.com/dotcloud/docker/execdriver/lxc"
- "github.com/dotcloud/docker/execdriver/native"
- "github.com/dotcloud/docker/graphdriver"
- "github.com/dotcloud/docker/graphdriver/aufs"
- _ "github.com/dotcloud/docker/graphdriver/btrfs"
- _ "github.com/dotcloud/docker/graphdriver/devmapper"
- _ "github.com/dotcloud/docker/graphdriver/vfs"
- _ "github.com/dotcloud/docker/networkdriver/lxc"
- "github.com/dotcloud/docker/networkdriver/portallocator"
+ "github.com/dotcloud/docker/graph"
+ "github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/pkg/graphdb"
+ "github.com/dotcloud/docker/pkg/mount"
+ "github.com/dotcloud/docker/pkg/selinux"
"github.com/dotcloud/docker/pkg/sysinfo"
"github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/runtime/execdriver"
+ "github.com/dotcloud/docker/runtime/execdriver/execdrivers"
+ "github.com/dotcloud/docker/runtime/execdriver/lxc"
+ "github.com/dotcloud/docker/runtime/graphdriver"
+ _ "github.com/dotcloud/docker/runtime/graphdriver/vfs"
+ _ "github.com/dotcloud/docker/runtime/networkdriver/bridge"
+ "github.com/dotcloud/docker/runtime/networkdriver/portallocator"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
+ "log"
"os"
"path"
"regexp"
- "sort"
"strings"
"sync"
"time"
@@ -37,7 +39,7 @@ import (
const MaxImageDepth = 127
var (
- defaultDns = []string{"8.8.8.8", "8.8.4.4"}
+ DefaultDns = []string{"8.8.8.8", "8.8.4.4"}
validContainerNameChars = `[a-zA-Z0-9_.-]`
validContainerNamePattern = regexp.MustCompile(`^/?` + validContainerNameChars + `+$`)
)
@@ -46,19 +48,34 @@ type Runtime struct {
repository string
sysInitPath string
containers *list.List
- graph *Graph
- repositories *TagStore
+ graph *graph.Graph
+ repositories *graph.TagStore
idIndex *utils.TruncIndex
sysInfo *sysinfo.SysInfo
- volumes *Graph
- srv *Server
+ volumes *graph.Graph
+ srv Server
eng *engine.Engine
- config *DaemonConfig
+ config *daemonconfig.Config
containerGraph *graphdb.Database
driver graphdriver.Driver
execDriver execdriver.Driver
}
+// Mountpoints should be private to the container
+func remountPrivate(mountPoint string) error {
+ mounted, err := mount.Mounted(mountPoint)
+ if err != nil {
+ return err
+ }
+
+ if !mounted {
+ if err := mount.Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
+ return err
+ }
+ }
+ return mount.ForceMount("", mountPoint, "none", "private")
+}
+
// List returns an array of all containers registered in the runtime.
func (runtime *Runtime) List() []*Container {
containers := new(History)
@@ -161,20 +178,28 @@ func (runtime *Runtime) Register(container *Container) error {
container.State.SetGhost(false)
container.State.SetStopped(0)
+ // We only have to handle this for lxc because the other drivers will ensure that
+ // no ghost processes are left when docker dies
if container.ExecDriver == "" || strings.Contains(container.ExecDriver, "lxc") {
lxc.KillLxc(container.ID, 9)
} else {
- command := &execdriver.Command{
+ // use the current driver and ensure that the container is dead x.x
+ cmd := &execdriver.Command{
ID: container.ID,
}
- command.Process = &os.Process{Pid: existingPid}
- runtime.execDriver.Kill(command, 9)
+ var err error
+ cmd.Process, err = os.FindProcess(existingPid)
+ if err != nil {
+ utils.Debugf("cannot find existing process for %d", existingPid)
+ }
+ runtime.execDriver.Terminate(cmd)
}
- // ensure that the filesystem is also unmounted
- unmountVolumesForContainer(container)
if err := container.Unmount(); err != nil {
utils.Debugf("ghost unmount error %s", err)
}
+ if err := container.ToDisk(); err != nil {
+ utils.Debugf("saving ghost state to disk %s", err)
+ }
}
info := runtime.execDriver.Info(container.ID)
@@ -182,7 +207,6 @@ func (runtime *Runtime) Register(container *Container) error {
utils.Debugf("Container %s was supposed to be running but is not.", container.ID)
if runtime.config.AutoRestart {
utils.Debugf("Restarting")
- unmountVolumesForContainer(container)
if err := container.Unmount(); err != nil {
utils.Debugf("restart unmount error %s", err)
}
@@ -349,53 +373,83 @@ func (runtime *Runtime) restore() error {
// Create creates a new container from the given configuration with a given name.
func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Container, []string, error) {
- // Lookup image
+ var (
+ container *Container
+ warnings []string
+ )
+
img, err := runtime.repositories.LookupImage(config.Image)
if err != nil {
return nil, nil, err
}
+ if err := runtime.checkImageDepth(img); err != nil {
+ return nil, nil, err
+ }
+ if warnings, err = runtime.mergeAndVerifyConfig(config, img); err != nil {
+ return nil, nil, err
+ }
+ if container, err = runtime.newContainer(name, config, img); err != nil {
+ return nil, nil, err
+ }
+ if err := runtime.createRootfs(container, img); err != nil {
+ return nil, nil, err
+ }
+ if err := container.ToDisk(); err != nil {
+ return nil, nil, err
+ }
+ if err := runtime.Register(container); err != nil {
+ return nil, nil, err
+ }
+ return container, warnings, nil
+}
+func (runtime *Runtime) checkImageDepth(img *image.Image) error {
// We add 2 layers to the depth because the container's rw and
// init layer add to the restriction
depth, err := img.Depth()
if err != nil {
- return nil, nil, err
+ return err
}
-
if depth+2 >= MaxImageDepth {
- return nil, nil, fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth)
+ return fmt.Errorf("Cannot create container with more than %d parents", MaxImageDepth)
}
+ return nil
+}
- checkDeprecatedExpose := func(config *runconfig.Config) bool {
- if config != nil {
- if config.PortSpecs != nil {
- for _, p := range config.PortSpecs {
- if strings.Contains(p, ":") {
- return true
- }
+func (runtime *Runtime) checkDeprecatedExpose(config *runconfig.Config) bool {
+ if config != nil {
+ if config.PortSpecs != nil {
+ for _, p := range config.PortSpecs {
+ if strings.Contains(p, ":") {
+ return true
}
}
}
- return false
}
+ return false
+}
+func (runtime *Runtime) mergeAndVerifyConfig(config *runconfig.Config, img *image.Image) ([]string, error) {
warnings := []string{}
- if checkDeprecatedExpose(img.Config) || checkDeprecatedExpose(config) {
+ if runtime.checkDeprecatedExpose(img.Config) || runtime.checkDeprecatedExpose(config) {
warnings = append(warnings, "The mapping to public ports on your host via Dockerfile EXPOSE (host:port:port) has been deprecated. Use -p to publish the ports.")
}
-
if img.Config != nil {
if err := runconfig.Merge(config, img.Config); err != nil {
- return nil, nil, err
+ return nil, err
}
}
-
if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 {
- return nil, nil, fmt.Errorf("No command specified")
+ return nil, fmt.Errorf("No command specified")
}
+ return warnings, nil
+}
- // Generate id
- id := GenerateID()
+func (runtime *Runtime) generateIdAndName(name string) (string, string, error) {
+ var (
+ err error
+ id = utils.GenerateRandomID()
+ )
if name == "" {
name, err = generateRandomName(runtime)
@@ -404,47 +458,51 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe
}
} else {
if !validContainerNamePattern.MatchString(name) {
- return nil, nil, fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
+ return "", "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
}
}
-
if name[0] != '/' {
name = "/" + name
}
-
// Set the enitity in the graph using the default name specified
if _, err := runtime.containerGraph.Set(name, id); err != nil {
if !graphdb.IsNonUniqueNameError(err) {
- return nil, nil, err
+ return "", "", err
}
conflictingContainer, err := runtime.GetByName(name)
if err != nil {
if strings.Contains(err.Error(), "Could not find entity") {
- return nil, nil, err
+ return "", "", err
}
// Remove name and continue starting the container
if err := runtime.containerGraph.Delete(name); err != nil {
- return nil, nil, err
+ return "", "", err
}
} else {
nameAsKnownByUser := strings.TrimPrefix(name, "/")
- return nil, nil, fmt.Errorf(
+ return "", "", fmt.Errorf(
"Conflict, The name %s is already assigned to %s. You have to delete (or rename) that container to be able to assign %s to a container again.", nameAsKnownByUser,
utils.TruncateID(conflictingContainer.ID), nameAsKnownByUser)
}
}
+ return id, name, nil
+}
+func (runtime *Runtime) generateHostname(id string, config *runconfig.Config) {
// Generate default hostname
// FIXME: the lxc template no longer needs to set a default hostname
if config.Hostname == "" {
config.Hostname = id[:12]
}
+}
- var args []string
- var entrypoint string
-
+func (runtime *Runtime) getEntrypointAndArgs(config *runconfig.Config) (string, []string) {
+ var (
+ entrypoint string
+ args []string
+ )
if len(config.Entrypoint) != 0 {
entrypoint = config.Entrypoint[0]
args = append(config.Entrypoint[1:], config.Cmd...)
@@ -452,6 +510,21 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe
entrypoint = config.Cmd[0]
args = config.Cmd[1:]
}
+ return entrypoint, args
+}
+
+func (runtime *Runtime) newContainer(name string, config *runconfig.Config, img *image.Image) (*Container, error) {
+ var (
+ id string
+ err error
+ )
+ id, name, err = runtime.generateIdAndName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ runtime.generateHostname(id, config)
+ entrypoint, args := runtime.getEntrypointAndArgs(config)
container := &Container{
// FIXME: we should generate the ID here instead of receiving it as an argument
@@ -468,79 +541,39 @@ func (runtime *Runtime) Create(config *runconfig.Config, name string) (*Containe
ExecDriver: runtime.execDriver.Name(),
}
container.root = runtime.containerRoot(container.ID)
+ return container, nil
+}
+
+func (runtime *Runtime) createRootfs(container *Container, img *image.Image) error {
// Step 1: create the container directory.
// This doubles as a barrier to avoid race conditions.
if err := os.Mkdir(container.root, 0700); err != nil {
- return nil, nil, err
+ return err
}
-
initID := fmt.Sprintf("%s-init", container.ID)
- if err := runtime.driver.Create(initID, img.ID); err != nil {
- return nil, nil, err
+ if err := runtime.driver.Create(initID, img.ID, ""); err != nil {
+ return err
}
initPath, err := runtime.driver.Get(initID)
if err != nil {
- return nil, nil, err
+ return err
}
defer runtime.driver.Put(initID)
- if err := setupInitLayer(initPath); err != nil {
- return nil, nil, err
- }
-
- if err := runtime.driver.Create(container.ID, initID); err != nil {
- return nil, nil, err
- }
- resolvConf, err := utils.GetResolvConf()
- if err != nil {
- return nil, nil, err
- }
-
- if len(config.Dns) == 0 && len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
- //"WARNING: Docker detected local DNS server on resolv.conf. Using default external servers: %v", defaultDns
- runtime.config.Dns = defaultDns
- }
-
- // If custom dns exists, then create a resolv.conf for the container
- if len(config.Dns) > 0 || len(runtime.config.Dns) > 0 {
- var dns []string
- if len(config.Dns) > 0 {
- dns = config.Dns
- } else {
- dns = runtime.config.Dns
- }
- container.ResolvConfPath = path.Join(container.root, "resolv.conf")
- f, err := os.Create(container.ResolvConfPath)
- if err != nil {
- return nil, nil, err
- }
- defer f.Close()
- for _, dns := range dns {
- if _, err := f.Write([]byte("nameserver " + dns + "\n")); err != nil {
- return nil, nil, err
- }
- }
- } else {
- container.ResolvConfPath = "/etc/resolv.conf"
- }
-
- // Step 2: save the container json
- if err := container.ToDisk(); err != nil {
- return nil, nil, err
+ if err := graph.SetupInitLayer(initPath); err != nil {
+ return err
}
- // Step 3: register the container
- if err := runtime.Register(container); err != nil {
- return nil, nil, err
+ if err := runtime.driver.Create(container.ID, initID, ""); err != nil {
+ return err
}
- return container, warnings, nil
+ return nil
}
// Commit creates a new filesystem image from the current state of a container.
// The image can optionally be tagged into a repository
-func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*Image, error) {
+func (runtime *Runtime) Commit(container *Container, repository, tag, comment, author string, config *runconfig.Config) (*image.Image, error) {
// FIXME: freeze the container before copying it to avoid data corruption?
- // FIXME: this shouldn't be in commands.
if err := container.Mount(); err != nil {
return nil, err
}
@@ -553,7 +586,16 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a
defer rwTar.Close()
// Create a new image from the container's base layers + a new layer from container changes
- img, err := runtime.graph.Create(rwTar, container, comment, author, config)
+ var (
+ containerID, containerImage string
+ containerConfig *runconfig.Config
+ )
+ if container != nil {
+ containerID = container.ID
+ containerImage = container.Image
+ containerConfig = container.Config
+ }
+ img, err := runtime.graph.Create(rwTar, containerID, containerImage, comment, author, containerConfig, config)
if err != nil {
return nil, err
}
@@ -566,7 +608,7 @@ func (runtime *Runtime) Commit(container *Container, repository, tag, comment, a
return img, nil
}
-func getFullName(name string) (string, error) {
+func GetFullContainerName(name string) (string, error) {
if name == "" {
return "", fmt.Errorf("Container name cannot be empty")
}
@@ -577,7 +619,7 @@ func getFullName(name string) (string, error) {
}
func (runtime *Runtime) GetByName(name string) (*Container, error) {
- fullName, err := getFullName(name)
+ fullName, err := GetFullContainerName(name)
if err != nil {
return nil, err
}
@@ -593,7 +635,7 @@ func (runtime *Runtime) GetByName(name string) (*Container, error) {
}
func (runtime *Runtime) Children(name string) (map[string]*Container, error) {
- name, err := getFullName(name)
+ name, err := GetFullContainerName(name)
if err != nil {
return nil, err
}
@@ -624,7 +666,7 @@ func (runtime *Runtime) RegisterLink(parent, child *Container, alias string) err
}
// FIXME: harmonize with NewGraph()
-func NewRuntime(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) {
+func NewRuntime(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) {
runtime, err := NewRuntimeFromDirectory(config, eng)
if err != nil {
return nil, err
@@ -632,7 +674,10 @@ func NewRuntime(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) {
return runtime, nil
}
-func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime, error) {
+func NewRuntimeFromDirectory(config *daemonconfig.Config, eng *engine.Engine) (*Runtime, error) {
+ if !config.EnableSelinuxSupport {
+ selinux.SetDisabled()
+ }
// Set the default driver
graphdriver.DefaultDriver = config.GraphDriver
@@ -644,21 +689,23 @@ func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime
}
utils.Debugf("Using graph driver %s", driver)
+ if err := remountPrivate(config.Root); err != nil {
+ return nil, err
+ }
+
runtimeRepo := path.Join(config.Root, "containers")
if err := os.MkdirAll(runtimeRepo, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
- if ad, ok := driver.(*aufs.Driver); ok {
- utils.Debugf("Migrating existing containers")
- if err := ad.Migrate(config.Root, setupInitLayer); err != nil {
- return nil, err
- }
+ // Migrate the container if it is aufs and aufs is enabled
+ if err = migrateIfAufs(driver, config.Root); err != nil {
+ return nil, err
}
utils.Debugf("Creating images graph")
- g, err := NewGraph(path.Join(config.Root, "graph"), driver)
+ g, err := graph.NewGraph(path.Join(config.Root, "graph"), driver)
if err != nil {
return nil, err
}
@@ -670,12 +717,12 @@ func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime
return nil, err
}
utils.Debugf("Creating volumes graph")
- volumes, err := NewGraph(path.Join(config.Root, "volumes"), volumesDriver)
+ volumes, err := graph.NewGraph(path.Join(config.Root, "volumes"), volumesDriver)
if err != nil {
return nil, err
}
utils.Debugf("Creating repository list")
- repositories, err := NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g)
+ repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g)
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store: %s", err)
}
@@ -721,22 +768,8 @@ func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime
sysInitPath = localCopy
}
- var (
- ed execdriver.Driver
- sysInfo = sysinfo.New(false)
- )
-
- switch config.ExecDriver {
- case "lxc":
- // we want to five the lxc driver the full docker root because it needs
- // to access and write config and template files in /var/lib/docker/containers/*
- // to be backwards compatible
- ed, err = lxc.NewDriver(config.Root, sysInfo.AppArmor)
- case "native":
- ed, err = native.NewDriver(path.Join(config.Root, "execdriver", "native"))
- default:
- return nil, fmt.Errorf("unknown exec driver %s", config.ExecDriver)
- }
+ sysInfo := sysinfo.New(false)
+ ed, err := execdrivers.NewDriver(config.ExecDriver, config.Root, sysInitPath, sysInfo)
if err != nil {
return nil, err
}
@@ -757,14 +790,45 @@ func NewRuntimeFromDirectory(config *DaemonConfig, eng *engine.Engine) (*Runtime
eng: eng,
}
+ if err := runtime.checkLocaldns(); err != nil {
+ return nil, err
+ }
if err := runtime.restore(); err != nil {
return nil, err
}
return runtime, nil
}
+func (runtime *Runtime) shutdown() error {
+ group := sync.WaitGroup{}
+ utils.Debugf("starting clean shutdown of all containers...")
+ for _, container := range runtime.List() {
+ c := container
+ if c.State.IsRunning() {
+ utils.Debugf("stopping %s", c.ID)
+ group.Add(1)
+
+ go func() {
+ defer group.Done()
+ if err := c.KillSig(15); err != nil {
+ utils.Debugf("kill 15 error for %s - %s", c.ID, err)
+ }
+ c.Wait()
+ utils.Debugf("container stopped %s", c.ID)
+ }()
+ }
+ }
+ group.Wait()
+
+ return nil
+}
+
func (runtime *Runtime) Close() error {
errorsStrings := []string{}
+ if err := runtime.shutdown(); err != nil {
+ utils.Errorf("runtime.shutdown(): %s", err)
+ errorsStrings = append(errorsStrings, err.Error())
+ }
if err := portallocator.ReleaseAll(); err != nil {
utils.Errorf("portallocator.ReleaseAll(): %s", err)
errorsStrings = append(errorsStrings, err.Error())
@@ -876,31 +940,54 @@ func (runtime *Runtime) Nuke() error {
// which need direct access to runtime.graph.
// Once the tests switch to using engine and jobs, this method
// can go away.
-func (runtime *Runtime) Graph() *Graph {
+func (runtime *Runtime) Graph() *graph.Graph {
return runtime.graph
}
-// History is a convenience type for storing a list of containers,
-// ordered by creation date.
-type History []*Container
+func (runtime *Runtime) Repositories() *graph.TagStore {
+ return runtime.repositories
+}
+
+func (runtime *Runtime) Config() *daemonconfig.Config {
+ return runtime.config
+}
+
+func (runtime *Runtime) SystemConfig() *sysinfo.SysInfo {
+ return runtime.sysInfo
+}
+
+func (runtime *Runtime) SystemInitPath() string {
+ return runtime.sysInitPath
+}
+
+func (runtime *Runtime) GraphDriver() graphdriver.Driver {
+ return runtime.driver
+}
-func (history *History) Len() int {
- return len(*history)
+func (runtime *Runtime) ExecutionDriver() execdriver.Driver {
+ return runtime.execDriver
}
-func (history *History) Less(i, j int) bool {
- containers := *history
- return containers[j].When().Before(containers[i].When())
+func (runtime *Runtime) Volumes() *graph.Graph {
+ return runtime.volumes
}
-func (history *History) Swap(i, j int) {
- containers := *history
- tmp := containers[i]
- containers[i] = containers[j]
- containers[j] = tmp
+func (runtime *Runtime) ContainerGraph() *graphdb.Database {
+ return runtime.containerGraph
}
-func (history *History) Add(container *Container) {
- *history = append(*history, container)
- sort.Sort(history)
+func (runtime *Runtime) SetServer(server Server) {
+ runtime.srv = server
+}
+
+func (runtime *Runtime) checkLocaldns() error {
+ resolvConf, err := utils.GetResolvConf()
+ if err != nil {
+ return err
+ }
+ if len(runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
+ log.Printf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", DefaultDns)
+ runtime.config.Dns = DefaultDns
+ }
+ return nil
}
diff --git a/runtime/runtime_aufs.go b/runtime/runtime_aufs.go
new file mode 100644
index 0000000000..5a32615df5
--- /dev/null
+++ b/runtime/runtime_aufs.go
@@ -0,0 +1,22 @@
+// +build !exclude_graphdriver_aufs
+
+package runtime
+
+import (
+ "github.com/dotcloud/docker/graph"
+ "github.com/dotcloud/docker/runtime/graphdriver"
+ "github.com/dotcloud/docker/runtime/graphdriver/aufs"
+ "github.com/dotcloud/docker/utils"
+)
+
+// Given a graphdriver, if it is aufs, then migrate it.
+// If aufs driver is not built, this func is a noop.
+func migrateIfAufs(driver graphdriver.Driver, root string) error {
+ if ad, ok := driver.(*aufs.Driver); ok {
+ utils.Debugf("Migrating existing containers")
+ if err := ad.Migrate(root, graph.SetupInitLayer); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/runtime/runtime_btrfs.go b/runtime/runtime_btrfs.go
new file mode 100644
index 0000000000..c59b103ff9
--- /dev/null
+++ b/runtime/runtime_btrfs.go
@@ -0,0 +1,7 @@
+// +build !exclude_graphdriver_btrfs
+
+package runtime
+
+import (
+ _ "github.com/dotcloud/docker/runtime/graphdriver/btrfs"
+)
diff --git a/runtime/runtime_devicemapper.go b/runtime/runtime_devicemapper.go
new file mode 100644
index 0000000000..5b418b377a
--- /dev/null
+++ b/runtime/runtime_devicemapper.go
@@ -0,0 +1,7 @@
+// +build !exclude_graphdriver_devicemapper
+
+package runtime
+
+import (
+ _ "github.com/dotcloud/docker/runtime/graphdriver/devmapper"
+)
diff --git a/runtime/runtime_no_aufs.go b/runtime/runtime_no_aufs.go
new file mode 100644
index 0000000000..05a01fe151
--- /dev/null
+++ b/runtime/runtime_no_aufs.go
@@ -0,0 +1,11 @@
+// +build exclude_graphdriver_aufs
+
+package runtime
+
+import (
+ "github.com/dotcloud/docker/runtime/graphdriver"
+)
+
+func migrateIfAufs(driver graphdriver.Driver, root string) error {
+ return nil
+}
diff --git a/runtime/server.go b/runtime/server.go
new file mode 100644
index 0000000000..a74c4d1200
--- /dev/null
+++ b/runtime/server.go
@@ -0,0 +1,10 @@
+package runtime
+
+import (
+ "github.com/dotcloud/docker/utils"
+)
+
+type Server interface {
+ LogEvent(action, id, from string) *utils.JSONMessage
+ IsRunning() bool // returns true if the server is currently in operation
+}
diff --git a/sorter.go b/runtime/sorter.go
index b49ac58c24..c5af772dae 100644
--- a/sorter.go
+++ b/runtime/sorter.go
@@ -1,4 +1,4 @@
-package docker
+package runtime
import "sort"
diff --git a/state.go b/runtime/state.go
index 1dc92af204..316b8a40f1 100644
--- a/state.go
+++ b/runtime/state.go
@@ -1,4 +1,4 @@
-package docker
+package runtime
import (
"fmt"
@@ -28,7 +28,10 @@ func (s *State) String() string {
}
return fmt.Sprintf("Up %s", utils.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
}
- return fmt.Sprintf("Exit %d", s.ExitCode)
+ if s.FinishedAt.IsZero() {
+ return ""
+ }
+ return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, utils.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
}
func (s *State) IsRunning() bool {
diff --git a/utils.go b/runtime/utils.go
index ef666b0de1..b983e67d41 100644
--- a/utils.go
+++ b/runtime/utils.go
@@ -1,17 +1,13 @@
-package docker
+package runtime
import (
- "github.com/dotcloud/docker/archive"
+ "fmt"
"github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/pkg/namesgenerator"
"github.com/dotcloud/docker/runconfig"
- "github.com/dotcloud/docker/utils"
+ "strings"
)
-type Change struct {
- archive.Change
-}
-
func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error {
if config.PortSpecs != nil {
ports, bindings, err := nat.ParsePortSpecs(config.PortSpecs)
@@ -36,10 +32,22 @@ func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostCon
return nil
}
-// Links come in the format of
-// name:alias
-func parseLink(rawLink string) (map[string]string, error) {
- return utils.PartParser("name:alias", rawLink)
+func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig, driverConfig map[string][]string) {
+ if hostConfig == nil {
+ return
+ }
+
+ // merge in the lxc conf options into the generic config map
+ if lxcConf := hostConfig.LxcConf; lxcConf != nil {
+ lxc := driverConfig["lxc"]
+ for _, pair := range lxcConf {
+ // because lxc conf gets the driver name lxc.XXXX we need to trim it off
+ // and let the lxc driver add it back later if needed
+ parts := strings.SplitN(pair.Key, ".", 2)
+ lxc = append(lxc, fmt.Sprintf("%s=%s", parts[1], pair.Value))
+ }
+ driverConfig["lxc"] = lxc
+ }
}
type checker struct {
diff --git a/runtime/utils_test.go b/runtime/utils_test.go
new file mode 100644
index 0000000000..bdf3543a49
--- /dev/null
+++ b/runtime/utils_test.go
@@ -0,0 +1,29 @@
+package runtime
+
+import (
+ "testing"
+
+ "github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/utils"
+)
+
+func TestMergeLxcConfig(t *testing.T) {
+ var (
+ hostConfig = &runconfig.HostConfig{
+ LxcConf: []utils.KeyValuePair{
+ {Key: "lxc.cgroups.cpuset", Value: "1,2"},
+ },
+ }
+ driverConfig = make(map[string][]string)
+ )
+
+ mergeLxcConfIntoOptions(hostConfig, driverConfig)
+ if l := len(driverConfig["lxc"]); l > 1 {
+ t.Fatalf("expected lxc options len of 1 got %d", l)
+ }
+
+ cpuset := driverConfig["lxc"][0]
+ if expected := "cgroups.cpuset=1,2"; cpuset != expected {
+ t.Fatalf("expected %s got %s", expected, cpuset)
+ }
+}
diff --git a/volumes.go b/runtime/volumes.go
index 9f76e3698b..004f1bb024 100644
--- a/volumes.go
+++ b/runtime/volumes.go
@@ -1,12 +1,11 @@
-package docker
+package runtime
import (
"fmt"
"github.com/dotcloud/docker/archive"
- "github.com/dotcloud/docker/pkg/mount"
+ "github.com/dotcloud/docker/runtime/execdriver"
"github.com/dotcloud/docker/utils"
"io/ioutil"
- "log"
"os"
"path/filepath"
"strings"
@@ -34,97 +33,35 @@ func prepareVolumesForContainer(container *Container) error {
return nil
}
-func mountVolumesForContainer(container *Container, envPath string) error {
- // Setup the root fs as a bind mount of the base fs
- var (
- root = container.RootfsPath()
- runtime = container.runtime
- )
- if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) {
- return nil
- }
-
- // Create a bind mount of the base fs as a place where we can add mounts
- // without affecting the ability to access the base fs
- if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil {
- return err
- }
-
- // Make sure the root fs is private so the mounts here don't propagate to basefs
- if err := mount.ForceMount(root, root, "none", "private"); err != nil {
- return err
- }
-
- // Mount docker specific files into the containers root fs
- if err := mount.Mount(runtime.sysInitPath, filepath.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil {
- return err
- }
- if err := mount.Mount(envPath, filepath.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil {
- return err
- }
- if err := mount.Mount(container.ResolvConfPath, filepath.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil {
- return err
+func setupMountsForContainer(container *Container, envPath string) error {
+ mounts := []execdriver.Mount{
+ {container.runtime.sysInitPath, "/.dockerinit", false, true},
+ {envPath, "/.dockerenv", false, true},
+ {container.ResolvConfPath, "/etc/resolv.conf", false, true},
}
if container.HostnamePath != "" && container.HostsPath != "" {
- if err := mount.Mount(container.HostnamePath, filepath.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil {
- return err
- }
- if err := mount.Mount(container.HostsPath, filepath.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil {
- return err
- }
+ mounts = append(mounts, execdriver.Mount{container.HostnamePath, "/etc/hostname", false, true})
+ mounts = append(mounts, execdriver.Mount{container.HostsPath, "/etc/hosts", false, true})
}
// Mount user specified volumes
+ // Note, these are not private because you may want propagation of (un)mounts from host
+ // volumes. For instance if you use -v /usr:/usr and the host later mounts /usr/share you
+ // want this new mount in the container
for r, v := range container.Volumes {
- mountAs := "ro"
- if container.VolumesRW[r] {
- mountAs = "rw"
- }
-
- r = filepath.Join(root, r)
- if p, err := utils.FollowSymlinkInScope(r, root); err != nil {
- return err
- } else {
- r = p
- }
-
- if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil {
- return err
- }
- }
- return nil
-}
-
-func unmountVolumesForContainer(container *Container) {
- var (
- root = container.RootfsPath()
- mounts = []string{
- root,
- filepath.Join(root, "/.dockerinit"),
- filepath.Join(root, "/.dockerenv"),
- filepath.Join(root, "/etc/resolv.conf"),
- }
- )
-
- if container.HostnamePath != "" && container.HostsPath != "" {
- mounts = append(mounts, filepath.Join(root, "/etc/hostname"), filepath.Join(root, "/etc/hosts"))
+ mounts = append(mounts, execdriver.Mount{v, r, container.VolumesRW[r], false})
}
- for r := range container.Volumes {
- mounts = append(mounts, filepath.Join(root, r))
- }
+ container.command.Mounts = mounts
- for i := len(mounts) - 1; i >= 0; i-- {
- if lastError := mount.Unmount(mounts[i]); lastError != nil {
- log.Printf("Failed to umount %v: %v", mounts[i], lastError)
- }
- }
+ return nil
}
func applyVolumesFrom(container *Container) error {
- if container.Config.VolumesFrom != "" {
- for _, containerSpec := range strings.Split(container.Config.VolumesFrom, ",") {
+ volumesFrom := container.hostConfig.VolumesFrom
+ if len(volumesFrom) > 0 {
+ for _, containerSpec := range volumesFrom {
var (
mountRW = true
specParts = strings.SplitN(containerSpec, ":", 2)
@@ -132,7 +69,7 @@ func applyVolumesFrom(container *Container) error {
switch len(specParts) {
case 0:
- return fmt.Errorf("Malformed volumes-from specification: %s", container.Config.VolumesFrom)
+ return fmt.Errorf("Malformed volumes-from specification: %s", containerSpec)
case 2:
switch specParts[1] {
case "ro":
@@ -145,14 +82,23 @@ func applyVolumesFrom(container *Container) error {
c := container.runtime.Get(specParts[0])
if c == nil {
- return fmt.Errorf("Container %s not found. Impossible to mount its volumes", container.ID)
+ return fmt.Errorf("Container %s not found. Impossible to mount its volumes", specParts[0])
}
+ if err := c.Mount(); err != nil {
+ return fmt.Errorf("Container %s failed to mount. Impossible to mount its volumes", specParts[0])
+ }
+ defer c.Unmount()
+
for volPath, id := range c.Volumes {
if _, exists := container.Volumes[volPath]; exists {
continue
}
- if err := os.MkdirAll(filepath.Join(container.basefs, volPath), 0755); err != nil {
+ stat, err := os.Stat(filepath.Join(c.basefs, volPath))
+ if err != nil {
+ return err
+ }
+ if err := createIfNotExists(filepath.Join(container.basefs, volPath), stat.IsDir()); err != nil {
return err
}
container.Volumes[volPath] = id
@@ -216,7 +162,7 @@ func createVolumes(container *Container) error {
return err
}
- volumesDriver := container.runtime.volumes.driver
+ volumesDriver := container.runtime.volumes.Driver()
// Create the requested volumes if they don't exist
for volPath := range container.Config.Volumes {
volPath = filepath.Clean(volPath)
@@ -232,6 +178,9 @@ func createVolumes(container *Container) error {
if bindMap, exists := binds[volPath]; exists {
isBindMount = true
srcPath = bindMap.SrcPath
+ if !filepath.IsAbs(srcPath) {
+ return fmt.Errorf("%s must be an absolute path", srcPath)
+ }
if strings.ToLower(bindMap.Mode) == "rw" {
srcRW = true
}
@@ -246,7 +195,7 @@ func createVolumes(container *Container) error {
// Do not pass a container as the parameter for the volume creation.
// The graph driver using the container's information ( Image ) to
// create the parent.
- c, err := container.runtime.volumes.Create(nil, nil, "", "", nil)
+ c, err := container.runtime.volumes.Create(nil, "", "", "", "", nil, nil)
if err != nil {
return err
}
@@ -272,24 +221,8 @@ func createVolumes(container *Container) error {
if err != nil {
return err
}
-
- if _, err := os.Stat(rootVolPath); err != nil {
- if os.IsNotExist(err) {
- if volIsDir {
- if err := os.MkdirAll(rootVolPath, 0755); err != nil {
- return err
- }
- } else {
- if err := os.MkdirAll(filepath.Dir(rootVolPath), 0755); err != nil {
- return err
- }
- if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil {
- return err
- } else {
- f.Close()
- }
- }
- }
+ if err := createIfNotExists(rootVolPath, volIsDir); err != nil {
+ return err
}
// Do not copy or change permissions if we are mounting from the host
@@ -330,3 +263,25 @@ func createVolumes(container *Container) error {
}
return nil
}
+
+func createIfNotExists(path string, isDir bool) error {
+ if _, err := os.Stat(path); err != nil {
+ if os.IsNotExist(err) {
+ if isDir {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+ } else {
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ }
+ }
+ }
+ return nil
+}
diff --git a/buildfile.go b/server/buildfile.go
index 6fae6a24a5..b4a860ad4d 100644
--- a/buildfile.go
+++ b/server/buildfile.go
@@ -1,4 +1,4 @@
-package docker
+package server
import (
"crypto/sha256"
@@ -7,9 +7,10 @@ import (
"errors"
"fmt"
"github.com/dotcloud/docker/archive"
- "github.com/dotcloud/docker/auth"
+ "github.com/dotcloud/docker/nat"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/runtime"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@@ -34,7 +35,7 @@ type BuildFile interface {
}
type buildFile struct {
- runtime *Runtime
+ runtime *runtime.Runtime
srv *Server
image string
@@ -48,8 +49,8 @@ type buildFile struct {
utilizeCache bool
rm bool
- authConfig *auth.AuthConfig
- configFile *auth.ConfigFile
+ authConfig *registry.AuthConfig
+ configFile *registry.ConfigFile
tmpContainers map[string]struct{}
tmpImages map[string]struct{}
@@ -74,9 +75,9 @@ func (b *buildFile) clearTmp(containers map[string]struct{}) {
}
func (b *buildFile) CmdFrom(name string) error {
- image, err := b.runtime.repositories.LookupImage(name)
+ image, err := b.runtime.Repositories().LookupImage(name)
if err != nil {
- if b.runtime.graph.IsNotExist(err) {
+ if b.runtime.Graph().IsNotExist(err) {
remote, tag := utils.ParseRepositoryTag(name)
pullRegistryAuth := b.authConfig
if len(b.configFile.Configs) > 0 {
@@ -96,7 +97,7 @@ func (b *buildFile) CmdFrom(name string) error {
if err := job.Run(); err != nil {
return err
}
- image, err = b.runtime.repositories.LookupImage(name)
+ image, err = b.runtime.Repositories().LookupImage(name)
if err != nil {
return err
}
@@ -110,7 +111,7 @@ func (b *buildFile) CmdFrom(name string) error {
b.config = image.Config
}
if b.config.Env == nil || len(b.config.Env) == 0 {
- b.config.Env = append(b.config.Env, "HOME=/", "PATH="+defaultPathEnv)
+ b.config.Env = append(b.config.Env, "HOME=/", "PATH="+runtime.DefaultPathEnv)
}
// Process ONBUILD triggers if they exist
if nTriggers := len(b.config.OnBuild); nTriggers != 0 {
@@ -304,8 +305,22 @@ func (b *buildFile) CmdEntrypoint(args string) error {
}
func (b *buildFile) CmdExpose(args string) error {
- ports := strings.Split(args, " ")
- b.config.PortSpecs = append(ports, b.config.PortSpecs...)
+ portsTab := strings.Split(args, " ")
+
+ if b.config.ExposedPorts == nil {
+ b.config.ExposedPorts = make(nat.PortSet)
+ }
+ ports, _, err := nat.ParsePortSpecs(append(portsTab, b.config.PortSpecs...))
+ if err != nil {
+ return err
+ }
+ for port := range ports {
+ if _, exists := b.config.ExposedPorts[port]; !exists {
+ b.config.ExposedPorts[port] = struct{}{}
+ }
+ }
+ b.config.PortSpecs = nil
+
return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
}
@@ -323,7 +338,14 @@ func (b *buildFile) CmdCopy(args string) error {
}
func (b *buildFile) CmdWorkdir(workdir string) error {
- b.config.WorkingDir = workdir
+ if workdir[0] == '/' {
+ b.config.WorkingDir = workdir
+ } else {
+ if b.config.WorkingDir == "" {
+ b.config.WorkingDir = "/"
+ }
+ b.config.WorkingDir = filepath.Join(b.config.WorkingDir, workdir)
+ }
return b.commit("", b.config.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
}
@@ -371,11 +393,20 @@ func (b *buildFile) checkPathForAddition(orig string) error {
return nil
}
-func (b *buildFile) addContext(container *Container, orig, dest string, remote bool) error {
+func (b *buildFile) addContext(container *runtime.Container, orig, dest string, remote bool) error {
var (
+ err error
origPath = path.Join(b.contextPath, orig)
- destPath = path.Join(container.BasefsPath(), dest)
+ destPath = path.Join(container.RootfsPath(), dest)
)
+
+ if destPath != container.RootfsPath() {
+ destPath, err = utils.FollowSymlinkInScope(destPath, container.RootfsPath())
+ if err != nil {
+ return err
+ }
+ }
+
// Preserve the trailing '/'
if strings.HasSuffix(dest, "/") {
destPath = destPath + "/"
@@ -388,10 +419,22 @@ func (b *buildFile) addContext(container *Container, orig, dest string, remote b
return err
}
+ chownR := func(destPath string, uid, gid int) error {
+ return filepath.Walk(destPath, func(path string, info os.FileInfo, err error) error {
+ if err := os.Lchown(path, uid, gid); err != nil {
+ return err
+ }
+ return nil
+ })
+ }
+
if fi.IsDir() {
if err := archive.CopyWithTar(origPath, destPath); err != nil {
return err
}
+ if err := chownR(destPath, 0, 0); err != nil {
+ return err
+ }
return nil
}
@@ -421,6 +464,10 @@ func (b *buildFile) addContext(container *Container, orig, dest string, remote b
if err := archive.CopyWithTar(origPath, destPath); err != nil {
return err
}
+
+ if err := chownR(destPath, 0, 0); err != nil {
+ return err
+ }
return nil
}
@@ -455,28 +502,36 @@ func (b *buildFile) CmdAdd(args string) error {
)
if utils.IsURL(orig) {
+ // Initiate the download
isRemote = true
resp, err := utils.Download(orig)
if err != nil {
return err
}
+
+ // Create a tmp dir
tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
if err != nil {
return err
}
+
+ // Create a tmp file within our tmp dir
tmpFileName := path.Join(tmpDirName, "tmp")
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return err
}
defer os.RemoveAll(tmpDirName)
- if _, err = io.Copy(tmpFile, resp.Body); err != nil {
+
+ // Download and dump result to tmp file
+ if _, err := io.Copy(tmpFile, resp.Body); err != nil {
tmpFile.Close()
return err
}
- origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
tmpFile.Close()
+ origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
+
// Process the checksum
r, err := archive.Tar(tmpFileName, archive.Uncompressed)
if err != nil {
@@ -576,35 +631,7 @@ func (b *buildFile) CmdAdd(args string) error {
return nil
}
-type StdoutFormater struct {
- io.Writer
- *utils.StreamFormatter
-}
-
-func (sf *StdoutFormater) Write(buf []byte) (int, error) {
- formattedBuf := sf.StreamFormatter.FormatStream(string(buf))
- n, err := sf.Writer.Write(formattedBuf)
- if n != len(formattedBuf) {
- return n, io.ErrShortWrite
- }
- return len(buf), err
-}
-
-type StderrFormater struct {
- io.Writer
- *utils.StreamFormatter
-}
-
-func (sf *StderrFormater) Write(buf []byte) (int, error) {
- formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m")
- n, err := sf.Writer.Write(formattedBuf)
- if n != len(formattedBuf) {
- return n, io.ErrShortWrite
- }
- return len(buf), err
-}
-
-func (b *buildFile) create() (*Container, error) {
+func (b *buildFile) create() (*runtime.Container, error) {
if b.image == "" {
return nil, fmt.Errorf("Please provide a source image with `from` prior to run")
}
@@ -625,7 +652,7 @@ func (b *buildFile) create() (*Container, error) {
return c, nil
}
-func (b *buildFile) run(c *Container) error {
+func (b *buildFile) run(c *runtime.Container) error {
var errCh chan error
if b.verbose {
@@ -686,12 +713,12 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
b.tmpContainers[container.ID] = struct{}{}
fmt.Fprintf(b.outStream, " ---> Running in %s\n", utils.TruncateID(container.ID))
id = container.ID
+
if err := container.Mount(); err != nil {
return err
}
defer container.Unmount()
}
-
container := b.runtime.Get(id)
if container == nil {
return fmt.Errorf("An error occured while creating the container")
@@ -742,20 +769,19 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
if len(fileBytes) == 0 {
return "", ErrDockerfileEmpty
}
- dockerfile := string(fileBytes)
- dockerfile = lineContinuation.ReplaceAllString(dockerfile, "")
- stepN := 0
+ var (
+ dockerfile = lineContinuation.ReplaceAllString(stripComments(fileBytes), "")
+ stepN = 0
+ )
for _, line := range strings.Split(dockerfile, "\n") {
line = strings.Trim(strings.Replace(line, "\t", " ", -1), " \t\r\n")
- // Skip comments and empty line
- if len(line) == 0 || line[0] == '#' {
+ if len(line) == 0 {
continue
}
if err := b.BuildStep(fmt.Sprintf("%d", stepN), line); err != nil {
return "", err
}
stepN += 1
-
}
if b.image != "" {
fmt.Fprintf(b.outStream, "Successfully built %s\n", utils.TruncateID(b.image))
@@ -792,7 +818,21 @@ func (b *buildFile) BuildStep(name, expression string) error {
return nil
}
-func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *auth.AuthConfig, authConfigFile *auth.ConfigFile) BuildFile {
+func stripComments(raw []byte) string {
+ var (
+ out []string
+ lines = strings.Split(string(raw), "\n")
+ )
+ for _, l := range lines {
+ if len(l) == 0 || l[0] == '#' {
+ continue
+ }
+ out = append(out, l)
+ }
+ return strings.Join(out, "\n")
+}
+
+func NewBuildFile(srv *Server, outStream, errStream io.Writer, verbose, utilizeCache, rm bool, outOld io.Writer, sf *utils.StreamFormatter, auth *registry.AuthConfig, authConfigFile *registry.ConfigFile) BuildFile {
return &buildFile{
runtime: srv.runtime,
srv: srv,
diff --git a/server.go b/server/server.go
index 2c8a7ee4f8..0feaff4eac 100644
--- a/server.go
+++ b/server/server.go
@@ -1,15 +1,20 @@
-package docker
+package server
import (
"encoding/json"
"fmt"
+ "github.com/dotcloud/docker/api"
"github.com/dotcloud/docker/archive"
- "github.com/dotcloud/docker/auth"
+ "github.com/dotcloud/docker/daemonconfig"
"github.com/dotcloud/docker/dockerversion"
"github.com/dotcloud/docker/engine"
+ "github.com/dotcloud/docker/graph"
+ "github.com/dotcloud/docker/image"
"github.com/dotcloud/docker/pkg/graphdb"
+ "github.com/dotcloud/docker/pkg/signal"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/runconfig"
+ "github.com/dotcloud/docker/runtime"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
@@ -18,10 +23,10 @@ import (
"net/url"
"os"
"os/exec"
- "os/signal"
+ gosignal "os/signal"
"path"
"path/filepath"
- "runtime"
+ goruntime "runtime"
"strconv"
"strings"
"sync"
@@ -34,30 +39,50 @@ import (
// The signals SIGINT, SIGQUIT and SIGTERM are intercepted for cleanup.
func InitServer(job *engine.Job) engine.Status {
job.Logf("Creating server")
- srv, err := NewServer(job.Eng, DaemonConfigFromJob(job))
+ srv, err := NewServer(job.Eng, daemonconfig.ConfigFromJob(job))
if err != nil {
return job.Error(err)
}
- if srv.runtime.config.Pidfile != "" {
+ if srv.runtime.Config().Pidfile != "" {
job.Logf("Creating pidfile")
- if err := utils.CreatePidFile(srv.runtime.config.Pidfile); err != nil {
+ if err := utils.CreatePidFile(srv.runtime.Config().Pidfile); err != nil {
// FIXME: do we need fatal here instead of returning a job error?
log.Fatal(err)
}
}
job.Logf("Setting up signal traps")
c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
+ gosignal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
go func() {
- sig := <-c
- log.Printf("Received signal '%v', exiting\n", sig)
- utils.RemovePidFile(srv.runtime.config.Pidfile)
- srv.Close()
- os.Exit(0)
+ interruptCount := 0
+ for sig := range c {
+ go func() {
+ log.Printf("Received signal '%v', starting shutdown of docker...\n", sig)
+ switch sig {
+ case os.Interrupt, syscall.SIGTERM:
+ // If the user really wants to interrupt, let him do so.
+ if interruptCount < 3 {
+ interruptCount++
+ // Initiate the cleanup only once
+ if interruptCount == 1 {
+ utils.RemovePidFile(srv.runtime.Config().Pidfile)
+ srv.Close()
+ } else {
+ return
+ }
+ } else {
+ log.Printf("Force shutdown of docker, interrupting cleanup\n")
+ }
+ case syscall.SIGQUIT:
+ }
+ os.Exit(128 + int(sig.(syscall.Signal)))
+ }()
+ }
}()
job.Eng.Hack_SetGlobalVar("httpapi.server", srv)
job.Eng.Hack_SetGlobalVar("httpapi.runtime", srv.runtime)
+ // FIXME: 'insert' is deprecated and should be removed in a future version.
for name, handler := range map[string]engine.Handler{
"export": srv.ContainerExport,
"create": srv.ContainerCreate,
@@ -81,6 +106,7 @@ func InitServer(job *engine.Job) engine.Status {
"search": srv.ImagesSearch,
"changes": srv.ContainerChanges,
"top": srv.ContainerTop,
+ "version": srv.DockerVersion,
"load": srv.ImageLoad,
"build": srv.Build,
"pull": srv.ImagePull,
@@ -122,66 +148,40 @@ func (v *simpleVersionInfo) Version() string {
// for the container to exit.
// If a signal is given, then just send it to the container and return.
func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
- signalMap := map[string]syscall.Signal{
- "HUP": syscall.SIGHUP,
- "INT": syscall.SIGINT,
- "QUIT": syscall.SIGQUIT,
- "ILL": syscall.SIGILL,
- "TRAP": syscall.SIGTRAP,
- "ABRT": syscall.SIGABRT,
- "BUS": syscall.SIGBUS,
- "FPE": syscall.SIGFPE,
- "KILL": syscall.SIGKILL,
- "USR1": syscall.SIGUSR1,
- "SEGV": syscall.SIGSEGV,
- "USR2": syscall.SIGUSR2,
- "PIPE": syscall.SIGPIPE,
- "ALRM": syscall.SIGALRM,
- "TERM": syscall.SIGTERM,
- //"STKFLT": syscall.SIGSTKFLT,
- "CHLD": syscall.SIGCHLD,
- "CONT": syscall.SIGCONT,
- "STOP": syscall.SIGSTOP,
- "TSTP": syscall.SIGTSTP,
- "TTIN": syscall.SIGTTIN,
- "TTOU": syscall.SIGTTOU,
- "URG": syscall.SIGURG,
- "XCPU": syscall.SIGXCPU,
- "XFSZ": syscall.SIGXFSZ,
- "VTALRM": syscall.SIGVTALRM,
- "PROF": syscall.SIGPROF,
- "WINCH": syscall.SIGWINCH,
- "IO": syscall.SIGIO,
- //"PWR": syscall.SIGPWR,
- "SYS": syscall.SIGSYS,
- }
-
if n := len(job.Args); n < 1 || n > 2 {
return job.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name)
}
- name := job.Args[0]
- var sig uint64
+ var (
+ name = job.Args[0]
+ sig uint64
+ err error
+ )
+
+ // If we have a signal, look at it. Otherwise, do nothing
if len(job.Args) == 2 && job.Args[1] != "" {
- sig = uint64(signalMap[job.Args[1]])
- if sig == 0 {
- var err error
- // The largest legal signal is 31, so let's parse on 5 bits
- sig, err = strconv.ParseUint(job.Args[1], 10, 5)
- if err != nil {
+ // Check if we passed the signal as a number:
+ // The largest legal signal is 31, so let's parse on 5 bits
+ sig, err = strconv.ParseUint(job.Args[1], 10, 5)
+ if err != nil {
+ // The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL")
+ sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")])
+ if sig == 0 {
return job.Errorf("Invalid signal: %s", job.Args[1])
}
+
}
}
+
if container := srv.runtime.Get(name); container != nil {
// If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait())
if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
if err := container.Kill(); err != nil {
return job.Errorf("Cannot kill container %s: %s", name, err)
}
- srv.LogEvent("kill", container.ID, srv.runtime.repositories.ImageName(container.Image))
+ srv.LogEvent("kill", container.ID, srv.runtime.Repositories().ImageName(container.Image))
} else {
// Otherwise, just send the requested signal
- if err := container.kill(int(sig)); err != nil {
+ if err := container.KillSig(int(sig)); err != nil {
return job.Errorf("Cannot kill container %s: %s", name, err)
}
// FIXME: Add event for signals
@@ -195,19 +195,19 @@ func (srv *Server) ContainerKill(job *engine.Job) engine.Status {
func (srv *Server) Auth(job *engine.Job) engine.Status {
var (
err error
- authConfig = &auth.AuthConfig{}
+ authConfig = &registry.AuthConfig{}
)
job.GetenvJson("authConfig", authConfig)
// TODO: this is only done here because auth and registry need to be merged into one pkg
- if addr := authConfig.ServerAddress; addr != "" && addr != auth.IndexServerAddress() {
+ if addr := authConfig.ServerAddress; addr != "" && addr != registry.IndexServerAddress() {
addr, err = registry.ExpandAndVerifyRegistryUrl(addr)
if err != nil {
return job.Error(err)
}
authConfig.ServerAddress = addr
}
- status, err := auth.Login(authConfig, srv.HTTPRequestFactory(nil))
+ status, err := registry.Login(authConfig, srv.HTTPRequestFactory(nil))
if err != nil {
return job.Error(err)
}
@@ -243,6 +243,10 @@ func (srv *Server) Events(job *engine.Job) engine.Status {
listener := make(chan utils.JSONMessage)
srv.Lock()
+ if old, ok := srv.listeners[from]; ok {
+ delete(srv.listeners, from)
+ close(old)
+ }
srv.listeners[from] = listener
srv.Unlock()
job.Stdout.Write(nil) // flush
@@ -290,7 +294,7 @@ func (srv *Server) ContainerExport(job *engine.Job) engine.Status {
return job.Errorf("%s: %s", name, err)
}
// FIXME: factor job-specific LogEvent to engine.Job.Run()
- srv.LogEvent("export", container.ID, srv.runtime.repositories.ImageName(container.Image))
+ srv.LogEvent("export", container.ID, srv.runtime.Repositories().ImageName(container.Image))
return engine.StatusOK
}
return job.Errorf("No such container: %s", name)
@@ -315,7 +319,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status {
utils.Debugf("Serializing %s", name)
- rootRepo, err := srv.runtime.repositories.Get(name)
+ rootRepo, err := srv.runtime.Repositories().Get(name)
if err != nil {
return job.Error(err)
}
@@ -332,7 +336,7 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status {
}
// write repositories
- rootRepoMap := map[string]Repository{}
+ rootRepoMap := map[string]graph.Repository{}
rootRepoMap[name] = rootRepo
rootRepoJson, _ := json.Marshal(rootRepoMap)
@@ -361,8 +365,8 @@ func (srv *Server) ImageExport(job *engine.Job) engine.Status {
return engine.StatusOK
}
-func (srv *Server) exportImage(image *Image, tempdir string) error {
- for i := image; i != nil; {
+func (srv *Server) exportImage(img *image.Image, tempdir string) error {
+ for i := img; i != nil; {
// temporary directory
tmpImageDir := path.Join(tempdir, i.ID)
if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil {
@@ -399,10 +403,15 @@ func (srv *Server) exportImage(image *Image, tempdir string) error {
if err != nil {
return err
}
- if _, err = io.Copy(fsTar, fs); err != nil {
+ if written, err := io.Copy(fsTar, fs); err != nil {
+ return err
+ } else {
+ utils.Debugf("rendered layer for %s of [%d] size", i.ID, written)
+ }
+
+ if err = fsTar.Close(); err != nil {
return err
}
- fsTar.Close()
// find parent
if i.Parent != "" {
@@ -427,8 +436,8 @@ func (srv *Server) Build(job *engine.Job) engine.Status {
suppressOutput = job.GetenvBool("q")
noCache = job.GetenvBool("nocache")
rm = job.GetenvBool("rm")
- authConfig = &auth.AuthConfig{}
- configFile = &auth.ConfigFile{}
+ authConfig = &registry.AuthConfig{}
+ configFile = &registry.ConfigFile{}
tag string
context io.ReadCloser
)
@@ -477,11 +486,11 @@ func (srv *Server) Build(job *engine.Job) engine.Status {
sf := utils.NewStreamFormatter(job.GetenvBool("json"))
b := NewBuildFile(srv,
- &StdoutFormater{
+ &utils.StdoutFormater{
Writer: job.Stdout,
StreamFormatter: sf,
},
- &StderrFormater{
+ &utils.StderrFormater{
Writer: job.Stdout,
StreamFormatter: sf,
},
@@ -491,7 +500,7 @@ func (srv *Server) Build(job *engine.Job) engine.Status {
return job.Error(err)
}
if repoName != "" {
- srv.runtime.repositories.Set(repoName, tag, id, false)
+ srv.runtime.Repositories().Set(repoName, tag, id, false)
}
return engine.StatusOK
}
@@ -545,14 +554,14 @@ func (srv *Server) ImageLoad(job *engine.Job) engine.Status {
repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
if err == nil {
- repositories := map[string]Repository{}
+ repositories := map[string]graph.Repository{}
if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
return job.Error(err)
}
for imageName, tagMap := range repositories {
for tag, address := range tagMap {
- if err := srv.runtime.repositories.Set(imageName, tag, address, true); err != nil {
+ if err := srv.runtime.Repositories().Set(imageName, tag, address, true); err != nil {
return job.Error(err)
}
}
@@ -579,19 +588,19 @@ func (srv *Server) recursiveLoad(address, tmpImageDir string) error {
utils.Debugf("Error reading embedded tar", err)
return err
}
- img, err := NewImgJSON(imageJson)
+ img, err := image.NewImgJSON(imageJson)
if err != nil {
utils.Debugf("Error unmarshalling json", err)
return err
}
if img.Parent != "" {
- if !srv.runtime.graph.Exists(img.Parent) {
+ if !srv.runtime.Graph().Exists(img.Parent) {
if err := srv.recursiveLoad(img.Parent, tmpImageDir); err != nil {
return err
}
}
}
- if err := srv.runtime.graph.Register(imageJson, layer, img); err != nil {
+ if err := srv.runtime.Graph().Register(imageJson, layer, img); err != nil {
return err
}
}
@@ -607,12 +616,12 @@ func (srv *Server) ImagesSearch(job *engine.Job) engine.Status {
var (
term = job.Args[0]
metaHeaders = map[string][]string{}
- authConfig = &auth.AuthConfig{}
+ authConfig = &registry.AuthConfig{}
)
job.GetenvJson("authConfig", authConfig)
job.GetenvJson("metaHeaders", metaHeaders)
- r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), auth.IndexServerAddress())
+ r, err := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), registry.IndexServerAddress())
if err != nil {
return job.Error(err)
}
@@ -633,7 +642,9 @@ func (srv *Server) ImagesSearch(job *engine.Job) engine.Status {
return engine.StatusOK
}
+// FIXME: 'insert' is deprecated and should be removed in a future version.
func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
+ fmt.Fprintf(job.Stderr, "Warning: '%s' is deprecated and will be removed in a future version. Please use 'build' and 'ADD' instead.\n", job.Name)
if len(job.Args) != 3 {
return job.Errorf("Usage: %s IMAGE URL PATH\n", job.Name)
}
@@ -647,7 +658,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
sf := utils.NewStreamFormatter(job.GetenvBool("json"))
out := utils.NewWriteFlusher(job.Stdout)
- img, err := srv.runtime.repositories.LookupImage(name)
+ img, err := srv.runtime.Repositories().LookupImage(name)
if err != nil {
return job.Error(err)
}
@@ -658,7 +669,7 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
}
defer file.Body.Close()
- config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.sysInfo)
+ config, _, _, err := runconfig.Parse([]string{img.ID, "echo", "insert", url, path}, srv.runtime.SystemConfig())
if err != nil {
return job.Error(err)
}
@@ -682,14 +693,14 @@ func (srv *Server) ImageInsert(job *engine.Job) engine.Status {
}
func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
- images, _ := srv.runtime.graph.Map()
+ images, _ := srv.runtime.Graph().Map()
if images == nil {
return engine.StatusOK
}
job.Stdout.Write([]byte("digraph docker {\n"))
var (
- parentImage *Image
+ parentImage *image.Image
err error
)
for _, image := range images {
@@ -706,7 +717,7 @@ func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
reporefs := make(map[string][]string)
- for name, repository := range srv.runtime.repositories.Repositories {
+ for name, repository := range srv.runtime.Repositories().Repositories {
for tag, id := range repository {
reporefs[utils.TruncateID(id)] = append(reporefs[utils.TruncateID(id)], fmt.Sprintf("%s:%s", name, tag))
}
@@ -721,26 +732,26 @@ func (srv *Server) ImagesViz(job *engine.Job) engine.Status {
func (srv *Server) Images(job *engine.Job) engine.Status {
var (
- allImages map[string]*Image
+ allImages map[string]*image.Image
err error
)
if job.GetenvBool("all") {
- allImages, err = srv.runtime.graph.Map()
+ allImages, err = srv.runtime.Graph().Map()
} else {
- allImages, err = srv.runtime.graph.Heads()
+ allImages, err = srv.runtime.Graph().Heads()
}
if err != nil {
return job.Error(err)
}
lookup := make(map[string]*engine.Env)
- for name, repository := range srv.runtime.repositories.Repositories {
+ for name, repository := range srv.runtime.Repositories().Repositories {
if job.Getenv("filter") != "" {
if match, _ := path.Match(job.Getenv("filter"), name); !match {
continue
}
}
for tag, id := range repository {
- image, err := srv.runtime.graph.Get(id)
+ image, err := srv.runtime.Graph().Get(id)
if err != nil {
log.Printf("Warning: couldn't load %s from %s/%s: %s", id, name, tag, err)
continue
@@ -756,7 +767,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status {
out.Set("Id", image.ID)
out.SetInt64("Created", image.Created.Unix())
out.SetInt64("Size", image.Size)
- out.SetInt64("VirtualSize", image.getParentsSize(0)+image.Size)
+ out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
lookup[id] = out
}
@@ -777,7 +788,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status {
out.Set("Id", image.ID)
out.SetInt64("Created", image.Created.Unix())
out.SetInt64("Size", image.Size)
- out.SetInt64("VirtualSize", image.getParentsSize(0)+image.Size)
+ out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size)
outs.Add(out)
}
}
@@ -790,7 +801,7 @@ func (srv *Server) Images(job *engine.Job) engine.Status {
}
func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
- images, _ := srv.runtime.graph.Map()
+ images, _ := srv.runtime.Graph().Map()
var imgcount int
if images == nil {
imgcount = 0
@@ -806,24 +817,24 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
initPath := utils.DockerInitPath("")
if initPath == "" {
// if that fails, we'll just return the path from the runtime
- initPath = srv.runtime.sysInitPath
+ initPath = srv.runtime.SystemInitPath()
}
v := &engine.Env{}
v.SetInt("Containers", len(srv.runtime.List()))
v.SetInt("Images", imgcount)
- v.Set("Driver", srv.runtime.driver.String())
- v.SetJson("DriverStatus", srv.runtime.driver.Status())
- v.SetBool("MemoryLimit", srv.runtime.sysInfo.MemoryLimit)
- v.SetBool("SwapLimit", srv.runtime.sysInfo.SwapLimit)
- v.SetBool("IPv4Forwarding", !srv.runtime.sysInfo.IPv4ForwardingDisabled)
+ v.Set("Driver", srv.runtime.GraphDriver().String())
+ v.SetJson("DriverStatus", srv.runtime.GraphDriver().Status())
+ v.SetBool("MemoryLimit", srv.runtime.SystemConfig().MemoryLimit)
+ v.SetBool("SwapLimit", srv.runtime.SystemConfig().SwapLimit)
+ v.SetBool("IPv4Forwarding", !srv.runtime.SystemConfig().IPv4ForwardingDisabled)
v.SetBool("Debug", os.Getenv("DEBUG") != "")
v.SetInt("NFd", utils.GetTotalUsedFds())
- v.SetInt("NGoroutines", runtime.NumGoroutine())
- v.Set("ExecutionDriver", srv.runtime.execDriver.Name())
+ v.SetInt("NGoroutines", goruntime.NumGoroutine())
+ v.Set("ExecutionDriver", srv.runtime.ExecutionDriver().Name())
v.SetInt("NEventsListener", len(srv.listeners))
v.Set("KernelVersion", kernelVersion)
- v.Set("IndexServerAddress", auth.IndexServerAddress())
+ v.Set("IndexServerAddress", registry.IndexServerAddress())
v.Set("InitSha1", dockerversion.INITSHA1)
v.Set("InitPath", initPath)
if _, err := v.WriteTo(job.Stdout); err != nil {
@@ -832,18 +843,35 @@ func (srv *Server) DockerInfo(job *engine.Job) engine.Status {
return engine.StatusOK
}
+func (srv *Server) DockerVersion(job *engine.Job) engine.Status {
+ v := &engine.Env{}
+ v.Set("Version", dockerversion.VERSION)
+ v.SetJson("ApiVersion", api.APIVERSION)
+ v.Set("GitCommit", dockerversion.GITCOMMIT)
+ v.Set("GoVersion", goruntime.Version())
+ v.Set("Os", goruntime.GOOS)
+ v.Set("Arch", goruntime.GOARCH)
+ if kernelVersion, err := utils.GetKernelVersion(); err == nil {
+ v.Set("KernelVersion", kernelVersion.String())
+ }
+ if _, err := v.WriteTo(job.Stdout); err != nil {
+ return job.Error(err)
+ }
+ return engine.StatusOK
+}
+
func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
if n := len(job.Args); n != 1 {
return job.Errorf("Usage: %s IMAGE", job.Name)
}
name := job.Args[0]
- image, err := srv.runtime.repositories.LookupImage(name)
+ foundImage, err := srv.runtime.Repositories().LookupImage(name)
if err != nil {
return job.Error(err)
}
lookupMap := make(map[string][]string)
- for name, repository := range srv.runtime.repositories.Repositories {
+ for name, repository := range srv.runtime.Repositories().Repositories {
for tag, id := range repository {
// If the ID already has a reverse lookup, do not update it unless for "latest"
if _, exists := lookupMap[id]; !exists {
@@ -854,7 +882,7 @@ func (srv *Server) ImageHistory(job *engine.Job) engine.Status {
}
outs := engine.NewTable("Created", 0)
- err = image.WalkHistory(func(img *Image) error {
+ err = foundImage.WalkHistory(func(img *image.Image) error {
out := &engine.Env{}
out.Set("Id", img.ID)
out.SetInt64("Created", img.Created.Unix())
@@ -888,7 +916,7 @@ func (srv *Server) ContainerTop(job *engine.Job) engine.Status {
if !container.State.IsRunning() {
return job.Errorf("Container %s is not running", name)
}
- pids, err := srv.runtime.execDriver.GetPidsForContainer(container.ID)
+ pids, err := srv.runtime.ExecutionDriver().GetPidsForContainer(container.ID)
if err != nil {
return job.Error(err)
}
@@ -981,17 +1009,32 @@ func (srv *Server) Containers(job *engine.Job) engine.Status {
outs := engine.NewTable("Created", 0)
names := map[string][]string{}
- srv.runtime.containerGraph.Walk("/", func(p string, e *graphdb.Entity) error {
+ srv.runtime.ContainerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
names[e.ID()] = append(names[e.ID()], p)
return nil
}, -1)
+ var beforeCont, sinceCont *runtime.Container
+ if before != "" {
+ beforeCont = srv.runtime.Get(before)
+ if beforeCont == nil {
+ return job.Error(fmt.Errorf("Could not find container with name or id %s", before))
+ }
+ }
+
+ if since != "" {
+ sinceCont = srv.runtime.Get(since)
+ if sinceCont == nil {
+ return job.Error(fmt.Errorf("Could not find container with name or id %s", since))
+ }
+ }
+
for _, container := range srv.runtime.List() {
if !container.State.IsRunning() && !all && n <= 0 && since == "" && before == "" {
continue
}
if before != "" && !foundBefore {
- if container.ID == before || utils.TruncateID(container.ID) == before {
+ if container.ID == beforeCont.ID {
foundBefore = true
}
continue
@@ -999,15 +1042,21 @@ func (srv *Server) Containers(job *engine.Job) engine.Status {
if n > 0 && displayed == n {
break
}
- if container.ID == since || utils.TruncateID(container.ID) == since {
- break
+ if since != "" {
+ if container.ID == sinceCont.ID {
+ break
+ }
}
displayed++
out := &engine.Env{}
out.Set("Id", container.ID)
out.SetList("Names", names[container.ID])
- out.Set("Image", srv.runtime.repositories.ImageName(container.Image))
- out.Set("Command", fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " ")))
+ out.Set("Image", srv.runtime.Repositories().ImageName(container.Image))
+ if len(container.Args) > 0 {
+ out.Set("Command", fmt.Sprintf("\"%s %s\"", container.Path, container.ArgsAsString()))
+ } else {
+ out.Set("Command", fmt.Sprintf("\"%s\"", container.Path))
+ }
out.SetInt64("Created", container.Created.Unix())
out.Set("Status", container.State.String())
str, err := container.NetworkSettings.PortMappingAPI().ToListString()
@@ -1039,12 +1088,17 @@ func (srv *Server) ContainerCommit(job *engine.Job) engine.Status {
if container == nil {
return job.Errorf("No such container: %s", name)
}
- var config runconfig.Config
- if err := job.GetenvJson("config", &config); err != nil {
+ var config = container.Config
+ var newConfig runconfig.Config
+ if err := job.GetenvJson("config", &newConfig); err != nil {
return job.Error(err)
}
- img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &config)
+ if err := runconfig.Merge(&newConfig, config); err != nil {
+ return job.Error(err)
+ }
+
+ img, err := srv.runtime.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), &newConfig)
if err != nil {
return job.Error(err)
}
@@ -1060,7 +1114,7 @@ func (srv *Server) ImageTag(job *engine.Job) engine.Status {
if len(job.Args) == 3 {
tag = job.Args[2]
}
- if err := srv.runtime.repositories.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
+ if err := srv.runtime.Repositories().Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")); err != nil {
return job.Error(err)
}
return engine.StatusOK
@@ -1085,18 +1139,34 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
}
defer srv.poolRemove("pull", "layer:"+id)
- if !srv.runtime.graph.Exists(id) {
+ if !srv.runtime.Graph().Exists(id) {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
- imgJSON, imgSize, err := r.GetRemoteImageJSON(id, endpoint, token)
- if err != nil {
- out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
- // FIXME: Keep going in case of error?
- return err
- }
- img, err := NewImgJSON(imgJSON)
- if err != nil {
- out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
- return fmt.Errorf("Failed to parse json: %s", err)
+ var (
+ imgJSON []byte
+ imgSize int
+ err error
+ img *image.Image
+ )
+ retries := 5
+ for j := 1; j <= retries; j++ {
+ imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
+ if err != nil && j == retries {
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
+ return err
+ } else if err != nil {
+ time.Sleep(time.Duration(j) * 500 * time.Millisecond)
+ continue
+ }
+ img, err = image.NewImgJSON(imgJSON)
+ if err != nil && j == retries {
+ out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
+ return fmt.Errorf("Failed to parse json: %s", err)
+ } else if err != nil {
+ time.Sleep(time.Duration(j) * 500 * time.Millisecond)
+ continue
+ } else {
+ break
+ }
}
// Get the layer
@@ -1107,7 +1177,7 @@ func (srv *Server) pullImage(r *registry.Registry, out io.Writer, imgID, endpoin
return err
}
defer layer.Close()
- if err := srv.runtime.graph.Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
+ if err := srv.runtime.Graph().Register(imgJSON, utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"), img); err != nil {
out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
return err
}
@@ -1242,11 +1312,11 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, localName
if askedTag != "" && tag != askedTag {
continue
}
- if err := srv.runtime.repositories.Set(localName, tag, id, true); err != nil {
+ if err := srv.runtime.Repositories().Set(localName, tag, id, true); err != nil {
return err
}
}
- if err := srv.runtime.repositories.Save(); err != nil {
+ if err := srv.runtime.Repositories().Save(); err != nil {
return err
}
@@ -1304,7 +1374,7 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status {
localName = job.Args[0]
tag string
sf = utils.NewStreamFormatter(job.GetenvBool("json"))
- authConfig = &auth.AuthConfig{}
+ authConfig = &registry.AuthConfig{}
metaHeaders map[string][]string
)
if len(job.Args) > 1 {
@@ -1342,7 +1412,7 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status {
return job.Error(err)
}
- if endpoint == auth.IndexServerAddress() {
+ if endpoint == registry.IndexServerAddress() {
// If pull "index.docker.io/foo/bar", it's stored locally under "foo/bar"
localName = remoteName
}
@@ -1355,7 +1425,7 @@ func (srv *Server) ImagePull(job *engine.Job) engine.Status {
}
// Retrieve the all the images to be uploaded in the correct order
-func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[string][]string, error) {
+func (srv *Server) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) {
var (
imageList []string
imagesSeen map[string]bool = make(map[string]bool)
@@ -1363,11 +1433,14 @@ func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[stri
)
for tag, id := range localRepo {
+ if requestedTag != "" && requestedTag != tag {
+ continue
+ }
var imageListForThisTag []string
tagsByImage[id] = append(tagsByImage[id], tag)
- for img, err := srv.runtime.graph.Get(id); img != nil; img, err = img.GetParent() {
+ for img, err := srv.runtime.Graph().Get(id); img != nil; img, err = img.GetParent() {
if err != nil {
return nil, nil, err
}
@@ -1389,25 +1462,29 @@ func (srv *Server) getImageList(localRepo map[string]string) ([]string, map[stri
// append to main image list
imageList = append(imageList, imageListForThisTag...)
}
-
+ if len(imageList) == 0 {
+ return nil, nil, fmt.Errorf("No images found for the requested repository / tag")
+ }
utils.Debugf("Image list: %v", imageList)
utils.Debugf("Tags by image: %v", tagsByImage)
return imageList, tagsByImage, nil
}
-func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, sf *utils.StreamFormatter) error {
+func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
out = utils.NewWriteFlusher(out)
utils.Debugf("Local repo: %s", localRepo)
- imgList, tagsByImage, err := srv.getImageList(localRepo)
+ imgList, tagsByImage, err := srv.getImageList(localRepo, tag)
if err != nil {
return err
}
out.Write(sf.FormatStatus("", "Sending image list"))
- var repoData *registry.RepositoryData
- var imageIndex []*registry.ImgData
+ var (
+ repoData *registry.RepositoryData
+ imageIndex []*registry.ImgData
+ )
for _, imgId := range imgList {
if tags, exists := tagsByImage[imgId]; exists {
@@ -1442,8 +1519,12 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
return err
}
+ nTag := 1
+ if tag == "" {
+ nTag = len(localRepo)
+ }
for _, ep := range repoData.Endpoints {
- out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, len(localRepo)))
+ out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag))
for _, imgId := range imgList {
if r.LookupRemoteImage(imgId, ep, repoData.Tokens) {
@@ -1474,7 +1555,7 @@ func (srv *Server) pushRepository(r *registry.Registry, out io.Writer, localName
func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
out = utils.NewWriteFlusher(out)
- jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json"))
+ jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.Graph().Root, imgID, "json"))
if err != nil {
return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
}
@@ -1493,13 +1574,15 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
return "", err
}
- layerData, err := srv.runtime.graph.TempLayerArchive(imgID, archive.Uncompressed, sf, out)
+ layerData, err := srv.runtime.Graph().TempLayerArchive(imgID, archive.Uncompressed, sf, out)
if err != nil {
return "", fmt.Errorf("Failed to generate layer archive: %s", err)
}
defer os.RemoveAll(layerData.Name())
// Send the layer
+ utils.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)
+
checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID, utils.ProgressReader(layerData, int(layerData.Size), out, sf, false, utils.TruncateID(imgData.ID), "Pushing"), ep, token, jsonRaw)
if err != nil {
return "", err
@@ -1523,10 +1606,11 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status {
var (
localName = job.Args[0]
sf = utils.NewStreamFormatter(job.GetenvBool("json"))
- authConfig = &auth.AuthConfig{}
+ authConfig = &registry.AuthConfig{}
metaHeaders map[string][]string
)
+ tag := job.Getenv("tag")
job.GetenvJson("authConfig", authConfig)
job.GetenvJson("metaHeaders", metaHeaders)
if _, err := srv.poolAdd("push", localName); err != nil {
@@ -1545,18 +1629,21 @@ func (srv *Server) ImagePush(job *engine.Job) engine.Status {
return job.Error(err)
}
- img, err := srv.runtime.graph.Get(localName)
+ img, err := srv.runtime.Graph().Get(localName)
r, err2 := registry.NewRegistry(authConfig, srv.HTTPRequestFactory(metaHeaders), endpoint)
if err2 != nil {
return job.Error(err2)
}
if err != nil {
- reposLen := len(srv.runtime.repositories.Repositories[localName])
+ reposLen := 1
+ if tag == "" {
+ reposLen = len(srv.runtime.Repositories().Repositories[localName])
+ }
job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", localName, reposLen))
// If it fails, try to get the repository
- if localRepo, exists := srv.runtime.repositories.Repositories[localName]; exists {
- if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, sf); err != nil {
+ if localRepo, exists := srv.runtime.Repositories().Repositories[localName]; exists {
+ if err := srv.pushRepository(r, job.Stdout, localName, remoteName, localRepo, tag, sf); err != nil {
return job.Error(err)
}
return engine.StatusOK
@@ -1611,13 +1698,13 @@ func (srv *Server) ImageImport(job *engine.Job) engine.Status {
defer progressReader.Close()
archive = progressReader
}
- img, err := srv.runtime.graph.Create(archive, nil, "Imported from "+src, "", nil)
+ img, err := srv.runtime.Graph().Create(archive, "", "", "Imported from "+src, "", nil, nil)
if err != nil {
return job.Error(err)
}
// Optionally register the image at REPO/TAG
if repo != "" {
- if err := srv.runtime.repositories.Set(repo, tag, img.ID, true); err != nil {
+ if err := srv.runtime.Repositories().Set(repo, tag, img.ID, true); err != nil {
return job.Error(err)
}
}
@@ -1636,38 +1723,29 @@ func (srv *Server) ContainerCreate(job *engine.Job) engine.Status {
if config.Memory != 0 && config.Memory < 524288 {
return job.Errorf("Minimum memory limit allowed is 512k")
}
- if config.Memory > 0 && !srv.runtime.sysInfo.MemoryLimit {
+ if config.Memory > 0 && !srv.runtime.SystemConfig().MemoryLimit {
job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n")
config.Memory = 0
}
- if config.Memory > 0 && !srv.runtime.sysInfo.SwapLimit {
+ if config.Memory > 0 && !srv.runtime.SystemConfig().SwapLimit {
job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
- resolvConf, err := utils.GetResolvConf()
- if err != nil {
- return job.Error(err)
- }
- if !config.NetworkDisabled && len(config.Dns) == 0 && len(srv.runtime.config.Dns) == 0 && utils.CheckLocalDns(resolvConf) {
- job.Errorf("Local (127.0.0.1) DNS resolver found in resolv.conf and containers can't use it. Using default external servers : %v\n", defaultDns)
- config.Dns = defaultDns
- }
-
container, buildWarnings, err := srv.runtime.Create(config, name)
if err != nil {
- if srv.runtime.graph.IsNotExist(err) {
+ if srv.runtime.Graph().IsNotExist(err) {
_, tag := utils.ParseRepositoryTag(config.Image)
if tag == "" {
- tag = DEFAULTTAG
+ tag = graph.DEFAULTTAG
}
return job.Errorf("No such image: %s (tag: %s)", config.Image, tag)
}
return job.Error(err)
}
- if !container.Config.NetworkDisabled && srv.runtime.sysInfo.IPv4ForwardingDisabled {
+ if !container.Config.NetworkDisabled && srv.runtime.SystemConfig().IPv4ForwardingDisabled {
job.Errorf("IPv4 forwarding is disabled.\n")
}
- srv.LogEvent("create", container.ID, srv.runtime.repositories.ImageName(container.Image))
+ srv.LogEvent("create", container.ID, srv.runtime.Repositories().ImageName(container.Image))
// FIXME: this is necessary because runtime.Create might return a nil container
// with a non-nil error. This should not happen! Once it's fixed we
// can remove this workaround.
@@ -1695,7 +1773,7 @@ func (srv *Server) ContainerRestart(job *engine.Job) engine.Status {
if err := container.Restart(int(t)); err != nil {
return job.Errorf("Cannot restart container %s: %s\n", name, err)
}
- srv.LogEvent("restart", container.ID, srv.runtime.repositories.ImageName(container.Image))
+ srv.LogEvent("restart", container.ID, srv.runtime.Repositories().ImageName(container.Image))
} else {
return job.Errorf("No such container: %s\n", name)
}
@@ -1717,7 +1795,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
if container == nil {
return job.Errorf("No such link: %s", name)
}
- name, err := getFullName(name)
+ name, err := runtime.GetFullContainerName(name)
if err != nil {
job.Error(err)
}
@@ -1725,21 +1803,17 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
if parent == "/" {
return job.Errorf("Conflict, cannot remove the default name of the container")
}
- pe := srv.runtime.containerGraph.Get(parent)
+ pe := srv.runtime.ContainerGraph().Get(parent)
if pe == nil {
return job.Errorf("Cannot get parent %s for name %s", parent, name)
}
parentContainer := srv.runtime.Get(pe.ID())
- if parentContainer != nil && parentContainer.activeLinks != nil {
- if link, exists := parentContainer.activeLinks[n]; exists {
- link.Disable()
- } else {
- utils.Debugf("Could not find active link for %s", name)
- }
+ if parentContainer != nil {
+ parentContainer.DisableLink(n)
}
- if err := srv.runtime.containerGraph.Delete(name); err != nil {
+ if err := srv.runtime.ContainerGraph().Delete(name); err != nil {
return job.Error(err)
}
return engine.StatusOK
@@ -1758,13 +1832,13 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
if err := srv.runtime.Destroy(container); err != nil {
return job.Errorf("Cannot destroy container %s: %s", name, err)
}
- srv.LogEvent("destroy", container.ID, srv.runtime.repositories.ImageName(container.Image))
+ srv.LogEvent("destroy", container.ID, srv.runtime.Repositories().ImageName(container.Image))
if removeVolume {
var (
volumes = make(map[string]struct{})
binds = make(map[string]struct{})
- usedVolumes = make(map[string]*Container)
+ usedVolumes = make(map[string]*runtime.Container)
)
// the volume id is always the base of the path
@@ -1773,7 +1847,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
}
// populate bind map so that they can be skipped and not removed
- for _, bind := range container.hostConfig.Binds {
+ for _, bind := range container.HostConfig().Binds {
source := strings.Split(bind, ":")[0]
// TODO: refactor all volume stuff, all of it
// this is very important that we eval the link
@@ -1812,7 +1886,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
log.Printf("The volume %s is used by the container %s. Impossible to remove it. Skipping.\n", volumeId, c.ID)
continue
}
- if err := srv.runtime.volumes.Delete(volumeId); err != nil {
+ if err := srv.runtime.Volumes().Delete(volumeId); err != nil {
return job.Errorf("Error calling volumes.Delete(%q): %v", volumeId, err)
}
}
@@ -1823,7 +1897,7 @@ func (srv *Server) ContainerDestroy(job *engine.Job) engine.Status {
return engine.StatusOK
}
-func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force bool) error {
+func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force, noprune bool) error {
var (
repoName, tag string
tags = []string{}
@@ -1831,12 +1905,12 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo
repoName, tag = utils.ParseRepositoryTag(name)
if tag == "" {
- tag = DEFAULTTAG
+ tag = graph.DEFAULTTAG
}
- img, err := srv.runtime.repositories.LookupImage(name)
+ img, err := srv.runtime.Repositories().LookupImage(name)
if err != nil {
- if r, _ := srv.runtime.repositories.Get(repoName); r != nil {
+ if r, _ := srv.runtime.Repositories().Get(repoName); r != nil {
return fmt.Errorf("No such image: %s:%s", repoName, tag)
}
return fmt.Errorf("No such image: %s", name)
@@ -1847,14 +1921,14 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo
tag = ""
}
- byParents, err := srv.runtime.graph.ByParent()
+ byParents, err := srv.runtime.Graph().ByParent()
if err != nil {
return err
}
//If delete by id, see if the id belong only to one repository
if repoName == "" {
- for _, repoAndTag := range srv.runtime.repositories.ByID()[img.ID] {
+ for _, repoAndTag := range srv.runtime.Repositories().ByID()[img.ID] {
parsedRepo, parsedTag := utils.ParseRepositoryTag(repoAndTag)
if repoName == "" || repoName == parsedRepo {
repoName = parsedRepo
@@ -1877,7 +1951,7 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo
//Untag the current image
for _, tag := range tags {
- tagDeleted, err := srv.runtime.repositories.Delete(repoName, tag)
+ tagDeleted, err := srv.runtime.Repositories().Delete(repoName, tag)
if err != nil {
return err
}
@@ -1888,24 +1962,24 @@ func (srv *Server) DeleteImage(name string, imgs *engine.Table, first, force boo
srv.LogEvent("untag", img.ID, "")
}
}
- tags = srv.runtime.repositories.ByID()[img.ID]
+ tags = srv.runtime.Repositories().ByID()[img.ID]
if (len(tags) <= 1 && repoName == "") || len(tags) == 0 {
if len(byParents[img.ID]) == 0 {
if err := srv.canDeleteImage(img.ID); err != nil {
return err
}
- if err := srv.runtime.repositories.DeleteAll(img.ID); err != nil {
+ if err := srv.runtime.Repositories().DeleteAll(img.ID); err != nil {
return err
}
- if err := srv.runtime.graph.Delete(img.ID); err != nil {
+ if err := srv.runtime.Graph().Delete(img.ID); err != nil {
return err
}
out := &engine.Env{}
out.Set("Deleted", img.ID)
imgs.Add(out)
srv.LogEvent("delete", img.ID, "")
- if img.Parent != "" {
- err := srv.DeleteImage(img.Parent, imgs, false, force)
+ if img.Parent != "" && !noprune {
+ err := srv.DeleteImage(img.Parent, imgs, false, force, noprune)
if first {
return err
}
@@ -1922,7 +1996,7 @@ func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
return job.Errorf("Usage: %s IMAGE", job.Name)
}
imgs := engine.NewTable("", 0)
- if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force")); err != nil {
+ if err := srv.DeleteImage(job.Args[0], imgs, true, job.GetenvBool("force"), job.GetenvBool("noprune")); err != nil {
return job.Error(err)
}
if len(imgs.Data) == 0 {
@@ -1936,12 +2010,12 @@ func (srv *Server) ImageDelete(job *engine.Job) engine.Status {
func (srv *Server) canDeleteImage(imgID string) error {
for _, container := range srv.runtime.List() {
- parent, err := srv.runtime.repositories.LookupImage(container.Image)
+ parent, err := srv.runtime.Repositories().LookupImage(container.Image)
if err != nil {
return err
}
- if err := parent.WalkHistory(func(p *Image) error {
+ if err := parent.WalkHistory(func(p *image.Image) error {
if imgID == p.ID {
return fmt.Errorf("Conflict, cannot delete %s because the container %s is using it", utils.TruncateID(imgID), utils.TruncateID(container.ID))
}
@@ -1953,10 +2027,9 @@ func (srv *Server) canDeleteImage(imgID string) error {
return nil
}
-func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*Image, error) {
-
+func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*image.Image, error) {
// Retrieve all images
- images, err := srv.runtime.graph.Map()
+ images, err := srv.runtime.Graph().Map()
if err != nil {
return nil, err
}
@@ -1971,9 +2044,9 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*Imag
}
// Loop on the children of the given image and check the config
- var match *Image
+ var match *image.Image
for elem := range imageMap[imgID] {
- img, err := srv.runtime.graph.Get(elem)
+ img, err := srv.runtime.Graph().Get(elem)
if err != nil {
return nil, err
}
@@ -1986,12 +2059,12 @@ func (srv *Server) ImageGetCached(imgID string, config *runconfig.Config) (*Imag
return match, nil
}
-func (srv *Server) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error {
+func (srv *Server) RegisterLinks(container *runtime.Container, hostConfig *runconfig.HostConfig) error {
runtime := srv.runtime
if hostConfig != nil && hostConfig.Links != nil {
for _, l := range hostConfig.Links {
- parts, err := parseLink(l)
+ parts, err := utils.PartParser("name:alias", l)
if err != nil {
return err
}
@@ -2010,7 +2083,7 @@ func (srv *Server) RegisterLinks(container *Container, hostConfig *runconfig.Hos
// After we load all the links into the runtime
// set them to nil on the hostconfig
hostConfig.Links = nil
- if err := container.writeHostConfig(); err != nil {
+ if err := container.WriteHostConfig(); err != nil {
return err
}
}
@@ -2021,9 +2094,11 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
if len(job.Args) < 1 {
return job.Errorf("Usage: %s container_id", job.Name)
}
- name := job.Args[0]
- runtime := srv.runtime
- container := runtime.Get(name)
+ var (
+ name = job.Args[0]
+ runtime = srv.runtime
+ container = runtime.Get(name)
+ )
if container == nil {
return job.Errorf("No such container: %s", name)
@@ -2058,13 +2133,13 @@ func (srv *Server) ContainerStart(job *engine.Job) engine.Status {
if err := srv.RegisterLinks(container, hostConfig); err != nil {
return job.Error(err)
}
- container.hostConfig = hostConfig
+ container.SetHostConfig(hostConfig)
container.ToDisk()
}
if err := container.Start(); err != nil {
return job.Errorf("Cannot start container %s: %s", name, err)
}
- srv.LogEvent("start", container.ID, runtime.repositories.ImageName(container.Image))
+ srv.LogEvent("start", container.ID, runtime.Repositories().ImageName(container.Image))
return engine.StatusOK
}
@@ -2084,7 +2159,7 @@ func (srv *Server) ContainerStop(job *engine.Job) engine.Status {
if err := container.Stop(int(t)); err != nil {
return job.Errorf("Cannot stop container %s: %s\n", name, err)
}
- srv.LogEvent("stop", container.ID, srv.runtime.repositories.ImageName(container.Image))
+ srv.LogEvent("stop", container.ID, srv.runtime.Repositories().ImageName(container.Image))
} else {
return job.Errorf("No such container: %s\n", name)
}
@@ -2230,15 +2305,15 @@ func (srv *Server) ContainerAttach(job *engine.Job) engine.Status {
return engine.StatusOK
}
-func (srv *Server) ContainerInspect(name string) (*Container, error) {
+func (srv *Server) ContainerInspect(name string) (*runtime.Container, error) {
if container := srv.runtime.Get(name); container != nil {
return container, nil
}
return nil, fmt.Errorf("No such container: %s", name)
}
-func (srv *Server) ImageInspect(name string) (*Image, error) {
- if image, err := srv.runtime.repositories.LookupImage(name); err == nil && image != nil {
+func (srv *Server) ImageInspect(name string) (*image.Image, error) {
+ if image, err := srv.runtime.Repositories().LookupImage(name); err == nil && image != nil {
return image, nil
}
return nil, fmt.Errorf("No such image: %s", name)
@@ -2273,9 +2348,9 @@ func (srv *Server) JobInspect(job *engine.Job) engine.Status {
return job.Error(errContainer)
}
object = &struct {
- *Container
+ *runtime.Container
HostConfig *runconfig.HostConfig
- }{container, container.hostConfig}
+ }{container, container.HostConfig()}
default:
return job.Errorf("Unknown kind: %s", kind)
}
@@ -2314,8 +2389,8 @@ func (srv *Server) ContainerCopy(job *engine.Job) engine.Status {
return job.Errorf("No such container: %s", name)
}
-func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) {
- runtime, err := NewRuntime(config, eng)
+func NewServer(eng *engine.Engine, config *daemonconfig.Config) (*Server, error) {
+ runtime, err := runtime.NewRuntime(config, eng)
if err != nil {
return nil, err
}
@@ -2328,21 +2403,20 @@ func NewServer(eng *engine.Engine, config *DaemonConfig) (*Server, error) {
listeners: make(map[string]chan utils.JSONMessage),
running: true,
}
- runtime.srv = srv
+ runtime.SetServer(srv)
return srv, nil
}
func (srv *Server) HTTPRequestFactory(metaHeaders map[string][]string) *utils.HTTPRequestFactory {
- srv.Lock()
- defer srv.Unlock()
- v := dockerVersion()
httpVersion := make([]utils.VersionInfo, 0, 4)
- httpVersion = append(httpVersion, &simpleVersionInfo{"docker", v.Get("Version")})
- httpVersion = append(httpVersion, &simpleVersionInfo{"go", v.Get("GoVersion")})
- httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", v.Get("GitCommit")})
- httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", v.Get("KernelVersion")})
- httpVersion = append(httpVersion, &simpleVersionInfo{"os", v.Get("Os")})
- httpVersion = append(httpVersion, &simpleVersionInfo{"arch", v.Get("Arch")})
+ httpVersion = append(httpVersion, &simpleVersionInfo{"docker", dockerversion.VERSION})
+ httpVersion = append(httpVersion, &simpleVersionInfo{"go", goruntime.Version()})
+ httpVersion = append(httpVersion, &simpleVersionInfo{"git-commit", dockerversion.GITCOMMIT})
+ if kernelVersion, err := utils.GetKernelVersion(); err == nil {
+ httpVersion = append(httpVersion, &simpleVersionInfo{"kernel", kernelVersion.String()})
+ }
+ httpVersion = append(httpVersion, &simpleVersionInfo{"os", goruntime.GOOS})
+ httpVersion = append(httpVersion, &simpleVersionInfo{"arch", goruntime.GOARCH})
ud := utils.NewHTTPUserAgentDecorator(httpVersion...)
md := &utils.HTTPMetaHeadersDecorator{
Headers: metaHeaders,
@@ -2390,13 +2464,19 @@ func (srv *Server) IsRunning() bool {
}
func (srv *Server) Close() error {
+ if srv == nil {
+ return nil
+ }
srv.SetRunning(false)
+ if srv.runtime == nil {
+ return nil
+ }
return srv.runtime.Close()
}
type Server struct {
sync.RWMutex
- runtime *Runtime
+ runtime *runtime.Runtime
pullingPool map[string]chan struct{}
pushingPool map[string]chan struct{}
events []utils.JSONMessage
diff --git a/server_unit_test.go b/server/server_unit_test.go
index 6a90ca5892..b471c5c581 100644
--- a/server_unit_test.go
+++ b/server/server_unit_test.go
@@ -1,4 +1,4 @@
-package docker
+package server
import (
"github.com/dotcloud/docker/utils"
diff --git a/sysinit/sysinit.go b/sysinit/sysinit.go
index c84c05982c..50c858296f 100644
--- a/sysinit/sysinit.go
+++ b/sysinit/sysinit.go
@@ -1,33 +1,16 @@
package sysinit
import (
- "encoding/json"
"flag"
"fmt"
- "github.com/dotcloud/docker/execdriver"
- _ "github.com/dotcloud/docker/execdriver/lxc"
- _ "github.com/dotcloud/docker/execdriver/native"
- "io/ioutil"
+ "github.com/dotcloud/docker/runtime/execdriver"
+ _ "github.com/dotcloud/docker/runtime/execdriver/lxc"
+ _ "github.com/dotcloud/docker/runtime/execdriver/native"
"log"
"os"
- "strings"
)
-// Clear environment pollution introduced by lxc-start
-func setupEnv(args *execdriver.InitArgs) {
- os.Clearenv()
- for _, kv := range args.Env {
- parts := strings.SplitN(kv, "=", 2)
- if len(parts) == 1 {
- parts = append(parts, "")
- }
- os.Setenv(parts[0], parts[1])
- }
-}
-
func executeProgram(args *execdriver.InitArgs) error {
- setupEnv(args)
-
dockerInitFct, err := execdriver.GetInitFunc(args.Driver)
if err != nil {
panic(err)
@@ -59,25 +42,12 @@ func SysInit() {
)
flag.Parse()
- // Get env
- var env []string
- content, err := ioutil.ReadFile(".dockerenv")
- if err != nil {
- log.Fatalf("Unable to load environment variables: %v", err)
- }
- if err := json.Unmarshal(content, &env); err != nil {
- log.Fatalf("Unable to unmarshal environment variables: %v", err)
- }
- // Propagate the plugin-specific container env variable
- env = append(env, "container="+os.Getenv("container"))
-
args := &execdriver.InitArgs{
User: *user,
Gateway: *gateway,
Ip: *ip,
WorkDir: *workDir,
Privileged: *privileged,
- Env: env,
Args: flag.Args(),
Mtu: *mtu,
Driver: *driver,
diff --git a/utils/jsonmessage.go b/utils/jsonmessage.go
index 9050dda746..6be421be94 100644
--- a/utils/jsonmessage.go
+++ b/utils/jsonmessage.go
@@ -85,7 +85,7 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
return jm.Error
}
var endl string
- if isTerminal {
+ if isTerminal && jm.Stream == "" {
// <ESC>[2K = erase entire current line
fmt.Fprintf(out, "%c[2K\r", 27)
endl = "\r"
@@ -131,7 +131,7 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr,
if jm.Progress != nil {
jm.Progress.terminalFd = terminalFd
}
- if jm.Progress != nil || jm.ProgressMessage != "" {
+ if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
line, ok := ids[jm.ID]
if !ok {
line = len(ids)
@@ -141,17 +141,15 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr,
} else {
diff = len(ids) - line
}
- if isTerminal {
+ if jm.ID != "" && isTerminal {
// <ESC>[{diff}A = move cursor up diff rows
fmt.Fprintf(out, "%c[%dA", 27, diff)
}
}
err := jm.Display(out, isTerminal)
- if jm.ID != "" {
- if isTerminal {
- // <ESC>[{diff}B = move cursor down diff rows
- fmt.Fprintf(out, "%c[%dB", 27, diff)
- }
+ if jm.ID != "" && isTerminal {
+ // <ESC>[{diff}B = move cursor down diff rows
+ fmt.Fprintf(out, "%c[%dB", 27, diff)
}
if err != nil {
return err
diff --git a/utils/signal.go b/utils/signal.go
deleted file mode 100644
index 0cac7d113f..0000000000
--- a/utils/signal.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package utils
-
-import (
- "os"
- "os/signal"
-)
-
-func StopCatch(sigc chan os.Signal) {
- signal.Stop(sigc)
- close(sigc)
-}
diff --git a/utils/signal_darwin.go b/utils/signal_freebsd.go
index 28730db8e5..65a700e894 100644
--- a/utils/signal_darwin.go
+++ b/utils/signal_freebsd.go
@@ -13,11 +13,9 @@ func CatchAll(sigc chan os.Signal) {
syscall.SIGBUS,
syscall.SIGCHLD,
syscall.SIGCONT,
- syscall.SIGEMT,
syscall.SIGFPE,
syscall.SIGHUP,
syscall.SIGILL,
- syscall.SIGINFO,
syscall.SIGINT,
syscall.SIGIO,
syscall.SIGIOT,
diff --git a/utils/signal_linux.go b/utils/signal_linux.go
deleted file mode 100644
index 26cfd56967..0000000000
--- a/utils/signal_linux.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package utils
-
-import (
- "os"
- "os/signal"
- "syscall"
-)
-
-func CatchAll(sigc chan os.Signal) {
- signal.Notify(sigc,
- syscall.SIGABRT,
- syscall.SIGALRM,
- syscall.SIGBUS,
- syscall.SIGCHLD,
- syscall.SIGCLD,
- syscall.SIGCONT,
- syscall.SIGFPE,
- syscall.SIGHUP,
- syscall.SIGILL,
- syscall.SIGINT,
- syscall.SIGIO,
- syscall.SIGIOT,
- syscall.SIGKILL,
- syscall.SIGPIPE,
- syscall.SIGPOLL,
- syscall.SIGPROF,
- syscall.SIGPWR,
- syscall.SIGQUIT,
- syscall.SIGSEGV,
- syscall.SIGSTKFLT,
- syscall.SIGSTOP,
- syscall.SIGSYS,
- syscall.SIGTERM,
- syscall.SIGTRAP,
- syscall.SIGTSTP,
- syscall.SIGTTIN,
- syscall.SIGTTOU,
- syscall.SIGUNUSED,
- syscall.SIGURG,
- syscall.SIGUSR1,
- syscall.SIGUSR2,
- syscall.SIGVTALRM,
- syscall.SIGWINCH,
- syscall.SIGXCPU,
- syscall.SIGXFSZ,
- )
-}
diff --git a/utils/stdcopy.go b/utils/stdcopy.go
index 3cb8ab02b3..8b43386140 100644
--- a/utils/stdcopy.go
+++ b/utils/stdcopy.go
@@ -108,12 +108,13 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
// Retrieve the size of the frame
frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
+ Debugf("framesize: %d", frameSize)
// Check if the buffer is big enough to read the frame.
// Extend it if necessary.
if frameSize+StdWriterPrefixLen > bufLen {
- Debugf("Extending buffer cap.")
- buf = append(buf, make([]byte, frameSize-len(buf)+1)...)
+ Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
+ buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
bufLen = len(buf)
}
diff --git a/utils/streamformatter.go b/utils/streamformatter.go
index 8876fa5cb7..d2758d3ca6 100644
--- a/utils/streamformatter.go
+++ b/utils/streamformatter.go
@@ -3,6 +3,7 @@ package utils
import (
"encoding/json"
"fmt"
+ "io"
)
type StreamFormatter struct {
@@ -90,3 +91,31 @@ func (sf *StreamFormatter) Used() bool {
func (sf *StreamFormatter) Json() bool {
return sf.json
}
+
+type StdoutFormater struct {
+ io.Writer
+ *StreamFormatter
+}
+
+func (sf *StdoutFormater) Write(buf []byte) (int, error) {
+ formattedBuf := sf.StreamFormatter.FormatStream(string(buf))
+ n, err := sf.Writer.Write(formattedBuf)
+ if n != len(formattedBuf) {
+ return n, io.ErrShortWrite
+ }
+ return len(buf), err
+}
+
+type StderrFormater struct {
+ io.Writer
+ *StreamFormatter
+}
+
+func (sf *StderrFormater) Write(buf []byte) (int, error) {
+ formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m")
+ n, err := sf.Writer.Write(formattedBuf)
+ if n != len(formattedBuf) {
+ return n, io.ErrShortWrite
+ }
+ return len(buf), err
+}
diff --git a/utils/utils.go b/utils/utils.go
index 07b8f6a3d0..1fe2e87b4f 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -2,6 +2,7 @@ package utils
import (
"bytes"
+ "crypto/rand"
"crypto/sha1"
"crypto/sha256"
"encoding/hex"
@@ -24,6 +25,11 @@ import (
"time"
)
+type KeyValuePair struct {
+ Key string
+ Value string
+}
+
// A common interface to access the Fatal method of
// both testing.B and testing.T.
type Fataler interface {
@@ -493,6 +499,34 @@ func TruncateID(id string) string {
return id[:shortLen]
}
+// GenerateRandomID returns a unique id
+func GenerateRandomID() string {
+ for {
+ id := make([]byte, 32)
+ if _, err := io.ReadFull(rand.Reader, id); err != nil {
+ panic(err) // This shouldn't happen
+ }
+ value := hex.EncodeToString(id)
+ // if we try to parse the truncated form as an int and we don't have
+ // an error then the value is all numeric and causes issues when
+ // used as a hostname. ref #3869
+ if _, err := strconv.Atoi(TruncateID(value)); err == nil {
+ continue
+ }
+ return value
+ }
+}
+
+func ValidateID(id string) error {
+ if id == "" {
+ return fmt.Errorf("Id can't be empty")
+ }
+ if strings.Contains(id, ":") {
+ return fmt.Errorf("Invalid character in id: ':'")
+ }
+ return nil
+}
+
// Code c/c from io.Copy() modified to handle escape sequence
func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
buf := make([]byte, 32*1024)
@@ -685,7 +719,7 @@ func IsURL(str string) bool {
}
func IsGIT(str string) bool {
- return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/")
+ return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "git@github.com:") || (strings.HasSuffix(str, ".git") && IsURL(str))
}
// GetResolvConf opens and read the content of /etc/resolv.conf.
@@ -702,54 +736,78 @@ func GetResolvConf() ([]byte, error) {
// CheckLocalDns looks into the /etc/resolv.conf,
// it returns true if there is a local nameserver or if there is no nameserver.
func CheckLocalDns(resolvConf []byte) bool {
- var parsedResolvConf = StripComments(resolvConf, []byte("#"))
- if !bytes.Contains(parsedResolvConf, []byte("nameserver")) {
- return true
- }
- for _, ip := range [][]byte{
- []byte("127.0.0.1"),
- []byte("127.0.1.1"),
- } {
- if bytes.Contains(parsedResolvConf, ip) {
- return true
+ for _, line := range GetLines(resolvConf, []byte("#")) {
+ if !bytes.Contains(line, []byte("nameserver")) {
+ continue
+ }
+ for _, ip := range [][]byte{
+ []byte("127.0.0.1"),
+ []byte("127.0.1.1"),
+ } {
+ if bytes.Contains(line, ip) {
+ return true
+ }
}
+ return false
}
- return false
+ return true
}
-// StripComments parses input into lines and strips away comments.
-func StripComments(input []byte, commentMarker []byte) []byte {
+// GetLines parses input into lines and strips away comments.
+func GetLines(input []byte, commentMarker []byte) [][]byte {
lines := bytes.Split(input, []byte("\n"))
- var output []byte
+ var output [][]byte
for _, currentLine := range lines {
var commentIndex = bytes.Index(currentLine, commentMarker)
if commentIndex == -1 {
- output = append(output, currentLine...)
+ output = append(output, currentLine)
} else {
- output = append(output, currentLine[:commentIndex]...)
+ output = append(output, currentLine[:commentIndex])
}
- output = append(output, []byte("\n")...)
}
return output
}
-// GetNameserversAsCIDR returns nameservers (if any) listed in
-// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
-// This function's output is intended for net.ParseCIDR
-func GetNameserversAsCIDR(resolvConf []byte) []string {
- var parsedResolvConf = StripComments(resolvConf, []byte("#"))
+// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf
+func GetNameservers(resolvConf []byte) []string {
nameservers := []string{}
re := regexp.MustCompile(`^\s*nameserver\s*(([0-9]+\.){3}([0-9]+))\s*$`)
- for _, line := range bytes.Split(parsedResolvConf, []byte("\n")) {
+ for _, line := range GetLines(resolvConf, []byte("#")) {
var ns = re.FindSubmatch(line)
if len(ns) > 0 {
- nameservers = append(nameservers, string(ns[1])+"/32")
+ nameservers = append(nameservers, string(ns[1]))
}
}
+ return nameservers
+}
+// GetNameserversAsCIDR returns nameservers (if any) listed in
+// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32")
+// This function's output is intended for net.ParseCIDR
+func GetNameserversAsCIDR(resolvConf []byte) []string {
+ nameservers := []string{}
+ for _, nameserver := range GetNameservers(resolvConf) {
+ nameservers = append(nameservers, nameserver+"/32")
+ }
return nameservers
}
+// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf
+// If more than one search line is encountered, only the contents of the last
+// one is returned.
+func GetSearchDomains(resolvConf []byte) []string {
+ re := regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`)
+ domains := []string{}
+ for _, line := range GetLines(resolvConf, []byte("#")) {
+ match := re.FindSubmatch(line)
+ if match == nil {
+ continue
+ }
+ domains = strings.Fields(string(match[1]))
+ }
+ return domains
+}
+
// FIXME: Change this not to receive default value as parameter
func ParseHost(defaultHost string, defaultUnix, addr string) (string, error) {
var (
@@ -1018,3 +1076,11 @@ func ReadSymlinkedDirectory(path string) (string, error) {
}
return realPath, nil
}
+
+func ParseKeyValueOpt(opt string) (string, string, error) {
+ parts := strings.SplitN(opt, "=", 2)
+ if len(parts) != 2 {
+ return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
+ }
+ return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+}
diff --git a/utils/utils_test.go b/utils/utils_test.go
index 444d2a2428..177d3667e1 100644
--- a/utils/utils_test.go
+++ b/utils/utils_test.go
@@ -444,6 +444,30 @@ func TestParsePortMapping(t *testing.T) {
}
}
+func TestGetNameservers(t *testing.T) {
+ for resolv, result := range map[string][]string{`
+nameserver 1.2.3.4
+nameserver 40.3.200.10
+search example.com`: {"1.2.3.4", "40.3.200.10"},
+ `search example.com`: {},
+ `nameserver 1.2.3.4
+search example.com
+nameserver 4.30.20.100`: {"1.2.3.4", "4.30.20.100"},
+ ``: {},
+ ` nameserver 1.2.3.4 `: {"1.2.3.4"},
+ `search example.com
+nameserver 1.2.3.4
+#nameserver 4.3.2.1`: {"1.2.3.4"},
+ `search example.com
+nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4"},
+ } {
+ test := GetNameservers([]byte(resolv))
+ if !StrSlicesEqual(test, result) {
+ t.Fatalf("Wrong nameserver string {%s} should be %v. Input: %s", test, result, resolv)
+ }
+ }
+}
+
func TestGetNameserversAsCIDR(t *testing.T) {
for resolv, result := range map[string][]string{`
nameserver 1.2.3.4
@@ -468,6 +492,33 @@ nameserver 1.2.3.4 # not 4.3.2.1`: {"1.2.3.4/32"},
}
}
+func TestGetSearchDomains(t *testing.T) {
+ for resolv, result := range map[string][]string{
+ `search example.com`: {"example.com"},
+ `search example.com # ignored`: {"example.com"},
+ ` search example.com `: {"example.com"},
+ ` search example.com # ignored`: {"example.com"},
+ `search foo.example.com example.com`: {"foo.example.com", "example.com"},
+ ` search foo.example.com example.com `: {"foo.example.com", "example.com"},
+ ` search foo.example.com example.com # ignored`: {"foo.example.com", "example.com"},
+ ``: {},
+ `# ignored`: {},
+ `nameserver 1.2.3.4
+search foo.example.com example.com`: {"foo.example.com", "example.com"},
+ `nameserver 1.2.3.4
+search dup1.example.com dup2.example.com
+search foo.example.com example.com`: {"foo.example.com", "example.com"},
+ `nameserver 1.2.3.4
+search foo.example.com example.com
+nameserver 4.30.20.100`: {"foo.example.com", "example.com"},
+ } {
+ test := GetSearchDomains([]byte(resolv))
+ if !StrSlicesEqual(test, result) {
+ t.Fatalf("Wrong search domain string {%s} should be %v. Input: %s", test, result, resolv)
+ }
+ }
+}
+
func StrSlicesEqual(a, b []string) bool {
if len(a) != len(b) {
return false
diff --git a/utils_test.go b/utils_test.go
deleted file mode 100644
index 31fa12b6ad..0000000000
--- a/utils_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package docker
-
-import (
- "bytes"
- "github.com/dotcloud/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar"
- "io"
-)
-
-func fakeTar() (io.Reader, error) {
- content := []byte("Hello world!\n")
- buf := new(bytes.Buffer)
- tw := tar.NewWriter(buf)
- for _, name := range []string{"/etc/postgres/postgres.conf", "/etc/passwd", "/var/log/postgres/postgres.conf"} {
- hdr := new(tar.Header)
- hdr.Size = int64(len(content))
- hdr.Name = name
- if err := tw.WriteHeader(hdr); err != nil {
- return nil, err
- }
- tw.Write([]byte(content))
- }
- tw.Close()
- return buf, nil
-}
diff --git a/vendor/src/github.com/coreos/go-systemd/.travis.yml b/vendor/src/github.com/coreos/go-systemd/.travis.yml
new file mode 100644
index 0000000000..8c9f56e44a
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go: 1.2
+
+install:
+ - echo "Skip install"
+
+script:
+ - ./test
diff --git a/vendor/src/github.com/coreos/go-systemd/LICENSE b/vendor/src/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 0000000000..37ec93a14f
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/src/github.com/coreos/go-systemd/README.md b/vendor/src/github.com/coreos/go-systemd/README.md
new file mode 100644
index 0000000000..0ee09fec0a
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/README.md
@@ -0,0 +1,44 @@
+# go-systemd
+
+Go bindings to systemd. The project has three packages:
+
+- activation - for writing and using socket activation from Go
+- journal - for writing to systemd's logging service, journal
+- dbus - for starting/stopping/inspecting running services and units
+
+Go docs for the entire project are here:
+
+http://godoc.org/github.com/coreos/go-systemd
+
+## Socket Activation
+
+An example HTTP server using socket activation can be quickly setup by
+following this README on a Linux machine running systemd:
+
+https://github.com/coreos/go-systemd/tree/master/examples/activation/httpserver
+
+## Journal
+
+Using this package you can submit journal entries directly to systemd's journal taking advantage of features like indexed key/value pairs for each log entry.
+
+## D-Bus
+
+The D-Bus API lets you start, stop and introspect systemd units. The API docs are here:
+
+http://godoc.org/github.com/coreos/go-systemd/dbus
+
+### Debugging
+
+Create `/etc/dbus-1/system-local.conf` that looks like this:
+
+```
+<!DOCTYPE busconfig PUBLIC
+"-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
+"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+<busconfig>
+ <policy user="root">
+ <allow eavesdrop="true"/>
+ <allow eavesdrop="true" send_destination="*"/>
+ </policy>
+</busconfig>
+```
diff --git a/pkg/systemd/activation/files.go b/vendor/src/github.com/coreos/go-systemd/activation/files.go
index 0281146310..74b4fc10f3 100644
--- a/pkg/systemd/activation/files.go
+++ b/vendor/src/github.com/coreos/go-systemd/activation/files.go
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
+
// Package activation implements primitives for systemd socket activation.
package activation
diff --git a/vendor/src/github.com/coreos/go-systemd/activation/files_test.go b/vendor/src/github.com/coreos/go-systemd/activation/files_test.go
new file mode 100644
index 0000000000..a1c6948fb2
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/activation/files_test.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2013 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package activation
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "os/exec"
+ "testing"
+)
+
+// correctStringWritten fails the test if the correct string wasn't written
+// to the other side of the pipe.
+func correctStringWritten(t *testing.T, r *os.File, expected string) bool {
+ bytes := make([]byte, len(expected))
+ io.ReadAtLeast(r, bytes, len(expected))
+
+ if string(bytes) != expected {
+ t.Fatalf("Unexpected string %s", string(bytes))
+ }
+
+ return true
+}
+
+// TestActivation forks out a copy of activation.go example and reads back two
+// strings from the pipes that are passed in.
+func TestActivation(t *testing.T) {
+ cmd := exec.Command("go", "run", "../examples/activation/activation.go")
+
+ r1, w1, _ := os.Pipe()
+ r2, w2, _ := os.Pipe()
+ cmd.ExtraFiles = []*os.File{
+ w1,
+ w2,
+ }
+
+ cmd.Env = os.Environ()
+ cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1")
+
+ err := cmd.Run()
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+
+ correctStringWritten(t, r1, "Hello world")
+ correctStringWritten(t, r2, "Goodbye world")
+}
+
+func TestActivationNoFix(t *testing.T) {
+ cmd := exec.Command("go", "run", "../examples/activation/activation.go")
+ cmd.Env = os.Environ()
+ cmd.Env = append(cmd.Env, "LISTEN_FDS=2")
+
+ out, _ := cmd.CombinedOutput()
+ if bytes.Contains(out, []byte("No files")) == false {
+ t.Fatalf("Child didn't error out as expected")
+ }
+}
+
+func TestActivationNoFiles(t *testing.T) {
+ cmd := exec.Command("go", "run", "../examples/activation/activation.go")
+ cmd.Env = os.Environ()
+ cmd.Env = append(cmd.Env, "LISTEN_FDS=0", "FIX_LISTEN_PID=1")
+
+ out, _ := cmd.CombinedOutput()
+ if bytes.Contains(out, []byte("No files")) == false {
+ t.Fatalf("Child didn't error out as expected")
+ }
+}
diff --git a/pkg/systemd/activation/listeners.go b/vendor/src/github.com/coreos/go-systemd/activation/listeners.go
index 3296a08361..cdb2cf4bb4 100644
--- a/pkg/systemd/activation/listeners.go
+++ b/vendor/src/github.com/coreos/go-systemd/activation/listeners.go
@@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
+
package activation
import (
diff --git a/vendor/src/github.com/coreos/go-systemd/activation/listeners_test.go b/vendor/src/github.com/coreos/go-systemd/activation/listeners_test.go
new file mode 100644
index 0000000000..c3627d6d4d
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/activation/listeners_test.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2014 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package activation
+
+import (
+ "io"
+ "net"
+ "os"
+ "os/exec"
+ "testing"
+)
+
+// correctStringWrittenNet fails the test if the correct string wasn't written
+// to the other side of the pipe.
+func correctStringWrittenNet(t *testing.T, r net.Conn, expected string) bool {
+ bytes := make([]byte, len(expected))
+ io.ReadAtLeast(r, bytes, len(expected))
+
+ if string(bytes) != expected {
+ t.Fatalf("Unexpected string %s", string(bytes))
+ }
+
+ return true
+}
+
+// TestListeners forks out a copy of the listen.go example and reads back two
+// strings from the listeners that are passed in.
+func TestListeners(t *testing.T) {
+ cmd := exec.Command("go", "run", "../examples/activation/listen.go")
+
+ l1, err := net.Listen("tcp", ":9999")
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+ l2, err := net.Listen("tcp", ":1234")
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+
+ t1 := l1.(*net.TCPListener)
+ t2 := l2.(*net.TCPListener)
+
+ f1, _ := t1.File()
+ f2, _ := t2.File()
+
+ cmd.ExtraFiles = []*os.File{
+ f1,
+ f2,
+ }
+
+ r1, err := net.Dial("tcp", "127.0.0.1:9999")
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+ r1.Write([]byte("Hi"))
+
+ r2, err := net.Dial("tcp", "127.0.0.1:1234")
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
+ r2.Write([]byte("Hi"))
+
+ cmd.Env = os.Environ()
+ cmd.Env = append(cmd.Env, "LISTEN_FDS=2", "FIX_LISTEN_PID=1")
+
+ out, err := cmd.Output()
+ if err != nil {
+ println(string(out))
+ t.Fatalf(err.Error())
+ }
+
+ correctStringWrittenNet(t, r1, "Hello world")
+ correctStringWrittenNet(t, r2, "Goodbye world")
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/src/github.com/coreos/go-systemd/dbus/dbus.go
new file mode 100644
index 0000000000..91d7112145
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/dbus.go
@@ -0,0 +1,104 @@
+/*
+Copyright 2013 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/
+package dbus
+
+import (
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/godbus/dbus"
+)
+
+const signalBuffer = 100
+
+// ObjectPath creates a dbus.ObjectPath using the rules that systemd uses for
+// serializing special characters.
+func ObjectPath(path string) dbus.ObjectPath {
+ path = strings.Replace(path, ".", "_2e", -1)
+ path = strings.Replace(path, "-", "_2d", -1)
+ path = strings.Replace(path, "@", "_40", -1)
+
+ return dbus.ObjectPath(path)
+}
+
+// Conn is a connection to systemd's D-Bus endpoint.
+type Conn struct {
+ sysconn *dbus.Conn
+ sysobj *dbus.Object
+ jobListener struct {
+ jobs map[dbus.ObjectPath]chan string
+ sync.Mutex
+ }
+ subscriber struct {
+ updateCh chan<- *SubStateUpdate
+ errCh chan<- error
+ sync.Mutex
+ ignore map[dbus.ObjectPath]int64
+ cleanIgnore int64
+ }
+ dispatch map[string]func(dbus.Signal)
+}
+
+// New() establishes a connection to the system bus and authenticates.
+func New() (*Conn, error) {
+ c := new(Conn)
+
+ if err := c.initConnection(); err != nil {
+ return nil, err
+ }
+
+ c.initJobs()
+ return c, nil
+}
+
+func (c *Conn) initConnection() error {
+ var err error
+ c.sysconn, err = dbus.SystemBusPrivate()
+ if err != nil {
+ return err
+ }
+
+ // Only use EXTERNAL method, and hardcode the uid (not username)
+ // to avoid a username lookup (which requires a dynamically linked
+ // libc)
+ methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
+
+ err = c.sysconn.Auth(methods)
+ if err != nil {
+ c.sysconn.Close()
+ return err
+ }
+
+ err = c.sysconn.Hello()
+ if err != nil {
+ c.sysconn.Close()
+ return err
+ }
+
+ c.sysobj = c.sysconn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
+
+ // Setup the listeners on jobs so that we can get completions
+ c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
+ c.initSubscription()
+ c.initDispatch()
+
+ return nil
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/dbus_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/dbus_test.go
new file mode 100644
index 0000000000..2e80f73ef7
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/dbus_test.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2013 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dbus
+
+import (
+ "testing"
+)
+
+// TestObjectPath ensures path encoding of the systemd rules works.
+func TestObjectPath(t *testing.T) {
+ input := "/silly-path/to@a/unit..service"
+ output := ObjectPath(input)
+ expected := "/silly_2dpath/to_40a/unit_2e_2eservice"
+
+ if string(output) != expected {
+ t.Fatalf("Output '%s' did not match expected '%s'", output, expected)
+ }
+}
+
+// TestNew ensures that New() works without errors.
+func TestNew(t *testing.T) {
+ _, err := New()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/methods.go b/vendor/src/github.com/coreos/go-systemd/dbus/methods.go
new file mode 100644
index 0000000000..11d5cda945
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/methods.go
@@ -0,0 +1,354 @@
+/*
+Copyright 2013 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dbus
+
+import (
+ "errors"
+ "github.com/godbus/dbus"
+)
+
+func (c *Conn) initJobs() {
+ c.jobListener.jobs = make(map[dbus.ObjectPath]chan string)
+}
+
+func (c *Conn) jobComplete(signal *dbus.Signal) {
+ var id uint32
+ var job dbus.ObjectPath
+ var unit string
+ var result string
+ dbus.Store(signal.Body, &id, &job, &unit, &result)
+ c.jobListener.Lock()
+ out, ok := c.jobListener.jobs[job]
+ if ok {
+ out <- result
+ delete(c.jobListener.jobs, job)
+ }
+ c.jobListener.Unlock()
+}
+
+func (c *Conn) startJob(job string, args ...interface{}) (<-chan string, error) {
+ c.jobListener.Lock()
+ defer c.jobListener.Unlock()
+
+ ch := make(chan string, 1)
+ var path dbus.ObjectPath
+ err := c.sysobj.Call(job, 0, args...).Store(&path)
+ if err != nil {
+ return nil, err
+ }
+ c.jobListener.jobs[path] = ch
+ return ch, nil
+}
+
+func (c *Conn) runJob(job string, args ...interface{}) (string, error) {
+ respCh, err := c.startJob(job, args...)
+ if err != nil {
+ return "", err
+ }
+ return <-respCh, nil
+}
+
+// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
+// specified by the mode string).
+//
+// Takes the unit to activate, plus a mode string. The mode needs to be one of
+// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
+// "replace" the call will start the unit and its dependencies, possibly
+// replacing already queued jobs that conflict with this. If "fail" the call
+// will start the unit and its dependencies, but will fail if this would change
+// an already queued job. If "isolate" the call will start the unit in question
+// and terminate all units that aren't dependencies of it. If
+// "ignore-dependencies" it will start a unit but ignore all its dependencies.
+// If "ignore-requirements" it will start a unit but only ignore the
+// requirement dependencies. It is not recommended to make use of the latter
+// two options.
+//
+// Result string: one of done, canceled, timeout, failed, dependency, skipped.
+// done indicates successful execution of a job. canceled indicates that a job
+// has been canceled before it finished execution. timeout indicates that the
+// job timeout was reached. failed indicates that the job failed. dependency
+// indicates that a job this job has been depending on failed and the job hence
+// has been removed too. skipped indicates that a job was skipped because it
+// didn't apply to the unit's current state.
+func (c *Conn) StartUnit(name string, mode string) (string, error) {
+ return c.runJob("org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+}
+
+// StopUnit is similar to StartUnit but stops the specified unit rather
+// than starting it.
+func (c *Conn) StopUnit(name string, mode string) (string, error) {
+ return c.runJob("org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+}
+
+// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise.
+func (c *Conn) ReloadUnit(name string, mode string) (string, error) {
+ return c.runJob("org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+}
+
+// RestartUnit restarts a service. If a service is restarted that isn't
+// running it will be started.
+func (c *Conn) RestartUnit(name string, mode string) (string, error) {
+ return c.runJob("org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+}
+
+// TryRestartUnit is like RestartUnit, except that a service that isn't running
+// is not affected by the restart.
+func (c *Conn) TryRestartUnit(name string, mode string) (string, error) {
+ return c.runJob("org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+}
+
+// ReloadOrRestartUnit attempts a reload if the unit supports it and use a restart
+// otherwise.
+func (c *Conn) ReloadOrRestartUnit(name string, mode string) (string, error) {
+ return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+}
+
+// ReloadOrTryRestartUnit attempts a reload if the unit supports it and use a "Try"
+// flavored restart otherwise.
+func (c *Conn) ReloadOrTryRestartUnit(name string, mode string) (string, error) {
+ return c.runJob("org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+}
+
+// StartTransientUnit() may be used to create and start a transient unit, which
+// will be released as soon as it is not running or referenced anymore or the
+// system is rebooted. name is the unit name including suffix, and must be
+// unique. mode is the same as in StartUnit(), properties contains properties
+// of the unit.
+func (c *Conn) StartTransientUnit(name string, mode string, properties ...Property) (string, error) {
+ return c.runJob("org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
+}
+
+// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's
+// processes are killed.
+func (c *Conn) KillUnit(name string, signal int32) {
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
+}
+
+// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
+func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
+ var err error
+ var props map[string]dbus.Variant
+
+ path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit)
+ if !path.IsValid() {
+ return nil, errors.New("invalid unit name: " + unit)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
+ if err != nil {
+ return nil, err
+ }
+
+ out := make(map[string]interface{}, len(props))
+ for k, v := range props {
+ out[k] = v.Value()
+ }
+
+ return out, nil
+}
+
+// GetUnitProperties takes the unit name and returns all of its dbus object properties.
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
+ return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
+}
+
+func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
+ var err error
+ var prop dbus.Variant
+
+ path := ObjectPath("/org/freedesktop/systemd1/unit/" + unit)
+ if !path.IsValid() {
+ return nil, errors.New("invalid unit name: " + unit)
+ }
+
+ obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+ err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Property{Name: propertyName, Value: prop}, nil
+}
+
+func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
+}
+
+// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
+// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
+func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
+ return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
+}
+
+// SetUnitProperties() may be used to modify certain unit properties at runtime.
+// Not all properties may be changed at runtime, but many resource management
+// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
+// instantly, and stored on disk for future boots, unless runtime is true, in which
+// case the settings only apply until the next reboot. name is the name of the unit
+// to modify. properties are the settings to set, encoded as an array of property
+// name and value pairs.
+func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
+ return c.sysobj.Call("SetUnitProperties", 0, name, runtime, properties).Store()
+}
+
+func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
+ return c.getProperty(unit, "org.freedesktop.systemd1." + unitType, propertyName)
+}
+
+// ListUnits returns an array with all currently loaded units. Note that
+// units may be known by multiple names at the same time, and hence there might
+// be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnits() ([]UnitStatus, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ status := make([]UnitStatus, len(result))
+ statusInterface := make([]interface{}, len(status))
+ for i := range status {
+ statusInterface[i] = &status[i]
+ }
+
+ err = dbus.Store(resultInterface, statusInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return status, nil
+}
+
+type UnitStatus struct {
+ Name string // The primary unit name as string
+ Description string // The human readable description string
+ LoadState string // The load state (i.e. whether the unit file has been loaded successfully)
+ ActiveState string // The active state (i.e. whether the unit is currently started or not)
+ SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
+ Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
+ Path dbus.ObjectPath // The unit object path
+ JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise
+ JobType string // The job type as string
+ JobPath dbus.ObjectPath // The job object path
+}
+
+// EnableUnitFiles() may be used to enable one or more units in the system (by
+// creating symlinks to them in /etc or /run).
+//
+// It takes a list of unit files to enable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and two booleans: the first controls whether the unit shall
+// be enabled for runtime only (true, /run), or persistently (false, /etc).
+// The second one controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns one boolean and an array with the changes made. The
+// boolean signals whether the unit files contained any enablement
+// information (i.e. an [Install]) section. The changes list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+ var carries_install_info bool
+
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
+ if err != nil {
+ return false, nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]EnableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return false, nil, err
+ }
+
+ return carries_install_info, changes, nil
+}
+
+type EnableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// DisableUnitFiles() may be used to disable one or more units in the system (by
+// removing symlinks to them from /etc or /run).
+//
+// It takes a list of unit files to disable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and one boolean: whether the unit was enabled for runtime
+// only (true, /run), or persistently (false, /etc).
+//
+// This call returns an array with the changes made. The changes list
+// consists of structures with three strings: the type of the change (one of
+// symlink or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
+ result := make([][]interface{}, 0)
+ err := c.sysobj.Call("DisableUnitFiles", 0, files, runtime).Store(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ resultInterface := make([]interface{}, len(result))
+ for i := range result {
+ resultInterface[i] = result[i]
+ }
+
+ changes := make([]DisableUnitFileChange, len(result))
+ changesInterface := make([]interface{}, len(changes))
+ for i := range changes {
+ changesInterface[i] = &changes[i]
+ }
+
+ err = dbus.Store(resultInterface, changesInterface...)
+ if err != nil {
+ return nil, err
+ }
+
+ return changes, nil
+}
+
+type DisableUnitFileChange struct {
+ Type string // Type of the change (one of symlink or unlink)
+ Filename string // File name of the symlink
+ Destination string // Destination of the symlink
+}
+
+// Reload instructs systemd to scan for and reload unit files. This is
+// equivalent to a 'systemctl daemon-reload'.
+func (c *Conn) Reload() error {
+ return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go
new file mode 100644
index 0000000000..9e2f22323f
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/methods_test.go
@@ -0,0 +1,314 @@
+/*
+Copyright 2013 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dbus
+
+import (
+ "fmt"
+ "github.com/guelfey/go.dbus"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+)
+
+func setupConn(t *testing.T) *Conn {
+ conn, err := New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return conn
+}
+
+func setupUnit(target string, conn *Conn, t *testing.T) {
+ // Blindly stop the unit in case it is running
+ conn.StopUnit(target, "replace")
+
+ // Blindly remove the symlink in case it exists
+ targetRun := filepath.Join("/run/systemd/system/", target)
+ err := os.Remove(targetRun)
+
+ // 1. Enable the unit
+ abs, err := filepath.Abs("../fixtures/" + target)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ fixture := []string{abs}
+
+ install, changes, err := conn.EnableUnitFiles(fixture, true, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if install != false {
+ t.Fatal("Install was true")
+ }
+
+ if len(changes) < 1 {
+ t.Fatalf("Expected one change, got %v", changes)
+ }
+
+ if changes[0].Filename != targetRun {
+ t.Fatal("Unexpected target filename")
+ }
+}
+
+// Ensure that basic unit starting and stopping works.
+func TestStartStopUnit(t *testing.T) {
+ target := "start-stop.service"
+ conn := setupConn(t)
+
+ setupUnit(target, conn, t)
+
+ // 2. Start the unit
+ job, err := conn.StartUnit(target, "replace")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if job != "done" {
+ t.Fatal("Job is not done, %v", job)
+ }
+
+ units, err := conn.ListUnits()
+
+ var unit *UnitStatus
+ for _, u := range units {
+ if u.Name == target {
+ unit = &u
+ }
+ }
+
+ if unit == nil {
+ t.Fatalf("Test unit not found in list")
+ }
+
+ if unit.ActiveState != "active" {
+ t.Fatalf("Test unit not active")
+ }
+
+ // 3. Stop the unit
+ job, err = conn.StopUnit(target, "replace")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ units, err = conn.ListUnits()
+
+ unit = nil
+ for _, u := range units {
+ if u.Name == target {
+ unit = &u
+ }
+ }
+
+ if unit != nil {
+ t.Fatalf("Test unit found in list, should be stopped")
+ }
+}
+
+// Enables a unit and then immediately tears it down
+func TestEnableDisableUnit(t *testing.T) {
+ target := "enable-disable.service"
+ conn := setupConn(t)
+
+ setupUnit(target, conn, t)
+
+ abs, err := filepath.Abs("../fixtures/" + target)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ path := filepath.Join("/run/systemd/system/", target)
+
+ // 2. Disable the unit
+ changes, err := conn.DisableUnitFiles([]string{abs}, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(changes) != 1 {
+ t.Fatalf("Changes should include the path, %v", changes)
+ }
+ if changes[0].Filename != path {
+ t.Fatalf("Change should include correct filename, %+v", changes[0])
+ }
+ if changes[0].Destination != "" {
+ t.Fatalf("Change destination should be empty, %+v", changes[0])
+ }
+}
+
+// TestGetUnitProperties reads the `-.mount` which should exist on all systemd
+// systems and ensures that one of its properties is valid.
+func TestGetUnitProperties(t *testing.T) {
+ conn := setupConn(t)
+
+ unit := "-.mount"
+
+ info, err := conn.GetUnitProperties(unit)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ names := info["Wants"].([]string)
+
+ if len(names) < 1 {
+ t.Fatal("/ is unwanted")
+ }
+
+ if names[0] != "system.slice" {
+ t.Fatal("unexpected wants for /")
+ }
+
+ prop, err := conn.GetUnitProperty(unit, "Wants")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if prop.Name != "Wants" {
+ t.Fatal("unexpected property name")
+ }
+
+ val := prop.Value.Value().([]string)
+ if !reflect.DeepEqual(val, names) {
+ t.Fatal("unexpected property value")
+ }
+}
+
+// TestGetUnitPropertiesRejectsInvalidName attempts to get the properties for a
+// unit with an invalid name. This test should be run with --test.timeout set,
+// as a fail will manifest as GetUnitProperties hanging indefinitely.
+func TestGetUnitPropertiesRejectsInvalidName(t *testing.T) {
+ conn := setupConn(t)
+
+ unit := "//invalid#$^/"
+
+ _, err := conn.GetUnitProperties(unit)
+ if err == nil {
+ t.Fatal("Expected an error, got nil")
+ }
+
+ _, err = conn.GetUnitProperty(unit, "Wants")
+ if err == nil {
+ t.Fatal("Expected an error, got nil")
+ }
+}
+
+// TestSetUnitProperties changes a cgroup setting on the `tmp.mount`
+// which should exist on all systemd systems and ensures that the
+// property was set.
+func TestSetUnitProperties(t *testing.T) {
+ conn := setupConn(t)
+
+ unit := "tmp.mount"
+
+ if err := conn.SetUnitProperties(unit, true, Property{"CPUShares", dbus.MakeVariant(uint64(1023))}); err != nil {
+ t.Fatal(err)
+ }
+
+ info, err := conn.GetUnitTypeProperties(unit, "Mount")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ value := info["CPUShares"].(uint64)
+ if value != 1023 {
+ t.Fatal("CPUShares of unit is not 1023, %s", value)
+ }
+}
+
+// Ensure that basic transient unit starting and stopping works.
+func TestStartStopTransientUnit(t *testing.T) {
+ conn := setupConn(t)
+
+ props := []Property{
+ PropExecStart([]string{"/bin/sleep", "400"}, false),
+ }
+ target := fmt.Sprintf("testing-transient-%d.service", rand.Int())
+
+ // Start the unit
+ job, err := conn.StartTransientUnit(target, "replace", props...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if job != "done" {
+ t.Fatal("Job is not done, %v", job)
+ }
+
+ units, err := conn.ListUnits()
+
+ var unit *UnitStatus
+ for _, u := range units {
+ if u.Name == target {
+ unit = &u
+ }
+ }
+
+ if unit == nil {
+ t.Fatalf("Test unit not found in list")
+ }
+
+ if unit.ActiveState != "active" {
+ t.Fatalf("Test unit not active")
+ }
+
+ // 3. Stop the unit
+ job, err = conn.StopUnit(target, "replace")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ units, err = conn.ListUnits()
+
+ unit = nil
+ for _, u := range units {
+ if u.Name == target {
+ unit = &u
+ }
+ }
+
+ if unit != nil {
+ t.Fatalf("Test unit found in list, should be stopped")
+ }
+}
+
+func TestConnJobListener(t *testing.T) {
+ target := "start-stop.service"
+ conn := setupConn(t)
+
+ setupUnit(target, conn, t)
+
+ jobSize := len(conn.jobListener.jobs)
+
+ _, err := conn.StartUnit(target, "replace")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = conn.StopUnit(target, "replace")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ currentJobSize := len(conn.jobListener.jobs)
+ if jobSize != currentJobSize {
+ t.Fatal("JobListener jobs leaked")
+ }
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/properties.go b/vendor/src/github.com/coreos/go-systemd/dbus/properties.go
new file mode 100644
index 0000000000..a06ccda761
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/properties.go
@@ -0,0 +1,220 @@
+/*
+Copyright 2013 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dbus
+
+import (
+ "github.com/godbus/dbus"
+)
+
+// From the systemd docs:
+//
+// The properties array of StartTransientUnit() may take many of the settings
+// that may also be configured in unit files. Not all parameters are currently
+// accepted though, but we plan to cover more properties with future release.
+// Currently you may set the Description, Slice and all dependency types of
+// units, as well as RemainAfterExit, ExecStart for service units,
+// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
+// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
+// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
+// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
+// directly to their counterparts in unit files and as normal D-Bus object
+// properties. The exception here is the PIDs field of scope units which is
+// used for construction of the scope only and specifies the initial PIDs to
+// add to the scope object.
+
+type Property struct {
+ Name string
+ Value dbus.Variant
+}
+
+type PropertyCollection struct {
+ Name string
+ Properties []Property
+}
+
+type execStart struct {
+ Path string // the binary path to execute
+ Args []string // an array with all arguments to pass to the executed command, starting with argument 0
+ UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly
+}
+
+// PropExecStart sets the ExecStart service property. The first argument is a
+// slice with the binary path to execute followed by the arguments to pass to
+// the executed command. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
+func PropExecStart(command []string, uncleanIsFailure bool) Property {
+ execStarts := []execStart{
+ execStart{
+ Path: command[0],
+ Args: command,
+ UncleanIsFailure: uncleanIsFailure,
+ },
+ }
+
+ return Property{
+ Name: "ExecStart",
+ Value: dbus.MakeVariant(execStarts),
+ }
+}
+
+// PropRemainAfterExit sets the RemainAfterExit service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
+func PropRemainAfterExit(b bool) Property {
+ return Property{
+ Name: "RemainAfterExit",
+ Value: dbus.MakeVariant(b),
+ }
+}
+
+// PropDescription sets the Description unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Description=
+func PropDescription(desc string) Property {
+ return Property{
+ Name: "Description",
+ Value: dbus.MakeVariant(desc),
+ }
+}
+
+func propDependency(name string, units []string) Property {
+ return Property{
+ Name: name,
+ Value: dbus.MakeVariant(units),
+ }
+}
+
+// PropRequires sets the Requires unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
+func PropRequires(units ...string) Property {
+ return propDependency("Requires", units)
+}
+
+// PropRequiresOverridable sets the RequiresOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
+func PropRequiresOverridable(units ...string) Property {
+ return propDependency("RequiresOverridable", units)
+}
+
+// PropRequisite sets the Requisite unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
+func PropRequisite(units ...string) Property {
+ return propDependency("Requisite", units)
+}
+
+// PropRequisiteOverridable sets the RequisiteOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
+func PropRequisiteOverridable(units ...string) Property {
+ return propDependency("RequisiteOverridable", units)
+}
+
+// PropWants sets the Wants unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
+func PropWants(units ...string) Property {
+ return propDependency("Wants", units)
+}
+
+// PropBindsTo sets the BindsTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
+func PropBindsTo(units ...string) Property {
+ return propDependency("BindsTo", units)
+}
+
+// PropRequiredBy sets the RequiredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
+func PropRequiredBy(units ...string) Property {
+ return propDependency("RequiredBy", units)
+}
+
+// PropRequiredByOverridable sets the RequiredByOverridable unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
+func PropRequiredByOverridable(units ...string) Property {
+ return propDependency("RequiredByOverridable", units)
+}
+
+// PropWantedBy sets the WantedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
+func PropWantedBy(units ...string) Property {
+ return propDependency("WantedBy", units)
+}
+
+// PropBoundBy sets the BoundBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
+func PropBoundBy(units ...string) Property {
+ return propDependency("BoundBy", units)
+}
+
+// PropConflicts sets the Conflicts unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
+func PropConflicts(units ...string) Property {
+ return propDependency("Conflicts", units)
+}
+
+// PropConflictedBy sets the ConflictedBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
+func PropConflictedBy(units ...string) Property {
+ return propDependency("ConflictedBy", units)
+}
+
+// PropBefore sets the Before unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
+func PropBefore(units ...string) Property {
+ return propDependency("Before", units)
+}
+
+// PropAfter sets the After unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
+func PropAfter(units ...string) Property {
+ return propDependency("After", units)
+}
+
+// PropOnFailure sets the OnFailure unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
+func PropOnFailure(units ...string) Property {
+ return propDependency("OnFailure", units)
+}
+
+// PropTriggers sets the Triggers unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
+func PropTriggers(units ...string) Property {
+ return propDependency("Triggers", units)
+}
+
+// PropTriggeredBy sets the TriggeredBy unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
+func PropTriggeredBy(units ...string) Property {
+ return propDependency("TriggeredBy", units)
+}
+
+// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
+func PropPropagatesReloadTo(units ...string) Property {
+ return propDependency("PropagatesReloadTo", units)
+}
+
+// PropRequiresMountsFor sets the RequiresMountsFor unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
+func PropRequiresMountsFor(units ...string) Property {
+ return propDependency("RequiresMountsFor", units)
+}
+
+// PropSlice sets the Slice unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
+func PropSlice(slice string) Property {
+ return Property{
+ Name: "Slice",
+ Value: dbus.MakeVariant(slice),
+ }
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/set.go b/vendor/src/github.com/coreos/go-systemd/dbus/set.go
new file mode 100644
index 0000000000..88378b29a1
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/set.go
@@ -0,0 +1,26 @@
+package dbus
+
+type set struct {
+ data map[string]bool
+}
+
+func (s *set) Add(value string) {
+ s.data[value] = true
+}
+
+func (s *set) Remove(value string) {
+ delete(s.data, value)
+}
+
+func (s *set) Contains(value string) (exists bool) {
+ _, exists = s.data[value]
+ return
+}
+
+func (s *set) Length() (int) {
+ return len(s.data)
+}
+
+func newSet() (*set) {
+ return &set{make(map[string] bool)}
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go
new file mode 100644
index 0000000000..d8d174d0c4
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/set_test.go
@@ -0,0 +1,26 @@
+package dbus
+
+import (
+ "testing"
+)
+
+// TestBasicSetActions asserts that Add & Remove behavior is correct
+func TestBasicSetActions(t *testing.T) {
+ s := newSet()
+
+ if s.Contains("foo") {
+ t.Fatal("set should not contain 'foo'")
+ }
+
+ s.Add("foo")
+
+ if !s.Contains("foo") {
+ t.Fatal("set should contain 'foo'")
+ }
+
+ s.Remove("foo")
+
+ if s.Contains("foo") {
+ t.Fatal("set should not contain 'foo'")
+ }
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go
new file mode 100644
index 0000000000..3d896d896f
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription.go
@@ -0,0 +1,249 @@
+/*
+Copyright 2013 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dbus
+
+import (
+ "errors"
+ "time"
+
+ "github.com/godbus/dbus"
+)
+
+const (
+ cleanIgnoreInterval = int64(10 * time.Second)
+ ignoreInterval = int64(30 * time.Millisecond)
+)
+
+// Subscribe sets up this connection to subscribe to all systemd dbus events.
+// This is required before calling SubscribeUnits. When the connection closes
+// systemd will automatically stop sending signals so there is no need to
+// explicitly call Unsubscribe().
+func (c *Conn) Subscribe() error {
+ c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
+ c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
+
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
+ if err != nil {
+ c.sysconn.Close()
+ return err
+ }
+
+ return nil
+}
+
+// Unsubscribe this connection from systemd dbus events.
+func (c *Conn) Unsubscribe() error {
+ err := c.sysobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
+ if err != nil {
+ c.sysconn.Close()
+ return err
+ }
+
+ return nil
+}
+
+func (c *Conn) initSubscription() {
+ c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
+}
+
+func (c *Conn) initDispatch() {
+ ch := make(chan *dbus.Signal, signalBuffer)
+
+ c.sysconn.Signal(ch)
+
+ go func() {
+ for {
+ signal := <-ch
+ switch signal.Name {
+ case "org.freedesktop.systemd1.Manager.JobRemoved":
+ c.jobComplete(signal)
+
+ unitName := signal.Body[2].(string)
+ var unitPath dbus.ObjectPath
+ c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
+ if unitPath != dbus.ObjectPath("") {
+ c.sendSubStateUpdate(unitPath)
+ }
+ case "org.freedesktop.systemd1.Manager.UnitNew":
+ c.sendSubStateUpdate(signal.Body[1].(dbus.ObjectPath))
+ case "org.freedesktop.DBus.Properties.PropertiesChanged":
+ if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
+ // we only care about SubState updates, which are a Unit property
+ c.sendSubStateUpdate(signal.Path)
+ }
+ }
+ }
+ }()
+}
+
+// Returns two unbuffered channels which will receive all changed units every
+// interval. Deleted units are sent as nil.
+func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
+ return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
+}
+
+// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
+// size of the channels, the comparison function for detecting changes and a filter
+// function for cutting down on the noise that your channel receives.
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
+ old := make(map[string]*UnitStatus)
+ statusChan := make(chan map[string]*UnitStatus, buffer)
+ errChan := make(chan error, buffer)
+
+ go func() {
+ for {
+ timerChan := time.After(interval)
+
+ units, err := c.ListUnits()
+ if err == nil {
+ cur := make(map[string]*UnitStatus)
+ for i := range units {
+ if filterUnit != nil && filterUnit(units[i].Name) {
+ continue
+ }
+ cur[units[i].Name] = &units[i]
+ }
+
+ // add all new or changed units
+ changed := make(map[string]*UnitStatus)
+ for n, u := range cur {
+ if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
+ changed[n] = u
+ }
+ delete(old, n)
+ }
+
+ // add all deleted units
+ for oldN := range old {
+ changed[oldN] = nil
+ }
+
+ old = cur
+
+ if len(changed) != 0 {
+ statusChan <- changed
+ }
+ } else {
+ errChan <- err
+ }
+
+ <-timerChan
+ }
+ }()
+
+ return statusChan, errChan
+}
+
+type SubStateUpdate struct {
+ UnitName string
+ SubState string
+}
+
+// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
+// Although this writes to updateCh on every state change, the reported state
+// may be more recent than the change that generated it (due to an unavoidable
+// race in the systemd dbus interface). That is, this method provides a good
+// way to keep a current view of all units' states, but is not guaranteed to
+// show every state transition they go through. Furthermore, state changes
+// will only be written to the channel with non-blocking writes. If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+ c.subscriber.Lock()
+ defer c.subscriber.Unlock()
+ c.subscriber.updateCh = updateCh
+ c.subscriber.errCh = errCh
+}
+
+func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
+ c.subscriber.Lock()
+ defer c.subscriber.Unlock()
+ if c.subscriber.updateCh == nil {
+ return
+ }
+
+ if c.shouldIgnore(path) {
+ return
+ }
+
+	info, err := c.GetUnitProperties(string(path))
+	if err != nil {
+		select {
+		case c.subscriber.errCh <- err:
+		default:
+		}
+		return
+	}
+
+ name := info["Id"].(string)
+ substate := info["SubState"].(string)
+
+ update := &SubStateUpdate{name, substate}
+ select {
+ case c.subscriber.updateCh <- update:
+ default:
+ select {
+ case c.subscriber.errCh <- errors.New("update channel full!"):
+ default:
+ }
+ }
+
+ c.updateIgnore(path, info)
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals. The signal themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
+
+func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
+ t, ok := c.subscriber.ignore[path]
+ return ok && t >= time.Now().UnixNano()
+}
+
+func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
+ c.cleanIgnore()
+
+ // unit is unloaded - it will trigger bad systemd dbus behavior
+ if info["LoadState"].(string) == "not-found" {
+ c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
+ }
+}
+
+// without this, ignore would grow unboundedly over time
+func (c *Conn) cleanIgnore() {
+ now := time.Now().UnixNano()
+ if c.subscriber.cleanIgnore < now {
+ c.subscriber.cleanIgnore = now + cleanIgnoreInterval
+
+ for p, t := range c.subscriber.ignore {
+ if t < now {
+ delete(c.subscriber.ignore, p)
+ }
+ }
+ }
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go
new file mode 100644
index 0000000000..2625786052
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set.go
@@ -0,0 +1,32 @@
+package dbus
+
+import (
+ "time"
+)
+
+// SubscriptionSet returns a subscription set which is like conn.Subscribe but
+// can filter to only return events for a set of units.
+type SubscriptionSet struct {
+ *set
+ conn *Conn
+}
+
+
+func (s *SubscriptionSet) filter(unit string) bool {
+ return !s.Contains(unit)
+}
+
+// Subscribe starts listening for dbus events for all of the units in the set.
+// Returns channels identical to conn.SubscribeUnits.
+func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
+ // TODO: Make fully evented by using systemd 209 with properties changed values
+ return s.conn.SubscribeUnitsCustom(time.Second, 0,
+ func(u1, u2 *UnitStatus) bool { return *u1 != *u2 },
+ func(unit string) bool { return s.filter(unit) },
+ )
+}
+
+// NewSubscriptionSet returns a new subscription set.
+func (conn *Conn) NewSubscriptionSet() (*SubscriptionSet) {
+ return &SubscriptionSet{newSet(), conn}
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go
new file mode 100644
index 0000000000..db600850c2
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_set_test.go
@@ -0,0 +1,67 @@
+package dbus
+
+import (
+ "testing"
+ "time"
+)
+
+// TestSubscriptionSetUnit exercises the basics of subscription of a particular unit.
+func TestSubscriptionSetUnit(t *testing.T) {
+ target := "subscribe-events-set.service"
+
+ conn, err := New()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = conn.Subscribe()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ subSet := conn.NewSubscriptionSet()
+ evChan, errChan := subSet.Subscribe()
+
+ subSet.Add(target)
+ setupUnit(target, conn, t)
+
+ job, err := conn.StartUnit(target, "replace")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if job != "done" {
+ t.Fatal("Couldn't start", target)
+ }
+
+ timeout := make(chan bool, 1)
+ go func() {
+ time.Sleep(3 * time.Second)
+ close(timeout)
+ }()
+
+ for {
+ select {
+ case changes := <-evChan:
+ tCh, ok := changes[target]
+
+ if !ok {
+			t.Fatalf("Unexpected event %v", changes)
+ }
+
+ if tCh.ActiveState == "active" && tCh.Name == target {
+ goto success
+ }
+ case err = <-errChan:
+ t.Fatal(err)
+ case <-timeout:
+ t.Fatal("Reached timeout")
+ }
+ }
+
+success:
+ return
+}
+
+
diff --git a/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go
new file mode 100644
index 0000000000..6f4d0b32a6
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/dbus/subscription_test.go
@@ -0,0 +1,90 @@
+package dbus
+
+import (
+ "testing"
+ "time"
+)
+
+// TestSubscribe exercises the basics of subscription
+func TestSubscribe(t *testing.T) {
+ conn, err := New()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = conn.Subscribe()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = conn.Unsubscribe()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// TestSubscribeUnit exercises the basics of subscription of a particular unit.
+func TestSubscribeUnit(t *testing.T) {
+ target := "subscribe-events.service"
+
+ conn, err := New()
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = conn.Subscribe()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = conn.Unsubscribe()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ evChan, errChan := conn.SubscribeUnits(time.Second)
+
+ setupUnit(target, conn, t)
+
+ job, err := conn.StartUnit(target, "replace")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if job != "done" {
+ t.Fatal("Couldn't start", target)
+ }
+
+ timeout := make(chan bool, 1)
+ go func() {
+ time.Sleep(3 * time.Second)
+ close(timeout)
+ }()
+
+ for {
+ select {
+ case changes := <-evChan:
+ tCh, ok := changes[target]
+
+ // Just continue until we see our event.
+ if !ok {
+ continue
+ }
+
+ if tCh.ActiveState == "active" && tCh.Name == target {
+ goto success
+ }
+ case err = <-errChan:
+ t.Fatal(err)
+ case <-timeout:
+ t.Fatal("Reached timeout")
+ }
+ }
+
+success:
+ return
+}
+
+
diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/activation.go b/vendor/src/github.com/coreos/go-systemd/examples/activation/activation.go
new file mode 100644
index 0000000000..b3cf70ed84
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/activation.go
@@ -0,0 +1,44 @@
+// Activation example used by the activation unit tests.
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/coreos/go-systemd/activation"
+)
+
+func fixListenPid() {
+ if os.Getenv("FIX_LISTEN_PID") != "" {
+ // HACK: real systemd would set LISTEN_PID before exec'ing but
+ // this is too difficult in golang for the purpose of a test.
+ // Do not do this in real code.
+ os.Setenv("LISTEN_PID", fmt.Sprintf("%d", os.Getpid()))
+ }
+}
+
+func main() {
+ fixListenPid()
+
+ files := activation.Files(false)
+
+ if len(files) == 0 {
+ panic("No files")
+ }
+
+ if os.Getenv("LISTEN_PID") == "" || os.Getenv("LISTEN_FDS") == "" {
+ panic("Should not unset envs")
+ }
+
+ files = activation.Files(true)
+
+ if os.Getenv("LISTEN_PID") != "" || os.Getenv("LISTEN_FDS") != "" {
+ panic("Can not unset envs")
+ }
+
+ // Write out the expected strings to the two pipes
+ files[0].Write([]byte("Hello world"))
+ files[1].Write([]byte("Goodbye world"))
+
+ return
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/README.md b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/README.md
new file mode 100644
index 0000000000..a350cca5e5
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/README.md
@@ -0,0 +1,19 @@
+## socket activated http server
+
+This is a simple example of using socket activation with systemd to serve a
+simple HTTP server on http://127.0.0.1:8076
+
+To try it out `go get` the httpserver and run it under the systemd-activate helper
+
+```
+export GOPATH=`pwd`
+go get github.com/coreos/go-systemd/examples/activation/httpserver
+sudo /usr/lib/systemd/systemd-activate -l 127.0.0.1:8076 ./bin/httpserver
+```
+
+Then curl the URL and you will notice that it starts up:
+
+```
+curl 127.0.0.1:8076
+hello socket activated world!
+```
diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service
new file mode 100644
index 0000000000..c8dea0f6b3
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Hello World HTTP
+Requires=network.target
+After=multi-user.target
+
+[Service]
+Type=simple
+ExecStart=/usr/local/bin/httpserver
+
+[Install]
+WantedBy=multi-user.target
diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket
new file mode 100644
index 0000000000..723ed7ed92
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/hello.socket
@@ -0,0 +1,5 @@
+[Socket]
+ListenStream=127.0.0.1:8076
+
+[Install]
+WantedBy=sockets.target
diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go
new file mode 100644
index 0000000000..380c325d61
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/httpserver/httpserver.go
@@ -0,0 +1,26 @@
+package main
+
+import (
+ "io"
+ "net/http"
+
+ "github.com/coreos/go-systemd/activation"
+)
+
+func HelloServer(w http.ResponseWriter, req *http.Request) {
+ io.WriteString(w, "hello socket activated world!\n")
+}
+
+func main() {
+ listeners, err := activation.Listeners(true)
+ if err != nil {
+ panic(err)
+ }
+
+ if len(listeners) != 1 {
+ panic("Unexpected number of socket activation fds")
+ }
+
+ http.HandleFunc("/", HelloServer)
+ http.Serve(listeners[0], nil)
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/examples/activation/listen.go b/vendor/src/github.com/coreos/go-systemd/examples/activation/listen.go
new file mode 100644
index 0000000000..5850a8b796
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/examples/activation/listen.go
@@ -0,0 +1,50 @@
+// Activation example used by the activation unit tests.
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/coreos/go-systemd/activation"
+)
+
+func fixListenPid() {
+ if os.Getenv("FIX_LISTEN_PID") != "" {
+ // HACK: real systemd would set LISTEN_PID before exec'ing but
+ // this is too difficult in golang for the purpose of a test.
+ // Do not do this in real code.
+ os.Setenv("LISTEN_PID", fmt.Sprintf("%d", os.Getpid()))
+ }
+}
+
+func main() {
+ fixListenPid()
+
+ listeners, _ := activation.Listeners(false)
+
+ if len(listeners) == 0 {
+ panic("No listeners")
+ }
+
+ if os.Getenv("LISTEN_PID") == "" || os.Getenv("LISTEN_FDS") == "" {
+ panic("Should not unset envs")
+ }
+
+ listeners, err := activation.Listeners(true)
+ if err != nil {
+ panic(err)
+ }
+
+ if os.Getenv("LISTEN_PID") != "" || os.Getenv("LISTEN_FDS") != "" {
+ panic("Can not unset envs")
+ }
+
+ c0, _ := listeners[0].Accept()
+ c1, _ := listeners[1].Accept()
+
+ // Write out the expected strings to the two pipes
+ c0.Write([]byte("Hello world"))
+ c1.Write([]byte("Goodbye world"))
+
+ return
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/fixtures/start-stop.service b/vendor/src/github.com/coreos/go-systemd/fixtures/start-stop.service
new file mode 100644
index 0000000000..a1f8c36773
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/fixtures/start-stop.service
@@ -0,0 +1,5 @@
+[Unit]
+Description=start stop test
+
+[Service]
+ExecStart=/bin/sleep 400
diff --git a/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service b/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service
new file mode 100644
index 0000000000..a1f8c36773
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events-set.service
@@ -0,0 +1,5 @@
+[Unit]
+Description=start stop test
+
+[Service]
+ExecStart=/bin/sleep 400
diff --git a/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events.service b/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events.service
new file mode 100644
index 0000000000..a1f8c36773
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/fixtures/subscribe-events.service
@@ -0,0 +1,5 @@
+[Unit]
+Description=start stop test
+
+[Service]
+ExecStart=/bin/sleep 400
diff --git a/vendor/src/github.com/coreos/go-systemd/journal/send.go b/vendor/src/github.com/coreos/go-systemd/journal/send.go
new file mode 100644
index 0000000000..a29bcbf0fa
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/journal/send.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2013 CoreOS Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package journal provides write bindings to the systemd journal
+package journal
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+// Priority of a journal message
+type Priority int
+
+const (
+ PriEmerg Priority = iota
+ PriAlert
+ PriCrit
+ PriErr
+ PriWarning
+ PriNotice
+ PriInfo
+ PriDebug
+)
+
+var conn net.Conn
+
+func init() {
+ var err error
+ conn, err = net.Dial("unixgram", "/run/systemd/journal/socket")
+ if err != nil {
+ conn = nil
+ }
+}
+
+// Enabled returns true iff the systemd journal is available for logging
+func Enabled() bool {
+ return conn != nil
+}
+
+// Send a message to the systemd journal. vars is a map of journald fields to
+// values. Fields must be composed of uppercase letters, numbers, and
+// underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used. Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details. vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error {
+ if conn == nil {
+ return journalError("could not connect to journald socket")
+ }
+
+ data := new(bytes.Buffer)
+ appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+ appendVariable(data, "MESSAGE", message)
+ for k, v := range vars {
+ appendVariable(data, k, v)
+ }
+
+ _, err := io.Copy(conn, data)
+ if err != nil && isSocketSpaceError(err) {
+ file, err := tempFd()
+ if err != nil {
+ return journalError(err.Error())
+ }
+ _, err = io.Copy(file, data)
+ if err != nil {
+ return journalError(err.Error())
+ }
+
+ rights := syscall.UnixRights(int(file.Fd()))
+
+ /* this connection should always be a UnixConn, but better safe than sorry */
+ unixConn, ok := conn.(*net.UnixConn)
+ if !ok {
+ return journalError("can't send file through non-Unix connection")
+ }
+ unixConn.WriteMsgUnix([]byte{}, rights, nil)
+ } else if err != nil {
+ return journalError(err.Error())
+ }
+ return nil
+}
+
+func appendVariable(w io.Writer, name, value string) {
+ if !validVarName(name) {
+ journalError("variable name contains invalid character, ignoring")
+ }
+ if strings.ContainsRune(value, '\n') {
+ /* When the value contains a newline, we write:
+ * - the variable name, followed by a newline
+ * - the size (in 64bit little endian format)
+ * - the data, followed by a newline
+ */
+ fmt.Fprintln(w, name)
+ binary.Write(w, binary.LittleEndian, uint64(len(value)))
+ fmt.Fprintln(w, value)
+ } else {
+ /* just write the variable and value all on one line */
+		fmt.Fprintf(w, "%s=%s\n", name, value)
+ }
+}
+
+func validVarName(name string) bool {
+ /* The variable name must be in uppercase and consist only of characters,
+ * numbers and underscores, and may not begin with an underscore. (from the docs)
+ */
+
+ valid := name[0] != '_'
+ for _, c := range name {
+		valid = valid && (('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_')
+ }
+ return valid
+}
+
+func isSocketSpaceError(err error) bool {
+ opErr, ok := err.(*net.OpError)
+ if !ok {
+ return false
+ }
+
+ sysErr, ok := opErr.Err.(syscall.Errno)
+ if !ok {
+ return false
+ }
+
+ return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS
+}
+
+func tempFd() (*os.File, error) {
+ file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+ if err != nil {
+ return nil, err
+ }
+	err = syscall.Unlink(file.Name())
+ if err != nil {
+ return nil, err
+ }
+ return file, nil
+}
+
+func journalError(s string) error {
+ s = "journal error: " + s
+ fmt.Fprintln(os.Stderr, s)
+ return errors.New(s)
+}
diff --git a/vendor/src/github.com/coreos/go-systemd/test b/vendor/src/github.com/coreos/go-systemd/test
new file mode 100755
index 0000000000..6e043658ae
--- /dev/null
+++ b/vendor/src/github.com/coreos/go-systemd/test
@@ -0,0 +1,3 @@
+#!/bin/sh -e
+
+go test -v ./...
diff --git a/vendor/src/github.com/godbus/dbus/LICENSE b/vendor/src/github.com/godbus/dbus/LICENSE
new file mode 100644
index 0000000000..06b252bcbc
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013, Georg Reinke (<guelfey at gmail dot com>)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/godbus/dbus/README.markdown b/vendor/src/github.com/godbus/dbus/README.markdown
new file mode 100644
index 0000000000..3ab2116651
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/README.markdown
@@ -0,0 +1,38 @@
+dbus
+----
+
+dbus is a simple library that implements native Go client bindings for the
+D-Bus message bus system.
+
+### Features
+
+* Complete native implementation of the D-Bus message protocol
+* Go-like API (channels for signals / asynchronous method calls, Goroutine-safe connections)
+* Subpackages that help with the introspection / property interfaces
+
+### Installation
+
+This package requires Go 1.1. If you installed it and set up your GOPATH, just run:
+
+```
+go get github.com/godbus/dbus
+```
+
+If you want to use the subpackages, you can install them the same way.
+
+### Usage
+
+The complete package documentation and some simple examples are available at
+[godoc.org](http://godoc.org/github.com/godbus/dbus). Also, the
+[_examples](https://github.com/godbus/dbus/tree/master/_examples) directory
+gives a short overview over the basic usage.
+
+Please note that the API is considered unstable for now and may change without
+further notice.
+
+### License
+
+go.dbus is available under the Simplified BSD License; see LICENSE for the full
+text.
+
+Nearly all of the credit for this library goes to github.com/guelfey/go.dbus.
diff --git a/vendor/src/github.com/godbus/dbus/_examples/eavesdrop.go b/vendor/src/github.com/godbus/dbus/_examples/eavesdrop.go
new file mode 100644
index 0000000000..11deef3cf8
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/_examples/eavesdrop.go
@@ -0,0 +1,30 @@
+package main
+
+import (
+ "fmt"
+ "github.com/godbus/dbus"
+ "os"
+)
+
+func main() {
+ conn, err := dbus.SessionBus()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "Failed to connect to session bus:", err)
+ os.Exit(1)
+ }
+
+ for _, v := range []string{"method_call", "method_return", "error", "signal"} {
+ call := conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "eavesdrop='true',type='"+v+"'")
+ if call.Err != nil {
+ fmt.Fprintln(os.Stderr, "Failed to add match:", call.Err)
+ os.Exit(1)
+ }
+ }
+ c := make(chan *dbus.Message, 10)
+ conn.Eavesdrop(c)
+ fmt.Println("Listening for everything")
+ for v := range c {
+ fmt.Println(v)
+ }
+}
diff --git a/vendor/src/github.com/godbus/dbus/_examples/introspect.go b/vendor/src/github.com/godbus/dbus/_examples/introspect.go
new file mode 100644
index 0000000000..a2af4e5f24
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/_examples/introspect.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "encoding/json"
+ "github.com/godbus/dbus"
+ "github.com/godbus/dbus/introspect"
+ "os"
+)
+
+func main() {
+ conn, err := dbus.SessionBus()
+ if err != nil {
+ panic(err)
+ }
+ node, err := introspect.Call(conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus"))
+ if err != nil {
+ panic(err)
+ }
+ data, _ := json.MarshalIndent(node, "", " ")
+ os.Stdout.Write(data)
+}
diff --git a/vendor/src/github.com/godbus/dbus/_examples/list-names.go b/vendor/src/github.com/godbus/dbus/_examples/list-names.go
new file mode 100644
index 0000000000..ce1f7ec52e
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/_examples/list-names.go
@@ -0,0 +1,27 @@
+package main
+
+import (
+ "fmt"
+ "github.com/godbus/dbus"
+ "os"
+)
+
+func main() {
+ conn, err := dbus.SessionBus()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "Failed to connect to session bus:", err)
+ os.Exit(1)
+ }
+
+ var s []string
+ err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&s)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "Failed to get list of owned names:", err)
+ os.Exit(1)
+ }
+
+ fmt.Println("Currently owned names on the session bus:")
+ for _, v := range s {
+ fmt.Println(v)
+ }
+}
diff --git a/vendor/src/github.com/godbus/dbus/_examples/notification.go b/vendor/src/github.com/godbus/dbus/_examples/notification.go
new file mode 100644
index 0000000000..5fe11d04c4
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/_examples/notification.go
@@ -0,0 +1,17 @@
+package main
+
+import "github.com/godbus/dbus"
+
+func main() {
+ conn, err := dbus.SessionBus()
+ if err != nil {
+ panic(err)
+ }
+ obj := conn.Object("org.freedesktop.Notifications", "/org/freedesktop/Notifications")
+ call := obj.Call("org.freedesktop.Notifications.Notify", 0, "", uint32(0),
+ "", "Test", "This is a test of the DBus bindings for go.", []string{},
+ map[string]dbus.Variant{}, int32(5000))
+ if call.Err != nil {
+ panic(call.Err)
+ }
+}
diff --git a/vendor/src/github.com/godbus/dbus/_examples/prop.go b/vendor/src/github.com/godbus/dbus/_examples/prop.go
new file mode 100644
index 0000000000..e3408c53e9
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/_examples/prop.go
@@ -0,0 +1,68 @@
+package main
+
+import (
+ "fmt"
+ "github.com/godbus/dbus"
+ "github.com/godbus/dbus/introspect"
+ "github.com/godbus/dbus/prop"
+ "os"
+)
+
+type foo string
+
+func (f foo) Foo() (string, *dbus.Error) {
+ fmt.Println(f)
+ return string(f), nil
+}
+
+func main() {
+ conn, err := dbus.SessionBus()
+ if err != nil {
+ panic(err)
+ }
+ reply, err := conn.RequestName("com.github.guelfey.Demo",
+ dbus.NameFlagDoNotQueue)
+ if err != nil {
+ panic(err)
+ }
+ if reply != dbus.RequestNameReplyPrimaryOwner {
+ fmt.Fprintln(os.Stderr, "name already taken")
+ os.Exit(1)
+ }
+ propsSpec := map[string]map[string]*prop.Prop{
+ "com.github.guelfey.Demo": {
+ "SomeInt": {
+ int32(0),
+ true,
+ prop.EmitTrue,
+ func(c *prop.Change) *dbus.Error {
+ fmt.Println(c.Name, "changed to", c.Value)
+ return nil
+ },
+ },
+ },
+ }
+ f := foo("Bar")
+ conn.Export(f, "/com/github/guelfey/Demo", "com.github.guelfey.Demo")
+ props := prop.New(conn, "/com/github/guelfey/Demo", propsSpec)
+ n := &introspect.Node{
+ Name: "/com/github/guelfey/Demo",
+ Interfaces: []introspect.Interface{
+ introspect.IntrospectData,
+ prop.IntrospectData,
+ {
+ Name: "com.github.guelfey.Demo",
+ Methods: introspect.Methods(f),
+ Properties: props.Introspection("com.github.guelfey.Demo"),
+ },
+ },
+ }
+ conn.Export(introspect.NewIntrospectable(n), "/com/github/guelfey/Demo",
+ "org.freedesktop.DBus.Introspectable")
+ fmt.Println("Listening on com.github.guelfey.Demo / /com/github/guelfey/Demo ...")
+
+ c := make(chan *dbus.Signal)
+ conn.Signal(c)
+ for _ = range c {
+ }
+}
diff --git a/vendor/src/github.com/godbus/dbus/_examples/server.go b/vendor/src/github.com/godbus/dbus/_examples/server.go
new file mode 100644
index 0000000000..32b7b291c7
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/_examples/server.go
@@ -0,0 +1,45 @@
+package main
+
+import (
+ "fmt"
+ "github.com/godbus/dbus"
+ "github.com/godbus/dbus/introspect"
+ "os"
+)
+
+const intro = `
+<node>
+ <interface name="com.github.guelfey.Demo">
+ <method name="Foo">
+ <arg direction="out" type="s"/>
+ </method>
+ </interface>` + introspect.IntrospectDataString + `</node> `
+
+type foo string
+
+func (f foo) Foo() (string, *dbus.Error) {
+ fmt.Println(f)
+ return string(f), nil
+}
+
+func main() {
+ conn, err := dbus.SessionBus()
+ if err != nil {
+ panic(err)
+ }
+ reply, err := conn.RequestName("com.github.guelfey.Demo",
+ dbus.NameFlagDoNotQueue)
+ if err != nil {
+ panic(err)
+ }
+ if reply != dbus.RequestNameReplyPrimaryOwner {
+ fmt.Fprintln(os.Stderr, "name already taken")
+ os.Exit(1)
+ }
+ f := foo("Bar!")
+ conn.Export(f, "/com/github/guelfey/Demo", "com.github.guelfey.Demo")
+ conn.Export(introspect.Introspectable(intro), "/com/github/guelfey/Demo",
+ "org.freedesktop.DBus.Introspectable")
+ fmt.Println("Listening on com.github.guelfey.Demo / /com/github/guelfey/Demo ...")
+ select {}
+}
diff --git a/vendor/src/github.com/godbus/dbus/_examples/signal.go b/vendor/src/github.com/godbus/dbus/_examples/signal.go
new file mode 100644
index 0000000000..8f3f809759
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/_examples/signal.go
@@ -0,0 +1,24 @@
+package main
+
+import (
+ "fmt"
+ "github.com/godbus/dbus"
+ "os"
+)
+
+func main() {
+ conn, err := dbus.SessionBus()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "Failed to connect to session bus:", err)
+ os.Exit(1)
+ }
+
+ conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+ "type='signal',path='/org/freedesktop/DBus',interface='org.freedesktop.DBus',sender='org.freedesktop.DBus'")
+
+ c := make(chan *dbus.Signal, 10)
+ conn.Signal(c)
+ for v := range c {
+ fmt.Println(v)
+ }
+}
diff --git a/vendor/src/github.com/godbus/dbus/auth.go b/vendor/src/github.com/godbus/dbus/auth.go
new file mode 100644
index 0000000000..98017b693e
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/auth.go
@@ -0,0 +1,253 @@
+package dbus
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+)
+
+// AuthStatus represents the Status of an authentication mechanism.
+type AuthStatus byte
+
+const (
+ // AuthOk signals that authentication is finished; the next command
+ // from the server should be an OK.
+ AuthOk AuthStatus = iota
+
+ // AuthContinue signals that additional data is needed; the next command
+ // from the server should be a DATA.
+ AuthContinue
+
+ // AuthError signals an error; the server sent invalid data or some
+ // other unexpected thing happened and the current authentication
+ // process should be aborted.
+ AuthError
+)
+
+type authState byte
+
+const (
+ waitingForData authState = iota
+ waitingForOk
+ waitingForReject
+)
+
+// Auth defines the behaviour of an authentication mechanism.
+type Auth interface {
+	// Return the name of the mechanism, the argument to the first AUTH command
+ // and the next status.
+ FirstData() (name, resp []byte, status AuthStatus)
+
+ // Process the given DATA command, and return the argument to the DATA
+ // command and the next status. If len(resp) == 0, no DATA command is sent.
+ HandleData(data []byte) (resp []byte, status AuthStatus)
+}
+
+// Auth authenticates the connection, trying the given list of authentication
+// mechanisms (in that order). If nil is passed, the EXTERNAL and
+// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private
+// connections, this method must be called before sending any messages to the
+// bus. Auth must not be called on shared connections.
+func (conn *Conn) Auth(methods []Auth) error {
+ if methods == nil {
+ uid := strconv.Itoa(os.Getuid())
+ methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())}
+ }
+ in := bufio.NewReader(conn.transport)
+ err := conn.transport.SendNullByte()
+ if err != nil {
+ return err
+ }
+ err = authWriteLine(conn.transport, []byte("AUTH"))
+ if err != nil {
+ return err
+ }
+ s, err := authReadLine(in)
+ if err != nil {
+ return err
+ }
+ if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) {
+ return errors.New("dbus: authentication protocol error")
+ }
+ s = s[1:]
+ for _, v := range s {
+ for _, m := range methods {
+ if name, data, status := m.FirstData(); bytes.Equal(v, name) {
+ var ok bool
+ err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data)
+ if err != nil {
+ return err
+ }
+ switch status {
+ case AuthOk:
+ err, ok = conn.tryAuth(m, waitingForOk, in)
+ case AuthContinue:
+ err, ok = conn.tryAuth(m, waitingForData, in)
+ default:
+ panic("dbus: invalid authentication status")
+ }
+ if err != nil {
+ return err
+ }
+ if ok {
+ if conn.transport.SupportsUnixFDs() {
+ err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD"))
+ if err != nil {
+ return err
+ }
+ line, err := authReadLine(in)
+ if err != nil {
+ return err
+ }
+ switch {
+ case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")):
+ conn.EnableUnixFDs()
+ conn.unixFD = true
+ case bytes.Equal(line[0], []byte("ERROR")):
+ default:
+ return errors.New("dbus: authentication protocol error")
+ }
+ }
+ err = authWriteLine(conn.transport, []byte("BEGIN"))
+ if err != nil {
+ return err
+ }
+ go conn.inWorker()
+ go conn.outWorker()
+ return nil
+ }
+ }
+ }
+ }
+ return errors.New("dbus: authentication failed")
+}
+
+// tryAuth tries to authenticate with m as the mechanism, using state as the
+// initial authState and in for reading input. It returns (nil, true) on
+// success, (nil, false) on a REJECTED and (someErr, false) if some other
+// error occurred.
+func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) {
+ for {
+ s, err := authReadLine(in)
+ if err != nil {
+ return err, false
+ }
+ switch {
+ case state == waitingForData && string(s[0]) == "DATA":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ continue
+ }
+ data, status := m.HandleData(s[1])
+ switch status {
+ case AuthOk, AuthContinue:
+ if len(data) != 0 {
+ err = authWriteLine(conn.transport, []byte("DATA"), data)
+ if err != nil {
+ return err, false
+ }
+ }
+ if status == AuthOk {
+ state = waitingForOk
+ }
+ case AuthError:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ }
+ case state == waitingForData && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForData && string(s[0]) == "ERROR":
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ case state == waitingForData && string(s[0]) == "OK":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ }
+ conn.uuid = string(s[1])
+ return nil, true
+ case state == waitingForData:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ case state == waitingForOk && string(s[0]) == "OK":
+ if len(s) != 2 {
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ }
+ conn.uuid = string(s[1])
+ return nil, true
+ case state == waitingForOk && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForOk && (string(s[0]) == "DATA" ||
+ string(s[0]) == "ERROR"):
+
+ err = authWriteLine(conn.transport, []byte("CANCEL"))
+ if err != nil {
+ return err, false
+ }
+ state = waitingForReject
+ case state == waitingForOk:
+ err = authWriteLine(conn.transport, []byte("ERROR"))
+ if err != nil {
+ return err, false
+ }
+ case state == waitingForReject && string(s[0]) == "REJECTED":
+ return nil, false
+ case state == waitingForReject:
+ return errors.New("dbus: authentication protocol error"), false
+ default:
+ panic("dbus: invalid auth state")
+ }
+ }
+}
+
+// authReadLine reads a line and separates it into its fields.
+func authReadLine(in *bufio.Reader) ([][]byte, error) {
+ data, err := in.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+ data = bytes.TrimSuffix(data, []byte("\r\n"))
+ return bytes.Split(data, []byte{' '}), nil
+}
+
+// authWriteLine writes the given line in the authentication protocol format
+// (elements of data separated by a " " and terminated by "\r\n").
+func authWriteLine(out io.Writer, data ...[]byte) error {
+ buf := make([]byte, 0)
+ for i, v := range data {
+ buf = append(buf, v...)
+ if i != len(data)-1 {
+ buf = append(buf, ' ')
+ }
+ }
+ buf = append(buf, '\r')
+ buf = append(buf, '\n')
+ n, err := out.Write(buf)
+ if err != nil {
+ return err
+ }
+ if n != len(buf) {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
diff --git a/vendor/src/github.com/godbus/dbus/auth_external.go b/vendor/src/github.com/godbus/dbus/auth_external.go
new file mode 100644
index 0000000000..7e376d3ef6
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/auth_external.go
@@ -0,0 +1,26 @@
+package dbus
+
+import (
+ "encoding/hex"
+)
+
+// AuthExternal returns an Auth that authenticates as the given user with the
+// EXTERNAL mechanism.
+func AuthExternal(user string) Auth {
+ return authExternal{user}
+}
+
+// AuthExternal implements the EXTERNAL authentication mechanism.
+type authExternal struct {
+ user string
+}
+
+func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) {
+ b := make([]byte, 2*len(a.user))
+ hex.Encode(b, []byte(a.user))
+ return []byte("EXTERNAL"), b, AuthOk
+}
+
+func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) {
+ return nil, AuthError
+}
diff --git a/vendor/src/github.com/godbus/dbus/auth_sha1.go b/vendor/src/github.com/godbus/dbus/auth_sha1.go
new file mode 100644
index 0000000000..df15b46119
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/auth_sha1.go
@@ -0,0 +1,102 @@
+package dbus
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/hex"
+ "os"
+)
+
+// AuthCookieSha1 returns an Auth that authenticates as the given user with the
+// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home
+// directory of the user.
+func AuthCookieSha1(user, home string) Auth {
+ return authCookieSha1{user, home}
+}
+
+type authCookieSha1 struct {
+ user, home string
+}
+
+func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) {
+ b := make([]byte, 2*len(a.user))
+ hex.Encode(b, []byte(a.user))
+ return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue
+}
+
+func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) {
+ challenge := make([]byte, len(data)/2)
+ _, err := hex.Decode(challenge, data)
+ if err != nil {
+ return nil, AuthError
+ }
+ b := bytes.Split(challenge, []byte{' '})
+ if len(b) != 3 {
+ return nil, AuthError
+ }
+ context := b[0]
+ id := b[1]
+ svchallenge := b[2]
+ cookie := a.getCookie(context, id)
+ if cookie == nil {
+ return nil, AuthError
+ }
+ clchallenge := a.generateChallenge()
+ if clchallenge == nil {
+ return nil, AuthError
+ }
+ hash := sha1.New()
+ hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'}))
+ hexhash := make([]byte, 2*hash.Size())
+ hex.Encode(hexhash, hash.Sum(nil))
+ data = append(clchallenge, ' ')
+ data = append(data, hexhash...)
+ resp := make([]byte, 2*len(data))
+ hex.Encode(resp, data)
+ return resp, AuthOk
+}
+
+// getCookie searches for the cookie identified by id in context and returns
+// the cookie content or nil. (Since HandleData can't return a specific error,
+// but only whether an error occurred, this function also doesn't bother to
+// return an error.)
+func (a authCookieSha1) getCookie(context, id []byte) []byte {
+ file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context))
+ if err != nil {
+ return nil
+ }
+ defer file.Close()
+ rd := bufio.NewReader(file)
+ for {
+ line, err := rd.ReadBytes('\n')
+ if err != nil {
+ return nil
+ }
+ line = line[:len(line)-1]
+ b := bytes.Split(line, []byte{' '})
+ if len(b) != 3 {
+ return nil
+ }
+ if bytes.Equal(b[0], id) {
+ return b[2]
+ }
+ }
+}
+
+// generateChallenge returns a random, hex-encoded challenge, or nil on error
+// (see above).
+func (a authCookieSha1) generateChallenge() []byte {
+ b := make([]byte, 16)
+ n, err := rand.Read(b)
+ if err != nil {
+ return nil
+ }
+ if n != 16 {
+ return nil
+ }
+ enc := make([]byte, 32)
+ hex.Encode(enc, b)
+ return enc
+}
diff --git a/vendor/src/github.com/godbus/dbus/call.go b/vendor/src/github.com/godbus/dbus/call.go
new file mode 100644
index 0000000000..1d2fbc7efd
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/call.go
@@ -0,0 +1,147 @@
+package dbus
+
+import (
+ "errors"
+ "strings"
+)
+
+// Call represents a pending or completed method call.
+type Call struct {
+ Destination string
+ Path ObjectPath
+ Method string
+ Args []interface{}
+
+ // Strobes when the call is complete.
+ Done chan *Call
+
+ // After completion, the error status. If this is non-nil, it may be an
+ // error message from the peer (with Error as its type) or some other error.
+ Err error
+
+ // Holds the response once the call is done.
+ Body []interface{}
+}
+
+var errSignature = errors.New("dbus: mismatched signature")
+
+// Store stores the body of the reply into the provided pointers. It returns
+// an error if the signatures of the body and retvalues don't match, or if
+// the error status is not nil.
+func (c *Call) Store(retvalues ...interface{}) error {
+ if c.Err != nil {
+ return c.Err
+ }
+
+ return Store(c.Body, retvalues...)
+}
+
+// Object represents a remote object on which methods can be invoked.
+type Object struct {
+ conn *Conn
+ dest string
+ path ObjectPath
+}
+
+// Call calls a method with (*Object).Go and waits for its reply.
+func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call {
+ return <-o.Go(method, flags, make(chan *Call, 1), args...).Done
+}
+
+// GetProperty calls org.freedesktop.DBus.Properties.GetProperty on the given
+// object. The property name must be given in interface.member notation.
+func (o *Object) GetProperty(p string) (Variant, error) {
+ idx := strings.LastIndex(p, ".")
+ if idx == -1 || idx+1 == len(p) {
+ return Variant{}, errors.New("dbus: invalid property " + p)
+ }
+
+ iface := p[:idx]
+ prop := p[idx+1:]
+
+ result := Variant{}
+ err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result)
+
+ if err != nil {
+ return Variant{}, err
+ }
+
+ return result, nil
+}
+
+// Go calls a method with the given arguments asynchronously. It returns a
+// Call structure representing this method call. The passed channel will
+// return the same value once the call is done. If ch is nil, a new channel
+// will be allocated. Otherwise, ch has to be buffered or Go will panic.
+//
+// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure
+// is returned of which only the Err member is valid.
+//
+// If the method parameter contains a dot ('.'), the part before the last dot
+// specifies the interface on which the method is called.
+func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
+ iface := ""
+ i := strings.LastIndex(method, ".")
+ if i != -1 {
+ iface = method[:i]
+ }
+ method = method[i+1:]
+ msg := new(Message)
+ msg.Type = TypeMethodCall
+ msg.serial = o.conn.getSerial()
+ msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected)
+ msg.Headers = make(map[HeaderField]Variant)
+ msg.Headers[FieldPath] = MakeVariant(o.path)
+ msg.Headers[FieldDestination] = MakeVariant(o.dest)
+ msg.Headers[FieldMember] = MakeVariant(method)
+ if iface != "" {
+ msg.Headers[FieldInterface] = MakeVariant(iface)
+ }
+ msg.Body = args
+ if len(args) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...))
+ }
+ if msg.Flags&FlagNoReplyExpected == 0 {
+ if ch == nil {
+ ch = make(chan *Call, 10)
+ } else if cap(ch) == 0 {
+ panic("dbus: unbuffered channel passed to (*Object).Go")
+ }
+ call := &Call{
+ Destination: o.dest,
+ Path: o.path,
+ Method: method,
+ Args: args,
+ Done: ch,
+ }
+ o.conn.callsLck.Lock()
+ o.conn.calls[msg.serial] = call
+ o.conn.callsLck.Unlock()
+ o.conn.outLck.RLock()
+ if o.conn.closed {
+ call.Err = ErrClosed
+ call.Done <- call
+ } else {
+ o.conn.out <- msg
+ }
+ o.conn.outLck.RUnlock()
+ return call
+ }
+ o.conn.outLck.RLock()
+ defer o.conn.outLck.RUnlock()
+ if o.conn.closed {
+ return &Call{Err: ErrClosed}
+ }
+ o.conn.out <- msg
+ return &Call{Err: nil}
+}
+
+// Destination returns the destination that calls on o are sent to.
+func (o *Object) Destination() string {
+ return o.dest
+}
+
+// Path returns the path that calls on o are sent to.
+func (o *Object) Path() ObjectPath {
+ return o.path
+}
diff --git a/vendor/src/github.com/godbus/dbus/conn.go b/vendor/src/github.com/godbus/dbus/conn.go
new file mode 100644
index 0000000000..75dd22652a
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/conn.go
@@ -0,0 +1,601 @@
+package dbus
+
+import (
+ "errors"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket"
+
+var (
+ systemBus *Conn
+ systemBusLck sync.Mutex
+ sessionBus *Conn
+ sessionBusLck sync.Mutex
+)
+
+// ErrClosed is the error returned by calls on a closed connection.
+var ErrClosed = errors.New("dbus: connection closed by user")
+
+// Conn represents a connection to a message bus (usually, the system or
+// session bus).
+//
+// Connections are either shared or private. Shared connections
+// are shared between calls to the functions that return them. As a result,
+// the methods Close, Auth and Hello must not be called on them.
+//
+// Multiple goroutines may invoke methods on a connection simultaneously.
+type Conn struct {
+ transport
+
+ busObj *Object
+ unixFD bool
+ uuid string
+
+ names []string
+ namesLck sync.RWMutex
+
+ serialLck sync.Mutex
+ nextSerial uint32
+ serialUsed map[uint32]bool
+
+ calls map[uint32]*Call
+ callsLck sync.RWMutex
+
+ handlers map[ObjectPath]map[string]interface{}
+ handlersLck sync.RWMutex
+
+ out chan *Message
+ closed bool
+ outLck sync.RWMutex
+
+ signals []chan<- *Signal
+ signalsLck sync.Mutex
+
+ eavesdropped chan<- *Message
+ eavesdroppedLck sync.Mutex
+}
+
+// SessionBus returns a shared connection to the session bus, connecting to it
+// if not already done.
+func SessionBus() (conn *Conn, err error) {
+ sessionBusLck.Lock()
+ defer sessionBusLck.Unlock()
+ if sessionBus != nil {
+ return sessionBus, nil
+ }
+ defer func() {
+ if conn != nil {
+ sessionBus = conn
+ }
+ }()
+ conn, err = SessionBusPrivate()
+ if err != nil {
+ return
+ }
+ if err = conn.Auth(nil); err != nil {
+ conn.Close()
+ conn = nil
+ return
+ }
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ conn = nil
+ }
+ return
+}
+
+// SessionBusPrivate returns a new private connection to the session bus.
+func SessionBusPrivate() (*Conn, error) {
+ address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
+ if address != "" && address != "autolaunch:" {
+ return Dial(address)
+ }
+
+ return sessionBusPlatform()
+}
+
+// SystemBus returns a shared connection to the system bus, connecting to it if
+// not already done.
+func SystemBus() (conn *Conn, err error) {
+ systemBusLck.Lock()
+ defer systemBusLck.Unlock()
+ if systemBus != nil {
+ return systemBus, nil
+ }
+ defer func() {
+ if conn != nil {
+ systemBus = conn
+ }
+ }()
+ conn, err = SystemBusPrivate()
+ if err != nil {
+ return
+ }
+ if err = conn.Auth(nil); err != nil {
+ conn.Close()
+ conn = nil
+ return
+ }
+ if err = conn.Hello(); err != nil {
+ conn.Close()
+ conn = nil
+ }
+ return
+}
+
+// SystemBusPrivate returns a new private connection to the system bus.
+func SystemBusPrivate() (*Conn, error) {
+ address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
+ if address != "" {
+ return Dial(address)
+ }
+ return Dial(defaultSystemBusAddress)
+}
+
+// Dial establishes a new private connection to the message bus specified by address.
+func Dial(address string) (*Conn, error) {
+ tr, err := getTransport(address)
+ if err != nil {
+ return nil, err
+ }
+ return newConn(tr)
+}
+
+// NewConn creates a new private *Conn from an already established connection.
+func NewConn(conn io.ReadWriteCloser) (*Conn, error) {
+ return newConn(genericTransport{conn})
+}
+
+// newConn creates a new *Conn from a transport.
+func newConn(tr transport) (*Conn, error) {
+ conn := new(Conn)
+ conn.transport = tr
+ conn.calls = make(map[uint32]*Call)
+ conn.out = make(chan *Message, 10)
+ conn.handlers = make(map[ObjectPath]map[string]interface{})
+ conn.nextSerial = 1
+ conn.serialUsed = map[uint32]bool{0: true}
+ conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
+ return conn, nil
+}
+
+// BusObject returns the object owned by the bus daemon which handles
+// administrative requests.
+func (conn *Conn) BusObject() *Object {
+ return conn.busObj
+}
+
+// Close closes the connection. Any blocked operations will return with errors
+// and the channels passed to Eavesdrop and Signal are closed. This method must
+// not be called on shared connections.
+func (conn *Conn) Close() error {
+ conn.outLck.Lock()
+ close(conn.out)
+ conn.closed = true
+ conn.outLck.Unlock()
+ conn.signalsLck.Lock()
+ for _, ch := range conn.signals {
+ close(ch)
+ }
+ conn.signalsLck.Unlock()
+ conn.eavesdroppedLck.Lock()
+ if conn.eavesdropped != nil {
+ close(conn.eavesdropped)
+ }
+ conn.eavesdroppedLck.Unlock()
+ return conn.transport.Close()
+}
+
+// Eavesdrop causes conn to send all incoming messages to the given channel
+// without further processing. Method replies, errors and signals will not be
+// sent to the appropriate channels and method calls will not be handled. If nil
+// is passed, the normal behaviour is restored.
+//
+// The caller has to make sure that ch is sufficiently buffered;
+// if a message arrives when a write to ch is not possible, the message is
+// discarded.
+func (conn *Conn) Eavesdrop(ch chan<- *Message) {
+ conn.eavesdroppedLck.Lock()
+ conn.eavesdropped = ch
+ conn.eavesdroppedLck.Unlock()
+}
+
+// getSerial returns an unused serial.
+func (conn *Conn) getSerial() uint32 {
+ conn.serialLck.Lock()
+ defer conn.serialLck.Unlock()
+ n := conn.nextSerial
+ for conn.serialUsed[n] {
+ n++
+ }
+ conn.serialUsed[n] = true
+ conn.nextSerial = n + 1
+ return n
+}
+
+// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
+// called after authentication, but before sending any other messages to the
+// bus. Hello must not be called for shared connections.
+func (conn *Conn) Hello() error {
+ var s string
+ err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s)
+ if err != nil {
+ return err
+ }
+ conn.namesLck.Lock()
+ conn.names = make([]string, 1)
+ conn.names[0] = s
+ conn.namesLck.Unlock()
+ return nil
+}
+
+// inWorker runs in its own goroutine, reading incoming messages from the
+// transport and dispatching them appropriately.
+func (conn *Conn) inWorker() {
+ for {
+ msg, err := conn.ReadMessage()
+ if err == nil {
+ conn.eavesdroppedLck.Lock()
+ if conn.eavesdropped != nil {
+ select {
+ case conn.eavesdropped <- msg:
+ default:
+ }
+ conn.eavesdroppedLck.Unlock()
+ continue
+ }
+ conn.eavesdroppedLck.Unlock()
+ dest, _ := msg.Headers[FieldDestination].value.(string)
+ found := false
+ if dest == "" {
+ found = true
+ } else {
+ conn.namesLck.RLock()
+ if len(conn.names) == 0 {
+ found = true
+ }
+ for _, v := range conn.names {
+ if dest == v {
+ found = true
+ break
+ }
+ }
+ conn.namesLck.RUnlock()
+ }
+ if !found {
+ // Eavesdropped a message, but no channel for it is registered.
+ // Ignore it.
+ continue
+ }
+ switch msg.Type {
+ case TypeMethodReply, TypeError:
+ serial := msg.Headers[FieldReplySerial].value.(uint32)
+ conn.callsLck.Lock()
+ if c, ok := conn.calls[serial]; ok {
+ if msg.Type == TypeError {
+ name, _ := msg.Headers[FieldErrorName].value.(string)
+ c.Err = Error{name, msg.Body}
+ } else {
+ c.Body = msg.Body
+ }
+ c.Done <- c
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, serial)
+ conn.serialLck.Unlock()
+ delete(conn.calls, serial)
+ }
+ conn.callsLck.Unlock()
+ case TypeSignal:
+ iface := msg.Headers[FieldInterface].value.(string)
+ member := msg.Headers[FieldMember].value.(string)
+ // as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
+ // sender is optional for signals.
+ sender, _ := msg.Headers[FieldSender].value.(string)
+ if iface == "org.freedesktop.DBus" && member == "NameLost" &&
+ sender == "org.freedesktop.DBus" {
+
+ name, _ := msg.Body[0].(string)
+ conn.namesLck.Lock()
+ for i, v := range conn.names {
+ if v == name {
+ copy(conn.names[i:], conn.names[i+1:])
+ conn.names = conn.names[:len(conn.names)-1]
+ }
+ }
+ conn.namesLck.Unlock()
+ }
+ signal := &Signal{
+ Sender: sender,
+ Path: msg.Headers[FieldPath].value.(ObjectPath),
+ Name: iface + "." + member,
+ Body: msg.Body,
+ }
+ conn.signalsLck.Lock()
+ for _, ch := range conn.signals {
+ // don't block trying to send a signal
+ select {
+ case ch <- signal:
+ default:
+ }
+ }
+ conn.signalsLck.Unlock()
+ case TypeMethodCall:
+ go conn.handleCall(msg)
+ }
+ } else if _, ok := err.(InvalidMessageError); !ok {
+ // Some read error occured (usually EOF); we can't really do
+ // anything but to shut down all stuff and returns errors to all
+ // pending replies.
+ conn.Close()
+ conn.callsLck.RLock()
+ for _, v := range conn.calls {
+ v.Err = err
+ v.Done <- v
+ }
+ conn.callsLck.RUnlock()
+ return
+ }
+ // invalid messages are ignored
+ }
+}
+
+// Names returns the list of all names that are currently owned by this
+// connection. The slice is always at least one element long, the first element
+// being the unique name of the connection.
+func (conn *Conn) Names() []string {
+ conn.namesLck.RLock()
+ // copy the slice so it can't be modified
+ s := make([]string, len(conn.names))
+ copy(s, conn.names)
+ conn.namesLck.RUnlock()
+ return s
+}
+
+// Object returns the object identified by the given destination name and path.
+func (conn *Conn) Object(dest string, path ObjectPath) *Object {
+ return &Object{conn, dest, path}
+}
+
+// outWorker runs in its own goroutine, encoding and sending messages that are
+// sent to conn.out.
+func (conn *Conn) outWorker() {
+ for msg := range conn.out {
+ err := conn.SendMessage(msg)
+ conn.callsLck.RLock()
+ if err != nil {
+ if c := conn.calls[msg.serial]; c != nil {
+ c.Err = err
+ c.Done <- c
+ }
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, msg.serial)
+ conn.serialLck.Unlock()
+ } else if msg.Type != TypeMethodCall {
+ conn.serialLck.Lock()
+ delete(conn.serialUsed, msg.serial)
+ conn.serialLck.Unlock()
+ }
+ conn.callsLck.RUnlock()
+ }
+}
+
+// Send sends the given message to the message bus. You usually don't need to
+// use this; use the higher-level equivalents (Call / Go, Emit and Export)
+// instead. If msg is a method call and NoReplyExpected is not set, a non-nil
+// call is returned and the same value is sent to ch (which must be buffered)
+// once the call is complete. Otherwise, ch is ignored and a Call structure is
+// returned of which only the Err member is valid.
+func (conn *Conn) Send(msg *Message, ch chan *Call) *Call {
+ var call *Call
+
+ msg.serial = conn.getSerial()
+ if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 {
+ if ch == nil {
+ ch = make(chan *Call, 5)
+ } else if cap(ch) == 0 {
+ panic("dbus: unbuffered channel passed to (*Conn).Send")
+ }
+ call = new(Call)
+ call.Destination, _ = msg.Headers[FieldDestination].value.(string)
+ call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath)
+ iface, _ := msg.Headers[FieldInterface].value.(string)
+ member, _ := msg.Headers[FieldMember].value.(string)
+ call.Method = iface + "." + member
+ call.Args = msg.Body
+ call.Done = ch
+ conn.callsLck.Lock()
+ conn.calls[msg.serial] = call
+ conn.callsLck.Unlock()
+ conn.outLck.RLock()
+ if conn.closed {
+ call.Err = ErrClosed
+ call.Done <- call
+ } else {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+ } else {
+ conn.outLck.RLock()
+ if conn.closed {
+ call = &Call{Err: ErrClosed}
+ } else {
+ conn.out <- msg
+ call = &Call{Err: nil}
+ }
+ conn.outLck.RUnlock()
+ }
+ return call
+}
+
+// sendError creates an error message corresponding to the parameters and sends
+// it to conn.out.
+func (conn *Conn) sendError(e Error, dest string, serial uint32) {
+ msg := new(Message)
+ msg.Type = TypeError
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ if dest != "" {
+ msg.Headers[FieldDestination] = MakeVariant(dest)
+ }
+ msg.Headers[FieldErrorName] = MakeVariant(e.Name)
+ msg.Headers[FieldReplySerial] = MakeVariant(serial)
+ msg.Body = e.Body
+ if len(e.Body) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...))
+ }
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+}
+
+// sendReply creates a method reply message corresponding to the parameters and
+// sends it to conn.out.
+func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) {
+ msg := new(Message)
+ msg.Type = TypeMethodReply
+ msg.serial = conn.getSerial()
+ msg.Headers = make(map[HeaderField]Variant)
+ if dest != "" {
+ msg.Headers[FieldDestination] = MakeVariant(dest)
+ }
+ msg.Headers[FieldReplySerial] = MakeVariant(serial)
+ msg.Body = values
+ if len(values) > 0 {
+ msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
+ }
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- msg
+ }
+ conn.outLck.RUnlock()
+}
+
+// Signal registers the given channel to be passed all received signal messages.
+// The caller has to make sure that ch is sufficiently buffered; if a message
+// arrives when a write to c is not possible, it is discarded.
+//
+// Multiple of these channels can be registered at the same time. Passing a
+// channel that already is registered will remove it from the list of the
+// registered channels.
+//
+// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a
+// channel for eavesdropped messages, this channel receives all signals, and
+// none of the channels passed to Signal will receive any signals.
+func (conn *Conn) Signal(ch chan<- *Signal) {
+ conn.signalsLck.Lock()
+ conn.signals = append(conn.signals, ch)
+ conn.signalsLck.Unlock()
+}
+
+// SupportsUnixFDs returns whether the underlying transport supports passing of
+// unix file descriptors. If this is false, method calls containing unix file
+// descriptors will return an error and emitted signals containing them will
+// not be sent.
+func (conn *Conn) SupportsUnixFDs() bool {
+ return conn.unixFD
+}
+
+// Error represents a D-Bus message of type Error.
+type Error struct {
+ Name string
+ Body []interface{}
+}
+
+func (e Error) Error() string {
+ if len(e.Body) >= 1 {
+ s, ok := e.Body[0].(string)
+ if ok {
+ return s
+ }
+ }
+ return e.Name
+}
+
+// Signal represents a D-Bus message of type Signal. The name member is given in
+// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost.
+type Signal struct {
+ Sender string
+ Path ObjectPath
+ Name string
+ Body []interface{}
+}
+
+// transport is a D-Bus transport.
+type transport interface {
+ // Read and Write raw data (for example, for the authentication protocol).
+ io.ReadWriteCloser
+
+ // Send the initial null byte used for the EXTERNAL mechanism.
+ SendNullByte() error
+
+ // Returns whether this transport supports passing Unix FDs.
+ SupportsUnixFDs() bool
+
+ // Signal the transport that Unix FD passing is enabled for this connection.
+ EnableUnixFDs()
+
+ // Read / send a message, handling things like Unix FDs.
+ ReadMessage() (*Message, error)
+ SendMessage(*Message) error
+}
+
+func getTransport(address string) (transport, error) {
+ var err error
+ var t transport
+
+ m := map[string]func(string) (transport, error){
+ "unix": newUnixTransport,
+ }
+ addresses := strings.Split(address, ";")
+ for _, v := range addresses {
+ i := strings.IndexRune(v, ':')
+ if i == -1 {
+ err = errors.New("dbus: invalid bus address (no transport)")
+ continue
+ }
+ f := m[v[:i]]
+ if f == nil {
+ err = errors.New("dbus: invalid bus address (invalid or unsupported transport)")
+ }
+ t, err = f(v[i+1:])
+ if err == nil {
+ return t, nil
+ }
+ }
+ return nil, err
+}
+
+// dereferenceAll returns a slice that, assuming that vs is a slice of pointers
+// of arbitrary types, contains the values that are obtained from dereferencing
+// all elements in vs.
+func dereferenceAll(vs []interface{}) []interface{} {
+ for i := range vs {
+ v := reflect.ValueOf(vs[i])
+ v = v.Elem()
+ vs[i] = v.Interface()
+ }
+ return vs
+}
+
+// getKey gets a key from a the list of keys. Returns "" on error / not found...
+func getKey(s, key string) string {
+ i := strings.Index(s, key)
+ if i == -1 {
+ return ""
+ }
+ if i+len(key)+1 >= len(s) || s[i+len(key)] != '=' {
+ return ""
+ }
+ j := strings.Index(s, ",")
+ if j == -1 {
+ j = len(s)
+ }
+ return s[i+len(key)+1 : j]
+}
diff --git a/vendor/src/github.com/godbus/dbus/conn_darwin.go b/vendor/src/github.com/godbus/dbus/conn_darwin.go
new file mode 100644
index 0000000000..b67bb1b81d
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/conn_darwin.go
@@ -0,0 +1,21 @@
+package dbus
+
+import (
+ "errors"
+ "os/exec"
+)
+
+func sessionBusPlatform() (*Conn, error) {
+ cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET")
+ b, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ return nil, errors.New("dbus: couldn't determine address of session bus")
+ }
+
+ return Dial("unix:path=" + string(b[:len(b)-1]))
+}
diff --git a/vendor/src/github.com/godbus/dbus/conn_other.go b/vendor/src/github.com/godbus/dbus/conn_other.go
new file mode 100644
index 0000000000..f74b8758d4
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/conn_other.go
@@ -0,0 +1,27 @@
+// +build !darwin
+
+package dbus
+
+import (
+ "bytes"
+ "errors"
+ "os/exec"
+)
+
+func sessionBusPlatform() (*Conn, error) {
+ cmd := exec.Command("dbus-launch")
+ b, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return nil, err
+ }
+
+ i := bytes.IndexByte(b, '=')
+ j := bytes.IndexByte(b, '\n')
+
+ if i == -1 || j == -1 {
+ return nil, errors.New("dbus: couldn't determine address of session bus")
+ }
+
+ return Dial(string(b[i+1 : j]))
+}
diff --git a/vendor/src/github.com/godbus/dbus/conn_test.go b/vendor/src/github.com/godbus/dbus/conn_test.go
new file mode 100644
index 0000000000..a2b14e8cc4
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/conn_test.go
@@ -0,0 +1,199 @@
+package dbus
+
+import "testing"
+
+func TestSessionBus(t *testing.T) {
+ _, err := SessionBus()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestSystemBus(t *testing.T) {
+ _, err := SystemBus()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestSend(t *testing.T) {
+ bus, err := SessionBus()
+ if err != nil {
+ t.Error(err)
+ }
+ ch := make(chan *Call, 1)
+ msg := &Message{
+ Type: TypeMethodCall,
+ Flags: 0,
+ Headers: map[HeaderField]Variant{
+ FieldDestination: MakeVariant(bus.Names()[0]),
+ FieldPath: MakeVariant(ObjectPath("/org/freedesktop/DBus")),
+ FieldInterface: MakeVariant("org.freedesktop.DBus.Peer"),
+ FieldMember: MakeVariant("Ping"),
+ },
+ }
+ call := bus.Send(msg, ch)
+ <-ch
+ if call.Err != nil {
+ t.Error(call.Err)
+ }
+}
+
+type server struct{}
+
+func (server) Double(i int64) (int64, *Error) {
+ return 2 * i, nil
+}
+
+func BenchmarkCall(b *testing.B) {
+ b.StopTimer()
+ var s string
+ bus, err := SessionBus()
+ if err != nil {
+ b.Fatal(err)
+ }
+ name := bus.Names()[0]
+ obj := bus.BusObject()
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ err := obj.Call("org.freedesktop.DBus.GetNameOwner", 0, name).Store(&s)
+ if err != nil {
+ b.Fatal(err)
+ }
+ if s != name {
+ b.Errorf("got %s, wanted %s", s, name)
+ }
+ }
+}
+
// BenchmarkCallAsync measures asynchronous method calls over the session
// bus. Replies are consumed and checked by a separate goroutine.
func BenchmarkCallAsync(b *testing.B) {
	b.StopTimer()
	bus, err := SessionBus()
	if err != nil {
		b.Fatal(err)
	}
	name := bus.Names()[0]
	obj := bus.BusObject()
	c := make(chan *Call, 50)
	done := make(chan struct{})
	go func() {
		// Consume exactly b.N replies, then signal completion.
		for i := 0; i < b.N; i++ {
			v := <-c
			if v.Err != nil {
				b.Error(v.Err)
			}
			s := v.Body[0].(string)
			if s != name {
				b.Errorf("got %s, wanted %s", s, name)
			}
		}
		close(done)
	}()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		obj.Go("org.freedesktop.DBus.GetNameOwner", 0, c, name)
	}
	<-done
}
+
// BenchmarkServe measures synchronous round trips between two distinct
// connections to the session bus: one serving an object, one calling it.
func BenchmarkServe(b *testing.B) {
	b.StopTimer()
	srv, err := SessionBus()
	if err != nil {
		b.Fatal(err)
	}
	// The client uses a private (unshared) connection so the call actually
	// crosses the bus.
	cli, err := SessionBusPrivate()
	if err != nil {
		b.Fatal(err)
	}
	if err = cli.Auth(nil); err != nil {
		b.Fatal(err)
	}
	if err = cli.Hello(); err != nil {
		b.Fatal(err)
	}
	benchmarkServe(b, srv, cli)
}
+
// BenchmarkServeAsync is the asynchronous variant of BenchmarkServe: two
// distinct connections, calls issued without waiting for each reply.
func BenchmarkServeAsync(b *testing.B) {
	b.StopTimer()
	srv, err := SessionBus()
	if err != nil {
		b.Fatal(err)
	}
	// The client uses a private (unshared) connection so the call actually
	// crosses the bus.
	cli, err := SessionBusPrivate()
	if err != nil {
		b.Fatal(err)
	}
	if err = cli.Auth(nil); err != nil {
		b.Fatal(err)
	}
	if err = cli.Hello(); err != nil {
		b.Fatal(err)
	}
	benchmarkServeAsync(b, srv, cli)
}
+
// BenchmarkServeSameConn measures synchronous calls where server and client
// share a single connection to the session bus.
func BenchmarkServeSameConn(b *testing.B) {
	b.StopTimer()
	bus, err := SessionBus()
	if err != nil {
		b.Fatal(err)
	}

	benchmarkServe(b, bus, bus)
}
+
// BenchmarkServeSameConnAsync measures asynchronous calls where server and
// client share a single connection to the session bus.
func BenchmarkServeSameConnAsync(b *testing.B) {
	b.StopTimer()
	bus, err := SessionBus()
	if err != nil {
		b.Fatal(err)
	}

	benchmarkServeAsync(b, bus, bus)
}
+
// benchmarkServe exports a test object on srv and measures synchronous
// Double calls to it from cli, verifying each result.
func benchmarkServe(b *testing.B, srv, cli *Conn) {
	var r int64
	var err error
	dest := srv.Names()[0]
	srv.Export(server{}, "/org/guelfey/DBus/Test", "org.guelfey.DBus.Test")
	obj := cli.Object(dest, "/org/guelfey/DBus/Test")
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		err = obj.Call("org.guelfey.DBus.Test.Double", 0, int64(i)).Store(&r)
		if err != nil {
			b.Fatal(err)
		}
		if r != 2*int64(i) {
			b.Errorf("got %d, wanted %d", r, 2*int64(i))
		}
	}
}
+
+func benchmarkServeAsync(b *testing.B, srv, cli *Conn) {
+ dest := srv.Names()[0]
+ srv.Export(server{}, "/org/guelfey/DBus/Test", "org.guelfey.DBus.Test")
+ obj := cli.Object(dest, "/org/guelfey/DBus/Test")
+ c := make(chan *Call, 50)
+ done := make(chan struct{})
+ go func() {
+ for i := 0; i < b.N; i++ {
+ v := <-c
+ if v.Err != nil {
+ b.Fatal(v.Err)
+ }
+ i, r := v.Args[0].(int64), v.Body[0].(int64)
+ if 2*i != r {
+ b.Errorf("got %d, wanted %d", r, 2*i)
+ }
+ }
+ close(done)
+ }()
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ obj.Go("org.guelfey.DBus.Test.Double", 0, c, int64(i))
+ }
+ <-done
+}
diff --git a/vendor/src/github.com/godbus/dbus/dbus.go b/vendor/src/github.com/godbus/dbus/dbus.go
new file mode 100644
index 0000000000..2ce68735cd
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/dbus.go
@@ -0,0 +1,258 @@
+package dbus
+
+import (
+ "errors"
+ "reflect"
+ "strings"
+)
+
// Cached reflect.Type values for the Go representations of all D-Bus types;
// used throughout the package to avoid repeated reflect.TypeOf calls.
var (
	byteType        = reflect.TypeOf(byte(0))
	boolType        = reflect.TypeOf(false)
	uint8Type       = reflect.TypeOf(uint8(0))
	int16Type       = reflect.TypeOf(int16(0))
	uint16Type      = reflect.TypeOf(uint16(0))
	int32Type       = reflect.TypeOf(int32(0))
	uint32Type      = reflect.TypeOf(uint32(0))
	int64Type       = reflect.TypeOf(int64(0))
	uint64Type      = reflect.TypeOf(uint64(0))
	float64Type     = reflect.TypeOf(float64(0))
	stringType      = reflect.TypeOf("")
	signatureType   = reflect.TypeOf(Signature{""})
	objectPathType  = reflect.TypeOf(ObjectPath(""))
	variantType     = reflect.TypeOf(Variant{Signature{""}, nil})
	interfacesType  = reflect.TypeOf([]interface{}{})
	unixFDType      = reflect.TypeOf(UnixFD(0))
	unixFDIndexType = reflect.TypeOf(UnixFDIndex(0))
)
+
+// An InvalidTypeError signals that a value which cannot be represented in the
+// D-Bus wire format was passed to a function.
+type InvalidTypeError struct {
+ Type reflect.Type
+}
+
+func (e InvalidTypeError) Error() string {
+ return "dbus: invalid type " + e.Type.String()
+}
+
+// Store copies the values contained in src to dest, which must be a slice of
+// pointers. It converts slices of interfaces from src to corresponding structs
+// in dest. An error is returned if the lengths of src and dest or the types of
+// their elements don't match.
+func Store(src []interface{}, dest ...interface{}) error {
+ if len(src) != len(dest) {
+ return errors.New("dbus.Store: length mismatch")
+ }
+
+ for i := range src {
+ if err := store(src[i], dest[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func store(src, dest interface{}) error {
+ if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) {
+ reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src))
+ return nil
+ } else if hasStruct(dest) {
+ rv := reflect.ValueOf(dest).Elem()
+ switch rv.Kind() {
+ case reflect.Struct:
+ vs, ok := src.([]interface{})
+ if !ok {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ t := rv.Type()
+ ndest := make([]interface{}, 0, rv.NumField())
+ for i := 0; i < rv.NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ ndest = append(ndest, rv.Field(i).Addr().Interface())
+ }
+ }
+ if len(vs) != len(ndest) {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ err := Store(vs, ndest...)
+ if err != nil {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ case reflect.Slice:
+ sv := reflect.ValueOf(src)
+ if sv.Kind() != reflect.Slice {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
+ for i := 0; i < sv.Len(); i++ {
+ if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil {
+ return err
+ }
+ }
+ case reflect.Map:
+ sv := reflect.ValueOf(src)
+ if sv.Kind() != reflect.Map {
+ return errors.New("dbus.Store: type mismatch")
+ }
+ keys := sv.MapKeys()
+ rv.Set(reflect.MakeMap(sv.Type()))
+ for _, key := range keys {
+ v := reflect.New(sv.Type().Elem())
+ if err := store(v, sv.MapIndex(key).Interface()); err != nil {
+ return err
+ }
+ rv.SetMapIndex(key, v.Elem())
+ }
+ default:
+ return errors.New("dbus.Store: type mismatch")
+ }
+ return nil
+ } else {
+ return errors.New("dbus.Store: type mismatch")
+ }
+}
+
// hasStruct reports whether v's type contains a struct anywhere below it,
// looking through pointers, slices and map values.
func hasStruct(v interface{}) bool {
	for t := reflect.TypeOf(v); ; {
		switch t.Kind() {
		case reflect.Struct:
			return true
		case reflect.Slice, reflect.Ptr, reflect.Map:
			t = t.Elem()
		default:
			return false
		}
	}
}
+
+// An ObjectPath is an object path as defined by the D-Bus spec.
+type ObjectPath string
+
+// IsValid returns whether the object path is valid.
+func (o ObjectPath) IsValid() bool {
+ s := string(o)
+ if len(s) == 0 {
+ return false
+ }
+ if s[0] != '/' {
+ return false
+ }
+ if s[len(s)-1] == '/' && len(s) != 1 {
+ return false
+ }
+ // probably not used, but technically possible
+ if s == "/" {
+ return true
+ }
+ split := strings.Split(s[1:], "/")
+ for _, v := range split {
+ if len(v) == 0 {
+ return false
+ }
+ for _, c := range v {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
// A UnixFD is a Unix file descriptor sent over the wire. See the package-level
// documentation for more information about Unix file descriptor passing.
type UnixFD int32

// A UnixFDIndex is the representation of a Unix file descriptor in a message.
type UnixFDIndex uint32
+
+// alignment returns the alignment of values of type t.
+func alignment(t reflect.Type) int {
+ switch t {
+ case variantType:
+ return 1
+ case objectPathType:
+ return 4
+ case signatureType:
+ return 1
+ case interfacesType: // sometimes used for structs
+ return 8
+ }
+ switch t.Kind() {
+ case reflect.Uint8:
+ return 1
+ case reflect.Uint16, reflect.Int16:
+ return 2
+ case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
+ return 4
+ case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct:
+ return 8
+ case reflect.Ptr:
+ return alignment(t.Elem())
+ }
+ return 1
+}
+
+// isKeyType returns whether t is a valid type for a D-Bus dict.
+func isKeyType(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64,
+ reflect.String:
+
+ return true
+ }
+ return false
+}
+
+// isValidInterface returns whether s is a valid name for an interface.
+func isValidInterface(s string) bool {
+ if len(s) == 0 || len(s) > 255 || s[0] == '.' {
+ return false
+ }
+ elem := strings.Split(s, ".")
+ if len(elem) < 2 {
+ return false
+ }
+ for _, v := range elem {
+ if len(v) == 0 {
+ return false
+ }
+ if v[0] >= '0' && v[0] <= '9' {
+ return false
+ }
+ for _, c := range v {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// isValidMember returns whether s is a valid name for a member.
+func isValidMember(s string) bool {
+ if len(s) == 0 || len(s) > 255 {
+ return false
+ }
+ i := strings.Index(s, ".")
+ if i != -1 {
+ return false
+ }
+ if s[0] >= '0' && s[0] <= '9' {
+ return false
+ }
+ for _, c := range s {
+ if !isMemberChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
// isMemberChar reports whether c may appear in a D-Bus name element:
// ASCII letters, digits and underscore.
func isMemberChar(c rune) bool {
	switch {
	case c >= '0' && c <= '9', c >= 'A' && c <= 'Z',
		c >= 'a' && c <= 'z', c == '_':
		return true
	}
	return false
}
diff --git a/vendor/src/github.com/godbus/dbus/decoder.go b/vendor/src/github.com/godbus/dbus/decoder.go
new file mode 100644
index 0000000000..ef50dcab98
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/decoder.go
@@ -0,0 +1,228 @@
+package dbus
+
+import (
+ "encoding/binary"
+ "io"
+ "reflect"
+)
+
+type decoder struct {
+ in io.Reader
+ order binary.ByteOrder
+ pos int
+}
+
+// newDecoder returns a new decoder that reads values from in. The input is
+// expected to be in the given byte order.
+func newDecoder(in io.Reader, order binary.ByteOrder) *decoder {
+ dec := new(decoder)
+ dec.in = in
+ dec.order = order
+ return dec
+}
+
// align aligns the input to the given boundary and panics on error.
func (dec *decoder) align(n int) {
	if dec.pos%n != 0 {
		// Round pos up to the next multiple of n (n is always a power of
		// two here) and consume the intervening padding bytes.
		newpos := (dec.pos + n - 1) & ^(n - 1)
		empty := make([]byte, newpos-dec.pos)
		if _, err := io.ReadFull(dec.in, empty); err != nil {
			panic(err)
		}
		dec.pos = newpos
	}
}
+
// Calls binary.Read(dec.in, dec.order, v) and panics on read errors.
// Note: the caller is responsible for advancing dec.pos afterwards.
func (dec *decoder) binread(v interface{}) {
	if err := binary.Read(dec.in, dec.order, v); err != nil {
		panic(err)
	}
}
+
// Decode decodes a sequence of values described by the signature sig from
// the input and returns them. Panics raised by the internal decode (read
// failures, malformed input) are converted into returned errors.
func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) {
	defer func() {
		// decode panics on malformed input; translate that to an error.
		var ok bool
		v := recover()
		if err, ok = v.(error); ok {
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				err = FormatError("unexpected EOF")
			}
		}
	}()
	vs = make([]interface{}, 0)
	s := sig.str
	for s != "" {
		// Split off the first complete single type of the signature and
		// decode the corresponding value.
		err, rem := validSingle(s, 0)
		if err != nil {
			return nil, err
		}
		v := dec.decode(s[:len(s)-len(rem)], 0)
		vs = append(vs, v)
		s = rem
	}
	return vs, nil
}
+
// decode reads a single value described by the signature s from the input
// and returns it. depth tracks container nesting so the spec's limit of 64
// nested containers can be enforced. decode panics on read or format errors;
// Decode converts those panics into errors.
func (dec *decoder) decode(s string, depth int) interface{} {
	// Every value is aligned to the boundary of its type.
	dec.align(alignment(typeFor(s)))
	switch s[0] {
	case 'y': // BYTE
		var b [1]byte
		if _, err := dec.in.Read(b[:]); err != nil {
			panic(err)
		}
		dec.pos++
		return b[0]
	case 'b': // BOOLEAN: marshalled as a UINT32 that must be 0 or 1
		i := dec.decode("u", depth).(uint32)
		switch {
		case i == 0:
			return false
		case i == 1:
			return true
		default:
			panic(FormatError("invalid value for boolean"))
		}
	case 'n': // INT16
		var i int16
		dec.binread(&i)
		dec.pos += 2
		return i
	case 'i': // INT32
		var i int32
		dec.binread(&i)
		dec.pos += 4
		return i
	case 'x': // INT64
		var i int64
		dec.binread(&i)
		dec.pos += 8
		return i
	case 'q': // UINT16
		var i uint16
		dec.binread(&i)
		dec.pos += 2
		return i
	case 'u': // UINT32
		var i uint32
		dec.binread(&i)
		dec.pos += 4
		return i
	case 't': // UINT64
		var i uint64
		dec.binread(&i)
		dec.pos += 8
		return i
	case 'd': // DOUBLE
		var f float64
		dec.binread(&f)
		dec.pos += 8
		return f
	case 's': // STRING: UINT32 length, bytes, terminating NUL
		length := dec.decode("u", depth).(uint32)
		b := make([]byte, int(length)+1)
		if _, err := io.ReadFull(dec.in, b); err != nil {
			panic(err)
		}
		dec.pos += int(length) + 1
		return string(b[:len(b)-1])
	case 'o': // OBJECT_PATH: wire format identical to STRING
		return ObjectPath(dec.decode("s", depth).(string))
	case 'g': // SIGNATURE: single-byte length, bytes, terminating NUL
		length := dec.decode("y", depth).(byte)
		b := make([]byte, int(length)+1)
		if _, err := io.ReadFull(dec.in, b); err != nil {
			panic(err)
		}
		dec.pos += int(length) + 1
		sig, err := ParseSignature(string(b[:len(b)-1]))
		if err != nil {
			panic(err)
		}
		return sig
	case 'v': // VARIANT: an embedded signature followed by a single value
		if depth >= 64 {
			panic(FormatError("input exceeds container depth limit"))
		}
		var variant Variant
		sig := dec.decode("g", depth).(Signature)
		if len(sig.str) == 0 {
			panic(FormatError("variant signature is empty"))
		}
		err, rem := validSingle(sig.str, 0)
		if err != nil {
			panic(err)
		}
		if rem != "" {
			panic(FormatError("variant signature has multiple types"))
		}
		variant.sig = sig
		variant.value = dec.decode(sig.str, depth+1)
		return variant
	case 'h': // UNIX_FD: an index into the list of passed file descriptors
		return UnixFDIndex(dec.decode("u", depth).(uint32))
	case 'a': // ARRAY, or DICT when the element signature starts with '{'
		if len(s) > 1 && s[1] == '{' {
			// DICT: an array of 8-byte-aligned key/value entries, decoded
			// into a Go map. A dict counts as two container levels.
			ksig := s[2:3]
			vsig := s[3 : len(s)-1]
			v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig)))
			if depth >= 63 {
				panic(FormatError("input exceeds container depth limit"))
			}
			// The array length is in bytes, not entries.
			length := dec.decode("u", depth).(uint32)
			// Even for empty maps, the correct padding must be included
			dec.align(8)
			spos := dec.pos
			for dec.pos < spos+int(length) {
				dec.align(8)
				if !isKeyType(v.Type().Key()) {
					panic(InvalidTypeError{v.Type()})
				}
				kv := dec.decode(ksig, depth+2)
				vv := dec.decode(vsig, depth+2)
				v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
			}
			return v.Interface()
		}
		if depth >= 64 {
			panic(FormatError("input exceeds container depth limit"))
		}
		// ARRAY: UINT32 byte length followed by the elements.
		length := dec.decode("u", depth).(uint32)
		v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length))
		// Even for empty arrays, the correct padding must be included
		dec.align(alignment(typeFor(s[1:])))
		spos := dec.pos
		for dec.pos < spos+int(length) {
			ev := dec.decode(s[1:], depth+1)
			v = reflect.Append(v, reflect.ValueOf(ev))
		}
		return v.Interface()
	case '(': // STRUCT: 8-byte-aligned sequence of fields
		if depth >= 64 {
			panic(FormatError("input exceeds container depth limit"))
		}
		dec.align(8)
		v := make([]interface{}, 0)
		// Strip the surrounding parentheses and decode field by field.
		s = s[1 : len(s)-1]
		for s != "" {
			err, rem := validSingle(s, 0)
			if err != nil {
				panic(err)
			}
			ev := dec.decode(s[:len(s)-len(rem)], depth+1)
			v = append(v, ev)
			s = rem
		}
		return v
	default:
		panic(SignatureError{Sig: s})
	}
}
+
// A FormatError is an error in the wire format.
type FormatError string

// Error implements the error interface.
func (e FormatError) Error() string {
	return "dbus: wire format error: " + string(e)
}
diff --git a/vendor/src/github.com/godbus/dbus/doc.go b/vendor/src/github.com/godbus/dbus/doc.go
new file mode 100644
index 0000000000..deff554a38
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/doc.go
@@ -0,0 +1,63 @@
+/*
+Package dbus implements bindings to the D-Bus message bus system.
+
+To use the message bus API, you first need to connect to a bus (usually the
+session or system bus). The acquired connection then can be used to call methods
+on remote objects and emit or receive signals. Using the Export method, you can
+arrange D-Bus methods calls to be directly translated to method calls on a Go
+value.
+
+Conversion Rules
+
+For outgoing messages, Go types are automatically converted to the
+corresponding D-Bus types. The following types are directly encoded as their
+respective D-Bus equivalents:
+
+ Go type | D-Bus type
+ ------------+-----------
+ byte | BYTE
+ bool | BOOLEAN
+ int16 | INT16
+ uint16 | UINT16
+ int32 | INT32
+ uint32 | UINT32
+ int64 | INT64
+ uint64 | UINT64
+ float64 | DOUBLE
+ string | STRING
+ ObjectPath | OBJECT_PATH
+ Signature | SIGNATURE
+ Variant | VARIANT
+ UnixFDIndex | UNIX_FD
+
+Slices and arrays encode as ARRAYs of their element type.
+
+Maps encode as DICTs, provided that their key type can be used as a key for
+a DICT.
+
+Structs other than Variant and Signature encode as a STRUCT containing their
+exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
+be skipped.
+
+Pointers encode as the value they're pointed to.
+
+Trying to encode any other type or a slice, map or struct containing an
+unsupported type will result in an InvalidTypeError.
+
+For incoming messages, the inverse of these rules are used, with the exception
+of STRUCTs. Incoming STRUCTS are represented as a slice of empty interfaces
+containing the struct fields in the correct order. The Store function can be
+used to convert such values to Go structs.
+
+Unix FD passing
+
+Handling Unix file descriptors deserves special mention. To use them, you should
+first check that they are supported on a connection by calling SupportsUnixFDs.
+If it returns true, all methods of Connection will translate messages containing
+UnixFD's to messages that are accompanied by the given file descriptors with the
+UnixFD values being substituted by the correct indices. Similarly, the indices
+of incoming messages are automatically resolved. It shouldn't be necessary to use
+UnixFDIndex.
+
+*/
+package dbus
diff --git a/vendor/src/github.com/godbus/dbus/encoder.go b/vendor/src/github.com/godbus/dbus/encoder.go
new file mode 100644
index 0000000000..f9d2f05716
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/encoder.go
@@ -0,0 +1,179 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "reflect"
+)
+
+// An encoder encodes values to the D-Bus wire format.
+type encoder struct {
+ out io.Writer
+ order binary.ByteOrder
+ pos int
+}
+
+// NewEncoder returns a new encoder that writes to out in the given byte order.
+func newEncoder(out io.Writer, order binary.ByteOrder) *encoder {
+ enc := new(encoder)
+ enc.out = out
+ enc.order = order
+ return enc
+}
+
// Aligns the next output to be on a multiple of n. Panics on write errors.
func (enc *encoder) align(n int) {
	if enc.pos%n != 0 {
		// Round pos up to the next multiple of n (n is always a power of
		// two here) and emit that many zero padding bytes.
		newpos := (enc.pos + n - 1) & ^(n - 1)
		empty := make([]byte, newpos-enc.pos)
		if _, err := enc.out.Write(empty); err != nil {
			panic(err)
		}
		enc.pos = newpos
	}
}
+
// Calls binary.Write(enc.out, enc.order, v) and panics on write errors.
// Note: the caller is responsible for advancing enc.pos afterwards.
func (enc *encoder) binwrite(v interface{}) {
	if err := binary.Write(enc.out, enc.order, v); err != nil {
		panic(err)
	}
}
+
// Encode encodes the given values to the underlying writer. All written values
// are aligned properly as required by the D-Bus spec.
func (enc *encoder) Encode(vs ...interface{}) (err error) {
	defer func() {
		// encode panics on invalid types and write errors; turn those
		// panics into a returned error (nil if the panic value was not an
		// error).
		err, _ = recover().(error)
	}()
	for _, v := range vs {
		enc.encode(reflect.ValueOf(v), 0)
	}
	return nil
}
+
// encode encodes the given value to the writer and panics on error. depth holds
// the depth of the container nesting, enforced against the spec's limit of 64.
func (enc *encoder) encode(v reflect.Value, depth int) {
	// Every value is aligned to the boundary of its type.
	enc.align(alignment(v.Type()))
	switch v.Kind() {
	case reflect.Uint8: // BYTE
		var b [1]byte
		b[0] = byte(v.Uint())
		if _, err := enc.out.Write(b[:]); err != nil {
			panic(err)
		}
		enc.pos++
	case reflect.Bool: // BOOLEAN: marshalled as UINT32 0 or 1
		if v.Bool() {
			enc.encode(reflect.ValueOf(uint32(1)), depth)
		} else {
			enc.encode(reflect.ValueOf(uint32(0)), depth)
		}
	case reflect.Int16:
		enc.binwrite(int16(v.Int()))
		enc.pos += 2
	case reflect.Uint16:
		enc.binwrite(uint16(v.Uint()))
		enc.pos += 2
	case reflect.Int32:
		enc.binwrite(int32(v.Int()))
		enc.pos += 4
	case reflect.Uint32:
		enc.binwrite(uint32(v.Uint()))
		enc.pos += 4
	case reflect.Int64:
		enc.binwrite(v.Int())
		enc.pos += 8
	case reflect.Uint64:
		enc.binwrite(v.Uint())
		enc.pos += 8
	case reflect.Float64:
		enc.binwrite(v.Float())
		enc.pos += 8
	case reflect.String: // STRING: UINT32 length, bytes, terminating NUL
		enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth)
		b := make([]byte, v.Len()+1)
		copy(b, v.String())
		b[len(b)-1] = 0
		n, err := enc.out.Write(b)
		if err != nil {
			panic(err)
		}
		enc.pos += n
	case reflect.Ptr:
		// Pointers encode as the value they point to.
		enc.encode(v.Elem(), depth)
	case reflect.Slice, reflect.Array: // ARRAY: byte length, then elements
		if depth >= 64 {
			panic(FormatError("input exceeds container depth limit"))
		}
		// Encode the elements into a scratch buffer first, since the total
		// byte length must be written before them.
		var buf bytes.Buffer
		bufenc := newEncoder(&buf, enc.order)

		for i := 0; i < v.Len(); i++ {
			bufenc.encode(v.Index(i), depth+1)
		}
		enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
		length := buf.Len()
		enc.align(alignment(v.Type().Elem()))
		if _, err := buf.WriteTo(enc.out); err != nil {
			panic(err)
		}
		enc.pos += length
	case reflect.Struct:
		// Signature values are exempt from the depth limit.
		if depth >= 64 && v.Type() != signatureType {
			panic(FormatError("input exceeds container depth limit"))
		}
		switch t := v.Type(); t {
		case signatureType: // SIGNATURE: single-byte length, bytes, NUL
			str := v.Field(0)
			enc.encode(reflect.ValueOf(byte(str.Len())), depth+1)
			b := make([]byte, str.Len()+1)
			copy(b, str.String())
			b[len(b)-1] = 0
			n, err := enc.out.Write(b)
			if err != nil {
				panic(err)
			}
			enc.pos += n
		case variantType: // VARIANT: embedded signature, then the value
			variant := v.Interface().(Variant)
			enc.encode(reflect.ValueOf(variant.sig), depth+1)
			enc.encode(reflect.ValueOf(variant.value), depth+1)
		default: // STRUCT: exported, non-skipped fields in declaration order
			for i := 0; i < v.Type().NumField(); i++ {
				field := t.Field(i)
				if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
					enc.encode(v.Field(i), depth+1)
				}
			}
		}
	case reflect.Map:
		// Maps are arrays of structures, so they actually increase the depth by
		// 2.
		if depth >= 63 {
			panic(FormatError("input exceeds container depth limit"))
		}
		if !isKeyType(v.Type().Key()) {
			panic(InvalidTypeError{v.Type()})
		}
		keys := v.MapKeys()
		// As with arrays, marshal the entries into a scratch buffer so that
		// the total byte length can be written first.
		var buf bytes.Buffer
		bufenc := newEncoder(&buf, enc.order)
		for _, k := range keys {
			bufenc.align(8)
			bufenc.encode(k, depth+2)
			bufenc.encode(v.MapIndex(k), depth+2)
		}
		enc.encode(reflect.ValueOf(uint32(buf.Len())), depth)
		length := buf.Len()
		enc.align(8)
		if _, err := buf.WriteTo(enc.out); err != nil {
			panic(err)
		}
		enc.pos += length
	default:
		panic(InvalidTypeError{v.Type()})
	}
}
diff --git a/vendor/src/github.com/godbus/dbus/examples_test.go b/vendor/src/github.com/godbus/dbus/examples_test.go
new file mode 100644
index 0000000000..0218ac5598
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/examples_test.go
@@ -0,0 +1,50 @@
+package dbus
+
+import "fmt"
+
// ExampleConn_Emit shows how to emit a signal on the session bus.
func ExampleConn_Emit() {
	conn, err := SessionBus()
	if err != nil {
		panic(err)
	}

	// Emit the signal foo.bar.Baz on object /foo/bar with one uint32
	// argument; the error return is deliberately ignored in this example.
	conn.Emit("/foo/bar", "foo.bar.Baz", uint32(0xDAEDBEEF))
}
+
+func ExampleObject_Call() {
+ var list []string
+
+ conn, err := SessionBus()
+ if err != nil {
+ panic(err)
+ }
+
+ err = conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&list)
+ if err != nil {
+ panic(err)
+ }
+ for _, v := range list {
+ fmt.Println(v)
+ }
+}
+
+func ExampleObject_Go() {
+ conn, err := SessionBus()
+ if err != nil {
+ panic(err)
+ }
+
+ ch := make(chan *Call, 10)
+ conn.BusObject().Go("org.freedesktop.DBus.ListActivatableNames", 0, ch)
+ select {
+ case call := <-ch:
+ if call.Err != nil {
+ panic(err)
+ }
+ list := call.Body[0].([]string)
+ for _, v := range list {
+ fmt.Println(v)
+ }
+ // put some other cases here
+ }
+}
diff --git a/vendor/src/github.com/godbus/dbus/export.go b/vendor/src/github.com/godbus/dbus/export.go
new file mode 100644
index 0000000000..1dd1591528
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/export.go
@@ -0,0 +1,302 @@
+package dbus
+
+import (
+ "errors"
+ "reflect"
+ "strings"
+ "unicode"
+)
+
// Pre-built error replies for the standard failure modes when dispatching an
// incoming method call.
var (
	errmsgInvalidArg = Error{
		"org.freedesktop.DBus.Error.InvalidArgs",
		[]interface{}{"Invalid type / number of args"},
	}
	errmsgNoObject = Error{
		"org.freedesktop.DBus.Error.NoSuchObject",
		[]interface{}{"No such object"},
	}
	errmsgUnknownMethod = Error{
		"org.freedesktop.DBus.Error.UnknownMethod",
		[]interface{}{"Unknown / invalid method"},
	}
)
+
// Sender is a type which can be used in exported methods to receive the
// message sender. Parameters of this type are filled in by handleCall from
// the message header and do not consume values from the message body.
type Sender string
+
+func exportedMethod(v interface{}, name string) reflect.Value {
+ if v == nil {
+ return reflect.Value{}
+ }
+ m := reflect.ValueOf(v).MethodByName(name)
+ if !m.IsValid() {
+ return reflect.Value{}
+ }
+ t := m.Type()
+ if t.NumOut() == 0 ||
+ t.Out(t.NumOut()-1) != reflect.TypeOf(&errmsgInvalidArg) {
+
+ return reflect.Value{}
+ }
+ return m
+}
+
+// handleCall handles the given method call (i.e. looks if it's one of the
+// pre-implemented ones and searches for a corresponding handler if not).
+func (conn *Conn) handleCall(msg *Message) {
+ name := msg.Headers[FieldMember].value.(string)
+ path := msg.Headers[FieldPath].value.(ObjectPath)
+ ifaceName, hasIface := msg.Headers[FieldInterface].value.(string)
+ sender, hasSender := msg.Headers[FieldSender].value.(string)
+ serial := msg.serial
+ if ifaceName == "org.freedesktop.DBus.Peer" {
+ switch name {
+ case "Ping":
+ conn.sendReply(sender, serial)
+ case "GetMachineId":
+ conn.sendReply(sender, serial, conn.uuid)
+ default:
+ conn.sendError(errmsgUnknownMethod, sender, serial)
+ }
+ return
+ }
+ if len(name) == 0 || unicode.IsLower([]rune(name)[0]) {
+ conn.sendError(errmsgUnknownMethod, sender, serial)
+ }
+ var m reflect.Value
+ if hasIface {
+ conn.handlersLck.RLock()
+ obj, ok := conn.handlers[path]
+ if !ok {
+ conn.sendError(errmsgNoObject, sender, serial)
+ conn.handlersLck.RUnlock()
+ return
+ }
+ iface := obj[ifaceName]
+ conn.handlersLck.RUnlock()
+ m = exportedMethod(iface, name)
+ } else {
+ conn.handlersLck.RLock()
+ if _, ok := conn.handlers[path]; !ok {
+ conn.sendError(errmsgNoObject, sender, serial)
+ conn.handlersLck.RUnlock()
+ return
+ }
+ for _, v := range conn.handlers[path] {
+ m = exportedMethod(v, name)
+ if m.IsValid() {
+ break
+ }
+ }
+ conn.handlersLck.RUnlock()
+ }
+ if !m.IsValid() {
+ conn.sendError(errmsgUnknownMethod, sender, serial)
+ return
+ }
+ t := m.Type()
+ vs := msg.Body
+ pointers := make([]interface{}, t.NumIn())
+ decode := make([]interface{}, 0, len(vs))
+ for i := 0; i < t.NumIn(); i++ {
+ tp := t.In(i)
+ val := reflect.New(tp)
+ pointers[i] = val.Interface()
+ if tp == reflect.TypeOf((*Sender)(nil)).Elem() {
+ val.Elem().SetString(sender)
+ } else {
+ decode = append(decode, pointers[i])
+ }
+ }
+ if len(decode) != len(vs) {
+ conn.sendError(errmsgInvalidArg, sender, serial)
+ return
+ }
+ if err := Store(vs, decode...); err != nil {
+ conn.sendError(errmsgInvalidArg, sender, serial)
+ return
+ }
+ params := make([]reflect.Value, len(pointers))
+ for i := 0; i < len(pointers); i++ {
+ params[i] = reflect.ValueOf(pointers[i]).Elem()
+ }
+ ret := m.Call(params)
+ if em := ret[t.NumOut()-1].Interface().(*Error); em != nil {
+ conn.sendError(*em, sender, serial)
+ return
+ }
+ if msg.Flags&FlagNoReplyExpected == 0 {
+ reply := new(Message)
+ reply.Type = TypeMethodReply
+ reply.serial = conn.getSerial()
+ reply.Headers = make(map[HeaderField]Variant)
+ if hasSender {
+ reply.Headers[FieldDestination] = msg.Headers[FieldSender]
+ }
+ reply.Headers[FieldReplySerial] = MakeVariant(msg.serial)
+ reply.Body = make([]interface{}, len(ret)-1)
+ for i := 0; i < len(ret)-1; i++ {
+ reply.Body[i] = ret[i].Interface()
+ }
+ if len(ret) != 1 {
+ reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...))
+ }
+ conn.outLck.RLock()
+ if !conn.closed {
+ conn.out <- reply
+ }
+ conn.outLck.RUnlock()
+ }
+}
+
// Emit emits the given signal on the message bus. The name parameter must be
// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost".
func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error {
	if !path.IsValid() {
		return errors.New("dbus: invalid object path")
	}
	// Split "interface.member" at the last dot and validate both halves.
	i := strings.LastIndex(name, ".")
	if i == -1 {
		return errors.New("dbus: invalid method name")
	}
	iface := name[:i]
	member := name[i+1:]
	if !isValidMember(member) {
		return errors.New("dbus: invalid method name")
	}
	if !isValidInterface(iface) {
		return errors.New("dbus: invalid interface name")
	}
	msg := new(Message)
	msg.Type = TypeSignal
	msg.serial = conn.getSerial()
	msg.Headers = make(map[HeaderField]Variant)
	msg.Headers[FieldInterface] = MakeVariant(iface)
	msg.Headers[FieldMember] = MakeVariant(member)
	msg.Headers[FieldPath] = MakeVariant(path)
	msg.Body = values
	if len(values) > 0 {
		msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...))
	}
	// Queue the signal unless the connection has been closed.
	conn.outLck.RLock()
	defer conn.outLck.RUnlock()
	if conn.closed {
		return ErrClosed
	}
	conn.out <- msg
	return nil
}
+
+// Export registers the given value to be exported as an object on the
+// message bus.
+//
+// If a method call on the given path and interface is received, an exported
+// method with the same name is called with v as the receiver if the
+// parameters match and the last return value is of type *Error. If this
+// *Error is not nil, it is sent back to the caller as an error.
+// Otherwise, a method reply is sent with the other return values as its body.
+//
+// Any parameters with the special type Sender are set to the sender of the
+// dbus message when the method is called. Parameters of this type do not
+// contribute to the dbus signature of the method (i.e. the method is exposed
+// as if the parameters of type Sender were not there).
+//
+// Every method call is executed in a new goroutine, so the method may be called
+// in multiple goroutines at once.
+//
+// Method calls on the interface org.freedesktop.DBus.Peer will be automatically
+// handled for every object.
+//
+// Passing nil as the first parameter will cause conn to cease handling calls on
+// the given combination of path and interface.
+//
+// Export returns an error if path is not a valid path name.
+func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error {
+ if !path.IsValid() {
+ return errors.New("dbus: invalid path name")
+ }
+ conn.handlersLck.Lock()
+ if v == nil {
+ if _, ok := conn.handlers[path]; ok {
+ delete(conn.handlers[path], iface)
+ if len(conn.handlers[path]) == 0 {
+ delete(conn.handlers, path)
+ }
+ }
+ return nil
+ }
+ if _, ok := conn.handlers[path]; !ok {
+ conn.handlers[path] = make(map[string]interface{})
+ }
+ conn.handlers[path][iface] = v
+ conn.handlersLck.Unlock()
+ return nil
+}
+
// ReleaseName calls org.freedesktop.DBus.ReleaseName. You should use only this
// method to release a name (see below).
func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) {
	var r uint32
	err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r)
	if err != nil {
		return 0, err
	}
	if r == uint32(ReleaseNameReplyReleased) {
		// Remove the name from the connection's list of owned names.
		conn.namesLck.Lock()
		for i, v := range conn.names {
			if v == name {
				copy(conn.names[i:], conn.names[i+1:])
				conn.names = conn.names[:len(conn.names)-1]
			}
		}
		conn.namesLck.Unlock()
	}
	return ReleaseNameReply(r), nil
}
+
// RequestName calls org.freedesktop.DBus.RequestName. You should use only this
// method to request a name because package dbus needs to keep track of all
// names that the connection has.
func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) {
	var r uint32
	err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r)
	if err != nil {
		return 0, err
	}
	if r == uint32(RequestNameReplyPrimaryOwner) {
		// Record the name as owned by this connection.
		conn.namesLck.Lock()
		conn.names = append(conn.names, name)
		conn.namesLck.Unlock()
	}
	return RequestNameReply(r), nil
}
+
// ReleaseNameReply is the reply to a ReleaseName call.
type ReleaseNameReply uint32

const (
	ReleaseNameReplyReleased ReleaseNameReply = 1 + iota // the name was released
	ReleaseNameReplyNonExistent                          // the name did not exist on the bus
	ReleaseNameReplyNotOwner                             // the caller was not the owner of the name
)
+
// RequestNameFlags represents the possible flags for a RequestName call.
// The flags may be combined with bitwise or.
type RequestNameFlags uint32

const (
	NameFlagAllowReplacement RequestNameFlags = 1 << iota // allow the ownership to be replaced
	NameFlagReplaceExisting                               // try to replace the current owner
	NameFlagDoNotQueue                                    // do not queue if the name is taken
)
+
// RequestNameReply is the reply to a RequestName call.
type RequestNameReply uint32

const (
	RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota // the caller is now the primary owner
	RequestNameReplyInQueue                                  // the caller was placed in the queue
	RequestNameReplyExists                                   // the name already has an owner
	RequestNameReplyAlreadyOwner                             // the caller already owns the name
)
diff --git a/vendor/src/github.com/godbus/dbus/homedir.go b/vendor/src/github.com/godbus/dbus/homedir.go
new file mode 100644
index 0000000000..0b745f9313
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/homedir.go
@@ -0,0 +1,28 @@
+package dbus
+
+import (
+ "os"
+ "sync"
+)
+
+var (
+ homeDir string
+ homeDirLock sync.Mutex
+)
+
+func getHomeDir() string {
+ homeDirLock.Lock()
+ defer homeDirLock.Unlock()
+
+ if homeDir != "" {
+ return homeDir
+ }
+
+ homeDir = os.Getenv("HOME")
+ if homeDir != "" {
+ return homeDir
+ }
+
+ homeDir = lookupHomeDir()
+ return homeDir
+}
diff --git a/vendor/src/github.com/godbus/dbus/homedir_dynamic.go b/vendor/src/github.com/godbus/dbus/homedir_dynamic.go
new file mode 100644
index 0000000000..2732081e73
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/homedir_dynamic.go
@@ -0,0 +1,15 @@
+// +build !static_build
+
+package dbus
+
+import (
+ "os/user"
+)
+
+func lookupHomeDir() string {
+ u, err := user.Current()
+ if err != nil {
+ return "/"
+ }
+ return u.HomeDir
+}
diff --git a/vendor/src/github.com/godbus/dbus/homedir_static.go b/vendor/src/github.com/godbus/dbus/homedir_static.go
new file mode 100644
index 0000000000..b9d9cb5525
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/homedir_static.go
@@ -0,0 +1,45 @@
+// +build static_build
+
+package dbus
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+)
+
+func lookupHomeDir() string {
+ myUid := os.Getuid()
+
+ f, err := os.Open("/etc/passwd")
+ if err != nil {
+ return "/"
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ break
+ }
+
+ line := strings.TrimSpace(s.Text())
+ if line == "" {
+ continue
+ }
+
+ parts := strings.Split(line, ":")
+
+ if len(parts) >= 6 {
+ uid, err := strconv.Atoi(parts[2])
+ if err == nil && uid == myUid {
+ return parts[5]
+ }
+ }
+ }
+
+ // Default to / if we can't get a better value
+ return "/"
+}
diff --git a/vendor/src/github.com/godbus/dbus/introspect/call.go b/vendor/src/github.com/godbus/dbus/introspect/call.go
new file mode 100644
index 0000000000..4aca2ea63e
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/introspect/call.go
@@ -0,0 +1,27 @@
+package introspect
+
+import (
+ "encoding/xml"
+ "github.com/godbus/dbus"
+ "strings"
+)
+
+// Call calls org.freedesktop.Introspectable.Introspect on a remote object
+// and returns the introspection data.
+func Call(o *dbus.Object) (*Node, error) {
+ var xmldata string
+ var node Node
+
+ err := o.Call("org.freedesktop.DBus.Introspectable.Introspect", 0).Store(&xmldata)
+ if err != nil {
+ return nil, err
+ }
+ err = xml.NewDecoder(strings.NewReader(xmldata)).Decode(&node)
+ if err != nil {
+ return nil, err
+ }
+ if node.Name == "" {
+ node.Name = string(o.Path())
+ }
+ return &node, nil
+}
diff --git a/vendor/src/github.com/godbus/dbus/introspect/introspect.go b/vendor/src/github.com/godbus/dbus/introspect/introspect.go
new file mode 100644
index 0000000000..dafcdb8b7a
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/introspect/introspect.go
@@ -0,0 +1,80 @@
+// Package introspect provides some utilities for dealing with the DBus
+// introspection format.
+package introspect
+
+import "encoding/xml"
+
+// The introspection data for the org.freedesktop.DBus.Introspectable interface.
+var IntrospectData = Interface{
+ Name: "org.freedesktop.DBus.Introspectable",
+ Methods: []Method{
+ {
+ Name: "Introspect",
+ Args: []Arg{
+ {"out", "s", "out"},
+ },
+ },
+ },
+}
+
+// The introspection data for the org.freedesktop.DBus.Introspectable interface,
+// as a string.
+const IntrospectDataString = `
+ <interface name="org.freedesktop.DBus.Introspectable">
+ <method name="Introspect">
+ <arg name="out" direction="out" type="s"/>
+ </method>
+ </interface>
+`
+
+// Node is the root element of an introspection.
+type Node struct {
+ XMLName xml.Name `xml:"node"`
+ Name string `xml:"name,attr,omitempty"`
+ Interfaces []Interface `xml:"interface"`
+ Children []Node `xml:"node,omitempty"`
+}
+
+// Interface describes a DBus interface that is available on the message bus.
+type Interface struct {
+ Name string `xml:"name,attr"`
+ Methods []Method `xml:"method"`
+ Signals []Signal `xml:"signal"`
+ Properties []Property `xml:"property"`
+ Annotations []Annotation `xml:"annotation"`
+}
+
+// Method describes a Method on an Interface as returned by an introspection.
+type Method struct {
+ Name string `xml:"name,attr"`
+ Args []Arg `xml:"arg"`
+ Annotations []Annotation `xml:"annotation"`
+}
+
+// Signal describes a Signal emitted on an Interface.
+type Signal struct {
+ Name string `xml:"name,attr"`
+ Args []Arg `xml:"arg"`
+ Annotations []Annotation `xml:"annotation"`
+}
+
+// Property describes a property of an Interface.
+type Property struct {
+ Name string `xml:"name,attr"`
+ Type string `xml:"type,attr"`
+ Access string `xml:"access,attr"`
+ Annotations []Annotation `xml:"annotation"`
+}
+
+// Arg represents an argument of a method or a signal.
+type Arg struct {
+ Name string `xml:"name,attr,omitempty"`
+ Type string `xml:"type,attr"`
+ Direction string `xml:"direction,attr,omitempty"`
+}
+
+// Annotation is an annotation in the introspection format.
+type Annotation struct {
+ Name string `xml:"name,attr"`
+ Value string `xml:"value,attr"`
+}
diff --git a/vendor/src/github.com/godbus/dbus/introspect/introspectable.go b/vendor/src/github.com/godbus/dbus/introspect/introspectable.go
new file mode 100644
index 0000000000..a2a965a343
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/introspect/introspectable.go
@@ -0,0 +1,74 @@
+package introspect
+
+import (
+ "encoding/xml"
+ "github.com/godbus/dbus"
+ "reflect"
+)
+
+// Introspectable implements org.freedesktop.Introspectable.
+//
+// You can create it by converting the XML-formatted introspection data from a
+// string to an Introspectable or call NewIntrospectable with a Node. Then,
+// export it as org.freedesktop.Introspectable on your object.
+type Introspectable string
+
+// NewIntrospectable returns an Introspectable that returns the introspection
+// data that corresponds to the given Node. If n.Interfaces doesn't contain the
+// data for org.freedesktop.DBus.Introspectable, it is added automatically.
+func NewIntrospectable(n *Node) Introspectable {
+ found := false
+ for _, v := range n.Interfaces {
+ if v.Name == "org.freedesktop.DBus.Introspectable" {
+ found = true
+ break
+ }
+ }
+ if !found {
+ n.Interfaces = append(n.Interfaces, IntrospectData)
+ }
+ b, err := xml.Marshal(n)
+ if err != nil {
+ panic(err)
+ }
+ return Introspectable(b)
+}
+
+// Introspect implements org.freedesktop.Introspectable.Introspect.
+func (i Introspectable) Introspect() (string, *dbus.Error) {
+ return string(i), nil
+}
+
+// Methods returns the description of the methods of v. This can be used to
+// create a Node which can be passed to NewIntrospectable.
+func Methods(v interface{}) []Method {
+ t := reflect.TypeOf(v)
+ ms := make([]Method, 0, t.NumMethod())
+ for i := 0; i < t.NumMethod(); i++ {
+ if t.Method(i).PkgPath != "" {
+ continue
+ }
+ mt := t.Method(i).Type
+ if mt.NumOut() == 0 ||
+ mt.Out(mt.NumOut()-1) != reflect.TypeOf(&dbus.Error{"", nil}) {
+
+ continue
+ }
+ var m Method
+ m.Name = t.Method(i).Name
+ m.Args = make([]Arg, 0, mt.NumIn()+mt.NumOut()-2)
+ for j := 1; j < mt.NumIn(); j++ {
+ if mt.In(j) != reflect.TypeOf((*dbus.Sender)(nil)).Elem() {
+ arg := Arg{"", dbus.SignatureOfType(mt.In(j)).String(), "in"}
+ m.Args = append(m.Args, arg)
+ }
+ }
+ for j := 0; j < mt.NumOut()-1; j++ {
+ arg := Arg{"", dbus.SignatureOfType(mt.Out(j)).String(), "out"}
+ m.Args = append(m.Args, arg)
+ }
+ m.Annotations = make([]Annotation, 0)
+ ms = append(ms, m)
+ }
+ return ms
+}
diff --git a/vendor/src/github.com/godbus/dbus/message.go b/vendor/src/github.com/godbus/dbus/message.go
new file mode 100644
index 0000000000..075d6e38ba
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/message.go
@@ -0,0 +1,346 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "reflect"
+ "strconv"
+)
+
+const protoVersion byte = 1
+
+// Flags represents the possible flags of a D-Bus message.
+type Flags byte
+
+const (
+ // FlagNoReplyExpected signals that the message is not expected to generate
+ // a reply. If this flag is set on outgoing messages, any possible reply
+ // will be discarded.
+ FlagNoReplyExpected Flags = 1 << iota
+ // FlagNoAutoStart signals that the message bus should not automatically
+ // start an application when handling this message.
+ FlagNoAutoStart
+)
+
+// Type represents the possible types of a D-Bus message.
+type Type byte
+
+const (
+ TypeMethodCall Type = 1 + iota
+ TypeMethodReply
+ TypeError
+ TypeSignal
+ typeMax
+)
+
+func (t Type) String() string {
+ switch t {
+ case TypeMethodCall:
+ return "method call"
+ case TypeMethodReply:
+ return "reply"
+ case TypeError:
+ return "error"
+ case TypeSignal:
+ return "signal"
+ }
+ return "invalid"
+}
+
+// HeaderField represents the possible byte codes for the headers
+// of a D-Bus message.
+type HeaderField byte
+
+const (
+ FieldPath HeaderField = 1 + iota
+ FieldInterface
+ FieldMember
+ FieldErrorName
+ FieldReplySerial
+ FieldDestination
+ FieldSender
+ FieldSignature
+ FieldUnixFDs
+ fieldMax
+)
+
+// An InvalidMessageError describes the reason why a D-Bus message is regarded as
+// invalid.
+type InvalidMessageError string
+
+func (e InvalidMessageError) Error() string {
+ return "dbus: invalid message: " + string(e)
+}
+
+// fieldType are the types of the various header fields.
+var fieldTypes = [fieldMax]reflect.Type{
+ FieldPath: objectPathType,
+ FieldInterface: stringType,
+ FieldMember: stringType,
+ FieldErrorName: stringType,
+ FieldReplySerial: uint32Type,
+ FieldDestination: stringType,
+ FieldSender: stringType,
+ FieldSignature: signatureType,
+ FieldUnixFDs: uint32Type,
+}
+
+// requiredFields lists the header fields that are required by the different
+// message types.
+var requiredFields = [typeMax][]HeaderField{
+ TypeMethodCall: {FieldPath, FieldMember},
+ TypeMethodReply: {FieldReplySerial},
+ TypeError: {FieldErrorName, FieldReplySerial},
+ TypeSignal: {FieldPath, FieldInterface, FieldMember},
+}
+
+// Message represents a single D-Bus message.
+type Message struct {
+ Type
+ Flags
+ Headers map[HeaderField]Variant
+ Body []interface{}
+
+ serial uint32
+}
+
+type header struct {
+ Field byte
+ Variant
+}
+
+// DecodeMessage tries to decode a single message in the D-Bus wire format
+// from the given reader. The byte order is figured out from the first byte.
+// The possibly returned error can be an error of the underlying reader, an
+// InvalidMessageError or a FormatError.
+func DecodeMessage(rd io.Reader) (msg *Message, err error) {
+ var order binary.ByteOrder
+ var hlength, length uint32
+ var typ, flags, proto byte
+ var headers []header
+
+ b := make([]byte, 1)
+ _, err = rd.Read(b)
+ if err != nil {
+ return
+ }
+ switch b[0] {
+ case 'l':
+ order = binary.LittleEndian
+ case 'B':
+ order = binary.BigEndian
+ default:
+ return nil, InvalidMessageError("invalid byte order")
+ }
+
+ dec := newDecoder(rd, order)
+ dec.pos = 1
+
+ msg = new(Message)
+ vs, err := dec.Decode(Signature{"yyyuu"})
+ if err != nil {
+ return nil, err
+ }
+ if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil {
+ return nil, err
+ }
+ msg.Type = Type(typ)
+ msg.Flags = Flags(flags)
+
+ // get the header length separately because we need it later
+ b = make([]byte, 4)
+ _, err = io.ReadFull(rd, b)
+ if err != nil {
+ return nil, err
+ }
+ binary.Read(bytes.NewBuffer(b), order, &hlength)
+ if hlength+length+16 > 1<<27 {
+ return nil, InvalidMessageError("message is too long")
+ }
+ dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order)
+ dec.pos = 12
+ vs, err = dec.Decode(Signature{"a(yv)"})
+ if err != nil {
+ return nil, err
+ }
+ if err = Store(vs, &headers); err != nil {
+ return nil, err
+ }
+
+ msg.Headers = make(map[HeaderField]Variant)
+ for _, v := range headers {
+ msg.Headers[HeaderField(v.Field)] = v.Variant
+ }
+
+ dec.align(8)
+ body := make([]byte, int(length))
+ if length != 0 {
+ _, err := io.ReadFull(rd, body)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if err = msg.IsValid(); err != nil {
+ return nil, err
+ }
+ sig, _ := msg.Headers[FieldSignature].value.(Signature)
+ if sig.str != "" {
+ buf := bytes.NewBuffer(body)
+ dec = newDecoder(buf, order)
+ vs, err := dec.Decode(sig)
+ if err != nil {
+ return nil, err
+ }
+ msg.Body = vs
+ }
+
+ return
+}
+
+// EncodeTo encodes and sends a message to the given writer. The byte order must
+// be either binary.LittleEndian or binary.BigEndian. If the message is not
+// valid or an error occurs when writing, an error is returned.
+func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error {
+ if err := msg.IsValid(); err != nil {
+ return err
+ }
+ var vs [7]interface{}
+ switch order {
+ case binary.LittleEndian:
+ vs[0] = byte('l')
+ case binary.BigEndian:
+ vs[0] = byte('B')
+ default:
+ return errors.New("dbus: invalid byte order")
+ }
+ body := new(bytes.Buffer)
+ enc := newEncoder(body, order)
+ if len(msg.Body) != 0 {
+ enc.Encode(msg.Body...)
+ }
+ vs[1] = msg.Type
+ vs[2] = msg.Flags
+ vs[3] = protoVersion
+ vs[4] = uint32(len(body.Bytes()))
+ vs[5] = msg.serial
+ headers := make([]header, 0, len(msg.Headers))
+ for k, v := range msg.Headers {
+ headers = append(headers, header{byte(k), v})
+ }
+ vs[6] = headers
+ var buf bytes.Buffer
+ enc = newEncoder(&buf, order)
+ enc.Encode(vs[:]...)
+ enc.align(8)
+ body.WriteTo(&buf)
+ if buf.Len() > 1<<27 {
+ return InvalidMessageError("message is too long")
+ }
+ if _, err := buf.WriteTo(out); err != nil {
+ return err
+ }
+ return nil
+}
+
+// IsValid checks whether msg is a valid message and returns an
+// InvalidMessageError if it is not.
+func (msg *Message) IsValid() error {
+ if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected) != 0 {
+ return InvalidMessageError("invalid flags")
+ }
+ if msg.Type == 0 || msg.Type >= typeMax {
+ return InvalidMessageError("invalid message type")
+ }
+ for k, v := range msg.Headers {
+ if k == 0 || k >= fieldMax {
+ return InvalidMessageError("invalid header")
+ }
+ if reflect.TypeOf(v.value) != fieldTypes[k] {
+ return InvalidMessageError("invalid type of header field")
+ }
+ }
+ for _, v := range requiredFields[msg.Type] {
+ if _, ok := msg.Headers[v]; !ok {
+ return InvalidMessageError("missing required header")
+ }
+ }
+ if path, ok := msg.Headers[FieldPath]; ok {
+ if !path.value.(ObjectPath).IsValid() {
+ return InvalidMessageError("invalid path name")
+ }
+ }
+ if iface, ok := msg.Headers[FieldInterface]; ok {
+ if !isValidInterface(iface.value.(string)) {
+ return InvalidMessageError("invalid interface name")
+ }
+ }
+ if member, ok := msg.Headers[FieldMember]; ok {
+ if !isValidMember(member.value.(string)) {
+ return InvalidMessageError("invalid member name")
+ }
+ }
+ if errname, ok := msg.Headers[FieldErrorName]; ok {
+ if !isValidInterface(errname.value.(string)) {
+ return InvalidMessageError("invalid error name")
+ }
+ }
+ if len(msg.Body) != 0 {
+ if _, ok := msg.Headers[FieldSignature]; !ok {
+ return InvalidMessageError("missing signature")
+ }
+ }
+ return nil
+}
+
+// Serial returns the message's serial number. The returned value is only valid
+// for messages received by eavesdropping.
+func (msg *Message) Serial() uint32 {
+ return msg.serial
+}
+
+// String returns a string representation of a message similar to the format of
+// dbus-monitor.
+func (msg *Message) String() string {
+ if err := msg.IsValid(); err != nil {
+ return "<invalid>"
+ }
+ s := msg.Type.String()
+ if v, ok := msg.Headers[FieldSender]; ok {
+ s += " from " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldDestination]; ok {
+ s += " to " + v.value.(string)
+ }
+ s += " serial " + strconv.FormatUint(uint64(msg.serial), 10)
+ if v, ok := msg.Headers[FieldReplySerial]; ok {
+ s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
+ }
+ if v, ok := msg.Headers[FieldUnixFDs]; ok {
+ s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10)
+ }
+ if v, ok := msg.Headers[FieldPath]; ok {
+ s += " path " + string(v.value.(ObjectPath))
+ }
+ if v, ok := msg.Headers[FieldInterface]; ok {
+ s += " interface " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldErrorName]; ok {
+ s += " error " + v.value.(string)
+ }
+ if v, ok := msg.Headers[FieldMember]; ok {
+ s += " member " + v.value.(string)
+ }
+ if len(msg.Body) != 0 {
+ s += "\n"
+ }
+ for i, v := range msg.Body {
+ s += " " + MakeVariant(v).String()
+ if i != len(msg.Body)-1 {
+ s += "\n"
+ }
+ }
+ return s
+}
diff --git a/vendor/src/github.com/godbus/dbus/prop/prop.go b/vendor/src/github.com/godbus/dbus/prop/prop.go
new file mode 100644
index 0000000000..ed5bdf2243
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/prop/prop.go
@@ -0,0 +1,264 @@
+// Package prop provides the Properties struct which can be used to implement
+// org.freedesktop.DBus.Properties.
+package prop
+
+import (
+ "github.com/godbus/dbus"
+ "github.com/godbus/dbus/introspect"
+ "sync"
+)
+
+// EmitType controls how org.freedesktop.DBus.Properties.PropertiesChanged is
+// emitted for a property. If it is EmitTrue, the signal is emitted. If it is
+// EmitInvalidates, the signal is also emitted, but the new value of the property
+// is not disclosed.
+type EmitType byte
+
+const (
+ EmitFalse EmitType = iota
+ EmitTrue
+ EmitInvalidates
+)
+
+// ErrIfaceNotFound is the error returned to peers who try to access properties
+// on interfaces that aren't found.
+var ErrIfaceNotFound = &dbus.Error{"org.freedesktop.DBus.Properties.Error.InterfaceNotFound", nil}
+
+// ErrPropNotFound is the error returned to peers trying to access properties
+// that aren't found.
+var ErrPropNotFound = &dbus.Error{"org.freedesktop.DBus.Properties.Error.PropertyNotFound", nil}
+
+// ErrReadOnly is the error returned to peers trying to set a read-only
+// property.
+var ErrReadOnly = &dbus.Error{"org.freedesktop.DBus.Properties.Error.ReadOnly", nil}
+
+// ErrInvalidArg is returned to peers if the type of the property that is being
+// changed and the argument don't match.
+var ErrInvalidArg = &dbus.Error{"org.freedesktop.DBus.Properties.Error.InvalidArg", nil}
+
+// The introspection data for the org.freedesktop.DBus.Properties interface.
+var IntrospectData = introspect.Interface{
+ Name: "org.freedesktop.DBus.Properties",
+ Methods: []introspect.Method{
+ {
+ Name: "Get",
+ Args: []introspect.Arg{
+ {"interface", "s", "in"},
+ {"property", "s", "in"},
+ {"value", "v", "out"},
+ },
+ },
+ {
+ Name: "GetAll",
+ Args: []introspect.Arg{
+ {"interface", "s", "in"},
+ {"props", "a{sv}", "out"},
+ },
+ },
+ {
+ Name: "Set",
+ Args: []introspect.Arg{
+ {"interface", "s", "in"},
+ {"property", "s", "in"},
+ {"value", "v", "in"},
+ },
+ },
+ },
+ Signals: []introspect.Signal{
+ {
+ Name: "PropertiesChanged",
+ Args: []introspect.Arg{
+ {"interface", "s", "out"},
+ {"changed_properties", "a{sv}", "out"},
+ {"invalidates_properties", "as", "out"},
+ },
+ },
+ },
+}
+
+// The introspection data for the org.freedesktop.DBus.Properties interface, as
+// a string.
+const IntrospectDataString = `
+ <interface name="org.freedesktop.DBus.Introspectable">
+ <method name="Get">
+ <arg name="interface" direction="in" type="s"/>
+ <arg name="property" direction="in" type="s"/>
+ <arg name="value" direction="out" type="v"/>
+ </method>
+ <method name="GetAll">
+ <arg name="interface" direction="in" type="s"/>
+ <arg name="props" direction="out" type="a{sv}"/>
+ </method>
+ <method name="Set">
+ <arg name="interface" direction="in" type="s"/>
+ <arg name="property" direction="in" type="s"/>
+ <arg name="value" direction="in" type="v"/>
+ </method>
+ <signal name="PropertiesChanged">
+ <arg name="interface" type="s"/>
+ <arg name="changed_properties" type="a{sv}"/>
+ <arg name="invalidates_properties" type="as"/>
+ </signal>
+ </interface>
+`
+
+// Prop represents a single property. It is used for creating a Properties
+// value.
+type Prop struct {
+ // Initial value. Must be a DBus-representable type.
+ Value interface{}
+
+ // If true, the value can be modified by calls to Set.
+ Writable bool
+
+ // Controls how org.freedesktop.DBus.Properties.PropertiesChanged is
+ // emitted if this property changes.
+ Emit EmitType
+
+ // If not nil, anytime this property is changed by Set, this function is
+	// called with an appropriate Change as its argument. If the returned error
+ // is not nil, it is sent back to the caller of Set and the property is not
+ // changed.
+ Callback func(*Change) *dbus.Error
+}
+
+// Change represents a change of a property by a call to Set.
+type Change struct {
+ Props *Properties
+ Iface string
+ Name string
+ Value interface{}
+}
+
+// Properties is a set of values that can be made available to the message bus
+// using the org.freedesktop.DBus.Properties interface. It is safe for
+// concurrent use by multiple goroutines.
+type Properties struct {
+ m map[string]map[string]*Prop
+ mut sync.RWMutex
+ conn *dbus.Conn
+ path dbus.ObjectPath
+}
+
+// New returns a new Properties structure that manages the given properties.
+// The key for the first-level map of props is the name of the interface; the
+// second-level key is the name of the property. The returned structure will be
+// exported as org.freedesktop.DBus.Properties on path.
+func New(conn *dbus.Conn, path dbus.ObjectPath, props map[string]map[string]*Prop) *Properties {
+ p := &Properties{m: props, conn: conn, path: path}
+ conn.Export(p, path, "org.freedesktop.DBus.Properties")
+ return p
+}
+
+// Get implements org.freedesktop.DBus.Properties.Get.
+func (p *Properties) Get(iface, property string) (dbus.Variant, *dbus.Error) {
+ p.mut.RLock()
+ defer p.mut.RUnlock()
+ m, ok := p.m[iface]
+ if !ok {
+ return dbus.Variant{}, ErrIfaceNotFound
+ }
+ prop, ok := m[property]
+ if !ok {
+ return dbus.Variant{}, ErrPropNotFound
+ }
+ return dbus.MakeVariant(prop.Value), nil
+}
+
+// GetAll implements org.freedesktop.DBus.Properties.GetAll.
+func (p *Properties) GetAll(iface string) (map[string]dbus.Variant, *dbus.Error) {
+ p.mut.RLock()
+ defer p.mut.RUnlock()
+ m, ok := p.m[iface]
+ if !ok {
+ return nil, ErrIfaceNotFound
+ }
+ rm := make(map[string]dbus.Variant, len(m))
+ for k, v := range m {
+ rm[k] = dbus.MakeVariant(v.Value)
+ }
+ return rm, nil
+}
+
+// GetMust returns the value of the given property and panics if either the
+// interface or the property name are invalid.
+func (p *Properties) GetMust(iface, property string) interface{} {
+ p.mut.RLock()
+ defer p.mut.RUnlock()
+ return p.m[iface][property].Value
+}
+
+// Introspection returns the introspection data that represents the properties
+// of iface.
+func (p *Properties) Introspection(iface string) []introspect.Property {
+ p.mut.RLock()
+ defer p.mut.RUnlock()
+ m := p.m[iface]
+ s := make([]introspect.Property, 0, len(m))
+ for k, v := range m {
+ p := introspect.Property{Name: k, Type: dbus.SignatureOf(v.Value).String()}
+ if v.Writable {
+ p.Access = "readwrite"
+ } else {
+ p.Access = "read"
+ }
+ s = append(s, p)
+ }
+ return s
+}
+
+// set sets the given property and emits PropertiesChanged if appropriate. p.mut
+// must already be locked.
+func (p *Properties) set(iface, property string, v interface{}) {
+ prop := p.m[iface][property]
+ prop.Value = v
+ switch prop.Emit {
+ case EmitFalse:
+ // do nothing
+ case EmitInvalidates:
+ p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged",
+ iface, map[string]dbus.Variant{}, []string{property})
+ case EmitTrue:
+ p.conn.Emit(p.path, "org.freedesktop.DBus.Properties.PropertiesChanged",
+ iface, map[string]dbus.Variant{property: dbus.MakeVariant(v)},
+ []string{})
+ default:
+ panic("invalid value for EmitType")
+ }
+}
+
+// Set implements org.freedesktop.Properties.Set.
+func (p *Properties) Set(iface, property string, newv dbus.Variant) *dbus.Error {
+ p.mut.Lock()
+ defer p.mut.Unlock()
+ m, ok := p.m[iface]
+ if !ok {
+ return ErrIfaceNotFound
+ }
+ prop, ok := m[property]
+ if !ok {
+ return ErrPropNotFound
+ }
+ if !prop.Writable {
+ return ErrReadOnly
+ }
+ if newv.Signature() != dbus.SignatureOf(prop.Value) {
+ return ErrInvalidArg
+ }
+ if prop.Callback != nil {
+ err := prop.Callback(&Change{p, iface, property, newv.Value()})
+ if err != nil {
+ return err
+ }
+ }
+ p.set(iface, property, newv.Value())
+ return nil
+}
+
+// SetMust sets the value of the given property and panics if the interface or
+// the property name are invalid.
+func (p *Properties) SetMust(iface, property string, v interface{}) {
+ p.mut.Lock()
+ p.set(iface, property, v)
+ p.mut.Unlock()
+}
diff --git a/vendor/src/github.com/godbus/dbus/proto_test.go b/vendor/src/github.com/godbus/dbus/proto_test.go
new file mode 100644
index 0000000000..608a770d41
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/proto_test.go
@@ -0,0 +1,369 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "testing"
+)
+
+var protoTests = []struct {
+ vs []interface{}
+ bigEndian []byte
+ littleEndian []byte
+}{
+ {
+ []interface{}{int32(0)},
+ []byte{0, 0, 0, 0},
+ []byte{0, 0, 0, 0},
+ },
+ {
+ []interface{}{true, false},
+ []byte{0, 0, 0, 1, 0, 0, 0, 0},
+ []byte{1, 0, 0, 0, 0, 0, 0, 0},
+ },
+ {
+ []interface{}{byte(0), uint16(12), int16(32), uint32(43)},
+ []byte{0, 0, 0, 12, 0, 32, 0, 0, 0, 0, 0, 43},
+ []byte{0, 0, 12, 0, 32, 0, 0, 0, 43, 0, 0, 0},
+ },
+ {
+ []interface{}{int64(-1), uint64(1<<64 - 1)},
+ bytes.Repeat([]byte{255}, 16),
+ bytes.Repeat([]byte{255}, 16),
+ },
+ {
+ []interface{}{math.Inf(+1)},
+ []byte{0x7f, 0xf0, 0, 0, 0, 0, 0, 0},
+ []byte{0, 0, 0, 0, 0, 0, 0xf0, 0x7f},
+ },
+ {
+ []interface{}{"foo"},
+ []byte{0, 0, 0, 3, 'f', 'o', 'o', 0},
+ []byte{3, 0, 0, 0, 'f', 'o', 'o', 0},
+ },
+ {
+ []interface{}{Signature{"ai"}},
+ []byte{2, 'a', 'i', 0},
+ []byte{2, 'a', 'i', 0},
+ },
+ {
+ []interface{}{[]int16{42, 256}},
+ []byte{0, 0, 0, 4, 0, 42, 1, 0},
+ []byte{4, 0, 0, 0, 42, 0, 0, 1},
+ },
+ {
+ []interface{}{MakeVariant("foo")},
+ []byte{1, 's', 0, 0, 0, 0, 0, 3, 'f', 'o', 'o', 0},
+ []byte{1, 's', 0, 0, 3, 0, 0, 0, 'f', 'o', 'o', 0},
+ },
+ {
+ []interface{}{MakeVariant(MakeVariant(Signature{"v"}))},
+ []byte{1, 'v', 0, 1, 'g', 0, 1, 'v', 0},
+ []byte{1, 'v', 0, 1, 'g', 0, 1, 'v', 0},
+ },
+ {
+ []interface{}{map[int32]bool{42: true}},
+ []byte{0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 1},
+ []byte{8, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 1, 0, 0, 0},
+ },
+ {
+ []interface{}{map[string]Variant{}, byte(42)},
+ []byte{0, 0, 0, 0, 0, 0, 0, 0, 42},
+ []byte{0, 0, 0, 0, 0, 0, 0, 0, 42},
+ },
+ {
+ []interface{}{[]uint64{}, byte(42)},
+ []byte{0, 0, 0, 0, 0, 0, 0, 0, 42},
+ []byte{0, 0, 0, 0, 0, 0, 0, 0, 42},
+ },
+}
+
+func TestProto(t *testing.T) {
+ for i, v := range protoTests {
+ buf := new(bytes.Buffer)
+ bigEnc := newEncoder(buf, binary.BigEndian)
+ bigEnc.Encode(v.vs...)
+ marshalled := buf.Bytes()
+ if bytes.Compare(marshalled, v.bigEndian) != 0 {
+ t.Errorf("test %d (marshal be): got '%v', but expected '%v'\n", i+1, marshalled,
+ v.bigEndian)
+ }
+ buf.Reset()
+ litEnc := newEncoder(buf, binary.LittleEndian)
+ litEnc.Encode(v.vs...)
+ marshalled = buf.Bytes()
+ if bytes.Compare(marshalled, v.littleEndian) != 0 {
+ t.Errorf("test %d (marshal le): got '%v', but expected '%v'\n", i+1, marshalled,
+ v.littleEndian)
+ }
+ unmarshalled := reflect.MakeSlice(reflect.TypeOf(v.vs),
+ 0, 0)
+ for i := range v.vs {
+ unmarshalled = reflect.Append(unmarshalled,
+ reflect.New(reflect.TypeOf(v.vs[i])))
+ }
+ bigDec := newDecoder(bytes.NewReader(v.bigEndian), binary.BigEndian)
+ vs, err := bigDec.Decode(SignatureOf(v.vs...))
+ if err != nil {
+ t.Errorf("test %d (unmarshal be): %s\n", i+1, err)
+ continue
+ }
+ if !reflect.DeepEqual(vs, v.vs) {
+ t.Errorf("test %d (unmarshal be): got %#v, but expected %#v\n", i+1, vs, v.vs)
+ }
+ litDec := newDecoder(bytes.NewReader(v.littleEndian), binary.LittleEndian)
+ vs, err = litDec.Decode(SignatureOf(v.vs...))
+ if err != nil {
+ t.Errorf("test %d (unmarshal le): %s\n", i+1, err)
+ continue
+ }
+ if !reflect.DeepEqual(vs, v.vs) {
+ t.Errorf("test %d (unmarshal le): got %#v, but expected %#v\n", i+1, vs, v.vs)
+ }
+
+ }
+}
+
+func TestProtoMap(t *testing.T) {
+ m := map[string]uint8{
+ "foo": 23,
+ "bar": 2,
+ }
+ var n map[string]uint8
+ buf := new(bytes.Buffer)
+ enc := newEncoder(buf, binary.LittleEndian)
+ enc.Encode(m)
+ dec := newDecoder(buf, binary.LittleEndian)
+ vs, err := dec.Decode(Signature{"a{sy}"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = Store(vs, &n); err != nil {
+ t.Fatal(err)
+ }
+ if len(n) != 2 || n["foo"] != 23 || n["bar"] != 2 {
+ t.Error("got", n)
+ }
+}
+
+func TestProtoVariantStruct(t *testing.T) {
+ var variant Variant
+ v := MakeVariant(struct {
+ A int32
+ B int16
+ }{1, 2})
+ buf := new(bytes.Buffer)
+ enc := newEncoder(buf, binary.LittleEndian)
+ enc.Encode(v)
+ dec := newDecoder(buf, binary.LittleEndian)
+ vs, err := dec.Decode(Signature{"v"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = Store(vs, &variant); err != nil {
+ t.Fatal(err)
+ }
+ sl := variant.Value().([]interface{})
+ v1, v2 := sl[0].(int32), sl[1].(int16)
+ if v1 != int32(1) {
+ t.Error("got", v1, "as first int")
+ }
+ if v2 != int16(2) {
+ t.Error("got", v2, "as second int")
+ }
+}
+
+func TestProtoStructTag(t *testing.T) {
+ type Bar struct {
+ A int32
+ B chan interface{} `dbus:"-"`
+ C int32
+ }
+ var bar1, bar2 Bar
+ bar1.A = 234
+ bar2.C = 345
+ buf := new(bytes.Buffer)
+ enc := newEncoder(buf, binary.LittleEndian)
+ enc.Encode(bar1)
+ dec := newDecoder(buf, binary.LittleEndian)
+ vs, err := dec.Decode(Signature{"(ii)"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = Store(vs, &bar2); err != nil {
+ t.Fatal(err)
+ }
+ if bar1 != bar2 {
+ t.Error("struct tag test: got", bar2)
+ }
+}
+
+func TestProtoStoreStruct(t *testing.T) {
+ var foo struct {
+ A int32
+ B string
+ c chan interface{}
+ D interface{} `dbus:"-"`
+ }
+ src := []interface{}{[]interface{}{int32(42), "foo"}}
+ err := Store(src, &foo)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestProtoStoreNestedStruct(t *testing.T) {
+ var foo struct {
+ A int32
+ B struct {
+ C string
+ D float64
+ }
+ }
+ src := []interface{}{
+ []interface{}{
+ int32(42),
+ []interface{}{
+ "foo",
+ 3.14,
+ },
+ },
+ }
+ err := Store(src, &foo)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestMessage(t *testing.T) {
+ buf := new(bytes.Buffer)
+ message := new(Message)
+ message.Type = TypeMethodCall
+ message.serial = 32
+ message.Headers = map[HeaderField]Variant{
+ FieldPath: MakeVariant(ObjectPath("/org/foo/bar")),
+ FieldMember: MakeVariant("baz"),
+ }
+ message.Body = make([]interface{}, 0)
+ err := message.EncodeTo(buf, binary.LittleEndian)
+ if err != nil {
+ t.Error(err)
+ }
+ _, err = DecodeMessage(buf)
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestProtoStructInterfaces(t *testing.T) {
+ b := []byte{42}
+ vs, err := newDecoder(bytes.NewReader(b), binary.LittleEndian).Decode(Signature{"(y)"})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if vs[0].([]interface{})[0].(byte) != 42 {
+ t.Errorf("wrongs results (got %v)", vs)
+ }
+}
+
+// ordinary org.freedesktop.DBus.Hello call
+var smallMessage = &Message{
+ Type: TypeMethodCall,
+ serial: 1,
+ Headers: map[HeaderField]Variant{
+ FieldDestination: MakeVariant("org.freedesktop.DBus"),
+ FieldPath: MakeVariant(ObjectPath("/org/freedesktop/DBus")),
+ FieldInterface: MakeVariant("org.freedesktop.DBus"),
+ FieldMember: MakeVariant("Hello"),
+ },
+}
+
+// org.freedesktop.Notifications.Notify
+var bigMessage = &Message{
+ Type: TypeMethodCall,
+ serial: 2,
+ Headers: map[HeaderField]Variant{
+ FieldDestination: MakeVariant("org.freedesktop.Notifications"),
+ FieldPath: MakeVariant(ObjectPath("/org/freedesktop/Notifications")),
+ FieldInterface: MakeVariant("org.freedesktop.Notifications"),
+ FieldMember: MakeVariant("Notify"),
+ FieldSignature: MakeVariant(Signature{"susssasa{sv}i"}),
+ },
+ Body: []interface{}{
+ "app_name",
+ uint32(0),
+ "dialog-information",
+ "Notification",
+ "This is the body of a notification",
+ []string{"ok", "Ok"},
+ map[string]Variant{
+ "sound-name": MakeVariant("dialog-information"),
+ },
+ int32(-1),
+ },
+}
+
+func BenchmarkDecodeMessageSmall(b *testing.B) {
+ var err error
+ var rd *bytes.Reader
+
+ b.StopTimer()
+ buf := new(bytes.Buffer)
+ err = smallMessage.EncodeTo(buf, binary.LittleEndian)
+ if err != nil {
+ b.Fatal(err)
+ }
+ decoded := buf.Bytes()
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ rd = bytes.NewReader(decoded)
+ _, err = DecodeMessage(rd)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkDecodeMessageBig(b *testing.B) {
+ var err error
+ var rd *bytes.Reader
+
+ b.StopTimer()
+ buf := new(bytes.Buffer)
+ err = bigMessage.EncodeTo(buf, binary.LittleEndian)
+ if err != nil {
+ b.Fatal(err)
+ }
+ decoded := buf.Bytes()
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ rd = bytes.NewReader(decoded)
+ _, err = DecodeMessage(rd)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkEncodeMessageSmall(b *testing.B) {
+ var err error
+ for i := 0; i < b.N; i++ {
+ err = smallMessage.EncodeTo(ioutil.Discard, binary.LittleEndian)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkEncodeMessageBig(b *testing.B) {
+ var err error
+ for i := 0; i < b.N; i++ {
+ err = bigMessage.EncodeTo(ioutil.Discard, binary.LittleEndian)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/src/github.com/godbus/dbus/sig.go b/vendor/src/github.com/godbus/dbus/sig.go
new file mode 100644
index 0000000000..f45b53ce1b
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/sig.go
@@ -0,0 +1,257 @@
+package dbus
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var sigToType = map[byte]reflect.Type{
+ 'y': byteType,
+ 'b': boolType,
+ 'n': int16Type,
+ 'q': uint16Type,
+ 'i': int32Type,
+ 'u': uint32Type,
+ 'x': int64Type,
+ 't': uint64Type,
+ 'd': float64Type,
+ 's': stringType,
+ 'g': signatureType,
+ 'o': objectPathType,
+ 'v': variantType,
+ 'h': unixFDIndexType,
+}
+
+// Signature represents a correct type signature as specified by the D-Bus
+// specification. The zero value represents the empty signature, "".
+type Signature struct {
+ str string
+}
+
+// SignatureOf returns the concatenation of all the signatures of the given
+// values. It panics if one of them is not representable in D-Bus.
+func SignatureOf(vs ...interface{}) Signature {
+ var s string
+ for _, v := range vs {
+ s += getSignature(reflect.TypeOf(v))
+ }
+ return Signature{s}
+}
+
+// SignatureOfType returns the signature of the given type. It panics if the
+// type is not representable in D-Bus.
+func SignatureOfType(t reflect.Type) Signature {
+ return Signature{getSignature(t)}
+}
+
+// getSignature returns the signature of the given type and panics on unknown types.
+func getSignature(t reflect.Type) string {
+ // handle simple types first
+ switch t.Kind() {
+ case reflect.Uint8:
+ return "y"
+ case reflect.Bool:
+ return "b"
+ case reflect.Int16:
+ return "n"
+ case reflect.Uint16:
+ return "q"
+ case reflect.Int32:
+ if t == unixFDType {
+ return "h"
+ }
+ return "i"
+ case reflect.Uint32:
+ if t == unixFDIndexType {
+ return "h"
+ }
+ return "u"
+ case reflect.Int64:
+ return "x"
+ case reflect.Uint64:
+ return "t"
+ case reflect.Float64:
+ return "d"
+ case reflect.Ptr:
+ return getSignature(t.Elem())
+ case reflect.String:
+ if t == objectPathType {
+ return "o"
+ }
+ return "s"
+ case reflect.Struct:
+ if t == variantType {
+ return "v"
+ } else if t == signatureType {
+ return "g"
+ }
+ var s string
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
+ s += getSignature(t.Field(i).Type)
+ }
+ }
+ return "(" + s + ")"
+ case reflect.Array, reflect.Slice:
+ return "a" + getSignature(t.Elem())
+ case reflect.Map:
+ if !isKeyType(t.Key()) {
+ panic(InvalidTypeError{t})
+ }
+ return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}"
+ }
+ panic(InvalidTypeError{t})
+}
+
+// ParseSignature returns the signature represented by this string, or a
+// SignatureError if the string is not a valid signature.
+func ParseSignature(s string) (sig Signature, err error) {
+ if len(s) == 0 {
+ return
+ }
+ if len(s) > 255 {
+ return Signature{""}, SignatureError{s, "too long"}
+ }
+ sig.str = s
+ for err == nil && len(s) != 0 {
+ err, s = validSingle(s, 0)
+ }
+ if err != nil {
+ sig = Signature{""}
+ }
+
+ return
+}
+
+// ParseSignatureMust behaves like ParseSignature, except that it panics if s
+// is not valid.
+func ParseSignatureMust(s string) Signature {
+ sig, err := ParseSignature(s)
+ if err != nil {
+ panic(err)
+ }
+ return sig
+}
+
+// Empty returns whether the signature is the empty signature.
+func (s Signature) Empty() bool {
+ return s.str == ""
+}
+
+// Single returns whether the signature represents a single, complete type.
+func (s Signature) Single() bool {
+	err, r := validSingle(s.str, 0)
+	// valid iff parsing succeeded AND consumed the whole string; the original
+	// tested err != nil, which inverted the result for every valid signature
+	return err == nil && r == ""
+}
+
+// String returns the signature's string representation.
+func (s Signature) String() string {
+ return s.str
+}
+
+// A SignatureError indicates that a signature passed to a function or received
+// on a connection is not a valid signature.
+type SignatureError struct {
+ Sig string
+ Reason string
+}
+
+func (e SignatureError) Error() string {
+ return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
+}
+
+// Try to read a single type from this string. If it was successful, err is nil
+// and rem is the remaining unparsed part. Otherwise, err is a non-nil
+// SignatureError and rem is "". depth is the current recursion depth which may
+// not be greater than 64 and should be given as 0 on the first call.
+func validSingle(s string, depth int) (err error, rem string) {
+ if s == "" {
+ return SignatureError{Sig: s, Reason: "empty signature"}, ""
+ }
+ if depth > 64 {
+ return SignatureError{Sig: s, Reason: "container nesting too deep"}, ""
+ }
+ switch s[0] {
+ case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h':
+ return nil, s[1:]
+ case 'a':
+ if len(s) > 1 && s[1] == '{' {
+ i := findMatching(s[1:], '{', '}')
+ if i == -1 {
+ return SignatureError{Sig: s, Reason: "unmatched '{'"}, ""
+ }
+ i++
+ rem = s[i+1:]
+ s = s[2:i]
+ if err, _ = validSingle(s[:1], depth+1); err != nil {
+ return err, ""
+ }
+ err, nr := validSingle(s[1:], depth+1)
+ if err != nil {
+ return err, ""
+ }
+ if nr != "" {
+ return SignatureError{Sig: s, Reason: "too many types in dict"}, ""
+ }
+ return nil, rem
+ }
+ return validSingle(s[1:], depth+1)
+ case '(':
+ i := findMatching(s, '(', ')')
+ if i == -1 {
+ return SignatureError{Sig: s, Reason: "unmatched ')'"}, ""
+ }
+ rem = s[i+1:]
+ s = s[1:i]
+ for err == nil && s != "" {
+ err, s = validSingle(s, depth+1)
+ }
+ if err != nil {
+ rem = ""
+ }
+ return
+ }
+ return SignatureError{Sig: s, Reason: "invalid type character"}, ""
+}
+
+// findMatching returns the index in s of the rune that closes the pair opened
+// by the first rune of s (left), accounting for nesting, or -1 if the pair is
+// never closed.
+func findMatching(s string, left, right rune) int {
+	n := 0
+	for i, v := range s {
+		if v == left {
+			n++
+		} else if v == right {
+			n--
+		}
+		if n == 0 {
+			return i
+		}
+	}
+	return -1
+}
+
+// typeFor returns the type of the given signature. It ignores any left over
+// characters and panics if s doesn't start with a valid type signature.
+func typeFor(s string) (t reflect.Type) {
+ err, _ := validSingle(s, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ if t, ok := sigToType[s[0]]; ok {
+ return t
+ }
+ switch s[0] {
+ case 'a':
+ if s[1] == '{' {
+ i := strings.LastIndex(s, "}")
+ t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i]))
+ } else {
+ t = reflect.SliceOf(typeFor(s[1:]))
+ }
+ case '(':
+ t = interfacesType
+ }
+ return
+}
diff --git a/vendor/src/github.com/godbus/dbus/sig_test.go b/vendor/src/github.com/godbus/dbus/sig_test.go
new file mode 100644
index 0000000000..da37bc968e
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/sig_test.go
@@ -0,0 +1,70 @@
+package dbus
+
+import (
+ "testing"
+)
+
+var sigTests = []struct {
+ vs []interface{}
+ sig Signature
+}{
+ {
+ []interface{}{new(int32)},
+ Signature{"i"},
+ },
+ {
+ []interface{}{new(string)},
+ Signature{"s"},
+ },
+ {
+ []interface{}{new(Signature)},
+ Signature{"g"},
+ },
+ {
+ []interface{}{new([]int16)},
+ Signature{"an"},
+ },
+ {
+ []interface{}{new(int16), new(uint32)},
+ Signature{"nu"},
+ },
+ {
+ []interface{}{new(map[byte]Variant)},
+ Signature{"a{yv}"},
+ },
+ {
+ []interface{}{new(Variant), new([]map[int32]string)},
+ Signature{"vaa{is}"},
+ },
+}
+
+func TestSig(t *testing.T) {
+ for i, v := range sigTests {
+ sig := SignatureOf(v.vs...)
+ if sig != v.sig {
+ t.Errorf("test %d: got %q, expected %q", i+1, sig.str, v.sig.str)
+ }
+ }
+}
+
+var getSigTest = []interface{}{
+ []struct {
+ b byte
+ i int32
+ t uint64
+ s string
+ }{},
+ map[string]Variant{},
+}
+
+func BenchmarkGetSignatureSimple(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ SignatureOf("", int32(0))
+ }
+}
+
+func BenchmarkGetSignatureLong(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ SignatureOf(getSigTest...)
+ }
+}
diff --git a/vendor/src/github.com/godbus/dbus/transport_darwin.go b/vendor/src/github.com/godbus/dbus/transport_darwin.go
new file mode 100644
index 0000000000..1bba0d6bf7
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/transport_darwin.go
@@ -0,0 +1,6 @@
+package dbus
+
+func (t *unixTransport) SendNullByte() error {
+ _, err := t.Write([]byte{0})
+ return err
+}
diff --git a/vendor/src/github.com/godbus/dbus/transport_generic.go b/vendor/src/github.com/godbus/dbus/transport_generic.go
new file mode 100644
index 0000000000..46f8f49d69
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/transport_generic.go
@@ -0,0 +1,35 @@
+package dbus
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+type genericTransport struct {
+ io.ReadWriteCloser
+}
+
+func (t genericTransport) SendNullByte() error {
+ _, err := t.Write([]byte{0})
+ return err
+}
+
+func (t genericTransport) SupportsUnixFDs() bool {
+ return false
+}
+
+func (t genericTransport) EnableUnixFDs() {}
+
+func (t genericTransport) ReadMessage() (*Message, error) {
+ return DecodeMessage(t)
+}
+
+func (t genericTransport) SendMessage(msg *Message) error {
+ for _, v := range msg.Body {
+ if _, ok := v.(UnixFD); ok {
+ return errors.New("dbus: unix fd passing not enabled")
+ }
+ }
+ return msg.EncodeTo(t, binary.LittleEndian)
+}
diff --git a/vendor/src/github.com/godbus/dbus/transport_unix.go b/vendor/src/github.com/godbus/dbus/transport_unix.go
new file mode 100644
index 0000000000..d16229be40
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/transport_unix.go
@@ -0,0 +1,190 @@
+package dbus
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "io"
+ "net"
+ "syscall"
+)
+
+type oobReader struct {
+ conn *net.UnixConn
+ oob []byte
+ buf [4096]byte
+}
+
+func (o *oobReader) Read(b []byte) (n int, err error) {
+ n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:])
+ if err != nil {
+ return n, err
+ }
+ if flags&syscall.MSG_CTRUNC != 0 {
+ return n, errors.New("dbus: control data truncated (too many fds received)")
+ }
+ o.oob = append(o.oob, o.buf[:oobn]...)
+ return n, nil
+}
+
+type unixTransport struct {
+ *net.UnixConn
+ hasUnixFDs bool
+}
+
+func newUnixTransport(keys string) (transport, error) {
+ var err error
+
+ t := new(unixTransport)
+ abstract := getKey(keys, "abstract")
+ path := getKey(keys, "path")
+ switch {
+ case abstract == "" && path == "":
+ return nil, errors.New("dbus: invalid address (neither path nor abstract set)")
+ case abstract != "" && path == "":
+ t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"})
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+ case abstract == "" && path != "":
+ t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"})
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+ default:
+ return nil, errors.New("dbus: invalid address (both path and abstract set)")
+ }
+}
+
+func (t *unixTransport) EnableUnixFDs() {
+ t.hasUnixFDs = true
+}
+
+// ReadMessage reads one message from the socket, gathering any unix fds
+// passed as SCM_RIGHTS ancillary data and substituting them for the
+// UnixFDIndex placeholders in the decoded body.
+func (t *unixTransport) ReadMessage() (*Message, error) {
+	var (
+		blen, hlen uint32
+		csheader   [16]byte
+		headers    []header
+		order      binary.ByteOrder
+		unixfds    uint32
+	)
+	// To be sure that all bytes of out-of-band data are read, we use a special
+	// reader that uses ReadUnix on the underlying connection instead of Read
+	// and gathers the out-of-band data in a buffer.
+	rd := &oobReader{conn: t.UnixConn}
+	// read the first 16 bytes (the part of the header that has a constant size),
+	// from which we can figure out the length of the rest of the message
+	if _, err := io.ReadFull(rd, csheader[:]); err != nil {
+		return nil, err
+	}
+	switch csheader[0] {
+	case 'l':
+		order = binary.LittleEndian
+	case 'B':
+		order = binary.BigEndian
+	default:
+		return nil, InvalidMessageError("invalid byte order")
+	}
+	// csheader[4:8] -> length of message body, csheader[12:16] -> length of
+	// header fields (without alignment)
+	binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen)
+	binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen)
+	if hlen%8 != 0 {
+		hlen += 8 - (hlen % 8)
+	}
+
+	// decode headers and look for unix fds
+	headerdata := make([]byte, hlen+4)
+	copy(headerdata, csheader[12:])
+	// Read via rd, not t directly: reading the raw connection would bypass
+	// the oobReader, dropping any ancillary data received with these bytes.
+	if _, err := io.ReadFull(rd, headerdata[4:]); err != nil {
+		return nil, err
+	}
+	dec := newDecoder(bytes.NewBuffer(headerdata), order)
+	dec.pos = 12
+	vs, err := dec.Decode(Signature{"a(yv)"})
+	if err != nil {
+		return nil, err
+	}
+	Store(vs, &headers)
+	for _, v := range headers {
+		if v.Field == byte(FieldUnixFDs) {
+			unixfds, _ = v.Variant.value.(uint32)
+		}
+	}
+	all := make([]byte, 16+hlen+blen)
+	copy(all, csheader[:])
+	copy(all[16:], headerdata[4:])
+	if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil {
+		return nil, err
+	}
+	if unixfds != 0 {
+		if !t.hasUnixFDs {
+			return nil, errors.New("dbus: got unix fds on unsupported transport")
+		}
+		// read the fds from the OOB data
+		scms, err := syscall.ParseSocketControlMessage(rd.oob)
+		if err != nil {
+			return nil, err
+		}
+		if len(scms) != 1 {
+			return nil, errors.New("dbus: received more than one socket control message")
+		}
+		fds, err := syscall.ParseUnixRights(&scms[0])
+		if err != nil {
+			return nil, err
+		}
+		msg, err := DecodeMessage(bytes.NewBuffer(all))
+		if err != nil {
+			return nil, err
+		}
+		// substitute the values in the message body (which are indices for the
+		// array receiver via OOB) with the actual values
+		for i, v := range msg.Body {
+			if j, ok := v.(UnixFDIndex); ok {
+				if uint32(j) >= unixfds {
+					return nil, InvalidMessageError("invalid index for unix fd")
+				}
+				msg.Body[i] = UnixFD(fds[j])
+			}
+		}
+		return msg, nil
+	}
+	return DecodeMessage(bytes.NewBuffer(all))
+}
+
+// SendMessage encodes msg and writes it to the socket. UnixFD values in the
+// body are replaced with indices and the fds themselves are sent as
+// SCM_RIGHTS ancillary data.
+func (t *unixTransport) SendMessage(msg *Message) error {
+	fds := make([]int, 0)
+	for i, v := range msg.Body {
+		if fd, ok := v.(UnixFD); ok {
+			msg.Body[i] = UnixFDIndex(len(fds))
+			fds = append(fds, int(fd))
+		}
+	}
+	if len(fds) != 0 {
+		if !t.hasUnixFDs {
+			return errors.New("dbus: unix fd passing not enabled")
+		}
+		msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
+		oob := syscall.UnixRights(fds...)
+		buf := new(bytes.Buffer)
+		msg.EncodeTo(buf, binary.LittleEndian)
+		n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
+		if err != nil {
+			return err
+		}
+		if n != buf.Len() || oobn != len(oob) {
+			return io.ErrShortWrite
+		}
+	} else {
+		if err := msg.EncodeTo(t, binary.LittleEndian); err != nil {
+			// propagate the encode/write error; the original returned nil
+			// here, silently discarding the failure
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *unixTransport) SupportsUnixFDs() bool {
+ return true
+}
diff --git a/vendor/src/github.com/godbus/dbus/transport_unix_test.go b/vendor/src/github.com/godbus/dbus/transport_unix_test.go
new file mode 100644
index 0000000000..302233fc65
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/transport_unix_test.go
@@ -0,0 +1,49 @@
+package dbus
+
+import (
+ "os"
+ "testing"
+)
+
+const testString = `This is a test!
+This text should be read from the file that is created by this test.`
+
+type unixFDTest struct{}
+
+func (t unixFDTest) Test(fd UnixFD) (string, *Error) {
+ var b [4096]byte
+ file := os.NewFile(uintptr(fd), "testfile")
+ defer file.Close()
+ n, err := file.Read(b[:])
+ if err != nil {
+ return "", &Error{"com.github.guelfey.test.Error", nil}
+ }
+ return string(b[:n]), nil
+}
+
+func TestUnixFDs(t *testing.T) {
+ conn, err := SessionBus()
+ if err != nil {
+ t.Fatal(err)
+ }
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer w.Close()
+ if _, err := w.Write([]byte(testString)); err != nil {
+ t.Fatal(err)
+ }
+ name := conn.Names()[0]
+ test := unixFDTest{}
+ conn.Export(test, "/com/github/guelfey/test", "com.github.guelfey.test")
+ var s string
+ obj := conn.Object(name, "/com/github/guelfey/test")
+ err = obj.Call("com.github.guelfey.test.Test", 0, UnixFD(r.Fd())).Store(&s)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if s != testString {
+ t.Fatal("got", s, "wanted", testString)
+ }
+}
diff --git a/vendor/src/github.com/godbus/dbus/transport_unixcred.go b/vendor/src/github.com/godbus/dbus/transport_unixcred.go
new file mode 100644
index 0000000000..42a0e769ef
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/transport_unixcred.go
@@ -0,0 +1,22 @@
+// +build !darwin
+
+package dbus
+
+import (
+ "io"
+ "os"
+ "syscall"
+)
+
+// SendNullByte sends the single null byte that starts the authentication
+// protocol, attaching this process's pid/uid/gid as SCM_CREDENTIALS
+// ancillary data (Linux-style credentials passing; file is built !darwin).
+func (t *unixTransport) SendNullByte() error {
+	ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+	b := syscall.UnixCredentials(ucred)
+	_, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+	if err != nil {
+		return err
+	}
+	// partial ancillary-data write: credentials were not fully transmitted
+	if oobn != len(b) {
+		return io.ErrShortWrite
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/godbus/dbus/variant.go b/vendor/src/github.com/godbus/dbus/variant.go
new file mode 100644
index 0000000000..b1b53ceb47
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/variant.go
@@ -0,0 +1,129 @@
+package dbus
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// Variant represents the D-Bus variant type.
+type Variant struct {
+ sig Signature
+ value interface{}
+}
+
+// MakeVariant converts the given value to a Variant. It panics if v cannot be
+// represented as a D-Bus type.
+func MakeVariant(v interface{}) Variant {
+ return Variant{SignatureOf(v), v}
+}
+
+// ParseVariant parses the given string as a variant as described at
+// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not
+// empty, it is taken to be the expected signature for the variant.
+func ParseVariant(s string, sig Signature) (Variant, error) {
+ tokens := varLex(s)
+ p := &varParser{tokens: tokens}
+ n, err := varMakeNode(p)
+ if err != nil {
+ return Variant{}, err
+ }
+ if sig.str == "" {
+ sig, err = varInfer(n)
+ if err != nil {
+ return Variant{}, err
+ }
+ }
+ v, err := n.Value(sig)
+ if err != nil {
+ return Variant{}, err
+ }
+ return MakeVariant(v), nil
+}
+
+// format returns a formatted version of v and whether this string can be parsed
+// unambigously.
+func (v Variant) format() (string, bool) {
+ switch v.sig.str[0] {
+ case 'b', 'i':
+ return fmt.Sprint(v.value), true
+ case 'n', 'q', 'u', 'x', 't', 'd', 'h':
+ return fmt.Sprint(v.value), false
+ case 's':
+ return strconv.Quote(v.value.(string)), true
+ case 'o':
+ return strconv.Quote(string(v.value.(ObjectPath))), false
+ case 'g':
+ return strconv.Quote(v.value.(Signature).str), false
+ case 'v':
+ s, unamb := v.value.(Variant).format()
+ if !unamb {
+ return "<@" + v.value.(Variant).sig.str + " " + s + ">", true
+ }
+ return "<" + s + ">", true
+ case 'y':
+ return fmt.Sprintf("%#x", v.value.(byte)), false
+ }
+ rv := reflect.ValueOf(v.value)
+ switch rv.Kind() {
+ case reflect.Slice:
+ if rv.Len() == 0 {
+ return "[]", false
+ }
+ unamb := true
+ buf := bytes.NewBuffer([]byte("["))
+ for i := 0; i < rv.Len(); i++ {
+ // TODO: slooow
+ s, b := MakeVariant(rv.Index(i).Interface()).format()
+ unamb = unamb && b
+ buf.WriteString(s)
+ if i != rv.Len()-1 {
+ buf.WriteString(", ")
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String(), unamb
+ case reflect.Map:
+ if rv.Len() == 0 {
+ return "{}", false
+ }
+ unamb := true
+ buf := bytes.NewBuffer([]byte("{"))
+ for i, k := range rv.MapKeys() {
+ s, b := MakeVariant(k.Interface()).format()
+ unamb = unamb && b
+ buf.WriteString(s)
+ buf.WriteString(": ")
+ s, b = MakeVariant(rv.MapIndex(k).Interface()).format()
+ unamb = unamb && b
+ buf.WriteString(s)
+ if i != rv.Len()-1 {
+ buf.WriteString(", ")
+ }
+ }
+ buf.WriteByte('}')
+ return buf.String(), unamb
+ }
+ return `"INVALID"`, true
+}
+
+// Signature returns the D-Bus signature of the underlying value of v.
+func (v Variant) Signature() Signature {
+ return v.sig
+}
+
+// String returns the string representation of the underlying value of v as
+// described at https://developer.gnome.org/glib/unstable/gvariant-text.html.
+func (v Variant) String() string {
+ s, unamb := v.format()
+ if !unamb {
+ return "@" + v.sig.str + " " + s
+ }
+ return s
+}
+
+// Value returns the underlying value of v.
+func (v Variant) Value() interface{} {
+ return v.value
+}
diff --git a/vendor/src/github.com/godbus/dbus/variant_lexer.go b/vendor/src/github.com/godbus/dbus/variant_lexer.go
new file mode 100644
index 0000000000..332007d6f1
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/variant_lexer.go
@@ -0,0 +1,284 @@
+package dbus
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Heavily inspired by the lexer from text/template.
+
+type varToken struct {
+ typ varTokenType
+ val string
+}
+
+type varTokenType byte
+
+const (
+ tokEOF varTokenType = iota
+ tokError
+ tokNumber
+ tokString
+ tokBool
+ tokArrayStart
+ tokArrayEnd
+ tokDictStart
+ tokDictEnd
+ tokVariantStart
+ tokVariantEnd
+ tokComma
+ tokColon
+ tokType
+ tokByteString
+)
+
+type varLexer struct {
+ input string
+ start int
+ pos int
+ width int
+ tokens []varToken
+}
+
+type lexState func(*varLexer) lexState
+
+func varLex(s string) []varToken {
+ l := &varLexer{input: s}
+ l.run()
+ return l.tokens
+}
+
+func (l *varLexer) accept(valid string) bool {
+ if strings.IndexRune(valid, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+func (l *varLexer) backup() {
+ l.pos -= l.width
+}
+
+func (l *varLexer) emit(t varTokenType) {
+ l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]})
+ l.start = l.pos
+}
+
+func (l *varLexer) errorf(format string, v ...interface{}) lexState {
+ l.tokens = append(l.tokens, varToken{
+ tokError,
+ fmt.Sprintf(format, v...),
+ })
+ return nil
+}
+
+func (l *varLexer) ignore() {
+ l.start = l.pos
+}
+
+func (l *varLexer) next() rune {
+ var r rune
+
+ if l.pos >= len(l.input) {
+ l.width = 0
+ return -1
+ }
+ r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pos += l.width
+ return r
+}
+
+func (l *varLexer) run() {
+ for state := varLexNormal; state != nil; {
+ state = state(l)
+ }
+}
+
+func (l *varLexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+func varLexNormal(l *varLexer) lexState {
+ for {
+ r := l.next()
+ switch {
+ case r == -1:
+ l.emit(tokEOF)
+ return nil
+ case r == '[':
+ l.emit(tokArrayStart)
+ case r == ']':
+ l.emit(tokArrayEnd)
+ case r == '{':
+ l.emit(tokDictStart)
+ case r == '}':
+ l.emit(tokDictEnd)
+ case r == '<':
+ l.emit(tokVariantStart)
+ case r == '>':
+ l.emit(tokVariantEnd)
+ case r == ':':
+ l.emit(tokColon)
+ case r == ',':
+ l.emit(tokComma)
+ case r == '\'' || r == '"':
+ l.backup()
+ return varLexString
+ case r == '@':
+ l.backup()
+ return varLexType
+ case unicode.IsSpace(r):
+ l.ignore()
+ case unicode.IsNumber(r) || r == '+' || r == '-':
+ l.backup()
+ return varLexNumber
+ case r == 'b':
+ pos := l.start
+ if n := l.peek(); n == '"' || n == '\'' {
+ return varLexByteString
+ }
+ // not a byte string; try to parse it as a type or bool below
+ l.pos = pos + 1
+ l.width = 1
+ fallthrough
+ default:
+ // either a bool or a type. Try bools first.
+ l.backup()
+ if l.pos+4 <= len(l.input) {
+ if l.input[l.pos:l.pos+4] == "true" {
+ l.pos += 4
+ l.emit(tokBool)
+ continue
+ }
+ }
+ if l.pos+5 <= len(l.input) {
+ if l.input[l.pos:l.pos+5] == "false" {
+ l.pos += 5
+ l.emit(tokBool)
+ continue
+ }
+ }
+ // must be a type.
+ return varLexType
+ }
+ }
+}
+
+// varTypeMap maps the type keywords of the GVariant text format to their
+// D-Bus signature characters.
+var varTypeMap = map[string]string{
+	"boolean": "b",
+	"byte":    "y",
+	"int16":   "n",
+	"uint16":  "q",
+	"int32":   "i",
+	"uint32":  "u",
+	"int64":   "x",
+	"uint64":  "t",
+	// 'd' is the D-Bus code for double; the previous value "f" is not a
+	// valid D-Bus type character and produced unparsable signatures
+	"double":     "d",
+	"string":     "s",
+	"objectpath": "o",
+	"signature":  "g",
+}
+
+func varLexByteString(l *varLexer) lexState {
+ q := l.next()
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != -1 {
+ break
+ }
+ fallthrough
+ case -1:
+ return l.errorf("unterminated bytestring")
+ case q:
+ break Loop
+ }
+ }
+ l.emit(tokByteString)
+ return varLexNormal
+}
+
+func varLexNumber(l *varLexer) lexState {
+ l.accept("+-")
+ digits := "0123456789"
+ if l.accept("0") {
+ if l.accept("x") {
+ digits = "0123456789abcdefABCDEF"
+ } else {
+ digits = "01234567"
+ }
+ }
+ for strings.IndexRune(digits, l.next()) >= 0 {
+ }
+ l.backup()
+ if l.accept(".") {
+ for strings.IndexRune(digits, l.next()) >= 0 {
+ }
+ l.backup()
+ }
+ if l.accept("eE") {
+ l.accept("+-")
+ for strings.IndexRune("0123456789", l.next()) >= 0 {
+ }
+ l.backup()
+ }
+ if r := l.peek(); unicode.IsLetter(r) {
+ l.next()
+ return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
+ }
+ l.emit(tokNumber)
+ return varLexNormal
+}
+
+func varLexString(l *varLexer) lexState {
+ q := l.next()
+Loop:
+ for {
+ switch l.next() {
+ case '\\':
+ if r := l.next(); r != -1 {
+ break
+ }
+ fallthrough
+ case -1:
+ return l.errorf("unterminated string")
+ case q:
+ break Loop
+ }
+ }
+ l.emit(tokString)
+ return varLexNormal
+}
+
+func varLexType(l *varLexer) lexState {
+ at := l.accept("@")
+ for {
+ r := l.next()
+ if r == -1 {
+ break
+ }
+ if unicode.IsSpace(r) {
+ l.backup()
+ break
+ }
+ }
+ if at {
+ if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil {
+ return l.errorf("%s", err)
+ }
+ } else {
+ if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok {
+ l.emit(tokType)
+ return varLexNormal
+ }
+ return l.errorf("unrecognized type %q", l.input[l.start:l.pos])
+ }
+ l.emit(tokType)
+ return varLexNormal
+}
diff --git a/vendor/src/github.com/godbus/dbus/variant_parser.go b/vendor/src/github.com/godbus/dbus/variant_parser.go
new file mode 100644
index 0000000000..d20f5da6dd
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/variant_parser.go
@@ -0,0 +1,817 @@
+package dbus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type varParser struct {
+ tokens []varToken
+ i int
+}
+
+func (p *varParser) backup() {
+ p.i--
+}
+
+func (p *varParser) next() varToken {
+ if p.i < len(p.tokens) {
+ t := p.tokens[p.i]
+ p.i++
+ return t
+ }
+ return varToken{typ: tokEOF}
+}
+
+type varNode interface {
+ Infer() (Signature, error)
+ String() string
+ Sigs() sigSet
+ Value(Signature) (interface{}, error)
+}
+
+func varMakeNode(p *varParser) (varNode, error) {
+ var sig Signature
+
+ for {
+ t := p.next()
+ switch t.typ {
+ case tokEOF:
+ return nil, io.ErrUnexpectedEOF
+ case tokError:
+ return nil, errors.New(t.val)
+ case tokNumber:
+ return varMakeNumNode(t, sig)
+ case tokString:
+ return varMakeStringNode(t, sig)
+ case tokBool:
+ if sig.str != "" && sig.str != "b" {
+ return nil, varTypeError{t.val, sig}
+ }
+ b, err := strconv.ParseBool(t.val)
+ if err != nil {
+ return nil, err
+ }
+ return boolNode(b), nil
+ case tokArrayStart:
+ return varMakeArrayNode(p, sig)
+ case tokVariantStart:
+ return varMakeVariantNode(p, sig)
+ case tokDictStart:
+ return varMakeDictNode(p, sig)
+ case tokType:
+ if sig.str != "" {
+ return nil, errors.New("unexpected type annotation")
+ }
+ if t.val[0] == '@' {
+ sig.str = t.val[1:]
+ } else {
+ sig.str = varTypeMap[t.val]
+ }
+ case tokByteString:
+ if sig.str != "" && sig.str != "ay" {
+ return nil, varTypeError{t.val, sig}
+ }
+ b, err := varParseByteString(t.val)
+ if err != nil {
+ return nil, err
+ }
+ return byteStringNode(b), nil
+ default:
+ return nil, fmt.Errorf("unexpected %q", t.val)
+ }
+ }
+}
+
+type varTypeError struct {
+ val string
+ sig Signature
+}
+
+func (e varTypeError) Error() string {
+ return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str)
+}
+
+type sigSet map[Signature]bool
+
+func (s sigSet) Empty() bool {
+ return len(s) == 0
+}
+
+func (s sigSet) Intersect(s2 sigSet) sigSet {
+ r := make(sigSet)
+ for k := range s {
+ if s2[k] {
+ r[k] = true
+ }
+ }
+ return r
+}
+
+func (s sigSet) Single() (Signature, bool) {
+ if len(s) == 1 {
+ for k := range s {
+ return k, true
+ }
+ }
+ return Signature{}, false
+}
+
+func (s sigSet) ToArray() sigSet {
+ r := make(sigSet, len(s))
+ for k := range s {
+ r[Signature{"a" + k.str}] = true
+ }
+ return r
+}
+
+type numNode struct {
+ sig Signature
+ str string
+ val interface{}
+}
+
+var numSigSet = sigSet{
+ Signature{"y"}: true,
+ Signature{"n"}: true,
+ Signature{"q"}: true,
+ Signature{"i"}: true,
+ Signature{"u"}: true,
+ Signature{"x"}: true,
+ Signature{"t"}: true,
+ Signature{"d"}: true,
+}
+
+func (n numNode) Infer() (Signature, error) {
+ if strings.ContainsAny(n.str, ".e") {
+ return Signature{"d"}, nil
+ }
+ return Signature{"i"}, nil
+}
+
+func (n numNode) String() string {
+ return n.str
+}
+
+func (n numNode) Sigs() sigSet {
+ if n.sig.str != "" {
+ return sigSet{n.sig: true}
+ }
+ if strings.ContainsAny(n.str, ".e") {
+ return sigSet{Signature{"d"}: true}
+ }
+ return numSigSet
+}
+
+func (n numNode) Value(sig Signature) (interface{}, error) {
+ if n.sig.str != "" && n.sig != sig {
+ return nil, varTypeError{n.str, sig}
+ }
+ if n.val != nil {
+ return n.val, nil
+ }
+ return varNumAs(n.str, sig)
+}
+
+func varMakeNumNode(tok varToken, sig Signature) (varNode, error) {
+ if sig.str == "" {
+ return numNode{str: tok.val}, nil
+ }
+ num, err := varNumAs(tok.val, sig)
+ if err != nil {
+ return nil, err
+ }
+ return numNode{sig: sig, val: num}, nil
+}
+
+func varNumAs(s string, sig Signature) (interface{}, error) {
+ isUnsigned := false
+ size := 32
+ switch sig.str {
+ case "n":
+ size = 16
+ case "i":
+ case "x":
+ size = 64
+ case "y":
+ size = 8
+ isUnsigned = true
+ case "q":
+ size = 16
+ isUnsigned = true
+ case "u":
+ isUnsigned = true
+ case "t":
+ size = 64
+ isUnsigned = true
+ case "d":
+ d, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, err
+ }
+ return d, nil
+ default:
+ return nil, varTypeError{s, sig}
+ }
+ base := 10
+ if strings.HasPrefix(s, "0x") {
+ base = 16
+ s = s[2:]
+ }
+ if strings.HasPrefix(s, "0") && len(s) != 1 {
+ base = 8
+ s = s[1:]
+ }
+ if isUnsigned {
+ i, err := strconv.ParseUint(s, base, size)
+ if err != nil {
+ return nil, err
+ }
+ var v interface{} = i
+ switch sig.str {
+ case "y":
+ v = byte(i)
+ case "q":
+ v = uint16(i)
+ case "u":
+ v = uint32(i)
+ }
+ return v, nil
+ }
+ i, err := strconv.ParseInt(s, base, size)
+ if err != nil {
+ return nil, err
+ }
+ var v interface{} = i
+ switch sig.str {
+ case "n":
+ v = int16(i)
+ case "i":
+ v = int32(i)
+ }
+ return v, nil
+}
+
+type stringNode struct {
+ sig Signature
+ str string // parsed
+ val interface{} // has correct type
+}
+
+var stringSigSet = sigSet{
+ Signature{"s"}: true,
+ Signature{"g"}: true,
+ Signature{"o"}: true,
+}
+
+func (n stringNode) Infer() (Signature, error) {
+ return Signature{"s"}, nil
+}
+
+func (n stringNode) String() string {
+ return n.str
+}
+
+func (n stringNode) Sigs() sigSet {
+ if n.sig.str != "" {
+ return sigSet{n.sig: true}
+ }
+ return stringSigSet
+}
+
+func (n stringNode) Value(sig Signature) (interface{}, error) {
+ if n.sig.str != "" && n.sig != sig {
+ return nil, varTypeError{n.str, sig}
+ }
+ if n.val != nil {
+ return n.val, nil
+ }
+ switch {
+ case sig.str == "g":
+ return Signature{n.str}, nil
+ case sig.str == "o":
+ return ObjectPath(n.str), nil
+ case sig.str == "s":
+ return n.str, nil
+ default:
+ return nil, varTypeError{n.str, sig}
+ }
+}
+
+func varMakeStringNode(tok varToken, sig Signature) (varNode, error) {
+ if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" {
+ return nil, fmt.Errorf("invalid type %q for string", sig.str)
+ }
+ s, err := varParseString(tok.val)
+ if err != nil {
+ return nil, err
+ }
+ n := stringNode{str: s}
+ if sig.str == "" {
+ return stringNode{str: s}, nil
+ }
+ n.sig = sig
+ switch sig.str {
+ case "o":
+ n.val = ObjectPath(s)
+ case "g":
+ n.val = Signature{s}
+ case "s":
+ n.val = s
+ }
+ return n, nil
+}
+
+// varParseString strips the surrounding quotes from s and resolves
+// backslash escapes: \a \b \f \n \r \t, an escaped literal newline (which
+// is dropped, acting as a line continuation), and \uXXXX / \UXXXXXXXX
+// unicode escapes. It fails on invalid UTF-8 or a malformed/short unicode
+// escape; any other escaped rune is passed through verbatim.
+// NOTE(review): a lone trailing backslash is not rejected — the second
+// DecodeRuneInString sees an empty string, yielding (RuneError, 0), which
+// slips past the size==1 check and silently emits U+FFFD via the default
+// case. Confirm whether an error would be more appropriate.
+func varParseString(s string) (string, error) {
+	// quotes are guaranteed to be there
+	s = s[1 : len(s)-1]
+	buf := new(bytes.Buffer)
+	for len(s) != 0 {
+		r, size := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && size == 1 {
+			return "", errors.New("invalid UTF-8")
+		}
+		s = s[size:]
+		if r != '\\' {
+			buf.WriteRune(r)
+			continue
+		}
+		// r was a backslash: decode the escaped rune that follows.
+		r, size = utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && size == 1 {
+			return "", errors.New("invalid UTF-8")
+		}
+		s = s[size:]
+		switch r {
+		case 'a':
+			buf.WriteRune(0x7)
+		case 'b':
+			buf.WriteRune(0x8)
+		case 'f':
+			buf.WriteRune(0xc)
+		case 'n':
+			buf.WriteRune('\n')
+		case 'r':
+			buf.WriteRune('\r')
+		case 't':
+			buf.WriteRune('\t')
+		case '\n':
+			// Escaped newline: swallowed (line continuation).
+		case 'u':
+			// 4-hex-digit unicode escape.
+			if len(s) < 4 {
+				return "", errors.New("short unicode escape")
+			}
+			r, err := strconv.ParseUint(s[:4], 16, 32)
+			if err != nil {
+				return "", err
+			}
+			buf.WriteRune(rune(r))
+			s = s[4:]
+		case 'U':
+			// 8-hex-digit unicode escape.
+			if len(s) < 8 {
+				return "", errors.New("short unicode escape")
+			}
+			r, err := strconv.ParseUint(s[:8], 16, 32)
+			if err != nil {
+				return "", err
+			}
+			buf.WriteRune(rune(r))
+			s = s[8:]
+		default:
+			// Unknown escape: keep the rune as-is.
+			buf.WriteRune(r)
+		}
+	}
+	return buf.String(), nil
+}
+
+// boolSigSet is the only signature a boolean literal can take.
+var boolSigSet = sigSet{Signature{"b"}: true}
+
+// boolNode is the parse node for the literals "true" and "false".
+type boolNode bool
+
+// Infer reports the only possible signature for a boolean, "b".
+func (boolNode) Infer() (Signature, error) {
+	return Signature{"b"}, nil
+}
+
+// String renders the node back to its source literal.
+func (b boolNode) String() string {
+	if b {
+		return "true"
+	}
+	return "false"
+}
+
+// Sigs returns the fixed one-element signature set {"b"}.
+func (boolNode) Sigs() sigSet {
+	return boolSigSet
+}
+
+// Value returns the underlying bool, rejecting any signature other than "b".
+func (b boolNode) Value(sig Signature) (interface{}, error) {
+	if sig.str != "b" {
+		return nil, varTypeError{b.String(), sig}
+	}
+	return bool(b), nil
+}
+
+// arrayNode is the parse node for an array literal "[...]".
+type arrayNode struct {
+	set      sigSet    // possible array signatures, narrowed while parsing
+	children []varNode // element nodes in source order
+	val      interface{}
+}
+
+// Infer derives the array signature from the first child whose own type can
+// be inferred, prefixing "a" to the element signature.
+func (n arrayNode) Infer() (Signature, error) {
+	for _, v := range n.children {
+		csig, err := varInfer(v)
+		if err != nil {
+			// This child is ambiguous; try the next one.
+			continue
+		}
+		return Signature{"a" + csig.str}, nil
+	}
+	return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
+}
+
+// String renders the array back to "[e1, e2, ...]" form.
+func (n arrayNode) String() string {
+	s := "["
+	for i, v := range n.children {
+		s += v.String()
+		if i != len(n.children)-1 {
+			s += ", "
+		}
+	}
+	return s + "]"
+}
+
+// Sigs returns the set of signatures this array may still take.
+func (n arrayNode) Sigs() sigSet {
+	return n.set
+}
+
+// Value builds a Go slice of the type matching sig, converting each child
+// with the element signature (sig minus the leading "a").
+func (n arrayNode) Value(sig Signature) (interface{}, error) {
+	if n.set.Empty() {
+		// no type information whatsoever, so this must be an empty slice
+		return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil
+	}
+	if !n.set[sig] {
+		return nil, varTypeError{n.String(), sig}
+	}
+	s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children))
+	for i, v := range n.children {
+		rv, err := v.Value(Signature{sig.str[1:]})
+		if err != nil {
+			return nil, err
+		}
+		s.Index(i).Set(reflect.ValueOf(rv))
+	}
+	return s.Interface(), nil
+}
+
+// varMakeArrayNode parses an array literal after the opening bracket has
+// been consumed. The candidate signature set starts from sig (if given) and
+// is narrowed by intersecting it with each element's possible signatures;
+// an empty intersection means the elements are type-incompatible.
+func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) {
+	var n arrayNode
+	if sig.str != "" {
+		n.set = sigSet{sig: true}
+	}
+	// Fast path: "[]" is a valid (empty) array.
+	if t := p.next(); t.typ == tokArrayEnd {
+		return n, nil
+	} else {
+		p.backup()
+	}
+Loop:
+	for {
+		t := p.next()
+		switch t.typ {
+		case tokEOF:
+			return nil, io.ErrUnexpectedEOF
+		case tokError:
+			return nil, errors.New(t.val)
+		}
+		p.backup()
+		// Parse the next element.
+		cn, err := varMakeNode(p)
+		if err != nil {
+			return nil, err
+		}
+		if cset := cn.Sigs(); !cset.Empty() {
+			if n.set.Empty() {
+				n.set = cset.ToArray()
+			} else {
+				// Narrow the candidate set to signatures compatible with
+				// both this element and all previous ones.
+				nset := cset.ToArray().Intersect(n.set)
+				if nset.Empty() {
+					return nil, fmt.Errorf("can't parse %q with given type information", cn.String())
+				}
+				n.set = nset
+			}
+		}
+		n.children = append(n.children, cn)
+		// Expect a separator or the closing bracket.
+		switch t := p.next(); t.typ {
+		case tokEOF:
+			return nil, io.ErrUnexpectedEOF
+		case tokError:
+			return nil, errors.New(t.val)
+		case tokArrayEnd:
+			break Loop
+		case tokComma:
+			continue
+		default:
+			return nil, fmt.Errorf("unexpected %q", t.val)
+		}
+	}
+	return n, nil
+}
+
+// variantNode is the parse node for a variant literal "<...>", wrapping a
+// single inner node.
+type variantNode struct {
+	n varNode
+}
+
+// variantSet is the only signature a variant can take.
+var variantSet = sigSet{
+	Signature{"v"}: true,
+}
+
+// Infer reports the fixed variant signature "v".
+func (variantNode) Infer() (Signature, error) {
+	return Signature{"v"}, nil
+}
+
+// String renders the node back to "<inner>" form.
+func (n variantNode) String() string {
+	return "<" + n.n.String() + ">"
+}
+
+// Sigs returns the fixed one-element signature set {"v"}.
+func (variantNode) Sigs() sigSet {
+	return variantSet
+}
+
+// Value wraps the inner node's value in a Variant. The inner type is
+// inferred from the inner node itself (sig is deliberately shadowed here:
+// the outer signature must be "v", the inner one is computed).
+func (n variantNode) Value(sig Signature) (interface{}, error) {
+	if sig.str != "v" {
+		return nil, varTypeError{n.String(), sig}
+	}
+	sig, err := varInfer(n.n)
+	if err != nil {
+		return nil, err
+	}
+	v, err := n.n.Value(sig)
+	if err != nil {
+		return nil, err
+	}
+	return MakeVariant(v), nil
+}
+
+// varMakeVariantNode parses a variant literal after the opening "<" has
+// been consumed: one inner node followed by the closing ">". An explicit
+// type annotation other than "v" is rejected.
+func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) {
+	n, err := varMakeNode(p)
+	if err != nil {
+		return nil, err
+	}
+	if t := p.next(); t.typ != tokVariantEnd {
+		return nil, fmt.Errorf("unexpected %q", t.val)
+	}
+	// vn is only needed for the error message below; the final return
+	// rebuilds an identical node.
+	vn := variantNode{n}
+	if sig.str != "" && sig.str != "v" {
+		return nil, varTypeError{vn.String(), sig}
+	}
+	return variantNode{n}, nil
+}
+
+// dictEntry is one "key: value" pair inside a dict literal.
+type dictEntry struct {
+	key, val varNode
+}
+
+// dictNode is the parse node for a dict literal "{...}".
+type dictNode struct {
+	kset, vset sigSet      // possible key / value signatures, narrowed while parsing
+	children   []dictEntry // entries in source order
+	val        interface{}
+}
+
+// Infer derives the dict signature "a{kv}" from the first entry whose key
+// and value types can both be inferred.
+func (n dictNode) Infer() (Signature, error) {
+	for _, v := range n.children {
+		ksig, err := varInfer(v.key)
+		if err != nil {
+			continue
+		}
+		vsig, err := varInfer(v.val)
+		if err != nil {
+			continue
+		}
+		return Signature{"a{" + ksig.str + vsig.str + "}"}, nil
+	}
+	return Signature{}, fmt.Errorf("can't infer type for %q", n.String())
+}
+
+// String renders the dict back to `{k1: v1, k2: v2}` form.
+func (n dictNode) String() string {
+	s := "{"
+	for i, v := range n.children {
+		s += v.key.String() + ": " + v.val.String()
+		if i != len(n.children)-1 {
+			s += ", "
+		}
+	}
+	return s + "}"
+}
+
+// Sigs returns the cartesian product of possible key and value signatures,
+// each combined into a full dict signature "a{kv}".
+func (n dictNode) Sigs() sigSet {
+	r := sigSet{}
+	for k := range n.kset {
+		for v := range n.vset {
+			sig := "a{" + k.str + v.str + "}"
+			r[Signature{sig}] = true
+		}
+	}
+	return r
+}
+
+// Value builds a Go map of the type matching sig, converting each key with
+// the key signature (sig byte 2) and each value with the value signature
+// (the rest up to the closing brace).
+func (n dictNode) Value(sig Signature) (interface{}, error) {
+	set := n.Sigs()
+	if set.Empty() {
+		// no type information -> empty dict
+		return reflect.MakeMap(typeFor(sig.str)).Interface(), nil
+	}
+	if !set[sig] {
+		return nil, varTypeError{n.String(), sig}
+	}
+	m := reflect.MakeMap(typeFor(sig.str))
+	ksig := Signature{sig.str[2:3]}
+	vsig := Signature{sig.str[3 : len(sig.str)-1]}
+	for _, v := range n.children {
+		kv, err := v.key.Value(ksig)
+		if err != nil {
+			return nil, err
+		}
+		vv, err := v.val.Value(vsig)
+		if err != nil {
+			return nil, err
+		}
+		m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv))
+	}
+	return m.Interface(), nil
+}
+
+// varMakeDictNode parses a dict literal after the opening brace has been
+// consumed. An explicit dict signature "a{kv}" (at least 5 bytes) seeds the
+// key and value candidate sets; both sets are then narrowed by intersecting
+// with each entry's possible signatures.
+func varMakeDictNode(p *varParser, sig Signature) (varNode, error) {
+	var n dictNode
+
+	if sig.str != "" {
+		if len(sig.str) < 5 {
+			// Shortest valid dict signature is a{kv}: 5 bytes.
+			return nil, fmt.Errorf("invalid signature %q for dict type", sig)
+		}
+		ksig := Signature{string(sig.str[2])}
+		vsig := Signature{sig.str[3 : len(sig.str)-1]}
+		n.kset = sigSet{ksig: true}
+		n.vset = sigSet{vsig: true}
+	}
+	// Fast path: "{}" is a valid (empty) dict.
+	if t := p.next(); t.typ == tokDictEnd {
+		return n, nil
+	} else {
+		p.backup()
+	}
+Loop:
+	for {
+		t := p.next()
+		switch t.typ {
+		case tokEOF:
+			return nil, io.ErrUnexpectedEOF
+		case tokError:
+			return nil, errors.New(t.val)
+		}
+		p.backup()
+		// Parse the key and narrow the key candidate set.
+		kn, err := varMakeNode(p)
+		if err != nil {
+			return nil, err
+		}
+		if kset := kn.Sigs(); !kset.Empty() {
+			if n.kset.Empty() {
+				n.kset = kset
+			} else {
+				n.kset = kset.Intersect(n.kset)
+				if n.kset.Empty() {
+					return nil, fmt.Errorf("can't parse %q with given type information", kn.String())
+				}
+			}
+		}
+		// Expect the ':' separating key from value.
+		t = p.next()
+		switch t.typ {
+		case tokEOF:
+			return nil, io.ErrUnexpectedEOF
+		case tokError:
+			return nil, errors.New(t.val)
+		case tokColon:
+		default:
+			return nil, fmt.Errorf("unexpected %q", t.val)
+		}
+		t = p.next()
+		switch t.typ {
+		case tokEOF:
+			return nil, io.ErrUnexpectedEOF
+		case tokError:
+			return nil, errors.New(t.val)
+		}
+		p.backup()
+		// Parse the value and narrow the value candidate set.
+		vn, err := varMakeNode(p)
+		if err != nil {
+			return nil, err
+		}
+		if vset := vn.Sigs(); !vset.Empty() {
+			if n.vset.Empty() {
+				n.vset = vset
+			} else {
+				n.vset = n.vset.Intersect(vset)
+				if n.vset.Empty() {
+					return nil, fmt.Errorf("can't parse %q with given type information", vn.String())
+				}
+			}
+		}
+		n.children = append(n.children, dictEntry{kn, vn})
+		// Expect a separator or the closing brace.
+		t = p.next()
+		switch t.typ {
+		case tokEOF:
+			return nil, io.ErrUnexpectedEOF
+		case tokError:
+			return nil, errors.New(t.val)
+		case tokDictEnd:
+			break Loop
+		case tokComma:
+			continue
+		default:
+			return nil, fmt.Errorf("unexpected %q", t.val)
+		}
+	}
+	return n, nil
+}
+
+// byteStringNode is the parse node for a byte-string literal (b"...").
+type byteStringNode []byte
+
+// byteStringSet is the only signature a byte string can take.
+var byteStringSet = sigSet{
+	Signature{"ay"}: true,
+}
+
+// Infer reports the fixed byte-array signature "ay".
+func (byteStringNode) Infer() (Signature, error) {
+	return Signature{"ay"}, nil
+}
+
+// String renders the raw bytes as a string.
+func (b byteStringNode) String() string {
+	return string(b)
+}
+
+// Sigs returns the fixed one-element signature set {"ay"}.
+func (b byteStringNode) Sigs() sigSet {
+	return byteStringSet
+}
+
+// Value returns the underlying bytes, rejecting any signature other than
+// "ay".
+func (b byteStringNode) Value(sig Signature) (interface{}, error) {
+	if sig.str != "ay" {
+		return nil, varTypeError{b.String(), sig}
+	}
+	return []byte(b), nil
+}
+
+// varParseByteString strips the leading b and quotes from s, resolves
+// backslash escapes (\a \b \f \n \r \t, \xHH hex and \0OOO octal byte
+// escapes), and returns the bytes with a trailing NUL appended, matching
+// the GVariant convention for byte strings.
+// NOTE(review): a lone trailing backslash makes `c = s[0]` below index an
+// empty string and panic; unlike varParseString there is no length check
+// after consuming the backslash. Confirm inputs are pre-validated by the
+// lexer or add a guard.
+func varParseByteString(s string) ([]byte, error) {
+	// quotes and b at start are guaranteed to be there
+	b := make([]byte, 0, 1)
+	s = s[2 : len(s)-1]
+	for len(s) != 0 {
+		c := s[0]
+		s = s[1:]
+		if c != '\\' {
+			b = append(b, c)
+			continue
+		}
+		// c was a backslash: read the escaped byte.
+		c = s[0]
+		s = s[1:]
+		switch c {
+		case 'a':
+			b = append(b, 0x7)
+		case 'b':
+			b = append(b, 0x8)
+		case 'f':
+			b = append(b, 0xc)
+		case 'n':
+			b = append(b, '\n')
+		case 'r':
+			b = append(b, '\r')
+		case 't':
+			b = append(b, '\t')
+		case 'x':
+			// Two-digit hex escape.
+			if len(s) < 2 {
+				return nil, errors.New("short escape")
+			}
+			n, err := strconv.ParseUint(s[:2], 16, 8)
+			if err != nil {
+				return nil, err
+			}
+			b = append(b, byte(n))
+			s = s[2:]
+		case '0':
+			// Three-digit octal escape.
+			if len(s) < 3 {
+				return nil, errors.New("short escape")
+			}
+			n, err := strconv.ParseUint(s[:3], 8, 8)
+			if err != nil {
+				return nil, err
+			}
+			b = append(b, byte(n))
+			s = s[3:]
+		default:
+			// Unknown escape: keep the byte as-is.
+			b = append(b, c)
+		}
+	}
+	// Byte strings are NUL-terminated.
+	return append(b, 0), nil
+}
+
+// varInfer determines the signature of n: if its candidate set contains
+// exactly one signature that one wins; otherwise the node's own Infer
+// method is consulted.
+func varInfer(n varNode) (Signature, error) {
+	if sig, ok := n.Sigs().Single(); ok {
+		return sig, nil
+	}
+	return n.Infer()
+}
diff --git a/vendor/src/github.com/godbus/dbus/variant_test.go b/vendor/src/github.com/godbus/dbus/variant_test.go
new file mode 100644
index 0000000000..da917c8e29
--- /dev/null
+++ b/vendor/src/github.com/godbus/dbus/variant_test.go
@@ -0,0 +1,78 @@
+package dbus
+
+import "reflect"
+import "testing"
+
+// variantFormatTests pairs a Go value with the GVariant text it is expected
+// to format as; a leading "@sig " annotation appears where the type cannot
+// be inferred from the literal alone.
+var variantFormatTests = []struct {
+	v interface{}
+	s string
+}{
+	{int32(1), `1`},
+	{"foo", `"foo"`},
+	{ObjectPath("/org/foo"), `@o "/org/foo"`},
+	{Signature{"i"}, `@g "i"`},
+	{[]byte{}, `@ay []`},
+	{[]int32{1, 2}, `[1, 2]`},
+	{[]int64{1, 2}, `@ax [1, 2]`},
+	{[][]int32{{3, 4}, {5, 6}}, `[[3, 4], [5, 6]]`},
+	{[]Variant{MakeVariant(int32(1)), MakeVariant(1.0)}, `[<1>, <@d 1>]`},
+	{map[string]int32{"one": 1, "two": 2}, `{"one": 1, "two": 2}`},
+	{map[int32]ObjectPath{1: "/org/foo"}, `@a{io} {1: "/org/foo"}`},
+	{map[string]Variant{}, `@a{sv} {}`},
+}
+
+// TestFormatVariant checks that Variant.String produces the expected
+// GVariant text representation for each table entry.
+func TestFormatVariant(t *testing.T) {
+	for i, v := range variantFormatTests {
+		if s := MakeVariant(v.v).String(); s != v.s {
+			t.Errorf("test %d: got %q, wanted %q", i+1, s, v.s)
+		}
+	}
+}
+
+// variantParseTests pairs GVariant text with the Go value it should parse
+// to, covering numeric literals, escapes, nested containers, variants,
+// byte strings (note the implicit trailing NUL) and explicit "@sig" /
+// typename annotations.
+var variantParseTests = []struct {
+	s string
+	v interface{}
+}{
+	{"1", int32(1)},
+	{"true", true},
+	{"false", false},
+	{"1.0", float64(1.0)},
+	{"0x10", int32(16)},
+	{"1e1", float64(10)},
+	{`"foo"`, "foo"},
+	{`"\a\b\f\n\r\t"`, "\x07\x08\x0c\n\r\t"},
+	{`"\u00e4\U0001f603"`, "\u00e4\U0001f603"},
+	{"[1]", []int32{1}},
+	{"[1, 2, 3]", []int32{1, 2, 3}},
+	{"@ai []", []int32{}},
+	{"[1, 5.0]", []float64{1, 5.0}},
+	{"[[1, 2], [3, 4.0]]", [][]float64{{1, 2}, {3, 4}}},
+	{`[@o "/org/foo", "/org/bar"]`, []ObjectPath{"/org/foo", "/org/bar"}},
+	{"<1>", MakeVariant(int32(1))},
+	{"[<1>, <2.0>]", []Variant{MakeVariant(int32(1)), MakeVariant(2.0)}},
+	{`[[], [""]]`, [][]string{{}, {""}}},
+	{`@a{ss} {}`, map[string]string{}},
+	{`{"foo": 1}`, map[string]int32{"foo": 1}},
+	{`[{}, {"foo": "bar"}]`, []map[string]string{{}, {"foo": "bar"}}},
+	{`{"a": <1>, "b": <"foo">}`,
+		map[string]Variant{"a": MakeVariant(int32(1)), "b": MakeVariant("foo")}},
+	{`b''`, []byte{0}},
+	{`b"abc"`, []byte{'a', 'b', 'c', 0}},
+	{`b"\x01\0002\a\b\f\n\r\t"`, []byte{1, 2, 0x7, 0x8, 0xc, '\n', '\r', '\t', 0}},
+	{`[[0], b""]`, [][]byte{{0}, {0}}},
+	{"int16 0", int16(0)},
+	{"byte 0", byte(0)},
+}
+
+// TestParseVariant checks that ParseVariant reproduces the expected Go
+// value (compared with reflect.DeepEqual) for each table entry.
+func TestParseVariant(t *testing.T) {
+	for i, v := range variantParseTests {
+		nv, err := ParseVariant(v.s, Signature{})
+		if err != nil {
+			t.Errorf("test %d: parsing failed: %s", i+1, err)
+			continue
+		}
+		if !reflect.DeepEqual(nv.value, v.v) {
+			t.Errorf("test %d: got %q, wanted %q", i+1, nv, v.v)
+		}
+	}
+}
diff --git a/vendor/src/github.com/kr/pty/doc.go b/vendor/src/github.com/kr/pty/doc.go
index 491c060b28..190cfbea92 100644
--- a/vendor/src/github.com/kr/pty/doc.go
+++ b/vendor/src/github.com/kr/pty/doc.go
@@ -2,9 +2,14 @@
package pty
import (
+ "errors"
"os"
)
+// ErrUnsupported is returned if a function is not
+// available on the current platform.
+var ErrUnsupported = errors.New("unsupported")
+
// Opens a pty and its corresponding tty.
func Open() (pty, tty *os.File, err error) {
return open()
diff --git a/vendor/src/github.com/kr/pty/pty_freebsd.go b/vendor/src/github.com/kr/pty/pty_freebsd.go
new file mode 100644
index 0000000000..13b64d722e
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/pty_freebsd.go
@@ -0,0 +1,53 @@
+package pty
+
+import (
+ "os"
+ "strconv"
+ "syscall"
+ "unsafe"
+)
+
+const (
+	// sys_TIOCGPTN asks the pty master for its unit number (used by ptsname).
+	sys_TIOCGPTN = 0x4004740F
+	// NOTE(review): sys_TIOCSPTLCK is unused in this file and its value
+	// matches the Linux TIOCSPTLCK ioctl — confirm it is meaningful on
+	// FreeBSD or remove it.
+	sys_TIOCSPTLCK = 0x40045431
+)
+
+// open allocates a pty/tty pair on FreeBSD: it opens the /dev/ptmx clone
+// device for the master, resolves the slave name via the TIOCGPTN ioctl,
+// and opens the slave without making it the controlling terminal.
+// NOTE(review): the master p is not closed on the later error paths —
+// confirm the fd leak is acceptable here.
+func open() (pty, tty *os.File, err error) {
+	p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	sname, err := ptsname(p)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// O_NOCTTY: do not adopt the slave as our controlling terminal.
+	t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0)
+	if err != nil {
+		return nil, nil, err
+	}
+	return p, t, nil
+}
+
+// ptsname returns the path of the slave pty that corresponds to the master
+// f, by querying the master's unit number with the TIOCGPTN ioctl.
+func ptsname(f *os.File) (string, error) {
+	var n int
+	err := ioctl(f.Fd(), sys_TIOCGPTN, &n)
+	if err != nil {
+		return "", err
+	}
+	return "/dev/pts/" + strconv.Itoa(n), nil
+}
+
+// ioctl issues the raw ioctl syscall on fd with an int-pointer argument.
+// NOTE(review): the actual errno e is discarded and ENOTTY is returned for
+// every failure — callers cannot distinguish error causes. Confirm whether
+// `return e` was intended.
+func ioctl(fd uintptr, cmd uintptr, data *int) error {
+	_, _, e := syscall.Syscall(
+		syscall.SYS_IOCTL,
+		fd,
+		cmd,
+		uintptr(unsafe.Pointer(data)),
+	)
+	if e != 0 {
+		return syscall.ENOTTY
+	}
+	return nil
+}
diff --git a/vendor/src/github.com/kr/pty/pty_unsupported.go b/vendor/src/github.com/kr/pty/pty_unsupported.go
new file mode 100644
index 0000000000..d4958b3583
--- /dev/null
+++ b/vendor/src/github.com/kr/pty/pty_unsupported.go
@@ -0,0 +1,27 @@
+// +build !linux,!darwin,!freebsd
+
+package pty
+
+import (
+ "os"
+)
+
+// open is the stub for platforms without pty support; it always fails with
+// ErrUnsupported (declared in doc.go).
+func open() (pty, tty *os.File, err error) {
+	return nil, nil, ErrUnsupported
+}
+
+// ptsname is a stub; it always fails with ErrUnsupported.
+func ptsname(f *os.File) (string, error) {
+	return "", ErrUnsupported
+}
+
+// grantpt is a stub; it always fails with ErrUnsupported.
+func grantpt(f *os.File) error {
+	return ErrUnsupported
+}
+
+// unlockpt is a stub; it always fails with ErrUnsupported.
+func unlockpt(f *os.File) error {
+	return ErrUnsupported
+}
+
+// ioctl is a stub; it always fails with ErrUnsupported.
+func ioctl(fd, cmd, ptr uintptr) error {
+	return ErrUnsupported
+}
diff --git a/version.go b/version.go
deleted file mode 100644
index d88def9619..0000000000
--- a/version.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package docker
-
-import (
- "github.com/dotcloud/docker/dockerversion"
- "github.com/dotcloud/docker/engine"
- "github.com/dotcloud/docker/utils"
- "runtime"
-)
-
-func GetVersion(job *engine.Job) engine.Status {
- if _, err := dockerVersion().WriteTo(job.Stdout); err != nil {
- job.Errorf("%s", err)
- return engine.StatusErr
- }
- return engine.StatusOK
-}
-
-// dockerVersion returns detailed version information in the form of a queriable
-// environment.
-func dockerVersion() *engine.Env {
- v := &engine.Env{}
- v.Set("Version", dockerversion.VERSION)
- v.Set("GitCommit", dockerversion.GITCOMMIT)
- v.Set("GoVersion", runtime.Version())
- v.Set("Os", runtime.GOOS)
- v.Set("Arch", runtime.GOARCH)
- // FIXME:utils.GetKernelVersion should only be needed here
- if kernelVersion, err := utils.GetKernelVersion(); err == nil {
- v.Set("KernelVersion", kernelVersion.String())
- }
- return v
-}