author     Tibor Vass <teabee89@gmail.com>  2014-12-11 15:31:43 -0500
committer  Tibor Vass <teabee89@gmail.com>  2014-12-11 15:31:43 -0500
commit     be8f9a23134c7a63ef1d2d43bde558341f094d41 (patch)
tree       031b20e1b9459bac20944b99b2f44edc16f07b35
parent     6f514d28c0270860a6d508113499b90ea60dcc48 (diff)
parent     d3446258471733fe667f0abdf015ee3ebd2f3485 (diff)
download   docker-release-1.3.tar.gz

Merge pull request #9617 from tiborvass/bump_v1.3.3 (branch: release-1.3)
Bump v1.3.3
-rw-r--r--  CHANGELOG.md                                     14
-rw-r--r--  Dockerfile                                        2
-rw-r--r--  VERSION                                           2
-rw-r--r--  api/client/hijack.go                            105
-rw-r--r--  builder/parser/testfiles/docker/Dockerfile        2
-rw-r--r--  builder/parser/testfiles/docker/result            2
-rw-r--r--  docs/sources/release-notes.md                    34
-rw-r--r--  graph/load.go                                     5
-rw-r--r--  graph/tags_unit_test.go                           2
-rw-r--r--  integration-cli/docker_cli_build_test.go        322
-rw-r--r--  integration-cli/docker_cli_cp_test.go            38
-rw-r--r--  integration-cli/docker_cli_save_load_test.go    134
-rw-r--r--  pkg/archive/archive.go                           61
-rw-r--r--  pkg/archive/diff.go                              43
-rw-r--r--  pkg/chrootarchive/archive.go                     53
-rw-r--r--  pkg/chrootarchive/archive_test.go                57
-rw-r--r--  pkg/chrootarchive/diff.go                        28
-rw-r--r--  pkg/chrootarchive/init.go                         8
-rw-r--r--  pkg/symlink/LICENSE.APACHE                      191
-rw-r--r--  pkg/symlink/LICENSE.BSD                          27
-rw-r--r--  pkg/symlink/MAINTAINERS                           5
-rw-r--r--  pkg/symlink/README.md                             5
-rw-r--r--  pkg/symlink/fs.go                               184
-rw-r--r--  pkg/symlink/fs_test.go                          472
l---------  pkg/symlink/testdata/fs/a/d                       1
l---------  pkg/symlink/testdata/fs/a/e                       1
l---------  pkg/symlink/testdata/fs/a/f                       1
l---------  pkg/symlink/testdata/fs/b/h                       1
l---------  pkg/symlink/testdata/fs/g                         1
l---------  pkg/symlink/testdata/fs/i                         1
l---------  pkg/symlink/testdata/fs/j/k                       1
-rw-r--r--  registry/registry.go                              4
-rw-r--r--  utils/utils.go                                   12
33 files changed, 1482 insertions, 337 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2cddc6c904..fdc05c106e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
# Changelog
+## 1.3.3 (2014-12-11)
+
+#### Security
+- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356)
+- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357)
+- Validate image IDs (CVE-2014-9358)
+
+#### Runtime
+- Fix an issue when image archives are being read slowly
+
+#### Client
+- Fix a regression related to stdin redirection
+- Fix a regression with `docker cp` when destination is the current directory
+
## 1.3.2 (2014-11-20)
#### Security
diff --git a/Dockerfile b/Dockerfile
index d9c58a921d..bd943deebe 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -75,7 +75,7 @@ ENV GOARM 5
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
# Grab Go's cover tool for dead-simple code coverage testing
-RUN go get code.google.com/p/go.tools/cmd/cover
+RUN go get golang.org/x/tools/cmd/cover
# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.3.2
diff --git a/VERSION b/VERSION
index 1892b92676..31e5c84349 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.3.2
+1.3.3
diff --git a/api/client/hijack.go b/api/client/hijack.go
index 00170a4a37..d0b5e93ef0 100644
--- a/api/client/hijack.go
+++ b/api/client/hijack.go
@@ -2,6 +2,7 @@ package client
import (
"crypto/tls"
+ "errors"
"fmt"
"io"
"net"
@@ -10,6 +11,7 @@ import (
"os"
"runtime"
"strings"
+ "time"
"github.com/docker/docker/api"
"github.com/docker/docker/dockerversion"
@@ -19,9 +21,99 @@ import (
"github.com/docker/docker/pkg/term"
)
+type tlsClientCon struct {
+ *tls.Conn
+ rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+ // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+ // on its underlying connection.
+ if cwc, ok := c.rawConn.(interface {
+ CloseWrite() error
+ }); ok {
+ return cwc.CloseWrite()
+ }
+ return nil
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+ return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (pkg/crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+ // We want the Timeout and Deadline values from dialer to cover the
+ // whole process: TCP connection and TLS handshake. This means that we
+ // also need to start our own timers now.
+ timeout := dialer.Timeout
+
+ if !dialer.Deadline.IsZero() {
+ deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ if timeout == 0 || deadlineTimeout < timeout {
+ timeout = deadlineTimeout
+ }
+ }
+
+ var errChannel chan error
+
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- errors.New("")
+ })
+ }
+
+ rawConn, err := dialer.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ hostname := addr[:colonPos]
+
+ // If no ServerName is set, infer the ServerName
+ // from the hostname we're connecting to.
+ if config.ServerName == "" {
+ // Make a copy to avoid polluting argument or default.
+ c := *config
+ c.ServerName = hostname
+ config = &c
+ }
+
+ conn := tls.Client(rawConn, config)
+
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+
+ err = <-errChannel
+ }
+
+ if err != nil {
+ rawConn.Close()
+ return nil, err
+ }
+
+ // This is where Docker differs from the standard crypto/tls package: we
+ // return a wrapper which holds both the TLS and the raw connections.
+ return &tlsClientCon{conn, rawConn}, nil
+}
+
func (cli *DockerCli) dial() (net.Conn, error) {
if cli.tlsConfig != nil && cli.proto != "unix" {
- return tls.Dial(cli.proto, cli.addr, cli.tlsConfig)
+ // Notice this isn't Go standard's tls.Dial function
+ return tlsDial(cli.proto, cli.addr, cli.tlsConfig)
}
return net.Dial(cli.proto, cli.addr)
}
@@ -109,12 +201,11 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea
io.Copy(rwc, in)
log.Debugf("[hijack] End of stdin")
}
- if tcpc, ok := rwc.(*net.TCPConn); ok {
- if err := tcpc.CloseWrite(); err != nil {
- log.Debugf("Couldn't send EOF: %s", err)
- }
- } else if unixc, ok := rwc.(*net.UnixConn); ok {
- if err := unixc.CloseWrite(); err != nil {
+
+ if conn, ok := rwc.(interface {
+ CloseWrite() error
+ }); ok {
+ if err := conn.CloseWrite(); err != nil {
log.Debugf("Couldn't send EOF: %s", err)
}
}
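
The hunk above replaces the separate *net.TCPConn / *net.UnixConn branches with a single interface assertion; both types, as well as the new tlsClientCon wrapper, already provide CloseWrite() error. A minimal standalone sketch of that pattern (not code from this commit; the local listener is only there to make the example runnable):

```go
package main

import (
	"fmt"
	"net"
)

// closeWriter matches any connection that supports half-closing the write
// side: *net.TCPConn, *net.UnixConn, and the tlsClientCon wrapper above.
type closeWriter interface {
	CloseWrite() error
}

// sendEOF half-closes the connection if it supports it, mirroring the logic
// in the hijack() hunk above.
func sendEOF(conn net.Conn) {
	if cw, ok := conn.(closeWriter); ok {
		if err := cw.CloseWrite(); err != nil {
			fmt.Printf("Couldn't send EOF: %s\n", err)
		}
	}
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	go ln.Accept() // accept one connection so Dial succeeds

	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	sendEOF(conn) // *net.TCPConn implements CloseWrite, so this half-closes
}
```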
diff --git a/builder/parser/testfiles/docker/Dockerfile b/builder/parser/testfiles/docker/Dockerfile
index 68f8f0b78b..fba1d8b993 100644
--- a/builder/parser/testfiles/docker/Dockerfile
+++ b/builder/parser/testfiles/docker/Dockerfile
@@ -76,7 +76,7 @@ ENV GOARM 5
RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'
# Grab Go's cover tool for dead-simple code coverage testing
-RUN go get code.google.com/p/go.tools/cmd/cover
+RUN go get golang.org/x/tools/cmd/cover
# TODO replace FPM with some very minimal debhelper stuff
RUN gem install --no-rdoc --no-ri fpm --version 1.0.2
diff --git a/builder/parser/testfiles/docker/result b/builder/parser/testfiles/docker/result
index 915e2ef17b..db74e869be 100644
--- a/builder/parser/testfiles/docker/result
+++ b/builder/parser/testfiles/docker/result
@@ -11,7 +11,7 @@
(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm darwin/amd64 darwin/386 freebsd/amd64 freebsd/386 freebsd/arm")
(env "GOARM" "5")
(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'")
-(run "go get code.google.com/p/go.tools/cmd/cover")
+(run "go get golang.org/x/tools/cmd/cover")
(run "gem install --no-rdoc --no-ri fpm --version 1.0.2")
(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox")
(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg")
diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md
index a19eb71b6f..1e744e32a5 100644
--- a/docs/sources/release-notes.md
+++ b/docs/sources/release-notes.md
@@ -4,6 +4,40 @@ understanding, release
#Release Notes
+##Version 1.3.3
+(2014-12-11)
+
+This release fixes several security issues. In order to encourage immediate
+upgrading, this release also patches some critical bugs. All users are highly
+encouraged to upgrade as soon as possible.
+
+*Security fixes*
+
+Patches and changes were made to address the following vulnerabilities:
+
+* CVE-2014-9356: Path traversal during processing of absolute symlinks.
+Absolute symlinks were not adequately checked for traversal which created a
+vulnerability via image extraction and/or volume mounts.
+* CVE-2014-9357: Escalation of privileges during decompression of LZMA (.xz)
+archives. Docker 1.3.2 added `chroot` for archive extraction. This created a
+vulnerability that could allow malicious images or builds to write files to the
+host system and escape containerization, leading to privilege escalation.
+* CVE-2014-9358: Path traversal and spoofing opportunities via image
+identifiers. Image IDs passed either via `docker load` or registry communications
+were not sufficiently validated. This created a vulnerability to path traversal
+attacks wherein malicious images or repository spoofing could lead to graph
+corruption and manipulation.
+
+*Runtime fixes*
+
+* Fixed an issue that caused image archives to be read slowly.
+
+*Client fixes*
+
+* Fixed a regression related to STDIN redirection.
+* Fixed a regression involving `docker cp` when the current directory is the
+destination.
+
##Version 1.3.2
(2014-11-24)
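
To illustrate the class of bug the release notes describe under CVE-2014-9356: once an untrusted archive is allowed to create a symlink with an absolute target, any later write through that link lands outside the directory being populated. A small illustration, not Docker code; the paths are arbitrary:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	// Stand-in for the directory an image is being extracted into.
	root, err := ioutil.TempDir("", "extract-root-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	// An attacker-controlled tar entry: <root>/escape -> / (absolute target).
	link := filepath.Join(root, "escape")
	if err := os.Symlink("/", link); err != nil {
		panic(err)
	}

	// A naive extractor that later writes to <root>/escape/etc/passwd is
	// really writing to /etc/passwd on the host.
	resolved, err := filepath.EvalSymlinks(link)
	if err != nil {
		panic(err)
	}
	fmt.Println("escape resolves to:", resolved) // "/", outside of root
}
```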
diff --git a/graph/load.go b/graph/load.go
index fcbeef6196..f27aca4a67 100644
--- a/graph/load.go
+++ b/graph/load.go
@@ -12,6 +12,7 @@ import (
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/log"
+ "github.com/docker/docker/utils"
)
// Loads a set of images into the repository. This is the complementary of ImageExport.
@@ -112,6 +113,10 @@ func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string
log.Debugf("Error unmarshalling json", err)
return err
}
+ if err := utils.ValidateID(img.ID); err != nil {
+ log.Debugf("Error validating ID: %s", err)
+ return err
+ }
if img.Parent != "" {
if !s.graph.Exists(img.Parent) {
if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil {
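
The new utils.ValidateID call above rejects image IDs before they are joined into graph paths (CVE-2014-9358). A sketch of the kind of check involved, assuming the usual 64-character lowercase-hex ID format; the exact Docker implementation may differ:

```go
package main

import (
	"fmt"
	"regexp"
)

// validID accepts only 64-character lowercase hex strings, so an ID can never
// smuggle path separators or ".." into a graph directory path.
var validID = regexp.MustCompile(`^[a-f0-9]{64}$`)

func validateID(id string) error {
	if !validID.MatchString(id) {
		return fmt.Errorf("image ID %q is invalid", id)
	}
	return nil
}

func main() {
	// The ID used by the updated graph/tags_unit_test.go below passes.
	fmt.Println(validateID("1a2d3c4d4e5fa2d2a21acea242a5e2345d3aefc3e7dfa2a2a2a21a2a2ad2d234"))
	// A path-traversal attempt is rejected.
	fmt.Println(validateID("../../../../tmp/evil"))
}
```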
diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go
index da512547d5..bf94deb445 100644
--- a/graph/tags_unit_test.go
+++ b/graph/tags_unit_test.go
@@ -16,7 +16,7 @@ import (
const (
testImageName = "myapp"
- testImageID = "foo"
+ testImageID = "1a2d3c4d4e5fa2d2a21acea242a5e2345d3aefc3e7dfa2a2a2a21a2a2ad2d234"
)
func fakeTar() (io.Reader, error) {
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 0885f9131a..408b801615 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -1167,6 +1167,133 @@ func TestBuildCopyDisallowRemote(t *testing.T) {
logDone("build - copy - disallow copy from remote")
}
+func TestBuildAddBadLinks(t *testing.T) {
+ const (
+ dockerfile = `
+ FROM scratch
+ ADD links.tar /
+ ADD foo.txt /symlink/
+ `
+ targetFile = "foo.txt"
+ )
+ var (
+ name = "test-link-absolute"
+ )
+ defer deleteImages(name)
+ ctx, err := fakeContext(dockerfile, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ctx.Close()
+
+ tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-")
+ if err != nil {
+ t.Fatalf("failed to create temporary directory: %s", tempDir)
+ }
+ defer os.RemoveAll(tempDir)
+
+ symlinkTarget := fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir)
+ tarPath := filepath.Join(ctx.Dir, "links.tar")
+ nonExistingFile := filepath.Join(tempDir, targetFile)
+ fooPath := filepath.Join(ctx.Dir, targetFile)
+
+ tarOut, err := os.Create(tarPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tarWriter := tar.NewWriter(tarOut)
+
+ header := &tar.Header{
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: symlinkTarget,
+ Mode: 0755,
+ Uid: 0,
+ Gid: 0,
+ }
+
+ err = tarWriter.WriteHeader(header)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tarWriter.Close()
+ tarOut.Close()
+
+ foo, err := os.Create(fooPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer foo.Close()
+
+ if _, err := foo.WriteString("test"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := buildImageFromContext(name, ctx, true); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
+ t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
+ }
+
+ logDone("build - ADD must add files in container")
+}
+
+func TestBuildAddBadLinksVolume(t *testing.T) {
+ const (
+ dockerfileTemplate = `
+ FROM busybox
+ RUN ln -s /../../../../../../../../%s /x
+ VOLUME /x
+ ADD foo.txt /x/`
+ targetFile = "foo.txt"
+ )
+ var (
+ name = "test-link-absolute-volume"
+ dockerfile = ""
+ )
+ defer deleteImages(name)
+
+ tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-")
+ if err != nil {
+ t.Fatalf("failed to create temporary directory: %s", tempDir)
+ }
+ defer os.RemoveAll(tempDir)
+
+ dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir)
+ nonExistingFile := filepath.Join(tempDir, targetFile)
+
+ ctx, err := fakeContext(dockerfile, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ctx.Close()
+ fooPath := filepath.Join(ctx.Dir, targetFile)
+
+ foo, err := os.Create(fooPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer foo.Close()
+
+ if _, err := foo.WriteString("test"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := buildImageFromContext(name, ctx, true); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
+ t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
+ }
+
+ logDone("build - ADD should add files in volume")
+}
+
// Issue #5270 - ensure we throw a better error than "unexpected EOF"
// when we can't access files in the context.
func TestBuildWithInaccessibleFilesInContext(t *testing.T) {
@@ -2867,6 +2994,118 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi`
logDone("build - ADD tar")
}
+func TestBuildAddTarXz(t *testing.T) {
+ name := "testbuildaddtarxz"
+ defer deleteImages(name)
+
+ ctx := func() *FakeContext {
+ dockerfile := `
+ FROM busybox
+ ADD test.tar.xz /
+ RUN cat /test/foo | grep Hi`
+ tmpDir, err := ioutil.TempDir("", "fake-context")
+ testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
+ if err != nil {
+ t.Fatalf("failed to create test.tar archive: %v", err)
+ }
+ defer testTar.Close()
+
+ tw := tar.NewWriter(testTar)
+
+ if err := tw.WriteHeader(&tar.Header{
+ Name: "test/foo",
+ Size: 2,
+ }); err != nil {
+ t.Fatalf("failed to write tar file header: %v", err)
+ }
+ if _, err := tw.Write([]byte("Hi")); err != nil {
+ t.Fatalf("failed to write tar file content: %v", err)
+ }
+ if err := tw.Close(); err != nil {
+ t.Fatalf("failed to close tar archive: %v", err)
+ }
+ xzCompressCmd := exec.Command("xz", "test.tar")
+ xzCompressCmd.Dir = tmpDir
+ out, _, err := runCommandWithOutput(xzCompressCmd)
+ if err != nil {
+ t.Fatal(err, out)
+ }
+
+ if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
+ t.Fatalf("failed to open destination dockerfile: %v", err)
+ }
+ return &FakeContext{Dir: tmpDir}
+ }()
+
+ defer ctx.Close()
+
+ if _, err := buildImageFromContext(name, ctx, true); err != nil {
+ t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
+ }
+
+ logDone("build - ADD tar.xz")
+}
+
+func TestBuildAddTarXzGz(t *testing.T) {
+ name := "testbuildaddtarxzgz"
+ defer deleteImages(name)
+
+ ctx := func() *FakeContext {
+ dockerfile := `
+ FROM busybox
+ ADD test.tar.xz.gz /
+ RUN ls /test.tar.xz.gz`
+ tmpDir, err := ioutil.TempDir("", "fake-context")
+ testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
+ if err != nil {
+ t.Fatalf("failed to create test.tar archive: %v", err)
+ }
+ defer testTar.Close()
+
+ tw := tar.NewWriter(testTar)
+
+ if err := tw.WriteHeader(&tar.Header{
+ Name: "test/foo",
+ Size: 2,
+ }); err != nil {
+ t.Fatalf("failed to write tar file header: %v", err)
+ }
+ if _, err := tw.Write([]byte("Hi")); err != nil {
+ t.Fatalf("failed to write tar file content: %v", err)
+ }
+ if err := tw.Close(); err != nil {
+ t.Fatalf("failed to close tar archive: %v", err)
+ }
+
+ xzCompressCmd := exec.Command("xz", "test.tar")
+ xzCompressCmd.Dir = tmpDir
+ out, _, err := runCommandWithOutput(xzCompressCmd)
+ if err != nil {
+ t.Fatal(err, out)
+ }
+
+ gzipCompressCmd := exec.Command("gzip", "test.tar.xz")
+ gzipCompressCmd.Dir = tmpDir
+ out, _, err = runCommandWithOutput(gzipCompressCmd)
+ if err != nil {
+ t.Fatal(err, out)
+ }
+
+ if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
+ t.Fatalf("failed to open destination dockerfile: %v", err)
+ }
+ return &FakeContext{Dir: tmpDir}
+ }()
+
+ defer ctx.Close()
+
+ if _, err := buildImageFromContext(name, ctx, true); err != nil {
+ t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err)
+ }
+
+ logDone("build - ADD tar.xz.gz")
+}
+
func TestBuildFromGIT(t *testing.T) {
name := "testbuildfromgit"
defer deleteImages(name)
@@ -3175,3 +3414,86 @@ func TestBuildExoticShellInterpolation(t *testing.T) {
logDone("build - exotic shell interpolation")
}
+
+func TestBuildSymlinkBreakout(t *testing.T) {
+ name := "testbuildsymlinkbreakout"
+ tmpdir, err := ioutil.TempDir("", name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ ctx := filepath.Join(tmpdir, "context")
+ if err := os.MkdirAll(ctx, 0755); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(`
+ from busybox
+ add symlink.tar /
+ add inject /symlink/
+ `), 0644); err != nil {
+ t.Fatal(err)
+ }
+ inject := filepath.Join(ctx, "inject")
+ if err := ioutil.WriteFile(inject, nil, 0644); err != nil {
+ t.Fatal(err)
+ }
+ f, err := os.Create(filepath.Join(ctx, "symlink.tar"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ w := tar.NewWriter(f)
+ w.WriteHeader(&tar.Header{
+ Name: "symlink2",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "/../../../../../../../../../../../../../../",
+ Uid: os.Getuid(),
+ Gid: os.Getgid(),
+ })
+ w.WriteHeader(&tar.Header{
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: filepath.Join("symlink2", tmpdir),
+ Uid: os.Getuid(),
+ Gid: os.Getgid(),
+ })
+ w.Close()
+ f.Close()
+ if _, err := buildImageFromContext(name, &FakeContext{Dir: ctx}, false); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil {
+ t.Fatal("symlink breakout - inject")
+ } else if !os.IsNotExist(err) {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ logDone("build - symlink breakout")
+}
+
+func TestBuildXZHost(t *testing.T) {
+ name := "testbuildxzhost"
+ defer deleteImages(name)
+
+ ctx, err := fakeContext(`
+FROM busybox
+ADD xz /usr/local/sbin/
+RUN chmod 755 /usr/local/sbin/xz
+ADD test.xz /
+RUN [ ! -e /injected ]`,
+ map[string]string{
+ "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" +
+ "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" +
+ "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21",
+ "xz": "#!/bin/sh\ntouch /injected",
+ })
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ctx.Close()
+
+ if _, err := buildImageFromContext(name, ctx, true); err != nil {
+ t.Fatal(err)
+ }
+
+ logDone("build - xz host is being used")
+}
diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go
index aecc68edb4..a5e16bb214 100644
--- a/integration-cli/docker_cli_cp_test.go
+++ b/integration-cli/docker_cli_cp_test.go
@@ -371,3 +371,41 @@ func TestCpUnprivilegedUser(t *testing.T) {
logDone("cp - unprivileged user")
}
+
+func TestCpToDot(t *testing.T) {
+ out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test")
+ if err != nil || exitCode != 0 {
+ t.Fatal("failed to create a container", out, err)
+ }
+
+ cleanedContainerID := stripTrailingCharacters(out)
+ defer deleteContainer(cleanedContainerID)
+
+ out, _, err = dockerCmd(t, "wait", cleanedContainerID)
+ if err != nil || stripTrailingCharacters(out) != "0" {
+ t.Fatal("failed to set up container", out, err)
+ }
+
+ tmpdir, err := ioutil.TempDir("", "docker-integration")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ cwd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Chdir(cwd)
+ if err := os.Chdir(tmpdir); err != nil {
+ t.Fatal(err)
+ }
+ _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", ".")
+ if err != nil {
+ t.Fatalf("couldn't docker cp to \".\" path: %s", err)
+ }
+ content, err := ioutil.ReadFile("./test")
+ if string(content) != "lololol\n" {
+ t.Fatal("Wrong content in copied file %q, should be %q", content, "lololol\n")
+ }
+ logDone("cp - to dot path")
+}
diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go
index 8632247b7d..f15e6b51dd 100644
--- a/integration-cli/docker_cli_save_load_test.go
+++ b/integration-cli/docker_cli_save_load_test.go
@@ -62,6 +62,140 @@ func TestSaveAndLoadRepoStdout(t *testing.T) {
logDone("load - load a repo using stdout")
}
+// save a repo using xz compression and try to load it using stdout
+func TestSaveXzAndLoadRepoStdout(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "test-save-xz-gz-load-repo-stdout")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ tarballPath := filepath.Join(tempDir, "foobar-save-load-test.tar.xz.gz")
+
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+ out, _, err := runCommandWithOutput(runCmd)
+ if err != nil {
+ t.Fatalf("failed to create a container: %v %v", out, err)
+ }
+
+ cleanedContainerID := stripTrailingCharacters(out)
+
+ repoName := "foobar-save-load-test-xz-gz"
+
+ inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
+ out, _, err = runCommandWithOutput(inspectCmd)
+ if err != nil {
+ t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err)
+ }
+
+ commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+ out, _, err = runCommandWithOutput(commitCmd)
+ if err != nil {
+ t.Fatalf("failed to commit container: %v %v", out, err)
+ }
+
+ inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ before, _, err := runCommandWithOutput(inspectCmd)
+ if err != nil {
+ t.Fatalf("the repo should exist before saving it: %v %v", before, err)
+ }
+
+ saveCmdTemplate := `%v save %v | xz -c | gzip -c > %s`
+ saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName, tarballPath)
+ saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+ out, _, err = runCommandWithOutput(saveCmd)
+ if err != nil {
+ t.Fatalf("failed to save repo: %v %v", out, err)
+ }
+
+ deleteImages(repoName)
+
+ loadCmdFinal := fmt.Sprintf(`cat %s | docker load`, tarballPath)
+ loadCmd := exec.Command("bash", "-c", loadCmdFinal)
+ out, _, err = runCommandWithOutput(loadCmd)
+ if err == nil {
+ t.Fatalf("expected error, but succeeded with no error and output: %v", out)
+ }
+
+ inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ after, _, err := runCommandWithOutput(inspectCmd)
+ if err == nil {
+ t.Fatalf("the repo should not exist: %v", after)
+ }
+
+ deleteContainer(cleanedContainerID)
+ deleteImages(repoName)
+
+ logDone("load - save a repo with xz compression & load it using stdout")
+}
+
+// save a repo using xz+gz compression and try to load it using stdout
+func TestSaveXzGzAndLoadRepoStdout(t *testing.T) {
+ tempDir, err := ioutil.TempDir("", "test-save-xz-gz-load-repo-stdout")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempDir)
+
+ tarballPath := filepath.Join(tempDir, "foobar-save-load-test.tar.xz.gz")
+
+ runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true")
+ out, _, err := runCommandWithOutput(runCmd)
+ if err != nil {
+ t.Fatalf("failed to create a container: %v %v", out, err)
+ }
+
+ cleanedContainerID := stripTrailingCharacters(out)
+
+ repoName := "foobar-save-load-test-xz-gz"
+
+ inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID)
+ out, _, err = runCommandWithOutput(inspectCmd)
+ if err != nil {
+ t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err)
+ }
+
+ commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName)
+ out, _, err = runCommandWithOutput(commitCmd)
+ if err != nil {
+ t.Fatalf("failed to commit container: %v %v", out, err)
+ }
+
+ inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ before, _, err := runCommandWithOutput(inspectCmd)
+ if err != nil {
+ t.Fatalf("the repo should exist before saving it: %v %v", before, err)
+ }
+
+ saveCmdTemplate := `%v save %v | xz -c | gzip -c > %s`
+ saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName, tarballPath)
+ saveCmd := exec.Command("bash", "-c", saveCmdFinal)
+ out, _, err = runCommandWithOutput(saveCmd)
+ if err != nil {
+ t.Fatalf("failed to save repo: %v %v", out, err)
+ }
+
+ deleteImages(repoName)
+
+ loadCmdFinal := fmt.Sprintf(`cat %s | docker load`, tarballPath)
+ loadCmd := exec.Command("bash", "-c", loadCmdFinal)
+ out, _, err = runCommandWithOutput(loadCmd)
+ if err == nil {
+ t.Fatalf("expected error, but succeeded with no error and output: %v", out)
+ }
+
+ inspectCmd = exec.Command(dockerBinary, "inspect", repoName)
+ after, _, err := runCommandWithOutput(inspectCmd)
+ if err == nil {
+ t.Fatalf("the repo should not exist: %v", after)
+ }
+
+ deleteContainer(cleanedContainerID)
+ deleteImages(repoName)
+
+ logDone("load - save a repo with xz+gz compression & load it using stdout")
+}
+
func TestSaveSingleTag(t *testing.T) {
repoName := "foobar-save-single-tag-test"
diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go
index 155145f5d7..74c6014506 100644
--- a/pkg/archive/archive.go
+++ b/pkg/archive/archive.go
@@ -432,32 +432,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
return pipeReader, nil
}
-// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
-// and unpacks it into the directory at `path`.
-// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
-// FIXME: specify behavior when target path exists vs. doesn't exist.
-func Untar(archive io.Reader, dest string, options *TarOptions) error {
- dest = filepath.Clean(dest)
-
- if options == nil {
- options = &TarOptions{}
- }
-
- if archive == nil {
- return fmt.Errorf("Empty archive")
- }
-
- if options.Excludes == nil {
- options.Excludes = []string{}
- }
-
- decompressedArchive, err := DecompressStream(archive)
- if err != nil {
- return err
- }
- defer decompressedArchive.Close()
-
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
tr := tar.NewReader(decompressedArchive)
trBuf := pools.BufioReader32KPool.Get(nil)
defer pools.BufioReader32KPool.Put(trBuf)
@@ -498,10 +473,13 @@ loop:
}
}
- // Prevent symlink breakout
path := filepath.Join(dest, hdr.Name)
- if !strings.HasPrefix(path, dest) {
- return breakoutError(fmt.Errorf("%q is outside of %q", path, dest))
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, "..") {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
// If path exits we almost always just want to remove and replace it
@@ -537,10 +515,33 @@ loop:
return err
}
}
-
return nil
}
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(archive io.Reader, dest string, options *TarOptions) error {
+ if archive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.Excludes == nil {
+ options.Excludes = []string{}
+ }
+ decompressedArchive, err := DecompressStream(archive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ return Unpack(decompressedArchive, dest, options)
+}
+
func (archiver *Archiver) TarUntar(src, dst string) error {
log.Debugf("TarUntar(%s %s)", src, dst)
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
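
The rewritten breakout check above swaps a raw strings.HasPrefix(path, dest) test for filepath.Rel: a plain string prefix does not respect path-component boundaries, so a sibling such as /data/foobar passes a prefix check against /data/foo. A minimal sketch of the difference (paths are illustrative):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	dest := "/data/foo"
	name := "../foobar" // tar entry trying to land next to dest

	path := filepath.Join(dest, name) // "/data/foobar"

	// Old check: accepted, because "/data/foobar" starts with "/data/foo".
	fmt.Println(strings.HasPrefix(path, dest)) // true -> breakout missed

	// New check: the relative path makes the escape explicit as a leading "..".
	rel, err := filepath.Rel(dest, path)
	if err != nil {
		panic(err)
	}
	fmt.Println(rel, strings.HasPrefix(rel, "..")) // "../foobar" true -> rejected
}
```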
diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go
index 5ed1a1d906..80bb197468 100644
--- a/pkg/archive/diff.go
+++ b/pkg/archive/diff.go
@@ -21,20 +21,7 @@ func mkdev(major int64, minor int64) uint32 {
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
-// ApplyLayer parses a diff in the standard layer format from `layer`, and
-// applies it to the directory `dest`.
-func ApplyLayer(dest string, layer ArchiveReader) error {
- dest = filepath.Clean(dest)
-
- // We need to be able to set any perms
- oldmask := syscall.Umask(0)
- defer syscall.Umask(oldmask)
-
- layer, err := DecompressStream(layer)
- if err != nil {
- return err
- }
-
+func UnpackLayer(dest string, layer ArchiveReader) error {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
@@ -94,12 +81,14 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
}
path := filepath.Join(dest, hdr.Name)
- base := filepath.Base(path)
-
- // Prevent symlink breakout
- if !strings.HasPrefix(path, dest) {
- return breakoutError(fmt.Errorf("%q is outside of %q", path, dest))
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
}
+ if strings.HasPrefix(rel, "..") {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
if strings.HasPrefix(base, ".wh.") {
originalBase := base[len(".wh."):]
@@ -159,6 +148,20 @@ func ApplyLayer(dest string, layer ArchiveReader) error {
return err
}
}
-
return nil
}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`.
+func ApplyLayer(dest string, layer ArchiveReader) error {
+ dest = filepath.Clean(dest)
+ // We need to be able to set any perms
+ oldmask := syscall.Umask(0)
+ defer syscall.Umask(oldmask)
+
+ layer, err := DecompressStream(layer)
+ if err != nil {
+ return err
+ }
+ return UnpackLayer(dest, layer)
+}
diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go
index fc2bea2c40..0077f930d6 100644
--- a/pkg/chrootarchive/archive.go
+++ b/pkg/chrootarchive/archive.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io"
"os"
+ "path/filepath"
"runtime"
"strings"
"syscall"
@@ -15,34 +16,48 @@ import (
"github.com/docker/docker/pkg/reexec"
)
+var chrootArchiver = &archive.Archiver{Untar}
+
+func chroot(path string) error {
+ if err := syscall.Chroot(path); err != nil {
+ return err
+ }
+ return syscall.Chdir("/")
+}
+
func untar() {
runtime.LockOSThread()
flag.Parse()
-
- if err := syscall.Chroot(flag.Arg(0)); err != nil {
- fatal(err)
- }
- if err := syscall.Chdir("/"); err != nil {
+ if err := chroot(flag.Arg(0)); err != nil {
fatal(err)
}
- options := new(archive.TarOptions)
- dec := json.NewDecoder(strings.NewReader(flag.Arg(1)))
- if err := dec.Decode(options); err != nil {
+ var options *archive.TarOptions
+ if err := json.NewDecoder(strings.NewReader(flag.Arg(1))).Decode(&options); err != nil {
fatal(err)
}
- if err := archive.Untar(os.Stdin, "/", options); err != nil {
+ if err := archive.Unpack(os.Stdin, "/", options); err != nil {
fatal(err)
}
+ // fully consume stdin in case it is zero padded
+ flush(os.Stdin)
os.Exit(0)
}
-var (
- chrootArchiver = &archive.Archiver{Untar}
-)
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ if options == nil {
+ options = &archive.TarOptions{}
+ }
+ if options.Excludes == nil {
+ options.Excludes = []string{}
+ }
-func Untar(archive io.Reader, dest string, options *archive.TarOptions) error {
- var buf bytes.Buffer
- enc := json.NewEncoder(&buf)
+ var (
+ buf bytes.Buffer
+ enc = json.NewEncoder(&buf)
+ )
if err := enc.Encode(options); err != nil {
return fmt.Errorf("Untar json encode: %v", err)
}
@@ -51,9 +66,15 @@ func Untar(archive io.Reader, dest string, options *archive.TarOptions) error {
return err
}
}
+ dest = filepath.Clean(dest)
+ decompressedArchive, err := archive.DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
cmd := reexec.Command("docker-untar", dest, buf.String())
- cmd.Stdin = archive
+ cmd.Stdin = decompressedArchive
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Untar %s %s", err, out)
diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go
index 69e18e3199..0fe3d64f95 100644
--- a/pkg/chrootarchive/archive_test.go
+++ b/pkg/chrootarchive/archive_test.go
@@ -1,10 +1,12 @@
package chrootarchive
import (
+ "io"
"io/ioutil"
"os"
"path/filepath"
"testing"
+ "time"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
@@ -42,3 +44,58 @@ func TestChrootTarUntar(t *testing.T) {
t.Fatal(err)
}
}
+
+type slowEmptyTarReader struct {
+ size int
+ offset int
+ chunkSize int
+}
+
+// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null")
+func (s *slowEmptyTarReader) Read(p []byte) (int, error) {
+ time.Sleep(100 * time.Millisecond)
+ count := s.chunkSize
+ if len(p) < s.chunkSize {
+ count = len(p)
+ }
+ for i := 0; i < count; i++ {
+ p[i] = 0
+ }
+ s.offset += count
+ if s.offset > s.size {
+ return count, io.EOF
+ }
+ return count, nil
+}
+
+func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ dest := filepath.Join(tmpdir, "dest")
+ if err := os.MkdirAll(dest, 0700); err != nil {
+ t.Fatal(err)
+ }
+ stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024}
+ if err := Untar(stream, dest, nil); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ dest := filepath.Join(tmpdir, "dest")
+ if err := os.MkdirAll(dest, 0700); err != nil {
+ t.Fatal(err)
+ }
+ stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024}
+ if err := ApplyLayer(dest, stream); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/pkg/chrootarchive/diff.go b/pkg/chrootarchive/diff.go
index 2653aefe9d..d4e9529b6d 100644
--- a/pkg/chrootarchive/diff.go
+++ b/pkg/chrootarchive/diff.go
@@ -3,8 +3,10 @@ package chrootarchive
import (
"flag"
"fmt"
+ "io"
"io/ioutil"
"os"
+ "path/filepath"
"runtime"
"syscall"
@@ -16,28 +18,40 @@ func applyLayer() {
runtime.LockOSThread()
flag.Parse()
- if err := syscall.Chroot(flag.Arg(0)); err != nil {
- fatal(err)
- }
- if err := syscall.Chdir("/"); err != nil {
+ if err := chroot(flag.Arg(0)); err != nil {
fatal(err)
}
+ // We need to be able to set any perms
+ oldmask := syscall.Umask(0)
+ defer syscall.Umask(oldmask)
tmpDir, err := ioutil.TempDir("/", "temp-docker-extract")
if err != nil {
fatal(err)
}
os.Setenv("TMPDIR", tmpDir)
- if err := archive.ApplyLayer("/", os.Stdin); err != nil {
- os.RemoveAll(tmpDir)
+ err = archive.UnpackLayer("/", os.Stdin)
+ os.RemoveAll(tmpDir)
+ if err != nil {
fatal(err)
}
os.RemoveAll(tmpDir)
+ flush(os.Stdin)
os.Exit(0)
}
func ApplyLayer(dest string, layer archive.ArchiveReader) error {
+ dest = filepath.Clean(dest)
+ decompressed, err := archive.DecompressStream(layer)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if c, ok := decompressed.(io.Closer); ok {
+ c.Close()
+ }
+ }()
cmd := reexec.Command("docker-applyLayer", dest)
- cmd.Stdin = layer
+ cmd.Stdin = decompressed
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("ApplyLayer %s %s", err, out)
diff --git a/pkg/chrootarchive/init.go b/pkg/chrootarchive/init.go
index b548e9fe72..4116026eff 100644
--- a/pkg/chrootarchive/init.go
+++ b/pkg/chrootarchive/init.go
@@ -2,6 +2,8 @@ package chrootarchive
import (
"fmt"
+ "io"
+ "io/ioutil"
"os"
"github.com/docker/docker/pkg/reexec"
@@ -16,3 +18,9 @@ func fatal(err error) {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
+
+// flush consumes all the bytes from the reader discarding
+// any errors
+func flush(r io.Reader) {
+ io.Copy(ioutil.Discard, r)
+}
diff --git a/pkg/symlink/LICENSE.APACHE b/pkg/symlink/LICENSE.APACHE
new file mode 100644
index 0000000000..27448585ad
--- /dev/null
+++ b/pkg/symlink/LICENSE.APACHE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/pkg/symlink/LICENSE.BSD b/pkg/symlink/LICENSE.BSD
new file mode 100644
index 0000000000..ebcfbcc779
--- /dev/null
+++ b/pkg/symlink/LICENSE.BSD
@@ -0,0 +1,27 @@
+Copyright (c) 2014 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/symlink/MAINTAINERS b/pkg/symlink/MAINTAINERS
index 68a97d2fc2..51a41a5b60 100644
--- a/pkg/symlink/MAINTAINERS
+++ b/pkg/symlink/MAINTAINERS
@@ -1,2 +1,3 @@
-Michael Crosby <michael@crosbymichael.com> (@crosbymichael)
-Victor Vieux <vieux@docker.com> (@vieux)
+Tibor Vass <teabee89@gmail.com> (@tiborvass)
+Cristian Staretu <cristian.staretu@gmail.com> (@unclejack)
+Tianon Gravi <admwiggin@gmail.com> (@tianon)
diff --git a/pkg/symlink/README.md b/pkg/symlink/README.md
new file mode 100644
index 0000000000..0d1dbb70e6
--- /dev/null
+++ b/pkg/symlink/README.md
@@ -0,0 +1,5 @@
+Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks
+from the [Go standard library](https://golang.org/pkg/path/filepath).
+
+The code from filepath.EvalSymlinks has been adapted in fs.go.
+Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.
diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go
index 6ce99c6bda..b4bdff24dd 100644
--- a/pkg/symlink/fs.go
+++ b/pkg/symlink/fs.go
@@ -1,101 +1,131 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+// This code is a modified version of path/filepath/symlink.go from the Go standard library.
+
package symlink
import (
- "fmt"
+ "bytes"
+ "errors"
"os"
- "path"
"path/filepath"
"strings"
)
-const maxLoopCounter = 100
-
-// FollowSymlink will follow an existing link and scope it to the root
-// path provided.
-// The role of this function is to return an absolute path in the root
-// or normalize to the root if the symlink leads to a path which is
-// outside of the root.
-// Errors encountered while attempting to follow the symlink in path
-// will be reported.
-// Normalizations to the root don't constitute errors.
-func FollowSymlinkInScope(link, root string) (string, error) {
- root, err := filepath.Abs(root)
+// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an absolute path
+func FollowSymlinkInScope(path, root string) (string, error) {
+ path, err := filepath.Abs(path)
if err != nil {
return "", err
}
-
- link, err = filepath.Abs(link)
+ root, err = filepath.Abs(root)
if err != nil {
return "", err
}
+ return evalSymlinksInScope(path, root)
+}
- if link == root {
- return root, nil
+// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
+// a result guaranteed to be contained within the scope `root`, at the time of the call.
+// Symlinks in `root` are not evaluated and left as-is.
+// Errors encountered while attempting to evaluate symlinks in path will be returned.
+// Non-existing paths are valid and do not constitute an error.
+// `path` has to contain `root` as a prefix, or else an error will be returned.
+// Trying to break out from `root` does not constitute an error.
+//
+// Example:
+// If /foo/bar -> /outside,
+// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created and not to subsequently create additional symlinks that could potentially make a
+// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) {
+ root = filepath.Clean(root)
+ if path == root {
+ return path, nil
}
-
- if !strings.HasPrefix(filepath.Dir(link), root) {
- return "", fmt.Errorf("%s is not within %s", link, root)
+ if !strings.HasPrefix(path, root) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
}
+ const maxIter = 255
+ originalPath := path
+ // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
+ path = path[len(root):]
+ if root == string(filepath.Separator) {
+ path = string(filepath.Separator) + path
+ }
+ if !strings.HasPrefix(path, string(filepath.Separator)) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+ }
+ path = filepath.Clean(path)
+ // consume path by taking each frontmost path element,
+ // expanding it if it's a symlink, and appending it to b
+ var b bytes.Buffer
+ // b here will always be considered to be the "current absolute path inside
+ // root" when we append paths to it, we also append a slash and use
+ // filepath.Clean after the loop to trim the trailing slash
+ for n := 0; path != ""; n++ {
+ if n > maxIter {
+ return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
+ }
- prev := "/"
-
- for _, p := range strings.Split(link, "/") {
- prev = filepath.Join(prev, p)
-
- loopCounter := 0
- for {
- loopCounter++
-
- if loopCounter >= maxLoopCounter {
- return "", fmt.Errorf("loopCounter reached MAX: %v", loopCounter)
- }
-
- if !strings.HasPrefix(prev, root) {
- // Don't resolve symlinks outside of root. For example,
- // we don't have to check /home in the below.
- //
- // /home -> usr/home
- // FollowSymlinkInScope("/home/bob/foo/bar", "/home/bob/foo")
- break
- }
-
- stat, err := os.Lstat(prev)
- if err != nil {
- if os.IsNotExist(err) {
- break
- }
- return "", err
- }
-
- // let's break if we're not dealing with a symlink
- if stat.Mode()&os.ModeSymlink != os.ModeSymlink {
- break
- }
+ // find next path component, p
+ i := strings.IndexRune(path, filepath.Separator)
+ var p string
+ if i == -1 {
+ p, path = path, ""
+ } else {
+ p, path = path[:i], path[i+1:]
+ }
- // process the symlink
- dest, err := os.Readlink(prev)
- if err != nil {
- return "", err
- }
+ if p == "" {
+ continue
+ }
- if path.IsAbs(dest) {
- prev = filepath.Join(root, dest)
- } else {
- prev, _ = filepath.Abs(prev)
+ // this takes a b.String() like "b/../" and a p like "c" and turns it
+ // into "/b/../c" which then gets filepath.Cleaned into "/c" and then
+ // root gets prepended and we Clean again (to remove any trailing slash
+ // if the first Clean gave us just "/")
+ cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
+ if cleanP == string(filepath.Separator) {
+ // never Lstat "/" itself
+ b.Reset()
+ continue
+ }
+ fullP := filepath.Clean(root + cleanP)
+
+ fi, err := os.Lstat(fullP)
+ if os.IsNotExist(err) {
+ // if p does not exist, accept it
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+ if err != nil {
+ return "", err
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ b.WriteString(p + string(filepath.Separator))
+ continue
+ }
- dir := filepath.Dir(prev)
- prev = filepath.Join(dir, dest)
- if dir == root && !strings.HasPrefix(prev, root) {
- prev = root
- }
- if len(prev) < len(root) || (len(prev) == len(root) && prev != root) {
- prev = filepath.Join(root, filepath.Base(dest))
- }
- }
+ // it's a symlink, put it at the front of path
+ dest, err := os.Readlink(fullP)
+ if err != nil {
+ return "", err
}
+ if filepath.IsAbs(dest) {
+ b.Reset()
+ }
+ path = dest + string(filepath.Separator) + path
}
- if prev == "/" {
- prev = root
- }
- return prev, nil
+
+ // see note above on "fullP := ..." for why this is double-cleaned and
+ // what's happening here
+ return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
}
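
A hedged usage sketch of the rewritten FollowSymlinkInScope: resolution is confined to the scope, so an absolute symlink target is re-rooted under it rather than escaping to the host. The temporary directory and symlink below are illustrative assumptions:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	root, err := ioutil.TempDir("", "scope-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	// <root>/data -> /etc (an absolute target pointing outside the scope).
	if err := os.Symlink("/etc", filepath.Join(root, "data")); err != nil {
		panic(err)
	}

	// The result stays inside root: <root>/etc/passwd, not /etc/passwd.
	p, err := symlink.FollowSymlinkInScope(filepath.Join(root, "data", "passwd"), root)
	if err != nil {
		panic(err)
	}
	fmt.Println(p)
}
```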
diff --git a/pkg/symlink/fs_test.go b/pkg/symlink/fs_test.go
index 0e2f948b6a..6b2496c4e0 100644
--- a/pkg/symlink/fs_test.go
+++ b/pkg/symlink/fs_test.go
@@ -1,248 +1,402 @@
+// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE
+
package symlink
import (
+ "fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
)
-func abs(t *testing.T, p string) string {
- o, err := filepath.Abs(p)
- if err != nil {
- t.Fatal(err)
- }
- return o
+type dirOrLink struct {
+ path string
+ target string
}
-func TestFollowSymLinkNormal(t *testing.T) {
- link := "testdata/fs/a/d/c/data"
+func makeFs(tmpdir string, fs []dirOrLink) error {
+ for _, s := range fs {
+ s.path = filepath.Join(tmpdir, s.path)
+ if s.target == "" {
+ os.MkdirAll(s.path, 0755)
+ continue
+ }
+ if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil {
+ return err
+ }
+ if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) {
+ return err
+ }
+ }
+ return nil
+}
- rewrite, err := FollowSymlinkInScope(link, "testdata")
+func testSymlink(tmpdir, path, expected, scope string) error {
+ rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope))
if err != nil {
- t.Fatal(err)
+ return err
}
-
- if expected := abs(t, "testdata/b/c/data"); expected != rewrite {
- t.Fatalf("Expected %s got %s", expected, rewrite)
+ expected, err = filepath.Abs(filepath.Join(tmpdir, expected))
+ if err != nil {
+ return err
+ }
+ if expected != rewrite {
+ return fmt.Errorf("Expected %q got %q", expected, rewrite)
}
+ return nil
}
-func TestFollowSymLinkRelativePath(t *testing.T) {
- link := "testdata/fs/i"
-
- rewrite, err := FollowSymlinkInScope(link, "testdata")
+func TestFollowSymlinkAbsolute(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute")
if err != nil {
t.Fatal(err)
}
-
- if expected := abs(t, "testdata/fs/a"); expected != rewrite {
- t.Fatalf("Expected %s got %s", expected, rewrite)
+ defer os.RemoveAll(tmpdir)
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil {
+ t.Fatal(err)
}
}
-func TestFollowSymLinkUnderLinkedDir(t *testing.T) {
- dir, err := ioutil.TempDir("", "docker-fs-test")
+func TestFollowSymlinkRelativePath(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath")
if err != nil {
t.Fatal(err)
}
- defer os.RemoveAll(dir)
-
- os.Mkdir(filepath.Join(dir, "realdir"), 0700)
- os.Symlink("realdir", filepath.Join(dir, "linkdir"))
-
- linkDir := filepath.Join(dir, "linkdir", "foo")
- dirUnderLinkDir := filepath.Join(dir, "linkdir", "foo", "bar")
- os.MkdirAll(dirUnderLinkDir, 0700)
+ defer os.RemoveAll(tmpdir)
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+}
- rewrite, err := FollowSymlinkInScope(dirUnderLinkDir, linkDir)
+func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope")
if err != nil {
t.Fatal(err)
}
-
- if rewrite != dirUnderLinkDir {
- t.Fatalf("Expected %s got %s", dirUnderLinkDir, rewrite)
+ defer os.RemoveAll(tmpdir)
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "linkdir", target: "realdir"},
+ {path: "linkdir/foo/bar"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil {
+ t.Fatal(err)
}
}
-func TestFollowSymLinkRandomString(t *testing.T) {
+func TestFollowSymlinkInvalidScopePathPair(t *testing.T) {
if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil {
- t.Fatal("Random string should fail but didn't")
+ t.Fatal("expected an error")
}
}
-func TestFollowSymLinkLastLink(t *testing.T) {
- link := "testdata/fs/a/d"
-
- rewrite, err := FollowSymlinkInScope(link, "testdata")
+func TestFollowSymlinkLastLink(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink")
if err != nil {
t.Fatal(err)
}
-
- if expected := abs(t, "testdata/b"); expected != rewrite {
- t.Fatalf("Expected %s got %s", expected, rewrite)
+ defer os.RemoveAll(tmpdir)
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil {
+ t.Fatal(err)
}
}
-func TestFollowSymLinkRelativeLink(t *testing.T) {
- link := "testdata/fs/a/e/c/data"
-
- rewrite, err := FollowSymlinkInScope(link, "testdata")
+func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope")
if err != nil {
t.Fatal(err)
}
-
- if expected := abs(t, "testdata/fs/b/c/data"); expected != rewrite {
- t.Fatalf("Expected %s got %s", expected, rewrite)
+ defer os.RemoveAll(tmpdir)
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+ // avoid letting symlink e lead us to ../b
+ // normalize to "testdata/fs/a"
+ if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil {
+ t.Fatal(err)
}
}
-func TestFollowSymLinkRelativeLinkScope(t *testing.T) {
+func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil {
+ t.Fatal(err)
+ }
// avoid letting symlink f lead us out of the "testdata" scope
// we don't normalize because symlink f is in scope and there is no
// information leak
- {
- link := "testdata/fs/a/f"
-
- rewrite, err := FollowSymlinkInScope(link, "testdata")
- if err != nil {
- t.Fatal(err)
- }
-
- if expected := abs(t, "testdata/test"); expected != rewrite {
- t.Fatalf("Expected %s got %s", expected, rewrite)
- }
+ if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil {
+ t.Fatal(err)
}
-
// avoid letting symlink f lead us out of the "testdata/fs" scope
// we don't normalize because symlink f is in scope and there is no
// information leak
- {
- link := "testdata/fs/a/f"
-
- rewrite, err := FollowSymlinkInScope(link, "testdata/fs")
- if err != nil {
- t.Fatal(err)
- }
+ if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil {
+ t.Fatal(err)
+ }
+}
- if expected := abs(t, "testdata/fs/test"); expected != rewrite {
- t.Fatalf("Expected %s got %s", expected, rewrite)
- }
+func TestFollowSymlinkRelativeLinkChain(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain")
+ if err != nil {
+ t.Fatal(err)
}
+ defer os.RemoveAll(tmpdir)
 // avoid letting symlink g (pointed at by symlink h) take us out of scope
// TODO: we should probably normalize to scope here because ../[....]/root
// is out of scope and we leak information
- {
- link := "testdata/fs/b/h"
-
- rewrite, err := FollowSymlinkInScope(link, "testdata")
- if err != nil {
- t.Fatal(err)
- }
-
- if expected := abs(t, "testdata/root"); expected != rewrite {
- t.Fatalf("Expected %s got %s", expected, rewrite)
- }
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "testdata/fs/b/h", target: "../g"},
+ {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"},
+ }); err != nil {
+ t.Fatal(err)
}
+ if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+}
- // avoid letting allowing symlink e lead us to ../b
- // normalize to the "testdata/fs/a"
- {
- link := "testdata/fs/a/e"
-
- rewrite, err := FollowSymlinkInScope(link, "testdata/fs/a")
- if err != nil {
- t.Fatal(err)
- }
-
- if expected := abs(t, "testdata/fs/a"); expected != rewrite {
- t.Fatalf("Expected %s got %s", expected, rewrite)
- }
+func TestFollowSymlinkBreakoutPath(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath")
+ if err != nil {
+ t.Fatal(err)
}
+ defer os.RemoveAll(tmpdir)
// avoid letting symlink -> ../directory/file escape from scope
// normalize to "testdata/fs/j"
- {
- link := "testdata/fs/j/k"
-
- rewrite, err := FollowSymlinkInScope(link, "testdata/fs/j")
- if err != nil {
- t.Fatal(err)
- }
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil {
+ t.Fatal(err)
+ }
+}
- if expected := abs(t, "testdata/fs/j"); expected != rewrite {
- t.Fatalf("Expected %s got %s", expected, rewrite)
- }
+func TestFollowSymlinkToRoot(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot")
+ if err != nil {
+ t.Fatal(err)
}
+ defer os.RemoveAll(tmpdir)
// make sure we don't allow escaping to /
// normalize to dir
- {
- dir, err := ioutil.TempDir("", "docker-fs-test")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
-
- linkFile := filepath.Join(dir, "foo")
- os.Mkdir(filepath.Join(dir, ""), 0700)
- os.Symlink("/", linkFile)
-
- rewrite, err := FollowSymlinkInScope(linkFile, dir)
- if err != nil {
- t.Fatal(err)
- }
+ if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+ t.Fatal(err)
+ }
+}
- if rewrite != dir {
- t.Fatalf("Expected %s got %s", dir, rewrite)
- }
+func TestFollowSymlinkSlashDotdot(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot")
+ if err != nil {
+ t.Fatal(err)
}
+ defer os.RemoveAll(tmpdir)
+ tmpdir = filepath.Join(tmpdir, "dir", "subdir")
// make sure we don't allow escaping to /
// normalize to dir
- {
- dir, err := ioutil.TempDir("", "docker-fs-test")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
-
- linkFile := filepath.Join(dir, "foo")
- os.Mkdir(filepath.Join(dir, ""), 0700)
- os.Symlink("/../../", linkFile)
-
- rewrite, err := FollowSymlinkInScope(linkFile, dir)
- if err != nil {
- t.Fatal(err)
- }
+ if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+ t.Fatal(err)
+ }
+}
- if rewrite != dir {
- t.Fatalf("Expected %s got %s", dir, rewrite)
- }
+func TestFollowSymlinkDotdot(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot")
+ if err != nil {
+ t.Fatal(err)
}
+ defer os.RemoveAll(tmpdir)
+ tmpdir = filepath.Join(tmpdir, "dir", "subdir")
// make sure we stay in scope without leaking information
// this also checks for escaping to /
// normalize to dir
- {
- dir, err := ioutil.TempDir("", "docker-fs-test")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
+ if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+ t.Fatal(err)
+ }
+}
- linkFile := filepath.Join(dir, "foo")
- os.Mkdir(filepath.Join(dir, ""), 0700)
- os.Symlink("../../", linkFile)
+func TestFollowSymlinkRelativePath2(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
- rewrite, err := FollowSymlinkInScope(linkFile, dir)
- if err != nil {
- t.Fatal(err)
- }
+ if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil {
+ t.Fatal(err)
+ }
+}
- if rewrite != dir {
- t.Fatalf("Expected %s got %s", dir, rewrite)
- }
+func TestFollowSymlinkScopeLink(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root2"},
+ {path: "root", target: "root2"},
+ {path: "root2/foo", target: "../bar"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkRootScope(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ expected, err := filepath.EvalSymlinks(tmpdir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ rewrite, err := FollowSymlinkInScope(tmpdir, "/")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rewrite != expected {
+ t.Fatalf("expected %q got %q", expected, rewrite)
+ }
+}
+
+func TestFollowSymlinkEmpty(t *testing.T) {
+ res, err := FollowSymlinkInScope("", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res != wd {
+ t.Fatal("expected %q got %q", wd, res)
+ }
+}
+
+func TestFollowSymlinkCircular(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
+ t.Fatal("expected an error for foo -> foo")
+ }
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root/bar", target: "baz"},
+ {path: "root/baz", target: "../bak"},
+ {path: "root/bak", target: "/bar"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
+ t.Fatal("expected an error for bar -> baz -> bak -> bar")
+ }
+}
+
+func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root2"},
+ {path: "root", target: "root2"},
+ {path: "root/a", target: "r/s"},
+ {path: "root/r", target: "../root/t"},
+ {path: "root/root/t/s/b", target: "/../u"},
+ {path: "root/u/c", target: "."},
+ {path: "root/u/x/y", target: "../v"},
+ {path: "root/u/v", target: "/../w"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkBreakoutNonExistent(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root/slash", target: "/"},
+ {path: "root/sym", target: "/idontexist/../slash"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkNoLexicalCleaning(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root/sym", target: "/foo/bar"},
+ {path: "root/hello", target: "/sym/../baz"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil {
+ t.Fatal(err)
}
}
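
The makeFs/testSymlink helpers introduced above make further scenarios cheap to express; as a sketch (a hypothetical extra test, not part of this patch), an absolute-target breakout check could look like:

	func TestFollowSymlinkAbsoluteBreakout(t *testing.T) {
		// Hypothetical example built on the makeFs/testSymlink helpers above.
		tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsoluteBreakout")
		if err != nil {
			t.Fatal(err)
		}
		defer os.RemoveAll(tmpdir)

		// root/escape points at the absolute path /etc; resolution must re-root
		// it under the "root" scope instead of reaching the real /etc.
		if err := makeFs(tmpdir, []dirOrLink{{path: "root/escape", target: "/etc"}}); err != nil {
			t.Fatal(err)
		}
		if err := testSymlink(tmpdir, "root/escape/passwd", "root/etc/passwd", "root"); err != nil {
			t.Fatal(err)
		}
	}
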
diff --git a/pkg/symlink/testdata/fs/a/d b/pkg/symlink/testdata/fs/a/d
deleted file mode 120000
index 28abc96048..0000000000
--- a/pkg/symlink/testdata/fs/a/d
+++ /dev/null
@@ -1 +0,0 @@
-/b \ No newline at end of file
diff --git a/pkg/symlink/testdata/fs/a/e b/pkg/symlink/testdata/fs/a/e
deleted file mode 120000
index 42532fe13c..0000000000
--- a/pkg/symlink/testdata/fs/a/e
+++ /dev/null
@@ -1 +0,0 @@
-../b \ No newline at end of file
diff --git a/pkg/symlink/testdata/fs/a/f b/pkg/symlink/testdata/fs/a/f
deleted file mode 120000
index 21de7edc0a..0000000000
--- a/pkg/symlink/testdata/fs/a/f
+++ /dev/null
@@ -1 +0,0 @@
-../../../../test \ No newline at end of file
diff --git a/pkg/symlink/testdata/fs/b/h b/pkg/symlink/testdata/fs/b/h
deleted file mode 120000
index 24387a68fb..0000000000
--- a/pkg/symlink/testdata/fs/b/h
+++ /dev/null
@@ -1 +0,0 @@
-../g \ No newline at end of file
diff --git a/pkg/symlink/testdata/fs/g b/pkg/symlink/testdata/fs/g
deleted file mode 120000
index 0ce5de0647..0000000000
--- a/pkg/symlink/testdata/fs/g
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../../../../../../../../root \ No newline at end of file
diff --git a/pkg/symlink/testdata/fs/i b/pkg/symlink/testdata/fs/i
deleted file mode 120000
index 2e65efe2a1..0000000000
--- a/pkg/symlink/testdata/fs/i
+++ /dev/null
@@ -1 +0,0 @@
-a \ No newline at end of file
diff --git a/pkg/symlink/testdata/fs/j/k b/pkg/symlink/testdata/fs/j/k
deleted file mode 120000
index f559e8fda2..0000000000
--- a/pkg/symlink/testdata/fs/j/k
+++ /dev/null
@@ -1 +0,0 @@
-../i/a \ No newline at end of file
diff --git a/registry/registry.go b/registry/registry.go
index a03790af03..e0285a2336 100644
--- a/registry/registry.go
+++ b/registry/registry.go
@@ -23,7 +23,6 @@ var (
ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
ErrDoesNotExist = errors.New("Image does not exist")
errLoginRequired = errors.New("Authentication is required.")
- validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
validNamespace = regexp.MustCompile(`^([a-z0-9_]{4,30})$`)
validRepo = regexp.MustCompile(`^([a-z0-9-_.]+)$`)
)
@@ -177,7 +176,8 @@ func validateRepositoryName(repositoryName string) error {
namespace = "library"
name = nameParts[0]
- if validHex.MatchString(name) {
+ // the repository name must not be a valid image ID
+ if err := utils.ValidateID(name); err == nil {
return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
}
} else {
diff --git a/utils/utils.go b/utils/utils.go
index 792b80bd51..4c65f136aa 100644
--- a/utils/utils.go
+++ b/utils/utils.go
@@ -31,6 +31,10 @@ type KeyValuePair struct {
Value string
}
+var (
+ validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+)
+
// Request a given URL and return an io.Reader
func Download(url string) (resp *http.Response, err error) {
if resp, err = http.Get(url); err != nil {
@@ -190,11 +194,9 @@ func GenerateRandomID() string {
}
func ValidateID(id string) error {
- if id == "" {
- return fmt.Errorf("Id can't be empty")
- }
- if strings.Contains(id, ":") {
- return fmt.Errorf("Invalid character in id: ':'")
+ if ok := validHex.MatchString(id); !ok {
+ err := fmt.Errorf("image ID '%s' is invalid", id)
+ return err
}
return nil
}
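
A minimal sketch of the tightened ValidateID (the sample IDs are hypothetical): a single regexp now rejects everything that is not exactly 64 lowercase hex characters, which subsumes the old empty-string and ':' checks, and registry's validateRepositoryName above relies on the same call to reject repository names that look like image IDs.

	package main

	import (
		"fmt"
		"strings"

		"github.com/docker/docker/utils"
	)

	func main() {
		ids := []string{
			strings.Repeat("a", 64), // 64 lowercase hex characters: valid
			"",                      // previously caught by the explicit empty check
			"abc:def",               // previously caught by the ':' check
			"ABCDEF",                // wrong length / uppercase: invalid
		}
		for _, id := range ids {
			fmt.Printf("%q -> %v\n", id, utils.ValidateID(id))
		}
	}
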