author     Sebastiaan van Stijn <github@gone.nl>    2023-03-12 13:48:44 +0100
committer  Sebastiaan van Stijn <github@gone.nl>    2023-03-14 11:38:04 +0100
commit     6c65a9a07fbc415a4fb7aadc6dfaa5f6cc395a75 (patch)
tree       5463d9fb67315d0cd4f05fd222f6f1679c1e9595
parent     e3c642d1eacb43ef0b3e2abe4a210dbbbac3b654 (diff)
download   docker-6c65a9a07fbc415a4fb7aadc6dfaa5f6cc395a75.tar.gz
volumes: fix error-handling when removing volumes with swarm enabled
Commit 3246db3755698455740c812c6477f43e74924fa7 added handling for removing
cluster volumes, but in some conditions, this resulted in errors not being
returned if the volume was in use;

    docker swarm init
    docker volume create foo
    docker create -v foo:/foo busybox top
    docker volume rm foo

This patch changes the logic for ignoring "local" volume errors if swarm is
enabled (and cluster volumes supported).

While working on this fix, I also discovered that Cluster.RemoveVolume() did
not handle the "force" option correctly; while swarm correctly handled these,
the cluster backend performs a lookup of the volume first (to obtain its ID),
which would fail if the volume didn't exist.

Before this patch:

    make TEST_FILTER=TestVolumesRemoveSwarmEnabled DOCKER_GRAPHDRIVER=vfs test-integration
    ...
    Running /go/src/github.com/docker/docker/integration/volume (arm64.integration.volume) flags=-test.v -test.timeout=10m -test.run TestVolumesRemoveSwarmEnabled
    ...
    === RUN   TestVolumesRemoveSwarmEnabled
    === PAUSE TestVolumesRemoveSwarmEnabled
    === CONT  TestVolumesRemoveSwarmEnabled
    === RUN   TestVolumesRemoveSwarmEnabled/volume_in_use
        volume_test.go:122: assertion failed: error is nil, not errdefs.IsConflict
        volume_test.go:123: assertion failed: expected an error, got nil
    === RUN   TestVolumesRemoveSwarmEnabled/volume_not_in_use
    === RUN   TestVolumesRemoveSwarmEnabled/non-existing_volume
    === RUN   TestVolumesRemoveSwarmEnabled/non-existing_volume_force
        volume_test.go:143: assertion failed: error is not nil: Error response from daemon: volume no_such_volume not found
    --- FAIL: TestVolumesRemoveSwarmEnabled (1.57s)
        --- FAIL: TestVolumesRemoveSwarmEnabled/volume_in_use (0.00s)
        --- PASS: TestVolumesRemoveSwarmEnabled/volume_not_in_use (0.01s)
        --- PASS: TestVolumesRemoveSwarmEnabled/non-existing_volume (0.00s)
        --- FAIL: TestVolumesRemoveSwarmEnabled/non-existing_volume_force (0.00s)
    FAIL

With this patch:

    make TEST_FILTER=TestVolumesRemoveSwarmEnabled DOCKER_GRAPHDRIVER=vfs test-integration
    ...
    Running /go/src/github.com/docker/docker/integration/volume (arm64.integration.volume) flags=-test.v -test.timeout=10m -test.run TestVolumesRemoveSwarmEnabled
    ...
    === RUN   TestVolumesRemoveSwarmEnabled
    === PAUSE TestVolumesRemoveSwarmEnabled
    === CONT  TestVolumesRemoveSwarmEnabled
    === RUN   TestVolumesRemoveSwarmEnabled/volume_in_use
    === RUN   TestVolumesRemoveSwarmEnabled/volume_not_in_use
    === RUN   TestVolumesRemoveSwarmEnabled/non-existing_volume
    === RUN   TestVolumesRemoveSwarmEnabled/non-existing_volume_force
    --- PASS: TestVolumesRemoveSwarmEnabled (1.53s)
        --- PASS: TestVolumesRemoveSwarmEnabled/volume_in_use (0.00s)
        --- PASS: TestVolumesRemoveSwarmEnabled/volume_not_in_use (0.01s)
        --- PASS: TestVolumesRemoveSwarmEnabled/non-existing_volume (0.00s)
        --- PASS: TestVolumesRemoveSwarmEnabled/non-existing_volume_force (0.00s)
    PASS

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 058a31e4790f3e3f0cf63bf4467ed9279cbd3456)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
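Editor's note: the fix below hinges on classifying backend errors with the errdefs helpers. The following standalone sketch is illustrative only (not part of the patch) and shows how that classification behaves:

    package main

    import (
        "errors"
        "fmt"

        "github.com/docker/docker/errdefs"
    )

    func main() {
        // A volume still referenced by a container surfaces as a conflict;
        // a missing volume surfaces as not-found.
        inUse := errdefs.Conflict(errors.New("remove foo: volume is in use"))
        missing := errdefs.NotFound(errors.New("get foo: no such volume"))

        fmt.Println(errdefs.IsConflict(inUse))   // true  -> must be returned to the client (HTTP 409)
        fmt.Println(errdefs.IsNotFound(missing)) // true  -> fall through to the cluster backend
        fmt.Println(errdefs.IsNotFound(inUse))   // false -> not swallowed by the new check
    }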
-rw-r--r--api/server/router/volume/volume_routes.go32
-rw-r--r--daemon/cluster/volumes.go3
-rw-r--r--integration/volume/volume_test.go54
3 files changed, 75 insertions, 14 deletions
diff --git a/api/server/router/volume/volume_routes.go b/api/server/router/volume/volume_routes.go
index 04c6b0d654..9bbe13687f 100644
--- a/api/server/router/volume/volume_routes.go
+++ b/api/server/router/volume/volume_routes.go
@@ -159,25 +159,29 @@ func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter,
}
force := httputils.BoolValue(r, "force")
- version := httputils.VersionFromContext(ctx)
-
+ // First we try deleting local volume. The volume may not be found as a
+ // local volume, but could be a cluster volume, so we ignore "not found"
+ // errors at this stage. Note that no "not found" error is produced if
+ // "force" is enabled.
err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force))
- // when a removal is forced, if the volume does not exist, no error will be
- // returned. this means that to ensure forcing works on swarm volumes as
- // well, we should always also force remove against the cluster.
- if err != nil || force {
+ if err != nil && !errdefs.IsNotFound(err) {
+ return err
+ }
+
+ // If no volume was found, the volume may be a cluster volume. If force
+ // is enabled, the volume backend won't return an error for non-existing
+ // volumes, so we don't know if removal succeeded (or not volume existed).
+ // In that case we always try to delete cluster volumes as well.
+ if errdefs.IsNotFound(err) || force {
+ version := httputils.VersionFromContext(ctx)
if versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() {
- if errdefs.IsNotFound(err) || force {
- err := v.cluster.RemoveVolume(vars["name"], force)
- if err != nil {
- return err
- }
- }
- } else {
- return err
+ err = v.cluster.RemoveVolume(vars["name"], force)
}
}
+ if err != nil {
+ return err
+ }
w.WriteHeader(http.StatusNoContent)
return nil
}
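For readability, here is the patched handler body reassembled from the hunk above. This is a simplified reconstruction, not a verbatim copy; request parsing at the top of the function is elided:

    force := httputils.BoolValue(r, "force")

    // Try the local volume backend first; a "not found" error is tolerated
    // here because the name may refer to a cluster volume instead.
    err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force))
    if err != nil && !errdefs.IsNotFound(err) {
        return err
    }

    // Fall back to the cluster backend when the volume was not found locally,
    // or when force is set (the local backend suppresses "not found" then).
    if errdefs.IsNotFound(err) || force {
        version := httputils.VersionFromContext(ctx)
        if versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() {
            err = v.cluster.RemoveVolume(vars["name"], force)
        }
    }
    if err != nil {
        return err
    }
    w.WriteHeader(http.StatusNoContent)
    return nil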
diff --git a/daemon/cluster/volumes.go b/daemon/cluster/volumes.go
index 6f7b085697..7b53dd4691 100644
--- a/daemon/cluster/volumes.go
+++ b/daemon/cluster/volumes.go
@@ -90,6 +90,9 @@ func (c *Cluster) RemoveVolume(nameOrID string, force bool) error {
return c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
volume, err := getVolume(ctx, state.controlClient, nameOrID)
if err != nil {
+ if force && errdefs.IsNotFound(err) {
+ return nil
+ }
return err
}
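As a usage sketch (hypothetical example, not part of this patch): with this change, force-removing a non-existing volume through the Go API client succeeds against a swarm manager instead of failing the initial ID lookup.

    package main

    import (
        "context"
        "log"

        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            log.Fatal(err)
        }
        defer cli.Close()

        // With force=true, removing a volume that doesn't exist returns nil;
        // before this patch a swarm manager returned "volume ... not found".
        if err := cli.VolumeRemove(context.Background(), "no_such_volume", true); err != nil {
            log.Fatal(err)
        }
    }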
diff --git a/integration/volume/volume_test.go b/integration/volume/volume_test.go
index 365bbe2990..f9e1dfcb55 100644
--- a/integration/volume/volume_test.go
+++ b/integration/volume/volume_test.go
@@ -15,11 +15,13 @@ import (
clientpkg "github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/integration/internal/container"
+ "github.com/docker/docker/testutil/daemon"
"github.com/docker/docker/testutil/request"
"github.com/google/go-cmp/cmp/cmpopts"
"gotest.tools/v3/assert"
"gotest.tools/v3/assert/cmp"
is "gotest.tools/v3/assert/cmp"
+ "gotest.tools/v3/skip"
)
func TestVolumesCreateAndList(t *testing.T) {
@@ -103,6 +105,58 @@ func TestVolumesRemove(t *testing.T) {
})
}
+// TestVolumesRemoveSwarmEnabled tests that an error is returned if a volume
+// is in use, also if swarm is enabled (and cluster volumes are supported).
+//
+// Regression test for https://github.com/docker/cli/issues/4082
+func TestVolumesRemoveSwarmEnabled(t *testing.T) {
+ skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")
+ skip.If(t, testEnv.OSType == "windows", "TODO enable on windows")
+ t.Parallel()
+ defer setupTest(t)()
+
+ // Spin up a new daemon, so that we can run this test in parallel (it's a slow test)
+ d := daemon.New(t)
+ d.StartAndSwarmInit(t)
+ defer d.Stop(t)
+
+ client := d.NewClientT(t)
+
+ ctx := context.Background()
+ prefix, slash := getPrefixAndSlashFromDaemonPlatform()
+ id := container.Create(ctx, t, client, container.WithVolume(prefix+slash+"foo"))
+
+ c, err := client.ContainerInspect(ctx, id)
+ assert.NilError(t, err)
+ vname := c.Mounts[0].Name
+
+ t.Run("volume in use", func(t *testing.T) {
+ err = client.VolumeRemove(ctx, vname, false)
+ assert.Check(t, is.ErrorType(err, errdefs.IsConflict))
+ assert.Check(t, is.ErrorContains(err, "volume is in use"))
+ })
+
+ t.Run("volume not in use", func(t *testing.T) {
+ err = client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{
+ Force: true,
+ })
+ assert.NilError(t, err)
+
+ err = client.VolumeRemove(ctx, vname, false)
+ assert.NilError(t, err)
+ })
+
+ t.Run("non-existing volume", func(t *testing.T) {
+ err = client.VolumeRemove(ctx, "no_such_volume", false)
+ assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
+ })
+
+ t.Run("non-existing volume force", func(t *testing.T) {
+ err = client.VolumeRemove(ctx, "no_such_volume", true)
+ assert.NilError(t, err)
+ })
+}
+
func TestVolumesInspect(t *testing.T) {
defer setupTest(t)()
client := testEnv.APIClient()