/*
   Copyright 2023 Docker Compose CLI authors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package e2e

import (
	"crypto/rand"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"gotest.tools/v3/assert"
	"gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/icmd"
	"gotest.tools/v3/poll"
)
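
// TestWatch exercises `docker compose watch` for each service in the shared fixture
// Compose file, once with the default docker cp based sync and once with the
// experimental tar-based sync.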
func TestWatch(t *testing.T) {
	t.Skip("Skipping watch tests until we can figure out why they are flaky/failing")

	services := []string{"alpine", "busybox", "debian"}

	t.Run("docker cp", func(t *testing.T) {
		for _, svcName := range services {
			t.Run(svcName, func(t *testing.T) {
				t.Helper()
				doTest(t, svcName, false)
			})
		}
	})

	t.Run("tar", func(t *testing.T) {
		for _, svcName := range services {
			t.Run(svcName, func(t *testing.T) {
				t.Helper()
				doTest(t, svcName, true)
			})
		}
	})
}
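
// TestRebuildOnDotEnvWithExternalNetwork verifies that a change to the watched .env
// file triggers a rebuild, and that the recreated container is reattached to the
// same pre-existing external network.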
func TestRebuildOnDotEnvWithExternalNetwork(t *testing.T) {
	const projectName = "test_rebuild_on_dotenv_with_external_network"
	const svcName = "ext-alpine"
	containerName := strings.Join([]string{projectName, svcName, "1"}, "-")
	const networkName = "e2e-watch-external_network_test"
	const dotEnvFilepath = "./fixtures/watch/.env"

	c := NewCLI(t, WithEnv(
		"COMPOSE_PROJECT_NAME="+projectName,
		"COMPOSE_FILE=./fixtures/watch/with-external-network.yaml",
	))

	cleanup := func() {
		c.RunDockerComposeCmdNoCheck(t, "down", "--remove-orphans", "--volumes", "--rmi=local")
		c.RunDockerOrExitError(t, "network", "rm", networkName)
		os.Remove(dotEnvFilepath) //nolint:errcheck
	}
	cleanup()

	t.Log("create network that is referenced by the container we're testing")
	c.RunDockerCmd(t, "network", "create", networkName)
	res := c.RunDockerCmd(t, "network", "ls")
	assert.Assert(t, !strings.Contains(res.Combined(), projectName), res.Combined())

	t.Log("create a dotenv file that will be used to trigger the rebuild")
	err := os.WriteFile(dotEnvFilepath, []byte("HELLO=WORLD"), 0o666)
	assert.NilError(t, err)
	_, err = os.ReadFile(dotEnvFilepath)
	assert.NilError(t, err)

	// TODO: refactor this duplicated code into frameworks? Maybe?
	t.Log("starting docker compose watch")
	cmd := c.NewDockerComposeCmd(t, "--verbose", "watch", svcName)
	// stream output since watch runs in the background
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	r := icmd.StartCmd(cmd)
	require.NoError(t, r.Error)
	var testComplete atomic.Bool
	go func() {
		// if the process exits abnormally before the test is done, fail the test
		if err := r.Cmd.Wait(); err != nil && !t.Failed() && !testComplete.Load() {
			assert.Check(t, cmp.Nil(err))
		}
	}()

	t.Log("wait for watch to start watching")
	c.WaitForCondition(t, func() (bool, string) {
		out := r.String()
		errs := r.Stderr()
		return strings.Contains(out, "Watch configuration"),
			fmt.Sprintf("'Watch configuration' not found in:\n%s\nStderr:\n%s\n", out, errs)
	}, 30*time.Second, 1*time.Second)
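
	// The container's HostConfig.NetworkMode should point at the ID of the external
	// network created above, confirming the service is attached to it.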
	n := c.RunDockerCmd(t, "network", "inspect", networkName, "-f", "{{ .Id }}")
	pn := c.RunDockerCmd(t, "inspect", containerName, "-f", "{{ .HostConfig.NetworkMode }}")
	assert.Equal(t, pn.Stdout(), n.Stdout())

	t.Log("modify the dotenv file to trigger the rebuild")
	err = os.WriteFile(dotEnvFilepath, []byte("HELLO=WORLD\nTEST=REBUILD"), 0o666)
	assert.NilError(t, err)
	_, err = os.ReadFile(dotEnvFilepath)
	assert.NilError(t, err)

	// NOTE: are there any other ways to check if the container has been rebuilt?
	t.Log("check if the container has been rebuilt")
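	// A single "batch complete: service[<name>]" line in the watch output means
	// exactly one rebuild batch has been processed for the service.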
	c.WaitForCondition(t, func() (bool, string) {
		out := r.String()
		if strings.Count(out, "batch complete: service["+svcName+"]") != 1 {
			return false, fmt.Sprintf("container %s was not rebuilt", containerName)
		}
		return true, fmt.Sprintf("container %s was rebuilt", containerName)
	}, 30*time.Second, 1*time.Second)

	n2 := c.RunDockerCmd(t, "network", "inspect", networkName, "-f", "{{ .Id }}")
	pn2 := c.RunDockerCmd(t, "inspect", containerName, "-f", "{{ .HostConfig.NetworkMode }}")
	assert.Equal(t, pn2.Stdout(), n2.Stdout())

	assert.Check(t, !strings.Contains(r.Combined(), "Application failed to start after update"))

	t.Cleanup(cleanup)
	t.Cleanup(func() {
		// IMPORTANT: watch doesn't exit on its own, don't leak processes!
		if r.Cmd.Process != nil {
			t.Logf("Killing watch process: pid[%d]", r.Cmd.Process.Pid)
			_ = r.Cmd.Process.Kill()
		}
	})
	testComplete.Store(true)
}

// NOTE: these tests all share a single Compose file but are safe to run concurrently
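// The fixture itself is not reproduced here; for orientation, the behaviour exercised
// below (sync of ./data to /app/data with ignore rules, and sync+restart of ./config
// to /app/config) would correspond to develop/watch rules roughly like this
// illustrative sketch, not the literal fixtures/watch/compose.yaml:
//
//	develop:
//	  watch:
//	    - path: ./data
//	      target: /app/data
//	      action: sync
//	      ignore:
//	        - "*.foo"
//	        - ignored/
//	    - path: ./config
//	      target: /app/config
//	      action: sync+restart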
func doTest(t *testing.T, svcName string, tarSync bool) {
	tmpdir := t.TempDir()
	dataDir := filepath.Join(tmpdir, "data")
	configDir := filepath.Join(tmpdir, "config")

	writeTestFile := func(name, contents, sourceDir string) {
		t.Helper()
		dest := filepath.Join(sourceDir, name)
		require.NoError(t, os.MkdirAll(filepath.Dir(dest), 0o700))
		t.Logf("writing %q to %q", contents, dest)
		require.NoError(t, os.WriteFile(dest, []byte(contents+"\n"), 0o600))
	}
	writeDataFile := func(name, contents string) {
		writeTestFile(name, contents, dataDir)
	}

	composeFilePath := filepath.Join(tmpdir, "compose.yaml")
	CopyFile(t, filepath.Join("fixtures", "watch", "compose.yaml"), composeFilePath)

	projName := "e2e-watch-" + svcName
	if tarSync {
		projName += "-tar"
	}
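	// COMPOSE_EXPERIMENTAL_WATCH_TAR selects the experimental tar-based sync path;
	// when false, watch falls back to the default docker cp based implementation.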
	env := []string{
		"COMPOSE_FILE=" + composeFilePath,
		"COMPOSE_PROJECT_NAME=" + projName,
		"COMPOSE_EXPERIMENTAL_WATCH_TAR=" + strconv.FormatBool(tarSync),
	}

	cli := NewCLI(t, WithEnv(env...))

	// important that --rmi is used to prune the images and ensure that watch builds on launch
	cleanup := func() {
		cli.RunDockerComposeCmd(t, "down", svcName, "--remove-orphans", "--volumes", "--rmi=local")
	}
	cleanup()
	t.Cleanup(cleanup)

	cmd := cli.NewDockerComposeCmd(t, "--verbose", "watch", svcName)
	// stream output since watch runs in the background
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	r := icmd.StartCmd(cmd)
	require.NoError(t, r.Error)
	t.Cleanup(func() {
		// IMPORTANT: watch doesn't exit on its own, don't leak processes!
		if r.Cmd.Process != nil {
			t.Logf("Killing watch process: pid[%d]", r.Cmd.Process.Pid)
			_ = r.Cmd.Process.Kill()
		}
	})
	var testComplete atomic.Bool
	go func() {
		// if the process exits abnormally before the test is done, fail the test
		if err := r.Cmd.Wait(); err != nil && !t.Failed() && !testComplete.Load() {
			assert.Check(t, cmp.Nil(err))
		}
	}()

	require.NoError(t, os.Mkdir(dataDir, 0o700))
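
	// checkFileContents polls `docker compose exec <svc> cat <path>` until the file
	// inside the container contains the expected contents, failing fast if the watch
	// process has already exited.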
	checkFileContents := func(path string, contents string) poll.Check {
		return func(pollLog poll.LogT) poll.Result {
			if r.Cmd.ProcessState != nil {
				return poll.Error(fmt.Errorf("watch process exited early: %s", r.Cmd.ProcessState))
			}
			res := icmd.RunCmd(cli.NewDockerComposeCmd(t, "exec", svcName, "cat", path))
			if strings.Contains(res.Stdout(), contents) {
				return poll.Success()
			}
			return poll.Continue(res.Combined())
		}
	}
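
	// waitForFlush writes a random sentinel value to a watched file and waits until it
	// shows up inside the container, guaranteeing that every file event queued before
	// it has been synced.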
	waitForFlush := func() {
		b := make([]byte, 32)
		_, _ = rand.Read(b)
		sentinelVal := fmt.Sprintf("%x", b)
		writeDataFile("wait.txt", sentinelVal)
		poll.WaitOn(t, checkFileContents("/app/data/wait.txt", sentinelVal))
	}
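
	// watch takes a moment to begin monitoring, so keep rewriting hello.txt until the
	// change is reflected inside the container.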
	t.Logf("Writing to a file until Compose watch is up and running")
	poll.WaitOn(t, func(t poll.LogT) poll.Result {
		writeDataFile("hello.txt", "hello world")
		return checkFileContents("/app/data/hello.txt", "hello world")(t)
	}, poll.WithDelay(time.Second))

	t.Logf("Modifying file contents")
	writeDataFile("hello.txt", "hello watch")
	poll.WaitOn(t, checkFileContents("/app/data/hello.txt", "hello watch"))

	t.Logf("Deleting file")
	require.NoError(t, os.Remove(filepath.Join(dataDir, "hello.txt")))
	waitForFlush()
	cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/hello.txt").
		Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "No such file or directory",
		})

	t.Logf("Writing to ignored paths")
	writeDataFile("data.foo", "ignored")
	writeDataFile(filepath.Join("ignored", "hello.txt"), "ignored")
	waitForFlush()
	cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/data.foo").
		Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "No such file or directory",
		})
	cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/ignored").
		Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "No such file or directory",
		})

	t.Logf("Creating subdirectory")
	require.NoError(t, os.Mkdir(filepath.Join(dataDir, "subdir"), 0o700))
	waitForFlush()
	cli.RunDockerComposeCmd(t, "exec", svcName, "stat", "/app/data/subdir")

	t.Logf("Writing to file in subdirectory")
	writeDataFile(filepath.Join("subdir", "file.txt"), "a")
	poll.WaitOn(t, checkFileContents("/app/data/subdir/file.txt", "a"))

	t.Logf("Writing to file multiple times")
	writeDataFile(filepath.Join("subdir", "file.txt"), "x")
	writeDataFile(filepath.Join("subdir", "file.txt"), "y")
	writeDataFile(filepath.Join("subdir", "file.txt"), "z")
	poll.WaitOn(t, checkFileContents("/app/data/subdir/file.txt", "z"))
	writeDataFile(filepath.Join("subdir", "file.txt"), "z")
	writeDataFile(filepath.Join("subdir", "file.txt"), "y")
	writeDataFile(filepath.Join("subdir", "file.txt"), "x")
	poll.WaitOn(t, checkFileContents("/app/data/subdir/file.txt", "x"))

	t.Logf("Deleting directory")
	require.NoError(t, os.RemoveAll(filepath.Join(dataDir, "subdir")))
	waitForFlush()
	cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/subdir").
		Assert(t, icmd.Expected{
			ExitCode: 1,
			Err:      "No such file or directory",
		})

	t.Logf("Sync and restart use case")
	require.NoError(t, os.Mkdir(configDir, 0o700))
	writeTestFile("file.config", "This is an updated config file", configDir)
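	// checkRestart scans the accumulated watch output for container lifecycle messages
	// ("<svc>-1 Restarting" / "<svc>-1 Started"), showing that the sync+restart rule
	// restarted the service.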
	checkRestart := func(state string) poll.Check {
		return func(pollLog poll.LogT) poll.Result {
			if strings.Contains(r.Combined(), state) {
				return poll.Success()
			}
			return poll.Continue(r.Combined())
		}
	}
	poll.WaitOn(t, checkRestart(fmt.Sprintf("%s-1 Restarting", svcName)))
	poll.WaitOn(t, checkRestart(fmt.Sprintf("%s-1 Started", svcName)))
	poll.WaitOn(t, checkFileContents("/app/config/file.config", "This is an updated config file"))

	testComplete.Store(true)
}