watch: build & launch the project at start (#10957)

The `alpha watch` command currently "attaches" to an already-running
Compose project, so it's necessary to run something like
`docker compose up --wait` first.

Now, we'll do the equivalent of an `up --build` before starting the
watch, so that we know the project is up-to-date and running.
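
Concretely, the implicit start mirrors `up --build`: images are built
first, but only containers whose configuration has diverged get
recreated, so an already-up-to-date project is reused as-is. The create
options in the diff below capture this:

```go
// excerpt from the new runWatch (see diff below): build before start,
// but leave containers that already match the desired state alone
Create: api.CreateOptions{
	Build:                &build,
	Recreate:             api.RecreateDiverged,
	RecreateDependencies: api.RecreateNever,
	Inherit:              true,
},
```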

Additionally, unlike an interactive `up`, the services are not stopped
when `watch` exits (e.g. via `Ctrl-C`). This avoids the need to start
from scratch each time the command is run - if some services are already
running and up-to-date, they can be used as-is. A `down` can always be
used to destroy everything, and we can consider introducing a flag like
`--down-on-exit` to `watch` or changing the default.
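
For illustration, a hypothetical `--down-on-exit` flag could be wired up
the same way as the new `--no-up` flag below; the flag name, the
`downOnExit` field, and this teardown are assumptions, not part of this
change:

```go
// hypothetical sketch only -- this flag does not exist in this commit
cmd.Flags().BoolVar(&watchOpts.downOnExit, "down-on-exit", false,
	"Stop and remove services when watch exits")

// in runWatch, before starting the watch:
if watchOpts.downOnExit {
	defer func() {
		// best-effort teardown on exit; a real implementation would
		// likely need a fresh context here, since ctx may already be
		// cancelled by the Ctrl-C that ended the watch
		_ = backend.Down(ctx, project.Name, api.DownOptions{Project: project})
	}()
}
```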

Signed-off-by: Milas Bowman <milas.bowman@docker.com>
Milas Bowman 2023-09-07 13:27:23 -04:00 committed by GitHub
parent e0f39ebbef
commit d7b0b2bd7d
7 changed files with 82 additions and 56 deletions

View File

@@ -31,10 +31,14 @@ import (
type watchOptions struct {
*ProjectOptions
quiet bool
noUp bool
}
func watchCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service) *cobra.Command {
opts := watchOptions{
watchOpts := watchOptions{
ProjectOptions: p,
}
buildOpts := buildOptions{
ProjectOptions: p,
}
cmd := &cobra.Command{
@@ -44,22 +48,33 @@ func watchCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service)
return nil
}),
RunE: Adapt(func(ctx context.Context, args []string) error {
return runWatch(ctx, dockerCli, backend, opts, args)
return runWatch(ctx, dockerCli, backend, watchOpts, buildOpts, args)
}),
ValidArgsFunction: completeServiceNames(dockerCli, p),
}
cmd.Flags().BoolVar(&opts.quiet, "quiet", false, "hide build output")
cmd.Flags().BoolVar(&watchOpts.quiet, "quiet", false, "hide build output")
cmd.Flags().BoolVar(&watchOpts.noUp, "no-up", false, "Do not build & start services before watching")
return cmd
}
func runWatch(ctx context.Context, dockerCli command.Cli, backend api.Service, opts watchOptions, services []string) error {
func runWatch(ctx context.Context, dockerCli command.Cli, backend api.Service, watchOpts watchOptions, buildOpts buildOptions, services []string) error {
fmt.Fprintln(os.Stderr, "watch command is EXPERIMENTAL")
project, err := opts.ToProject(dockerCli, nil)
project, err := watchOpts.ToProject(dockerCli, nil)
if err != nil {
return err
}
if err := applyPlatforms(project, true); err != nil {
return err
}
build, err := buildOpts.toAPIBuildOptions(nil)
if err != nil {
return err
}
// validation done -- ensure we have the lockfile for this project before doing work
l, err := locker.NewPidfile(project.Name)
if err != nil {
return fmt.Errorf("cannot take exclusive lock for project %q: %v", project.Name, err)
@@ -68,5 +83,29 @@ func runWatch(ctx context.Context, dockerCli command.Cli, backend api.Service, o
return fmt.Errorf("cannot take exclusive lock for project %q: %v", project.Name, err)
}
return backend.Watch(ctx, project, services, api.WatchOptions{})
if !watchOpts.noUp {
upOpts := api.UpOptions{
Create: api.CreateOptions{
Build: &build,
Services: services,
RemoveOrphans: false,
Recreate: api.RecreateDiverged,
RecreateDependencies: api.RecreateNever,
Inherit: true,
QuietPull: watchOpts.quiet,
},
Start: api.StartOptions{
Project: project,
Attach: nil,
CascadeStop: false,
Services: services,
},
}
if err := backend.Up(ctx, project, upOpts); err != nil {
return err
}
}
return backend.Watch(ctx, project, services, api.WatchOptions{
Build: build,
})
}
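
The resulting control flow in `runWatch` is: resolve the project and
build options, take the project lock, optionally run the implicit up,
then hand off to the watcher. A condensed sketch of that flow (error
handling and locking elided; not a drop-in replacement for the code
above):

```go
import (
	"context"

	"github.com/compose-spec/compose-go/types"
	"github.com/docker/compose/v2/pkg/api"
)

func startWatch(ctx context.Context, backend api.Service, project *types.Project,
	services []string, build api.BuildOptions, noUp bool) error {
	if !noUp {
		// equivalent of `docker compose up --build` for the watched services
		err := backend.Up(ctx, project, api.UpOptions{
			Create: api.CreateOptions{Build: &build, Services: services,
				Recreate: api.RecreateDiverged, Inherit: true},
			Start: api.StartOptions{Project: project, Services: services},
		})
		if err != nil {
			return err
		}
	}
	// the watcher also receives the build options so that file-change
	// rebuilds honour the same settings
	return backend.Watch(ctx, project, services, api.WatchOptions{Build: build})
}
```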

View File

@@ -5,10 +5,11 @@ EXPERIMENTAL - Watch build context for service and rebuild/refresh containers wh
### Options
| Name | Type | Default | Description |
|:------------|:-----|:--------|:--------------------------------|
| `--dry-run` | | | Execute command in dry run mode |
| `--quiet` | | | hide build output |
| Name | Type | Default | Description |
|:------------|:-----|:--------|:----------------------------------------------|
| `--dry-run` | | | Execute command in dry run mode |
| `--no-up` | | | Do not build & start services before watching |
| `--quiet` | | | hide build output |
<!---MARKER_GEN_END-->

View File

@@ -7,6 +7,16 @@ usage: docker compose alpha watch [SERVICE...]
pname: docker compose alpha
plink: docker_compose_alpha.yaml
options:
- option: no-up
value_type: bool
default_value: "false"
description: Do not build & start services before watching
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
- option: quiet
value_type: bool
default_value: "false"

View File

@@ -110,6 +110,7 @@ type VizOptions struct {
// WatchOptions group options of the Watch API
type WatchOptions struct {
Build BuildOptions
}
// BuildOptions group options of the Build API
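
With `Build` on `WatchOptions`, API consumers now control how
watch-triggered rebuilds behave. A minimal caller sketch, assuming a
`backend api.Service` plus resolved `project` and `build` values as in
the CLI code above:

```go
// rebuilds triggered by file changes reuse these build options,
// scoped to the changed service by the backend
err := backend.Watch(ctx, project, services, api.WatchOptions{
	Build: build, // api.BuildOptions resolved by the caller
})
```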

View File

@@ -26,19 +26,16 @@ import (
"strings"
"time"
moby "github.com/docker/docker/api/types"
"github.com/docker/compose/v2/internal/sync"
"github.com/compose-spec/compose-go/types"
"github.com/docker/compose/v2/internal/sync"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/watch"
moby "github.com/docker/docker/api/types"
"github.com/jonboulle/clockwork"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/watch"
)
type DevelopmentConfig struct {
@@ -84,7 +81,7 @@ func (s *composeService) getSyncImplementation(project *types.Project) sync.Sync
return sync.NewDockerCopy(project.Name, s, s.stdinfo())
}
func (s *composeService) Watch(ctx context.Context, project *types.Project, services []string, _ api.WatchOptions) error { //nolint: gocyclo
func (s *composeService) Watch(ctx context.Context, project *types.Project, services []string, options api.WatchOptions) error { //nolint: gocyclo
if err := project.ForServices(services); err != nil {
return err
}
@@ -161,7 +158,7 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
eg.Go(func() error {
defer watcher.Close() //nolint:errcheck
return s.watch(ctx, project, service.Name, watcher, syncer, config.Watch)
return s.watch(ctx, project, service.Name, options, watcher, syncer, config.Watch)
})
}
@@ -172,14 +169,7 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
return eg.Wait()
}
func (s *composeService) watch(
ctx context.Context,
project *types.Project,
name string,
watcher watch.Notify,
syncer sync.Syncer,
triggers []Trigger,
) error {
func (s *composeService) watch(ctx context.Context, project *types.Project, name string, options api.WatchOptions, watcher watch.Notify, syncer sync.Syncer, triggers []Trigger) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@@ -202,7 +192,7 @@ func (s *composeService) watch(
case batch := <-batchEvents:
start := time.Now()
logrus.Debugf("batch start: service[%s] count[%d]", name, len(batch))
if err := s.handleWatchBatch(ctx, project, name, batch, syncer); err != nil {
if err := s.handleWatchBatch(ctx, project, name, options.Build, batch, syncer); err != nil {
logrus.Warnf("Error handling changed files for service %s: %v", name, err)
}
logrus.Debugf("batch complete: service[%s] duration[%s] count[%d]",
@@ -436,13 +426,7 @@ func (t tarDockerClient) Exec(ctx context.Context, containerID string, cmd []str
return nil
}
func (s *composeService) handleWatchBatch(
ctx context.Context,
project *types.Project,
serviceName string,
batch []fileEvent,
syncer sync.Syncer,
) error {
func (s *composeService) handleWatchBatch(ctx context.Context, project *types.Project, serviceName string, build api.BuildOptions, batch []fileEvent, syncer sync.Syncer) error {
pathMappings := make([]sync.PathMapping, len(batch))
for i := range batch {
if batch[i].Action == WatchActionRebuild {
@@ -452,14 +436,11 @@ func (s *composeService) handleWatchBatch(
serviceName,
strings.Join(append([]string{""}, batch[i].HostPath), "\n - "),
)
// restrict the build to ONLY this service, not any of its dependencies
build.Services = []string{serviceName}
err := s.Up(ctx, project, api.UpOptions{
Create: api.CreateOptions{
Build: &api.BuildOptions{
Pull: false,
Push: false,
// restrict the build to ONLY this service, not any of its dependencies
Services: []string{serviceName},
},
Build: &build,
Services: []string{serviceName},
Inherit: true,
},
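
Note that `build` is passed into `handleWatchBatch` by value, so setting
`Services` on it scopes only this one rebuild; the watch-level build
options are left untouched. The essence of the rebuild path:

```go
// scope the caller's build options to the single changed service,
// then bring just that service up again
build.Services = []string{serviceName}
err := s.Up(ctx, project, api.UpOptions{
	Create: api.CreateOptions{
		Build:    &build,
		Services: []string{serviceName},
		Inherit:  true,
	},
})
```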

View File

@@ -21,16 +21,14 @@ import (
"time"
"github.com/compose-spec/compose-go/types"
"github.com/docker/compose/v2/internal/sync"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/mocks"
"github.com/docker/compose/v2/pkg/watch"
moby "github.com/docker/docker/api/types"
"github.com/golang/mock/gomock"
"github.com/jonboulle/clockwork"
"github.com/stretchr/testify/require"
"github.com/docker/compose/v2/internal/sync"
"github.com/docker/compose/v2/pkg/watch"
"gotest.tools/v3/assert"
)
@@ -126,7 +124,7 @@ func TestWatch_Sync(t *testing.T) {
dockerCli: cli,
clock: clock,
}
err := service.watch(ctx, &proj, "test", watcher, syncer, []Trigger{
err := service.watch(ctx, &proj, "test", api.WatchOptions{}, watcher, syncer, []Trigger{
{
Path: "/sync",
Action: "sync",

View File

@@ -82,14 +82,13 @@ func doTest(t *testing.T, svcName string, tarSync bool) {
cli := NewCLI(t, WithEnv(env...))
// important that --rmi is used to prune the images and ensure that watch builds on launch
cleanup := func() {
cli.RunDockerComposeCmd(t, "down", svcName, "--timeout=0", "--remove-orphans", "--volumes")
cli.RunDockerComposeCmd(t, "down", svcName, "--timeout=0", "--remove-orphans", "--volumes", "--rmi=local")
}
cleanup()
t.Cleanup(cleanup)
cli.RunDockerComposeCmd(t, "up", svcName, "--wait", "--build")
cmd := cli.NewDockerComposeCmd(t, "--verbose", "alpha", "watch", svcName)
// stream output since watch runs in the background
cmd.Stdout = os.Stdout
@@ -161,14 +160,12 @@ func doTest(t *testing.T, svcName string, tarSync bool) {
Assert(t, icmd.Expected{
ExitCode: 1,
Err: "No such file or directory",
},
)
})
cli.RunDockerComposeCmdNoCheck(t, "exec", svcName, "stat", "/app/data/ignored").
Assert(t, icmd.Expected{
ExitCode: 1,
Err: "No such file or directory",
},
)
})
t.Logf("Creating subdirectory")
require.NoError(t, os.Mkdir(filepath.Join(dataDir, "subdir"), 0o700))
@@ -196,8 +193,7 @@ func doTest(t *testing.T, svcName string, tarSync bool) {
Assert(t, icmd.Expected{
ExitCode: 1,
Err: "No such file or directory",
},
)
})
testComplete.Store(true)
}