introduce --watch

Signed-off-by: Nicolas De Loof <nicolas.deloof@gmail.com>
Nicolas De Loof 2024-02-19 15:38:45 +01:00 committed by Nicolas De Loof
parent de178267df
commit 8ab8df86e0
10 changed files with 118 additions and 50 deletions

View File

@@ -24,11 +24,10 @@ import (
"strings"
"time"
"github.com/compose-spec/compose-go/v2/types"
"github.com/docker/cli/cli/command"
"github.com/docker/compose/v2/cmd/formatter"
xprogress "github.com/moby/buildkit/util/progress/progressui"
"github.com/spf13/cobra"
"github.com/docker/compose/v2/pkg/api"
@@ -55,6 +54,7 @@ type upOptions struct {
timestamp bool
wait bool
waitTimeout int
watch bool
}
func (opts upOptions) apply(project *types.Project, services []string) (*types.Project, error) {
@@ -126,6 +126,7 @@ func upCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service) *c
flags.BoolVar(&up.attachDependencies, "attach-dependencies", false, "Automatically attach to log output of dependent services")
flags.BoolVar(&up.wait, "wait", false, "Wait for services to be running|healthy. Implies detached mode.")
flags.IntVar(&up.waitTimeout, "wait-timeout", 0, "Maximum duration to wait for the project to be running|healthy")
flags.BoolVarP(&up.watch, "watch", "w", false, "Watch source code and rebuild/refresh containers when files are updated.")
return upCmd
}
@@ -257,6 +258,7 @@ func runUp(
CascadeStop: upOptions.cascadeStop,
Wait: upOptions.wait,
WaitTimeout: timeout,
Watch: upOptions.watch,
Services: services,
},
})
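
Note: the up command changes are deliberately thin: `--watch` is parsed into upOptions.watch and forwarded as StartOptions.Watch, and everything else happens in the backend. A minimal sketch of the programmatic equivalent, assuming a backend implementing api.Service and an already-loaded project (variable names are illustrative):

	err := backend.Up(ctx, project, api.UpOptions{
		Create: api.CreateOptions{Build: &api.BuildOptions{}},
		Start:  api.StartOptions{Project: project, Watch: true},
	})

With Start.Watch set, composeService.Up (see the pkg/compose hunks below) runs the file watcher alongside the containers.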

View File

@@ -21,6 +21,7 @@ import (
"fmt"
"github.com/compose-spec/compose-go/v2/types"
"github.com/docker/compose/v2/cmd/formatter"
"github.com/docker/cli/cli/command"
"github.com/docker/compose/v2/internal/locker"
@@ -31,8 +32,7 @@ import (
type watchOptions struct {
*ProjectOptions
quiet bool
noUp bool
}
func watchCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service) *cobra.Command {
@@ -57,7 +57,7 @@ func watchCommand(p *ProjectOptions, dockerCli command.Cli, backend api.Service)
ValidArgsFunction: completeServiceNames(dockerCli, p),
}
cmd.Flags().BoolVar(&watchOpts.quiet, "quiet", false, "hide build output")
cmd.Flags().BoolVar(&buildOpts.quiet, "quiet", false, "hide build output")
cmd.Flags().BoolVar(&watchOpts.noUp, "no-up", false, "Do not build & start services before watching")
return cmd
}
@@ -101,7 +101,7 @@ func runWatch(ctx context.Context, dockerCli command.Cli, backend api.Service, w
Recreate: api.RecreateDiverged,
RecreateDependencies: api.RecreateNever,
Inherit: true,
QuietPull: watchOpts.quiet,
QuietPull: buildOpts.quiet,
},
Start: api.StartOptions{
Project: project,
@@ -114,7 +114,10 @@ func runWatch(ctx context.Context, dockerCli command.Cli, backend api.Service, w
return err
}
}
consumer := formatter.NewLogConsumer(ctx, dockerCli.Out(), dockerCli.Err(), false, false, false)
return backend.Watch(ctx, project, services, api.WatchOptions{
Build: build,
Build: &build,
LogTo: consumer,
})
}
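
Note: the paired `quiet` registrations above show the flag moving from watchOpts to the shared buildOpts, so `watch` now resolves build flags the same way `up` does. The backend call also changes shape: Watch now takes a pointer to the build options and a formatter.LogConsumer, which is how watch events end up in the same log stream as container output.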

View File

@@ -62,7 +62,11 @@ func (l *logConsumer) Register(name string) {
func (l *logConsumer) register(name string) *presenter {
cf := monochrome
if l.color {
cf = nextColor()
if name == api.WatchLogger {
cf = makeColorFunc("92")
} else {
cf = nextColor()
}
}
p := &presenter{
colors: cf,
@@ -138,5 +142,9 @@
}
func (p *presenter) setPrefix(width int) {
if p.name == api.WatchLogger {
p.prefix = p.colors(strings.Repeat(" ", width) + " ⦿ ")
return
}
p.prefix = p.colors(fmt.Sprintf("%-"+strconv.Itoa(width)+"s | ", p.name))
}
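
Note: the reserved api.WatchLogger entry gets a fixed color and a `⦿` prefix instead of the usual `name | ` prefix, so watch events stand out from service logs. "92" is the ANSI SGR code for bright green; a rough standalone equivalent of such a color function, assuming makeColorFunc simply wraps text in an SGR escape:

	// wrap s in an ANSI escape: \033[92m ... \033[0m renders bright green
	func green(s string) string {
		return "\033[92m" + s + "\033[0m"
	}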

View File

@@ -32,6 +32,7 @@ Create and start containers
| `--timestamps` | | | Show timestamps |
| `--wait` | | | Wait for services to be running\|healthy. Implies detached mode. |
| `--wait-timeout` | `int` | `0` | Maximum duration to wait for the project to be running\|healthy |
| `-w`, `--watch` | | | Watch source code and rebuild/refresh containers when files are updated. |
<!---MARKER_GEN_END-->
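
Note: in practice this means `docker compose up --watch` stays attached to service logs while also rebuilding/refreshing containers on file changes, folding in what previously required the separate `docker compose watch` command.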

View File

@@ -274,6 +274,18 @@ options:
experimentalcli: false
kubernetes: false
swarm: false
- option: watch
shorthand: w
value_type: bool
default_value: "false"
description: |
Watch source code and rebuild/refresh containers when files are updated.
deprecated: false
hidden: false
experimental: false
experimentalcli: false
kubernetes: false
swarm: false
inherited_options:
- option: dry-run
value_type: bool

View File

@@ -114,9 +114,13 @@ type VizOptions struct {
Indentation string
}
// WatchLogger is a reserved name to log watch events
const WatchLogger = "#watch"
// WatchOptions group options of the Watch API
type WatchOptions struct {
Build BuildOptions
Build *BuildOptions
LogTo LogConsumer
}
// BuildOptions group options of the Build API
@@ -214,6 +218,7 @@ type StartOptions struct {
WaitTimeout time.Duration
// Services passed in the command line to be started
Services []string
Watch bool
}
// RestartOptions group options of the Restart API
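
Note: Build changing from a value to a *BuildOptions is load-bearing: a nil pointer now means "builds are not allowed" (e.g. `--no-build`), which Watch rejects when a rebuild trigger is configured (see the Watch hunk below). A sketch of constructing the options under that reading:

	opts := api.WatchOptions{
		Build: &api.BuildOptions{}, // nil here would make rebuild actions an error
		LogTo: consumer,            // any api.LogConsumer; `consumer` is illustrative
	}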

View File

@@ -127,6 +127,9 @@ func (s *composeService) build(ctx context.Context, project *types.Project, opti
progressCtx, cancel := context.WithCancel(context.Background())
defer cancel()
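// editor's note on the hunk below: a quiet build now forces the quiet
// progress mode; the watch code path sets BuildOptions.Quiet on rebuilds,
// so BuildKit progress output no longer interleaves with the watch log stream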
if options.Quiet {
options.Progress = progress.ModeQuiet
}
w, err = xprogress.NewPrinter(progressCtx, os.Stdout, progressui.DisplayMode(options.Progress),
xprogress.WithDesc(
fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver),

View File

@@ -125,6 +125,15 @@ func (s *composeService) Up(ctx context.Context, project *types.Project, options
return err
})
if options.Start.Watch {
eg.Go(func() error {
return s.Watch(ctx, project, options.Start.Services, api.WatchOptions{
Build: options.Create.Build,
LogTo: options.Start.Attach,
})
})
}
// We don't use parent (cancelable) context as we manage sigterm to stop the stack
err = s.start(context.Background(), project.Name, options.Start, printer.HandleEvent)
if err != nil && !isTerminated { // Ignore error if the process is terminated
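
Note: with Start.Watch set, the watcher runs concurrently with the rest of Up inside the same errgroup, so a watcher error surfaces through eg.Wait(). The shape, as a fragment (errgroup is golang.org/x/sync/errgroup; watchLoop and attachLogs are hypothetical stand-ins for s.Watch and the printer/attach path):

	eg, gctx := errgroup.WithContext(ctx)
	eg.Go(func() error { return watchLoop(gctx) })
	eg.Go(func() error { return attachLogs(gctx) })
	return eg.Wait()

Also worth noting: options.Create.Build is handed through as WatchOptions.Build, so `up --no-build --watch` reaches Watch with a nil Build and fails fast on rebuild triggers.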

View File

@@ -76,6 +76,7 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
}
eg, ctx := errgroup.WithContext(ctx)
watching := false
options.LogTo.Register(api.WatchLogger)
for i := range project.Services {
service := project.Services[i]
config, err := loadDevelopmentConfig(service, project)
@@ -91,9 +92,15 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
continue
}
if len(config.Watch) > 0 && service.Build == nil {
// service configured with watchers but no build section
return fmt.Errorf("can't watch service %q without a build context", service.Name)
for _, trigger := range config.Watch {
if trigger.Action == types.WatchActionRebuild {
if service.Build == nil {
return fmt.Errorf("can't watch service %q with action %s without a build context", service.Name, types.WatchActionRebuild)
}
if options.Build == nil {
return fmt.Errorf("--no-build is incompatible with watch action %s in service %s", types.WatchActionRebuild, service.Name)
}
}
}
if len(services) > 0 && service.Build == nil {
@@ -142,9 +149,7 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
return err
}
fmt.Fprintf(
s.stdinfo(),
"Watch configuration for service %q:%s\n",
logrus.Debugf("Watch configuration for service %q:%s\n",
service.Name,
strings.Join(append([]string{""}, pathLogs...), "\n - "),
)
@@ -163,6 +168,7 @@ func (s *composeService) Watch(ctx context.Context, project *types.Project, serv
if !watching {
return fmt.Errorf("none of the selected services is configured for watch, consider setting an 'develop' section")
}
options.LogTo.Log(api.WatchLogger, "watch enabled")
return eg.Wait()
}
@@ -190,7 +196,7 @@ func (s *composeService) watch(ctx context.Context, project *types.Project, name
case batch := <-batchEvents:
start := time.Now()
logrus.Debugf("batch start: service[%s] count[%d]", name, len(batch))
if err := s.handleWatchBatch(ctx, project, name, options.Build, batch, syncer); err != nil {
if err := s.handleWatchBatch(ctx, project, name, options, batch, syncer); err != nil {
logrus.Warnf("Error handling changed files for service %s: %v", name, err)
}
logrus.Debugf("batch complete: service[%s] duration[%s] count[%d]",
@@ -431,32 +437,38 @@ func (t tarDockerClient) Untar(ctx context.Context, id string, archive io.ReadCl
})
}
func (s *composeService) handleWatchBatch(ctx context.Context, project *types.Project, serviceName string, build api.BuildOptions, batch []fileEvent, syncer sync.Syncer) error {
func (s *composeService) handleWatchBatch(ctx context.Context, project *types.Project, serviceName string, options api.WatchOptions, batch []fileEvent, syncer sync.Syncer) error {
pathMappings := make([]sync.PathMapping, len(batch))
restartService := false
for i := range batch {
if batch[i].Action == types.WatchActionRebuild {
fmt.Fprintf(
s.stdinfo(),
"Rebuilding service %q after changes were detected:%s\n",
serviceName,
strings.Join(append([]string{""}, batch[i].HostPath), "\n - "),
)
options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Rebuilding service %q after changes were detected...", serviceName))
// restrict the build to ONLY this service, not any of its dependencies
build.Services = []string{serviceName}
err := s.Up(ctx, project, api.UpOptions{
Create: api.CreateOptions{
Build: &build,
Services: []string{serviceName},
Inherit: true,
},
Start: api.StartOptions{
Services: []string{serviceName},
Project: project,
},
options.Build.Services = []string{serviceName}
options.Build.Quiet = true
_, err := s.build(ctx, project, *options.Build, nil)
if err != nil {
options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Build failed. Error: %v", err))
return err
}
options.LogTo.Log(api.WatchLogger, fmt.Sprintf("service %q successfully built", serviceName))
err = s.create(ctx, project, api.CreateOptions{
Services: []string{serviceName},
Inherit: true,
Recreate: api.RecreateForce,
})
if err != nil {
fmt.Fprintf(s.stderr(), "Application failed to start after update. Error: %v\n", err)
options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Failed to recreate service after update. Error: %v", err))
return err
}
err = s.start(ctx, project.Name, api.StartOptions{
Project: project,
Services: []string{serviceName},
}, nil)
if err != nil {
options.LogTo.Log(api.WatchLogger, fmt.Sprintf("Application failed to start after update. Error: %v", err))
}
return nil
}
@@ -466,7 +478,7 @@ func (s *composeService) handleWatchBatch(ctx context.Context, project *types.Pr
pathMappings[i] = batch[i].PathMapping
}
writeWatchSyncMessage(s.stdinfo(), serviceName, pathMappings)
writeWatchSyncMessage(options.LogTo, serviceName, pathMappings)
service, err := project.GetService(serviceName)
if err != nil {
@@ -486,29 +498,19 @@ func (s *composeService) handleWatchBatch(ctx context.Context, project *types.Pr
}
// writeWatchSyncMessage prints out a message about the sync for the changed paths.
func writeWatchSyncMessage(w io.Writer, serviceName string, pathMappings []sync.PathMapping) {
func writeWatchSyncMessage(log api.LogConsumer, serviceName string, pathMappings []sync.PathMapping) {
const maxPathsToShow = 10
if len(pathMappings) <= maxPathsToShow || logrus.IsLevelEnabled(logrus.DebugLevel) {
hostPathsToSync := make([]string, len(pathMappings))
for i := range pathMappings {
hostPathsToSync[i] = pathMappings[i].HostPath
}
fmt.Fprintf(
w,
"Syncing %q after changes were detected:%s\n",
serviceName,
strings.Join(append([]string{""}, hostPathsToSync...), "\n - "),
)
log.Log(api.WatchLogger, fmt.Sprintf("Syncing %q after changes were detected", serviceName))
} else {
hostPathsToSync := make([]string, len(pathMappings))
for i := range pathMappings {
hostPathsToSync[i] = pathMappings[i].HostPath
}
fmt.Fprintf(
w,
"Syncing service %q after %d changes were detected\n",
serviceName,
len(pathMappings),
)
log.Log(api.WatchLogger, fmt.Sprintf("Syncing service %q after %d changes were detected", serviceName, len(pathMappings)))
}
}
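
Note: this rewrite of handleWatchBatch is the behavioral core of the commit. A rebuild trigger no longer re-runs the whole Up flow; it builds just the changed service (quietly, restricted via Build.Services), force-recreates its container with api.RecreateForce, and starts it, reporting each step through the shared LogConsumer under the reserved api.WatchLogger name. The sync path keeps its maxPathsToShow threshold but likewise logs through the consumer instead of writing to an io.Writer.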

View File

@@ -16,6 +16,7 @@ package compose
import (
"context"
"fmt"
"os"
"testing"
"time"
@@ -91,10 +92,29 @@ func (t testWatcher) Errors() chan error {
return t.errors
}
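// stdLogger is a minimal api.LogConsumer for the test below; it satisfies
// the new WatchOptions.LogTo field by printing to stdout/stderr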
type stdLogger struct{}
func (s stdLogger) Log(containerName, message string) {
fmt.Printf("%s: %s\n", containerName, message)
}
func (s stdLogger) Err(containerName, message string) {
fmt.Fprintf(os.Stderr, "%s: %s\n", containerName, message)
}
func (s stdLogger) Status(container, msg string) {
fmt.Printf("%s: %s\n", container, msg)
}
func (s stdLogger) Register(container string) {
}
func TestWatch_Sync(t *testing.T) {
mockCtrl := gomock.NewController(t)
cli := mocks.NewMockCli(mockCtrl)
cli.EXPECT().Err().Return(os.Stderr).AnyTimes()
cli.EXPECT().BuildKitEnabled().Return(true, nil)
apiClient := mocks.NewMockAPIClient(mockCtrl)
apiClient.EXPECT().ContainerList(gomock.Any(), gomock.Any()).Return([]moby.Container{
testContainer("test", "123", false),
@@ -124,7 +144,10 @@ func TestWatch_Sync(t *testing.T) {
dockerCli: cli,
clock: clock,
}
err := service.watch(ctx, &proj, "test", api.WatchOptions{}, watcher, syncer, []types.Trigger{
err := service.watch(ctx, &proj, "test", api.WatchOptions{
Build: &api.BuildOptions{},
LogTo: stdLogger{},
}, watcher, syncer, []types.Trigger{
{
Path: "/sync",
Action: "sync",