mirror of
https://github.com/docker/compose.git
synced 2025-07-21 12:44:54 +02:00
golangci-lint: enable copyloopvar linter
capturing loop variables is no longer needed in go1.22 and higher; https://go.dev/blog/loopvar-preview This patch enables the copyloopvar linter, which finds places where capturing is no longer needed, and removes those now-redundant captures. Also made some minor changes, and renamed some vars in places where we could use a shorter name that's less likely to conflict with imports. Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
This commit is contained in:
parent
49575ef499
commit
c23a7e7281
@ -5,6 +5,7 @@ linters:
|
|||||||
enable-all: false
|
enable-all: false
|
||||||
disable-all: true
|
disable-all: true
|
||||||
enable:
|
enable:
|
||||||
|
- copyloopvar
|
||||||
- depguard
|
- depguard
|
||||||
- errcheck
|
- errcheck
|
||||||
- errorlint
|
- errorlint
|
||||||
|
@ -205,7 +205,6 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
|
|||||||
// Scale UP
|
// Scale UP
|
||||||
number := next + i
|
number := next + i
|
||||||
name := getContainerName(project.Name, service, number)
|
name := getContainerName(project.Name, service, number)
|
||||||
i := i
|
|
||||||
eventOpts := tracing.SpanOptions{trace.WithAttributes(attribute.String("container.name", name))}
|
eventOpts := tracing.SpanOptions{trace.WithAttributes(attribute.String("container.name", name))}
|
||||||
eg.Go(tracing.EventWrapFuncForErrGroup(ctx, "service/scale/up", eventOpts, func(ctx context.Context) error {
|
eg.Go(tracing.EventWrapFuncForErrGroup(ctx, "service/scale/up", eventOpts, func(ctx context.Context) error {
|
||||||
opts := createOptions{
|
opts := createOptions{
|
||||||
@ -470,7 +469,6 @@ func (s *composeService) waitDependencies(ctx context.Context, project *types.Pr
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
dep, config := dep, config
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
ticker := time.NewTicker(500 * time.Millisecond)
|
ticker := time.NewTicker(500 * time.Millisecond)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
|
@ -172,7 +172,6 @@ func (t *graphTraversal) run(ctx context.Context, graph *Graph, eg *errgroup.Gro
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
node := node
|
|
||||||
if !t.consume(node.Key) {
|
if !t.consume(node.Key) {
|
||||||
// another worker already visited this node
|
// another worker already visited this node
|
||||||
continue
|
continue
|
||||||
|
@ -322,10 +322,9 @@ func (s *composeService) stopContainer(ctx context.Context, w progress.Writer, s
|
|||||||
|
|
||||||
func (s *composeService) stopContainers(ctx context.Context, w progress.Writer, serv *types.ServiceConfig, containers []moby.Container, timeout *time.Duration) error {
|
func (s *composeService) stopContainers(ctx context.Context, w progress.Writer, serv *types.ServiceConfig, containers []moby.Container, timeout *time.Duration) error {
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
for _, container := range containers {
|
for _, ctr := range containers {
|
||||||
container := container
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
return s.stopContainer(ctx, w, serv, container, timeout)
|
return s.stopContainer(ctx, w, serv, ctr, timeout)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
return eg.Wait()
|
return eg.Wait()
|
||||||
@ -333,10 +332,9 @@ func (s *composeService) stopContainers(ctx context.Context, w progress.Writer,
|
|||||||
|
|
||||||
func (s *composeService) removeContainers(ctx context.Context, containers []moby.Container, service *types.ServiceConfig, timeout *time.Duration, volumes bool) error {
|
func (s *composeService) removeContainers(ctx context.Context, containers []moby.Container, service *types.ServiceConfig, timeout *time.Duration, volumes bool) error {
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
for _, container := range containers {
|
for _, ctr := range containers {
|
||||||
container := container
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
return s.stopAndRemoveContainer(ctx, container, service, timeout, volumes)
|
return s.stopAndRemoveContainer(ctx, ctr, service, timeout, volumes)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
return eg.Wait()
|
return eg.Wait()
|
||||||
|
@ -202,7 +202,6 @@ func (p *ImagePruner) filterImagesByExistence(ctx context.Context, imageNames []
|
|||||||
|
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
for _, img := range imageNames {
|
for _, img := range imageNames {
|
||||||
img := img
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
_, _, err := p.client.ImageInspectWithRaw(ctx, img)
|
_, _, err := p.client.ImageInspectWithRaw(ctx, img)
|
||||||
if errdefs.IsNotFound(err) {
|
if errdefs.IsNotFound(err) {
|
||||||
|
@ -82,7 +82,6 @@ func (s *composeService) getImageSummaries(ctx context.Context, repoTags []strin
|
|||||||
l := sync.Mutex{}
|
l := sync.Mutex{}
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
for _, repoTag := range repoTags {
|
for _, repoTag := range repoTags {
|
||||||
repoTag := repoTag
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
inspect, _, err := s.apiClient().ImageInspectWithRaw(ctx, repoTag)
|
inspect, _, err := s.apiClient().ImageInspectWithRaw(ctx, repoTag)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -62,13 +62,12 @@ func (s *composeService) Logs(
|
|||||||
}
|
}
|
||||||
|
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
for _, c := range containers {
|
for _, ctr := range containers {
|
||||||
c := c
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
err := s.logContainers(ctx, consumer, c, options)
|
err := s.logContainers(ctx, consumer, ctr, options)
|
||||||
var notImplErr errdefs.ErrNotImplemented
|
var notImplErr errdefs.ErrNotImplemented
|
||||||
if errors.As(err, ¬ImplErr) {
|
if errors.As(err, ¬ImplErr) {
|
||||||
logrus.Warnf("Can't retrieve logs for %q: %s", getCanonicalContainerName(c), err.Error())
|
logrus.Warnf("Can't retrieve logs for %q: %s", getCanonicalContainerName(ctr), err.Error())
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
|
@ -134,7 +134,6 @@ func TestComposeService_Logs_ServiceFiltering(t *testing.T) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
for _, id := range []string{"c1", "c2", "c4"} {
|
for _, id := range []string{"c1", "c2", "c4"} {
|
||||||
id := id
|
|
||||||
api.EXPECT().
|
api.EXPECT().
|
||||||
ContainerInspect(anyCancellableContext(), id).
|
ContainerInspect(anyCancellableContext(), id).
|
||||||
Return(
|
Return(
|
||||||
|
@ -43,7 +43,6 @@ func (s *composeService) Ps(ctx context.Context, projectName string, options api
|
|||||||
summary := make([]api.ContainerSummary, len(containers))
|
summary := make([]api.ContainerSummary, len(containers))
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
for i, container := range containers {
|
for i, container := range containers {
|
||||||
i, container := i, container
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
publishers := make([]api.PortPublisher, len(container.Ports))
|
publishers := make([]api.PortPublisher, len(container.Ports))
|
||||||
sort.Slice(container.Ports, func(i, j int) bool {
|
sort.Slice(container.Ports, func(i, j int) bool {
|
||||||
|
@ -113,7 +113,7 @@ func (s *composeService) pull(ctx context.Context, project *types.Project, opts
|
|||||||
|
|
||||||
imagesBeingPulled[service.Image] = service.Name
|
imagesBeingPulled[service.Image] = service.Name
|
||||||
|
|
||||||
idx, name, service := i, name, service
|
idx := i
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
_, err := s.pullServiceImage(ctx, service, s.configFile(), w, opts.Quiet, project.Environment["DOCKER_DEFAULT_PLATFORM"])
|
_, err := s.pullServiceImage(ctx, service, s.configFile(), w, opts.Quiet, project.Environment["DOCKER_DEFAULT_PLATFORM"])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -316,7 +316,6 @@ func (s *composeService) pullRequiredImages(ctx context.Context, project *types.
|
|||||||
eg.SetLimit(s.maxConcurrency)
|
eg.SetLimit(s.maxConcurrency)
|
||||||
pulledImages := make([]string, len(needPull))
|
pulledImages := make([]string, len(needPull))
|
||||||
for i, service := range needPull {
|
for i, service := range needPull {
|
||||||
i, service := i, service
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
id, err := s.pullServiceImage(ctx, service, s.configFile(), w, quietPull, project.Environment["DOCKER_DEFAULT_PLATFORM"])
|
id, err := s.pullServiceImage(ctx, service, s.configFile(), w, quietPull, project.Environment["DOCKER_DEFAULT_PLATFORM"])
|
||||||
pulledImages[i] = id
|
pulledImages[i] = id
|
||||||
|
@ -72,14 +72,12 @@ func (s *composeService) push(ctx context.Context, project *types.Project, optio
|
|||||||
})
|
})
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
service := service
|
|
||||||
tags := []string{service.Image}
|
tags := []string{service.Image}
|
||||||
if service.Build != nil {
|
if service.Build != nil {
|
||||||
tags = append(tags, service.Build.Tags...)
|
tags = append(tags, service.Build.Tags...)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tag := range tags {
|
for _, tag := range tags {
|
||||||
tag := tag
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
err := s.pushServiceImage(ctx, tag, info, s.configFile(), w, options.Quiet)
|
err := s.pushServiceImage(ctx, tag, info, s.configFile(), w, options.Quiet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -102,12 +102,11 @@ func (s *composeService) Remove(ctx context.Context, projectName string, options
|
|||||||
func (s *composeService) remove(ctx context.Context, containers Containers, options api.RemoveOptions) error {
|
func (s *composeService) remove(ctx context.Context, containers Containers, options api.RemoveOptions) error {
|
||||||
w := progress.ContextWriter(ctx)
|
w := progress.ContextWriter(ctx)
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
for _, container := range containers {
|
for _, ctr := range containers {
|
||||||
container := container
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
eventName := getContainerProgressName(container)
|
eventName := getContainerProgressName(ctr)
|
||||||
w.Event(progress.RemovingEvent(eventName))
|
w.Event(progress.RemovingEvent(eventName))
|
||||||
err := s.apiClient().ContainerRemove(ctx, container.ID, containerType.RemoveOptions{
|
err := s.apiClient().ContainerRemove(ctx, ctr.ID, containerType.RemoveOptions{
|
||||||
RemoveVolumes: options.Volumes,
|
RemoveVolumes: options.Volumes,
|
||||||
Force: options.Force,
|
Force: options.Force,
|
||||||
})
|
})
|
||||||
|
@ -78,17 +78,17 @@ func (s *composeService) restart(ctx context.Context, projectName string, option
|
|||||||
w := progress.ContextWriter(ctx)
|
w := progress.ContextWriter(ctx)
|
||||||
return InDependencyOrder(ctx, project, func(c context.Context, service string) error {
|
return InDependencyOrder(ctx, project, func(c context.Context, service string) error {
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
for _, container := range containers.filter(isService(service)) {
|
for _, ctr := range containers.filter(isService(service)) {
|
||||||
container := container
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
eventName := getContainerProgressName(container)
|
eventName := getContainerProgressName(ctr)
|
||||||
w.Event(progress.RestartingEvent(eventName))
|
w.Event(progress.RestartingEvent(eventName))
|
||||||
timeout := utils.DurationSecondToInt(options.Timeout)
|
timeout := utils.DurationSecondToInt(options.Timeout)
|
||||||
err := s.apiClient().ContainerRestart(ctx, container.ID, containerType.StopOptions{Timeout: timeout})
|
err := s.apiClient().ContainerRestart(ctx, ctr.ID, containerType.StopOptions{Timeout: timeout})
|
||||||
if err == nil {
|
if err != nil {
|
||||||
w.Event(progress.StartedEvent(eventName))
|
|
||||||
}
|
|
||||||
return err
|
return err
|
||||||
|
}
|
||||||
|
w.Event(progress.StartedEvent(eventName))
|
||||||
|
return nil
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
return eg.Wait()
|
return eg.Wait()
|
||||||
|
@ -36,16 +36,15 @@ func (s *composeService) Top(ctx context.Context, projectName string, services [
|
|||||||
}
|
}
|
||||||
summary := make([]api.ContainerProcSummary, len(containers))
|
summary := make([]api.ContainerProcSummary, len(containers))
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
for i, container := range containers {
|
for i, ctr := range containers {
|
||||||
i, container := i, container
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
topContent, err := s.apiClient().ContainerTop(ctx, container.ID, []string{})
|
topContent, err := s.apiClient().ContainerTop(ctx, ctr.ID, []string{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
summary[i] = api.ContainerProcSummary{
|
summary[i] = api.ContainerProcSummary{
|
||||||
ID: container.ID,
|
ID: ctr.ID,
|
||||||
Name: getCanonicalContainerName(container),
|
Name: getCanonicalContainerName(ctr),
|
||||||
Processes: topContent.Processes,
|
Processes: topContent.Processes,
|
||||||
Titles: topContent.Titles,
|
Titles: topContent.Titles,
|
||||||
}
|
}
|
||||||
|
@ -31,7 +31,6 @@ type vizGraph map[*types.ServiceConfig][]*types.ServiceConfig
|
|||||||
func (s *composeService) Viz(_ context.Context, project *types.Project, opts api.VizOptions) (string, error) {
|
func (s *composeService) Viz(_ context.Context, project *types.Project, opts api.VizOptions) (string, error) {
|
||||||
graph := make(vizGraph)
|
graph := make(vizGraph)
|
||||||
for _, service := range project.Services {
|
for _, service := range project.Services {
|
||||||
service := service
|
|
||||||
graph[&service] = make([]*types.ServiceConfig, 0, len(service.DependsOn))
|
graph[&service] = make([]*types.ServiceConfig, 0, len(service.DependsOn))
|
||||||
for dependencyName := range service.DependsOn {
|
for dependencyName := range service.DependsOn {
|
||||||
// no error should be returned since dependencyName should exist
|
// no error should be returned since dependencyName should exist
|
||||||
|
@ -35,15 +35,14 @@ func (s *composeService) Wait(ctx context.Context, projectName string, options a
|
|||||||
|
|
||||||
eg, waitCtx := errgroup.WithContext(ctx)
|
eg, waitCtx := errgroup.WithContext(ctx)
|
||||||
var statusCode int64
|
var statusCode int64
|
||||||
for _, c := range containers {
|
for _, ctr := range containers {
|
||||||
c := c
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
var err error
|
var err error
|
||||||
resultC, errC := s.dockerCli.Client().ContainerWait(waitCtx, c.ID, "")
|
resultC, errC := s.dockerCli.Client().ContainerWait(waitCtx, ctr.ID, "")
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case result := <-resultC:
|
case result := <-resultC:
|
||||||
_, _ = fmt.Fprintf(s.dockerCli.Out(), "container %q exited with status code %d\n", c.ID, result.StatusCode)
|
_, _ = fmt.Fprintf(s.dockerCli.Out(), "container %q exited with status code %d\n", ctr.ID, result.StatusCode)
|
||||||
statusCode = result.StatusCode
|
statusCode = result.StatusCode
|
||||||
case err = <-errC:
|
case err = <-errC:
|
||||||
}
|
}
|
||||||
|
Loading…
x
Reference in New Issue
Block a user