golangci-lint: enable copyloopvar linter

Capturing loop variables is no longer needed in go1.22 and higher; see
https://go.dev/blog/loopvar-preview

This patch enables the copyloopvar linter, which finds places where capturing
is no longer needed, and removes those now-redundant copies.

Also made some minor changes, and renamed some variables where a shorter
name is less likely to conflict with imports.
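
For illustration, a minimal sketch (not taken from this change) of the pattern
the linter flags; before go1.22 the explicit per-iteration copy was needed so
that each goroutine got its own value:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var wg sync.WaitGroup
    	for _, name := range []string{"db", "web", "cache"} {
    		// name := name // required before go1.22; copyloopvar flags it as redundant now
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			fmt.Println(name) // go1.22+ gives each iteration its own `name`
    		}()
    	}
    	wg.Wait()
    }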

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 2025-02-10 13:15:23 +01:00 committed by Nicolas De loof
parent 49575ef499
commit c23a7e7281
16 changed files with 26 additions and 42 deletions


@@ -5,6 +5,7 @@ linters:
   enable-all: false
   disable-all: true
   enable:
+    - copyloopvar
     - depguard
     - errcheck
     - errorlint


@@ -205,7 +205,6 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
 		// Scale UP
 		number := next + i
 		name := getContainerName(project.Name, service, number)
-		i := i
 		eventOpts := tracing.SpanOptions{trace.WithAttributes(attribute.String("container.name", name))}
 		eg.Go(tracing.EventWrapFuncForErrGroup(ctx, "service/scale/up", eventOpts, func(ctx context.Context) error {
 			opts := createOptions{
@@ -470,7 +469,6 @@ func (s *composeService) waitDependencies(ctx context.Context, project *types.Pr
 			continue
 		}
-		dep, config := dep, config
 		eg.Go(func() error {
 			ticker := time.NewTicker(500 * time.Millisecond)
 			defer ticker.Stop()


@@ -172,7 +172,6 @@ func (t *graphTraversal) run(ctx context.Context, graph *Graph, eg *errgroup.Gro
 			continue
 		}
-		node := node
 		if !t.consume(node.Key) {
 			// another worker already visited this node
 			continue


@@ -322,10 +322,9 @@ func (s *composeService) stopContainer(ctx context.Context, w progress.Writer, s
 func (s *composeService) stopContainers(ctx context.Context, w progress.Writer, serv *types.ServiceConfig, containers []moby.Container, timeout *time.Duration) error {
 	eg, ctx := errgroup.WithContext(ctx)
-	for _, container := range containers {
-		container := container
+	for _, ctr := range containers {
 		eg.Go(func() error {
-			return s.stopContainer(ctx, w, serv, container, timeout)
+			return s.stopContainer(ctx, w, serv, ctr, timeout)
 		})
 	}
 	return eg.Wait()
@@ -333,10 +332,9 @@ func (s *composeService) stopContainers(ctx context.Context, w progress.Writer,
 func (s *composeService) removeContainers(ctx context.Context, containers []moby.Container, service *types.ServiceConfig, timeout *time.Duration, volumes bool) error {
 	eg, _ := errgroup.WithContext(ctx)
-	for _, container := range containers {
-		container := container
+	for _, ctr := range containers {
 		eg.Go(func() error {
-			return s.stopAndRemoveContainer(ctx, container, service, timeout, volumes)
+			return s.stopAndRemoveContainer(ctx, ctr, service, timeout, volumes)
 		})
 	}
 	return eg.Wait()


@@ -202,7 +202,6 @@ func (p *ImagePruner) filterImagesByExistence(ctx context.Context, imageNames []
 	eg, ctx := errgroup.WithContext(ctx)
 	for _, img := range imageNames {
-		img := img
 		eg.Go(func() error {
 			_, _, err := p.client.ImageInspectWithRaw(ctx, img)
 			if errdefs.IsNotFound(err) {


@@ -82,7 +82,6 @@ func (s *composeService) getImageSummaries(ctx context.Context, repoTags []strin
 	l := sync.Mutex{}
 	eg, ctx := errgroup.WithContext(ctx)
 	for _, repoTag := range repoTags {
-		repoTag := repoTag
 		eg.Go(func() error {
 			inspect, _, err := s.apiClient().ImageInspectWithRaw(ctx, repoTag)
 			if err != nil {


@@ -62,13 +62,12 @@ func (s *composeService) Logs(
 	}
 	eg, ctx := errgroup.WithContext(ctx)
-	for _, c := range containers {
-		c := c
+	for _, ctr := range containers {
 		eg.Go(func() error {
-			err := s.logContainers(ctx, consumer, c, options)
+			err := s.logContainers(ctx, consumer, ctr, options)
 			var notImplErr errdefs.ErrNotImplemented
 			if errors.As(err, &notImplErr) {
-				logrus.Warnf("Can't retrieve logs for %q: %s", getCanonicalContainerName(c), err.Error())
+				logrus.Warnf("Can't retrieve logs for %q: %s", getCanonicalContainerName(ctr), err.Error())
 				return nil
 			}
 			return err


@@ -134,7 +134,6 @@ func TestComposeService_Logs_ServiceFiltering(t *testing.T) {
 	)
 	for _, id := range []string{"c1", "c2", "c4"} {
-		id := id
 		api.EXPECT().
 			ContainerInspect(anyCancellableContext(), id).
 			Return(


@@ -43,7 +43,6 @@ func (s *composeService) Ps(ctx context.Context, projectName string, options api
 	summary := make([]api.ContainerSummary, len(containers))
 	eg, ctx := errgroup.WithContext(ctx)
 	for i, container := range containers {
-		i, container := i, container
 		eg.Go(func() error {
 			publishers := make([]api.PortPublisher, len(container.Ports))
 			sort.Slice(container.Ports, func(i, j int) bool {


@@ -113,7 +113,7 @@ func (s *composeService) pull(ctx context.Context, project *types.Project, opts
 		imagesBeingPulled[service.Image] = service.Name
-		idx, name, service := i, name, service
+		idx := i
 		eg.Go(func() error {
 			_, err := s.pullServiceImage(ctx, service, s.configFile(), w, opts.Quiet, project.Environment["DOCKER_DEFAULT_PLATFORM"])
 			if err != nil {
@@ -316,7 +316,6 @@ func (s *composeService) pullRequiredImages(ctx context.Context, project *types.
 	eg.SetLimit(s.maxConcurrency)
 	pulledImages := make([]string, len(needPull))
 	for i, service := range needPull {
-		i, service := i, service
 		eg.Go(func() error {
 			id, err := s.pullServiceImage(ctx, service, s.configFile(), w, quietPull, project.Environment["DOCKER_DEFAULT_PLATFORM"])
 			pulledImages[i] = id


@@ -72,14 +72,12 @@ func (s *composeService) push(ctx context.Context, project *types.Project, optio
 			})
 			continue
 		}
-		service := service
 		tags := []string{service.Image}
 		if service.Build != nil {
 			tags = append(tags, service.Build.Tags...)
 		}
 		for _, tag := range tags {
-			tag := tag
 			eg.Go(func() error {
 				err := s.pushServiceImage(ctx, tag, info, s.configFile(), w, options.Quiet)
 				if err != nil {


@@ -102,12 +102,11 @@ func (s *composeService) Remove(ctx context.Context, projectName string, options
 func (s *composeService) remove(ctx context.Context, containers Containers, options api.RemoveOptions) error {
 	w := progress.ContextWriter(ctx)
 	eg, ctx := errgroup.WithContext(ctx)
-	for _, container := range containers {
-		container := container
+	for _, ctr := range containers {
 		eg.Go(func() error {
-			eventName := getContainerProgressName(container)
+			eventName := getContainerProgressName(ctr)
 			w.Event(progress.RemovingEvent(eventName))
-			err := s.apiClient().ContainerRemove(ctx, container.ID, containerType.RemoveOptions{
+			err := s.apiClient().ContainerRemove(ctx, ctr.ID, containerType.RemoveOptions{
 				RemoveVolumes: options.Volumes,
 				Force:         options.Force,
 			})


@@ -78,17 +78,17 @@ func (s *composeService) restart(ctx context.Context, projectName string, option
 	w := progress.ContextWriter(ctx)
 	return InDependencyOrder(ctx, project, func(c context.Context, service string) error {
 		eg, ctx := errgroup.WithContext(ctx)
-		for _, container := range containers.filter(isService(service)) {
-			container := container
+		for _, ctr := range containers.filter(isService(service)) {
 			eg.Go(func() error {
-				eventName := getContainerProgressName(container)
+				eventName := getContainerProgressName(ctr)
 				w.Event(progress.RestartingEvent(eventName))
 				timeout := utils.DurationSecondToInt(options.Timeout)
-				err := s.apiClient().ContainerRestart(ctx, container.ID, containerType.StopOptions{Timeout: timeout})
-				if err == nil {
-					w.Event(progress.StartedEvent(eventName))
+				err := s.apiClient().ContainerRestart(ctx, ctr.ID, containerType.StopOptions{Timeout: timeout})
+				if err != nil {
+					return err
 				}
-				return err
+				w.Event(progress.StartedEvent(eventName))
+				return nil
 			})
 		}
 		return eg.Wait()


@@ -36,16 +36,15 @@ func (s *composeService) Top(ctx context.Context, projectName string, services [
 	}
 	summary := make([]api.ContainerProcSummary, len(containers))
 	eg, ctx := errgroup.WithContext(ctx)
-	for i, container := range containers {
-		i, container := i, container
+	for i, ctr := range containers {
 		eg.Go(func() error {
-			topContent, err := s.apiClient().ContainerTop(ctx, container.ID, []string{})
+			topContent, err := s.apiClient().ContainerTop(ctx, ctr.ID, []string{})
 			if err != nil {
 				return err
 			}
 			summary[i] = api.ContainerProcSummary{
-				ID:        container.ID,
-				Name:      getCanonicalContainerName(container),
+				ID:        ctr.ID,
+				Name:      getCanonicalContainerName(ctr),
 				Processes: topContent.Processes,
 				Titles:    topContent.Titles,
 			}


@@ -31,7 +31,6 @@ type vizGraph map[*types.ServiceConfig][]*types.ServiceConfig
 func (s *composeService) Viz(_ context.Context, project *types.Project, opts api.VizOptions) (string, error) {
 	graph := make(vizGraph)
 	for _, service := range project.Services {
-		service := service
 		graph[&service] = make([]*types.ServiceConfig, 0, len(service.DependsOn))
 		for dependencyName := range service.DependsOn {
 			// no error should be returned since dependencyName should exist


@@ -35,15 +35,14 @@ func (s *composeService) Wait(ctx context.Context, projectName string, options a
 	eg, waitCtx := errgroup.WithContext(ctx)
 	var statusCode int64
-	for _, c := range containers {
-		c := c
+	for _, ctr := range containers {
 		eg.Go(func() error {
 			var err error
-			resultC, errC := s.dockerCli.Client().ContainerWait(waitCtx, c.ID, "")
+			resultC, errC := s.dockerCli.Client().ContainerWait(waitCtx, ctr.ID, "")
 			select {
 			case result := <-resultC:
-				_, _ = fmt.Fprintf(s.dockerCli.Out(), "container %q exited with status code %d\n", c.ID, result.StatusCode)
+				_, _ = fmt.Fprintf(s.dockerCli.Out(), "container %q exited with status code %d\n", ctr.ID, result.StatusCode)
 				statusCode = result.StatusCode
 			case err = <-errC:
 			}