mirror of https://github.com/goreleaser/goreleaser.git synced 2025-07-05 00:59:04 +02:00

refactor: remove loop var copies unneeded in Go 1.22 (#4856)

This PR cleans up unnecessary loop variable copies and enables the
[`copyloopvar`](https://golangci-lint.run/usage/linters/#copyloopvar)
linter, which detects this redundant copying.

#### Additional notes

After the project upgraded to Go 1.22 in #4779, copying the loop variable inside a
`for` loop body became unnecessary: since Go 1.22, each iteration gets its own
instance of the variable, so closures no longer share a single, mutating copy. See this
[blog post](https://go.dev/blog/loopvar-preview) for a detailed explanation, and the
sketch below for a minimal illustration.
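
As a minimal sketch of the pattern being removed (hypothetical example code, not taken from the goreleaser codebase):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	formats := []string{"tar.gz", "zip", "deb"}

	var wg sync.WaitGroup
	for _, format := range formats {
		// Before Go 1.22, every iteration reused the same `format` variable,
		// so a goroutine started here could observe a later value. The usual
		// fix was to shadow it with a per-iteration copy:
		//
		//     format := format
		//
		// With Go 1.22+, each iteration already has its own `format`, so that
		// copy is redundant, which is exactly what copyloopvar reports.
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(format)
		}()
	}
	wg.Wait()
}
```

Under Go 1.21 and earlier (without the loopvar experiment), these goroutines could all print the last element; under Go 1.22+, each prints its own value even without the copy.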

The `copyloopvar` linter is only available from `golangci-lint` v1.57
onwards, so we also need to update this tool.
Oleksandr Redko
2024-05-12 19:21:13 +03:00
committed by GitHub
parent c5204dfb64
commit 00a376cc64
28 changed files with 3 additions and 43 deletions

View File

@@ -23,7 +23,7 @@ jobs:
           go-version: stable
           cache: false
       - name: golangci-lint
-        uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v3
+        uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
         with:
           args: --timeout=5m
-          version: v1.56.2
+          version: v1.58.1

View File

@@ -3,6 +3,7 @@ run:
   timeout: 5m
 linters:
   enable:
+    - copyloopvar
     - thelper
     - gofumpt
     - bodyclose

View File

@@ -483,7 +483,6 @@ func ByType(t Type) Filter {
 func ByFormats(formats ...string) Filter {
     filters := make([]Filter, 0, len(formats))
     for _, format := range formats {
-        format := format
         filters = append(filters, func(a *Artifact) bool {
             return a.Format() == format
         })
@@ -495,7 +494,6 @@ func ByFormats(formats ...string) Filter {
 func ByIDs(ids ...string) Filter {
     filters := make([]Filter, 0, len(ids))
     for _, id := range ids {
-        id := id
         filters = append(filters, func(a *Artifact) bool {
             // checksum and source archive are always for all artifacts, so return always true.
             return a.Type == Checksum ||
@@ -512,7 +510,6 @@ func ByIDs(ids ...string) Filter {
 func ByExt(exts ...string) Filter {
     filters := make([]Filter, 0, len(exts))
     for _, ext := range exts {
-        ext := ext
         filters = append(filters, func(a *Artifact) bool {
             return ExtraOr(*a, ExtraExt, "") == ext
         })

View File

@@ -44,7 +44,6 @@ func TestAdd(t *testing.T) {
             Type: Checksum,
         },
     } {
-        a := a
         g.Go(func() error {
             artifacts.Add(a)
             return nil

View File

@@ -70,7 +70,6 @@ func executePublisher(ctx *context.Context, publisher config.Publisher) error {
     g := semerrgroup.New(ctx.Parallelism)
     for _, artifact := range artifacts {
-        artifact := artifact
         g.Go(func() error {
             c, err := resolveCommand(ctx, publisher, artifact)
             if err != nil {

View File

@@ -156,7 +156,6 @@ type ResponseChecker func(*h.Response) error
 func Upload(ctx *context.Context, uploads []config.Upload, kind string, check ResponseChecker) error {
     // Handle every configured upload
     for _, upload := range uploads {
-        upload := upload
         filters := []artifact.Filter{}
         if upload.Checksum {
             filters = append(filters, artifact.ByType(artifact.Checksum))
@@ -206,7 +205,6 @@ func uploadWithFilter(ctx *context.Context, upload *config.Upload, filter artifa
     log.Debugf("will upload %d artifacts", len(artifacts))
     g := semerrgroup.New(ctx.Parallelism)
     for _, artifact := range artifacts {
-        artifact := artifact
         g.Go(func() error {
             return uploadAsset(ctx, upload, artifact, kind, check)
         })

View File

@@ -91,7 +91,6 @@ func (Pipe) Default(ctx *context.Context) error {
 func (Pipe) Run(ctx *context.Context) error {
     g := semerrgroup.New(ctx.Parallelism)
     for i, archive := range ctx.Config.Archives {
-        archive := archive
         if archive.Meta {
             g.Go(func() error {
                 return createMeta(ctx, archive)
@@ -115,7 +114,6 @@ func (Pipe) Run(ctx *context.Context) error {
         }
         for group, artifacts := range artifacts {
             log.Debugf("group %s has %d binaries", group, len(artifacts))
-            artifacts := artifacts
             format := packageFormat(archive, artifacts[0].Goos)
             switch format {
             case "none":

View File

@@ -36,7 +36,6 @@ func (Pipe) Publish(ctx *context.Context) error {
     // Check requirements for every instance we have configured.
     // If not fulfilled, we can skip this pipeline
     for _, instance := range ctx.Config.Artifactories {
-        instance := instance
         if skip := http.CheckConfig(ctx, &instance, "artifactory"); skip != nil {
             return pipe.Skip(skip.Error())
         }

View File

@@ -57,7 +57,6 @@ func (Pipe) Publish(ctx *context.Context) error {
     g := semerrgroup.New(ctx.Parallelism)
     skips := pipe.SkipMemento{}
     for _, conf := range ctx.Config.Blobs {
-        conf := conf
         g.Go(func() error {
             b, err := tmpl.New(ctx).Bool(conf.Disable)
             if err != nil {

View File

@@ -137,7 +137,6 @@ func doUpload(ctx *context.Context, conf config.Blob) error {
     g := semerrgroup.New(ctx.Parallelism)
     for _, artifact := range ctx.Artifacts.Filter(filter).List() {
-        artifact := artifact
         g.Go(func() error {
             // TODO: replace this with ?prefix=folder on the bucket url
             dataFile := artifact.Path
@@ -152,8 +151,6 @@ func doUpload(ctx *context.Context, conf config.Blob) error {
                 return err
             }
             for name, fullpath := range files {
-                name := name
-                fullpath := fullpath
                 g.Go(func() error {
                     uploadFile := path.Join(dir, name)
                     return uploadData(ctx, conf, up, fullpath, uploadFile, bucketURL)

View File

@@ -89,8 +89,6 @@ func buildWithDefaults(ctx *context.Context, build config.Build) (config.Build,
 func runPipeOnBuild(ctx *context.Context, g semerrgroup.Group, build config.Build) {
     for _, target := range filter(ctx, build.Targets) {
-        target := target
-        build := build
         g.Go(func() error {
             opts, err := buildOptionsForTarget(ctx, build, target)
             if err != nil {

View File

@@ -141,8 +141,6 @@ func refreshAll(ctx *context.Context, filepath string) error {
     g := semerrgroup.New(ctx.Parallelism)
     sumLines := make([]string, len(artifactList))
     for i, artifact := range artifactList {
-        i := i
-        artifact := artifact
         g.Go(func() error {
             sumLine, err := checksums(ctx.Config.Checksum.Algorithm, artifact)
             if err != nil {

View File

@@ -118,8 +118,6 @@ func (Pipe) Publish(ctx *context.Context) error {
 func (Pipe) Run(ctx *context.Context) error {
     g := semerrgroup.NewSkipAware(semerrgroup.New(ctx.Parallelism))
     for i, docker := range ctx.Config.Dockers {
-        i := i
-        docker := docker
         g.Go(func() error {
             log := log.WithField("index", i)
             log.Debug("looking for artifacts matching")

View File

@@ -1398,7 +1398,6 @@ func TestWithDigest(t *testing.T) {
     })
     for _, use := range []string{useDocker, useBuildx} {
-        use := use
         t.Run(use, func(t *testing.T) {
             t.Run("good", func(t *testing.T) {
                 require.Equal(t, "localhost:5050/owner/img:t1@sha256:d1", withDigest(use, "localhost:5050/owner/img:t1", artifacts.List()))

View File

@@ -60,7 +60,6 @@ func (ManifestPipe) Default(ctx *context.Context) error {
 func (ManifestPipe) Publish(ctx *context.Context) error {
     g := semerrgroup.NewSkipAware(semerrgroup.New(1))
     for _, manifest := range ctx.Config.DockerManifests {
-        manifest := manifest
         g.Go(func() error {
             skip, err := tmpl.New(ctx).Apply(manifest.SkipPush)
             if err != nil {

View File

@@ -193,7 +193,6 @@ func TestPublishPipeSuccess(t *testing.T) {
     repository := fmt.Sprintf("%sgoreleasertest/testapp", registry)
     for _, table := range table {
-        table := table
         t.Run(table.Name, func(t *testing.T) {
             if len(table.Tags) == 0 {
                 table.Tags = []string{table.Name}

View File

@@ -118,8 +118,6 @@ func doRun(ctx *context.Context, fpm config.NFPM) error {
     g := semerrgroup.New(ctx.Parallelism)
     for _, format := range fpm.Formats {
         for _, artifacts := range linuxBinaries {
-            format := format
-            artifacts := artifacts
             g.Go(func() error {
                 return create(ctx, fpm, format, artifacts)
             })

View File

@@ -56,8 +56,6 @@ func TestDescribeBodyMultipleChecksums(t *testing.T) {
         "foo.zip": "271a74b75a12f6c3affc88df101f9ef29af79717b1b2f4bdd5964aacf65bcf40",
     }
     for name, check := range checksums {
-        name := name
-        check := check
         checksumPath := filepath.Join(t.TempDir(), name+".sha256")
         ctx.Artifacts.Add(&artifact.Artifact{
             Name: name + ".sha256",

View File

@@ -175,7 +175,6 @@ func doPublish(ctx *context.Context, client client.Client) error {
     g := semerrgroup.New(ctx.Parallelism)
     for _, artifact := range ctx.Artifacts.Filter(filters).List() {
-        artifact := artifact
         g.Go(func() error {
             return upload(ctx, client, releaseID, artifact)
         })

View File

@@ -720,7 +720,6 @@ func Test_doRun(t *testing.T) {
             ctx := tt.args.ctx
             ctx.Config.Dist = t.TempDir()
             for _, a := range tt.artifacts {
-                a := a
                 a.Type = artifact.UploadableArchive
                 ctx.Artifacts.Add(&a)
             }

View File

@@ -193,7 +193,6 @@ func doRun(ctx *context.Context, snap config.Snapcraft) error {
             log.WithField("arch", arch).Warn("ignored unsupported arch")
             continue
         }
-        binaries := binaries
         g.Go(func() error {
             return create(ctx, snap, arch, binaries)
         })

View File

@@ -53,7 +53,6 @@ func (Pipe) Default(ctx *context.Context) error {
 func (Pipe) Run(ctx *context.Context) error {
     g := semerrgroup.NewSkipAware(semerrgroup.New(ctx.Parallelism))
     for _, unibin := range ctx.Config.UniversalBinaries {
-        unibin := unibin
         g.Go(func() error {
             opts := build.Options{
                 Target: "darwin_all",

View File

@@ -27,7 +27,6 @@ func (Pipe) Publish(ctx *context.Context) error {
     // Check requirements for every instance we have configured.
     // If not fulfilled, we can skip this pipeline
     for _, instance := range ctx.Config.Uploads {
-        instance := instance
         if skip := http.CheckConfig(ctx, &instance, "upload"); skip != nil {
             return pipe.Skip(skip.Error())
         }

View File

@@ -32,7 +32,6 @@ func (Pipe) Skip(ctx *context.Context) bool { return len(ctx.Config.UPXs) == 0 }
 func (Pipe) Run(ctx *context.Context) error {
     g := semerrgroup.NewSkipAware(semerrgroup.New(ctx.Parallelism))
     for _, upx := range ctx.Config.UPXs {
-        upx := upx
         enabled, err := tmpl.New(ctx).Bool(upx.Enabled)
         if err != nil {
             return err
@@ -44,7 +43,6 @@ func (Pipe) Run(ctx *context.Context) error {
             return pipe.Skipf("%s not found in PATH", upx.Binary)
         }
         for _, bin := range findBinaries(ctx, upx) {
-            bin := bin
             g.Go(func() error {
                 sizeBefore := sizeOf(bin.Path)
                 args := []string{

View File

@@ -37,7 +37,6 @@ func TestSemaphoreOrder(t *testing.T) {
     g := New(1)
     output := []int{}
     for i := 0; i < num; i++ {
-        i := i
         g.Go(func() error {
             output = append(output, i)
             return nil
@@ -54,7 +53,6 @@ func TestSemaphoreError(t *testing.T) {
     var lock sync.Mutex
     output := []int{}
     for i := 0; i < 10; i++ {
-        i := i
         g.Go(func() error {
             lock.Lock()
             defer lock.Unlock()
@@ -90,7 +88,6 @@ func TestSemaphoreSkipAwareSingleError(t *testing.T) {
         t.Run(fmt.Sprintf("limit-%d", i), func(t *testing.T) {
             g := NewSkipAware(New(i))
             for i := 0; i < 10; i++ {
-                i := i
                 g.Go(func() error {
                     time.Sleep(10 * time.Millisecond)
                     if i == 5 {

View File

@@ -104,8 +104,6 @@ func TestWithArtifact(t *testing.T) {
         // maps
         "123": `{{ $m := map "a" "1" "b" "2" }}{{ index $m "a" }}{{ indexOrDefault $m "b" "10" }}{{ indexOrDefault $m "c" "3" }}{{ index $m "z" }}`,
     } {
-        tmpl := tmpl
-        expect := expect
         t.Run(expect, func(t *testing.T) {
             t.Parallel()
             result, err := New(ctx).WithArtifact(

View File

@@ -36,7 +36,6 @@ func TestVersion(t *testing.T) {
             builtBy: "me",
         },
     } {
-        tt := tt
         t.Run(name, func(t *testing.T) {
             v := buildVersion(tt.version, tt.commit, tt.date, tt.builtBy, tt.treeState)
             v.GoVersion = goVersion

View File

@@ -19,7 +19,6 @@ func TestArchive(t *testing.T) {
     require.NoError(t, os.Mkdir(folder+"/folder-inside", 0o755))
     for _, format := range []string{"tar.gz", "zip", "gz", "tar.xz", "tar", "tgz", "txz", "tar.zst"} {
-        format := format
         t.Run(format, func(t *testing.T) {
             f1, err := os.Create(filepath.Join(t.TempDir(), "1.tar"))
             require.NoError(t, err)