Mirror of https://github.com/goreleaser/goreleaser.git
refactor: remove unneeded in Go 1.22 loop var copy (#4856)
This PR cleans up unnecessary loop-variable copies and enables the [`copyloopvar`](https://golangci-lint.run/usage/linters/#copyloopvar) linter so that such redundant copies are caught in the future.

#### Additional notes

After the project upgraded to Go 1.22 in #4779, copying the loop variable inside a `for` loop became unnecessary; see this [blog post](https://go.dev/blog/loopvar-preview) for a detailed explanation. The `copyloopvar` linter is only available from `golangci-lint` v1.57 onwards, so this PR also updates that tool.
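For context, since Go 1.22 each iteration of a `for` loop gets its own copy of the loop variable, so closures no longer all observe the value of the final iteration; before 1.22 a manual `x := x` copy was the usual workaround. A minimal, self-contained sketch of the pattern this PR deletes (illustrative code, not taken from goreleaser):

```go
package main

import "fmt"

func main() {
	var printers []func()
	for _, s := range []string{"a", "b", "c"} {
		// Before Go 1.22 this copy was required, because every closure
		// captured the same loop variable and would print "c" three times:
		// s := s
		printers = append(printers, func() { fmt.Println(s) })
	}
	// With a go >= 1.22 directive in go.mod, each iteration has its own s,
	// so this prints a, b, c even without the manual copy.
	for _, p := range printers {
		p()
	}
}
```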
.github/workflows/lint.yml (vendored), 4 changed lines:
@@ -23,7 +23,7 @@ jobs:
           go-version: stable
           cache: false
       - name: golangci-lint
-        uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v3
+        uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
         with:
           args: --timeout=5m
-          version: v1.56.2
+          version: v1.58.1
@@ -3,6 +3,7 @@ run:
   timeout: 5m
 linters:
   enable:
+    - copyloopvar
     - thelper
     - gofumpt
     - bodyclose
@@ -483,7 +483,6 @@ func ByType(t Type) Filter {
 func ByFormats(formats ...string) Filter {
 	filters := make([]Filter, 0, len(formats))
 	for _, format := range formats {
-		format := format
 		filters = append(filters, func(a *Artifact) bool {
 			return a.Format() == format
 		})
@@ -495,7 +494,6 @@ func ByFormats(formats ...string) Filter {
 func ByIDs(ids ...string) Filter {
 	filters := make([]Filter, 0, len(ids))
 	for _, id := range ids {
-		id := id
 		filters = append(filters, func(a *Artifact) bool {
 			// checksum and source archive are always for all artifacts, so return always true.
 			return a.Type == Checksum ||
@@ -512,7 +510,6 @@ func ByIDs(ids ...string) Filter {
 func ByExt(exts ...string) Filter {
 	filters := make([]Filter, 0, len(exts))
 	for _, ext := range exts {
-		ext := ext
 		filters = append(filters, func(a *Artifact) bool {
 			return ExtraOr(*a, ExtraExt, "") == ext
 		})
@@ -44,7 +44,6 @@ func TestAdd(t *testing.T) {
 			Type: Checksum,
 		},
 	} {
-		a := a
 		g.Go(func() error {
 			artifacts.Add(a)
 			return nil
@@ -70,7 +70,6 @@ func executePublisher(ctx *context.Context, publisher config.Publisher) error {
 
 	g := semerrgroup.New(ctx.Parallelism)
 	for _, artifact := range artifacts {
-		artifact := artifact
 		g.Go(func() error {
 			c, err := resolveCommand(ctx, publisher, artifact)
 			if err != nil {
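Most hunks in this commit follow the shape above: a loop variable captured by a closure handed to `semerrgroup.Group.Go`, where the now-redundant copy is dropped. A self-contained sketch of why the copy used to matter for such goroutines, using the public `golang.org/x/sync/errgroup` package in place of goreleaser's internal `semerrgroup` wrapper (the artifact names here are made up for illustration):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	artifacts := []string{"app_linux_amd64.tar.gz", "app_darwin_arm64.tar.gz", "app_windows_amd64.zip"}

	var g errgroup.Group
	for _, artifact := range artifacts {
		// Pre-1.22, every goroutine captured the same loop variable, so
		// without the copy below they could all end up "publishing" the
		// final artifact; since Go 1.22 the copy is redundant.
		// artifact := artifact
		g.Go(func() error {
			fmt.Println("publishing", artifact)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}
```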
@@ -156,7 +156,6 @@ type ResponseChecker func(*h.Response) error
 func Upload(ctx *context.Context, uploads []config.Upload, kind string, check ResponseChecker) error {
 	// Handle every configured upload
 	for _, upload := range uploads {
-		upload := upload
 		filters := []artifact.Filter{}
 		if upload.Checksum {
 			filters = append(filters, artifact.ByType(artifact.Checksum))
@@ -206,7 +205,6 @@ func uploadWithFilter(ctx *context.Context, upload *config.Upload, filter artifa
 	log.Debugf("will upload %d artifacts", len(artifacts))
 	g := semerrgroup.New(ctx.Parallelism)
 	for _, artifact := range artifacts {
-		artifact := artifact
 		g.Go(func() error {
 			return uploadAsset(ctx, upload, artifact, kind, check)
 		})
@@ -91,7 +91,6 @@ func (Pipe) Default(ctx *context.Context) error {
 func (Pipe) Run(ctx *context.Context) error {
 	g := semerrgroup.New(ctx.Parallelism)
 	for i, archive := range ctx.Config.Archives {
-		archive := archive
 		if archive.Meta {
 			g.Go(func() error {
 				return createMeta(ctx, archive)
@@ -115,7 +114,6 @@ func (Pipe) Run(ctx *context.Context) error {
 		}
 		for group, artifacts := range artifacts {
 			log.Debugf("group %s has %d binaries", group, len(artifacts))
-			artifacts := artifacts
 			format := packageFormat(archive, artifacts[0].Goos)
 			switch format {
 			case "none":
@@ -36,7 +36,6 @@ func (Pipe) Publish(ctx *context.Context) error {
 	// Check requirements for every instance we have configured.
 	// If not fulfilled, we can skip this pipeline
 	for _, instance := range ctx.Config.Artifactories {
-		instance := instance
 		if skip := http.CheckConfig(ctx, &instance, "artifactory"); skip != nil {
 			return pipe.Skip(skip.Error())
 		}
@@ -57,7 +57,6 @@ func (Pipe) Publish(ctx *context.Context) error {
 	g := semerrgroup.New(ctx.Parallelism)
 	skips := pipe.SkipMemento{}
 	for _, conf := range ctx.Config.Blobs {
-		conf := conf
 		g.Go(func() error {
 			b, err := tmpl.New(ctx).Bool(conf.Disable)
 			if err != nil {
@@ -137,7 +137,6 @@ func doUpload(ctx *context.Context, conf config.Blob) error {
 
 	g := semerrgroup.New(ctx.Parallelism)
 	for _, artifact := range ctx.Artifacts.Filter(filter).List() {
-		artifact := artifact
 		g.Go(func() error {
 			// TODO: replace this with ?prefix=folder on the bucket url
 			dataFile := artifact.Path
@@ -152,8 +151,6 @@ func doUpload(ctx *context.Context, conf config.Blob) error {
 			return err
 		}
 		for name, fullpath := range files {
-			name := name
-			fullpath := fullpath
 			g.Go(func() error {
 				uploadFile := path.Join(dir, name)
 				return uploadData(ctx, conf, up, fullpath, uploadFile, bucketURL)
@@ -89,8 +89,6 @@ func buildWithDefaults(ctx *context.Context, build config.Build) (config.Build,
 
 func runPipeOnBuild(ctx *context.Context, g semerrgroup.Group, build config.Build) {
 	for _, target := range filter(ctx, build.Targets) {
-		target := target
-		build := build
 		g.Go(func() error {
 			opts, err := buildOptionsForTarget(ctx, build, target)
 			if err != nil {
@@ -141,8 +141,6 @@ func refreshAll(ctx *context.Context, filepath string) error {
 	g := semerrgroup.New(ctx.Parallelism)
 	sumLines := make([]string, len(artifactList))
 	for i, artifact := range artifactList {
-		i := i
-		artifact := artifact
 		g.Go(func() error {
 			sumLine, err := checksums(ctx.Config.Checksum.Algorithm, artifact)
 			if err != nil {
@@ -118,8 +118,6 @@ func (Pipe) Publish(ctx *context.Context) error {
 func (Pipe) Run(ctx *context.Context) error {
 	g := semerrgroup.NewSkipAware(semerrgroup.New(ctx.Parallelism))
 	for i, docker := range ctx.Config.Dockers {
-		i := i
-		docker := docker
 		g.Go(func() error {
 			log := log.WithField("index", i)
 			log.Debug("looking for artifacts matching")
@@ -1398,7 +1398,6 @@ func TestWithDigest(t *testing.T) {
 	})
 
 	for _, use := range []string{useDocker, useBuildx} {
-		use := use
 		t.Run(use, func(t *testing.T) {
 			t.Run("good", func(t *testing.T) {
 				require.Equal(t, "localhost:5050/owner/img:t1@sha256:d1", withDigest(use, "localhost:5050/owner/img:t1", artifacts.List()))
@@ -60,7 +60,6 @@ func (ManifestPipe) Default(ctx *context.Context) error {
 func (ManifestPipe) Publish(ctx *context.Context) error {
 	g := semerrgroup.NewSkipAware(semerrgroup.New(1))
 	for _, manifest := range ctx.Config.DockerManifests {
-		manifest := manifest
 		g.Go(func() error {
 			skip, err := tmpl.New(ctx).Apply(manifest.SkipPush)
 			if err != nil {
@@ -193,7 +193,6 @@ func TestPublishPipeSuccess(t *testing.T) {
 	repository := fmt.Sprintf("%sgoreleasertest/testapp", registry)
 
 	for _, table := range table {
-		table := table
 		t.Run(table.Name, func(t *testing.T) {
 			if len(table.Tags) == 0 {
 				table.Tags = []string{table.Name}
@@ -118,8 +118,6 @@ func doRun(ctx *context.Context, fpm config.NFPM) error {
 	g := semerrgroup.New(ctx.Parallelism)
 	for _, format := range fpm.Formats {
 		for _, artifacts := range linuxBinaries {
-			format := format
-			artifacts := artifacts
 			g.Go(func() error {
 				return create(ctx, fpm, format, artifacts)
 			})
@@ -56,8 +56,6 @@ func TestDescribeBodyMultipleChecksums(t *testing.T) {
 		"foo.zip": "271a74b75a12f6c3affc88df101f9ef29af79717b1b2f4bdd5964aacf65bcf40",
 	}
 	for name, check := range checksums {
-		name := name
-		check := check
 		checksumPath := filepath.Join(t.TempDir(), name+".sha256")
 		ctx.Artifacts.Add(&artifact.Artifact{
 			Name: name + ".sha256",
@@ -175,7 +175,6 @@ func doPublish(ctx *context.Context, client client.Client) error {
 
 	g := semerrgroup.New(ctx.Parallelism)
 	for _, artifact := range ctx.Artifacts.Filter(filters).List() {
-		artifact := artifact
 		g.Go(func() error {
 			return upload(ctx, client, releaseID, artifact)
 		})
@@ -720,7 +720,6 @@ func Test_doRun(t *testing.T) {
 			ctx := tt.args.ctx
 			ctx.Config.Dist = t.TempDir()
 			for _, a := range tt.artifacts {
-				a := a
 				a.Type = artifact.UploadableArchive
 				ctx.Artifacts.Add(&a)
 			}
@@ -193,7 +193,6 @@ func doRun(ctx *context.Context, snap config.Snapcraft) error {
 			log.WithField("arch", arch).Warn("ignored unsupported arch")
 			continue
 		}
-		binaries := binaries
 		g.Go(func() error {
 			return create(ctx, snap, arch, binaries)
 		})
@@ -53,7 +53,6 @@ func (Pipe) Default(ctx *context.Context) error {
 func (Pipe) Run(ctx *context.Context) error {
 	g := semerrgroup.NewSkipAware(semerrgroup.New(ctx.Parallelism))
 	for _, unibin := range ctx.Config.UniversalBinaries {
-		unibin := unibin
 		g.Go(func() error {
 			opts := build.Options{
 				Target: "darwin_all",
@@ -27,7 +27,6 @@ func (Pipe) Publish(ctx *context.Context) error {
 	// Check requirements for every instance we have configured.
 	// If not fulfilled, we can skip this pipeline
 	for _, instance := range ctx.Config.Uploads {
-		instance := instance
 		if skip := http.CheckConfig(ctx, &instance, "upload"); skip != nil {
 			return pipe.Skip(skip.Error())
 		}
@@ -32,7 +32,6 @@ func (Pipe) Skip(ctx *context.Context) bool { return len(ctx.Config.UPXs) == 0 }
 func (Pipe) Run(ctx *context.Context) error {
 	g := semerrgroup.NewSkipAware(semerrgroup.New(ctx.Parallelism))
 	for _, upx := range ctx.Config.UPXs {
-		upx := upx
 		enabled, err := tmpl.New(ctx).Bool(upx.Enabled)
 		if err != nil {
 			return err
@@ -44,7 +43,6 @@ func (Pipe) Run(ctx *context.Context) error {
 			return pipe.Skipf("%s not found in PATH", upx.Binary)
 		}
 		for _, bin := range findBinaries(ctx, upx) {
-			bin := bin
 			g.Go(func() error {
 				sizeBefore := sizeOf(bin.Path)
 				args := []string{
@@ -37,7 +37,6 @@ func TestSemaphoreOrder(t *testing.T) {
 	g := New(1)
 	output := []int{}
 	for i := 0; i < num; i++ {
-		i := i
 		g.Go(func() error {
 			output = append(output, i)
 			return nil
@@ -54,7 +53,6 @@ func TestSemaphoreError(t *testing.T) {
 	var lock sync.Mutex
 	output := []int{}
 	for i := 0; i < 10; i++ {
-		i := i
 		g.Go(func() error {
 			lock.Lock()
 			defer lock.Unlock()
@@ -90,7 +88,6 @@ func TestSemaphoreSkipAwareSingleError(t *testing.T) {
 		t.Run(fmt.Sprintf("limit-%d", i), func(t *testing.T) {
 			g := NewSkipAware(New(i))
 			for i := 0; i < 10; i++ {
-				i := i
 				g.Go(func() error {
 					time.Sleep(10 * time.Millisecond)
 					if i == 5 {
@@ -104,8 +104,6 @@ func TestWithArtifact(t *testing.T) {
 		// maps
 		"123": `{{ $m := map "a" "1" "b" "2" }}{{ index $m "a" }}{{ indexOrDefault $m "b" "10" }}{{ indexOrDefault $m "c" "3" }}{{ index $m "z" }}`,
 	} {
-		tmpl := tmpl
-		expect := expect
 		t.Run(expect, func(t *testing.T) {
 			t.Parallel()
 			result, err := New(ctx).WithArtifact(
@@ -36,7 +36,6 @@ func TestVersion(t *testing.T) {
 			builtBy: "me",
 		},
 	} {
-		tt := tt
 		t.Run(name, func(t *testing.T) {
 			v := buildVersion(tt.version, tt.commit, tt.date, tt.builtBy, tt.treeState)
 			v.GoVersion = goVersion
@@ -19,7 +19,6 @@ func TestArchive(t *testing.T) {
 	require.NoError(t, os.Mkdir(folder+"/folder-inside", 0o755))
 
 	for _, format := range []string{"tar.gz", "zip", "gz", "tar.xz", "tar", "tgz", "txz", "tar.zst"} {
-		format := format
 		t.Run(format, func(t *testing.T) {
 			f1, err := os.Create(filepath.Join(t.TempDir(), "1.tar"))
 			require.NoError(t, err)