2020-05-26 05:48:10 +02:00
|
|
|
// Package artifact provides the core artifact storage for goreleaser.
|
2017-12-17 19:14:21 +02:00
|
|
|
package artifact
|
|
|
|
|
2019-02-04 21:27:51 +02:00
|
|
|
// nolint: gosec
|
2017-12-17 20:31:06 +02:00
|
|
|
import (
|
2022-06-24 04:36:19 +02:00
|
|
|
"bytes"
|
2019-02-04 21:27:51 +02:00
|
|
|
"crypto/md5"
|
|
|
|
"crypto/sha1"
|
2018-08-21 20:55:35 +02:00
|
|
|
"crypto/sha256"
|
2019-02-04 21:27:51 +02:00
|
|
|
"crypto/sha512"
|
2018-08-21 20:55:35 +02:00
|
|
|
"encoding/hex"
|
2021-12-06 03:42:13 +02:00
|
|
|
"encoding/json"
|
2019-02-04 21:27:51 +02:00
|
|
|
"fmt"
|
|
|
|
"hash"
|
|
|
|
"hash/crc32"
|
2018-08-21 20:55:35 +02:00
|
|
|
"io"
|
|
|
|
"os"
|
2023-05-02 14:06:35 +02:00
|
|
|
"path/filepath"
|
|
|
|
"strings"
|
2017-12-17 20:31:06 +02:00
|
|
|
"sync"
|
2017-12-17 20:59:54 +02:00
|
|
|
|
2022-06-22 02:11:15 +02:00
|
|
|
"github.com/caarlos0/log"
|
2017-12-17 20:31:06 +02:00
|
|
|
)
|
2017-12-17 19:14:21 +02:00
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// Type defines the type of an artifact.
type Type int

// NOTE: Type values are produced by iota and serialized numerically as
// `internal_type` in JSON (see Artifact); append new types at the end to
// keep existing values stable.
const (
	// UploadableArchive a tar.gz/zip archive to be uploaded.
	UploadableArchive Type = iota + 1
	// UploadableBinary is a binary file to be uploaded.
	UploadableBinary
	// UploadableFile is any file that can be uploaded.
	UploadableFile
	// Binary is a binary (output of a gobuild).
	Binary
	// UniversalBinary is a binary that contains multiple binaries within.
	UniversalBinary
	// LinuxPackage is a linux package generated by nfpm.
	LinuxPackage
	// PublishableSnapcraft is a snap package yet to be published.
	PublishableSnapcraft
	// Snapcraft is a published snap package.
	Snapcraft
	// PublishableDockerImage is a Docker image yet to be published.
	PublishableDockerImage
	// DockerImage is a published Docker image.
	DockerImage
	// DockerManifest is a published Docker manifest.
	DockerManifest
	// Checksum is a checksums file.
	Checksum
	// Signature is a signature file.
	Signature
	// Certificate is a signing certificate file.
	Certificate
	// UploadableSourceArchive is the archive with the current commit source code.
	UploadableSourceArchive
	// BrewTap is an uploadable homebrew tap recipe file.
	BrewTap
	// Nixpkg is an uploadable nix package.
	Nixpkg
	// WingetInstaller winget installer file.
	WingetInstaller
	// WingetDefaultLocale winget default locale file.
	WingetDefaultLocale
	// WingetVersion winget version file.
	WingetVersion
	// PkgBuild is an Arch Linux AUR PKGBUILD file.
	PkgBuild
	// SrcInfo is an Arch Linux AUR .SRCINFO file.
	SrcInfo
	// KrewPluginManifest is a krew plugin manifest file.
	KrewPluginManifest
	// ScoopManifest is an uploadable scoop manifest file.
	ScoopManifest
	// SBOM is a Software Bill of Materials file.
	SBOM
	// PublishableChocolatey is a chocolatey package yet to be published.
	PublishableChocolatey
	// Header is a C header file, generated for CGo library builds.
	Header
	// CArchive is a C static library, generated via a CGo build with buildmode=c-archive.
	CArchive
	// CShared is a C shared library, generated via a CGo build with buildmode=c-shared.
	CShared
)
|
|
|
|
|
2018-09-15 23:53:59 +02:00
|
|
|
func (t Type) String() string {
|
|
|
|
switch t {
|
|
|
|
case UploadableArchive:
|
|
|
|
return "Archive"
|
2020-04-12 16:47:46 +02:00
|
|
|
case UploadableFile:
|
|
|
|
return "File"
|
2021-10-12 19:55:43 +02:00
|
|
|
case UploadableBinary, Binary, UniversalBinary:
|
2018-09-15 23:53:59 +02:00
|
|
|
return "Binary"
|
|
|
|
case LinuxPackage:
|
2018-10-20 18:45:31 +02:00
|
|
|
return "Linux Package"
|
feat: add digest to artifacts info of published docker images (#3540)
Extract the digest (sha256) of docker images from the `docker push`
command for dockers published to the docker registry.
Outputting the digest is required to avoid a race condition when
referencing the image, where the image tag is being modified before the
reference is done.
See this [blog
post](https://github.com/goreleaser/goreleaser/issues/3496) for more
info.
This PR fixes https://github.com/goreleaser/goreleaser/issues/3496.
Note that the 'publish' pipe now must run before the 'metadata' pipe, so
that the information extracted during the 'publish' pipe would appear in
the metadata.
Previously, the published docker images metadata wasn't printed (because
of the order). It made sense because the content of the published image
was just a subset of the local one.
Now that it is printed to the metadata, it should have a different name
to avoid confusion.
As I mentioned, it wasn't printed before - so there shouldn't be any
backward-compatibility issues.
---
Local tests:
```
go test -v .
=== RUN TestVersion
=== RUN TestVersion/only_version
=== RUN TestVersion/version_and_date
=== RUN TestVersion/version,_date,_built_by
=== RUN TestVersion/all_empty
=== RUN TestVersion/complete
--- PASS: TestVersion (0.00s)
--- PASS: TestVersion/only_version (0.00s)
--- PASS: TestVersion/version_and_date (0.00s)
--- PASS: TestVersion/version,_date,_built_by (0.00s)
--- PASS: TestVersion/all_empty (0.00s)
--- PASS: TestVersion/complete (0.00s)
PASS
ok github.com/goreleaser/goreleaser 0.764s
```
Output example:
```
{
"name": "gallegit/hello-world:latest",
"path": "gallegit/hello-world:latest",
"goos": "linux",
"goarch": "amd64",
"internal_type": 10,
"type": "Published Docker Image",
"extra": {
"digest": "sha256:c3f7dd196a046dc061236d3c6ae1e2946269e90da30b0a959240ca799750e632"
}
}
```
Signed-off-by: Carlos A Becker <caarlos0@users.noreply.github.com>
Co-authored-by: Carlos Alexandro Becker <caarlos0@users.noreply.github.com>
2022-11-12 19:51:53 +02:00
|
|
|
case PublishableDockerImage:
|
2018-10-20 18:45:31 +02:00
|
|
|
return "Docker Image"
|
feat: add digest to artifacts info of published docker images (#3540)
Extract the digest (sha256) of docker images from the `docker push`
command for dockers published to the docker registry.
Outputting the digest is required to avoid a race condition when
referencing the image, where the image tag is being modified before the
reference is done.
See this [blog
post](https://github.com/goreleaser/goreleaser/issues/3496) for more
info.
This PR fixes https://github.com/goreleaser/goreleaser/issues/3496.
Note that the 'publish' pipe now must run before the 'metadata' pipe, so
that the information extracted during the 'publish' pipe would appear in
the metadata.
Previously, the published docker images metadata wasn't printed (because
of the order). It made sense because the content of the published image
was just a subset of the local one.
Now that it is printed to the metadata, it should have a different name
to avoid confusion.
As I mentioned, it wasn't printed before - so there shouldn't be any
backward-compatibility issues.
---
Local tests:
```
go test -v .
=== RUN TestVersion
=== RUN TestVersion/only_version
=== RUN TestVersion/version_and_date
=== RUN TestVersion/version,_date,_built_by
=== RUN TestVersion/all_empty
=== RUN TestVersion/complete
--- PASS: TestVersion (0.00s)
--- PASS: TestVersion/only_version (0.00s)
--- PASS: TestVersion/version_and_date (0.00s)
--- PASS: TestVersion/version,_date,_built_by (0.00s)
--- PASS: TestVersion/all_empty (0.00s)
--- PASS: TestVersion/complete (0.00s)
PASS
ok github.com/goreleaser/goreleaser 0.764s
```
Output example:
```
{
"name": "gallegit/hello-world:latest",
"path": "gallegit/hello-world:latest",
"goos": "linux",
"goarch": "amd64",
"internal_type": 10,
"type": "Published Docker Image",
"extra": {
"digest": "sha256:c3f7dd196a046dc061236d3c6ae1e2946269e90da30b0a959240ca799750e632"
}
}
```
Signed-off-by: Carlos A Becker <caarlos0@users.noreply.github.com>
Co-authored-by: Carlos Alexandro Becker <caarlos0@users.noreply.github.com>
2022-11-12 19:51:53 +02:00
|
|
|
case DockerImage:
|
|
|
|
return "Published Docker Image"
|
2020-11-29 19:33:31 +02:00
|
|
|
case DockerManifest:
|
|
|
|
return "Docker Manifest"
|
2020-09-02 21:44:02 +02:00
|
|
|
case PublishableSnapcraft, Snapcraft:
|
2020-04-12 16:47:46 +02:00
|
|
|
return "Snap"
|
2018-09-15 23:53:59 +02:00
|
|
|
case Checksum:
|
|
|
|
return "Checksum"
|
|
|
|
case Signature:
|
|
|
|
return "Signature"
|
2021-11-12 03:56:03 +02:00
|
|
|
case Certificate:
|
|
|
|
return "Certificate"
|
2020-04-12 16:47:46 +02:00
|
|
|
case UploadableSourceArchive:
|
|
|
|
return "Source"
|
2021-09-18 15:21:29 +02:00
|
|
|
case BrewTap:
|
|
|
|
return "Brew Tap"
|
2021-11-11 14:37:58 +02:00
|
|
|
case KrewPluginManifest:
|
|
|
|
return "Krew Plugin Manifest"
|
2021-09-18 15:21:29 +02:00
|
|
|
case ScoopManifest:
|
|
|
|
return "Scoop Manifest"
|
2021-12-12 05:21:51 +02:00
|
|
|
case SBOM:
|
|
|
|
return "SBOM"
|
2022-01-20 19:59:39 +02:00
|
|
|
case PkgBuild:
|
|
|
|
return "PKGBUILD"
|
|
|
|
case SrcInfo:
|
|
|
|
return "SRCINFO"
|
feat: chocolatey support (#3509)
This PR adds support for generating the structure used to pack and push
Chocolatey Packages. And will solve the #3154
Is not ready for merge yet, but has the main structure, and ready for
comments.
Accordingly to Chocolatey, in order to build a package, it's necessary a
`.nuspec` and `chocolateyinstall.ps1` files at least, having these ones,
we could pack and distribute without adding the binary inside the final
package and that was implemented here.
To complete, will be necessary to define the package build and
distribute, however will be required to have Chocolatey installed
(Windows Only). One of alternatives that I thought was, publish the
files like Scoop and Brew in a separate repository, and there we could
use `chocolatey` through
[crazy-max/ghaction-chocolatey](https://github.com/crazy-max/ghaction-chocolatey).
Chocolatey has a lot of good examples of repositories:
https://github.com/chocolatey-community/chocolatey-packages/tree/master/automatic/curl
A final compilation of the missing parts:
- [x] How to pack and push (chocolatey)
- [x] Documentation
Sorry for the long description😄
All feedback very welcome!
Co-authored-by: Carlos Alexandro Becker <caarlos0@users.noreply.github.com>
2022-11-12 04:52:32 +02:00
|
|
|
case PublishableChocolatey:
|
|
|
|
return "Chocolatey"
|
2022-11-12 04:35:51 +02:00
|
|
|
case Header:
|
|
|
|
return "C Header"
|
|
|
|
case CArchive:
|
|
|
|
return "C Archive Library"
|
|
|
|
case CShared:
|
|
|
|
return "C Shared Library"
|
2023-06-15 04:59:55 +02:00
|
|
|
case WingetInstaller, WingetDefaultLocale, WingetVersion:
|
|
|
|
return "Winget Manifest"
|
2020-09-02 21:44:02 +02:00
|
|
|
default:
|
|
|
|
return "unknown"
|
2018-09-15 23:53:59 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-17 03:46:11 +02:00
|
|
|
// Keys for well-known entries of Artifact.Extra; use these instead of raw
// strings to avoid typos.
const (
	ExtraID        = "ID"
	ExtraBinary    = "Binary"
	ExtraExt       = "Ext"
	ExtraBuilds    = "Builds" // deprecated
	ExtraFormat    = "Format"
	ExtraWrappedIn = "WrappedIn"
	ExtraBinaries  = "Binaries"
	// ExtraRefresh holds a func() error and is skipped during JSON
	// serialization (see Extras.MarshalJSON).
	ExtraRefresh  = "Refresh"
	ExtraReplaces = "Replaces"
	ExtraDigest   = "Digest"
	ExtraSize     = "Size"
)
|
|
|
|
|
2021-12-06 03:42:13 +02:00
|
|
|
// Extras represents the extra fields in an artifact.
|
2022-06-24 04:36:19 +02:00
|
|
|
type Extras map[string]any
|
2021-12-06 03:42:13 +02:00
|
|
|
|
|
|
|
func (e Extras) MarshalJSON() ([]byte, error) {
|
2022-06-24 04:36:19 +02:00
|
|
|
m := map[string]any{}
|
2021-12-06 03:42:13 +02:00
|
|
|
for k, v := range e {
|
|
|
|
if k == ExtraRefresh {
|
|
|
|
// refresh is a func, so we can't serialize it.
|
2022-06-22 06:48:11 +02:00
|
|
|
continue
|
2021-12-06 03:42:13 +02:00
|
|
|
}
|
|
|
|
m[k] = v
|
|
|
|
}
|
|
|
|
return json.Marshal(m)
|
|
|
|
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// Artifact represents an artifact and its relevant info.
type Artifact struct {
	Name    string `json:"name,omitempty"`
	Path    string `json:"path,omitempty"`
	Goos    string `json:"goos,omitempty"`
	Goarch  string `json:"goarch,omitempty"`
	Goarm   string `json:"goarm,omitempty"`
	Gomips  string `json:"gomips,omitempty"`
	Goamd64 string `json:"goamd64,omitempty"`
	// Type is the numeric artifact type; note it serializes as
	// "internal_type", while TypeS carries the human-readable name.
	Type  Type   `json:"internal_type,omitempty"`
	TypeS string `json:"type,omitempty"`
	// Extra holds free-form per-artifact metadata; see the Extra* key
	// constants for well-known entries.
	Extra Extras `json:"extra,omitempty"`
}
|
|
|
|
|
2021-12-08 02:53:39 +02:00
|
|
|
// String implements fmt.Stringer by returning the artifact name.
func (a Artifact) String() string {
	return a.Name
}
|
|
|
|
|
2022-06-24 04:36:19 +02:00
|
|
|
// Extra tries to get the extra field with the given name, returning either
|
|
|
|
// its value, the default value for its type, or an error.
|
|
|
|
//
|
|
|
|
// If the extra value cannot be cast into the given type, it'll try to convert
|
|
|
|
// it to JSON and unmarshal it into the correct type after.
|
|
|
|
//
|
|
|
|
// If that fails as well, it'll error.
|
|
|
|
func Extra[T any](a Artifact, key string) (T, error) {
|
|
|
|
ex := a.Extra[key]
|
|
|
|
if ex == nil {
|
|
|
|
return *(new(T)), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
t, ok := ex.(T)
|
|
|
|
if ok {
|
|
|
|
return t, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
bts, err := json.Marshal(ex)
|
|
|
|
if err != nil {
|
|
|
|
return t, err
|
|
|
|
}
|
|
|
|
|
|
|
|
decoder := json.NewDecoder(bytes.NewReader(bts))
|
|
|
|
decoder.DisallowUnknownFields()
|
|
|
|
err = decoder.Decode(&t)
|
|
|
|
return t, err
|
|
|
|
}
|
|
|
|
|
2019-01-01 18:40:17 +02:00
|
|
|
// ExtraOr returns the Extra field with the given key or the or value specified
|
|
|
|
// if it is nil.
|
2022-06-24 04:36:19 +02:00
|
|
|
func ExtraOr[T any](a Artifact, key string, or T) T {
|
2019-01-01 18:40:17 +02:00
|
|
|
if a.Extra[key] == nil {
|
|
|
|
return or
|
|
|
|
}
|
2022-06-24 04:36:19 +02:00
|
|
|
return a.Extra[key].(T)
|
2017-12-17 19:14:21 +02:00
|
|
|
}
|
|
|
|
|
2019-02-04 21:27:51 +02:00
|
|
|
// Checksum calculates the checksum of the artifact.
|
|
|
|
// nolint: gosec
|
|
|
|
func (a Artifact) Checksum(algorithm string) (string, error) {
|
|
|
|
log.Debugf("calculating checksum for %s", a.Path)
|
2018-08-21 20:55:35 +02:00
|
|
|
file, err := os.Open(a.Path)
|
|
|
|
if err != nil {
|
2020-09-21 19:47:51 +02:00
|
|
|
return "", fmt.Errorf("failed to checksum: %w", err)
|
2018-08-21 20:55:35 +02:00
|
|
|
}
|
2020-05-26 05:48:10 +02:00
|
|
|
defer file.Close()
|
2019-02-04 21:27:51 +02:00
|
|
|
var h hash.Hash
|
|
|
|
switch algorithm {
|
|
|
|
case "crc32":
|
|
|
|
h = crc32.NewIEEE()
|
|
|
|
case "md5":
|
|
|
|
h = md5.New()
|
|
|
|
case "sha224":
|
|
|
|
h = sha256.New224()
|
|
|
|
case "sha384":
|
|
|
|
h = sha512.New384()
|
|
|
|
case "sha256":
|
|
|
|
h = sha256.New()
|
|
|
|
case "sha1":
|
|
|
|
h = sha1.New()
|
|
|
|
case "sha512":
|
|
|
|
h = sha512.New()
|
|
|
|
default:
|
2020-09-02 21:44:25 +02:00
|
|
|
return "", fmt.Errorf("invalid algorithm: %s", algorithm)
|
2019-02-04 21:27:51 +02:00
|
|
|
}
|
2021-09-18 15:21:29 +02:00
|
|
|
|
|
|
|
if _, err := io.Copy(h, file); err != nil {
|
2020-09-21 19:47:51 +02:00
|
|
|
return "", fmt.Errorf("failed to checksum: %w", err)
|
2018-08-21 20:55:35 +02:00
|
|
|
}
|
2019-02-04 21:27:51 +02:00
|
|
|
return hex.EncodeToString(h.Sum(nil)), nil
|
2018-08-21 20:55:35 +02:00
|
|
|
}
|
|
|
|
|
2021-12-06 03:42:13 +02:00
|
|
|
var noRefresh = func() error { return nil }
|
|
|
|
|
|
|
|
// Refresh executes a Refresh extra function on artifacts, if it exists.
|
|
|
|
func (a Artifact) Refresh() error {
|
|
|
|
// for now lets only do it for checksums, as we know for a fact that
|
|
|
|
// they are the only ones that support this right now.
|
|
|
|
if a.Type != Checksum {
|
|
|
|
return nil
|
|
|
|
}
|
2022-06-24 04:36:19 +02:00
|
|
|
if err := ExtraOr(a, ExtraRefresh, noRefresh)(); err != nil {
|
2021-12-06 03:42:13 +02:00
|
|
|
return fmt.Errorf("failed to refresh %q: %w", a.Name, err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-10-17 03:46:11 +02:00
|
|
|
// ID returns the artifact ID if it exists, empty otherwise.
// Panics if the ID extra is present but not a string (via ExtraOr).
func (a Artifact) ID() string {
	return ExtraOr(a, ExtraID, "")
}

// Format returns the artifact Format if it exists, empty otherwise.
// Panics if the Format extra is present but not a string (via ExtraOr).
func (a Artifact) Format() string {
	return ExtraOr(a, ExtraFormat, "")
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// Artifacts is a list of artifacts.
type Artifacts struct {
	items []*Artifact
	// lock guards items; construct via New so it is non-nil.
	lock *sync.Mutex
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// New return a new list of artifacts.
|
2023-03-01 06:05:30 +02:00
|
|
|
func New() *Artifacts {
|
|
|
|
return &Artifacts{
|
2019-08-12 22:44:48 +02:00
|
|
|
items: []*Artifact{},
|
2017-12-17 19:14:21 +02:00
|
|
|
lock: &sync.Mutex{},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// List return the actual list of artifacts.
//
// NOTE: the returned slice is the internal one, not a copy — callers share
// its backing array with this Artifacts instance.
func (artifacts *Artifacts) List() []*Artifact {
	artifacts.lock.Lock()
	defer artifacts.lock.Unlock()
	return artifacts.items
}
|
|
|
|
|
2022-01-20 19:59:39 +02:00
|
|
|
// GroupByID groups the artifacts by their ID.
|
2023-03-01 06:05:30 +02:00
|
|
|
func (artifacts *Artifacts) GroupByID() map[string][]*Artifact {
|
2022-01-20 19:59:39 +02:00
|
|
|
result := map[string][]*Artifact{}
|
2022-08-16 07:05:36 +02:00
|
|
|
for _, a := range artifacts.List() {
|
2022-01-24 04:49:40 +02:00
|
|
|
id := a.ID()
|
|
|
|
if id == "" {
|
|
|
|
continue
|
|
|
|
}
|
2022-01-20 19:59:39 +02:00
|
|
|
result[a.ID()] = append(result[a.ID()], a)
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// GroupByPlatform groups the artifacts by their platform.
|
2023-03-01 06:05:30 +02:00
|
|
|
func (artifacts *Artifacts) GroupByPlatform() map[string][]*Artifact {
|
2021-04-25 19:20:49 +02:00
|
|
|
result := map[string][]*Artifact{}
|
2022-08-16 07:05:36 +02:00
|
|
|
for _, a := range artifacts.List() {
|
2022-04-12 03:43:22 +02:00
|
|
|
plat := a.Goos + a.Goarch + a.Goarm + a.Gomips + a.Goamd64
|
2017-12-18 13:19:02 +02:00
|
|
|
result[plat] = append(result[plat], a)
|
2017-12-17 19:50:09 +02:00
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2023-05-02 14:06:35 +02:00
|
|
|
func relPath(a *Artifact) (string, error) {
|
|
|
|
cwd, err := os.Getwd()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
if !strings.HasPrefix(a.Path, cwd) {
|
|
|
|
return "", nil
|
|
|
|
}
|
|
|
|
return filepath.Rel(cwd, a.Path)
|
|
|
|
}
|
|
|
|
|
|
|
|
func shouldRelPath(a *Artifact) bool {
|
|
|
|
switch a.Type {
|
|
|
|
case DockerImage, DockerManifest, PublishableDockerImage:
|
|
|
|
return false
|
|
|
|
default:
|
|
|
|
return filepath.IsAbs(a.Path)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// Add safely adds a new artifact to an artifact list.
|
2019-08-12 22:44:48 +02:00
|
|
|
func (artifacts *Artifacts) Add(a *Artifact) {
|
2017-12-17 19:14:21 +02:00
|
|
|
artifacts.lock.Lock()
|
|
|
|
defer artifacts.lock.Unlock()
|
2023-05-02 14:06:35 +02:00
|
|
|
if shouldRelPath(a) {
|
|
|
|
rel, err := relPath(a)
|
|
|
|
if rel != "" && err == nil {
|
|
|
|
a.Path = rel
|
|
|
|
}
|
|
|
|
}
|
|
|
|
log.WithField("name", a.Name).
|
|
|
|
WithField("type", a.Type).
|
|
|
|
WithField("path", a.Path).
|
|
|
|
Debug("added new artifact")
|
2017-12-17 19:14:21 +02:00
|
|
|
artifacts.items = append(artifacts.items, a)
|
|
|
|
}
|
|
|
|
|
2021-10-12 19:55:43 +02:00
|
|
|
// Remove removes artifacts that match the given filter from the original artifact list.
|
|
|
|
func (artifacts *Artifacts) Remove(filter Filter) error {
|
|
|
|
if filter == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
artifacts.lock.Lock()
|
|
|
|
defer artifacts.lock.Unlock()
|
|
|
|
|
|
|
|
result := New()
|
|
|
|
for _, a := range artifacts.items {
|
|
|
|
if filter(a) {
|
2023-05-02 14:06:35 +02:00
|
|
|
log.WithField("name", a.Name).
|
|
|
|
WithField("type", a.Type).
|
|
|
|
WithField("path", a.Path).
|
|
|
|
Debug("removing")
|
2021-10-12 19:55:43 +02:00
|
|
|
} else {
|
|
|
|
result.items = append(result.items, a)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
artifacts.items = result.items
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-12-17 19:14:21 +02:00
|
|
|
// Filter defines an artifact filter which can be used within the Filter
// function. It returns true when the artifact matches.
type Filter func(a *Artifact) bool
|
2017-12-17 19:14:21 +02:00
|
|
|
|
2021-12-08 02:53:39 +02:00
|
|
|
// OnlyReplacingUnibins removes universal binaries that did not replace the single-arch ones.
//
// This is useful specially on homebrew et al, where you'll want to use only either the single-arch or the universal binaries.
func OnlyReplacingUnibins(a *Artifact) bool {
	// defaults to true, so artifacts without the Replaces extra are kept.
	return ExtraOr(*a, ExtraReplaces, true)
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// ByGoos is a predefined filter that filters by the given goos.
|
2017-12-17 19:14:21 +02:00
|
|
|
func ByGoos(s string) Filter {
|
2019-08-12 22:44:48 +02:00
|
|
|
return func(a *Artifact) bool {
|
2017-12-17 19:14:21 +02:00
|
|
|
return a.Goos == s
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// ByGoarch is a predefined filter that filters by the given goarch.
|
2017-12-17 19:14:21 +02:00
|
|
|
func ByGoarch(s string) Filter {
|
2019-08-12 22:44:48 +02:00
|
|
|
return func(a *Artifact) bool {
|
2017-12-17 19:14:21 +02:00
|
|
|
return a.Goarch == s
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// ByGoarm is a predefined filter that filters by the given goarm.
|
2017-12-17 19:14:21 +02:00
|
|
|
func ByGoarm(s string) Filter {
|
2019-08-12 22:44:48 +02:00
|
|
|
return func(a *Artifact) bool {
|
2017-12-17 19:14:21 +02:00
|
|
|
return a.Goarm == s
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-12 03:43:22 +02:00
|
|
|
// ByGoamd64 is a predefined filter that filters by the given goamd64.
|
|
|
|
func ByGoamd64(s string) Filter {
|
|
|
|
return func(a *Artifact) bool {
|
|
|
|
return a.Goamd64 == s
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// ByType is a predefined filter that filters by the given type.
|
2017-12-17 19:14:21 +02:00
|
|
|
func ByType(t Type) Filter {
|
2019-08-12 22:44:48 +02:00
|
|
|
return func(a *Artifact) bool {
|
2017-12-17 19:14:21 +02:00
|
|
|
return a.Type == t
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-10 15:35:19 +02:00
|
|
|
// ByFormats filters artifacts by a `Format` extra field.
|
|
|
|
func ByFormats(formats ...string) Filter {
|
2021-04-25 19:20:49 +02:00
|
|
|
filters := make([]Filter, 0, len(formats))
|
2019-06-10 15:35:19 +02:00
|
|
|
for _, format := range formats {
|
|
|
|
format := format
|
2019-08-12 22:44:48 +02:00
|
|
|
filters = append(filters, func(a *Artifact) bool {
|
2021-10-17 03:46:11 +02:00
|
|
|
return a.Format() == format
|
2019-06-10 15:35:19 +02:00
|
|
|
})
|
|
|
|
}
|
|
|
|
return Or(filters...)
|
|
|
|
}
|
|
|
|
|
2019-04-16 15:19:15 +02:00
|
|
|
// ByIDs filter artifacts by an `ID` extra field.
|
|
|
|
func ByIDs(ids ...string) Filter {
|
2021-04-25 19:20:49 +02:00
|
|
|
filters := make([]Filter, 0, len(ids))
|
2019-04-16 15:19:15 +02:00
|
|
|
for _, id := range ids {
|
|
|
|
id := id
|
2019-08-12 22:44:48 +02:00
|
|
|
filters = append(filters, func(a *Artifact) bool {
|
2020-04-12 16:47:46 +02:00
|
|
|
// checksum and source archive are always for all artifacts, so return always true.
|
|
|
|
return a.Type == Checksum ||
|
|
|
|
a.Type == UploadableSourceArchive ||
|
2021-10-17 03:46:11 +02:00
|
|
|
a.ID() == id
|
2019-04-16 15:19:15 +02:00
|
|
|
})
|
|
|
|
}
|
|
|
|
return Or(filters...)
|
|
|
|
}
|
|
|
|
|
2022-03-19 22:44:34 +02:00
|
|
|
// ByExt filter artifact by their 'Ext' extra field.
|
|
|
|
func ByExt(exts ...string) Filter {
|
|
|
|
filters := make([]Filter, 0, len(exts))
|
|
|
|
for _, ext := range exts {
|
|
|
|
ext := ext
|
|
|
|
filters = append(filters, func(a *Artifact) bool {
|
2022-06-24 04:36:19 +02:00
|
|
|
return ExtraOr(*a, ExtraExt, "") == ext
|
2022-03-19 22:44:34 +02:00
|
|
|
})
|
|
|
|
}
|
|
|
|
return Or(filters...)
|
|
|
|
}
|
|
|
|
|
2022-02-25 16:28:09 +02:00
|
|
|
// ByBinaryLikeArtifacts filter artifacts down to artifacts that are Binary, UploadableBinary, or UniversalBinary,
|
|
|
|
// deduplicating artifacts by path (preferring UploadableBinary over all others). Note: this filter is unique in the
|
|
|
|
// sense that it cannot act in isolation of the state of other artifacts; the filter requires the whole list of
|
|
|
|
// artifacts in advance to perform deduplication.
|
2023-03-01 06:05:30 +02:00
|
|
|
func ByBinaryLikeArtifacts(arts *Artifacts) Filter {
|
2022-02-25 16:28:09 +02:00
|
|
|
// find all of the paths for any uploadable binary artifacts
|
|
|
|
uploadableBins := arts.Filter(ByType(UploadableBinary)).List()
|
2022-02-25 18:55:32 +02:00
|
|
|
uploadableBinPaths := map[string]struct{}{}
|
2022-02-25 16:28:09 +02:00
|
|
|
for _, a := range uploadableBins {
|
2022-02-25 18:55:32 +02:00
|
|
|
uploadableBinPaths[a.Path] = struct{}{}
|
2022-02-25 16:28:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// we want to keep any matching artifact that is not a binary that already has a path accounted for
|
|
|
|
// by another uploadable binary. We always prefer uploadable binary artifacts over binary artifacts.
|
|
|
|
deduplicateByPath := func(a *Artifact) bool {
|
|
|
|
if a.Type == UploadableBinary {
|
|
|
|
return true
|
|
|
|
}
|
2022-02-25 18:55:32 +02:00
|
|
|
_, ok := uploadableBinPaths[a.Path]
|
|
|
|
return !ok
|
2022-02-25 16:28:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return And(
|
|
|
|
// allow all of the binary-like artifacts as possible...
|
|
|
|
Or(
|
|
|
|
ByType(Binary),
|
|
|
|
ByType(UploadableBinary),
|
|
|
|
ByType(UniversalBinary),
|
|
|
|
),
|
|
|
|
// ... but remove any duplicates found
|
|
|
|
deduplicateByPath,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// Or performs an OR between all given filters.
|
2017-12-17 20:59:54 +02:00
|
|
|
func Or(filters ...Filter) Filter {
|
2019-08-12 22:44:48 +02:00
|
|
|
return func(a *Artifact) bool {
|
2017-12-17 20:59:54 +02:00
|
|
|
for _, f := range filters {
|
|
|
|
if f(a) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-26 05:48:10 +02:00
|
|
|
// And performs an AND between all given filters.
|
2017-12-17 20:59:54 +02:00
|
|
|
func And(filters ...Filter) Filter {
|
2019-08-12 22:44:48 +02:00
|
|
|
return func(a *Artifact) bool {
|
2017-12-17 20:59:54 +02:00
|
|
|
for _, f := range filters {
|
|
|
|
if !f(a) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-12-17 19:14:21 +02:00
|
|
|
// Filter filters the artifact list, returning a new instance.
|
|
|
|
// There are some pre-defined filters but anything of the Type Filter
|
|
|
|
// is accepted.
|
2017-12-17 20:59:54 +02:00
|
|
|
// You can compose filters by using the And and Or filters.
|
2023-03-01 06:05:30 +02:00
|
|
|
func (artifacts *Artifacts) Filter(filter Filter) *Artifacts {
|
2020-05-10 18:03:49 +02:00
|
|
|
if filter == nil {
|
2023-03-01 06:05:30 +02:00
|
|
|
return artifacts
|
2020-05-10 18:03:49 +02:00
|
|
|
}
|
|
|
|
|
2021-04-25 19:20:49 +02:00
|
|
|
result := New()
|
2022-08-16 07:05:36 +02:00
|
|
|
for _, a := range artifacts.List() {
|
2017-12-17 20:59:54 +02:00
|
|
|
if filter(a) {
|
2017-12-17 22:01:58 +02:00
|
|
|
result.items = append(result.items, a)
|
2017-12-17 19:14:21 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
2021-01-07 21:21:12 +02:00
|
|
|
|
|
|
|
// Paths returns the artifact.Path of the current artifact list.
|
2023-03-01 06:05:30 +02:00
|
|
|
func (artifacts *Artifacts) Paths() []string {
|
2021-01-07 21:21:12 +02:00
|
|
|
var result []string
|
|
|
|
for _, artifact := range artifacts.List() {
|
|
|
|
result = append(result, artifact.Path)
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
2021-12-06 03:42:13 +02:00
|
|
|
|
|
|
|
// VisitFn is a function that can be executed against each artifact in a list.
|
|
|
|
type VisitFn func(a *Artifact) error
|
|
|
|
|
|
|
|
// Visit executes the given function for each artifact in the list.
|
2023-03-01 06:05:30 +02:00
|
|
|
func (artifacts *Artifacts) Visit(fn VisitFn) error {
|
2022-08-16 07:05:36 +02:00
|
|
|
for _, artifact := range artifacts.List() {
|
2021-12-06 03:42:13 +02:00
|
|
|
if err := fn(artifact); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|