2021-05-01 03:53:00 +10:00
|
|
|
/*
|
2021-05-26 04:44:52 +10:00
|
|
|
Copyright 2018 Google LLC All Rights Reserved.
|
2021-05-01 03:53:00 +10:00
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
2019-03-14 14:23:47 -04:00
|
|
|
|
2019-04-26 15:56:15 -07:00
|
|
|
package commands
|
2019-03-14 14:23:47 -04:00
|
|
|
|
|
|
|
import (
|
2019-11-05 15:24:08 -05:00
|
|
|
"bytes"
|
2019-12-13 15:08:52 -08:00
|
|
|
"context"
|
2019-04-30 13:08:54 -05:00
|
|
|
"errors"
|
2019-03-14 14:23:47 -04:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
2020-12-21 16:53:00 -08:00
|
|
|
"path"
|
2020-11-20 13:12:19 -08:00
|
|
|
"strings"
|
2019-03-14 14:23:47 -04:00
|
|
|
"sync"
|
|
|
|
|
|
|
|
"github.com/google/go-containerregistry/pkg/name"
|
2020-02-11 10:44:25 -08:00
|
|
|
"golang.org/x/sync/errgroup"
|
2019-11-05 15:24:08 -05:00
|
|
|
"gopkg.in/yaml.v3"
|
|
|
|
"k8s.io/apimachinery/pkg/labels"
|
2021-10-21 16:14:06 +11:00
|
|
|
|
|
|
|
"github.com/google/ko/pkg/build"
|
|
|
|
"github.com/google/ko/pkg/commands/options"
|
|
|
|
"github.com/google/ko/pkg/publish"
|
|
|
|
"github.com/google/ko/pkg/resolve"
|
2019-03-14 14:23:47 -04:00
|
|
|
)
|
|
|
|
|
2021-05-26 04:44:52 +10:00
|
|
|
// ua returns the ko user agent.
|
2019-12-13 15:08:52 -08:00
|
|
|
func ua() string {
|
|
|
|
if v := version(); v != "" {
|
|
|
|
return "ko/" + v
|
|
|
|
}
|
|
|
|
return "ko"
|
|
|
|
}
|
|
|
|
|
2019-09-11 10:07:02 -07:00
|
|
|
func gobuildOptions(bo *options.BuildOptions) ([]build.Option, error) {
|
2019-03-14 14:23:47 -04:00
|
|
|
creationTime, err := getCreationTime()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-12-21 16:53:00 -08:00
|
|
|
|
2021-06-15 21:50:35 +02:00
|
|
|
kodataCreationTime, err := getKoDataCreationTime()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2022-01-04 11:19:52 -08:00
|
|
|
if len(bo.Platforms) == 0 {
|
|
|
|
envPlatform := "linux/amd64"
|
2020-12-21 16:53:00 -08:00
|
|
|
|
|
|
|
goos, goarch, goarm := os.Getenv("GOOS"), os.Getenv("GOARCH"), os.Getenv("GOARM")
|
|
|
|
|
|
|
|
// Default to linux/amd64 unless GOOS and GOARCH are set.
|
|
|
|
if goos != "" && goarch != "" {
|
2022-01-04 11:19:52 -08:00
|
|
|
envPlatform = path.Join(goos, goarch)
|
2020-12-21 16:53:00 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Use GOARM for variant if it's set and GOARCH is arm.
|
|
|
|
if strings.Contains(goarch, "arm") && goarm != "" {
|
2022-01-04 11:19:52 -08:00
|
|
|
envPlatform = path.Join(envPlatform, "v"+goarm)
|
2020-12-21 16:53:00 -08:00
|
|
|
}
|
2022-01-04 11:19:52 -08:00
|
|
|
|
|
|
|
bo.Platforms = []string{envPlatform}
|
2020-12-21 16:53:00 -08:00
|
|
|
} else {
|
|
|
|
// Make sure these are all unset
|
|
|
|
for _, env := range []string{"GOOS", "GOARCH", "GOARM"} {
|
|
|
|
if s, ok := os.LookupEnv(env); ok {
|
|
|
|
return nil, fmt.Errorf("cannot use --platform with %s=%q", env, s)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-14 14:23:47 -04:00
|
|
|
opts := []build.Option{
|
2021-12-15 16:08:27 -05:00
|
|
|
build.WithBaseImages(getBaseImage(bo)),
|
2022-01-04 11:19:52 -08:00
|
|
|
build.WithPlatforms(bo.Platforms...),
|
2021-12-09 11:02:14 -08:00
|
|
|
build.WithJobs(bo.ConcurrentBuilds),
|
2019-03-14 14:23:47 -04:00
|
|
|
}
|
|
|
|
if creationTime != nil {
|
|
|
|
opts = append(opts, build.WithCreationTime(*creationTime))
|
|
|
|
}
|
2021-06-15 21:50:35 +02:00
|
|
|
if kodataCreationTime != nil {
|
|
|
|
opts = append(opts, build.WithKoDataCreationTime(*kodataCreationTime))
|
|
|
|
}
|
2019-09-11 10:07:02 -07:00
|
|
|
if bo.DisableOptimizations {
|
2019-04-18 10:18:29 -07:00
|
|
|
opts = append(opts, build.WithDisabledOptimizations())
|
|
|
|
}
|
2021-11-22 10:57:13 -08:00
|
|
|
switch bo.SBOM {
|
|
|
|
case "none":
|
|
|
|
opts = append(opts, build.WithDisabledSBOM())
|
2021-11-22 14:19:43 -08:00
|
|
|
case "go.version-m":
|
|
|
|
opts = append(opts, build.WithGoVersionSBOM())
|
2022-02-11 16:49:53 -05:00
|
|
|
case "cyclonedx":
|
|
|
|
opts = append(opts, build.WithCycloneDX())
|
2021-11-23 06:34:43 -08:00
|
|
|
default: // "spdx"
|
|
|
|
opts = append(opts, build.WithSPDX(version()))
|
2021-11-22 10:57:13 -08:00
|
|
|
}
|
2021-11-17 14:03:48 +11:00
|
|
|
opts = append(opts, build.WithTrimpath(bo.Trimpath))
|
2021-03-03 13:03:31 -05:00
|
|
|
for _, lf := range bo.Labels {
|
|
|
|
parts := strings.SplitN(lf, "=", 2)
|
|
|
|
if len(parts) != 2 {
|
|
|
|
return nil, fmt.Errorf("invalid label flag: %s", lf)
|
|
|
|
}
|
|
|
|
opts = append(opts, build.WithLabel(parts[0], parts[1]))
|
|
|
|
}
|
2021-07-02 17:40:56 +02:00
|
|
|
|
2021-08-27 02:33:01 +10:00
|
|
|
if bo.BuildConfigs != nil {
|
|
|
|
opts = append(opts, build.WithConfig(bo.BuildConfigs))
|
2021-07-02 17:40:56 +02:00
|
|
|
}
|
|
|
|
|
2019-03-14 14:23:47 -04:00
|
|
|
return opts, nil
|
|
|
|
}
|
|
|
|
|
2021-05-26 04:44:52 +10:00
|
|
|
// NewBuilder creates a ko builder
|
|
|
|
func NewBuilder(ctx context.Context, bo *options.BuildOptions) (build.Interface, error) {
|
|
|
|
return makeBuilder(ctx, bo)
|
|
|
|
}
|
|
|
|
|
2020-12-21 11:47:05 -08:00
|
|
|
func makeBuilder(ctx context.Context, bo *options.BuildOptions) (*build.Caching, error) {
|
2021-10-21 16:14:06 +11:00
|
|
|
if err := bo.LoadConfig(); err != nil {
|
2021-08-28 02:55:39 +10:00
|
|
|
return nil, err
|
|
|
|
}
|
2019-09-11 10:07:02 -07:00
|
|
|
opt, err := gobuildOptions(bo)
|
2019-03-14 14:23:47 -04:00
|
|
|
if err != nil {
|
2021-11-05 13:26:09 -04:00
|
|
|
return nil, fmt.Errorf("error setting up builder options: %w", err)
|
2019-03-14 14:23:47 -04:00
|
|
|
}
|
2021-10-21 16:14:06 +11:00
|
|
|
innerBuilder, err := build.NewGobuilds(ctx, bo.WorkingDirectory, bo.BuildConfigs, opt...)
|
2019-03-14 14:23:47 -04:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// tl;dr Wrap builder in a caching builder.
|
|
|
|
//
|
|
|
|
// The caching builder should on Build calls:
|
|
|
|
// - Check for a valid Build future
|
|
|
|
// - if a valid Build future exists at the time of the request,
|
|
|
|
// then block on it.
|
|
|
|
// - if it does not, then initiate and record a Build future.
|
|
|
|
//
|
|
|
|
// This will benefit the following key cases:
|
|
|
|
// 1. When the same import path is referenced across multiple yaml files
|
|
|
|
// we can elide subsequent builds by blocking on the same image future.
|
|
|
|
// 2. When an affected yaml file has multiple import paths (mostly unaffected)
|
|
|
|
// we can elide the builds of unchanged import paths.
|
|
|
|
return build.NewCaching(innerBuilder)
|
|
|
|
}
|
|
|
|
|
2021-05-26 04:44:52 +10:00
|
|
|
// NewPublisher creates a ko publisher
|
|
|
|
func NewPublisher(po *options.PublishOptions) (publish.Interface, error) {
|
|
|
|
return makePublisher(po)
|
|
|
|
}
|
|
|
|
|
2020-02-19 09:30:01 -08:00
|
|
|
// makePublisher assembles the publish.Interface used to ship built images.
// Depending on po it may target the local docker daemon, a kind cluster,
// an OCI layout directory, a tarball, and/or a remote registry; multiple
// destinations are fanned out via publish.MultiPublisher. When nothing
// would be published, a nopPublisher is used so digests are still produced.
// The result is wrapped in a caching layer so each reference is published
// at most once.
func makePublisher(po *options.PublishOptions) (publish.Interface, error) {
	// Create the publish.Interface that we will use to publish image references
	// to either a docker daemon or a container image registry.
	innerPublisher, err := func() (publish.Interface, error) {
		repoName := po.DockerRepo
		namer := options.MakeNamer(po)
		// Local-daemon publishing: selected either by the ko.local domain
		// prefix on the repo name or by the explicit --local flag.
		if strings.HasPrefix(repoName, publish.LocalDomain) || po.Local {
			// TODO(jonjohnsonjr): I'm assuming that nobody will
			// use local with other publishers, but that might
			// not be true.
			return publish.NewDaemon(namer, po.Tags,
				publish.WithDockerClient(po.DockerClient),
				publish.WithLocalDomain(po.LocalDomain),
			)
		}
		// kind-cluster publishing, selected by the kind.local domain prefix.
		if strings.HasPrefix(repoName, publish.KindDomain) {
			return publish.NewKindPublisher(namer, po.Tags), nil
		}

		// Pushing to a registry requires a destination repo.
		if repoName == "" && po.Push {
			return nil, errors.New("KO_DOCKER_REPO environment variable is unset")
		}
		// Accept repoName if it parses as either a bare registry or a
		// registry/repository; only fail when it is neither.
		if _, err := name.NewRegistry(repoName); err != nil {
			if _, err := name.NewRepository(repoName); err != nil {
				return nil, fmt.Errorf("failed to parse %q as repository: %w", repoName, err)
			}
		}

		// Collect every requested destination; they all receive each image.
		publishers := []publish.Interface{}
		if po.OCILayoutPath != "" {
			lp, err := publish.NewLayout(po.OCILayoutPath)
			if err != nil {
				return nil, fmt.Errorf("failed to create LayoutPublisher for %q: %w", po.OCILayoutPath, err)
			}
			publishers = append(publishers, lp)
		}
		if po.TarballFile != "" {
			tp := publish.NewTarball(po.TarballFile, repoName, namer, po.Tags)
			publishers = append(publishers, tp)
		}
		// A user-supplied User-Agent overrides the default ko/<version>.
		userAgent := ua()
		if po.UserAgent != "" {
			userAgent = po.UserAgent
		}
		if po.Push {
			dp, err := publish.NewDefault(repoName,
				publish.WithUserAgent(userAgent),
				publish.WithAuthFromKeychain(keychain),
				publish.WithNamer(namer),
				publish.WithTags(po.Tags),
				publish.WithTagOnly(po.TagOnly),
				publish.Insecure(po.InsecureRegistry),
			)
			if err != nil {
				return nil, err
			}
			publishers = append(publishers, dp)
		}

		// If not publishing, at least generate a digest to simulate
		// publishing.
		if len(publishers) == 0 {
			// If one or more tags are specified, use the first tag in the list
			var tag string
			if len(po.Tags) >= 1 {
				tag = po.Tags[0]
			}
			publishers = append(publishers, nopPublisher{
				repoName: repoName,
				namer:    namer,
				tag:      tag,
				tagOnly:  po.TagOnly,
			})
		}

		return publish.MultiPublisher(publishers...), nil
	}()
	if err != nil {
		return nil, err
	}

	// Optionally record every published reference to a file.
	if po.ImageRefsFile != "" {
		// NOTE(review): no O_TRUNC here — if the file already exists and is
		// longer than the new content, stale bytes remain at the end.
		// Confirm whether appending/overlaying is the intended behavior.
		f, err := os.OpenFile(po.ImageRefsFile, os.O_RDWR|os.O_CREATE, 0644)
		if err != nil {
			return nil, err
		}
		innerPublisher, err = publish.NewRecorder(innerPublisher, f)
		if err != nil {
			return nil, err
		}
	}

	// Wrap publisher in a memoizing publisher implementation.
	return publish.NewCaching(innerPublisher)
}
|
|
|
|
|
2020-10-06 09:45:25 -04:00
|
|
|
// nopPublisher simulates publishing without actually publishing anything, to
// provide fallback behavior when the user configures no push destinations.
type nopPublisher struct {
	repoName string         // destination repo used to form references
	namer    publish.Namer  // maps (repo, import path) to an image name
	tag      string         // tag applied to fabricated references, if any
	tagOnly  bool           // emit tag-only references (no digest)
}
|
|
|
|
|
2020-12-21 11:47:05 -08:00
|
|
|
func (n nopPublisher) Publish(_ context.Context, br build.Result, s string) (name.Reference, error) {
|
2021-05-26 04:44:52 +10:00
|
|
|
s = strings.TrimPrefix(s, build.StrictScheme)
|
2022-08-24 10:57:52 -04:00
|
|
|
nm := n.namer(n.repoName, s)
|
|
|
|
if n.tagOnly {
|
|
|
|
if n.tag == "" {
|
|
|
|
return nil, errors.New("must specify tag if requesting tag only")
|
|
|
|
}
|
|
|
|
return name.NewTag(fmt.Sprintf("%s:%s", nm, n.tag))
|
|
|
|
}
|
2020-10-06 09:45:25 -04:00
|
|
|
h, err := br.Digest()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2022-08-24 10:57:52 -04:00
|
|
|
if n.tag == "" {
|
|
|
|
return name.NewDigest(fmt.Sprintf("%s@%s", nm, h))
|
|
|
|
}
|
|
|
|
return name.NewDigest(fmt.Sprintf("%s:%s@%s", nm, n.tag, h))
|
2020-10-06 09:45:25 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Close implements publish.Interface; nopPublisher holds no resources.
func (n nopPublisher) Close() error {
	return nil
}
|
|
|
|
|
2019-03-14 14:23:47 -04:00
|
|
|
// resolvedFuture represents a "future" for the bytes of a resolved file.
// The goroutine resolving a file sends the fully-resolved YAML on the
// channel and then closes it; consumers block on a receive.
type resolvedFuture chan []byte
|
|
|
|
|
2022-08-12 13:45:50 -06:00
|
|
|
// ResolveFilesToWriter resolves each YAML file named by fo concurrently
// (bounded by the builder), and streams the resolved documents to out in
// the order the files were enumerated, separated by "---" so the output
// can be piped to `kubectl apply`. It closes out before returning and
// returns the first resolution error, if any.
func ResolveFilesToWriter(
	ctx context.Context,
	builder *build.Caching,
	publisher publish.Interface,
	fo *options.FilenameOptions,
	so *options.SelectorOptions,
	out io.WriteCloser) error {
	defer out.Close()

	// By having this as a channel, we can hook this up to a filesystem
	// watcher and leave `fs` open to stream the names of yaml files
	// affected by code changes (including the modification of existing or
	// creation of new yaml files).
	fs := options.EnumerateFiles(fo)

	// This tracks filename -> []importpath
	var sm sync.Map

	// This tracks resolution errors and ensures we cancel other builds if an
	// individual build fails.
	errs, ctx := errgroup.WithContext(ctx)

	var futures []resolvedFuture
	for {
		// Each iteration, if there is anything in the list of futures,
		// listen to it in addition to the file enumerating channel.
		// A nil channel is never available to receive on, so if nothing
		// is available, this will result in us exclusively selecting
		// on the file enumerating channel.
		var bf resolvedFuture
		if len(futures) > 0 {
			bf = futures[0]
		} else if fs == nil {
			// There are no more files to enumerate and the futures
			// have been drained, so quit.
			break
		}

		select {
		case file, ok := <-fs:
			if !ok {
				// a nil channel is never available to receive on.
				// This allows us to drain the list of in-process
				// futures without this case of the select winning
				// each time.
				fs = nil
				break
			}

			// Make a new future to use to ship the bytes back and append
			// it to the list of futures (see comment below about ordering).
			ch := make(resolvedFuture)
			futures = append(futures, ch)

			// Kick off the resolution that will respond with its bytes on
			// the future.
			f := file // defensive copy
			errs.Go(func() error {
				defer close(ch)
				// Record the builds we do via this builder.
				recordingBuilder := &build.Recorder{
					Builder: builder,
				}
				b, err := resolveFile(ctx, f, recordingBuilder, publisher, so)
				if err != nil {
					// This error is sometimes expected during watch mode, so this
					// isn't fatal. Just print it and keep the watch open.
					return fmt.Errorf("error processing import paths in %q: %w", f, err)
				}
				// Associate with this file the collection of binary import paths.
				sm.Store(f, recordingBuilder.ImportPaths)
				ch <- b
				return nil
			})

		case b, ok := <-bf:
			// Once the head channel returns something, dequeue it.
			// We listen to the futures in order to be respectful of
			// the kubectl apply ordering, which matters!
			futures = futures[1:]
			if ok {
				// Write the next body and a trailing delimiter.
				// We write the delimiter LAST so that when streamed to
				// kubectl it knows that the resource is complete and may
				// be applied.
				// NOTE(review): the Write error is ignored here; a failed
				// write to out would go unnoticed until errs.Wait —
				// confirm whether it should abort the stream.
				out.Write(append(b, []byte("\n---\n")...))
			}
		}
	}

	// Make sure we exit with an error.
	// See https://github.com/google/ko/issues/84
	return errs.Wait()
}
|
|
|
|
|
2019-11-05 15:24:08 -05:00
|
|
|
func resolveFile(
|
2019-11-10 01:23:09 +08:00
|
|
|
ctx context.Context,
|
2019-11-05 15:24:08 -05:00
|
|
|
f string,
|
|
|
|
builder build.Interface,
|
|
|
|
pub publish.Interface,
|
2020-12-22 13:55:13 -08:00
|
|
|
so *options.SelectorOptions) (b []byte, err error) {
|
2019-11-05 15:24:08 -05:00
|
|
|
var selector labels.Selector
|
|
|
|
if so.Selector != "" {
|
|
|
|
var err error
|
|
|
|
selector, err = labels.Parse(so.Selector)
|
|
|
|
|
|
|
|
if err != nil {
|
2021-11-05 13:26:09 -04:00
|
|
|
return nil, fmt.Errorf("unable to parse selector: %w", err)
|
2019-11-05 15:24:08 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-14 14:23:47 -04:00
|
|
|
if f == "-" {
|
|
|
|
b, err = ioutil.ReadAll(os.Stdin)
|
|
|
|
} else {
|
|
|
|
b, err = ioutil.ReadFile(f)
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-11-05 15:24:08 -05:00
|
|
|
var docNodes []*yaml.Node
|
|
|
|
|
|
|
|
// The loop is to support multi-document yaml files.
|
|
|
|
// This is handled by using a yaml.Decoder and reading objects until io.EOF, see:
|
|
|
|
// https://godoc.org/gopkg.in/yaml.v3#Decoder.Decode
|
|
|
|
decoder := yaml.NewDecoder(bytes.NewBuffer(b))
|
|
|
|
for {
|
|
|
|
var doc yaml.Node
|
|
|
|
if err := decoder.Decode(&doc); err != nil {
|
2021-11-05 13:26:09 -04:00
|
|
|
if errors.Is(err, io.EOF) {
|
2019-11-05 15:24:08 -05:00
|
|
|
break
|
|
|
|
}
|
2019-06-27 16:55:47 -04:00
|
|
|
return nil, err
|
|
|
|
}
|
2019-11-05 15:24:08 -05:00
|
|
|
|
|
|
|
if selector != nil {
|
|
|
|
if match, err := resolve.MatchesSelector(&doc, selector); err != nil {
|
2021-11-05 13:26:09 -04:00
|
|
|
return nil, fmt.Errorf("error evaluating selector: %w", err)
|
2019-11-05 15:24:08 -05:00
|
|
|
} else if !match {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
docNodes = append(docNodes, &doc)
|
|
|
|
}
|
|
|
|
|
2020-12-22 13:55:13 -08:00
|
|
|
if err := resolve.ImageReferences(ctx, docNodes, builder, pub); err != nil {
|
2021-11-05 13:26:09 -04:00
|
|
|
return nil, fmt.Errorf("error resolving image references: %w", err)
|
2019-06-27 16:55:47 -04:00
|
|
|
}
|
|
|
|
|
2019-11-05 15:24:08 -05:00
|
|
|
buf := &bytes.Buffer{}
|
|
|
|
e := yaml.NewEncoder(buf)
|
|
|
|
e.SetIndent(2)
|
|
|
|
|
|
|
|
for _, doc := range docNodes {
|
|
|
|
err := e.Encode(doc)
|
|
|
|
if err != nil {
|
2021-11-05 13:26:09 -04:00
|
|
|
return nil, fmt.Errorf("failed to encode output: %w", err)
|
2019-11-05 15:24:08 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
e.Close()
|
|
|
|
|
|
|
|
return buf.Bytes(), nil
|
2019-03-14 14:23:47 -04:00
|
|
|
}
|