// Copyright 2022 Woodpecker Authors
// Copyright 2018 Drone.IO Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package agent

import (
	"context"
	"encoding/json"
	"io"
	"runtime"
	"strconv"
	"sync"
	"time"

	"github.com/rs/zerolog/log"
	"github.com/tevino/abool"
	"google.golang.org/grpc/metadata"

	"github.com/woodpecker-ci/woodpecker/pipeline"
	backend "github.com/woodpecker-ci/woodpecker/pipeline/backend/types"
	"github.com/woodpecker-ci/woodpecker/pipeline/multipart"
	"github.com/woodpecker-ci/woodpecker/pipeline/rpc"
	"github.com/woodpecker-ci/woodpecker/shared/utils"
)

// TODO: Implement log streaming.
// Until then we need to limit the size of the logs and files that we upload.
// The maximum gRPC payload size is 4194304 bytes, so these limits have to stay below that maximum.
const (
	maxLogsUpload = 2000000 // this is per step
	maxFileUpload = 1000000
)

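// Runner pulls pipeline executions from the server via the rpc.Peer client
// and runs them on the configured backend engine, reporting state, logs and
// file artifacts back as it goes.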
type Runner struct {
	client   rpc.Peer
	filter   rpc.Filter
	hostname string
	counter  *State
	engine   *backend.Engine
}

func NewRunner(workEngine rpc.Peer, f rpc.Filter, h string, state *State, backend *backend.Engine) Runner {
	return Runner{
		client:   workEngine,
		filter:   f,
		hostname: h,
		counter:  state,
		engine:   backend,
	}
}

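// Run requests the next pipeline from the server, executes it and reports the
// result back. It returns once that pipeline has finished (or no work was
// available) and is meant to be called repeatedly. A hypothetical caller loop
// (sketch only; the surrounding names are illustrative):
//
//	runner := agent.NewRunner(client, filter, hostname, state, &engine)
//	for {
//		if err := runner.Run(ctx); err != nil {
//			return err
//		}
//	}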
func (r *Runner) Run(ctx context.Context) error {
	log.Debug().Msg("request next execution")

	meta, _ := metadata.FromOutgoingContext(ctx)
	ctxmeta := metadata.NewOutgoingContext(context.Background(), meta)

	// get the next job from the queue
	work, err := r.client.Next(ctx, r.filter)
	if err != nil {
		return err
	}
	if work == nil {
		return nil
	}

	timeout := time.Hour
	if minutes := work.Timeout; minutes != 0 {
		timeout = time.Duration(minutes) * time.Minute
	}

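	// The queue item does not carry the repository name or pipeline number
	// directly, so they are pulled back out of the generated step environment
	// (see extractRepositoryName/extractPipelineNumber below).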
	repoName := extractRepositoryName(work.Config)       // hack
	pipelineNumber := extractPipelineNumber(work.Config) // hack

	r.counter.Add(
		work.ID,
		timeout,
		repoName,
		pipelineNumber,
	)
	defer r.counter.Done(work.ID)

	logger := log.With().
		Str("repo", repoName).
		Str("pipeline", pipelineNumber).
		Str("id", work.ID).
		Logger()

	logger.Debug().Msg("received execution")

	ctx, cancel := context.WithTimeout(ctxmeta, timeout)
	defer cancel()

	// Add sigterm support for internal context.
	// Required when the pipeline is terminated by external signals
	// like kubernetes.
	ctx = utils.WithContextSigtermCallback(ctx, func() {
		logger.Error().Msg("Received sigterm termination signal")
	})

	canceled := abool.New()
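	// Watch for a cancel signal from the server and tear down the running
	// pipeline by canceling the local context when it arrives.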
	go func() {
		logger.Debug().Msg("listen for cancel signal")

		if werr := r.client.Wait(ctx, work.ID); werr != nil {
			canceled.SetTo(true)
			logger.Warn().Err(werr).Msg("cancel signal received")

			cancel()
		} else {
			logger.Debug().Msg("stop listening for cancel signal")
		}
	}()
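
	// Renew the server-side lease on this pipeline once a minute so its
	// deadline is not reached while the pipeline is still running here.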
	go func() {
		for {
			select {
			case <-ctx.Done():
				logger.Debug().Msg("pipeline done")

				return
			case <-time.After(time.Minute):
				logger.Debug().Msg("pipeline lease renewed")

				if err := r.client.Extend(ctx, work.ID); err != nil {
					log.Error().Err(err).Msg("extending pipeline deadline failed")
				}
			}
		}
	}()

	state := rpc.State{}
	state.Started = time.Now().Unix()

	err = r.client.Init(ctxmeta, work.ID, state)
	if err != nil {
		logger.Error().Err(err).Msg("pipeline initialization failed")
	}

	var uploads sync.WaitGroup
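	// defaultLogger handles each step's multipart output: the first part is
	// the live log (secrets masked, size-limited, forwarded line by line and
	// then uploaded as logs.json), an optional second part is a file artifact
	// that is uploaded with its multipart headers as metadata.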
	defaultLogger := pipeline.LogFunc(func(proc *backend.Step, rc multipart.Reader) error {
		loglogger := logger.With().
			Str("image", proc.Image).
			Str("stage", proc.Alias).
			Logger()

		part, rerr := rc.NextPart()
		if rerr != nil {
			return rerr
		}
		uploads.Add(1)

		var secrets []string
		for _, secret := range work.Config.Secrets {
			if secret.Mask {
				secrets = append(secrets, secret.Value)
			}
		}

		loglogger.Debug().Msg("log stream opened")

		limitedPart := io.LimitReader(part, maxLogsUpload)
		logStream := rpc.NewLineWriter(r.client, work.ID, proc.Alias, secrets...)
		if _, err := io.Copy(logStream, limitedPart); err != nil {
			log.Error().Err(err).Msg("copy limited logStream part")
		}

		loglogger.Debug().Msg("log stream copied")

		data, err := json.Marshal(logStream.Lines())
		if err != nil {
			loglogger.Err(err).Msg("could not marshal logstream")
		}

		file := &rpc.File{
			Mime: "application/json+logs",
			Proc: proc.Alias,
			Name: "logs.json",
			Data: data,
			Size: len(data),
			Time: time.Now().Unix(),
		}

		loglogger.Debug().Msg("log stream uploading")
		if serr := r.client.Upload(ctxmeta, work.ID, file); serr != nil {
			loglogger.Error().Err(serr).Msg("log stream upload error")
		} else {
			loglogger.Debug().Msg("log stream upload complete")
		}

		defer func() {
			loglogger.Debug().Msg("log stream closed")
			uploads.Done()
		}()

		part, rerr = rc.NextPart()
		if rerr != nil {
			return nil
		}
		// TODO should be configurable
		limitedPart = io.LimitReader(part, maxFileUpload)
		data, err = io.ReadAll(limitedPart)
		if err != nil {
			loglogger.Err(err).Msg("could not read limited part")
		}

		file = &rpc.File{
			Mime: part.Header().Get("Content-Type"),
			Proc: proc.Alias,
			Name: part.FileName(),
			Data: data,
			Size: len(data),
			Time: time.Now().Unix(),
			Meta: make(map[string]string),
		}
		for key, value := range part.Header() {
			file.Meta[key] = value[0]
		}

		loglogger.Debug().
			Str("file", file.Name).
			Str("mime", file.Mime).
			Msg("file stream uploading")

		if serr := r.client.Upload(ctxmeta, work.ID, file); serr != nil {
			loglogger.Error().
				Err(serr).
				Str("file", file.Name).
				Str("mime", file.Mime).
				Msg("file stream upload error")
		}

		loglogger.Debug().
			Str("file", file.Name).
			Str("mime", file.Mime).
			Msg("file stream upload complete")
		return nil
	})
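
	// defaultTracer runs around every step: it pushes the step's state to the
	// server when the step ends and, before a step starts, injects the CI_*
	// status and timing environment variables into the step about to run.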
	defaultTracer := pipeline.TraceFunc(func(state *pipeline.State) error {
		proclogger := logger.With().
			Str("image", state.Pipeline.Step.Image).
			Str("stage", state.Pipeline.Step.Alias).
			Err(state.Process.Error).
			Int("exit_code", state.Process.ExitCode).
			Bool("exited", state.Process.Exited).
			Logger()

		procState := rpc.State{
			Proc:     state.Pipeline.Step.Alias,
			Exited:   state.Process.Exited,
			ExitCode: state.Process.ExitCode,
			Started:  time.Now().Unix(), // TODO do not do this
			Finished: time.Now().Unix(),
		}
		if state.Process.Error != nil {
			procState.Error = state.Process.Error.Error()
		}

		defer func() {
			proclogger.Debug().Msg("update step status")

			if uerr := r.client.Update(ctxmeta, work.ID, procState); uerr != nil {
				proclogger.Debug().
					Err(uerr).
					Msg("update step status error")
			}

			proclogger.Debug().Msg("update step status complete")
		}()
		if state.Process.Exited {
			return nil
		}
		if state.Pipeline.Step.Environment == nil {
			state.Pipeline.Step.Environment = map[string]string{}
		}

		// TODO: find better way to update this state and move it to pipeline to have the same env in cli-exec
		state.Pipeline.Step.Environment["CI_MACHINE"] = r.hostname
		state.Pipeline.Step.Environment["CI_PIPELINE_STATUS"] = "success"
		state.Pipeline.Step.Environment["CI_PIPELINE_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
		state.Pipeline.Step.Environment["CI_PIPELINE_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)
		// DEPRECATED
		state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "success"
		state.Pipeline.Step.Environment["CI_BUILD_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
		state.Pipeline.Step.Environment["CI_BUILD_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)

		state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "success"
		state.Pipeline.Step.Environment["CI_JOB_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
		state.Pipeline.Step.Environment["CI_JOB_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)

		state.Pipeline.Step.Environment["CI_SYSTEM_ARCH"] = runtime.GOOS + "/" + runtime.GOARCH

		if state.Pipeline.Error != nil {
			state.Pipeline.Step.Environment["CI_PIPELINE_STATUS"] = "failure"
			// DEPRECATED
			state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "failure"
			state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "failure"
		}
		return nil
	})
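
	// Assemble the pipeline with the options above and execute it; Run blocks
	// until the pipeline finishes or the context is canceled.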
	err = pipeline.New(work.Config,
		pipeline.WithContext(ctx),
		pipeline.WithLogger(defaultLogger),
		pipeline.WithTracer(defaultTracer),
		pipeline.WithEngine(*r.engine),
		pipeline.WithDescription(map[string]string{
			"ID":       work.ID,
			"Repo":     repoName,
			"Pipeline": pipelineNumber,
		}),
	).Run()

	state.Finished = time.Now().Unix()
	state.Exited = true
	if err != nil {
		switch xerr := err.(type) {
		case *pipeline.ExitError:
			state.ExitCode = xerr.Code
		default:
			state.ExitCode = 1
			state.Error = err.Error()
		}
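		// A remotely canceled pipeline is reported with the conventional
		// "killed" exit code (137 = 128 + SIGKILL).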
		if canceled.IsSet() {
			state.ExitCode = 137
		}
	}

	logger.Debug().
		Str("error", state.Error).
		Int("exit_code", state.ExitCode).
		Msg("pipeline complete")

	logger.Debug().Msg("uploading logs")

	uploads.Wait()

	logger.Debug().Msg("uploading logs complete")

	logger.Debug().
		Str("error", state.Error).
		Int("exit_code", state.ExitCode).
		Msg("updating pipeline status")

	err = r.client.Done(ctxmeta, work.ID, state)
	if err != nil {
		logger.Error().Err(err).Msg("updating pipeline status failed")
	} else {
		logger.Debug().Msg("updating pipeline status complete")
	}

	return nil
}

// extractRepositoryName extracts the repository name from the step environment
// in the configuration.
func extractRepositoryName(config *backend.Config) string {
	return config.Stages[0].Steps[0].Environment["CI_REPO"]
}

// extractPipelineNumber extracts the pipeline number from the step environment
// in the configuration.
func extractPipelineNumber(config *backend.Config) string {
	return config.Stages[0].Steps[0].Environment["CI_PIPELINE_NUMBER"]
}