Add depends_on support for steps (#2771)
Co-authored-by: 6543 <6543@obermui.de>
parent: 9d9bcbf363
commit: 2b1e5f35de
@@ -101,7 +101,7 @@ func lintFile(_ *cli.Context, file string) error {
 	// TODO: lint multiple files at once to allow checks for sth like "depends_on" to work
 	err = linter.New(linter.WithTrusted(true)).Lint([]*linter.WorkflowConfig{config})
 	if err != nil {
-		fmt.Printf("🔥 %s has warning / errors:\n", output.String(config.File).Underline())
+		fmt.Printf("🔥 %s has warnings / errors:\n", output.String(config.File).Underline())
 
 		hasErrors := false
 		for _, err := range pipeline_errors.GetPipelineErrors(err) {
@@ -443,33 +443,28 @@ when:
       - evaluate: 'SKIP != "true"'
 ```
 
-### `group` - Parallel execution
+### `depends_on`
 
-Woodpecker supports parallel step execution for same-machine fan-in and fan-out. Parallel steps are configured using the `group` attribute. This instructs the agent to execute the named group in parallel.
-
-Example parallel configuration:
+Normally, steps of a workflow are executed serially in the order in which they are defined. As soon as you set `depends_on` for a step, a [directed acyclic graph](https://en.wikipedia.org/wiki/Directed_acyclic_graph) will be used and all steps of the workflow will be executed in parallel, except for the steps that declare a dependency on another step using `depends_on`:
 
 ```diff
  steps:
-   backend:
-+    group: build
-     image: golang
-     commands:
-       - go build
-       - go test
-   frontend:
-+    group: build
-     image: node
-     commands:
-       - npm install
-       - npm run test
-       - npm run build
-   publish:
-     image: plugins/docker
-     repo: octocat/hello-world
+   build: # build will be executed immediately
+     image: golang
+     commands:
+       - go build
+
+   deploy:
+     image: plugins/docker
+     settings:
+       repo: foo/bar
++    depends_on: [build, test] # deploy will be executed after build and test finished
+
+   test: # test will be executed immediately as no dependencies are set
+     image: golang
+     commands:
+       - go test
 ```
-
-In the above example, the `frontend` and `backend` steps are executed in parallel. The agent will not execute the `publish` step until the group completes.
 
 ### `volumes`
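The scheduling rule the new docs describe is simple: a step becomes runnable once every step it `depends_on` has finished, so independent steps share a level and dependents wait for a later one. Below is a minimal, self-contained Go sketch of that leveling. It is an illustration only, not the committed compiler code (that is the new `dag.go` further down), and the `deps` map is ours, mirroring the docs example.

```go
package main

import "fmt"

func main() {
	// Dependency map mirroring the docs example: deploy waits on build and test.
	deps := map[string][]string{
		"build":  {},
		"test":   {},
		"deploy": {"build", "test"},
	}

	done := map[string]bool{}
	for level := 0; len(done) < len(deps); level++ {
		var ready []string
		for name, ds := range deps {
			if done[name] {
				continue
			}
			ok := true
			for _, d := range ds {
				ok = ok && done[d] // runnable only once all dependencies finished
			}
			if ok {
				ready = append(ready, name)
			}
		}
		if len(ready) == 0 {
			panic("dependency cycle") // the real compiler rejects cycles up front
		}
		for _, name := range ready {
			done[name] = true
		}
		fmt.Printf("level %d: %v\n", level, ready) // level 0: build and test, level 1: deploy
	}
}
```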
@@ -4,6 +4,7 @@ Some versions need some changes to the server configuration or the pipeline conf
 
 ## `next`
 
+- Deprecated `steps.[name].group` in favor of `steps.[name].depends_on` (see [workflow syntax](./20-usage/20-workflow-syntax.md#depends_on) to learn how to set dependencies)
 - Removed `WOODPECKER_ROOT_PATH` and `WOODPECKER_ROOT_URL` config variables. Use `WOODPECKER_HOST` with a path instead
 - Pipelines without a config file will now be skipped instead of failing
@@ -233,10 +233,9 @@ func (c *Compiler) Compile(conf *yaml_types.Workflow) (*backend_types.Config, error) {
 		config.Stages = append(config.Stages, stage)
 	}
 
-	// add pipeline steps. 1 pipeline step per stage, at the moment
-	var stage *backend_types.Stage
-	var group string
-	for i, container := range conf.Steps.ContainerList {
+	// add pipeline steps
+	steps := make([]*dagCompilerStep, 0, len(conf.Steps.ContainerList))
+	for pos, container := range conf.Steps.ContainerList {
 		// Skip if local and should not run local
 		if c.local && !container.When.IsLocal() {
 			continue
@@ -248,16 +247,7 @@ func (c *Compiler) Compile(conf *yaml_types.Workflow) (*backend_types.Config, error) {
 			return nil, err
 		}
 
-		if stage == nil || group != container.Group || container.Group == "" {
-			group = container.Group
-
-			stage = new(backend_types.Stage)
-			stage.Name = fmt.Sprintf("%s_stage_%v", c.prefix, i)
-			stage.Alias = container.Name
-			config.Stages = append(config.Stages, stage)
-		}
-
-		name := fmt.Sprintf("%s_step_%d", c.prefix, i)
+		name := fmt.Sprintf("%s_step_%d", c.prefix, pos)
 		stepType := backend_types.StepTypeCommands
 		if container.IsPlugin() {
 			stepType = backend_types.StepTypePlugin
@@ -274,9 +264,23 @@ func (c *Compiler) Compile(conf *yaml_types.Workflow) (*backend_types.Config, error) {
 			}
 		}
 
-		stage.Steps = append(stage.Steps, step)
+		steps = append(steps, &dagCompilerStep{
+			step:      step,
+			position:  pos,
+			name:      container.Name,
+			group:     container.Group,
+			dependsOn: container.DependsOn,
+		})
 	}
 
+	// generate stages out of steps
+	stepStages, err := newDAGCompiler(steps, c.prefix).compile()
+	if err != nil {
+		return nil, err
+	}
+
+	config.Stages = append(config.Stages, stepStages...)
+
 	err = c.setupCacheRebuild(conf, config)
 	if err != nil {
 		return nil, err
@@ -99,120 +99,199 @@ func TestCompilerCompile(t *testing.T) {
		fronConf    *yaml_types.Workflow
		backConf    *backend_types.Config
		expectedErr string
	}{
		{
			name:     "empty workflow, no clone",
			fronConf: &yaml_types.Workflow{SkipClone: true},
			backConf: &backend_types.Config{
				Networks: defaultNetworks,
				Volumes:  defaultVolumes,
			},
		},
		{
			name:     "empty workflow, default clone",
			fronConf: &yaml_types.Workflow{},
			backConf: &backend_types.Config{
				Networks: defaultNetworks,
				Volumes:  defaultVolumes,
				Stages:   []*backend_types.Stage{defaultCloneStage},
			},
		},
		{
			name: "workflow with one dummy step",
			fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
				Name:  "dummy",
				Image: "dummy_img",
			}}}},
			backConf: &backend_types.Config{
				Networks: defaultNetworks,
				Volumes:  defaultVolumes,
				Stages: []*backend_types.Stage{defaultCloneStage, {
					Name:  "test_stage_0",
					Alias: "dummy",
					Steps: []*backend_types.Step{{
						Name:       "test_step_0",
						Alias:      "dummy",
						Type:       backend_types.StepTypePlugin,
						Image:      "dummy_img",
						OnSuccess:  true,
						Failure:    "fail",
						Volumes:    []string{defaultVolumes[0].Name + ":"},
						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"dummy"}}},
						ExtraHosts: []backend_types.HostAlias{},
					}},
				}},
			},
		},
		{
			name: "workflow with three steps and one group",
			fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
				Name:     "echo env",
				Image:    "bash",
				Commands: []string{"env"},
			}, {
				Name:     "parallel echo 1",
				Group:    "parallel",
				Image:    "bash",
				Commands: []string{"echo 1"},
			}, {
				Name:     "parallel echo 2",
				Group:    "parallel",
				Image:    "bash",
				Commands: []string{"echo 2"},
			}}}},
			backConf: &backend_types.Config{
				Networks: defaultNetworks,
				Volumes:  defaultVolumes,
				Stages: []*backend_types.Stage{defaultCloneStage, {
					Name:  "test_stage_0",
					Alias: "echo env",
					Steps: []*backend_types.Step{{
						Name:       "test_step_0",
						Alias:      "echo env",
						Type:       backend_types.StepTypeCommands,
						Image:      "bash",
						Commands:   []string{"env"},
						OnSuccess:  true,
						Failure:    "fail",
						Volumes:    []string{defaultVolumes[0].Name + ":"},
						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"echo env"}}},
						ExtraHosts: []backend_types.HostAlias{},
					}},
				}, {
					Name:  "test_stage_1",
					Alias: "parallel echo 1",
					Steps: []*backend_types.Step{{
						Name:       "test_step_1",
						Alias:      "parallel echo 1",
						Type:       backend_types.StepTypeCommands,
						Image:      "bash",
						Commands:   []string{"echo 1"},
						OnSuccess:  true,
						Failure:    "fail",
						Volumes:    []string{defaultVolumes[0].Name + ":"},
						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"parallel echo 1"}}},
						ExtraHosts: []backend_types.HostAlias{},
					}, {
						Name:       "test_step_2",
						Alias:      "parallel echo 2",
						Type:       backend_types.StepTypeCommands,
						Image:      "bash",
						Commands:   []string{"echo 2"},
						OnSuccess:  true,
						Failure:    "fail",
						Volumes:    []string{defaultVolumes[0].Name + ":"},
						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"parallel echo 2"}}},
						ExtraHosts: []backend_types.HostAlias{},
					}},
				}},
			},
		},
		{
			name: "workflow with three steps and depends_on",
			fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
				Name:     "echo env",
				Image:    "bash",
				Commands: []string{"env"},
			}, {
				Name:      "echo 1",
				Image:     "bash",
				Commands:  []string{"echo 1"},
				DependsOn: []string{"echo env", "echo 2"},
			}, {
				Name:     "echo 2",
				Image:    "bash",
				Commands: []string{"echo 2"},
			}}}},
			backConf: &backend_types.Config{
				Networks: defaultNetworks,
				Volumes:  defaultVolumes,
				Stages: []*backend_types.Stage{defaultCloneStage, {
					Name:  "test_stage_0",
					Alias: "test_stage_0",
					Steps: []*backend_types.Step{{
						Name:       "test_step_0",
						Alias:      "echo env",
						Type:       backend_types.StepTypeCommands,
						Image:      "bash",
						Commands:   []string{"env"},
						OnSuccess:  true,
						Failure:    "fail",
						Volumes:    []string{defaultVolumes[0].Name + ":"},
						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"echo env"}}},
						ExtraHosts: []backend_types.HostAlias{},
					}, {
						Name:       "test_step_2",
						Alias:      "echo 2",
						Type:       backend_types.StepTypeCommands,
						Image:      "bash",
						Commands:   []string{"echo 2"},
						OnSuccess:  true,
						Failure:    "fail",
						Volumes:    []string{defaultVolumes[0].Name + ":"},
						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"echo 2"}}},
						ExtraHosts: []backend_types.HostAlias{},
					}},
				}, {
					Name:  "test_stage_1",
					Alias: "test_stage_1",
					Steps: []*backend_types.Step{{
						Name:       "test_step_1",
						Alias:      "echo 1",
						Type:       backend_types.StepTypeCommands,
						Image:      "bash",
						Commands:   []string{"echo 1"},
						OnSuccess:  true,
						Failure:    "fail",
						Volumes:    []string{defaultVolumes[0].Name + ":"},
						Networks:   []backend_types.Conn{{Name: "test_default", Aliases: []string{"echo 1"}}},
						ExtraHosts: []backend_types.HostAlias{},
					}},
				}},
			},
		},
		{
			name: "workflow with missing secret",
			fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
				Name:     "step",
				Image:    "bash",
				Commands: []string{"env"},
				Secrets:  yaml_types.Secrets{Secrets: []*yaml_types.Secret{{Source: "missing", Target: "missing"}}},
			}}}},
			backConf:    nil,
			expectedErr: "secret \"missing\" not found or not allowed to be used",
		},
		{
			name: "workflow with broken step dependency",
			fronConf: &yaml_types.Workflow{Steps: yaml_types.ContainerList{ContainerList: []*yaml_types.Container{{
				Name:      "dummy",
				Image:     "dummy_img",
				DependsOn: []string{"not exist"},
			}}}},
			backConf:    nil,
			expectedErr: "step 'dummy' depends on unknown step 'not exist'",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
pipeline/frontend/yaml/compiler/dag.go (new file, 159 lines)
@@ -0,0 +1,159 @@
// Copyright 2023 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

import (
	"fmt"

	backend_types "go.woodpecker-ci.org/woodpecker/v2/pipeline/backend/types"
)

type dagCompilerStep struct {
	step      *backend_types.Step
	position  int
	name      string
	group     string
	dependsOn []string
}

type dagCompiler struct {
	steps  []*dagCompilerStep
	prefix string
}

func newDAGCompiler(steps []*dagCompilerStep, prefix string) dagCompiler {
	return dagCompiler{
		steps:  steps,
		prefix: prefix,
	}
}

func (c dagCompiler) isDAG() bool {
	for _, v := range c.steps {
		if len(v.dependsOn) != 0 {
			return true
		}
	}
	return false
}

func (c dagCompiler) compile() ([]*backend_types.Stage, error) {
	if c.isDAG() {
		return c.compileByDependsOn()
	}
	return c.compileByGroup()
}

func (c dagCompiler) compileByGroup() ([]*backend_types.Stage, error) {
	stages := make([]*backend_types.Stage, 0, len(c.steps))
	var currentStage *backend_types.Stage
	var currentGroup string

	for _, s := range c.steps {
		// create a new stage if current step is in a new group compared to last one
		if currentStage == nil || currentGroup != s.group || s.group == "" {
			currentGroup = s.group

			currentStage = new(backend_types.Stage)
			currentStage.Name = fmt.Sprintf("%s_stage_%v", c.prefix, s.position)
			currentStage.Alias = s.name
			stages = append(stages, currentStage)
		}

		// add step to current stage
		currentStage.Steps = append(currentStage.Steps, s.step)
	}

	return stages, nil
}

func (c dagCompiler) compileByDependsOn() ([]*backend_types.Stage, error) {
	stepMap := make(map[string]*dagCompilerStep, len(c.steps))
	for _, s := range c.steps {
		stepMap[s.name] = s
	}
	return convertDAGToStages(stepMap, c.prefix)
}

func dfsVisit(steps map[string]*dagCompilerStep, name string, visited map[string]struct{}, path []string) error {
	if _, ok := visited[name]; ok {
		return &ErrStepDependencyCycle{path: path}
	}

	visited[name] = struct{}{}
	path = append(path, name)

	for _, dep := range steps[name].dependsOn {
		if err := dfsVisit(steps, dep, visited, path); err != nil {
			return err
		}
	}

	return nil
}

func convertDAGToStages(steps map[string]*dagCompilerStep, prefix string) ([]*backend_types.Stage, error) {
	addedSteps := make(map[string]struct{})
	stages := make([]*backend_types.Stage, 0)

	for name, step := range steps {
		// check if all depends_on are valid
		for _, dep := range step.dependsOn {
			if _, ok := steps[dep]; !ok {
				return nil, &ErrStepMissingDependency{name: name, dep: dep}
			}
		}

		// check if there are cycles
		visited := make(map[string]struct{})
		if err := dfsVisit(steps, name, visited, []string{}); err != nil {
			return nil, err
		}
	}

	for len(steps) > 0 {
		addedNodesThisLevel := make(map[string]struct{})
		stage := &backend_types.Stage{
			Name:  fmt.Sprintf("%s_stage_%d", prefix, len(stages)),
			Alias: fmt.Sprintf("%s_stage_%d", prefix, len(stages)),
		}

		for name, step := range steps {
			if allDependenciesSatisfied(step, addedSteps) {
				stage.Steps = append(stage.Steps, step.step)
				addedNodesThisLevel[name] = struct{}{}
				delete(steps, name)
			}
		}

		for name := range addedNodesThisLevel {
			addedSteps[name] = struct{}{}
		}

		stages = append(stages, stage)
	}

	return stages, nil
}

func allDependenciesSatisfied(step *dagCompilerStep, addedSteps map[string]struct{}) bool {
	for _, childName := range step.dependsOn {
		_, ok := addedSteps[childName]
		if !ok {
			return false
		}
	}
	return true
}
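A hedged usage sketch for the new compiler, written as if it lived inside this package (for instance in a test file), since the constructor and types are unexported; the helper name is hypothetical:

```go
// sketchDAGCompile is a hypothetical helper, not part of this commit.
func sketchDAGCompile() ([]*backend_types.Stage, error) {
	steps := []*dagCompilerStep{
		{step: &backend_types.Step{Name: "build"}, position: 0, name: "build"},
		{step: &backend_types.Step{Name: "test"}, position: 1, name: "test"},
		{step: &backend_types.Step{Name: "deploy"}, position: 2, name: "deploy", dependsOn: []string{"build", "test"}},
	}
	// deploy's depends_on makes isDAG() true, so compile() takes the
	// depends_on path: stage 0 holds build and test, stage 1 holds deploy.
	return newDAGCompiler(steps, "demo").compile()
}
```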
pipeline/frontend/yaml/compiler/dag_test.go (new file, 62 lines)
@@ -0,0 +1,62 @@
// Copyright 2023 Woodpecker Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

import (
	"testing"

	"github.com/stretchr/testify/assert"

	backend_types "go.woodpecker-ci.org/woodpecker/v2/pipeline/backend/types"
)

func TestConvertDAGToStages(t *testing.T) {
	steps := map[string]*dagCompilerStep{
		"step1": {
			step:      &backend_types.Step{},
			dependsOn: []string{"step3"},
		},
		"step2": {
			step:      &backend_types.Step{},
			dependsOn: []string{"step1"},
		},
		"step3": {
			step:      &backend_types.Step{},
			dependsOn: []string{"step2"},
		},
	}
	_, err := convertDAGToStages(steps, "")
	assert.ErrorIs(t, err, &ErrStepDependencyCycle{})

	steps = map[string]*dagCompilerStep{
		"step1": {
			step:      &backend_types.Step{},
			dependsOn: []string{"step2"},
		},
		"step2": {
			step: &backend_types.Step{},
		},
	}
	_, err = convertDAGToStages(steps, "")
	assert.NoError(t, err)

	steps = map[string]*dagCompilerStep{
		"step1": {
			step:      &backend_types.Step{},
			dependsOn: []string{"not-existing-step"},
		},
	}
	_, err = convertDAGToStages(steps, "")
	assert.ErrorIs(t, err, &ErrStepMissingDependency{})
}
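For contrast, a sketch of the legacy fallback (a hypothetical test, not part of the commit): with no `depends_on` anywhere, `isDAG()` returns false and `compile()` falls back to `compileByGroup`, which merges consecutive steps sharing a non-empty `group` into one stage.

```go
func TestCompileByGroupSketch(t *testing.T) { // hypothetical test, not in the commit
	steps := []*dagCompilerStep{
		{step: &backend_types.Step{}, position: 0, name: "lint"},
		{step: &backend_types.Step{}, position: 1, name: "test-a", group: "test"},
		{step: &backend_types.Step{}, position: 2, name: "test-b", group: "test"},
	}
	stages, err := newDAGCompiler(steps, "test").compile()
	assert.NoError(t, err)
	assert.Len(t, stages, 2) // stage 0: lint alone; stage 1: test-a and test-b together
}
```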
@@ -28,3 +28,30 @@ func (*ErrExtraHostFormat) Is(target error) bool {
 	_, ok := target.(*ErrExtraHostFormat) //nolint:errorlint
 	return ok
 }
+
+type ErrStepMissingDependency struct {
+	name,
+	dep string
+}
+
+func (err *ErrStepMissingDependency) Error() string {
+	return fmt.Sprintf("step '%s' depends on unknown step '%s'", err.name, err.dep)
+}
+
+func (*ErrStepMissingDependency) Is(target error) bool {
+	_, ok := target.(*ErrStepMissingDependency) //nolint:errorlint
+	return ok
+}
+
+type ErrStepDependencyCycle struct {
+	path []string
+}
+
+func (err *ErrStepDependencyCycle) Error() string {
+	return fmt.Sprintf("cycle detected: %v", err.path)
+}
+
+func (*ErrStepDependencyCycle) Is(target error) bool {
+	_, ok := target.(*ErrStepDependencyCycle) //nolint:errorlint
+	return ok
+}
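These `Is` methods match on type rather than value, which is what lets the new `dag_test.go` above compare against zero-value targets. A hedged sketch (hypothetical test in the same package, assuming stdlib `errors` and `fmt` plus testify):

```go
func TestStepDependencyErrorMatching(t *testing.T) { // hypothetical test, not in the commit
	wrapped := fmt.Errorf("compile failed: %w", &ErrStepMissingDependency{name: "deploy", dep: "build"})
	// Type-based Is: a zero-value target matches any instance, even through wrapping.
	assert.True(t, errors.Is(wrapped, &ErrStepMissingDependency{}))
	assert.False(t, errors.Is(wrapped, &ErrStepDependencyCycle{}))
}
```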
@@ -254,6 +254,21 @@ func (l *Linter) lintDeprecations(config *WorkflowConfig) (err error) {
 		})
 	}
 
+	for _, step := range parsed.Steps.ContainerList {
+		if step.Group != "" {
+			err = multierr.Append(err, &errors.PipelineError{
+				Type:    errors.PipelineErrorTypeDeprecation,
+				Message: "Please use depends_on instead of deprecated 'group' setting",
+				Data: errors.DeprecationErrorData{
+					File:  config.File,
+					Field: "steps." + step.Name + ".group",
+					Docs:  "https://woodpecker-ci.org/docs/next/usage/workflow-syntax#depends_on",
+				},
+				IsWarning: true,
+			})
+		}
+	}
+
 	return err
 }
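Downstream, the CLI lint command from the top of this commit ranges over the collected pipeline errors, so a config that still uses `group` surfaces this deprecation as a warning rather than a failure. A hedged sketch (the helper name is ours, and we assume `GetPipelineErrors` yields the `PipelineError` values appended here, with the `IsWarning` and `Message` fields shown in this hunk):

```go
func printLintResults(config *linter.WorkflowConfig) { // hypothetical helper, not in the commit
	err := linter.New(linter.WithTrusted(true)).Lint([]*linter.WorkflowConfig{config})
	for _, e := range pipeline_errors.GetPipelineErrors(err) {
		if e.IsWarning {
			fmt.Println("warning:", e.Message) // e.g. the 'group' deprecation added above
		} else {
			fmt.Println("error:", e.Message)
		}
	}
}
```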
@@ -344,6 +344,12 @@
       "description": "Execute multiple steps with the same group key in parallel. Read more: https://woodpecker-ci.org/docs/usage/pipeline-syntax#step-group---parallel-execution",
       "type": "string"
     },
+    "depends_on": {
+      "description": "Execute a step after another step has finished.",
+      "type": "array",
+      "items": { "type": "string" },
+      "minLength": 1
+    },
     "detach": {
       "description": "Detach a step to run in background until pipeline finishes. Read more: https://woodpecker-ci.org/docs/usage/services#detachment",
       "type": "boolean"
@@ -48,6 +48,7 @@ type (
 		Volumes   Volumes            `yaml:"volumes,omitempty"`
 		When      constraint.When    `yaml:"when,omitempty"`
 		Ports     []base.StringOrInt `yaml:"ports,omitempty"`
+		DependsOn base.StringOrSlice `yaml:"depends_on,omitempty"`
 
 		// Docker Specific
 		Privileged bool `yaml:"privileged,omitempty"`
@@ -354,7 +354,7 @@ func (s *RPC) RegisterAgent(ctx context.Context, platform, backend, version stri
 
 func (s *RPC) UnregisterAgent(ctx context.Context) error {
 	agent, err := s.getAgentFromContext(ctx)
-	if agent.OwnerID > 0 {
+	if !agent.IsSystemAgent() {
 		// registered with individual agent token -> do not unregister
 		return nil
 	}