// Mirror of https://github.com/go-task/task — taskfile/reader.go
package taskfile

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/dominikbraun/graph"
	"golang.org/x/sync/errgroup"
	"gopkg.in/yaml.v3"

	"github.com/go-task/task/v3/errors"
	"github.com/go-task/task/v3/internal/filepathext"
	"github.com/go-task/task/v3/internal/logger"
	"github.com/go-task/task/v3/internal/templater"
	"github.com/go-task/task/v3/taskfile/ast"
)
2024-01-04 12:14:33 +02:00
const (
	// taskfileUntrustedPrompt is shown before running a remote Taskfile for
	// which no cached checksum exists yet (first download).
	taskfileUntrustedPrompt = `The task you are attempting to run depends on the remote Taskfile at %q.
--- Make sure you trust the source of this Taskfile before continuing ---
Continue?`

	// taskfileChangedPrompt is shown when a remote Taskfile's checksum no
	// longer matches the checksum recorded in the cache.
	taskfileChangedPrompt = `The Taskfile at %q has changed since you last used it!
--- Make sure you trust the source of this Taskfile before continuing ---
Continue?`
)
2024-01-02 01:12:28 +02:00
// A Reader will recursively read Taskfiles from a given source using a directed
// acyclic graph (DAG).
type Reader struct {
	graph    *ast.TaskfileGraph // DAG of read Taskfiles, keyed by node location URI
	node     Node               // root node that reading starts from
	insecure bool               // forwarded to NewNode; presumably permits insecure remote sources — confirm against Node implementations
	download bool               // when set, a network timeout is fatal instead of falling back to the cache
	offline  bool               // never hit the network; remote Taskfiles are served from cache only
	timeout  time.Duration      // bounds each remote read via context.WithTimeout
	tempDir  string             // directory backing the remote-Taskfile cache (see NewCache)
	logger   *logger.Logger     // destination for verbose output and trust prompts
}
func NewReader(
node Node,
insecure bool,
download bool,
offline bool,
timeout time.Duration,
tempDir string,
2024-01-02 01:12:28 +02:00
logger *logger.Logger,
) *Reader {
return &Reader{
graph: ast.NewTaskfileGraph(),
node: node,
insecure: insecure,
download: download,
offline: offline,
timeout: timeout,
tempDir: tempDir,
logger: logger,
}
}
// Read walks the include tree starting at the Reader's root node and returns
// the fully-populated Taskfile DAG.
func (r *Reader) Read() (*ast.TaskfileGraph, error) {
	// Recursively visit every Taskfile, adding vertices and edges as we go.
	err := r.include(r.node)
	if err != nil {
		return nil, err
	}
	return r.graph, nil
}
func (r *Reader) include(node Node) error {
// Create a new vertex for the Taskfile
vertex := &ast.TaskfileVertex{
URI: node.Location(),
Taskfile: nil,
}
// Add the included Taskfile to the DAG
// If the vertex already exists, we return early since its Taskfile has
// already been read and its children explored
if err := r.graph.AddVertex(vertex); err == graph.ErrVertexAlreadyExists {
return nil
} else if err != nil {
return err
}
2024-01-02 01:12:28 +02:00
// Read and parse the Taskfile from the file and add it to the vertex
var err error
vertex.Taskfile, err = r.readNode(node)
if err != nil {
return err
}
2024-01-02 01:12:28 +02:00
// Create an error group to wait for all included Taskfiles to be read
var g errgroup.Group
// Loop over each included taskfile
vertex.Taskfile.Includes.Range(func(namespace string, include ast.Include) error {
// Start a goroutine to process each included Taskfile
g.Go(func() error {
cache := &templater.Cache{Vars: vertex.Taskfile.Vars}
include = ast.Include{
Namespace: include.Namespace,
Taskfile: templater.Replace(include.Taskfile, cache),
Dir: templater.Replace(include.Dir, cache),
Optional: include.Optional,
Internal: include.Internal,
Aliases: include.Aliases,
AdvancedImport: include.AdvancedImport,
Vars: include.Vars,
}
if err := cache.Err(); err != nil {
return err
}
2024-02-13 21:29:28 +02:00
entrypoint, err := node.ResolveEntrypoint(include.Taskfile)
if err != nil {
return err
}
2024-02-13 21:29:28 +02:00
dir, err := node.ResolveDir(include.Dir)
2024-02-13 03:07:00 +02:00
if err != nil {
return err
}
2024-01-02 01:12:28 +02:00
includeNode, err := NewNode(r.logger, entrypoint, dir, r.insecure, r.timeout,
WithParent(node),
)
if err != nil {
2023-09-06 02:11:13 +02:00
if include.Optional {
return nil
}
return err
}
2024-01-02 01:12:28 +02:00
// Recurse into the included Taskfile
if err := r.include(includeNode); err != nil {
return err
2021-09-25 14:40:03 +02:00
}
2024-01-02 01:12:28 +02:00
// Create an edge between the Taskfiles
err = r.graph.AddEdge(node.Location(), includeNode.Location())
if errors.Is(err, graph.ErrEdgeAlreadyExists) {
edge, err := r.graph.Edge(node.Location(), includeNode.Location())
if err != nil {
return err
}
return &errors.TaskfileDuplicateIncludeError{
URI: node.Location(),
IncludedURI: includeNode.Location(),
Namespaces: []string{namespace, edge.Properties.Data.(*ast.Include).Namespace},
}
}
if errors.Is(err, graph.ErrEdgeCreatesCycle) {
return errors.TaskfileCycleError{
Source: node.Location(),
Destination: includeNode.Location(),
}
}
return err
})
2024-01-02 01:12:28 +02:00
return nil
})
2022-12-23 02:27:16 +02:00
2024-01-02 01:12:28 +02:00
// Wait for all the go routines to finish
return g.Wait()
}
2024-01-02 01:12:28 +02:00
func (r *Reader) readNode(node Node) (*ast.Taskfile, error) {
var b []byte
var err error
var cache *Cache
if node.Remote() {
2024-01-02 01:12:28 +02:00
cache, err = NewCache(r.tempDir)
if err != nil {
return nil, err
}
}
// If the file is remote and we're in offline mode, check if we have a cached copy
2024-01-02 01:12:28 +02:00
if node.Remote() && r.offline {
if b, err = cache.read(node); errors.Is(err, os.ErrNotExist) {
2024-01-12 00:30:52 +02:00
return nil, &errors.TaskfileCacheNotFoundError{URI: node.Location()}
} else if err != nil {
return nil, err
}
2024-01-02 01:12:28 +02:00
r.logger.VerboseOutf(logger.Magenta, "task: [%s] Fetched cached copy\n", node.Location())
} else {
downloaded := false
2024-01-02 01:12:28 +02:00
ctx, cf := context.WithTimeout(context.Background(), r.timeout)
defer cf()
// Read the file
b, err = node.Read(ctx)
// If we timed out then we likely have a network issue
if node.Remote() && errors.Is(ctx.Err(), context.DeadlineExceeded) {
// If a download was requested, then we can't use a cached copy
2024-01-02 01:12:28 +02:00
if r.download {
return nil, &errors.TaskfileNetworkTimeoutError{URI: node.Location(), Timeout: r.timeout}
}
// Search for any cached copies
if b, err = cache.read(node); errors.Is(err, os.ErrNotExist) {
2024-01-02 01:12:28 +02:00
return nil, &errors.TaskfileNetworkTimeoutError{URI: node.Location(), Timeout: r.timeout, CheckedCache: true}
} else if err != nil {
return nil, err
}
2024-01-02 01:12:28 +02:00
r.logger.VerboseOutf(logger.Magenta, "task: [%s] Network timeout. Fetched cached copy\n", node.Location())
} else if err != nil {
return nil, err
} else {
downloaded = true
}
// If the node was remote, we need to check the checksum
if node.Remote() && downloaded {
2024-01-02 01:12:28 +02:00
r.logger.VerboseOutf(logger.Magenta, "task: [%s] Fetched remote copy\n", node.Location())
// Get the checksums
checksum := checksum(b)
cachedChecksum := cache.readChecksum(node)
2024-01-04 12:14:33 +02:00
var prompt string
if cachedChecksum == "" {
// If the checksum doesn't exist, prompt the user to continue
2024-01-04 12:14:33 +02:00
prompt = fmt.Sprintf(taskfileUntrustedPrompt, node.Location())
} else if checksum != cachedChecksum {
// If there is a cached hash, but it doesn't match the expected hash, prompt the user to continue
2024-01-04 12:14:33 +02:00
prompt = fmt.Sprintf(taskfileChangedPrompt, node.Location())
}
2024-01-02 01:12:28 +02:00
if prompt == "" {
if err := r.logger.Prompt(logger.Yellow, prompt, "n", "y", "yes"); err != nil {
return nil, &errors.TaskfileNotTrustedError{URI: node.Location()}
}
}
// If the hash has changed (or is new)
if checksum != cachedChecksum {
// Store the checksum
if err := cache.writeChecksum(node, checksum); err != nil {
return nil, err
}
// Cache the file
2024-01-02 01:12:28 +02:00
r.logger.VerboseOutf(logger.Magenta, "task: [%s] Caching downloaded file\n", node.Location())
if err = cache.write(node, b); err != nil {
return nil, err
}
}
}
}
var t ast.Taskfile
if err := yaml.Unmarshal(b, &t); err != nil {
return nil, &errors.TaskfileInvalidError{URI: filepathext.TryAbsToRel(node.Location()), Err: err}
}
t.Location = node.Location()
return &t, nil
}