Mirror of https://github.com/go-task/task.git (synced 2025-02-11 13:53:03 +02:00)
Parent: f3097845b4
Commit: 0160f5dd30
Gopkg.lock | 4 (generated)

@@ -11,7 +11,7 @@
   branch = "master"
   name = "github.com/Masterminds/sprig"
   packages = ["."]
-  revision = "e039e20e500c2c025d9145be375e27cf42a94174"
+  revision = "175e437013029f9a1c35bdf04bc451b0d20d4331"
 
 [[projects]]
   name = "github.com/aokoli/goutils"
@@ -101,7 +101,7 @@
   branch = "master"
   name = "mvdan.cc/sh"
   packages = ["interp","syntax"]
-  revision = "eaf7b83013b7a3dbfe3c627d00e1b9971afc9748"
+  revision = "d8d2c36c06455d4bb8e2116cc2b955271046329d"
 
 [solve-meta]
   analyzer-name = "dep"
vendor/github.com/Masterminds/sprig/date.go | 18 (generated, vendored)

@@ -51,3 +51,21 @@ func dateModify(fmt string, date time.Time) time.Time {
 	}
 	return date.Add(d)
 }
+
+func dateAgo(date interface{}) string {
+	var t time.Time
+
+	switch date := date.(type) {
+	default:
+		t = time.Now()
+	case time.Time:
+		t = date
+	case int64:
+		t = time.Unix(date, 0)
+	case int:
+		t = time.Unix(int64(date), 0)
+	}
+	// Drop resolution to seconds
+	duration := time.Since(t) / time.Second * time.Second
+	return duration.String()
+}
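The dateAgo helper above is exposed to templates as "ago" (see the functions.go diff further down). As a rough usage sketch, assuming sprig's TxtFuncMap and an illustrative template of our own:

package main

import (
	"os"
	"text/template"
	"time"

	"github.com/Masterminds/sprig"
)

func main() {
	// "ago" maps to dateAgo and accepts a time.Time, an int64 or an int
	// (the latter two are Unix seconds); resolution is dropped to seconds.
	tpl := template.Must(template.New("t").
		Funcs(sprig.TxtFuncMap()).
		Parse(`started {{ ago .Start }} ago`))

	data := map[string]interface{}{"Start": time.Now().Add(-90 * time.Second)}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// prints something like: started 1m30s ago
}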
vendor/github.com/Masterminds/sprig/dict.go | 10 (generated, vendored)

@@ -75,10 +75,12 @@ func dict(v ...interface{}) map[string]interface{} {
 	return dict
 }
 
-func merge(dst map[string]interface{}, src map[string]interface{}) interface{} {
-	if err := mergo.Merge(&dst, src); err != nil {
-		// Swallow errors inside of a template.
-		return ""
+func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} {
+	for _, src := range srcs {
+		if err := mergo.Merge(&dst, src); err != nil {
+			// Swallow errors inside of a template.
+			return ""
+		}
 	}
 	return dst
 }
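With merge now variadic, a template can fold several source maps into one destination in a single call; keys already present in the destination are kept, which is mergo's default behaviour. A hedged sketch with made-up data:

package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// merge takes a destination map followed by any number of source maps;
	// existing keys in the destination win over the sources.
	const src = `{{ $m := merge .dst .defaultsA .defaultsB }}{{ $m.port }} {{ $m.host }}`

	tpl := template.Must(template.New("t").Funcs(sprig.TxtFuncMap()).Parse(src))
	data := map[string]interface{}{
		"dst":       map[string]interface{}{"port": 8080},
		"defaultsA": map[string]interface{}{"host": "localhost"},
		"defaultsB": map[string]interface{}{"port": 80, "host": "0.0.0.0"},
	}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// prints: 8080 localhost
}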
vendor/github.com/Masterminds/sprig/doc.go | 5 (generated, vendored)

@@ -48,6 +48,10 @@ String Functions
 	- randAlpha: Given a length, generate an alphabetic string
 	- randAscii: Given a length, generate a random ASCII string (symbols included)
 	- randNumeric: Given a length, generate a string of digits.
+	- swapcase: SwapCase swaps the case of a string using a word based algorithm. see https://godoc.org/github.com/Masterminds/goutils#SwapCase
+	- shuffle: Shuffle randomizes runes in a string and returns the result. It uses default random source in `math/rand`
+	- snakecase: convert all upper case characters in a string to underscore format.
+	- camelcase: convert all lower case characters behind underscores to upper case character
 	- wrap: Force a line wrap at the given width. `wrap 80 "imagine a longer string"`
 	- wrapWith: Wrap a line at the given length, but using 'sep' instead of a newline. `wrapWith 50, "<br>", $html`
 	- contains: strings.Contains, but with the arguments switched: `contains substr str`. (This simplifies common pipelines)
@@ -57,6 +61,7 @@ String Functions
 	- squote: Wrap string(s) in double quotation marks, does not escape content.
 	- cat: Concatenate strings, separating them by spaces. `cat $a $b $c`.
 	- indent: Indent a string using space characters. `indent 4 "foo\nbar"` produces "    foo\n    bar"
+	- nindent: Indent a string using space characters and prepend a new line. `indent 4 "foo\nbar"` produces "\n    foo\n    bar"
 	- replace: Replace an old with a new in a string: `$name | replace " " "-"`
 	- plural: Choose singular or plural based on length: `len $fish | plural "one anchovy" "many anchovies"`
 	- sha256sum: Generate a hex encoded sha256 hash of the input
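These doc entries track string helpers available in the newly pinned sprig revision. A small illustrative template (the exact output noted in the comments is approximate and assumes the helpers are registered as documented):

package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// swapcase flips the case of each letter; snakecase and camelcase convert
	// between identifier styles as described in the doc entries above.
	const src = `{{ swapcase "Hello World" }}
{{ snakecase "FirstName" }}
{{ camelcase "http_server" }}
`
	tpl := template.Must(template.New("t").Funcs(sprig.TxtFuncMap()).Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// First line prints "hELLO wORLD"; the snakecase/camelcase output depends
	// on the vendored revision's implementation.
}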
vendor/github.com/Masterminds/sprig/functions.go | 12 (generated, vendored)

@@ -98,6 +98,7 @@ var genericMap = map[string]interface{}{
 	"htmlDateInZone": htmlDateInZone,
 	"dateInZone": dateInZone,
 	"dateModify": dateModify,
+	"ago": dateAgo,
 
 	// Strings
 	"abbrev": abbrev,
@@ -137,6 +138,7 @@ var genericMap = map[string]interface{}{
 	"squote": squote,
 	"cat": cat,
 	"indent": indent,
+	"nindent": nindent,
 	"replace": replace,
 	"plural": plural,
 	"sha256sum": sha256sum,
@@ -263,10 +265,10 @@ var genericMap = map[string]interface{}{
 	"fail": func(msg string) (string, error) { return "", errors.New(msg) },
 
 	// Regex
 	"regexMatch": regexMatch,
 	"regexFindAll": regexFindAll,
 	"regexFind": regexFind,
 	"regexReplaceAll": regexReplaceAll,
 	"regexReplaceAllLiteral": regexReplaceAllLiteral,
 	"regexSplit": regexSplit,
 }
vendor/github.com/Masterminds/sprig/strings.go | 4 (generated, vendored)

@@ -106,6 +106,10 @@ func indent(spaces int, v string) string {
 	return pad + strings.Replace(v, "\n", "\n"+pad, -1)
 }
 
+func nindent(spaces int, v string) string {
+	return "\n" + indent(spaces, v)
+}
+
 func replace(old, new, src string) string {
 	return strings.Replace(src, old, new, -1)
 }
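nindent is just indent with a leading newline, which keeps call sites tidy when splicing multi-line blocks into indented output such as YAML. A minimal sketch, assuming sprig's TxtFuncMap and an illustrative template:

package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig"
)

func main() {
	// nindent N s is exactly "\n" + indent N s, so the call can sit at the end
	// of a line while the indented block starts on the next one.
	const src = `metadata:{{ "name: demo\nnamespace: default" | nindent 2 }}`

	tpl := template.Must(template.New("t").Funcs(sprig.TxtFuncMap()).Parse(src))
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// metadata:
	//   name: demo
	//   namespace: default
}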
vendor/mvdan.cc/sh/README.md | 4 (vendored)

@@ -51,8 +51,8 @@ validity:
 
 	go get -u mvdan.cc/sh/cmd/gosh
 
-Experimental non-interactive shell that uses `interp`. Work in progress,
-so don't expect stability just yet.
+Experimental shell that uses `interp`. Work in progress, so don't expect
+stability just yet.
 
 ### Fuzzing
 
vendor/mvdan.cc/sh/interp/interp.go | 15 (vendored)

@@ -8,6 +8,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"math"
 	"os"
 	"os/exec"
 	"os/user"
@@ -17,6 +18,7 @@ import (
 	"strings"
 	"sync"
 	"syscall"
+	"time"
 
 	"mvdan.cc/sh/syntax"
 )
@@ -645,6 +647,19 @@ func (r *Runner) cmd(cm syntax.Command) {
 		for _, as := range x.Assigns {
 			r.setVar(as.Name.Value, r.assignValue(as))
 		}
+	case *syntax.TimeClause:
+		start := time.Now()
+		if x.Stmt != nil {
+			r.stmt(x.Stmt)
+		}
+		elapsed := time.Since(start)
+		r.outf("\n")
+		min := int(elapsed.Minutes())
+		sec := math.Remainder(elapsed.Seconds(), 60.0)
+		r.outf("real\t%dm%.3fs\n", min, sec)
+		// TODO: can we do these?
+		r.outf("user\t0m0.000s\n")
+		r.outf("sys\t0m0.000s\n")
 	default:
 		r.runErr(cm.Pos(), "unhandled command node: %T", x)
 	}
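The new TimeClause case prints bash-style timing after running the timed statement. The "real" line is derived from the elapsed duration exactly as above; here is a standalone sketch of that arithmetic (the helper name is ours, not part of the vendored code):

package main

import (
	"fmt"
	"math"
	"time"
)

// formatReal mirrors the arithmetic used for the "real" line above:
// whole minutes plus the remaining seconds at millisecond precision.
func formatReal(elapsed time.Duration) string {
	min := int(elapsed.Minutes())
	sec := math.Remainder(elapsed.Seconds(), 60.0)
	return fmt.Sprintf("real\t%dm%.3fs", min, sec)
}

func main() {
	fmt.Println(formatReal(83*time.Second + 250*time.Millisecond))
	// real	1m23.250s
}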
vendor/mvdan.cc/sh/syntax/lexer.go | 24 (vendored)

@@ -157,7 +157,7 @@ func (p *Parser) nextKeepSpaces() {
 		if r == '`' || r == '$' {
 			p.tok = p.dqToken(r)
 		} else if p.hdocStop == nil {
-			p.tok = illegalTok
+			p.tok = _Newl
 		} else {
 			p.advanceLitHdoc(r)
 		}
@@ -186,7 +186,7 @@ func (p *Parser) next() {
 		p.tok = _EOF
 		return
 	}
-	p.spaced, p.newLine = false, false
+	p.spaced = false
 	if p.quote&allKeepSpaces != 0 {
 		p.nextKeepSpaces()
 		return
@@ -202,18 +202,16 @@ skipSpace:
 			p.spaced = true
 			r = p.rune()
 		case '\n':
-			if p.quote == arithmExprLet || p.quote == hdocWord {
-				p.tok = illegalTok
-				return
+			if p.tok == _Newl {
+				r = p.rune()
+				continue
 			}
-			p.spaced, p.newLine = true, true
-			r = p.rune()
-			if len(p.heredocs) > p.buriedHdocs {
-				if p.doHeredocs(); p.tok == _EOF {
-					return
-				}
-				r = p.r
+			p.spaced = true
+			p.tok = _Newl
+			if p.quote != hdocWord && len(p.heredocs) > p.buriedHdocs {
+				p.doHeredocs()
 			}
+			return
 		case '\\':
 			if !p.peekByte('\n') {
 				break skipSpace
@@ -899,7 +897,7 @@ func (p *Parser) advanceLitHdoc(r rune) {
 		if bytes.HasPrefix(p.litBs[lStart:], p.hdocStop) {
 			p.val = p.endLit()[:lStart]
 			if p.val == "" {
-				p.tok = illegalTok
+				p.tok = _Newl
 			}
 			p.hdocStop = nil
 			return
vendor/mvdan.cc/sh/syntax/parser.go | 171 (vendored)

@@ -50,7 +50,7 @@ func (p *Parser) Parse(r io.Reader, name string) (*File, error) {
 	p.src = r
 	p.rune()
 	p.next()
-	p.f.StmtList = p.stmts()
+	p.f.StmtList = p.stmtList()
 	if p.err == nil {
 		// EOF immediately after heredoc word so no newline to
 		// trigger it
@@ -59,6 +59,21 @@ func (p *Parser) Parse(r io.Reader, name string) (*File, error) {
 	return p.f, p.err
 }
 
+func (p *Parser) Stmts(r io.Reader, fn func(*Stmt)) error {
+	p.reset()
+	p.f = &File{}
+	p.src = r
+	p.rune()
+	p.next()
+	p.stmts(fn)
+	if p.err == nil {
+		// EOF immediately after heredoc word so no newline to
+		// trigger it
+		p.doHeredocs()
+	}
+	return p.err
+}
+
 // Parser holds the internal state of the parsing mechanism of a
 // program.
 type Parser struct {
@@ -70,8 +85,7 @@ type Parser struct {
 
 	f *File
 
 	spaced  bool // whether tok has whitespace on its left
-	newLine bool // whether tok is on a new line
 
 	err     error // lexer/parser error
 	readErr error // got a read error, but bytes left
@@ -291,6 +305,7 @@ func (p *Parser) unquotedWordPart(buf *bytes.Buffer, wp WordPart, quotes bool) (
 }
 
 func (p *Parser) doHeredocs() {
+	p.rune() // consume '\n', since we know p.tok == _Newl
 	old := p.quote
 	hdocs := p.heredocs[p.buriedHdocs:]
 	p.heredocs = p.heredocs[:p.buriedHdocs]
@@ -322,6 +337,9 @@ func (p *Parser) doHeredocs() {
 }
 
 func (p *Parser) got(tok token) bool {
+	if p.tok == _Newl {
+		p.next()
+	}
 	if p.tok == tok {
 		p.next()
 		return true
@@ -329,16 +347,20 @@ func (p *Parser) got(tok token) bool {
 	return false
 }
 
-func (p *Parser) gotRsrv(val string) bool {
+func (p *Parser) gotRsrv(val string) (Pos, bool) {
+	if p.tok == _Newl {
+		p.next()
+	}
+	pos := p.pos
 	if p.tok == _LitWord && p.val == val {
 		p.next()
-		return true
+		return pos, true
 	}
-	return false
+	return pos, false
 }
 
 func (p *Parser) gotSameLine(tok token) bool {
-	if !p.newLine && p.tok == tok {
+	if p.tok == tok {
 		p.next()
 		return true
 	}
@@ -369,8 +391,8 @@ func (p *Parser) follow(lpos Pos, left string, tok token) {
 }
 
 func (p *Parser) followRsrv(lpos Pos, left, val string) Pos {
-	pos := p.pos
-	if !p.gotRsrv(val) {
+	pos, ok := p.gotRsrv(val)
+	if !ok {
 		p.followErr(lpos, left, fmt.Sprintf("%q", val))
 	}
 	return pos
@@ -380,8 +402,13 @@ func (p *Parser) followStmts(left string, lpos Pos, stops ...string) StmtList {
 	if p.gotSameLine(semicolon) {
 		return StmtList{}
 	}
-	sl := p.stmts(stops...)
-	if len(sl.Stmts) < 1 && !p.newLine {
+	newLine := false
+	if p.tok == _Newl {
+		p.next()
+		newLine = true
+	}
+	sl := p.stmtList(stops...)
+	if len(sl.Stmts) < 1 && !newLine {
 		p.followErr(lpos, left, "a statement list")
 	}
 	return sl
@@ -404,8 +431,8 @@ func (p *Parser) followWord(s string, pos Pos) *Word {
 }
 
 func (p *Parser) stmtEnd(n Node, start, end string) Pos {
-	pos := p.pos
-	if !p.gotRsrv(end) {
+	pos, ok := p.gotRsrv(end)
+	if !ok {
 		p.posErr(n.Pos(), "%s statement must end with %q", start, end)
 	}
 	return pos
@@ -464,10 +491,15 @@ func (p *Parser) curErr(format string, a ...interface{}) {
 	p.posErr(p.pos, format, a...)
 }
 
-func (p *Parser) stmts(stops ...string) (sl StmtList) {
+func (p *Parser) stmts(fn func(*Stmt), stops ...string) {
 	gotEnd := true
 loop:
 	for p.tok != _EOF {
+		newLine := false
+		if p.tok == _Newl {
+			newLine = true
+			p.next()
+		}
 		switch p.tok {
 		case _LitWord:
 			for _, stop := range stops {
@@ -489,7 +521,7 @@ loop:
 			}
 			p.curErr("%s can only be used in a case clause", p.tok)
 		}
-		if !p.newLine && !gotEnd {
+		if !newLine && !gotEnd {
 			p.curErr("statements must be separated by &, ; or a newline")
 		}
 		if p.tok == _EOF {
@@ -498,13 +530,20 @@ loop:
 		if s, end := p.getStmt(true, false); s == nil {
 			p.invalidStmtStart()
 		} else {
-			if sl.Stmts == nil {
-				sl.Stmts = p.stList()
-			}
-			sl.Stmts = append(sl.Stmts, s)
+			fn(s)
 			gotEnd = end
 		}
 	}
+}
+
+func (p *Parser) stmtList(stops ...string) (sl StmtList) {
+	fn := func(s *Stmt) {
+		if sl.Stmts == nil {
+			sl.Stmts = p.stList()
+		}
+		sl.Stmts = append(sl.Stmts, s)
+	}
+	p.stmts(fn, stops...)
 	sl.Last, p.accComs = p.accComs, nil
 	return
 }
@@ -586,12 +625,13 @@ func (p *Parser) wordPart() WordPart {
 		old := p.preNested(subCmd)
 		p.rune() // don't tokenize '|'
 		p.next()
-		cs.StmtList = p.stmts("}")
+		cs.StmtList = p.stmtList("}")
 		p.postNested(old)
-		cs.Right = p.pos
-		if !p.gotRsrv("}") {
+		pos, ok := p.gotRsrv("}")
+		if !ok {
 			p.matchingErr(cs.Left, "${", "}")
 		}
+		cs.Right = pos
 		return cs
 	default:
 		return p.paramExp()
@@ -630,7 +670,7 @@ func (p *Parser) wordPart() WordPart {
 		cs := &CmdSubst{Left: p.pos}
 		old := p.preNested(subCmd)
 		p.next()
-		cs.StmtList = p.stmts()
+		cs.StmtList = p.stmtList()
 		p.postNested(old)
 		cs.Right = p.matched(cs.Left, leftParen, rightParen)
 		return cs
@@ -648,7 +688,7 @@ func (p *Parser) wordPart() WordPart {
 		ps := &ProcSubst{Op: ProcOperator(p.tok), OpPos: p.pos}
 		old := p.preNested(subCmd)
 		p.next()
-		ps.StmtList = p.stmts()
+		ps.StmtList = p.stmtList()
 		p.postNested(old)
 		ps.Rparen = p.matched(ps.OpPos, token(ps.Op), rightParen)
 		return ps
@@ -709,7 +749,7 @@ func (p *Parser) wordPart() WordPart {
 		cs := &CmdSubst{Left: p.pos}
 		old := p.preNested(subCmdBckquo)
 		p.next()
-		cs.StmtList = p.stmts()
+		cs.StmtList = p.stmtList()
 		p.postNested(old)
 		cs.Right = p.pos
 		if !p.got(bckQuote) {
@@ -813,6 +853,9 @@ func (p *Parser) arithmExpr(level int, compact, tern bool) ArithmExpr {
 	if compact && p.spaced {
 		return left
 	}
+	if p.tok == _Newl {
+		p.next()
+	}
 	newLevel := arithmOpLevel(BinAritOperator(p.tok))
 	if !tern && p.tok == colon && p.quote&allParamArith != 0 {
 		newLevel = -1
@@ -1134,7 +1177,7 @@ func (p *Parser) arithmEnd(ltok token, lpos Pos, old saveState) Pos {
 
 func stopToken(tok token) bool {
 	switch tok {
-	case _EOF, semicolon, and, or, andAnd, orOr, orAnd, dblSemicolon,
+	case _EOF, _Newl, semicolon, and, or, andAnd, orOr, orAnd, dblSemicolon,
 		semiAnd, dblSemiAnd, semiOr, rightParen:
 		return true
 	}
@@ -1239,6 +1282,9 @@ func (p *Parser) getAssign(needEqual bool) *Assign {
 		}
 		old := p.preNested(newQuote)
 		p.next()
+		if p.tok == _Newl {
+			p.next()
+		}
 		for p.tok != _EOF && p.tok != rightParen {
 			ae := &ArrayElem{}
 			ae.Comments, p.accComs = p.accComs, nil
@@ -1273,6 +1319,9 @@ func (p *Parser) getAssign(needEqual bool) *Assign {
 				}
 			}
 			as.Array.Elems = append(as.Array.Elems, ae)
+			if p.tok == _Newl {
+				p.next()
+			}
 		}
 		as.Array.Last, p.accComs = p.accComs, nil
 		p.postNested(old)
@@ -1301,7 +1350,7 @@ func (p *Parser) doRedirect(s *Stmt) {
 	r.N = p.getLit()
 	r.Op, r.OpPos = RedirOperator(p.tok), p.pos
 	p.next()
-	if p.newLine {
+	if p.tok == _Newl {
 		p.curErr("redirect word must be on the same line")
 	}
 	switch r.Op {
@@ -1311,8 +1360,8 @@ func (p *Parser) doRedirect(s *Stmt) {
 		p.heredocs = append(p.heredocs, r)
 		r.Word = p.followWordTok(token(r.Op), r.OpPos)
 		p.quote, p.forbidNested = old, false
-		if p.tok == illegalTok {
-			p.next()
+		if p.tok == _Newl {
+			p.doHeredocs()
 		}
 	default:
 		r.Word = p.followWordTok(token(r.Op), r.OpPos)
@@ -1321,10 +1370,11 @@ func (p *Parser) doRedirect(s *Stmt) {
 }
 
 func (p *Parser) getStmt(readEnd, binCmd bool) (s *Stmt, gotEnd bool) {
-	s = p.stmt(p.pos)
-	if p.gotRsrv("!") {
+	pos, ok := p.gotRsrv("!")
+	s = p.stmt(pos)
+	if ok {
 		s.Negated = true
-		if p.newLine || stopToken(p.tok) {
+		if stopToken(p.tok) {
 			p.posErr(s.Pos(), `"!" cannot form a statement alone`)
 		}
 	}
@@ -1359,7 +1409,7 @@ func (p *Parser) getStmt(readEnd, binCmd bool) (s *Stmt, gotEnd bool) {
 		}
 		fallthrough
 	case semicolon:
-		if !p.newLine && readEnd {
+		if readEnd {
 			s.Semicolon = p.pos
 			p.next()
 		}
@@ -1382,6 +1432,10 @@ func (p *Parser) getStmt(readEnd, binCmd bool) (s *Stmt, gotEnd bool) {
 }
 
 func (p *Parser) gotStmtPipe(s *Stmt) *Stmt {
+	if p.tok == _Newl {
+		p.next()
+		s.Position = p.pos
+	}
 	s.Comments, p.accComs = p.accComs, nil
 	switch p.tok {
 	case _LitWord:
@@ -1474,7 +1528,7 @@ func (p *Parser) gotStmtPipe(s *Stmt) *Stmt {
 		hdoc, dashHdoc, wordHdoc, rdrAll, appAll, _LitRedir:
 		p.doRedirect(s)
 		switch {
-		case p.newLine, p.tok == _EOF, p.tok == semicolon:
+		case p.tok == _EOF, p.tok == semicolon, p.tok == _Newl:
 			return s
 		}
 		s.Cmd = p.callExpr(s, nil, false)
@@ -1504,7 +1558,7 @@ func (p *Parser) gotStmtPipe(s *Stmt) *Stmt {
 			return nil
 		}
 	}
-	for !p.newLine && p.peekRedir() {
+	for p.peekRedir() {
 		p.doRedirect(s)
 	}
 	switch p.tok {
@@ -1531,7 +1585,7 @@ func (p *Parser) subshell() *Subshell {
 	s := &Subshell{Lparen: p.pos}
 	old := p.preNested(subCmd)
 	p.next()
-	s.StmtList = p.stmts()
+	s.StmtList = p.stmtList()
 	p.postNested(old)
 	s.Rparen = p.matched(s.Lparen, leftParen, rightParen)
 	return s
@@ -1555,9 +1609,10 @@ func (p *Parser) arithmExpCmd() Command {
 func (p *Parser) block() *Block {
 	b := &Block{Lbrace: p.pos}
 	p.next()
-	b.StmtList = p.stmts("}")
-	b.Rbrace = p.pos
-	if !p.gotRsrv("}") {
+	b.StmtList = p.stmtList("}")
+	pos, ok := p.gotRsrv("}")
+	b.Rbrace = pos
+	if !ok {
 		p.matchingErr(b.Lbrace, "{", "}")
 	}
 	return b
@@ -1582,7 +1637,7 @@ func (p *Parser) ifClause() *IfClause {
 		curIf.Else.Stmts = []*Stmt{s}
 		curIf = elf
 	}
-	if elsePos := p.pos; p.gotRsrv("else") {
+	if elsePos, ok := p.gotRsrv("else"); ok {
 		curIf.ElsePos = elsePos
 		curIf.Else = p.followStmts("else", curIf.ElsePos, "fi")
 	}
@@ -1647,8 +1702,8 @@ func (p *Parser) wordIter(ftok string, fpos Pos) *WordIter {
 	if wi.Name = p.getLit(); wi.Name == nil {
 		p.followErr(fpos, ftok, "a literal")
 	}
-	if p.gotRsrv("in") {
-		for !p.newLine && p.tok != _EOF && p.tok != semicolon {
+	if _, ok := p.gotRsrv("in"); ok {
+		for p.tok != _Newl && p.tok != _EOF && p.tok != semicolon {
 			if w := p.getWord(); w == nil {
 				p.curErr("word list can only contain words")
 			} else {
@@ -1656,7 +1711,7 @@ func (p *Parser) wordIter(ftok string, fpos Pos) *WordIter {
 			}
 		}
 		p.gotSameLine(semicolon)
-	} else if !p.newLine && !p.got(semicolon) {
+	} else if p.tok != _Newl && !p.got(semicolon) {
 		p.followErr(fpos, ftok+" foo", `"in", ; or a newline`)
 	}
 	return wi
@@ -1677,7 +1732,7 @@ func (p *Parser) caseClause() *CaseClause {
 	p.next()
 	cc.Word = p.followWord("case", cc.Case)
 	end := "esac"
-	if p.gotRsrv("{") {
+	if _, ok := p.gotRsrv("{"); ok {
 		if p.lang != LangMirBSDKorn {
 			p.posErr(cc.Pos(), `"case i {" is a mksh feature`)
 		}
@@ -1692,6 +1747,9 @@ func (p *Parser) caseClause() *CaseClause {
 }
 
 func (p *Parser) caseItems(stop string) (items []*CaseItem) {
+	if p.tok == _Newl {
+		p.next()
+	}
 	for p.tok != _EOF && !(p.tok == _LitWord && p.val == stop) {
 		ci := &CaseItem{}
 		ci.Comments, p.accComs = p.accComs, nil
@@ -1711,7 +1769,7 @@ func (p *Parser) caseItems(stop string) (items []*CaseItem) {
 		}
 		old := p.preNested(switchCase)
 		p.next()
-		ci.StmtList = p.stmts(stop)
+		ci.StmtList = p.stmtList(stop)
 		p.postNested(old)
 		ci.OpPos = p.pos
 		switch p.tok {
@@ -1733,18 +1791,22 @@ func (p *Parser) caseItems(stop string) (items []*CaseItem) {
 			}
 		}
 		items = append(items, ci)
+		if p.tok == _Newl {
+			p.next()
+		}
 	}
 	return
 }
 
 func (p *Parser) testClause() *TestClause {
 	tc := &TestClause{Left: p.pos}
-	if p.next(); p.tok == _EOF || p.gotRsrv("]]") {
+	p.next()
+	if _, ok := p.gotRsrv("]]"); ok || p.tok == _EOF {
 		p.posErr(tc.Left, "test clause requires at least one expression")
 	}
 	tc.X = p.testExpr(illegalTok, tc.Left, false)
 	tc.Right = p.pos
-	if !p.gotRsrv("]]") {
+	if _, ok := p.gotRsrv("]]"); !ok {
 		p.matchingErr(tc.Left, "[[", "]]")
 	}
 	return tc
@@ -1864,7 +1926,7 @@ func (p *Parser) declClause() *DeclClause {
 	for (p.tok == _LitWord || p.tok == _Lit) && p.val[0] == '-' {
 		ds.Opts = append(ds.Opts, p.getWord())
 	}
-	for !p.newLine && !stopToken(p.tok) && !p.peekRedir() {
+	for !stopToken(p.tok) && !p.peekRedir() {
 		if (p.tok == _Lit || p.tok == _LitWord) && p.hasValidIdent() {
 			ds.Assigns = append(ds.Assigns, p.getAssign(false))
 		} else if p.eqlOffs > 0 {
@@ -1904,7 +1966,7 @@ func isBashCompoundCommand(tok token, val string) bool {
 func (p *Parser) timeClause() *TimeClause {
 	tc := &TimeClause{Time: p.pos}
 	p.next()
-	if !p.newLine {
+	if p.tok != _Newl {
 		tc.Stmt = p.gotStmtPipe(p.stmt(p.pos))
 	}
 	return tc
@@ -1917,7 +1979,7 @@ func (p *Parser) coprocClause() *CoprocClause {
 		cc.Stmt = p.gotStmtPipe(p.stmt(p.pos))
 		return cc
 	}
-	if p.newLine {
+	if p.tok == _Newl {
 		p.posErr(cc.Coproc, "coproc clause requires a command")
 	}
 	cc.Name = p.getLit()
@@ -1946,7 +2008,7 @@ func (p *Parser) letClause() *LetClause {
 	lc := &LetClause{Let: p.pos}
 	old := p.preNested(arithmExprLet)
 	p.next()
-	for !p.newLine && !stopToken(p.tok) && !p.peekRedir() {
+	for !stopToken(p.tok) && !p.peekRedir() {
 		x := p.arithmExpr(0, true, false)
 		if x == nil {
 			break
@@ -1957,9 +2019,6 @@ func (p *Parser) letClause() *LetClause {
 		p.followErrExp(lc.Let, "let")
 	}
 	p.postNested(old)
-	if p.tok == illegalTok {
-		p.next()
-	}
 	return lc
 }
 
@@ -1987,9 +2046,9 @@ func (p *Parser) callExpr(s *Stmt, w *Word, assign bool) Command {
 		ce.Assigns = append(ce.Assigns, p.getAssign(true))
 	}
 loop:
-	for !p.newLine {
+	for {
 		switch p.tok {
-		case _EOF, semicolon, and, or, andAnd, orOr, orAnd,
+		case _EOF, _Newl, semicolon, and, or, andAnd, orOr, orAnd,
 			dblSemicolon, semiAnd, dblSemiAnd, semiOr:
 			break loop
 		case _LitWord:
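The callback-based stmts plus the stmtList wrapper is what backs the new exported Parser.Stmts entry point added near the top of this file. A hedged usage sketch against the signature introduced here (note that later upstream versions changed the callback to return a bool):

package main

import (
	"fmt"
	"strings"

	"mvdan.cc/sh/syntax"
)

func main() {
	src := strings.NewReader("echo one\necho two | wc -l\n")
	p := syntax.NewParser()

	// Stmts hands each top-level statement to the callback as soon as it has
	// been parsed, instead of accumulating a whole *File the way Parse does.
	count := 0
	err := p.Stmts(src, func(s *syntax.Stmt) {
		count++
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("parsed", count, "statements") // parsed 2 statements
}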
vendor/mvdan.cc/sh/syntax/token_string.go | 4 (vendored)

@@ -4,9 +4,9 @@ package syntax
 
 import "fmt"
 
-const _token_name = "illegalTokEOFLitLitWordLitRedir'\"`&&&||||&$$'$\"${$[$($(([(((}])));;;;&;;&;|!++--***==!=<=>=+=-=*=/=%=&=|=^=<<=>>=>>><<><&>&>|<<<<-<<<&>&>><(>(+:+-:-?:?=:=%%%###^^^,,,@///:-e-f-d-c-b-p-S-L-k-g-u-G-O-N-r-w-x-s-t-z-n-o-v-R=~-nt-ot-ef-eq-ne-le-ge-lt-gt?(*(+(@(!("
+const _token_name = "illegalTokEOFNewlineLitLitWordLitRedir'\"`&&&||||&$$'$\"${$[$($(([(((}])));;;;&;;&;|!++--***==!=<=>=+=-=*=/=%=&=|=^=<<=>>=>>><<><&>&>|<<<<-<<<&>&>><(>(+:+-:-?:?=:=%%%###^^^,,,@///:-e-f-d-c-b-p-S-L-k-g-u-G-O-N-r-w-x-s-t-z-n-o-v-R=~-nt-ot-ef-eq-ne-le-ge-lt-gt?(*(+(@(!("
 
-var _token_index = [...]uint16{0, 10, 13, 16, 23, 31, 32, 33, 34, 35, 37, 39, 40, 42, 43, 45, 47, 49, 51, 53, 56, 57, 58, 60, 61, 62, 63, 65, 66, 68, 70, 73, 75, 76, 78, 80, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 110, 113, 114, 116, 117, 119, 121, 123, 125, 127, 130, 133, 135, 138, 140, 142, 143, 145, 146, 148, 149, 151, 152, 154, 155, 157, 158, 160, 161, 163, 164, 166, 167, 168, 170, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 224, 227, 230, 233, 236, 239, 242, 245, 248, 250, 252, 254, 256, 258}
+var _token_index = [...]uint16{0, 10, 13, 20, 23, 30, 38, 39, 40, 41, 42, 44, 46, 47, 49, 50, 52, 54, 56, 58, 60, 63, 64, 65, 67, 68, 69, 70, 72, 73, 75, 77, 80, 82, 83, 85, 87, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 117, 120, 121, 123, 124, 126, 128, 130, 132, 134, 137, 140, 142, 145, 147, 149, 150, 152, 153, 155, 156, 158, 159, 161, 162, 164, 165, 167, 168, 170, 171, 173, 174, 175, 177, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 231, 234, 237, 240, 243, 246, 249, 252, 255, 257, 259, 261, 263, 265}
 
 func (i token) String() string {
 	if i >= token(len(_token_index)-1) {
vendor/mvdan.cc/sh/syntax/tokens.go
vendored
1
vendor/mvdan.cc/sh/syntax/tokens.go
vendored
@ -15,6 +15,7 @@ const (
|
|||||||
illegalTok token = iota
|
illegalTok token = iota
|
||||||
|
|
||||||
_EOF // EOF
|
_EOF // EOF
|
||||||
|
_Newl // Newline
|
||||||
_Lit // Lit
|
_Lit // Lit
|
||||||
_LitWord // LitWord
|
_LitWord // LitWord
|
||||||
_LitRedir // LitRedir
|
_LitRedir // LitRedir
|
||||||
|