1
0
mirror of https://github.com/alecthomas/chroma.git synced 2025-01-12 01:22:30 +02:00
chroma/lexer.go

307 lines
7.3 KiB
Go
Raw Normal View History

2017-06-01 16:17:21 +02:00
package chroma
import (
"fmt"
"regexp"
"strings"
)
var (
	// defaultOptions is used by RegexLexer.Tokenise when the caller passes
	// nil options; tokenisation then starts in the "root" state.
	defaultOptions = &TokeniseOptions{
		State: "root",
	}
)
2017-06-01 16:17:21 +02:00
// Config for a lexer.
type Config struct {
	// Name of the lexer.
	Name string

	// Shortcuts for the lexer, e.g. command-line aliases.
	Aliases []string

	// File name globs this lexer applies to.
	Filenames []string

	// Secondary file name globs.
	AliasFilenames []string

	// MIME types this lexer handles.
	MimeTypes []string

	// Regex matching is case-insensitive (compiles rules with the "i" flag).
	CaseInsensitive bool

	// Regex matches all characters, including newlines (the "s" flag).
	DotAll bool

	// Regex does not match across lines ($ matches EOL).
	//
	// Defaults to multiline (the "m" flag is set unless this is true).
	NotMultiline bool

	// The fields below are placeholders carried over from Pygments-style
	// configuration; they are not currently implemented.

	// Don't strip leading and trailing newlines from the input.
	// DontStripNL bool

	// Strip all leading and trailing whitespace from the input
	// StripAll bool

	// Make sure that the input does not end with a newline. This
	// is required for some lexers that consume input linewise.
	// DontEnsureNL bool

	// If given and greater than 0, expand tabs in the input.
	// TabSize int
}
// Token output to formatter.
type Token struct {
	Type  TokenType
	Value string
}

// String returns the token's literal value, satisfying fmt.Stringer.
func (t *Token) String() string { return t.Value }

// GoString returns a Go-syntax representation of the token for %#v output.
func (t *Token) GoString() string { return fmt.Sprintf("Token{%s, %q}", t.Type, t.Value) }
2017-06-01 16:17:21 +02:00
// TokeniseOptions contains options for tokenisers.
type TokeniseOptions struct {
	// State to start tokenisation in. Defaults to "root".
	State string
}
2017-06-01 16:17:21 +02:00
// A Lexer turns source text into a stream of tokens.
type Lexer interface {
	// Config describing the features of the Lexer.
	Config() *Config
	// Tokenise text, passing each emitted token to out.
	Tokenise(options *TokeniseOptions, text string, out func(*Token)) error
}
2017-06-07 02:27:10 +02:00
// Lexers is a collection of Lexer.
type Lexers []Lexer

// Pick attempts to pick the best Lexer for a piece of source code. May return nil.
func (l Lexers) Pick(text string) Lexer {
	if len(l) == 0 {
		return nil
	}
	var (
		best      Lexer
		bestScore = float32(-1)
	)
	// Ask each lexer that supports content analysis to score the text and
	// keep the highest-scoring one. Lexers without an Analyser are skipped.
	for _, candidate := range l {
		analyser, ok := candidate.(Analyser)
		if !ok {
			continue
		}
		if score := analyser.AnalyseText(text); score > bestScore {
			bestScore = score
			best = candidate
		}
	}
	return best
}
2017-06-01 16:17:21 +02:00
// Analyser determines if this lexer is appropriate for the given text.
type Analyser interface {
	// AnalyseText returns a score indicating suitability for the text.
	AnalyseText(text string) float32
}
// A Rule is a single transition in the lexer state machine: a regex pattern,
// an Emitter that produces tokens from the match groups, and an optional
// Mutator applied to the LexerState after a match.
type Rule struct {
	Pattern string
	Type    Emitter
	Mutator Mutator
}
// An Emitter takes group matches and returns tokens.
type Emitter interface {
	// Emit tokens for the given regex groups.
	Emit(groups []string, lexer Lexer, out func(*Token))
}
// EmitterFunc is a function that is an Emitter.
type EmitterFunc func(groups []string, lexer Lexer, out func(*Token))

// Emit tokens for groups by calling the function itself.
func (e EmitterFunc) Emit(groups []string, lexer Lexer, out func(*Token)) { e(groups, lexer, out) }
2017-06-01 16:17:21 +02:00
// ByGroups emits a token for each matching group in the rule's regex.
func ByGroups(emitters ...Emitter) Emitter {
return EmitterFunc(func(groups []string, lexer Lexer, out func(*Token)) {
2017-06-01 16:17:21 +02:00
for i, group := range groups[1:] {
emitters[i].Emit([]string{group}, lexer, out)
2017-06-01 16:17:21 +02:00
}
return
})
}
// Using returns an Emitter that uses a given Lexer for parsing and emitting.
//
// The whole match (groups[0]) is re-tokenised by lexer with the supplied
// options; a tokenisation error aborts with a panic.
func Using(lexer Lexer, options *TokeniseOptions) Emitter {
	return EmitterFunc(func(groups []string, _ Lexer, out func(*Token)) {
		err := lexer.Tokenise(options, groups[0], out)
		if err != nil {
			panic(err)
		}
	})
}
// UsingSelf is like Using, but uses the current Lexer.
//
// The whole match (groups[0]) is re-tokenised by the lexer performing the
// match, starting in the named state; a tokenisation error aborts with a panic.
func UsingSelf(state string) Emitter {
	return EmitterFunc(func(groups []string, lexer Lexer, out func(*Token)) {
		opts := &TokeniseOptions{State: state}
		if err := lexer.Tokenise(opts, groups[0], out); err != nil {
			panic(err)
		}
	})
}
2017-06-01 16:17:21 +02:00
// Words creates a regex that matches any of the given literal words as whole
// words (the alternation is wrapped in \b word boundaries).
//
// Each word is escaped with regexp.QuoteMeta; the caller's slice is not
// modified (the previous implementation quoted in place, mutating a slice
// passed as Words(s...)).
func Words(words ...string) string {
	quoted := make([]string, len(words))
	for i, word := range words {
		quoted[i] = regexp.QuoteMeta(word)
	}
	return `\b(?:` + strings.Join(quoted, `|`) + `)\b`
}
// Rules maps from state to a sequence of Rules.
type Rules map[string][]Rule
// MustNewLexer creates a new Lexer or panics.
2017-06-07 02:27:10 +02:00
func MustNewLexer(config *Config, rules Rules) *RegexLexer {
2017-06-01 16:17:21 +02:00
lexer, err := NewLexer(config, rules)
if err != nil {
panic(err)
}
return lexer
}
// NewLexer creates a new regex-based Lexer.
//
// "rules" is a state machine transitition map. Each key is a state. Values are sets of rules
// that match input, optionally modify lexer state, and output tokens.
2017-06-07 02:27:10 +02:00
func NewLexer(config *Config, rules Rules) (*RegexLexer, error) {
if config == nil {
config = &Config{}
}
2017-06-01 16:17:21 +02:00
if _, ok := rules["root"]; !ok {
return nil, fmt.Errorf("no \"root\" state")
}
compiledRules := map[string][]CompiledRule{}
for state, rules := range rules {
for _, rule := range rules {
crule := CompiledRule{Rule: rule}
2017-06-05 01:55:19 +02:00
flags := ""
if !config.NotMultiline {
flags += "m"
}
if config.CaseInsensitive {
flags += "i"
}
2017-06-05 01:55:19 +02:00
if config.DotAll {
flags += "s"
}
re, err := regexp.Compile("^(?" + flags + ")(?:" + rule.Pattern + ")")
2017-06-01 16:17:21 +02:00
if err != nil {
return nil, fmt.Errorf("invalid regex %q for state %q: %s", rule.Pattern, state, err)
}
crule.Regexp = re
compiledRules[state] = append(compiledRules[state], crule)
}
}
2017-06-07 02:27:10 +02:00
return &RegexLexer{
2017-06-01 16:17:21 +02:00
config: config,
rules: compiledRules,
}, nil
}
// A CompiledRule is a Rule with a pre-compiled regex.
type CompiledRule struct {
	Rule
	Regexp *regexp.Regexp
}

// CompiledRules maps from state to a sequence of compiled rules.
type CompiledRules map[string][]CompiledRule
// LexerState is the mutable state threaded through a single tokenisation run.
// Mutators receive and may modify it (e.g. the state Stack).
type LexerState struct {
	// Text being tokenised.
	Text string
	// Pos is the current byte offset into Text.
	Pos int
	// Rules is the compiled state machine.
	Rules map[string][]CompiledRule
	// Stack of state names; the top entry is the active state.
	Stack []string
	// State is the name of the active state (top of Stack).
	State string
	// Rule is the index of the most recently matched rule within State.
	Rule int
	// Group matches.
	Groups []string
}
2017-06-07 02:27:10 +02:00
// RegexLexer is a Lexer driven by a compiled regex state machine.
type RegexLexer struct {
	config *Config
	rules  map[string][]CompiledRule
	// analyser, if set, scores how well this lexer suits a given text.
	analyser func(text string) float32
}

// SetAnalyser sets the analyser function used to perform content inspection.
// It returns the lexer to allow chaining.
func (r *RegexLexer) SetAnalyser(analyser func(text string) float32) *RegexLexer {
	r.analyser = analyser
	return r
}
func (r *RegexLexer) AnalyseText(text string) float32 {
if r.analyser != nil {
return r.analyser(text)
}
return 0.0
2017-06-01 16:17:21 +02:00
}
2017-06-07 02:27:10 +02:00
// Config returns the Config describing this lexer.
func (r *RegexLexer) Config() *Config {
	return r.config
}
2017-06-07 02:27:10 +02:00
// Tokenise implements Lexer, passing each token produced from text to out.
//
// If options is nil, defaultOptions is used (start state "root"). The loop
// runs until the input is consumed or the state stack empties: at each
// position the rules of the state on top of the stack are tried in order;
// on a match the rule's Mutator (if any) may alter the state, and its
// Emitter (if any) emits tokens for the match groups. A Mutator error aborts
// tokenisation and is returned.
func (r *RegexLexer) Tokenise(options *TokeniseOptions, text string, out func(*Token)) error {
	if options == nil {
		options = defaultOptions
	}
	state := &LexerState{
		Text:  text,
		Stack: []string{options.State},
		Rules: r.rules,
	}
	for state.Pos < len(text) && len(state.Stack) > 0 {
		// The active state is the top of the stack.
		state.State = state.Stack[len(state.Stack)-1]
		ruleIndex, rule, index := matchRules(state.Text[state.Pos:], state.Rules[state.State])
		// fmt.Println(text[state.Pos:state.Pos+1], rule, state.Text[state.Pos:state.Pos+1])
		// No match: emit one byte as an Error token and advance.
		// NOTE(review): this slices a single byte, not a rune, so a
		// multi-byte UTF-8 character is split across Error tokens — confirm
		// this is intended.
		if index == nil {
			out(&Token{Error, state.Text[state.Pos : state.Pos+1]})
			state.Pos++
			continue
		}
		state.Rule = ruleIndex
		// index holds (start, end) byte-offset pairs for group 0 (the whole
		// match) and each capturing group, relative to state.Pos; -1 marks a
		// group that did not participate in the match.
		// NOTE(review): the -1 sentinel is tested after adding state.Pos, so
		// it only literally matches at Pos 0; elsewhere a non-participating
		// group yields an empty slice, which is the same zero value anyway.
		state.Groups = make([]string, len(index)/2)
		for i := 0; i < len(index); i += 2 {
			start := state.Pos + index[i]
			end := state.Pos + index[i+1]
			if start == -1 || end == -1 {
				continue
			}
			state.Groups[i/2] = text[start:end]
		}
		// Advance past the whole match (group 0 ends at index[1]).
		state.Pos += index[1]
		// Mutate state first (may push/pop Stack), then emit tokens.
		if rule.Mutator != nil {
			if err := rule.Mutator.Mutate(state); err != nil {
				return err
			}
		}
		if rule.Type != nil {
			rule.Type.Emit(state.Groups, r, out)
		}
	}
	return nil
}
// Tokenise text using lexer, returning tokens as a slice.
//
// Any tokens collected before an error are returned along with that error.
func Tokenise(lexer Lexer, options *TokeniseOptions, text string) ([]*Token, error) {
	out := []*Token{}
	// Capture the error before returning: the callback reallocates out via
	// append, and the Go spec does not order the read of out in a
	// "return out, lexer.Tokenise(...)" statement relative to the call, so
	// the single-statement form could return a stale (empty) slice header.
	err := lexer.Tokenise(options, text, func(token *Token) { out = append(out, token) })
	return out, err
}
func matchRules(text string, rules []CompiledRule) (int, CompiledRule, []int) {
for i, rule := range rules {
2017-06-01 16:17:21 +02:00
if index := rule.Regexp.FindStringSubmatchIndex(text); index != nil {
return i, rule, index
2017-06-01 16:17:21 +02:00
}
}
return 0, CompiledRule{}, nil
2017-06-01 16:17:21 +02:00
}