feat: undo

@@ -4,12 +4,12 @@ import "git.maximhutz.com/max/lambda/pkg/token"
 
 // scanner is the declarative lexer for the lambda calculus.
 var scanner = token.NewScanner[tokenType]().
-	On(`\(`, tokenOpenParen).
-	On(`\)`, tokenCloseParen).
-	On(`\\`, tokenSlash).
-	On(`\.`, tokenDot).
-	On(`[a-zA-Z0-9_]+`, tokenAtom).
-	Skip(`\s+`)
+	On(`\(`, tokenOpenParen, 0).
+	On(`\)`, tokenCloseParen, 0).
+	On(`\\`, tokenSlash, 0).
+	On(`\.`, tokenDot, 0).
+	On(`[a-zA-Z0-9_]+`, tokenAtom, 0).
+	Skip(`\s+`, 0)
 
 // scan tokenizes an input string into lambda calculus tokens.
 func scan(input string) ([]lambdaToken, error) {
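For orientation between hunks: given the rule set above, a term like `\x. x` should lex to slash, atom, dot, atom. A minimal sketch of a check to that effect, assuming it sits in the lexer's own package; the package clause and test name are illustrative, and only scan and the token-type names come from the diff:

package lambda // illustrative; the real package name is not shown in the diff

import "testing"

// TestScanIdentity is a hedged sketch exercising the lambda scanner.
func TestScanIdentity(t *testing.T) {
	tokens, err := scan(`\x. x`)
	if err != nil {
		t.Fatal(err)
	}
	// Per the rules above: tokenSlash, tokenAtom("x"), tokenDot,
	// tokenAtom("x"); the interior whitespace matches the Skip rule.
	if len(tokens) != 4 {
		t.Fatalf("want 4 tokens, got %d", len(tokens))
	}
}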

@@ -4,18 +4,18 @@ import "git.maximhutz.com/max/lambda/pkg/token"
 
 // scanner is the declarative lexer for the Saccharine language.
 var scanner = token.NewScanner[TokenType]().
-	On(`:=`, TokenAssign).
-	On(`\(`, TokenOpenParen).
-	On(`\)`, TokenCloseParen).
-	On(`\{`, TokenOpenBrace).
-	On(`\}`, TokenCloseBrace).
-	On(`;`, TokenHardBreak).
-	On(`\n`, TokenSoftBreak).
-	On(`\\`, TokenSlash).
-	On(`\.`, TokenDot).
-	On(`[a-zA-Z0-9_]+`, TokenAtom).
-	Skip(`#[^\n]*`).
-	Skip(`[^\S\n]+`)
+	On(`:=`, TokenAssign, 1).
+	On(`\(`, TokenOpenParen, 0).
+	On(`\)`, TokenCloseParen, 0).
+	On(`\{`, TokenOpenBrace, 0).
+	On(`\}`, TokenCloseBrace, 0).
+	On(`;`, TokenHardBreak, 0).
+	On(`\n`, TokenSoftBreak, 0).
+	On(`\\`, TokenSlash, 0).
+	On(`\.`, TokenDot, 0).
+	On(`[a-zA-Z0-9_]+`, TokenAtom, 0).
+	Skip(`#[^\n]*`, 0).
+	Skip(`[^\S\n]+`, 0)
 
 // scan tokenizes a string into Saccharine tokens.
 func scan(input string) ([]Token, error) {
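Note the new trailing argument on every rule: 0 everywhere except `:=`, which gets 1; its definition is not part of the hunks shown in this commit. Independent of that argument, the rules make `\n` significant (TokenSoftBreak) while other whitespace and `#` comments are skipped. A hedged sketch of the expected stream for one line; only scan and the TokenType constants come from the diff:

package saccharine // illustrative package name

import "fmt"

// exampleScan is a hedged sketch; it tokenizes one Saccharine line.
func exampleScan() {
	tokens, err := scan("f := \\x. x # identity\n")
	if err != nil {
		fmt.Println(err)
		return
	}
	// Expected per the rules above: TokenAtom("f"), TokenAssign,
	// TokenSlash, TokenAtom("x"), TokenDot, TokenAtom("x"),
	// TokenSoftBreak; the spaces match the `[^\S\n]+` Skip rule and
	// "# identity" matches the `#[^\n]*` Skip rule.
	for _, tok := range tokens {
		fmt.Printf("%+v\n", tok)
	}
}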

@@ -4,7 +4,6 @@ import (
 	"errors"
 	"fmt"
 	"regexp"
-	"unicode/utf8"
 )
 
 // A rule describes a single lexical pattern for the scanner.

@@ -41,10 +40,8 @@ func (s *Scanner[T]) On(pattern string, typ T) *Scanner[T] {
 // Skip registers a rule that consumes matching text without emitting a token.
 // This is used for whitespace and comments.
 func (s *Scanner[T]) Skip(pattern string) *Scanner[T] {
-	var zero T
 	s.rules = append(s.rules, rule[T]{
 		pattern: compileAnchored(pattern),
-		typ:     zero,
 		skip:    true,
 	})
 	return s
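The dropped lines were redundant rather than wrong: a Go composite literal zero-initializes every omitted field, so leaving typ out of the rule[T] literal is equivalent to the old `var zero T` / `typ: zero` spelling. A standalone illustration of that language rule, with names chosen to mirror the diff:

package main

import "fmt"

type rule[T any] struct {
	typ  T
	skip bool
}

func main() {
	// typ is omitted from the literal, so it takes T's zero value,
	// exactly as the explicit `var zero T; typ: zero` form did.
	r := rule[int]{skip: true}
	fmt.Println(r.typ, r.skip) // 0 true
}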

@@ -52,48 +49,37 @@ func (s *Scanner[T]) Skip(pattern string) *Scanner[T] {
 
 // Scan tokenizes the input string using the registered rules.
 // At each position, all rules are tested and the longest match wins.
-// If no rule matches, an error is recorded and the scanner advances one rune.
+// If no rule matches, an error is recorded and the scanner advances one byte.
 func (s *Scanner[T]) Scan(input string) ([]Token[T], error) {
 	tokens := []Token[T]{}
 	errorList := []error{}
-	pos := 0
-	column := 0
 
-	for pos < len(input) {
+	for pos := 0; pos < len(input); {
 		bestLen := 0
 		bestRule := -1
 
 		for idx, r := range s.rules {
 			loc := r.pattern.FindStringIndex(input[pos:])
-			if loc == nil {
-				continue
-			}
-			if matchLen := loc[1]; matchLen > bestLen {
-				bestLen = matchLen
+			if loc != nil && loc[1] > bestLen {
+				bestLen = loc[1]
 				bestRule = idx
 			}
 		}
 
 		if bestRule == -1 || bestLen == 0 {
-			_, size := utf8.DecodeRuneInString(input[pos:])
-			errorList = append(errorList, fmt.Errorf("unknown character '%v'", input[pos:pos+size]))
-			pos += size
-			column++
+			errorList = append(errorList, fmt.Errorf("unknown character '%v'", string(input[pos])))
+			pos++
 			continue
 		}
 
-		matched := input[pos : pos+bestLen]
-		r := s.rules[bestRule]
-
-		if !r.skip {
+		if r := s.rules[bestRule]; !r.skip {
 			tokens = append(tokens, Token[T]{
 				Type:   r.typ,
-				Value:  matched,
-				Column: column,
+				Value:  input[pos : pos+bestLen],
+				Column: pos,
 			})
 		}
 
-		column += utf8.RuneCountInString(matched)
 		pos += bestLen
 	}
 
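The behavioral consequence of this hunk is worth spelling out: error recovery now advances one byte instead of one rune, so an unmatched multi-byte rune such as 'λ' is reported once per byte rather than once, and Column now records a byte offset into the input rather than a rune-counting column. A standalone sketch of the difference, not using the scanner itself:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	input := "λ" // two bytes: 0xCE 0xBB

	// Old behaviour: decode a whole rune and step over it in one go,
	// producing a single error for the unknown character.
	r, size := utf8.DecodeRuneInString(input)
	fmt.Println(string(r), size) // λ 2

	// New behaviour: step one byte at a time, so the same rune is
	// visited twice, each half reported as its own unknown character.
	for pos := 0; pos < len(input); pos++ {
		fmt.Printf("unknown character %q\n", string(input[pos]))
	}
}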