feat: functional options pattern

2026-02-11 21:08:57 -05:00
parent 3b7cf21eb7
commit 76ea6ea2cb
3 changed files with 104 additions and 67 deletions
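For context on the title: the functional options pattern replaces chained builder methods with first-class configuration functions that the constructor applies in order. A minimal standalone sketch of the shape (every name below is illustrative, not from this repository; note that this commit's Option[T] threads a rule slice through each option and returns it, rather than mutating a struct as sketched here):

    package main

    import "fmt"

    // greeter is a hypothetical type configured via functional options.
    type greeter struct {
        greeting string
        name     string
    }

    // option is one configuration step applied during construction.
    type option func(*greeter)

    // withGreeting returns an option that overrides the default greeting.
    func withGreeting(g string) option {
        return func(gr *greeter) { gr.greeting = g }
    }

    // newGreeter applies each option, in order, to a default-initialized greeter.
    func newGreeter(name string, opts ...option) *greeter {
        gr := &greeter{greeting: "hello", name: name}
        for _, opt := range opts {
            opt(gr)
        }
        return gr
    }

    func main() {
        gr := newGreeter("max", withGreeting("hi"))
        fmt.Println(gr.greeting, gr.name) // hi max
    }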


@@ -3,13 +3,14 @@ package lambda
 import "git.maximhutz.com/max/lambda/pkg/token"
 
 // scanner is the declarative lexer for the lambda calculus.
-var scanner = token.NewScanner[tokenType]().
-	On(`\(`, tokenOpenParen, 0).
-	On(`\)`, tokenCloseParen, 0).
-	On(`\\`, tokenSlash, 0).
-	On(`\.`, tokenDot, 0).
-	On(`[a-zA-Z0-9_]+`, tokenAtom, 0).
-	Skip(`\s+`, 0)
+var scanner = token.NewScanner(
+	token.On(`\(`, tokenOpenParen, 0),
+	token.On(`\)`, tokenCloseParen, 0),
+	token.On(`\\`, tokenSlash, 0),
+	token.On(`\.`, tokenDot, 0),
+	token.On(`[a-zA-Z0-9_]+`, tokenAtom, 0),
+	token.Skip[tokenType](`\s+`, 0),
+)
 
 // scan tokenizes an input string into lambda calculus tokens.
 func scan(input string) ([]lambdaToken, error) {


@@ -3,19 +3,20 @@ package saccharine
 import "git.maximhutz.com/max/lambda/pkg/token"
 
 // scanner is the declarative lexer for the Saccharine language.
-var scanner = token.NewScanner[TokenType]().
-	On(`:=`, TokenAssign, 1).
-	On(`\(`, TokenOpenParen, 0).
-	On(`\)`, TokenCloseParen, 0).
-	On(`\{`, TokenOpenBrace, 0).
-	On(`\}`, TokenCloseBrace, 0).
-	On(`;`, TokenHardBreak, 0).
-	On(`\n`, TokenSoftBreak, 0).
-	On(`\\`, TokenSlash, 0).
-	On(`\.`, TokenDot, 0).
-	On(`[a-zA-Z0-9_]+`, TokenAtom, 0).
-	Skip(`#[^\n]*`, 0).
-	Skip(`[^\S\n]+`, 0)
+var scanner = token.NewScanner(
+	token.On(`:=`, TokenAssign, 1),
+	token.On(`\(`, TokenOpenParen, 0),
+	token.On(`\)`, TokenCloseParen, 0),
+	token.On(`\{`, TokenOpenBrace, 0),
+	token.On(`\}`, TokenCloseBrace, 0),
+	token.On(`;`, TokenHardBreak, 0),
+	token.On(`\n`, TokenSoftBreak, 0),
+	token.On(`\\`, TokenSlash, 0),
+	token.On(`\.`, TokenDot, 0),
+	token.On(`[a-zA-Z0-9_]+`, TokenAtom, 0),
+	token.Skip[TokenType](`#[^\n]*`, 0),
+	token.Skip[TokenType](`[^\S\n]+`, 0),
+)
 
 // scan tokenizes a string into Saccharine tokens.
 func scan(input string) ([]Token, error) {
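A note on the one non-zero precedence above: under the new sorted, first-match scan, token.On(`:=`, TokenAssign, 1) sorts ahead of every precedence-0 rule, so it is tried first at each position. A sketch of an in-package test exercising the resulting token stream (illustrative only, not part of the commit; it assumes the saccharine Token exposes the Type field shown in the token package hunk below and that TokenType is comparable):

    package saccharine

    import (
        "slices"
        "testing"
    )

    // Illustrative test: `:=` lexes as a single TokenAssign, with the
    // surrounding whitespace consumed by the skip rules.
    func TestScanAssign(t *testing.T) {
        toks, err := scan("x := y")
        if err != nil {
            t.Fatal(err)
        }
        got := make([]TokenType, 0, len(toks))
        for _, tok := range toks {
            got = append(got, tok.Type)
        }
        want := []TokenType{TokenAtom, TokenAssign, TokenAtom}
        if !slices.Equal(got, want) {
            t.Errorf("token types: got %v, want %v", got, want)
        }
    }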


@@ -4,83 +4,118 @@ import (
 	"errors"
 	"fmt"
 	"regexp"
+	"slices"
 )
 
 // A rule describes a single lexical pattern for the scanner.
 type rule[T Type] struct {
 	pattern    *regexp.Regexp
 	typ        T
+	precedence int
 	skip       bool
 }
 
-// A Scanner is a declarative lexer configured by registering regex rules.
-// At each position in the input, all rules are tested and the longest match
-// wins.
-// Ties are broken by registration order (first registered wins).
+// compare orders rules by descending precedence.
+func (r rule[T]) compare(other rule[T]) int {
+	return other.precedence - r.precedence
+}
+
+// An Option configures a Scanner during construction.
+type Option[T Type] func(rules []rule[T]) []rule[T]
+
+// On returns an option that registers a token-emitting rule.
+// The token's value is the matched text.
+// Higher precedence rules are tried first.
+func On[T Type](pattern string, typ T, precedence int) Option[T] {
+	return func(rules []rule[T]) []rule[T] {
+		return append(rules, rule[T]{
+			pattern:    compileAnchored(pattern),
+			typ:        typ,
+			precedence: precedence,
+		})
+	}
+}
+
+// Skip returns an option that registers a non-emitting rule.
+// This is used for whitespace and comments.
+// Higher precedence rules are tried first.
+func Skip[T Type](pattern string, precedence int) Option[T] {
+	return func(rules []rule[T]) []rule[T] {
+		return append(rules, rule[T]{
+			pattern:    compileAnchored(pattern),
+			precedence: precedence,
+			skip:       true,
+		})
+	}
+}
+
+// A Scanner is a declarative lexer built from a set of regex rules.
+// Rules are sorted by precedence (highest first), with registration order as
+// a tiebreaker.
+// At each position, the first matching rule wins.
 type Scanner[T Type] struct {
 	rules []rule[T]
 }
 
-// NewScanner creates a new Scanner with no rules.
-func NewScanner[T Type]() *Scanner[T] {
-	return &Scanner[T]{}
+// NewScanner creates a Scanner by applying the given options and sorting the
+// resulting rules by precedence.
+func NewScanner[T Type](opts ...Option[T]) *Scanner[T] {
+	var rules []rule[T]
+	for _, opt := range opts {
+		rules = opt(rules)
+	}
+	slices.SortStableFunc(rules, rule[T].compare)
+	return &Scanner[T]{rules: rules}
 }
 
-// On registers a rule that emits a token of the given type when the pattern
-// matches.
-// The token's value is the matched text.
-func (s *Scanner[T]) On(pattern string, typ T) *Scanner[T] {
-	s.rules = append(s.rules, rule[T]{
-		pattern: compileAnchored(pattern),
-		typ:     typ,
-	})
-	return s
-}
-
-// Skip registers a rule that consumes matching text without emitting a token.
-// This is used for whitespace and comments.
-func (s *Scanner[T]) Skip(pattern string) *Scanner[T] {
-	s.rules = append(s.rules, rule[T]{
-		pattern: compileAnchored(pattern),
-		skip:    true,
-	})
-	return s
+// scanOne tries each rule at the current position and returns the first match.
+// Returns the token (or nil if skipped) and the number of bytes consumed.
+// Returns 0 if no rule matched.
+func (s *Scanner[T]) scanOne(input string, pos int) (*Token[T], int) {
+	for _, r := range s.rules {
+		loc := r.pattern.FindStringIndex(input[pos:])
+		if loc == nil || loc[1] == 0 {
+			continue
+		}
+
+		if r.skip {
+			return nil, loc[1]
+		}
+
+		return &Token[T]{
+			Type:   r.typ,
+			Value:  input[pos : pos+loc[1]],
+			Column: pos,
+		}, loc[1]
+	}
+	return nil, 0
 }
 
 // Scan tokenizes the input string using the registered rules.
-// At each position, all rules are tested and the longest match wins.
+// At each position, rules are tried in precedence order and the first match
+// wins.
 // If no rule matches, an error is recorded and the scanner advances one byte.
 func (s *Scanner[T]) Scan(input string) ([]Token[T], error) {
 	tokens := []Token[T]{}
 	errorList := []error{}
 	for pos := 0; pos < len(input); {
-		bestLen := 0
-		bestRule := -1
-		for idx, r := range s.rules {
-			loc := r.pattern.FindStringIndex(input[pos:])
-			if loc != nil && loc[1] > bestLen {
-				bestLen = loc[1]
-				bestRule = idx
-			}
-		}
-		if bestRule == -1 || bestLen == 0 {
+		tok, n := s.scanOne(input, pos)
+
+		if n == 0 {
 			errorList = append(errorList, fmt.Errorf("unknown character '%v'", string(input[pos])))
 			pos++
 			continue
 		}
-		if r := s.rules[bestRule]; !r.skip {
-			tokens = append(tokens, Token[T]{
-				Type:   r.typ,
-				Value:  input[pos : pos+bestLen],
-				Column: pos,
-			})
-		}
-		pos += bestLen
+
+		if tok != nil {
+			tokens = append(tokens, *tok)
+		}
+
+		pos += n
 	}
 	return tokens, errors.Join(errorList...)
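The observable change in Scan's contract: the old loop tested every rule and kept the longest match, while the new loop walks the precedence-sorted rules and commits to the first match. A self-contained usage sketch of the new API, using only the names visible in this commit (NewScanner, On, Skip, Scan, and the Token fields); the client-side kind type is an assumption about what the Type constraint admits, and the patterns are presumed anchored, as the compileAnchored name suggests:

    package main

    import (
        "fmt"

        "git.maximhutz.com/max/lambda/pkg/token"
    )

    // kind is a client-side token type, presumed to satisfy the Type constraint.
    type kind int

    const (
        kindKeyword kind = iota
        kindIdent
    )

    func main() {
        s := token.NewScanner(
            // Precedence 1: sorted ahead of the identifier rule, so it is
            // tried first at every position.
            token.On(`let`, kindKeyword, 1),
            token.On(`[a-z]+`, kindIdent, 0),
            token.Skip[kind](`\s+`, 0),
        )

        toks, err := s.Scan("let letter")
        if err != nil {
            panic(err)
        }
        for _, t := range toks {
            fmt.Printf("%d %q @%d\n", t.Type, t.Value, t.Column)
        }
        // Under the old longest-match loop, "letter" would lex as one
        // identifier. Under first-match with these precedences, it lexes as
        // the keyword "let" followed by the identifier "ter" — exactly the
        // kind of overlap that precedence (or a tighter pattern such as
        // `let\b`) must now arbitrate.
    }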