refactor: extract shared token package (#46)
## Description

Both the `saccharine` and `lambda` packages need tokenizing and parsing primitives. This PR extracts shared token infrastructure into a new `pkg/token` package, then wires both languages up to use it.

- Add `pkg/token` with a generic `Token[T]` type, `Scan`, `ScanAtom`, `ScanRune`, `ScanCharacter`, `IsVariable`, `ParseRawToken`, and `ParseList`.
- Refactor `pkg/saccharine` to delegate to `pkg/token`, removing duplicated scanning and parsing helpers.
- Implement `Codec.Decode` for `pkg/lambda` (scanner + parser) using the shared token package.
- Add `iterator.While` for predicate-driven iteration.
- Rename `iterator.Do` to `iterator.Try` to better describe its rollback semantics.

### Decisions

- The `Type` constraint (`comparable` + `Name() string`) keeps the generic token flexible while ensuring every token type can produce readable error messages (a sketch of such a type follows below).
- `iterator.Do` was renamed to `iterator.Try` since it describes a try/rollback operation, not a side-effecting "do".

## Benefits

- Eliminates duplicated token, scanning, and parsing code between languages.
- Enables the `lambda` package to decode (parse) lambda calculus strings, which was previously unimplemented.
- Makes it straightforward to add new languages by reusing `pkg/token` primitives.

## Checklist

- [x] Code follows conventional commit format.
- [x] Branch follows naming convention (`<type>/<description>`). Always use underscores.
- [x] Tests pass (if applicable).
- [ ] Documentation updated (if applicable).

Reviewed-on: #46
Co-authored-by: M.V. Hutz <git@maximhutz.me>
Co-committed-by: M.V. Hutz <git@maximhutz.me>
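To make the `Type` constraint concrete, here is a minimal sketch of a token type a language package might define. The `TokenType` enum and its names are hypothetical, not part of this PR:

```go
// Hypothetical token types for a small parenthesized language.
type TokenType int

const (
	LeftParen  TokenType = iota // "("
	RightParen                  // ")"
	Atom                        // a run of variable characters
)

// Name satisfies pkg/token's Type constraint by giving each token
// type a human-readable name for error messages.
func (t TokenType) Name() string {
	switch t {
	case LeftParen:
		return "'('"
	case RightParen:
		return "')'"
	case Atom:
		return "atom"
	default:
		return "unknown"
	}
}
```

Because `TokenType` is an `int`, it is `comparable`, so adding the `Name` method is all it takes to satisfy the constraint.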
This commit was merged in pull request #46.
pkg/token/parse.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package token

import (
	"fmt"

	"git.maximhutz.com/max/lambda/pkg/iterator"
)

// ParseRawToken consumes the next token from the iterator if its type matches
// the expected type.
// Uses [iterator.Try] for automatic backtracking on failure.
func ParseRawToken[T Type](i *iterator.Iterator[Token[T]], expected T) (*Token[T], error) {
	return iterator.Try(i, func(i *iterator.Iterator[Token[T]]) (*Token[T], error) {
		if tok, err := i.Next(); err != nil {
			return nil, err
		} else if tok.Type != expected {
			return nil, fmt.Errorf("expected token '%v', got '%v'", expected.Name(), tok.Value)
		} else {
			return &tok, nil
		}
	})
}

// ParseList repeatedly applies a parse function, collecting results into a
// slice.
// Stops when the parse function returns an error.
// Returns an error if fewer than minimum results are collected.
func ParseList[T Type, U any](i *iterator.Iterator[Token[T]], fn func(*iterator.Iterator[Token[T]]) (U, error), minimum int) ([]U, error) {
	results := []U{}

	for {
		if u, err := fn(i); err != nil {
			if len(results) < minimum {
				return nil, fmt.Errorf("expected at least '%v' items, got only '%v': %w", minimum, len(results), err)
			}
			return results, nil
		} else {
			results = append(results, u)
		}
	}
}
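As a usage sketch (not part of this diff), a parser built on these primitives might look like the following, reusing the hypothetical `TokenType` from above; `parseAtom` and `parseAtoms` are illustrative names:

```go
import (
	"git.maximhutz.com/max/lambda/pkg/iterator"
	"git.maximhutz.com/max/lambda/pkg/token"
)

// parseAtom consumes exactly one Atom token and returns its text.
// ParseRawToken backtracks on failure, so callers can try alternatives.
func parseAtom(i *iterator.Iterator[token.Token[TokenType]]) (string, error) {
	tok, err := token.ParseRawToken(i, Atom)
	if err != nil {
		return "", err
	}
	return tok.Value, nil
}

// parseAtoms collects one or more consecutive atoms. ParseList stops at
// the first token parseAtom rejects and fails if no atoms were parsed.
func parseAtoms(i *iterator.Iterator[token.Token[TokenType]]) ([]string, error) {
	return token.ParseList(i, parseAtom, 1)
}
```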
pkg/token/scan.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package token

import (
	"errors"
	"fmt"
	"unicode"

	"git.maximhutz.com/max/lambda/pkg/iterator"
)

// IsVariable determines whether a rune can be a valid variable character.
func IsVariable(r rune) bool {
	return unicode.IsLetter(r) || unicode.IsNumber(r) || r == '_'
}

// ScanRune consumes the next rune from the iterator if it satisfies the
// predicate.
// Returns an error if the iterator is exhausted or the rune does not match.
func ScanRune(i *iterator.Iterator[rune], expected func(rune) bool) (rune, error) {
	return iterator.Try(i, func(i *iterator.Iterator[rune]) (rune, error) {
		if r, err := i.Next(); err != nil {
			return r, err
		} else if !expected(r) {
			return r, fmt.Errorf("got unexpected rune '%v'", r)
		} else {
			return r, nil
		}
	})
}

// ScanCharacter consumes the next rune from the iterator if it matches the
// expected rune exactly.
// Returns an error if the iterator is exhausted or the rune does not match.
func ScanCharacter(i *iterator.Iterator[rune], expected rune) (rune, error) {
	return ScanRune(i, func(r rune) bool { return r == expected })
}

// ScanAtom scans a contiguous sequence of variable characters into a single
// atom token.
// The first rune has already been consumed and is passed in.
func ScanAtom[T Type](i *iterator.Iterator[rune], first rune, typ T, column int) *Token[T] {
	atom := []rune{first}

	for {
		if r, err := ScanRune(i, IsVariable); err != nil {
			break
		} else {
			atom = append(atom, r)
		}
	}

	return NewAtom(typ, string(atom), column)
}

// Scan tokenizes an input string using a language-specific scanToken function.
// The scanToken function is called repeatedly until the input is exhausted.
// It returns nil (no token, no error) for skippable input like whitespace.
// Errors are accumulated and returned joined at the end.
func Scan[T Type](input string, scanToken func(*iterator.Iterator[rune]) (*Token[T], error)) ([]Token[T], error) {
	i := iterator.Of([]rune(input))
	tokens := []Token[T]{}
	errorList := []error{}

	for !i.Done() {
		token, err := scanToken(i)
		if err != nil {
			errorList = append(errorList, err)
		} else if token != nil {
			tokens = append(tokens, *token)
		}
	}

	return tokens, errors.Join(errorList...)
}
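For illustration, a `scanToken` callback for `Scan` might be assembled from these helpers as below. This sketch assumes failed `Scan*` calls leave the iterator untouched (per [iterator.Try]'s backtracking) and stubs column tracking with 0, since the iterator's position API is outside this diff:

```go
import (
	"fmt"
	"unicode"

	"git.maximhutz.com/max/lambda/pkg/iterator"
	"git.maximhutz.com/max/lambda/pkg/token"
)

// scanToken produces one token per call, skipping whitespace by
// returning (nil, nil), which Scan treats as skippable input.
func scanToken(i *iterator.Iterator[rune]) (*token.Token[TokenType], error) {
	if _, err := token.ScanRune(i, unicode.IsSpace); err == nil {
		return nil, nil
	}
	if _, err := token.ScanCharacter(i, '('); err == nil {
		return token.New(LeftParen, 0), nil // column 0 is a placeholder
	}
	if _, err := token.ScanCharacter(i, ')'); err == nil {
		return token.New(RightParen, 0), nil
	}
	if r, err := token.ScanRune(i, token.IsVariable); err == nil {
		return token.ScanAtom(i, r, Atom, 0), nil
	}
	// Consume the offending rune so Scan keeps making progress and can
	// accumulate further errors instead of looping forever.
	r, _ := i.Next()
	return nil, fmt.Errorf("unexpected character %q", r)
}
```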
pkg/token/token.go (new file, 36 lines)
@@ -0,0 +1,36 @@
// Package token provides generic token types and scanning/parsing primitives
// for building language-specific lexers and parsers.
package token

// A Type is a constraint for language-specific token type enums.
// It must be comparable (for equality checks) and must have a Name method
// that returns a human-readable string for error messages.
type Type interface {
	comparable

	// Name returns a human-readable name for this token type.
	Name() string
}

// A Token is a lexical unit in a source language.
type Token[T Type] struct {
	Column int    // Where the token begins in the source text.
	Type   T      // What type the token is.
	Value  string // The value of the token.
}

// New creates a Token of the given type at the given column.
// The token's value is derived from its type's Name method.
func New[T Type](typ T, column int) *Token[T] {
	return &Token[T]{Type: typ, Column: column, Value: typ.Name()}
}

// NewAtom creates a Token of the given type with a custom value at the given
// column.
func NewAtom[T Type](typ T, name string, column int) *Token[T] {
	return &Token[T]{Type: typ, Column: column, Value: name}
}

// Name returns the type of the Token, as a string.
func (t Token[T]) Name() string {
	return t.Type.Name()
}
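Putting the pieces together, end-to-end decoding might look like this sketch, combining the hypothetical `scanToken` and `parseAtoms` from above. It assumes `iterator.Of` accepts any slice type, as it does `[]rune` in `Scan`:

```go
import (
	"git.maximhutz.com/max/lambda/pkg/iterator"
	"git.maximhutz.com/max/lambda/pkg/token"
)

// decode parses a string of the form "(atom atom ...)" into its atoms.
func decode(input string) ([]string, error) {
	// Scan the raw text into tokens using the sketch scanner above.
	tokens, err := token.Scan(input, scanToken)
	if err != nil {
		return nil, err
	}

	// Parse the token stream: '(', one or more atoms, ')'.
	i := iterator.Of(tokens)
	if _, err := token.ParseRawToken(i, LeftParen); err != nil {
		return nil, err
	}
	atoms, err := parseAtoms(i)
	if err != nil {
		return nil, err
	}
	if _, err := token.ParseRawToken(i, RightParen); err != nil {
		return nil, err
	}
	return atoms, nil
}
```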