package cuckoo import ( "fmt" "iter" "math/bits" "strings" ) // A Table is hash table that uses cuckoo hashing to resolve collision. Create // one with [NewTable]. Or if you want more granularity, use [NewTableBy] or // [NewCustomTable]. type Table[K, V any] struct { bucketA, bucketB bucket[K, V] growthFactor uint64 minLoadFactor float64 } // TotalCapacity returns the number of slots allocated for the [Table]. To get the // number of slots filled, look at [Table.Size]. func (t Table[K, V]) TotalCapacity() uint64 { return t.bucketA.capacity + t.bucketB.capacity } // Size returns how many slots are filled in the [Table]. func (t Table[K, V]) Size() int { return int(t.bucketA.size + t.bucketB.size) } func log2(n uint64) (m int) { return max(0, bits.Len64(n)-1) } func (t Table[K, V]) maxEvictions() int { return 3 * log2(t.TotalCapacity()) } func (t Table[K, V]) load() float64 { // When there are no slots in the table, we still treat the load as 100%. // Every slot in the table is full. if t.TotalCapacity() == 0 { return 1.0 } return float64(t.Size()) / float64(t.TotalCapacity()) } // insert attempts to put/update an entry in the table, without modifying the // size of the table. Returns a displaced entry and 'homeless = true' if an // entry could not be placed after exhausting evictions. func (t *Table[K, V]) insert(entry Entry[K, V]) (displaced Entry[K, V], homeless bool) { if t.bucketA.update(entry.Key, entry.Value) { return } if t.bucketB.update(entry.Key, entry.Value) { return } for range t.maxEvictions() { if entry, homeless = t.bucketA.insert(entry); !homeless { return } if entry, homeless = t.bucketB.insert(entry); !homeless { return } } return entry, true } // resized creates an empty copy of the table, with a new capacity for each // bucket. 
func (t Table[K, V]) resized(capacity uint64) Table[K, V] {
	out := Table[K, V]{
		growthFactor:  t.growthFactor,
		minLoadFactor: t.minLoadFactor,
	}
	out.bucketA = t.bucketA.resized(capacity)
	out.bucketB = t.bucketB.resized(capacity)
	return out
}

// resize creates a new [Table.resized] with 'capacity', inserts all items into
// the array, and replaces the current table. It is a helper function for
// [Table.grow] and [Table.shrink]; use them instead.
func (t *Table[K, V]) resize(capacity uint64) bool {
	next := t.resized(capacity)
	for key, val := range t.Entries() {
		// If any entry cannot be placed, abandon 'next' and leave the
		// current table untouched.
		if _, homeless := next.insert(Entry[K, V]{key, val}); homeless {
			return false
		}
	}
	*t = next
	return true
}

// grow increases the table's capacity by the [Table.growthFactor]. If the
// capacity is 0, it increases it to 1.
func (t *Table[K, V]) grow() bool {
	if t.TotalCapacity() == 0 {
		return t.resize(1)
	}
	return t.resize(t.bucketA.capacity * t.growthFactor)
}

// shrink reduces the table's capacity by the [Table.growthFactor]. It may
// reduce it down to 0.
func (t *Table[K, V]) shrink() bool {
	return t.resize(t.bucketA.capacity / t.growthFactor)
}

// Get fetches the value for a key in the [Table]. Returns an error if no value
// is found.
func (t Table[K, V]) Get(key K) (value V, err error) {
	if v, ok := t.bucketA.get(key); ok {
		return v, nil
	}
	if v, ok := t.bucketB.get(key); ok {
		return v, nil
	}
	return value, fmt.Errorf("key '%v' not found", key)
}

// Has returns true if a key has a value in the table.
func (t Table[K, V]) Has(key K) bool {
	if _, err := t.Get(key); err != nil {
		return false
	}
	return true
}

// Put sets the value for a key. If it cannot be set, an error is returned,
// along with the last displaced entry.
//
// On failure, the returned entry and the current table contents together
// preserve all previously inserted entries and the attempted entry.
func (t *Table[K, V]) Put(key K, value V) (displaced Entry[K, V], err error) { var ( entry = Entry[K, V]{key, value} homeless bool ) for range defaultGrowthLimit { if entry, homeless = t.insert(entry); !homeless { return } // Both this and the growth limit are necessary: this catches bad hashes // early when the table is sparse, while the latter catches cases where // growing never helps. if t.load() < t.minLoadFactor { return entry, fmt.Errorf("bad hash: resize on load %d/%d", t.Size(), t.TotalCapacity()) } // It is theoretically possible to have a table with a larger capacity // that is valid. But this chance is astronomically small, so we ignore // it in this implementation. if grew := t.grow(); !grew { return entry, fmt.Errorf("bad hash: could not redistribute entries into larger table") } } return entry, fmt.Errorf("bad hash: could not place entry after %d resizes", defaultGrowthLimit) } // Drop removes a value for a key in the table. Returns whether the key had // existed. func (t *Table[K, V]) Drop(key K) bool { occupied := t.bucketA.drop(key) || t.bucketB.drop(key) if t.load() < t.minLoadFactor { // The error is not handled here, because table-shrinking is an internal // optimization. t.shrink() } return occupied } // Entries returns an unordered sequence of all key-value pairs in the table. func (t Table[K, V]) Entries() iter.Seq2[K, V] { return func(yield func(K, V) bool) { for _, slot := range t.bucketA.slots { if slot.occupied { if !yield(slot.Key, slot.Value) { return } } } for _, slot := range t.bucketB.slots { if slot.occupied { if !yield(slot.Key, slot.Value) { return } } } } } // String returns the entries of the table as a string in the format: // "table[k1:v1 h2:v2 ...]". 
func (t Table[K, V]) String() string { var sb strings.Builder sb.WriteString("table[") first := true for k, v := range t.Entries() { if !first { sb.WriteString(" ") } fmt.Fprintf(&sb, "%v:%v", k, v) first = false } sb.WriteString("]") return sb.String() } // NewCustomTable creates a [Table] with custom [Hash] and [EqualFunc] // functions, along with any [Option] the user provides. func NewCustomTable[K, V any](hashA, hashB Hash[K], compare EqualFunc[K], options ...Option) *Table[K, V] { settings := &settings{ growthFactor: DefaultGrowthFactor, bucketSize: DefaultCapacity, minLoadFactor: defaultMinimumLoad, } for _, option := range options { option(settings) } return &Table[K, V]{ growthFactor: settings.growthFactor, minLoadFactor: settings.minLoadFactor, bucketA: newBucket[K, V](settings.bucketSize, hashA, compare), bucketB: newBucket[K, V](settings.bucketSize, hashB, compare), } } func pipe[X, Y, Z any](a func(X) Y, b func(Y) Z) func(X) Z { return func(x X) Z { return b(a(x)) } } // NewTableBy creates a [Table] for any key type by using keyFunc to derive a // comparable key. Two keys with the same derived key are treated as equal. func NewTableBy[K, V any, C comparable](keyFunc func(K) C, options ...Option) *Table[K, V] { return NewCustomTable[K, V]( pipe(keyFunc, NewDefaultHash[C]()), pipe(keyFunc, NewDefaultHash[C]()), func(a, b K) bool { return keyFunc(a) == keyFunc(b) }, options..., ) } // NewTable creates a [Table] using the default [Hash] and [EqualFunc]. Use // the [Option] functions to configure its behavior. Note that this constructor // is only provided for comparable keys. For arbitrary keys, consider // [NewTableBy] or [NewCustomTable]. func NewTable[K comparable, V any](options ...Option) *Table[K, V] { return NewCustomTable[K, V](NewDefaultHash[K](), NewDefaultHash[K](), DefaultEqualFunc[K], options...) }