Diffstat (limited to 'vendor/codeberg.org/gruf/go-structr/index.go')
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/index.go  428
1 file changed, 226 insertions(+), 202 deletions(-)
diff --git a/vendor/codeberg.org/gruf/go-structr/index.go b/vendor/codeberg.org/gruf/go-structr/index.go
index e2768b3ca..c7115e0b4 100644
--- a/vendor/codeberg.org/gruf/go-structr/index.go
+++ b/vendor/codeberg.org/gruf/go-structr/index.go
@@ -6,7 +6,7 @@ import (
"sync"
"unsafe"
- "github.com/zeebo/xxh3"
+ "codeberg.org/gruf/go-byteutil"
)
// IndexConfig defines config variables
@@ -55,7 +55,12 @@ type IndexConfig struct {
// case that you would like to manually provide the used
// index via the Cache.___By() series of functions, or
// access the underlying index key generator.
-type Index[StructType any] struct {
+type Index struct {
+
+ // ptr is a pointer to
+ // the source Cache/Queue
+ // index is attached to.
+ ptr unsafe.Pointer
// name is the actual name of this
// index, which is the unparsed
@@ -67,11 +72,11 @@ type Index[StructType any] struct {
// index_entry{} which also contains the exact
// key each result is stored under. the hash map
// only keys by the xxh3 hash checksum for speed.
- data map[Hash]*list //[*index_entry[StructType]]
+ data map[string]*list // [*index_entry]
// struct fields encompassed by
// keys (+ hashes) of this index.
- fields []structfield
+ fields []struct_field
// index flags:
// - 1 << 0 = unique
@@ -79,55 +84,68 @@ type Index[StructType any] struct {
flags uint8
}
-// Key returns the configured fields as key, and hash sum of key.
-func (i *Index[T]) Key(value T) ([]any, Hash, bool) {
- h := get_hasher()
- key, sum, ok := index_key(i, h, value)
- hash_pool.Put(h)
- return key, sum, ok
+// Name returns the receiving Index name.
+func (i *Index) Name() string {
+ return i.name
}
-func is_unique(f uint8) bool {
- const mask = uint8(1) << 0
- return f&mask != 0
+// Key generates Key{} from given parts for
+// the type of lookup this Index uses in cache.
+// NOTE: panics on incorrect no. parts / types given.
+func (i *Index) Key(parts ...any) Key {
+ buf := new_buffer()
+ key := i.key(buf, parts)
+ free_buffer(buf)
+ return key
}
-func set_is_unique(f *uint8) {
- const mask = uint8(1) << 0
- (*f) |= mask
-}
-
-func allow_zero(f uint8) bool {
- const mask = uint8(1) << 1
- return f&mask != 0
+// Keys generates []Key{} from given (multiple) parts
+// for the type of lookup this Index uses in the cache.
+// NOTE: panics on incorrect no. parts / types given.
+func (i *Index) Keys(parts ...[]any) []Key {
+ keys := make([]Key, 0, len(parts))
+ buf := new_buffer()
+ for _, parts := range parts {
+ key := i.key(buf, parts)
+ if key.Zero() {
+ continue
+ }
+ keys = append(keys, key)
+ }
+ free_buffer(buf)
+ return keys
}
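
A hedged usage sketch of the two exported helpers above. How an *Index is obtained from its owning Cache/Queue is not shown in this file, so the helper below simply accepts one; the package name "structr" and the two-field index layout ("AccountID,StatusID") are assumptions for illustration only.

package example

import structr "codeberg.org/gruf/go-structr"

// statusKeys builds lookup keys for a batch of (accountID, statusID)
// pairs. Keys() silently drops any pair that mangles to a zero key,
// unless the index was configured with AllowZero.
func statusKeys(idx *structr.Index, pairs [][2]string) []structr.Key {
	parts := make([][]any, 0, len(pairs))
	for _, p := range pairs {
		parts = append(parts, []any{p[0], p[1]})
	}
	return idx.Keys(parts...)
}
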
-func set_allow_zero(f *uint8) {
- const mask = uint8(1) << 1
- (*f) |= mask
-}
+// init will initialize the index with given struct type, config and capacity.
+func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
+ switch {
+ // The only 2 types we support are
+ // structs, and ptrs to a struct.
+ case t.Kind() == reflect.Struct:
+ case t.Kind() == reflect.Pointer &&
+ t.Elem().Kind() == reflect.Struct:
+ t = t.Elem()
+ default:
+ panic("index only support struct{} and *struct{}")
+ }
-func init_index[T any](i *Index[T], config IndexConfig, max int) {
// Set name from the raw
// struct fields string.
- i.name = config.Fields
+ i.name = cfg.Fields
// Set struct flags.
- if config.AllowZero {
+ if cfg.AllowZero {
set_allow_zero(&i.flags)
}
- if !config.Multiple {
+ if !cfg.Multiple {
set_is_unique(&i.flags)
}
- // Split to get the containing struct fields.
- fields := strings.Split(config.Fields, ",")
+ // Split to get containing struct fields.
+ fields := strings.Split(cfg.Fields, ",")
// Preallocate expected struct field slice.
- i.fields = make([]structfield, len(fields))
-
- // Get the reflected struct ptr type.
- t := reflect.TypeOf((*T)(nil)).Elem()
+ i.fields = make([]struct_field, len(fields))
for x, fieldName := range fields {
// Split name to account for nesting.
@@ -138,255 +156,261 @@ func init_index[T any](i *Index[T], config IndexConfig, max int) {
}
// Initialize index_entry list store.
- i.data = make(map[Hash]*list, max+1)
+ i.data = make(map[string]*list, cap+1)
}
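
For reference, a standalone sketch (hypothetical code, not this package's) of the same reflect-based guard init() applies above: accept a struct or pointer-to-struct type, unwrap the pointer, then resolve the comma-separated field names. Nested field paths, which the real code handles, are omitted here.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

func structFields(t reflect.Type, spec string) []reflect.StructField {
	switch {
	// Only structs and pointers to structs are accepted.
	case t.Kind() == reflect.Struct:
	case t.Kind() == reflect.Pointer && t.Elem().Kind() == reflect.Struct:
		t = t.Elem()
	default:
		panic("only struct{} and *struct{} are supported")
	}
	names := strings.Split(spec, ",")
	fields := make([]reflect.StructField, 0, len(names))
	for _, name := range names {
		f, ok := t.FieldByName(strings.TrimSpace(name))
		if !ok {
			panic("unknown field: " + name)
		}
		fields = append(fields, f)
	}
	return fields
}

type user struct{ ID, Username string }

func main() {
	for _, f := range structFields(reflect.TypeOf(&user{}), "ID,Username") {
		fmt.Println(f.Name, f.Type)
	}
}
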
-func index_key[T any](i *Index[T], h *xxh3.Hasher, value T) ([]any, Hash, bool) {
- key := extract_fields(value, i.fields)
- sum, zero := hash_sum(i.fields, h, key)
- if zero && !allow_zero(i.flags) {
- var zero Hash
- return nil, zero, false
- }
- return key, sum, true
-}
-
-func index_hash[T any](i *Index[T], h *xxh3.Hasher, key []any) (Hash, bool) {
- sum, zero := hash_sum(i.fields, h, key)
- if zero && !allow_zero(i.flags) {
- var zero Hash
- return zero, false
- }
- return sum, true
-}
-
-func index_get[T any](i *Index[T], hash Hash, key []any) *list {
- l := i.data[hash]
+// get_one will fetch one indexed item under key.
+func (i *Index) get_one(key Key) *indexed_item {
+ // Get list at hash.
+ l := i.data[key.key]
if l == nil {
return nil
}
+
+ // Extract entry from first list elem.
entry := (*index_entry)(l.head.data)
- if !is_equal(entry.key, key) {
- return l
+
+ // Check contains expected key.
+ if !entry.key.Equal(key) {
+ return nil
}
- return l
+
+ return entry.item
}
-func index_append[T any](c *Cache[T], i *Index[T], hash Hash, key []any, res *result) {
- // Get list at key.
- l := i.data[hash]
+// get will fetch all indexed items under key, passing each to hook.
+func (i *Index) get(key Key, hook func(*indexed_item)) {
+ if hook == nil {
+ panic("nil hook")
+ }
+ // Get list at hash.
+ l := i.data[key.key]
if l == nil {
+ return
+ }
- // Allocate new list.
- l = list_acquire()
- i.data[hash] = l
-
- } else if entry := (*index_entry)(l.head.data); //nocollapse
- !is_equal(entry.key, key) {
-
- // Collision! Drop all.
- delete(i.data, hash)
+ // Extract entry from first list elem.
+ entry := (*index_entry)(l.head.data)
- // Iterate entries in list.
- for x := 0; x < l.len; x++ {
+ // Check contains expected key.
+ if !entry.key.Equal(key) {
+ return
+ }
- // Pop current head.
- list_remove(l, l.head)
+ // Iterate all entries in list.
+ l.rangefn(func(elem *list_elem) {
- // Extract result.
- res := entry.result
+ // Extract element entry + item.
+ entry := (*index_entry)(elem.data)
+ item := entry.item
- // Drop index entry from res.
- result_drop_index(res, i)
- if len(res.indexed) == 0 {
+ // Pass to hook.
+ hook(item)
+ })
+}
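
A standalone sketch of the hook-based lookup pattern get() uses above, with container/list standing in for the package's intrusive list{} and plain strings standing in for indexed items; names are hypothetical. One bucket is kept per key, and every item in the bucket is handed to a caller-supplied hook rather than collected into a returned slice.

package main

import (
	"container/list"
	"fmt"
)

type bucketIndex struct {
	data map[string]*list.List // one bucket of items per key
}

func (i *bucketIndex) get(key string, hook func(item any)) {
	if hook == nil {
		panic("nil hook")
	}
	l := i.data[key]
	if l == nil {
		return
	}
	// Walk the bucket, passing each item to the hook.
	for e := l.Front(); e != nil; e = e.Next() {
		hook(e.Value)
	}
}

func main() {
	idx := &bucketIndex{data: map[string]*list.List{}}
	l := list.New()
	l.PushBack("status-1")
	l.PushBack("status-2")
	idx.data["account-1."] = l
	idx.get("account-1.", func(item any) { fmt.Println(item) })
}
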
- // Old res now unused,
- // release to mem pool.
- result_release(c, res)
+// key uses hasher to generate Key{} from given raw parts.
+func (i *Index) key(buf *byteutil.Buffer, parts []any) Key {
+ if len(parts) != len(i.fields) {
+ panicf("incorrect number key parts: want=%d received=%d",
+ len(parts),
+ len(i.fields),
+ )
+ }
+ buf.B = buf.B[:0]
+ if !allow_zero(i.flags) {
+ for x := range parts {
+ before := len(buf.B)
+ buf.B = i.fields[x].mangle(buf.B, parts[x])
+ if string(buf.B[before:]) == i.fields[x].zero {
+ return Key{}
}
+ buf.B = append(buf.B, '.')
+ }
+ } else {
+ for x := range parts {
+ buf.B = i.fields[x].mangle(buf.B, parts[x])
+ buf.B = append(buf.B, '.')
}
+ }
+ return Key{
+ raw: parts,
+ key: string(buf.B),
+ }
+}
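
A standalone sketch of the key-building step above, using fmt.Appendf as a stand-in for the per-field mangle functions; names are hypothetical. Each part is serialized onto a shared buffer with a '.' separator, and when zero values are disallowed, a part whose serialization matches that field's precomputed zero form aborts the whole key.

package main

import "fmt"

type toyField struct {
	zero string // serialization of the field's zero value
}

func buildKey(fields []toyField, parts []any, allowZero bool) (string, bool) {
	if len(parts) != len(fields) {
		panic("wrong number of key parts")
	}
	buf := make([]byte, 0, 64)
	for x := range parts {
		before := len(buf)
		buf = fmt.Appendf(buf, "%v", parts[x]) // stand-in mangler
		if !allowZero && string(buf[before:]) == fields[x].zero {
			return "", false // zero part -> no key
		}
		buf = append(buf, '.')
	}
	return string(buf), true
}

func main() {
	fields := []toyField{{zero: ""}, {zero: "0"}}
	fmt.Println(buildKey(fields, []any{"alice", 42}, false)) // "alice.42." true
	fmt.Println(buildKey(fields, []any{"alice", 0}, false))  // "" false
}
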
- return
+// append will append the given index entry to appropriate
+// doubly-linked-list in index hashmap. this handles case
+// of key collisions and overwriting 'unique' entries.
+func (i *Index) append(key Key, item *indexed_item) {
+ // Look for existing.
+ l := i.data[key.key]
- } else if is_unique(i.flags) {
+ if l == nil {
- // Remove current
- // indexed entry.
- list_remove(l, l.head)
+ // Allocate new.
+ l = new_list()
+ i.data[key.key] = l
- // Get ptr to old
- // entry before we
- // release to pool.
- res := entry.result
+ } else if is_unique(i.flags) {
- // Drop this index's key from
- // old res now not indexed here.
- result_drop_index(res, i)
- if len(res.indexed) == 0 {
+ // Remove head.
+ elem := l.head
+ l.remove(elem)
- // Old res now unused,
- // release to mem pool.
- result_release(c, res)
- }
+ // Drop index from inner item.
+ e := (*index_entry)(elem.data)
+ e.item.drop_index(e)
+
+ // Free unused entry.
+ free_index_entry(e)
}
- // Acquire + setup index entry.
- entry := index_entry_acquire()
- entry.index = unsafe.Pointer(i)
- entry.result = res
+ // Prepare new index entry.
+ entry := new_index_entry()
+ entry.item = item
entry.key = key
- entry.hash = hash
+ entry.index = i
- // Append to result's indexed entries.
- res.indexed = append(res.indexed, entry)
+ // Add ourselves to item's index tracker.
+ item.indexed = append(item.indexed, entry)
- // Add index entry to index list.
- list_push_front(l, &entry.elem)
+ // Add entry to index list.
+ l.push_front(&entry.elem)
}
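
A standalone sketch of the unique-index overwrite handled by append() above, again with container/list as a stand-in and with the entry/item bookkeeping (drop_index, pooled frees) omitted: if the key's bucket already exists on a unique index, the current head is evicted before the new item is pushed to the front.

package main

import (
	"container/list"
	"fmt"
)

type uniqueIndex struct {
	data map[string]*list.List
}

func (i *uniqueIndex) append(key string, item string) {
	l := i.data[key]
	if l == nil {
		// First item under this key: allocate the bucket.
		l = list.New()
		i.data[key] = l
	} else {
		// Unique index: evict whatever was stored here before.
		l.Remove(l.Front())
	}
	l.PushFront(item)
}

func main() {
	idx := &uniqueIndex{data: map[string]*list.List{}}
	idx.append("id-1.", "old value")
	idx.append("id-1.", "new value")
	fmt.Println(idx.data["id-1."].Len(), idx.data["id-1."].Front().Value) // 1 new value
}
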
-func index_delete[T any](c *Cache[T], i *Index[T], hash Hash, key []any, fn func(*result)) {
- if fn == nil {
- panic("nil fn")
+// delete will remove all indexed items under key, passing each to hook.
+func (i *Index) delete(key Key, hook func(*indexed_item)) {
+ if hook == nil {
+ panic("nil hook")
}
// Get list at hash.
- l := i.data[hash]
+ l := i.data[key.key]
if l == nil {
return
}
+ // Extract entry from first list elem.
entry := (*index_entry)(l.head.data)
- // Check contains expected key for hash.
- if !is_equal(entry.key, key) {
+ // Check contains expected key.
+ if !entry.key.Equal(key) {
return
}
// Delete data at hash.
- delete(i.data, hash)
+ delete(i.data, key.key)
// Iterate entries in list.
for x := 0; x < l.len; x++ {
- // Pop current head.
- entry := (*index_entry)(l.head.data)
- list_remove(l, l.head)
+ // Pop list head.
+ elem := l.head
+ l.remove(elem)
- // Extract result.
- res := entry.result
+ // Extract element entry + item.
+ entry := (*index_entry)(elem.data)
+ item := entry.item
- // Call hook.
- fn(res)
+ // Drop index from item.
+ item.drop_index(entry)
- // Drop index entry from res.
- result_drop_index(res, i)
+ // Free now-unused entry.
+ free_index_entry(entry)
+
+ // Pass to hook.
+ hook(item)
}
- // Release to pool.
- list_release(l)
+ // Release list.
+ free_list(l)
}
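
A standalone sketch of the bucket teardown in delete() above, with the same stand-ins and omissions as the previous sketch: the bucket is removed from the map first, then drained head-first, handing each item to the caller's hook.

package main

import (
	"container/list"
	"fmt"
)

func deleteBucket(data map[string]*list.List, key string, hook func(any)) {
	l := data[key]
	if l == nil {
		return
	}
	delete(data, key) // unhook the bucket before draining it
	for l.Len() > 0 {
		front := l.Front()
		l.Remove(front)
		hook(front.Value)
	}
}

func main() {
	data := map[string]*list.List{}
	l := list.New()
	l.PushBack("a")
	l.PushBack("b")
	data["k."] = l
	deleteBucket(data, "k.", func(v any) { fmt.Println("dropped", v) })
}
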
-func index_delete_entry[T any](c *Cache[T], entry *index_entry) {
- // Get from entry.
- i := (*Index[T])(entry.index)
-
+// delete_entry deletes the given index entry.
+func (i *Index) delete_entry(entry *index_entry) {
// Get list at hash sum.
- l := i.data[entry.hash]
+ l := i.data[entry.key.key]
if l == nil {
return
}
- // Remove entry from list.
- list_remove(l, &entry.elem)
- if l.len == 0 {
+ // Remove list entry.
+ l.remove(&entry.elem)
- // Remove list from map.
- delete(i.data, entry.hash)
+ if l.len == 0 {
+ // Remove entry list from map.
+ delete(i.data, entry.key.key)
- // Release to pool.
- list_release(l)
+ // Release list.
+ free_list(l)
}
- // Extract result.
- res := entry.result
-
- // Drop index entry from res.
- result_drop_index(res, i)
+ // Drop this index from item.
+ entry.item.drop_index(entry)
}
-var entry_pool sync.Pool
-
+// index_entry represents a single entry
+// in an Index{}, where it will be accessible
+// by Key{} pointing to a containing list{}.
type index_entry struct {
- // elem contains the list element
- // appended to each per-hash list
- // within the Index{} type. the
- // contained value is a self-ref.
+
+ // list elem that entry is stored
+ // within, under containing index.
+ // elem.data is ptr to index_entry.
elem list_elem
- // index is the Index{} this
- // index_entry{} is stored in.
- index unsafe.Pointer
-
- // result is the actual
- // underlying result stored
- // within the index. this
- // also contains a ref to
- // this *index_entry in order
- // to track indices each result
- // is currently stored under.
- result *result
-
- // key contains the actual
- // key this item was stored
- // under, used for collision
- // check.
- key []any
-
- // hash contains computed
- // hash checksum of .key.
- hash Hash
+ // hash checksum
+ // + raw key data
+ key Key
+
+ // index this is stored in.
+ index *Index
+
+ // underlying indexed item.
+ item *indexed_item
}
-func index_entry_acquire() *index_entry {
- // Acquire from pool.
- v := entry_pool.Get()
+var index_entry_pool sync.Pool
+
+// new_index_entry returns a new prepared index_entry.
+func new_index_entry() *index_entry {
+ v := index_entry_pool.Get()
if v == nil {
v = new(index_entry)
}
-
- // Cast index_entry value.
entry := v.(*index_entry)
-
- // Set index list elem entry on itself.
- entry.elem.data = unsafe.Pointer(entry)
-
+ ptr := unsafe.Pointer(entry)
+ entry.elem.data = ptr
return entry
}
-func index_entry_release(entry *index_entry) {
- var zero Hash
-
- // Reset index entry.
+// free_index_entry releases the index_entry.
+func free_index_entry(entry *index_entry) {
entry.elem.data = nil
+ entry.key = Key{}
entry.index = nil
- entry.result = nil
- entry.key = nil
- entry.hash = zero
+ entry.item = nil
+ index_entry_pool.Put(entry)
+}
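
A standalone sketch (hypothetical types) of the pooling pattern behind new_index_entry/free_index_entry above: entries are recycled through a sync.Pool, each entry's embedded list element stores an unsafe.Pointer back to its owner so the entry can be recovered from a raw list position, and fields are zeroed before an entry is returned to the pool.

package main

import (
	"fmt"
	"sync"
	"unsafe"
)

type elem struct{ data unsafe.Pointer }

type entry struct {
	el  elem
	key string
}

var entryPool sync.Pool

func newEntry(key string) *entry {
	v := entryPool.Get()
	if v == nil {
		v = new(entry)
	}
	e := v.(*entry)
	e.el.data = unsafe.Pointer(e) // self-reference for list traversal
	e.key = key
	return e
}

func freeEntry(e *entry) {
	e.el.data = nil // zero fields before returning to the pool
	e.key = ""
	entryPool.Put(e)
}

func main() {
	e := newEntry("id-1.")
	back := (*entry)(e.el.data) // recover the owner from the element
	fmt.Println(back.key)
	freeEntry(e)
}
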
- // Release to pool.
- entry_pool.Put(entry)
+func is_unique(f uint8) bool {
+ const mask = uint8(1) << 0
+ return f&mask != 0
}
-// is_equal returns whether 2 key slices are equal.
-func is_equal(k1, k2 []any) bool {
- if len(k1) != len(k2) {
- return false
- }
- for i := range k1 {
- if k1[i] != k2[i] {
- return false
- }
- }
- return true
+func set_is_unique(f *uint8) {
+ const mask = uint8(1) << 0
+ (*f) |= mask
+}
+
+func allow_zero(f uint8) bool {
+ const mask = uint8(1) << 1
+ return f&mask != 0
+}
+
+func set_allow_zero(f *uint8) {
+ const mask = uint8(1) << 1
+ (*f) |= mask
}
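
A standalone sketch of the single-byte flag scheme these helpers implement: bit 0 marks a unique index, bit 1 marks that zero-valued keys are allowed.

package main

import "fmt"

const (
	flagUnique    uint8 = 1 << 0
	flagAllowZero uint8 = 1 << 1
)

func main() {
	var flags uint8
	flags |= flagUnique // set_is_unique equivalent

	fmt.Println(flags&flagUnique != 0)    // true
	fmt.Println(flags&flagAllowZero != 0) // false
}
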