summaryrefslogtreecommitdiff
path: root/vendor/codeberg.org/gruf/go-structr
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/codeberg.org/gruf/go-structr')
-rw-r--r--vendor/codeberg.org/gruf/go-structr/LICENSE9
-rw-r--r--vendor/codeberg.org/gruf/go-structr/README.md11
-rw-r--r--vendor/codeberg.org/gruf/go-structr/cache.go724
-rw-r--r--vendor/codeberg.org/gruf/go-structr/index.go419
-rw-r--r--vendor/codeberg.org/gruf/go-structr/item.go65
-rw-r--r--vendor/codeberg.org/gruf/go-structr/key.go85
-rw-r--r--vendor/codeberg.org/gruf/go-structr/list.go185
-rw-r--r--vendor/codeberg.org/gruf/go-structr/map.go62
-rw-r--r--vendor/codeberg.org/gruf/go-structr/queue.go346
-rw-r--r--vendor/codeberg.org/gruf/go-structr/queue_ctx.go152
-rw-r--r--vendor/codeberg.org/gruf/go-structr/runtime.go294
-rw-r--r--vendor/codeberg.org/gruf/go-structr/timeline.go1197
12 files changed, 0 insertions, 3549 deletions
diff --git a/vendor/codeberg.org/gruf/go-structr/LICENSE b/vendor/codeberg.org/gruf/go-structr/LICENSE
deleted file mode 100644
index d6f08d0ab..000000000
--- a/vendor/codeberg.org/gruf/go-structr/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-MIT License
-
-Copyright (c) gruf
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/codeberg.org/gruf/go-structr/README.md b/vendor/codeberg.org/gruf/go-structr/README.md
deleted file mode 100644
index 2ea88faef..000000000
--- a/vendor/codeberg.org/gruf/go-structr/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# go-structr
-
-A library with a series of performant data types with automated struct value indexing. Indexing is supported via arbitrary combinations of fields, and in the case of the cache type, negative results (errors!) are also supported.
-
-Under the hood, go-structr maintains a hashmap per index, where each hashmap is keyed by serialized input key. This is handled by the incredibly performant serialization library [go-mangler/v2](https://codeberg.org/gruf/go-mangler), which at this point in time supports all concrete types, so feel free to index by by *almost* anything!
-
-See the [docs](https://pkg.go.dev/codeberg.org/gruf/go-structr) for more API information.
-
-## Notes
-
-This is a core underpinning of [GoToSocial](https://github.com/superseriousbusiness/gotosocial)'s performance. \ No newline at end of file
diff --git a/vendor/codeberg.org/gruf/go-structr/cache.go b/vendor/codeberg.org/gruf/go-structr/cache.go
deleted file mode 100644
index 6ae6ff3ed..000000000
--- a/vendor/codeberg.org/gruf/go-structr/cache.go
+++ /dev/null
@@ -1,724 +0,0 @@
-package structr
-
-import (
- "context"
- "errors"
- "sync"
- "unsafe"
-)
-
-// DefaultIgnoreErr is the default function used to
-// ignore (i.e. not cache) incoming error results during
-// Load() calls. By default ignores context pkg errors.
-func DefaultIgnoreErr(err error) bool {
- return errors.Is(err, context.Canceled) ||
- errors.Is(err, context.DeadlineExceeded)
-}
-
-// CacheConfig defines config vars
-// for initializing a Cache{} type.
-type CacheConfig[StructType any] struct {
-
- // IgnoreErr defines which errors to
- // ignore (i.e. not cache) returned
- // from load function callback calls.
- // This may be left as nil, on which
- // DefaultIgnoreErr will be used.
- IgnoreErr func(error) bool
-
- // Copy provides a means of copying
- // cached values, to ensure returned values
- // do not share memory with those in cache.
- Copy func(StructType) StructType
-
- // Invalidate is called when cache values
- // (NOT errors) are invalidated, either
- // as the values passed to Put() / Store(),
- // or by the keys by calls to Invalidate().
- Invalidate func(StructType)
-
- // Indices defines indices to create
- // in the Cache for the receiving
- // generic struct type parameter.
- Indices []IndexConfig
-
- // MaxSize defines the maximum number
- // of items allowed in the Cache at
- // one time, before old items start
- // getting evicted.
- MaxSize int
-}
-
-// Cache provides a structure cache with automated
-// indexing and lookups by any initialization-defined
-// combination of fields. This also supports caching
-// of negative results (errors!) returned by LoadOne().
-type Cache[StructType any] struct {
-
- // hook functions.
- ignore func(error) bool
- copy func(StructType) StructType
- invalid func(StructType)
-
- // keeps track of all indexed items,
- // in order of last recently used (LRU).
- lru list
-
- // indices used in storing passed struct
- // types by user defined sets of fields.
- indices []Index
-
- // max cache size, imposes size
- // limit on the lru list in order
- // to evict old entries.
- maxSize int
-
- // protective mutex, guards:
- // - Cache{}.*
- // - Index{}.data
- mutex sync.Mutex
-}
-
-// Init initializes the cache with given configuration
-// including struct fields to index, and necessary fns.
-func (c *Cache[T]) Init(config CacheConfig[T]) {
- t := get_type_iter[T]()
-
- if len(config.Indices) == 0 {
- panic("no indices provided")
- }
-
- if config.IgnoreErr == nil {
- config.IgnoreErr = DefaultIgnoreErr
- }
-
- if config.Copy == nil {
- panic("copy function must be provided")
- }
-
- if config.MaxSize < 2 {
- panic("minimum cache size is 2 for LRU to work")
- }
-
- // Safely copy over
- // provided config.
- c.mutex.Lock()
- defer c.mutex.Unlock()
- c.indices = make([]Index, len(config.Indices))
- for i, cfg := range config.Indices {
- c.indices[i].ptr = unsafe.Pointer(c)
- c.indices[i].init(t, cfg, config.MaxSize)
- }
- c.ignore = config.IgnoreErr
- c.copy = config.Copy
- c.invalid = config.Invalidate
- c.maxSize = config.MaxSize
-}
-
-// Index selects index with given name from cache, else panics.
-func (c *Cache[T]) Index(name string) *Index {
- for i, idx := range c.indices {
- if idx.name == name {
- return &(c.indices[i])
- }
- }
- panic("unknown index: " + name)
-}
-
-// GetOne fetches value from cache stored under index, using precalculated index key.
-func (c *Cache[T]) GetOne(index *Index, key Key) (T, bool) {
- values := c.Get(index, key)
- if len(values) == 0 {
- var zero T
- return zero, false
- }
- return values[0], true
-}
-
-// Get fetches values from the cache stored under index, using precalculated index keys.
-func (c *Cache[T]) Get(index *Index, keys ...Key) []T {
- if index == nil {
- panic("no index given")
- } else if index.ptr != unsafe.Pointer(c) {
- panic("invalid index for cache")
- }
-
- // Preallocate expected ret slice.
- values := make([]T, 0, len(keys))
-
- // Acquire lock.
- c.mutex.Lock()
- defer c.mutex.Unlock()
-
- // Check cache init.
- if c.copy == nil {
- panic("not initialized")
- }
-
- for i := range keys {
- // Concatenate all *values* from cached items.
- index.get(keys[i].key, func(item *indexed_item) {
- if value, ok := item.data.(T); ok {
-
- // Append value COPY.
- value = c.copy(value)
- values = append(values, value)
-
- // Push to front of LRU list, USING
- // THE ITEM'S LRU ENTRY, NOT THE
- // INDEX KEY ENTRY. VERY IMPORTANT!!
- c.lru.move_front(&item.elem)
- }
- })
- }
-
- return values
-}
-
-// Put will insert the given values into cache,
-// calling any invalidate hook on each value.
-func (c *Cache[T]) Put(values ...T) {
- // Acquire lock.
- c.mutex.Lock()
-
- // Ensure mutex
- // gets unlocked.
- var unlocked bool
- defer func() {
- if !unlocked {
- c.mutex.Unlock()
- }
- }()
-
- // Check cache init.
- if c.copy == nil {
- panic("not initialized")
- }
-
- // Store all passed values.
- for i := range values {
- c.store_value(
- nil, "",
- values[i],
- )
- }
-
- // Get func ptrs.
- invalid := c.invalid
-
- // Done with lock.
- c.mutex.Unlock()
- unlocked = true
-
- if invalid != nil {
- // Pass all invalidated values
- // to given user hook (if set).
- for _, value := range values {
- invalid(value)
- }
- }
-}
-
-// LoadOneBy fetches one result from the cache stored under index, using precalculated index key.
-// In the case that no result is found, provided load callback will be used to hydrate the cache.
-func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, error) {
- if index == nil {
- panic("no index given")
- } else if index.ptr != unsafe.Pointer(c) {
- panic("invalid index for cache")
- } else if !is_unique(index.flags) {
- panic("cannot get one by non-unique index")
- }
-
- var (
- // whether an item was found
- // (and so val / err are set).
- ok bool
-
- // separate value / error ptrs
- // as the item is liable to
- // change outside of lock.
- val T
- err error
- )
-
- // Acquire lock.
- c.mutex.Lock()
-
- // Ensure mutex
- // gets unlocked.
- var unlocked bool
- defer func() {
- if !unlocked {
- c.mutex.Unlock()
- }
- }()
-
- // Check init'd.
- if c.copy == nil ||
- c.ignore == nil {
- panic("not initialized")
- }
-
- // Get item indexed at key.
- item := index.get_one(key)
-
- if ok = (item != nil); ok {
- var is bool
-
- if val, is = item.data.(T); is {
- // Set value COPY.
- val = c.copy(val)
-
- // Push to front of LRU list, USING
- // THE ITEM'S LRU ENTRY, NOT THE
- // INDEX KEY ENTRY. VERY IMPORTANT!!
- c.lru.move_front(&item.elem)
-
- } else {
-
- // Attempt to return error.
- err, _ = item.data.(error)
- }
- }
-
- // Get func ptrs.
- ignore := c.ignore
-
- // Done with lock.
- c.mutex.Unlock()
- unlocked = true
-
- if ok {
- // item found!
- return val, err
- }
-
- // Load new result.
- val, err = load()
-
- // Check for ignored error types.
- if err != nil && ignore(err) {
- return val, err
- }
-
- // Acquire lock.
- c.mutex.Lock()
- unlocked = false
-
- // Index this new loaded item.
- // Note this handles copying of
- // the provided value, so it is
- // safe for us to return as-is.
- if err != nil {
- c.store_error(index, key.key, err)
- } else {
- c.store_value(index, key.key, val)
- }
-
- // Done with lock.
- c.mutex.Unlock()
- unlocked = true
-
- return val, err
-}
-
-// Load fetches values from the cache stored under index, using precalculated index keys. The cache will attempt to
-// results with values stored under keys, passing keys with uncached results to the provider load callback to further
-// hydrate the cache with missing results. Cached error results not included or returned by this function.
-func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error)) ([]T, error) {
- if index == nil {
- panic("no index given")
- } else if index.ptr != unsafe.Pointer(c) {
- panic("invalid index for cache")
- }
-
- // Preallocate expected ret slice.
- values := make([]T, 0, len(keys))
-
- // Acquire lock.
- c.mutex.Lock()
-
- // Ensure mutex
- // gets unlocked.
- var unlocked bool
- defer func() {
- if !unlocked {
- c.mutex.Unlock()
- }
- }()
-
- // Check init'd.
- if c.copy == nil {
- panic("not initialized")
- }
-
- // Iterate keys and catch uncached.
- toLoad := make([]Key, 0, len(keys))
- for _, key := range keys {
-
- // Value length before
- // any below appends.
- before := len(values)
-
- // Concatenate all *values* from cached items.
- index.get(key.key, func(item *indexed_item) {
- if value, ok := item.data.(T); ok {
- // Append value COPY.
- value = c.copy(value)
- values = append(values, value)
-
- // Push to front of LRU list, USING
- // THE ITEM'S LRU ENTRY, NOT THE
- // INDEX KEY ENTRY. VERY IMPORTANT!!
- c.lru.move_front(&item.elem)
- }
- })
-
- // Only if values changed did
- // we actually find anything.
- if len(values) == before {
- toLoad = append(toLoad, key)
- }
- }
-
- // Done with lock.
- c.mutex.Unlock()
- unlocked = true
-
- if len(toLoad) == 0 {
- // We loaded everything!
- return values, nil
- }
-
- // Load uncached key values.
- uncached, err := load(toLoad)
- if err != nil {
- return nil, err
- }
-
- // Acquire lock.
- c.mutex.Lock()
- unlocked = false
-
- // Store all uncached values.
- for i := range uncached {
- c.store_value(
- nil, "",
- uncached[i],
- )
- }
-
- // Done with lock.
- c.mutex.Unlock()
- unlocked = true
-
- // Append uncached to return values.
- values = append(values, uncached...)
-
- return values, nil
-}
-
-// Store will call the given store callback, on non-error then
-// passing the provided value to the Put() function. On error
-// return the value is still passed to stored invalidate hook.
-func (c *Cache[T]) Store(value T, store func() error) error {
- // Store value.
- err := store()
- if err != nil {
-
- // Get func ptrs.
- c.mutex.Lock()
- invalid := c.invalid
- c.mutex.Unlock()
-
- // On error don't store
- // value, but still pass
- // to invalidate hook.
- if invalid != nil {
- invalid(value)
- }
-
- return err
- }
-
- // Store value.
- c.Put(value)
-
- return nil
-}
-
-// Invalidate invalidates all results stored under index keys.
-// Note that if set, this will call the invalidate hook on each.
-func (c *Cache[T]) Invalidate(index *Index, keys ...Key) {
- if index == nil {
- panic("no index given")
- } else if index.ptr != unsafe.Pointer(c) {
- panic("invalid index for cache")
- }
-
- // Acquire lock.
- c.mutex.Lock()
-
- // Preallocate expected ret slice.
- values := make([]T, 0, len(keys))
-
- for i := range keys {
- // Delete all items under key from index, collecting
- // value items and dropping them from all their indices.
- index.delete(keys[i].key, func(item *indexed_item) {
-
- if value, ok := item.data.(T); ok {
- // No need to copy, as item
- // being deleted from cache.
- values = append(values, value)
- }
-
- // Delete item.
- c.delete(item)
- })
- }
-
- // Get func ptrs.
- invalid := c.invalid
-
- // Done with lock.
- c.mutex.Unlock()
-
- if invalid != nil {
- // Pass all invalidated values
- // to given user hook (if set).
- for _, value := range values {
- invalid(value)
- }
- }
-}
-
-// Trim will truncate the cache to ensure it
-// stays within given percentage of MaxSize.
-func (c *Cache[T]) Trim(perc float64) {
-
- // Acquire lock.
- c.mutex.Lock()
-
- // Calculate number of cache items to drop.
- max := (perc / 100) * float64(c.maxSize)
- diff := c.lru.len - int(max)
- if diff <= 0 {
-
- // Trim not needed.
- c.mutex.Unlock()
- return
- }
-
- // Iterate over 'diff' items
- // from back (oldest) of cache.
- for i := 0; i < diff; i++ {
-
- // Get oldest LRU elem.
- oldest := c.lru.tail
- if oldest == nil {
-
- // reached
- // end.
- break
- }
-
- // Drop oldest item from cache.
- item := (*indexed_item)(oldest.data)
- c.delete(item)
- }
-
- // Compact index data stores.
- for _, idx := range c.indices {
- (&idx).data.Compact()
- }
-
- // Done with lock.
- c.mutex.Unlock()
-}
-
-// Clear empties the cache by calling .Trim(0).
-func (c *Cache[T]) Clear() { c.Trim(0) }
-
-// Len returns the current length of cache.
-func (c *Cache[T]) Len() int {
- c.mutex.Lock()
- l := c.lru.len
- c.mutex.Unlock()
- return l
-}
-
-// Debug returns debug stats about cache.
-func (c *Cache[T]) Debug() map[string]any {
- m := make(map[string]any, 2)
- c.mutex.Lock()
- m["lru"] = c.lru.len
- indices := make(map[string]any, len(c.indices))
- m["indices"] = indices
- for _, idx := range c.indices {
- var n uint64
- for _, l := range idx.data.m {
- n += uint64(l.len)
- }
- indices[idx.name] = n
- }
- c.mutex.Unlock()
- return m
-}
-
-// Cap returns the maximum capacity (size) of cache.
-func (c *Cache[T]) Cap() int {
- c.mutex.Lock()
- m := c.maxSize
- c.mutex.Unlock()
- return m
-}
-
-func (c *Cache[T]) store_value(index *Index, key string, value T) {
- // Alloc new index item.
- item := new_indexed_item()
- if cap(item.indexed) < len(c.indices) {
-
- // Preallocate item indices slice to prevent Go auto
- // allocating overlying large slices we don't need.
- item.indexed = make([]*index_entry, 0, len(c.indices))
- }
-
- // Create COPY of value.
- value = c.copy(value)
- item.data = value
-
- if index != nil {
- // Append item to index a key
- // was already generated for.
- evicted := index.append(key, item)
- if evicted != nil {
-
- // This item is no longer
- // indexed, remove from list.
- c.lru.remove(&evicted.elem)
- free_indexed_item(evicted)
- }
- }
-
- // Get ptr to value data.
- ptr := unsafe.Pointer(&value)
-
- // Acquire key buf.
- buf := new_buffer()
-
- for i := range c.indices {
- // Get current index ptr.
- idx := (&c.indices[i])
- if idx == index {
-
- // Already stored under
- // this index, ignore.
- continue
- }
-
- // Extract fields comprising index key.
- parts := extract_fields(ptr, idx.fields)
-
- // Calculate index key.
- key := idx.key(buf, parts)
- if key == "" {
- continue
- }
-
- // Append item to this index.
- evicted := idx.append(key, item)
- if evicted != nil {
-
- // This item is no longer
- // indexed, remove from list.
- c.lru.remove(&evicted.elem)
- free_indexed_item(evicted)
- }
- }
-
- // Done with buf.
- free_buffer(buf)
-
- if len(item.indexed) == 0 {
- // Item was not stored under
- // any index. Drop this item.
- free_indexed_item(item)
- return
- }
-
- // Add item to main lru list.
- c.lru.push_front(&item.elem)
-
- if c.lru.len > c.maxSize {
- // Cache has hit max size!
- // Drop the oldest element.
- ptr := c.lru.tail.data
- item := (*indexed_item)(ptr)
- c.delete(item)
- }
-}
-
-func (c *Cache[T]) store_error(index *Index, key string, err error) {
- if index == nil {
- // nothing we
- // can do here.
- return
- }
-
- // Alloc new index item.
- item := new_indexed_item()
- if cap(item.indexed) < len(c.indices) {
-
- // Preallocate item indices slice to prevent Go auto
- // allocating overlying large slices we don't need.
- item.indexed = make([]*index_entry, 0, len(c.indices))
- }
-
- // Set error val.
- item.data = err
-
- // Append item to index a key
- // was already generated for.
- evicted := index.append(key, item)
- if evicted != nil {
-
- // This item is no longer
- // indexed, remove from list.
- c.lru.remove(&evicted.elem)
- free_indexed_item(evicted)
- }
-
- // Add item to main lru list.
- c.lru.push_front(&item.elem)
-
- if c.lru.len > c.maxSize {
- // Cache has hit max size!
- // Drop the oldest element.
- ptr := c.lru.tail.data
- item := (*indexed_item)(ptr)
- c.delete(item)
- }
-}
-
-func (c *Cache[T]) delete(i *indexed_item) {
- for len(i.indexed) > 0 {
- // Pop last indexed entry from list.
- entry := i.indexed[len(i.indexed)-1]
- i.indexed[len(i.indexed)-1] = nil
- i.indexed = i.indexed[:len(i.indexed)-1]
-
- // Get entry's index.
- index := entry.index
-
- // Drop this index_entry.
- index.delete_entry(entry)
- }
-
- // Drop from lru list.
- c.lru.remove(&i.elem)
-
- // Free unused item.
- free_indexed_item(i)
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/index.go b/vendor/codeberg.org/gruf/go-structr/index.go
deleted file mode 100644
index d8469577d..000000000
--- a/vendor/codeberg.org/gruf/go-structr/index.go
+++ /dev/null
@@ -1,419 +0,0 @@
-package structr
-
-import (
- "os"
- "reflect"
- "strings"
- "unsafe"
-
- "codeberg.org/gruf/go-byteutil"
- "codeberg.org/gruf/go-mempool"
- "codeberg.org/gruf/go-xunsafe"
-)
-
-// IndexConfig defines config variables
-// for initializing a struct index.
-type IndexConfig struct {
-
- // Fields should contain a comma-separated
- // list of struct fields used when generating
- // keys for this index. Nested fields should
- // be specified using periods. An example:
- // "Username,Favorites.Color"
- //
- // If a nested field encounters a nil pointer
- // along the way, e.g. "Favourites == nil", then
- // a zero value for "Favorites.Color" is used.
- //
- // Field types supported include any of those
- // supported by the `go-mangler/v2` library.
- Fields string
-
- // Multiple indicates whether to accept multiple
- // possible values for any single index key. The
- // default behaviour is to only accept one value
- // and overwrite existing on any write operation.
- Multiple bool
-
- // AllowZero indicates whether to accept zero
- // value fields in index keys. i.e. whether to
- // index structs for this set of field values
- // IF any one of those field values is the zero
- // value for that type. The default behaviour
- // is to skip indexing structs for this lookup
- // when any of the indexing fields are zero.
- AllowZero bool
-}
-
-// Index is an exposed Cache internal model, used to
-// extract struct keys, generate hash checksums for them
-// and store struct results by the init defined config.
-// This model is exposed to provide faster lookups in the
-// case that you would like to manually provide the used
-// index via the Cache.___By() series of functions, or
-// access the underlying index key generator.
-type Index struct {
-
- // ptr is a pointer to
- // the source type this
- // index is attached to.
- ptr unsafe.Pointer
-
- // name is the actual name of this
- // index, which is the unparsed
- // string value of contained fields.
- name string
-
- // backing data store of the index, containing
- // list{}s of index_entry{}s which each contain
- // the exact key each result is stored under.
- data hashmap
-
- // struct fields encompassed
- // by keys of this index.
- fields []struct_field
-
- // index flags:
- // - 1 << 0 = unique
- // - 1 << 1 = allow zero
- flags uint8
-}
-
-// Name returns the receiving Index name.
-func (i *Index) Name() string {
- return i.name
-}
-
-// init will initialize the cache with given type, config and capacity.
-func (i *Index) init(t xunsafe.TypeIter, cfg IndexConfig, cap int) {
- switch {
- // The only 2 types we support are
- // structs, and ptrs to a struct.
- case t.Type.Kind() == reflect.Struct:
- case t.Type.Kind() == reflect.Pointer &&
- t.Type.Elem().Kind() == reflect.Struct:
- default:
- panic("index only support struct{} and *struct{}")
- }
-
- // Set name from the raw
- // struct fields string.
- i.name = cfg.Fields
-
- // Set struct flags.
- if cfg.AllowZero {
- set_allow_zero(&i.flags)
- }
- if !cfg.Multiple {
- set_is_unique(&i.flags)
- }
-
- // Split to get containing struct fields.
- fields := strings.Split(cfg.Fields, ",")
-
- // Preallocate expected struct field slice.
- i.fields = make([]struct_field, len(fields))
- for x, name := range fields {
-
- // Split name to account for nesting.
- names := strings.Split(name, ".")
-
- // Look for struct field by names.
- i.fields[x], _ = find_field(t, names)
- }
-
- // Initialize store for
- // index_entry lists.
- i.data.Init(cap)
-}
-
-// get_one will fetch one indexed item under key.
-func (i *Index) get_one(key Key) *indexed_item {
- // Get list at hash.
- l := i.data.Get(key.key)
- if l == nil {
- return nil
- }
-
- // Extract entry from first list elem.
- entry := (*index_entry)(l.head.data)
-
- return entry.item
-}
-
-// get will fetch all indexed items under key, passing each to hook.
-func (i *Index) get(key string, hook func(*indexed_item)) {
- if hook == nil {
- panic("nil hook")
- }
-
- // Get list at hash.
- l := i.data.Get(key)
- if l == nil {
- return
- }
-
- // Iterate the list.
- for elem := l.head; //
- elem != nil; //
- {
- // Get next before
- // any modification.
- next := elem.next
-
- // Extract element entry + item.
- entry := (*index_entry)(elem.data)
- item := entry.item
-
- // Pass to hook.
- hook(item)
-
- // Set next.
- elem = next
- }
-}
-
-// key ...
-func (i *Index) key(buf *byteutil.Buffer, parts []unsafe.Pointer) string {
- if len(parts) != len(i.fields) {
- panic(assert("len(parts) = len(i.fields)"))
- }
- buf.B = buf.B[:0]
- if !allow_zero(i.flags) {
- for x, field := range i.fields {
- before := len(buf.B)
- buf.B = field.mangle(buf.B, parts[x])
- if string(buf.B[before:]) == field.zerostr {
- return ""
- }
- buf.B = append(buf.B, '.')
- }
- } else {
- for x, field := range i.fields {
- buf.B = field.mangle(buf.B, parts[x])
- buf.B = append(buf.B, '.')
- }
- }
- return string(buf.B)
-}
-
-// add will attempt to add given index entry to appropriate
-// doubly-linked-list in index hashmap. in the case of an
-// existing entry in a "unique" index, it will return false.
-func (i *Index) add(key string, item *indexed_item) bool {
- // Look for existing.
- l := i.data.Get(key)
-
- if l == nil {
-
- // Allocate new.
- l = new_list()
- i.data.Put(key, l)
-
- } else if is_unique(i.flags) {
-
- // Collision!
- return false
- }
-
- // Prepare new index entry.
- entry := new_index_entry()
- entry.item = item
- entry.key = key
- entry.index = i
-
- // Add ourselves to item's index tracker.
- item.indexed = append(item.indexed, entry)
-
- // Add entry to index list.
- l.push_front(&entry.elem)
- return true
-}
-
-// append will append the given index entry to appropriate
-// doubly-linked-list in index hashmap. this handles case of
-// overwriting "unique" index entries, and removes from given
-// outer linked-list in the case that it is no longer indexed.
-func (i *Index) append(key string, item *indexed_item) (evicted *indexed_item) {
- // Look for existing.
- l := i.data.Get(key)
-
- if l == nil {
-
- // Allocate new.
- l = new_list()
- i.data.Put(key, l)
-
- } else if is_unique(i.flags) {
-
- // Remove head.
- elem := l.head
- l.remove(elem)
-
- // Drop index from inner item,
- // catching the evicted item.
- e := (*index_entry)(elem.data)
- evicted = e.item
- evicted.drop_index(e)
-
- // Free unused entry.
- free_index_entry(e)
-
- if len(evicted.indexed) != 0 {
- // Evicted is still stored
- // under index, don't return.
- evicted = nil
- }
- }
-
- // Prepare new index entry.
- entry := new_index_entry()
- entry.item = item
- entry.key = key
- entry.index = i
-
- // Add ourselves to item's index tracker.
- item.indexed = append(item.indexed, entry)
-
- // Add entry to index list.
- l.push_front(&entry.elem)
- return
-}
-
-// delete will remove all indexed items under key, passing each to hook.
-func (i *Index) delete(key string, hook func(*indexed_item)) {
- if hook == nil {
- panic("nil hook")
- }
-
- // Get list at hash.
- l := i.data.Get(key)
- if l == nil {
- return
- }
-
- // Delete at hash.
- i.data.Delete(key)
-
- // Iterate the list.
- for elem := l.head; //
- elem != nil; //
- {
- // Get next before
- // any modification.
- next := elem.next
-
- // Remove elem.
- l.remove(elem)
-
- // Extract element entry + item.
- entry := (*index_entry)(elem.data)
- item := entry.item
-
- // Drop index from item.
- item.drop_index(entry)
-
- // Free now-unused entry.
- free_index_entry(entry)
-
- // Pass to hook.
- hook(item)
-
- // Set next.
- elem = next
- }
-
- // Release list.
- free_list(l)
-}
-
-// delete_entry deletes the given index entry.
-func (i *Index) delete_entry(entry *index_entry) {
- // Get list at hash sum.
- l := i.data.Get(entry.key)
- if l == nil {
- return
- }
-
- // Remove list entry.
- l.remove(&entry.elem)
-
- if l.len == 0 {
- // Remove entry from map.
- i.data.Delete(entry.key)
-
- // Release list.
- free_list(l)
- }
-
- // Drop this index from item.
- entry.item.drop_index(entry)
-}
-
-// index_entry represents a single entry
-// in an Index{}, where it will be accessible
-// by .key pointing to a containing list{}.
-type index_entry struct {
-
- // list elem that entry is stored
- // within, under containing index.
- // elem.data is ptr to index_entry.
- elem list_elem
-
- // index this is stored in.
- index *Index
-
- // underlying indexed item.
- item *indexed_item
-
- // raw cache key
- // for this entry.
- key string
-}
-
-var index_entry_pool mempool.UnsafePool
-
-// new_index_entry returns a new prepared index_entry.
-func new_index_entry() *index_entry {
- if ptr := index_entry_pool.Get(); ptr != nil {
- return (*index_entry)(ptr)
- }
- entry := new(index_entry)
- entry.elem.data = unsafe.Pointer(entry)
- return entry
-}
-
-// free_index_entry releases the index_entry.
-func free_index_entry(entry *index_entry) {
- if entry.elem.next != nil ||
- entry.elem.prev != nil {
- msg := assert("entry not in use")
- os.Stderr.WriteString(msg + "\n")
- return
- }
- entry.key = ""
- entry.index = nil
- entry.item = nil
- ptr := unsafe.Pointer(entry)
- index_entry_pool.Put(ptr)
-}
-
-func is_unique(f uint8) bool {
- const mask = uint8(1) << 0
- return f&mask != 0
-}
-
-func set_is_unique(f *uint8) {
- const mask = uint8(1) << 0
- (*f) |= mask
-}
-
-func allow_zero(f uint8) bool {
- const mask = uint8(1) << 1
- return f&mask != 0
-}
-
-func set_allow_zero(f *uint8) {
- const mask = uint8(1) << 1
- (*f) |= mask
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/item.go b/vendor/codeberg.org/gruf/go-structr/item.go
deleted file mode 100644
index 4c63b97c6..000000000
--- a/vendor/codeberg.org/gruf/go-structr/item.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package structr
-
-import (
- "os"
- "unsafe"
-
- "codeberg.org/gruf/go-mempool"
-)
-
-type indexed_item struct {
- // linked list elem this item
- // is stored in a main list.
- elem list_elem
-
- // cached data with type.
- data interface{}
-
- // indexed stores the indices
- // this item is stored under.
- indexed []*index_entry
-}
-
-var indexed_item_pool mempool.UnsafePool
-
-// new_indexed_item returns a new prepared indexed_item.
-func new_indexed_item() *indexed_item {
- if ptr := indexed_item_pool.Get(); ptr != nil {
- return (*indexed_item)(ptr)
- }
- item := new(indexed_item)
- item.elem.data = unsafe.Pointer(item)
- return item
-}
-
-// free_indexed_item releases the indexed_item.
-func free_indexed_item(item *indexed_item) {
- if len(item.indexed) > 0 ||
- item.elem.next != nil ||
- item.elem.prev != nil {
- msg := assert("item not in use")
- os.Stderr.WriteString(msg + "\n")
- return
- }
- item.data = nil
- ptr := unsafe.Pointer(item)
- indexed_item_pool.Put(ptr)
-}
-
-// drop_index will drop the given index entry from item's indexed.
-func (i *indexed_item) drop_index(entry *index_entry) {
- for x := 0; x < len(i.indexed); x++ {
- if i.indexed[x] != entry {
- // Prof. Obiwan:
- // this is not the index
- // we are looking for.
- continue
- }
-
- // Reslice index entries minus 'x'.
- _ = copy(i.indexed[x:], i.indexed[x+1:])
- i.indexed[len(i.indexed)-1] = nil
- i.indexed = i.indexed[:len(i.indexed)-1]
- break
- }
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/key.go b/vendor/codeberg.org/gruf/go-structr/key.go
deleted file mode 100644
index ad591f8ac..000000000
--- a/vendor/codeberg.org/gruf/go-structr/key.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package structr
-
-import (
- "sync"
-
- "codeberg.org/gruf/go-byteutil"
- "codeberg.org/gruf/go-mangler/v2"
-)
-
-// Key represents one key to
-// lookup (potentially) stored
-// entries in an Index.
-type Key struct {
- key string
- raw []any
-}
-
-// MakeKey generates Key{} from given parts.
-func MakeKey(parts ...any) Key {
- buf := new_buffer()
- buf.B = mangler.AppendMulti(buf.B[:0], parts...)
- key := string(buf.B)
- free_buffer(buf)
- return Key{
- raw: parts,
- key: key,
- }
-}
-
-// MakeKeys generates []Key{} from given (multiple) parts.
-func MakeKeys(parts ...[]any) []Key {
- keys := make([]Key, len(parts))
- if len(keys) != len(parts) {
- panic(assert("BCE"))
- }
- buf := new_buffer()
- for x, parts := range parts {
- buf.B = mangler.AppendMulti(buf.B[:0], parts...)
- key := string(buf.B)
- keys[x] = Key{
- raw: parts,
- key: key,
- }
- }
- free_buffer(buf)
- return keys
-}
-
-// Key returns the underlying cache key string.
-// NOTE: this will not be log output friendly.
-func (k Key) Key() string {
- return k.key
-}
-
-// Equal returns whether keys are equal.
-func (k Key) Equal(o Key) bool {
- return (k.key == o.key)
-}
-
-// Value returns the raw slice of
-// values that comprise this Key.
-func (k Key) Values() []any {
- return k.raw
-}
-
-var buf_pool sync.Pool
-
-// new_buffer returns a new initialized byte buffer.
-func new_buffer() *byteutil.Buffer {
- v := buf_pool.Get()
- if v == nil {
- buf := new(byteutil.Buffer)
- buf.B = make([]byte, 0, 512)
- v = buf
- }
- return v.(*byteutil.Buffer)
-}
-
-// free_buffer releases the byte buffer.
-func free_buffer(buf *byteutil.Buffer) {
- if cap(buf.B) > int(^uint16(0)) {
- return // drop large bufs
- }
- buf_pool.Put(buf)
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/list.go b/vendor/codeberg.org/gruf/go-structr/list.go
deleted file mode 100644
index 1c931fd51..000000000
--- a/vendor/codeberg.org/gruf/go-structr/list.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package structr
-
-import (
- "os"
- "unsafe"
-
- "codeberg.org/gruf/go-mempool"
-)
-
-// elem represents an elem
-// in a doubly-linked list.
-type list_elem struct {
- next *list_elem
- prev *list_elem
-
- // data is a ptr to the
- // value this linked list
- // element is embedded-in.
- data unsafe.Pointer
-}
-
-// list implements a doubly-linked list, where:
-// - head = index 0 (i.e. the front)
-// - tail = index n-1 (i.e. the back)
-type list struct {
- head *list_elem
- tail *list_elem
- len int
-}
-
-var list_pool mempool.UnsafePool
-
-// new_list returns a new prepared list.
-func new_list() *list {
- if ptr := list_pool.Get(); ptr != nil {
- return (*list)(ptr)
- }
- return new(list)
-}
-
-// free_list releases the list.
-func free_list(list *list) {
- if list.head != nil ||
- list.tail != nil ||
- list.len != 0 {
- msg := assert("list not in use")
- os.Stderr.WriteString(msg + "\n")
- return
- }
- ptr := unsafe.Pointer(list)
- list_pool.Put(ptr)
-}
-
-// push_front will push the given elem to front (head) of list.
-func (l *list) push_front(elem *list_elem) {
-
- // Set new head.
- oldHead := l.head
- l.head = elem
-
- if oldHead != nil {
- // Link to old head
- elem.next = oldHead
- oldHead.prev = elem
- } else {
- // First in list.
- l.tail = elem
- }
-
- // Incr
- // count
- l.len++
-}
-
-// push_back will push the given elem to back (tail) of list.
-func (l *list) push_back(elem *list_elem) {
-
- // Set new tail.
- oldTail := l.tail
- l.tail = elem
-
- if oldTail != nil {
- // Link to old tail
- elem.prev = oldTail
- oldTail.next = elem
- } else {
- // First in list.
- l.head = elem
- }
-
- // Incr
- // count
- l.len++
-}
-
-// move_front will move given elem to front (head) of list.
-// if it is already at front this call is a no-op.
-func (l *list) move_front(elem *list_elem) {
- if elem == l.head {
- return
- }
- l.remove(elem)
- l.push_front(elem)
-}
-
-// move_back will move given elem to back (tail) of list,
-// if it is already at back this call is a no-op.
-func (l *list) move_back(elem *list_elem) {
- if elem == l.tail {
- return
- }
- l.remove(elem)
- l.push_back(elem)
-}
-
-// insert will insert given element at given location in list.
-func (l *list) insert(elem *list_elem, at *list_elem) {
- if elem == at {
- return
- }
-
- // Set new 'next'.
- oldNext := at.next
- at.next = elem
-
- // Link to 'at'.
- elem.prev = at
-
- if oldNext == nil {
- // Set new tail
- l.tail = elem
- } else {
- // Link to 'prev'.
- oldNext.prev = elem
- elem.next = oldNext
- }
-
- // Incr
- // count
- l.len++
-}
-
-// remove will remove given elem from list.
-func (l *list) remove(elem *list_elem) {
- // Get linked elems.
- next := elem.next
- prev := elem.prev
-
- // Unset elem.
- elem.next = nil
- elem.prev = nil
-
- switch {
- case next == nil:
- if prev == nil {
- // next == nil && prev == nil
- //
- // elem is ONLY one in list.
- l.head = nil
- l.tail = nil
- } else {
- // next == nil && prev != nil
- //
- // elem is last in list.
- l.tail = prev
- prev.next = nil
- }
-
- case prev == nil:
- // next != nil && prev == nil
- //
- // elem is front in list.
- l.head = next
- next.prev = nil
-
- // elem in middle of list.
- default:
- next.prev = prev
- prev.next = next
- }
-
- // Decr
- // count
- l.len--
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/map.go b/vendor/codeberg.org/gruf/go-structr/map.go
deleted file mode 100644
index 6a718eae1..000000000
--- a/vendor/codeberg.org/gruf/go-structr/map.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package structr
-
-type hashmap struct {
- m map[string]*list
- n int
-}
-
-func (m *hashmap) Init(cap int) {
- m.m = make(map[string]*list, cap)
- m.n = cap
-}
-
-func (m *hashmap) Get(key string) *list {
- return m.m[key]
-}
-
-func (m *hashmap) Put(key string, list *list) {
- m.m[key] = list
- if n := len(m.m); n > m.n {
- m.n = n
- }
-}
-
-func (m *hashmap) Delete(key string) {
- delete(m.m, key)
-}
-
-func (m *hashmap) Compact() {
- // Noop when hashmap size
- // is too small to matter.
- if m.n < 2048 {
- return
- }
-
- // Difference between maximum map
- // size and the current map size.
- diff := m.n - len(m.m)
-
- // Maximum load factor before
- // runtime allocates new hmap:
- // maxLoad = 13 / 16
- //
- // So we apply the inverse/2, once
- // $maxLoad/2 % of hmap is empty we
- // compact the map to drop buckets.
- //
- // TODO: this is still a relatively
- // good approximation, but it has
- // changed a little with swiss maps.
- if 2*16*diff > m.n*13 {
-
- // Create new map only big as required.
- m2 := make(map[string]*list, len(m.m))
- for k, v := range m.m {
- m2[k] = v
- }
-
- // Set new.
- m.m = m2
- m.n = len(m2)
- }
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/queue.go b/vendor/codeberg.org/gruf/go-structr/queue.go
deleted file mode 100644
index 674967d74..000000000
--- a/vendor/codeberg.org/gruf/go-structr/queue.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package structr
-
-import (
- "sync"
- "unsafe"
-)
-
-// QueueConfig defines config vars
-// for initializing a struct queue.
-type QueueConfig[StructType any] struct {
-
- // Pop is called when queue values
- // are popped, during calls to any
- // of the Pop___() series of fns.
- Pop func(StructType)
-
- // Indices defines indices to create
- // in the Queue for the receiving
- // generic struct parameter type.
- Indices []IndexConfig
-}
-
-// Queue provides a structure model queue with
-// automated indexing and popping by any init
-// defined lookups of field combinations.
-type Queue[StructType any] struct {
-
- // hook functions.
- copy func(StructType) StructType
- pop func(StructType)
-
- // main underlying
- // struct item queue.
- queue list
-
- // indices used in storing passed struct
- // types by user defined sets of fields.
- indices []Index
-
- // protective mutex, guards:
- // - Queue{}.queue
- // - Index{}.data
- // - Queue{} hook fns
- mutex sync.Mutex
-}
-
-// Init initializes the queue with given configuration
-// including struct fields to index, and necessary fns.
-func (q *Queue[T]) Init(config QueueConfig[T]) {
- t := get_type_iter[T]()
-
- if len(config.Indices) == 0 {
- panic("no indices provided")
- }
-
- // Safely copy over
- // provided config.
- q.mutex.Lock()
- defer q.mutex.Unlock()
- q.indices = make([]Index, len(config.Indices))
- for i, cfg := range config.Indices {
- q.indices[i].ptr = unsafe.Pointer(q)
- q.indices[i].init(t, cfg, 0)
- }
- q.pop = config.Pop
-}
-
-// Index selects index with given name from queue, else panics.
-func (q *Queue[T]) Index(name string) *Index {
- for i, idx := range q.indices {
- if idx.name == name {
- return &(q.indices[i])
- }
- }
- panic("unknown index: " + name)
-}
-
-// PopFront pops the current value at front of the queue.
-func (q *Queue[T]) PopFront() (T, bool) {
- t := q.PopFrontN(1)
- if len(t) == 0 {
- var t T
- return t, false
- }
- return t[0], true
-}
-
-// PopBack pops the current value at back of the queue.
-func (q *Queue[T]) PopBack() (T, bool) {
- t := q.PopBackN(1)
- if len(t) == 0 {
- var t T
- return t, false
- }
- return t[0], true
-}
-
-// PopFrontN attempts to pop n values from front of the queue.
-func (q *Queue[T]) PopFrontN(n int) []T {
- return q.pop_n(n, func() *list_elem {
- return q.queue.head
- })
-}
-
-// PopBackN attempts to pop n values from back of the queue.
-func (q *Queue[T]) PopBackN(n int) []T {
- return q.pop_n(n, func() *list_elem {
- return q.queue.tail
- })
-}
-
-// Pop attempts to pop values from queue indexed under any of keys.
-func (q *Queue[T]) Pop(index *Index, keys ...Key) []T {
- if index == nil {
- panic("no index given")
- } else if index.ptr != unsafe.Pointer(q) {
- panic("invalid index for queue")
- }
-
- // Acquire lock.
- q.mutex.Lock()
-
- // Preallocate expected ret slice.
- values := make([]T, 0, len(keys))
-
- for i := range keys {
- // Delete all items under key from index, collecting
- // value items and dropping them from all their indices.
- index.delete(keys[i].key, func(item *indexed_item) {
-
- // Append deleted to values.
- value := item.data.(T)
- values = append(values, value)
-
- // Delete item.
- q.delete(item)
- })
- }
-
- // Get func ptrs.
- pop := q.pop
-
- // Done with lock.
- q.mutex.Unlock()
-
- if pop != nil {
- // Pass all popped values
- // to given user hook (if set).
- for _, value := range values {
- pop(value)
- }
- }
-
- return values
-}
-
-// PushFront pushes values to front of queue.
-func (q *Queue[T]) PushFront(values ...T) {
- q.mutex.Lock()
- for i := range values {
- item := q.index(values[i])
- q.queue.push_front(&item.elem)
- }
- q.mutex.Unlock()
-}
-
-// PushBack pushes values to back of queue.
-func (q *Queue[T]) PushBack(values ...T) {
- q.mutex.Lock()
- for i := range values {
- item := q.index(values[i])
- q.queue.push_back(&item.elem)
- }
- q.mutex.Unlock()
-}
-
-// MoveFront attempts to move values indexed under any of keys to the front of the queue.
-func (q *Queue[T]) MoveFront(index *Index, keys ...Key) {
- q.mutex.Lock()
- for i := range keys {
- index.get(keys[i].key, func(item *indexed_item) {
- q.queue.move_front(&item.elem)
- })
- }
- q.mutex.Unlock()
-}
-
-// MoveBack attempts to move values indexed under any of keys to the back of the queue.
-func (q *Queue[T]) MoveBack(index *Index, keys ...Key) {
- q.mutex.Lock()
- for i := range keys {
- index.get(keys[i].key, func(item *indexed_item) {
- q.queue.move_back(&item.elem)
- })
- }
- q.mutex.Unlock()
-}
-
-// Len returns the current length of queue.
-func (q *Queue[T]) Len() int {
- q.mutex.Lock()
- l := q.queue.len
- q.mutex.Unlock()
- return l
-}
-
-// Debug returns debug stats about queue.
-func (q *Queue[T]) Debug() map[string]any {
- m := make(map[string]any, 2)
- q.mutex.Lock()
- m["queue"] = q.queue.len
- indices := make(map[string]any, len(q.indices))
- m["indices"] = indices
- for _, idx := range q.indices {
- var n uint64
- for _, l := range idx.data.m {
- n += uint64(l.len)
- }
- indices[idx.name] = n
- }
- q.mutex.Unlock()
- return m
-}
-
-func (q *Queue[T]) pop_n(n int, next func() *list_elem) []T {
- if next == nil {
- panic("nil fn")
- }
-
- // Acquire lock.
- q.mutex.Lock()
-
- // Preallocate ret slice.
- values := make([]T, 0, n)
-
- // Iterate over 'n' items.
- for i := 0; i < n; i++ {
-
- // Get next elem.
- next := next()
- if next == nil {
-
- // reached
- // end.
- break
- }
-
- // Cast the indexed item from elem.
- item := (*indexed_item)(next.data)
-
- // Append deleted to values.
- value := item.data.(T)
- values = append(values, value)
-
- // Delete item.
- q.delete(item)
- }
-
- // Get func ptrs.
- pop := q.pop
-
- // Done with lock.
- q.mutex.Unlock()
-
- if pop != nil {
- // Pass all popped values
- // to given user hook (if set).
- for _, value := range values {
- pop(value)
- }
- }
-
- return values
-}
-
-func (q *Queue[T]) index(value T) *indexed_item {
- item := new_indexed_item()
- if cap(item.indexed) < len(q.indices) {
-
- // Preallocate item indices slice to prevent Go auto
- // allocating overlying large slices we don't need.
- item.indexed = make([]*index_entry, 0, len(q.indices))
- }
-
- // Set item value.
- item.data = value
-
- // Get ptr to value data.
- ptr := unsafe.Pointer(&value)
-
- // Acquire key buf.
- buf := new_buffer()
-
- for i := range q.indices {
- // Get current index ptr.
- idx := &(q.indices[i])
-
- // Extract fields comprising index key.
- parts := extract_fields(ptr, idx.fields)
-
- // Calculate index key.
- key := idx.key(buf, parts)
- if key == "" {
- continue
- }
-
- // Append item to this index.
- evicted := idx.append(key, item)
- if evicted != nil {
-
- // This item is no longer
- // indexed, remove from list.
- q.queue.remove(&evicted.elem)
- free_indexed_item(evicted)
- }
- }
-
- // Done with buf.
- free_buffer(buf)
-
- return item
-}
-
-func (q *Queue[T]) delete(i *indexed_item) {
- for len(i.indexed) > 0 {
- // Pop last indexed entry from list.
- entry := i.indexed[len(i.indexed)-1]
- i.indexed[len(i.indexed)-1] = nil
- i.indexed = i.indexed[:len(i.indexed)-1]
-
- // Get entry's index.
- index := entry.index
-
- // Drop this index_entry.
- index.delete_entry(entry)
-
- // Compact index map.
- index.data.Compact()
- }
-
- // Drop from queue list.
- q.queue.remove(&i.elem)
-
- // Free unused item.
- free_indexed_item(i)
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/queue_ctx.go b/vendor/codeberg.org/gruf/go-structr/queue_ctx.go
deleted file mode 100644
index 9a9c615e2..000000000
--- a/vendor/codeberg.org/gruf/go-structr/queue_ctx.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package structr
-
-import (
- "context"
-)
-
-// QueueCtx is a context-aware form of Queue{}.
-type QueueCtx[StructType any] struct {
- ch chan struct{}
- Queue[StructType]
-}
-
-// PopFront pops the current value at front of the queue, else blocking on ctx.
-func (q *QueueCtx[T]) PopFront(ctx context.Context) (T, bool) {
- return q.pop(ctx, func() *list_elem {
- return q.queue.head
- })
-}
-
-// PopBack pops the current value at back of the queue, else blocking on ctx.
-func (q *QueueCtx[T]) PopBack(ctx context.Context) (T, bool) {
- return q.pop(ctx, func() *list_elem {
- return q.queue.tail
- })
-}
-
-// PushFront pushes values to front of queue.
-func (q *QueueCtx[T]) PushFront(values ...T) {
- q.mutex.Lock()
- for i := range values {
- item := q.index(values[i])
- q.queue.push_front(&item.elem)
- }
- if q.ch != nil {
- close(q.ch)
- q.ch = nil
- }
- q.mutex.Unlock()
-}
-
-// PushBack pushes values to back of queue.
-func (q *QueueCtx[T]) PushBack(values ...T) {
- q.mutex.Lock()
- for i := range values {
- item := q.index(values[i])
- q.queue.push_back(&item.elem)
- }
- if q.ch != nil {
- close(q.ch)
- q.ch = nil
- }
- q.mutex.Unlock()
-}
-
-// Wait returns a ptr to the current ctx channel,
-// this will block until next push to the queue.
-func (q *QueueCtx[T]) Wait() <-chan struct{} {
- q.mutex.Lock()
- if q.ch == nil {
- q.ch = make(chan struct{})
- }
- ctx := q.ch
- q.mutex.Unlock()
- return ctx
-}
-
-// Debug returns debug stats about queue.
-func (q *QueueCtx[T]) Debug() map[string]any {
- m := make(map[string]any)
- q.mutex.Lock()
- m["queue"] = q.queue.len
- indices := make(map[string]any)
- m["indices"] = indices
- for i := range q.indices {
- var n uint64
- for _, l := range q.indices[i].data.m {
- n += uint64(l.len)
- }
- indices[q.indices[i].name] = n
- }
- q.mutex.Unlock()
- return m
-}
-
-func (q *QueueCtx[T]) pop(ctx context.Context, next func() *list_elem) (T, bool) {
- if next == nil {
- panic("nil fn")
- } else if ctx == nil {
- panic("nil ctx")
- }
-
- // Acquire lock.
- q.mutex.Lock()
-
- var elem *list_elem
-
- for {
- // Get element.
- elem = next()
- if elem != nil {
- break
- }
-
- if q.ch == nil {
- // Allocate new ctx channel.
- q.ch = make(chan struct{})
- }
-
- // Get current
- // ch pointer.
- ch := q.ch
-
- // Unlock queue.
- q.mutex.Unlock()
-
- select {
- // Ctx cancelled.
- case <-ctx.Done():
- var z T
- return z, false
-
- // Pushed!
- case <-ch:
- }
-
- // Relock queue.
- q.mutex.Lock()
- }
-
- // Cast the indexed item from elem.
- item := (*indexed_item)(elem.data)
-
- // Extract item value.
- value := item.data.(T)
-
- // Delete item.
- q.delete(item)
-
- // Get func ptrs.
- pop := q.Queue.pop
-
- // Done with lock.
- q.mutex.Unlock()
-
- if pop != nil {
- // Pass to
- // user hook.
- pop(value)
- }
-
- return value, true
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/runtime.go b/vendor/codeberg.org/gruf/go-structr/runtime.go
deleted file mode 100644
index 508cd6e4c..000000000
--- a/vendor/codeberg.org/gruf/go-structr/runtime.go
+++ /dev/null
@@ -1,294 +0,0 @@
-package structr
-
-import (
- "fmt"
- "reflect"
- "runtime"
- "strings"
- "unicode"
- "unicode/utf8"
- "unsafe"
-
- "codeberg.org/gruf/go-mangler/v2"
- "codeberg.org/gruf/go-xunsafe"
-)
-
-// struct_field contains pre-prepared type
-// information about a struct's field member,
-// including memory offset and hash function.
-type struct_field struct {
-
- // struct field type mangling
- // (i.e. fast serializing) fn.
- mangle mangler.Mangler
-
- // zero value data, used when
- // nil encountered during ptr
- // offset following.
- zero unsafe.Pointer
-
- // mangled zero value string,
- // to check zero value keys.
- zerostr string
-
- // offsets defines whereabouts in
- // memory this field is located,
- // and after how many dereferences.
- offsets []next_offset
-}
-
-// next_offset defines a next offset location
-// in a struct_field, first by the number of
-// derefences required, then by offset from
-// that final memory location.
-type next_offset struct {
- derefs int
- offset uintptr
-}
-
-// get_type_iter returns a prepared xunsafe.TypeIter{} for generic parameter type,
-// with flagIndir specifically set as we always take a reference to value type.
-func get_type_iter[T any]() xunsafe.TypeIter {
- rtype := reflect.TypeOf((*T)(nil)).Elem()
- flags := xunsafe.Reflect_flag(xunsafe.Abi_Type_Kind(rtype))
- flags |= xunsafe.Reflect_flagIndir // always comes from unsafe ptr
- return xunsafe.ToTypeIter(rtype, flags)
-}
-
-// find_field will search for a struct field with given set of names,
-// where names is a len > 0 slice of names account for struct nesting.
-func find_field(t xunsafe.TypeIter, names []string) (sfield struct_field, ftype reflect.Type) {
- var (
- // is_exported returns whether name is exported
- // from a package; can be func or struct field.
- is_exported = func(name string) bool {
- r, _ := utf8.DecodeRuneInString(name)
- return unicode.IsUpper(r)
- }
-
- // pop_name pops the next name from
- // the provided slice of field names.
- pop_name = func() string {
- name := names[0]
- names = names[1:]
- if !is_exported(name) {
- panic(fmt.Sprintf("field is not exported: %s", name))
- }
- return name
- }
-
- // field is the iteratively searched
- // struct field value in below loop.
- field reflect.StructField
- )
-
- // Take reference
- // of parent iter.
- o := t
-
- for len(names) > 0 {
- // Pop next name.
- name := pop_name()
-
- var n int
- rtype := t.Type
- flags := t.Flag
-
- // Iteratively dereference pointer types.
- for rtype.Kind() == reflect.Pointer {
-
- // If this actual indirect memory,
- // increase dereferences counter.
- if flags&xunsafe.Reflect_flagIndir != 0 {
- n++
- }
-
- // Get next elem type.
- rtype = rtype.Elem()
-
- // Get next set of dereferenced element type flags.
- flags = xunsafe.ReflectPointerElemFlags(flags, rtype)
-
- // Update type iter info.
- t = t.Child(rtype, flags)
- }
-
- // Check for valid struct type.
- if rtype.Kind() != reflect.Struct {
- panic(fmt.Sprintf("field %s is not struct (or ptr-to): %s", rtype, name))
- }
-
- // Set offset info.
- var off next_offset
- off.derefs = n
-
- var ok bool
-
- // Look for the next field by name.
- field, ok = rtype.FieldByName(name)
- if !ok {
- panic(fmt.Sprintf("unknown field: %s", name))
- }
-
- // Set next offset value.
- off.offset = field.Offset
- sfield.offsets = append(sfield.offsets, off)
-
- // Calculate value flags, and set next nested field type.
- flags = xunsafe.ReflectStructFieldFlags(t.Flag, field.Type)
- t = t.Child(field.Type, flags)
- }
-
- // Set final field type.
- ftype = t.TypeInfo.Type
-
- // Get mangler from type info.
- sfield.mangle = mangler.Get(t)
-
- // Calculate zero value string.
- zptr := zero_value_ptr(o, sfield.offsets)
- zstr := string(sfield.mangle(nil, zptr))
- sfield.zerostr = zstr
- sfield.zero = zptr
-
- return
-}
-
-// zero_value iterates the type contained in TypeIter{} along the given
-// next_offset{} values, creating new ptrs where necessary, returning the
-// zero reflect.Value{} after fully iterating the next_offset{} slice.
-func zero_value(t xunsafe.TypeIter, offsets []next_offset) reflect.Value {
- v := reflect.New(t.Type).Elem()
- for _, offset := range offsets {
- for range offset.derefs {
- if v.IsNil() {
- new := reflect.New(v.Type().Elem())
- v.Set(new)
- }
- v = v.Elem()
- }
- for i := 0; i < v.NumField(); i++ {
- if v.Type().Field(i).Offset == offset.offset {
- v = v.Field(i)
- break
- }
- }
- }
- return v
-}
-
-// zero_value_ptr returns the unsafe pointer address of the result of zero_value().
-func zero_value_ptr(t xunsafe.TypeIter, offsets []next_offset) unsafe.Pointer {
- return zero_value(t, offsets).Addr().UnsafePointer()
-}
-
-// extract_fields extracts given structfields from the provided value type,
-// this is done using predetermined struct field memory offset locations.
-func extract_fields(ptr unsafe.Pointer, fields []struct_field) []unsafe.Pointer {
-
- // Prepare slice of field value pointers.
- ptrs := make([]unsafe.Pointer, len(fields))
- if len(ptrs) != len(fields) {
- panic(assert("BCE"))
- }
-
- for i, field := range fields {
- // loop scope.
- fptr := ptr
-
- for _, offset := range field.offsets {
- // Dereference any ptrs to offset.
- fptr = deref(fptr, offset.derefs)
- if fptr == nil {
- break
- }
-
- // Jump forward by offset to next ptr.
- fptr = unsafe.Pointer(uintptr(fptr) +
- offset.offset)
- }
-
- if fptr == nil {
- // Use zero value.
- fptr = field.zero
- }
-
- // Set field ptr.
- ptrs[i] = fptr
- }
-
- return ptrs
-}
-
-// pkey_field contains pre-prepared type
-// information about a primary key struct's
-// field member, including memory offset.
-type pkey_field struct {
-
- // zero value data, used when
- // nil encountered during ptr
- // offset following.
- zero unsafe.Pointer
-
- // offsets defines whereabouts in
- // memory this field is located.
- offsets []next_offset
-}
-
-// extract_pkey will extract a pointer from 'ptr', to
-// the primary key struct field defined by 'field'.
-func extract_pkey(ptr unsafe.Pointer, field pkey_field) unsafe.Pointer {
- for _, offset := range field.offsets {
-
- // Dereference any ptrs to offset.
- ptr = deref(ptr, offset.derefs)
- if ptr == nil {
- break
- }
-
- // Jump forward by offset to next ptr.
- ptr = unsafe.Pointer(uintptr(ptr) +
- offset.offset)
- }
-
- if ptr == nil {
- // Use zero value.
- ptr = field.zero
- }
-
- return ptr
-}
-
-// deref will dereference ptr 'n' times (or until nil).
-func deref(p unsafe.Pointer, n int) unsafe.Pointer {
- for ; n > 0; n-- {
- if p == nil {
- return nil
- }
- p = *(*unsafe.Pointer)(p)
- }
- return p
-}
-
-// assert can be called to indicated a block
-// of code should not be able to be reached,
-// it returns a BUG report with callsite.
-func assert(assert string) string {
- pcs := make([]uintptr, 1)
- _ = runtime.Callers(2, pcs)
- funcname := "go-structr" // by default use just our library name
- if frames := runtime.CallersFrames(pcs); frames != nil {
- frame, _ := frames.Next()
- funcname = frame.Function
- if i := strings.LastIndexByte(funcname, '/'); i != -1 {
- funcname = funcname[i+1:]
- }
- }
- var buf strings.Builder
- buf.Grow(32 + len(assert) + len(funcname))
- buf.WriteString("BUG: assertion \"")
- buf.WriteString(assert)
- buf.WriteString("\" failed in ")
- buf.WriteString(funcname)
- return buf.String()
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/timeline.go b/vendor/codeberg.org/gruf/go-structr/timeline.go
deleted file mode 100644
index e738a8228..000000000
--- a/vendor/codeberg.org/gruf/go-structr/timeline.go
+++ /dev/null
@@ -1,1197 +0,0 @@
-package structr
-
-import (
- "cmp"
- "os"
- "reflect"
- "slices"
- "strings"
- "sync"
- "sync/atomic"
- "unsafe"
-
- "codeberg.org/gruf/go-mempool"
-)
-
-// Direction defines a direction
-// to iterate entries in a Timeline.
-type Direction bool
-
-const (
- // Asc = ascending, i.e. bottom-up.
- Asc = Direction(true)
-
- // Desc = descending, i.e. top-down.
- Desc = Direction(false)
-)
-
-// TimelineConfig defines config vars for initializing a Timeline{}.
-type TimelineConfig[StructType any, PK cmp.Ordered] struct {
-
- // Copy provides a means of copying
- // timelined values, to ensure returned values
- // do not share memory with those in timeline.
- Copy func(StructType) StructType
-
- // Invalidate is called when timelined
- // values are invalidated, either as passed
- // to Insert(), or by calls to Invalidate().
- Invalidate func(StructType)
-
- // PKey defines the generic parameter StructType's
- // field to use as the primary key for this cache.
- // It must be ordered so that the timeline can
- // maintain correct sorting of inserted values.
- //
- // Field selection logic follows the same path as
- // with IndexConfig{}.Fields. Noting that in this
- // case only a single field is permitted, though
- // it may be nested, and as described above the
- // type must conform to cmp.Ordered.
- PKey IndexConfig
-
- // Indices defines indices to create
- // in the Timeline for the receiving
- // generic struct type parameter.
- Indices []IndexConfig
-}
-
-// Timeline provides an ordered-list like cache of structures,
-// with automated indexing and invalidation by any initialization
-// defined combination of fields. The list order is maintained
-// according to the configured struct primary key.
-type Timeline[StructType any, PK cmp.Ordered] struct {
-
- // hook functions.
- invalid func(StructType)
- copy func(StructType) StructType
-
- // main underlying
- // timeline list.
- //
- // where:
- // - head = top = largest
- // - tail = btm = smallest
- list list
-
- // contains struct field information of
- // the field used as the primary key for
- // this timeline. it can also be found
- // under indices[0]
- pkey pkey_field
-
- // indices used in storing passed struct
- // types by user defined sets of fields.
- indices []Index
-
- // atomically updated head
- // / tail primary key values.
- headPK unsafe.Pointer
- tailPK unsafe.Pointer
-
- // protective mutex, guards:
- // - Timeline{}.*
- // - Index{}.data
- mutex sync.Mutex
-}
-
-// Init initializes the timeline with given configuration
-// including struct fields to index, and necessary fns.
-func (t *Timeline[T, PK]) Init(config TimelineConfig[T, PK]) {
- ti := get_type_iter[T]()
-
- if len(config.Indices) == 0 {
- panic("no indices provided")
- }
-
- if config.Copy == nil {
- panic("copy function must be provided")
- }
-
- if strings.Contains(config.PKey.Fields, ",") {
- panic("primary key must contain only 1 field")
- }
-
- // Verify primary key parameter type is correct.
- names := strings.Split(config.PKey.Fields, ".")
- if _, ftype := find_field(ti, names); //
- ftype != reflect.TypeFor[PK]() {
- panic("primary key field path and generic parameter type do not match")
- }
-
- // Safely copy over
- // provided config.
- t.mutex.Lock()
- defer t.mutex.Unlock()
-
- // The first index is created from PKey,
- // other indices are created as expected.
- t.indices = make([]Index, len(config.Indices)+1)
- t.indices[0].ptr = unsafe.Pointer(t)
- t.indices[0].init(ti, config.PKey, 0)
- for i, cfg := range config.Indices {
- t.indices[i+1].ptr = unsafe.Pointer(t)
- t.indices[i+1].init(ti, cfg, 0)
- }
-
- // Extract pkey details from index.
- field := t.indices[0].fields[0]
- t.pkey = pkey_field{
- zero: field.zero,
- offsets: field.offsets,
- }
-
- // Copy over remaining.
- t.copy = config.Copy
- t.invalid = config.Invalidate
-}
-
-// Index selects index with given name from timeline, else panics.
-func (t *Timeline[T, PK]) Index(name string) *Index {
- for i, idx := range t.indices {
- if idx.name == name {
- return &(t.indices[i])
- }
- }
- panic("unknown index: " + name)
-}
-
-// Head returns the current head primary key.
-func (t *Timeline[T, PK]) Head() *PK {
- return (*PK)(atomic.LoadPointer(&t.headPK))
-}
-
-// Tail returns the current tail primary key.
-func (t *Timeline[T, PK]) Tail() *PK {
- return (*PK)(atomic.LoadPointer(&t.tailPK))
-}
-
-// Select allows you to retrieve a slice of values, in order, from the timeline.
-// This slice is defined by the minimum and maximum primary key parameters, up to
-// a given length in size. The direction in which you select will determine which
-// of the min / max primary key values is used as the *cursor* to begin the start
-// of the selection, and which is used as the *boundary* to mark the end, if set.
-// In either case, the length parameter is always optional.
-//
-// dir = Asc : cursors up from 'max' (required), with boundary 'min' (optional).
-// dir = Desc : cursors down from 'min' (required), with boundary 'max' (optional).
-func (t *Timeline[T, PK]) Select(min, max *PK, length *int, dir Direction) (values []T) {
-
- // Acquire lock.
- t.mutex.Lock()
-
- // Check init'd.
- if t.copy == nil {
- t.mutex.Unlock()
- panic("not initialized")
- }
-
- switch dir {
- case Asc:
- // Verify args.
- if min == nil {
- t.mutex.Unlock()
- panic("min must be provided when selecting asc")
- }
-
- // Select determined values ASCENDING.
- values = t.select_asc(*min, max, length)
-
- case Desc:
- // Verify args.
- if max == nil {
- t.mutex.Unlock()
- panic("max must be provided when selecting asc")
- }
-
- // Select determined values DESCENDING.
- values = t.select_desc(min, *max, length)
- }
-
- // Done with lock.
- t.mutex.Unlock()
-
- return values
-}
-
-// Insert will insert the given values into the timeline,
-// calling any set invalidate hook on each inserted value.
-// Returns current list length after performing inserts.
-func (t *Timeline[T, PK]) Insert(values ...T) int {
-
- // Acquire lock.
- t.mutex.Lock()
-
- // Check init'd.
- if t.copy == nil {
- t.mutex.Unlock()
- panic("not initialized")
- }
-
- // Allocate a slice of our value wrapping struct type.
- with_keys := make([]value_with_pk[T, PK], len(values))
- if len(with_keys) != len(values) {
- panic(assert("BCE"))
- }
-
- // Range the provided values.
- for i, value := range values {
-
- // Create our own copy
- // of value to work with.
- value = t.copy(value)
-
- // Take ptr to the value copy.
- vptr := unsafe.Pointer(&value)
-
- // Extract primary key from vptr.
- kptr := extract_pkey(vptr, t.pkey)
- pkey := *(*PK)(kptr)
-
- // Append wrapped value to slice with
- // the acquire pointers and primary key.
- with_keys[i] = value_with_pk[T, PK]{
- k: pkey,
- v: value,
-
- kptr: kptr,
- vptr: vptr,
- }
- }
-
- // BEFORE inserting the prepared slice of value copies w/ primary
- // keys, sort them by their primary key, descending. This permits
- // us to re-use the 'last' timeline position as next insert cursor.
- // Otherwise we would have to iterate from 'head' every single time.
- slices.SortFunc(with_keys, func(a, b value_with_pk[T, PK]) int {
- const k = +1
- switch {
- case a.k < b.k:
- return +k
- case b.k < a.k:
- return -k
- default:
- return 0
- }
- })
-
- var last *list_elem
-
- // Store each value in the timeline,
- // updating the last used list element
- // each time so we don't have to iter
- // down from head on every single store.
- for _, value := range with_keys {
- last = t.store_one(last, value)
- }
-
- // Get func ptrs.
- invalid := t.invalid
-
- // Get length AFTER
- // insert to return.
- len := t.list.len
-
- // Done with lock.
- t.mutex.Unlock()
-
- if invalid != nil {
- // Pass all invalidated values
- // to given user hook (if set).
- for _, value := range values {
- invalid(value)
- }
- }
-
- return len
-}
-
// Invalidate invalidates all entries stored in index under given keys.
// Note that if set, this will call the invalidate hook on each value.
func (t *Timeline[T, PK]) Invalidate(index *Index, keys ...Key) {
	if index == nil {
		panic("no index given")
	} else if index.ptr != unsafe.Pointer(t) {
		panic("invalid index for timeline")
	}

	// Acquire lock.
	t.mutex.Lock()

	// Preallocate expected ret slice.
	values := make([]T, 0, len(keys))

	for i := range keys {
		// Delete all items under key from index, collecting
		// value items and dropping them from all their indices.
		index.delete(keys[i].key, func(item *indexed_item) {

			// Cast to *actual* timeline item.
			t_item := to_timeline_item(item)

			if value, ok := item.data.(T); ok {
				// No need to copy, as item
				// being deleted from cache.
				values = append(values, value)
			}

			// Delete item.
			t.delete(t_item)
		})
	}

	// Get func ptrs.
	invalid := t.invalid

	// Done with lock. Hook is called AFTER
	// release so user code cannot deadlock
	// by re-entering the timeline mutex.
	t.mutex.Unlock()

	if invalid != nil {
		// Pass all invalidated values
		// to given user hook (if set).
		for _, value := range values {
			invalid(value)
		}
	}
}
-
-// Range will range over all values in the timeline in given direction.
-// dir = Asc : ranges from the bottom-up.
-// dir = Desc : ranges from the top-down.
-//
-// Please note that the entire Timeline{} will be locked for the duration of the range
-// operation, i.e. from the beginning of the first yield call until the end of the last.
-func (t *Timeline[T, PK]) Range(dir Direction) func(yield func(index int, value T) bool) {
- return func(yield func(int, T) bool) {
- if t.copy == nil {
- panic("not initialized")
- } else if yield == nil {
- panic("nil func")
- }
-
- // Acquire lock.
- t.mutex.Lock()
- defer t.mutex.Unlock()
-
- var i int
- switch dir {
-
- case Asc:
- // Iterate through linked list from bottom (i.e. tail).
- for prev := t.list.tail; prev != nil; prev = prev.prev {
-
- // Extract item from list element.
- item := (*timeline_item)(prev.data)
-
- // Create copy of item value.
- value := t.copy(item.data.(T))
-
- // Pass to given function.
- if !yield(i, value) {
- break
- }
-
- // Iter
- i++
- }
-
- case Desc:
- // Iterate through linked list from top (i.e. head).
- for next := t.list.head; next != nil; next = next.next {
-
- // Extract item from list element.
- item := (*timeline_item)(next.data)
-
- // Create copy of item value.
- value := t.copy(item.data.(T))
-
- // Pass to given function.
- if !yield(i, value) {
- break
- }
-
- // Iter
- i++
- }
- }
- }
-}
-
-// RangeUnsafe is functionally similar to Range(), except it does not pass *copies* of
-// data. It allows you to operate on the data directly and modify it. As such it can also
-// be more performant to use this function, even for read-write operations.
-//
-// Please note that the entire Timeline{} will be locked for the duration of the range
-// operation, i.e. from the beginning of the first yield call until the end of the last.
-func (t *Timeline[T, PK]) RangeUnsafe(dir Direction) func(yield func(index int, value T) bool) {
- return func(yield func(int, T) bool) {
- if t.copy == nil {
- panic("not initialized")
- } else if yield == nil {
- panic("nil func")
- }
-
- // Acquire lock.
- t.mutex.Lock()
- defer t.mutex.Unlock()
-
- var i int
- switch dir {
-
- case Asc:
- // Iterate through linked list from bottom (i.e. tail).
- for prev := t.list.tail; prev != nil; prev = prev.prev {
-
- // Extract item from list element.
- item := (*timeline_item)(prev.data)
-
- // Pass to given function.
- if !yield(i, item.data.(T)) {
- break
- }
-
- // Iter
- i++
- }
-
- case Desc:
- // Iterate through linked list from top (i.e. head).
- for next := t.list.head; next != nil; next = next.next {
-
- // Extract item from list element.
- item := (*timeline_item)(next.data)
-
- // Pass to given function.
- if !yield(i, item.data.(T)) {
- break
- }
-
- // Iter
- i++
- }
- }
- }
-}
-
-// RangeKeys will iterate over all values for given keys in the given index.
-//
-// Please note that the entire Timeline{} will be locked for the duration of the range
-// operation, i.e. from the beginning of the first yield call until the end of the last.
-func (t *Timeline[T, PK]) RangeKeys(index *Index, keys ...Key) func(yield func(T) bool) {
- return func(yield func(T) bool) {
- if t.copy == nil {
- panic("not initialized")
- } else if index == nil {
- panic("no index given")
- } else if index.ptr != unsafe.Pointer(t) {
- panic("invalid index for timeline")
- } else if yield == nil {
- panic("nil func")
- }
-
- // Acquire lock.
- t.mutex.Lock()
- defer t.mutex.Unlock()
-
- for _, key := range keys {
- var done bool
-
- // Iterate over values in index under key.
- index.get(key.key, func(i *indexed_item) {
-
- // Cast to timeline_item type.
- item := to_timeline_item(i)
-
- // Create copy of item value.
- value := t.copy(item.data.(T))
-
- // Pass val to yield function.
- done = done || !yield(value)
- })
-
- if done {
- break
- }
- }
- }
-}
-
-// RangeKeysUnsafe is functionally similar to RangeKeys(), except it does not pass *copies*
-// of data. It allows you to operate on the data directly and modify it. As such it can also
-// be more performant to use this function, even for read-write operations.
-//
-// Please note that the entire Timeline{} will be locked for the duration of the range
-// operation, i.e. from the beginning of the first yield call until the end of the last.
-func (t *Timeline[T, PK]) RangeKeysUnsafe(index *Index, keys ...Key) func(yield func(T) bool) {
- return func(yield func(T) bool) {
- if t.copy == nil {
- panic("not initialized")
- } else if index == nil {
- panic("no index given")
- } else if index.ptr != unsafe.Pointer(t) {
- panic("invalid index for timeline")
- } else if yield == nil {
- panic("nil func")
- }
-
- // Acquire lock.
- t.mutex.Lock()
- defer t.mutex.Unlock()
-
- for _, key := range keys {
- var done bool
-
- // Iterate over values in index under key.
- index.get(key.key, func(i *indexed_item) {
-
- // Cast to timeline_item type.
- item := to_timeline_item(i)
-
- // Pass value data to yield function.
- done = done || !yield(item.data.(T))
- })
-
- if done {
- break
- }
- }
- }
-}
-
// Trim will remove entries from the timeline in given
// direction, ensuring timeline is no larger than 'max'.
// If 'max' >= t.Len(), this function is a no-op.
// dir = Asc : trims from the bottom-up.
// dir = Desc : trims from the top-down.
func (t *Timeline[T, PK]) Trim(max int, dir Direction) {
	// Acquire lock.
	t.mutex.Lock()

	// Calculate number to drop.
	diff := t.list.len - int(max)
	if diff <= 0 {

		// Trim not needed.
		t.mutex.Unlock()
		return
	}

	// NOTE(review): the headPK / tailPK atomics are only zeroed below
	// when the list is fully drained; after a partial trim they are not
	// refreshed to the new head/tail keys — confirm callers tolerate this.
	switch dir {
	case Asc:
		// Iterate over 'diff' items
		// from bottom of timeline list.
		for range diff {

			// Get bottom list elem.
			bottom := t.list.tail
			if bottom == nil {

				// Zero head + tail primary keys.
				atomic.StorePointer(&t.headPK, nil)
				atomic.StorePointer(&t.tailPK, nil)

				// reached
				// end.
				break
			}

			// Drop bottom-most item from timeline.
			item := (*timeline_item)(bottom.data)
			t.delete(item)
		}

	case Desc:
		// Iterate over 'diff' items
		// from top of timeline list.
		for range diff {

			// Get top list elem.
			top := t.list.head
			if top == nil {

				// Zero head + tail primary keys.
				atomic.StorePointer(&t.headPK, nil)
				atomic.StorePointer(&t.tailPK, nil)

				// reached
				// end.
				break
			}

			// Drop top-most item from timeline.
			item := (*timeline_item)(top.data)
			t.delete(item)
		}
	}

	// Compact index data stores,
	// releasing unused capacity.
	for _, idx := range t.indices {
		(&idx).data.Compact()
	}

	// Done with lock.
	t.mutex.Unlock()
}
-
-// Clear empties the timeline by calling .TrimBottom(0, Down).
-func (t *Timeline[T, PK]) Clear() { t.Trim(0, Desc) }
-
-// Len returns the current length of cache.
-func (t *Timeline[T, PK]) Len() int {
- t.mutex.Lock()
- l := t.list.len
- t.mutex.Unlock()
- return l
-}
-
-// Debug returns debug stats about cache.
-func (t *Timeline[T, PK]) Debug() map[string]any {
- m := make(map[string]any, 2)
- t.mutex.Lock()
- m["list"] = t.list.len
- indices := make(map[string]any, len(t.indices))
- m["indices"] = indices
- for _, idx := range t.indices {
- var n uint64
- for _, l := range idx.data.m {
- n += uint64(l.len)
- }
- indices[idx.name] = n
- }
- t.mutex.Unlock()
- return m
-}
-
// select_asc selects value copies in ascending primary key order, starting
// from just above 'min' (exclusive), bounded by optional 'max' (exclusive)
// and optional result 'length'. Caller must hold t.mutex.
func (t *Timeline[T, PK]) select_asc(min PK, max *PK, length *int) (values []T) {
	// Iterate through linked list
	// from bottom (i.e. tail), asc.
	prev := t.list.tail

	// Iterate from 'prev' up, skipping all
	// entries with pkey below cursor 'min'.
	for ; prev != nil; prev = prev.prev {
		item := (*timeline_item)(prev.data)
		pkey := *(*PK)(item.pk)

		// Check below min.
		if pkey <= min {
			continue
		}

		// Reached
		// cursor.
		break
	}

	if prev == nil {
		// No values
		// remaining.
		return
	}

	// Optimized switch case to handle
	// each set of argument combinations
	// separately, in order to minimize
	// number of checks during loops.
	switch {

	case length != nil && max != nil:
		// Deref arguments.
		length := *length
		max := *max

		// Optimistic preallocate slice.
		values = make([]T, 0, length)

		// Both a length and maximum were given,
		// select from cursor until either reached.
		for ; prev != nil; prev = prev.prev {
			item := (*timeline_item)(prev.data)
			pkey := *(*PK)(item.pk)

			// Check above max.
			if pkey >= max {
				break
			}

			// Append value copy.
			value := item.data.(T)
			value = t.copy(value)
			values = append(values, value)

			// Check if length reached.
			if len(values) >= length {
				break
			}
		}

	case length != nil:
		// Deref length.
		length := *length

		// Optimistic preallocate slice.
		values = make([]T, 0, length)

		// Only a length was given, select
		// from cursor until length reached.
		for ; prev != nil; prev = prev.prev {
			item := (*timeline_item)(prev.data)

			// Append value copy.
			value := item.data.(T)
			value = t.copy(value)
			values = append(values, value)

			// Check if length reached.
			if len(values) >= length {
				break
			}
		}

	case max != nil:
		// Deref max.
		max := *max

		// Only a maximum was given, select
		// from cursor until max is reached.
		for ; prev != nil; prev = prev.prev {
			item := (*timeline_item)(prev.data)
			pkey := *(*PK)(item.pk)

			// Check above max.
			if pkey >= max {
				break
			}

			// Append value copy.
			value := item.data.(T)
			value = t.copy(value)
			values = append(values, value)
		}

	default:
		// No maximum or length were given,
		// ALL from cursor need selecting.
		for ; prev != nil; prev = prev.prev {
			item := (*timeline_item)(prev.data)

			// Append value copy.
			value := item.data.(T)
			value = t.copy(value)
			values = append(values, value)
		}
	}

	return
}
-
// select_desc selects value copies in descending primary key order, starting
// from just below 'max' (exclusive), bounded by optional 'min' (exclusive)
// and optional result 'length'. Caller must hold t.mutex.
func (t *Timeline[T, PK]) select_desc(min *PK, max PK, length *int) (values []T) {
	// Iterate through linked list
	// from top (i.e. head), desc.
	next := t.list.head

	// Iterate from 'next' down, skipping
	// all entries with pkey above cursor 'max'.
	for ; next != nil; next = next.next {
		item := (*timeline_item)(next.data)
		pkey := *(*PK)(item.pk)

		// Check above max.
		if pkey >= max {
			continue
		}

		// Reached
		// cursor.
		break
	}

	if next == nil {
		// No values
		// remaining.
		return
	}

	// Optimized switch case to handle
	// each set of argument combinations
	// separately, in order to minimize
	// number of checks during loops.
	switch {

	case length != nil && min != nil:
		// Deref arguments.
		length := *length
		min := *min

		// Optimistic preallocate slice.
		values = make([]T, 0, length)

		// Both a length and minimum were given,
		// select from cursor until either reached.
		for ; next != nil; next = next.next {
			item := (*timeline_item)(next.data)
			pkey := *(*PK)(item.pk)

			// Check below min.
			if pkey <= min {
				break
			}

			// Append value copy.
			value := item.data.(T)
			value = t.copy(value)
			values = append(values, value)

			// Check if length reached.
			if len(values) >= length {
				break
			}
		}

	case length != nil:
		// Deref length.
		length := *length

		// Optimistic preallocate slice.
		values = make([]T, 0, length)

		// Only a length was given, select
		// from cursor until length reached.
		for ; next != nil; next = next.next {
			item := (*timeline_item)(next.data)

			// Append value copy.
			value := item.data.(T)
			value = t.copy(value)
			values = append(values, value)

			// Check if length reached.
			if len(values) >= length {
				break
			}
		}

	case min != nil:
		// Deref min.
		min := *min

		// Only a minimum was given, select
		// from cursor until minimum reached.
		for ; next != nil; next = next.next {
			item := (*timeline_item)(next.data)
			pkey := *(*PK)(item.pk)

			// Check below min.
			if pkey <= min {
				break
			}

			// Append value copy.
			value := item.data.(T)
			value = t.copy(value)
			values = append(values, value)
		}

	default:
		// No minimum or length were given,
		// ALL from cursor need selecting.
		for ; next != nil; next = next.next {
			item := (*timeline_item)(next.data)

			// Append value copy.
			value := item.data.(T)
			value = t.copy(value)
			values = append(values, value)
		}
	}

	return
}
-
// value_with_pk wraps an incoming value type, with
// its extracted primary key, and pointers to both.
// this encompasses all arguments related to a value
// required by store_one(), simplifying some logic.
//
// with all the primary keys extracted, it also
// makes it much easier to sort input before insert.
type value_with_pk[T any, PK comparable] struct {
	k PK // primary key value
	v T // value copy

	kptr unsafe.Pointer // primary key ptr (i.e. ptr to k)
	vptr unsafe.Pointer // value copy ptr (i.e. ptr to v)
}
-
// store_one inserts a single (already copied) value into the timeline at its
// correct descending-pkey position, indexing it in all configured indices.
// 'last' is a cursor hint from a previous insert of a larger pkey, letting a
// sorted batch avoid re-scanning from head; the returned element is the next
// cursor hint. Duplicate pkeys on a unique primary index are silently dropped.
// Caller must hold t.mutex.
func (t *Timeline[T, PK]) store_one(last *list_elem, value value_with_pk[T, PK]) *list_elem {
	// NOTE: the value passed here should
	// already be a copy of the original.

	// Alloc new index item.
	t_item := new_timeline_item()
	if cap(t_item.indexed) < len(t.indices) {

		// Preallocate item indices slice to prevent Go auto
		// allocating overlying large slices we don't need.
		t_item.indexed = make([]*index_entry, 0, len(t.indices))
	}

	// Set item value data.
	t_item.data = value.v
	t_item.pk = value.kptr

	// Get zero'th index, i.e.
	// the primary key index.
	idx0 := (&t.indices[0])

	// Acquire key buf.
	buf := new_buffer()

	// Calculate index key from already extracted
	// primary key, checking for zero return value.
	partptrs := []unsafe.Pointer{value.kptr}
	key := idx0.key(buf, partptrs)
	if key == "" { // i.e. (!allow_zero && pkey == zero)
		free_timeline_item(t_item)
		free_buffer(buf)
		return last
	}

	// Convert to indexed_item pointer.
	i_item := from_timeline_item(t_item)

	if last == nil {
		// No previous element was provided, this is
		// first insert, we need to work from head.

		// Check for empty head.
		if t.list.head == nil {

			// The easiest case, this will
			// be the first item in list.
			t.list.push_front(&t_item.elem)
			last = t.list.head // return value
			goto indexing
		}

		// Extract head item and its primary key.
		headItem := (*timeline_item)(t.list.head.data)
		headPK := *(*PK)(headItem.pk)
		if value.k > headPK {

			// Another easier case, this also
			// will be the first item in list.
			t.list.push_front(&t_item.elem)
			last = t.list.head // return value
			goto indexing
		}

		// Check (and drop) if pkey is a collision!
		if value.k == headPK && is_unique(idx0.flags) {
			free_timeline_item(t_item)
			free_buffer(buf)
			return t.list.head
		}

		// Set last = head.next
		// as next to work from.
		last = t.list.head.next
	}

	// Iterate through list from head
	// to find location. Optimized into two
	// cases to minimize loop CPU cycles.
	if is_unique(idx0.flags) {
		for next := last; //
		next != nil; next = next.next {

			// Extract item and its primary key.
			nextItem := (*timeline_item)(next.data)
			nextPK := *(*PK)(nextItem.pk)

			// If pkey smaller than
			// cursor's, keep going.
			if value.k < nextPK {
				continue
			}

			// Check (and drop) if
			// pkey is a collision!
			if value.k == nextPK {
				free_timeline_item(t_item)
				free_buffer(buf)
				return next
			}

			// New pkey is larger than cursor,
			// insert into list just before it.
			t.list.insert(&t_item.elem, next.prev)
			last = next // return value
			goto indexing
		}
	} else {
		for next := last; //
		next != nil; next = next.next {

			// Extract item and its primary key.
			nextItem := (*timeline_item)(next.data)
			nextPK := *(*PK)(nextItem.pk)

			// If pkey smaller than
			// cursor's, keep going.
			if value.k < nextPK {
				continue
			}

			// New pkey is larger than cursor,
			// insert into list just before it.
			t.list.insert(&t_item.elem, next.prev)
			last = next // return value
			goto indexing
		}
	}

	// We reached the end of the
	// list, insert at tail pos.
	t.list.push_back(&t_item.elem)
	last = t.list.tail // return value
	goto indexing

indexing:
	// Set new head / tail
	// primary key values.
	switch last {
	case t.list.head:
		atomic.StorePointer(&t.headPK, value.kptr)
	case t.list.tail:
		atomic.StorePointer(&t.tailPK, value.kptr)
	}

	// Append already-extracted
	// primary key to 0th index.
	_ = idx0.add(key, i_item)

	// Insert item into each of indices.
	for i := 1; i < len(t.indices); i++ {

		// Get current index ptr.
		idx := (&t.indices[i])

		// Extract fields comprising index key from value.
		parts := extract_fields(value.vptr, idx.fields)

		// Calculate this index key,
		// checking for zero values.
		key := idx.key(buf, parts)
		if key == "" {
			continue
		}

		// Add this item to index,
		// checking for collisions.
		if !idx.add(key, i_item) {

			// This key already appears
			// in this unique index. So
			// drop new timeline item.
			t.delete(t_item)
			free_buffer(buf)
			return last
		}
	}

	// Done with bufs.
	free_buffer(buf)
	return last
}
-
-func (t *Timeline[T, PK]) delete(i *timeline_item) {
- for len(i.indexed) > 0 {
- // Pop last indexed entry from list.
- entry := i.indexed[len(i.indexed)-1]
- i.indexed[len(i.indexed)-1] = nil
- i.indexed = i.indexed[:len(i.indexed)-1]
-
- // Get entry's index.
- index := entry.index
-
- // Drop this index_entry.
- index.delete_entry(entry)
- }
-
- // Drop from main list.
- t.list.remove(&i.elem)
-
- // Free unused item.
- free_timeline_item(i)
-}
-
// timeline_item extends indexed_item with a cached pointer
// to its primary key value, plus check bits used to verify
// pointer casts between the two types (see to_timeline_item).
type timeline_item struct {
	indexed_item

	// retains fast ptr access
	// to primary key value of
	// above indexed_item{}.data
	pk unsafe.Pointer

	// check bits always all set
	// to 1. used to ensure cast
	// from indexed_item to this
	// type was originally a
	// timeline_item to begin with.
	ck uint
}
-
// init asserts the memory-layout invariant the unsafe casts below depend on.
func init() {
	// ensure the embedded indexed_item struct is ALWAYS at zero offset.
	// we rely on this to allow a ptr to one to be a ptr to either of them.
	const off = unsafe.Offsetof(timeline_item{}.indexed_item)
	if off != 0 {
		panic(assert("offset_of(timeline_item{}.indexed_item) = 0"))
	}
}
-
// from_timeline_item converts a timeline_item ptr to indexed_item, given the above init() guarantee.
func from_timeline_item(item *timeline_item) *indexed_item {
	return (*indexed_item)(unsafe.Pointer(item))
}
-
// to_timeline_item converts an indexed_item ptr to timeline_item, given the above init() guarantee.
// NOTE THIS MUST BE AN indexed_item THAT WAS INITIALLY CONVERTED WITH from_timeline_item().
func to_timeline_item(item *indexed_item) *timeline_item {
	to := (*timeline_item)(unsafe.Pointer(item))
	if to.ck != ^uint(0) {
		// ensure check bits set, indicating
		// it was a timeline_item originally.
		panic(assert("t.ck = ^uint(0)"))
	}
	return to
}
-
// timeline_item_pool is a memory pool of reusable timeline_item allocations.
var timeline_item_pool mempool.UnsafePool
-
-// new_timeline_item returns a new prepared timeline_item.
-func new_timeline_item() *timeline_item {
- if ptr := timeline_item_pool.Get(); ptr != nil {
- return (*timeline_item)(ptr)
- }
- item := new(timeline_item)
- item.elem.data = unsafe.Pointer(item)
- item.ck = ^uint(0)
- return item
-}
-
// free_timeline_item releases the timeline_item.
// If the item still appears to be in use (indexed
// entries remain, or still linked into a list) it
// logs to stderr and is NOT returned to the pool.
func free_timeline_item(item *timeline_item) {
	if len(item.indexed) > 0 ||
		item.elem.next != nil ||
		item.elem.prev != nil {
		msg := assert("item not in use")
		os.Stderr.WriteString(msg + "\n")
		return
	}
	// Zero data refs so pooled
	// item doesn't retain them.
	item.data = nil
	item.pk = nil
	ptr := unsafe.Pointer(item)
	timeline_item_pool.Put(ptr)
}