Diffstat (limited to 'vendor/codeberg.org/gruf/go-structr')
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/README.md    82
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/cache.go    471
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/hash.go     404
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/hash_32.go   14
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/hash_48.go   21
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/hash_64.go   14
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/index.go    428
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/item.go      59
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/key.go       58
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/list.go      78
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/queue.go    306
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/result.go    78
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/runtime.go   53
13 files changed, 988 insertions, 1078 deletions
diff --git a/vendor/codeberg.org/gruf/go-structr/README.md b/vendor/codeberg.org/gruf/go-structr/README.md
index 60f398085..c8e1585b3 100644
--- a/vendor/codeberg.org/gruf/go-structr/README.md
+++ b/vendor/codeberg.org/gruf/go-structr/README.md
@@ -1,10 +1,11 @@
# go-structr
-A performant struct caching library with automated indexing by arbitrary combinations of fields, including support for negative results (errors!). An example use case is in database lookups.
+A library providing a series of performant data types with automated struct value indexing. Indexing is supported via arbitrary combinations of fields, and in the case of the cache type, negative results (errors!) are also supported.
-Under the hood, go-structr maintains a hashmap per index, where each hashmap is a hashmap keyed with either 32bit, 48bit or 64bit (default) hash checksum of the inputted raw index keys. The hash checksum size can be controlled by the following Go build-tags: `structr_32bit_hash` `structr_48bit_hash`
+Under the hood, go-structr maintains a hashmap per index, where each is keyed by a serialized form of the input key. Serialization is handled by the incredibly performant library [go-mangler](https://codeberg.org/gruf/go-mangler), which at this point in time supports just about **any** arbitrary type, so feel free to index by *anything*!
+
+## Cache example
-Some example code of how you can use `go-structr` in your application:
```golang
type Cached struct {
Username string
@@ -15,7 +16,7 @@ type Cached struct {
var c structr.Cache[*Cached]
-c.Init(structr.Config[*Cached]{
+c.Init(structr.CacheConfig[*Cached]{
// Fields this cached struct type
// will be indexed and stored under.
@@ -32,7 +33,7 @@ c.Init(structr.Config[*Cached]{
// User provided value copy function to
// reduce need for reflection + ensure
// concurrency safety for returned values.
- CopyValue: func(c *Cached) *Cached {
+ Copy: func(c *Cached) *Cached {
c2 := new(Cached)
*c2 = *c
return c2
@@ -44,15 +45,23 @@ c.Init(structr.Config[*Cached]{
},
})
+// Access and store indexes ahead-of-time for perf.
+usernameDomainIndex := c.Index("Username,Domain")
+urlIndex := c.Index("URL")
+countryCodeIndex := c.Index("CountryCode")
+
var url string
+// Generate URL index key.
+urlKey := urlIndex.Key(url)
+
// Load value from cache, with callback function to hydrate
// cache if value cannot be found under index name with key.
// Negative (error) results are also cached, with user definable
// errors to ignore from caching (e.g. context deadline errs).
-value, err := c.LoadOne("URL", func() (*Cached, error) {
+value, err := c.LoadOne(urlIndex, urlKey, func() (*Cached, error) {
return dbType.SelectByURL(url)
-}, url)
+})
if err != nil {
return nil, err
}
@@ -69,9 +78,66 @@ if err := c.Store(value, func() error {
return nil, err
}
+// Generate country code index key.
+countryCodeKey := countryCodeIndex.Key(42)
+
// Invalidate all cached results stored under
// provided index name with give field value(s).
-c.Invalidate("CountryCode", 42)
+c.Invalidate(countryCodeIndex, countryCodeKey)
```
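
Multiple keys can also be fetched or loaded in a single call. A minimal sketch continuing the cache example above; the batch lookup `dbType.SelectByURLs()` is hypothetical:

```golang
// Generate keys for several URLs at once.
urlKeys := urlIndex.Keys(
	[]any{"https://some.website.here"},
	[]any{"https://other.website.here"},
)

// Fetch whatever is currently cached under these keys.
cached := c.Get(urlIndex, urlKeys...)

// Or load: keys with no cached value are passed to the
// callback, and its results further hydrate the cache.
values, err := c.Load(urlIndex, urlKeys, func(uncached []structr.Key) ([]*Cached, error) {
	// (hypothetical batch database lookup)
	return dbType.SelectByURLs(uncached)
})
```
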
+## Queue example
+
+```golang
+
+type Queued struct {
+ Username string
+ Domain string
+ URL string
+ CountryCode int
+}
+
+var q structr.Queue[*Queued]
+
+q.Init(structr.QueueConfig[*Queued]{
+
+ // Fields this queued struct type
+ // will be indexed and stored under.
+ Indices: []structr.IndexConfig{
+ {Fields: "Username,Domain", AllowZero: true},
+ {Fields: "URL"},
+ {Fields: "CountryCode", Multiple: true},
+ },
+
+ // User defined pop hook.
+ Pop: func(queued *Queued) {
+ log.Println("popped:", queued)
+ },
+})
+
+// Access and store indexes ahead-of-time for perf.
+usernameDomainIndex := q.Index("Username,Domain")
+urlIndex := q.Index("URL")
+countryCodeIndex := q.Index("CountryCode")
+
+// ...
+q.PushBack(&Queued{
+ Username: "billybob",
+ Domain: "google.com",
+ URL: "https://some.website.here",
+ CountryCode: 42,
+})
+
+// ...
+queued, ok := q.PopFront()
+
+// Generate country code index key.
+countryCodeKey := countryCodeIndex.Key(42)
+
+// ...
+queuedByCountry := q.Pop(countryCodeIndex, countryCodeKey)
+```
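
Values can also be popped in batches from either end of the queue, or moved by key; a short sketch reusing `q`, `countryCodeIndex` and `countryCodeKey` from the example above:

```golang
// Pop up to 10 values from the front of the queue.
batch := q.PopFrontN(10)

// Move any values indexed under the country code
// key back to the front of the queue.
q.MoveFront(countryCodeIndex, countryCodeKey)
```
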
+
+## Notes
+
This is a core underpinning of [GoToSocial](https://github.com/superseriousbusiness/gotosocial)'s performance. \ No newline at end of file
diff --git a/vendor/codeberg.org/gruf/go-structr/cache.go b/vendor/codeberg.org/gruf/go-structr/cache.go
index 690292df4..7b0772acf 100644
--- a/vendor/codeberg.org/gruf/go-structr/cache.go
+++ b/vendor/codeberg.org/gruf/go-structr/cache.go
@@ -3,7 +3,9 @@ package structr
import (
"context"
"errors"
+ "reflect"
"sync"
+ "unsafe"
)
// DefaultIgnoreErr is the default function used to
@@ -14,9 +16,9 @@ func DefaultIgnoreErr(err error) bool {
errors.Is(err, context.DeadlineExceeded)
}
-// Config defines config variables
+// CacheConfig defines config vars
// for initializing a struct cache.
-type Config[StructType any] struct {
+type CacheConfig[StructType any] struct {
// Indices defines indices to create
// in the Cache for the receiving
@@ -24,8 +26,8 @@ type Config[StructType any] struct {
Indices []IndexConfig
// MaxSize defines the maximum number
- // of results allowed in the Cache at
- // one time, before old results start
+ // of items allowed in the Cache at
+ // one time, before old items start
// getting evicted.
MaxSize int
@@ -36,10 +38,10 @@ type Config[StructType any] struct {
// DefaultIgnoreErr will be used.
IgnoreErr func(error) bool
- // CopyValue provides a means of copying
+ // Copy provides a means of copying
// cached values, to ensure returned values
// do not share memory with those in cache.
- CopyValue func(StructType) StructType
+ Copy func(StructType) StructType
// Invalidate is called when cache values
// (NOT errors) are invalidated, either
@@ -50,19 +52,17 @@ type Config[StructType any] struct {
// Cache provides a structure cache with automated
// indexing and lookups by any initialization-defined
-// combination of fields (as long as serialization is
-// supported by codeberg.org/gruf/go-mangler). This
-// also supports caching of negative results by errors
-// returned from the LoadOne() series of functions.
+// combination of fields. This also supports caching
+// of negative results (errors!) returned by LoadOne().
type Cache[StructType any] struct {
// indices used in storing passed struct
// types by user defined sets of fields.
- indices []Index[StructType]
+ indices []Index
- // keeps track of all indexed results,
+ // keeps track of all indexed items,
// in order of last recently used (LRU).
- lruList list
+ lru list
// max cache size, imposes size
// limit on the lruList in order
@@ -83,7 +83,9 @@ type Cache[StructType any] struct {
// Init initializes the cache with given configuration
// including struct fields to index, and necessary fns.
-func (c *Cache[T]) Init(config Config[T]) {
+func (c *Cache[T]) Init(config CacheConfig[T]) {
+ t := reflect.TypeOf((*T)(nil)).Elem()
+
if len(config.Indices) == 0 {
panic("no indices provided")
}
@@ -92,8 +94,8 @@ func (c *Cache[T]) Init(config Config[T]) {
config.IgnoreErr = DefaultIgnoreErr
}
- if config.CopyValue == nil {
- panic("copy value function must be provided")
+ if config.Copy == nil {
+ panic("copy function must be provided")
}
if config.MaxSize < 2 {
@@ -103,19 +105,20 @@ func (c *Cache[T]) Init(config Config[T]) {
// Safely copy over
// provided config.
c.mutex.Lock()
- c.indices = make([]Index[T], len(config.Indices))
+ c.indices = make([]Index, len(config.Indices))
for i, cfg := range config.Indices {
- init_index(&c.indices[i], cfg, config.MaxSize)
+ c.indices[i].ptr = unsafe.Pointer(c)
+ c.indices[i].init(t, cfg, config.MaxSize)
}
c.ignore = config.IgnoreErr
- c.copy = config.CopyValue
+ c.copy = config.Copy
c.invalid = config.Invalidate
c.maxSize = config.MaxSize
c.mutex.Unlock()
}
// Index selects index with given name from cache, else panics.
-func (c *Cache[T]) Index(name string) *Index[T] {
+func (c *Cache[T]) Index(name string) *Index {
for i := range c.indices {
if c.indices[i].name == name {
return &c.indices[i]
@@ -124,20 +127,9 @@ func (c *Cache[T]) Index(name string) *Index[T] {
panic("unknown index: " + name)
}
-// GetOne fetches one value from the cache stored under index, using key generated from key parts.
-// Note that given number of key parts MUST match expected number and types of the given index name.
-func (c *Cache[T]) GetOne(index string, key ...any) (T, bool) {
- return c.GetOneBy(c.Index(index), key...)
-}
-
-// GetOneBy fetches value from cache stored under index, using precalculated index key.
-func (c *Cache[T]) GetOneBy(index *Index[T], key ...any) (T, bool) {
- if index == nil {
- panic("no index given")
- } else if !is_unique(index.flags) {
- panic("cannot get one by non-unique index")
- }
- values := c.GetBy(index, key)
+// GetOne fetches value from cache stored under index, using precalculated index key.
+func (c *Cache[T]) GetOne(index *Index, key Key) (T, bool) {
+ values := c.Get(index, key)
if len(values) == 0 {
var zero T
return zero, false
@@ -145,20 +137,16 @@ func (c *Cache[T]) GetOneBy(index *Index[T], key ...any) (T, bool) {
return values[0], true
}
-// Get fetches values from the cache stored under index, using keys generated from given key parts.
-// Note that each number of key parts MUST match expected number and types of the given index name.
-func (c *Cache[T]) Get(index string, keys ...[]any) []T {
- return c.GetBy(c.Index(index), keys...)
-}
-
-// GetBy fetches values from the cache stored under index, using precalculated index keys.
-func (c *Cache[T]) GetBy(index *Index[T], keys ...[]any) []T {
+// Get fetches values from the cache stored under index, using precalculated index keys.
+func (c *Cache[T]) Get(index *Index, keys ...Key) []T {
if index == nil {
panic("no index given")
+ } else if index.ptr != unsafe.Pointer(c) {
+ panic("invalid index for cache")
}
- // Acquire hasher.
- h := get_hasher()
+ // Preallocate expected ret slice.
+ values := make([]T, 0, len(keys))
// Acquire lock.
c.mutex.Lock()
@@ -169,61 +157,31 @@ func (c *Cache[T]) GetBy(index *Index[T], keys ...[]any) []T {
panic("not initialized")
}
- // Preallocate expected ret slice.
- values := make([]T, 0, len(keys))
-
- for _, key := range keys {
-
- // Generate sum from provided key.
- sum, ok := index_hash(index, h, key)
- if !ok {
- continue
- }
-
- // Get indexed results list at key.
- list := index_get(index, sum, key)
- if list == nil {
- continue
- }
-
- // Concatenate all *values* from non-err cached results.
- list_rangefn(list, func(e *list_elem) {
- entry := (*index_entry)(e.data)
- res := entry.result
-
- switch value := res.data.(type) {
- case T:
+ for i := range keys {
+ // Concatenate all *values* from cached items.
+ index.get(keys[i], func(item *indexed_item) {
+ if value, ok := item.data.(T); ok {
// Append value COPY.
value = c.copy(value)
values = append(values, value)
- case error:
- // Don't bump
- // for errors.
- return
+ // Push to front of LRU list, USING
+ // THE ITEM'S LRU ENTRY, NOT THE
+ // INDEX KEY ENTRY. VERY IMPORTANT!!
+ c.lru.move_front(&item.elem)
}
-
- // Push to front of LRU list, USING
- // THE RESULT'S LRU ENTRY, NOT THE
- // INDEX KEY ENTRY. VERY IMPORTANT!!
- list_move_front(&c.lruList, &res.elem)
})
}
// Done with lock.
c.mutex.Unlock()
- // Done with h.
- hash_pool.Put(h)
-
return values
}
// Put will insert the given values into cache,
// calling any invalidate hook on each value.
func (c *Cache[T]) Put(values ...T) {
- var z Hash
-
// Acquire lock.
c.mutex.Lock()
@@ -236,9 +194,13 @@ func (c *Cache[T]) Put(values ...T) {
panic("not initialized")
}
- // Store all the passed values.
- for _, value := range values {
- c.store_value(nil, z, nil, value)
+ // Store all passed values.
+ for i := range values {
+ c.store_value(
+ nil,
+ Key{},
+ values[i],
+ )
}
// Done with lock.
@@ -253,43 +215,29 @@ func (c *Cache[T]) Put(values ...T) {
}
}
-// LoadOne fetches one result from the cache stored under index, using key generated from key parts.
-// In the case that no result is found, the provided load callback will be used to hydrate the cache.
-// Note that given number of key parts MUST match expected number and types of the given index name.
-func (c *Cache[T]) LoadOne(index string, load func() (T, error), key ...any) (T, error) {
- return c.LoadOneBy(c.Index(index), load, key...)
-}
-
// LoadOne fetches one result from the cache stored under index, using precalculated index key.
// In the case that no result is found, provided load callback will be used to hydrate the cache.
-func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key ...any) (T, error) {
+func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, error) {
if index == nil {
panic("no index given")
+ } else if index.ptr != unsafe.Pointer(c) {
+ panic("invalid index for cache")
} else if !is_unique(index.flags) {
panic("cannot get one by non-unique index")
}
var (
- // whether a result was found
+ // whether an item was found
// (and so val / err are set).
ok bool
// separate value / error ptrs
- // as the result is liable to
+ // as the item is liable to
// change outside of lock.
val T
err error
)
- // Acquire hasher.
- h := get_hasher()
-
- // Generate sum from provided key.
- sum, _ := index_hash(index, h, key)
-
- // Done with h.
- hash_pool.Put(h)
-
// Acquire lock.
c.mutex.Lock()
@@ -303,33 +251,30 @@ func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key ...any
panic("not initialized")
}
- // Get indexed list at hash key.
- list := index_get(index, sum, key)
+ // Get item indexed at key.
+ item := index.get_one(key)
- if ok = (list != nil); ok {
- entry := (*index_entry)(list.head.data)
- res := entry.result
+ if ok = (item != nil); ok {
+ if value, is := item.data.(T); is {
+ // Set value COPY.
+ val = c.copy(value)
+
+ // Push to front of LRU list, USING
+ // THE ITEM'S LRU ENTRY, NOT THE
+ // INDEX KEY ENTRY. VERY IMPORTANT!!
+ c.lru.move_front(&item.elem)
- switch data := res.data.(type) {
- case T:
- // Return value COPY.
- val = c.copy(data)
- case error:
+ } else if error, is := item.data.(error); is {
// Return error.
- err = data
+ err = error
}
-
- // Push to front of LRU list, USING
- // THE RESULT'S LRU ENTRY, NOT THE
- // INDEX KEY ENTRY. VERY IMPORTANT!!
- list_move_front(&c.lruList, &res.elem)
}
// Done with lock.
c.mutex.Unlock()
if ok {
- // result found!
+ // item found!
return val, err
}
@@ -345,14 +290,14 @@ func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key ...any
// Acquire lock.
c.mutex.Lock()
- // Index this new loaded result.
+ // Index this new loaded item.
// Note this handles copying of
// the provided value, so it is
// safe for us to return as-is.
if err != nil {
- c.store_error(index, sum, key, err)
+ c.store_error(index, key, err)
} else {
- c.store_value(index, sum, key, val)
+ c.store_value(index, key, val)
}
// Done with lock.
@@ -361,29 +306,18 @@ func (c *Cache[T]) LoadOneBy(index *Index[T], load func() (T, error), key ...any
return val, err
}
-// Load fetches values from the cache stored under index, using keys generated from given key parts. The provided get callback is used
-// to load groups of values from the cache by the key generated from the key parts provided to the inner callback func, where the returned
-// boolean indicates whether any values are currently stored. After the get callback has returned, the cache will then call provided load
-// callback to hydrate the cache with any other values. Example usage here is that you may see which values are cached using 'get', and load
-// the remaining uncached values using 'load', to minimize database queries. Cached error results are not included or returned by this func.
-// Note that given number of key parts MUST match expected number and types of the given index name, in those provided to the get callback.
-func (c *Cache[T]) Load(index string, get func(load func(key ...any) bool), load func() ([]T, error)) (values []T, err error) {
- return c.LoadBy(c.Index(index), get, load)
-}
-
-// LoadBy fetches values from the cache stored under index, using precalculated index key. The provided get callback is used to load
-// groups of values from the cache by the key generated from the key parts provided to the inner callback func, where the returned boolea
-// indicates whether any values are currently stored. After the get callback has returned, the cache will then call provided load callback
-// to hydrate the cache with any other values. Example usage here is that you may see which values are cached using 'get', and load the
-// remaining uncached values using 'load', to minimize database queries. Cached error results are not included or returned by this func.
-// Note that given number of key parts MUST match expected number and types of the given index name, in those provided to the get callback.
-func (c *Cache[T]) LoadBy(index *Index[T], get func(load func(key ...any) bool), load func() ([]T, error)) (values []T, err error) {
+// Load fetches values from the cache stored under index, using precalculated index keys. The cache will attempt to
+// satisfy results with values stored under keys, passing keys with uncached results to the provided load callback to
+// further hydrate the cache with missing results. Cached error results are not included or returned by this function.
+func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error)) ([]T, error) {
if index == nil {
panic("no index given")
+ } else if index.ptr != unsafe.Pointer(c) {
+ panic("invalid index for cache")
}
- // Acquire hasher.
- h := get_hasher()
+ // Preallocate expected ret slice.
+ values := make([]T, 0, len(keys))
// Acquire lock.
c.mutex.Lock()
@@ -394,71 +328,45 @@ func (c *Cache[T]) LoadBy(index *Index[T], get func(load func(key ...any) bool),
panic("not initialized")
}
- var unlocked bool
- defer func() {
- // Deferred unlock to catch
- // any user function panics.
- if !unlocked {
- c.mutex.Unlock()
- }
- }()
-
- // Pass loader to user func.
- get(func(key ...any) bool {
-
- // Generate sum from provided key.
- sum, ok := index_hash(index, h, key)
- if !ok {
- return false
- }
-
- // Get indexed results at hash key.
- list := index_get(index, sum, key)
- if list == nil {
- return false
- }
-
+ for i := 0; i < len(keys); {
// Value length before
// any below appends.
before := len(values)
- // Concatenate all *values* from non-err cached results.
- list_rangefn(list, func(e *list_elem) {
- entry := (*index_entry)(e.data)
- res := entry.result
-
- switch value := res.data.(type) {
- case T:
+ // Concatenate all *values* from cached items.
+ index.get(keys[i], func(item *indexed_item) {
+ if value, ok := item.data.(T); ok {
// Append value COPY.
value = c.copy(value)
values = append(values, value)
- case error:
- // Don't bump
- // for errors.
- return
+ // Push to front of LRU list, USING
+ // THE ITEM'S LRU ENTRY, NOT THE
+ // INDEX KEY ENTRY. VERY IMPORTANT!!
+ c.lru.move_front(&item.elem)
}
-
- // Push to front of LRU list, USING
- // THE RESULT'S LRU ENTRY, NOT THE
- // INDEX KEY ENTRY. VERY IMPORTANT!!
- list_move_front(&c.lruList, &res.elem)
})
// Only if values changed did
// we actually find anything.
- return len(values) != before
- })
+ if len(values) != before {
+
+ // We found values at key,
+ // drop key from the slice.
+ copy(keys[i:], keys[i+1:])
+ keys = keys[:len(keys)-1]
+ continue
+ }
+
+ // Iter
+ i++
+ }
// Done with lock.
c.mutex.Unlock()
- unlocked = true
-
- // Done with h.
- hash_pool.Put(h)
// Load uncached values.
- uncached, err := load()
+ uncached, err := load(keys)
if err != nil {
return nil, err
}
@@ -469,7 +377,7 @@ func (c *Cache[T]) LoadBy(index *Index[T], get func(load func(key ...any) bool),
// Append uncached to return values.
values = append(values, uncached...)
- return
+ return values, nil
}
// Store will call the given store callback, on non-error then
@@ -478,8 +386,8 @@ func (c *Cache[T]) LoadBy(index *Index[T], get func(load func(key ...any) bool),
func (c *Cache[T]) Store(value T, store func() error) error {
// Store value.
err := store()
-
if err != nil {
+
// Get func ptrs.
c.mutex.Lock()
invalid := c.invalid
@@ -501,51 +409,39 @@ func (c *Cache[T]) Store(value T, store func() error) error {
return nil
}
-// Invalidate generates index key from parts and invalidates all stored under it.
-func (c *Cache[T]) Invalidate(index string, key ...any) {
- c.InvalidateBy(c.Index(index), key...)
-}
-
-// InvalidateBy invalidates all results stored under index key.
-func (c *Cache[T]) InvalidateBy(index *Index[T], key ...any) {
+// Invalidate invalidates all results stored under index keys.
+func (c *Cache[T]) Invalidate(index *Index, keys ...Key) {
if index == nil {
panic("no index given")
+ } else if index.ptr != unsafe.Pointer(c) {
+ panic("invalid index for cache")
}
- // Acquire hasher.
- h := get_hasher()
-
- // Generate sum from provided key.
- sum, ok := index_hash(index, h, key)
+ // Acquire lock.
+ c.mutex.Lock()
- // Done with h.
- hash_pool.Put(h)
+ // Preallocate expected ret slice.
+ values := make([]T, 0, len(keys))
- if !ok {
- return
- }
+ for i := range keys {
+ // Delete all items under key from index, collecting
+ // value items and dropping them from all their indices.
+ index.delete(keys[i], func(item *indexed_item) {
- var values []T
+ if value, ok := item.data.(T); ok {
+ // No need to copy, as item
+ // being deleted from cache.
+ values = append(values, value)
+ }
- // Acquire lock.
- c.mutex.Lock()
+ // Delete cached.
+ c.delete(item)
+ })
+ }
// Get func ptrs.
invalid := c.invalid
- // Delete all results under key from index, collecting
- // value results and dropping them from all their indices.
- index_delete(c, index, sum, key, func(del *result) {
- switch value := del.data.(type) {
- case T:
- // Append value COPY.
- value = c.copy(value)
- values = append(values, value)
- case error:
- }
- c.delete(del)
- })
-
// Done with lock.
c.mutex.Unlock()
@@ -566,29 +462,30 @@ func (c *Cache[T]) Trim(perc float64) {
// Calculate number of cache items to drop.
max := (perc / 100) * float64(c.maxSize)
- diff := c.lruList.len - int(max)
-
+ diff := c.lru.len - int(max)
if diff <= 0 {
+
// Trim not needed.
c.mutex.Unlock()
return
}
- // Iterate over 'diff' results
+ // Iterate over 'diff' items
// from back (oldest) of cache.
for i := 0; i < diff; i++ {
- // Get oldest LRU element.
- oldest := c.lruList.tail
-
+ // Get oldest LRU elem.
+ oldest := c.lru.tail
if oldest == nil {
- // reached end.
+
+ // reached
+ // end.
break
}
- // Drop oldest from cache.
- res := (*result)(oldest.data)
- c.delete(res)
+ // Drop oldest item from cache.
+ item := (*indexed_item)(oldest.data)
+ c.delete(item)
}
// Done with lock.
@@ -601,7 +498,7 @@ func (c *Cache[T]) Clear() { c.Trim(0) }
// Len returns the current length of cache.
func (c *Cache[T]) Len() int {
c.mutex.Lock()
- l := c.lruList.len
+ l := c.lru.len
c.mutex.Unlock()
return l
}
@@ -614,95 +511,99 @@ func (c *Cache[T]) Cap() int {
return m
}
-func (c *Cache[T]) store_value(index *Index[T], hash Hash, key []any, value T) {
- // Acquire new result.
- res := result_acquire(c)
-
- if index != nil {
- // Append result to the provided index
- // with precalculated key / its hash.
- index_append(c, index, hash, key, res)
- }
+func (c *Cache[T]) store_value(index *Index, key Key, value T) {
+ // Alloc new index item.
+ item := new_indexed_item()
// Create COPY of value.
value = c.copy(value)
- res.data = value
+ item.data = value
- // Acquire hasher.
- h := get_hasher()
+ if index != nil {
+ // Append item to index.
+ index.append(key, item)
+ }
+
+ // Acquire key buf.
+ buf := new_buffer()
for i := range c.indices {
// Get current index ptr.
idx := &(c.indices[i])
-
if idx == index {
+
// Already stored under
// this index, ignore.
continue
}
- // Get key and hash sum for this index.
- key, sum, ok := index_key(idx, h, value)
- if !ok {
+ // Extract fields comprising index key.
+ parts := extract_fields(value, idx.fields)
+
+ // Calculate index key.
+ key := idx.key(buf, parts)
+ if key.Zero() {
continue
}
- // Append result to index at key.
- index_append(c, idx, sum, key, res)
+ // Append item to index.
+ idx.append(key, item)
}
- // Done with h.
- hash_pool.Put(h)
+ // Add item to main lru list.
+ c.lru.push_front(&item.elem)
- if c.lruList.len > c.maxSize {
+ // Done with buf.
+ free_buffer(buf)
+
+ if c.lru.len > c.maxSize {
// Cache has hit max size!
// Drop the oldest element.
- ptr := c.lruList.tail.data
- res := (*result)(ptr)
- c.delete(res)
+ ptr := c.lru.tail.data
+ item := (*indexed_item)(ptr)
+ c.delete(item)
}
}
-func (c *Cache[T]) store_error(index *Index[T], hash Hash, key []any, err error) {
+func (c *Cache[T]) store_error(index *Index, key Key, err error) {
if index == nil {
// nothing we
// can do here.
return
}
- // Acquire new result.
- res := result_acquire(c)
- res.data = err
+ // Alloc new index item.
+ item := new_indexed_item()
+ item.data = err
+
+ // Append item to index.
+ index.append(key, item)
- // Append result to the provided index
- // with precalculated key / its hash.
- index_append(c, index, hash, key, res)
+ // Add item to main lru list.
+ c.lru.push_front(&item.elem)
- if c.lruList.len > c.maxSize {
+ if c.lru.len > c.maxSize {
// Cache has hit max size!
// Drop the oldest element.
- ptr := c.lruList.tail.data
- res := (*result)(ptr)
- c.delete(res)
+ ptr := c.lru.tail.data
+ item := (*indexed_item)(ptr)
+ c.delete(item)
}
}
-// delete will delete the given result from the cache, deleting
-// it from all indices it is stored under, and main LRU list.
-func (c *Cache[T]) delete(res *result) {
- for len(res.indexed) != 0 {
-
+func (c *Cache[T]) delete(item *indexed_item) {
+ for len(item.indexed) != 0 {
// Pop last indexed entry from list.
- entry := res.indexed[len(res.indexed)-1]
- res.indexed = res.indexed[:len(res.indexed)-1]
+ entry := item.indexed[len(item.indexed)-1]
+ item.indexed = item.indexed[:len(item.indexed)-1]
- // Drop entry from index.
- index_delete_entry(c, entry)
-
- // Release to memory pool.
- index_entry_release(entry)
+ // Drop index_entry from index.
+ entry.index.delete_entry(entry)
}
- // Release res to pool.
- result_release(c, res)
+ // Drop entry from lru list.
+ c.lru.remove(&item.elem)
+
+ // Free now-unused item.
+ free_indexed_item(item)
}
diff --git a/vendor/codeberg.org/gruf/go-structr/hash.go b/vendor/codeberg.org/gruf/go-structr/hash.go
deleted file mode 100644
index ffdc5f5f1..000000000
--- a/vendor/codeberg.org/gruf/go-structr/hash.go
+++ /dev/null
@@ -1,404 +0,0 @@
-package structr
-
-import (
- "reflect"
- "sync"
- "unsafe"
-
- "github.com/zeebo/xxh3"
-)
-
-var hash_pool sync.Pool
-
-func get_hasher() *xxh3.Hasher {
- v := hash_pool.Get()
- if v == nil {
- v = new(xxh3.Hasher)
- }
- return v.(*xxh3.Hasher)
-}
-
-func hash_sum(fields []structfield, h *xxh3.Hasher, key []any) (Hash, bool) {
- if len(key) != len(fields) {
- panicf("incorrect number key parts: want=%d received=%d",
- len(key),
- len(fields),
- )
- }
- var zero bool
- h.Reset()
- for i, part := range key {
- zero = fields[i].hasher(h, part) || zero
- }
- // See: https://github.com/Cyan4973/xxHash/issues/453#issuecomment-696838445
- //
- // In order to extract 32-bit from a good 64-bit hash result,
- // there are many possible choices, which are all valid.
- // I would typically grab the lower 32-bit and call it a day.
- //
- // Grabbing any other 32-bit (the upper part for example) is fine too.
- //
- // xoring higher and lower bits makes more sense whenever the produced hash offers dubious quality.
- // FNV, for example, has poor mixing in its lower bits, so it's better to mix with the higher bits.
- //
- // XXH3 already performs significant output mixing before returning the data,
- // so it's not beneficial to add another xorfold stage.
- return uint64ToHash(h.Sum64()), zero
-}
-
-func hasher(t reflect.Type) func(*xxh3.Hasher, any) bool {
- switch t.Kind() {
- case reflect.Int,
- reflect.Uint,
- reflect.Uintptr:
- switch unsafe.Sizeof(int(0)) {
- case 4:
- return hash32bit
- case 8:
- return hash64bit
- default:
- panic("unexpected platform int size")
- }
-
- case reflect.Int8,
- reflect.Uint8:
- return hash8bit
-
- case reflect.Int16,
- reflect.Uint16:
- return hash16bit
-
- case reflect.Int32,
- reflect.Uint32,
- reflect.Float32:
- return hash32bit
-
- case reflect.Int64,
- reflect.Uint64,
- reflect.Float64,
- reflect.Complex64:
- return hash64bit
-
- case reflect.String:
- return hashstring
-
- case reflect.Pointer:
- switch t.Elem().Kind() {
- case reflect.Int,
- reflect.Uint,
- reflect.Uintptr:
- switch unsafe.Sizeof(int(0)) {
- case 4:
- return hash32bitptr
- case 8:
- return hash64bitptr
- default:
- panic("unexpected platform int size")
- }
-
- case reflect.Int8,
- reflect.Uint8:
- return hash8bitptr
-
- case reflect.Int16,
- reflect.Uint16:
- return hash16bitptr
-
- case reflect.Int32,
- reflect.Uint32,
- reflect.Float32:
- return hash32bitptr
-
- case reflect.Int64,
- reflect.Uint64,
- reflect.Float64,
- reflect.Complex64:
- return hash64bitptr
-
- case reflect.String:
- return hashstringptr
- }
-
- case reflect.Slice:
- switch t.Elem().Kind() {
- case reflect.Int,
- reflect.Uint,
- reflect.Uintptr:
- switch unsafe.Sizeof(int(0)) {
- case 4:
- return hash32bitslice
- case 8:
- return hash64bitslice
- default:
- panic("unexpected platform int size")
- }
-
- case reflect.Int8,
- reflect.Uint8:
- return hash8bitslice
-
- case reflect.Int16,
- reflect.Uint16:
- return hash16bitslice
-
- case reflect.Int32,
- reflect.Uint32,
- reflect.Float32:
- return hash32bitslice
-
- case reflect.Int64,
- reflect.Uint64,
- reflect.Float64,
- reflect.Complex64:
- return hash64bitslice
-
- case reflect.String:
- return hashstringslice
- }
- }
- switch {
- case t.Implements(reflect.TypeOf((*interface{ MarshalBinary() ([]byte, error) })(nil)).Elem()):
- return hashbinarymarshaler
-
- case t.Implements(reflect.TypeOf((*interface{ Bytes() []byte })(nil)).Elem()):
- return hashbytesmethod
-
- case t.Implements(reflect.TypeOf((*interface{ String() string })(nil)).Elem()):
- return hashstringmethod
-
- case t.Implements(reflect.TypeOf((*interface{ MarshalText() ([]byte, error) })(nil)).Elem()):
- return hashtextmarshaler
-
- case t.Implements(reflect.TypeOf((*interface{ MarshalJSON() ([]byte, error) })(nil)).Elem()):
- return hashjsonmarshaler
- }
- panic("unhashable type")
-}
-
-func hash8bit(h *xxh3.Hasher, a any) bool {
- u := *(*uint8)(data_ptr(a))
- _, _ = h.Write([]byte{u})
- return u == 0
-}
-
-func hash8bitptr(h *xxh3.Hasher, a any) bool {
- u := (*uint8)(data_ptr(a))
- if u == nil {
- _, _ = h.Write([]byte{
- 0,
- })
- return true
- } else {
- _, _ = h.Write([]byte{
- 1,
- byte(*u),
- })
- return false
- }
-}
-
-func hash8bitslice(h *xxh3.Hasher, a any) bool {
- b := *(*[]byte)(data_ptr(a))
- _, _ = h.Write(b)
- return b == nil
-}
-
-func hash16bit(h *xxh3.Hasher, a any) bool {
- u := *(*uint16)(data_ptr(a))
- _, _ = h.Write([]byte{
- byte(u),
- byte(u >> 8),
- })
- return u == 0
-}
-
-func hash16bitptr(h *xxh3.Hasher, a any) bool {
- u := (*uint16)(data_ptr(a))
- if u == nil {
- _, _ = h.Write([]byte{
- 0,
- })
- return true
- } else {
- _, _ = h.Write([]byte{
- 1,
- byte(*u),
- byte(*u >> 8),
- })
- return false
- }
-}
-
-func hash16bitslice(h *xxh3.Hasher, a any) bool {
- u := *(*[]uint16)(data_ptr(a))
- for i := range u {
- _, _ = h.Write([]byte{
- byte(u[i]),
- byte(u[i] >> 8),
- })
- }
- return u == nil
-}
-
-func hash32bit(h *xxh3.Hasher, a any) bool {
- u := *(*uint32)(data_ptr(a))
- _, _ = h.Write([]byte{
- byte(u),
- byte(u >> 8),
- byte(u >> 16),
- byte(u >> 24),
- })
- return u == 0
-}
-
-func hash32bitptr(h *xxh3.Hasher, a any) bool {
- u := (*uint32)(data_ptr(a))
- if u == nil {
- _, _ = h.Write([]byte{
- 0,
- })
- return true
- } else {
- _, _ = h.Write([]byte{
- 1,
- byte(*u),
- byte(*u >> 8),
- byte(*u >> 16),
- byte(*u >> 24),
- })
- return false
- }
-}
-
-func hash32bitslice(h *xxh3.Hasher, a any) bool {
- u := *(*[]uint32)(data_ptr(a))
- for i := range u {
- _, _ = h.Write([]byte{
- byte(u[i]),
- byte(u[i] >> 8),
- byte(u[i] >> 16),
- byte(u[i] >> 24),
- })
- }
- return u == nil
-}
-
-func hash64bit(h *xxh3.Hasher, a any) bool {
- u := *(*uint64)(data_ptr(a))
- _, _ = h.Write([]byte{
- byte(u),
- byte(u >> 8),
- byte(u >> 16),
- byte(u >> 24),
- byte(u >> 32),
- byte(u >> 40),
- byte(u >> 48),
- byte(u >> 56),
- })
- return u == 0
-}
-
-func hash64bitptr(h *xxh3.Hasher, a any) bool {
- u := (*uint64)(data_ptr(a))
- if u == nil {
- _, _ = h.Write([]byte{
- 0,
- })
- return true
- } else {
- _, _ = h.Write([]byte{
- 1,
- byte(*u),
- byte(*u >> 8),
- byte(*u >> 16),
- byte(*u >> 24),
- byte(*u >> 32),
- byte(*u >> 40),
- byte(*u >> 48),
- byte(*u >> 56),
- })
- return false
- }
-}
-
-func hash64bitslice(h *xxh3.Hasher, a any) bool {
- u := *(*[]uint64)(data_ptr(a))
- for i := range u {
- _, _ = h.Write([]byte{
- byte(u[i]),
- byte(u[i] >> 8),
- byte(u[i] >> 16),
- byte(u[i] >> 24),
- byte(u[i] >> 32),
- byte(u[i] >> 40),
- byte(u[i] >> 48),
- byte(u[i] >> 56),
- })
- }
- return u == nil
-}
-
-func hashstring(h *xxh3.Hasher, a any) bool {
- s := *(*string)(data_ptr(a))
- _, _ = h.WriteString(s)
- return s == ""
-}
-
-func hashstringptr(h *xxh3.Hasher, a any) bool {
- s := (*string)(data_ptr(a))
- if s == nil {
- _, _ = h.Write([]byte{
- 0,
- })
- return true
- } else {
- _, _ = h.Write([]byte{
- 1,
- })
- _, _ = h.WriteString(*s)
- return false
- }
-}
-
-func hashstringslice(h *xxh3.Hasher, a any) bool {
- s := *(*[]string)(data_ptr(a))
- for i := range s {
- _, _ = h.WriteString(s[i])
- }
- return s == nil
-}
-
-func hashbinarymarshaler(h *xxh3.Hasher, a any) bool {
- i := a.(interface{ MarshalBinary() ([]byte, error) })
- b, _ := i.MarshalBinary()
- _, _ = h.Write(b)
- return b == nil
-}
-
-func hashbytesmethod(h *xxh3.Hasher, a any) bool {
- i := a.(interface{ Bytes() []byte })
- b := i.Bytes()
- _, _ = h.Write(b)
- return b == nil
-}
-
-func hashstringmethod(h *xxh3.Hasher, a any) bool {
- i := a.(interface{ String() string })
- s := i.String()
- _, _ = h.WriteString(s)
- return s == ""
-}
-
-func hashtextmarshaler(h *xxh3.Hasher, a any) bool {
- i := a.(interface{ MarshalText() ([]byte, error) })
- b, _ := i.MarshalText()
- _, _ = h.Write(b)
- return b == nil
-}
-
-func hashjsonmarshaler(h *xxh3.Hasher, a any) bool {
- i := a.(interface{ MarshalJSON() ([]byte, error) })
- b, _ := i.MarshalJSON()
- _, _ = h.Write(b)
- return b == nil
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/hash_32.go b/vendor/codeberg.org/gruf/go-structr/hash_32.go
deleted file mode 100644
index 883a3a174..000000000
--- a/vendor/codeberg.org/gruf/go-structr/hash_32.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build structr_32bit_hash
-// +build structr_32bit_hash
-
-package structr
-
-// Hash is the current compiler
-// flag defined cache key hash
-// checksum type. Here; uint32.
-type Hash uint32
-
-// uint64ToHash converts uint64 to currently Hash type.
-func uint64ToHash(u uint64) Hash {
- return Hash(u >> 32)
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/hash_48.go b/vendor/codeberg.org/gruf/go-structr/hash_48.go
deleted file mode 100644
index df4209e2f..000000000
--- a/vendor/codeberg.org/gruf/go-structr/hash_48.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build structr_48bit_hash
-// +build structr_48bit_hash
-
-package structr
-
-// Hash is the current compiler
-// flag defined cache key hash
-// checksum type. Here; uint48.
-type Hash [6]byte
-
-// uint64ToHash converts uint64 to currently Hash type.
-func uint64ToHash(u uint64) Hash {
- return Hash{
- 0: byte(u),
- 1: byte(u >> 8),
- 2: byte(u >> 16),
- 3: byte(u >> 24),
- 4: byte(u >> 32),
- 5: byte(u >> 40),
- }
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/hash_64.go b/vendor/codeberg.org/gruf/go-structr/hash_64.go
deleted file mode 100644
index 5462c340e..000000000
--- a/vendor/codeberg.org/gruf/go-structr/hash_64.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build !structr_32bit_hash && !structr_48bit_hash
-// +build !structr_32bit_hash,!structr_48bit_hash
-
-package structr
-
-// Hash is the current compiler
-// flag defined cache key hash
-// checksum type. Here; uint64.
-type Hash uint64
-
-// uint64ToHash converts uint64 to currently Hash type.
-func uint64ToHash(u uint64) Hash {
- return Hash(u)
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/index.go b/vendor/codeberg.org/gruf/go-structr/index.go
index e2768b3ca..c7115e0b4 100644
--- a/vendor/codeberg.org/gruf/go-structr/index.go
+++ b/vendor/codeberg.org/gruf/go-structr/index.go
@@ -6,7 +6,7 @@ import (
"sync"
"unsafe"
- "github.com/zeebo/xxh3"
+ "codeberg.org/gruf/go-byteutil"
)
// IndexConfig defines config variables
@@ -55,7 +55,12 @@ type IndexConfig struct {
// case that you would like to manually provide the used
// index via the Cache / Queue methods, or
// access the underlying index key generator.
-type Index[StructType any] struct {
+type Index struct {
+
+ // ptr is a pointer to
+ // the source Cache/Queue
+ // index is attached to.
+ ptr unsafe.Pointer
// name is the actual name of this
// index, which is the unparsed
@@ -67,11 +72,11 @@ type Index[StructType any] struct {
// index_entry{} which also contains the exact
// key each result is stored under. the hash map
// is keyed by the serialized input key string.
- data map[Hash]*list //[*index_entry[StructType]]
+ data map[string]*list // [*index_entry]
// struct fields encompassed by
// keys (+ hashes) of this index.
- fields []structfield
+ fields []struct_field
// index flags:
// - 1 << 0 = unique
@@ -79,55 +84,68 @@ type Index[StructType any] struct {
flags uint8
}
-// Key returns the configured fields as key, and hash sum of key.
-func (i *Index[T]) Key(value T) ([]any, Hash, bool) {
- h := get_hasher()
- key, sum, ok := index_key(i, h, value)
- hash_pool.Put(h)
- return key, sum, ok
+// Name returns the receiving Index name.
+func (i *Index) Name() string {
+ return i.name
}
-func is_unique(f uint8) bool {
- const mask = uint8(1) << 0
- return f&mask != 0
+// Key generates Key{} from given parts for
+// the type of lookup this Index uses in cache.
+// NOTE: panics on incorrect no. parts / types given.
+func (i *Index) Key(parts ...any) Key {
+ buf := new_buffer()
+ key := i.key(buf, parts)
+ free_buffer(buf)
+ return key
}
-func set_is_unique(f *uint8) {
- const mask = uint8(1) << 0
- (*f) |= mask
-}
-
-func allow_zero(f uint8) bool {
- const mask = uint8(1) << 1
- return f&mask != 0
+// Keys generates []Key{} from given (multiple) parts
+// for the type of lookup this Index uses in the cache.
+// NOTE: panics on incorrect no. parts / types given.
+func (i *Index) Keys(parts ...[]any) []Key {
+ keys := make([]Key, 0, len(parts))
+ buf := new_buffer()
+ for _, parts := range parts {
+ key := i.key(buf, parts)
+ if key.Zero() {
+ continue
+ }
+ keys = append(keys, key)
+ }
+ free_buffer(buf)
+ return keys
}
-func set_allow_zero(f *uint8) {
- const mask = uint8(1) << 1
- (*f) |= mask
-}
+// init will initialize the index with given struct type, config and capacity.
+func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
+ switch {
+ // The only 2 types we support are
+ // structs, and ptrs to a struct.
+ case t.Kind() == reflect.Struct:
+ case t.Kind() == reflect.Pointer &&
+ t.Elem().Kind() == reflect.Struct:
+ t = t.Elem()
+ default:
+ panic("index only support struct{} and *struct{}")
+ }
-func init_index[T any](i *Index[T], config IndexConfig, max int) {
// Set name from the raw
// struct fields string.
- i.name = config.Fields
+ i.name = cfg.Fields
// Set struct flags.
- if config.AllowZero {
+ if cfg.AllowZero {
set_allow_zero(&i.flags)
}
- if !config.Multiple {
+ if !cfg.Multiple {
set_is_unique(&i.flags)
}
- // Split to get the containing struct fields.
- fields := strings.Split(config.Fields, ",")
+ // Split to get containing struct fields.
+ fields := strings.Split(cfg.Fields, ",")
// Preallocate expected struct field slice.
- i.fields = make([]structfield, len(fields))
-
- // Get the reflected struct ptr type.
- t := reflect.TypeOf((*T)(nil)).Elem()
+ i.fields = make([]struct_field, len(fields))
for x, fieldName := range fields {
// Split name to account for nesting.
@@ -138,255 +156,261 @@ func init_index[T any](i *Index[T], config IndexConfig, max int) {
}
// Initialize index_entry list store.
- i.data = make(map[Hash]*list, max+1)
+ i.data = make(map[string]*list, cap+1)
}
-func index_key[T any](i *Index[T], h *xxh3.Hasher, value T) ([]any, Hash, bool) {
- key := extract_fields(value, i.fields)
- sum, zero := hash_sum(i.fields, h, key)
- if zero && !allow_zero(i.flags) {
- var zero Hash
- return nil, zero, false
- }
- return key, sum, true
-}
-
-func index_hash[T any](i *Index[T], h *xxh3.Hasher, key []any) (Hash, bool) {
- sum, zero := hash_sum(i.fields, h, key)
- if zero && !allow_zero(i.flags) {
- var zero Hash
- return zero, false
- }
- return sum, true
-}
-
-func index_get[T any](i *Index[T], hash Hash, key []any) *list {
- l := i.data[hash]
+// get_one will fetch one indexed item under key.
+func (i *Index) get_one(key Key) *indexed_item {
+ // Get list at hash.
+ l := i.data[key.key]
if l == nil {
return nil
}
+
+ // Extract entry from first list elem.
entry := (*index_entry)(l.head.data)
- if !is_equal(entry.key, key) {
- return l
+
+ // Check contains expected key.
+ if !entry.key.Equal(key) {
+ return nil
}
- return l
+
+ return entry.item
}
-func index_append[T any](c *Cache[T], i *Index[T], hash Hash, key []any, res *result) {
- // Get list at key.
- l := i.data[hash]
+// get will fetch all indexed items under key, passing each to hook.
+func (i *Index) get(key Key, hook func(*indexed_item)) {
+ if hook == nil {
+ panic("nil hook")
+ }
+ // Get list at hash.
+ l := i.data[key.key]
if l == nil {
+ return
+ }
- // Allocate new list.
- l = list_acquire()
- i.data[hash] = l
-
- } else if entry := (*index_entry)(l.head.data); //nocollapse
- !is_equal(entry.key, key) {
-
- // Collision! Drop all.
- delete(i.data, hash)
+ // Extract entry from first list elem.
+ entry := (*index_entry)(l.head.data)
- // Iterate entries in list.
- for x := 0; x < l.len; x++ {
+ // Check contains expected key.
+ if !entry.key.Equal(key) {
+ return
+ }
- // Pop current head.
- list_remove(l, l.head)
+ // Iterate all entries in list.
+ l.rangefn(func(elem *list_elem) {
- // Extract result.
- res := entry.result
+ // Extract element entry + item.
+ entry := (*index_entry)(elem.data)
+ item := entry.item
- // Drop index entry from res.
- result_drop_index(res, i)
- if len(res.indexed) == 0 {
+ // Pass to hook.
+ hook(item)
+ })
+}
- // Old res now unused,
- // release to mem pool.
- result_release(c, res)
+// key uses hasher to generate Key{} from given raw parts.
+func (i *Index) key(buf *byteutil.Buffer, parts []any) Key {
+ if len(parts) != len(i.fields) {
+ panicf("incorrect number key parts: want=%d received=%d",
+ len(parts),
+ len(i.fields),
+ )
+ }
+ buf.B = buf.B[:0]
+ if !allow_zero(i.flags) {
+ for x := range parts {
+ before := len(buf.B)
+ buf.B = i.fields[x].mangle(buf.B, parts[x])
+ if string(buf.B[before:]) == i.fields[x].zero {
+ return Key{}
}
+ buf.B = append(buf.B, '.')
+ }
+ } else {
+ for x := range parts {
+ buf.B = i.fields[x].mangle(buf.B, parts[x])
+ buf.B = append(buf.B, '.')
}
+ }
+ return Key{
+ raw: parts,
+ key: string(buf.B),
+ }
+}
- return
+// append will append the given index entry to appropriate
+// doubly-linked-list in index hashmap. this handles case
+// of key collisions and overwriting 'unique' entries.
+func (i *Index) append(key Key, item *indexed_item) {
+ // Look for existing.
+ l := i.data[key.key]
- } else if is_unique(i.flags) {
+ if l == nil {
- // Remove current
- // indexed entry.
- list_remove(l, l.head)
+ // Allocate new.
+ l = new_list()
+ i.data[key.key] = l
- // Get ptr to old
- // entry before we
- // release to pool.
- res := entry.result
+ } else if is_unique(i.flags) {
- // Drop this index's key from
- // old res now not indexed here.
- result_drop_index(res, i)
- if len(res.indexed) == 0 {
+ // Remove head.
+ elem := l.head
+ l.remove(elem)
- // Old res now unused,
- // release to mem pool.
- result_release(c, res)
- }
+ // Drop index from inner item.
+ e := (*index_entry)(elem.data)
+ e.item.drop_index(e)
+
+ // Free unused entry.
+ free_index_entry(e)
}
- // Acquire + setup index entry.
- entry := index_entry_acquire()
- entry.index = unsafe.Pointer(i)
- entry.result = res
+ // Prepare new index entry.
+ entry := new_index_entry()
+ entry.item = item
entry.key = key
- entry.hash = hash
+ entry.index = i
- // Append to result's indexed entries.
- res.indexed = append(res.indexed, entry)
+ // Add ourselves to item's index tracker.
+ item.indexed = append(item.indexed, entry)
- // Add index entry to index list.
- list_push_front(l, &entry.elem)
+ // Add entry to index list.
+ l.push_front(&entry.elem)
}
-func index_delete[T any](c *Cache[T], i *Index[T], hash Hash, key []any, fn func(*result)) {
- if fn == nil {
- panic("nil fn")
+// delete will remove all indexed items under key, passing each to hook.
+func (i *Index) delete(key Key, hook func(*indexed_item)) {
+ if hook == nil {
+ panic("nil hook")
}
// Get list at hash.
- l := i.data[hash]
+ l := i.data[key.key]
if l == nil {
return
}
+ // Extract entry from first list elem.
entry := (*index_entry)(l.head.data)
- // Check contains expected key for hash.
- if !is_equal(entry.key, key) {
+ // Check contains expected key.
+ if !entry.key.Equal(key) {
return
}
// Delete data at hash.
- delete(i.data, hash)
+ delete(i.data, key.key)
// Iterate entries in list.
for x := 0; x < l.len; x++ {
- // Pop current head.
- entry := (*index_entry)(l.head.data)
- list_remove(l, l.head)
+ // Pop list head.
+ elem := l.head
+ l.remove(elem)
- // Extract result.
- res := entry.result
+ // Extract element entry + item.
+ entry := (*index_entry)(elem.data)
+ item := entry.item
- // Call hook.
- fn(res)
+ // Drop index from item.
+ item.drop_index(entry)
- // Drop index entry from res.
- result_drop_index(res, i)
+ // Free now-unused entry.
+ free_index_entry(entry)
+
+ // Pass to hook.
+ hook(item)
}
- // Release to pool.
- list_release(l)
+ // Release list.
+ free_list(l)
}
-func index_delete_entry[T any](c *Cache[T], entry *index_entry) {
- // Get from entry.
- i := (*Index[T])(entry.index)
-
+// delete_entry deletes the given index entry.
+func (i *Index) delete_entry(entry *index_entry) {
// Get list at hash sum.
- l := i.data[entry.hash]
+ l := i.data[entry.key.key]
if l == nil {
return
}
- // Remove entry from list.
- list_remove(l, &entry.elem)
- if l.len == 0 {
+ // Remove list entry.
+ l.remove(&entry.elem)
- // Remove list from map.
- delete(i.data, entry.hash)
+ if l.len == 0 {
+ // Remove entry list from map.
+ delete(i.data, entry.key.key)
- // Release to pool.
- list_release(l)
+ // Release list.
+ free_list(l)
}
- // Extract result.
- res := entry.result
-
- // Drop index entry from res.
- result_drop_index(res, i)
+ // Drop this index from item.
+ entry.item.drop_index(entry)
}
-var entry_pool sync.Pool
-
+// index_entry represents a single entry
+// in an Index{}, where it will be accessible
+// by Key{} pointing to a containing list{}.
type index_entry struct {
- // elem contains the list element
- // appended to each per-hash list
- // within the Index{} type. the
- // contained value is a self-ref.
+
+ // list elem that entry is stored
+ // within, under containing index.
+ // elem.data is ptr to index_entry.
elem list_elem
- // index is the Index{} this
- // index_entry{} is stored in.
- index unsafe.Pointer
-
- // result is the actual
- // underlying result stored
- // within the index. this
- // also contains a ref to
- // this *index_entry in order
- // to track indices each result
- // is currently stored under.
- result *result
-
- // key contains the actual
- // key this item was stored
- // under, used for collision
- // check.
- key []any
-
- // hash contains computed
- // hash checksum of .key.
- hash Hash
+ // serialized key string
+ // + raw key parts
+ key Key
+
+ // index this is stored in.
+ index *Index
+
+ // underlying indexed item.
+ item *indexed_item
}
-func index_entry_acquire() *index_entry {
- // Acquire from pool.
- v := entry_pool.Get()
+var index_entry_pool sync.Pool
+
+// new_index_entry returns a new prepared index_entry.
+func new_index_entry() *index_entry {
+ v := index_entry_pool.Get()
if v == nil {
v = new(index_entry)
}
-
- // Cast index_entry value.
entry := v.(*index_entry)
-
- // Set index list elem entry on itself.
- entry.elem.data = unsafe.Pointer(entry)
-
+ ptr := unsafe.Pointer(entry)
+ entry.elem.data = ptr
return entry
}
-func index_entry_release(entry *index_entry) {
- var zero Hash
-
- // Reset index entry.
+// free_index_entry releases the index_entry.
+func free_index_entry(entry *index_entry) {
entry.elem.data = nil
+ entry.key = Key{}
entry.index = nil
- entry.result = nil
- entry.key = nil
- entry.hash = zero
+ entry.item = nil
+ index_entry_pool.Put(entry)
+}
- // Release to pool.
- entry_pool.Put(entry)
+func is_unique(f uint8) bool {
+ const mask = uint8(1) << 0
+ return f&mask != 0
}
-// is_equal returns whether 2 key slices are equal.
-func is_equal(k1, k2 []any) bool {
- if len(k1) != len(k2) {
- return false
- }
- for i := range k1 {
- if k1[i] != k2[i] {
- return false
- }
- }
- return true
+func set_is_unique(f *uint8) {
+ const mask = uint8(1) << 0
+ (*f) |= mask
+}
+
+func allow_zero(f uint8) bool {
+ const mask = uint8(1) << 1
+ return f&mask != 0
+}
+
+func set_allow_zero(f *uint8) {
+ const mask = uint8(1) << 1
+ (*f) |= mask
}
diff --git a/vendor/codeberg.org/gruf/go-structr/item.go b/vendor/codeberg.org/gruf/go-structr/item.go
new file mode 100644
index 000000000..602c5b84a
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-structr/item.go
@@ -0,0 +1,59 @@
+package structr
+
+import (
+ "sync"
+ "unsafe"
+)
+
+type indexed_item struct {
+ // linked list elem this item
+ // is stored in a main list.
+ elem list_elem
+
+ // indexed stores the indices
+ // this item is stored under.
+ indexed []*index_entry
+
+ // cached data with type.
+ data interface{}
+}
+
+var indexed_item_pool sync.Pool
+
+// new_indexed_item returns a new prepared indexed_item.
+func new_indexed_item() *indexed_item {
+ v := indexed_item_pool.Get()
+ if v == nil {
+ v = new(indexed_item)
+ }
+ item := v.(*indexed_item)
+ ptr := unsafe.Pointer(item)
+ item.elem.data = ptr
+ return item
+}
+
+// free_indexed_item releases the indexed_item.
+func free_indexed_item(item *indexed_item) {
+ item.elem.data = nil
+ item.indexed = item.indexed[:0]
+ item.data = nil
+ indexed_item_pool.Put(item)
+}
+
+// drop_index will drop the given index entry from item's indexed slice.
+// note the index_entry memory itself is released by the caller (e.g. to pool).
+func (i *indexed_item) drop_index(entry *index_entry) {
+ for x := 0; x < len(i.indexed); x++ {
+ if i.indexed[x] != entry {
+ // Prof. Obiwan:
+ // this is not the index
+ // we are looking for.
+ continue
+ }
+
+ // Move all index entries down + reslice.
+ copy(i.indexed[x:], i.indexed[x+1:])
+ i.indexed = i.indexed[:len(i.indexed)-1]
+ break
+ }
+}
diff --git a/vendor/codeberg.org/gruf/go-structr/key.go b/vendor/codeberg.org/gruf/go-structr/key.go
new file mode 100644
index 000000000..d68e3fe19
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-structr/key.go
@@ -0,0 +1,58 @@
+package structr
+
+import (
+ "sync"
+
+ "codeberg.org/gruf/go-byteutil"
+)
+
+// Key represents one key to
+// lookup (potentially) stored
+// entries in an Index.
+type Key struct {
+ raw []any
+ key string
+}
+
+// Key returns the underlying cache key string.
+// NOTE: this will not be log output friendly.
+func (k Key) Key() string {
+ return k.key
+}
+
+// Equal returns whether keys are equal.
+func (k Key) Equal(o Key) bool {
+ return k.key == o.key
+}
+
+// Values returns the raw slice of
+// values that comprise this Key.
+func (k Key) Values() []any {
+ return k.raw
+}
+
+// Zero indicates a zero value key.
+func (k Key) Zero() bool {
+ return k.raw == nil
+}
+
+var buf_pool sync.Pool
+
+// new_buffer returns a new initialized byte buffer.
+func new_buffer() *byteutil.Buffer {
+ v := buf_pool.Get()
+ if v == nil {
+ buf := new(byteutil.Buffer)
+ buf.B = make([]byte, 0, 512)
+ v = buf
+ }
+ return v.(*byteutil.Buffer)
+}
+
+// free_buffer releases the byte buffer.
+func free_buffer(buf *byteutil.Buffer) {
+ if cap(buf.B) > int(^uint16(0)) {
+ return // drop large bufs
+ }
+ buf_pool.Put(buf)
+}
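
Note from `Index.key()` above: unless an index sets `AllowZero`, parts that mangle to their zero value produce the zero `Key{}`, which is never stored. A small sketch of guarding on that, reusing the README's `urlIndex`:

```golang
// An empty URL mangles to the string zero value, so
// without AllowZero the generated key is zero Key{}.
key := urlIndex.Key("")
if key.Zero() {
	// Nothing is ever stored under a zero
	// key, so skip the cache lookup entirely.
	return
}
```
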
diff --git a/vendor/codeberg.org/gruf/go-structr/list.go b/vendor/codeberg.org/gruf/go-structr/list.go
index 398f84ceb..17e1899ad 100644
--- a/vendor/codeberg.org/gruf/go-structr/list.go
+++ b/vendor/codeberg.org/gruf/go-structr/list.go
@@ -5,8 +5,6 @@ import (
"unsafe"
)
-var list_pool sync.Pool
-
// elem represents an elem
// in a doubly-linked list.
type list_elem struct {
@@ -28,28 +26,28 @@ type list struct {
len int
}
-func list_acquire() *list {
- // Acquire from pool.
+var list_pool sync.Pool
+
+// new_list returns a new prepared list.
+func new_list() *list {
v := list_pool.Get()
if v == nil {
v = new(list)
}
-
- // Cast list value.
- return v.(*list)
+ list := v.(*list)
+ return list
}
-func list_release(l *list) {
- // Reset list.
- l.head = nil
- l.tail = nil
- l.len = 0
-
- // Release to pool.
- list_pool.Put(l)
+// free_list releases the list.
+func free_list(list *list) {
+ list.head = nil
+ list.tail = nil
+ list.len = 0
+ list_pool.Put(list)
}
-func list_push_front(l *list, elem *list_elem) {
+// push_front will push the given elem to front (head) of list.
+func (l *list) push_front(elem *list_elem) {
if l.len == 0 {
// Set new tail + head
l.head = elem
@@ -77,12 +75,49 @@ func list_push_front(l *list, elem *list_elem) {
l.len++
}
-func list_move_front(l *list, elem *list_elem) {
- list_remove(l, elem)
- list_push_front(l, elem)
+// push_back will push the given elem to back (tail) of list.
+func (l *list) push_back(elem *list_elem) {
+ if l.len == 0 {
+ // Set new tail + head
+ l.head = elem
+ l.tail = elem
+
+ // Link elem to itself
+ elem.next = elem
+ elem.prev = elem
+ } else {
+ oldTail := l.tail
+
+ // Link to old tail
+ elem.prev = oldTail
+ oldTail.next = elem
+
+ // Link up to head
+ elem.next = l.head
+ l.head.prev = elem
+
+ // Set new tail
+ l.tail = elem
+ }
+
+ // Incr count
+ l.len++
+}
+
+// move_front will move given elem to front (head) of list.
+func (l *list) move_front(elem *list_elem) {
+ l.remove(elem)
+ l.push_front(elem)
+}
+
+// move_back will move given elem to back (tail) of list.
+func (l *list) move_back(elem *list_elem) {
+ l.remove(elem)
+ l.push_back(elem)
}
-func list_remove(l *list, elem *list_elem) {
+// remove will remove given elem from list.
+func (l *list) remove(elem *list_elem) {
if l.len <= 1 {
// Drop elem's links
elem.next = nil
@@ -121,7 +156,8 @@ func list_remove(l *list, elem *list_elem) {
l.len--
}
-func list_rangefn(l *list, fn func(*list_elem)) {
+// rangefn will range all elems in list, passing each to fn.
+func (l *list) rangefn(fn func(*list_elem)) {
if fn == nil {
panic("nil fn")
}
diff --git a/vendor/codeberg.org/gruf/go-structr/queue.go b/vendor/codeberg.org/gruf/go-structr/queue.go
new file mode 100644
index 000000000..1e735762f
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-structr/queue.go
@@ -0,0 +1,306 @@
+package structr
+
+import (
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+// QueueConfig defines config vars
+// for initializing a struct queue.
+type QueueConfig[StructType any] struct {
+
+ // Indices defines indices to create
+ // in the Queue for the receiving
+ // generic struct parameter type.
+ Indices []IndexConfig
+
+ // Pop is called when queue values
+ // are popped, during calls to any
+ // of the Pop___() series of fns.
+ Pop func(StructType)
+}
+
+// Queue provides a struct value queue with
+// automated indexing and popping by any
+// initialization-defined field combinations.
+type Queue[StructType any] struct {
+
+ // indices used in storing passed struct
+ // types by user defined sets of fields.
+ indices []Index
+
+ // main underlying
+ // struct item queue.
+ queue list
+
+ // hook functions.
+ copy func(StructType) StructType
+ pop func(StructType)
+
+ // protective mutex, guards:
+ // - Queue{}.queue
+ // - Index{}.data
+ // - Queue{} hook fns
+ mutex sync.Mutex
+}
+
+// Init initializes the queue with given configuration
+// including struct fields to index, and necessary fns.
+func (q *Queue[T]) Init(config QueueConfig[T]) {
+ t := reflect.TypeOf((*T)(nil)).Elem()
+
+ if len(config.Indices) == 0 {
+ panic("no indices provided")
+ }
+
+ // Safely copy over
+ // provided config.
+ q.mutex.Lock()
+ q.indices = make([]Index, len(config.Indices))
+ for i, cfg := range config.Indices {
+ q.indices[i].ptr = unsafe.Pointer(q)
+ q.indices[i].init(t, cfg, 0)
+ }
+ q.pop = config.Pop
+ q.mutex.Unlock()
+}
+
+// Index selects index with given name from queue, else panics.
+func (q *Queue[T]) Index(name string) *Index {
+ for i := range q.indices {
+ if q.indices[i].name == name {
+ return &q.indices[i]
+ }
+ }
+ panic("unknown index: " + name)
+}
+
+// PopFront pops the current value at front of the queue.
+func (q *Queue[T]) PopFront() (T, bool) {
+ t := q.PopFrontN(1)
+ if len(t) == 0 {
+ var t T
+ return t, false
+ }
+ return t[0], true
+}
+
+// PopBack pops the current value at back of the queue.
+func (q *Queue[T]) PopBack() (T, bool) {
+ t := q.PopBackN(1)
+ if len(t) == 0 {
+ var t T
+ return t, false
+ }
+ return t[0], true
+}
+
+// PopFrontN attempts to pop n values from front of the queue.
+func (q *Queue[T]) PopFrontN(n int) []T {
+ return q.pop_n(n, func() *list_elem {
+ return q.queue.head
+ })
+}
+
+// PopBackN attempts to pop n values from back of the queue.
+func (q *Queue[T]) PopBackN(n int) []T {
+ return q.pop_n(n, func() *list_elem {
+ return q.queue.tail
+ })
+}
+
+// Pop attempts to pop values from queue indexed under any of keys.
+func (q *Queue[T]) Pop(index *Index, keys ...Key) []T {
+ if index == nil {
+ panic("no index given")
+ } else if index.ptr != unsafe.Pointer(q) {
+ panic("invalid index for queue")
+ }
+
+ // Acquire lock.
+ q.mutex.Lock()
+
+ // Preallocate expected ret slice.
+ values := make([]T, 0, len(keys))
+
+ for i := range keys {
+ // Delete all items under key from index, collecting
+ // value items and dropping them from all their indices.
+ index.delete(keys[i], func(item *indexed_item) {
+
+ // Append deleted to values.
+ value := item.data.(T)
+ values = append(values, value)
+
+ // Delete queued.
+ q.delete(item)
+ })
+ }
+
+ // Get func ptrs.
+ pop := q.pop
+
+ // Done with lock.
+ q.mutex.Unlock()
+
+ if pop != nil {
+ // Pass all popped values
+ // to given user hook (if set).
+ for _, value := range values {
+ pop(value)
+ }
+ }
+
+ return values
+}
+
+// PushFront pushes values to front of queue.
+func (q *Queue[T]) PushFront(values ...T) {
+ q.mutex.Lock()
+ for i := range values {
+ item := q.index(values[i])
+ q.queue.push_front(&item.elem)
+ }
+ q.mutex.Unlock()
+}
+
+// PushBack pushes values to back of queue.
+func (q *Queue[T]) PushBack(values ...T) {
+ q.mutex.Lock()
+ for i := range values {
+ item := q.index(values[i])
+ q.queue.push_back(&item.elem)
+ }
+ q.mutex.Unlock()
+}
+
+// MoveFront attempts to move values indexed under any of keys to the front of the queue.
+func (q *Queue[T]) MoveFront(index *Index, keys ...Key) {
+ q.mutex.Lock()
+ for i := range keys {
+ index.get(keys[i], func(item *indexed_item) {
+ q.queue.move_front(&item.elem)
+ })
+ }
+ q.mutex.Unlock()
+}
+
+// MoveBack attempts to move values indexed under any of keys to the back of the queue.
+func (q *Queue[T]) MoveBack(index *Index, keys ...Key) {
+ q.mutex.Lock()
+ for i := range keys {
+ index.get(keys[i], func(item *indexed_item) {
+ q.queue.move_back(&item.elem)
+ })
+ }
+ q.mutex.Unlock()
+}
+
+// Len returns the current length of queue.
+func (q *Queue[T]) Len() int {
+ q.mutex.Lock()
+ l := q.queue.len
+ q.mutex.Unlock()
+ return l
+}
+
+func (q *Queue[T]) pop_n(n int, next func() *list_elem) []T {
+ if next == nil {
+ panic("nil fn")
+ }
+
+ // Acquire lock.
+ q.mutex.Lock()
+
+ // Preallocate ret slice.
+ values := make([]T, 0, n)
+
+ // Iterate over 'n' items.
+ for i := 0; i < n; i++ {
+
+ // Get next elem.
+ next := next()
+ if next == nil {
+
+ // reached
+ // end.
+ break
+ }
+
+ // Cast the indexed item from elem.
+ item := (*indexed_item)(next.data)
+
+ // Append deleted to values.
+ value := item.data.(T)
+ values = append(values, value)
+
+ // Delete queued.
+ q.delete(item)
+ }
+
+ // Get func ptrs.
+ pop := q.pop
+
+ // Done with lock.
+ q.mutex.Unlock()
+
+ if pop != nil {
+ // Pass all popped values
+ // to given user hook (if set).
+ for _, value := range values {
+ pop(value)
+ }
+ }
+
+ return values
+}
+
+func (q *Queue[T]) index(value T) *indexed_item {
+ item := new_indexed_item()
+
+ // Set item value.
+ item.data = value
+
+ // Acquire key buf.
+ buf := new_buffer()
+
+ for i := range q.indices {
+ // Get current index ptr.
+ idx := &(q.indices[i])
+
+ // Extract fields comprising index key.
+ parts := extract_fields(value, idx.fields)
+
+ // Calculate index key.
+ key := idx.key(buf, parts)
+ if key.Zero() {
+ continue
+ }
+
+ // Append item to index.
+ idx.append(key, item)
+ }
+
+ // Done with buf.
+ free_buffer(buf)
+
+ return item
+}
+
+func (q *Queue[T]) delete(item *indexed_item) {
+ for len(item.indexed) != 0 {
+ // Pop last indexed entry from list.
+ entry := item.indexed[len(item.indexed)-1]
+ item.indexed = item.indexed[:len(item.indexed)-1]
+
+ // Drop index_entry from index.
+ entry.index.delete_entry(entry)
+ }
+
+ // Drop entry from queue list.
+ q.queue.remove(&item.elem)
+
+ // Free now-unused item.
+ free_indexed_item(item)
+}
diff --git a/vendor/codeberg.org/gruf/go-structr/result.go b/vendor/codeberg.org/gruf/go-structr/result.go
deleted file mode 100644
index 08d3ad013..000000000
--- a/vendor/codeberg.org/gruf/go-structr/result.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package structr
-
-import (
- "sync"
- "unsafe"
-)
-
-var result_pool sync.Pool
-
-type result struct {
- // linked list elem this result is
- // stored under in Cache.lruList.
- elem list_elem
-
- // indexed stores the indices
- // this result is stored under.
- indexed []*index_entry
-
- // cached data (we maintain
- // the type data here using
- // an interface as any one
- // instance can be T / error).
- data interface{}
-}
-
-func result_acquire[T any](c *Cache[T]) *result {
- // Acquire from pool.
- v := result_pool.Get()
- if v == nil {
- v = new(result)
- }
-
- // Cast result value.
- res := v.(*result)
-
- // Push result elem to front of LRU list.
- list_push_front(&c.lruList, &res.elem)
- res.elem.data = unsafe.Pointer(res)
-
- return res
-}
-
-func result_release[T any](c *Cache[T], res *result) {
- // Remove result elem from LRU list.
- list_remove(&c.lruList, &res.elem)
- res.elem.data = nil
-
- // Reset all result fields.
- res.indexed = res.indexed[:0]
- res.data = nil
-
- // Release to pool.
- result_pool.Put(res)
-}
-
-func result_drop_index[T any](res *result, index *Index[T]) {
- for i := 0; i < len(res.indexed); i++ {
-
- if res.indexed[i].index != unsafe.Pointer(index) {
- // Prof. Obiwan:
- // this is not the index
- // we are looking for.
- continue
- }
-
- // Get index entry ptr.
- entry := res.indexed[i]
-
- // Move all index entries down + reslice.
- copy(res.indexed[i:], res.indexed[i+1:])
- res.indexed = res.indexed[:len(res.indexed)-1]
-
- // Release to memory pool.
- index_entry_release(entry)
-
- return
- }
-}
diff --git a/vendor/codeberg.org/gruf/go-structr/runtime.go b/vendor/codeberg.org/gruf/go-structr/runtime.go
index cc1bcd86d..7db1d7e7a 100644
--- a/vendor/codeberg.org/gruf/go-structr/runtime.go
+++ b/vendor/codeberg.org/gruf/go-structr/runtime.go
@@ -7,31 +7,38 @@ import (
"unicode/utf8"
"unsafe"
+ "codeberg.org/gruf/go-mangler"
"github.com/modern-go/reflect2"
- "github.com/zeebo/xxh3"
)
-type structfield struct {
- // _type is the runtime type pointer
+// struct_field contains pre-prepared type
+// information about a struct's field member,
+// including memory offset and mangler function.
+type struct_field struct {
+
+ // type2 is the runtime type pointer
// underlying the struct field type.
	// used for repacking our own efaces.
- _type reflect2.Type
+ type2 reflect2.Type
// offset is the offset in memory
// of this struct field from the
// outer-most value ptr location.
offset uintptr
- // hasher is the relevant function
- // for hashing value of structfield
- // into the supplied hashbuf, where
- // return value indicates if zero.
- hasher func(*xxh3.Hasher, any) bool
+ // struct field type mangling
+ // (i.e. fast serializing) fn.
+ mangle mangler.Mangler
+
+	// mangled zero value string;
+	// if set, zero values of this
+	// field are not permitted
+ zero string
}
// find_field will search for a struct field with given set of names,
// where names is a len > 0 slice of names accounting for struct nesting.
-func find_field(t reflect.Type, names []string) (sfield structfield) {
+func find_field(t reflect.Type, names []string) (sfield struct_field) {
var (
// is_exported returns whether name is exported
// from a package; can be func or struct field.
@@ -56,17 +63,6 @@ func find_field(t reflect.Type, names []string) (sfield structfield) {
field reflect.StructField
)
- switch {
- // The only 2 types we support are
- // structs, and ptrs to a struct.
- case t.Kind() == reflect.Struct:
- case t.Kind() == reflect.Pointer &&
- t.Elem().Kind() == reflect.Struct:
- t = t.Elem()
- default:
- panic("index only support struct{} and *struct{}")
- }
-
for len(names) > 0 {
var ok bool
@@ -92,17 +88,17 @@ func find_field(t reflect.Type, names []string) (sfield structfield) {
}
// Get field type as reflect2.
- sfield._type = reflect2.Type2(t)
+ sfield.type2 = reflect2.Type2(t)
- // Find hasher for type.
- sfield.hasher = hasher(t)
+ // Find mangler for field type.
+ sfield.mangle = mangler.Get(t)
return
}
// extract_fields extracts the given struct fields from the provided value type;
// this is done using predetermined struct field memory offset locations.
-func extract_fields[T any](value T, fields []structfield) []any {
+func extract_fields[T any](value T, fields []struct_field) []any {
// Get ptr to raw value data.
ptr := unsafe.Pointer(&value)
@@ -117,17 +113,12 @@ func extract_fields[T any](value T, fields []structfield) []any {
for i := 0; i < len(fields); i++ {
// Manually access field at memory offset and pack eface.
ptr := unsafe.Pointer(uintptr(ptr) + fields[i].offset)
- ifaces[i] = fields[i]._type.UnsafeIndirect(ptr)
+ ifaces[i] = fields[i].type2.UnsafeIndirect(ptr)
}
return ifaces
}
-// data_ptr returns the runtime data ptr associated with value.
-func data_ptr(a any) unsafe.Pointer {
- return (*struct{ t, v unsafe.Pointer })(unsafe.Pointer(&a)).v
-}
-
// panicf provides a panic with string formatting.
func panicf(format string, args ...any) {
panic(fmt.Sprintf(format, args...))