author    | 2023-08-03 10:34:35 +0100
committer | 2023-08-03 11:34:35 +0200
commit    | 00adf18c2470a69c255ea75990bbbae6e57eea89 (patch)
tree      | d65408d4860b39f22f0aa853d25f57a37c65ee5c /vendor/codeberg.org/gruf/go-cache
parent    | [bugfix] Rework MultiError to wrap + unwrap errors properly (#2057) (diff)
download  | gotosocial-00adf18c2470a69c255ea75990bbbae6e57eea89.tar.xz
[feature] simpler cache size configuration (#2051)
* add automatic cache max size generation based on ratios of a singular fixed memory target
Signed-off-by: kim <grufwub@gmail.com>
* remove now-unused cache max-size config variables
Signed-off-by: kim <grufwub@gmail.com>
* slight ratio tweak
Signed-off-by: kim <grufwub@gmail.com>
* remove unused visibility config var
Signed-off-by: kim <grufwub@gmail.com>
* add secret little ratio config trick
Signed-off-by: kim <grufwub@gmail.com>
* fixed a word
Signed-off-by: kim <grufwub@gmail.com>
* update cache library to remove use of TTL in result caches + slice cache
Signed-off-by: kim <grufwub@gmail.com>
* update other cache usages to use correct interface
Signed-off-by: kim <grufwub@gmail.com>
* update example config to explain the cache memory target
Signed-off-by: kim <grufwub@gmail.com>
* update env parsing test with new config values
Signed-off-by: kim <grufwub@gmail.com>
* do some ratio twiddling
Signed-off-by: kim <grufwub@gmail.com>
* add missing header
* update envparsing with latest defaults
Signed-off-by: kim <grufwub@gmail.com>
* update size calculations to take into account result cache, simple cache and extra map overheads
Signed-off-by: kim <grufwub@gmail.com>
* tweak the ratios some more
Signed-off-by: kim <grufwub@gmail.com>
* more NaN rampaging
Signed-off-by: kim <grufwub@gmail.com>
* fix envparsing script
Signed-off-by: kim <grufwub@gmail.com>
* update cache library, add sweep function to keep caches trim
Signed-off-by: kim <grufwub@gmail.com>
* sweep caches once a minute
Signed-off-by: kim <grufwub@gmail.com>
* add a regular job to sweep caches and keep under 80% utilisation
Signed-off-by: kim <grufwub@gmail.com>
* remove dead code
Signed-off-by: kim <grufwub@gmail.com>
* add newly used size library to the libraries section of readme
Signed-off-by: kim <grufwub@gmail.com>
* add better explanations for the mem-ratio numbers
Signed-off-by: kim <grufwub@gmail.com>
* update go-cache
Signed-off-by: kim <grufwub@gmail.com>
* library version bump
Signed-off-by: kim <grufwub@gmail.com>
* update cache.result{} size model estimation
Signed-off-by: kim <grufwub@gmail.com>
---------
Signed-off-by: kim <grufwub@gmail.com>
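
The sizing scheme described in the commit message above works roughly like this: operators set one overall memory target, and each cache's maximum entry count is derived from a per-cache ratio of that target divided by an estimated per-entry size. The sketch below illustrates the arithmetic only; the function name, ratios and size estimates are invented for illustration, and the real ratios and size models live in GoToSocial's config and cache packages rather than in this vendored library.

```go
package main

import "fmt"

// capacity derives a cache's max entry count from an overall memory
// target (bytes), a per-cache ratio, and an estimated per-entry size
// (bytes). Illustrative only; not GoToSocial's actual size model.
func capacity(memTarget uint64, ratio float64, entrySize uint64) int {
	if ratio <= 0 || entrySize == 0 {
		return 0
	}
	// This cache's share of the overall target, in bytes.
	share := float64(memTarget) * ratio
	return int(share / float64(entrySize))
}

func main() {
	const memTarget = 100 * 1024 * 1024 // e.g. a 100MiB cache memory target

	// Hypothetical ratios and per-entry size estimates for two caches.
	fmt.Println("accounts cap:", capacity(memTarget, 0.05, 1024))
	fmt.Println("statuses cap:", capacity(memTarget, 0.15, 2048))
}
```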
Diffstat (limited to 'vendor/codeberg.org/gruf/go-cache')
-rw-r--r-- | vendor/codeberg.org/gruf/go-cache/v3/LICENSE         |   2
-rw-r--r-- | vendor/codeberg.org/gruf/go-cache/v3/README.md       |  10
-rw-r--r-- | vendor/codeberg.org/gruf/go-cache/v3/cache.go        |  30
-rw-r--r-- | vendor/codeberg.org/gruf/go-cache/v3/result/cache.go | 226
-rw-r--r-- | vendor/codeberg.org/gruf/go-cache/v3/result/key.go   |   4
-rw-r--r-- | vendor/codeberg.org/gruf/go-cache/v3/result/pool.go  |  24
-rw-r--r-- | vendor/codeberg.org/gruf/go-cache/v3/simple/cache.go | 454
-rw-r--r-- | vendor/codeberg.org/gruf/go-cache/v3/simple/pool.go  |  23
-rw-r--r-- | vendor/codeberg.org/gruf/go-cache/v3/ttl/ttl.go      |   2
9 files changed, 645 insertions, 130 deletions
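
The cache "sweep" mentioned in the commit message shows up in the diff below as a new Trim(perc float64) method on both simple.Cache and result.Cache, which truncates a cache back down to the given percentage of its capacity. A minimal sketch of the kind of background job that could drive it follows; the once-a-minute interval and the 80% figure come from the commit message, while the scheduling code itself is hypothetical (GoToSocial wires this up through its own scheduler):

```go
// Package sweep is a sketch, not GoToSocial code.
package sweep

import (
	"context"
	"time"
)

// trimmer is satisfied by the simple.Cache and result.Cache types in
// the diff below, both of which expose Trim(perc float64).
type trimmer interface {
	Trim(perc float64)
}

// Run trims every cache back to 80% of its capacity once per minute,
// until ctx is cancelled.
func Run(ctx context.Context, caches ...trimmer) {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			for _, c := range caches {
				// Keep each cache under 80% utilisation.
				c.Trim(80)
			}
		}
	}
}
```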
diff --git a/vendor/codeberg.org/gruf/go-cache/v3/LICENSE b/vendor/codeberg.org/gruf/go-cache/v3/LICENSE index e4163ae35..d6f08d0ab 100644 --- a/vendor/codeberg.org/gruf/go-cache/v3/LICENSE +++ b/vendor/codeberg.org/gruf/go-cache/v3/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2022 gruf +Copyright (c) gruf Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/vendor/codeberg.org/gruf/go-cache/v3/README.md b/vendor/codeberg.org/gruf/go-cache/v3/README.md index 43004f3d8..eb298bea4 100644 --- a/vendor/codeberg.org/gruf/go-cache/v3/README.md +++ b/vendor/codeberg.org/gruf/go-cache/v3/README.md @@ -1,14 +1,14 @@ # go-cache -Provides access to a simple yet flexible, performant TTL cache via the `Cache{}` interface and `cache.New()`. Under the hood this is returning a `ttl.Cache{}`. +Provides access to simple, yet flexible, and performant caches (with TTL if required) via the `cache.Cache{}` and `cache.TTLCache{}` interfaces. -## ttl +## simple -A TTL cache implementation with much of the inner workings exposed, designed to be used as a base for your own customizations, or used as-is. Access via the base package `cache.New()` is recommended in the latter case, to prevent accidental use of unsafe methods. +A `cache.Cache{}` implementation with much more of the inner workings exposed. Designed to be used as a base for your own customizations, or used as-is. -## lookup +## ttl -`lookup.Cache` is an example of a more complex cache implementation using `ttl.Cache{}` as its underpinning. It provides caching of items under multiple keys. +A `cache.TTLCache{}` implementation with much more of the inner workings exposed. Designed to be used as a base for your own customizations, or used as-is. ## result diff --git a/vendor/codeberg.org/gruf/go-cache/v3/cache.go b/vendor/codeberg.org/gruf/go-cache/v3/cache.go index 1192cf3e8..d96971702 100644 --- a/vendor/codeberg.org/gruf/go-cache/v3/cache.go +++ b/vendor/codeberg.org/gruf/go-cache/v3/cache.go @@ -3,26 +3,33 @@ package cache import ( "time" - ttlcache "codeberg.org/gruf/go-cache/v3/ttl" + "codeberg.org/gruf/go-cache/v3/simple" + "codeberg.org/gruf/go-cache/v3/ttl" ) -// Cache represents a TTL cache with customizable callbacks, it exists here to abstract away the "unsafe" methods in the case that you do not want your own implementation atop ttl.Cache{}. -type Cache[Key comparable, Value any] interface { +// TTLCache represents a TTL cache with customizable callbacks, it exists here to abstract away the "unsafe" methods in the case that you do not want your own implementation atop ttl.Cache{}. +type TTLCache[Key comparable, Value any] interface { // Start will start the cache background eviction routine with given sweep frequency. If already running or a freq <= 0 provided, this is a no-op. This will block until the eviction routine has started. Start(freq time.Duration) bool // Stop will stop cache background eviction routine. If not running this is a no-op. This will block until the eviction routine has stopped. Stop() bool + // SetTTL sets the cache item TTL. 
Update can be specified to force updates of existing items in the cache, this will simply add the change in TTL to their current expiry time. + SetTTL(ttl time.Duration, update bool) + + // implements base cache. + Cache[Key, Value] +} + +// Cache represents a cache with customizable callbacks, it exists here to abstract away the "unsafe" methods in the case that you do not want your own implementation atop simple.Cache{}. +type Cache[Key comparable, Value any] interface { // SetEvictionCallback sets the eviction callback to the provided hook. SetEvictionCallback(hook func(Key, Value)) // SetInvalidateCallback sets the invalidate callback to the provided hook. SetInvalidateCallback(hook func(Key, Value)) - // SetTTL sets the cache item TTL. Update can be specified to force updates of existing items in the cache, this will simply add the change in TTL to their current expiry time. - SetTTL(ttl time.Duration, update bool) - // Get fetches the value with key from the cache, extending its TTL. Get(key Key) (value Value, ok bool) @@ -57,7 +64,12 @@ type Cache[Key comparable, Value any] interface { Cap() int } -// New returns a new initialized Cache with given initial length, maximum capacity and item TTL. -func New[K comparable, V any](len, cap int, ttl time.Duration) Cache[K, V] { - return ttlcache.New[K, V](len, cap, ttl) +// New returns a new initialized Cache with given initial length, maximum capacity. +func New[K comparable, V any](len, cap int) Cache[K, V] { + return simple.New[K, V](len, cap) +} + +// NewTTL returns a new initialized TTLCache with given initial length, maximum capacity and TTL duration. +func NewTTL[K comparable, V any](len, cap int, _ttl time.Duration) TTLCache[K, V] { + return ttl.New[K, V](len, cap, _ttl) } diff --git a/vendor/codeberg.org/gruf/go-cache/v3/result/cache.go b/vendor/codeberg.org/gruf/go-cache/v3/result/cache.go index a86a72c77..f31e6604a 100644 --- a/vendor/codeberg.org/gruf/go-cache/v3/result/cache.go +++ b/vendor/codeberg.org/gruf/go-cache/v3/result/cache.go @@ -2,14 +2,38 @@ package result import ( "context" + "fmt" + "os" "reflect" - "time" _ "unsafe" - "codeberg.org/gruf/go-cache/v3/ttl" + "codeberg.org/gruf/go-cache/v3/simple" "codeberg.org/gruf/go-errors/v2" ) +type result struct { + // Result primary key + PKey int64 + + // keys accessible under + Keys cacheKeys + + // cached value + Value any + + // cached error + Error error +} + +// getResultValue is a safe way of casting and fetching result value. +func getResultValue[T any](res *result) T { + v, ok := res.Value.(T) + if !ok { + fmt.Fprintf(os.Stderr, "!! BUG: unexpected value type in result: %T\n", res.Value) + } + return v +} + // Lookup represents a struct object lookup method in the cache. type Lookup struct { // Name is a period ('.') separated string @@ -23,26 +47,23 @@ type Lookup struct { // Multi allows specifying a key capable of storing // multiple results. Note this only supports invalidate. Multi bool - - // TODO: support toggling case sensitive lookups. - // CaseSensitive bool } // Cache provides a means of caching value structures, along with // the results of attempting to load them. An example usecase of this // cache would be in wrapping a database, allowing caching of sql.ErrNoRows. -type Cache[Value any] struct { - cache ttl.Cache[int64, result[Value]] // underlying result cache - invalid func(Value) // store unwrapped invalidate callback. 
- lookups structKeys // pre-determined struct lookups - ignore func(error) bool // determines cacheable errors - copy func(Value) Value // copies a Value type - next int64 // update key counter +type Cache[T any] struct { + cache simple.Cache[int64, *result] // underlying result cache + lookups structKeys // pre-determined struct lookups + invalid func(T) // store unwrapped invalidate callback. + ignore func(error) bool // determines cacheable errors + copy func(T) T // copies a Value type + next int64 // update key counter } // New returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity. -func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Value] { - var z Value +func New[T any](lookups []Lookup, copy func(T) T, cap int) *Cache[T] { + var z T // Determine generic type t := reflect.TypeOf(z) @@ -58,7 +79,7 @@ func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Va } // Allocate new cache object - c := &Cache[Value]{copy: copy} + c := &Cache[T]{copy: copy} c.lookups = make([]structKey, len(lookups)) for i, lookup := range lookups { @@ -67,38 +88,20 @@ func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Va } // Create and initialize underlying cache - c.cache.Init(0, cap, 0) + c.cache.Init(0, cap) c.SetEvictionCallback(nil) c.SetInvalidateCallback(nil) c.IgnoreErrors(nil) return c } -// Start will start the cache background eviction routine with given sweep frequency. If already -// running or a freq <= 0 provided, this is a no-op. This will block until eviction routine started. -func (c *Cache[Value]) Start(freq time.Duration) bool { - return c.cache.Start(freq) -} - -// Stop will stop cache background eviction routine. If not running this -// is a no-op. This will block until the eviction routine has stopped. -func (c *Cache[Value]) Stop() bool { - return c.cache.Stop() -} - -// SetTTL sets the cache item TTL. Update can be specified to force updates of existing items -// in the cache, this will simply add the change in TTL to their current expiry time. -func (c *Cache[Value]) SetTTL(ttl time.Duration, update bool) { - c.cache.SetTTL(ttl, update) -} - // SetEvictionCallback sets the eviction callback to the provided hook. -func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) { +func (c *Cache[T]) SetEvictionCallback(hook func(T)) { if hook == nil { // Ensure non-nil hook. - hook = func(Value) {} + hook = func(T) {} } - c.cache.SetEvictionCallback(func(pkey int64, res result[Value]) { + c.cache.SetEvictionCallback(func(pkey int64, res *result) { c.cache.Lock() for _, key := range res.Keys { // Delete key->pkey lookup @@ -108,23 +111,25 @@ func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) { c.cache.Unlock() if res.Error != nil { - // Skip error hooks + // Skip value hooks return } - // Call user hook. - hook(res.Value) + // Free result and call hook. + v := getResultValue[T](res) + putResult(res) + hook(v) }) } // SetInvalidateCallback sets the invalidate callback to the provided hook. -func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) { +func (c *Cache[T]) SetInvalidateCallback(hook func(T)) { if hook == nil { // Ensure non-nil hook. - hook = func(Value) {} + hook = func(T) {} } // store hook. 
c.invalid = hook - c.cache.SetInvalidateCallback(func(pkey int64, res result[Value]) { + c.cache.SetInvalidateCallback(func(pkey int64, res *result) { c.cache.Lock() for _, key := range res.Keys { // Delete key->pkey lookup @@ -134,17 +139,19 @@ func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) { c.cache.Unlock() if res.Error != nil { - // Skip error hooks + // Skip value hooks return } - // Call user hook. - hook(res.Value) + // Free result and call hook. + v := getResultValue[T](res) + putResult(res) + hook(v) }) } // IgnoreErrors allows setting a function hook to determine which error types should / not be cached. -func (c *Cache[Value]) IgnoreErrors(ignore func(error) bool) { +func (c *Cache[T]) IgnoreErrors(ignore func(error) bool) { if ignore == nil { ignore = func(err error) bool { return errors.Comparable( @@ -160,11 +167,10 @@ func (c *Cache[Value]) IgnoreErrors(ignore func(error) bool) { } // Load will attempt to load an existing result from the cacche for the given lookup and key parts, else calling the provided load function and caching the result. -func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts ...any) (Value, error) { +func (c *Cache[T]) Load(lookup string, load func() (T, error), keyParts ...any) (T, error) { var ( - zero Value - res result[Value] - ok bool + zero T + res *result ) // Get lookup key info by name. @@ -182,24 +188,22 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts // Look for primary cache key pkeys := keyInfo.pkeys[ckey] - if ok = (len(pkeys) > 0); ok { - var entry *ttl.Entry[int64, result[Value]] - + if len(pkeys) > 0 { // Fetch the result for primary key - entry, ok = c.cache.Cache.Get(pkeys[0]) + entry, ok := c.cache.Cache.Get(pkeys[0]) if ok { // Since the invalidation / eviction hooks acquire a mutex // lock separately, and only at this point are the pkeys // updated, there is a chance that a primary key may return // no matching entry. Hence we have to check for it here. - res = entry.Value + res = entry.Value.(*result) } } // Done with lock c.cache.Unlock() - if !ok { + if res == nil { // Generate fresh result. value, err := load() @@ -209,6 +213,9 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts return zero, err } + // Alloc result. + res = getResult() + // Store error result. res.Error = err @@ -219,6 +226,9 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts key: ckey, }} } else { + // Alloc result. + res = getResult() + // Store value result. res.Value = value @@ -251,22 +261,21 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts } // Return a copy of value from cache - return c.copy(res.Value), nil + return c.copy(getResultValue[T](res)), nil } // Store will call the given store function, and on success store the value in the cache as a positive result. -func (c *Cache[Value]) Store(value Value, store func() error) error { +func (c *Cache[T]) Store(value T, store func() error) error { // Attempt to store this value. if err := store(); err != nil { return err } // Prepare cached result. 
- result := result[Value]{ - Keys: c.lookups.generate(value), - Value: c.copy(value), - Error: nil, - } + result := getResult() + result.Keys = c.lookups.generate(value) + result.Value = c.copy(value) + result.Error = nil var evict func() @@ -293,9 +302,8 @@ func (c *Cache[Value]) Store(value Value, store func() error) error { } // Has checks the cache for a positive result under the given lookup and key parts. -func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool { - var res result[Value] - var ok bool +func (c *Cache[T]) Has(lookup string, keyParts ...any) bool { + var res *result // Get lookup key info by name. keyInfo := c.lookups.get(lookup) @@ -312,29 +320,27 @@ func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool { // Look for primary key for cache key pkeys := keyInfo.pkeys[ckey] - if ok = (len(pkeys) > 0); ok { - var entry *ttl.Entry[int64, result[Value]] - + if len(pkeys) > 0 { // Fetch the result for primary key - entry, ok = c.cache.Cache.Get(pkeys[0]) + entry, ok := c.cache.Cache.Get(pkeys[0]) if ok { // Since the invalidation / eviction hooks acquire a mutex // lock separately, and only at this point are the pkeys // updated, there is a chance that a primary key may return // no matching entry. Hence we have to check for it here. - res = entry.Value + res = entry.Value.(*result) } } // Done with lock c.cache.Unlock() - // Check for non-error result. - return ok && (res.Error == nil) + // Check for result AND non-error result. + return (res != nil && res.Error == nil) } // Invalidate will invalidate any result from the cache found under given lookup and key parts. -func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) { +func (c *Cache[T]) Invalidate(lookup string, keyParts ...any) { // Get lookup key info by name. keyInfo := c.lookups.get(lookup) @@ -351,15 +357,20 @@ func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) { c.cache.InvalidateAll(pkeys...) } -// Clear empties the cache, calling the invalidate callback. -func (c *Cache[Value]) Clear() { c.cache.Clear() } +// Clear empties the cache, calling the invalidate callback where necessary. +func (c *Cache[T]) Clear() { c.Trim(100) } + +// Trim ensures the cache stays within percentage of total capacity, truncating where necessary. +func (c *Cache[T]) Trim(perc float64) { c.cache.Trim(perc) } // store will cache this result under all of its required cache keys. -func (c *Cache[Value]) store(res result[Value]) (evict func()) { +func (c *Cache[T]) store(res *result) (evict func()) { + var toEvict []*result + // Get primary key - pnext := c.next + res.PKey = c.next c.next++ - if pnext > c.next { + if res.PKey > c.next { panic("cache primary key overflow") } @@ -371,15 +382,19 @@ func (c *Cache[Value]) store(res result[Value]) (evict func()) { for _, conflict := range pkeys { // Get the overlapping result with this key. entry, _ := c.cache.Cache.Get(conflict) + confRes := entry.Value.(*result) // From conflicting entry, drop this key, this // will prevent eviction cleanup key confusion. - entry.Value.Keys.drop(key.info.name) + confRes.Keys.drop(key.info.name) - if len(entry.Value.Keys) == 0 { + if len(res.Keys) == 0 { // We just over-wrote the only lookup key for // this value, so we drop its primary key too. c.cache.Cache.Delete(conflict) + + // Add finished result to evict queue. + toEvict = append(toEvict, confRes) } } @@ -388,42 +403,27 @@ func (c *Cache[Value]) store(res result[Value]) (evict func()) { } // Store primary key lookup. 
- pkeys = append(pkeys, pnext) + pkeys = append(pkeys, res.PKey) key.info.pkeys[key.key] = pkeys } - // Store main entry under primary key, using evict hook if needed - c.cache.Cache.SetWithHook(pnext, &ttl.Entry[int64, result[Value]]{ - Expiry: c.expiry(), - Key: pnext, - Value: res, - }, func(_ int64, item *ttl.Entry[int64, result[Value]]) { - evict = func() { c.cache.Evict(item.Key, item.Value) } + // Store main entry under primary key, catch evicted. + c.cache.Cache.SetWithHook(res.PKey, &simple.Entry{ + Key: res.PKey, + Value: res, + }, func(_ int64, item *simple.Entry) { + toEvict = append(toEvict, item.Value.(*result)) }) - return evict -} - -//go:linkname runtime_nanotime runtime.nanotime -func runtime_nanotime() uint64 - -// expiry returns an the next expiry time to use for an entry, -// which is equivalent to time.Now().Add(ttl), or zero if disabled. -func (c *Cache[Value]) expiry() uint64 { - if ttl := c.cache.TTL; ttl > 0 { - return runtime_nanotime() + - uint64(c.cache.TTL) + if len(toEvict) == 0 { + // none evicted. + return nil } - return 0 -} - -type result[Value any] struct { - // keys accessible under - Keys cacheKeys - - // cached value - Value Value - // cached error - Error error + return func() { + for _, res := range toEvict { + // Call evict hook on each entry. + c.cache.Evict(res.PKey, res) + } + } } diff --git a/vendor/codeberg.org/gruf/go-cache/v3/result/key.go b/vendor/codeberg.org/gruf/go-cache/v3/result/key.go index bcf12c3de..cf86c7c30 100644 --- a/vendor/codeberg.org/gruf/go-cache/v3/result/key.go +++ b/vendor/codeberg.org/gruf/go-cache/v3/result/key.go @@ -145,7 +145,7 @@ type structField struct { } // genKey generates a cache key string for given key parts (i.e. serializes them using "go-mangler"). -func (sk structKey) genKey(parts []any) string { +func (sk *structKey) genKey(parts []any) string { // Check this expected no. key parts. if len(parts) != len(sk.fields) { panic(fmt.Sprintf("incorrect no. key parts provided: want=%d received=%d", len(parts), len(sk.fields))) @@ -246,10 +246,12 @@ var bufPool = sync.Pool{ }, } +// getBuf ... func getBuf() *byteutil.Buffer { return bufPool.Get().(*byteutil.Buffer) } +// putBuf ... func putBuf(buf *byteutil.Buffer) { if buf.Cap() > int(^uint16(0)) { return // drop large bufs diff --git a/vendor/codeberg.org/gruf/go-cache/v3/result/pool.go b/vendor/codeberg.org/gruf/go-cache/v3/result/pool.go new file mode 100644 index 000000000..c5cbeda57 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-cache/v3/result/pool.go @@ -0,0 +1,24 @@ +package result + +import "sync" + +// resultPool is a global pool for result +// objects, regardless of cache type. +var resultPool sync.Pool + +// getEntry fetches a result from pool, or allocates new. +func getResult() *result { + v := resultPool.Get() + if v == nil { + return new(result) + } + return v.(*result) +} + +// putResult replaces a result in the pool. +func putResult(r *result) { + r.Keys = nil + r.Value = nil + r.Error = nil + resultPool.Put(r) +} diff --git a/vendor/codeberg.org/gruf/go-cache/v3/simple/cache.go b/vendor/codeberg.org/gruf/go-cache/v3/simple/cache.go new file mode 100644 index 000000000..0224871bc --- /dev/null +++ b/vendor/codeberg.org/gruf/go-cache/v3/simple/cache.go @@ -0,0 +1,454 @@ +package simple + +import ( + "sync" + + "codeberg.org/gruf/go-maps" +) + +// Entry represents an item in the cache. 
+type Entry struct { + Key any + Value any +} + +// Cache is the underlying Cache implementation, providing both the base Cache interface and unsafe access to underlying map to allow flexibility in building your own. +type Cache[Key comparable, Value any] struct { + // Evict is the hook that is called when an item is evicted from the cache. + Evict func(Key, Value) + + // Invalid is the hook that is called when an item's data in the cache is invalidated, includes Add/Set. + Invalid func(Key, Value) + + // Cache is the underlying hashmap used for this cache. + Cache maps.LRUMap[Key, *Entry] + + // Embedded mutex. + sync.Mutex +} + +// New returns a new initialized Cache with given initial length, maximum capacity and item TTL. +func New[K comparable, V any](len, cap int) *Cache[K, V] { + c := new(Cache[K, V]) + c.Init(len, cap) + return c +} + +// Init will initialize this cache with given initial length, maximum capacity and item TTL. +func (c *Cache[K, V]) Init(len, cap int) { + c.SetEvictionCallback(nil) + c.SetInvalidateCallback(nil) + c.Cache.Init(len, cap) +} + +// SetEvictionCallback: implements cache.Cache's SetEvictionCallback(). +func (c *Cache[K, V]) SetEvictionCallback(hook func(K, V)) { + c.locked(func() { c.Evict = hook }) +} + +// SetInvalidateCallback: implements cache.Cache's SetInvalidateCallback(). +func (c *Cache[K, V]) SetInvalidateCallback(hook func(K, V)) { + c.locked(func() { c.Invalid = hook }) +} + +// Get: implements cache.Cache's Get(). +func (c *Cache[K, V]) Get(key K) (V, bool) { + var ( + // did exist in cache? + ok bool + + // cached value. + v V + ) + + c.locked(func() { + var item *Entry + + // Check for item in cache + item, ok = c.Cache.Get(key) + if !ok { + return + } + + // Set item value. + v = item.Value.(V) + }) + + return v, ok +} + +// Add: implements cache.Cache's Add(). +func (c *Cache[K, V]) Add(key K, value V) bool { + var ( + // did exist in cache? + ok bool + + // was entry evicted? + ev bool + + // evicted key values. + evcK K + evcV V + + // hook func ptrs. + evict func(K, V) + ) + + c.locked(func() { + // Check if in cache. + ok = c.Cache.Has(key) + if ok { + return + } + + // Alloc new entry. + new := getEntry() + new.Key = key + new.Value = value + + // Add new entry to cache and catched any evicted item. + c.Cache.SetWithHook(key, new, func(_ K, item *Entry) { + evcK = item.Key.(K) + evcV = item.Value.(V) + ev = true + putEntry(item) + }) + + // Set hook func ptr. + evict = c.Evict + }) + + if ev && evict != nil { + // Pass to eviction hook. + evict(evcK, evcV) + } + + return !ok +} + +// Set: implements cache.Cache's Set(). +func (c *Cache[K, V]) Set(key K, value V) { + var ( + // did exist in cache? + ok bool + + // was entry evicted? + ev bool + + // old value. + oldV V + + // evicted key values. + evcK K + evcV V + + // hook func ptrs. + invalid func(K, V) + evict func(K, V) + ) + + c.locked(func() { + var item *Entry + + // Check for item in cache + item, ok = c.Cache.Get(key) + + if ok { + // Set old value. + oldV = item.Value.(V) + + // Update the existing item. + item.Value = value + } else { + // Alloc new entry. + new := getEntry() + new.Key = key + new.Value = value + + // Add new entry to cache and catched any evicted item. + c.Cache.SetWithHook(key, new, func(_ K, item *Entry) { + evcK = item.Key.(K) + evcV = item.Value.(V) + ev = true + putEntry(item) + }) + } + + // Set hook func ptrs. + invalid = c.Invalid + evict = c.Evict + }) + + if ok && invalid != nil { + // Pass to invalidate hook. 
+ invalid(key, oldV) + } + + if ev && evict != nil { + // Pass to eviction hook. + evict(evcK, evcV) + } +} + +// CAS: implements cache.Cache's CAS(). +func (c *Cache[K, V]) CAS(key K, old V, new V, cmp func(V, V) bool) bool { + var ( + // did exist in cache? + ok bool + + // swapped value. + oldV V + + // hook func ptrs. + invalid func(K, V) + ) + + c.locked(func() { + var item *Entry + + // Check for item in cache + item, ok = c.Cache.Get(key) + if !ok { + return + } + + // Set old value. + oldV = item.Value.(V) + + // Perform the comparison + if !cmp(old, oldV) { + var zero V + oldV = zero + return + } + + // Update value. + item.Value = new + + // Set hook func ptr. + invalid = c.Invalid + }) + + if ok && invalid != nil { + // Pass to invalidate hook. + invalid(key, oldV) + } + + return ok +} + +// Swap: implements cache.Cache's Swap(). +func (c *Cache[K, V]) Swap(key K, swp V) V { + var ( + // did exist in cache? + ok bool + + // swapped value. + oldV V + + // hook func ptrs. + invalid func(K, V) + ) + + c.locked(func() { + var item *Entry + + // Check for item in cache + item, ok = c.Cache.Get(key) + if !ok { + return + } + + // Set old value. + oldV = item.Value.(V) + + // Update value. + item.Value = swp + + // Set hook func ptr. + invalid = c.Invalid + }) + + if ok && invalid != nil { + // Pass to invalidate hook. + invalid(key, oldV) + } + + return oldV +} + +// Has: implements cache.Cache's Has(). +func (c *Cache[K, V]) Has(key K) (ok bool) { + c.locked(func() { + ok = c.Cache.Has(key) + }) + return +} + +// Invalidate: implements cache.Cache's Invalidate(). +func (c *Cache[K, V]) Invalidate(key K) (ok bool) { + var ( + // old value. + oldV V + + // hook func ptrs. + invalid func(K, V) + ) + + c.locked(func() { + var item *Entry + + // Check for item in cache + item, ok = c.Cache.Get(key) + if !ok { + return + } + + // Set old value. + oldV = item.Value.(V) + + // Remove from cache map + _ = c.Cache.Delete(key) + + // Free entry + putEntry(item) + + // Set hook func ptrs. + invalid = c.Invalid + }) + + if ok && invalid != nil { + // Pass to invalidate hook. + invalid(key, oldV) + } + + return +} + +// InvalidateAll: implements cache.Cache's InvalidateAll(). +func (c *Cache[K, V]) InvalidateAll(keys ...K) (ok bool) { + var ( + // deleted items. + items []*Entry + + // hook func ptrs. + invalid func(K, V) + ) + + // Allocate a slice for invalidated. + items = make([]*Entry, 0, len(keys)) + + c.locked(func() { + for x := range keys { + var item *Entry + + // Check for item in cache + item, ok = c.Cache.Get(keys[x]) + if !ok { + continue + } + + // Append this old value. + items = append(items, item) + + // Remove from cache map + _ = c.Cache.Delete(keys[x]) + } + + // Set hook func ptrs. + invalid = c.Invalid + }) + + if invalid != nil { + for x := range items { + // Pass to invalidate hook. + k := items[x].Key.(K) + v := items[x].Value.(V) + invalid(k, v) + + // Free this entry. + putEntry(items[x]) + } + } + + return +} + +// Clear: implements cache.Cache's Clear(). +func (c *Cache[K, V]) Clear() { c.Trim(100) } + +// Trim will truncate the cache to ensure it stays within given percentage of total capacity. +func (c *Cache[K, V]) Trim(perc float64) { + var ( + // deleted items + items []*Entry + + // hook func ptrs. + invalid func(K, V) + ) + + c.locked(func() { + // Calculate number of cache items to truncate. + max := (perc / 100) * float64(c.Cache.Cap()) + diff := c.Cache.Len() - int(max) + if diff <= 0 { + return + } + + // Set hook func ptr. 
+ invalid = c.Invalid + + // Truncate by calculated length. + items = c.truncate(diff, invalid) + }) + + if invalid != nil { + for x := range items { + // Pass to invalidate hook. + k := items[x].Key.(K) + v := items[x].Value.(V) + invalid(k, v) + + // Free this entry. + putEntry(items[x]) + } + } +} + +// Len: implements cache.Cache's Len(). +func (c *Cache[K, V]) Len() (l int) { + c.locked(func() { l = c.Cache.Len() }) + return +} + +// Cap: implements cache.Cache's Cap(). +func (c *Cache[K, V]) Cap() (l int) { + c.locked(func() { l = c.Cache.Cap() }) + return +} + +// locked performs given function within mutex lock (NOTE: UNLOCK IS NOT DEFERRED). +func (c *Cache[K, V]) locked(fn func()) { + c.Lock() + fn() + c.Unlock() +} + +// truncate will truncate the cache by given size, returning deleted items. +func (c *Cache[K, V]) truncate(sz int, hook func(K, V)) []*Entry { + if hook == nil { + // No hook to execute, simply release all truncated entries. + c.Cache.Truncate(sz, func(_ K, item *Entry) { putEntry(item) }) + return nil + } + + // Allocate a slice for deleted. + deleted := make([]*Entry, 0, sz) + + // Truncate and catch all deleted k-v pairs. + c.Cache.Truncate(sz, func(_ K, item *Entry) { + deleted = append(deleted, item) + }) + + return deleted +} diff --git a/vendor/codeberg.org/gruf/go-cache/v3/simple/pool.go b/vendor/codeberg.org/gruf/go-cache/v3/simple/pool.go new file mode 100644 index 000000000..2fc99ab0f --- /dev/null +++ b/vendor/codeberg.org/gruf/go-cache/v3/simple/pool.go @@ -0,0 +1,23 @@ +package simple + +import "sync" + +// entryPool is a global pool for Entry +// objects, regardless of cache type. +var entryPool sync.Pool + +// getEntry fetches an Entry from pool, or allocates new. +func getEntry() *Entry { + v := entryPool.Get() + if v == nil { + return new(Entry) + } + return v.(*Entry) +} + +// putEntry replaces an Entry in the pool. +func putEntry(e *Entry) { + e.Key = nil + e.Value = nil + entryPool.Put(e) +} diff --git a/vendor/codeberg.org/gruf/go-cache/v3/ttl/ttl.go b/vendor/codeberg.org/gruf/go-cache/v3/ttl/ttl.go index af108e336..106d675c8 100644 --- a/vendor/codeberg.org/gruf/go-cache/v3/ttl/ttl.go +++ b/vendor/codeberg.org/gruf/go-cache/v3/ttl/ttl.go @@ -15,7 +15,7 @@ type Entry[Key comparable, Value any] struct { Expiry uint64 } -// Cache is the underlying Cache implementation, providing both the base Cache interface and unsafe access to underlying map to allow flexibility in building your own. +// Cache is the underlying TTLCache implementation, providing both the base Cache interface and unsafe access to underlying map to allow flexibility in building your own. type Cache[Key comparable, Value any] struct { // TTL is the cache item TTL. TTL time.Duration |
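
Taken together, the library changes above split the old TTL-only API in two: cache.New() now returns a plain cache.Cache{} backed by the new simple package, while cache.NewTTL() returns a cache.TTLCache{} backed by the existing ttl package. A small usage sketch against the constructors shown in the cache.go hunk above; the key/value types, sizes and TTL chosen here are arbitrary:

```go
package main

import (
	"fmt"
	"time"

	cache "codeberg.org/gruf/go-cache/v3"
)

func main() {
	// Plain cache: initial length 0, capacity 100, no TTL involved.
	c := cache.New[string, int](0, 100)
	c.Set("answer", 42)
	if v, ok := c.Get("answer"); ok {
		fmt.Println("cached:", v)
	}

	// TTL cache: same sizing, plus a 5 minute per-item TTL and a
	// background eviction sweep every minute.
	tc := cache.NewTTL[string, int](0, 100, 5*time.Minute)
	tc.Start(time.Minute)
	defer tc.Stop()
	tc.Set("hello", 1)
}
```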