author     2022-11-11 12:18:38 +0100
committer  2022-11-11 12:18:38 +0100
commit     edcee14d07bae129e2d1a06d99c30fc6f659ff5e (patch)
tree       5b9d605654347fe104c55bf4b0e7fb1e1533e2a0 /vendor/codeberg.org/gruf/go-cache
parent     [feature] S3: add config flag to proxy S3 media (#1014) (diff)
download   gotosocial-edcee14d07bae129e2d1a06d99c30fc6f659ff5e.tar.xz
[feature] Read + Write tombstones for deleted Actors (#1005)
* [feature] Read + Write tombstones for deleted Actors
* copyTombstone
* update to use resultcache instead of old ttl cache
Signed-off-by: kim <grufwub@gmail.com>
* update go-cache library to fix result cache capacity / ordering bugs
Signed-off-by: kim <grufwub@gmail.com>
* bump go-cache/v3 to v3.1.6 to fix bugs
Signed-off-by: kim <grufwub@gmail.com>
* switch on status code
* better explain ErrGone reasoning
Signed-off-by: kim <grufwub@gmail.com>
Co-authored-by: kim <grufwub@gmail.com>
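
As context for the change above: the sketch below shows roughly how the vendored result cache is meant to be driven, which is the pattern the tombstone feature relies on. The `Tombstone` type, its fields, and the key values are invented for illustration; only the `result` package API comes from this diff.

	package main

	import (
		"fmt"

		"codeberg.org/gruf/go-cache/v3/result"
	)

	// Tombstone is a hypothetical record type standing in for
	// GoToSocial's actual tombstone model (not part of this diff).
	type Tombstone struct {
		ID  string
		URI string
	}

	func main() {
		// Cache tombstones under two lookups ("ID" and "URI"); the copy
		// function hands callers their own shallow copy of cached values.
		cache := result.New([]string{"ID", "URI"}, func(t *Tombstone) *Tombstone {
			t2 := *t
			return &t2
		})

		uri := "https://example.org/users/gone"

		// Load consults the "URI" lookup first; on a miss it runs the
		// loader (e.g. a DB query) and caches the value, or the error,
		// so repeated dereferences of a deleted actor stay cheap.
		tomb, err := cache.Load("URI", func() (*Tombstone, error) {
			return &Tombstone{ID: "01ABC", URI: uri}, nil
		}, uri)
		if err != nil {
			panic(err)
		}

		// A successful load is indexed under every configured lookup.
		fmt.Println(cache.Has("ID", tomb.ID)) // true
	}
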
Diffstat (limited to 'vendor/codeberg.org/gruf/go-cache')
-rw-r--r--  vendor/codeberg.org/gruf/go-cache/v3/LICENSE          |   9
-rw-r--r--  vendor/codeberg.org/gruf/go-cache/v3/result/cache.go  | 341
-rw-r--r--  vendor/codeberg.org/gruf/go-cache/v3/result/error.go  |  22
-rw-r--r--  vendor/codeberg.org/gruf/go-cache/v3/result/key.go    | 184
-rw-r--r--  vendor/codeberg.org/gruf/go-cache/v3/ttl/schedule.go  |  20
-rw-r--r--  vendor/codeberg.org/gruf/go-cache/v3/ttl/ttl.go       | 412
6 files changed, 988 insertions, 0 deletions
diff --git a/vendor/codeberg.org/gruf/go-cache/v3/LICENSE b/vendor/codeberg.org/gruf/go-cache/v3/LICENSE
new file mode 100644
index 000000000..e4163ae35
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-cache/v3/LICENSE
@@ -0,0 +1,9 @@
+MIT License
+
+Copyright (c) 2022 gruf
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/codeberg.org/gruf/go-cache/v3/result/cache.go b/vendor/codeberg.org/gruf/go-cache/v3/result/cache.go
new file mode 100644
index 000000000..69f5593e3
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-cache/v3/result/cache.go
@@ -0,0 +1,341 @@
+package result
+
+import (
+	"reflect"
+	"time"
+
+	"codeberg.org/gruf/go-cache/v3/ttl"
+)
+
+// Cache provides a result cache, with values keyed under multiple precomputed struct-field lookups.
+type Cache[Value any] struct {
+	cache   ttl.Cache[int64, result[Value]] // underlying result cache
+	lookups structKeys                      // pre-determined struct lookups
+	copy    func(Value) Value               // copies a Value type
+	next    int64                           // update key counter
+}
+
+// New returns a new initialized Cache, with given lookups and underlying value copy function.
+func New[Value any](lookups []string, copy func(Value) Value) *Cache[Value] {
+	return NewSized(lookups, copy, 64)
+}
+
+// NewSized returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity.
+func NewSized[Value any](lookups []string, copy func(Value) Value, cap int) *Cache[Value] {
+	var z Value
+
+	// Determine generic type
+	t := reflect.TypeOf(z)
+
+	// Iteratively deref pointer type
+	for t.Kind() == reflect.Pointer {
+		t = t.Elem()
+	}
+
+	// Ensure that this is a struct type
+	if t.Kind() != reflect.Struct {
+		panic("generic parameter type must be struct (or ptr to)")
+	}
+
+	// Allocate new cache object
+	c := &Cache[Value]{copy: copy}
+	c.lookups = make([]keyFields, len(lookups))
+
+	for i, lookup := range lookups {
+		// Generate keyed field info for lookup
+		c.lookups[i].pkeys = make(map[string]int64, cap)
+		c.lookups[i].lookup = lookup
+		c.lookups[i].populate(t)
+	}
+
+	// Create and initialize underlying cache
+	c.cache.Init(0, cap, 0)
+	c.SetEvictionCallback(nil)
+	c.SetInvalidateCallback(nil)
+	return c
+}
+
+// Start will start the cache background eviction routine with given sweep frequency. If already
+// running or a freq <= 0 is provided, this is a no-op. This will block until the eviction routine has started.
+func (c *Cache[Value]) Start(freq time.Duration) bool {
+	return c.cache.Start(freq)
+}
+
+// Stop will stop the cache background eviction routine. If not running this
+// is a no-op. This will block until the eviction routine has stopped.
+func (c *Cache[Value]) Stop() bool {
+	return c.cache.Stop()
+}
+
+// SetTTL sets the cache item TTL. Update can be specified to force updates of existing items
+// in the cache; this will simply add the change in TTL to their current expiry time.
+func (c *Cache[Value]) SetTTL(ttl time.Duration, update bool) {
+	c.cache.SetTTL(ttl, update)
+}
+
+// SetEvictionCallback sets the eviction callback to the provided hook.
+func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) {
+	if hook == nil {
+		// Ensure non-nil hook.
+		hook = func(Value) {}
+	}
+	c.cache.SetEvictionCallback(func(item *ttl.Entry[int64, result[Value]]) {
+		for _, key := range item.Value.Keys {
+			// Delete key->pkey lookup
+			pkeys := key.fields.pkeys
+			delete(pkeys, key.value)
+		}
+
+		if item.Value.Error != nil {
+			// Skip error hooks
+			return
+		}
+
+		// Call user hook.
+		hook(item.Value.Value)
+	})
+}
+
+// SetInvalidateCallback sets the invalidate callback to the provided hook.
+func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) {
+	if hook == nil {
+		// Ensure non-nil hook.
+		hook = func(Value) {}
+	}
+	c.cache.SetInvalidateCallback(func(item *ttl.Entry[int64, result[Value]]) {
+		for _, key := range item.Value.Keys {
+			if key.fields != nil {
+				// Delete key->pkey lookup
+				pkeys := key.fields.pkeys
+				delete(pkeys, key.value)
+			}
+		}
+
+		if item.Value.Error != nil {
+			// Skip error hooks
+			return
+		}
+
+		// Call user hook.
+		hook(item.Value.Value)
+	})
+}
+
+// Load fetches the cached value for given lookup and key parts, else generates it from the given load function (caching the result, or error).
+func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts ...any) (Value, error) {
+	var (
+		zero Value
+		res  result[Value]
+	)
+
+	// Get lookup map by name.
+	kfields := c.getFields(lookup)
+	lmap := kfields.pkeys
+
+	// Generate cache key string.
+	ckey := genkey(keyParts...)
+
+	// Acquire cache lock
+	c.cache.Lock()
+
+	// Look for primary key
+	pkey, ok := lmap[ckey]
+
+	if ok {
+		// Fetch the result for primary key
+		entry, _ := c.cache.Cache.Get(pkey)
+		res = entry.Value
+	}
+
+	// Done with lock
+	c.cache.Unlock()
+
+	if !ok {
+		// Generate new result from fresh load.
+		res.Value, res.Error = load()
+
+		if res.Error != nil {
+			// This load returned an error, only
+			// store this item under provided key.
+			res.Keys = []cacheKey{{
+				value:  ckey,
+				fields: kfields,
+			}}
+		} else {
+			// This was a successful load, generate keys.
+			res.Keys = c.lookups.generate(res.Value)
+		}
+
+		// Acquire cache lock.
+		c.cache.Lock()
+		defer c.cache.Unlock()
+
+		// Attempt to cache this result.
+		if key, ok := c.storeResult(res); !ok {
+			return zero, ConflictError{key}
+		}
+	}
+
+	// Catch and return error
+	if res.Error != nil {
+		return zero, res.Error
+	}
+
+	// Return a copy of value from cache
+	return c.copy(res.Value), nil
+}
+
+// Store calls the given store function, and on success caches the value under its generated lookup keys.
+func (c *Cache[Value]) Store(value Value, store func() error) error {
+	// Attempt to store this value.
+	if err := store(); err != nil {
+		return err
+	}
+
+	// Prepare cached result.
+	result := result[Value]{
+		Keys:  c.lookups.generate(value),
+		Value: c.copy(value),
+		Error: nil,
+	}
+
+	// Acquire cache lock.
+	c.cache.Lock()
+	defer c.cache.Unlock()
+
+	// Attempt to cache result, only return conflict
+	// error if the appropriate flag has been set.
+	if key, ok := c.storeResult(result); !ok {
+		return ConflictError{key}
+	}
+
+	return nil
+}
+
+// Has checks the cache for a non-error value under given lookup and key parts.
+func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool {
+	var res result[Value]
+
+	// Get lookup map by name.
+	kfields := c.getFields(lookup)
+	lmap := kfields.pkeys
+
+	// Generate cache key string.
+	ckey := genkey(keyParts...)
+
+	// Acquire cache lock
+	c.cache.Lock()
+
+	// Look for primary key
+	pkey, ok := lmap[ckey]
+
+	if ok {
+		// Fetch the result for primary key
+		entry, _ := c.cache.Cache.Get(pkey)
+		res = entry.Value
+	}
+
+	// Done with lock
+	c.cache.Unlock()
+
+	// Check for non-error result.
+	return ok && (res.Error == nil)
+}
+
+// Invalidate invalidates the cached result (if any) under given lookup and key parts.
+func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) {
+	// Get lookup map by name.
+	kfields := c.getFields(lookup)
+	lmap := kfields.pkeys
+
+	// Generate cache key string.
+	ckey := genkey(keyParts...)
+
+	// Look for primary key
+	c.cache.Lock()
+	pkey, ok := lmap[ckey]
+	c.cache.Unlock()
+
+	if !ok {
+		return
+	}
+
+	// Invalidate by primary key
+	c.cache.Invalidate(pkey)
+}
+
+// Clear empties the cache, calling the invalidate callback.
+func (c *Cache[Value]) Clear() {
+	c.cache.Clear()
+}
+
+// Len returns the current length of the cache.
+func (c *Cache[Value]) Len() int {
+	return c.cache.Cache.Len()
+}
+
+// Cap returns the maximum capacity of the cache.
+func (c *Cache[Value]) Cap() int {
+	return c.cache.Cache.Cap()
+}
+
+func (c *Cache[Value]) getFields(name string) *keyFields {
+	for _, k := range c.lookups {
+		// Find key fields with name
+		if k.lookup == name {
+			return &k
+		}
+	}
+	panic("invalid lookup: " + name)
+}
+
+func (c *Cache[Value]) storeResult(res result[Value]) (string, bool) {
+	for _, key := range res.Keys {
+		pkeys := key.fields.pkeys
+
+		// Look for cache primary key
+		pkey, ok := pkeys[key.value]
+
+		if ok {
+			// Look for overlap with non-error keys,
+			// as an overlap for some but not all keys
+			// could produce inconsistent results.
+			entry, _ := c.cache.Cache.Get(pkey)
+			if entry.Value.Error == nil {
+				return key.value, false
+			}
+		}
+	}
+
+	// Get primary key
+	pkey := c.next
+	c.next++
+
+	// Store all primary key lookups
+	for _, key := range res.Keys {
+		pkeys := key.fields.pkeys
+		pkeys[key.value] = pkey
+	}
+
+	// Store main entry under primary key, using evict hook if needed
+	c.cache.Cache.SetWithHook(pkey, &ttl.Entry[int64, result[Value]]{
+		Expiry: time.Now().Add(c.cache.TTL),
+		Key:    pkey,
+		Value:  res,
+	}, func(_ int64, item *ttl.Entry[int64, result[Value]]) {
+		c.cache.Evict(item)
+	})
+
+	return "", true
+}
+
+type result[Value any] struct {
+	// keys accessible under
+	Keys []cacheKey
+
+	// cached value
+	Value Value
+
+	// cached error
+	Error error
+}
diff --git a/vendor/codeberg.org/gruf/go-cache/v3/result/error.go b/vendor/codeberg.org/gruf/go-cache/v3/result/error.go
new file mode 100644
index 000000000..fa26083bf
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-cache/v3/result/error.go
@@ -0,0 +1,22 @@
+package result
+
+import "errors"
+
+// ErrUnknownLookup is returned when an unknown lookup identifier is provided.
+var ErrUnknownLookup = errors.New("unknown lookup identifier")
+
+// IsConflictErr returns whether error is due to key conflict.
+func IsConflictErr(err error) bool {
+	_, ok := err.(ConflictError)
+	return ok
+}
+
+// ConflictError is returned on cache key conflict.
+type ConflictError struct {
+	Key string
+}
+
+// Error returns the message for this key conflict error.
+func (c ConflictError) Error() string {
+	return "cache conflict for key \"" + c.Key + "\""
+}
diff --git a/vendor/codeberg.org/gruf/go-cache/v3/result/key.go b/vendor/codeberg.org/gruf/go-cache/v3/result/key.go
new file mode 100644
index 000000000..ec58e0ef9
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-cache/v3/result/key.go
@@ -0,0 +1,184 @@
+package result
+
+import (
+	"reflect"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+
+	"codeberg.org/gruf/go-byteutil"
+	"codeberg.org/gruf/go-mangler"
+)
+
+// structKeys provides convenience methods for a list
+// of struct field combinations used for cache keys.
+type structKeys []keyFields
+
+// get fetches the key-fields for given lookup (else, panics).
+func (sk structKeys) get(lookup string) *keyFields {
+	for i := range sk {
+		if sk[i].lookup == lookup {
+			return &sk[i]
+		}
+	}
+	panic("unknown lookup: \"" + lookup + "\"")
+}
+
+// generate will calculate the value string for each required
+// cache key as laid-out by the receiving structKeys{}.
+func (sk structKeys) generate(a any) []cacheKey {
+	// Get reflected value in order
+	// to access the struct fields
+	v := reflect.ValueOf(a)
+
+	// Iteratively deref pointer value
+	for v.Kind() == reflect.Pointer {
+		if v.IsNil() {
+			panic("nil ptr")
+		}
+		v = v.Elem()
+	}
+
+	// Preallocate expected slice of keys
+	keys := make([]cacheKey, len(sk))
+
+	// Acquire byte buffer
+	buf := bufpool.Get().(*byteutil.Buffer)
+	defer bufpool.Put(buf)
+
+	for i := range sk {
+		// Reset buffer
+		buf.B = buf.B[:0]
+
+		// Set the key-fields reference
+		keys[i].fields = &sk[i]
+
+		// Calculate cache-key value
+		keys[i].populate(buf, v)
+	}
+
+	return keys
+}
+
+// cacheKey represents an actual cache key.
+type cacheKey struct {
+	// value is the actual string representing
+	// this cache key for hashmap lookups.
+	value string
+
+	// fields is a read-only reference (i.e. we should
+	// NOT be modifying it, only using it for reference)
+	// to the struct fields encapsulated by this cache key.
+	fields *keyFields
+}
+
+// populate will calculate the cache key's value string for given
+// value's reflected information. Passed buffer is for string building.
+func (k *cacheKey) populate(buf *byteutil.Buffer, v reflect.Value) {
+	// Append each field value to buffer.
+	for _, idx := range k.fields.fields {
+		fv := v.Field(idx)
+		fi := fv.Interface()
+		buf.B = mangler.Append(buf.B, fi)
+		buf.B = append(buf.B, '.')
+	}
+
+	// Drop last '.'
+	buf.Truncate(1)
+
+	// Create string copy from buf
+	k.value = string(buf.B)
+}
+
+// keyFields represents a list of struct fields
+// encompassed in a single cache key, the string name
+// of the lookup, and the lookup map to primary keys.
+type keyFields struct {
+	// lookup is the calculated (well, provided)
+	// cache key lookup, consisting of dot sep'd
+	// struct field names.
+	lookup string
+
+	// fields is a slice of runtime struct field
+	// indices, of the fields encompassed by this key.
+	fields []int
+
+	// pkeys is a lookup of stored struct key values
+	// to the primary cache lookup key (int64).
+	pkeys map[string]int64
+}
+
+// populate will populate this keyFields{} object's .fields member by determining
+// the field names from the given lookup, and querying given reflected type to get
+// the runtime field indices for each of the fields. This speeds up future value lookups.
+func (kf *keyFields) populate(t reflect.Type) {
+	// Split dot-separated lookup to get
+	// the individual struct field names
+	names := strings.Split(kf.lookup, ".")
+	if len(names) == 0 {
+		panic("no key fields specified")
+	}
+
+	// Pre-allocate slice of expected length
+	kf.fields = make([]int, len(names))
+
+	for i, name := range names {
+		// Get field info for given name
+		ft, ok := t.FieldByName(name)
+		if !ok {
+			panic("no field found for name: \"" + name + "\"")
+		}
+
+		// Check field is usable
+		if !isExported(name) {
+			panic("field must be exported")
+		}
+
+		// Set the runtime field index
+		kf.fields[i] = ft.Index[0]
+	}
+}
+
+// genkey generates a cache key for given key values.
+func genkey(parts ...any) string {
+	if len(parts) < 1 {
+		// Panic to prevent annoying usecase
+		// where user forgets to pass lookup
+		// and instead only passes a key part,
+		// e.g. cache.Get("key")
+		// which then always returns false.
+		panic("no key parts provided")
+	}
+
+	// Acquire buffer and reset
+	buf := bufpool.Get().(*byteutil.Buffer)
+	defer bufpool.Put(buf)
+	buf.Reset()
+
+	// Encode each key part
+	for _, part := range parts {
+		buf.B = mangler.Append(buf.B, part)
+		buf.B = append(buf.B, '.')
+	}
+
+	// Drop last '.'
+	buf.Truncate(1)
+
+	// Return string copy
+	return string(buf.B)
+}
+
+// isExported checks whether the given name is exported.
+func isExported(fnName string) bool {
+	r, _ := utf8.DecodeRuneInString(fnName)
+	return unicode.IsUpper(r)
+}
+
+// bufpool provides a memory pool of byte
+// buffers used when encoding key types.
+var bufpool = sync.Pool{
+	New: func() any {
+		return &byteutil.Buffer{B: make([]byte, 0, 512)}
+	},
+}
diff --git a/vendor/codeberg.org/gruf/go-cache/v3/ttl/schedule.go b/vendor/codeberg.org/gruf/go-cache/v3/ttl/schedule.go
new file mode 100644
index 000000000..111de0757
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-cache/v3/ttl/schedule.go
@@ -0,0 +1,20 @@
+package ttl
+
+import (
+	"time"
+
+	"codeberg.org/gruf/go-sched"
+)
+
+// scheduler is the global cache runtime scheduler
+// for handling regular cache evictions.
+var scheduler sched.Scheduler
+
+// schedule will add the given sweep routine to the global scheduler, and start the global scheduler if not already running.
+func schedule(sweep func(time.Time), freq time.Duration) func() {
+	if !scheduler.Running() {
+		// ensure running
+		_ = scheduler.Start()
+	}
+	return scheduler.Schedule(sched.NewJob(sweep).Every(freq))
+}
diff --git a/vendor/codeberg.org/gruf/go-cache/v3/ttl/ttl.go b/vendor/codeberg.org/gruf/go-cache/v3/ttl/ttl.go
new file mode 100644
index 000000000..f830ed3d2
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-cache/v3/ttl/ttl.go
@@ -0,0 +1,412 @@
+package ttl
+
+import (
+	"sync"
+	"time"
+
+	"codeberg.org/gruf/go-maps"
+)
+
+// Entry represents an item in the cache, with its currently calculated Expiry time.
+type Entry[Key comparable, Value any] struct {
+	Key    Key
+	Value  Value
+	Expiry time.Time
+}
+
+// Cache is the underlying Cache implementation, providing both the base Cache interface and unsafe access to the underlying map to allow flexibility in building your own.
+type Cache[Key comparable, Value any] struct {
+	// TTL is the cache item TTL.
+	TTL time.Duration
+
+	// Evict is the hook that is called when an item is evicted from the cache, including manual delete.
+	Evict func(*Entry[Key, Value])
+
+	// Invalid is the hook that is called when an item's data in the cache is invalidated.
+	Invalid func(*Entry[Key, Value])
+
+	// Cache is the underlying hashmap used for this cache.
+	Cache maps.LRUMap[Key, *Entry[Key, Value]]
+
+	// stop is the eviction routine cancel func.
+	stop func()
+
+	// pool is a memory pool of entry objects.
+	pool []*Entry[Key, Value]
+
+	// Embedded mutex.
+	sync.Mutex
+}
+
+// New returns a new initialized Cache with given initial length, maximum capacity and item TTL.
+func New[K comparable, V any](len, cap int, ttl time.Duration) *Cache[K, V] {
+	c := new(Cache[K, V])
+	c.Init(len, cap, ttl)
+	return c
+}
+
+// Init will initialize this cache with given initial length, maximum capacity and item TTL.
+func (c *Cache[K, V]) Init(len, cap int, ttl time.Duration) {
+	if ttl <= 0 {
+		// Default duration
+		ttl = time.Second * 5
+	}
+	c.TTL = ttl
+	c.SetEvictionCallback(nil)
+	c.SetInvalidateCallback(nil)
+	c.Cache.Init(len, cap)
+}
+
+// Start: implements cache.Cache's Start().
+func (c *Cache[K, V]) Start(freq time.Duration) (ok bool) {
+	// Nothing to start
+	if freq <= 0 {
+		return false
+	}
+
+	// Safely start
+	c.Lock()
+
+	if ok = c.stop == nil; ok {
+		// Not yet running, schedule us
+		c.stop = schedule(c.Sweep, freq)
+	}
+
+	// Done with lock
+	c.Unlock()
+
+	return
+}
+
+// Stop: implements cache.Cache's Stop().
+func (c *Cache[K, V]) Stop() (ok bool) {
+	// Safely stop
+	c.Lock()
+
+	if ok = c.stop != nil; ok {
+		// We're running, cancel evicts
+		c.stop()
+		c.stop = nil
+	}
+
+	// Done with lock
+	c.Unlock()
+
+	return
+}
+
+// Sweep attempts to evict expired items (with callback!) from cache.
+func (c *Cache[K, V]) Sweep(now time.Time) {
+	var after int
+
+	// Sweep within lock
+	c.Lock()
+	defer c.Unlock()
+
+	// Sentinel value
+	after = -1
+
+	// The cache will be ordered by expiry date, we iterate until we reach the index of
+	// the youngest item that has expired, as all succeeding items will also be expired.
+	c.Cache.RangeIf(0, c.Cache.Len(), func(i int, _ K, item *Entry[K, V]) bool {
+		if now.After(item.Expiry) {
+			after = i
+
+			// All older than this (including) can be dropped
+			return false
+		}
+
+		// Continue looping
+		return true
+	})
+
+	if after == -1 {
+		// No truncation needed
+		return
+	}
+
+	// Truncate items, calling eviction hook
+	c.truncate(c.Cache.Len()-after, c.Evict)
+}
+
+// SetEvictionCallback: implements cache.Cache's SetEvictionCallback().
+func (c *Cache[K, V]) SetEvictionCallback(hook func(*Entry[K, V])) {
+	// Ensure non-nil hook
+	if hook == nil {
+		hook = func(*Entry[K, V]) {}
+	}
+
+	// Update within lock
+	c.Lock()
+	defer c.Unlock()
+
+	// Update hook
+	c.Evict = hook
+}
+
+// SetInvalidateCallback: implements cache.Cache's SetInvalidateCallback().
+func (c *Cache[K, V]) SetInvalidateCallback(hook func(*Entry[K, V])) {
+	// Ensure non-nil hook
+	if hook == nil {
+		hook = func(*Entry[K, V]) {}
+	}
+
+	// Update within lock
+	c.Lock()
+	defer c.Unlock()
+
+	// Update hook
+	c.Invalid = hook
+}
+
+// SetTTL: implements cache.Cache's SetTTL().
+func (c *Cache[K, V]) SetTTL(ttl time.Duration, update bool) {
+	if ttl < 0 {
+		panic("ttl must be greater than zero")
+	}
+
+	// Update within lock
+	c.Lock()
+	defer c.Unlock()
+
+	// Set updated TTL
+	diff := ttl - c.TTL
+	c.TTL = ttl
+
+	if update {
+		// Update existing cache entries with new expiry time
+		c.Cache.Range(0, c.Cache.Len(), func(i int, key K, item *Entry[K, V]) {
+			item.Expiry = item.Expiry.Add(diff)
+		})
+	}
+}
+
+// Get: implements cache.Cache's Get().
+func (c *Cache[K, V]) Get(key K) (V, bool) {
+	// Read within lock
+	c.Lock()
+	defer c.Unlock()
+
+	// Check for item in cache
+	item, ok := c.Cache.Get(key)
+	if !ok {
+		var value V
+		return value, false
+	}
+
+	// Update item expiry and return
+	item.Expiry = time.Now().Add(c.TTL)
+	return item.Value, true
+}
+
+// Add: implements cache.Cache's Add().
+func (c *Cache[K, V]) Add(key K, value V) bool {
+	// Write within lock
+	c.Lock()
+	defer c.Unlock()
+
+	// If already cached, return
+	if c.Cache.Has(key) {
+		return false
+	}
+
+	// Alloc new item
+	item := c.alloc()
+	item.Key = key
+	item.Value = value
+	item.Expiry = time.Now().Add(c.TTL)
+
+	var hook func(K, *Entry[K, V])
+
+	if c.Evict != nil {
+		// Pass evicted entry to user hook
+		hook = func(_ K, item *Entry[K, V]) {
+			c.Evict(item)
+		}
+	}
+
+	// Place new item in the map with hook
+	c.Cache.SetWithHook(key, item, hook)
+
+	return true
+}
+
+// Set: implements cache.Cache's Set().
+func (c *Cache[K, V]) Set(key K, value V) {
+	// Write within lock
+	c.Lock()
+	defer c.Unlock()
+
+	// Check if already exists
+	item, ok := c.Cache.Get(key)
+
+	if ok {
+		if c.Invalid != nil {
+			// Invalidate existing
+			c.Invalid(item)
+		}
+	} else {
+		// Allocate new item
+		item = c.alloc()
+		item.Key = key
+		c.Cache.Set(key, item)
+	}
+
+	// Update the item value + expiry
+	item.Expiry = time.Now().Add(c.TTL)
+	item.Value = value
+}
+
+// CAS: implements cache.Cache's CAS().
+func (c *Cache[K, V]) CAS(key K, old V, new V, cmp func(V, V) bool) bool {
+	// CAS within lock
+	c.Lock()
+	defer c.Unlock()
+
+	// Check for item in cache
+	item, ok := c.Cache.Get(key)
+	if !ok || !cmp(item.Value, old) {
+		return false
+	}
+
+	if c.Invalid != nil {
+		// Invalidate item
+		c.Invalid(item)
+	}
+
+	// Update item + expiry
+	item.Value = new
+	item.Expiry = time.Now().Add(c.TTL)
+
+	return ok
+}
+
+// Swap: implements cache.Cache's Swap().
+func (c *Cache[K, V]) Swap(key K, swp V) V {
+	// Swap within lock
+	c.Lock()
+	defer c.Unlock()
+
+	// Check for item in cache
+	item, ok := c.Cache.Get(key)
+	if !ok {
+		var value V
+		return value
+	}
+
+	if c.Invalid != nil {
+		// Invalidate old
+		c.Invalid(item)
+	}
+
+	old := item.Value
+
+	// Update item + expiry
+	item.Value = swp
+	item.Expiry = time.Now().Add(c.TTL)
+
+	return old
+}
+
+// Has: implements cache.Cache's Has().
+func (c *Cache[K, V]) Has(key K) bool {
+	c.Lock()
+	ok := c.Cache.Has(key)
+	c.Unlock()
+	return ok
+}
+
+// Invalidate: implements cache.Cache's Invalidate().
+func (c *Cache[K, V]) Invalidate(key K) bool {
+	// Delete within lock
+	c.Lock()
+	defer c.Unlock()
+
+	// Check if we have item with key
+	item, ok := c.Cache.Get(key)
+	if !ok {
+		return false
+	}
+
+	// Remove from cache map
+	_ = c.Cache.Delete(key)
+
+	if c.Invalid != nil {
+		// Invalidate item
+		c.Invalid(item)
+	}
+
+	// Return item to pool
+	c.free(item)
+
+	return true
+}
+
+// Clear: implements cache.Cache's Clear().
+func (c *Cache[K, V]) Clear() {
+	c.Lock()
+	defer c.Unlock()
+	c.truncate(c.Cache.Len(), c.Invalid)
+}
+
+// Len: implements cache.Cache's Len().
+func (c *Cache[K, V]) Len() int {
+	c.Lock()
+	l := c.Cache.Len()
+	c.Unlock()
+	return l
+}
+
+// Cap: implements cache.Cache's Cap().
+func (c *Cache[K, V]) Cap() int {
+	c.Lock()
+	l := c.Cache.Cap()
+	c.Unlock()
+	return l
+}
+
+// truncate will call Cache.Truncate(sz), and if provided a hook will temporarily store deleted items before passing them to the hook. This is required in order to prevent cache writes during .Truncate().
+func (c *Cache[K, V]) truncate(sz int, hook func(*Entry[K, V])) {
+	if hook == nil {
+		// No hook was provided, we can simply truncate and free items immediately.
+		c.Cache.Truncate(sz, func(_ K, item *Entry[K, V]) { c.free(item) })
+		return
+	}
+
+	// Store list of deleted items for later callbacks
+	deleted := make([]*Entry[K, V], 0, sz)
+
+	// Truncate and store list of deleted items
+	c.Cache.Truncate(sz, func(_ K, item *Entry[K, V]) {
+		deleted = append(deleted, item)
+	})
+
+	// Pass each deleted to hook, then free
+	for _, item := range deleted {
+		hook(item)
+		c.free(item)
+	}
+}
+
+// alloc will acquire cache entry from pool, or allocate new.
+func (c *Cache[K, V]) alloc() *Entry[K, V] {
+	if len(c.pool) == 0 {
+		return &Entry[K, V]{}
+	}
+	idx := len(c.pool) - 1
+	e := c.pool[idx]
+	c.pool = c.pool[:idx]
+	return e
+}
+
+// free will reset entry fields and place back in pool.
+func (c *Cache[K, V]) free(e *Entry[K, V]) {
+	var (
+		zk K
+		zv V
+	)
+	e.Key = zk
+	e.Value = zv
+	e.Expiry = time.Time{}
+	c.pool = append(c.pool, e)
+}
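
The `ttl` package above can also be used on its own. A minimal usage sketch, assuming only the API shown in this diff (the key/value types and durations here are arbitrary examples):

	package main

	import (
		"fmt"
		"time"

		"codeberg.org/gruf/go-cache/v3/ttl"
	)

	func main() {
		// A string -> int cache with initial length 0, maximum
		// capacity 100, and a 5-minute item TTL.
		cache := ttl.New[string, int](0, 100, 5*time.Minute)

		// Start the background sweep; each sweep evicts items whose
		// expiry has passed, calling the eviction hook (if set).
		_ = cache.Start(time.Minute)
		defer cache.Stop()

		cache.Set("answer", 42)

		// Get refreshes the item's expiry on access.
		if v, ok := cache.Get("answer"); ok {
			fmt.Println(v) // 42
		}

		// Invalidate removes the item, calling the invalidate hook.
		_ = cache.Invalidate("answer")
	}
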