| | |
|---|---|
| author | 2025-04-26 09:56:15 +0000 |
| committer | 2025-04-26 09:56:15 +0000 |
| commit | 6a6a4993338262f87df34c9be051bfaac75c1829 (patch) |
| tree | bfbda090dc4b25efdd34145c016d7cc7b9c14d6e /internal/cache/wrappers.go |
| parent | [chore] Move deps to code.superseriousbusiness.org (#4054) (diff) |
| download | gotosocial-6a6a4993338262f87df34c9be051bfaac75c1829.tar.xz |
[performance] rewrite timelines to rely on new timeline cache type (#3941)
* start work rewriting timeline cache type
* further work rewriting timeline caching
* more work integrating new timeline code
* remove old code
* add local timeline, fix up merge conflicts
* remove old use of go-bytes
* implement new timeline code into more areas of codebase, pull in latest go-mangler, go-mutexes, go-structr
* remove old timeline package, add local timeline cache
* remove references to old timeline types that needed starting up in tests
* start adding page validation
* fix test-identified timeline cache package issues
* fix up more tests, fix missing required changes, etc
* add exclusion for test.out in gitignore
* clarify some things better in code comments
* tweak cache size limits
* fix list timeline cache fetching
* further list timeline fixes
* linter, ssssssssshhhhhhhhhhhh please
* fix linter hints
* reslice the output if it's beyond length of 'lim'
* remove old timeline initialization code, bump go-structr to v0.9.4
* continued from previous commit
* improved code comments
* don't allow multiple entries for the same BoostOfID value, to prevent repeats of the same boosted status (see the sketch after this list)
* finish writing more code comments
* some variable renaming, for ease of following
* change the way we update lo, hi paging values during timeline load
* improved code comments for updated / returned lo, hi paging values
* finish writing code comments for the StatusTimeline{} type itself
* fill in more code comments
* update go-structr version to latest with changed timeline unique indexing logic
* have a local and public timeline *per user*
* rewrite calls to public / local timeline calls
* remove the zero length check, as lo, hi values might still be set
* simplify timeline cache loading, fix lo/hi returns, fix timeline invalidation side-effects missing for some federated actions
* swap the lo, hi values :facepalm:
* add (now) missing slice reverse of tag timeline statuses when paging ASC
* remove local / public caches (is out of scope for this work), share more timeline code
* remove unnecessary change
* again, remove more unused code
* remove unused function to appease the linter
* move boost checking to prepare function
* fix use of timeline.lastOrder, fix incorrect range functions used
* remove comments for repeat code
* remove the boost logic from prepare function
* do a maximum of 5 loads, not 10
* add repeat boost filtering logic, update go-structr, general improvements
* more code comments
* add important note
* fix timeline tests now that timelines are returned in page order
* remove unused field
* add StatusTimeline{} tests
* add more status timeline tests
* start adding preloading support
* ensure repeat boosts are marked in preloaded entries
* share a bunch of the database load code in timeline cache, don't clear timelines on relationship change
* add logic to allow dynamic clear / preloading of timelines
* comment out unused functions, but leave in place as we might end up using them
* fix timeline preload state check
* much improved status timeline code comments
* more code comments, don't bother inserting statuses if timeline not preloaded
* shift around some logic to make sure things aren't accidentally left set
* finish writing code comments
* remove trim-after-insert behaviour
* fix-up some comments referring to old logic
* remove unsetting of lo, hi
* fix preload repeatBoost checking logic
* don't return on status filter errors, these are usually transient
* better concurrency safety in Clear() and Done()
* fix test broken due to addition of preloader
* fix repeatBoost logic that doesn't account for already-hidden repeatBoosts
* ensure edit submodels are dropped on cache insertion
* update code-comment to expand CAS acronym
* use a plus1hULID() instead of 24h
* remove unused functions
* add note that public / local timeline requester can be nil
* fix incorrect visibility filtering of tag timeline statuses
* ensure we filter home timeline statuses on local only
* some small re-orderings to confirm query params are in the correct places
* fix the local only home timeline filter func
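Several of the points above (BoostOfID deduplication, repeat boost filtering, marking repeat boosts in preloaded entries) revolve around the same idea: within one timeline view, only the first boost of a given status should be shown, and later boosts of that same status get hidden. The following is a rough, hypothetical Go sketch of that idea only; apart from the BoostOfID field name mentioned above, none of these names come from the GoToSocial codebase, and the real logic lives in the new timeline cache type.

```go
package main

import "fmt"

// Status is a minimal stand-in holding only the fields needed to
// illustrate repeat-boost filtering; it is not GoToSocial's real model.
type Status struct {
	ID        string
	BoostOfID string // non-empty when this status boosts another status
}

// markRepeatBoosts flags every boost whose target status has already been
// seen earlier in the page (either directly, or via an earlier boost), so
// the caller can hide the repeats.
func markRepeatBoosts(page []Status) map[string]bool {
	seen := make(map[string]struct{}, len(page))
	repeat := make(map[string]bool)
	for _, s := range page {
		if s.BoostOfID == "" {
			// Not a boost: the status itself now counts as seen.
			seen[s.ID] = struct{}{}
			continue
		}
		if _, ok := seen[s.BoostOfID]; ok {
			// Target already appeared above: mark this boost as a repeat.
			repeat[s.ID] = true
			continue
		}
		seen[s.BoostOfID] = struct{}{}
	}
	return repeat
}

func main() {
	page := []Status{
		{ID: "01A"},                   // original status
		{ID: "01B", BoostOfID: "01A"}, // boost of a status already shown -> repeat
		{ID: "01C", BoostOfID: "01Z"}, // first boost of an unseen status -> kept
		{ID: "01D", BoostOfID: "01Z"}, // second boost of the same status -> repeat
	}
	fmt.Println(markRepeatBoosts(page)) // map[01B:true 01D:true]
}
```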
Diffstat (limited to 'internal/cache/wrappers.go')
| | | |
|---|---|---|
| -rw-r--r-- | internal/cache/wrappers.go | 84 |

1 file changed, 17 insertions(+), 67 deletions(-)
diff --git a/internal/cache/wrappers.go b/internal/cache/wrappers.go
index 9cb4fca98..34d7cb8db 100644
--- a/internal/cache/wrappers.go
+++ b/internal/cache/wrappers.go
@@ -27,19 +27,19 @@ import (
 // SliceCache wraps a simple.Cache to provide simple loader-callback
 // functions for fetching + caching slices of objects (e.g. IDs).
 type SliceCache[T any] struct {
-    cache simple.Cache[string, []T]
+    simple.Cache[string, []T]
 }
 
 // Init initializes the cache with given length + capacity.
 func (c *SliceCache[T]) Init(len, cap int) {
-    c.cache = simple.Cache[string, []T]{}
-    c.cache.Init(len, cap)
+    c.Cache = simple.Cache[string, []T]{}
+    c.Cache.Init(len, cap)
 }
 
 // Load will attempt to load an existing slice from cache for key, else calling load function and caching the result.
 func (c *SliceCache[T]) Load(key string, load func() ([]T, error)) ([]T, error) {
     // Look for cached values.
-    data, ok := c.cache.Get(key)
+    data, ok := c.Cache.Get(key)
 
     if !ok {
         var err error
@@ -51,7 +51,7 @@ func (c *SliceCache[T]) Load(key string, load func() ([]T, error)) ([]T, error)
         }
 
         // Store the data.
-        c.cache.Set(key, data)
+        c.Cache.Set(key, data)
     }
 
     // Return data clone for safety.
@@ -60,27 +60,7 @@ func (c *SliceCache[T]) Load(key string, load func() ([]T, error)) ([]T, error)
 
 // Invalidate: see simple.Cache{}.InvalidateAll().
 func (c *SliceCache[T]) Invalidate(keys ...string) {
-    _ = c.cache.InvalidateAll(keys...)
-}
-
-// Trim: see simple.Cache{}.Trim().
-func (c *SliceCache[T]) Trim(perc float64) {
-    c.cache.Trim(perc)
-}
-
-// Clear: see simple.Cache{}.Clear().
-func (c *SliceCache[T]) Clear() {
-    c.cache.Clear()
-}
-
-// Len: see simple.Cache{}.Len().
-func (c *SliceCache[T]) Len() int {
-    return c.cache.Len()
-}
-
-// Cap: see simple.Cache{}.Cap().
-func (c *SliceCache[T]) Cap() int {
-    return c.cache.Cap()
+    _ = c.Cache.InvalidateAll(keys...)
 }
 
 // StructCache wraps a structr.Cache{} to simple index caching
@@ -89,17 +69,17 @@ func (c *SliceCache[T]) Cap() int {
 // name under the main database caches struct which would reduce
 // time required to access cached values).
 type StructCache[StructType any] struct {
-    cache structr.Cache[StructType]
+    structr.Cache[StructType]
     index map[string]*structr.Index
 }
 
 // Init initializes the cache with given structr.CacheConfig{}.
 func (c *StructCache[T]) Init(config structr.CacheConfig[T]) {
     c.index = make(map[string]*structr.Index, len(config.Indices))
-    c.cache = structr.Cache[T]{}
-    c.cache.Init(config)
+    c.Cache = structr.Cache[T]{}
+    c.Cache.Init(config)
     for _, cfg := range config.Indices {
-        c.index[cfg.Fields] = c.cache.Index(cfg.Fields)
+        c.index[cfg.Fields] = c.Cache.Index(cfg.Fields)
     }
 }
 
@@ -107,26 +87,21 @@ func (c *StructCache[T]) Init(config structr.CacheConfig[T]) {
 // Note: this also handles conversion of the untyped (any) keys to structr.Key{} via structr.Index{}.
 func (c *StructCache[T]) GetOne(index string, key ...any) (T, bool) {
     i := c.index[index]
-    return c.cache.GetOne(i, i.Key(key...))
+    return c.Cache.GetOne(i, i.Key(key...))
 }
 
 // Get calls structr.Cache{}.Get(), using a cached structr.Index{} by 'index' name.
 // Note: this also handles conversion of the untyped (any) keys to structr.Key{} via structr.Index{}.
 func (c *StructCache[T]) Get(index string, keys ...[]any) []T {
     i := c.index[index]
-    return c.cache.Get(i, i.Keys(keys...)...)
-}
-
-// Put: see structr.Cache{}.Put().
-func (c *StructCache[T]) Put(values ...T) {
-    c.cache.Put(values...)
+    return c.Cache.Get(i, i.Keys(keys...)...)
 }
 
 // LoadOne calls structr.Cache{}.LoadOne(), using a cached structr.Index{} by 'index' name.
 // Note: this also handles conversion of the untyped (any) keys to structr.Key{} via structr.Index{}.
 func (c *StructCache[T]) LoadOne(index string, load func() (T, error), key ...any) (T, error) {
     i := c.index[index]
-    return c.cache.LoadOne(i, i.Key(key...), load)
+    return c.Cache.LoadOne(i, i.Key(key...), load)
 }
 
 // LoadIDs calls structr.Cache{}.Load(), using a cached structr.Index{} by 'index' name. Note: this also handles
@@ -149,7 +124,7 @@ func (c *StructCache[T]) LoadIDs(index string, ids []string, load func([]string)
     }
 
     // Pass loader callback with wrapper onto main cache load function.
-    return c.cache.Load(i, keys, func(uncached []structr.Key) ([]T, error) {
+    return c.Cache.Load(i, keys, func(uncached []structr.Key) ([]T, error) {
         uncachedIDs := make([]string, len(uncached))
         for i := range uncached {
             uncachedIDs[i] = uncached[i].Values()[0].(string)
@@ -177,7 +152,7 @@ func (c *StructCache[T]) LoadIDs2Part(index string, id1 string, id2s []string, l
     }
 
     // Pass loader callback with wrapper onto main cache load function.
-    return c.cache.Load(i, keys, func(uncached []structr.Key) ([]T, error) {
+    return c.Cache.Load(i, keys, func(uncached []structr.Key) ([]T, error) {
         uncachedIDs := make([]string, len(uncached))
         for i := range uncached {
             uncachedIDs[i] = uncached[i].Values()[1].(string)
@@ -186,16 +161,11 @@ func (c *StructCache[T]) LoadIDs2Part(index string, id1 string, id2s []string, l
     })
 }
 
-// Store: see structr.Cache{}.Store().
-func (c *StructCache[T]) Store(value T, store func() error) error {
-    return c.cache.Store(value, store)
-}
-
 // Invalidate calls structr.Cache{}.Invalidate(), using a cached structr.Index{} by 'index' name.
 // Note: this also handles conversion of the untyped (any) keys to structr.Key{} via structr.Index{}.
 func (c *StructCache[T]) Invalidate(index string, key ...any) {
     i := c.index[index]
-    c.cache.Invalidate(i, i.Key(key...))
+    c.Cache.Invalidate(i, i.Key(key...))
 }
 
 // InvalidateIDs calls structr.Cache{}.Invalidate(), using a cached structr.Index{} by 'index' name. Note: this also
@@ -218,25 +188,5 @@ func (c *StructCache[T]) InvalidateIDs(index string, ids []string) {
     }
 
     // Pass to main invalidate func.
-    c.cache.Invalidate(i, keys...)
-}
-
-// Trim: see structr.Cache{}.Trim().
-func (c *StructCache[T]) Trim(perc float64) {
-    c.cache.Trim(perc)
-}
-
-// Clear: see structr.Cache{}.Clear().
-func (c *StructCache[T]) Clear() {
-    c.cache.Clear()
-}
-
-// Len: see structr.Cache{}.Len().
-func (c *StructCache[T]) Len() int {
-    return c.cache.Len()
-}
-
-// Cap: see structr.Cache{}.Cap().
-func (c *StructCache[T]) Cap() int {
-    return c.cache.Cap()
+    c.Cache.Invalidate(i, keys...)
 }
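The substance of this diff is switching SliceCache and StructCache from holding the underlying cache in an unexported `cache` field to embedding it. Go's method promotion then exposes the embedded type's methods (Trim, Clear, Len, Cap, Put, Store, and so on) directly on the wrapper, which is why all of those one-line forwarding methods could simply be deleted. Below is a minimal, self-contained sketch of that pattern using a toy cache type; it is not the real simple.Cache or structr.Cache API, just an illustration of why embedding removes the boilerplate.

```go
package main

import "fmt"

// toyCache stands in for the real simple.Cache / structr.Cache types from
// the diff above; it has only enough methods to show the pattern.
type toyCache[K comparable, V any] struct{ m map[K]V }

func (c *toyCache[K, V]) Init()             { c.m = make(map[K]V) }
func (c *toyCache[K, V]) Set(k K, v V)      { c.m[k] = v }
func (c *toyCache[K, V]) Get(k K) (V, bool) { v, ok := c.m[k]; return v, ok }
func (c *toyCache[K, V]) Len() int          { return len(c.m) }

// Before: the cache sits in an unexported field, so every method callers
// need has to be forwarded by hand (this is what the diff deletes).
type wrappedCache[V any] struct {
	cache toyCache[string, V]
}

func (c *wrappedCache[V]) Len() int { return c.cache.Len() } // boilerplate forwarder

// After: the cache is embedded. Init, Set, Get and Len are promoted onto
// the wrapper automatically; only wrapper-specific behaviour (here, the
// loader callback) still needs writing.
type embeddedCache[V any] struct {
	toyCache[string, V]
}

// Load returns the cached value for key, or computes and caches it.
func (c *embeddedCache[V]) Load(key string, load func() (V, error)) (V, error) {
	if v, ok := c.Get(key); ok { // promoted Get
		return v, nil
	}
	v, err := load()
	if err != nil {
		return v, err
	}
	c.Set(key, v) // promoted Set
	return v, nil
}

func main() {
	var c embeddedCache[int]
	c.Init() // promoted Init, no forwarder needed
	v, _ := c.Load("answer", func() (int, error) { return 42, nil })
	fmt.Println(v, c.Len()) // 42 1
}
```

Callers of these wrappers are unaffected by the refactor: calls such as c.Len() or c.Clear() keep working, they just resolve to the promoted methods of the embedded cache rather than to hand-written forwarders.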
