author    kim <89579420+NyaaaWhatsUpDoc@users.noreply.github.com>  2023-08-03 10:34:35 +0100
committer GitHub <noreply@github.com>  2023-08-03 11:34:35 +0200
commit    00adf18c2470a69c255ea75990bbbae6e57eea89 (patch)
tree      d65408d4860b39f22f0aa853d25f57a37c65ee5c /internal/cache
parent    [bugfix] Rework MultiError to wrap + unwrap errors properly (#2057) (diff)
download  gotosocial-00adf18c2470a69c255ea75990bbbae6e57eea89.tar.xz
[feature] simpler cache size configuration (#2051)
* add automatic cache max size generation based on ratios of a singular fixed memory target
* remove now-unused cache max-size config variables
* slight ratio tweak
* remove unused visibility config var
* add secret little ratio config trick
* fixed a word
* update cache library to remove use of TTL in result caches + slice cache
* update other cache usages to use correct interface
* update example config to explain the cache memory target
* update env parsing test with new config values
* do some ratio twiddling
* add missing header
* update envparsing with latest defaults
* update size calculations to take into account result cache, simple cache and extra map overheads
* tweak the ratios some more
* more nan rampaging
* fix envparsing script
* update cache library, add sweep function to keep caches trim
* sweep caches once a minute
* add a regular job to sweep caches and keep under 80% utilisation
* remove dead code
* add new size library used to libraries section of readme
* add better explanations for the mem-ratio numbers
* update go-cache
* library version bump
* update cache.result{} size model estimation

---------

Signed-off-by: kim <grufwub@gmail.com>
Diffstat (limited to 'internal/cache')
-rw-r--r--  internal/cache/cache.go        33
-rw-r--r--  internal/cache/gts.go         389
-rw-r--r--  internal/cache/size.go        501
-rw-r--r--  internal/cache/slice.go         4
-rw-r--r--  internal/cache/util.go         22
-rw-r--r--  internal/cache/visibility.go   15
6 files changed, 795 insertions, 169 deletions
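
The thrust of the change: instead of per-cache max-size and TTL settings, a single cache memory target is split across all caches by per-cache ratios, and each cache's capacity is derived from its share divided by an estimated per-entry size (see the new size.go further down). A minimal standalone sketch of that arithmetic, using invented numbers in place of the real config values:

```go
package main

import "fmt"

// Illustration only: one fixed memory target is split across all caches
// according to per-cache ratios, then each cache's share is divided by an
// estimated per-entry size to get its capacity.
func capacityFor(memoryTarget, ratio, totalRatios, perEntrySize float64) int {
	share := memoryTarget * (ratio / totalRatios) // this cache's slice of the target
	return int(share / perEntrySize)              // number of entries that fit
}

func main() {
	const memoryTarget = 100 << 20 // assume a 100MiB target (not the real default)
	// Assume the account cache has ratio 5 of a total 100 across all caches,
	// and that one cached account costs roughly 2KiB including overheads.
	fmt.Println(capacityFor(memoryTarget, 5, 100, 2048)) // 2560 entries
}
```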
diff --git a/internal/cache/cache.go b/internal/cache/cache.go
index e97dce6f9..cb5503a84 100644
--- a/internal/cache/cache.go
+++ b/internal/cache/cache.go
@@ -204,3 +204,36 @@ func (c *Caches) setuphooks() {
c.Visibility.Invalidate("RequesterID", user.AccountID)
})
}
+
+// Sweep will sweep all the available caches to ensure none
+// are above threshold percent full to their total capacity.
+//
+// This helps with cache performance, as a full cache will
+// require an eviction on every single write, which adds
+// significant overhead to all cache writes.
+func (c *Caches) Sweep(threshold float64) {
+ c.GTS.Account().Trim(threshold)
+ c.GTS.AccountNote().Trim(threshold)
+ c.GTS.Block().Trim(threshold)
+ c.GTS.BlockIDs().Trim(threshold)
+ c.GTS.Emoji().Trim(threshold)
+ c.GTS.EmojiCategory().Trim(threshold)
+ c.GTS.Follow().Trim(threshold)
+ c.GTS.FollowIDs().Trim(threshold)
+ c.GTS.FollowRequest().Trim(threshold)
+ c.GTS.FollowRequestIDs().Trim(threshold)
+ c.GTS.Instance().Trim(threshold)
+ c.GTS.List().Trim(threshold)
+ c.GTS.ListEntry().Trim(threshold)
+ c.GTS.Marker().Trim(threshold)
+ c.GTS.Media().Trim(threshold)
+ c.GTS.Mention().Trim(threshold)
+ c.GTS.Notification().Trim(threshold)
+ c.GTS.Report().Trim(threshold)
+ c.GTS.Status().Trim(threshold)
+ c.GTS.StatusFave().Trim(threshold)
+ c.GTS.Tag().Trim(threshold)
+ c.GTS.Tombstone().Trim(threshold)
+ c.GTS.User().Trim(threshold)
+ c.Visibility.Trim(threshold)
+}
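
The commit message mentions a regular job that calls this new Sweep once a minute to keep caches under 80% utilisation. That scheduling lives outside internal/cache; the sketch below shows one plausible way to drive it, and the ticker wiring is an assumption rather than the actual scheduler used by GoToSocial:

```go
// Sketch only, written as if inside package cache (assumes `import "time"`).
// The one-minute period and 80% threshold come from the commit message.
func sweepLoop(caches *Caches, stop <-chan struct{}) {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			caches.Sweep(80) // trim every cache to at most 80% of capacity
		case <-stop:
			return
		}
	}
}
```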
diff --git a/internal/cache/gts.go b/internal/cache/gts.go
index 6014d13d4..3f54d5c52 100644
--- a/internal/cache/gts.go
+++ b/internal/cache/gts.go
@@ -18,11 +18,15 @@
package cache
import (
+ "time"
+
"codeberg.org/gruf/go-cache/v3/result"
+ "codeberg.org/gruf/go-cache/v3/simple"
"codeberg.org/gruf/go-cache/v3/ttl"
"github.com/superseriousbusiness/gotosocial/internal/cache/domain"
"github.com/superseriousbusiness/gotosocial/internal/config"
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+ "github.com/superseriousbusiness/gotosocial/internal/log"
)
type GTSCaches struct {
@@ -52,7 +56,7 @@ type GTSCaches struct {
user *result.Cache[*gtsmodel.User]
// TODO: move out of GTS caches since unrelated to DB.
- webfinger *ttl.Cache[string, string]
+ webfinger *ttl.Cache[string, string] // TTL=24hr, sweep=5min
}
// Init will initialize all the gtsmodel caches in this collection.
@@ -87,98 +91,14 @@ func (c *GTSCaches) Init() {
// Start will attempt to start all of the gtsmodel caches, or panic.
func (c *GTSCaches) Start() {
- tryStart(c.account, config.GetCacheGTSAccountSweepFreq())
- tryStart(c.accountNote, config.GetCacheGTSAccountNoteSweepFreq())
- tryStart(c.block, config.GetCacheGTSBlockSweepFreq())
- tryUntil("starting block IDs cache", 5, func() bool {
- if sweep := config.GetCacheGTSBlockIDsSweepFreq(); sweep > 0 {
- return c.blockIDs.Start(sweep)
- }
- return true
- })
- tryStart(c.emoji, config.GetCacheGTSEmojiSweepFreq())
- tryStart(c.emojiCategory, config.GetCacheGTSEmojiCategorySweepFreq())
- tryStart(c.follow, config.GetCacheGTSFollowSweepFreq())
- tryUntil("starting follow IDs cache", 5, func() bool {
- if sweep := config.GetCacheGTSFollowIDsSweepFreq(); sweep > 0 {
- return c.followIDs.Start(sweep)
- }
- return true
- })
- tryStart(c.followRequest, config.GetCacheGTSFollowRequestSweepFreq())
- tryUntil("starting follow request IDs cache", 5, func() bool {
- if sweep := config.GetCacheGTSFollowRequestIDsSweepFreq(); sweep > 0 {
- return c.followRequestIDs.Start(sweep)
- }
- return true
- })
- tryStart(c.instance, config.GetCacheGTSInstanceSweepFreq())
- tryStart(c.list, config.GetCacheGTSListSweepFreq())
- tryStart(c.listEntry, config.GetCacheGTSListEntrySweepFreq())
- tryStart(c.marker, config.GetCacheGTSMarkerSweepFreq())
- tryStart(c.media, config.GetCacheGTSMediaSweepFreq())
- tryStart(c.mention, config.GetCacheGTSMentionSweepFreq())
- tryStart(c.notification, config.GetCacheGTSNotificationSweepFreq())
- tryStart(c.report, config.GetCacheGTSReportSweepFreq())
- tryStart(c.status, config.GetCacheGTSStatusSweepFreq())
- tryStart(c.statusFave, config.GetCacheGTSStatusFaveSweepFreq())
- tryStart(c.tag, config.GetCacheGTSTagSweepFreq())
- tryStart(c.tombstone, config.GetCacheGTSTombstoneSweepFreq())
- tryStart(c.user, config.GetCacheGTSUserSweepFreq())
tryUntil("starting *gtsmodel.Webfinger cache", 5, func() bool {
- if sweep := config.GetCacheGTSWebfingerSweepFreq(); sweep > 0 {
- return c.webfinger.Start(sweep)
- }
- return true
+ return c.webfinger.Start(5 * time.Minute)
})
}
// Stop will attempt to stop all of the gtsmodel caches, or panic.
func (c *GTSCaches) Stop() {
- tryStop(c.account, config.GetCacheGTSAccountSweepFreq())
- tryStop(c.accountNote, config.GetCacheGTSAccountNoteSweepFreq())
- tryStop(c.block, config.GetCacheGTSBlockSweepFreq())
- tryUntil("stopping block IDs cache", 5, func() bool {
- if config.GetCacheGTSBlockIDsSweepFreq() > 0 {
- return c.blockIDs.Stop()
- }
- return true
- })
- tryStop(c.emoji, config.GetCacheGTSEmojiSweepFreq())
- tryStop(c.emojiCategory, config.GetCacheGTSEmojiCategorySweepFreq())
- tryStop(c.follow, config.GetCacheGTSFollowSweepFreq())
- tryUntil("stopping follow IDs cache", 5, func() bool {
- if config.GetCacheGTSFollowIDsSweepFreq() > 0 {
- return c.followIDs.Stop()
- }
- return true
- })
- tryStop(c.followRequest, config.GetCacheGTSFollowRequestSweepFreq())
- tryUntil("stopping follow request IDs cache", 5, func() bool {
- if config.GetCacheGTSFollowRequestIDsSweepFreq() > 0 {
- return c.followRequestIDs.Stop()
- }
- return true
- })
- tryStop(c.instance, config.GetCacheGTSInstanceSweepFreq())
- tryStop(c.list, config.GetCacheGTSListSweepFreq())
- tryStop(c.listEntry, config.GetCacheGTSListEntrySweepFreq())
- tryStop(c.marker, config.GetCacheGTSMarkerSweepFreq())
- tryStop(c.media, config.GetCacheGTSMediaSweepFreq())
- tryStop(c.mention, config.GetCacheGTSNotificationSweepFreq())
- tryStop(c.notification, config.GetCacheGTSNotificationSweepFreq())
- tryStop(c.report, config.GetCacheGTSReportSweepFreq())
- tryStop(c.status, config.GetCacheGTSStatusSweepFreq())
- tryStop(c.statusFave, config.GetCacheGTSStatusFaveSweepFreq())
- tryStop(c.tag, config.GetCacheGTSTagSweepFreq())
- tryStop(c.tombstone, config.GetCacheGTSTombstoneSweepFreq())
- tryStop(c.user, config.GetCacheGTSUserSweepFreq())
- tryUntil("stopping *gtsmodel.Webfinger cache", 5, func() bool {
- if config.GetCacheGTSWebfingerSweepFreq() > 0 {
- return c.webfinger.Stop()
- }
- return true
- })
+ tryUntil("stopping *gtsmodel.Webfinger cache", 5, c.webfinger.Stop)
}
// Account provides access to the gtsmodel Account database cache.
@@ -315,6 +235,14 @@ func (c *GTSCaches) Webfinger() *ttl.Cache[string, string] {
}
func (c *GTSCaches) initAccount() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofAccount(), // model in-mem size.
+ config.GetCacheAccountMemRatio(),
+ )
+
+ log.Infof(nil, "Account cache size = %d", cap)
+
c.account = result.New([]result.Lookup{
{Name: "ID"},
{Name: "URI"},
@@ -329,12 +257,19 @@ func (c *GTSCaches) initAccount() {
a2 := new(gtsmodel.Account)
*a2 = *a1
return a2
- }, config.GetCacheGTSAccountMaxSize())
- c.account.SetTTL(config.GetCacheGTSAccountTTL(), true)
+ }, cap)
+
c.account.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initAccountNote() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofAccountNote(), // model in-mem size.
+ config.GetCacheAccountNoteMemRatio(),
+ )
+ log.Infof(nil, "AccountNote cache size = %d", cap)
+
c.accountNote = result.New([]result.Lookup{
{Name: "ID"},
{Name: "AccountID.TargetAccountID"},
@@ -342,12 +277,20 @@ func (c *GTSCaches) initAccountNote() {
n2 := new(gtsmodel.AccountNote)
*n2 = *n1
return n2
- }, config.GetCacheGTSAccountNoteMaxSize())
- c.accountNote.SetTTL(config.GetCacheGTSAccountNoteTTL(), true)
+ }, cap)
+
c.accountNote.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initBlock() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofBlock(), // model in-mem size.
+ config.GetCacheBlockMemRatio(),
+ )
+
+ log.Infof(nil, "Block cache size = %d", cap)
+
c.block = result.New([]result.Lookup{
{Name: "ID"},
{Name: "URI"},
@@ -358,16 +301,22 @@ func (c *GTSCaches) initBlock() {
b2 := new(gtsmodel.Block)
*b2 = *b1
return b2
- }, config.GetCacheGTSBlockMaxSize())
- c.block.SetTTL(config.GetCacheGTSBlockTTL(), true)
+ }, cap)
+
c.block.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initBlockIDs() {
- c.blockIDs = &SliceCache[string]{Cache: ttl.New[string, []string](
+ // Calculate maximum cache size.
+ cap := calculateSliceCacheMax(
+ config.GetCacheBlockIDsMemRatio(),
+ )
+
+ log.Infof(nil, "Block IDs cache size = %d", cap)
+
+ c.blockIDs = &SliceCache[string]{Cache: simple.New[string, []string](
0,
- config.GetCacheGTSBlockIDsMaxSize(),
- config.GetCacheGTSBlockIDsTTL(),
+ cap,
)}
}
@@ -376,6 +325,14 @@ func (c *GTSCaches) initDomainBlock() {
}
func (c *GTSCaches) initEmoji() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofEmoji(), // model in-mem size.
+ config.GetCacheEmojiMemRatio(),
+ )
+
+ log.Infof(nil, "Emoji cache size = %d", cap)
+
c.emoji = result.New([]result.Lookup{
{Name: "ID"},
{Name: "URI"},
@@ -386,12 +343,20 @@ func (c *GTSCaches) initEmoji() {
e2 := new(gtsmodel.Emoji)
*e2 = *e1
return e2
- }, config.GetCacheGTSEmojiMaxSize())
- c.emoji.SetTTL(config.GetCacheGTSEmojiTTL(), true)
+ }, cap)
+
c.emoji.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initEmojiCategory() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofEmojiCategory(), // model in-mem size.
+ config.GetCacheEmojiCategoryMemRatio(),
+ )
+
+ log.Infof(nil, "EmojiCategory cache size = %d", cap)
+
c.emojiCategory = result.New([]result.Lookup{
{Name: "ID"},
{Name: "Name"},
@@ -399,12 +364,20 @@ func (c *GTSCaches) initEmojiCategory() {
c2 := new(gtsmodel.EmojiCategory)
*c2 = *c1
return c2
- }, config.GetCacheGTSEmojiCategoryMaxSize())
- c.emojiCategory.SetTTL(config.GetCacheGTSEmojiCategoryTTL(), true)
+ }, cap)
+
c.emojiCategory.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initFollow() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofFollow(), // model in-mem size.
+ config.GetCacheFollowMemRatio(),
+ )
+
+ log.Infof(nil, "Follow cache size = %d", cap)
+
c.follow = result.New([]result.Lookup{
{Name: "ID"},
{Name: "URI"},
@@ -415,19 +388,34 @@ func (c *GTSCaches) initFollow() {
f2 := new(gtsmodel.Follow)
*f2 = *f1
return f2
- }, config.GetCacheGTSFollowMaxSize())
- c.follow.SetTTL(config.GetCacheGTSFollowTTL(), true)
+ }, cap)
+
+ c.follow.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initFollowIDs() {
- c.followIDs = &SliceCache[string]{Cache: ttl.New[string, []string](
+ // Calculate maximum cache size.
+ cap := calculateSliceCacheMax(
+ config.GetCacheFollowIDsMemRatio(),
+ )
+
+ log.Infof(nil, "Follow IDs cache size = %d", cap)
+
+ c.followIDs = &SliceCache[string]{Cache: simple.New[string, []string](
0,
- config.GetCacheGTSFollowIDsMaxSize(),
- config.GetCacheGTSFollowIDsTTL(),
+ cap,
)}
}
func (c *GTSCaches) initFollowRequest() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofFollowRequest(), // model in-mem size.
+ config.GetCacheFollowRequestMemRatio(),
+ )
+
+ log.Infof(nil, "FollowRequest cache size = %d", cap)
+
c.followRequest = result.New([]result.Lookup{
{Name: "ID"},
{Name: "URI"},
@@ -438,19 +426,34 @@ func (c *GTSCaches) initFollowRequest() {
f2 := new(gtsmodel.FollowRequest)
*f2 = *f1
return f2
- }, config.GetCacheGTSFollowRequestMaxSize())
- c.followRequest.SetTTL(config.GetCacheGTSFollowRequestTTL(), true)
+ }, cap)
+
+ c.followRequest.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initFollowRequestIDs() {
- c.followRequestIDs = &SliceCache[string]{Cache: ttl.New[string, []string](
+ // Calculate maximum cache size.
+ cap := calculateSliceCacheMax(
+ config.GetCacheFollowRequestIDsMemRatio(),
+ )
+
+ log.Infof(nil, "Follow Request IDs cache size = %d", cap)
+
+ c.followRequestIDs = &SliceCache[string]{Cache: simple.New[string, []string](
0,
- config.GetCacheGTSFollowRequestIDsMaxSize(),
- config.GetCacheGTSFollowRequestIDsTTL(),
+ cap,
)}
}
func (c *GTSCaches) initInstance() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofInstance(), // model in-mem size.
+ config.GetCacheInstanceMemRatio(),
+ )
+
+ log.Infof(nil, "Instance cache size = %d", cap)
+
c.instance = result.New([]result.Lookup{
{Name: "ID"},
{Name: "Domain"},
@@ -458,24 +461,40 @@ func (c *GTSCaches) initInstance() {
i2 := new(gtsmodel.Instance)
*i2 = *i1
return i1
- }, config.GetCacheGTSInstanceMaxSize())
- c.instance.SetTTL(config.GetCacheGTSInstanceTTL(), true)
- c.emojiCategory.IgnoreErrors(ignoreErrors)
+ }, cap)
+
+ c.instance.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initList() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofList(), // model in-mem size.
+ config.GetCacheListMemRatio(),
+ )
+
+ log.Infof(nil, "List cache size = %d", cap)
+
c.list = result.New([]result.Lookup{
{Name: "ID"},
}, func(l1 *gtsmodel.List) *gtsmodel.List {
l2 := new(gtsmodel.List)
*l2 = *l1
return l2
- }, config.GetCacheGTSListMaxSize())
- c.list.SetTTL(config.GetCacheGTSListTTL(), true)
+ }, cap)
+
c.list.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initListEntry() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofListEntry(), // model in-mem size.
+ config.GetCacheListEntryMemRatio(),
+ )
+
+ log.Infof(nil, "ListEntry cache size = %d", cap)
+
c.listEntry = result.New([]result.Lookup{
{Name: "ID"},
{Name: "ListID", Multi: true},
@@ -484,48 +503,80 @@ func (c *GTSCaches) initListEntry() {
l2 := new(gtsmodel.ListEntry)
*l2 = *l1
return l2
- }, config.GetCacheGTSListEntryMaxSize())
- c.list.SetTTL(config.GetCacheGTSListEntryTTL(), true)
- c.list.IgnoreErrors(ignoreErrors)
+ }, cap)
+
+ c.listEntry.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initMarker() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofMarker(), // model in-mem size.
+ config.GetCacheMarkerMemRatio(),
+ )
+
+ log.Infof(nil, "Marker cache size = %d", cap)
+
c.marker = result.New([]result.Lookup{
{Name: "AccountID.Name"},
}, func(m1 *gtsmodel.Marker) *gtsmodel.Marker {
m2 := new(gtsmodel.Marker)
*m2 = *m1
return m2
- }, config.GetCacheGTSMarkerMaxSize())
- c.marker.SetTTL(config.GetCacheGTSMarkerTTL(), true)
+ }, cap)
+
c.marker.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initMedia() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofMedia(), // model in-mem size.
+ config.GetCacheMediaMemRatio(),
+ )
+
+ log.Infof(nil, "Media cache size = %d", cap)
+
c.media = result.New([]result.Lookup{
{Name: "ID"},
}, func(m1 *gtsmodel.MediaAttachment) *gtsmodel.MediaAttachment {
m2 := new(gtsmodel.MediaAttachment)
*m2 = *m1
return m2
- }, config.GetCacheGTSMediaMaxSize())
- c.media.SetTTL(config.GetCacheGTSMediaTTL(), true)
+ }, cap)
+
c.media.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initMention() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofMention(), // model in-mem size.
+ config.GetCacheMentionMemRatio(),
+ )
+
+ log.Infof(nil, "Mention cache size = %d", cap)
+
c.mention = result.New([]result.Lookup{
{Name: "ID"},
}, func(m1 *gtsmodel.Mention) *gtsmodel.Mention {
m2 := new(gtsmodel.Mention)
*m2 = *m1
return m2
- }, config.GetCacheGTSMentionMaxSize())
- c.mention.SetTTL(config.GetCacheGTSMentionTTL(), true)
+ }, cap)
+
c.mention.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initNotification() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofNotification(), // model in-mem size.
+ config.GetCacheNotificationMemRatio(),
+ )
+
+ log.Infof(nil, "Notification cache size = %d", cap)
+
c.notification = result.New([]result.Lookup{
{Name: "ID"},
{Name: "NotificationType.TargetAccountID.OriginAccountID.StatusID"},
@@ -533,24 +584,40 @@ func (c *GTSCaches) initNotification() {
n2 := new(gtsmodel.Notification)
*n2 = *n1
return n2
- }, config.GetCacheGTSNotificationMaxSize())
- c.notification.SetTTL(config.GetCacheGTSNotificationTTL(), true)
+ }, cap)
+
c.notification.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initReport() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofReport(), // model in-mem size.
+ config.GetCacheReportMemRatio(),
+ )
+
+ log.Infof(nil, "Report cache size = %d", cap)
+
c.report = result.New([]result.Lookup{
{Name: "ID"},
}, func(r1 *gtsmodel.Report) *gtsmodel.Report {
r2 := new(gtsmodel.Report)
*r2 = *r1
return r2
- }, config.GetCacheGTSReportMaxSize())
- c.report.SetTTL(config.GetCacheGTSReportTTL(), true)
+ }, cap)
+
c.report.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initStatus() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofStatus(), // model in-mem size.
+ config.GetCacheStatusMemRatio(),
+ )
+
+ log.Infof(nil, "Status cache size = %d", cap)
+
c.status = result.New([]result.Lookup{
{Name: "ID"},
{Name: "URI"},
@@ -559,12 +626,20 @@ func (c *GTSCaches) initStatus() {
s2 := new(gtsmodel.Status)
*s2 = *s1
return s2
- }, config.GetCacheGTSStatusMaxSize())
- c.status.SetTTL(config.GetCacheGTSStatusTTL(), true)
+ }, cap)
+
c.status.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initStatusFave() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofStatusFave(), // model in-mem size.
+ config.GetCacheStatusFaveMemRatio(),
+ )
+
+ log.Infof(nil, "StatusFave cache size = %d", cap)
+
c.statusFave = result.New([]result.Lookup{
{Name: "ID"},
{Name: "AccountID.StatusID"},
@@ -572,12 +647,20 @@ func (c *GTSCaches) initStatusFave() {
f2 := new(gtsmodel.StatusFave)
*f2 = *f1
return f2
- }, config.GetCacheGTSStatusFaveMaxSize())
- c.status.SetTTL(config.GetCacheGTSStatusFaveTTL(), true)
- c.status.IgnoreErrors(ignoreErrors)
+ }, cap)
+
+ c.statusFave.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initTag() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofTag(), // model in-mem size.
+ config.GetCacheTagMemRatio(),
+ )
+
+ log.Infof(nil, "Tag cache size = %d", cap)
+
c.tag = result.New([]result.Lookup{
{Name: "ID"},
{Name: "Name"},
@@ -585,12 +668,20 @@ func (c *GTSCaches) initTag() {
m2 := new(gtsmodel.Tag)
*m2 = *m1
return m2
- }, config.GetCacheGTSTagMaxSize())
- c.tag.SetTTL(config.GetCacheGTSTagTTL(), true)
+ }, cap)
+
c.tag.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initTombstone() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofTombstone(), // model in-mem size.
+ config.GetCacheTombstoneMemRatio(),
+ )
+
+ log.Infof(nil, "Tombstone cache size = %d", cap)
+
c.tombstone = result.New([]result.Lookup{
{Name: "ID"},
{Name: "URI"},
@@ -598,12 +689,20 @@ func (c *GTSCaches) initTombstone() {
t2 := new(gtsmodel.Tombstone)
*t2 = *t1
return t2
- }, config.GetCacheGTSTombstoneMaxSize())
- c.tombstone.SetTTL(config.GetCacheGTSTombstoneTTL(), true)
+ }, cap)
+
c.tombstone.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initUser() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofUser(), // model in-mem size.
+ config.GetCacheUserMemRatio(),
+ )
+
+ log.Infof(nil, "User cache size = %d", cap)
+
c.user = result.New([]result.Lookup{
{Name: "ID"},
{Name: "AccountID"},
@@ -614,15 +713,23 @@ func (c *GTSCaches) initUser() {
u2 := new(gtsmodel.User)
*u2 = *u1
return u2
- }, config.GetCacheGTSUserMaxSize())
- c.user.SetTTL(config.GetCacheGTSUserTTL(), true)
+ }, cap)
+
c.user.IgnoreErrors(ignoreErrors)
}
func (c *GTSCaches) initWebfinger() {
+ // Calculate maximum cache size.
+ cap := calculateCacheMax(
+ sizeofURIStr, sizeofURIStr,
+ config.GetCacheWebfingerMemRatio(),
+ )
+
+ log.Infof(nil, "Webfinger cache size = %d", cap)
+
c.webfinger = ttl.New[string, string](
0,
- config.GetCacheGTSWebfingerMaxSize(),
- config.GetCacheGTSWebfingerTTL(),
+ cap,
+ 24*time.Hour,
)
}
diff --git a/internal/cache/size.go b/internal/cache/size.go
new file mode 100644
index 000000000..56524575b
--- /dev/null
+++ b/internal/cache/size.go
@@ -0,0 +1,501 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package cache
+
+import (
+ "crypto/rsa"
+ "time"
+ "unsafe"
+
+ "codeberg.org/gruf/go-cache/v3/simple"
+ "github.com/DmitriyVTitov/size"
+ "github.com/superseriousbusiness/gotosocial/internal/ap"
+ "github.com/superseriousbusiness/gotosocial/internal/config"
+ "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+ "github.com/superseriousbusiness/gotosocial/internal/id"
+)
+
+const (
+ // example data values.
+ exampleID = id.Highest
+ exampleURI = "https://social.bbc/users/ItsMePrinceCharlesInit"
+ exampleText = `
+oh no me nan's gone and done it :shocked:
+
+she fuckin killed the king :regicide:
+
+nan what have you done :shocked:
+
+no nan put down the knife, don't go after the landlords next! :knife:
+
+you'll make society more equitable for all if you're not careful! :hammer_sickle:
+
+#JustNanProblems #WhatWillSheDoNext #MaybeItWasntSuchABadThingAfterAll
+`
+
+ exampleTextSmall = "Small problem lads, me nan's gone on a bit of a rampage"
+ exampleUsername = "@SexHaver1969"
+
+ // ID string size in memory (is always 26 char ULID).
+ sizeofIDStr = unsafe.Sizeof(exampleID)
+
+ // URI string size in memory (use some random example URI).
+ sizeofURIStr = unsafe.Sizeof(exampleURI)
+
+ // ID slice size in memory (using some estimate of length = 250).
+ sizeofIDSlice = unsafe.Sizeof([]string{}) + 250*sizeofIDStr
+
+ // result cache key size estimate which is tricky. it can
+ // be a serialized string of almost any type, so we pick a
+ // nice serialized key size on the upper end of normal.
+ sizeofResultKey = 2 * sizeofIDStr
+)
+
+// calculateSliceCacheMax calculates the maximum capacity for a slice cache with given individual ratio.
+func calculateSliceCacheMax(ratio float64) int {
+ return calculateCacheMax(sizeofIDStr, sizeofIDSlice, ratio)
+}
+
+// calculateResultCacheMax calculates the maximum cache capacity for a result
+// cache's individual ratio number, and the size of the struct model in memory.
+func calculateResultCacheMax(structSz uintptr, ratio float64) int {
+ // Estimate a worse-case scenario of extra lookup hash maps,
+ // where lookups are the no. "keys" each result can be found under
+ const lookups = 10
+
+ // Calculate the extra cache lookup map overheads.
+ totalLookupKeySz := uintptr(lookups) * sizeofResultKey
+ totalLookupValSz := uintptr(lookups) * unsafe.Sizeof(uint64(0))
+
+ // Primary cache sizes.
+ pkeySz := unsafe.Sizeof(uint64(0))
+ pvalSz := structSz
+
+ // The result cache wraps each struct result in a wrapping
+ // struct with further information, and possible error. This
+ // also needs to be taken into account when calculating value.
+ const resultValueOverhead = unsafe.Sizeof(&struct {
+ _ int64
+ _ []any
+ _ any
+ _ error
+ }{})
+
+ return calculateCacheMax(
+ pkeySz+totalLookupKeySz,
+ pvalSz+totalLookupValSz+resultValueOverhead,
+ ratio,
+ )
+}
+
+// calculateCacheMax calculates the maximum cache capacity for a cache's
+// individual ratio number, and key + value object sizes in memory.
+func calculateCacheMax(keySz, valSz uintptr, ratio float64) int {
+ if ratio < 0 {
+ // Negative ratios are a secret little trick
+ // to manually set the cache capacity sizes.
+ return int(-1 * ratio)
+ }
+
+ // see: https://golang.org/src/runtime/map.go
+ const emptyBucketOverhead = 10.79
+
+ // This takes into account (roughly) that the underlying simple cache library wraps
+ // elements within a simple.Entry{}, and the ordered map wraps each in a linked list elem.
+ const cacheElemOverhead = unsafe.Sizeof(simple.Entry{}) + unsafe.Sizeof(struct {
+ key, value interface{}
+ next, prev uintptr
+ }{})
+
+ // The inputted memory ratio does not take into account the
+ // total of all ratios, so divide it here to get perc. ratio.
+ totalRatio := ratio / totalOfRatios()
+
+ // TODO: we should also further weight this ratio depending
+ // on the combined keySz + valSz as a ratio of all available
+ // cache model memories. otherwise you can end up with a
+ // low-ratio cache of tiny models with larger capacity than
+ // a high-ratio cache of large models.
+
+ // Get max available cache memory, calculating max for
+ // this cache by multiplying by this cache's mem ratio.
+ maxMem := config.GetCacheMemoryTarget()
+ fMaxMem := float64(maxMem) * totalRatio
+
+ // Cast to useable types.
+ fKeySz := float64(keySz)
+ fValSz := float64(valSz)
+
+ // Calculated using the internal cache map size:
+ // (($keysz + $valsz) * $len) + ($len * $allOverheads) = $memSz
+ return int(fMaxMem / (fKeySz + fValSz + emptyBucketOverhead + float64(cacheElemOverhead)))
+}
+
+// totalOfRatios returns the total of all cache ratios added together.
+func totalOfRatios() float64 {
+ // NOTE: this is not performant calculating
+ // this every damn time (mainly the mutex unlocks
+ // required to access each config var). fortunately
+ // we only do this on init so fuck it :D
+ return 0 +
+ config.GetCacheAccountMemRatio() +
+ config.GetCacheAccountNoteMemRatio() +
+ config.GetCacheBlockMemRatio() +
+ config.GetCacheBlockIDsMemRatio() +
+ config.GetCacheEmojiMemRatio() +
+ config.GetCacheEmojiCategoryMemRatio() +
+ config.GetCacheFollowMemRatio() +
+ config.GetCacheFollowIDsMemRatio() +
+ config.GetCacheFollowRequestMemRatio() +
+ config.GetCacheFollowRequestIDsMemRatio() +
+ config.GetCacheInstanceMemRatio() +
+ config.GetCacheListMemRatio() +
+ config.GetCacheListEntryMemRatio() +
+ config.GetCacheMarkerMemRatio() +
+ config.GetCacheMediaMemRatio() +
+ config.GetCacheMentionMemRatio() +
+ config.GetCacheNotificationMemRatio() +
+ config.GetCacheReportMemRatio() +
+ config.GetCacheStatusMemRatio() +
+ config.GetCacheStatusFaveMemRatio() +
+ config.GetCacheTagMemRatio() +
+ config.GetCacheTombstoneMemRatio() +
+ config.GetCacheUserMemRatio() +
+ config.GetCacheWebfingerMemRatio() +
+ config.GetCacheVisibilityMemRatio()
+}
+
+func sizeofAccount() uintptr {
+ return uintptr(size.Of(&gtsmodel.Account{
+ ID: exampleID,
+ Username: exampleUsername,
+ AvatarMediaAttachmentID: exampleID,
+ HeaderMediaAttachmentID: exampleID,
+ DisplayName: exampleUsername,
+ Note: exampleText,
+ NoteRaw: exampleText,
+ Memorial: func() *bool { ok := false; return &ok }(),
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ FetchedAt: time.Now(),
+ Bot: func() *bool { ok := true; return &ok }(),
+ Locked: func() *bool { ok := true; return &ok }(),
+ Discoverable: func() *bool { ok := false; return &ok }(),
+ Privacy: gtsmodel.VisibilityFollowersOnly,
+ Sensitive: func() *bool { ok := true; return &ok }(),
+ Language: "fr",
+ URI: exampleURI,
+ URL: exampleURI,
+ InboxURI: exampleURI,
+ OutboxURI: exampleURI,
+ FollowersURI: exampleURI,
+ FollowingURI: exampleURI,
+ FeaturedCollectionURI: exampleURI,
+ ActorType: ap.ActorPerson,
+ PrivateKey: &rsa.PrivateKey{},
+ PublicKey: &rsa.PublicKey{},
+ PublicKeyURI: exampleURI,
+ SensitizedAt: time.Time{},
+ SilencedAt: time.Now(),
+ SuspendedAt: time.Now(),
+ HideCollections: func() *bool { ok := true; return &ok }(),
+ SuspensionOrigin: "",
+ EnableRSS: func() *bool { ok := true; return &ok }(),
+ }))
+}
+
+func sizeofAccountNote() uintptr {
+ return uintptr(size.Of(&gtsmodel.AccountNote{
+ ID: exampleID,
+ AccountID: exampleID,
+ TargetAccountID: exampleID,
+ Comment: exampleTextSmall,
+ }))
+}
+
+func sizeofBlock() uintptr {
+ return uintptr(size.Of(&gtsmodel.Block{
+ ID: exampleID,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ URI: exampleURI,
+ AccountID: exampleID,
+ TargetAccountID: exampleID,
+ }))
+}
+
+func sizeofEmoji() uintptr {
+ return uintptr(size.Of(&gtsmodel.Emoji{
+ ID: exampleID,
+ Shortcode: exampleTextSmall,
+ Domain: exampleURI,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ ImageRemoteURL: exampleURI,
+ ImageStaticRemoteURL: exampleURI,
+ ImageURL: exampleURI,
+ ImagePath: exampleURI,
+ ImageStaticURL: exampleURI,
+ ImageStaticPath: exampleURI,
+ ImageContentType: "image/png",
+ ImageStaticContentType: "image/png",
+ ImageUpdatedAt: time.Now(),
+ Disabled: func() *bool { ok := false; return &ok }(),
+ URI: "http://localhost:8080/emoji/01F8MH9H8E4VG3KDYJR9EGPXCQ",
+ VisibleInPicker: func() *bool { ok := true; return &ok }(),
+ CategoryID: "01GGQ8V4993XK67B2JB396YFB7",
+ Cached: func() *bool { ok := true; return &ok }(),
+ }))
+}
+
+func sizeofEmojiCategory() uintptr {
+ return uintptr(size.Of(&gtsmodel.EmojiCategory{
+ ID: exampleID,
+ Name: exampleUsername,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ }))
+}
+
+func sizeofFollow() uintptr {
+ return uintptr(size.Of(&gtsmodel.Follow{
+ ID: exampleID,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ AccountID: exampleID,
+ TargetAccountID: exampleID,
+ ShowReblogs: func() *bool { ok := true; return &ok }(),
+ URI: exampleURI,
+ Notify: func() *bool { ok := false; return &ok }(),
+ }))
+}
+
+func sizeofFollowRequest() uintptr {
+ return uintptr(size.Of(&gtsmodel.FollowRequest{
+ ID: exampleID,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ AccountID: exampleID,
+ TargetAccountID: exampleID,
+ ShowReblogs: func() *bool { ok := true; return &ok }(),
+ URI: exampleURI,
+ Notify: func() *bool { ok := false; return &ok }(),
+ }))
+}
+
+func sizeofInstance() uintptr {
+ return uintptr(size.Of(&gtsmodel.Instance{
+ ID: exampleID,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ Domain: exampleURI,
+ URI: exampleURI,
+ Title: exampleTextSmall,
+ ShortDescription: exampleText,
+ Description: exampleText,
+ ContactEmail: exampleUsername,
+ ContactAccountUsername: exampleUsername,
+ ContactAccountID: exampleID,
+ }))
+}
+
+func sizeofList() uintptr {
+ return uintptr(size.Of(&gtsmodel.List{
+ ID: exampleID,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ Title: exampleTextSmall,
+ AccountID: exampleID,
+ RepliesPolicy: gtsmodel.RepliesPolicyFollowed,
+ }))
+}
+
+func sizeofListEntry() uintptr {
+ return uintptr(size.Of(&gtsmodel.ListEntry{
+ ID: exampleID,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ ListID: exampleID,
+ FollowID: exampleID,
+ }))
+}
+
+func sizeofMarker() uintptr {
+ return uintptr(size.Of(&gtsmodel.Marker{
+ AccountID: exampleID,
+ Name: gtsmodel.MarkerNameHome,
+ UpdatedAt: time.Now(),
+ Version: 0,
+ LastReadID: exampleID,
+ }))
+}
+
+func sizeofMedia() uintptr {
+ return uintptr(size.Of(&gtsmodel.MediaAttachment{
+ ID: exampleID,
+ StatusID: exampleID,
+ URL: exampleURI,
+ RemoteURL: exampleURI,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ Type: gtsmodel.FileTypeImage,
+ AccountID: exampleID,
+ Description: exampleText,
+ ScheduledStatusID: exampleID,
+ Blurhash: exampleTextSmall,
+ File: gtsmodel.File{
+ Path: exampleURI,
+ ContentType: "image/jpeg",
+ UpdatedAt: time.Now(),
+ },
+ Thumbnail: gtsmodel.Thumbnail{
+ Path: exampleURI,
+ ContentType: "image/jpeg",
+ UpdatedAt: time.Now(),
+ URL: exampleURI,
+ RemoteURL: exampleURI,
+ },
+ Avatar: func() *bool { ok := false; return &ok }(),
+ Header: func() *bool { ok := false; return &ok }(),
+ Cached: func() *bool { ok := true; return &ok }(),
+ }))
+}
+
+func sizeofMention() uintptr {
+ return uintptr(size.Of(&gtsmodel.Mention{
+ ID: exampleURI,
+ StatusID: exampleURI,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ OriginAccountID: exampleURI,
+ OriginAccountURI: exampleURI,
+ TargetAccountID: exampleID,
+ NameString: exampleUsername,
+ TargetAccountURI: exampleURI,
+ TargetAccountURL: exampleURI,
+ }))
+}
+
+func sizeofNotification() uintptr {
+ return uintptr(size.Of(&gtsmodel.Notification{
+ ID: exampleID,
+ NotificationType: gtsmodel.NotificationFave,
+ CreatedAt: time.Now(),
+ TargetAccountID: exampleID,
+ OriginAccountID: exampleID,
+ StatusID: exampleID,
+ Read: func() *bool { ok := false; return &ok }(),
+ }))
+}
+
+func sizeofReport() uintptr {
+ return uintptr(size.Of(&gtsmodel.Report{
+ ID: exampleID,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ URI: exampleURI,
+ AccountID: exampleID,
+ TargetAccountID: exampleID,
+ Comment: exampleText,
+ StatusIDs: []string{exampleID, exampleID, exampleID},
+ Forwarded: func() *bool { ok := true; return &ok }(),
+ ActionTaken: exampleText,
+ ActionTakenAt: time.Now(),
+ ActionTakenByAccountID: exampleID,
+ }))
+}
+
+func sizeofStatus() uintptr {
+ return uintptr(size.Of(&gtsmodel.Status{
+ ID: exampleURI,
+ URI: exampleURI,
+ URL: exampleURI,
+ Content: exampleText,
+ Text: exampleText,
+ AttachmentIDs: []string{exampleID, exampleID, exampleID},
+ TagIDs: []string{exampleID, exampleID, exampleID},
+ MentionIDs: []string{},
+ EmojiIDs: []string{exampleID, exampleID, exampleID},
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ FetchedAt: time.Now(),
+ Local: func() *bool { ok := false; return &ok }(),
+ AccountURI: exampleURI,
+ AccountID: exampleID,
+ InReplyToID: exampleID,
+ InReplyToURI: exampleURI,
+ InReplyToAccountID: exampleID,
+ BoostOfID: exampleID,
+ BoostOfAccountID: exampleID,
+ ContentWarning: exampleUsername, // similar length
+ Visibility: gtsmodel.VisibilityPublic,
+ Sensitive: func() *bool { ok := false; return &ok }(),
+ Language: "en",
+ CreatedWithApplicationID: exampleID,
+ Federated: func() *bool { ok := true; return &ok }(),
+ Boostable: func() *bool { ok := true; return &ok }(),
+ Replyable: func() *bool { ok := true; return &ok }(),
+ Likeable: func() *bool { ok := true; return &ok }(),
+ ActivityStreamsType: ap.ObjectNote,
+ }))
+}
+
+func sizeofStatusFave() uintptr {
+ return uintptr(size.Of(&gtsmodel.StatusFave{
+ ID: exampleID,
+ CreatedAt: time.Now(),
+ AccountID: exampleID,
+ TargetAccountID: exampleID,
+ StatusID: exampleID,
+ URI: exampleURI,
+ }))
+}
+
+func sizeofTag() uintptr {
+ return uintptr(size.Of(&gtsmodel.Tag{
+ ID: exampleID,
+ Name: exampleUsername,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ Useable: func() *bool { ok := true; return &ok }(),
+ Listable: func() *bool { ok := true; return &ok }(),
+ }))
+}
+
+func sizeofTombstone() uintptr {
+ return uintptr(size.Of(&gtsmodel.Tombstone{
+ ID: exampleID,
+ CreatedAt: time.Now(),
+ UpdatedAt: time.Now(),
+ Domain: exampleUsername,
+ URI: exampleURI,
+ }))
+}
+
+func sizeofVisibility() uintptr {
+ return uintptr(size.Of(&CachedVisibility{
+ ItemID: exampleID,
+ RequesterID: exampleID,
+ Type: VisibilityTypeAccount,
+ Value: false,
+ }))
+}
+
+func sizeofUser() uintptr {
+ return uintptr(size.Of(&gtsmodel.User{}))
+}
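
One branch of calculateCacheMax worth highlighting: a negative mem-ratio skips the memory-target arithmetic and is taken as an absolute capacity, which is the "secret little ratio config trick" from the commit message. A standalone sketch of that behaviour, with invented numbers:

```go
package main

import "fmt"

// Standalone illustration of the branch at the top of calculateCacheMax:
// a non-negative ratio is treated as a share of the memory target, while a
// negative ratio is used directly as an absolute capacity of -ratio entries.
// All sizes and ratios here are invented for the example.
func capacity(memTarget, perEntry, ratio, totalRatios float64) int {
	if ratio < 0 {
		return int(-1 * ratio) // manual override: -1000 means exactly 1000 entries
	}
	return int((memTarget * ratio / totalRatios) / perEntry)
}

func main() {
	const target = 50 << 20                        // pretend 50MiB memory target
	fmt.Println(capacity(target, 512, 2, 100))     // ratio-derived: 2048 entries
	fmt.Println(capacity(target, 512, -1000, 100)) // overridden: 1000 entries
}
```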
diff --git a/internal/cache/slice.go b/internal/cache/slice.go
index 194f20d4b..e296a3b57 100644
--- a/internal/cache/slice.go
+++ b/internal/cache/slice.go
@@ -18,14 +18,14 @@
package cache
import (
- "codeberg.org/gruf/go-cache/v3/ttl"
+ "codeberg.org/gruf/go-cache/v3/simple"
"golang.org/x/exp/slices"
)
// SliceCache wraps a ttl.Cache to provide simple loader-callback
// functions for fetching + caching slices of objects (e.g. IDs).
type SliceCache[T any] struct {
- *ttl.Cache[string, []T]
+ *simple.Cache[string, []T]
}
// Load will attempt to load an existing slice from the cache for the given key, else calling the provided load function and caching the result.
diff --git a/internal/cache/util.go b/internal/cache/util.go
index f2357c904..f15922401 100644
--- a/internal/cache/util.go
+++ b/internal/cache/util.go
@@ -20,10 +20,8 @@ package cache
import (
"database/sql"
"errors"
- "fmt"
"time"
- "codeberg.org/gruf/go-cache/v3/result"
errorsv2 "codeberg.org/gruf/go-errors/v2"
"github.com/superseriousbusiness/gotosocial/internal/db"
"github.com/superseriousbusiness/gotosocial/internal/log"
@@ -56,26 +54,6 @@ func (*nocopy) Lock() {}
func (*nocopy) Unlock() {}
-// tryStart will attempt to start the given cache only if sweep duration > 0 (sweeping is enabled).
-func tryStart[ValueType any](cache *result.Cache[ValueType], sweep time.Duration) {
- if sweep > 0 {
- var z ValueType
- msg := fmt.Sprintf("starting %T cache", z)
- tryUntil(msg, 5, func() bool {
- return cache.Start(sweep)
- })
- }
-}
-
-// tryStop will attempt to stop the given cache only if sweep duration > 0 (sweeping is enabled).
-func tryStop[ValueType any](cache *result.Cache[ValueType], sweep time.Duration) {
- if sweep > 0 {
- var z ValueType
- msg := fmt.Sprintf("stopping %T cache", z)
- tryUntil(msg, 5, cache.Stop)
- }
-}
-
// tryUntil will attempt to call 'do' for 'count' attempts, before panicking with 'msg'.
func tryUntil(msg string, count int, do func() bool) {
for i := 0; i < count; i++ {
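
The hunk ends partway through tryUntil; its doc comment describes the behaviour, so a plausible reconstruction (not copied from the repository) looks like this:

```go
// Reconstructed from the doc comment above rather than from the actual file:
// call do() up to count times, returning as soon as one attempt succeeds,
// and panic with msg if every attempt fails. The real implementation may
// differ (for example by logging or sleeping between attempts).
func tryUntil(msg string, count int, do func() bool) {
	for i := 0; i < count; i++ {
		if do() {
			return
		}
	}
	panic("failed: " + msg)
}
```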
diff --git a/internal/cache/visibility.go b/internal/cache/visibility.go
index fd481eedc..8c534206b 100644
--- a/internal/cache/visibility.go
+++ b/internal/cache/visibility.go
@@ -20,6 +20,7 @@ package cache
import (
"codeberg.org/gruf/go-cache/v3/result"
"github.com/superseriousbusiness/gotosocial/internal/config"
+ "github.com/superseriousbusiness/gotosocial/internal/log"
)
type VisibilityCache struct {
@@ -29,6 +30,14 @@ type VisibilityCache struct {
// Init will initialize the visibility cache in this collection.
// NOTE: the cache MUST NOT be in use anywhere, this is not thread-safe.
func (c *VisibilityCache) Init() {
+ // Calculate maximum cache size.
+ cap := calculateResultCacheMax(
+ sizeofVisibility(), // model in-mem size.
+ config.GetCacheVisibilityMemRatio(),
+ )
+
+ log.Infof(nil, "Visibility cache size = %d", cap)
+
c.Cache = result.New([]result.Lookup{
{Name: "ItemID", Multi: true},
{Name: "RequesterID", Multi: true},
@@ -37,19 +46,17 @@ func (c *VisibilityCache) Init() {
v2 := new(CachedVisibility)
*v2 = *v1
return v2
- }, config.GetCacheVisibilityMaxSize())
- c.Cache.SetTTL(config.GetCacheVisibilityTTL(), true)
+ }, cap)
+
c.Cache.IgnoreErrors(ignoreErrors)
}
// Start will attempt to start the visibility cache, or panic.
func (c *VisibilityCache) Start() {
- tryStart(c.Cache, config.GetCacheVisibilitySweepFreq())
}
// Stop will attempt to stop the visibility cache, or panic.
func (c *VisibilityCache) Stop() {
- tryStop(c.Cache, config.GetCacheVisibilitySweepFreq())
}
// VisibilityType represents a visibility lookup type.