summaryrefslogtreecommitdiff
path: root/internal/cache/timeline
diff options
context:
space:
mode:
Diffstat (limited to 'internal/cache/timeline')
-rw-r--r--internal/cache/timeline/status.go76
-rw-r--r--internal/cache/timeline/status_test.go83
2 files changed, 124 insertions, 35 deletions
diff --git a/internal/cache/timeline/status.go b/internal/cache/timeline/status.go
index 56d90e422..c0c394042 100644
--- a/internal/cache/timeline/status.go
+++ b/internal/cache/timeline/status.go
@@ -336,6 +336,14 @@ func (t *StatusTimeline) Load(
limit := page.Limit
order := page.Order()
dir := toDirection(order)
+ if limit <= 0 {
+
+ // a page limit MUST be set!
+ // this shouldn't be possible
+ // but we check anyway to stop
+ // chance of limitless db calls!
+ panic("invalid page limit")
+ }
// Use a copy of current page so
// we can repeatedly update it.
@@ -344,11 +352,11 @@ func (t *StatusTimeline) Load(
nextPg.Min.Value = lo
nextPg.Max.Value = hi
- // Interstitial meta objects.
- var metas []*StatusMeta
+ // Preallocate slice of interstitial models.
+ metas := make([]*StatusMeta, 0, limit)
- // Returned frontend API statuses.
- var apiStatuses []*apimodel.Status
+ // Preallocate slice of required status API models.
+ apiStatuses := make([]*apimodel.Status, 0, limit)
// TODO: we can remove this nil
// check when we've updated all
@@ -362,13 +370,17 @@ func (t *StatusTimeline) Load(
return nil, "", "", err
}
+ // Load a little more than limit to
+ // reduce chance of db calls below.
+ limitPtr := util.Ptr(limit + 10)
+
// First we attempt to load status
// metadata entries from the timeline
// cache, up to given limit.
metas = t.cache.Select(
util.PtrIf(lo),
util.PtrIf(hi),
- util.PtrIf(limit),
+ limitPtr,
dir,
)
@@ -384,9 +396,6 @@ func (t *StatusTimeline) Load(
lo = metas[len(metas)-1].ID
hi = metas[0].ID
- // Allocate slice of expected required API models.
- apiStatuses = make([]*apimodel.Status, 0, len(metas))
-
// Prepare frontend API models for
// the cached statuses. For now this
// also does its own extra filtering.
@@ -399,10 +408,10 @@ func (t *StatusTimeline) Load(
}
}
- // If no cached timeline statuses
- // were found for page, we need to
- // call through to the database.
- if len(apiStatuses) == 0 {
+ // If not enough cached timeline
+ // statuses were found for page,
+ // we need to call the database.
+ if len(apiStatuses) < limit {
// Pass through to main timeline db load function.
apiStatuses, lo, hi, err = loadStatusTimeline(ctx,
@@ -460,25 +469,31 @@ func loadStatusTimeline(
// vals of loaded statuses.
var lo, hi string
- // Extract paging params.
+ // Extract paging params, in particular
+ // limit is used separately from nextPg to
+ // determine the *expected* return limit,
+ // not just what we use in db queries.
+ returnLimit := nextPg.Limit
order := nextPg.Order()
- limit := nextPg.Limit
-
- // Load a little more than
- // limit to reduce db calls.
- nextPg.Limit += 10
-
- // Ensure we have a slice of meta objects to
- // use in later preparation of the API models.
- metas = xslices.GrowJust(metas[:0], nextPg.Limit)
-
- // Ensure we have a slice of required frontend API models.
- apiStatuses = xslices.GrowJust(apiStatuses[:0], nextPg.Limit)
// Perform maximum of 5 load
// attempts fetching statuses.
for i := 0; i < 5; i++ {
+ // Update page limit to the *remaining*
+ // limit of total we're expected to return.
+ nextPg.Limit = returnLimit - len(apiStatuses)
+ if nextPg.Limit <= 0 {
+
+ // We reached the end! Set lo paging value.
+ lo = apiStatuses[len(apiStatuses)-1].ID
+ break
+ }
+
+ // But load a bit more than
+ // limit to reduce db calls.
+ nextPg.Limit += 10
+
// Load next timeline statuses.
statuses, err := loadPage(nextPg)
if err != nil {
@@ -519,17 +534,8 @@ func loadStatusTimeline(
metas,
prepareAPI,
apiStatuses,
- limit,
+ returnLimit,
)
-
- // If we have anything, return
- // here. Even if below limit.
- if len(apiStatuses) > 0 {
-
- // Set returned lo status paging value.
- lo = apiStatuses[len(apiStatuses)-1].ID
- break
- }
}
return apiStatuses, lo, hi, nil
diff --git a/internal/cache/timeline/status_test.go b/internal/cache/timeline/status_test.go
index 6a288d2ea..fc7e43da8 100644
--- a/internal/cache/timeline/status_test.go
+++ b/internal/cache/timeline/status_test.go
@@ -18,11 +18,16 @@
package timeline
import (
+ "context"
+ "fmt"
"slices"
"testing"
apimodel "code.superseriousbusiness.org/gotosocial/internal/api/model"
"code.superseriousbusiness.org/gotosocial/internal/gtsmodel"
+ "code.superseriousbusiness.org/gotosocial/internal/id"
+ "code.superseriousbusiness.org/gotosocial/internal/log"
+ "code.superseriousbusiness.org/gotosocial/internal/paging"
"codeberg.org/gruf/go-structr"
"github.com/stretchr/testify/assert"
)
@@ -60,6 +65,46 @@ var testStatusMeta = []*StatusMeta{
},
}
+func TestStatusTimelineLoadLimit(t *testing.T) {
+ var tt StatusTimeline
+ tt.Init(1000)
+
+ // Prepare new context for the duration of this test.
+ ctx, cncl := context.WithCancel(context.Background())
+ defer cncl()
+
+ // Clone the input test status data.
+ data := slices.Clone(testStatusMeta)
+
+ // Insert test data into timeline.
+ _ = tt.cache.Insert(data...)
+
+ // Manually mark timeline as 'preloaded'.
+ tt.preloader.CheckPreload(tt.preloader.Done)
+
+ // Craft a new page for selection,
+ // setting placeholder min / max values
+ // but in particular setting a limit
+ // HIGHER than currently cached values.
+ page := new(paging.Page)
+ page.Min = paging.MinID(id.Lowest)
+ page.Max = paging.MaxID(id.Highest)
+ page.Limit = len(data) + 10
+
+ // Load crafted page from the cache. This
+ // SHOULD load all cached entries, then
+ // generate an extra 10 statuses up to limit.
+ apiStatuses, _, _, err := tt.Load(ctx,
+ page,
+ loadGeneratedStatusPage,
+ loadStatusIDsFrom(data),
+ nil, // no filtering
+ func(status *gtsmodel.Status) (*apimodel.Status, error) { return new(apimodel.Status), nil },
+ )
+ assert.NoError(t, err)
+ assert.Len(t, apiStatuses, page.Limit)
+}
+
func TestStatusTimelineUnprepare(t *testing.T) {
var tt StatusTimeline
tt.Init(1000)
@@ -301,6 +346,44 @@ func TestStatusTimelineTrim(t *testing.T) {
assert.Equal(t, before, tt.cache.Len())
}
+// loadStatusIDsFrom imitates loading of statuses of given IDs from the database, instead selecting
+// statuses with appropriate IDs from the given slice of status meta, converting them to statuses.
+func loadStatusIDsFrom(data []*StatusMeta) func(ids []string) ([]*gtsmodel.Status, error) {
+ return func(ids []string) ([]*gtsmodel.Status, error) {
+ var statuses []*gtsmodel.Status
+ for _, id := range ids {
+ i := slices.IndexFunc(data, func(s *StatusMeta) bool {
+ return s.ID == id
+ })
+ if i < 0 || i >= len(data) {
+ panic(fmt.Sprintf("could not find %s in %v", id, log.VarDump(data)))
+ }
+ statuses = append(statuses, &gtsmodel.Status{
+ ID: data[i].ID,
+ AccountID: data[i].AccountID,
+ BoostOfID: data[i].BoostOfID,
+ BoostOfAccountID: data[i].BoostOfAccountID,
+ })
+ }
+ return statuses, nil
+ }
+}
+
+// loadGeneratedStatusPage imitates loading of a given page of statuses,
+// simply generating new statuses until the given page's limit is reached.
+func loadGeneratedStatusPage(page *paging.Page) ([]*gtsmodel.Status, error) {
+ var statuses []*gtsmodel.Status
+ for range page.Limit {
+ statuses = append(statuses, &gtsmodel.Status{
+ ID: id.NewULID(),
+ AccountID: id.NewULID(),
+ BoostOfID: id.NewULID(),
+ BoostOfAccountID: id.NewULID(),
+ })
+ }
+ return statuses, nil
+}
+
// containsStatusID returns whether timeline contains a status with ID.
func containsStatusID(t *StatusTimeline, id string) bool {
return getStatusByID(t, id) != nil