Diffstat (limited to 'internal/cleaner')
-rw-r--r--  internal/cleaner/emoji.go  63
-rw-r--r--  internal/cleaner/media.go  37
2 files changed, 69 insertions, 31 deletions
diff --git a/internal/cleaner/emoji.go b/internal/cleaner/emoji.go
index d2baec7e8..62ed0f012 100644
--- a/internal/cleaner/emoji.go
+++ b/internal/cleaner/emoji.go
@@ -27,6 +27,7 @@ import (
 	"github.com/superseriousbusiness/gotosocial/internal/gtserror"
 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
 	"github.com/superseriousbusiness/gotosocial/internal/log"
+	"github.com/superseriousbusiness/gotosocial/internal/paging"
 )
 
 // Emoji encompasses a set of
@@ -105,8 +106,9 @@ func (e *Emoji) UncacheRemote(ctx context.Context, olderThan time.Time) (int, er
 			return total, gtserror.Newf("error getting remote emoji: %w", err)
 		}
 
-		if len(emojis) == 0 {
-			// reached end.
+		// If no emojis / same group is returned, we reached the end.
+		if len(emojis) == 0 ||
+			olderThan.Equal(emojis[len(emojis)-1].CreatedAt) {
 			break
 		}
 
@@ -140,23 +142,30 @@ func (e *Emoji) UncacheRemote(ctx context.Context, olderThan time.Time) (int, er
 func (e *Emoji) FixBroken(ctx context.Context) (int, error) {
 	var (
 		total int
-		maxID string
+		page  paging.Page
 	)
 
+	// Set page select limit.
+	page.Limit = selectLimit
+
 	for {
-		// Fetch the next batch of emoji media up to next ID.
-		emojis, err := e.state.DB.GetEmojis(ctx, maxID, selectLimit)
+		// Fetch the next batch of emoji to next max ID.
+		emojis, err := e.state.DB.GetEmojis(ctx, &page)
 		if err != nil && !errors.Is(err, db.ErrNoEntries) {
 			return total, gtserror.Newf("error getting emojis: %w", err)
 		}
 
-		if len(emojis) == 0 {
-			// reached end.
+		// Get current max ID.
+		maxID := page.Max.Value
+
+		// If no emoji or the same group is returned, we reached end.
+		if len(emojis) == 0 || maxID == emojis[len(emojis)-1].ID {
 			break
 		}
 
-		// Use last as the next 'maxID' value.
+		// Use last ID as the next 'maxID'.
 		maxID = emojis[len(emojis)-1].ID
+		page.Max = paging.MaxID(maxID)
 
 		for _, emoji := range emojis {
 			// Check / fix missing broken emoji.
@@ -182,23 +191,30 @@ func (e *Emoji) FixBroken(ctx context.Context) (int, error) {
 func (e *Emoji) PruneUnused(ctx context.Context) (int, error) {
 	var (
 		total int
-		maxID string
+		page  paging.Page
 	)
 
+	// Set page select limit.
+	page.Limit = selectLimit
+
 	for {
-		// Fetch the next batch of emoji media up to next ID.
-		emojis, err := e.state.DB.GetRemoteEmojis(ctx, maxID, selectLimit)
+		// Fetch the next batch of emoji to next max ID.
+		emojis, err := e.state.DB.GetRemoteEmojis(ctx, &page)
 		if err != nil && !errors.Is(err, db.ErrNoEntries) {
 			return total, gtserror.Newf("error getting remote emojis: %w", err)
 		}
 
-		if len(emojis) == 0 {
-			// reached end.
+		// Get current max ID.
+		maxID := page.Max.Value
+
+		// If no emoji or the same group is returned, we reached end.
+		if len(emojis) == 0 || maxID == emojis[len(emojis)-1].ID {
 			break
 		}
 
-		// Use last as the next 'maxID' value.
+		// Use last ID as the next 'maxID'.
 		maxID = emojis[len(emojis)-1].ID
+		page.Max = paging.MaxID(maxID)
 
 		for _, emoji := range emojis {
 			// Check / prune unused emoji media.
@@ -224,23 +240,30 @@ func (e *Emoji) PruneUnused(ctx context.Context) (int, error) {
 func (e *Emoji) FixCacheStates(ctx context.Context) (int, error) {
 	var (
 		total int
-		maxID string
+		page  paging.Page
 	)
 
+	// Set page select limit.
+	page.Limit = selectLimit
+
 	for {
-		// Fetch the next batch of emoji media up to next ID.
-		emojis, err := e.state.DB.GetRemoteEmojis(ctx, maxID, selectLimit)
+		// Fetch the next batch of emoji to next max ID.
+		emojis, err := e.state.DB.GetRemoteEmojis(ctx, &page)
 		if err != nil && !errors.Is(err, db.ErrNoEntries) {
 			return total, gtserror.Newf("error getting remote emojis: %w", err)
 		}
 
-		if len(emojis) == 0 {
-			// reached end.
+		// Get current max ID.
+		maxID := page.Max.Value
+
+		// If no emoji or the same group is returned, we reached end.
+		if len(emojis) == 0 || maxID == emojis[len(emojis)-1].ID {
 			break
 		}
 
-		// Use last as the next 'maxID' value.
+		// Use last ID as the next 'maxID'.
 		maxID = emojis[len(emojis)-1].ID
+		page.Max = paging.MaxID(maxID)
 
 		for _, emoji := range emojis {
 			// Check / fix required emoji cache states.
diff --git a/internal/cleaner/media.go b/internal/cleaner/media.go
index 6db205d13..f3cda5d87 100644
--- a/internal/cleaner/media.go
+++ b/internal/cleaner/media.go
@@ -28,6 +28,7 @@ import (
 	"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
 	"github.com/superseriousbusiness/gotosocial/internal/log"
 	"github.com/superseriousbusiness/gotosocial/internal/media"
+	"github.com/superseriousbusiness/gotosocial/internal/paging"
 	"github.com/superseriousbusiness/gotosocial/internal/regexes"
 	"github.com/superseriousbusiness/gotosocial/internal/uris"
 )
@@ -128,23 +129,30 @@ func (m *Media) PruneOrphaned(ctx context.Context) (int, error) {
 func (m *Media) PruneUnused(ctx context.Context) (int, error) {
 	var (
 		total int
-		maxID string
+		page  paging.Page
 	)
 
+	// Set page select limit.
+	page.Limit = selectLimit
+
 	for {
-		// Fetch the next batch of media attachments up to next max ID.
-		attachments, err := m.state.DB.GetAttachments(ctx, maxID, selectLimit)
+		// Fetch the next batch of media attachments to next maxID.
+		attachments, err := m.state.DB.GetAttachments(ctx, &page)
 		if err != nil && !errors.Is(err, db.ErrNoEntries) {
 			return total, gtserror.Newf("error getting attachments: %w", err)
 		}
 
-		if len(attachments) == 0 {
-			// reached end.
+		// Get current max ID.
+		maxID := page.Max.Value
+
+		// If no attachments or the same group is returned, we reached the end.
+		if len(attachments) == 0 || maxID == attachments[len(attachments)-1].ID {
 			break
 		}
 
 		// Use last ID as the next 'maxID' value.
 		maxID = attachments[len(attachments)-1].ID
+		page.Max = paging.MaxID(maxID)
 
 		for _, media := range attachments {
 			// Check / prune unused media attachment.
@@ -183,8 +191,9 @@ func (m *Media) UncacheRemote(ctx context.Context, olderThan time.Time) (int, er
 			return total, gtserror.Newf("error getting remote attachments: %w", err)
 		}
 
-		if len(attachments) == 0 {
-			// reached end.
+		// If no attachments / same group is returned, we reached the end.
+		if len(attachments) == 0 ||
+			olderThan.Equal(attachments[len(attachments)-1].CreatedAt) {
 			break
 		}
 
@@ -215,23 +224,29 @@ func (m *Media) UncacheRemote(ctx context.Context, olderThan time.Time) (int, er
 func (m *Media) FixCacheStates(ctx context.Context) (int, error) {
 	var (
 		total int
-		maxID string
+		page  paging.Page
 	)
 
+	// Set page select limit.
+	page.Limit = selectLimit
+
 	for {
 		// Fetch the next batch of media attachments up to next max ID.
-		attachments, err := m.state.DB.GetRemoteAttachments(ctx, maxID, selectLimit)
+		attachments, err := m.state.DB.GetRemoteAttachments(ctx, &page)
 		if err != nil && !errors.Is(err, db.ErrNoEntries) {
 			return total, gtserror.Newf("error getting remote attachments: %w", err)
 		}
+		// Get current max ID.
+		maxID := page.Max.Value
 
-		if len(attachments) == 0 {
-			// reached end.
+		// If no attachments or the same group is returned, we reached the end.
+		if len(attachments) == 0 || maxID == attachments[len(attachments)-1].ID {
 			break
 		}
 
 		// Use last ID as the next 'maxID' value.
 		maxID = attachments[len(attachments)-1].ID
+		page.Max = paging.MaxID(maxID)
 
 		for _, media := range attachments {
 			// Check / fix required media cache states.