author     kim <89579420+NyaaaWhatsUpDoc@users.noreply.github.com>  2024-04-26 13:50:46 +0100
committer  GitHub <noreply@github.com>                              2024-04-26 13:50:46 +0100
commit     c9c0773f2c2363dcfa37e675b83ec3f0b49bd0d9 (patch)
tree       dbd3409070765d5ca81448a574ccd32b4da1ffe6 /internal/workers/worker_msg.go
parent     [chore] update Docker container to use new go swagger hash (#2872) (diff)
download   gotosocial-c9c0773f2c2363dcfa37e675b83ec3f0b49bd0d9.tar.xz
[performance] update remaining worker pools to use queues (#2865)
* start replacing client + federator + media workers with new worker + queue types
* refactor federatingDB.Delete(), drop queued messages when deleting account / status
* move all queue purging to the processor workers
* undo toolchain updates
* code comments, ensure dereferencer worker pool gets started
* update gruf libraries in readme
* start the job scheduler separately to the worker pools
* reshuffle ordering of server.go + remove duplicate worker start / stop
* update go-list version
* fix vendoring
* move queue invalidation to before wiping / deletion, to ensure queued work not dropped
* add logging to worker processing functions in testrig, don't start workers in unexpected places
* update go-structr to add (+ then rely on) QueueCtx{} type
* ensure more worker pools get started properly in tests
* fix remaining broken tests relying on worker queue logic
* fix account test suite queue popping logic, ensure noop workers do not pull from queue
* move back accidentally shuffled account deletion order
* ensure error (non nil!!) gets passed in refactored federatingDB{}.Delete()
* silently drop deletes from accounts not permitted to
* don't warn log on forwarded deletes
* make if else clauses easier to parse
* use getFederatorMsg()
* improved code comment
* improved code comment re: requesting account delete checks
* remove boolean result from worker start / stop since false = already running or already stopped
* remove optional passed-in http.client
* remove worker starting from the admin CLI commands (we don't need to handle side-effects)
* update prune cli to start scheduler but not all of the workers
* fix rebase issues
* remove redundant return statements
* i'm sorry sir linter
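
For context, a minimal sketch of how calling code inside the module might wire up the new MsgWorkerPool{} type added in this diff. Only Init(), Start(), Stop() and the Process hook are taken from the code below; the clientMsg type, the index field names, and the commented-out Queue.Push() call are illustrative assumptions (note also that internal/ packages can only be imported from within the gotosocial module).

package example

import (
	"context"
	"log"

	"codeberg.org/gruf/go-structr"
	"github.com/superseriousbusiness/gotosocial/internal/workers"
)

// clientMsg stands in for a real queued message type; illustrative only.
type clientMsg struct {
	ActivityType string
	ObjectType   string
}

func startPool() *workers.MsgWorkerPool[clientMsg] {
	var pool workers.MsgWorkerPool[clientMsg]

	// Initialize the embedded queue with the struct
	// indices later used for lookups / invalidation.
	pool.Init([]structr.IndexConfig{
		{Fields: "ActivityType"},
		{Fields: "ObjectType"},
	})

	// Set the processing function shared by every worker
	// in the pool; this must be set before Start().
	pool.Process = func(ctx context.Context, msg clientMsg) error {
		log.Printf("processing %s %s", msg.ActivityType, msg.ObjectType)
		return nil
	}

	// Start 4 workers, each feeding from the shared queue.
	pool.Start(4)

	// Work can then be queued for processing, assuming the
	// queue type exposes a Push method:
	// pool.Queue.Push(clientMsg{ActivityType: "Create", ObjectType: "Note"})

	return &pool
}

Calling pool.Stop() later stops all contained workers; per the pool code in the diff below, stopping does not drain whatever is still queued.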
Diffstat (limited to 'internal/workers/worker_msg.go')
-rw-r--r--  internal/workers/worker_msg.go  157
1 file changed, 157 insertions, 0 deletions
diff --git a/internal/workers/worker_msg.go b/internal/workers/worker_msg.go
new file mode 100644
index 000000000..1920c964e
--- /dev/null
+++ b/internal/workers/worker_msg.go
@@ -0,0 +1,157 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package workers
+
+import (
+ "context"
+
+ "codeberg.org/gruf/go-runners"
+ "codeberg.org/gruf/go-structr"
+ "github.com/superseriousbusiness/gotosocial/internal/log"
+ "github.com/superseriousbusiness/gotosocial/internal/queue"
+ "github.com/superseriousbusiness/gotosocial/internal/util"
+)
+
+// MsgWorkerPool wraps multiple MsgWorker{}s in
+// a singular struct for easy multi start / stop.
+type MsgWorkerPool[Msg any] struct {
+
+ // Process handles queued message types.
+ Process func(context.Context, Msg) error
+
+ // Queue is embedded queue.StructQueue{}
+ // passed to each of the pool Worker{}s.
+ Queue queue.StructQueue[Msg]
+
+ // internal fields.
+ workers []*MsgWorker[Msg]
+}
+
+// Init will initialize the worker pool queue with given struct indices.
+func (p *MsgWorkerPool[T]) Init(indices []structr.IndexConfig) {
+ p.Queue.Init(structr.QueueConfig[T]{Indices: indices})
+}
+
+// Start will attempt to start 'n' Worker{}s.
+func (p *MsgWorkerPool[T]) Start(n int) {
+ // Check whether workers are
+ // set (is already running).
+ ok := (len(p.workers) > 0)
+ if ok {
+ return
+ }
+
+ // Allocate new msg workers slice.
+ p.workers = make([]*MsgWorker[T], n)
+ for i := range p.workers {
+
+ // Allocate new MsgWorker[T]{}.
+ p.workers[i] = new(MsgWorker[T])
+ p.workers[i].Process = p.Process
+ p.workers[i].Queue = &p.Queue
+
+ // Attempt to start worker.
+ // Return bool not useful
+ // here, as true = started,
+ // false = already running.
+ _ = p.workers[i].Start()
+ }
+}
+
+// Stop will attempt to stop contained Worker{}s.
+func (p *MsgWorkerPool[T]) Stop() {
+ // Check whether workers are
+ // unset (i.e. not running).
+ ok := (len(p.workers) == 0)
+ if ok {
+ return
+ }
+
+ // Stop all running workers.
+ for i := range p.workers {
+
+ // return bool not useful
+ // here, as true = stopped,
+ // false = never running.
+ _ = p.workers[i].Stop()
+ }
+
+ // Unset workers slice.
+ p.workers = p.workers[:0]
+}
+
+// MsgWorker wraps a processing function to
+// feed from a queue.StructQueue{} for messages
+// to process. It does so in a single goroutine
+// with state management utilities.
+type MsgWorker[Msg any] struct {
+
+ // Process handles queued message types.
+ Process func(context.Context, Msg) error
+
+ // Queue is the queue.StructQueue{} of
+ // messages that this worker feeds from.
+ Queue *queue.StructQueue[Msg]
+
+ // internal fields.
+ service runners.Service
+}
+
+// Start will attempt to start the Worker{}.
+func (w *MsgWorker[T]) Start() bool {
+ return w.service.GoRun(w.run)
+}
+
+// Stop will attempt to stop the Worker{}.
+func (w *MsgWorker[T]) Stop() bool {
+ return w.service.Stop()
+}
+
+// run wraps process to restart on any panic.
+func (w *MsgWorker[T]) run(ctx context.Context) {
+ if w.Process == nil || w.Queue == nil {
+ panic("not yet initialized")
+ }
+ log.Infof(ctx, "%p: starting worker", w)
+ defer log.Infof(ctx, "%p: stopped worker", w)
+ util.Must(func() { w.process(ctx) })
+}
+
+// process is the main msg worker processing routine.
+func (w *MsgWorker[T]) process(ctx context.Context) {
+ if w.Process == nil || w.Queue == nil {
+ // we perform this check here just
+ // to ensure the compiler knows these
+ // variables aren't nil in the loop,
+ // even if already checked by caller.
+ panic("not yet initialized")
+ }
+
+ for {
+ // Block until pop next message.
+ msg, ok := w.Queue.PopCtx(ctx)
+ if !ok {
+ return
+ }
+
+ // Attempt to process popped message type.
+ if err := w.Process(ctx, msg); err != nil {
+ log.Errorf(ctx, "%p: error processing: %v", w, err)
+ }
+ }
+}
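
Along the same lines, a minimal sketch of driving a single MsgWorker{} directly, outside of a pool. The same caveats apply: the job type and the commented-out Push() call are illustrative assumptions, while the Init / Start / Stop / Process wiring mirrors what MsgWorkerPool{} does above.

package example

import (
	"context"
	"log"

	"codeberg.org/gruf/go-structr"
	"github.com/superseriousbusiness/gotosocial/internal/queue"
	"github.com/superseriousbusiness/gotosocial/internal/workers"
)

// job is a placeholder message type; illustrative only.
type job struct{ ID string }

func runWorker(ctx context.Context) {
	var (
		q queue.StructQueue[job]
		w workers.MsgWorker[job]
	)

	// Initialize the queue the same way MsgWorkerPool.Init() does.
	q.Init(structr.QueueConfig[job]{
		Indices: []structr.IndexConfig{{Fields: "ID"}},
	})

	// Both Queue and Process must be set before starting,
	// otherwise run() panics with "not yet initialized".
	w.Queue = &q
	w.Process = func(ctx context.Context, j job) error {
		log.Println("processed job", j.ID)
		return nil
	}

	// Start spawns the single run() goroutine; false
	// would mean the worker was already running.
	_ = w.Start()

	// ... push work onto q here, e.g. q.Push(job{ID: "1"}),
	// assuming the queue type exposes a Push method ...

	// On shutdown, Stop() cancels the service context so
	// PopCtx() returns false and the goroutine exits.
	<-ctx.Done()
	_ = w.Stop()
}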