about summary refs log tree commit diff
diff options
context:
space:
mode:
authorTerin Stock <terinjokes@gmail.com>2023-03-02 00:05:26 +0100
committerTerin Stock <terinjokes@gmail.com>2023-03-02 00:05:26 +0100
commitd6baaceefab0a307b1d7ae8f8bf92cfa5e03a854 (patch)
tree856ef72e83f4c30e5c1a18a2c8ebb596d24f60e8
parentf4ce6ac1be7b2817a8e02fc0e517d93ff9890d2e (diff)
fix: manager started race HEAD trunk
It's possible for there to be a data race where `Add` checks if the
manager has started, then proceeds to append to the runnables slice, at
the same time that `Start` is progressing towards starting the manager.
This results in a data race with the runnables slice.

This changeset modifies the manager to use a more traditional locking approach.
-rw-r--r--manager/manager.go21
1 file changed, 17 insertions, 4 deletions
diff --git a/manager/manager.go b/manager/manager.go
index 1a79214..704bced 100644
--- a/manager/manager.go
+++ b/manager/manager.go
@@ -5,13 +5,15 @@ package manager
 
 import (
 	"context"
-	"sync/atomic"
+	"errors"
+	"sync"
 
 	"golang.org/x/sync/errgroup"
 )
 
 type Manager struct {
-	started     uint32
+	mu          sync.Mutex
+	started     bool
 	runnables   []Runnable
 	internalCtx context.Context
 	group       *errgroup.Group
@@ -24,7 +26,10 @@ func New() *Manager {
 }
 
 func (m *Manager) Add(r Runnable) error {
-	if atomic.LoadUint32(&m.started) == 1 {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	if m.started {
 		m.group.Go(func() error {
 			return r.Start(m.internalCtx)
 		})
@@ -36,11 +41,16 @@ func (m *Manager) Add(r Runnable) error {
 }
 
 func (m *Manager) Start(ctx context.Context) error {
+	m.mu.Lock()
+	if m.started {
+		m.mu.Unlock()
+		return errors.New("manager already started")
+	}
+
 	g, internalCtx := errgroup.WithContext(ctx)
 	m.group = g
 	m.internalCtx = internalCtx
 
-	atomic.SwapUint32(&m.started, 1)
 	for _, r := range m.runnables {
 		r := r
 		g.Go(func() error {
@@ -48,6 +58,9 @@ func (m *Manager) Start(ctx context.Context) error {
 		})
 	}
 	m.runnables = nil
+	m.started = true
+
+	m.mu.Unlock()
 
 	return g.Wait()
 }