A data race is possible on the runnables slice: `Add` checks whether the
manager has started and then appends to the runnables slice, while `Start`
is concurrently progressing toward starting the manager and iterating over
that same slice.
This changeset modifies the manager to use a more traditional locking approach.
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/manager/manager.go b/manager/manager.go
index 1a79214..704bced 100644
--- a/manager/manager.go
+++ b/manager/manager.go
@@ -5,13 +5,15 @@ package manager
import (
"context"
- "sync/atomic"
+ "errors"
+ "sync"
"golang.org/x/sync/errgroup"
)
type Manager struct {
- started uint32
+ mu sync.Mutex
+ started bool
runnables []Runnable
internalCtx context.Context
group *errgroup.Group
@@ -24,7 +26,10 @@ func New() *Manager {
}
func (m *Manager) Add(r Runnable) error {
- if atomic.LoadUint32(&m.started) == 1 {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if m.started {
m.group.Go(func() error {
return r.Start(m.internalCtx)
})
@@ -36,11 +41,16 @@ func (m *Manager) Add(r Runnable) error {
}
func (m *Manager) Start(ctx context.Context) error {
+ m.mu.Lock()
+ if m.started {
+ m.mu.Unlock()
+ return errors.New("manager already started")
+ }
+
g, internalCtx := errgroup.WithContext(ctx)
m.group = g
m.internalCtx = internalCtx
- atomic.SwapUint32(&m.started, 1)
for _, r := range m.runnables {
r := r
g.Go(func() error {
@@ -48,6 +58,9 @@ func (m *Manager) Start(ctx context.Context) error {
})
}
m.runnables = nil
+ m.started = true
+
+ m.mu.Unlock()
return g.Wait()
}
|