author    kim <89579420+NyaaaWhatsUpDoc@users.noreply.github.com>  2024-06-21 16:35:32 +0000
committer GitHub <noreply@github.com>  2024-06-21 17:35:32 +0100
commit    9143ac6fb4f68f1c79e42f7adc10e68bf7f36e87 (patch)
tree      73514f164dd48cfc3f902e6b3f3d2a6db8c6175c /vendor/codeberg.org
parent    [chore] update go-structr and go-mangler to no longer rely on modern-go/refle... (diff)
download  gotosocial-9143ac6fb4f68f1c79e42f7adc10e68bf7f36e87.tar.xz
updates go-mutexes to no longer rely on unsafe linkname (#3027)
Diffstat (limited to 'vendor/codeberg.org')
-rw-r--r--  vendor/codeberg.org/gruf/go-mutexes/cond.go     |  86
-rw-r--r--  vendor/codeberg.org/gruf/go-mutexes/hash_map.go |  56
-rw-r--r--  vendor/codeberg.org/gruf/go-mutexes/map.go      |  92
3 files changed, 130 insertions(+), 104 deletions(-)
diff --git a/vendor/codeberg.org/gruf/go-mutexes/cond.go b/vendor/codeberg.org/gruf/go-mutexes/cond.go
index d89040c02..3d7f21126 100644
--- a/vendor/codeberg.org/gruf/go-mutexes/cond.go
+++ b/vendor/codeberg.org/gruf/go-mutexes/cond.go
@@ -2,86 +2,66 @@ package mutexes
import (
"sync"
- "unsafe"
)
// Cond is similar to a sync.Cond{}, but
// it encompasses the Mutex{} within itself.
type Cond struct {
- notify notifyList
+ c sync.Cond
sync.Mutex
}
// See: sync.Cond{}.Wait().
func (c *Cond) Wait() {
- t := runtime_notifyListAdd(&c.notify)
- c.Mutex.Unlock()
- runtime_notifyListWait(&c.notify, t)
- c.Mutex.Lock()
+ if c.c.L == nil {
+ c.c.L = &c.Mutex
+ }
+ c.c.Wait()
}
// See: sync.Cond{}.Signal().
-func (c *Cond) Signal() { runtime_notifyListNotifyOne(&c.notify) }
+func (c *Cond) Signal() {
+ if c.c.L == nil {
+ c.c.L = &c.Mutex
+ }
+ c.c.Signal()
+}
// See: sync.Cond{}.Broadcast().
-func (c *Cond) Broadcast() { runtime_notifyListNotifyAll(&c.notify) }
+func (c *Cond) Broadcast() {
+ if c.c.L == nil {
+ c.c.L = &c.Mutex
+ }
+ c.c.Broadcast()
+}
// RWCond is similar to a sync.Cond{}, but
// it encompasses the RWMutex{} within itself.
type RWCond struct {
- notify notifyList
+ c sync.Cond
sync.RWMutex
}
// See: sync.Cond{}.Wait().
func (c *RWCond) Wait() {
- t := runtime_notifyListAdd(&c.notify)
- c.RWMutex.Unlock()
- runtime_notifyListWait(&c.notify, t)
- c.RWMutex.Lock()
+ if c.c.L == nil {
+ c.c.L = &c.RWMutex
+ }
+ c.c.Wait()
}
// See: sync.Cond{}.Signal().
-func (c *RWCond) Signal() { runtime_notifyListNotifyOne(&c.notify) }
-
-// See: sync.Cond{}.Broadcast().
-func (c *RWCond) Broadcast() { runtime_notifyListNotifyAll(&c.notify) }
-
-// unused fields left
-// un-named for safety.
-type notifyList struct {
- _ uint32 // wait uint32
- notify uint32 // notify uint32
- _ uintptr // lock mutex
- _ unsafe.Pointer // head *sudog
- _ unsafe.Pointer // tail *sudog
+func (c *RWCond) Signal() {
+ if c.c.L == nil {
+ c.c.L = &c.RWMutex
+ }
+ c.c.Signal()
}
-// See runtime/sema.go for documentation.
-//
-//go:linkname runtime_notifyListAdd sync.runtime_notifyListAdd
-func runtime_notifyListAdd(l *notifyList) uint32
-
-// See runtime/sema.go for documentation.
-//
-//go:linkname runtime_notifyListWait sync.runtime_notifyListWait
-func runtime_notifyListWait(l *notifyList, t uint32)
-
-// See runtime/sema.go for documentation.
-//
-//go:linkname runtime_notifyListNotifyOne sync.runtime_notifyListNotifyOne
-func runtime_notifyListNotifyOne(l *notifyList)
-
-// See runtime/sema.go for documentation.
-//
-//go:linkname runtime_notifyListNotifyAll sync.runtime_notifyListNotifyAll
-func runtime_notifyListNotifyAll(l *notifyList)
-
-// Ensure that sync and runtime agree on size of notifyList.
-//
-//go:linkname runtime_notifyListCheck sync.runtime_notifyListCheck
-func runtime_notifyListCheck(size uintptr)
-func init() {
- var n notifyList
- runtime_notifyListCheck(unsafe.Sizeof(n))
+// See: sync.Cond{}.Broadcast().
+func (c *RWCond) Broadcast() {
+ if c.c.L == nil {
+ c.c.L = &c.RWMutex
+ }
+ c.c.Broadcast()
}
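A minimal usage sketch of the rewritten Cond (hypothetical caller code, not part of this diff): because the first call to Wait/Signal/Broadcast lazily points the inner sync.Cond at the embedded Mutex, the zero value works out of the box, matching sync.Mutex semantics.

package main

import mutexes "codeberg.org/gruf/go-mutexes"

var (
	cond  mutexes.Cond // zero value is ready to use, like sync.Mutex
	ready bool
)

func main() {
	go func() {
		cond.Lock()
		ready = true
		cond.Broadcast() // wake every goroutine parked in Wait()
		cond.Unlock()
	}()

	cond.Lock()
	for !ready {
		cond.Wait() // atomically releases cond.Mutex; relocks on wake
	}
	cond.Unlock()
}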
diff --git a/vendor/codeberg.org/gruf/go-mutexes/hash_map.go b/vendor/codeberg.org/gruf/go-mutexes/hash_map.go
new file mode 100644
index 000000000..a177133b5
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-mutexes/hash_map.go
@@ -0,0 +1,56 @@
+package mutexes
+
+type hashmap struct {
+ m map[string]*rwmutex
+ n int
+}
+
+func (m *hashmap) init(cap int) {
+ m.m = make(map[string]*rwmutex, cap)
+ m.n = cap
+}
+
+func (m *hashmap) Get(key string) *rwmutex { return m.m[key] }
+
+func (m *hashmap) Put(key string, mu *rwmutex) {
+ m.m[key] = mu
+ if n := len(m.m); n > m.n {
+ m.n = n
+ }
+}
+
+func (m *hashmap) Delete(key string) {
+ delete(m.m, key)
+}
+
+func (m *hashmap) Compact() {
+ // Noop when hashmap size
+ // is too small to matter.
+ if m.n < 2048 {
+ return
+ }
+
+ // Difference between maximum map
+ // size and the current map size.
+ diff := m.n - len(m.m)
+
+ // Maximum load factor before the
+ // runtime allocates a new hmap:
+ // maxLoad = 13 / 16
+ //
+ // So we apply half the inverse: once
+ // more than maxLoad/2 of the hmap is
+ // empty, we compact to drop buckets.
+ if 2*16*diff > m.n*13 {
+
+ // Create new map only as big as required.
+ m2 := make(map[string]*rwmutex, len(m.m))
+ for k, v := range m.m {
+ m2[k] = v
+ }
+
+ // Set new.
+ m.m = m2
+ m.n = len(m2)
+ }
+}
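To make the compaction arithmetic concrete (the numbers below are illustrative, not taken from the diff): m.n is a high-water mark, since Put() only ever raises it, and the trigger 2*16*diff > m.n*13 is just diff/m.n > 13/32, i.e. compact once more than half the runtime's 13/16 map load factor worth of entries has been deleted relative to that peak.

package main

import "fmt"

func main() {
	// Hypothetical sizes: the map peaked at 4096 entries (m.n)
	// and has since shrunk to 2000 live entries (len(m.m)).
	n, curr := 4096, 2000
	diff := n - curr // 2096 deleted entries still pinning buckets

	// Compact() fires once diff/n > 13/32 (= maxLoad/2), written
	// without division as 2*16*diff > n*13. (n >= 2048 here, so
	// the small-map early return does not apply.)
	fmt.Println(2*16*diff > n*13) // 67072 > 53248 -> true: reallocate
}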
diff --git a/vendor/codeberg.org/gruf/go-mutexes/map.go b/vendor/codeberg.org/gruf/go-mutexes/map.go
index 0da1fc3cc..4b721cec4 100644
--- a/vendor/codeberg.org/gruf/go-mutexes/map.go
+++ b/vendor/codeberg.org/gruf/go-mutexes/map.go
@@ -6,7 +6,6 @@ import (
"unsafe"
"codeberg.org/gruf/go-mempool"
- "github.com/dolthub/swiss"
)
const (
@@ -27,14 +26,14 @@ const (
// like structures for sleeping / waking awaiting goroutines.
type MutexMap struct {
mapmu sync.Mutex
- mumap *swiss.Map[string, *rwmutex]
+ mumap hashmap
mupool mempool.UnsafePool
}
// checkInit ensures MutexMap is initialized (UNSAFE).
func (mm *MutexMap) checkInit() {
- if mm.mumap == nil {
- mm.mumap = swiss.NewMap[string, *rwmutex](0)
+ if mm.mumap.m == nil {
+ mm.mumap.init(0)
mm.mupool.DirtyFactor = 256
}
}
@@ -58,7 +57,7 @@ func (mm *MutexMap) lock(key string, lt uint8) func() {
for {
// Check map for mutex.
- mu, _ := mm.mumap.Get(key)
+ mu := mm.mumap.Get(key)
if mu == nil {
// Allocate mutex.
@@ -69,7 +68,7 @@ func (mm *MutexMap) lock(key string, lt uint8) func() {
if !mu.Lock(lt) {
// Wait on mutex unlock, after
// immediately relocking map mu.
- mu.WaitRelock(&mm.mapmu)
+ mu.WaitRelock()
continue
}
@@ -100,27 +99,9 @@ func (mm *MutexMap) unlock(key string, mu *rwmutex) {
mm.mumap.Delete(key)
mm.release(mu)
- // Maximum load factor before
- // 'swiss' allocates new hmap:
- // maxLoad = 7 / 8
- //
- // So we apply the inverse/2, once
- // $maxLoad/2 % of hmap is empty we
- // compact the map to drop buckets.
- len := mm.mumap.Count()
- cap := mm.mumap.Capacity()
- if cap-len > (cap*7)/(8*2) {
-
- // Create a new map only as big as required.
- mumap := swiss.NewMap[string, *rwmutex](uint32(len))
- mm.mumap.Iter(func(k string, v *rwmutex) (stop bool) {
- mumap.Put(k, v)
- return false
- })
-
- // Set new map.
- mm.mumap = mumap
- }
+ // Check if compaction
+ // needed.
+ mm.mumap.Compact()
// Done with map.
mm.mapmu.Unlock()
@@ -131,7 +112,9 @@ func (mm *MutexMap) acquire() *rwmutex {
if ptr := mm.mupool.Get(); ptr != nil {
return (*rwmutex)(ptr)
}
- return new(rwmutex)
+ mu := new(rwmutex)
+ mu.c.L = &mm.mapmu
+ return mu
}
// release will release given mutex to memory pool.
@@ -152,9 +135,9 @@ func (mm *MutexMap) release(mu *rwmutex) {
// mechanism we use, otherwise all Cond{}.L would reference
// the same outer map mutex.
type rwmutex struct {
- n notifyList // 'trigger' mechanism
- l int32 // no. locks
- t uint8 // lock type
+ c sync.Cond // 'trigger' mechanism
+ l int32 // no. locks
+ t uint8 // lock type
}
// Lock will lock the mutex for given lock type, in the
@@ -202,11 +185,34 @@ func (mu *rwmutex) Unlock() bool {
// Fully unlocked.
mu.t = 0
+ // NOTE: must remain in
+ // sync with runtime.notifyList{}.
+ //
+ // goexperiment.staticlockranking
+ // does change it slightly, but
+ // this does not alter the first
+ // 2 fields which are all we need.
+ type notifyList struct {
+ _ uint32
+ notify uint32
+ // ... other fields
+ }
+
+ // NOTE: must remain in
+ // sync with sync.Cond{}.
+ type syncCond struct {
+ _ struct{}
+ L sync.Locker
+ n notifyList
+ // ... other fields
+ }
+
// Wake all blocked goroutines and check
// for a change in the last notified ticket.
- before := atomic.LoadUint32(&mu.n.notify)
- runtime_notifyListNotifyAll(&mu.n)
- after := atomic.LoadUint32(&mu.n.notify)
+ cptr := (*syncCond)(unsafe.Pointer(&mu.c))
+ before := atomic.LoadUint32(&cptr.n.notify)
+ mu.c.Broadcast() // wakes all blocked!
+ after := atomic.LoadUint32(&cptr.n.notify)
// If ticket changed, this indicates
// AT LEAST one goroutine was awoken.
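The cast above relies on Go's guarantee that two struct types with identical leading fields share the layout of that prefix, letting the code read sync.Cond's unexported notify ticket. A toy version of the same trick (entirely hypothetical types, nothing from this diff):

package main

import (
	"fmt"
	"unsafe"
)

// Pretend this lives in another package
// and `hits` is unexported there.
type counter struct {
	name string
	hits uint32
}

// Local mirror of the leading fields; the layout
// must match the prefix of counter exactly.
type counterPrefix struct {
	name string
	hits uint32
}

func main() {
	c := counter{name: "reqs", hits: 41}
	p := (*counterPrefix)(unsafe.Pointer(&c))
	p.hits++            // reach the "private" field through the mirror
	fmt.Println(c.hits) // 42
}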
@@ -226,20 +232,4 @@ func (mu *rwmutex) Unlock() bool {
// locked state. It increments the notifyList waiter count before
// unlocking the outer mutex and blocking on the notifyList wait.
// On wake it decrements the wait count and relocks the outer mutex.
-func (mu *rwmutex) WaitRelock(outer *sync.Mutex) {
-
- // add ourselves to list while still
- // under protection of outer map lock.
- t := runtime_notifyListAdd(&mu.n)
-
- // Finished with
- // outer map lock.
- outer.Unlock()
-
- // Block until awoken by another
- // goroutine within mu.Unlock().
- runtime_notifyListWait(&mu.n, t)
-
- // Relock!
- outer.Lock()
-}
+func (mu *rwmutex) WaitRelock() { mu.c.Wait() }
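Finally, a sketch of how MutexMap is driven from the outside. This is hypothetical caller code, and it assumes the package's exported Lock/RLock wrappers around the internal lock() shown above: each key gets its own pooled rwmutex, and the returned closure performs the per-key unlock plus the map cleanup and compaction seen in unlock().

package main

import mutexes "codeberg.org/gruf/go-mutexes"

var mm mutexes.MutexMap // zero value; initialized on first use

func updateAccount(id string) {
	// Exclusive lock scoped to this key only; operations on
	// other keys proceed concurrently through the same map.
	unlock := mm.Lock(id)
	defer unlock() // releases key, may trigger hashmap compaction

	// ... mutate state belonging to `id` ...
}

func readAccount(id string) {
	unlock := mm.RLock(id) // shared lock: many readers per key
	defer unlock()

	// ... read state belonging to `id` ...
}

func main() {
	updateAccount("alice")
	readAccount("alice")
}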