author    kim <89579420+NyaaaWhatsUpDoc@users.noreply.github.com>  2024-05-13 08:05:46 +0000
committer GitHub <noreply@github.com>  2024-05-13 08:05:46 +0000
commit    c06e6fb6561595adc80ce5191640ae442771d45c (patch)
tree      58845f63151eff4e984351575eea67f5e82a6c82 /vendor/codeberg.org
parent    [bugfix] Reset emoji fields on upload error (#2905)
[performance] update go-structr and go-mutexes with memory usage improvements (#2909)
* update go-structr and go-mutexes with memory usage improvements
* bump to go-structr v0.8.4
Diffstat (limited to 'vendor/codeberg.org')
-rw-r--r--  vendor/codeberg.org/gruf/go-mempool/LICENSE       |   9
-rw-r--r--  vendor/codeberg.org/gruf/go-mempool/README.md     |   3
-rw-r--r--  vendor/codeberg.org/gruf/go-mempool/pool.go       |  99
-rw-r--r--  vendor/codeberg.org/gruf/go-mutexes/map.go        | 112
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/cache.go      |  12
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/index.go      |  51
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/queue.go      |  17
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/queue_ctx.go  |   7
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/runtime.go    |  19
9 files changed, 240 insertions(+), 89 deletions(-)
diff --git a/vendor/codeberg.org/gruf/go-mempool/LICENSE b/vendor/codeberg.org/gruf/go-mempool/LICENSE
new file mode 100644
index 000000000..d6f08d0ab
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-mempool/LICENSE
@@ -0,0 +1,9 @@
+MIT License
+
+Copyright (c) gruf
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/codeberg.org/gruf/go-mempool/README.md b/vendor/codeberg.org/gruf/go-mempool/README.md
new file mode 100644
index 000000000..af4cb6770
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-mempool/README.md
@@ -0,0 +1,3 @@
+# go-mempool
+
+very simple memory pool implementation
\ No newline at end of file
diff --git a/vendor/codeberg.org/gruf/go-mempool/pool.go b/vendor/codeberg.org/gruf/go-mempool/pool.go
new file mode 100644
index 000000000..e5ff6ba3d
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-mempool/pool.go
@@ -0,0 +1,99 @@
+package mempool
+
+import (
+ "unsafe"
+)
+
+const DefaultDirtyFactor = 128
+
+// Pool provides a type-safe form
+// of UnsafePool using generics.
+//
+// Note it is NOT safe for concurrent
+// use, you must protect it yourself!
+type Pool[T any] struct {
+
+ // New is an optionally provided
+ // allocator used when no value
+ // is available for use in pool.
+ New func() T
+
+ // Reset is an optionally provided
+ // value resetting function called
+ // on passed value to Put().
+ Reset func(T)
+
+ UnsafePool
+}
+
+func (p *Pool[T]) Get() T {
+ if ptr := p.UnsafePool.Get(); ptr != nil {
+ return *(*T)(ptr)
+ } else if p.New != nil {
+ return p.New()
+ }
+ var z T
+ return z
+}
+
+func (p *Pool[T]) Put(t T) {
+ if p.Reset != nil {
+ p.Reset(t)
+ }
+ ptr := unsafe.Pointer(&t)
+ p.UnsafePool.Put(ptr)
+}
+
+// UnsafePool provides an incredibly
+// simple memory pool implementation
+// that stores ptrs to memory values,
+// and regularly flushes internal pool
+// structures according to DirtyFactor.
+//
+// Note it is NOT safe for concurrent
+// use, you must protect it yourself!
+type UnsafePool struct {
+
+ // DirtyFactor determines the max
+ // number of $dirty count before
+ // pool is garbage collected. Where:
+ // $dirty = len(current) - len(victim)
+ DirtyFactor int
+
+ current []unsafe.Pointer
+ victim []unsafe.Pointer
+}
+
+func (p *UnsafePool) Get() unsafe.Pointer {
+ // First try current list.
+ if len(p.current) > 0 {
+ ptr := p.current[len(p.current)-1]
+ p.current = p.current[:len(p.current)-1]
+ return ptr
+ }
+
+ // Fallback to victim.
+ if len(p.victim) > 0 {
+ ptr := p.victim[len(p.victim)-1]
+ p.victim = p.victim[:len(p.victim)-1]
+ return ptr
+ }
+
+ return nil
+}
+
+func (p *UnsafePool) Put(ptr unsafe.Pointer) {
+ p.current = append(p.current, ptr)
+
+ // Get dirty factor.
+ df := p.DirtyFactor
+ if df == 0 {
+ df = DefaultDirtyFactor
+ }
+
+ if len(p.current)-len(p.victim) > df {
+ // Garbage collection!
+ p.victim = p.current
+ p.current = nil
+ }
+}
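
Example usage of the new generic Pool[T] API (an illustrative sketch, not part of this commit; the *bytes.Buffer element type is an assumption). Per the doc comments above, the pool is NOT safe for concurrent use, so callers must supply their own locking:

package main

import (
	"bytes"

	"codeberg.org/gruf/go-mempool"
)

func main() {
	// Pool of reusable buffers: New allocates when the pool
	// is empty, Reset is applied to every value passed to Put().
	pool := mempool.Pool[*bytes.Buffer]{
		New:   func() *bytes.Buffer { return new(bytes.Buffer) },
		Reset: func(b *bytes.Buffer) { b.Reset() },
	}

	buf := pool.Get() // pooled value, else New(), else zero value of T
	buf.WriteString("hello")
	pool.Put(buf) // resets the buffer, then retains it for reuse
}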
diff --git a/vendor/codeberg.org/gruf/go-mutexes/map.go b/vendor/codeberg.org/gruf/go-mutexes/map.go
index e61ef3537..0da1fc3cc 100644
--- a/vendor/codeberg.org/gruf/go-mutexes/map.go
+++ b/vendor/codeberg.org/gruf/go-mutexes/map.go
@@ -3,17 +3,16 @@ package mutexes
import (
"sync"
"sync/atomic"
+ "unsafe"
+
+ "codeberg.org/gruf/go-mempool"
+ "github.com/dolthub/swiss"
)
const (
// possible lock types.
lockTypeRead = uint8(1) << 0
lockTypeWrite = uint8(1) << 1
-
- // frequency of GC cycles
- // per no. unlocks. i.e.
- // every 'gcfreq' unlocks.
- gcfreq = 1024
)
// MutexMap is a structure that allows read / write locking
@@ -28,15 +27,15 @@ const (
// like structures for sleeping / awaking awaiting goroutines.
type MutexMap struct {
mapmu sync.Mutex
- mumap map[string]*rwmutex
- mupool rwmutexPool
- count uint32
+ mumap *swiss.Map[string, *rwmutex]
+ mupool mempool.UnsafePool
}
// checkInit ensures MutexMap is initialized (UNSAFE).
func (mm *MutexMap) checkInit() {
if mm.mumap == nil {
- mm.mumap = make(map[string]*rwmutex)
+ mm.mumap = swiss.NewMap[string, *rwmutex](0)
+ mm.mupool.DirtyFactor = 256
}
}
@@ -58,13 +57,13 @@ func (mm *MutexMap) lock(key string, lt uint8) func() {
mm.checkInit()
for {
- // Check map for mu.
- mu := mm.mumap[key]
+ // Check map for mutex.
+ mu, _ := mm.mumap.Get(key)
if mu == nil {
- // Allocate new mutex.
- mu = mm.mupool.Acquire()
- mm.mumap[key] = mu
+ // Allocate mutex.
+ mu = mm.acquire()
+ mm.mumap.Put(key, mu)
}
if !mu.Lock(lt) {
@@ -87,63 +86,58 @@ func (mm *MutexMap) unlock(key string, mu *rwmutex) {
mm.mapmu.Lock()
// Unlock mutex.
- if mu.Unlock() {
+ if !mu.Unlock() {
- // Mutex fully unlocked
- // with zero waiters. Self
- // evict and release it.
- delete(mm.mumap, key)
- mm.mupool.Release(mu)
+ // Fast path. Mutex still
+ // used so no map change.
+ mm.mapmu.Unlock()
+ return
}
- if mm.count++; mm.count%gcfreq == 0 {
- // Every 'gcfreq' unlocks perform
- // a garbage collection to keep
- // us squeaky clean :]
- mm.mupool.GC()
+ // Mutex fully unlocked
+ // with zero waiters. Self
+ // evict and release it.
+ mm.mumap.Delete(key)
+ mm.release(mu)
+
+ // Maximum load factor before
+ // 'swiss' allocates new hmap:
+ // maxLoad = 7 / 8
+ //
+ // So we apply the inverse/2, once
+ // $maxLoad/2 % of hmap is empty we
+ // compact the map to drop buckets.
+ len := mm.mumap.Count()
+ cap := mm.mumap.Capacity()
+ if cap-len > (cap*7)/(8*2) {
+
+ // Create a new map only as big as required.
+ mumap := swiss.NewMap[string, *rwmutex](uint32(len))
+ mm.mumap.Iter(func(k string, v *rwmutex) (stop bool) {
+ mumap.Put(k, v)
+ return false
+ })
+
+ // Set new map.
+ mm.mumap = mumap
}
// Done with map.
mm.mapmu.Unlock()
}
-// rwmutexPool is a very simply memory rwmutexPool.
-type rwmutexPool struct {
- current []*rwmutex
- victim []*rwmutex
-}
-
-// Acquire will returns a rwmutexState from rwmutexPool (or alloc new).
-func (p *rwmutexPool) Acquire() *rwmutex {
- // First try the current queue
- if l := len(p.current) - 1; l >= 0 {
- mu := p.current[l]
- p.current = p.current[:l]
- return mu
- }
-
- // Next try the victim queue.
- if l := len(p.victim) - 1; l >= 0 {
- mu := p.victim[l]
- p.victim = p.victim[:l]
- return mu
+// acquire will acquire mutex from memory pool, or alloc new.
+func (mm *MutexMap) acquire() *rwmutex {
+ if ptr := mm.mupool.Get(); ptr != nil {
+ return (*rwmutex)(ptr)
}
-
- // Lastly, alloc new.
- mu := new(rwmutex)
- return mu
-}
-
-// Release places a sync.rwmutexState back in the rwmutexPool.
-func (p *rwmutexPool) Release(mu *rwmutex) {
- p.current = append(p.current, mu)
+ return new(rwmutex)
}
-// GC will clear out unused entries from the rwmutexPool.
-func (p *rwmutexPool) GC() {
- current := p.current
- p.current = nil
- p.victim = current
+// release will release given mutex to memory pool.
+func (mm *MutexMap) release(mu *rwmutex) {
+ ptr := unsafe.Pointer(mu)
+ mm.mupool.Put(ptr)
}
// rwmutex represents a RW mutex when used correctly within
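
The same load-factor arithmetic reappears below in go-structr's new Index.compact(). As a worked example of the heuristic (a standalone sketch, not code from this commit): with a capacity of 1024, the map is rebuilt once more than 1024*7/16 = 448 slots sit empty, i.e. once fewer than 576 keys remain.

// shouldCompact mirrors the check in unlock() above: compact
// once more than maxLoad/2 = 7/16 of capacity stands empty.
func shouldCompact(count, capacity int) bool {
	return capacity-count > (capacity*7)/(8*2)
}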
diff --git a/vendor/codeberg.org/gruf/go-structr/cache.go b/vendor/codeberg.org/gruf/go-structr/cache.go
index 17c491158..429c6ab67 100644
--- a/vendor/codeberg.org/gruf/go-structr/cache.go
+++ b/vendor/codeberg.org/gruf/go-structr/cache.go
@@ -289,9 +289,8 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
// Load new result.
val, err = load()
- // Check for ignored
- // (transient) errors.
- if ignore(err) {
+ // Check for ignored error types.
+ if err != nil && ignore(err) {
return val, err
}
@@ -536,9 +535,10 @@ func (c *Cache[T]) Debug() map[string]any {
m["indices"] = indices
for i := range c.indices {
var n uint64
- for _, list := range c.indices[i].data {
- n += uint64(list.len)
- }
+ c.indices[i].data.Iter(func(_ string, l *list) (stop bool) {
+ n += uint64(l.len)
+ return
+ })
indices[c.indices[i].name] = n
}
c.mutex.Unlock()
diff --git a/vendor/codeberg.org/gruf/go-structr/index.go b/vendor/codeberg.org/gruf/go-structr/index.go
index b8f6b9d01..16f099ec6 100644
--- a/vendor/codeberg.org/gruf/go-structr/index.go
+++ b/vendor/codeberg.org/gruf/go-structr/index.go
@@ -7,6 +7,8 @@ import (
"unsafe"
"codeberg.org/gruf/go-byteutil"
+
+ "github.com/dolthub/swiss"
)
// IndexConfig defines config variables
@@ -70,7 +72,7 @@ type Index struct {
// index_entry{} which also contains the exact
// key each result is stored under. the hash map
// only keys by the xxh3 hash checksum for speed.
- data map[string]*list // [*index_entry]
+ data *swiss.Map[string, *list]
// struct fields encompassed by
// keys (+ hashes) of this index.
@@ -153,13 +155,13 @@ func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
}
// Initialize index_entry list store.
- i.data = make(map[string]*list, cap+1)
+ i.data = swiss.NewMap[string, *list](uint32(cap))
}
// get_one will fetch one indexed item under key.
func (i *Index) get_one(key Key) *indexed_item {
// Get list at hash.
- l := i.data[key.key]
+ l, _ := i.data.Get(key.key)
if l == nil {
return nil
}
@@ -182,7 +184,7 @@ func (i *Index) get(key Key, hook func(*indexed_item)) {
}
// Get list at hash.
- l := i.data[key.key]
+ l, _ := i.data.Get(key.key)
if l == nil {
return
}
@@ -220,7 +222,7 @@ func (i *Index) key(buf *byteutil.Buffer, parts []any) Key {
for x, field := range i.fields {
before := len(buf.B)
buf.B = field.mangle(buf.B, parts[x])
- if string(buf.B[before:]) == field.zero {
+ if string(buf.B[before:]) == field.zerostr {
return Key{}
}
buf.B = append(buf.B, '.')
@@ -242,13 +244,13 @@ func (i *Index) key(buf *byteutil.Buffer, parts []any) Key {
// of key collisions and overwriting 'unique' entries.
func (i *Index) append(key Key, item *indexed_item) {
// Look for existing.
- l := i.data[key.key]
+ l, _ := i.data.Get(key.key)
if l == nil {
// Allocate new.
l = new_list()
- i.data[key.key] = l
+ i.data.Put(key.key, l)
} else if is_unique(i.flags) {
@@ -284,7 +286,7 @@ func (i *Index) delete(key Key, hook func(*indexed_item)) {
}
// Get list at hash.
- l := i.data[key.key]
+ l, _ := i.data.Get(key.key)
if l == nil {
return
}
@@ -298,7 +300,7 @@ func (i *Index) delete(key Key, hook func(*indexed_item)) {
}
// Delete data at hash.
- delete(i.data, key.key)
+ i.data.Delete(key.key)
// Iterate entries in list.
for x := 0; x < l.len; x++ {
@@ -328,7 +330,7 @@ func (i *Index) delete(key Key, hook func(*indexed_item)) {
// delete_entry deletes the given index entry.
func (i *Index) delete_entry(entry *index_entry) {
// Get list at hash sum.
- l := i.data[entry.key.key]
+ l, _ := i.data.Get(entry.key.key)
if l == nil {
return
}
@@ -338,7 +340,7 @@ func (i *Index) delete_entry(entry *index_entry) {
if l.len == 0 {
// Remove entry list from map.
- delete(i.data, entry.key.key)
+ i.data.Delete(entry.key.key)
// Release list.
free_list(l)
@@ -348,6 +350,33 @@ func (i *Index) delete_entry(entry *index_entry) {
entry.item.drop_index(entry)
}
+// compact will reduce the size of underlying
+// index map if the cap vastly exceeds len.
+func (i *Index) compact() {
+
+ // Maximum load factor before
+ // 'swiss' allocates new hmap:
+ // maxLoad = 7 / 8
+ //
+ // So we apply the inverse/2, once
+ // $maxLoad/2 % of hmap is empty we
+ // compact the map to drop buckets.
+ len := i.data.Count()
+ cap := i.data.Capacity()
+ if cap-len > (cap*7)/(8*2) {
+
+ // Create a new map only as big as required.
+ data := swiss.NewMap[string, *list](uint32(len))
+ i.data.Iter(func(k string, v *list) (stop bool) {
+ data.Put(k, v)
+ return false
+ })
+
+ // Set new map.
+ i.data = data
+ }
+}
+
// index_entry represents a single entry
// in an Index{}, where it will be accessible
// by Key{} pointing to a containing list{}.
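
The copy-and-replace pattern inside compact() can be read as a standalone helper over the same dolthub/swiss API this commit vendors (NewMap, Count, Iter, Put); the helper below is an illustrative sketch, not code from the commit:

import "github.com/dolthub/swiss"

// shrink rebuilds a swiss map at the minimum capacity needed
// for its current entries, dropping now-empty buckets.
func shrink[V any](m *swiss.Map[string, V]) *swiss.Map[string, V] {
	out := swiss.NewMap[string, V](uint32(m.Count()))
	m.Iter(func(k string, v V) (stop bool) {
		out.Put(k, v) // copy entry into the right-sized map
		return false  // continue iteration
	})
	return out
}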
diff --git a/vendor/codeberg.org/gruf/go-structr/queue.go b/vendor/codeberg.org/gruf/go-structr/queue.go
index d7c21daaa..ed5d78b5a 100644
--- a/vendor/codeberg.org/gruf/go-structr/queue.go
+++ b/vendor/codeberg.org/gruf/go-structr/queue.go
@@ -214,9 +214,10 @@ func (q *Queue[T]) Debug() map[string]any {
m["indices"] = indices
for i := range q.indices {
var n uint64
- for _, list := range q.indices[i].data {
- n += uint64(list.len)
- }
+ q.indices[i].data.Iter(func(_ string, l *list) (stop bool) {
+ n += uint64(l.len)
+ return
+ })
indices[q.indices[i].name] = n
}
q.mutex.Unlock()
@@ -324,8 +325,14 @@ func (q *Queue[T]) delete(item *indexed_item) {
entry := item.indexed[len(item.indexed)-1]
item.indexed = item.indexed[:len(item.indexed)-1]
- // Drop index_entry from index.
- entry.index.delete_entry(entry)
+ // Get entry's index.
+ index := entry.index
+
+ // Drop this index_entry.
+ index.delete_entry(entry)
+
+ // Check compact.
+ index.compact()
}
// Drop entry from queue list.
diff --git a/vendor/codeberg.org/gruf/go-structr/queue_ctx.go b/vendor/codeberg.org/gruf/go-structr/queue_ctx.go
index 766644c3c..3d15e619c 100644
--- a/vendor/codeberg.org/gruf/go-structr/queue_ctx.go
+++ b/vendor/codeberg.org/gruf/go-structr/queue_ctx.go
@@ -73,9 +73,10 @@ func (q *QueueCtx[T]) Debug() map[string]any {
m["indices"] = indices
for i := range q.indices {
var n uint64
- for _, list := range q.indices[i].data {
- n += uint64(list.len)
- }
+ q.indices[i].data.Iter(func(_ string, l *list) (stop bool) {
+ n += uint64(l.len)
+ return
+ })
indices[q.indices[i].name] = n
}
q.mutex.Unlock()
diff --git a/vendor/codeberg.org/gruf/go-structr/runtime.go b/vendor/codeberg.org/gruf/go-structr/runtime.go
index 9990fe7b9..a4696187a 100644
--- a/vendor/codeberg.org/gruf/go-structr/runtime.go
+++ b/vendor/codeberg.org/gruf/go-structr/runtime.go
@@ -29,10 +29,15 @@ type struct_field struct {
// (i.e. fast serializing) fn.
mangle mangler.Mangler
+ // zero value data, used when
+ // nil encountered during ptr
+ // offset following.
+ zero unsafe.Pointer
+
// mangled zero value string,
// if set this indicates zero
// values of field not allowed
- zero string
+ zerostr string
}
// next_offset defines a next offset location
@@ -106,13 +111,14 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) {
// Get field type as reflect2.
sfield.type2 = reflect2.Type2(t)
- i := sfield.type2.New()
// Find mangler for field type.
sfield.mangle = mangler.Get(t)
- // Set possible mangled zero value.
- sfield.zero = string(sfield.mangle(nil, i))
+ // Set possible zero value and its string.
+ sfield.zero = sfield.type2.UnsafeNew()
+ i := sfield.type2.UnsafeIndirect(sfield.zero)
+ sfield.zerostr = string(sfield.mangle(nil, i))
return
}
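
Linearized, the new zero-value setup in find_field reads as follows (a sketch using the same reflect2 and go-mangler calls as the hunk above; import paths are assumed from the vendored package names):

import (
	"reflect"
	"unsafe"

	"codeberg.org/gruf/go-mangler"
	"github.com/modern-go/reflect2"
)

// zeroValueInfo mirrors the zero-value setup in find_field.
func zeroValueInfo(t reflect.Type) (zero unsafe.Pointer, zerostr string) {
	type2 := reflect2.Type2(t)          // reflect2 wrapper for the field type
	mangle := mangler.Get(t)            // mangler (fast serializer) for the type
	zero = type2.UnsafeNew()            // pointer to freshly zeroed memory
	iface := type2.UnsafeIndirect(zero) // box the zero value as an interface
	zerostr = string(mangle(nil, iface))
	return
}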
@@ -130,8 +136,11 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []any {
for _, offset := range field.offsets {
// Dereference any ptrs to offset.
fptr = deref(fptr, offset.derefs)
+
if fptr == nil {
- return nil
+ // Use zero value.
+ fptr = field.zero
+ break
}
// Jump forward by offset to next ptr.
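
The effect of this last hunk: previously a nil pointer met while following field offsets aborted extraction entirely (return nil), leaving the item unindexed; now the precomputed zero pointer is substituted so the item is still indexed under the field's zero value. A toy illustration of the fallback (simplified types, illustrative only, not the vendored function):

import "unsafe"

// follow dereferences fptr the given number of times, falling back
// to a precomputed zero-value pointer when a nil is encountered.
func follow(fptr, zero unsafe.Pointer, derefs int) unsafe.Pointer {
	for d := 0; d < derefs; d++ {
		if fptr == nil {
			return zero // index under the zero value instead
		}
		fptr = *(*unsafe.Pointer)(fptr) // one pointer indirection
	}
	return fptr
}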