summaryrefslogtreecommitdiff
path: root/vendor/codeberg.org/gruf/go-structr
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/codeberg.org/gruf/go-structr')
-rw-r--r--vendor/codeberg.org/gruf/go-structr/cache.go28
-rw-r--r--vendor/codeberg.org/gruf/go-structr/index.go50
-rw-r--r--vendor/codeberg.org/gruf/go-structr/map.go59
-rw-r--r--vendor/codeberg.org/gruf/go-structr/queue.go9
-rw-r--r--vendor/codeberg.org/gruf/go-structr/queue_ctx.go5
-rw-r--r--vendor/codeberg.org/gruf/go-structr/runtime.go56
-rw-r--r--vendor/codeberg.org/gruf/go-structr/util.go8
7 files changed, 138 insertions, 77 deletions
diff --git a/vendor/codeberg.org/gruf/go-structr/cache.go b/vendor/codeberg.org/gruf/go-structr/cache.go
index 0b8a8b2c7..c16bf48c8 100644
--- a/vendor/codeberg.org/gruf/go-structr/cache.go
+++ b/vendor/codeberg.org/gruf/go-structr/cache.go
@@ -194,8 +194,7 @@ func (c *Cache[T]) Put(values ...T) {
// Store all passed values.
for i := range values {
c.store_value(
- nil,
- Key{},
+ nil, "",
values[i],
)
}
@@ -302,9 +301,9 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
// the provided value, so it is
// safe for us to return as-is.
if err != nil {
- c.store_error(index, key, err)
+ c.store_error(index, key.key, err)
} else {
- c.store_value(index, key, val)
+ c.store_value(index, key.key, val)
}
// Done with lock.
@@ -388,8 +387,7 @@ func (c *Cache[T]) Load(index *Index, keys []Key, load func([]Key) ([]T, error))
// Store all uncached values.
for i := range uncached {
c.store_value(
- nil,
- Key{},
+ nil, "",
uncached[i],
)
}
@@ -511,6 +509,11 @@ func (c *Cache[T]) Trim(perc float64) {
c.delete(item)
}
+ // Compact index data stores.
+ for i := range c.indices {
+ c.indices[i].data.Compact()
+ }
+
// Done with lock.
c.mutex.Unlock()
}
@@ -535,10 +538,9 @@ func (c *Cache[T]) Debug() map[string]any {
m["indices"] = indices
for i := range c.indices {
var n uint64
- c.indices[i].data.Iter(func(_ string, l *list) (stop bool) {
+ for _, l := range c.indices[i].data.m {
n += uint64(l.len)
- return
- })
+ }
indices[c.indices[i].name] = n
}
c.mutex.Unlock()
@@ -553,7 +555,7 @@ func (c *Cache[T]) Cap() int {
return m
}
-func (c *Cache[T]) store_value(index *Index, key Key, value T) {
+func (c *Cache[T]) store_value(index *Index, key string, value T) {
// Alloc new index item.
item := new_indexed_item()
if cap(item.indexed) < len(c.indices) {
@@ -569,7 +571,7 @@ func (c *Cache[T]) store_value(index *Index, key Key, value T) {
if index != nil {
// Append item to index.
- index.append(key.key, item)
+ index.append(key, item)
}
// Get ptr to value data.
@@ -619,7 +621,7 @@ func (c *Cache[T]) store_value(index *Index, key Key, value T) {
}
}
-func (c *Cache[T]) store_error(index *Index, key Key, err error) {
+func (c *Cache[T]) store_error(index *Index, key string, err error) {
if index == nil {
// nothing we
// can do here.
@@ -639,7 +641,7 @@ func (c *Cache[T]) store_error(index *Index, key Key, err error) {
item.data = err
// Append item to index.
- index.append(key.key, item)
+ index.append(key, item)
// Add item to main lru list.
c.lru.push_front(&item.elem)
diff --git a/vendor/codeberg.org/gruf/go-structr/index.go b/vendor/codeberg.org/gruf/go-structr/index.go
index 522dca836..b1e05b9b9 100644
--- a/vendor/codeberg.org/gruf/go-structr/index.go
+++ b/vendor/codeberg.org/gruf/go-structr/index.go
@@ -7,8 +7,6 @@ import (
"unsafe"
"codeberg.org/gruf/go-byteutil"
-
- "github.com/dolthub/swiss"
)
// IndexConfig defines config variables
@@ -72,7 +70,7 @@ type Index struct {
// index_entry{} which also contains the exact
// key each result is stored under. the hash map
// only keys by the xxh3 hash checksum for speed.
- data *swiss.Map[string, *list]
+ data hashmap
// struct fields encompassed by
// keys (+ hashes) of this index.
@@ -93,8 +91,12 @@ func (i *Index) Name() string {
// the type of lookup this Index uses in cache.
// NOTE: panics on incorrect no. parts / types given.
func (i *Index) Key(parts ...any) Key {
+ ptrs := make([]unsafe.Pointer, len(parts))
+ for x, part := range parts {
+ ptrs[x] = eface_data(part)
+ }
buf := new_buffer()
- key := i.key(buf, parts)
+ key := i.key(buf, ptrs)
free_buffer(buf)
return Key{
raw: parts,
@@ -109,7 +111,11 @@ func (i *Index) Keys(parts ...[]any) []Key {
keys := make([]Key, 0, len(parts))
buf := new_buffer()
for _, parts := range parts {
- key := i.key(buf, parts)
+ ptrs := make([]unsafe.Pointer, len(parts))
+ for x, part := range parts {
+ ptrs[x] = eface_data(part)
+ }
+ key := i.key(buf, ptrs)
if key == "" {
continue
}
@@ -160,8 +166,9 @@ func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
i.fields[x] = find_field(t, names)
}
- // Initialize index_entry list store.
- i.data = swiss.NewMap[string, *list](uint32(cap))
+ // Initialize store for
+ // index_entry lists.
+ i.data.init(cap)
}
// get_one will fetch one indexed item under key.
@@ -203,7 +210,7 @@ func (i *Index) get(key string, hook func(*indexed_item)) {
}
// key uses hasher to generate Key{} from given raw parts.
-func (i *Index) key(buf *byteutil.Buffer, parts []any) string {
+func (i *Index) key(buf *byteutil.Buffer, parts []unsafe.Pointer) string {
if len(parts) != len(i.fields) {
panicf("incorrect number key parts: want=%d received=%d",
len(i.fields),
@@ -332,33 +339,6 @@ func (i *Index) delete_entry(entry *index_entry) {
entry.item.drop_index(entry)
}
-// compact will reduce the size of underlying
-// index map if the cap vastly exceeds len.
-func (i *Index) compact() {
-
- // Maximum load factor before
- // 'swiss' allocates new hmap:
- // maxLoad = 7 / 8
- //
- // So we apply the inverse/2, once
- // $maxLoad/2 % of hmap is empty we
- // compact the map to drop buckets.
- len := i.data.Count()
- cap := i.data.Capacity()
- if cap-len > (cap*7)/(8*2) {
-
- // Create a new map only as big as required.
- data := swiss.NewMap[string, *list](uint32(len))
- i.data.Iter(func(k string, v *list) (stop bool) {
- data.Put(k, v)
- return false
- })
-
- // Set new map.
- i.data = data
- }
-}
-
// index_entry represents a single entry
// in an Index{}, where it will be accessible
// by Key{} pointing to a containing list{}.
diff --git a/vendor/codeberg.org/gruf/go-structr/map.go b/vendor/codeberg.org/gruf/go-structr/map.go
new file mode 100644
index 000000000..a31574641
--- /dev/null
+++ b/vendor/codeberg.org/gruf/go-structr/map.go
@@ -0,0 +1,59 @@
+package structr
+
+type hashmap struct {
+ m map[string]*list
+ n int
+}
+
+func (m *hashmap) init(cap int) {
+ m.m = make(map[string]*list, cap)
+ m.n = cap
+}
+
+func (m *hashmap) Get(key string) (*list, bool) {
+ list, ok := m.m[key]
+ return list, ok
+}
+
+func (m *hashmap) Put(key string, list *list) {
+ m.m[key] = list
+ if n := len(m.m); n > m.n {
+ m.n = n
+ }
+}
+
+func (m *hashmap) Delete(key string) {
+ delete(m.m, key)
+}
+
+func (m *hashmap) Compact() {
+ // Noop when hashmap size
+ // is too small to matter.
+ if m.n < 2048 {
+ return
+ }
+
+ // Difference between maximum map
+ // size and the current map size.
+ diff := m.n - len(m.m)
+
+ // Maximum load factor before
+ // runtime allocates new hmap:
+ // maxLoad = 13 / 16
+ //
+ // So we apply the inverse/2, once
+ // $maxLoad/2 % of hmap is empty we
+ // compact the map to drop buckets.
+ if 2*16*diff > m.n*13 {
+
+ // Create new map only as big as required.
+ m2 := make(map[string]*list, len(m.m))
+ for k, v := range m.m {
+ m2[k] = v
+ }
+
+ // Set new.
+ m.m = m2
+ m.n = len(m2)
+ }
+}
diff --git a/vendor/codeberg.org/gruf/go-structr/queue.go b/vendor/codeberg.org/gruf/go-structr/queue.go
index bdead6ea8..1c49edbb1 100644
--- a/vendor/codeberg.org/gruf/go-structr/queue.go
+++ b/vendor/codeberg.org/gruf/go-structr/queue.go
@@ -214,10 +214,9 @@ func (q *Queue[T]) Debug() map[string]any {
m["indices"] = indices
for i := range q.indices {
var n uint64
- q.indices[i].data.Iter(func(_ string, l *list) (stop bool) {
+ for _, l := range q.indices[i].data.m {
n += uint64(l.len)
- return
- })
+ }
indices[q.indices[i].name] = n
}
q.mutex.Unlock()
@@ -331,8 +330,8 @@ func (q *Queue[T]) delete(item *indexed_item) {
// Drop this index_entry.
index.delete_entry(entry)
- // Check compact.
- index.compact()
+ // Check compact map.
+ index.data.Compact()
}
// Drop entry from queue list.
diff --git a/vendor/codeberg.org/gruf/go-structr/queue_ctx.go b/vendor/codeberg.org/gruf/go-structr/queue_ctx.go
index 3d15e619c..5431b8947 100644
--- a/vendor/codeberg.org/gruf/go-structr/queue_ctx.go
+++ b/vendor/codeberg.org/gruf/go-structr/queue_ctx.go
@@ -73,10 +73,9 @@ func (q *QueueCtx[T]) Debug() map[string]any {
m["indices"] = indices
for i := range q.indices {
var n uint64
- q.indices[i].data.Iter(func(_ string, l *list) (stop bool) {
+ for _, l := range q.indices[i].data.m {
n += uint64(l.len)
- return
- })
+ }
indices[q.indices[i].name] = n
}
q.mutex.Unlock()
diff --git a/vendor/codeberg.org/gruf/go-structr/runtime.go b/vendor/codeberg.org/gruf/go-structr/runtime.go
index a4696187a..4d76a0d74 100644
--- a/vendor/codeberg.org/gruf/go-structr/runtime.go
+++ b/vendor/codeberg.org/gruf/go-structr/runtime.go
@@ -8,18 +8,13 @@ import (
"unsafe"
"codeberg.org/gruf/go-mangler"
- "github.com/modern-go/reflect2"
)
// struct_field contains pre-prepared type
// information about a struct's field member,
// including memory offset and hash function.
type struct_field struct {
-
- // type2 contains the reflect2
- // type information for this field,
- // used in repacking it as eface.
- type2 reflect2.Type
+ rtype reflect.Type
// offsets defines whereabouts in
// memory this field is located.
@@ -109,25 +104,27 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) {
t = field.Type
}
- // Get field type as reflect2.
- sfield.type2 = reflect2.Type2(t)
+ // Set final type.
+ sfield.rtype = t
// Find mangler for field type.
sfield.mangle = mangler.Get(t)
- // Set possible zero value and its string.
- sfield.zero = sfield.type2.UnsafeNew()
- i := sfield.type2.UnsafeIndirect(sfield.zero)
- sfield.zerostr = string(sfield.mangle(nil, i))
+ // Get new zero value data ptr.
+ v := reflect.New(t).Elem()
+ zptr := eface_data(v.Interface())
+ zstr := sfield.mangle(nil, zptr)
+ sfield.zerostr = string(zstr)
+ sfield.zero = zptr
return
}
// extract_fields extracts given structfields from the provided value type,
// this is done using predetermined struct field memory offset locations.
-func extract_fields(ptr unsafe.Pointer, fields []struct_field) []any {
- // Prepare slice of field ifaces.
- ifaces := make([]any, len(fields))
+func extract_fields(ptr unsafe.Pointer, fields []struct_field) []unsafe.Pointer {
+ // Prepare slice of field value pointers.
+ ptrs := make([]unsafe.Pointer, len(fields))
for i, field := range fields {
// loop scope.
@@ -136,10 +133,7 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []any {
for _, offset := range field.offsets {
// Dereference any ptrs to offset.
fptr = deref(fptr, offset.derefs)
-
if fptr == nil {
- // Use zero value.
- fptr = field.zero
break
}
@@ -148,11 +142,31 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []any {
offset.offset)
}
- // Repack value data ptr as empty interface.
- ifaces[i] = field.type2.UnsafeIndirect(fptr)
+ if like_ptr(field.rtype) && fptr != nil {
+ // Further dereference value ptr.
+ fptr = *(*unsafe.Pointer)(fptr)
+ }
+
+ if fptr == nil {
+ // Use zero value.
+ fptr = field.zero
+ }
+
+ ptrs[i] = fptr
}
+ return ptrs
+}
- return ifaces
+// like_ptr returns whether type's kind is ptr-like.
+func like_ptr(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Pointer,
+ reflect.Map,
+ reflect.Chan,
+ reflect.Func:
+ return true
+ }
+ return false
}
// deref will dereference ptr 'n' times (or until nil).
diff --git a/vendor/codeberg.org/gruf/go-structr/util.go b/vendor/codeberg.org/gruf/go-structr/util.go
index 46535fcff..84c98074d 100644
--- a/vendor/codeberg.org/gruf/go-structr/util.go
+++ b/vendor/codeberg.org/gruf/go-structr/util.go
@@ -1,5 +1,7 @@
package structr
+import "unsafe"
+
// once only executes 'fn' once.
func once(fn func()) func() {
var once int32
@@ -11,3 +13,9 @@ func once(fn func()) func() {
fn()
}
}
+
+// eface_data returns the data ptr from an empty interface.
+func eface_data(a any) unsafe.Pointer {
+ type eface struct{ _, data unsafe.Pointer }
+ return (*eface)(unsafe.Pointer(&a)).data
+}