Diffstat (limited to 'vendor/codeberg.org/gruf/go-structr')
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/cache.go      | 12
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/index.go      | 51
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/queue.go      | 17
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/queue_ctx.go  |  7
-rw-r--r--  vendor/codeberg.org/gruf/go-structr/runtime.go    | 19
5 files changed, 76 insertions, 30 deletions
diff --git a/vendor/codeberg.org/gruf/go-structr/cache.go b/vendor/codeberg.org/gruf/go-structr/cache.go
index 17c491158..429c6ab67 100644
--- a/vendor/codeberg.org/gruf/go-structr/cache.go
+++ b/vendor/codeberg.org/gruf/go-structr/cache.go
@@ -289,9 +289,8 @@ func (c *Cache[T]) LoadOne(index *Index, key Key, load func() (T, error)) (T, er
 	// Load new result.
 	val, err = load()
 
-	// Check for ignored
-	// (transient) errors.
-	if ignore(err) {
+	// Check for ignored error types.
+	if err != nil && ignore(err) {
 		return val, err
 	}
 
@@ -536,9 +535,10 @@ func (c *Cache[T]) Debug() map[string]any {
 	m["indices"] = indices
 	for i := range c.indices {
 		var n uint64
-		for _, list := range c.indices[i].data {
-			n += uint64(list.len)
-		}
+		c.indices[i].data.Iter(func(_ string, l *list) (stop bool) {
+			n += uint64(l.len)
+			return
+		})
 		indices[c.indices[i].name] = n
 	}
 	c.mutex.Unlock()
diff --git a/vendor/codeberg.org/gruf/go-structr/index.go b/vendor/codeberg.org/gruf/go-structr/index.go
index b8f6b9d01..16f099ec6 100644
--- a/vendor/codeberg.org/gruf/go-structr/index.go
+++ b/vendor/codeberg.org/gruf/go-structr/index.go
@@ -7,6 +7,8 @@ import (
 	"unsafe"
 
 	"codeberg.org/gruf/go-byteutil"
+
+	"github.com/dolthub/swiss"
 )
 
 // IndexConfig defines config variables
@@ -70,7 +72,7 @@ type Index struct {
 	// index_entry{} which also contains the exact
 	// key each result is stored under. the hash map
 	// only keys by the xxh3 hash checksum for speed.
-	data map[string]*list // [*index_entry]
+	data *swiss.Map[string, *list]
 
 	// struct fields encompassed by
 	// keys (+ hashes) of this index.
@@ -153,13 +155,13 @@ func (i *Index) init(t reflect.Type, cfg IndexConfig, cap int) {
 	}
 
 	// Initialize index_entry list store.
-	i.data = make(map[string]*list, cap+1)
+	i.data = swiss.NewMap[string, *list](uint32(cap))
 }
 
 // get_one will fetch one indexed item under key.
 func (i *Index) get_one(key Key) *indexed_item {
 	// Get list at hash.
-	l := i.data[key.key]
+	l, _ := i.data.Get(key.key)
 	if l == nil {
 		return nil
 	}
@@ -182,7 +184,7 @@ func (i *Index) get(key Key, hook func(*indexed_item)) {
 	}
 
 	// Get list at hash.
-	l := i.data[key.key]
+	l, _ := i.data.Get(key.key)
 	if l == nil {
 		return
 	}
@@ -220,7 +222,7 @@ func (i *Index) key(buf *byteutil.Buffer, parts []any) Key {
 		for x, field := range i.fields {
 			before := len(buf.B)
 			buf.B = field.mangle(buf.B, parts[x])
-			if string(buf.B[before:]) == field.zero {
+			if string(buf.B[before:]) == field.zerostr {
 				return Key{}
 			}
 			buf.B = append(buf.B, '.')
@@ -242,13 +244,13 @@
 // of key collisions and overwriting 'unique' entries.
 func (i *Index) append(key Key, item *indexed_item) {
 	// Look for existing.
-	l := i.data[key.key]
+	l, _ := i.data.Get(key.key)
 	if l == nil {
 
 		// Allocate new.
 		l = new_list()
-		i.data[key.key] = l
+		i.data.Put(key.key, l)
 
 	} else if is_unique(i.flags) {
 
@@ -284,7 +286,7 @@ func (i *Index) delete(key Key, hook func(*indexed_item)) {
 	}
 
 	// Get list at hash.
-	l := i.data[key.key]
+	l, _ := i.data.Get(key.key)
 	if l == nil {
 		return
 	}
@@ -298,7 +300,7 @@ func (i *Index) delete(key Key, hook func(*indexed_item)) {
 	}
 
 	// Delete data at hash.
-	delete(i.data, key.key)
+	i.data.Delete(key.key)
 
 	// Iterate entries in list.
 	for x := 0; x < l.len; x++ {
@@ -328,7 +330,7 @@ func (i *Index) delete(key Key, hook func(*indexed_item)) {
 // delete_entry deletes the given index entry.
 func (i *Index) delete_entry(entry *index_entry) {
 	// Get list at hash sum.
-	l := i.data[entry.key.key]
+	l, _ := i.data.Get(entry.key.key)
 	if l == nil {
 		return
 	}
@@ -338,7 +340,7 @@ func (i *Index) delete_entry(entry *index_entry) {
 	if l.len == 0 {
 
 		// Remove entry list from map.
-		delete(i.data, entry.key.key)
+		i.data.Delete(entry.key.key)
 
 		// Release list.
 		free_list(l)
@@ -348,6 +350,33 @@ func (i *Index) delete_entry(entry *index_entry) {
 	entry.item.drop_index(entry)
 }
 
+// compact will reduce the size of underlying
+// index map if the cap vastly exceeds len.
+func (i *Index) compact() {
+
+	// Maximum load factor before
+	// 'swiss' allocates new hmap:
+	// maxLoad = 7 / 8
+	//
+	// So we apply the inverse/2, once
+	// $maxLoad/2 % of hmap is empty we
+	// compact the map to drop buckets.
+	len := i.data.Count()
+	cap := i.data.Capacity()
+	if cap-len > (cap*7)/(8*2) {
+
+		// Create a new map only as big as required.
+		data := swiss.NewMap[string, *list](uint32(len))
+		i.data.Iter(func(k string, v *list) (stop bool) {
+			data.Put(k, v)
+			return false
+		})
+
+		// Set new map.
+		i.data = data
+	}
+}
+
 // index_entry represents a single entry
 // in an Index{}, where it will be accessible
 // by Key{} pointing to a containing list{}.
diff --git a/vendor/codeberg.org/gruf/go-structr/queue.go b/vendor/codeberg.org/gruf/go-structr/queue.go
index d7c21daaa..ed5d78b5a 100644
--- a/vendor/codeberg.org/gruf/go-structr/queue.go
+++ b/vendor/codeberg.org/gruf/go-structr/queue.go
@@ -214,9 +214,10 @@ func (q *Queue[T]) Debug() map[string]any {
 	m["indices"] = indices
 	for i := range q.indices {
 		var n uint64
-		for _, list := range q.indices[i].data {
-			n += uint64(list.len)
-		}
+		q.indices[i].data.Iter(func(_ string, l *list) (stop bool) {
+			n += uint64(l.len)
+			return
+		})
 		indices[q.indices[i].name] = n
 	}
 	q.mutex.Unlock()
@@ -324,8 +325,14 @@ func (q *Queue[T]) delete(item *indexed_item) {
 		entry := item.indexed[len(item.indexed)-1]
 		item.indexed = item.indexed[:len(item.indexed)-1]
 
-		// Drop index_entry from index.
-		entry.index.delete_entry(entry)
+		// Get entry's index.
+		index := entry.index
+
+		// Drop this index_entry.
+		index.delete_entry(entry)
+
+		// Check compact.
+		index.compact()
 	}
 
 	// Drop entry from queue list.
diff --git a/vendor/codeberg.org/gruf/go-structr/queue_ctx.go b/vendor/codeberg.org/gruf/go-structr/queue_ctx.go
index 766644c3c..3d15e619c 100644
--- a/vendor/codeberg.org/gruf/go-structr/queue_ctx.go
+++ b/vendor/codeberg.org/gruf/go-structr/queue_ctx.go
@@ -73,9 +73,10 @@ func (q *QueueCtx[T]) Debug() map[string]any {
 	m["indices"] = indices
 	for i := range q.indices {
 		var n uint64
-		for _, list := range q.indices[i].data {
-			n += uint64(list.len)
-		}
+		q.indices[i].data.Iter(func(_ string, l *list) (stop bool) {
+			n += uint64(l.len)
+			return
+		})
 		indices[q.indices[i].name] = n
 	}
 	q.mutex.Unlock()
diff --git a/vendor/codeberg.org/gruf/go-structr/runtime.go b/vendor/codeberg.org/gruf/go-structr/runtime.go
index 9990fe7b9..a4696187a 100644
--- a/vendor/codeberg.org/gruf/go-structr/runtime.go
+++ b/vendor/codeberg.org/gruf/go-structr/runtime.go
@@ -29,10 +29,15 @@ type struct_field struct {
 	// (i.e. fast serializing) fn.
 	mangle mangler.Mangler
 
+	// zero value data, used when
+	// nil encountered during ptr
+	// offset following.
+	zero unsafe.Pointer
+
 	// mangled zero value string,
 	// if set this indicates zero
 	// values of field not allowed
-	zero string
+	zerostr string
 }
 
 // next_offset defines a next offset location
@@ -106,13 +111,14 @@ func find_field(t reflect.Type, names []string) (sfield struct_field) {
 	// Get field type as reflect2.
 	sfield.type2 = reflect2.Type2(t)
-	i := sfield.type2.New()
 
 	// Find mangler for field type.
 	sfield.mangle = mangler.Get(t)
 
-	// Set possible mangled zero value.
-	sfield.zero = string(sfield.mangle(nil, i))
+	// Set possible zero value and its string.
+	sfield.zero = sfield.type2.UnsafeNew()
+	i := sfield.type2.UnsafeIndirect(sfield.zero)
+	sfield.zerostr = string(sfield.mangle(nil, i))
 
 	return
 }
@@ -130,8 +136,11 @@ func extract_fields(ptr unsafe.Pointer, fields []struct_field) []any {
 		for _, offset := range field.offsets {
 			// Dereference any ptrs to offset.
 			fptr = deref(fptr, offset.derefs)
+
 			if fptr == nil {
-				return nil
+				// Use zero value.
+				fptr = field.zero
+				break
 			}
 
 			// Jump forward by offset to next ptr.
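Note: the compact() function added above rebuilds the index map once its reported capacity dwarfs its count; the patch comment derives the 7/(8*2) factor from the swiss table's 7/8 maximum load factor. Below is a minimal, self-contained sketch of the same check-and-rebuild step against github.com/dolthub/swiss, using only calls that appear in the patch (NewMap, Put, Delete, Iter, Count, Capacity). compactIfSparse and the string/int key-value types are hypothetical stand-ins for illustration, not go-structr code.

package main

import (
	"fmt"

	"github.com/dolthub/swiss"
)

// compactIfSparse applies the same threshold as compact() in the
// patch: when Capacity() exceeds Count() by more than 7/16 of
// Capacity(), rebuild the map sized to the current count.
func compactIfSparse(m *swiss.Map[string, int]) *swiss.Map[string, int] {
	length := m.Count()
	capacity := m.Capacity()
	if capacity-length > (capacity*7)/(8*2) {
		// Allocate a new map only as big as required
		// and copy every surviving entry across.
		next := swiss.NewMap[string, int](uint32(length))
		m.Iter(func(k string, v int) (stop bool) {
			next.Put(k, v)
			return false
		})
		return next
	}
	return m
}

func main() {
	m := swiss.NewMap[string, int](8)
	for i := 0; i < 1000; i++ {
		m.Put(fmt.Sprintf("key-%d", i), i)
	}
	for i := 0; i < 990; i++ {
		m.Delete(fmt.Sprintf("key-%d", i))
	}
	// With nearly all entries deleted, the threshold check should
	// trigger and the map is rebuilt at its much smaller size.
	m = compactIfSparse(m)
	fmt.Println("entries:", m.Count(), "capacity:", m.Capacity())
}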

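Note: the runtime.go hunk stores a typed zero value (an unsafe.Pointer obtained from reflect2's UnsafeNew) alongside its mangled string form (zerostr), so extract_fields can fall back to the zero value when pointer-following hits nil instead of returning nil outright. The sketch below shows that reflect2 pattern in isolation; demoField and newDemoField are hypothetical names used for illustration, not go-structr internals.

package main

import (
	"fmt"
	"reflect"
	"unsafe"

	"github.com/modern-go/reflect2"
)

// demoField mimics the shape of the patched struct_field: it keeps a
// reflect2 type plus an unsafe.Pointer to a zero value of that type,
// so a nil pointer met while following offsets can fall back to it.
type demoField struct {
	typ  reflect2.Type
	zero unsafe.Pointer // freshly allocated zero value of typ
}

func newDemoField(t reflect.Type) demoField {
	typ2 := reflect2.Type2(t)
	return demoField{
		typ:  typ2,
		zero: typ2.UnsafeNew(), // same call the patch uses for sfield.zero
	}
}

func main() {
	f := newDemoField(reflect.TypeOf(int64(0)))

	// Pretend pointer dereferencing returned nil, as in extract_fields:
	// substitute the stored zero value instead of giving up.
	var fptr unsafe.Pointer
	if fptr == nil {
		fptr = f.zero
	}

	// UnsafeIndirect re-boxes the pointed-to value as an interface,
	// which can then be handed to a mangler just like any field value.
	fmt.Println(f.typ.UnsafeIndirect(fptr)) // prints 0
}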