diff options
Diffstat (limited to 'vendor/codeberg.org/gruf/go-mangler/v2/cache.go')
| -rw-r--r-- | vendor/codeberg.org/gruf/go-mangler/v2/cache.go | 62 |
1 file changed, 62 insertions, 0 deletions
diff --git a/vendor/codeberg.org/gruf/go-mangler/v2/cache.go b/vendor/codeberg.org/gruf/go-mangler/v2/cache.go new file mode 100644 index 000000000..0f6f71cd0 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-mangler/v2/cache.go @@ -0,0 +1,62 @@ +package mangler + +import ( + "sync/atomic" + "unsafe" + + "codeberg.org/gruf/go-xunsafe" +) + +var manglers cache + +// cache is a concurrency-safe map[xunsafe.TypeInfo]Mangler +// cache, designed for heavy reads but with unfortunately expensive +// writes. it is designed such that after some initial load period +// in which functions are cached by types, all future ops are reads. +type cache struct{ p unsafe.Pointer } + +// Get will check cache for mangler func under key. +func (c *cache) Get(t xunsafe.TypeInfo) Mangler { + if p := c.load(); p != nil { + return (*p)[t] + } + return nil +} + +// Put will place given mangler func in cache under key, if not already exists. +func (c *cache) Put(t xunsafe.TypeInfo, fn Mangler) { + for { + p := c.load() + + var cache map[xunsafe.TypeInfo]Mangler + + if p != nil { + if _, ok := (*p)[t]; ok { + return + } + + cache = make(map[xunsafe.TypeInfo]Mangler, len(*p)+1) + for key, value := range *p { + cache[key] = value + } + } else { + cache = make(map[xunsafe.TypeInfo]Mangler, 1) + } + + cache[t] = fn + + if c.cas(p, &cache) { + return + } + } +} + +// load is a typed wrapper around atomic.LoadPointer(). +func (c *cache) load() *map[xunsafe.TypeInfo]Mangler { + return (*map[xunsafe.TypeInfo]Mangler)(atomic.LoadPointer(&c.p)) +} + +// cas is a typed wrapper around atomic.CompareAndSwapPointer(). +func (c *cache) cas(old, new *map[xunsafe.TypeInfo]Mangler) bool { + return atomic.CompareAndSwapPointer(&c.p, unsafe.Pointer(old), unsafe.Pointer(new)) +} |
