summaryrefslogtreecommitdiff
path: root/vendor/github.com/vmihailenco/bufpool/pool.go
blob: 3e1676b480c49f2ef0a2ae7fda1f202f814f0d40 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
package bufpool

import (
	"math/bits"
	"sync/atomic"
)

const (
	// Size classes are powers of two: class i holds buffers of
	// capacity minSize << i.
	minBitSize = 6 // 2**6=64 is a CPU cache line size
	steps      = 20 // number of size classes tracked in Pool.calls

	minSize     = 1 << minBitSize               // 64 bytes
	maxSize     = 1 << (minBitSize + steps - 1) // 32 mb
	maxPoolSize = maxSize << 1                  // 64 mb

	// Fraction of observed buffer lengths that the served capacity
	// should cover; see Pool.calibrate.
	defaultServePctile      = 0.95
	calibrateCallsThreshold = 42000 // per-class call count that triggers calibration
	defaultSize             = 4096  // served before any usage stats exist
)

// Pool represents byte buffer pool.
//
// Different pools should be used for different usage patterns to achieve better
// performance and lower memory usage.
type Pool struct {
	// calls[i] counts buffers whose length fell into size class i.
	// Accessed only via sync/atomic.
	calls       [steps]uint32
	// calibrating is a CAS-guarded flag: 1 while calibrate runs.
	calibrating uint32

	ServePctile float64 // default is 0.95
	// serveSize is the capacity served by Get/New; 0 means "not yet
	// calibrated". Accessed only via sync/atomic.
	serveSize   uint32
}

// getServeSize reports the buffer capacity Get and New should use.
// It prefers the calibrated size; before calibration it picks the
// smallest size class that has already seen more than 10 calls, and
// falls back to defaultSize when there is not enough data yet.
func (p *Pool) getServeSize() int {
	if cached := atomic.LoadUint32(&p.serveSize); cached > 0 {
		return int(cached)
	}

	for idx := range p.calls {
		if atomic.LoadUint32(&p.calls[idx]) > 10 {
			candidate := indexSize(idx)
			// Publish the guess; lose the race gracefully if another
			// goroutine (or calibrate) already stored a size.
			atomic.CompareAndSwapUint32(&p.serveSize, 0, uint32(candidate))
			return candidate
		}
	}

	return defaultSize
}

// Get returns an empty buffer from the pool. Returned buffer capacity
// is determined by accumulated usage stats and changes over time.
//
// The buffer may be returned to the pool using Put or retained for further
// usage. In latter case buffer length must be updated using UpdateLen.
func (p *Pool) Get() *Buffer {
	b := Get(p.getServeSize())
	b.Reset()
	return b
}

// New returns an empty buffer bypassing the pool. Returned buffer capacity
// is determined by accumulated usage stats and changes over time.
func (p *Pool) New() *Buffer {
	size := p.getServeSize()
	return NewBuffer(make([]byte, 0, size))
}

// Put returns buffer to the pool.
func (p *Pool) Put(buf *Buffer) {
	n := buf.Len()
	if n == 0 {
		// An unused buffer still tells us how much capacity was handed out.
		n = buf.Cap()
	}
	p.UpdateLen(n)

	// Always put buf to the pool.
	Put(buf)
}

// UpdateLen updates stats about buffer length.
func (p *Pool) UpdateLen(bufLen int) {
	slot := &p.calls[index(bufLen)]
	if atomic.AddUint32(slot, 1) > calibrateCallsThreshold {
		p.calibrate()
	}
}

// calibrate recomputes serveSize from the accumulated per-size-class call
// counts: it picks the smallest size class whose cumulative call count
// covers the configured percentile of all observations, then resets the
// counters. At most one goroutine calibrates at a time; concurrent
// callers return immediately.
func (p *Pool) calibrate() {
	if !atomic.CompareAndSwapUint32(&p.calibrating, 0, 1) {
		return // another calibration is already in flight
	}

	// Snapshot the counters while zeroing them for the next window.
	var snapshot [steps]uint32
	var total uint64
	for i := range p.calls {
		c := atomic.SwapUint32(&p.calls[i], 0)
		snapshot[i] = c
		total += uint64(c)
	}

	// Walk size classes from smallest to largest until the running
	// count reaches the percentile target.
	target := uint64(float64(total) * p.getServePctile())
	newSize := 0
	var running uint64
	for i, c := range &snapshot {
		running += uint64(c)
		if running >= target {
			newSize = indexSize(i)
			break
		}
	}

	atomic.StoreUint32(&p.serveSize, uint32(newSize))
	atomic.StoreUint32(&p.calibrating, 0)
}

// getServePctile returns the configured serve percentile, substituting
// defaultServePctile when ServePctile is unset (zero or negative).
func (p *Pool) getServePctile() float64 {
	if pct := p.ServePctile; pct > 0 {
		return pct
	}
	return defaultServePctile
}

// index maps a buffer length n to its size-class index in [0, steps).
// Lengths up to minSize map to class 0; each subsequent class doubles
// the capacity; lengths beyond the largest class are clamped to the
// last one.
func index(n int) int {
	if n == 0 {
		return 0
	}
	// bits.Len32 of (n-1)>>minBitSize is ceil(log2(n/minSize)) for n > minSize.
	idx := bits.Len32(uint32((n - 1) >> minBitSize))
	if idx < steps {
		return idx
	}
	return steps - 1
}

// prevIndex returns the size-class index one below index(n), except when
// n sits exactly on a class boundary (or is already in class 0), in which
// case index(n) itself is returned.
func prevIndex(n int) int {
	idx := index(n)
	if idx > 0 && n != indexSize(idx) {
		return idx - 1
	}
	return idx
}

func indexSize(idx int) int {
	return minSize << uint(idx)
}