summaryrefslogtreecommitdiff
path: root/vendor/github.com/vmihailenco/bufpool
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/vmihailenco/bufpool')
-rw-r--r--vendor/github.com/vmihailenco/bufpool/.travis.yml20
-rw-r--r--vendor/github.com/vmihailenco/bufpool/LICENSE23
-rw-r--r--vendor/github.com/vmihailenco/bufpool/Makefile6
-rw-r--r--vendor/github.com/vmihailenco/bufpool/README.md74
-rw-r--r--vendor/github.com/vmihailenco/bufpool/buf_pool.go67
-rw-r--r--vendor/github.com/vmihailenco/bufpool/buffer.go397
-rw-r--r--vendor/github.com/vmihailenco/bufpool/buffer_ext.go66
-rw-r--r--vendor/github.com/vmihailenco/bufpool/go.mod9
-rw-r--r--vendor/github.com/vmihailenco/bufpool/go.sum17
-rw-r--r--vendor/github.com/vmihailenco/bufpool/pool.go148
10 files changed, 827 insertions, 0 deletions
diff --git a/vendor/github.com/vmihailenco/bufpool/.travis.yml b/vendor/github.com/vmihailenco/bufpool/.travis.yml
new file mode 100644
index 000000000..c7383a2b1
--- /dev/null
+++ b/vendor/github.com/vmihailenco/bufpool/.travis.yml
@@ -0,0 +1,20 @@
+sudo: false
+language: go
+
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+env:
+ - GO111MODULE=on
+
+go_import_path: github.com/vmihailenco/bufpool
+
+before_install:
+ - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0
diff --git a/vendor/github.com/vmihailenco/bufpool/LICENSE b/vendor/github.com/vmihailenco/bufpool/LICENSE
new file mode 100644
index 000000000..2b76a892e
--- /dev/null
+++ b/vendor/github.com/vmihailenco/bufpool/LICENSE
@@ -0,0 +1,23 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Juan Batiz-Benet
+Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia
+Copyright (c) 2019 Vladimir Mihailenco
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/vmihailenco/bufpool/Makefile b/vendor/github.com/vmihailenco/bufpool/Makefile
new file mode 100644
index 000000000..57914e333
--- /dev/null
+++ b/vendor/github.com/vmihailenco/bufpool/Makefile
@@ -0,0 +1,6 @@
+all:
+ go test ./...
+ go test ./... -short -race
+ go test ./... -run=NONE -bench=. -benchmem
+ env GOOS=linux GOARCH=386 go test ./...
+ golangci-lint run
diff --git a/vendor/github.com/vmihailenco/bufpool/README.md b/vendor/github.com/vmihailenco/bufpool/README.md
new file mode 100644
index 000000000..05a70791c
--- /dev/null
+++ b/vendor/github.com/vmihailenco/bufpool/README.md
@@ -0,0 +1,74 @@
+# bufpool
+
+[![Build Status](https://travis-ci.org/vmihailenco/bufpool.svg)](https://travis-ci.org/vmihailenco/bufpool)
+[![GoDoc](https://godoc.org/github.com/vmihailenco/bufpool?status.svg)](https://godoc.org/github.com/vmihailenco/bufpool)
+
+bufpool is an implementation of a pool of byte buffers with anti-memory-waste protection. It is based on the code and ideas from these 2 projects:
+- https://github.com/libp2p/go-buffer-pool
+- https://github.com/valyala/bytebufferpool
+
+bufpool consists of global pool of buffers that have a capacity of a power of 2 starting from 64 bytes to 32 megabytes. It also provides individual pools that maintain usage stats to provide buffers of the size that satisfies 95% of the calls. Global pool is used to reuse buffers between different parts of the app.
+
+# Installation
+
+``` go
+go get github.com/vmihailenco/bufpool
+```
+
+# Usage
+
+bufpool can be used as a replacement for `sync.Pool`:
+
+``` go
+var jsonPool bufpool.Pool // basically sync.Pool with usage stats
+
+func writeJSON(w io.Writer, obj interface{}) error {
+ buf := jsonPool.Get()
+ defer jsonPool.Put(buf)
+
+ if err := json.NewEncoder(buf).Encode(obj); err != nil {
+ return err
+ }
+
+ _, err := w.Write(buf.Bytes())
+ return err
+}
+```
+
+or to allocate buffer of the given size:
+
+``` go
+func writeHex(w io.Writer, data []byte) error {
+ n := hex.EncodedLen(len(data))
+
+ buf := bufpool.Get(n) // buf.Len() is guaranteed to equal n
+ defer bufpool.Put(buf)
+
+ tmp := buf.Bytes()
+ hex.Encode(tmp, data)
+
+ _, err := w.Write(tmp)
+ return err
+}
+```
+
+If you need to append data to the buffer you can use the following pattern:
+
+``` go
+buf := bufpool.Get(n)
+defer bufpool.Put(buf)
+
+bb := buf.Bytes()[:0]
+
+bb = append(bb, ...)
+
+buf.ResetBuf(bb)
+```
+
+You can also change default pool thresholds:
+
+``` go
+var jsonPool = bufpool.Pool{
+ ServePctile: 0.95, // serve p95 buffers
+}
+```
diff --git a/vendor/github.com/vmihailenco/bufpool/buf_pool.go b/vendor/github.com/vmihailenco/bufpool/buf_pool.go
new file mode 100644
index 000000000..2daa69888
--- /dev/null
+++ b/vendor/github.com/vmihailenco/bufpool/buf_pool.go
@@ -0,0 +1,67 @@
+package bufpool
+
+import (
+ "log"
+ "sync"
+)
+
+var thePool bufPool
+
+// Get retrieves a buffer of the appropriate length from the buffer pool or
+// allocates a new one. Get may choose to ignore the pool and treat it as empty.
+// Callers should not assume any relation between values passed to Put and the
+// values returned by Get.
+//
+// If no suitable buffer exists in the pool, Get creates one.
+func Get(length int) *Buffer {
+ return thePool.Get(length)
+}
+
+// Put returns a buffer to the buffer pool.
+func Put(buf *Buffer) {
+ thePool.Put(buf)
+}
+
+type bufPool struct {
+ pools [steps]sync.Pool
+}
+
+func (p *bufPool) Get(length int) *Buffer {
+ if length > maxPoolSize {
+ return NewBuffer(make([]byte, length))
+ }
+
+ idx := index(length)
+ if bufIface := p.pools[idx].Get(); bufIface != nil {
+ buf := bufIface.(*Buffer)
+ unlock(buf)
+ if length > buf.Cap() {
+ log.Println(idx, buf.Len(), buf.Cap(), buf.String())
+ }
+ buf.buf = buf.buf[:length]
+ return buf
+ }
+
+ b := make([]byte, length, indexSize(idx))
+ return NewBuffer(b)
+}
+
+func (p *bufPool) Put(buf *Buffer) {
+ length := buf.Cap()
+ if length > maxPoolSize || length < minSize {
+ return // drop it
+ }
+
+ idx := prevIndex(length)
+ lock(buf)
+ p.pools[idx].Put(buf)
+}
+
+func lock(buf *Buffer) {
+ buf.buf = buf.buf[:cap(buf.buf)]
+ buf.off = cap(buf.buf) + 1
+}
+
+func unlock(buf *Buffer) {
+ buf.off = 0
+}
diff --git a/vendor/github.com/vmihailenco/bufpool/buffer.go b/vendor/github.com/vmihailenco/bufpool/buffer.go
new file mode 100644
index 000000000..a061a0b70
--- /dev/null
+++ b/vendor/github.com/vmihailenco/bufpool/buffer.go
@@ -0,0 +1,397 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bufpool
+
+// Simple byte buffer for marshaling data.
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "unicode/utf8"
+)
+
+// smallBufferSize is an initial allocation minimal capacity.
+const smallBufferSize = 64
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ lastRead readOp // last read operation, so that Unread* can work correctly.
+}
+
+// The readOp constants describe the last action performed on
+// the buffer, so that UnreadRune and UnreadByte can check for
+// invalid usage. opReadRuneX constants are chosen such that
+// converted to int they correspond to the rune size that was read.
+type readOp int8
+
+// Don't use iota for these, as the values need to correspond with the
+// names and comments, which is easier to see when being explicit.
+const (
+ opRead readOp = -1 // Any other read operation.
+ opInvalid readOp = 0 // Non-read operation.
+ opReadRune1 readOp = 1 // Read rune of size 1.
+)
+
+var errNegativeRead = errors.New("bytes.Buffer: reader returned negative count from Read")
+
+const maxInt = int(^uint(0) >> 1)
+
+// Bytes returns a slice of length b.Len() holding the unread portion of the buffer.
+// The slice is valid for use only until the next buffer modification (that is,
+// only until the next call to a method like Read, Write, Reset, or Truncate).
+// The slice aliases the buffer content at least until the next buffer modification,
+// so immediate changes to the slice will affect the result of future reads.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string. If the Buffer is a nil pointer, it returns "<nil>".
+//
+// To build strings more efficiently, see the strings.Builder type.
+func (b *Buffer) String() string {
+ if b == nil {
+ // Special case, useful in debugging.
+ return "<nil>"
+ }
+ return string(b.buf[b.off:])
+}
+
+// empty reports whether the unread portion of the buffer is empty.
+func (b *Buffer) empty() bool { return len(b.buf) <= b.off }
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Cap returns the capacity of the buffer's underlying byte slice, that is, the
+// total space allocated for the buffer's data.
+func (b *Buffer) Cap() int { return cap(b.buf) }
+
+// Truncate discards all but the first n unread bytes from the buffer
+// but continues to use the same allocated storage.
+// It panics if n is negative or greater than the length of the buffer.
+func (b *Buffer) Truncate(n int) {
+ if n == 0 {
+ b.Reset()
+ return
+ }
+ b.lastRead = opInvalid
+ if n < 0 || n > b.Len() {
+ panic("bytes.Buffer: truncation out of range")
+ }
+ b.buf = b.buf[:b.off+n]
+}
+
+// tryGrowByReslice is an inlineable version of grow for the fast-case where the
+// internal buffer only needs to be resliced.
+// It returns the index where bytes should be written and whether it succeeded.
+func (b *Buffer) tryGrowByReslice(n int) (int, bool) {
+ if l := len(b.buf); n <= cap(b.buf)-l {
+ b.buf = b.buf[:l+n]
+ return l, true
+ }
+ return 0, false
+}
+
+// Grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+// If n is negative, Grow will panic.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) Grow(n int) {
+ if n < 0 {
+ panic("bytes.Buffer.Grow: negative count")
+ }
+ m := b.grow(n)
+ b.buf = b.buf[:m]
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as
+// needed. The return value n is the length of p; err is always nil. If the
+// buffer becomes too large, Write will panic with ErrTooLarge.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+ b.lastRead = opInvalid
+ m, ok := b.tryGrowByReslice(len(p))
+ if !ok {
+ m = b.grow(len(p))
+ }
+ return copy(b.buf[m:], p), nil
+}
+
+// WriteString appends the contents of s to the buffer, growing the buffer as
+// needed. The return value n is the length of s; err is always nil. If the
+// buffer becomes too large, WriteString will panic with ErrTooLarge.
+func (b *Buffer) WriteString(s string) (n int, err error) {
+ b.lastRead = opInvalid
+ m, ok := b.tryGrowByReslice(len(s))
+ if !ok {
+ m = b.grow(len(s))
+ }
+ return copy(b.buf[m:], s), nil
+}
+
+// MinRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const minRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer, growing
+// the buffer as needed. The return value n is the number of bytes read. Any
+// error except io.EOF encountered during the read is also returned. If the
+// buffer becomes too large, ReadFrom will panic with ErrTooLarge.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
+ b.lastRead = opInvalid
+ for {
+ i := b.grow(minRead)
+ b.buf = b.buf[:i]
+ m, e := r.Read(b.buf[i:cap(b.buf)])
+ if m < 0 {
+ panic(errNegativeRead)
+ }
+
+ b.buf = b.buf[:i+m]
+ n += int64(m)
+ if e == io.EOF {
+ return n, nil // e is EOF, so return nil explicitly
+ }
+ if e != nil {
+ return n, e
+ }
+ }
+}
+
+// WriteTo writes data to w until the buffer is drained or an error occurs.
+// The return value n is the number of bytes written; it always fits into an
+// int, but it is int64 to match the io.WriterTo interface. Any error
+// encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
+ b.lastRead = opInvalid
+ if nBytes := b.Len(); nBytes > 0 {
+ m, e := w.Write(b.buf[b.off:])
+ if m > nBytes {
+ panic("bytes.Buffer.WriteTo: invalid Write count")
+ }
+ b.off += m
+ n = int64(m)
+ if e != nil {
+ return n, e
+ }
+ // all bytes should have been written, by definition of
+ // Write method in io.Writer
+ if m != nBytes {
+ return n, io.ErrShortWrite
+ }
+ }
+ // Buffer is now empty; reset.
+ b.Reset()
+ return n, nil
+}
+
+// WriteByte appends the byte c to the buffer, growing the buffer as needed.
+// The returned error is always nil, but is included to match bufio.Writer's
+// WriteByte. If the buffer becomes too large, WriteByte will panic with
+// ErrTooLarge.
+func (b *Buffer) WriteByte(c byte) error {
+ b.lastRead = opInvalid
+ m, ok := b.tryGrowByReslice(1)
+ if !ok {
+ m = b.grow(1)
+ }
+ b.buf[m] = c
+ return nil
+}
+
+// WriteRune appends the UTF-8 encoding of Unicode code point r to the
+// buffer, returning its length and an error, which is always nil but is
+// included to match bufio.Writer's WriteRune. The buffer is grown as needed;
+// if it becomes too large, WriteRune will panic with ErrTooLarge.
+func (b *Buffer) WriteRune(r rune) (n int, err error) {
+ if r < utf8.RuneSelf {
+ _ = b.WriteByte(byte(r))
+ return 1, nil
+ }
+ b.lastRead = opInvalid
+ m, ok := b.tryGrowByReslice(utf8.UTFMax)
+ if !ok {
+ m = b.grow(utf8.UTFMax)
+ }
+ n = utf8.EncodeRune(b.buf[m:m+utf8.UTFMax], r)
+ b.buf = b.buf[:m+n]
+ return n, nil
+}
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained. The return value n is the number of bytes read. If the
+// buffer has no data to return, err is io.EOF (unless len(p) is zero);
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err error) {
+ b.lastRead = opInvalid
+ if b.empty() {
+ // Buffer is empty, reset to recover space.
+ b.Reset()
+ if len(p) == 0 {
+ return 0, nil
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, b.buf[b.off:])
+ b.off += n
+ if n > 0 {
+ b.lastRead = opRead
+ }
+ return n, nil
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+ b.lastRead = opInvalid
+ m := b.Len()
+ if n > m {
+ n = m
+ }
+ data := b.buf[b.off : b.off+n]
+ b.off += n
+ if n > 0 {
+ b.lastRead = opRead
+ }
+ return data
+}
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error io.EOF.
+func (b *Buffer) ReadByte() (byte, error) {
+ if b.empty() {
+ // Buffer is empty, reset to recover space.
+ b.Reset()
+ return 0, io.EOF
+ }
+ c := b.buf[b.off]
+ b.off++
+ b.lastRead = opRead
+ return c, nil
+}
+
+// ReadRune reads and returns the next UTF-8-encoded
+// Unicode code point from the buffer.
+// If no bytes are available, the error returned is io.EOF.
+// If the bytes are an erroneous UTF-8 encoding, it
+// consumes one byte and returns U+FFFD, 1.
+func (b *Buffer) ReadRune() (r rune, size int, err error) {
+ if b.empty() {
+ // Buffer is empty, reset to recover space.
+ b.Reset()
+ return 0, 0, io.EOF
+ }
+ c := b.buf[b.off]
+ if c < utf8.RuneSelf {
+ b.off++
+ b.lastRead = opReadRune1
+ return rune(c), 1, nil
+ }
+ r, n := utf8.DecodeRune(b.buf[b.off:])
+ b.off += n
+ b.lastRead = readOp(n)
+ return r, n, nil
+}
+
+// UnreadRune unreads the last rune returned by ReadRune.
+// If the most recent read or write operation on the buffer was
+// not a successful ReadRune, UnreadRune returns an error. (In this regard
+// it is stricter than UnreadByte, which will unread the last byte
+// from any read operation.)
+func (b *Buffer) UnreadRune() error {
+ if b.lastRead <= opInvalid {
+ return errors.New("bytes.Buffer: UnreadRune: previous operation was not a successful ReadRune")
+ }
+ if b.off >= int(b.lastRead) {
+ b.off -= int(b.lastRead)
+ }
+ b.lastRead = opInvalid
+ return nil
+}
+
+var errUnreadByte = errors.New("bytes.Buffer: UnreadByte: previous operation was not a successful read")
+
+// UnreadByte unreads the last byte returned by the most recent successful
+// read operation that read at least one byte. If a write has happened since
+// the last read, if the last read returned an error, or if the read read zero
+// bytes, UnreadByte returns an error.
+func (b *Buffer) UnreadByte() error {
+ if b.lastRead == opInvalid {
+ return errUnreadByte
+ }
+ b.lastRead = opInvalid
+ if b.off > 0 {
+ b.off--
+ }
+ return nil
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
+ slice, err := b.readSlice(delim)
+ // return a copy of slice. The buffer's backing array may
+ // be overwritten by later calls.
+ line = append(line, slice...)
+ return line, err
+}
+
+// readSlice is like ReadBytes but returns a reference to internal buffer data.
+func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
+ i := bytes.IndexByte(b.buf[b.off:], delim)
+ end := b.off + i + 1
+ if i < 0 {
+ end = len(b.buf)
+ err = io.EOF
+ }
+ line = b.buf[b.off:end]
+ b.off = end
+ b.lastRead = opRead
+ return line, err
+}
+
+// ReadString reads until the first occurrence of delim in the input,
+// returning a string containing the data up to and including the delimiter.
+// If ReadString encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadString returns err != nil if and only if the returned data does not end
+// in delim.
+func (b *Buffer) ReadString(delim byte) (line string, err error) {
+ slice, err := b.readSlice(delim)
+ return string(slice), err
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its
+// initial contents. The new Buffer takes ownership of buf, and the
+// caller should not use buf after this call. NewBuffer is intended to
+// prepare a Buffer to read existing data. It can also be used to set
+// the initial size of the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
+
+// NewBufferString creates and initializes a new Buffer using string s as its
+// initial contents. It is intended to prepare a buffer to read an existing
+// string.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBufferString(s string) *Buffer {
+ return &Buffer{buf: []byte(s)}
+}
diff --git a/vendor/github.com/vmihailenco/bufpool/buffer_ext.go b/vendor/github.com/vmihailenco/bufpool/buffer_ext.go
new file mode 100644
index 000000000..8a904bc5c
--- /dev/null
+++ b/vendor/github.com/vmihailenco/bufpool/buffer_ext.go
@@ -0,0 +1,66 @@
+package bufpool
+
+import "bytes"
+
+// Reset resets the buffer to be empty,
+// but it retains the underlying storage for use by future writes.
+// Reset is the same as Truncate(0).
+func (b *Buffer) Reset() {
+ if b.off > cap(b.buf) {
+ panic("Buffer is used after Put")
+ }
+ b.buf = b.buf[:0]
+ b.off = 0
+ b.lastRead = opInvalid
+}
+
+func (b *Buffer) ResetBuf(buf []byte) {
+ if b.off > cap(b.buf) {
+ panic("Buffer is used after Put")
+ }
+ b.buf = buf[:0]
+ b.off = 0
+ b.lastRead = opInvalid
+}
+
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with ErrTooLarge.
+func (b *Buffer) grow(n int) int {
+ if b.off > cap(b.buf) {
+ panic("Buffer is used after Put")
+ }
+ m := b.Len()
+ // If buffer is empty, reset to recover space.
+ if m == 0 && b.off != 0 {
+ b.Reset()
+ }
+ // Try to grow by means of a reslice.
+ if i, ok := b.tryGrowByReslice(n); ok {
+ return i
+ }
+ if b.buf == nil && n <= smallBufferSize {
+ b.buf = make([]byte, n, smallBufferSize)
+ return 0
+ }
+ c := cap(b.buf)
+ if n <= c/2-m {
+ // We can slide things down instead of allocating a new
+ // slice. We only need m+n <= c to slide, but
+ // we instead let capacity get twice as large so we
+ // don't spend all our time copying.
+ copy(b.buf, b.buf[b.off:])
+ } else if c > maxInt-c-n {
+ panic(bytes.ErrTooLarge)
+ } else {
+ // Not enough space anywhere, we need to allocate.
+ tmp := Get(2*c + n)
+ copy(tmp.buf, b.buf[b.off:])
+ b.buf, tmp.buf = tmp.buf, b.buf
+ Put(tmp)
+ }
+ // Restore b.off and len(b.buf).
+ b.off = 0
+ b.buf = b.buf[:m+n]
+ return m
+}
diff --git a/vendor/github.com/vmihailenco/bufpool/go.mod b/vendor/github.com/vmihailenco/bufpool/go.mod
new file mode 100644
index 000000000..7f3096ae4
--- /dev/null
+++ b/vendor/github.com/vmihailenco/bufpool/go.mod
@@ -0,0 +1,9 @@
+module github.com/vmihailenco/bufpool
+
+go 1.13
+
+require (
+ github.com/kr/pretty v0.1.0 // indirect
+ github.com/stretchr/testify v1.5.1
+ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
+)
diff --git a/vendor/github.com/vmihailenco/bufpool/go.sum b/vendor/github.com/vmihailenco/bufpool/go.sum
new file mode 100644
index 000000000..6074473ac
--- /dev/null
+++ b/vendor/github.com/vmihailenco/bufpool/go.sum
@@ -0,0 +1,17 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/vmihailenco/bufpool/pool.go b/vendor/github.com/vmihailenco/bufpool/pool.go
new file mode 100644
index 000000000..3e1676b48
--- /dev/null
+++ b/vendor/github.com/vmihailenco/bufpool/pool.go
@@ -0,0 +1,148 @@
+package bufpool
+
+import (
+ "math/bits"
+ "sync/atomic"
+)
+
+const (
+ minBitSize = 6 // 2**6=64 is a CPU cache line size
+ steps = 20
+
+ minSize = 1 << minBitSize // 64 bytes
+ maxSize = 1 << (minBitSize + steps - 1) // 32 mb
+ maxPoolSize = maxSize << 1 // 64 mb
+
+ defaultServePctile = 0.95
+ calibrateCallsThreshold = 42000
+ defaultSize = 4096
+)
+
+// Pool represents byte buffer pool.
+//
+// Different pools should be used for different usage patterns to achieve better
+// performance and lower memory usage.
+type Pool struct {
+ calls [steps]uint32
+ calibrating uint32
+
+ ServePctile float64 // default is 0.95
+ serveSize uint32
+}
+
+func (p *Pool) getServeSize() int {
+ size := atomic.LoadUint32(&p.serveSize)
+ if size > 0 {
+ return int(size)
+ }
+
+ for i := 0; i < len(p.calls); i++ {
+ calls := atomic.LoadUint32(&p.calls[i])
+ if calls > 10 {
+ size := indexSize(i)
+ atomic.CompareAndSwapUint32(&p.serveSize, 0, uint32(size))
+ return size
+ }
+ }
+
+ return defaultSize
+}
+
+// Get returns an empty buffer from the pool. Returned buffer capacity
+// is determined by accumulated usage stats and changes over time.
+//
+// The buffer may be returned to the pool using Put or retained for further
+// usage. In latter case buffer length must be updated using UpdateLen.
+func (p *Pool) Get() *Buffer {
+ buf := Get(p.getServeSize())
+ buf.Reset()
+ return buf
+}
+
+// New returns an empty buffer bypassing the pool. Returned buffer capacity
+// is determined by accumulated usage stats and changes over time.
+func (p *Pool) New() *Buffer {
+ return NewBuffer(make([]byte, 0, p.getServeSize()))
+}
+
+// Put returns buffer to the pool.
+func (p *Pool) Put(buf *Buffer) {
+ length := buf.Len()
+ if length == 0 {
+ length = buf.Cap()
+ }
+
+ p.UpdateLen(length)
+
+ // Always put buf to the pool.
+ Put(buf)
+}
+
+// UpdateLen updates stats about buffer length.
+func (p *Pool) UpdateLen(bufLen int) {
+ idx := index(bufLen)
+ if atomic.AddUint32(&p.calls[idx], 1) > calibrateCallsThreshold {
+ p.calibrate()
+ }
+}
+
+func (p *Pool) calibrate() {
+ if !atomic.CompareAndSwapUint32(&p.calibrating, 0, 1) {
+ return
+ }
+
+ var callSum uint64
+ var calls [steps]uint32
+
+ for i := 0; i < len(p.calls); i++ {
+ n := atomic.SwapUint32(&p.calls[i], 0)
+ calls[i] = n
+ callSum += uint64(n)
+ }
+
+ serveSum := uint64(float64(callSum) * p.getServePctile())
+ var serveSize int
+
+ callSum = 0
+ for i, numCall := range &calls {
+ callSum += uint64(numCall)
+
+ if serveSize == 0 && callSum >= serveSum {
+ serveSize = indexSize(i)
+ break
+ }
+ }
+
+ atomic.StoreUint32(&p.serveSize, uint32(serveSize))
+ atomic.StoreUint32(&p.calibrating, 0)
+}
+
+func (p *Pool) getServePctile() float64 {
+ if p.ServePctile > 0 {
+ return p.ServePctile
+ }
+ return defaultServePctile
+}
+
+func index(n int) int {
+ if n == 0 {
+ return 0
+ }
+ idx := bits.Len32(uint32((n - 1) >> minBitSize))
+ if idx >= steps {
+ idx = steps - 1
+ }
+ return idx
+}
+
+func prevIndex(n int) int {
+ next := index(n)
+ if next == 0 || n == indexSize(next) {
+ return next
+ }
+ return next - 1
+}
+
+func indexSize(idx int) int {
+ return minSize << uint(idx)
+}