| author | 2025-03-09 17:47:56 +0100 |
|---|---|
| committer | 2025-03-10 01:59:49 +0100 |
| commit | 3ac1ee16f377d31a0fb80c8dae28b6239ac4229e (patch) |
| tree | f61faa581feaaeaba2542b9f2b8234a590684413 /vendor/github.com/klauspost/compress/internal/snapref |
| parent | [chore] update URLs to forked source (diff) |
| download | gotosocial-3ac1ee16f377d31a0fb80c8dae28b6239ac4229e.tar.xz |
[chore] remove vendor
Diffstat (limited to 'vendor/github.com/klauspost/compress/internal/snapref')
6 files changed, 0 insertions, 1041 deletions
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE
deleted file mode 100644
index 6050c10f4..000000000
--- a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go
deleted file mode 100644
index 40796a49d..000000000
--- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snapref
-
-import (
-	"encoding/binary"
-	"errors"
-	"io"
-)
-
-var (
-	// ErrCorrupt reports that the input is invalid.
-	ErrCorrupt = errors.New("snappy: corrupt input")
-	// ErrTooLarge reports that the uncompressed length is too large.
-	ErrTooLarge = errors.New("snappy: decoded block is too large")
-	// ErrUnsupported reports that the input isn't supported.
-	ErrUnsupported = errors.New("snappy: unsupported input")
-
-	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
-)
-
-// DecodedLen returns the length of the decoded block.
-func DecodedLen(src []byte) (int, error) {
-	v, _, err := decodedLen(src)
-	return v, err
-}
-
-// decodedLen returns the length of the decoded block and the number of bytes
-// that the length header occupied.
-func decodedLen(src []byte) (blockLen, headerLen int, err error) {
-	v, n := binary.Uvarint(src)
-	if n <= 0 || v > 0xffffffff {
-		return 0, 0, ErrCorrupt
-	}
-
-	const wordSize = 32 << (^uint(0) >> 32 & 1)
-	if wordSize == 32 && v > 0x7fffffff {
-		return 0, 0, ErrTooLarge
-	}
-	return int(v), n, nil
-}
-
-const (
-	decodeErrCodeCorrupt                  = 1
-	decodeErrCodeUnsupportedLiteralLength = 2
-)
-
-// Decode returns the decoded form of src. The returned slice may be a sub-
-// slice of dst if dst was large enough to hold the entire decoded block.
-// Otherwise, a newly allocated slice will be returned.
-//
-// The dst and src must not overlap. It is valid to pass a nil dst.
-//
-// Decode handles the Snappy block format, not the Snappy stream format.
-func Decode(dst, src []byte) ([]byte, error) {
-	dLen, s, err := decodedLen(src)
-	if err != nil {
-		return nil, err
-	}
-	if dLen <= len(dst) {
-		dst = dst[:dLen]
-	} else {
-		dst = make([]byte, dLen)
-	}
-	switch decode(dst, src[s:]) {
-	case 0:
-		return dst, nil
-	case decodeErrCodeUnsupportedLiteralLength:
-		return nil, errUnsupportedLiteralLength
-	}
-	return nil, ErrCorrupt
-}
-
-// NewReader returns a new Reader that decompresses from r, using the framing
-// format described at
-// https://github.com/google/snappy/blob/master/framing_format.txt
-func NewReader(r io.Reader) *Reader {
-	return &Reader{
-		r:       r,
-		decoded: make([]byte, maxBlockSize),
-		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
-	}
-}
-
-// Reader is an io.Reader that can read Snappy-compressed bytes.
-//
-// Reader handles the Snappy stream format, not the Snappy block format.
-type Reader struct {
-	r       io.Reader
-	err     error
-	decoded []byte
-	buf     []byte
-	// decoded[i:j] contains decoded bytes that have not yet been passed on.
-	i, j       int
-	readHeader bool
-}
-
-// Reset discards any buffered data, resets all state, and switches the Snappy
-// reader to read from r. This permits reusing a Reader rather than allocating
-// a new one.
-func (r *Reader) Reset(reader io.Reader) {
-	r.r = reader
-	r.err = nil
-	r.i = 0
-	r.j = 0
-	r.readHeader = false
-}
-
-func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
-	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
-		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
-			r.err = ErrCorrupt
-		}
-		return false
-	}
-	return true
-}
-
-func (r *Reader) fill() error {
-	for r.i >= r.j {
-		if !r.readFull(r.buf[:4], true) {
-			return r.err
-		}
-		chunkType := r.buf[0]
-		if !r.readHeader {
-			if chunkType != chunkTypeStreamIdentifier {
-				r.err = ErrCorrupt
-				return r.err
-			}
-			r.readHeader = true
-		}
-		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
-		if chunkLen > len(r.buf) {
-			r.err = ErrUnsupported
-			return r.err
-		}
-
-		// The chunk types are specified at
-		// https://github.com/google/snappy/blob/master/framing_format.txt
-		switch chunkType {
-		case chunkTypeCompressedData:
-			// Section 4.2. Compressed data (chunk type 0x00).
-			if chunkLen < checksumSize {
-				r.err = ErrCorrupt
-				return r.err
-			}
-			buf := r.buf[:chunkLen]
-			if !r.readFull(buf, false) {
-				return r.err
-			}
-			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
-			buf = buf[checksumSize:]
-
-			n, err := DecodedLen(buf)
-			if err != nil {
-				r.err = err
-				return r.err
-			}
-			if n > len(r.decoded) {
-				r.err = ErrCorrupt
-				return r.err
-			}
-			if _, err := Decode(r.decoded, buf); err != nil {
-				r.err = err
-				return r.err
-			}
-			if crc(r.decoded[:n]) != checksum {
-				r.err = ErrCorrupt
-				return r.err
-			}
-			r.i, r.j = 0, n
-			continue
-
-		case chunkTypeUncompressedData:
-			// Section 4.3. Uncompressed data (chunk type 0x01).
-			if chunkLen < checksumSize {
-				r.err = ErrCorrupt
-				return r.err
-			}
-			buf := r.buf[:checksumSize]
-			if !r.readFull(buf, false) {
-				return r.err
-			}
-			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
-			// Read directly into r.decoded instead of via r.buf.
-			n := chunkLen - checksumSize
-			if n > len(r.decoded) {
-				r.err = ErrCorrupt
-				return r.err
-			}
-			if !r.readFull(r.decoded[:n], false) {
-				return r.err
-			}
-			if crc(r.decoded[:n]) != checksum {
-				r.err = ErrCorrupt
-				return r.err
-			}
-			r.i, r.j = 0, n
-			continue
-
-		case chunkTypeStreamIdentifier:
-			// Section 4.1. Stream identifier (chunk type 0xff).
-			if chunkLen != len(magicBody) {
-				r.err = ErrCorrupt
-				return r.err
-			}
-			if !r.readFull(r.buf[:len(magicBody)], false) {
-				return r.err
-			}
-			for i := 0; i < len(magicBody); i++ {
-				if r.buf[i] != magicBody[i] {
-					r.err = ErrCorrupt
-					return r.err
-				}
-			}
-			continue
-		}
-
-		if chunkType <= 0x7f {
-			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
-			r.err = ErrUnsupported
-			return r.err
-		}
-		// Section 4.4 Padding (chunk type 0xfe).
-		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
-		if !r.readFull(r.buf[:chunkLen], false) {
-			return r.err
-		}
-	}
-
-	return nil
-}
-
-// Read satisfies the io.Reader interface.
-func (r *Reader) Read(p []byte) (int, error) {
-	if r.err != nil {
-		return 0, r.err
-	}
-
-	if err := r.fill(); err != nil {
-		return 0, err
-	}
-
-	n := copy(p, r.decoded[r.i:r.j])
-	r.i += n
-	return n, nil
-}
-
-// ReadByte satisfies the io.ByteReader interface.
-func (r *Reader) ReadByte() (byte, error) {
-	if r.err != nil {
-		return 0, r.err
-	}
-
-	if err := r.fill(); err != nil {
-		return 0, err
-	}
-
-	c := r.decoded[r.i]
-	r.i++
-	return c, nil
-}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go
deleted file mode 100644
index 77395a6b8..000000000
--- a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2016 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snapref
-
-// decode writes the decoding of src to dst. It assumes that the varint-encoded
-// length of the decompressed bytes has already been read, and that len(dst)
-// equals that length.
-//
-// It returns 0 on success or a decodeErrCodeXxx error code on failure.
-func decode(dst, src []byte) int {
-	var d, s, offset, length int
-	for s < len(src) {
-		switch src[s] & 0x03 {
-		case tagLiteral:
-			x := uint32(src[s] >> 2)
-			switch {
-			case x < 60:
-				s++
-			case x == 60:
-				s += 2
-				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
-					return decodeErrCodeCorrupt
-				}
-				x = uint32(src[s-1])
-			case x == 61:
-				s += 3
-				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
-					return decodeErrCodeCorrupt
-				}
-				x = uint32(src[s-2]) | uint32(src[s-1])<<8
-			case x == 62:
-				s += 4
-				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
-					return decodeErrCodeCorrupt
-				}
-				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
-			case x == 63:
-				s += 5
-				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
-					return decodeErrCodeCorrupt
-				}
-				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
-			}
-			length = int(x) + 1
-			if length <= 0 {
-				return decodeErrCodeUnsupportedLiteralLength
-			}
-			if length > len(dst)-d || length > len(src)-s {
-				return decodeErrCodeCorrupt
-			}
-			copy(dst[d:], src[s:s+length])
-			d += length
-			s += length
-			continue
-
-		case tagCopy1:
-			s += 2
-			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
-				return decodeErrCodeCorrupt
-			}
-			length = 4 + int(src[s-2])>>2&0x7
-			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
-
-		case tagCopy2:
-			s += 3
-			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
-				return decodeErrCodeCorrupt
-			}
-			length = 1 + int(src[s-3])>>2
-			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
-
-		case tagCopy4:
-			s += 5
-			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
-				return decodeErrCodeCorrupt
-			}
-			length = 1 + int(src[s-5])>>2
-			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
-		}
-
-		if offset <= 0 || d < offset || length > len(dst)-d {
-			return decodeErrCodeCorrupt
-		}
-		// Copy from an earlier sub-slice of dst to a later sub-slice.
-		// If no overlap, use the built-in copy:
-		if offset >= length {
-			copy(dst[d:d+length], dst[d-offset:])
-			d += length
-			continue
-		}
-
-		// Unlike the built-in copy function, this byte-by-byte copy always runs
-		// forwards, even if the slices overlap. Conceptually, this is:
-		//
-		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
-		//
-		// We align the slices into a and b and show the compiler they are the same size.
-		// This allows the loop to run without bounds checks.
-		a := dst[d : d+length]
-		b := dst[d-offset:]
-		b = b[:len(a)]
-		for i := range a {
-			a[i] = b[i]
-		}
-		d += length
-	}
-	if d != len(dst) {
-		return decodeErrCodeCorrupt
-	}
-	return 0
-}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go
deleted file mode 100644
index 13c6040a5..000000000
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snapref
-
-import (
-	"encoding/binary"
-	"errors"
-	"io"
-)
-
-// Encode returns the encoded form of src. The returned slice may be a sub-
-// slice of dst if dst was large enough to hold the entire encoded block.
-// Otherwise, a newly allocated slice will be returned.
-//
-// The dst and src must not overlap. It is valid to pass a nil dst.
-//
-// Encode handles the Snappy block format, not the Snappy stream format.
-func Encode(dst, src []byte) []byte {
-	if n := MaxEncodedLen(len(src)); n < 0 {
-		panic(ErrTooLarge)
-	} else if len(dst) < n {
-		dst = make([]byte, n)
-	}
-
-	// The block starts with the varint-encoded length of the decompressed bytes.
-	d := binary.PutUvarint(dst, uint64(len(src)))
-
-	for len(src) > 0 {
-		p := src
-		src = nil
-		if len(p) > maxBlockSize {
-			p, src = p[:maxBlockSize], p[maxBlockSize:]
-		}
-		if len(p) < minNonLiteralBlockSize {
-			d += emitLiteral(dst[d:], p)
-		} else {
-			d += encodeBlock(dst[d:], p)
-		}
-	}
-	return dst[:d]
-}
-
-// inputMargin is the minimum number of extra input bytes to keep, inside
-// encodeBlock's inner loop. On some architectures, this margin lets us
-// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
-// literals can be implemented as a single load to and store from a 16-byte
-// register. That literal's actual length can be as short as 1 byte, so this
-// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
-// the encoding loop will fix up the copy overrun, and this inputMargin ensures
-// that we don't overrun the dst and src buffers.
-const inputMargin = 16 - 1
-
-// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
-// could be encoded with a copy tag. This is the minimum with respect to the
-// algorithm used by encodeBlock, not a minimum enforced by the file format.
-//
-// The encoded output must start with at least a 1 byte literal, as there are
-// no previous bytes to copy. A minimal (1 byte) copy after that, generated
-// from an emitCopy call in encodeBlock's main loop, would require at least
-// another inputMargin bytes, for the reason above: we want any emitLiteral
-// calls inside encodeBlock's main loop to use the fast path if possible, which
-// requires being able to overrun by inputMargin bytes. Thus,
-// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
-//
-// The C++ code doesn't use this exact threshold, but it could, as discussed at
-// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
-// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
-// optimization. It should not affect the encoded form. This is tested by
-// TestSameEncodingAsCppShortCopies.
-const minNonLiteralBlockSize = 1 + 1 + inputMargin
-
-// MaxEncodedLen returns the maximum length of a snappy block, given its
-// uncompressed length.
-//
-// It will return a negative value if srcLen is too large to encode.
-func MaxEncodedLen(srcLen int) int {
-	n := uint64(srcLen)
-	if n > 0xffffffff {
-		return -1
-	}
-	// Compressed data can be defined as:
-	//    compressed := item* literal*
-	//    item       := literal* copy
-	//
-	// The trailing literal sequence has a space blowup of at most 62/60
-	// since a literal of length 60 needs one tag byte + one extra byte
-	// for length information.
-	//
-	// Item blowup is trickier to measure. Suppose the "copy" op copies
-	// 4 bytes of data. Because of a special check in the encoding code,
-	// we produce a 4-byte copy only if the offset is < 65536. Therefore
-	// the copy op takes 3 bytes to encode, and this type of item leads
-	// to at most the 62/60 blowup for representing literals.
-	//
-	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
-	// enough, it will take 5 bytes to encode the copy op. Therefore the
-	// worst case here is a one-byte literal followed by a five-byte copy.
-	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
-	//
-	// This last factor dominates the blowup, so the final estimate is:
-	n = 32 + n + n/6
-	if n > 0xffffffff {
-		return -1
-	}
-	return int(n)
-}
-
-var errClosed = errors.New("snappy: Writer is closed")
-
-// NewWriter returns a new Writer that compresses to w.
-//
-// The Writer returned does not buffer writes. There is no need to Flush or
-// Close such a Writer.
-//
-// Deprecated: the Writer returned is not suitable for many small writes, only
-// for few large writes. Use NewBufferedWriter instead, which is efficient
-// regardless of the frequency and shape of the writes, and remember to Close
-// that Writer when done.
-func NewWriter(w io.Writer) *Writer {
-	return &Writer{
-		w:    w,
-		obuf: make([]byte, obufLen),
-	}
-}
-
-// NewBufferedWriter returns a new Writer that compresses to w, using the
-// framing format described at
-// https://github.com/google/snappy/blob/master/framing_format.txt
-//
-// The Writer returned buffers writes. Users must call Close to guarantee all
-// data has been forwarded to the underlying io.Writer. They may also call
-// Flush zero or more times before calling Close.
-func NewBufferedWriter(w io.Writer) *Writer {
-	return &Writer{
-		w:    w,
-		ibuf: make([]byte, 0, maxBlockSize),
-		obuf: make([]byte, obufLen),
-	}
-}
-
-// Writer is an io.Writer that can write Snappy-compressed bytes.
-//
-// Writer handles the Snappy stream format, not the Snappy block format.
-type Writer struct {
-	w   io.Writer
-	err error
-
-	// ibuf is a buffer for the incoming (uncompressed) bytes.
-	//
-	// Its use is optional. For backwards compatibility, Writers created by the
-	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
-	// therefore do not need to be Flush'ed or Close'd.
-	ibuf []byte
-
-	// obuf is a buffer for the outgoing (compressed) bytes.
-	obuf []byte
-
-	// wroteStreamHeader is whether we have written the stream header.
-	wroteStreamHeader bool
-}
-
-// Reset discards the writer's state and switches the Snappy writer to write to
-// w. This permits reusing a Writer rather than allocating a new one.
-func (w *Writer) Reset(writer io.Writer) {
-	w.w = writer
-	w.err = nil
-	if w.ibuf != nil {
-		w.ibuf = w.ibuf[:0]
-	}
-	w.wroteStreamHeader = false
-}
-
-// Write satisfies the io.Writer interface.
-func (w *Writer) Write(p []byte) (nRet int, errRet error) {
-	if w.ibuf == nil {
-		// Do not buffer incoming bytes. This does not perform or compress well
-		// if the caller of Writer.Write writes many small slices. This
-		// behavior is therefore deprecated, but still supported for backwards
-		// compatibility with code that doesn't explicitly Flush or Close.
-		return w.write(p)
-	}
-
-	// The remainder of this method is based on bufio.Writer.Write from the
-	// standard library.
-
-	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
-		var n int
-		if len(w.ibuf) == 0 {
-			// Large write, empty buffer.
-			// Write directly from p to avoid copy.
-			n, _ = w.write(p)
-		} else {
-			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
-			w.ibuf = w.ibuf[:len(w.ibuf)+n]
-			w.Flush()
-		}
-		nRet += n
-		p = p[n:]
-	}
-	if w.err != nil {
-		return nRet, w.err
-	}
-	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
-	w.ibuf = w.ibuf[:len(w.ibuf)+n]
-	nRet += n
-	return nRet, nil
-}
-
-func (w *Writer) write(p []byte) (nRet int, errRet error) {
-	if w.err != nil {
-		return 0, w.err
-	}
-	for len(p) > 0 {
-		obufStart := len(magicChunk)
-		if !w.wroteStreamHeader {
-			w.wroteStreamHeader = true
-			copy(w.obuf, magicChunk)
-			obufStart = 0
-		}
-
-		var uncompressed []byte
-		if len(p) > maxBlockSize {
-			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
-		} else {
-			uncompressed, p = p, nil
-		}
-		checksum := crc(uncompressed)
-
-		// Compress the buffer, discarding the result if the improvement
-		// isn't at least 12.5%.
-		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
-		chunkType := uint8(chunkTypeCompressedData)
-		chunkLen := 4 + len(compressed)
-		obufEnd := obufHeaderLen + len(compressed)
-		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
-			chunkType = chunkTypeUncompressedData
-			chunkLen = 4 + len(uncompressed)
-			obufEnd = obufHeaderLen
-		}
-
-		// Fill in the per-chunk header that comes before the body.
-		w.obuf[len(magicChunk)+0] = chunkType
-		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
-		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
-		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
-		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
-		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
-		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
-		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
-
-		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
-			w.err = err
-			return nRet, err
-		}
-		if chunkType == chunkTypeUncompressedData {
-			if _, err := w.w.Write(uncompressed); err != nil {
-				w.err = err
-				return nRet, err
-			}
-		}
-		nRet += len(uncompressed)
-	}
-	return nRet, nil
-}
-
-// Flush flushes the Writer to its underlying io.Writer.
-func (w *Writer) Flush() error {
-	if w.err != nil {
-		return w.err
-	}
-	if len(w.ibuf) == 0 {
-		return nil
-	}
-	w.write(w.ibuf)
-	w.ibuf = w.ibuf[:0]
-	return w.err
-}
-
-// Close calls Flush and then closes the Writer.
-func (w *Writer) Close() error {
-	w.Flush()
-	ret := w.err
-	if w.err == nil {
-		w.err = errClosed
-	}
-	return ret
-}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
deleted file mode 100644
index 2754bac6f..000000000
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2016 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snapref
-
-func load32(b []byte, i int) uint32 {
-	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
-	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func load64(b []byte, i int) uint64 {
-	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
-	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
-		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-// emitLiteral writes a literal chunk and returns the number of bytes written.
-//
-// It assumes that:
-//
-//	dst is long enough to hold the encoded bytes
-//	1 <= len(lit) && len(lit) <= 65536
-func emitLiteral(dst, lit []byte) int {
-	i, n := 0, uint(len(lit)-1)
-	switch {
-	case n < 60:
-		dst[0] = uint8(n)<<2 | tagLiteral
-		i = 1
-	case n < 1<<8:
-		dst[0] = 60<<2 | tagLiteral
-		dst[1] = uint8(n)
-		i = 2
-	default:
-		dst[0] = 61<<2 | tagLiteral
-		dst[1] = uint8(n)
-		dst[2] = uint8(n >> 8)
-		i = 3
-	}
-	return i + copy(dst[i:], lit)
-}
-
-// emitCopy writes a copy chunk and returns the number of bytes written.
-//
-// It assumes that:
-//
-//	dst is long enough to hold the encoded bytes
-//	1 <= offset && offset <= 65535
-//	4 <= length && length <= 65535
-func emitCopy(dst []byte, offset, length int) int {
-	i := 0
-	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
-	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
-	// length emitted down below is a little lower (at 60 = 64 - 4), because
-	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
-	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
-	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
-	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
-	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
-	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
-	for length >= 68 {
-		// Emit a length 64 copy, encoded as 3 bytes.
-		dst[i+0] = 63<<2 | tagCopy2
-		dst[i+1] = uint8(offset)
-		dst[i+2] = uint8(offset >> 8)
-		i += 3
-		length -= 64
-	}
-	if length > 64 {
-		// Emit a length 60 copy, encoded as 3 bytes.
-		dst[i+0] = 59<<2 | tagCopy2
-		dst[i+1] = uint8(offset)
-		dst[i+2] = uint8(offset >> 8)
-		i += 3
-		length -= 60
-	}
-	if length >= 12 || offset >= 2048 {
-		// Emit the remaining copy, encoded as 3 bytes.
-		dst[i+0] = uint8(length-1)<<2 | tagCopy2
-		dst[i+1] = uint8(offset)
-		dst[i+2] = uint8(offset >> 8)
-		return i + 3
-	}
-	// Emit the remaining copy, encoded as 2 bytes.
-	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
-	dst[i+1] = uint8(offset)
-	return i + 2
-}
-
-func hash(u, shift uint32) uint32 {
-	return (u * 0x1e35a7bd) >> shift
-}
-
-// EncodeBlockInto exposes encodeBlock but checks dst size.
-func EncodeBlockInto(dst, src []byte) (d int) {
-	if MaxEncodedLen(len(src)) > len(dst) {
-		return 0
-	}
-
-	// encodeBlock breaks on too big blocks, so split.
-	for len(src) > 0 {
-		p := src
-		src = nil
-		if len(p) > maxBlockSize {
-			p, src = p[:maxBlockSize], p[maxBlockSize:]
-		}
-		if len(p) < minNonLiteralBlockSize {
-			d += emitLiteral(dst[d:], p)
-		} else {
-			d += encodeBlock(dst[d:], p)
-		}
-	}
-	return d
-}
-
-// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
-// assumes that the varint-encoded length of the decompressed bytes has already
-// been written.
-//
-// It also assumes that:
-//
-//	len(dst) >= MaxEncodedLen(len(src)) &&
-//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
-func encodeBlock(dst, src []byte) (d int) {
-	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
-	// The table element type is uint16, as s < sLimit and sLimit < len(src)
-	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
-	const (
-		maxTableSize = 1 << 14
-		// tableMask is redundant, but helps the compiler eliminate bounds
-		// checks.
-		tableMask = maxTableSize - 1
-	)
-	shift := uint32(32 - 8)
-	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
-		shift--
-	}
-	// In Go, all array elements are zero-initialized, so there is no advantage
-	// to a smaller tableSize per se. However, it matches the C++ algorithm,
-	// and in the asm versions of this code, we can get away with zeroing only
-	// the first tableSize elements.
-	var table [maxTableSize]uint16
-
-	// sLimit is when to stop looking for offset/length copies. The inputMargin
-	// lets us use a fast path for emitLiteral in the main loop, while we are
-	// looking for copies.
-	sLimit := len(src) - inputMargin
-
-	// nextEmit is where in src the next emitLiteral should start from.
-	nextEmit := 0
-
-	// The encoded form must start with a literal, as there are no previous
-	// bytes to copy, so we start looking for hash matches at s == 1.
-	s := 1
-	nextHash := hash(load32(src, s), shift)
-
-	for {
-		// Copied from the C++ snappy implementation:
-		//
-		// Heuristic match skipping: If 32 bytes are scanned with no matches
-		// found, start looking only at every other byte. If 32 more bytes are
-		// scanned (or skipped), look at every third byte, etc.. When a match
-		// is found, immediately go back to looking at every byte. This is a
-		// small loss (~5% performance, ~0.1% density) for compressible data
-		// due to more bookkeeping, but for non-compressible data (such as
-		// JPEG) it's a huge win since the compressor quickly "realizes" the
-		// data is incompressible and doesn't bother looking for matches
-		// everywhere.
-		//
-		// The "skip" variable keeps track of how many bytes there are since
-		// the last match; dividing it by 32 (ie. right-shifting by five) gives
-		// the number of bytes to move ahead for each iteration.
-		skip := 32
-
-		nextS := s
-		candidate := 0
-		for {
-			s = nextS
-			bytesBetweenHashLookups := skip >> 5
-			nextS = s + bytesBetweenHashLookups
-			skip += bytesBetweenHashLookups
-			if nextS > sLimit {
-				goto emitRemainder
-			}
-			candidate = int(table[nextHash&tableMask])
-			table[nextHash&tableMask] = uint16(s)
-			nextHash = hash(load32(src, nextS), shift)
-			if load32(src, s) == load32(src, candidate) {
-				break
-			}
-		}
-
-		// A 4-byte match has been found. We'll later see if more than 4 bytes
-		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-		// them as literal bytes.
-		d += emitLiteral(dst[d:], src[nextEmit:s])
-
-		// Call emitCopy, and then see if another emitCopy could be our next
-		// move. Repeat until we find no match for the input immediately after
-		// what was consumed by the last emitCopy call.
-		//
-		// If we exit this loop normally then we need to call emitLiteral next,
-		// though we don't yet know how big the literal will be. We handle that
-		// by proceeding to the next iteration of the main loop. We also can
-		// exit this loop via goto if we get close to exhausting the input.
-		for {
-			// Invariant: we have a 4-byte match at s, and no need to emit any
-			// literal bytes prior to s.
-			base := s
-
-			// Extend the 4-byte match as long as possible.
-			//
-			// This is an inlined version of:
-			//	s = extendMatch(src, candidate+4, s+4)
-			s += 4
-			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
-			}
-
-			d += emitCopy(dst[d:], base-candidate, s-base)
-			nextEmit = s
-			if s >= sLimit {
-				goto emitRemainder
-			}
-
-			// We could immediately start working at s now, but to improve
-			// compression we first update the hash table at s-1 and at s. If
-			// another emitCopy is not our next move, also calculate nextHash
-			// at s+1. At least on GOARCH=amd64, these three hash calculations
-			// are faster as one load64 call (with some shifts) instead of
-			// three load32 calls.
-			x := load64(src, s-1)
-			prevHash := hash(uint32(x>>0), shift)
-			table[prevHash&tableMask] = uint16(s - 1)
-			currHash := hash(uint32(x>>8), shift)
-			candidate = int(table[currHash&tableMask])
-			table[currHash&tableMask] = uint16(s)
-			if uint32(x>>8) != load32(src, candidate) {
-				nextHash = hash(uint32(x>>16), shift)
-				s++
-				break
-			}
-		}
-	}
-
-emitRemainder:
-	if nextEmit < len(src) {
-		d += emitLiteral(dst[d:], src[nextEmit:])
-	}
-	return d
-}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go
deleted file mode 100644
index 34d01f4aa..000000000
--- a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package snapref implements the Snappy compression format. It aims for very
-// high speeds and reasonable compression.
-//
-// There are actually two Snappy formats: block and stream. They are related,
-// but different: trying to decompress block-compressed data as a Snappy stream
-// will fail, and vice versa. The block format is the Decode and Encode
-// functions and the stream format is the Reader and Writer types.
-//
-// The block format, the more common case, is used when the complete size (the
-// number of bytes) of the original data is known upfront, at the time
-// compression starts. The stream format, also known as the framing format, is
-// for when that isn't always true.
-//
-// The canonical, C++ implementation is at https://github.com/google/snappy and
-// it only implements the block format.
-package snapref
-
-import (
-	"hash/crc32"
-)
-
-/*
-Each encoded block begins with the varint-encoded length of the decoded data,
-followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
-first byte of each chunk is broken into its 2 least and 6 most significant bits
-called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
-Zero means a literal tag. All other values mean a copy tag.
-
-For literal tags:
-  - If m < 60, the next 1 + m bytes are literal bytes.
-  - Otherwise, let n be the little-endian unsigned integer denoted by the next
-    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
-
-For copy tags, length bytes are copied from offset bytes ago, in the style of
-Lempel-Ziv compression algorithms. In particular:
-  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
-    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
-    of the offset. The next byte is bits 0-7 of the offset.
-  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
-    The length is 1 + m. The offset is the little-endian unsigned integer
-    denoted by the next 2 bytes.
-  - For l == 3, this tag is a legacy format that is no longer issued by most
-    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
-    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
-    integer denoted by the next 4 bytes.
-*/
-const (
-	tagLiteral = 0x00
-	tagCopy1   = 0x01
-	tagCopy2   = 0x02
-	tagCopy4   = 0x03
-)
-
-const (
-	checksumSize    = 4
-	chunkHeaderSize = 4
-	magicChunk      = "\xff\x06\x00\x00" + magicBody
-	magicBody       = "sNaPpY"
-
-	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
-	// part of the wire format per se, but some parts of the encoder assume
-	// that an offset fits into a uint16.
-	//
-	// Also, for the framing format (Writer type instead of Encode function),
-	// https://github.com/google/snappy/blob/master/framing_format.txt says
-	// that "the uncompressed data in a chunk must be no longer than 65536
-	// bytes".
-	maxBlockSize = 65536
-
-	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
-	// hard coded to be a const instead of a variable, so that obufLen can also
-	// be a const. Their equivalence is confirmed by
-	// TestMaxEncodedLenOfMaxBlockSize.
-	maxEncodedLenOfMaxBlockSize = 76490
-
-	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
-	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
-)
-
-const (
-	chunkTypeCompressedData   = 0x00
-	chunkTypeUncompressedData = 0x01
-	chunkTypePadding          = 0xfe
-	chunkTypeStreamIdentifier = 0xff
-)
-
-var crcTable = crc32.MakeTable(crc32.Castagnoli)
-
-// crc implements the checksum specified in section 3 of
-// https://github.com/google/snappy/blob/master/framing_format.txt
-func crc(b []byte) uint32 {
-	c := crc32.Update(0, crcTable, b)
-	return uint32(c>>15|c<<17) + 0xa282ead8
-}
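For reference, the block format and checksum logic described in the deleted sources remain reachable through klauspost/compress's public API. The sketch below is illustrative only — it is not part of this commit, and the `github.com/klauspost/compress/snappy` import path is an assumption about a drop-in snappy-compatible package rather than anything the diff itself establishes. It round-trips a block, reads the uvarint length header that Encode writes as its first step, and reproduces the masked Castagnoli checksum computed by snapref's crc function.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"

	"github.com/klauspost/compress/snappy" // assumed snappy-compatible package
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// maskedCRC mirrors the deleted snapref crc function: a Castagnoli CRC-32,
// rotated right by 15 bits, plus the constant 0xa282ead8, per section 3 of
// the framing format.
func maskedCRC(b []byte) uint32 {
	c := crc32.Checksum(b, castagnoli)
	return (c>>15 | c<<17) + 0xa282ead8
}

func main() {
	src := bytes.Repeat([]byte("hello snappy "), 100)

	// Block format: the encoded block starts with the varint-encoded
	// length of the decoded data, as Encode's first PutUvarint call writes.
	enc := snappy.Encode(nil, src)
	dLen, hdr := binary.Uvarint(enc)
	fmt.Printf("decoded length %d, header occupies %d byte(s)\n", dLen, hdr)

	dec, err := snappy.Decode(nil, enc)
	if err != nil || !bytes.Equal(dec, src) {
		panic("round trip failed")
	}
	fmt.Printf("masked CRC of payload: %#08x\n", maskedCRC(dec))
}
```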
