summaryrefslogtreecommitdiff
path: root/vendor/github.com/ugorji/go
diff options
context:
space:
mode:
authorLibravatar Tobi Smethurst <31960611+tsmethurst@users.noreply.github.com>2021-08-12 21:03:24 +0200
committerLibravatar GitHub <noreply@github.com>2021-08-12 21:03:24 +0200
commit98263a7de64269898a2f81207e38943b5c8e8653 (patch)
tree743c90f109a6c5d27832d1dcef2388d939f0f77a /vendor/github.com/ugorji/go
parentText duplication fix (#137) (diff)
downloadgotosocial-98263a7de64269898a2f81207e38943b5c8e8653.tar.xz
Grand test fixup (#138)
* start fixing up tests * fix up tests + automate with drone * fiddle with linting * messing about with drone.yml * some more fiddling * hmmm * add cache * add vendor directory * verbose * ci updates * update some little things * update sig
Diffstat (limited to 'vendor/github.com/ugorji/go')
-rw-r--r--vendor/github.com/ugorji/go/codec/0_importpath.go7
-rw-r--r--vendor/github.com/ugorji/go/codec/LICENSE22
-rw-r--r--vendor/github.com/ugorji/go/codec/README.md283
-rw-r--r--vendor/github.com/ugorji/go/codec/binc.go1308
-rw-r--r--vendor/github.com/ugorji/go/codec/build.sh368
-rw-r--r--vendor/github.com/ugorji/go/codec/cbor.go949
-rw-r--r--vendor/github.com/ugorji/go/codec/codecgen.go17
-rw-r--r--vendor/github.com/ugorji/go/codec/decimal.go491
-rw-r--r--vendor/github.com/ugorji/go/codec/decode.go2350
-rw-r--r--vendor/github.com/ugorji/go/codec/doc.go228
-rw-r--r--vendor/github.com/ugorji/go/codec/encode.go1479
-rw-r--r--vendor/github.com/ugorji/go/codec/fast-path.generated.go6157
-rw-r--r--vendor/github.com/ugorji/go/codec/fast-path.go.tmpl555
-rw-r--r--vendor/github.com/ugorji/go/codec/fast-path.not.go41
-rw-r--r--vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl90
-rw-r--r--vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl58
-rw-r--r--vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl27
-rw-r--r--vendor/github.com/ugorji/go/codec/gen-helper.generated.go277
-rw-r--r--vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl249
-rw-r--r--vendor/github.com/ugorji/go/codec/gen.generated.go192
-rw-r--r--vendor/github.com/ugorji/go/codec/gen.go2801
-rw-r--r--vendor/github.com/ugorji/go/codec/go.mod5
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go15
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go20
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_fmt_time_gte_go15.go13
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_fmt_time_lt_go15.go16
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go110.go13
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_makemap_not_unsafe_gte_go110.go14
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_makemap_unsafe_gte_go110.go25
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_maprange_gte_go112.go41
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_maprange_lt_go112.go45
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go9
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go9
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go22
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go11
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go11
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go9
-rw-r--r--vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go9
-rw-r--r--vendor/github.com/ugorji/go/codec/helper.go2972
-rw-r--r--vendor/github.com/ugorji/go/codec/helper.s0
-rw-r--r--vendor/github.com/ugorji/go/codec/helper_internal.go147
-rw-r--r--vendor/github.com/ugorji/go/codec/helper_not_unsafe.go670
-rw-r--r--vendor/github.com/ugorji/go/codec/helper_not_unsafe_not_gc.go19
-rw-r--r--vendor/github.com/ugorji/go/codec/helper_unsafe.go1301
-rw-r--r--vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_gc.go171
-rw-r--r--vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_not_gc.go80
-rw-r--r--vendor/github.com/ugorji/go/codec/json.go1457
-rw-r--r--vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl235
-rw-r--r--vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl101
-rw-r--r--vendor/github.com/ugorji/go/codec/msgpack.go1229
-rw-r--r--vendor/github.com/ugorji/go/codec/reader.go816
-rw-r--r--vendor/github.com/ugorji/go/codec/register_ext.go38
-rw-r--r--vendor/github.com/ugorji/go/codec/rpc.go232
-rw-r--r--vendor/github.com/ugorji/go/codec/simple.go747
-rw-r--r--vendor/github.com/ugorji/go/codec/sort-slice.generated.go158
-rw-r--r--vendor/github.com/ugorji/go/codec/sort-slice.go.tmpl66
-rw-r--r--vendor/github.com/ugorji/go/codec/test-cbor-goldens.json639
-rw-r--r--vendor/github.com/ugorji/go/codec/test.py138
-rw-r--r--vendor/github.com/ugorji/go/codec/writer.go289
59 files changed, 29741 insertions, 0 deletions
diff --git a/vendor/github.com/ugorji/go/codec/0_importpath.go b/vendor/github.com/ugorji/go/codec/0_importpath.go
new file mode 100644
index 000000000..adbe862c2
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/0_importpath.go
@@ -0,0 +1,7 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec // import "github.com/ugorji/go/codec"
+
+// This establishes that this package must be imported as github.com/ugorji/go/codec.
+// It makes forking easier, and plays well with pre-module releases of go.
diff --git a/vendor/github.com/ugorji/go/codec/LICENSE b/vendor/github.com/ugorji/go/codec/LICENSE
new file mode 100644
index 000000000..36a8bcf10
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2012-2020 Ugorji Nwoke.
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/ugorji/go/codec/README.md b/vendor/github.com/ugorji/go/codec/README.md
new file mode 100644
index 000000000..138a5fc9c
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/README.md
@@ -0,0 +1,283 @@
+# Package Documentation for github.com/ugorji/go/codec
+
+Package codec provides a High Performance, Feature-Rich Idiomatic Go 1.4+
+codec/encoding library for binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+ - msgpack: https://github.com/msgpack/msgpack
+ - binc: http://github.com/ugorji/binc
+ - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
+ - json: http://json.org http://tools.ietf.org/html/rfc7159
+ - simple:
+
+This package will carefully use 'package unsafe' for performance reasons in
+specific places. You can build without unsafe use by passing the safe or
+appengine tag i.e. 'go install -tags=codec.safe ...'.
+
+This library works with both the standard `gc` and the `gccgo` compilers.
+
+For detailed usage information, read the primer at
+http://ugorji.net/blog/go-codec-primer .
+
+The idiomatic Go support is as seen in other encoding packages in the
+standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Support for go 1.4 and above, while selectively using newer APIs for later releases
+ - Excellent code coverage ( > 90% )
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+  - Carefully selected use of 'unsafe' for targeted performance gains.
+ - 100% safe mode supported, where 'unsafe' is not used at all.
+ - Lock-free (sans mutex) concurrency for scaling to 100's of cores
+ - In-place updates during decode, with option to zero value in maps and slices prior to decode
+ - Coerce types where appropriate
+ e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Support using existence of `IsZero() bool` to determine if a value is a zero value.
+ Analogous to time.Time.IsZero() bool.
+  - Decoding without a schema (into an interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Mapping a non-interface type to an interface, so we can decode appropriately
+ into any interface type with a correctly configured non-interface value.
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Option to encode struct keys as numbers (instead of strings)
+ (to support structured streams with fields encoded as numeric codes)
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance, supported in go 1.6+
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding.
+ User decides whether to return an error or silently skip data when keys or indexes
+ in the data stream do not map to fields in the struct.
+ - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+
+## Extension Support
+
+Users can register a function to handle the encoding or decoding of their
+custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+```go
+    type BitSet   []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+```
+
+As an illustration, MyStructWithUnexportedFields would normally be encoded
+as an empty map because it has no exported fields, while UUID would be
+encoded as a string. However, with extension support, you can encode any of
+these however you like.
+
+There is also seamless support provided for registering an extension (with a
+tag) but letting the encoding mechanism default to the standard way.
+
+
+## Custom Encoding and Decoding
+
+This package maintains symmetry in the encoding and decoding halves. We
+determine how to encode or decode by walking this decision tree
+
+ - is there an extension registered for the type?
+ - is type a codec.Selfer?
+  - is format binary, and is type an encoding.BinaryMarshaler and BinaryUnmarshaler?
+  - is format specifically json, and is type an encoding/json.Marshaler and Unmarshaler?
+ - is format text-based, and type an encoding.TextMarshaler and TextUnmarshaler?
+ - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
+
+This symmetry is important to reduce chances of issues happening because the
+encoding and decoding sides are out of sync e.g. decoded via very specific
+encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
+
+Consequently, if a type only defines one-half of the symmetry (e.g. it
+implements UnmarshalJSON() but not MarshalJSON() ), then that type doesn't
+satisfy the check and we will continue walking down the decision tree.
+
+
+## RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used with
+the standard net/rpc package.
+
+
+## Usage
+
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent
+modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+ - Create and initialize the Handle before any use.
+ Once created, DO NOT modify it.
+ - Multiple Encoders or Decoders can now use the Handle concurrently.
+ They only read information off the Handle (never write).
+ - However, each Encoder or Decoder MUST not be used concurrently
+ - To re-use an Encoder/Decoder, call Reset(...) on it first.
+    This allows you to use state maintained on the Encoder/Decoder.
+
+Sample usage model:
+
+```go
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ ch codec.CborHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
+```
+
+
+## Running Tests
+
+To run tests, use the following:
+
+```
+ go test
+```
+
+To run the full suite of tests, use the following:
+
+```
+ go test -tags alltests -run Suite
+```
+
+You can run the tag 'codec.safe' to run tests or build in safe mode. e.g.
+
+```
+ go test -tags codec.safe -run Json
+ go test -tags "alltests codec.safe" -run Suite
+```
+
+## Running Benchmarks
+
+```
+ cd bench
+ go test -bench . -benchmem -benchtime 1s
+```
+
+Please see http://github.com/ugorji/go-codec-bench .
+
+
+## Caveats
+
+Struct fields matching the following are ignored during encoding and
+decoding
+
+ - struct tag value set to -
+ - func, complex numbers, unsafe pointers
+ - unexported and not embedded
+ - unexported and embedded and not struct kind
+ - unexported and embedded pointers (from go1.10)
+
+Every other field in a struct will be encoded/decoded.
+
+Embedded fields are encoded as if they exist in the top-level struct, with
+some caveats. See Encode documentation.
+
+## Exported Package API
+
+```go
+const CborStreamBytes byte = 0x5f ...
+const GenVersion = 25
+var SelfExt = &extFailWrapper{}
+var GoRpc goRpc
+var MsgpackSpecRpc msgpackSpecRpc
+func GenHelper() (g genHelper)
+type BasicHandle struct{ ... }
+type BincHandle struct{ ... }
+type BytesExt interface{ ... }
+type CborHandle struct{ ... }
+type DecodeOptions struct{ ... }
+type Decoder struct{ ... }
+ func NewDecoder(r io.Reader, h Handle) *Decoder
+ func NewDecoderBytes(in []byte, h Handle) *Decoder
+ func NewDecoderString(s string, h Handle) *Decoder
+type EncodeOptions struct{ ... }
+type Encoder struct{ ... }
+ func NewEncoder(w io.Writer, h Handle) *Encoder
+ func NewEncoderBytes(out *[]byte, h Handle) *Encoder
+type Ext interface{ ... }
+type Handle interface{ ... }
+type InterfaceExt interface{ ... }
+type JsonHandle struct{ ... }
+type MapBySlice interface{ ... }
+type MissingFielder interface{ ... }
+type MsgpackHandle struct{ ... }
+type MsgpackSpecRpcMultiArgs []interface{}
+type RPCOptions struct{ ... }
+type Raw []byte
+type RawExt struct{ ... }
+type Rpc interface{ ... }
+type Selfer interface{ ... }
+type SimpleHandle struct{ ... }
+type TypeInfos struct{ ... }
+ func NewTypeInfos(tags []string) *TypeInfos
+```
diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go
new file mode 100644
index 000000000..93d85cb4a
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/binc.go
@@ -0,0 +1,1308 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+ "time"
+)
+
+// Symbol management:
+// - symbols are stored in a symbol map during encoding and decoding.
+// - the symbols persist until the (En|De)coder ResetXXX method is called.
+
+const bincDoPrune = true
+
+// vd as low 4 bits (there are 16 slots)
+const (
+ bincVdSpecial byte = iota
+ bincVdPosInt
+ bincVdNegInt
+ bincVdFloat
+
+ bincVdString
+ bincVdByteArray
+ bincVdArray
+ bincVdMap
+
+ bincVdTimestamp
+ bincVdSmallInt
+ _ // bincVdUnicodeOther
+ bincVdSymbol
+
+ _ // bincVdDecimal
+ _ // open slot
+ _ // open slot
+ bincVdCustomExt = 0x0f
+)
+
+const (
+ bincSpNil byte = iota
+ bincSpFalse
+ bincSpTrue
+ bincSpNan
+ bincSpPosInf
+ bincSpNegInf
+ bincSpZeroFloat
+ bincSpZero
+ bincSpNegOne
+)
+
+const (
+ _ byte = iota // bincFlBin16
+ bincFlBin32
+ _ // bincFlBin32e
+ bincFlBin64
+ _ // bincFlBin64e
+ // others not currently supported
+)
+
+const bincBdNil = 0 // bincVdSpecial<<4 | bincSpNil // staticcheck barfs on this (SA4016)
+
+var (
+ bincdescSpecialVsNames = map[byte]string{
+ bincSpNil: "nil",
+ bincSpFalse: "false",
+ bincSpTrue: "true",
+ bincSpNan: "float",
+ bincSpPosInf: "float",
+ bincSpNegInf: "float",
+ bincSpZeroFloat: "float",
+ bincSpZero: "uint",
+ bincSpNegOne: "int",
+ }
+ bincdescVdNames = map[byte]string{
+ bincVdSpecial: "special",
+ bincVdSmallInt: "uint",
+ bincVdPosInt: "uint",
+ bincVdFloat: "float",
+ bincVdSymbol: "string",
+ bincVdString: "string",
+ bincVdByteArray: "bytes",
+ bincVdTimestamp: "time",
+ bincVdCustomExt: "ext",
+ bincVdArray: "array",
+ bincVdMap: "map",
+ }
+)
+
+func bincdescbd(bd byte) (s string) {
+ return bincdesc(bd>>4, bd&0x0f)
+}
+
+func bincdesc(vd, vs byte) (s string) {
+ if vd == bincVdSpecial {
+ s = bincdescSpecialVsNames[vs]
+ } else {
+ s = bincdescVdNames[vd]
+ }
+ if s == "" {
+ s = "unknown"
+ }
+ return
+}
+
+type bincEncState struct {
+ m map[string]uint16 // symbols
+}
+
+func (e bincEncState) captureState() interface{} { return e.m }
+func (e *bincEncState) resetState() { e.m = nil }
+func (e *bincEncState) reset() { e.resetState() }
+func (e *bincEncState) restoreState(v interface{}) { e.m = v.(map[string]uint16) }
+
+type bincEncDriver struct {
+ noBuiltInTypes
+ encDriverNoopContainerWriter
+ h *BincHandle
+ bincEncState
+
+ e Encoder
+}
+
+func (e *bincEncDriver) encoder() *Encoder {
+ return &e.e
+}
+
+func (e *bincEncDriver) EncodeNil() {
+ e.e.encWr.writen1(bincBdNil)
+}
+
+func (e *bincEncDriver) EncodeTime(t time.Time) {
+ if t.IsZero() {
+ e.EncodeNil()
+ } else {
+ bs := bincEncodeTime(t)
+ e.e.encWr.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
+ e.e.encWr.writeb(bs)
+ }
+}
+
+func (e *bincEncDriver) EncodeBool(b bool) {
+ if b {
+ e.e.encWr.writen1(bincVdSpecial<<4 | bincSpTrue)
+ } else {
+ e.e.encWr.writen1(bincVdSpecial<<4 | bincSpFalse)
+ }
+}
+
+func (e *bincEncDriver) encSpFloat(f float64) (done bool) {
+ if f == 0 {
+ e.e.encWr.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
+ } else if math.IsNaN(float64(f)) {
+ e.e.encWr.writen1(bincVdSpecial<<4 | bincSpNan)
+ } else if math.IsInf(float64(f), +1) {
+ e.e.encWr.writen1(bincVdSpecial<<4 | bincSpPosInf)
+ } else if math.IsInf(float64(f), -1) {
+ e.e.encWr.writen1(bincVdSpecial<<4 | bincSpNegInf)
+ } else {
+ return
+ }
+ return true
+}
+
+func (e *bincEncDriver) EncodeFloat32(f float32) {
+ if !e.encSpFloat(float64(f)) {
+ e.e.encWr.writen1(bincVdFloat<<4 | bincFlBin32)
+ bigen.writeUint32(e.e.w(), math.Float32bits(f))
+ }
+}
+
+func (e *bincEncDriver) EncodeFloat64(f float64) {
+ if e.encSpFloat(f) {
+ return
+ }
+ b := bigen.PutUint64(math.Float64bits(f))
+ if bincDoPrune {
+ i := 7
+ for ; i >= 0 && (b[i] == 0); i-- {
+ }
+ i++
+ if i <= 6 {
+ e.e.encWr.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
+ e.e.encWr.writen1(byte(i))
+ e.e.encWr.writeb(b[:i])
+ return
+ }
+ }
+ e.e.encWr.writen1(bincVdFloat<<4 | bincFlBin64)
+ e.e.encWr.writen8(b)
+}
+
+func (e *bincEncDriver) encIntegerPrune32(bd byte, pos bool, v uint64) {
+ b := bigen.PutUint32(uint32(v))
+ if bincDoPrune {
+ i := byte(pruneSignExt(b[:], pos))
+ e.e.encWr.writen1(bd | 3 - i)
+ e.e.encWr.writeb(b[i:])
+ } else {
+ e.e.encWr.writen1(bd | 3)
+ e.e.encWr.writen4(b)
+ }
+}
+
+func (e *bincEncDriver) encIntegerPrune64(bd byte, pos bool, v uint64) {
+ b := bigen.PutUint64(v)
+ if bincDoPrune {
+ i := byte(pruneSignExt(b[:], pos))
+ e.e.encWr.writen1(bd | 7 - i)
+ e.e.encWr.writeb(b[i:])
+ } else {
+ e.e.encWr.writen1(bd | 7)
+ e.e.encWr.writen8(b)
+ }
+}
+
+func (e *bincEncDriver) EncodeInt(v int64) {
+ if v >= 0 {
+ e.encUint(bincVdPosInt<<4, true, uint64(v))
+ } else if v == -1 {
+ e.e.encWr.writen1(bincVdSpecial<<4 | bincSpNegOne)
+ } else {
+ e.encUint(bincVdNegInt<<4, false, uint64(-v))
+ }
+}
+
+func (e *bincEncDriver) EncodeUint(v uint64) {
+ e.encUint(bincVdPosInt<<4, true, v)
+}
+
+func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
+ if v == 0 {
+ e.e.encWr.writen1(bincVdSpecial<<4 | bincSpZero)
+ } else if pos && v >= 1 && v <= 16 {
+ e.e.encWr.writen1(bincVdSmallInt<<4 | byte(v-1))
+ } else if v <= math.MaxUint8 {
+ e.e.encWr.writen2(bd|0x0, byte(v))
+ } else if v <= math.MaxUint16 {
+ e.e.encWr.writen1(bd | 0x01)
+ bigen.writeUint16(e.e.w(), uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.encIntegerPrune32(bd, pos, v)
+ } else {
+ e.encIntegerPrune64(bd, pos, v)
+ }
+}
+
+func (e *bincEncDriver) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
+ var bs0, bs []byte
+ if ext == SelfExt {
+ bs0 = e.e.blist.get(1024)
+ bs = bs0
+ e.e.sideEncode(v, basetype, &bs)
+ } else {
+ bs = ext.WriteExt(v)
+ }
+ if bs == nil {
+ e.EncodeNil()
+ goto END
+ }
+ e.encodeExtPreamble(uint8(xtag), len(bs))
+ e.e.encWr.writeb(bs)
+END:
+ if ext == SelfExt {
+ e.e.blist.put(bs)
+ if !byteSliceSameData(bs0, bs) {
+ e.e.blist.put(bs0)
+ }
+ }
+}
+
+func (e *bincEncDriver) EncodeRawExt(re *RawExt) {
+ e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
+ e.e.encWr.writeb(re.Data)
+}
+
+func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
+ e.encLen(bincVdCustomExt<<4, uint64(length))
+ e.e.encWr.writen1(xtag)
+}
+
+func (e *bincEncDriver) WriteArrayStart(length int) {
+ e.encLen(bincVdArray<<4, uint64(length))
+}
+
+func (e *bincEncDriver) WriteMapStart(length int) {
+ e.encLen(bincVdMap<<4, uint64(length))
+}
+
+func (e *bincEncDriver) EncodeSymbol(v string) {
+ //symbols only offer benefit when string length > 1.
+ //This is because strings with length 1 take only 2 bytes to store
+ //(bd with embedded length, and single byte for string val).
+
+ l := len(v)
+ if l == 0 {
+ e.encBytesLen(cUTF8, 0)
+ return
+ } else if l == 1 {
+ e.encBytesLen(cUTF8, 1)
+ e.e.encWr.writen1(v[0])
+ return
+ }
+ if e.m == nil {
+ e.m = make(map[string]uint16, 16)
+ }
+ ui, ok := e.m[v]
+ if ok {
+ if ui <= math.MaxUint8 {
+ e.e.encWr.writen2(bincVdSymbol<<4, byte(ui))
+ } else {
+ e.e.encWr.writen1(bincVdSymbol<<4 | 0x8)
+ bigen.writeUint16(e.e.w(), ui)
+ }
+ } else {
+ e.e.seq++
+ ui = e.e.seq
+ e.m[v] = ui
+ var lenprec uint8
+ if l <= math.MaxUint8 {
+ // lenprec = 0
+ } else if l <= math.MaxUint16 {
+ lenprec = 1
+ } else if int64(l) <= math.MaxUint32 {
+ lenprec = 2
+ } else {
+ lenprec = 3
+ }
+ if ui <= math.MaxUint8 {
+ e.e.encWr.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
+ } else {
+ e.e.encWr.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
+ bigen.writeUint16(e.e.w(), ui)
+ }
+ if lenprec == 0 {
+ e.e.encWr.writen1(byte(l))
+ } else if lenprec == 1 {
+ bigen.writeUint16(e.e.w(), uint16(l))
+ } else if lenprec == 2 {
+ bigen.writeUint32(e.e.w(), uint32(l))
+ } else {
+ bigen.writeUint64(e.e.w(), uint64(l))
+ }
+ e.e.encWr.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) EncodeString(v string) {
+ if e.h.StringToRaw {
+ e.encLen(bincVdByteArray<<4, uint64(len(v)))
+ if len(v) > 0 {
+ e.e.encWr.writestr(v)
+ }
+ return
+ }
+ e.EncodeStringEnc(cUTF8, v)
+}
+
+func (e *bincEncDriver) EncodeStringEnc(c charEncoding, v string) {
+ if e.e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 1) {
+ e.EncodeSymbol(v)
+ return
+ }
+ e.encLen(bincVdString<<4, uint64(len(v)))
+ if len(v) > 0 {
+ e.e.encWr.writestr(v)
+ }
+}
+
+func (e *bincEncDriver) EncodeStringBytesRaw(v []byte) {
+ if v == nil {
+ e.EncodeNil()
+ return
+ }
+ e.encLen(bincVdByteArray<<4, uint64(len(v)))
+ if len(v) > 0 {
+ e.e.encWr.writeb(v)
+ }
+}
+
+func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
+ // MARKER: we currently only support UTF-8 (string) and RAW (bytearray).
+ // We should consider supporting bincUnicodeOther.
+
+ if c == cRAW {
+ e.encLen(bincVdByteArray<<4, length)
+ } else {
+ e.encLen(bincVdString<<4, length)
+ }
+}
+
+func (e *bincEncDriver) encLen(bd byte, l uint64) {
+ if l < 12 {
+ e.e.encWr.writen1(bd | uint8(l+4))
+ } else {
+ e.encLenNumber(bd, l)
+ }
+}
+
+func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
+ if v <= math.MaxUint8 {
+ e.e.encWr.writen2(bd, byte(v))
+ } else if v <= math.MaxUint16 {
+ e.e.encWr.writen1(bd | 0x01)
+ bigen.writeUint16(e.e.w(), uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.e.encWr.writen1(bd | 0x02)
+ bigen.writeUint32(e.e.w(), uint32(v))
+ } else {
+ e.e.encWr.writen1(bd | 0x03)
+ bigen.writeUint64(e.e.w(), uint64(v))
+ }
+}
+
+//------------------------------------
+
+type bincDecState struct {
+ bdRead bool
+ bd byte
+ vd byte
+ vs byte
+
+ _ bool
+ // MARKER: consider using binary search here instead of a map (ie bincDecSymbol)
+ s map[uint16][]byte
+}
+
+func (x bincDecState) captureState() interface{} { return x }
+func (x *bincDecState) resetState() { *x = bincDecState{} }
+func (x *bincDecState) reset() { x.resetState() }
+func (x *bincDecState) restoreState(v interface{}) { *x = v.(bincDecState) }
+
+type bincDecDriver struct {
+ decDriverNoopContainerReader
+ decDriverNoopNumberHelper
+ noBuiltInTypes
+
+ h *BincHandle
+
+ bincDecState
+ d Decoder
+}
+
+func (d *bincDecDriver) decoder() *Decoder {
+ return &d.d
+}
+
+func (d *bincDecDriver) descBd() string {
+ return sprintf("%v (%s)", d.bd, bincdescbd(d.bd))
+}
+
+func (d *bincDecDriver) readNextBd() {
+ d.bd = d.d.decRd.readn1()
+ d.vd = d.bd >> 4
+ d.vs = d.bd & 0x0f
+ d.bdRead = true
+}
+
+func (d *bincDecDriver) advanceNil() (null bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincBdNil {
+ d.bdRead = false
+ return true // null = true
+ }
+ return
+}
+
+func (d *bincDecDriver) TryNil() bool {
+ return d.advanceNil()
+}
+
+func (d *bincDecDriver) ContainerType() (vt valueType) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == bincBdNil {
+ d.bdRead = false
+ return valueTypeNil
+ } else if d.vd == bincVdByteArray {
+ return valueTypeBytes
+ } else if d.vd == bincVdString {
+ return valueTypeString
+ } else if d.vd == bincVdArray {
+ return valueTypeArray
+ } else if d.vd == bincVdMap {
+ return valueTypeMap
+ }
+ return valueTypeUnset
+}
+
+func (d *bincDecDriver) DecodeTime() (t time.Time) {
+ if d.advanceNil() {
+ return
+ }
+ if d.vd != bincVdTimestamp {
+ d.d.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+ }
+ t, err := bincDecodeTime(d.d.decRd.readx(uint(d.vs)))
+ halt.onerror(err)
+ d.bdRead = false
+ return
+}
+
+func (d *bincDecDriver) decFloatPruned(maxlen uint8) {
+ l := d.d.decRd.readn1()
+ if l > maxlen {
+ d.d.errorf("cannot read float - at most %v bytes used to represent float - received %v bytes", maxlen, l)
+ }
+ for i := l; i < maxlen; i++ {
+ d.d.b[i] = 0
+ }
+ d.d.decRd.readb(d.d.b[0:l])
+}
+
+func (d *bincDecDriver) decFloatPre32() (b [4]byte) {
+ if d.vs&0x8 == 0 {
+ b = d.d.decRd.readn4()
+ } else {
+ d.decFloatPruned(4)
+ copy(b[:], d.d.b[:])
+ }
+ return
+}
+
+func (d *bincDecDriver) decFloatPre64() (b [8]byte) {
+ if d.vs&0x8 == 0 {
+ b = d.d.decRd.readn8()
+ } else {
+ d.decFloatPruned(8)
+ copy(b[:], d.d.b[:])
+ }
+ return
+}
+
+func (d *bincDecDriver) decFloatVal() (f float64) {
+ switch d.vs & 0x7 {
+ case bincFlBin32:
+ f = float64(math.Float32frombits(bigen.Uint32(d.decFloatPre32())))
+ case bincFlBin64:
+ f = math.Float64frombits(bigen.Uint64(d.decFloatPre64()))
+ default:
+ // ok = false
+ d.d.errorf("read float supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+ }
+ return
+}
+
+func (d *bincDecDriver) decUint() (v uint64) {
+ switch d.vs {
+ case 0:
+ v = uint64(d.d.decRd.readn1())
+ case 1:
+ v = uint64(bigen.Uint16(d.d.decRd.readn2()))
+ case 2:
+ v = uint64(bigen.Uint32(d.d.decRd.readn3()))
+ case 3:
+ v = uint64(bigen.Uint32(d.d.decRd.readn4()))
+ case 4, 5, 6:
+ var b [8]byte
+ lim := 7 - d.vs
+ bs := d.d.b[lim:8]
+ d.d.decRd.readb(bs)
+ copy(b[lim:], bs)
+ v = bigen.Uint64(b)
+ case 7:
+ v = bigen.Uint64(d.d.decRd.readn8())
+ default:
+ d.d.errorf("unsigned integers with greater than 64 bits of precision not supported: d.vs: %v %x", d.vs, d.vs)
+ }
+ return
+}
+
+func (d *bincDecDriver) uintBytes() (bs []byte) {
+ switch d.vs {
+ case 0:
+ bs = d.d.b[:1]
+ bs[0] = d.d.decRd.readn1()
+ case 1:
+ bs = d.d.b[:2]
+ d.d.decRd.readb(bs)
+ case 2:
+ bs = d.d.b[:3]
+ d.d.decRd.readb(bs)
+ case 3:
+ bs = d.d.b[:4]
+ d.d.decRd.readb(bs)
+ case 4, 5, 6:
+ lim := 7 - d.vs
+ bs = d.d.b[lim:8]
+ d.d.decRd.readb(bs)
+ case 7:
+ bs = d.d.b[:8]
+ d.d.decRd.readb(bs)
+ default:
+ d.d.errorf("unsigned integers with greater than 64 bits of precision not supported: d.vs: %v %x", d.vs, d.vs)
+ }
+ return
+}
+
+func (d *bincDecDriver) decInteger() (ui uint64, neg, ok bool) {
+ ok = true
+ vd, vs := d.vd, d.vs
+ if vd == bincVdPosInt {
+ ui = d.decUint()
+ } else if vd == bincVdNegInt {
+ ui = d.decUint()
+ neg = true
+ } else if vd == bincVdSmallInt {
+ ui = uint64(d.vs) + 1
+ } else if vd == bincVdSpecial {
+ if vs == bincSpZero {
+ // i = 0
+ } else if vs == bincSpNegOne {
+ neg = true
+ ui = 1
+ } else {
+ ok = false
+ // d.d.errorf("integer decode has invalid special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
+ }
+ } else {
+ ok = false
+ // d.d.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
+ }
+ return
+}
+
+// decFloat decodes the current descriptor as a float64, handling the
+// special values (NaN, +/-Inf, zero) as well as explicit floats.
+// ok is false when the descriptor is not a float kind.
+func (d *bincDecDriver) decFloat() (f float64, ok bool) {
+	ok = true
+	vd, vs := d.vd, d.vs
+	if vd == bincVdSpecial {
+		if vs == bincSpNan {
+			f = math.NaN()
+		} else if vs == bincSpPosInf {
+			f = math.Inf(1)
+		} else if vs == bincSpZeroFloat || vs == bincSpZero {
+			// f = 0
+		} else if vs == bincSpNegInf {
+			f = math.Inf(-1)
+		} else {
+			ok = false
+			// d.d.errorf("float - invalid special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
+		}
+	} else if vd == bincVdFloat {
+		f = d.decFloatVal()
+	} else {
+		ok = false
+	}
+	return
+}
+
+// DecodeInt64 decodes the next value as an int64 (a nil value decodes to 0).
+func (d *bincDecDriver) DecodeInt64() (i int64) {
+	if d.advanceNil() {
+		return
+	}
+	i = decNegintPosintFloatNumberHelper{&d.d}.int64(d.decInteger())
+	d.bdRead = false
+	return
+}
+
+// DecodeUint64 decodes the next value as a uint64 (a nil value decodes to 0).
+func (d *bincDecDriver) DecodeUint64() (ui uint64) {
+	if d.advanceNil() {
+		return
+	}
+	ui = decNegintPosintFloatNumberHelper{&d.d}.uint64(d.decInteger())
+	d.bdRead = false
+	return
+}
+
+// DecodeFloat64 decodes the next value as a float64 (a nil value decodes to 0).
+func (d *bincDecDriver) DecodeFloat64() (f float64) {
+	if d.advanceNil() {
+		return
+	}
+	f = decNegintPosintFloatNumberHelper{&d.d}.float64(d.decFloat())
+	d.bdRead = false
+	return
+}
+
+// DecodeBool decodes a bool, encoded as one of the special
+// true/false descriptor bytes (a nil value decodes to false).
+func (d *bincDecDriver) DecodeBool() (b bool) {
+	if d.advanceNil() {
+		return
+	}
+	if d.bd == (bincVdSpecial | bincSpFalse) {
+		// b = false
+	} else if d.bd == (bincVdSpecial | bincSpTrue) {
+		b = true
+	} else {
+		d.d.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+	}
+	d.bdRead = false
+	return
+}
+
+// ReadMapStart consumes a map descriptor and returns its entry count
+// (containerLenNil for a nil value).
+func (d *bincDecDriver) ReadMapStart() (length int) {
+	if d.advanceNil() {
+		return containerLenNil
+	}
+	if d.vd != bincVdMap {
+		d.d.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+	}
+	length = d.decLen()
+	d.bdRead = false
+	return
+}
+
+// ReadArrayStart consumes an array descriptor and returns its element count
+// (containerLenNil for a nil value).
+func (d *bincDecDriver) ReadArrayStart() (length int) {
+	if d.advanceNil() {
+		return containerLenNil
+	}
+	if d.vd != bincVdArray {
+		d.d.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+	}
+	length = d.decLen()
+	d.bdRead = false
+	return
+}
+
+// decLen returns a container/string length. For vs > 3 the length is
+// embedded directly in the descriptor as vs-4; otherwise an explicit
+// big-endian length follows (see decLenNumber).
+func (d *bincDecDriver) decLen() int {
+	if d.vs > 3 {
+		return int(d.vs - 4)
+	}
+	return int(d.decLenNumber())
+}
+
+// decLenNumber reads an explicit length from the stream:
+// vs selects a 1, 2, 4 or 8-byte big-endian unsigned integer.
+func (d *bincDecDriver) decLenNumber() (v uint64) {
+	if x := d.vs; x == 0 {
+		v = uint64(d.d.decRd.readn1())
+	} else if x == 1 {
+		v = uint64(bigen.Uint16(d.d.decRd.readn2()))
+	} else if x == 2 {
+		v = uint64(bigen.Uint32(d.d.decRd.readn4()))
+	} else {
+		v = bigen.Uint64(d.d.decRd.readn8())
+	}
+	return
+}
+
+// func (d *bincDecDriver) decStringBytes(bs []byte, zerocopy bool) (bs2 []byte) {
+
+// DecodeStringAsBytes returns the next string/byte-array value as a byte
+// slice. For plain strings/byte-arrays the result may be a zero-copy view
+// of the input buffer; symbols are resolved via (and stored in) the
+// decoder's symbol table d.s.
+func (d *bincDecDriver) DecodeStringAsBytes() (bs2 []byte) {
+	d.d.decByteState = decByteStateNone
+	if d.advanceNil() {
+		return
+	}
+	var slen = -1
+	switch d.vd {
+	case bincVdString, bincVdByteArray:
+		slen = d.decLen()
+		if d.d.bytes {
+			d.d.decByteState = decByteStateZerocopy
+			bs2 = d.d.decRd.rb.readx(uint(slen))
+		} else {
+			d.d.decByteState = decByteStateReuseBuf
+			bs2 = decByteSlice(d.d.r(), slen, d.d.h.MaxInitLen, d.d.b[:])
+		}
+	case bincVdSymbol:
+		// zerocopy doesn't apply for symbols,
+		// as the values must be stored in a table for later use.
+		var symbol uint16
+		vs := d.vs
+		// bit 3 of vs: clear = 1-byte symbol id, set = 2-byte symbol id.
+		if vs&0x8 == 0 {
+			symbol = uint16(d.d.decRd.readn1())
+		} else {
+			symbol = uint16(bigen.Uint16(d.d.decRd.readn2()))
+		}
+		if d.s == nil {
+			d.s = make(map[uint16][]byte, 16)
+		}
+
+		// bit 2 of vs: clear = symbol already defined (look it up),
+		// set = the string definition follows inline.
+		if vs&0x4 == 0 {
+			bs2 = d.s[symbol]
+		} else {
+			switch vs & 0x3 {
+			case 0:
+				slen = int(d.d.decRd.readn1())
+			case 1:
+				slen = int(bigen.Uint16(d.d.decRd.readn2()))
+			case 2:
+				slen = int(bigen.Uint32(d.d.decRd.readn4()))
+			case 3:
+				slen = int(bigen.Uint64(d.d.decRd.readn8()))
+			}
+			// As we are using symbols, do not store any part of
+			// the parameter bs in the map, as it might be a shared buffer.
+			bs2 = decByteSlice(d.d.r(), slen, d.d.h.MaxInitLen, nil)
+			d.s[symbol] = bs2
+		}
+	default:
+		d.d.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+	}
+	d.bdRead = false
+	return
+}
+
+// DecodeBytes decodes a byte slice, reusing bs (or the decoder scratch
+// buffer when bs is nil). A binc array is also accepted and decoded
+// element-wise, each element read as a uint8.
+func (d *bincDecDriver) DecodeBytes(bs []byte) (bsOut []byte) {
+	d.d.decByteState = decByteStateNone
+	if d.advanceNil() {
+		return
+	}
+	if d.vd == bincVdArray {
+		if bs == nil {
+			bs = d.d.b[:]
+			d.d.decByteState = decByteStateReuseBuf
+		}
+		slen := d.ReadArrayStart()
+		var changed bool
+		// if the slice was reallocated to fit, it no longer aliases the scratch buffer.
+		if bs, changed = usableByteSlice(bs, slen); changed {
+			d.d.decByteState = decByteStateNone
+		}
+		for i := 0; i < slen; i++ {
+			bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8))
+		}
+		return bs
+	}
+	var clen int
+	if d.vd == bincVdString || d.vd == bincVdByteArray {
+		clen = d.decLen()
+	} else {
+		d.d.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+	}
+	d.bdRead = false
+	if d.d.zerocopy() {
+		d.d.decByteState = decByteStateZerocopy
+		return d.d.decRd.rb.readx(uint(clen))
+	}
+	if bs == nil {
+		bs = d.d.b[:]
+		d.d.decByteState = decByteStateReuseBuf
+	}
+	return decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, bs)
+}
+
+// DecodeExt decodes an extension value into rv. binc tags occupy a single
+// byte, so xtag must be <= 0xff. The payload is stored raw into a *RawExt
+// (ext == nil), decoded in-place for SelfExt, or handed to ext.ReadExt.
+func (d *bincDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
+	if xtag > 0xff {
+		d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
+	}
+	if d.advanceNil() {
+		return
+	}
+	xbs, realxtag1, zerocopy := d.decodeExtV(ext != nil, uint8(xtag))
+	realxtag := uint64(realxtag1)
+	if ext == nil {
+		re := rv.(*RawExt)
+		re.Tag = realxtag
+		re.setData(xbs, zerocopy)
+	} else if ext == SelfExt {
+		d.d.sideDecode(rv, basetype, xbs)
+	} else {
+		ext.ReadExt(rv, xbs)
+	}
+}
+
+// decodeExtV reads an extension's tag byte and payload bytes, verifying the
+// tag against the expected one when verifyTag is set. A plain byte array is
+// also accepted (xtag is then left as zero). zerocopy reports whether xbs
+// aliases the input buffer.
+func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xbs []byte, xtag byte, zerocopy bool) {
+	if d.vd == bincVdCustomExt {
+		l := d.decLen()
+		xtag = d.d.decRd.readn1()
+		if verifyTag && xtag != tag {
+			d.d.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag)
+		}
+		if d.d.bytes {
+			xbs = d.d.decRd.rb.readx(uint(l))
+			zerocopy = true
+		} else {
+			xbs = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
+		}
+	} else if d.vd == bincVdByteArray {
+		xbs = d.DecodeBytes(nil)
+	} else {
+		d.d.errorf("ext expects extensions or byte array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+	}
+	d.bdRead = false
+	return
+}
+
+// DecodeNaked decodes the next value into the shared fauxUnion, inferring
+// its type from the binc descriptor. For containers (array/map) only the
+// value kind is set and decodeFurther is signaled via leaving bdRead true,
+// so the caller decodes the contents.
+func (d *bincDecDriver) DecodeNaked() {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+
+	n := d.d.naked()
+	var decodeFurther bool
+
+	switch d.vd {
+	case bincVdSpecial:
+		switch d.vs {
+		case bincSpNil:
+			n.v = valueTypeNil
+		case bincSpFalse:
+			n.v = valueTypeBool
+			n.b = false
+		case bincSpTrue:
+			n.v = valueTypeBool
+			n.b = true
+		case bincSpNan:
+			n.v = valueTypeFloat
+			n.f = math.NaN()
+		case bincSpPosInf:
+			n.v = valueTypeFloat
+			n.f = math.Inf(1)
+		case bincSpNegInf:
+			n.v = valueTypeFloat
+			n.f = math.Inf(-1)
+		case bincSpZeroFloat:
+			n.v = valueTypeFloat
+			n.f = float64(0)
+		case bincSpZero:
+			n.v = valueTypeUint
+			n.u = uint64(0) // int8(0)
+		case bincSpNegOne:
+			n.v = valueTypeInt
+			n.i = int64(-1) // int8(-1)
+		default:
+			d.d.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
+		}
+	case bincVdSmallInt:
+		n.v = valueTypeUint
+		n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
+	case bincVdPosInt:
+		n.v = valueTypeUint
+		n.u = d.decUint()
+	case bincVdNegInt:
+		n.v = valueTypeInt
+		n.i = -(int64(d.decUint()))
+	case bincVdFloat:
+		n.v = valueTypeFloat
+		n.f = d.decFloatVal()
+	case bincVdString:
+		n.v = valueTypeString
+		n.s = d.d.stringZC(d.DecodeStringAsBytes())
+	case bincVdByteArray:
+		d.d.fauxUnionReadRawBytes(false)
+	case bincVdSymbol:
+		n.v = valueTypeSymbol
+		n.s = d.d.stringZC(d.DecodeStringAsBytes())
+	case bincVdTimestamp:
+		n.v = valueTypeTime
+		// vs holds the byte-length of the encoded timestamp.
+		tt, err := bincDecodeTime(d.d.decRd.readx(uint(d.vs)))
+		halt.onerror(err)
+		n.t = tt
+	case bincVdCustomExt:
+		n.v = valueTypeExt
+		l := d.decLen()
+		n.u = uint64(d.d.decRd.readn1())
+		if d.d.bytes {
+			n.l = d.d.decRd.rb.readx(uint(l))
+		} else {
+			n.l = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
+		}
+	case bincVdArray:
+		n.v = valueTypeArray
+		decodeFurther = true
+	case bincVdMap:
+		n.v = valueTypeMap
+		decodeFurther = true
+	default:
+		d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+	}
+
+	if !decodeFurther {
+		d.bdRead = false
+	}
+	// honor the SignedInteger option: present unsigned results as signed.
+	if n.v == valueTypeUint && d.h.SignedInteger {
+		n.v = valueTypeInt
+		n.i = int64(n.u)
+	}
+}
+
+// nextValueBytes appends the raw encoded bytes of the next value to v0,
+// skipping over it in the stream without decoding it.
+func (d *bincDecDriver) nextValueBytes(v0 []byte) (v []byte) {
+	if !d.bdRead {
+		d.readNextBd()
+	}
+	v = v0
+	var h = decNextValueBytesHelper{d: &d.d}
+	// cursor marks where this value's descriptor byte started in the buffer.
+	var cursor = d.d.rb.c - 1
+	h.append1(&v, d.bd)
+	v = d.nextValueBytesBdReadR(v)
+	d.bdRead = false
+	h.bytesRdV(&v, cursor)
+	return
+}
+
+// nextValueBytesR is the recursive helper for nextValueBytes: it reads the
+// next descriptor byte and appends that value's raw bytes to v0.
+func (d *bincDecDriver) nextValueBytesR(v0 []byte) (v []byte) {
+	d.readNextBd()
+	v = v0
+	var h = decNextValueBytesHelper{d: &d.d}
+	h.append1(&v, d.bd)
+	return d.nextValueBytesBdReadR(v)
+}
+
+// nextValueBytesBdReadR appends the raw bytes of the current value
+// (descriptor already read and already appended by the caller) to v0,
+// recursing for array/map elements.
+func (d *bincDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
+	v = v0
+	var h = decNextValueBytesHelper{d: &d.d}
+
+	// fnLen reads a length encoded per vs: 0..3 mean a 1/2/4/8-byte
+	// big-endian length follows (its bytes are also appended to v);
+	// vs >= 4 means the length is embedded as vs-4 and nothing is consumed.
+	fnLen := func(vs byte) uint {
+		switch vs {
+		case 0:
+			x := d.d.decRd.readn1()
+			h.append1(&v, x)
+			return uint(x)
+		case 1:
+			x := d.d.decRd.readn2()
+			h.appendN(&v, x[:]...)
+			return uint(bigen.Uint16(x))
+		case 2:
+			x := d.d.decRd.readn4()
+			h.appendN(&v, x[:]...)
+			return uint(bigen.Uint32(x))
+		case 3:
+			x := d.d.decRd.readn8()
+			h.appendN(&v, x[:]...)
+			return uint(bigen.Uint64(x))
+		default:
+			return uint(vs - 4)
+		}
+	}
+
+	var clen uint
+
+	switch d.vd {
+	case bincVdSpecial:
+		// special values are fully encoded in the descriptor byte itself.
+		switch d.vs {
+		case bincSpNil, bincSpFalse, bincSpTrue, bincSpNan, bincSpPosInf: // pass
+		case bincSpNegInf, bincSpZeroFloat, bincSpZero, bincSpNegOne: // pass
+		default:
+			d.d.errorf("cannot infer value - unrecognized special value %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs))
+		}
+	case bincVdSmallInt: // pass
+	case bincVdPosInt, bincVdNegInt:
+		bs := d.uintBytes()
+		h.appendN(&v, bs...)
+	case bincVdFloat:
+		// fn copies a float payload of xlen bytes; bit 3 of vs means an
+		// explicit length byte precedes the payload.
+		fn := func(xlen byte) {
+			if d.vs&0x8 != 0 {
+				xlen = d.d.decRd.readn1()
+				h.append1(&v, xlen)
+				if xlen > 8 {
+					d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", xlen)
+				}
+			}
+			d.d.decRd.readb(d.d.b[:xlen])
+			h.appendN(&v, d.d.b[:xlen]...)
+		}
+		switch d.vs & 0x7 {
+		case bincFlBin32:
+			fn(4)
+		case bincFlBin64:
+			fn(8)
+		default:
+			d.d.errorf("read float supports only float32/64 - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+		}
+	case bincVdString, bincVdByteArray:
+		clen = fnLen(d.vs)
+		h.appendN(&v, d.d.decRd.readx(clen)...)
+	case bincVdSymbol:
+		// symbol id: 1 or 2 bytes depending on bit 3 of vs.
+		if d.vs&0x8 == 0 {
+			h.append1(&v, d.d.decRd.readn1())
+		} else {
+			h.appendN(&v, d.d.decRd.rb.readx(2)...)
+		}
+		// bit 2 of vs set: an inline string definition follows the id.
+		if d.vs&0x4 != 0 {
+			clen = fnLen(d.vs & 0x3)
+			h.appendN(&v, d.d.decRd.readx(clen)...)
+		}
+	case bincVdTimestamp:
+		h.appendN(&v, d.d.decRd.readx(uint(d.vs))...)
+	case bincVdCustomExt:
+		clen = fnLen(d.vs)
+		h.append1(&v, d.d.decRd.readn1()) // tag
+		h.appendN(&v, d.d.decRd.readx(clen)...)
+	case bincVdArray:
+		clen = fnLen(d.vs)
+		for i := uint(0); i < clen; i++ {
+			v = d.nextValueBytesR(v)
+		}
+	case bincVdMap:
+		clen = fnLen(d.vs)
+		for i := uint(0); i < clen; i++ {
+			v = d.nextValueBytesR(v)
+			v = d.nextValueBytesR(v)
+		}
+	default:
+		d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs))
+	}
+	return
+}
+
+//------------------------------------
+
+// BincHandle is a Handle for the Binc Schema-Free Encoding Format
+// defined at https://github.com/ugorji/binc .
+//
+// BincHandle currently supports all Binc features, with the following EXCEPTIONS:
+//   - only integers up to 64 bits of precision are supported.
+//     big integers are unsupported.
+//   - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
+//     extended precision and decimal IEEE 754 floats are unsupported.
+//   - Only UTF-8 strings are supported.
+//     Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
+//
+// Note that these EXCEPTIONS are temporary, and full support is possible and may happen soon.
+type BincHandle struct {
+	BasicHandle
+	binaryEncodingType // marks binc as a binary (non-text) format
+	// noElemSeparators
+
+	// AsSymbols defines what should be encoded as symbols.
+	//
+	// Encoding as symbols can reduce the encoded size significantly.
+	//
+	// However, during decoding, each string to be encoded as a symbol must
+	// be checked to see if it has been seen before. Consequently, encoding time
+	// will increase if using symbols, because string comparisons has a clear cost.
+	//
+	// Values:
+	// - 0: default: library uses best judgement
+	// - 1: use symbols
+	// - 2: do not use symbols
+	AsSymbols uint8
+
+	// AsSymbols: may later on introduce more options ...
+	// - m: map keys
+	// - s: struct fields
+	// - n: none
+	// - a: all: same as m, s, ...
+
+	// _ [7]uint64 // padding (cache-aligned)
+}
+
+// Name returns the name of the handle: binc
+func (h *BincHandle) Name() string { return "binc" }
+
+func (h *BincHandle) desc(bd byte) string { return bincdesc(bd>>4, bd&0x0f) }
+
+// newEncDriver returns a fresh binc encode driver, wired back into the
+// shared encoder state and reset to its initial state.
+func (h *BincHandle) newEncDriver() encDriver {
+	var e = &bincEncDriver{h: h}
+	e.e.e = e
+	e.e.init(h)
+	e.reset()
+	return e
+}
+
+// newDecDriver returns a fresh binc decode driver, wired back into the
+// shared decoder state and reset to its initial state.
+func (h *BincHandle) newDecDriver() decDriver {
+	d := &bincDecDriver{h: h}
+	d.d.d = d
+	d.d.init(h)
+	d.reset()
+	return d
+}
+
+// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+
+// EncodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+// A timestamp is composed of 3 components:
+//
+// - secs: signed integer representing seconds since unix epoch
+// - nsecs: unsigned integer representing fractional seconds as a
+// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+// - tz: signed integer representing timezone offset in minutes east of UTC,
+// and a dst (daylight savings time) flag
+//
+// When encoding a timestamp, the first byte is the descriptor, which
+// defines which components are encoded and how many bytes are used to
+// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+// is not encoded in the byte array explicitly*.
+//
+// Descriptor 8 bits are of the form `A B C DDD EE`:
+// A: Is secs component encoded? 1 = true
+// B: Is nsecs component encoded? 1 = true
+// C: Is tz component encoded? 1 = true
+// DDD: Number of extra bytes for secs (range 0-7).
+// If A = 1, secs encoded in DDD+1 bytes.
+// If A = 0, secs is not encoded, and is assumed to be 0.
+// If A = 1, then we need at least 1 byte to encode secs.
+// DDD says the number of extra bytes beyond that 1.
+// E.g. if DDD=0, then secs is represented in 1 byte.
+// if DDD=2, then secs is represented in 3 bytes.
+// EE: Number of extra bytes for nsecs (range 0-3).
+// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
+//
+// Following the descriptor bytes, subsequent bytes are:
+//
+// secs component encoded in `DDD + 1` bytes (if A == 1)
+// nsecs component encoded in `EE + 1` bytes (if B == 1)
+// tz component encoded in 2 bytes (if C == 1)
+//
+// secs and nsecs components are integers encoded in a BigEndian
+// 2-complement encoding format.
+//
+// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
+// Least significant bit 0 are described below:
+//
+// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
+// Bit 15 = have\_dst: set to 1 if we set the dst flag.
+// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not.
+// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
+//
+func bincEncodeTime(t time.Time) []byte {
+	// t := rv2i(rv).(time.Time)
+	tsecs, tnsecs := t.Unix(), t.Nanosecond()
+	var (
+		bd byte     // descriptor byte (flags + component widths)
+		bs [16]byte // 1 descriptor + up to 8 secs + 4 nsecs + 2 tz
+		i  int = 1  // write position; bs[0] is reserved for bd
+	)
+	l := t.Location()
+	if l == time.UTC {
+		l = nil // UTC: omit the tz component entirely
+	}
+	if tsecs != 0 {
+		bd = bd | 0x80 // flag A: secs present
+		btmp := bigen.PutUint64(uint64(tsecs))
+		// drop redundant sign-extension bytes; f = count pruned.
+		f := pruneSignExt(btmp[:], tsecs >= 0)
+		bd = bd | (byte(7-f) << 2) // DDD = extra secs bytes beyond 1
+		copy(bs[i:], btmp[f:])
+		i = i + (8 - f)
+	}
+	if tnsecs != 0 {
+		bd = bd | 0x40 // flag B: nsecs present
+		btmp := bigen.PutUint32(uint32(tnsecs))
+		f := pruneSignExt(btmp[:4], true)
+		bd = bd | byte(3-f) // EE = extra nsecs bytes beyond 1
+		copy(bs[i:], btmp[f:4])
+		i = i + (4 - f)
+	}
+	if l != nil {
+		bd = bd | 0x20 // flag C: tz present
+		// Note that Go Libs do not give access to dst flag.
+		_, zoneOffset := t.Zone()
+		// zoneName, zoneOffset := t.Zone()
+		zoneOffset /= 60 // seconds -> minutes
+		z := uint16(zoneOffset)
+		btmp := bigen.PutUint16(z)
+		// clear dst flags
+		bs[i] = btmp[0] & 0x3f
+		bs[i+1] = btmp[1]
+		i = i + 2
+	}
+	bs[0] = bd
+	return bs[0:i]
+}
+
+// bincDecodeTime decodes a []byte into a time.Time.
+// bincDecodeTime decodes a []byte (produced by bincEncodeTime) into a
+// time.Time, reconstructing the secs/nsecs/tz components per the
+// descriptor byte bs[0].
+func bincDecodeTime(bs []byte) (tt time.Time, err error) {
+	bd := bs[0]
+	var (
+		tsec  int64
+		tnsec uint32
+		tz    uint16
+		i     byte = 1 // read position, past the descriptor byte
+		i2    byte
+		n     byte
+	)
+	if bd&(1<<7) != 0 { // flag A: secs component present
+		var btmp [8]byte
+		n = ((bd >> 2) & 0x7) + 1 // DDD+1 bytes encode secs
+		i2 = i + n
+		copy(btmp[8-n:], bs[i:i2])
+		// if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
+		if bs[i]&(1<<7) != 0 {
+			copy(btmp[0:8-n], bsAll0xff)
+		}
+		i = i2
+		tsec = int64(bigen.Uint64(btmp))
+	}
+	if bd&(1<<6) != 0 { // flag B: nsecs component present
+		var btmp [4]byte
+		n = (bd & 0x3) + 1 // EE+1 bytes encode nsecs
+		i2 = i + n
+		copy(btmp[4-n:], bs[i:i2])
+		i = i2
+		tnsec = bigen.Uint32(btmp)
+	}
+	if bd&(1<<5) == 0 { // flag C clear: no tz component => UTC
+		tt = time.Unix(tsec, int64(tnsec)).UTC()
+		return
+	}
+	// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
+	// However, we need name here, so it can be shown when time is printed.
+	// Zone name is in form: UTC-08:00.
+	// Note that Go Libs do not give access to dst flag, so we ignore dst bits
+
+	tz = bigen.Uint16([2]byte{bs[i], bs[i+1]})
+	// sign extend sign bit into top 2 MSB (which were dst bits):
+	if tz&(1<<13) == 0 { // positive
+		tz = tz & 0x3fff // clear 2 MSBs: dst bits
+	} else { // negative
+		tz = tz | 0xc000 // set 2 MSBs: dst bits
+	}
+	tzint := int16(tz)
+	if tzint == 0 {
+		tt = time.Unix(tsec, int64(tnsec)).UTC()
+	} else {
+		// For Go Time, do not use a descriptive timezone.
+		// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
+		// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
+		// var zoneName = timeLocUTCName(tzint)
+		tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
+	}
+	return
+}
+
+// Compile-time checks that the binc drivers satisfy the codec driver interfaces.
+var _ decDriver = (*bincDecDriver)(nil)
+var _ encDriver = (*bincEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/build.sh b/vendor/github.com/ugorji/go/codec/build.sh
new file mode 100644
index 000000000..c7704f758
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/build.sh
@@ -0,0 +1,368 @@
+#!/bin/bash
+
+# Run all the different permutations of all the tests and other things
+# This helps ensure that nothing gets broken.
+
+# _tests runs the TestCodecSuite across the tag permutations (fastpath/safe/
+# codecgen combinations), optionally collecting and merging coverage profiles.
+# Controlled by: zargs/ztestargs (extra go flags), zwait (run sequentially),
+# zcover (coverage), zextra (extra tests). Requires go1.7+.
+_tests() {
+    local vet="" # TODO: make it off
+    local gover=$( ${gocmd} version | cut -f 3 -d ' ' )
+    # go tool cover is not supported for gccgo/gollvm, so disable coverage there.
+    [[ $( ${gocmd} version ) == *"gccgo"* ]] && zcover=0
+    [[ $( ${gocmd} version ) == *"gollvm"* ]] && zcover=0
+    case $gover in
+    go1.[7-9]*|go1.1[0-9]*|go2.*|devel*) true ;;
+    *) return 1
+    esac
+    # note that codecgen requires fastpath, so you cannot do "codecgen codec.notfastpath"
+    # we test the following permutations which all execute different code paths as below.
+    echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/unsafe), (fastpath/!unsafe), (!fastpath/!unsafe), (codecgen/unsafe)"
+    local echo=1
+    local nc=2 # count
+    local cpus="1,$(nproc)"
+    # if using the race detector, then set nc to
+    if [[ " ${zargs[@]} " =~ "-race" ]]; then
+        cpus="$(nproc)"
+    fi
+    local a=( "" "codec.notfastpath" "codec.safe" "codec.notfastpath codec.safe" "codecgen" )
+    local b=()
+    local c=()
+    for i in "${a[@]}"
+    do
+        local i2=${i:-default}
+        [[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'alltests $i'; RUN: 'TestCodecSuite'"
+        [[ "$zcover" == "1" ]] && c=( -coverprofile "${i2// /-}.cov.out" )
+        true &&
+            ${gocmd} vet -printfuncs "errorf" "$@" &&
+            if [[ "$echo" == 1 ]]; then set -o xtrace; fi &&
+            ${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "alltests $i" -count $nc -cpu $cpus -run "TestCodecSuite" "${c[@]}" "$@" &
+        if [[ "$echo" == 1 ]]; then set +o xtrace; fi
+        b+=("${i2// /-}.cov.out")
+        [[ "$zwait" == "1" ]] && wait
+
+        # if [[ "$?" != 0 ]]; then return 1; fi
+    done
+    if [[ "$zextra" == "1" ]]; then
+        [[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'codec.notfastpath x'; RUN: 'Test.*X$'"
+        [[ "$zcover" == "1" ]] && c=( -coverprofile "x.cov.out" )
+        ${gocmd} test ${zargs[*]} ${ztestargs[*]} -vet "$vet" -tags "codec.notfastpath x" -count $nc -run 'Test.*X$' "${c[@]}" &
+        b+=("x.cov.out")
+        [[ "$zwait" == "1" ]] && wait
+    fi
+    wait
+    # go tool cover is not supported for gccgo, gollvm, other non-standard go compilers
+    [[ "$zcover" == "1" ]] &&
+        command -v gocovmerge &&
+        gocovmerge "${b[@]}" > __merge.cov.out &&
+        ${gocmd} tool cover -html=__merge.cov.out
+}
+
+# _ng ("needs generation?") prints 1 if target file $1 is missing or older
+# than any of the template/source files it is generated from; prints nothing
+# otherwise. Used by _build/_codegenerators to skip up-to-date outputs.
+_ng() {
+    local a="$1"
+    if [[ ! -e "$a" ]]; then echo 1; return; fi
+    for i in `ls -1 *.go.tmpl gen.go values_test.go`
+    do
+        if [[ "$a" -ot "$i" ]]; then echo 1; return; fi
+    done
+}
+
+# _prependbt writes file $2 as the contents of file $1 prefixed with a
+# "// +build generated" build tag, then deletes $1.
+_prependbt() {
+    cat > ${2} <<EOF
+// +build generated
+
+EOF
+    cat ${1} >> ${2}
+    rm -f ${1}
+}
+
+# _build generates fast-path.go and gen-helper.go.
+# It (re)creates gen.generated.go from the gen-dec/gen-enc templates, emits
+# temporary gen-from-tmpl helpers, then runs them via `go run` to produce the
+# fast-path, gen-helper, sort-slice and mammoth test sources. No-op when
+# outputs are newer than their templates (unless zforce is set).
+_build() {
+    if ! [[ "${zforce}" || $(_ng "fast-path.generated.go") || $(_ng "gen-helper.generated.go") || $(_ng "gen.generated.go") ]]; then return 0; fi
+
+    # optionally back up existing generated files before regenerating.
+    if [ "${zbak}" ]; then
+        _zts=`date '+%m%d%Y_%H%M%S'`
+        _gg=".generated.go"
+        [ -e "gen-helper${_gg}" ] && mv gen-helper${_gg} gen-helper${_gg}__${_zts}.bak
+        [ -e "fast-path${_gg}" ] && mv fast-path${_gg} fast-path${_gg}__${_zts}.bak
+        [ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak
+    fi
+    rm -f gen-helper.generated.go fast-path.generated.go gen.generated.go \
+        *safe.generated.go *_generated_test.go *.generated_ffjson_expose.go
+
+    # embed the template files as backquoted string constants in gen.generated.go.
+    cat > gen.generated.go <<EOF
+// +build codecgen.exec
+
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
+
+const genDecMapTmpl = \`
+EOF
+    cat >> gen.generated.go < gen-dec-map.go.tmpl
+    cat >> gen.generated.go <<EOF
+\`
+
+const genDecListTmpl = \`
+EOF
+    cat >> gen.generated.go < gen-dec-array.go.tmpl
+    cat >> gen.generated.go <<EOF
+\`
+
+const genEncChanTmpl = \`
+EOF
+    cat >> gen.generated.go < gen-enc-chan.go.tmpl
+    cat >> gen.generated.go <<EOF
+\`
+EOF
+    cat > gen-from-tmpl.codec.generated.go <<EOF
+package codec
+func GenRunTmpl2Go(in, out string) { genRunTmpl2Go(in, out) }
+func GenRunSortTmpl2Go(in, out string) { genRunSortTmpl2Go(in, out) }
+EOF
+
+    # stub xxxRv and xxxRvSlice creation, before you create it
+    cat > gen-from-tmpl.sort-slice-stubs.generated.go <<EOF
+// +build codecgen.sort_slice
+
+package codec
+
+import "reflect"
+import "time"
+
+EOF
+
+    for i in string bool uint64 int64 float64 bytes time; do
+        local i2=$i
+        case $i in
+        'time' ) i2="time.Time";;
+        'bytes' ) i2="[]byte";;
+        esac
+
+        cat >> gen-from-tmpl.sort-slice-stubs.generated.go <<EOF
+type ${i}Rv struct { v ${i2}; r reflect.Value }
+
+type ${i}RvSlice []${i}Rv
+
+func (${i}RvSlice) Len() int { return 0 }
+func (${i}RvSlice) Less(i, j int) bool { return false }
+func (${i}RvSlice) Swap(i, j int) {}
+
+type ${i}Intf struct { v ${i2}; i interface{} }
+
+type ${i}IntfSlice []${i}Intf
+
+func (${i}IntfSlice) Len() int { return 0 }
+func (${i}IntfSlice) Less(i, j int) bool { return false }
+func (${i}IntfSlice) Swap(i, j int) {}
+
+EOF
+    done
+
+    sed -e 's+// __DO_NOT_REMOVE__NEEDED_FOR_REPLACING__IMPORT_PATH__FOR_CODEC_BENCH__+import . "github.com/ugorji/go/codec"+' \
+        shared_test.go > bench/shared_test.go
+
+    # explicitly return 0 if this passes, else return 1
+    local btags="codec.notfastpath codec.safe codecgen.exec"
+    rm -f sort-slice.generated.go fast-path.generated.go gen-helper.generated.go mammoth_generated_test.go mammoth2_generated_test.go
+
+    cat > gen-from-tmpl.sort-slice.generated.go <<EOF
+// +build ignore
+
+package main
+
+import "${zpkg}"
+
+func main() {
+codec.GenRunSortTmpl2Go("sort-slice.go.tmpl", "sort-slice.generated.go")
+}
+EOF
+
+    ${gocmd} run -tags "$btags codecgen.sort_slice" gen-from-tmpl.sort-slice.generated.go || return 1
+    rm -f gen-from-tmpl.sort-slice.generated.go
+
+    cat > gen-from-tmpl.generated.go <<EOF
+// +build ignore
+
+package main
+
+import "${zpkg}"
+
+func main() {
+codec.GenRunTmpl2Go("fast-path.go.tmpl", "fast-path.generated.go")
+codec.GenRunTmpl2Go("gen-helper.go.tmpl", "gen-helper.generated.go")
+codec.GenRunTmpl2Go("mammoth-test.go.tmpl", "mammoth_generated_test.go")
+codec.GenRunTmpl2Go("mammoth2-test.go.tmpl", "mammoth2_generated_test.go")
+}
+EOF
+
+    ${gocmd} run -tags "$btags" gen-from-tmpl.generated.go || return 1
+    rm -f gen-from-tmpl.generated.go
+
+    rm -f gen-from-tmpl.*generated.go
+    return 0
+}
+
+# _codegenerators builds the codecgen tool (if stale) and runs it to produce
+# the values_codecgen and mammoth2_codecgen generated test files. Skipped when
+# outputs are up to date and zforce is not set.
+_codegenerators() {
+    local c5="_generated_test.go"
+    local c7="$PWD/codecgen"
+    local c8="$c7/__codecgen"
+    local c9="codecgen-scratch.go"
+
+    if ! [[ $zforce || $(_ng "values_codecgen${c5}") ]]; then return 0; fi
+
+    # Note: ensure you run the codecgen for this codebase/directory i.e. ./codecgen/codecgen
+    true &&
+        echo "codecgen ... " &&
+        if [[ $zforce || ! -f "$c8" || "$c7/gen.go" -nt "$c8" ]]; then
+            echo "rebuilding codecgen ... " && ( cd codecgen && ${gocmd} build -o $c8 ${zargs[*]} . )
+        fi &&
+        $c8 -rt 'codecgen' -t 'codecgen generated' -o "values_codecgen${c5}" -d 19780 "$zfin" "$zfin2" &&
+        cp mammoth2_generated_test.go $c9 &&
+        $c8 -t 'codecgen,!codec.notfastpath,!codec.notmammoth generated,!codec.notfastpath,!codec.notmammoth' -o "mammoth2_codecgen${c5}" -d 19781 "mammoth2_generated_test.go" &&
+        rm -f $c9 &&
+        echo "generators done!"
+}
+
+# _prebuild orchestrates the full generation pipeline: _build (templates),
+# copying the values test files to their generated names, then the codecgen
+# generators. Cleans up the temporary copies on exit; returns non-zero on
+# any failure.
+_prebuild() {
+    echo "prebuild: zforce: $zforce"
+    local d="$PWD"
+    local zfin="test_values.generated.go"
+    local zfin2="test_values_flex.generated.go"
+    local zpkg="github.com/ugorji/go/codec"
+    local returncode=1
+
+    # zpkg=${d##*/src/}
+    # zgobase=${d%%/src/*}
+    # rm -f *_generated_test.go
+    rm -f codecgen-*.go &&
+        _build &&
+        cp $d/values_test.go $d/$zfin &&
+        cp $d/values_flex_test.go $d/$zfin2 &&
+        _codegenerators &&
+        if [[ "$(type -t _codegenerators_external )" = "function" ]]; then _codegenerators_external ; fi &&
+        if [[ $zforce ]]; then ${gocmd} install ${zargs[*]} .; fi &&
+        returncode=0 &&
+        echo "prebuild done successfully"
+    rm -f $d/$zfin $d/$zfin2
+    return $returncode
+    # unset zfin zfin2 zpkg
+}
+
+# _make force-runs the full generation pipeline and installs both the
+# codecgen tool and the package, restoring the previous zforce afterwards.
+_make() {
+    local makeforce=${zforce}
+    zforce=1
+    (cd codecgen && ${gocmd} install ${zargs[*]} .) && _prebuild && ${gocmd} install ${zargs[*]} .
+    zforce=${makeforce}
+}
+
+# _clean removes the temporary generated files left behind by _build/_prebuild.
+_clean() {
+    rm -f \
+        gen-from-tmpl.*generated.go \
+        codecgen-*.go \
+        test_values.generated.go test_values_flex.generated.go
+}
+
+# _release runs the pre-release validation suite: for each supported Go
+# release branch (and master), it rebuilds the Go SDK from $GOROOT, then runs
+# _prebuild and _tests. Requires GOROOT and GOROOT_BOOTSTRAP; logs to
+# make.release.out; aborts on the first failure.
+_release() {
+    local reply
+    read -p "Pre-release validation takes a few minutes and MUST be run from within GOPATH/src. Confirm y/n? " -n 1 -r reply
+    echo
+    if [[ ! $reply =~ ^[Yy]$ ]]; then return 1; fi
+
+    # expects GOROOT, GOROOT_BOOTSTRAP to have been set.
+    if [[ -z "${GOROOT// }" || -z "${GOROOT_BOOTSTRAP// }" ]]; then return 1; fi
+    # (cd $GOROOT && git checkout -f master && git pull && git reset --hard)
+    (cd $GOROOT && git pull)
+    local f=`pwd`/make.release.out
+    cat > $f <<EOF
+========== `date` ===========
+EOF
+    # # go 1.6 and below kept giving memory errors on Mac OS X during SDK build or go run execution,
+    # # that is fine, as we only explicitly test the last 3 releases and tip (2 years).
+    local makeforce=${zforce}
+    zforce=1
+    for i in 1.10 1.11 1.12 master
+    do
+        echo "*********** $i ***********" >>$f
+        if [[ "$i" != "master" ]]; then i="release-branch.go$i"; fi
+        (false ||
+            (echo "===== BUILDING GO SDK for branch: $i ... =====" &&
+                cd $GOROOT &&
+                git checkout -f $i && git reset --hard && git clean -f . &&
+                cd src && ./make.bash >>$f 2>&1 && sleep 1 ) ) &&
+            echo "===== GO SDK BUILD DONE =====" &&
+            _prebuild &&
+            echo "===== PREBUILD DONE with exit: $? =====" &&
+            _tests "$@"
+        if [[ "$?" != 0 ]]; then return 1; fi
+    done
+    zforce=${makeforce}
+    echo "++++++++ RELEASE TEST SUITES ALL PASSED ++++++++"
+}
+
+# _usage prints the command-line help, then delegates to _usage_run if an
+# external wrapper defined one.
+_usage() {
+    # hidden args:
+    # -pf [p=prebuild (f=force)]
+
+    cat <<EOF
+primary usage: $0
+  -[tesow m n l d] -> [t=tests (e=extra, s=short, o=cover, w=wait), m=make, n=inlining diagnostics, l=mid-stack inlining, d=race detector]
+  -v -> v=verbose
+EOF
+    if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi
+}
+
+# _main parses the command-line flags (see _usage), accumulating modifier
+# flags into zargs/ztestargs/z* variables, then dispatches to the action
+# selected by the primary flag (tests, make, release, prebuild, clean, ...).
+_main() {
+    if [[ -z "$1" ]]; then _usage; return 1; fi
+    local x # determines the main action to run in this build
+    local zforce # force
+    local zcover # generate cover profile and show in browser when done
+    local zwait # run tests in sequence, not parallel ie wait for one to finish before starting another
+    local zextra # means run extra (python based tests, etc) during testing
+
+    local ztestargs=()
+    local zargs=()
+    local zverbose=()
+    local zbenchflags=""
+
+    local gocmd=${MYGOCMD:-go}
+
+    OPTIND=1
+    while getopts ":cetmnrgpfvldsowkxyzb:" flag
+    do
+        case "x$flag" in
+        'xo') zcover=1 ;;
+        'xe') zextra=1 ;;
+        'xw') zwait=1 ;;
+        'xf') zforce=1 ;;
+        'xs') ztestargs+=("-short") ;;
+        'xv') zverbose+=(1) ;;
+        'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;;
+        'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;;
+        'xd') zargs+=("-race") ;;
+        'xb') x='b'; zbenchflags=${OPTARG} ;;
+        x\?) _usage; return 1 ;;
+        *) x=$flag ;;
+        esac
+    done
+    shift $((OPTIND-1))
+    # echo ">>>> _main: extra args: $@"
+    case "x$x" in
+    'xt') _tests "$@" ;;
+    'xm') _make "$@" ;;
+    'xr') _release "$@" ;;
+    'xg') _go ;;
+    'xp') _prebuild "$@" ;;
+    'xc') _clean "$@" ;;
+    'xx') _analyze_checks "$@" ;;
+    'xy') _analyze_debug_types "$@" ;;
+    'xz') _analyze_do_inlining_and_more "$@" ;;
+    'xk') _go_compiler_validation_suite ;;
+    'xb') _bench "$@" ;;
+    esac
+    # unset zforce zargs zbenchflags
+}
+
+[ "." = `dirname $0` ] && _main "$@"
+
diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go
new file mode 100644
index 000000000..0be358ec4
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/cbor.go
@@ -0,0 +1,949 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+ "time"
+)
+
+// major
+const (
+ cborMajorUint byte = iota
+ cborMajorNegInt
+ cborMajorBytes
+ cborMajorString
+ cborMajorArray
+ cborMajorMap
+ cborMajorTag
+ cborMajorSimpleOrFloat
+)
+
+// simple
+const (
+ cborBdFalse byte = 0xf4 + iota
+ cborBdTrue
+ cborBdNil
+ cborBdUndefined
+ cborBdExt
+ cborBdFloat16
+ cborBdFloat32
+ cborBdFloat64
+)
+
+// indefinite
+const (
+ cborBdIndefiniteBytes byte = 0x5f
+ cborBdIndefiniteString byte = 0x7f
+ cborBdIndefiniteArray byte = 0x9f
+ cborBdIndefiniteMap byte = 0xbf
+ cborBdBreak byte = 0xff
+)
+
+// These define some in-stream descriptors for
+// manual encoding e.g. when doing explicit indefinite-length
+const (
+ CborStreamBytes byte = 0x5f
+ CborStreamString byte = 0x7f
+ CborStreamArray byte = 0x9f
+ CborStreamMap byte = 0xbf
+ CborStreamBreak byte = 0xff
+)
+
+// base values
+const (
+ cborBaseUint byte = 0x00
+ cborBaseNegInt byte = 0x20
+ cborBaseBytes byte = 0x40
+ cborBaseString byte = 0x60
+ cborBaseArray byte = 0x80
+ cborBaseMap byte = 0xa0
+ cborBaseTag byte = 0xc0
+ cborBaseSimple byte = 0xe0
+)
+
+// const (
+// cborSelfDesrTag byte = 0xd9
+// cborSelfDesrTag2 byte = 0xd9
+// cborSelfDesrTag3 byte = 0xf7
+// )
+
+var (
+ cbordescSimpleNames = map[byte]string{
+ cborBdNil: "nil",
+ cborBdFalse: "false",
+ cborBdTrue: "true",
+ cborBdFloat16: "float",
+ cborBdFloat32: "float",
+ cborBdFloat64: "float",
+ cborBdBreak: "break",
+ }
+ cbordescIndefNames = map[byte]string{
+ cborBdIndefiniteBytes: "bytes*",
+ cborBdIndefiniteString: "string*",
+ cborBdIndefiniteArray: "array*",
+ cborBdIndefiniteMap: "map*",
+ }
+ cbordescMajorNames = map[byte]string{
+ cborMajorUint: "(u)int",
+ cborMajorNegInt: "int",
+ cborMajorBytes: "bytes",
+ cborMajorString: "string",
+ cborMajorArray: "array",
+ cborMajorMap: "map",
+ cborMajorTag: "tag",
+ cborMajorSimpleOrFloat: "simple",
+ }
+)
+
+func cbordesc(bd byte) (s string) {
+ bm := bd >> 5
+ if bm == cborMajorSimpleOrFloat {
+ s = cbordescSimpleNames[bd]
+ } else {
+ s = cbordescMajorNames[bm]
+ if s == "" {
+ s = cbordescIndefNames[bd]
+ }
+ }
+ if s == "" {
+ s = "unknown"
+ }
+ return
+}
+
+// -------------------
+
+type cborEncDriver struct {
+ noBuiltInTypes
+ encDriverNoState
+ encDriverNoopContainerWriter
+ h *CborHandle
+
+ e Encoder
+}
+
+func (e *cborEncDriver) encoder() *Encoder {
+ return &e.e
+}
+
+func (e *cborEncDriver) EncodeNil() {
+ e.e.encWr.writen1(cborBdNil)
+}
+
+func (e *cborEncDriver) EncodeBool(b bool) {
+ if b {
+ e.e.encWr.writen1(cborBdTrue)
+ } else {
+ e.e.encWr.writen1(cborBdFalse)
+ }
+}
+
+func (e *cborEncDriver) EncodeFloat32(f float32) {
+ b := math.Float32bits(f)
+ if e.h.OptimumSize {
+ if h := floatToHalfFloatBits(b); halfFloatToFloatBits(h) == b {
+ e.e.encWr.writen1(cborBdFloat16)
+ bigen.writeUint16(e.e.w(), h)
+ return
+ }
+ }
+ e.e.encWr.writen1(cborBdFloat32)
+ bigen.writeUint32(e.e.w(), b)
+}
+
+func (e *cborEncDriver) EncodeFloat64(f float64) {
+ if e.h.OptimumSize {
+ if f32 := float32(f); float64(f32) == f {
+ e.EncodeFloat32(f32)
+ return
+ }
+ }
+ e.e.encWr.writen1(cborBdFloat64)
+ bigen.writeUint64(e.e.w(), math.Float64bits(f))
+}
+
+func (e *cborEncDriver) encUint(v uint64, bd byte) {
+ if v <= 0x17 {
+ e.e.encWr.writen1(byte(v) + bd)
+ } else if v <= math.MaxUint8 {
+ e.e.encWr.writen2(bd+0x18, uint8(v))
+ } else if v <= math.MaxUint16 {
+ e.e.encWr.writen1(bd + 0x19)
+ bigen.writeUint16(e.e.w(), uint16(v))
+ } else if v <= math.MaxUint32 {
+ e.e.encWr.writen1(bd + 0x1a)
+ bigen.writeUint32(e.e.w(), uint32(v))
+ } else { // if v <= math.MaxUint64 {
+ e.e.encWr.writen1(bd + 0x1b)
+ bigen.writeUint64(e.e.w(), v)
+ }
+}
+
+func (e *cborEncDriver) EncodeInt(v int64) {
+ if v < 0 {
+ e.encUint(uint64(-1-v), cborBaseNegInt)
+ } else {
+ e.encUint(uint64(v), cborBaseUint)
+ }
+}
+
+func (e *cborEncDriver) EncodeUint(v uint64) {
+ e.encUint(v, cborBaseUint)
+}
+
+func (e *cborEncDriver) encLen(bd byte, length int) {
+ e.encUint(uint64(length), bd)
+}
+
+func (e *cborEncDriver) EncodeTime(t time.Time) {
+ if t.IsZero() {
+ e.EncodeNil()
+ } else if e.h.TimeRFC3339 {
+ e.encUint(0, cborBaseTag)
+ e.encStringBytesS(cborBaseString, t.Format(time.RFC3339Nano))
+ } else {
+ e.encUint(1, cborBaseTag)
+ t = t.UTC().Round(time.Microsecond)
+ sec, nsec := t.Unix(), uint64(t.Nanosecond())
+ if nsec == 0 {
+ e.EncodeInt(sec)
+ } else {
+ e.EncodeFloat64(float64(sec) + float64(nsec)/1e9)
+ }
+ }
+}
+
+func (e *cborEncDriver) EncodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
+ e.encUint(uint64(xtag), cborBaseTag)
+ if ext == SelfExt {
+ e.e.encodeValue(baseRV(rv), e.h.fnNoExt(basetype))
+ } else if v := ext.ConvertExt(rv); v == nil {
+ e.EncodeNil()
+ } else {
+ e.e.encode(v)
+ }
+}
+
+func (e *cborEncDriver) EncodeRawExt(re *RawExt) {
+ e.encUint(uint64(re.Tag), cborBaseTag)
+ // only encodes re.Value (never re.Data)
+ if re.Value != nil {
+ e.e.encode(re.Value)
+ } else {
+ e.EncodeNil()
+ }
+}
+
+func (e *cborEncDriver) WriteArrayStart(length int) {
+ if e.h.IndefiniteLength {
+ e.e.encWr.writen1(cborBdIndefiniteArray)
+ } else {
+ e.encLen(cborBaseArray, length)
+ }
+}
+
+func (e *cborEncDriver) WriteMapStart(length int) {
+ if e.h.IndefiniteLength {
+ e.e.encWr.writen1(cborBdIndefiniteMap)
+ } else {
+ e.encLen(cborBaseMap, length)
+ }
+}
+
+func (e *cborEncDriver) WriteMapEnd() {
+ if e.h.IndefiniteLength {
+ e.e.encWr.writen1(cborBdBreak)
+ }
+}
+
+func (e *cborEncDriver) WriteArrayEnd() {
+ if e.h.IndefiniteLength {
+ e.e.encWr.writen1(cborBdBreak)
+ }
+}
+
+func (e *cborEncDriver) EncodeString(v string) {
+ bb := cborBaseString
+ if e.h.StringToRaw {
+ bb = cborBaseBytes
+ }
+ e.encStringBytesS(bb, v)
+}
+
+func (e *cborEncDriver) EncodeStringBytesRaw(v []byte) {
+ if v == nil {
+ e.EncodeNil()
+ } else {
+ e.encStringBytesS(cborBaseBytes, stringView(v))
+ }
+}
+
+func (e *cborEncDriver) encStringBytesS(bb byte, v string) {
+ if e.h.IndefiniteLength {
+ if bb == cborBaseBytes {
+ e.e.encWr.writen1(cborBdIndefiniteBytes)
+ } else {
+ e.e.encWr.writen1(cborBdIndefiniteString)
+ }
+ var vlen uint = uint(len(v))
+ blen := vlen / 4
+ if blen == 0 {
+ blen = 64
+ } else if blen > 1024 {
+ blen = 1024
+ }
+ for i := uint(0); i < vlen; {
+ var v2 string
+ i2 := i + blen
+ if i2 >= i && i2 < vlen {
+ v2 = v[i:i2]
+ } else {
+ v2 = v[i:]
+ }
+ e.encLen(bb, len(v2))
+ e.e.encWr.writestr(v2)
+ i = i2
+ }
+ e.e.encWr.writen1(cborBdBreak)
+ } else {
+ e.encLen(bb, len(v))
+ e.e.encWr.writestr(v)
+ }
+}
+
+// ----------------------
+
+type cborDecDriver struct {
+ decDriverNoopContainerReader
+ decDriverNoopNumberHelper
+ h *CborHandle
+ bdAndBdread
+ st bool // skip tags
+ _ bool // found nil
+ noBuiltInTypes
+ d Decoder
+}
+
+func (d *cborDecDriver) decoder() *Decoder {
+ return &d.d
+}
+
+func (d *cborDecDriver) descBd() string {
+ return sprintf("%v (%s)", d.bd, cbordesc(d.bd))
+}
+
+func (d *cborDecDriver) readNextBd() {
+ d.bd = d.d.decRd.readn1()
+ d.bdRead = true
+}
+
+func (d *cborDecDriver) advanceNil() (null bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdNil || d.bd == cborBdUndefined {
+ d.bdRead = false
+ return true // null = true
+ }
+ return
+}
+
+func (d *cborDecDriver) TryNil() bool {
+ return d.advanceNil()
+}
+
+// skipTags is called to skip any tags in the stream.
+//
+// Since any value can be tagged, then we should call skipTags
+// before any value is decoded.
+//
+// By definition, skipTags should not be called before
+// checking for break, or nil or undefined.
+func (d *cborDecDriver) skipTags() {
+ for d.bd>>5 == cborMajorTag {
+ d.decUint()
+ d.bd = d.d.decRd.readn1()
+ }
+}
+
+func (d *cborDecDriver) ContainerType() (vt valueType) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.st {
+ d.skipTags()
+ }
+ if d.bd == cborBdNil {
+ d.bdRead = false // always consume nil after seeing it in container type
+ return valueTypeNil
+ }
+ major := d.bd >> 5
+ if major == cborMajorBytes {
+ return valueTypeBytes
+ } else if major == cborMajorString {
+ return valueTypeString
+ } else if major == cborMajorArray {
+ return valueTypeArray
+ } else if major == cborMajorMap {
+ return valueTypeMap
+ }
+ return valueTypeUnset
+}
+
+func (d *cborDecDriver) CheckBreak() (v bool) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ if d.bd == cborBdBreak {
+ d.bdRead = false
+ v = true
+ }
+ return
+}
+
+func (d *cborDecDriver) decUint() (ui uint64) {
+ v := d.bd & 0x1f
+ if v <= 0x17 {
+ ui = uint64(v)
+ } else if v == 0x18 {
+ ui = uint64(d.d.decRd.readn1())
+ } else if v == 0x19 {
+ ui = uint64(bigen.Uint16(d.d.decRd.readn2()))
+ } else if v == 0x1a {
+ ui = uint64(bigen.Uint32(d.d.decRd.readn4()))
+ } else if v == 0x1b {
+ ui = uint64(bigen.Uint64(d.d.decRd.readn8()))
+ } else {
+ d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
+ }
+ return
+}
+
+func (d *cborDecDriver) decLen() int {
+ return int(d.decUint())
+}
+
+func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
+ d.bdRead = false
+ for !d.CheckBreak() {
+ if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorString {
+ d.d.errorf("invalid indefinite string/bytes %x (%s); got major %v, expected %v or %v",
+ d.bd, cbordesc(d.bd), major, cborMajorBytes, cborMajorString)
+ }
+ n := uint(d.decLen())
+ oldLen := uint(len(bs))
+ newLen := oldLen + n
+ if newLen > uint(cap(bs)) {
+ bs2 := make([]byte, newLen, 2*uint(cap(bs))+n)
+ copy(bs2, bs)
+ bs = bs2
+ } else {
+ bs = bs[:newLen]
+ }
+ d.d.decRd.readb(bs[oldLen:newLen])
+ d.bdRead = false
+ }
+ d.bdRead = false
+ return bs
+}
+
+func (d *cborDecDriver) decFloat() (f float64, ok bool) {
+ ok = true
+ switch d.bd {
+ case cborBdFloat16:
+ f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.d.decRd.readn2()))))
+ case cborBdFloat32:
+ f = float64(math.Float32frombits(bigen.Uint32(d.d.decRd.readn4())))
+ case cborBdFloat64:
+ f = math.Float64frombits(bigen.Uint64(d.d.decRd.readn8()))
+ default:
+ ok = false
+ }
+ return
+}
+
+func (d *cborDecDriver) decInteger() (ui uint64, neg, ok bool) {
+ ok = true
+ switch d.bd >> 5 {
+ case cborMajorUint:
+ ui = d.decUint()
+ case cborMajorNegInt:
+ ui = d.decUint()
+ neg = true
+ default:
+ ok = false
+ }
+ return
+}
+
+func (d *cborDecDriver) DecodeInt64() (i int64) {
+ if d.advanceNil() {
+ return
+ }
+ if d.st {
+ d.skipTags()
+ }
+ i = decNegintPosintFloatNumberHelper{&d.d}.int64(d.decInteger())
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeUint64() (ui uint64) {
+ if d.advanceNil() {
+ return
+ }
+ if d.st {
+ d.skipTags()
+ }
+ ui = decNegintPosintFloatNumberHelper{&d.d}.uint64(d.decInteger())
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) DecodeFloat64() (f float64) {
+ if d.advanceNil() {
+ return
+ }
+ if d.st {
+ d.skipTags()
+ }
+ f = decNegintPosintFloatNumberHelper{&d.d}.float64(d.decFloat())
+ d.bdRead = false
+ return
+}
+
+// bool can be decoded from bool only (single byte).
+func (d *cborDecDriver) DecodeBool() (b bool) {
+ if d.advanceNil() {
+ return
+ }
+ if d.st {
+ d.skipTags()
+ }
+ if d.bd == cborBdTrue {
+ b = true
+ } else if d.bd == cborBdFalse {
+ } else {
+ d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd))
+ }
+ d.bdRead = false
+ return
+}
+
+func (d *cborDecDriver) ReadMapStart() (length int) {
+ if d.advanceNil() {
+ return containerLenNil
+ }
+ if d.st {
+ d.skipTags()
+ }
+ d.bdRead = false
+ if d.bd == cborBdIndefiniteMap {
+ return containerLenUnknown
+ }
+ if d.bd>>5 != cborMajorMap {
+ d.d.errorf("error reading map; got major type: %x, expected %x/%s", d.bd>>5, cborMajorMap, cbordesc(d.bd))
+ }
+ return d.decLen()
+}
+
+func (d *cborDecDriver) ReadArrayStart() (length int) {
+ if d.advanceNil() {
+ return containerLenNil
+ }
+ if d.st {
+ d.skipTags()
+ }
+ d.bdRead = false
+ if d.bd == cborBdIndefiniteArray {
+ return containerLenUnknown
+ }
+ if d.bd>>5 != cborMajorArray {
+ d.d.errorf("invalid array; got major type: %x, expect: %x/%s", d.bd>>5, cborMajorArray, cbordesc(d.bd))
+ }
+ return d.decLen()
+}
+
+func (d *cborDecDriver) DecodeBytes(bs []byte) (bsOut []byte) {
+ d.d.decByteState = decByteStateNone
+ if d.advanceNil() {
+ return
+ }
+ if d.st {
+ d.skipTags()
+ }
+ if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
+ d.bdRead = false
+ if bs == nil {
+ d.d.decByteState = decByteStateReuseBuf
+ return d.decAppendIndefiniteBytes(d.d.b[:0])
+ }
+ return d.decAppendIndefiniteBytes(bs[:0])
+ }
+ if d.bd == cborBdIndefiniteArray {
+ d.bdRead = false
+ if bs == nil {
+ d.d.decByteState = decByteStateReuseBuf
+ bs = d.d.b[:0]
+ } else {
+ bs = bs[:0]
+ }
+ for !d.CheckBreak() {
+ bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8)))
+ }
+ return bs
+ }
+ if d.bd>>5 == cborMajorArray {
+ d.bdRead = false
+ if bs == nil {
+ d.d.decByteState = decByteStateReuseBuf
+ bs = d.d.b[:]
+ }
+ slen := d.decLen()
+ var changed bool
+ if bs, changed = usableByteSlice(bs, slen); changed {
+ d.d.decByteState = decByteStateNone
+ }
+ for i := 0; i < len(bs); i++ {
+ bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8))
+ }
+ return bs
+ }
+ clen := d.decLen()
+ d.bdRead = false
+ if d.d.zerocopy() {
+ d.d.decByteState = decByteStateZerocopy
+ return d.d.decRd.rb.readx(uint(clen))
+ }
+ if bs == nil {
+ d.d.decByteState = decByteStateReuseBuf
+ bs = d.d.b[:]
+ }
+ return decByteSlice(d.d.r(), clen, d.h.MaxInitLen, bs)
+}
+
+func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) {
+ return d.DecodeBytes(nil)
+}
+
+func (d *cborDecDriver) DecodeTime() (t time.Time) {
+ if d.advanceNil() {
+ return
+ }
+ if d.bd>>5 != cborMajorTag {
+ d.d.errorf("error reading tag; expected major type: %x, got: %x", cborMajorTag, d.bd>>5)
+ }
+ xtag := d.decUint()
+ d.bdRead = false
+ return d.decodeTime(xtag)
+}
+
+func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) {
+ switch xtag {
+ case 0:
+ var err error
+ t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes()))
+ d.d.onerror(err)
+ case 1:
+ f1, f2 := math.Modf(d.DecodeFloat64())
+ t = time.Unix(int64(f1), int64(f2*1e9))
+ default:
+ d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag)
+ }
+ t = t.UTC().Round(time.Microsecond)
+ return
+}
+
+func (d *cborDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
+ if d.advanceNil() {
+ return
+ }
+ if d.bd>>5 != cborMajorTag {
+ d.d.errorf("error reading tag; expected major type: %x, got: %x", cborMajorTag, d.bd>>5)
+ }
+ realxtag := d.decUint()
+ d.bdRead = false
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = realxtag
+ d.d.decode(&re.Value)
+ } else if xtag != realxtag {
+ d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
+ } else if ext == SelfExt {
+ d.d.decodeValue(baseRV(rv), d.h.fnNoExt(basetype))
+ } else {
+ d.d.interfaceExtConvertAndDecode(rv, ext)
+ }
+ d.bdRead = false
+}
+
+func (d *cborDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+
+ n := d.d.naked()
+ var decodeFurther bool
+
+ switch d.bd >> 5 {
+ case cborMajorUint:
+ if d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = d.DecodeInt64()
+ } else {
+ n.v = valueTypeUint
+ n.u = d.DecodeUint64()
+ }
+ case cborMajorNegInt:
+ n.v = valueTypeInt
+ n.i = d.DecodeInt64()
+ case cborMajorBytes:
+ d.d.fauxUnionReadRawBytes(false)
+ case cborMajorString:
+ n.v = valueTypeString
+ n.s = d.d.stringZC(d.DecodeStringAsBytes())
+ case cborMajorArray:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case cborMajorMap:
+ n.v = valueTypeMap
+ decodeFurther = true
+ case cborMajorTag:
+ n.v = valueTypeExt
+ n.u = d.decUint()
+ n.l = nil
+ if n.u == 0 || n.u == 1 {
+ d.bdRead = false
+ n.v = valueTypeTime
+ n.t = d.decodeTime(n.u)
+ } else if d.st && d.h.getExtForTag(n.u) == nil {
+ // d.skipTags() // no need to call this - tags already skipped
+ d.bdRead = false
+ d.DecodeNaked()
+ return // return when done (as true recursive function)
+ }
+ case cborMajorSimpleOrFloat:
+ switch d.bd {
+ case cborBdNil, cborBdUndefined:
+ n.v = valueTypeNil
+ case cborBdFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case cborBdTrue:
+ n.v = valueTypeBool
+ n.b = true
+ case cborBdFloat16, cborBdFloat32, cborBdFloat64:
+ n.v = valueTypeFloat
+ n.f = d.DecodeFloat64()
+ default:
+ d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+ }
+ default: // should never happen
+ d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
+ }
+ if !decodeFurther {
+ d.bdRead = false
+ }
+}
+
+func (d *cborDecDriver) uintBytes() (v []byte, ui uint64) {
+ // this is only used by nextValueBytes, so it's ok to
+ // use readx and bigenstd here.
+ switch vv := d.bd & 0x1f; vv {
+ case 0x18:
+ v = d.d.decRd.readx(1)
+ ui = uint64(v[0])
+ case 0x19:
+ v = d.d.decRd.readx(2)
+ ui = uint64(bigenstd.Uint16(v))
+ case 0x1a:
+ v = d.d.decRd.readx(4)
+ ui = uint64(bigenstd.Uint32(v))
+ case 0x1b:
+ v = d.d.decRd.readx(8)
+ ui = uint64(bigenstd.Uint64(v))
+ default:
+ if vv > 0x1b {
+ d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
+ }
+ ui = uint64(vv)
+ }
+ return
+}
+
+func (d *cborDecDriver) nextValueBytes(v0 []byte) (v []byte) {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ v = v0
+ var h = decNextValueBytesHelper{d: &d.d}
+ var cursor = d.d.rb.c - 1
+ h.append1(&v, d.bd)
+ v = d.nextValueBytesBdReadR(v)
+ d.bdRead = false
+ h.bytesRdV(&v, cursor)
+ return
+}
+
+func (d *cborDecDriver) nextValueBytesR(v0 []byte) (v []byte) {
+ d.readNextBd()
+ v = v0
+ var h = decNextValueBytesHelper{d: &d.d}
+ h.append1(&v, d.bd)
+ return d.nextValueBytesBdReadR(v)
+}
+
+func (d *cborDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
+ v = v0
+ var h = decNextValueBytesHelper{d: &d.d}
+
+ var bs []byte
+ var ui uint64
+
+ switch d.bd >> 5 {
+ case cborMajorUint, cborMajorNegInt:
+ bs, _ = d.uintBytes()
+ h.appendN(&v, bs...)
+ case cborMajorString, cborMajorBytes:
+ if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
+ for {
+ d.readNextBd()
+ h.append1(&v, d.bd)
+ if d.bd == cborBdBreak {
+ break
+ }
+ bs, ui = d.uintBytes()
+ h.appendN(&v, bs...)
+ h.appendN(&v, d.d.decRd.readx(uint(ui))...)
+ }
+ } else {
+ bs, ui = d.uintBytes()
+ h.appendN(&v, bs...)
+ h.appendN(&v, d.d.decRd.readx(uint(ui))...)
+ }
+ case cborMajorArray:
+ if d.bd == cborBdIndefiniteArray {
+ for {
+ d.readNextBd()
+ h.append1(&v, d.bd)
+ if d.bd == cborBdBreak {
+ break
+ }
+ v = d.nextValueBytesBdReadR(v)
+ }
+ } else {
+ bs, ui = d.uintBytes()
+ h.appendN(&v, bs...)
+ for i := uint64(0); i < ui; i++ {
+ v = d.nextValueBytesR(v)
+ }
+ }
+ case cborMajorMap:
+ if d.bd == cborBdIndefiniteMap {
+ for {
+ d.readNextBd()
+ h.append1(&v, d.bd)
+ if d.bd == cborBdBreak {
+ break
+ }
+ v = d.nextValueBytesBdReadR(v)
+ v = d.nextValueBytesR(v)
+ }
+ } else {
+ bs, ui = d.uintBytes()
+ h.appendN(&v, bs...)
+ for i := uint64(0); i < ui; i++ {
+ v = d.nextValueBytesR(v)
+ v = d.nextValueBytesR(v)
+ }
+ }
+ case cborMajorTag:
+ bs, _ = d.uintBytes()
+ h.appendN(&v, bs...)
+ v = d.nextValueBytesR(v)
+ case cborMajorSimpleOrFloat:
+ switch d.bd {
+ case cborBdNil, cborBdUndefined, cborBdFalse, cborBdTrue: // pass
+ case cborBdFloat16:
+ h.appendN(&v, d.d.decRd.readx(2)...)
+ case cborBdFloat32:
+ h.appendN(&v, d.d.decRd.readx(4)...)
+ case cborBdFloat64:
+ h.appendN(&v, d.d.decRd.readx(8)...)
+ default:
+ d.d.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd)
+ }
+ default: // should never happen
+ d.d.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd)
+ }
+ return
+}
+
+// -------------------------
+
+// CborHandle is a Handle for the CBOR encoding format,
+// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
+//
+// CBOR is comprehensively supported, including support for:
+// - indefinite-length arrays/maps/bytes/strings
+// - (extension) tags in range 0..0xffff (0 .. 65535)
+// - half, single and double-precision floats
+// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
+// - nil, true, false, ...
+// - arrays and maps, bytes and text strings
+//
+// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
+// Users can implement them as needed (using SetExt), including spec-documented ones:
+// - timestamp, BigNum, BigFloat, Decimals,
+// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
+type CborHandle struct {
+ binaryEncodingType
+ // noElemSeparators
+ BasicHandle
+
+ // IndefiniteLength=true, means that we encode using indefinitelength
+ IndefiniteLength bool
+
+ // TimeRFC3339 says to encode time.Time using RFC3339 format.
+ // If unset, we encode time.Time using seconds past epoch.
+ TimeRFC3339 bool
+
+ // SkipUnexpectedTags says to skip over any tags for which extensions are
+ // not defined. This is in keeping with the cbor spec on "Optional Tagging of Items".
+ //
+ // Furthermore, this allows the skipping over of the Self Describing Tag 0xd9d9f7.
+ SkipUnexpectedTags bool
+}
+
+// Name returns the name of the handle: cbor
+func (h *CborHandle) Name() string { return "cbor" }
+
+func (h *CborHandle) desc(bd byte) string { return cbordesc(bd) }
+
+func (h *CborHandle) newEncDriver() encDriver {
+ var e = &cborEncDriver{h: h}
+ e.e.e = e
+ e.e.init(h)
+ e.reset()
+ return e
+}
+
+func (h *CborHandle) newDecDriver() decDriver {
+ d := &cborDecDriver{h: h, st: h.SkipUnexpectedTags}
+ d.d.d = d
+ d.d.cbor = true
+ d.d.init(h)
+ d.reset()
+ return d
+}
+
+func (d *cborDecDriver) reset() {
+ d.bdAndBdread.reset()
+ d.st = d.h.SkipUnexpectedTags
+}
+
+var _ decDriver = (*cborDecDriver)(nil)
+var _ encDriver = (*cborEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/codecgen.go b/vendor/github.com/ugorji/go/codec/codecgen.go
new file mode 100644
index 000000000..49fb8e515
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/codecgen.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build codecgen || generated
+// +build codecgen generated
+
+package codec
+
+// this file sets the codecgen variable to true
+// when the build tag codecgen is set.
+//
+// some tests depend on knowing whether in the context of codecgen or not.
+// For example, some tests should be skipped during codecgen e.g. missing fields tests.
+
+func init() {
+ codecgen = true
+}
diff --git a/vendor/github.com/ugorji/go/codec/decimal.go b/vendor/github.com/ugorji/go/codec/decimal.go
new file mode 100644
index 000000000..6b617f5a9
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/decimal.go
@@ -0,0 +1,491 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "strconv"
+)
+
+// Per go spec, floats are represented in memory as
+// IEEE single or double precision floating point values.
+//
+// We also looked at the source for stdlib math/modf.go,
+// reviewed https://github.com/chewxy/math32
+// and read wikipedia documents describing the formats.
+//
+// It became clear that we could easily look at the bits to determine
+// whether any fraction exists.
+
+func parseFloat32(b []byte) (f float32, err error) {
+ return parseFloat32_custom(b)
+}
+
+func parseFloat64(b []byte) (f float64, err error) {
+ return parseFloat64_custom(b)
+}
+
+func parseFloat32_strconv(b []byte) (f float32, err error) {
+ f64, err := strconv.ParseFloat(stringView(b), 32)
+ f = float32(f64)
+ return
+}
+
+func parseFloat64_strconv(b []byte) (f float64, err error) {
+ return strconv.ParseFloat(stringView(b), 64)
+}
+
+// ------ parseFloat custom below --------
+
+// JSON really supports decimal numbers in base 10 notation, with exponent support.
+//
+// We assume the following:
+// - a lot of floating point numbers in json files will have defined precision
+// (in terms of number of digits after decimal point), etc.
+// - these (referenced above) can be written in exact format.
+//
+// strconv.ParseFloat has some unnecessary overhead which we can do without
+// for the common case:
+//
+// - expensive char-by-char check to see if underscores are in right place
+// - testing for and skipping underscores
+// - check if the string matches ignorecase +/- inf, +/- infinity, nan
+// - support for base 16 (0xFFFF...)
+//
+// The functions below will try a fast-path for floats which can be decoded
+// without any loss of precision, meaning they:
+//
+// - fits within the significand bits of the 32-bits or 64-bits
+// - exponent fits within the exponent value
+// - there is no truncation (any extra numbers are all trailing zeros)
+//
+// To figure out what the values are for maxMantDigits, use this idea below:
+//
+// 2^23 = 838 8608 (between 10^ 6 and 10^ 7) (significand bits of uint32)
+// 2^32 = 42 9496 7296 (between 10^ 9 and 10^10) (full uint32)
+// 2^52 = 4503 5996 2737 0496 (between 10^15 and 10^16) (significand bits of uint64)
+// 2^64 = 1844 6744 0737 0955 1616 (between 10^19 and 10^20) (full uint64)
+//
+// Note: we only allow for up to what can comfortably fit into the significand
+// ignoring the exponent, and we only try to parse iff significand fits.
+
+const (
+ fMaxMultiplierForExactPow10_64 = 1e15
+ fMaxMultiplierForExactPow10_32 = 1e7
+
+ fUint64Cutoff = (1<<64-1)/10 + 1
+ // fUint32Cutoff = (1<<32-1)/10 + 1
+
+ fBase = 10
+)
+
+const (
+ thousand = 1000
+ million = thousand * thousand
+ billion = thousand * million
+ trillion = thousand * billion
+ quadrillion = thousand * trillion
+ quintillion = thousand * quadrillion
+)
+
+// Exact powers of 10.
+var uint64pow10 = [...]uint64{
+ 1, 10, 100,
+ 1 * thousand, 10 * thousand, 100 * thousand,
+ 1 * million, 10 * million, 100 * million,
+ 1 * billion, 10 * billion, 100 * billion,
+ 1 * trillion, 10 * trillion, 100 * trillion,
+ 1 * quadrillion, 10 * quadrillion, 100 * quadrillion,
+ 1 * quintillion, 10 * quintillion,
+}
+var float64pow10 = [...]float64{
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22,
+}
+var float32pow10 = [...]float32{
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10,
+}
+
+type floatinfo struct {
+ mantbits uint8
+
+ // expbits uint8 // (unused)
+ // bias int16 // (unused)
+ // is32bit bool // (unused)
+
+ exactPow10 int8 // Exact powers of ten are <= 10^N (32: 10, 64: 22)
+
+ exactInts int8 // Exact integers are <= 10^N (for non-float, set to 0)
+
+ // maxMantDigits int8 // 10^19 fits in uint64, while 10^9 fits in uint32
+
+ mantCutoffIsUint64Cutoff bool
+
+ mantCutoff uint64
+}
+
+var fi32 = floatinfo{23, 10, 7, false, 1<<23 - 1}
+var fi64 = floatinfo{52, 22, 15, false, 1<<52 - 1}
+
+var fi64u = floatinfo{0, 19, 0, true, fUint64Cutoff}
+
+func noFrac64(fbits uint64) bool {
+ exp := uint64(fbits>>52)&0x7FF - 1023 // uint(x>>shift)&mask - bias
+ // clear top 12+e bits, the integer part; if the rest is 0, then no fraction.
+ return exp < 52 && fbits<<(12+exp) == 0 // means there's no fractional part
+}
+
+func noFrac32(fbits uint32) bool {
+ exp := uint32(fbits>>23)&0xFF - 127 // uint(x>>shift)&mask - bias
+ // clear top 9+e bits, the integer part; if the rest is 0, then no fraction.
+ return exp < 23 && fbits<<(9+exp) == 0 // means there's no fractional part
+}
+
+func strconvParseErr(b []byte, fn string) error {
+ return &strconv.NumError{
+ Func: fn,
+ Err: strconv.ErrSyntax,
+ Num: string(b),
+ }
+}
+
+func parseFloat32_reader(r readFloatResult) (f float32, fail bool) {
+ f = float32(r.mantissa)
+ if r.exp == 0 {
+ } else if r.exp < 0 { // int / 10^k
+ f /= float32pow10[uint8(-r.exp)]
+ } else { // exp > 0
+ if r.exp > fi32.exactPow10 {
+ f *= float32pow10[r.exp-fi32.exactPow10]
+ if f > fMaxMultiplierForExactPow10_32 { // exponent too large - outside range
+ fail = true
+ return // ok = false
+ }
+ f *= float32pow10[fi32.exactPow10]
+ } else {
+ f *= float32pow10[uint8(r.exp)]
+ }
+ }
+ if r.neg {
+ f = -f
+ }
+ return
+}
+
+func parseFloat32_custom(b []byte) (f float32, err error) {
+ r := readFloat(b, fi32)
+ if r.bad {
+ return 0, strconvParseErr(b, "ParseFloat")
+ }
+ if r.ok {
+ f, r.bad = parseFloat32_reader(r)
+ if !r.bad {
+ return
+ }
+ }
+ return parseFloat32_strconv(b)
+}
+
+func parseFloat64_reader(r readFloatResult) (f float64, fail bool) {
+ f = float64(r.mantissa)
+ if r.exp == 0 {
+ } else if r.exp < 0 { // int / 10^k
+ f /= float64pow10[-uint8(r.exp)]
+ } else { // exp > 0
+ if r.exp > fi64.exactPow10 {
+ f *= float64pow10[r.exp-fi64.exactPow10]
+ if f > fMaxMultiplierForExactPow10_64 { // exponent too large - outside range
+ fail = true
+ return
+ }
+ f *= float64pow10[fi64.exactPow10]
+ } else {
+ f *= float64pow10[uint8(r.exp)]
+ }
+ }
+ if r.neg {
+ f = -f
+ }
+ return
+}
+
+func parseFloat64_custom(b []byte) (f float64, err error) {
+ r := readFloat(b, fi64)
+ if r.bad {
+ return 0, strconvParseErr(b, "ParseFloat")
+ }
+ if r.ok {
+ f, r.bad = parseFloat64_reader(r)
+ if !r.bad {
+ return
+ }
+ }
+ return parseFloat64_strconv(b)
+}
+
+func parseUint64_simple(b []byte) (n uint64, ok bool) {
+ var i int
+ var n1 uint64
+ var c uint8
+LOOP:
+ if i < len(b) {
+ c = b[i]
+ // unsigned integers don't overflow well on multiplication, so check cutoff here
+ // e.g. (maxUint64-5)*10 doesn't overflow well ...
+ // if n >= fUint64Cutoff || !isDigitChar(b[i]) { // if c < '0' || c > '9' {
+ if n >= fUint64Cutoff || c < '0' || c > '9' {
+ return
+ } else if c == '0' {
+ n *= fBase
+ } else {
+ n1 = n
+ n = n*fBase + uint64(c-'0')
+ if n < n1 {
+ return
+ }
+ }
+ i++
+ goto LOOP
+ }
+ ok = true
+ return
+}
+
+func parseUint64_reader(r readFloatResult) (f uint64, fail bool) {
+ f = r.mantissa
+ if r.exp == 0 {
+ } else if r.exp < 0 { // int / 10^k
+ if f%uint64pow10[uint8(-r.exp)] != 0 {
+ fail = true
+ } else {
+ f /= uint64pow10[uint8(-r.exp)]
+ }
+ } else { // exp > 0
+ f *= uint64pow10[uint8(r.exp)]
+ }
+ return
+}
+
+func parseInteger_bytes(b []byte) (u uint64, neg, ok bool) {
+ if len(b) == 0 {
+ ok = true
+ return
+ }
+ if b[0] == '-' {
+ if len(b) == 1 {
+ return
+ }
+ neg = true
+ b = b[1:]
+ }
+
+ u, ok = parseUint64_simple(b)
+ if ok {
+ return
+ }
+
+ r := readFloat(b, fi64u)
+ if r.ok {
+ var fail bool
+ u, fail = parseUint64_reader(r)
+ if fail {
+ f, err := parseFloat64(b)
+ if err != nil {
+ return
+ }
+ if !noFrac64(math.Float64bits(f)) {
+ return
+ }
+ u = uint64(f)
+ }
+ ok = true
+ return
+ }
+ return
+}
+
+// parseNumber will return an integer if only composed of [-]?[0-9]+
+// Else it will return a float.
+func parseNumber(b []byte, z *fauxUnion, preferSignedInt bool) (err error) {
+ var ok, neg bool
+ var f uint64
+
+ if len(b) == 0 {
+ return
+ }
+
+ if b[0] == '-' {
+ neg = true
+ f, ok = parseUint64_simple(b[1:])
+ } else {
+ f, ok = parseUint64_simple(b)
+ }
+
+ if ok {
+ if neg {
+ z.v = valueTypeInt
+ if chkOvf.Uint2Int(f, neg) {
+ return strconvParseErr(b, "ParseInt")
+ }
+ z.i = -int64(f)
+ } else if preferSignedInt {
+ z.v = valueTypeInt
+ if chkOvf.Uint2Int(f, neg) {
+ return strconvParseErr(b, "ParseInt")
+ }
+ z.i = int64(f)
+ } else {
+ z.v = valueTypeUint
+ z.u = f
+ }
+ return
+ }
+
+ z.v = valueTypeFloat
+ z.f, err = parseFloat64_custom(b)
+ return
+}
+
+type readFloatResult struct {
+ mantissa uint64
+ exp int8
+ neg bool
+ trunc bool
+ bad bool // bad decimal string
+ hardexp bool // exponent is hard to handle (> 2 digits, etc)
+ ok bool
+ // sawdot bool
+ // sawexp bool
+ //_ [2]bool // padding
+}
+
+func readFloat(s []byte, y floatinfo) (r readFloatResult) {
+ var i uint // uint, so that we eliminate bounds checking
+ var slen = uint(len(s))
+ if slen == 0 {
+ // read an empty string as the zero value
+ // r.bad = true
+ r.ok = true
+ return
+ }
+
+ if s[0] == '-' {
+ r.neg = true
+ i++
+ }
+
+ // we considered punting early if string has length > maxMantDigits, but this doesn't account
+ // for trailing 0's e.g. 700000000000000000000 can be encoded exactly as it is 7e20
+
+ var nd, ndMant, dp int8
+ var sawdot, sawexp bool
+ var xu uint64
+
+LOOP:
+ for ; i < slen; i++ {
+ switch s[i] {
+ case '.':
+ if sawdot {
+ r.bad = true
+ return
+ }
+ sawdot = true
+ dp = nd
+ case 'e', 'E':
+ sawexp = true
+ break LOOP
+ case '0':
+ if nd == 0 {
+ dp--
+ continue LOOP
+ }
+ nd++
+ if r.mantissa < y.mantCutoff {
+ r.mantissa *= fBase
+ ndMant++
+ }
+ case '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ nd++
+ if y.mantCutoffIsUint64Cutoff && r.mantissa < fUint64Cutoff {
+ r.mantissa *= fBase
+ xu = r.mantissa + uint64(s[i]-'0')
+ if xu < r.mantissa {
+ r.trunc = true
+ return
+ }
+ r.mantissa = xu
+ } else if r.mantissa < y.mantCutoff {
+ // mantissa = (mantissa << 1) + (mantissa << 3) + uint64(c-'0')
+ r.mantissa = r.mantissa*fBase + uint64(s[i]-'0')
+ } else {
+ r.trunc = true
+ return
+ }
+ ndMant++
+ default:
+ r.bad = true
+ return
+ }
+ }
+
+ if !sawdot {
+ dp = nd
+ }
+
+ if sawexp {
+ i++
+ if i < slen {
+ var eneg bool
+ if s[i] == '+' {
+ i++
+ } else if s[i] == '-' {
+ i++
+ eneg = true
+ }
+ if i < slen {
+ // for exact match, exponent is 1 or 2 digits (float64: -22 to 37, float32: -1 to 17).
+ // exit quick if exponent is more than 2 digits.
+ if i+2 < slen {
+ r.hardexp = true
+ return
+ }
+ var e int8
+ if s[i] < '0' || s[i] > '9' { // !isDigitChar(s[i]) { //
+ r.bad = true
+ return
+ }
+ e = int8(s[i] - '0')
+ i++
+ if i < slen {
+ if s[i] < '0' || s[i] > '9' { // !isDigitChar(s[i]) { //
+ r.bad = true
+ return
+ }
+ e = e*fBase + int8(s[i]-'0') // (e << 1) + (e << 3) + int8(s[i]-'0')
+ i++
+ }
+ if eneg {
+ dp -= e
+ } else {
+ dp += e
+ }
+ }
+ }
+ }
+
+ if r.mantissa != 0 {
+ r.exp = dp - ndMant
+ // do not set ok=true for cases we cannot handle
+ if r.exp < -y.exactPow10 ||
+ r.exp > y.exactInts+y.exactPow10 ||
+ (y.mantbits != 0 && r.mantissa>>y.mantbits != 0) {
+ r.hardexp = true
+ return
+ }
+ }
+
+ r.ok = true
+ return
+}
diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go
new file mode 100644
index 000000000..d454db09c
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/decode.go
@@ -0,0 +1,2350 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "encoding"
+ "errors"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const msgBadDesc = "unrecognized descriptor byte"
+
+const (
+	decDefMaxDepth = 1024 // maximum depth
+	decDefChanCap = 64 // should be large, as cap cannot be expanded
+	decScratchByteArrayLen = (8 + 2 + 2) * 8 // around cacheLineSize ie ~64, depending on Decoder size
+
+	// MARKER: massage decScratchByteArrayLen to ensure xxxDecDriver structs fit within cacheLine*N
+
+	// decFailNonEmptyIntf configures whether we error
+	// when decoding naked into a non-empty interface.
+	//
+	// Typically, we cannot decode non-nil stream value into
+	// nil interface with methods (e.g. io.Reader).
+	// However, in some scenarios, this should be allowed:
+	//   - MapType
+	//   - SliceType
+	//   - Extensions
+	//
+	// Consequently, we should relax this. Put it behind a const flag for now.
+	decFailNonEmptyIntf = false
+
+	// decUseTransient toggles the transient-value optimization.
+	//
+	// NOTE(review): the original comment read "says that we should not use the
+	// transient optimization", which contradicts the name and the value (true);
+	// usage elsewhere (e.g. `decUseTransient && useTransient`) shows true ENABLES it.
+	//
+	// There's potential for GC corruption or memory overwrites if transient isn't
+	// used carefully, so this flag helps turn it off quickly if needed.
+	//
+	// Use it everywhere needed so we can completely remove unused code blocks.
+	decUseTransient = true
+)
+
+var (
+	errNeedMapOrArrayDecodeToStruct = errors.New("only encoded map or array can decode into struct")
+	errCannotDecodeIntoNil          = errors.New("cannot decode into nil")
+
+	errExpandSliceCannotChange = errors.New("expand slice: cannot change")
+
+	errDecoderNotInitialized = errors.New("Decoder not initialized")
+
+	errDecUnreadByteNothingToRead   = errors.New("cannot unread - nothing has been read")
+	errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read")
+	errDecUnreadByteUnknown         = errors.New("cannot unread - reason unknown")
+	errMaxDepthExceeded             = errors.New("maximum decoding depth exceeded")
+)
+
+// decByteState tracks where the []byte returned by the last call
+// to DecodeBytes or DecodeStringAsByte came from
+type decByteState uint8
+
+const (
+	decByteStateNone     decByteState = iota
+	decByteStateZerocopy // view into []byte that we are decoding from
+	decByteStateReuseBuf // view into transient buffer used internally by decDriver
+	// decByteStateNewAlloc
+)
+
+// decNotDecodeableReason explains why a reflect.Value could not be decoded into
+// (see isDecodeable).
+type decNotDecodeableReason uint8
+
+const (
+	decNotDecodeableReasonUnknown decNotDecodeableReason = iota
+	decNotDecodeableReasonBadKind      // kind is not supported for decoding
+	decNotDecodeableReasonNonAddrValue // value is not addressable, so cannot be set
+	decNotDecodeableReasonNilReference // nil pointer/map/chan reference
+)
+
+// decDriver is the format-specific decoding engine (json, cbor, msgpack, binc,
+// simple, ...). The generic Decoder drives one of these to read tokens/values
+// from the stream.
+type decDriver interface {
+	// this will check if the next token is a break.
+	CheckBreak() bool
+
+	// TryNil tries to decode as nil.
+	// If a nil is in the stream, it consumes it and returns true.
+	//
+	// Note: if TryNil returns true, that must be handled.
+	TryNil() bool
+
+	// ContainerType returns one of: Bytes, String, Nil, Slice or Map.
+	//
+	// Return unSet if not known.
+	//
+	// Note: Implementations MUST fully consume sentinel container types, specifically Nil.
+	ContainerType() (vt valueType)
+
+	// DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
+	// For maps and arrays, it will not do the decoding in-band, but will signal
+	// the decoder, so that is done later, by setting the fauxUnion.valueType field.
+	//
+	// Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
+	// for extensions, DecodeNaked must read the tag and the []byte if it exists.
+	// if the []byte is not read, then kInterfaceNaked will treat it as a Handle
+	// that stores the subsequent value in-band, and complete reading the RawExt.
+	//
+	// extensions should also use readx to decode them, for efficiency.
+	// kInterface will extract the detached byte slice if it has to pass it outside its realm.
+	DecodeNaked()
+
+	DecodeInt64() (i int64)
+	DecodeUint64() (ui uint64)
+
+	DecodeFloat64() (f float64)
+	DecodeBool() (b bool)
+
+	// DecodeStringAsBytes returns the bytes representing a string.
+	// It will return a view into scratch buffer or input []byte (if applicable).
+	//
+	// Note: This can also decode symbols, if supported.
+	//
+	// Users should consume it right away and not store it for later use.
+	DecodeStringAsBytes() (v []byte)
+
+	// DecodeBytes returns the bytes representing a binary value.
+	// It will return a view into scratch buffer or input []byte (if applicable).
+	//
+	// All implementations must honor the contract below:
+	//    if ZeroCopy and applicable, return a view into input []byte we are decoding from
+	//    else if in == nil,          return a view into scratch buffer
+	//    else                        append decoded value to in[:0] and return that
+	//    (this can be simulated by passing []byte{} as in parameter)
+	//
+	// Implementations must also update Decoder.decByteState on each call to
+	// DecodeBytes or DecodeStringAsBytes. Some callers may check that and work appropriately.
+	//
+	// Note: DecodeBytes may decode past the length of the passed byte slice, up to the cap.
+	// Consequently, it is ok to pass a zero-len slice to DecodeBytes, as the returned
+	// byte slice will have the appropriate length.
+	DecodeBytes(in []byte) (out []byte)
+	// DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
+
+	// DecodeExt will decode into a *RawExt or into an extension.
+	DecodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext)
+	// decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
+
+	DecodeTime() (t time.Time)
+
+	// ReadArrayStart will return the length of the array.
+	// If the format doesn't prefix the length, it returns containerLenUnknown.
+	// If the expected array was a nil in the stream, it returns containerLenNil.
+	ReadArrayStart() int
+	ReadArrayEnd()
+
+	// ReadMapStart will return the length of the map.
+	// If the format doesn't prefix the length, it returns containerLenUnknown.
+	// If the expected map was a nil in the stream, it returns containerLenNil.
+	ReadMapStart() int
+	ReadMapEnd()
+
+	reset()
+
+	// atEndOfDecode()
+
+	// nextValueBytes will return the bytes representing the next value in the stream.
+	//
+	// if start is nil, then treat it as a request to discard the next set of bytes,
+	// and the return response does not matter.
+	// Typically, this means that the returned []byte is nil/empty/undefined.
+	//
+	// Optimize for decoding from a []byte, where the nextValueBytes will just be a sub-slice
+	// of the input slice. Callers that need to use this to not be a view into the input bytes
+	// should handle it appropriately.
+	nextValueBytes(start []byte) []byte
+
+	// descBd will describe the token descriptor that signifies what type was decoded
+	descBd() string
+
+	decoder() *Decoder
+
+	driverStateManager
+	decNegintPosintFloatNumber
+}
+
+// decDriverContainerTracker is implemented by drivers that need a callback
+// for each element/key/value read within an array or map (e.g. to consume
+// separators in text formats).
+type decDriverContainerTracker interface {
+	ReadArrayElem()
+	ReadMapElemKey()
+	ReadMapElemValue()
+}
+
+// decNegintPosintFloatNumber exposes the raw integer/float decoding primitives
+// of a driver (used by number-coercion paths).
+type decNegintPosintFloatNumber interface {
+	decInteger() (ui uint64, neg, ok bool)
+	decFloat() (f float64, ok bool)
+}
+
+// decDriverNoopNumberHelper provides panicking stubs of decNegintPosintFloatNumber
+// for drivers that do not support raw number decoding.
+type decDriverNoopNumberHelper struct{}
+
+func (x decDriverNoopNumberHelper) decInteger() (ui uint64, neg, ok bool) {
+	panic("decInteger unsupported")
+}
+func (x decDriverNoopNumberHelper) decFloat() (f float64, ok bool) { panic("decFloat unsupported") }
+
+// decDriverNoopContainerReader provides no-op / panicking container methods
+// for embedding in drivers that don't implement them all.
+type decDriverNoopContainerReader struct{}
+
+func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { panic("ReadArrayStart unsupported") }
+func (x decDriverNoopContainerReader) ReadArrayEnd() {}
+func (x decDriverNoopContainerReader) ReadMapStart() (v int) { panic("ReadMapStart unsupported") }
+func (x decDriverNoopContainerReader) ReadMapEnd() {}
+func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return }
+
+// DecodeOptions captures configuration options during decode.
+type DecodeOptions struct {
+ // MapType specifies type to use during schema-less decoding of a map in the stream.
+ // If nil (unset), we default to map[string]interface{} iff json handle and MapKeyAsString=true,
+ // else map[interface{}]interface{}.
+ MapType reflect.Type
+
+ // SliceType specifies type to use during schema-less decoding of an array in the stream.
+ // If nil (unset), we default to []interface{} for all formats.
+ SliceType reflect.Type
+
+ // MaxInitLen defines the maxinum initial length that we "make" a collection
+ // (string, slice, map, chan). If 0 or negative, we default to a sensible value
+ // based on the size of an element in the collection.
+ //
+ // For example, when decoding, a stream may say that it has 2^64 elements.
+ // We should not auto-matically provision a slice of that size, to prevent Out-Of-Memory crash.
+ // Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
+ MaxInitLen int
+
+ // ReaderBufferSize is the size of the buffer used when reading.
+ //
+ // if > 0, we use a smart buffer internally for performance purposes.
+ ReaderBufferSize int
+
+ // MaxDepth defines the maximum depth when decoding nested
+ // maps and slices. If 0 or negative, we default to a suitably large number (currently 1024).
+ MaxDepth int16
+
+ // If ErrorIfNoField, return an error when decoding a map
+ // from a codec stream into a struct, and no matching struct field is found.
+ ErrorIfNoField bool
+
+ // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
+ // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
+ // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
+ ErrorIfNoArrayExpand bool
+
+ // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64).
+ SignedInteger bool
+
+ // MapValueReset controls how we decode into a map value.
+ //
+ // By default, we MAY retrieve the mapping for a key, and then decode into that.
+ // However, especially with big maps, that retrieval may be expensive and unnecessary
+ // if the stream already contains all that is necessary to recreate the value.
+ //
+ // If true, we will never retrieve the previous mapping,
+ // but rather decode into a new value and set that in the map.
+ //
+ // If false, we will retrieve the previous mapping if necessary e.g.
+ // the previous mapping is a pointer, or is a struct or array with pre-set state,
+ // or is an interface.
+ MapValueReset bool
+
+ // SliceElementReset: on decoding a slice, reset the element to a zero value first.
+ //
+ // concern: if the slice already contained some garbage, we will decode into that garbage.
+ SliceElementReset bool
+
+ // InterfaceReset controls how we decode into an interface.
+ //
+ // By default, when we see a field that is an interface{...},
+ // or a map with interface{...} value, we will attempt decoding into the
+ // "contained" value.
+ //
+ // However, this prevents us from reading a string into an interface{}
+ // that formerly contained a number.
+ //
+ // If true, we will decode into a new "blank" value, and set that in the interface.
+ // If false, we will decode into whatever is contained in the interface.
+ InterfaceReset bool
+
+ // InternString controls interning of strings during decoding.
+ //
+ // Some handles, e.g. json, typically will read map keys as strings.
+ // If the set of keys are finite, it may help reduce allocation to
+ // look them up from a map (than to allocate them afresh).
+ //
+ // Note: Handles will be smart when using the intern functionality.
+ // Every string should not be interned.
+ // An excellent use-case for interning is struct field names,
+ // or map keys where key type is string.
+ InternString bool
+
+ // PreferArrayOverSlice controls whether to decode to an array or a slice.
+ //
+ // This only impacts decoding into a nil interface{}.
+ //
+ // Consequently, it has no effect on codecgen.
+ //
+ // *Note*: This only applies if using go1.5 and above,
+ // as it requires reflect.ArrayOf support which was absent before go1.5.
+ PreferArrayOverSlice bool
+
+ // DeleteOnNilMapValue controls how to decode a nil value in the stream.
+ //
+ // If true, we will delete the mapping of the key.
+ // Else, just set the mapping to the zero value of the type.
+ //
+ // Deprecated: This does NOTHING and is left behind for compiling compatibility.
+ // This change is necessitated because 'nil' in a stream now consistently
+ // means the zero value (ie reset the value to its zero state).
+ DeleteOnNilMapValue bool
+
+ // RawToString controls how raw bytes in a stream are decoded into a nil interface{}.
+ // By default, they are decoded as []byte, but can be decoded as string (if configured).
+ RawToString bool
+
+ // ZeroCopy controls whether decoded values of []byte or string type
+ // point into the input []byte parameter passed to a NewDecoderBytes/ResetBytes(...) call.
+ //
+ // To illustrate, if ZeroCopy and decoding from a []byte (not io.Writer),
+ // then a []byte or string in the output result may just be a slice of (point into)
+ // the input bytes.
+ //
+ // This optimization prevents unnecessary copying.
+ //
+ // However, it is made optional, as the caller MUST ensure that the input parameter []byte is
+ // not modified after the Decode() happens, as any changes are mirrored in the decoded result.
+ ZeroCopy bool
+
+ // PreferPointerForStructOrArray controls whether a struct or array
+ // is stored in a nil interface{}, or a pointer to it.
+ //
+ // This mostly impacts when we decode registered extensions.
+ PreferPointerForStructOrArray bool
+}
+
+// ----------------------------------------
+
+// rawExt decodes the stream into a *RawExt (no registered extension handler; tag 0).
+func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) {
+	d.d.DecodeExt(rv2i(rv), f.ti.rt, 0, nil)
+}
+
+// ext decodes the stream into rv via the extension (tag + handler) registered for its type.
+func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) {
+	d.d.DecodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn)
+}
+
+// selferUnmarshal dispatches decoding to the value's own CodecDecodeSelf implementation.
+func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	rv2i(rv).(Selfer).CodecDecodeSelf(d)
+}
+
+// binaryUnmarshal decodes a byte string from the stream and hands it to
+// the value's encoding.BinaryUnmarshaler implementation.
+func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	bm := rv2i(rv).(encoding.BinaryUnmarshaler)
+	xbs := d.d.DecodeBytes(nil)
+	fnerr := bm.UnmarshalBinary(xbs)
+	d.onerror(fnerr)
+}
+
+// textUnmarshal decodes a string from the stream and hands it to
+// the value's encoding.TextUnmarshaler implementation.
+func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	tm := rv2i(rv).(encoding.TextUnmarshaler)
+	fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes())
+	d.onerror(fnerr)
+}
+
+// jsonUnmarshal dispatches to jsonUnmarshalV for values implementing json unmarshaling.
+func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	d.jsonUnmarshalV(rv2i(rv).(jsonUnmarshaler))
+}
+
+// jsonUnmarshalV extracts the next complete JSON value from the stream and
+// passes it to tm.UnmarshalJSON.
+func (d *Decoder) jsonUnmarshalV(tm jsonUnmarshaler) {
+	// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+	var bs0 = []byte{}
+	if !d.bytes {
+		// not decoding from a []byte: borrow a scratch buffer from the pool
+		bs0 = d.blist.get(256)
+	}
+	bs := d.d.nextValueBytes(bs0)
+	fnerr := tm.UnmarshalJSON(bs)
+	if !d.bytes {
+		// return buffer(s) to the pool; if nextValueBytes outgrew bs0 into a
+		// fresh allocation, both slices are distinct and both must be released
+		d.blist.put(bs)
+		if !byteSliceSameData(bs0, bs) {
+			d.blist.put(bs0)
+		}
+	}
+	d.onerror(fnerr)
+}
+
+// kErr is the fallback handler for kinds with no decoding support; it always errors.
+func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) {
+	d.errorf("no decoding function defined for kind %v", rv.Kind())
+}
+
+// raw captures the raw bytes of the next stream value into the Raw ([]byte) value.
+func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) {
+	rvSetBytes(rv, d.rawBytes())
+}
+
+// Scalar kind handlers: each decodes one primitive from the stream into rv.
+// Sized int/uint variants go through chkOvf.IntV/UintV to fail on overflow
+// before narrowing; complex values are decoded from a single float (imag = 0).
+
+func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) {
+	rvSetString(rv, d.stringZC(d.d.DecodeStringAsBytes()))
+}
+
+func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) {
+	rvSetBool(rv, d.d.DecodeBool())
+}
+
+func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) {
+	rvSetTime(rv, d.d.DecodeTime())
+}
+
+func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
+	rvSetFloat32(rv, d.decodeFloat32())
+}
+
+func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
+	rvSetFloat64(rv, d.d.DecodeFloat64())
+}
+
+func (d *Decoder) kComplex64(f *codecFnInfo, rv reflect.Value) {
+	rvSetComplex64(rv, complex(d.decodeFloat32(), 0))
+}
+
+func (d *Decoder) kComplex128(f *codecFnInfo, rv reflect.Value) {
+	rvSetComplex128(rv, complex(d.d.DecodeFloat64(), 0))
+}
+
+func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) {
+	rvSetInt(rv, int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)))
+}
+
+func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) {
+	rvSetInt8(rv, int8(chkOvf.IntV(d.d.DecodeInt64(), 8)))
+}
+
+func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) {
+	rvSetInt16(rv, int16(chkOvf.IntV(d.d.DecodeInt64(), 16)))
+}
+
+func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) {
+	rvSetInt32(rv, int32(chkOvf.IntV(d.d.DecodeInt64(), 32)))
+}
+
+func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) {
+	rvSetInt64(rv, d.d.DecodeInt64())
+}
+
+func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) {
+	rvSetUint(rv, uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)))
+}
+
+func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
+	rvSetUintptr(rv, uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)))
+}
+
+func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) {
+	rvSetUint8(rv, uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)))
+}
+
+func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) {
+	rvSetUint16(rv, uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)))
+}
+
+func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) {
+	rvSetUint32(rv, uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)))
+}
+
+func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) {
+	rvSetUint64(rv, d.d.DecodeUint64())
+}
+
+// kInterfaceNaked decodes the next stream value into a fresh reflect.Value,
+// for storage in a nil interface{}. The concrete Go type is chosen from the
+// detected stream value type (map, array, ext, or a primitive).
+// An invalid (zero) reflect.Value is returned for a stream nil.
+func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
+	// nil interface:
+	// use some heuristics to decode it appropriately
+	// based on the detected next value in the stream.
+	n := d.naked()
+	d.d.DecodeNaked()
+
+	// We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
+	// However, it is possible that the user has ways to pass in a type for a given interface
+	//   - MapType
+	//   - SliceType
+	//   - Extensions
+	//
+	// Consequently, we should relax this. Put it behind a const flag for now.
+	if decFailNonEmptyIntf && f.ti.numMeth > 0 {
+		d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth)
+	}
+	switch n.v {
+	case valueTypeMap:
+		// pick the configured MapType (or the format-appropriate default map type)
+		mtid := d.mtid
+		if mtid == 0 {
+			if d.jsms { // if json, default to a map type with string keys
+				mtid = mapStrIntfTypId // for json performance
+			} else {
+				mtid = mapIntfIntfTypId
+			}
+		}
+		if mtid == mapStrIntfTypId {
+			var v2 map[string]interface{}
+			d.decode(&v2)
+			rvn = rv4iptr(&v2).Elem()
+		} else if mtid == mapIntfIntfTypId {
+			var v2 map[interface{}]interface{}
+			d.decode(&v2)
+			rvn = rv4iptr(&v2).Elem()
+		} else if d.mtr {
+			rvn = reflect.New(d.h.MapType)
+			d.decode(rv2i(rvn))
+			rvn = rvn.Elem()
+		} else {
+			rvn = rvZeroAddrK(d.h.MapType, reflect.Map)
+			d.decodeValue(rvn, nil)
+		}
+	case valueTypeArray:
+		if d.stid == 0 || d.stid == intfSliceTypId {
+			var v2 []interface{}
+			d.decode(&v2)
+			rvn = rv4iptr(&v2).Elem()
+		} else if d.str {
+			rvn = reflect.New(d.h.SliceType)
+			d.decode(rv2i(rvn))
+			rvn = rvn.Elem()
+		} else {
+			rvn = rvZeroAddrK(d.h.SliceType, reflect.Slice)
+			d.decodeValue(rvn, nil)
+		}
+		if reflectArrayOfSupported && d.h.PreferArrayOverSlice {
+			rvn = rvGetArray4Slice(rvn)
+		}
+	case valueTypeExt:
+		tag, bytes := n.u, n.l // calling decode below might taint the values
+		bfn := d.h.getExtForTag(tag)
+		var re = RawExt{Tag: tag}
+		if bytes == nil {
+			// it is one of the InterfaceExt ones: json and cbor.
+			// most likely cbor, as json decoding never reveals valueTypeExt (no tagging support)
+			if bfn == nil {
+				d.decode(&re.Value)
+				rvn = rv4iptr(&re).Elem()
+			} else {
+				if bfn.ext == SelfExt {
+					rvn = rvZeroAddrK(bfn.rt, bfn.rt.Kind())
+					d.decodeValue(rvn, d.h.fnNoExt(bfn.rt))
+				} else {
+					rvn = reflect.New(bfn.rt)
+					d.interfaceExtConvertAndDecode(rv2i(rvn), bfn.ext)
+					rvn = rvn.Elem()
+				}
+			}
+		} else {
+			// one of the BytesExt ones: binc, msgpack, simple
+			if bfn == nil {
+				re.setData(bytes, false)
+				rvn = rv4iptr(&re).Elem()
+			} else {
+				rvn = reflect.New(bfn.rt)
+				if bfn.ext == SelfExt {
+					d.sideDecode(rv2i(rvn), bfn.rt, bytes)
+				} else {
+					bfn.ext.ReadExt(rv2i(rvn), bytes)
+				}
+				rvn = rvn.Elem()
+			}
+		}
+		// if struct/array, directly store pointer into the interface
+		if d.h.PreferPointerForStructOrArray && rvn.CanAddr() {
+			if rk := rvn.Kind(); rk == reflect.Array || rk == reflect.Struct {
+				rvn = rvn.Addr()
+			}
+		}
+	case valueTypeNil:
+		// rvn = reflect.Zero(f.ti.rt)
+		// no-op
+	case valueTypeInt:
+		rvn = n.ri()
+	case valueTypeUint:
+		rvn = n.ru()
+	case valueTypeFloat:
+		rvn = n.rf()
+	case valueTypeBool:
+		rvn = n.rb()
+	case valueTypeString, valueTypeSymbol:
+		rvn = n.rs()
+	case valueTypeBytes:
+		rvn = n.rl()
+	case valueTypeTime:
+		rvn = n.rt()
+	default:
+		halt.errorf("kInterfaceNaked: unexpected valueType: %d", n.v)
+	}
+	return
+}
+
+// kInterface decodes the stream into an interface{...} value.
+func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) {
+	// Note: A consequence of how kInterface works, is that
+	// if an interface already contains something, we try
+	// to decode into what was there before.
+	// We do not replace with a generic value (as got from decodeNaked).
+	//
+	// every interface passed here MUST be settable.
+	//
+	// ensure you call rvSetIntf(...) before returning.
+
+	isnilrv := rvIsNil(rv)
+
+	var rvn reflect.Value
+
+	if d.h.InterfaceReset {
+		// check if mapping to a type: if so, initialize it and move on
+		rvn = d.h.intf2impl(f.ti.rtid)
+		if !rvn.IsValid() {
+			rvn = d.kInterfaceNaked(f)
+			if rvn.IsValid() {
+				rvSetIntf(rv, rvn)
+			} else if !isnilrv {
+				// stream held nil: reset the (previously non-nil) interface to nil
+				decSetNonNilRV2Zero4Intf(rv)
+			}
+			return
+		}
+	} else if isnilrv {
+		// check if mapping to a type: if so, initialize it and move on
+		rvn = d.h.intf2impl(f.ti.rtid)
+		if !rvn.IsValid() {
+			rvn = d.kInterfaceNaked(f)
+			if rvn.IsValid() {
+				rvSetIntf(rv, rvn)
+			}
+			return
+		}
+	} else {
+		// now we have a non-nil interface value, meaning it contains a type
+		rvn = rv.Elem()
+	}
+
+	// rvn is now a non-interface type
+
+	canDecode, _ := isDecodeable(rvn)
+
+	// Note: interface{} is settable, but underlying type may not be.
+	// Consequently, we MAY have to allocate a value (containing the underlying value),
+	// decode into it, and reset the interface to that new value.
+
+	if !canDecode {
+		rvn2 := d.oneShotAddrRV(rvType(rvn), rvn.Kind())
+		rvSetDirect(rvn2, rvn)
+		rvn = rvn2
+	}
+
+	d.decodeValue(rvn, nil)
+	rvSetIntf(rv, rvn)
+}
+
+// decStructFieldKeyNotString renders a non-string struct-field key (int, uint
+// or float) from the stream into its decimal ASCII form, using b as scratch.
+// Any other keyType halts with an error.
+func decStructFieldKeyNotString(dd decDriver, keyType valueType, b *[decScratchByteArrayLen]byte) (rvkencname []byte) {
+	if keyType == valueTypeInt {
+		rvkencname = strconv.AppendInt(b[:0], dd.DecodeInt64(), 10)
+	} else if keyType == valueTypeUint {
+		rvkencname = strconv.AppendUint(b[:0], dd.DecodeUint64(), 10)
+	} else if keyType == valueTypeFloat {
+		rvkencname = strconv.AppendFloat(b[:0], dd.DecodeFloat64(), 'f', -1, 64)
+	} else {
+		halt.errorf("invalid struct key type: %v", keyType)
+	}
+	return
+}
+
+// kStruct decodes a stream map (keyed by field name) or array (positional,
+// in struct-field order) into a struct. Unknown map keys are routed to a
+// MissingFielder implementation if the struct provides one.
+func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) {
+	ctyp := d.d.ContainerType()
+	ti := f.ti
+	var mf MissingFielder
+	if ti.flagMissingFielder {
+		mf = rv2i(rv).(MissingFielder)
+	} else if ti.flagMissingFielderPtr {
+		mf = rv2i(rvAddr(rv, ti.ptr)).(MissingFielder)
+	}
+	if ctyp == valueTypeMap {
+		containerLen := d.mapStart(d.d.ReadMapStart())
+		if containerLen == 0 {
+			d.mapEnd()
+			return
+		}
+		hasLen := containerLen >= 0
+		var name2 []byte
+		if mf != nil {
+			var namearr2 [16]byte
+			name2 = namearr2[:0]
+		}
+		var rvkencname []byte
+		for j := 0; d.containerNext(j, containerLen, hasLen); j++ {
+			d.mapElemKey()
+			if ti.keyType == valueTypeString {
+				rvkencname = d.d.DecodeStringAsBytes()
+			} else {
+				rvkencname = decStructFieldKeyNotString(d.d, ti.keyType, &d.b)
+			}
+			d.mapElemValue()
+			if si := ti.siForEncName(rvkencname); si != nil {
+				d.decodeValue(si.path.fieldAlloc(rv), nil)
+			} else if mf != nil {
+				// store rvkencname in new []byte, as it previously shares Decoder.b, which is used in decode
+				name2 = append(name2[:0], rvkencname...)
+				var f interface{}
+				d.decode(&f)
+				if !mf.CodecMissingField(name2, f) && d.h.ErrorIfNoField {
+					d.errorf("no matching struct field when decoding stream map with key: %s ", stringView(name2))
+				}
+			} else {
+				d.structFieldNotFound(-1, stringView(rvkencname))
+			}
+		}
+		d.mapEnd()
+	} else if ctyp == valueTypeArray {
+		containerLen := d.arrayStart(d.d.ReadArrayStart())
+		if containerLen == 0 {
+			d.arrayEnd()
+			return
+		}
+		// Not much gain from doing it two ways for array.
+		// Arrays are not used as much for structs.
+		hasLen := containerLen >= 0
+		var checkbreak bool
+		tisfi := ti.sfi.source()
+		for j, si := range tisfi {
+			if hasLen {
+				if j == containerLen {
+					break
+				}
+			} else if d.checkBreak() {
+				checkbreak = true
+				break
+			}
+			d.arrayElem()
+			d.decodeValue(si.path.fieldAlloc(rv), nil)
+		}
+		var proceed bool
+		if hasLen {
+			proceed = containerLen > len(tisfi)
+		} else {
+			proceed = !checkbreak
+		}
+		// if (hasLen && containerLen > len(tisfi)) || (!hasLen && !checkbreak) {
+		if proceed {
+			// read remaining values and throw away
+			for j := len(tisfi); ; j++ {
+				if !d.containerNext(j, containerLen, hasLen) {
+					break
+				}
+				d.arrayElem()
+				d.structFieldNotFound(j, "")
+			}
+		}
+		d.arrayEnd()
+	} else {
+		d.onerror(errNeedMapOrArrayDecodeToStruct)
+	}
+}
+
+// kSlice decodes a stream array/map (or bytes/string, for []byte-like slices)
+// into a slice, growing or re-making the slice as needed and writing the
+// (possibly re-allocated) result back into rv0 if the backing value changed.
+func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) {
+	// A slice can be set from a map or array in stream.
+	// This way, the order can be kept (as order is lost with map).
+
+	// Note: rv is a slice type here - guaranteed
+
+	ti := f.ti
+	rvCanset := rv.CanSet()
+
+	ctyp := d.d.ContainerType()
+	if ctyp == valueTypeBytes || ctyp == valueTypeString {
+		// you can only decode bytes or string in the stream into a slice or array of bytes
+		if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) {
+			d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt)
+		}
+		rvbs := rvGetBytes(rv)
+		if !rvCanset {
+			// not addressable byte slice, so do not decode into it past the length
+			rvbs = rvbs[:len(rvbs):len(rvbs)]
+		}
+		bs2 := d.decodeBytesInto(rvbs)
+		// if !(len(bs2) == len(rvbs) && byteSliceSameData(rvbs, bs2)) {
+		if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) {
+			if rvCanset {
+				rvSetBytes(rv, bs2)
+			} else if len(rvbs) > 0 && len(bs2) > 0 {
+				copy(rvbs, bs2)
+			}
+		}
+		return
+	}
+
+	slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) - never Nil
+
+	// an array can never return a nil slice. so no need to check f.array here.
+	if containerLenS == 0 {
+		if rvCanset {
+			if rvIsNil(rv) {
+				rvSetDirect(rv, rvSliceZeroCap(ti.rt))
+			} else {
+				rvSetSliceLen(rv, 0)
+			}
+		}
+		slh.End()
+		return
+	}
+
+	rtelem0Mut := !scalarBitset.isset(ti.elemkind)
+	rtelem := ti.elem
+
+	// unwrap pointer element types to find the base type for the codec fn
+	for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() {
+		rtelem = rtelem.Elem()
+	}
+
+	var fn *codecFn
+
+	var rvChanged bool
+
+	var rv0 = rv
+	var rv9 reflect.Value
+
+	rvlen := rvLenSlice(rv)
+	rvcap := rvCapSlice(rv)
+	hasLen := containerLenS > 0
+	if hasLen {
+		// stream declared a length: pre-size the slice (bounded by MaxInitLen)
+		if containerLenS > rvcap {
+			oldRvlenGtZero := rvlen > 0
+			rvlen1 := decInferLen(containerLenS, d.h.MaxInitLen, int(ti.elemsize))
+			if rvlen1 == rvlen {
+			} else if rvlen1 <= rvcap {
+				if rvCanset {
+					rvlen = rvlen1
+					rvSetSliceLen(rv, rvlen)
+				}
+			} else if rvCanset { // rvlen1 > rvcap
+				rvlen = rvlen1
+				rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen)
+				rvcap = rvlen
+				rvChanged = !rvCanset
+			} else { // rvlen1 > rvcap && !canSet
+				d.errorf("cannot decode into non-settable slice")
+			}
+			if rvChanged && oldRvlenGtZero && rtelem0Mut {
+				rvCopySlice(rv, rv0, rtelem) // only copy up to length NOT cap i.e. rv0.Slice(0, rvcap)
+			}
+		} else if containerLenS != rvlen {
+			if rvCanset {
+				rvlen = containerLenS
+				rvSetSliceLen(rv, rvlen)
+			}
+		}
+	}
+
+	// consider creating new element once, and just decoding into it.
+	var elemReset = d.h.SliceElementReset
+
+	var j int
+
+	for ; d.containerNext(j, containerLenS, hasLen); j++ {
+		if j == 0 {
+			if rvIsNil(rv) { // means hasLen = false
+				if rvCanset {
+					rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(ti.elemsize))
+					rv, rvCanset = rvMakeSlice(rv, f.ti, rvlen, rvlen)
+					rvcap = rvlen
+					rvChanged = !rvCanset
+				} else {
+					d.errorf("cannot decode into non-settable slice")
+				}
+			}
+			if fn == nil {
+				fn = d.h.fn(rtelem)
+			}
+		}
+		// if indefinite, etc, then expand the slice if necessary
+		if j >= rvlen {
+			slh.ElemContainerState(j)
+
+			// expand the slice up to the cap.
+			// Note that we did, so we have to reset it later.
+
+			if rvlen < rvcap {
+				rvlen = rvcap
+				if rvCanset {
+					rvSetSliceLen(rv, rvlen)
+				} else if rvChanged {
+					rv = rvSlice(rv, rvlen)
+				} else {
+					d.onerror(errExpandSliceCannotChange)
+				}
+			} else {
+				if !(rvCanset || rvChanged) {
+					d.onerror(errExpandSliceCannotChange)
+				}
+				rv, rvcap, rvCanset = rvGrowSlice(rv, f.ti, rvcap, 1)
+				rvlen = rvcap
+				rvChanged = !rvCanset
+			}
+		} else {
+			slh.ElemContainerState(j)
+		}
+		rv9 = rvSliceIndex(rv, j, f.ti)
+		if elemReset {
+			rvSetZero(rv9)
+		}
+		d.decodeValue(rv9, fn)
+	}
+	// trim the slice back down if we over-expanded, or produce an empty
+	// (non-nil) slice when the stream container had zero elements
+	if j < rvlen {
+		if rvCanset {
+			rvSetSliceLen(rv, j)
+		} else if rvChanged {
+			rv = rvSlice(rv, j)
+		}
+		// rvlen = j
+	} else if j == 0 && rvIsNil(rv) {
+		if rvCanset {
+			rv = rvSliceZeroCap(ti.rt)
+			rvCanset = false
+			rvChanged = true
+		}
+	}
+	slh.End()
+
+	if rvChanged { // infers rvCanset=true, so it can be reset
+		rvSetDirect(rv0, rv)
+	}
+}
+
+// kArray decodes a stream array/map (or bytes/string, for byte arrays) into a
+// fixed-length array. Arrays cannot grow: a stream longer than the array errors.
+func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) {
+	// An array can be set from a map or array in stream.
+
+	ctyp := d.d.ContainerType()
+	if handleBytesWithinKArray && (ctyp == valueTypeBytes || ctyp == valueTypeString) {
+		// you can only decode bytes or string in the stream into a slice or array of bytes
+		if f.ti.elemkind != uint8(reflect.Uint8) {
+			d.errorf("bytes/string in stream can decode into array of bytes, but not %v", f.ti.rt)
+		}
+		rvbs := rvGetArrayBytes(rv, nil)
+		bs2 := d.decodeBytesInto(rvbs)
+		if !byteSliceSameData(rvbs, bs2) && len(rvbs) > 0 && len(bs2) > 0 {
+			copy(rvbs, bs2)
+		}
+		return
+	}
+
+	slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) - never Nil
+
+	// an array can never return a nil slice. so no need to check f.array here.
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+
+	// unwrap pointer element types to find the base type for the codec fn
+	rtelem := f.ti.elem
+	for k := reflect.Kind(f.ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() {
+		rtelem = rtelem.Elem()
+	}
+
+	var fn *codecFn
+
+	var rv9 reflect.Value
+
+	rvlen := rv.Len() // same as cap
+	hasLen := containerLenS > 0
+	if hasLen && containerLenS > rvlen {
+		d.errorf("cannot decode into array with length: %v, less than container length: %v", rvlen, containerLenS)
+	}
+
+	// consider creating new element once, and just decoding into it.
+	var elemReset = d.h.SliceElementReset
+
+	for j := 0; d.containerNext(j, containerLenS, hasLen); j++ {
+		// note that you cannot expand the array if indefinite and we go past array length
+		if j >= rvlen {
+			slh.arrayCannotExpand(hasLen, rvlen, j, containerLenS)
+			return
+		}
+
+		slh.ElemContainerState(j)
+		rv9 = rvArrayIndex(rv, j, f.ti)
+		if elemReset {
+			rvSetZero(rv9)
+		}
+
+		if fn == nil {
+			fn = d.h.fn(rtelem)
+		}
+		d.decodeValue(rv9, fn)
+	}
+	slh.End()
+}
+
+// kChan decodes a stream array/map (or bytes/string, for byte channels) by
+// sending each decoded element into the channel, making the channel first if nil.
+// Note: rv.Send blocks if the channel is unbuffered or full.
+func (d *Decoder) kChan(f *codecFnInfo, rv reflect.Value) {
+	// A channel is filled from a map or array in stream
+	// (order is kept, as order is lost with map).
+
+	ti := f.ti
+	if ti.chandir&uint8(reflect.SendDir) == 0 {
+		d.errorf("receive-only channel cannot be decoded")
+	}
+	ctyp := d.d.ContainerType()
+	if ctyp == valueTypeBytes || ctyp == valueTypeString {
+		// you can only decode bytes or string in the stream into a slice or array of bytes
+		if !(ti.rtid == uint8SliceTypId || ti.elemkind == uint8(reflect.Uint8)) {
+			d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt)
+		}
+		bs2 := d.d.DecodeBytes(nil)
+		irv := rv2i(rv)
+		ch, ok := irv.(chan<- byte)
+		if !ok {
+			ch = irv.(chan byte)
+		}
+		for _, b := range bs2 {
+			ch <- b
+		}
+		return
+	}
+
+	var rvCanset = rv.CanSet()
+
+	// only expects valueType(Array|Map - nil handled above)
+	slh, containerLenS := d.decSliceHelperStart()
+
+	// an array can never return a nil slice. so no need to check f.array here.
+	if containerLenS == 0 {
+		if rvCanset && rvIsNil(rv) {
+			rvSetDirect(rv, reflect.MakeChan(ti.rt, 0))
+		}
+		slh.End()
+		return
+	}
+
+	rtelem := ti.elem
+	useTransient := decUseTransient && ti.elemkind != byte(reflect.Ptr) && ti.tielem.flagCanTransient
+
+	// unwrap pointer element types to find the base type for the codec fn
+	for k := reflect.Kind(ti.elemkind); k == reflect.Ptr; k = rtelem.Kind() {
+		rtelem = rtelem.Elem()
+	}
+
+	var fn *codecFn
+
+	var rvChanged bool
+	var rv0 = rv
+	var rv9 reflect.Value
+
+	var rvlen int // = rv.Len()
+	hasLen := containerLenS > 0
+
+	for j := 0; d.containerNext(j, containerLenS, hasLen); j++ {
+		if j == 0 {
+			if rvIsNil(rv) {
+				// size the channel buffer from the stream length if known
+				if hasLen {
+					rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(ti.elemsize))
+				} else {
+					rvlen = decDefChanCap
+				}
+				if rvCanset {
+					rv = reflect.MakeChan(ti.rt, rvlen)
+					rvChanged = true
+				} else {
+					d.errorf("cannot decode into non-settable chan")
+				}
+			}
+			if fn == nil {
+				fn = d.h.fn(rtelem)
+			}
+		}
+		slh.ElemContainerState(j)
+		// reuse rv9 across iterations; zero it between elements
+		if rv9.IsValid() {
+			rvSetZero(rv9)
+		} else if decUseTransient && useTransient {
+			rv9 = d.perType.TransientAddrK(ti.elem, reflect.Kind(ti.elemkind))
+		} else {
+			rv9 = rvZeroAddrK(ti.elem, reflect.Kind(ti.elemkind))
+		}
+		if !d.d.TryNil() {
+			d.decodeValueNoCheckNil(rv9, fn)
+		}
+		rv.Send(rv9)
+	}
+	slh.End()
+
+	if rvChanged { // infers rvCanset=true, so it can be reset
+		rvSetDirect(rv0, rv)
+	}
+
+}
+
+// kMap decodes a stream map into the map held by rv, allocating the map
+// if nil, reusing/interning string keys, and (where configured) reusing
+// existing values in the map for in-place update.
+func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	ti := f.ti
+	if rvIsNil(rv) {
+		rvlen := decInferLen(containerLen, d.h.MaxInitLen, int(ti.keysize+ti.elemsize))
+		rvSetDirect(rv, makeMapReflect(ti.rt, rvlen))
+	}
+
+	if containerLen == 0 {
+		d.mapEnd()
+		return
+	}
+
+	ktype, vtype := ti.key, ti.elem
+	ktypeId := rt2id(ktype)
+	vtypeKind := reflect.Kind(ti.elemkind)
+	ktypeKind := reflect.Kind(ti.keykind)
+	kfast := mapKeyFastKindFor(ktypeKind)
+	visindirect := mapStoresElemIndirect(uintptr(ti.elemsize))
+	visref := refBitset.isset(ti.elemkind)
+
+	vtypePtr := vtypeKind == reflect.Ptr
+	ktypePtr := ktypeKind == reflect.Ptr
+
+	// transient scratch values are only safe for non-pointer key/value kinds
+	vTransient := decUseTransient && !vtypePtr && ti.tielem.flagCanTransient
+	kTransient := decUseTransient && !ktypePtr && ti.tikey.flagCanTransient
+
+	var vtypeElem reflect.Type
+
+	var keyFn, valFn *codecFn
+	var ktypeLo, vtypeLo = ktype, vtype
+
+	// deref pointer key/value types down to their base (non-pointer) types
+	if ktypeKind == reflect.Ptr {
+		for ktypeLo = ktype.Elem(); ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() {
+		}
+	}
+
+	if vtypePtr {
+		vtypeElem = vtype.Elem()
+		for vtypeLo = vtypeElem; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() {
+		}
+	}
+
+	rvkMut := !scalarBitset.isset(ti.keykind) // if ktype is immutable, then re-use the same rvk.
+	rvvMut := !scalarBitset.isset(ti.elemkind)
+	rvvCanNil := isnilBitset.isset(ti.elemkind)
+
+	// rvk: key
+	// rvkn: if non-mutable, on each iteration of loop, set rvk to this
+	// rvv: value
+	// rvvn: if non-mutable, on each iteration of loop, set rvv to this
+	//       if mutable, may be used as a temporary value for local-scoped operations
+	// rvva: if mutable, used as transient value for use for key lookup
+	// rvvz: zero value of map value type, used to do a map set when nil is found in stream
+	var rvk, rvkn, rvv, rvvn, rvva, rvvz reflect.Value
+
+	// we do a doMapGet if kind is mutable, and InterfaceReset=true if interface
+	var doMapGet, doMapSet bool
+
+	if !d.h.MapValueReset {
+		if rvvMut && (vtypeKind != reflect.Interface || !d.h.InterfaceReset) {
+			doMapGet = true
+			rvva = mapAddrLoopvarRV(vtype, vtypeKind)
+		}
+	}
+
+	ktypeIsString := ktypeId == stringTypId
+	ktypeIsIntf := ktypeId == intfTypId
+
+	hasLen := containerLen > 0
+
+	// kstrbs is used locally for the key bytes, so we can reduce allocation.
+	// When we read keys, we copy to this local bytes array, and use a stringView for lookup.
+	// We only convert it into a true string if we have to do a set on the map.
+
+	// Since kstr2bs will usually escape to the heap, declaring a [64]byte array may be wasteful.
+	// It is only valuable if we are sure that it is declared on the stack.
+	// var kstrarr [64]byte // most keys are less than 32 bytes, and even more less than 64
+	// var kstrbs = kstrarr[:0]
+	var kstrbs []byte
+	var kstr2bs []byte
+	var s string
+
+	// callFnRvk tracks whether the key currently in rvk is a stringView that
+	// must be materialized (via d.string) before being stored in the map
+	var callFnRvk bool
+
+	fnRvk2 := func() (s string) {
+		callFnRvk = false
+		if len(kstr2bs) < 2 {
+			return string(kstr2bs)
+		}
+		return d.mapKeyString(&callFnRvk, &kstrbs, &kstr2bs)
+	}
+
+	// Use a possibly transient (map) value (and key), to reduce allocation
+
+	for j := 0; d.containerNext(j, containerLen, hasLen); j++ {
+		callFnRvk = false
+		if j == 0 {
+			// if vtypekind is a scalar and thus value will be decoded using TransientAddrK,
+			// then it is ok to use TransientAddr2K for the map key.
+			if decUseTransient && vTransient && kTransient {
+				rvk = d.perType.TransientAddr2K(ktype, ktypeKind)
+			} else {
+				rvk = rvZeroAddrK(ktype, ktypeKind)
+			}
+			if !rvkMut {
+				rvkn = rvk
+			}
+			if !rvvMut {
+				if decUseTransient && vTransient {
+					rvvn = d.perType.TransientAddrK(vtype, vtypeKind)
+				} else {
+					rvvn = rvZeroAddrK(vtype, vtypeKind)
+				}
+			}
+			if !ktypeIsString && keyFn == nil {
+				keyFn = d.h.fn(ktypeLo)
+			}
+			if valFn == nil {
+				valFn = d.h.fn(vtypeLo)
+			}
+		} else if rvkMut {
+			rvSetZero(rvk)
+		} else {
+			rvk = rvkn
+		}
+
+		d.mapElemKey()
+		if ktypeIsString {
+			kstr2bs = d.d.DecodeStringAsBytes()
+			rvSetString(rvk, fnRvk2())
+		} else {
+			d.decByteState = decByteStateNone
+			d.decodeValue(rvk, keyFn)
+			// special case if interface wrapping a byte slice
+			if ktypeIsIntf {
+				if rvk2 := rvk.Elem(); rvk2.IsValid() && rvType(rvk2) == uint8SliceTyp {
+					kstr2bs = rvGetBytes(rvk2)
+					rvSetIntf(rvk, rv4istr(fnRvk2()))
+				}
+				// NOTE: consider failing early if map/slice/func
+			}
+		}
+
+		d.mapElemValue()
+
+		if d.d.TryNil() {
+			// since a map, we have to set zero value if needed
+			if !rvvz.IsValid() {
+				rvvz = rvZeroK(vtype, vtypeKind)
+			}
+			if callFnRvk {
+				s = d.string(kstr2bs)
+				if ktypeIsString {
+					rvSetString(rvk, s)
+				} else { // ktypeIsIntf
+					rvSetIntf(rvk, rv4istr(s))
+				}
+			}
+			mapSet(rv, rvk, rvvz, kfast, visindirect, visref)
+			continue
+		}
+
+		// there is non-nil content in the stream to decode ...
+		// consequently, it's ok to just directly create new value to the pointer (if vtypePtr)
+
+		// set doMapSet to false iff u do a get, and the return value is a non-nil pointer
+		doMapSet = true
+
+		if !rvvMut {
+			rvv = rvvn
+		} else if !doMapGet {
+			goto NEW_RVV
+		} else {
+			rvv = mapGet(rv, rvk, rvva, kfast, visindirect, visref)
+			if !rvv.IsValid() || (rvvCanNil && rvIsNil(rvv)) {
+				goto NEW_RVV
+			}
+			switch vtypeKind {
+			case reflect.Ptr, reflect.Map: // ok to decode directly into map
+				doMapSet = false
+			case reflect.Interface:
+				// if an interface{}, just decode into it iff a non-nil ptr/map, else allocate afresh
+				rvvn = rvv.Elem()
+				if k := rvvn.Kind(); (k == reflect.Ptr || k == reflect.Map) && !rvIsNil(rvvn) {
+					d.decodeValueNoCheckNil(rvvn, nil) // valFn is incorrect here
+					continue
+				}
+				// make addressable (so we can set the interface)
+				rvvn = rvZeroAddrK(vtype, vtypeKind)
+				rvSetIntf(rvvn, rvv)
+				rvv = rvvn
+			default:
+				// make addressable (so you can set the slice/array elements, etc)
+				if decUseTransient && vTransient {
+					rvvn = d.perType.TransientAddrK(vtype, vtypeKind)
+				} else {
+					rvvn = rvZeroAddrK(vtype, vtypeKind)
+				}
+				rvSetDirect(rvvn, rvv)
+				rvv = rvvn
+			}
+		}
+		goto DECODE_VALUE_NO_CHECK_NIL
+
+	NEW_RVV:
+		if vtypePtr {
+			rvv = reflect.New(vtypeElem) // non-nil in stream, so allocate value
+		} else if decUseTransient && vTransient {
+			rvv = d.perType.TransientAddrK(vtype, vtypeKind)
+		} else {
+			rvv = rvZeroAddrK(vtype, vtypeKind)
+		}
+
+	DECODE_VALUE_NO_CHECK_NIL:
+		d.decodeValueNoCheckNil(rvv, valFn)
+
+		if doMapSet {
+			// materialize the key string (interned where possible) before the set
+			if callFnRvk {
+				s = d.string(kstr2bs)
+				if ktypeIsString {
+					rvSetString(rvk, s)
+				} else { // ktypeIsIntf
+					rvSetIntf(rvk, rv4istr(s))
+				}
+			}
+			mapSet(rv, rvk, rvv, kfast, visindirect, visref)
+		}
+	}
+
+	d.mapEnd()
+}
+
+// Decoder reads and decodes an object from an input stream in a supported format.
+//
+// Decoder is NOT safe for concurrent use i.e. a Decoder cannot be used
+// concurrently in multiple goroutines.
+//
+// However, as Decoder could be allocation heavy to initialize, a Reset method is provided
+// so its state can be reused to decode new input streams repeatedly.
+// This is the idiomatic way to use.
+//
+// NOTE(review): field ordering below appears deliberate (cache-line
+// grouping per the boundary comments) — do not reorder.
+type Decoder struct {
+	panicHdl
+
+	// d is the format-specific decode driver (json, cbor, msgpack, ...)
+	d decDriver
+
+	// cache the mapTypeId and sliceTypeId for faster comparisons
+	mtid uintptr
+	stid uintptr
+
+	// h is the BasicHandle extracted from hh, consulted for options
+	h *BasicHandle
+
+	blist bytesFreelist
+
+	// ---- cpu cache line boundary?
+	decRd
+
+	// ---- cpu cache line boundary?
+	// n holds the lazily-initialized faux union used by naked()/DecodeNaked
+	n fauxUnion
+
+	hh  Handle
+	err error
+
+	perType decPerType
+
+	// used for interning strings
+	is internerMap
+
+	// ---- cpu cache line boundary?
+	// ---- writable fields during execution --- *try* to keep in sep cache line
+	maxdepth int16
+	depth    int16
+
+	// Extensions can call Decode() within a current Decode() call.
+	// We need to know when the top level Decode() call returns,
+	// so we can decide whether to Release() or not.
+	calls uint16 // what depth in mustDecode are we in now.
+
+	c containerState
+
+	decByteState
+
+	// b is an always-available scratch buffer used by Decoder and decDrivers.
+	// By being always-available, it can be used for one-off things without
+	// having to get from freelist, use, and return back to freelist.
+	b [decScratchByteArrayLen]byte
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+//
+// For efficiency, Users are encouraged to configure ReaderBufferSize on the handle
+// OR pass in a memory buffered reader (eg bufio.Reader, bytes.Buffer).
+func NewDecoder(r io.Reader, h Handle) *Decoder {
+	d := h.newDecDriver().decoder()
+	if r != nil {
+		d.Reset(r)
+	}
+	return d
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly
+// from a byte slice with zero copying.
+func NewDecoderBytes(in []byte, h Handle) *Decoder {
+	d := h.newDecDriver().decoder()
+	if in != nil {
+		d.ResetBytes(in)
+	}
+	return d
+}
+
+// NewDecoderString returns a Decoder which efficiently decodes directly
+// from a string with zero copying.
+//
+// It is a convenience function that calls NewDecoderBytes with a
+// []byte view into the string.
+//
+// This can be an efficient zero-copy if using default mode i.e. without codec.safe tag.
+func NewDecoderString(s string, h Handle) *Decoder {
+	return NewDecoderBytes(bytesView(s), h)
+}
+
+// r returns the embedded decRd, the low-level reader state for this Decoder.
+func (d *Decoder) r() *decRd {
+	return &d.decRd
+}
+
+// init performs one-time setup of the Decoder from its Handle.
+// It leaves the Decoder in a "not initialized" error state until a
+// Reset/ResetBytes establishes an input source.
+func (d *Decoder) init(h Handle) {
+	initHandle(h)
+	d.bytes = true
+	d.err = errDecoderNotInitialized
+	d.h = h.getBasicHandle()
+	d.hh = h
+	d.be = h.isBinary()
+	if d.h.InternString && d.is == nil {
+		d.is.init()
+	}
+	// NOTE: do not initialize d.n here. It is lazily initialized in d.naked()
+}
+
+// resetCommon clears per-run state and re-reads handle options that may
+// have changed between runs (MaxDepth, MapType, SliceType).
+func (d *Decoder) resetCommon() {
+	d.d.reset()
+	d.err = nil
+	d.c = 0
+	d.decByteState = decByteStateNone
+	d.depth = 0
+	d.calls = 0
+	// reset all things which were cached from the Handle, but could change
+	d.maxdepth = decDefMaxDepth
+	if d.h.MaxDepth > 0 {
+		d.maxdepth = d.h.MaxDepth
+	}
+	d.mtid = 0
+	d.stid = 0
+	d.mtr = false
+	d.str = false
+	if d.h.MapType != nil {
+		d.mtid = rt2id(d.h.MapType)
+		d.mtr = fastpathAvIndex(d.mtid) != -1
+	}
+	if d.h.SliceType != nil {
+		d.stid = rt2id(d.h.SliceType)
+		d.str = fastpathAvIndex(d.stid) != -1
+	}
+}
+
+// Reset the Decoder with a new Reader to decode from,
+// clearing all state from last run(s).
+func (d *Decoder) Reset(r io.Reader) {
+	if r == nil {
+		r = &eofReader
+	}
+	d.bytes = false
+	// choose buffered vs unbuffered io reader per handle config,
+	// lazily allocating the chosen reader once and reusing it
+	if d.h.ReaderBufferSize > 0 {
+		if d.bi == nil {
+			d.bi = new(bufioDecReader)
+		}
+		d.bi.reset(r, d.h.ReaderBufferSize, &d.blist)
+		d.bufio = true
+		d.decReader = d.bi
+	} else {
+		if d.ri == nil {
+			d.ri = new(ioDecReader)
+		}
+		d.ri.reset(r, &d.blist)
+		d.bufio = false
+		d.decReader = d.ri
+	}
+	d.resetCommon()
+}
+
+// ResetBytes resets the Decoder with a new []byte to decode from,
+// clearing all state from last run(s).
+func (d *Decoder) ResetBytes(in []byte) {
+	if in == nil {
+		in = []byte{}
+	}
+	d.bufio = false
+	d.bytes = true
+	d.decReader = &d.rb
+	d.rb.reset(in)
+	d.resetCommon()
+}
+
+// ResetString resets the Decoder with a new string to decode from,
+// clearing all state from last run(s).
+//
+// It is a convenience function that calls ResetBytes with a
+// []byte view into the string.
+//
+// This can be an efficient zero-copy if using default mode i.e. without codec.safe tag.
+func (d *Decoder) ResetString(s string) {
+	d.ResetBytes(bytesView(s))
+}
+
+// naked returns the faux union used when decoding into a nil interface.
+func (d *Decoder) naked() *fauxUnion {
+	return &d.n
+}
+
+// Decode decodes the stream from reader and stores the result in the
+// value pointed to by v. v cannot be a nil pointer. v can also be
+// a reflect.Value of a pointer.
+//
+// Note that a pointer to a nil interface is not a nil pointer.
+// If you do not know what type of stream it is, pass in a pointer to a nil interface.
+// We will decode and store a value in that nil interface.
+//
+// Sample usages:
+//   // Decoding into a non-nil typed value
+//   var f float32
+//   err = codec.NewDecoder(r, handle).Decode(&f)
+//
+//   // Decoding into nil interface
+//   var v interface{}
+//   dec := codec.NewDecoder(r, handle)
+//   err = dec.Decode(&v)
+//
+// When decoding into a nil interface{}, we will decode into an appropriate value based
+// on the contents of the stream:
+//   - Numbers are decoded as float64, int64 or uint64.
+//   - Other values are decoded appropriately depending on the type:
+//     bool, string, []byte, time.Time, etc
+//   - Extensions are decoded as RawExt (if no ext function registered for the tag)
+// Configurations exist on the Handle to override defaults
+// (e.g. for MapType, SliceType and how to decode raw bytes).
+//
+// When decoding into a non-nil interface{} value, the mode of encoding is based on the
+// type of the value. When a value is seen:
+//   - If an extension is registered for it, call that extension function
+//   - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error
+//   - Else decode it based on its reflect.Kind
+//
+// There are some special rules when decoding into containers (slice/array/map/struct).
+// Decode will typically use the stream contents to UPDATE the container i.e. the values
+// in these containers will not be zero'ed before decoding.
+//   - A map can be decoded from a stream map, by updating matching keys.
+//   - A slice can be decoded from a stream array,
+//     by updating the first n elements, where n is length of the stream.
+//   - A slice can be decoded from a stream map, by decoding as if
+//     it contains a sequence of key-value pairs.
+//   - A struct can be decoded from a stream map, by updating matching fields.
+//   - A struct can be decoded from a stream array,
+//     by updating fields as they occur in the struct (by index).
+//
+// This in-place update maintains consistency in the decoding philosophy (i.e. we ALWAYS update
+// in place by default). However, the consequence of this is that values in slices or maps
+// which are not zero'ed before hand, will have part of the prior values in place after decode
+// if the stream doesn't contain an update for those parts.
+//
+// This in-place update can be disabled by configuring the MapValueReset and SliceElementReset
+// decode options available on every handle.
+//
+// Furthermore, when decoding a stream map or array with length of 0 into a nil map or slice,
+// we reset the destination map or slice to a zero-length value.
+//
+// However, when decoding a stream nil, we reset the destination container
+// to its "zero" value (e.g. nil for slice/map, etc).
+//
+// Note: we allow nil values in the stream anywhere except for map keys.
+// A nil value in the encoded stream where a map key is expected is treated as an error.
+func (d *Decoder) Decode(v interface{}) (err error) {
+	// tried to use closure, as runtime optimizes defer with no params.
+	// This seemed to be causing weird issues (like circular reference found, unexpected panic, etc).
+	// Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139
+	if !debugging {
+		defer func() {
+			if x := recover(); x != nil {
+				panicValToErr(d, x, &d.err)
+				err = d.err
+			}
+		}()
+	}
+
+	d.MustDecode(v)
+	return
+}
+
+// MustDecode is like Decode, but panics if unable to Decode.
+//
+// Note: This provides insight to the code location that triggered the error.
+func (d *Decoder) MustDecode(v interface{}) {
+	// refuse to proceed if a previous run left a sticky error, or no format handle is set
+	halt.onerror(d.err)
+	if d.hh == nil {
+		halt.onerror(errNoFormatHandle)
+	}
+
+	// Top-level: v is a pointer and not nil.
+	// calls tracks re-entrant Decode() calls made by extensions.
+	d.calls++
+	d.decode(v)
+	d.calls--
+}
+
+// Release releases shared (pooled) resources.
+//
+// It is important to call Release() when done with a Decoder, so those resources
+// are released instantly for use by subsequently created Decoders.
+//
+// By default, Release() is automatically called unless the option ExplicitRelease is set.
+//
+// Deprecated: Release is a no-op as pooled resources are not used with an Decoder.
+// This method is kept for compatibility reasons only.
+func (d *Decoder) Release() {
+}
+
+// swallow skips over (discards) the next value in the stream.
+func (d *Decoder) swallow() {
+	d.d.nextValueBytes(nil)
+}
+
+// swallowErr is swallow with panics recovered into an error.
+func (d *Decoder) swallowErr() (err error) {
+	if !debugging {
+		defer func() {
+			if x := recover(); x != nil {
+				panicValToErr(d, x, &err)
+			}
+		}()
+	}
+	d.swallow()
+	return
+}
+
+// setZero sets the value pointed to by iv to its zero value.
+// The explicit type switch over common concrete pointer types avoids
+// reflection for the overwhelmingly common cases; reflection is the
+// fallback (via decSetNonNilRV2Zero).
+func setZero(iv interface{}) {
+	if iv == nil {
+		return
+	}
+	rv, ok := isNil(iv)
+	if ok {
+		// a nil pointer/interface: nothing to zero
+		return
+	}
+	// var canDecode bool
+	switch v := iv.(type) {
+	case *string:
+		*v = ""
+	case *bool:
+		*v = false
+	case *int:
+		*v = 0
+	case *int8:
+		*v = 0
+	case *int16:
+		*v = 0
+	case *int32:
+		*v = 0
+	case *int64:
+		*v = 0
+	case *uint:
+		*v = 0
+	case *uint8:
+		*v = 0
+	case *uint16:
+		*v = 0
+	case *uint32:
+		*v = 0
+	case *uint64:
+		*v = 0
+	case *float32:
+		*v = 0
+	case *float64:
+		*v = 0
+	case *complex64:
+		*v = 0
+	case *complex128:
+		*v = 0
+	case *[]byte:
+		*v = nil
+	case *Raw:
+		*v = nil
+	case *time.Time:
+		*v = time.Time{}
+	case reflect.Value:
+		decSetNonNilRV2Zero(v)
+	default:
+		// try the generated fastpath type switch, else fall back to reflection
+		if !fastpathDecodeSetZeroTypeSwitch(iv) {
+			decSetNonNilRV2Zero(rv)
+		}
+	}
+}
+
+// decSetNonNilRV2Zero sets a known-non-nil reflect.Value to its zero value,
+// dispatching on kind: interfaces and pointers get special handling so that
+// the innermost settable value is the one zeroed.
+func decSetNonNilRV2Zero(v reflect.Value) {
+	// If not decodeable (settable), we do not touch it.
+	// We considered empty'ing it if not decodeable e.g.
+	//   - if chan, drain it
+	//   - if map, clear it
+	//   - if slice or array, zero all elements up to len
+	//
+	// However, we decided instead that we either will set the
+	// whole value to the zero value, or leave AS IS.
+
+	switch v.Kind() {
+	case reflect.Interface:
+		decSetNonNilRV2Zero4Intf(v)
+	case reflect.Ptr:
+		decSetNonNilRV2Zero4Ptr(v)
+	default:
+		if v.CanSet() {
+			rvSetDirectZero(v)
+		}
+	}
+}
+
+// decSetNonNilRV2Zero4Ptr zeroes a non-nil pointer value: prefer zeroing
+// the pointed-to element; else zero the pointer itself if settable.
+func decSetNonNilRV2Zero4Ptr(v reflect.Value) {
+	ve := v.Elem()
+	if ve.CanSet() {
+		rvSetZero(ve) // we can have a pointer to an interface
+	} else if v.CanSet() {
+		rvSetZero(v)
+	}
+}
+
+// decSetNonNilRV2Zero4Intf zeroes a non-nil interface value: prefer zeroing
+// the concrete element; else zero the interface itself if settable.
+func decSetNonNilRV2Zero4Intf(v reflect.Value) {
+	ve := v.Elem()
+	if ve.CanSet() {
+		rvSetDirectZero(ve) // interfaces always have element as a non-interface
+	} else if v.CanSet() {
+		rvSetZero(v)
+	}
+}
+
+// decode is the internal dispatch for Decode/MustDecode. The concrete-type
+// switch lets the compiler optimize the common cases without reflection;
+// nil and interface cases are handled outside the switch.
+func (d *Decoder) decode(iv interface{}) {
+	// a switch with only concrete types can be optimized.
+	// consequently, we deal with nil and interfaces outside the switch.
+
+	if iv == nil {
+		d.onerror(errCannotDecodeIntoNil)
+	}
+
+	switch v := iv.(type) {
+	// case nil:
+	// case Selfer:
+	case reflect.Value:
+		if x, _ := isDecodeable(v); !x {
+			d.haltAsNotDecodeable(v)
+		}
+		d.decodeValue(v, nil)
+	case *string:
+		*v = d.stringZC(d.d.DecodeStringAsBytes())
+	case *bool:
+		*v = d.d.DecodeBool()
+	case *int:
+		*v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+	case *int8:
+		*v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8))
+	case *int16:
+		*v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16))
+	case *int32:
+		*v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+	case *int64:
+		*v = d.d.DecodeInt64()
+	case *uint:
+		*v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))
+	case *uint8:
+		*v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+	case *uint16:
+		*v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))
+	case *uint32:
+		*v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))
+	case *uint64:
+		*v = d.d.DecodeUint64()
+	case *float32:
+		*v = d.decodeFloat32()
+	case *float64:
+		*v = d.d.DecodeFloat64()
+	case *complex64:
+		// streams carry no imaginary part; decode the real part only
+		*v = complex(d.decodeFloat32(), 0)
+	case *complex128:
+		*v = complex(d.d.DecodeFloat64(), 0)
+	case *[]byte:
+		*v = d.decodeBytesInto(*v)
+	case []byte:
+		// not addressable byte slice, so do not decode into it past the length
+		b := d.decodeBytesInto(v[:len(v):len(v)])
+		if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) { // not same slice
+			copy(v, b)
+		}
+	case *time.Time:
+		*v = d.d.DecodeTime()
+	case *Raw:
+		*v = d.rawBytes()
+
+	case *interface{}:
+		d.decodeValue(rv4iptr(v), nil)
+
+	default:
+		// we can't check non-predefined types, as they might be a Selfer or extension.
+		if skipFastpathTypeSwitchInDirectCall || !fastpathDecodeTypeSwitch(iv, d) {
+			v := reflect.ValueOf(iv)
+			if x, _ := isDecodeable(v); !x {
+				d.haltAsNotDecodeable(v)
+			}
+			d.decodeValue(v, nil)
+		}
+	}
+}
+
+// decodeValue MUST be called by the actual value we want to decode into,
+// not its addr or a reference to it.
+//
+// This way, we know if it is itself a pointer, and can handle nil in
+// the stream effectively.
+//
+// Note that decodeValue will handle nil in the stream early, so that the
+// subsequent calls i.e. kXXX methods, etc do not have to handle it themselves.
+func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn) {
+	// a stream nil means: reset rv to its zero value and stop
+	if d.d.TryNil() {
+		decSetNonNilRV2Zero(rv)
+		return
+	}
+	d.decodeValueNoCheckNil(rv, fn)
+}
+
+// decodeValueNoCheckNil decodes into rv, assuming the next stream value is
+// known to be non-nil. Any levels of pointer indirection are walked down
+// (allocating nil pointers along the way) to the base non-pointer value,
+// before dispatching to the resolved codecFn.
+func (d *Decoder) decodeValueNoCheckNil(rv reflect.Value, fn *codecFn) {
+	var rvp reflect.Value
+	var rvpValid bool
+
+	// deref through every pointer level, materializing nils as we go
+	for rv.Kind() == reflect.Ptr {
+		rvpValid = true
+		if rvIsNil(rv) {
+			rvSetDirect(rv, reflect.New(rvType(rv).Elem()))
+		}
+		rvp = rv
+		rv = rv.Elem()
+	}
+
+	if fn == nil {
+		fn = d.h.fn(rvType(rv))
+	}
+	// some decode functions need an addressable (pointer) target
+	if fn.i.addrD {
+		switch {
+		case rvpValid:
+			rv = rvp
+		case rv.CanAddr():
+			rv = rvAddr(rv, fn.i.ti.ptr)
+		case fn.i.addrDf:
+			d.errorf("cannot decode into a non-pointer value")
+		}
+	}
+	fn.fd(d, &fn.i, rv)
+}
+
+// structFieldNotFound handles a stream entry with no matching struct field:
+// error out if ErrorIfNoField is configured, else skip the value.
+func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
+	// Note: rvkencname is used only if there is an error, to pass into d.errorf.
+	// Consequently, it is ok to pass in a stringView
+	// Since rvkencname may be a stringView, do NOT pass it to another function.
+	if d.h.ErrorIfNoField {
+		if index >= 0 {
+			d.errorf("no matching struct field found when decoding stream array at index %v", index)
+		} else if rvkencname != "" {
+			// deliberate string concat (not %s) so a stringView is not passed on
+			d.errorf("no matching struct field found when decoding stream map with key " + rvkencname)
+		}
+	}
+	d.swallow()
+}
+
+// arrayCannotExpand reports (if configured to error) that a fixed-length
+// array cannot grow to hold the stream's length.
+func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
+	if d.h.ErrorIfNoArrayExpand {
+		d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen)
+	}
+}
+
+// haltAsNotDecodeable raises the most specific error for a value that
+// cannot be decoded into.
+func (d *Decoder) haltAsNotDecodeable(rv reflect.Value) {
+	if !rv.IsValid() {
+		d.onerror(errCannotDecodeIntoNil)
+	}
+	// check if an interface can be retrieved, before grabbing an interface
+	if !rv.CanInterface() {
+		d.errorf("cannot decode into a value without an interface: %v", rv)
+	}
+	d.errorf("cannot decode into value of kind: %v, %#v", rv.Kind(), rv2i(rv))
+}
+
+// depthIncr tracks container nesting; errors out past the configured max
+// depth to guard against malicious deeply-nested input.
+func (d *Decoder) depthIncr() {
+	d.depth++
+	if d.depth >= d.maxdepth {
+		d.onerror(errMaxDepthExceeded)
+	}
+}
+
+// depthDecr unwinds one level of container nesting.
+func (d *Decoder) depthDecr() {
+	d.depth--
+}
+
+// Possibly get an interned version of a string, iff InternString=true and decoding a map key.
+//
+// This should mostly be used for map keys, where the key type is string.
+// This is because keys of a map/struct are typically reused across many objects.
+func (d *Decoder) string(v []byte) (s string) {
+	// intern only map keys of moderate length; everything else gets a plain copy
+	if d.is == nil || d.c != containerMapKey || len(v) < 2 || len(v) > internMaxStrLen {
+		return string(v)
+	}
+	return d.is.string(v)
+}
+
+// zerocopy reports whether views into the input buffer may be returned
+// directly (bytes input AND ZeroCopy configured on the handle).
+func (d *Decoder) zerocopy() bool {
+	return d.bytes && d.h.ZeroCopy
+}
+
+// decodeBytesInto is a convenience delegate function to decDriver.DecodeBytes.
+// It ensures that `in` is not a nil byte, before calling decDriver.DecodeBytes,
+// as decDriver.DecodeBytes treats a nil as a hint to use its internal scratch buffer.
+func (d *Decoder) decodeBytesInto(in []byte) (v []byte) {
+	if in == nil {
+		in = []byte{}
+	}
+	return d.d.DecodeBytes(in)
+}
+
+// rawBytes returns the raw encoded bytes of the next value in the stream,
+// copying defensively when the result would otherwise alias the input buffer.
+func (d *Decoder) rawBytes() (v []byte) {
+	// ensure that this is not a view into the bytes
+	// i.e. if necessary, make new copy always.
+	v = d.d.nextValueBytes([]byte{})
+	if d.bytes && !d.h.ZeroCopy {
+		v0 := v
+		v = make([]byte, len(v))
+		copy(v, v0)
+	}
+	return
+}
+
+// wrapErr wraps v with format name and read-position context into *err.
+func (d *Decoder) wrapErr(v error, err *error) {
+	*err = wrapCodecErr(v, d.hh.Name(), d.NumBytesRead(), false)
+}
+
+// NumBytesRead returns the number of bytes read
+func (d *Decoder) NumBytesRead() int {
+	return int(d.r().numread())
+}
+
+// decodeFloat32 will delegate to an appropriate DecodeFloat32 implementation (if exists),
+// else if will call DecodeFloat64 and ensure the value doesn't overflow.
+//
+// NOTE(review): a prior comment here claimed a float64 return; the function
+// returns float32 (the json driver has a native 32-bit path, others go
+// through DecodeFloat64 with an overflow check).
+func (d *Decoder) decodeFloat32() float32 {
+	if d.js {
+		return d.jsondriver().DecodeFloat32() // custom implementation for 32-bit
+	}
+	return float32(chkOvf.Float32V(d.d.DecodeFloat64()))
+}
+
+// ---- container tracking
+// Note: We update the .c after calling the callback.
+// This way, the callback can know what the last status was.
+
+// MARKER: do not call mapEnd if mapStart returns containerLenNil.
+
+// containerNext reports whether element j exists: by count when the
+// container length is known, else by probing for the break marker.
+func (d *Decoder) containerNext(j, containerLen int, hasLen bool) bool {
+	// return (hasLen && j < containerLen) || !(hasLen || slh.d.checkBreak())
+	if hasLen {
+		return j < containerLen
+	}
+	return !d.checkBreak()
+}
+
+// mapStart records entry into a map container (unless the stream value was nil).
+func (d *Decoder) mapStart(v int) int {
+	if v != containerLenNil {
+		d.depthIncr()
+		d.c = containerMapStart
+	}
+	return v
+}
+
+// mapElemKey positions the decoder before a map key (json needs a driver hook).
+func (d *Decoder) mapElemKey() {
+	if d.js {
+		d.jsondriver().ReadMapElemKey()
+	}
+	d.c = containerMapKey
+}
+
+// mapElemValue positions the decoder before a map value (json needs a driver hook).
+func (d *Decoder) mapElemValue() {
+	if d.js {
+		d.jsondriver().ReadMapElemValue()
+	}
+	d.c = containerMapValue
+}
+
+// mapEnd finishes a map container and unwinds depth tracking.
+func (d *Decoder) mapEnd() {
+	d.d.ReadMapEnd()
+	d.depthDecr()
+	d.c = 0
+}
+
+// arrayStart records entry into an array container (unless the stream value was nil).
+func (d *Decoder) arrayStart(v int) int {
+	if v != containerLenNil {
+		d.depthIncr()
+		d.c = containerArrayStart
+	}
+	return v
+}
+
+// arrayElem positions the decoder before an array element (json needs a driver hook).
+func (d *Decoder) arrayElem() {
+	if d.js {
+		d.jsondriver().ReadArrayElem()
+	}
+	d.c = containerArrayElem
+}
+
+// arrayEnd finishes an array container and unwinds depth tracking.
+func (d *Decoder) arrayEnd() {
+	d.d.ReadArrayEnd()
+	d.depthDecr()
+	d.c = 0
+}
+
+// interfaceExtConvertAndDecode runs an InterfaceExt round-trip: convert v,
+// decode the stream into the converted (addressable) form, then hand the
+// result back to the extension via UpdateExt.
+func (d *Decoder) interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt) {
+	// var v interface{} = ext.ConvertExt(rv)
+	// d.d.decode(&v)
+	// ext.UpdateExt(rv, v)
+
+	// assume v is a pointer:
+	// - if struct|array, pass as is to ConvertExt
+	// - else make it non-addressable and pass to ConvertExt
+	// - make return value from ConvertExt addressable
+	// - decode into it
+	// - return the interface for passing into UpdateExt.
+	// - interface should be a pointer if struct|array, else a value
+
+	var s interface{}
+	rv := reflect.ValueOf(v)
+	rv2 := rv.Elem()
+	rvk := rv2.Kind()
+	if rvk == reflect.Struct || rvk == reflect.Array {
+		s = ext.ConvertExt(v)
+	} else {
+		s = ext.ConvertExt(rv2i(rv2))
+	}
+	rv = reflect.ValueOf(s)
+
+	// We cannot use isDecodeable here, as the value converted may be nil,
+	// or it may not be nil but is not addressable and thus we cannot extend it, etc.
+	// Instead, we just ensure that the value is addressable.
+
+	if !rv.CanAddr() {
+		rvk = rv.Kind()
+		rv2 = d.oneShotAddrRV(rvType(rv), rvk)
+		if rvk == reflect.Interface {
+			rvSetIntf(rv2, rv)
+		} else {
+			rvSetDirect(rv2, rv)
+		}
+		rv = rv2
+	}
+
+	d.decodeValue(rv, nil)
+	ext.UpdateExt(v, rv2i(rv))
+}
+
+// sideDecode decodes bs into v using this same Decoder, by swapping in a
+// bytes reader and fresh state, then restoring all saved state via defer.
+// This avoids allocating a second Decoder for nested/side decodes.
+func (d *Decoder) sideDecode(v interface{}, basetype reflect.Type, bs []byte) {
+	// NewDecoderBytes(bs, d.hh).decodeValue(baseRV(v), d.h.fnNoExt(basetype))
+
+	// snapshot every piece of reader/decoder state now; restore on exit
+	defer func(rb bytesDecReader, bytes bool,
+		c containerState, dbs decByteState, depth int16, r decReader, state interface{}) {
+		d.rb = rb
+		d.bytes = bytes
+		d.c = c
+		d.decByteState = dbs
+		d.depth = depth
+		d.decReader = r
+		d.d.restoreState(state)
+	}(d.rb, d.bytes, d.c, d.decByteState, d.depth, d.decReader, d.d.captureState())
+
+	// d.rb.reset(in)
+	// full-capacity-clamped view so the side decode cannot grow into bs
+	d.rb = bytesDecReader{bs[:len(bs):len(bs)], 0}
+	d.bytes = true
+	d.decReader = &d.rb
+	d.d.resetState()
+	d.c = 0
+	d.decByteState = decByteStateNone
+	d.depth = 0
+
+	// must call using fnNoExt
+	d.decodeValue(baseRV(v), d.h.fnNoExt(basetype))
+}
+
+// fauxUnionReadRawBytes reads raw bytes from the stream into the faux union,
+// as a string (per asString or the RawToString option) or as bytes.
+func (d *Decoder) fauxUnionReadRawBytes(asString bool) {
+	if asString || d.h.RawToString {
+		d.n.v = valueTypeString
+		// fauxUnion is only used within DecodeNaked calls; consequently, we should try to intern.
+		d.n.s = d.stringZC(d.d.DecodeBytes(nil))
+	} else {
+		d.n.v = valueTypeBytes
+		d.n.l = d.d.DecodeBytes([]byte{})
+	}
+}
+
+// oneShotAddrRV returns an addressable zero value of rvt, using the
+// per-decoder transient value where the kind makes that safe.
+func (d *Decoder) oneShotAddrRV(rvt reflect.Type, rvk reflect.Kind) reflect.Value {
+	if decUseTransient &&
+		(numBoolStrSliceBitset.isset(byte(rvk)) ||
+			((rvk == reflect.Struct || rvk == reflect.Array) &&
+				d.h.getTypeInfo(rt2id(rvt), rvt).flagCanTransient)) {
+		return d.perType.TransientAddrK(rvt, rvk)
+	}
+	return rvZeroAddrK(rvt, rvk)
+}
+
+// --------------------------------------------------
+
+// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
+// A slice can be set from a map or array in stream. This supports the MapBySlice interface.
+//
+// Note: if IsNil, do not call ElemContainerState.
+type decSliceHelper struct {
+	d     *Decoder
+	ct    valueType
+	Array bool
+	IsNil bool
+}
+
+// decSliceHelperStart inspects the stream container type and returns a
+// helper plus the element count (a map's key+value pairs count double).
+func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) {
+	x.ct = d.d.ContainerType()
+	x.d = d
+	switch x.ct {
+	case valueTypeNil:
+		x.IsNil = true
+	case valueTypeArray:
+		x.Array = true
+		clen = d.arrayStart(d.d.ReadArrayStart())
+	case valueTypeMap:
+		clen = d.mapStart(d.d.ReadMapStart())
+		// a map of n entries yields 2n stream elements (keys and values)
+		clen += clen
+	default:
+		d.errorf("only encoded map or array can be decoded into a slice (%d)", x.ct)
+	}
+	return
+}
+
+// End closes whichever container kind was started (no-op for nil).
+func (x decSliceHelper) End() {
+	if x.IsNil {
+	} else if x.Array {
+		x.d.arrayEnd()
+	} else {
+		x.d.mapEnd()
+	}
+}
+
+// ElemContainerState signals element position: array elements directly;
+// for map-backed slices, even indices are keys and odd indices values.
+func (x decSliceHelper) ElemContainerState(index int) {
+	// Note: if isnil, clen=0, so we never call into ElemContainerState
+
+	if x.Array {
+		x.d.arrayElem()
+	} else if index&1 == 0 { // index%2 == 0 {
+		x.d.mapElemKey()
+	} else {
+		x.d.mapElemValue()
+	}
+}
+
+// arrayCannotExpand reports the expansion failure (per config), then drains
+// the remaining stream elements so the decoder stays consistent.
+func (x decSliceHelper) arrayCannotExpand(hasLen bool, lenv, j, containerLenS int) {
+	x.d.arrayCannotExpand(lenv, j+1)
+	// drain completely and return
+	x.ElemContainerState(j)
+	x.d.swallow()
+	j++
+	for ; x.d.containerNext(j, containerLenS, hasLen); j++ {
+		x.ElemContainerState(j)
+		x.d.swallow()
+	}
+	x.End()
+}
+
+// decNextValueBytesHelper helps with NextValueBytes calls.
+//
+// Typical usage:
+//   - each Handle's decDriver will implement a high level nextValueBytes,
+//     which will track the current cursor, delegate to a nextValueBytesR
+//     method, and then potentially call bytesRdV at the end.
+//
+// See simple.go for typical usage model.
+type decNextValueBytesHelper struct {
+	d *Decoder
+}
+
+// append1 appends b to *v, but only when accumulating is needed
+// (i.e. v is non-nil and input is not an in-memory bytes buffer).
+func (x decNextValueBytesHelper) append1(v *[]byte, b byte) {
+	if *v != nil && !x.d.bytes {
+		*v = append(*v, b)
+	}
+}
+
+// appendN appends b... to *v under the same condition as append1.
+func (x decNextValueBytesHelper) appendN(v *[]byte, b ...byte) {
+	if *v != nil && !x.d.bytes {
+		*v = append(*v, b...)
+	}
+}
+
+// bytesRdV, for bytes input, sets *v to the view of the input buffer
+// spanning startpos to the current cursor (no copying).
+func (x decNextValueBytesHelper) bytesRdV(v *[]byte, startpos uint) {
+	if x.d.bytes {
+		*v = x.d.rb.b[startpos:x.d.rb.c]
+	}
+}
+
// decNegintPosintFloatNumberHelper is used for formats that are binary
// and have distinct ways of storing positive integers vs negative integers
// vs floats, which are uniquely identified by the byte descriptor.
//
// Currently, these formats are binc, cbor and simple.
type decNegintPosintFloatNumberHelper struct {
	d *Decoder // backing Decoder: supplies the driver (d.d) and error reporting
}
+
+func (x decNegintPosintFloatNumberHelper) uint64(ui uint64, neg, ok bool) uint64 {
+ if ok && !neg {
+ return ui
+ }
+ return x.uint64TryFloat(ok)
+}
+
// uint64TryFloat handles the fallback cases for uint64: a decoded negative
// integer (ok==true here means neg was true) is an error; otherwise try to
// decode a float, accepting it only if it is non-negative with no fraction.
func (x decNegintPosintFloatNumberHelper) uint64TryFloat(ok bool) (ui uint64) {
	if ok { // neg = true
		x.d.errorf("assigning negative signed value to unsigned type")
	}
	f, ok := x.d.d.decFloat()
	if ok && f >= 0 && noFrac64(math.Float64bits(f)) {
		ui = uint64(f)
	} else {
		x.d.errorf("invalid number loading uint64, with descriptor: %v", x.d.d.descBd())
	}
	return ui
}
+
// decNegintPosintFloatNumberHelperInt64v converts a decoded unsigned
// magnitude into a signed int64. When incrIfNeg (callers pass x.d.cbor:
// cbor stores a negative number n as the magnitude -1-n), the magnitude is
// incremented before negation. Overflow is checked via chkOvf.SignedIntV.
func decNegintPosintFloatNumberHelperInt64v(ui uint64, neg, incrIfNeg bool) (i int64) {
	if neg && incrIfNeg {
		ui++
	}
	i = chkOvf.SignedIntV(ui)
	if neg {
		i = -i
	}
	return
}
+
+func (x decNegintPosintFloatNumberHelper) int64(ui uint64, neg, ok bool) (i int64) {
+ if ok {
+ return decNegintPosintFloatNumberHelperInt64v(ui, neg, x.d.cbor)
+ }
+ // return x.int64TryFloat()
+ // }
+ // func (x decNegintPosintFloatNumberHelper) int64TryFloat() (i int64) {
+ f, ok := x.d.d.decFloat()
+ if ok && noFrac64(math.Float64bits(f)) {
+ i = int64(f)
+ } else {
+ x.d.errorf("invalid number loading uint64, with descriptor: %v", x.d.d.descBd())
+ }
+ return
+}
+
+func (x decNegintPosintFloatNumberHelper) float64(f float64, ok bool) float64 {
+ if ok {
+ return f
+ }
+ return x.float64TryInteger()
+}
+
// float64TryInteger is the fallback for float64: decode an integer from the
// stream and convert it, erroring if the descriptor is not an integer either.
func (x decNegintPosintFloatNumberHelper) float64TryInteger() float64 {
	ui, neg, ok := x.d.d.decInteger()
	if !ok {
		x.d.errorf("invalid descriptor for float: %v", x.d.d.descBd())
	}
	return float64(decNegintPosintFloatNumberHelperInt64v(ui, neg, x.d.cbor))
}
+
+// isDecodeable checks if value can be decoded into
+//
+// decode can take any reflect.Value that is a inherently addressable i.e.
+// - non-nil chan (we will SEND to it)
+// - non-nil slice (we will set its elements)
+// - non-nil map (we will put into it)
+// - non-nil pointer (we can "update" it)
+// - func: no
+// - interface: no
+// - array: if canAddr=true
+// - any other value pointer: if canAddr=true
+func isDecodeable(rv reflect.Value) (canDecode bool, reason decNotDecodeableReason) {
+ switch rv.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.Chan, reflect.Map:
+ canDecode = !rvIsNil(rv)
+ reason = decNotDecodeableReasonNilReference
+ case reflect.Func, reflect.Interface, reflect.Invalid, reflect.UnsafePointer:
+ reason = decNotDecodeableReasonBadKind
+ default:
+ canDecode = rv.CanAddr()
+ reason = decNotDecodeableReasonNonAddrValue
+ }
+ return
+}
+
// decByteSlice reads clen bytes from r, reusing bs when it already has
// sufficient length or capacity. Otherwise it grows a fresh slice in
// increments (each step capped by decInferLen with maxInitLen), so a huge
// claimed stream length cannot force one huge up-front allocation.
func decByteSlice(r *decRd, clen, maxInitLen int, bs []byte) (bsOut []byte) {
	if clen == 0 {
		return zeroByteSlice
	}
	if len(bs) == clen {
		// exact fit: read directly into bs
		bsOut = bs
		r.readb(bsOut)
	} else if cap(bs) >= clen {
		// enough capacity: reslice and read
		bsOut = bs[:clen]
		r.readb(bsOut)
	} else {
		// grow incrementally: allocate, copy what we have, read the new tail
		var len2 int
		for len2 < clen {
			len3 := decInferLen(clen-len2, maxInitLen, 1)
			bs3 := bsOut
			bsOut = make([]byte, len2+len3)
			copy(bsOut, bs3)
			r.readb(bsOut[len2:])
			len2 += len3
		}
	}
	return
}
+
+// decInferLen will infer a sensible length, given the following:
+// - clen: length wanted.
+// - maxlen: max length to be returned.
+// if <= 0, it is unset, and we infer it based on the unit size
+// - unit: number of bytes for each element of the collection
+func decInferLen(clen, maxlen, unit int) int {
+ // anecdotal testing showed increase in allocation with map length of 16.
+ // We saw same typical alloc from 0-8, then a 20% increase at 16.
+ // Thus, we set it to 8.
+ const (
+ minLenIfUnset = 8
+ maxMem = 256 * 1024 // 256Kb Memory
+ )
+
+ // handle when maxlen is not set i.e. <= 0
+
+ // clen==0: use 0
+ // maxlen<=0, clen<0: use default
+ // maxlen> 0, clen<0: use default
+ // maxlen<=0, clen>0: infer maxlen, and cap on it
+ // maxlen> 0, clen>0: cap at maxlen
+
+ if clen == 0 || clen == containerLenNil {
+ return 0
+ }
+ if clen < 0 {
+ // if unspecified, return 64 for bytes, ... 8 for uint64, ... and everything else
+ clen = 64 / unit
+ if clen > minLenIfUnset {
+ return clen
+ }
+ return minLenIfUnset
+ }
+ if unit <= 0 {
+ return clen
+ }
+ if maxlen <= 0 {
+ maxlen = maxMem / unit
+ }
+ if clen < maxlen {
+ return clen
+ }
+ return maxlen
+}
diff --git a/vendor/github.com/ugorji/go/codec/doc.go b/vendor/github.com/ugorji/go/codec/doc.go
new file mode 100644
index 000000000..b390f9a8f
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/doc.go
@@ -0,0 +1,228 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+Package codec provides a
+High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library
+for binc, msgpack, cbor, json.
+
+Supported Serialization formats are:
+
+ - msgpack: https://github.com/msgpack/msgpack
+ - binc: http://github.com/ugorji/binc
+ - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
+ - json: http://json.org http://tools.ietf.org/html/rfc7159
+ - simple:
+
+This package will carefully use 'package unsafe' for performance reasons in specific places.
+You can build without unsafe use by passing the safe or appengine tag
+i.e. 'go install -tags=codec.safe ...'.
+
+This library works with both the standard `gc` and the `gccgo` compilers.
+
+For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
+
+The idiomatic Go support is as seen in other encoding packages in
+the standard library (ie json, xml, gob, etc).
+
+Rich Feature Set includes:
+
+ - Simple but extremely powerful and feature-rich API
+ - Support for go 1.4 and above, while selectively using newer APIs for later releases
+ - Excellent code coverage ( > 90% )
+ - Very High Performance.
+ Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
+ - Careful selected use of 'unsafe' for targeted performance gains.
+ - 100% safe mode supported, where 'unsafe' is not used at all.
+ - Lock-free (sans mutex) concurrency for scaling to 100's of cores
+ - In-place updates during decode, with option to zero value in maps and slices prior to decode
+ - Coerce types where appropriate
+ e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc
+ - Corner Cases:
+ Overflows, nil maps/slices, nil values in streams are handled correctly
+ - Standard field renaming via tags
+ - Support for omitting empty fields during an encoding
+ - Encoding from any value and decoding into pointer to any value
+ (struct, slice, map, primitives, pointers, interface{}, etc)
+ - Extensions to support efficient encoding/decoding of any named types
+ - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
+ - Support using existence of `IsZero() bool` to determine if a value is a zero value.
+ Analogous to time.Time.IsZero() bool.
+ - Decoding without a schema (into a interface{}).
+ Includes Options to configure what specific map or slice type to use
+ when decoding an encoded list or map into a nil interface{}
+ - Mapping a non-interface type to an interface, so we can decode appropriately
+ into any interface type with a correctly configured non-interface value.
+ - Encode a struct as an array, and decode struct from an array in the data stream
+ - Option to encode struct keys as numbers (instead of strings)
+ (to support structured streams with fields encoded as numeric codes)
+ - Comprehensive support for anonymous fields
+ - Fast (no-reflection) encoding/decoding of common maps and slices
+ - Code-generation for faster performance, supported in go 1.6+
+ - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
+ - Support indefinite-length formats to enable true streaming
+ (for formats which support it e.g. json, cbor)
+ - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
+ This mostly applies to maps, where iteration order is non-deterministic.
+ - NIL in data stream decoded as zero value
+ - Never silently skip data when decoding.
+ User decides whether to return an error or silently skip data when keys or indexes
+ in the data stream do not map to fields in the struct.
+ - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
+ - Encode/Decode from/to chan types (for iterative streaming support)
+ - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
+ - Provides a RPC Server and Client Codec for net/rpc communication protocol.
+ - Handle unique idiosyncrasies of codecs e.g.
+ - For messagepack, configure how ambiguities in handling raw bytes are resolved
+ - For messagepack, provide rpc server/client codec to support
+ msgpack-rpc protocol defined at:
+ https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
+
+Extension Support
+
+Users can register a function to handle the encoding or decoding of
+their custom types.
+
+There are no restrictions on what the custom type can be. Some examples:
+
+ type BisSet []int
+ type BitSet64 uint64
+ type UUID string
+ type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
+ type GifImage struct { ... }
+
+As an illustration, MyStructWithUnexportedFields would normally be
+encoded as an empty map because it has no exported fields, while UUID
+would be encoded as a string. However, with extension support, you can
+encode any of these however you like.
+
+There is also seamless support provided for registering an extension (with a tag)
+but letting the encoding mechanism default to the standard way.
+
+Custom Encoding and Decoding
+
+This package maintains symmetry in the encoding and decoding halves.
+We determine how to encode or decode by walking this decision tree
+
+ - is there an extension registered for the type?
+ - is type a codec.Selfer?
+ - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
+ - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
+ - is format text-based, and type an encoding.TextMarshaler and TextUnmarshaler?
+ - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
+
+This symmetry is important to reduce chances of issues happening because the
+encoding and decoding sides are out of sync e.g. decoded via very specific
+encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
+
+Consequently, if a type only defines one-half of the symmetry
+(e.g. it implements UnmarshalJSON() but not MarshalJSON() ),
+then that type doesn't satisfy the check and we will continue walking down the
+decision tree.
+
+RPC
+
+RPC Client and Server Codecs are implemented, so the codecs can be used
+with the standard net/rpc package.
+
+Usage
+
+The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
+
+The Encoder and Decoder are NOT safe for concurrent use.
+
+Consequently, the usage model is basically:
+
+ - Create and initialize the Handle before any use.
+ Once created, DO NOT modify it.
+ - Multiple Encoders or Decoders can now use the Handle concurrently.
+ They only read information off the Handle (never write).
+ - However, each Encoder or Decoder MUST not be used concurrently
+ - To re-use an Encoder/Decoder, call Reset(...) on it first.
+ This allows you use state maintained on the Encoder/Decoder.
+
+Sample usage model:
+
+ // create and configure Handle
+ var (
+ bh codec.BincHandle
+ mh codec.MsgpackHandle
+ ch codec.CborHandle
+ )
+
+ mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
+
+ // configure extensions
+ // e.g. for msgpack, define functions and enable Time support for tag 1
+ // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
+
+ // create and use decoder/encoder
+ var (
+ r io.Reader
+ w io.Writer
+ b []byte
+ h = &bh // or mh to use msgpack
+ )
+
+ dec = codec.NewDecoder(r, h)
+ dec = codec.NewDecoderBytes(b, h)
+ err = dec.Decode(&v)
+
+ enc = codec.NewEncoder(w, h)
+ enc = codec.NewEncoderBytes(&b, h)
+ err = enc.Encode(v)
+
+ //RPC Server
+ go func() {
+ for {
+ conn, err := listener.Accept()
+ rpcCodec := codec.GoRpc.ServerCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
+ rpc.ServeCodec(rpcCodec)
+ }
+ }()
+
+ //RPC Communication (client side)
+ conn, err = net.Dial("tcp", "localhost:5555")
+ rpcCodec := codec.GoRpc.ClientCodec(conn, h)
+ //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
+ client := rpc.NewClientWithCodec(rpcCodec)
+
+Running Tests
+
+To run tests, use the following:
+
+ go test
+
+To run the full suite of tests, use the following:
+
+ go test -tags alltests -run Suite
+
+You can run the tag 'codec.safe' to run tests or build in safe mode. e.g.
+
+ go test -tags codec.safe -run Json
+ go test -tags "alltests codec.safe" -run Suite
+
+Running Benchmarks
+
+ cd bench
+ go test -bench . -benchmem -benchtime 1s
+
+Please see http://github.com/ugorji/go-codec-bench .
+
+Caveats
+
+Struct fields matching the following are ignored during encoding and decoding
+ - struct tag value set to -
+ - func, complex numbers, unsafe pointers
+ - unexported and not embedded
+ - unexported and embedded and not struct kind
+ - unexported and embedded pointers (from go1.10)
+
+Every other field in a struct will be encoded/decoded.
+
+Embedded fields are encoded as if they exist in the top-level struct,
+with some caveats. See Encode documentation.
+
+*/
+package codec
diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go
new file mode 100644
index 000000000..e411bdb81
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/encode.go
@@ -0,0 +1,1479 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "encoding"
+ "errors"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+)
+
+// defEncByteBufSize is the default size of []byte used
+// for bufio buffer or []byte (when nil passed)
+const defEncByteBufSize = 1 << 10 // 4:16, 6:64, 8:256, 10:1024
+
+var errEncoderNotInitialized = errors.New("Encoder not initialized")
+
// encDriver abstracts the actual codec (binc vs msgpack, etc)
type encDriver interface {
	EncodeNil()
	EncodeInt(i int64)
	EncodeUint(i uint64)
	EncodeBool(b bool)
	EncodeFloat32(f float32)
	EncodeFloat64(f float64)
	EncodeRawExt(re *RawExt)
	EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext)
	// EncodeString using cUTF8, honor'ing StringToRaw flag
	EncodeString(v string)
	EncodeStringBytesRaw(v []byte)
	EncodeTime(time.Time)
	// container delimiters: formats that use explicit tokens emit them here
	WriteArrayStart(length int)
	WriteArrayEnd()
	WriteMapStart(length int)
	WriteMapEnd()

	// reset will reset current encoding runtime state, and cached information from the handle
	reset()

	// encoder returns the Encoder this driver is bound to
	encoder() *Encoder

	driverStateManager
}
+
// encDriverContainerTracker is implemented by drivers that need a signal
// per container element (e.g. to write separators between elements,
// between map keys and values, etc).
type encDriverContainerTracker interface {
	WriteArrayElem()
	WriteMapElemKey()
	WriteMapElemValue()
}
+
// encDriverNoState is embedded by drivers that keep no resettable runtime
// state, providing no-op implementations of reset and driverStateManager.
type encDriverNoState struct{}

func (encDriverNoState) captureState() interface{}  { return nil }
func (encDriverNoState) reset()                     {}
func (encDriverNoState) resetState()                {}
func (encDriverNoState) restoreState(v interface{}) {}
+
// encDriverNoopContainerWriter is embedded by drivers whose format needs
// no explicit container delimiter tokens (no-op Write*Start/End methods).
type encDriverNoopContainerWriter struct{}

func (encDriverNoopContainerWriter) WriteArrayStart(length int) {}
func (encDriverNoopContainerWriter) WriteArrayEnd()             {}
func (encDriverNoopContainerWriter) WriteMapStart(length int)   {}
func (encDriverNoopContainerWriter) WriteMapEnd()               {}
+
// encStructFieldObj[Slice] is used for sorting when there are missing fields and canonical flag is set
type encStructFieldObj struct {
	key   string        // encoded field name (the sort key)
	rv    reflect.Value // field value, used when isRv
	intf  interface{}   // missing-field value, used when !isRv
	ascii bool          // whether key is ascii-alphanumeric (fast-path key encoding)
	isRv  bool          // true: encode rv; false: encode intf
}

// encStructFieldObjSlice sorts encStructFieldObj entries by key,
// implementing sort.Interface.
type encStructFieldObjSlice []encStructFieldObj

func (p encStructFieldObjSlice) Len() int      { return len(p) }
func (p encStructFieldObjSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
func (p encStructFieldObjSlice) Less(i, j int) bool {
	return p[uint(i)].key < p[uint(j)].key
}
+
// EncodeOptions captures configuration options during encode.
type EncodeOptions struct {
	// WriterBufferSize is the size of the buffer used when writing.
	//
	// if > 0, we use a smart buffer internally for performance purposes.
	WriterBufferSize int

	// ChanRecvTimeout is the timeout used when selecting from a chan.
	//
	// Configuring this controls how we receive from a chan during the encoding process.
	//   - If ==0, we only consume the elements currently available in the chan.
	//   - if <0, we consume until the chan is closed.
	//   - If >0, we consume until this timeout.
	ChanRecvTimeout time.Duration

	// StructToArray specifies to encode a struct as an array, and not as a map
	StructToArray bool

	// Canonical representation means that encoding a value will always result in the same
	// sequence of bytes.
	//
	// This only affects maps, as the iteration order for maps is random.
	//
	// The implementation MAY use the natural sort order for the map keys if possible:
	//
	//   - If there is a natural sort order (ie for number, bool, string or []byte keys),
	//     then the map keys are first sorted in natural order and then written
	//     with corresponding map values to the stream.
	//   - If there is no natural sort order, then the map keys will first be
	//     encoded into []byte, and then sorted,
	//     before writing the sorted keys and the corresponding map values to the stream.
	//
	Canonical bool

	// CheckCircularRef controls whether we check for circular references
	// and error fast during an encode.
	//
	// If enabled, an error is received if a pointer to a struct
	// references itself either directly or through one of its fields (iteratively).
	//
	// This is opt-in, as there may be a performance hit to checking circular references.
	CheckCircularRef bool

	// RecursiveEmptyCheck controls how we determine whether a value is empty.
	//
	// If true, we descend into interfaces and pointers to recursively check if value is empty.
	//
	// We *might* check struct fields one by one to see if empty
	// (if we cannot directly check if a struct value is equal to its zero value).
	// If so, we honor IsZero, Comparable, IsCodecEmpty(), etc.
	// Note: This *may* make OmitEmpty more expensive due to the large number of reflect calls.
	//
	// If false, we check if the value is equal to its zero value (newly allocated state).
	RecursiveEmptyCheck bool

	// Raw controls whether we encode Raw values.
	// This is a "dangerous" option and must be explicitly set.
	// If set, we blindly encode Raw values as-is, without checking
	// if they are a correct representation of a value in that format.
	// If unset, we error out.
	Raw bool

	// StringToRaw controls how strings are encoded.
	//
	// As a go string is just an (immutable) sequence of bytes,
	// it can be encoded either as raw bytes or as a UTF string.
	//
	// By default, strings are encoded as UTF-8.
	// but can be treated as []byte during an encode.
	//
	// Note that things which we know (by definition) to be UTF-8
	// are ALWAYS encoded as UTF-8 strings.
	// These include encoding.TextMarshaler, time.Format calls, struct field names, etc.
	StringToRaw bool

	// OptimumSize controls whether we optimize for the smallest size.
	//
	// Some formats will use this flag to determine whether to encode
	// in the smallest size possible, even if it takes slightly longer.
	//
	// For example, some formats that support half-floats might check if it is possible
	// to store a float64 as a half float. Doing this check has a small performance cost,
	// but the benefit is that the encoded message will be smaller.
	OptimumSize bool

	// NoAddressableReadonly controls whether we try to force a non-addressable value
	// to be addressable so we can call a pointer method on it e.g. for types
	// that support Selfer, json.Marshaler, etc.
	//
	// Use it in the very rare occurrence that your types modify a pointer value when calling
	// an encode callback function e.g. JsonMarshal, TextMarshal, BinaryMarshal or CodecEncodeSelf.
	NoAddressableReadonly bool
}
+
+// ---------------------------------------------
+
// rawExt encodes a *RawExt value via the driver.
func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeRawExt(rv2i(rv).(*RawExt))
}

// ext encodes a value through its registered extension (tag + Ext handler).
func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeExt(rv2i(rv), f.ti.rt, f.xfTag, f.xfFn)
}

// selferMarshal lets a Selfer type encode itself.
func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) {
	rv2i(rv).(Selfer).CodecEncodeSelf(e)
}

// binaryMarshal encodes via encoding.BinaryMarshaler, writing the bytes raw.
func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) {
	bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary()
	e.marshalRaw(bs, fnerr)
}

// textMarshal encodes via encoding.TextMarshaler, writing the bytes as UTF-8.
func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) {
	bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText()
	e.marshalUtf8(bs, fnerr)
}

// jsonMarshal encodes via json.Marshaler, writing the bytes as-is.
func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) {
	bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON()
	e.marshalAsis(bs, fnerr)
}

// raw writes a pre-encoded Raw value directly to the stream.
func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) {
	e.rawBytes(rv2i(rv).(Raw))
}
+
// encodeComplex64 encodes the real part of v as a float32.
// A non-zero imaginary part is an error (the formats have no complex type).
func (e *Encoder) encodeComplex64(v complex64) {
	if imag(v) != 0 {
		e.errorf("cannot encode complex number: %v, with imaginary values: %v", v, imag(v))
	}
	e.e.EncodeFloat32(real(v))
}

// encodeComplex128 encodes the real part of v as a float64.
// A non-zero imaginary part is an error (the formats have no complex type).
func (e *Encoder) encodeComplex128(v complex128) {
	if imag(v) != 0 {
		e.errorf("cannot encode complex number: %v, with imaginary values: %v", v, imag(v))
	}
	e.e.EncodeFloat64(real(v))
}
+
// The k<Kind> methods below are the kind-dispatched encoders for Go's
// primitive kinds: each extracts the primitive via its rvGet* accessor
// and forwards it to the format driver (e.e).

func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeBool(rvGetBool(rv))
}

func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeTime(rvGetTime(rv))
}

func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeString(rvGetString(rv))
}

func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeFloat32(rvGetFloat32(rv))
}

func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeFloat64(rvGetFloat64(rv))
}

func (e *Encoder) kComplex64(f *codecFnInfo, rv reflect.Value) {
	e.encodeComplex64(rvGetComplex64(rv))
}

func (e *Encoder) kComplex128(f *codecFnInfo, rv reflect.Value) {
	e.encodeComplex128(rvGetComplex128(rv))
}

func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeInt(int64(rvGetInt(rv)))
}

func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeInt(int64(rvGetInt8(rv)))
}

func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeInt(int64(rvGetInt16(rv)))
}

func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeInt(int64(rvGetInt32(rv)))
}

func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeInt(int64(rvGetInt64(rv)))
}

func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeUint(uint64(rvGetUint(rv)))
}

func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeUint(uint64(rvGetUint8(rv)))
}

func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeUint(uint64(rvGetUint16(rv)))
}

func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeUint(uint64(rvGetUint32(rv)))
}

func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeUint(uint64(rvGetUint64(rv)))
}

func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) {
	e.e.EncodeUint(uint64(rvGetUintptr(rv)))
}

// kErr is the catch-all for kinds with no encoder: it always errors.
func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) {
	e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv)
}
+
// chanToSlice drains channel rv into a new slice of type rtslice, honoring
// the ChanRecvTimeout semantics:
//   - timeout <  0: receive until the channel is closed
//   - timeout == 0: receive only what is immediately available (default case)
//   - timeout >  0: receive until the timer fires
//
// NOTE(review): for timeout > 0, the time.Timer is never Stopped —
// it is left to fire after encoding; confirm this is intentional.
func chanToSlice(rv reflect.Value, rtslice reflect.Type, timeout time.Duration) (rvcs reflect.Value) {
	rvcs = rvZeroK(rtslice, reflect.Slice)
	if timeout < 0 { // consume until close
		for {
			recv, recvOk := rv.Recv()
			if !recvOk {
				break
			}
			rvcs = reflect.Append(rvcs, recv)
		}
	} else {
		cases := make([]reflect.SelectCase, 2)
		cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv}
		if timeout == 0 {
			cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault}
		} else {
			tt := time.NewTimer(timeout)
			cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)}
		}
		for {
			chosen, recv, recvOk := reflect.Select(cases)
			// chosen==1 means timeout/default fired; !recvOk means chan closed
			if chosen == 1 || !recvOk {
				break
			}
			rvcs = reflect.Append(rvcs, recv)
		}
	}
	return
}
+
// kSeqFn returns the pre-computed codecFn for a sequence's element type,
// dereferencing pointer types first. Returns nil for interface elements,
// which must be resolved per-element at encode time.
func (e *Encoder) kSeqFn(rtelem reflect.Type) (fn *codecFn) {
	for rtelem.Kind() == reflect.Ptr {
		rtelem = rtelem.Elem()
	}
	// if kind is reflect.Interface, do not pre-determine the encoding type,
	// because preEncodeValue may break it down to a concrete type and kInterface will bomb.
	if rtelem.Kind() != reflect.Interface {
		fn = e.h.fn(rtelem)
	}
	return
}
+
// kSliceWMbs encodes a slice flagged as mbs ("map by slice") as a map:
// alternating elements become map keys and values. An odd length is
// rejected via haltOnMbsOddLen.
func (e *Encoder) kSliceWMbs(rv reflect.Value, ti *typeInfo) {
	var l = rvLenSlice(rv)
	if l == 0 {
		e.mapStart(0)
	} else {
		e.haltOnMbsOddLen(l)
		e.mapStart(l >> 1) // e.mapStart(l / 2)
		fn := e.kSeqFn(ti.elem)
		for j := 0; j < l; j++ {
			if j&1 == 0 { // j%2 == 0 {
				e.mapElemKey()
			} else {
				e.mapElemValue()
			}
			e.encodeValue(rvSliceIndex(rv, j, ti), fn)
		}
	}
	e.mapEnd()
}
+
// kSliceW encodes a slice as an array of its elements.
func (e *Encoder) kSliceW(rv reflect.Value, ti *typeInfo) {
	var l = rvLenSlice(rv)
	e.arrayStart(l)
	if l > 0 {
		fn := e.kSeqFn(ti.elem)
		for j := 0; j < l; j++ {
			e.arrayElem()
			e.encodeValue(rvSliceIndex(rv, j, ti), fn)
		}
	}
	e.arrayEnd()
}
+
// kArrayWMbs is the array counterpart of kSliceWMbs: encodes an mbs array
// as a map of alternating key/value elements (indexed via rv.Index).
func (e *Encoder) kArrayWMbs(rv reflect.Value, ti *typeInfo) {
	var l = rv.Len()
	if l == 0 {
		e.mapStart(0)
	} else {
		e.haltOnMbsOddLen(l)
		e.mapStart(l >> 1) // e.mapStart(l / 2)
		fn := e.kSeqFn(ti.elem)
		for j := 0; j < l; j++ {
			if j&1 == 0 { // j%2 == 0 {
				e.mapElemKey()
			} else {
				e.mapElemValue()
			}
			e.encodeValue(rv.Index(j), fn)
		}
	}
	e.mapEnd()
}
+
// kArrayW encodes a Go array as an array of its elements (via rv.Index).
func (e *Encoder) kArrayW(rv reflect.Value, ti *typeInfo) {
	var l = rv.Len()
	e.arrayStart(l)
	if l > 0 {
		fn := e.kSeqFn(ti.elem)
		for j := 0; j < l; j++ {
			e.arrayElem()
			e.encodeValue(rv.Index(j), fn)
		}
	}
	e.arrayEnd()
}
+
// kChan encodes a channel by draining it (per ChanRecvTimeout) into a
// slice, then encoding that slice. Send-only channels are rejected.
// A byte channel (when not mbs) takes a dedicated raw-bytes path.
func (e *Encoder) kChan(f *codecFnInfo, rv reflect.Value) {
	if f.ti.chandir&uint8(reflect.RecvDir) == 0 {
		e.errorf("send-only channel cannot be encoded")
	}
	if !f.ti.mbs && uint8TypId == rt2id(f.ti.elem) {
		e.kSliceBytesChan(rv)
		return
	}
	rtslice := reflect.SliceOf(f.ti.elem)
	rv = chanToSlice(rv, rtslice, e.h.ChanRecvTimeout)
	ti := e.h.getTypeInfo(rt2id(rtslice), rtslice)
	if f.ti.mbs {
		e.kSliceWMbs(rv, ti)
	} else {
		e.kSliceW(rv, ti)
	}
}
+
// kSlice dispatches slice encoding: mbs slices become maps, byte slices
// (or slices whose element type id is uint8) become one raw byte string,
// everything else becomes an array.
func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) {
	if f.ti.mbs {
		e.kSliceWMbs(rv, f.ti)
	} else if f.ti.rtid == uint8SliceTypId || uint8TypId == rt2id(f.ti.elem) {
		e.e.EncodeStringBytesRaw(rvGetBytes(rv))
	} else {
		e.kSliceW(rv, f.ti)
	}
}
+
// kArray dispatches array encoding: mbs arrays become maps; byte arrays
// become one raw byte string when handleBytesWithinKArray is set;
// everything else becomes an array.
func (e *Encoder) kArray(f *codecFnInfo, rv reflect.Value) {
	if f.ti.mbs {
		e.kArrayWMbs(rv, f.ti)
	} else if handleBytesWithinKArray && uint8TypId == rt2id(f.ti.elem) {
		e.e.EncodeStringBytesRaw(rvGetArrayBytes(rv, []byte{}))
	} else {
		e.kArrayW(rv, f.ti)
	}
}
+
// kSliceBytesChan drains a byte channel (per ChanRecvTimeout semantics)
// into a pooled scratch buffer, then encodes the collected bytes as one
// raw byte string. Both the final buffer and, if append reallocated it,
// the original pooled buffer are returned to the pool.
func (e *Encoder) kSliceBytesChan(rv reflect.Value) {
	// do not use range, so that the number of elements encoded
	// does not change, and encoding does not hang waiting on someone to close chan.

	bs0 := e.blist.peek(32, true)
	bs := bs0

	// accept either receive-only or bidirectional byte channels
	irv := rv2i(rv)
	ch, ok := irv.(<-chan byte)
	if !ok {
		ch = irv.(chan byte)
	}

L1:
	switch timeout := e.h.ChanRecvTimeout; {
	case timeout == 0: // only consume available
		for {
			select {
			case b := <-ch:
				bs = append(bs, b)
			default:
				break L1
			}
		}
	case timeout > 0: // consume until timeout
		tt := time.NewTimer(timeout)
		for {
			select {
			case b := <-ch:
				bs = append(bs, b)
			case <-tt.C:
				// close(tt.C)
				break L1
			}
		}
	default: // consume until close
		for b := range ch {
			bs = append(bs, b)
		}
	}

	e.e.EncodeStringBytesRaw(bs)
	e.blist.put(bs)
	if !byteSliceSameData(bs0, bs) {
		e.blist.put(bs0)
	}
}
+
// kStructSfi returns the struct field infos: sorted by encoded name when
// Canonical is set, else in source (declaration) order.
func (e *Encoder) kStructSfi(f *codecFnInfo) []*structFieldInfo {
	if e.h.Canonical {
		return f.ti.sfi.sorted()
	}
	return f.ti.sfi.source()
}
+
// kStructNoOmitempty encodes a struct when no field uses omitempty:
// either positionally as an array (toArray/StructToArray, using source
// field order) or as a map of field-key to field-value.
func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) {
	var tisfi []*structFieldInfo
	if f.ti.toArray || e.h.StructToArray { // toArray
		tisfi = f.ti.sfi.source()
		e.arrayStart(len(tisfi))
		for _, si := range tisfi {
			e.arrayElem()
			e.encodeValue(si.path.field(rv), nil)
		}
		e.arrayEnd()
	} else {
		tisfi = e.kStructSfi(f)
		e.mapStart(len(tisfi))
		keytyp := f.ti.keyType
		for _, si := range tisfi {
			e.mapElemKey()
			e.kStructFieldKey(keytyp, si.path.encNameAsciiAlphaNum, si.encName)
			e.mapElemValue()
			e.encodeValue(si.path.field(rv), nil)
		}
		e.mapEnd()
	}
}
+
// kStructFieldKey writes a struct field name per the configured key type
// (string/int/uint/float), with a fast path for ascii-alphanumeric names.
func (e *Encoder) kStructFieldKey(keyType valueType, encNameAsciiAlphaNum bool, encName string) {
	encStructFieldKey(encName, e.e, e.w(), keyType, encNameAsciiAlphaNum, e.js)
}
+
// kStruct encodes a struct, honoring omitempty and MissingFielder support.
// Fields (plus any CodecMissingFields entries) are written as a map, unless
// toArray/StructToArray is set (and no missing fields force map mode), in
// which case fields are written positionally as an array. A pooled sfiRv
// scratch slice is used to avoid per-call allocation.
func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) {
	var newlen int
	ti := f.ti
	toMap := !(ti.toArray || e.h.StructToArray)
	var mf map[string]interface{}
	if ti.flagMissingFielder {
		mf = rv2i(rv).(MissingFielder).CodecMissingFields()
		toMap = true
		newlen += len(mf)
	} else if ti.flagMissingFielderPtr {
		rv2 := e.addrRV(rv, ti.rt, ti.ptr)
		mf = rv2i(rv2).(MissingFielder).CodecMissingFields()
		toMap = true
		newlen += len(mf)
	}
	tisfi := ti.sfi.source()
	newlen += len(tisfi)

	// pooled scratch space for (field info, field value) pairs
	var fkvs = e.slist.get(newlen)[:newlen]

	recur := e.h.RecursiveEmptyCheck

	var kv sfiRv
	var j int
	if toMap {
		// collect non-empty fields (honoring omitempty)
		newlen = 0
		for _, si := range e.kStructSfi(f) {
			kv.r = si.path.field(rv)
			if si.path.omitEmpty && isEmptyValue(kv.r, e.h.TypeInfos, recur) {
				continue
			}
			kv.v = si
			fkvs[newlen] = kv
			newlen++
		}

		// collect missing fields (skipping empty keys, and empty values if configured)
		var mf2s []stringIntf
		if len(mf) > 0 {
			mf2s = make([]stringIntf, 0, len(mf))
			for k, v := range mf {
				if k == "" {
					continue
				}
				if ti.infoFieldOmitempty && isEmptyValue(reflect.ValueOf(v), e.h.TypeInfos, recur) {
					continue
				}
				mf2s = append(mf2s, stringIntf{k, v})
			}
		}

		e.mapStart(newlen + len(mf2s))

		// When there are missing fields, and Canonical flag is set,
		// we cannot have the missing fields and struct fields sorted independently.
		// We have to capture them together and sort as a unit.

		if len(mf2s) > 0 && e.h.Canonical {
			mf2w := make([]encStructFieldObj, newlen+len(mf2s))
			for j = 0; j < newlen; j++ {
				kv = fkvs[j]
				mf2w[j] = encStructFieldObj{kv.v.encName, kv.r, nil, kv.v.path.encNameAsciiAlphaNum, true}
			}
			for _, v := range mf2s {
				mf2w[j] = encStructFieldObj{v.v, reflect.Value{}, v.i, false, false}
				j++
			}
			sort.Sort((encStructFieldObjSlice)(mf2w))
			for _, v := range mf2w {
				e.mapElemKey()
				e.kStructFieldKey(ti.keyType, v.ascii, v.key)
				e.mapElemValue()
				if v.isRv {
					e.encodeValue(v.rv, nil)
				} else {
					e.encode(v.intf)
				}
			}
		} else {
			keytyp := ti.keyType
			for j = 0; j < newlen; j++ {
				kv = fkvs[j]
				e.mapElemKey()
				e.kStructFieldKey(keytyp, kv.v.path.encNameAsciiAlphaNum, kv.v.encName)
				e.mapElemValue()
				e.encodeValue(kv.r, nil)
			}
			for _, v := range mf2s {
				e.mapElemKey()
				e.kStructFieldKey(keytyp, false, v.v)
				e.mapElemValue()
				e.encode(v.i)
			}
		}

		e.mapEnd()
	} else {
		newlen = len(tisfi)
		for i, si := range tisfi { // use unsorted array (to match sequence in struct)
			kv.r = si.path.field(rv)
			// use the zero value.
			// if a reference or struct, set to nil (so you do not output too much)
			if si.path.omitEmpty && isEmptyValue(kv.r, e.h.TypeInfos, recur) {
				switch kv.r.Kind() {
				case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice:
					kv.r = reflect.Value{} //encode as nil
				}
			}
			fkvs[i] = kv
		}
		// encode it all
		e.arrayStart(newlen)
		for j = 0; j < newlen; j++ {
			e.arrayElem()
			e.encodeValue(fkvs[j].r, nil)
		}
		e.arrayEnd()
	}

	// do not use defer. Instead, use explicit pool return at end of function.
	// defer has a cost we are trying to avoid.
	// If there is a panic and these slices are not returned, it is ok.
	e.slist.put(fkvs)
}
+
+func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) {
+ l := rvLenMap(rv)
+ e.mapStart(l)
+ if l == 0 {
+ e.mapEnd()
+ return
+ }
+
+ // determine the underlying key and val encFn's for the map.
+ // This eliminates some work which is done for each loop iteration i.e.
+ // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn.
+ //
+ // However, if kind is reflect.Interface, do not pre-determine the
+ // encoding type, because preEncodeValue may break it down to
+ // a concrete type and kInterface will bomb.
+
+ var keyFn, valFn *codecFn
+
+ ktypeKind := reflect.Kind(f.ti.keykind)
+ vtypeKind := reflect.Kind(f.ti.elemkind)
+
+ rtval := f.ti.elem
+ rtvalkind := vtypeKind
+ for rtvalkind == reflect.Ptr {
+ rtval = rtval.Elem()
+ rtvalkind = rtval.Kind()
+ }
+ if rtvalkind != reflect.Interface {
+ valFn = e.h.fn(rtval)
+ }
+
+ var rvv = mapAddrLoopvarRV(f.ti.elem, vtypeKind)
+
+ if e.h.Canonical {
+ e.kMapCanonical(f.ti, rv, rvv, valFn)
+ e.mapEnd()
+ return
+ }
+
+ rtkey := f.ti.key
+ var keyTypeIsString = stringTypId == rt2id(rtkey) // rtkeyid
+ if !keyTypeIsString {
+ for rtkey.Kind() == reflect.Ptr {
+ rtkey = rtkey.Elem()
+ }
+ if rtkey.Kind() != reflect.Interface {
+ keyFn = e.h.fn(rtkey)
+ }
+ }
+
+ var rvk = mapAddrLoopvarRV(f.ti.key, ktypeKind)
+
+ var it mapIter
+ mapRange(&it, rv, rvk, rvv, true)
+
+ for it.Next() {
+ e.mapElemKey()
+ if keyTypeIsString {
+ e.e.EncodeString(it.Key().String())
+ } else {
+ e.encodeValue(it.Key(), keyFn)
+ }
+ e.mapElemValue()
+ e.encodeValue(it.Value(), valFn)
+ }
+ it.Done()
+
+ e.mapEnd()
+}
+
+func (e *Encoder) kMapCanonical(ti *typeInfo, rv, rvv reflect.Value, valFn *codecFn) {
+ // we previously did out-of-band if an extension was registered.
+ // This is not necessary, as the natural kind is sufficient for ordering.
+
+ rtkey := ti.key
+ mks := rv.MapKeys()
+ rtkeyKind := rtkey.Kind()
+ kfast := mapKeyFastKindFor(rtkeyKind)
+ visindirect := mapStoresElemIndirect(uintptr(ti.elemsize))
+ visref := refBitset.isset(ti.elemkind)
+
+ switch rtkeyKind {
+ case reflect.Bool:
+ mksv := make([]boolRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Bool()
+ }
+ sort.Sort(boolRvSlice(mksv))
+ for i := range mksv {
+ e.mapElemKey()
+ e.e.EncodeBool(mksv[i].v)
+ e.mapElemValue()
+ e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn)
+ }
+ case reflect.String:
+ mksv := make([]stringRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.String()
+ }
+ sort.Sort(stringRvSlice(mksv))
+ for i := range mksv {
+ e.mapElemKey()
+ e.e.EncodeString(mksv[i].v)
+ e.mapElemValue()
+ e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn)
+ }
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr:
+ mksv := make([]uint64Rv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Uint()
+ }
+ sort.Sort(uint64RvSlice(mksv))
+ for i := range mksv {
+ e.mapElemKey()
+ e.e.EncodeUint(mksv[i].v)
+ e.mapElemValue()
+ e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn)
+ }
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ mksv := make([]int64Rv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Int()
+ }
+ sort.Sort(int64RvSlice(mksv))
+ for i := range mksv {
+ e.mapElemKey()
+ e.e.EncodeInt(mksv[i].v)
+ e.mapElemValue()
+ e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn)
+ }
+ case reflect.Float32:
+ mksv := make([]float64Rv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Float()
+ }
+ sort.Sort(float64RvSlice(mksv))
+ for i := range mksv {
+ e.mapElemKey()
+ e.e.EncodeFloat32(float32(mksv[i].v))
+ e.mapElemValue()
+ e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn)
+ }
+ case reflect.Float64:
+ mksv := make([]float64Rv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = k.Float()
+ }
+ sort.Sort(float64RvSlice(mksv))
+ for i := range mksv {
+ e.mapElemKey()
+ e.e.EncodeFloat64(mksv[i].v)
+ e.mapElemValue()
+ e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn)
+ }
+ case reflect.Struct:
+ if rtkey == timeTyp {
+ mksv := make([]timeRv, len(mks))
+ for i, k := range mks {
+ v := &mksv[i]
+ v.r = k
+ v.v = rv2i(k).(time.Time)
+ }
+ sort.Sort(timeRvSlice(mksv))
+ for i := range mksv {
+ e.mapElemKey()
+ e.e.EncodeTime(mksv[i].v)
+ e.mapElemValue()
+ e.encodeValue(mapGet(rv, mksv[i].r, rvv, kfast, visindirect, visref), valFn)
+ }
+ break
+ }
+ fallthrough
+ default:
+ // out-of-band
+ // first encode each key to a []byte first, then sort them, then record
+ bs0 := e.blist.get(len(mks) * 16)
+ mksv := bs0
+ mksbv := make([]bytesRv, len(mks))
+
+ func() {
+ // replicate sideEncode logic
+ defer func(wb bytesEncAppender, bytes bool, c containerState, state interface{}) {
+ e.wb = wb
+ e.bytes = bytes
+ e.c = c
+ e.e.restoreState(state)
+ }(e.wb, e.bytes, e.c, e.e.captureState())
+
+ // e2 := NewEncoderBytes(&mksv, e.hh)
+ e.wb = bytesEncAppender{mksv[:0], &mksv}
+ e.bytes = true
+ e.c = 0
+ e.e.resetState()
+
+ for i, k := range mks {
+ v := &mksbv[i]
+ l := len(mksv)
+
+ e.encodeValue(k, nil)
+ e.atEndOfEncode()
+ e.w().end()
+
+ v.r = k
+ v.v = mksv[l:]
+ }
+ }()
+
+ sort.Sort(bytesRvSlice(mksbv))
+ for j := range mksbv {
+ e.mapElemKey()
+ e.encWr.writeb(mksbv[j].v)
+ e.mapElemValue()
+ e.encodeValue(mapGet(rv, mksbv[j].r, rvv, kfast, visindirect, visref), valFn)
+ }
+ e.blist.put(mksv)
+ if !byteSliceSameData(bs0, mksv) {
+ e.blist.put(bs0)
+ }
+ }
+}
+
+// Encoder writes an object to an output stream in a supported format.
+//
+// Encoder is NOT safe for concurrent use i.e. an Encoder cannot be used
+// concurrently in multiple goroutines.
+//
+// However, as Encoder could be allocation heavy to initialize, a Reset method is provided
+// so its state can be reused to encode new output streams repeatedly.
+// This is the idiomatic way to use.
+type Encoder struct {
+ panicHdl
+
+ e encDriver
+
+ h *BasicHandle
+
+	// hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
+ encWr
+
+ // ---- cpu cache line boundary
+ hh Handle
+
+ blist bytesFreelist
+ err error
+
+ // ---- cpu cache line boundary
+
+ // ---- writable fields during execution --- *try* to keep in sep cache line
+
+ // ci holds interfaces during an encoding (if CheckCircularRef=true)
+ //
+ // We considered using a []uintptr (slice of pointer addresses) retrievable via rv.UnsafeAddr.
+ // However, it is possible for the same pointer to point to 2 different types e.g.
+ // type T struct { tHelper }
+ // Here, for var v T; &v and &v.tHelper are the same pointer.
+ // Consequently, we need a tuple of type and pointer, which interface{} natively provides.
+ ci []interface{} // []uintptr
+
+ perType encPerType
+
+ slist sfiRvFreelist
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to configure WriterBufferSize on the handle
+// OR pass in a memory buffered writer (eg bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+ e := h.newEncDriver().encoder()
+ if w != nil {
+ e.Reset(w)
+ }
+ return e
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copying to temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+ e := h.newEncDriver().encoder()
+ if out != nil {
+ e.ResetBytes(out)
+ }
+ return e
+}
+
+func (e *Encoder) init(h Handle) {
+ initHandle(h)
+ e.err = errEncoderNotInitialized
+ e.bytes = true
+ e.hh = h
+ e.h = h.getBasicHandle()
+ e.be = e.hh.isBinary()
+}
+
+func (e *Encoder) w() *encWr {
+ return &e.encWr
+}
+
+func (e *Encoder) resetCommon() {
+ e.e.reset()
+ if e.ci != nil {
+ e.ci = e.ci[:0]
+ }
+ e.c = 0
+ e.calls = 0
+ e.seq = 0
+ e.err = nil
+}
+
+// Reset resets the Encoder with a new output stream.
+//
+// This accommodates using the state of the Encoder,
+// where it has "cached" information about sub-engines.
+func (e *Encoder) Reset(w io.Writer) {
+ e.bytes = false
+ if e.wf == nil {
+ e.wf = new(bufioEncWriter)
+ }
+ e.wf.reset(w, e.h.WriterBufferSize, &e.blist)
+ e.resetCommon()
+}
+
+// ResetBytes resets the Encoder with a new destination output []byte.
+func (e *Encoder) ResetBytes(out *[]byte) {
+ e.bytes = true
+ e.wb.reset(encInBytes(out), out)
+ e.resetCommon()
+}
+
+// Encode writes an object into a stream.
+//
+// Encoding can be configured via the struct tag for the fields.
+// The key (in the struct tags) that we look at is configurable.
+//
+// By default, we look up the "codec" key in the struct field's tags,
+// and fall back to the "json" key if "codec" is absent.
+// That key in struct field's tag value is the key name,
+// followed by an optional comma and options.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it. The options
+// which can be set on _struct are:
+// - omitempty: so all fields are omitted if empty
+// - toarray: so struct is encoded as an array
+// - int: so struct key names are encoded as signed integers (instead of strings)
+// - uint: so struct key names are encoded as unsigned integers (instead of strings)
+// - float: so struct key names are encoded as floats (instead of strings)
+// More details on these below.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+// - the field's tag is "-", OR
+// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+// ...
+// This key is typically encoded as a string.
+// However, there are instances where the encoded stream has mapping keys encoded as numbers.
+// For example, some cbor streams have keys as integer codes in the stream, but they should map
+// to fields in a structured object. Consequently, a struct is the natural representation in code.
+// For these, configure the struct to encode/decode the keys as numbers (instead of string).
+// This is done with the int,uint or float option on the _struct field (see above).
+//
+// However, struct values may encode as arrays. This happens when:
+// - StructToArray Encode option is set, OR
+// - the tag on the _struct field sets the "toarray" option
+// Note that omitempty is ignored when encoding struct values as arrays,
+// as an entry must be encoded for each field, to maintain its position.
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+//
+// Anonymous fields are encoded inline except:
+// - the struct tag specifies a replacement name (first value)
+// - the field is of an interface type
+//
+// Examples:
+//
+//	// NOTE: 'json:' can be used as struct tag key, in place of 'codec:' below.
+// type MyStruct struct {
+// _struct bool `codec:",omitempty"` //set omitempty for every field
+// Field1 string `codec:"-"` //skip this field
+// Field2 int `codec:"myName"` //Use key "myName" in encode stream
+// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty.
+// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+// io.Reader //use key "Reader".
+//	  MyStruct `codec:"my1"` //use key "my1".
+// MyStruct //inline it
+// ...
+// }
+//
+// type MyStruct struct {
+// _struct bool `codec:",toarray"` //encode struct as an array
+// }
+//
+// type MyStruct struct {
+// _struct bool `codec:",uint"` //encode struct with "unsigned integer" keys
+// Field1 string `codec:"1"` //encode Field1 key using: EncodeInt(1)
+// Field2 string `codec:"2"` //encode Field2 key using: EncodeInt(2)
+// }
+//
+// The mode of encoding is based on the type of the value. When a value is seen:
+// - If a Selfer, call its CodecEncodeSelf method
+// - If an extension is registered for it, call that extension function
+// - If implements encoding.(Binary|Text|JSON)Marshaler, call Marshal(Binary|Text|JSON) method
+// - Else encode it based on its reflect.Kind
+//
+// Note that struct field names and keys in map[string]XXX will be treated as symbols.
+// Some formats support symbols (e.g. binc) and will properly encode the string
+// only once in the stream, and use a tag to refer to it thereafter.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ // tried to use closure, as runtime optimizes defer with no params.
+ // This seemed to be causing weird issues (like circular reference found, unexpected panic, etc).
+ // Also, see https://github.com/golang/go/issues/14939#issuecomment-417836139
+ if !debugging {
+ defer func() {
+ // if error occurred during encoding, return that error;
+ // else if error occurred on end'ing (i.e. during flush), return that error.
+ if x := recover(); x != nil {
+ panicValToErr(e, x, &e.err)
+ err = e.err
+ }
+ }()
+ }
+
+ e.MustEncode(v)
+ return
+}
+
+// MustEncode is like Encode, but panics if unable to Encode.
+//
+// Note: This provides insight to the code location that triggered the error.
+func (e *Encoder) MustEncode(v interface{}) {
+ halt.onerror(e.err)
+ if e.hh == nil {
+ halt.onerror(errNoFormatHandle)
+ }
+
+ e.calls++
+ e.encode(v)
+ e.calls--
+ if e.calls == 0 {
+ e.atEndOfEncode()
+ e.w().end()
+ }
+}
+
+// Release releases shared (pooled) resources.
+//
+// It is important to call Release() when done with an Encoder, so those resources
+// are released instantly for use by subsequently created Encoders.
+//
+// Deprecated: Release is a no-op as pooled resources are not used with an Encoder.
+// This method is kept for compatibility reasons only.
+func (e *Encoder) Release() {
+}
+
+func (e *Encoder) encode(iv interface{}) {
+ // MARKER: a switch with only concrete types can be optimized.
+ // consequently, we deal with nil and interfaces outside the switch.
+
+ if iv == nil {
+ e.e.EncodeNil()
+ return
+ }
+
+ rv, ok := isNil(iv)
+ if ok {
+ e.e.EncodeNil()
+ return
+ }
+
+ switch v := iv.(type) {
+ // case nil:
+ // case Selfer:
+ case Raw:
+ e.rawBytes(v)
+ case reflect.Value:
+ e.encodeValue(v, nil)
+
+ case string:
+ e.e.EncodeString(v)
+ case bool:
+ e.e.EncodeBool(v)
+ case int:
+ e.e.EncodeInt(int64(v))
+ case int8:
+ e.e.EncodeInt(int64(v))
+ case int16:
+ e.e.EncodeInt(int64(v))
+ case int32:
+ e.e.EncodeInt(int64(v))
+ case int64:
+ e.e.EncodeInt(v)
+ case uint:
+ e.e.EncodeUint(uint64(v))
+ case uint8:
+ e.e.EncodeUint(uint64(v))
+ case uint16:
+ e.e.EncodeUint(uint64(v))
+ case uint32:
+ e.e.EncodeUint(uint64(v))
+ case uint64:
+ e.e.EncodeUint(v)
+ case uintptr:
+ e.e.EncodeUint(uint64(v))
+ case float32:
+ e.e.EncodeFloat32(v)
+ case float64:
+ e.e.EncodeFloat64(v)
+ case complex64:
+ e.encodeComplex64(v)
+ case complex128:
+ e.encodeComplex128(v)
+ case time.Time:
+ e.e.EncodeTime(v)
+ case []byte:
+ e.e.EncodeStringBytesRaw(v)
+ case *Raw:
+ e.rawBytes(*v)
+ case *string:
+ e.e.EncodeString(*v)
+ case *bool:
+ e.e.EncodeBool(*v)
+ case *int:
+ e.e.EncodeInt(int64(*v))
+ case *int8:
+ e.e.EncodeInt(int64(*v))
+ case *int16:
+ e.e.EncodeInt(int64(*v))
+ case *int32:
+ e.e.EncodeInt(int64(*v))
+ case *int64:
+ e.e.EncodeInt(*v)
+ case *uint:
+ e.e.EncodeUint(uint64(*v))
+ case *uint8:
+ e.e.EncodeUint(uint64(*v))
+ case *uint16:
+ e.e.EncodeUint(uint64(*v))
+ case *uint32:
+ e.e.EncodeUint(uint64(*v))
+ case *uint64:
+ e.e.EncodeUint(*v)
+ case *uintptr:
+ e.e.EncodeUint(uint64(*v))
+ case *float32:
+ e.e.EncodeFloat32(*v)
+ case *float64:
+ e.e.EncodeFloat64(*v)
+ case *complex64:
+ e.encodeComplex64(*v)
+ case *complex128:
+ e.encodeComplex128(*v)
+ case *time.Time:
+ e.e.EncodeTime(*v)
+ case *[]byte:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ e.e.EncodeStringBytesRaw(*v)
+ }
+ default:
+ // we can't check non-predefined types, as they might be a Selfer or extension.
+ if skipFastpathTypeSwitchInDirectCall || !fastpathEncodeTypeSwitch(iv, e) {
+ e.encodeValue(rv, nil)
+ }
+ }
+}
+
+// encodeValue will encode a value.
+//
+// Note that encodeValue will handle nil in the stream early, so that the
+// subsequent calls i.e. kXXX methods, etc do not have to handle it themselves.
+func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn) {
+ // if a valid fn is passed, it MUST BE for the dereferenced type of rv
+
+ // MARKER: We check if value is nil here, so that the kXXX method do not have to.
+
+ var sptr interface{}
+ var rvp reflect.Value
+ var rvpValid bool
+TOP:
+ switch rv.Kind() {
+ case reflect.Ptr:
+ if rvIsNil(rv) {
+ e.e.EncodeNil()
+ return
+ }
+ rvpValid = true
+ rvp = rv
+ rv = rv.Elem()
+ goto TOP
+ case reflect.Interface:
+ if rvIsNil(rv) {
+ e.e.EncodeNil()
+ return
+ }
+ rvpValid = false
+ rvp = reflect.Value{}
+ rv = rv.Elem()
+ goto TOP
+ case reflect.Struct:
+ if rvpValid && e.h.CheckCircularRef {
+ sptr = rv2i(rvp)
+ for _, vv := range e.ci {
+ if eq4i(sptr, vv) { // error if sptr already seen
+ e.errorf("circular reference found: %p, %T", sptr, sptr)
+ }
+ }
+ e.ci = append(e.ci, sptr)
+ }
+ case reflect.Slice, reflect.Map, reflect.Chan:
+ if rvIsNil(rv) {
+ e.e.EncodeNil()
+ return
+ }
+ case reflect.Invalid, reflect.Func:
+ e.e.EncodeNil()
+ return
+ }
+
+ if fn == nil {
+ fn = e.h.fn(rvType(rv))
+ }
+
+ if !fn.i.addrE { // typically, addrE = false, so check it first
+ // keep rv same
+ } else if rvpValid {
+ rv = rvp
+ } else {
+ rv = e.addrRV(rv, fn.i.ti.rt, fn.i.ti.ptr)
+ }
+ fn.fe(e, &fn.i, rv)
+
+ if sptr != nil { // remove sptr
+ e.ci = e.ci[:len(e.ci)-1]
+ }
+}
+
+// addrRV returns an addressable value which may be readonly
+func (e *Encoder) addrRV(rv reflect.Value, typ, ptrType reflect.Type) (rva reflect.Value) {
+ if rv.CanAddr() {
+ return rvAddr(rv, ptrType)
+ }
+ if e.h.NoAddressableReadonly {
+ rva = reflect.New(typ)
+ rvSetDirect(rva.Elem(), rv)
+ return
+ }
+ return rvAddr(e.perType.AddressableRO(rv), ptrType)
+}
+
+func (e *Encoder) marshalUtf8(bs []byte, fnerr error) {
+ e.onerror(fnerr)
+ if bs == nil {
+ e.e.EncodeNil()
+ } else {
+ e.e.EncodeString(stringView(bs))
+ }
+}
+
+func (e *Encoder) marshalAsis(bs []byte, fnerr error) {
+ e.onerror(fnerr)
+ if bs == nil {
+ e.e.EncodeNil()
+ } else {
+ e.encWr.writeb(bs) // e.asis(bs)
+ }
+}
+
+func (e *Encoder) marshalRaw(bs []byte, fnerr error) {
+ e.onerror(fnerr)
+ if bs == nil {
+ e.e.EncodeNil()
+ } else {
+ e.e.EncodeStringBytesRaw(bs)
+ }
+}
+
+func (e *Encoder) rawBytes(vv Raw) {
+ v := []byte(vv)
+ if !e.h.Raw {
+ e.errorf("Raw values cannot be encoded: %v", v)
+ }
+ e.encWr.writeb(v)
+}
+
+func (e *Encoder) wrapErr(v error, err *error) {
+ *err = wrapCodecErr(v, e.hh.Name(), 0, true)
+}
+
+// ---- container tracker methods
+// Note: We update the .c after calling the callback.
+// This way, the callback can know what the last status was.
+
+func (e *Encoder) mapStart(length int) {
+ e.e.WriteMapStart(length)
+ e.c = containerMapStart
+}
+
+func (e *Encoder) mapElemKey() {
+ if e.js {
+ e.jsondriver().WriteMapElemKey()
+ }
+ e.c = containerMapKey
+}
+
+func (e *Encoder) mapElemValue() {
+ if e.js {
+ e.jsondriver().WriteMapElemValue()
+ }
+ e.c = containerMapValue
+}
+
+func (e *Encoder) mapEnd() {
+ e.e.WriteMapEnd()
+ e.c = 0
+}
+
+func (e *Encoder) arrayStart(length int) {
+ e.e.WriteArrayStart(length)
+ e.c = containerArrayStart
+}
+
+func (e *Encoder) arrayElem() {
+ if e.js {
+ e.jsondriver().WriteArrayElem()
+ }
+ e.c = containerArrayElem
+}
+
+func (e *Encoder) arrayEnd() {
+ e.e.WriteArrayEnd()
+ e.c = 0
+}
+
+// ----------
+
+func (e *Encoder) haltOnMbsOddLen(length int) {
+ if length&1 != 0 { // similar to &1==1 or %2 == 1
+ e.errorf("mapBySlice requires even slice length, but got %v", length)
+ }
+}
+
+func (e *Encoder) atEndOfEncode() {
+ // e.e.atEndOfEncode()
+ if e.js {
+ e.jsondriver().atEndOfEncode()
+ }
+}
+
+func (e *Encoder) sideEncode(v interface{}, basetype reflect.Type, bs *[]byte) {
+ // rv := baseRV(v)
+ // e2 := NewEncoderBytes(bs, e.hh)
+ // e2.encodeValue(rv, e2.h.fnNoExt(basetype))
+ // e2.atEndOfEncode()
+ // e2.w().end()
+
+ defer func(wb bytesEncAppender, bytes bool, c containerState, state interface{}) {
+ e.wb = wb
+ e.bytes = bytes
+ e.c = c
+ e.e.restoreState(state)
+ }(e.wb, e.bytes, e.c, e.e.captureState())
+
+ e.wb = bytesEncAppender{encInBytes(bs)[:0], bs}
+ e.bytes = true
+ e.c = 0
+ e.e.resetState()
+
+ // must call using fnNoExt
+ rv := baseRV(v)
+ e.encodeValue(rv, e.h.fnNoExt(basetype))
+ e.atEndOfEncode()
+ e.w().end()
+}
+
+func encInBytes(out *[]byte) (in []byte) {
+ in = *out
+ if in == nil {
+ in = make([]byte, defEncByteBufSize)
+ }
+ return
+}
+
+func encStructFieldKey(encName string, ee encDriver, w *encWr,
+ keyType valueType, encNameAsciiAlphaNum bool, js bool) {
+ // use if-else-if, not switch (which compiles to binary-search)
+ // since keyType is typically valueTypeString, branch prediction is pretty good.
+
+ if keyType == valueTypeString {
+ if js && encNameAsciiAlphaNum { // keyType == valueTypeString
+ w.writeqstr(encName)
+ } else { // keyType == valueTypeString
+ ee.EncodeString(encName)
+ }
+ } else if keyType == valueTypeInt {
+ ee.EncodeInt(must.Int(strconv.ParseInt(encName, 10, 64)))
+ } else if keyType == valueTypeUint {
+ ee.EncodeUint(must.Uint(strconv.ParseUint(encName, 10, 64)))
+ } else if keyType == valueTypeFloat {
+ ee.EncodeFloat64(must.Float(strconv.ParseFloat(encName, 64)))
+ } else {
+ halt.errorf("invalid struct key type: %v", keyType)
+ }
+}
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
new file mode 100644
index 000000000..a2c258196
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
@@ -0,0 +1,6157 @@
+//go:build !notfastpath && !codec.notfastpath
+// +build !notfastpath,!codec.notfastpath
+
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from fast-path.go.tmpl - DO NOT EDIT.
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute the encode.go and decode.go, and create a dependency in there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently support
+// - slice of all builtin types (numeric, bool, string, []byte)
+// - maps of builtin types to builtin or interface{} type, EXCEPT FOR
+// keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{}
+// AND values of type int8/16/32, uint16/32
+// This should provide adequate "typical" implementations.
+//
+// Note that fast track decode functions must handle values for which an address cannot be obtained.
+// For example:
+// m2 := map[string]int{}
+// p2 := []interface{}{m2}
+// // decoding into p2 will bomb if fast track functions do not treat like unaddressable.
+//
+
+import (
+ "reflect"
+ "sort"
+)
+
+const fastpathEnabled = true
+
+type fastpathT struct{}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+ rt reflect.Type
+ encfn func(*Encoder, *codecFnInfo, reflect.Value)
+ decfn func(*Decoder, *codecFnInfo, reflect.Value)
+}
+
+type fastpathA [56]fastpathE
+type fastpathARtid [56]uintptr
+
+var fastpathAv fastpathA
+var fastpathAvRtid fastpathARtid
+
+type fastpathAslice struct{}
+
+func (fastpathAslice) Len() int { return 56 }
+func (fastpathAslice) Less(i, j int) bool {
+ return fastpathAvRtid[uint(i)] < fastpathAvRtid[uint(j)]
+}
+func (fastpathAslice) Swap(i, j int) {
+ fastpathAvRtid[uint(i)], fastpathAvRtid[uint(j)] = fastpathAvRtid[uint(j)], fastpathAvRtid[uint(i)]
+ fastpathAv[uint(i)], fastpathAv[uint(j)] = fastpathAv[uint(j)], fastpathAv[uint(i)]
+}
+
+func fastpathAvIndex(rtid uintptr) int {
+ // use binary search to grab the index (adapted from sort/search.go)
+ // Note: we use goto (instead of for loop) so this can be inlined.
+ // h, i, j := 0, 0, 56
+ var h, i uint
+ var j uint = 56
+LOOP:
+ if i < j {
+ h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
+ if fastpathAvRtid[h] < rtid {
+ i = h + 1
+ } else {
+ j = h
+ }
+ goto LOOP
+ }
+ if i < 56 && fastpathAvRtid[i] == rtid {
+ return int(i)
+ }
+ return -1
+}
+
+// due to possible initialization loop error, make fastpath in an init()
+func init() {
+ var i uint = 0
+ fn := func(v interface{},
+ fe func(*Encoder, *codecFnInfo, reflect.Value),
+ fd func(*Decoder, *codecFnInfo, reflect.Value)) {
+ xrt := reflect.TypeOf(v)
+ xptr := rt2id(xrt)
+ fastpathAvRtid[i] = xptr
+ fastpathAv[i] = fastpathE{xrt, fe, fd}
+ i++
+ }
+
+ fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR)
+ fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR)
+ fn([][]byte(nil), (*Encoder).fastpathEncSliceBytesR, (*Decoder).fastpathDecSliceBytesR)
+ fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R)
+ fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R)
+ fn([]uint8(nil), (*Encoder).fastpathEncSliceUint8R, (*Decoder).fastpathDecSliceUint8R)
+ fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R)
+ fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR)
+ fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R)
+ fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R)
+ fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR)
+
+ fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR)
+ fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR)
+ fn(map[string][]byte(nil), (*Encoder).fastpathEncMapStringBytesR, (*Decoder).fastpathDecMapStringBytesR)
+ fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R)
+ fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R)
+ fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR)
+ fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R)
+ fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R)
+ fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR)
+ fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR)
+ fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR)
+ fn(map[uint8][]byte(nil), (*Encoder).fastpathEncMapUint8BytesR, (*Decoder).fastpathDecMapUint8BytesR)
+ fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R)
+ fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R)
+ fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR)
+ fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R)
+ fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R)
+ fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR)
+ fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR)
+ fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR)
+ fn(map[uint64][]byte(nil), (*Encoder).fastpathEncMapUint64BytesR, (*Decoder).fastpathDecMapUint64BytesR)
+ fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R)
+ fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R)
+ fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR)
+ fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, (*Decoder).fastpathDecMapUint64Int32R)
+ fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R)
+ fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, (*Decoder).fastpathDecMapUint64BoolR)
+ fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR)
+ fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR)
+ fn(map[int][]byte(nil), (*Encoder).fastpathEncMapIntBytesR, (*Decoder).fastpathDecMapIntBytesR)
+ fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R)
+ fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R)
+ fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR)
+ fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R)
+ fn(map[int]float64(nil), (*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R)
+ fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR)
+ fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, (*Decoder).fastpathDecMapInt32IntfR)
+ fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR)
+ fn(map[int32][]byte(nil), (*Encoder).fastpathEncMapInt32BytesR, (*Decoder).fastpathDecMapInt32BytesR)
+ fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R)
+ fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R)
+ fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR)
+ fn(map[int32]int32(nil), (*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R)
+ fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R)
+ fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR)
+
+ sort.Sort(fastpathAslice{})
+}
+
+// -- encode
+
+// -- -- fast path type switch
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+ switch v := iv.(type) {
+ case []interface{}:
+ fastpathTV.EncSliceIntfV(v, e)
+ case *[]interface{}:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncSliceIntfV(*v, e)
+ }
+ case []string:
+ fastpathTV.EncSliceStringV(v, e)
+ case *[]string:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncSliceStringV(*v, e)
+ }
+ case [][]byte:
+ fastpathTV.EncSliceBytesV(v, e)
+ case *[][]byte:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncSliceBytesV(*v, e)
+ }
+ case []float32:
+ fastpathTV.EncSliceFloat32V(v, e)
+ case *[]float32:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncSliceFloat32V(*v, e)
+ }
+ case []float64:
+ fastpathTV.EncSliceFloat64V(v, e)
+ case *[]float64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncSliceFloat64V(*v, e)
+ }
+ case []uint8:
+ fastpathTV.EncSliceUint8V(v, e)
+ case *[]uint8:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncSliceUint8V(*v, e)
+ }
+ case []uint64:
+ fastpathTV.EncSliceUint64V(v, e)
+ case *[]uint64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncSliceUint64V(*v, e)
+ }
+ case []int:
+ fastpathTV.EncSliceIntV(v, e)
+ case *[]int:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncSliceIntV(*v, e)
+ }
+ case []int32:
+ fastpathTV.EncSliceInt32V(v, e)
+ case *[]int32:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncSliceInt32V(*v, e)
+ }
+ case []int64:
+ fastpathTV.EncSliceInt64V(v, e)
+ case *[]int64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncSliceInt64V(*v, e)
+ }
+ case []bool:
+ fastpathTV.EncSliceBoolV(v, e)
+ case *[]bool:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncSliceBoolV(*v, e)
+ }
+ case map[string]interface{}:
+ fastpathTV.EncMapStringIntfV(v, e)
+ case *map[string]interface{}:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapStringIntfV(*v, e)
+ }
+ case map[string]string:
+ fastpathTV.EncMapStringStringV(v, e)
+ case *map[string]string:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapStringStringV(*v, e)
+ }
+ case map[string][]byte:
+ fastpathTV.EncMapStringBytesV(v, e)
+ case *map[string][]byte:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapStringBytesV(*v, e)
+ }
+ case map[string]uint8:
+ fastpathTV.EncMapStringUint8V(v, e)
+ case *map[string]uint8:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapStringUint8V(*v, e)
+ }
+ case map[string]uint64:
+ fastpathTV.EncMapStringUint64V(v, e)
+ case *map[string]uint64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapStringUint64V(*v, e)
+ }
+ case map[string]int:
+ fastpathTV.EncMapStringIntV(v, e)
+ case *map[string]int:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapStringIntV(*v, e)
+ }
+ case map[string]int32:
+ fastpathTV.EncMapStringInt32V(v, e)
+ case *map[string]int32:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapStringInt32V(*v, e)
+ }
+ case map[string]float64:
+ fastpathTV.EncMapStringFloat64V(v, e)
+ case *map[string]float64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapStringFloat64V(*v, e)
+ }
+ case map[string]bool:
+ fastpathTV.EncMapStringBoolV(v, e)
+ case *map[string]bool:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapStringBoolV(*v, e)
+ }
+ case map[uint8]interface{}:
+ fastpathTV.EncMapUint8IntfV(v, e)
+ case *map[uint8]interface{}:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint8IntfV(*v, e)
+ }
+ case map[uint8]string:
+ fastpathTV.EncMapUint8StringV(v, e)
+ case *map[uint8]string:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint8StringV(*v, e)
+ }
+ case map[uint8][]byte:
+ fastpathTV.EncMapUint8BytesV(v, e)
+ case *map[uint8][]byte:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint8BytesV(*v, e)
+ }
+ case map[uint8]uint8:
+ fastpathTV.EncMapUint8Uint8V(v, e)
+ case *map[uint8]uint8:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint8Uint8V(*v, e)
+ }
+ case map[uint8]uint64:
+ fastpathTV.EncMapUint8Uint64V(v, e)
+ case *map[uint8]uint64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint8Uint64V(*v, e)
+ }
+ case map[uint8]int:
+ fastpathTV.EncMapUint8IntV(v, e)
+ case *map[uint8]int:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint8IntV(*v, e)
+ }
+ case map[uint8]int32:
+ fastpathTV.EncMapUint8Int32V(v, e)
+ case *map[uint8]int32:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint8Int32V(*v, e)
+ }
+ case map[uint8]float64:
+ fastpathTV.EncMapUint8Float64V(v, e)
+ case *map[uint8]float64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint8Float64V(*v, e)
+ }
+ case map[uint8]bool:
+ fastpathTV.EncMapUint8BoolV(v, e)
+ case *map[uint8]bool:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint8BoolV(*v, e)
+ }
+ case map[uint64]interface{}:
+ fastpathTV.EncMapUint64IntfV(v, e)
+ case *map[uint64]interface{}:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint64IntfV(*v, e)
+ }
+ case map[uint64]string:
+ fastpathTV.EncMapUint64StringV(v, e)
+ case *map[uint64]string:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint64StringV(*v, e)
+ }
+ case map[uint64][]byte:
+ fastpathTV.EncMapUint64BytesV(v, e)
+ case *map[uint64][]byte:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint64BytesV(*v, e)
+ }
+ case map[uint64]uint8:
+ fastpathTV.EncMapUint64Uint8V(v, e)
+ case *map[uint64]uint8:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint64Uint8V(*v, e)
+ }
+ case map[uint64]uint64:
+ fastpathTV.EncMapUint64Uint64V(v, e)
+ case *map[uint64]uint64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint64Uint64V(*v, e)
+ }
+ case map[uint64]int:
+ fastpathTV.EncMapUint64IntV(v, e)
+ case *map[uint64]int:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint64IntV(*v, e)
+ }
+ case map[uint64]int32:
+ fastpathTV.EncMapUint64Int32V(v, e)
+ case *map[uint64]int32:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint64Int32V(*v, e)
+ }
+ case map[uint64]float64:
+ fastpathTV.EncMapUint64Float64V(v, e)
+ case *map[uint64]float64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint64Float64V(*v, e)
+ }
+ case map[uint64]bool:
+ fastpathTV.EncMapUint64BoolV(v, e)
+ case *map[uint64]bool:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapUint64BoolV(*v, e)
+ }
+ case map[int]interface{}:
+ fastpathTV.EncMapIntIntfV(v, e)
+ case *map[int]interface{}:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapIntIntfV(*v, e)
+ }
+ case map[int]string:
+ fastpathTV.EncMapIntStringV(v, e)
+ case *map[int]string:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapIntStringV(*v, e)
+ }
+ case map[int][]byte:
+ fastpathTV.EncMapIntBytesV(v, e)
+ case *map[int][]byte:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapIntBytesV(*v, e)
+ }
+ case map[int]uint8:
+ fastpathTV.EncMapIntUint8V(v, e)
+ case *map[int]uint8:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapIntUint8V(*v, e)
+ }
+ case map[int]uint64:
+ fastpathTV.EncMapIntUint64V(v, e)
+ case *map[int]uint64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapIntUint64V(*v, e)
+ }
+ case map[int]int:
+ fastpathTV.EncMapIntIntV(v, e)
+ case *map[int]int:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapIntIntV(*v, e)
+ }
+ case map[int]int32:
+ fastpathTV.EncMapIntInt32V(v, e)
+ case *map[int]int32:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapIntInt32V(*v, e)
+ }
+ case map[int]float64:
+ fastpathTV.EncMapIntFloat64V(v, e)
+ case *map[int]float64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapIntFloat64V(*v, e)
+ }
+ case map[int]bool:
+ fastpathTV.EncMapIntBoolV(v, e)
+ case *map[int]bool:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapIntBoolV(*v, e)
+ }
+ case map[int32]interface{}:
+ fastpathTV.EncMapInt32IntfV(v, e)
+ case *map[int32]interface{}:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapInt32IntfV(*v, e)
+ }
+ case map[int32]string:
+ fastpathTV.EncMapInt32StringV(v, e)
+ case *map[int32]string:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapInt32StringV(*v, e)
+ }
+ case map[int32][]byte:
+ fastpathTV.EncMapInt32BytesV(v, e)
+ case *map[int32][]byte:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapInt32BytesV(*v, e)
+ }
+ case map[int32]uint8:
+ fastpathTV.EncMapInt32Uint8V(v, e)
+ case *map[int32]uint8:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapInt32Uint8V(*v, e)
+ }
+ case map[int32]uint64:
+ fastpathTV.EncMapInt32Uint64V(v, e)
+ case *map[int32]uint64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapInt32Uint64V(*v, e)
+ }
+ case map[int32]int:
+ fastpathTV.EncMapInt32IntV(v, e)
+ case *map[int32]int:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapInt32IntV(*v, e)
+ }
+ case map[int32]int32:
+ fastpathTV.EncMapInt32Int32V(v, e)
+ case *map[int32]int32:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapInt32Int32V(*v, e)
+ }
+ case map[int32]float64:
+ fastpathTV.EncMapInt32Float64V(v, e)
+ case *map[int32]float64:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapInt32Float64V(*v, e)
+ }
+ case map[int32]bool:
+ fastpathTV.EncMapInt32BoolV(v, e)
+ case *map[int32]bool:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.EncMapInt32BoolV(*v, e)
+ }
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
+func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) {
+ var v []interface{}
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([]interface{})
+ }
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceIntfV(v, e)
+ } else {
+ fastpathTV.EncSliceIntfV(v, e)
+ }
+}
+func (fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) {
+ e.arrayStart(len(v))
+ for j := range v {
+ e.arrayElem()
+ e.encode(v[j])
+ }
+ e.arrayEnd()
+}
+func (fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) {
+ e.haltOnMbsOddLen(len(v))
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ e.encode(v[j])
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) {
+ var v []string
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([]string)
+ }
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceStringV(v, e)
+ } else {
+ fastpathTV.EncSliceStringV(v, e)
+ }
+}
+func (fastpathT) EncSliceStringV(v []string, e *Encoder) {
+ e.arrayStart(len(v))
+ for j := range v {
+ e.arrayElem()
+ e.e.EncodeString(v[j])
+ }
+ e.arrayEnd()
+}
+func (fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) {
+ e.haltOnMbsOddLen(len(v))
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ e.e.EncodeString(v[j])
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncSliceBytesR(f *codecFnInfo, rv reflect.Value) {
+ var v [][]byte
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([][]byte)
+ }
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceBytesV(v, e)
+ } else {
+ fastpathTV.EncSliceBytesV(v, e)
+ }
+}
+func (fastpathT) EncSliceBytesV(v [][]byte, e *Encoder) {
+ e.arrayStart(len(v))
+ for j := range v {
+ e.arrayElem()
+ e.e.EncodeStringBytesRaw(v[j])
+ }
+ e.arrayEnd()
+}
+func (fastpathT) EncAsMapSliceBytesV(v [][]byte, e *Encoder) {
+ e.haltOnMbsOddLen(len(v))
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ e.e.EncodeStringBytesRaw(v[j])
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) {
+ var v []float32
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([]float32)
+ }
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceFloat32V(v, e)
+ } else {
+ fastpathTV.EncSliceFloat32V(v, e)
+ }
+}
+func (fastpathT) EncSliceFloat32V(v []float32, e *Encoder) {
+ e.arrayStart(len(v))
+ for j := range v {
+ e.arrayElem()
+ e.e.EncodeFloat32(v[j])
+ }
+ e.arrayEnd()
+}
+func (fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) {
+ e.haltOnMbsOddLen(len(v))
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ e.e.EncodeFloat32(v[j])
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) {
+ var v []float64
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([]float64)
+ }
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceFloat64V(v, e)
+ } else {
+ fastpathTV.EncSliceFloat64V(v, e)
+ }
+}
+func (fastpathT) EncSliceFloat64V(v []float64, e *Encoder) {
+ e.arrayStart(len(v))
+ for j := range v {
+ e.arrayElem()
+ e.e.EncodeFloat64(v[j])
+ }
+ e.arrayEnd()
+}
+func (fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) {
+ e.haltOnMbsOddLen(len(v))
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ e.e.EncodeFloat64(v[j])
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncSliceUint8R(f *codecFnInfo, rv reflect.Value) {
+ var v []uint8
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([]uint8)
+ }
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint8V(v, e)
+ } else {
+ fastpathTV.EncSliceUint8V(v, e)
+ }
+}
+func (fastpathT) EncSliceUint8V(v []uint8, e *Encoder) {
+ e.e.EncodeStringBytesRaw(v)
+}
+func (fastpathT) EncAsMapSliceUint8V(v []uint8, e *Encoder) {
+ e.haltOnMbsOddLen(len(v))
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ e.e.EncodeUint(uint64(v[j]))
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) {
+ var v []uint64
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([]uint64)
+ }
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceUint64V(v, e)
+ } else {
+ fastpathTV.EncSliceUint64V(v, e)
+ }
+}
+func (fastpathT) EncSliceUint64V(v []uint64, e *Encoder) {
+ e.arrayStart(len(v))
+ for j := range v {
+ e.arrayElem()
+ e.e.EncodeUint(v[j])
+ }
+ e.arrayEnd()
+}
+func (fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) {
+ e.haltOnMbsOddLen(len(v))
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ e.e.EncodeUint(v[j])
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) {
+ var v []int
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([]int)
+ }
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceIntV(v, e)
+ } else {
+ fastpathTV.EncSliceIntV(v, e)
+ }
+}
+func (fastpathT) EncSliceIntV(v []int, e *Encoder) {
+ e.arrayStart(len(v))
+ for j := range v {
+ e.arrayElem()
+ e.e.EncodeInt(int64(v[j]))
+ }
+ e.arrayEnd()
+}
+func (fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) {
+ e.haltOnMbsOddLen(len(v))
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ e.e.EncodeInt(int64(v[j]))
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) {
+ var v []int32
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([]int32)
+ }
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt32V(v, e)
+ } else {
+ fastpathTV.EncSliceInt32V(v, e)
+ }
+}
+func (fastpathT) EncSliceInt32V(v []int32, e *Encoder) {
+ e.arrayStart(len(v))
+ for j := range v {
+ e.arrayElem()
+ e.e.EncodeInt(int64(v[j]))
+ }
+ e.arrayEnd()
+}
+func (fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) {
+ e.haltOnMbsOddLen(len(v))
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ e.e.EncodeInt(int64(v[j]))
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) {
+ var v []int64
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([]int64)
+ }
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceInt64V(v, e)
+ } else {
+ fastpathTV.EncSliceInt64V(v, e)
+ }
+}
+func (fastpathT) EncSliceInt64V(v []int64, e *Encoder) {
+ e.arrayStart(len(v))
+ for j := range v {
+ e.arrayElem()
+ e.e.EncodeInt(v[j])
+ }
+ e.arrayEnd()
+}
+func (fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) {
+ e.haltOnMbsOddLen(len(v))
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ e.e.EncodeInt(v[j])
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) {
+ var v []bool
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([]bool)
+ }
+ if f.ti.mbs {
+ fastpathTV.EncAsMapSliceBoolV(v, e)
+ } else {
+ fastpathTV.EncSliceBoolV(v, e)
+ }
+}
+func (fastpathT) EncSliceBoolV(v []bool, e *Encoder) {
+ e.arrayStart(len(v))
+ for j := range v {
+ e.arrayElem()
+ e.e.EncodeBool(v[j])
+ }
+ e.arrayEnd()
+}
+func (fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) {
+ e.haltOnMbsOddLen(len(v))
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ e.e.EncodeBool(v[j])
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e)
+}
+func (fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.encode(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.encode(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e)
+}
+func (fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeString(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeString(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapStringBytesR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringBytesV(rv2i(rv).(map[string][]byte), e)
+}
+func (fastpathT) EncMapStringBytesV(v map[string][]byte, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeStringBytesRaw(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeStringBytesRaw(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e)
+}
+func (fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeUint(uint64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeUint(uint64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e)
+}
+func (fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeUint(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeUint(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e)
+}
+func (fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e)
+}
+func (fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e)
+}
+func (fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeFloat64(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeFloat64(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e)
+}
+func (fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]string, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(stringSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeBool(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeString(k2)
+ e.mapElemValue()
+ e.e.EncodeBool(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e)
+}
+func (fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint8, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint8Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.encode(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.encode(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e)
+}
+func (fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint8, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint8Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeString(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeString(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint8BytesR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8BytesV(rv2i(rv).(map[uint8][]byte), e)
+}
+func (fastpathT) EncMapUint8BytesV(v map[uint8][]byte, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint8, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint8Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeStringBytesRaw(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeStringBytesRaw(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e)
+}
+func (fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint8, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint8Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(uint64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(uint64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e)
+}
+func (fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint8, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint8Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e)
+}
+func (fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint8, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint8Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e)
+}
+func (fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint8, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint8Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e)
+}
+func (fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint8, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint8Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeFloat64(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeFloat64(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e)
+}
+func (fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint8, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint8Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeBool(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(uint64(k2))
+ e.mapElemValue()
+ e.e.EncodeBool(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e)
+}
+func (fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint64Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.encode(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.encode(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e)
+}
+func (fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint64Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeString(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeString(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint64BytesR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64BytesV(rv2i(rv).(map[uint64][]byte), e)
+}
+func (fastpathT) EncMapUint64BytesV(v map[uint64][]byte, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint64Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeStringBytesRaw(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeStringBytesRaw(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e)
+}
+func (fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint64Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeUint(uint64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeUint(uint64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e)
+}
+func (fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint64Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeUint(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeUint(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e)
+}
+func (fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint64Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e)
+}
+func (fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint64Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e)
+}
+func (fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint64Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeFloat64(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeFloat64(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e)
+}
+func (fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]uint64, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(uint64Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeBool(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeUint(k2)
+ e.mapElemValue()
+ e.e.EncodeBool(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e)
+}
+func (fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.encode(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.encode(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e)
+}
+func (fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeString(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeString(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapIntBytesR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntBytesV(rv2i(rv).(map[int][]byte), e)
+}
+func (fastpathT) EncMapIntBytesV(v map[int][]byte, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeStringBytesRaw(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeStringBytesRaw(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e)
+}
+func (fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(uint64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(uint64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e)
+}
+func (fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapIntIntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e)
+}
+func (fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e)
+}
+func (fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e)
+}
+func (fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeFloat64(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeFloat64(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e)
+}
+func (fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(intSlice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeBool(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeBool(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e)
+}
+func (fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int32, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(int32Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.encode(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.encode(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapInt32StringR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e)
+}
+func (fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int32, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(int32Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeString(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeString(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapInt32BytesR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32BytesV(rv2i(rv).(map[int32][]byte), e)
+}
+func (fastpathT) EncMapInt32BytesV(v map[int32][]byte, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int32, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(int32Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeStringBytesRaw(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeStringBytesRaw(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e)
+}
+func (fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int32, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(int32Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(uint64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(uint64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e)
+}
+func (fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int32, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(int32Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeUint(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapInt32IntR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32IntV(rv2i(rv).(map[int32]int), e)
+}
+func (fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int32, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(int32Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapInt32Int32R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e)
+}
+func (fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int32, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(int32Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v[k2]))
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeInt(int64(v2))
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapInt32Float64R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e)
+}
+func (fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int32, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(int32Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeFloat64(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeFloat64(v2)
+ }
+ }
+ e.mapEnd()
+}
+func (e *Encoder) fastpathEncMapInt32BoolR(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e)
+}
+func (fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) {
+ e.mapStart(len(v))
+ if e.h.Canonical {
+ v2 := make([]int32, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = k
+ i++
+ }
+ sort.Sort(int32Slice(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeBool(v[k2])
+ }
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ e.e.EncodeInt(int64(k2))
+ e.mapElemValue()
+ e.e.EncodeBool(v2)
+ }
+ }
+ e.mapEnd()
+}
+
+// -- decode
+
+// -- -- fast path type switch
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+ var changed bool
+ var containerLen int
+ switch v := iv.(type) {
+ case []interface{}:
+ fastpathTV.DecSliceIntfN(v, d)
+ case *[]interface{}:
+ var v2 []interface{}
+ if v2, changed = fastpathTV.DecSliceIntfY(*v, d); changed {
+ *v = v2
+ }
+ case []string:
+ fastpathTV.DecSliceStringN(v, d)
+ case *[]string:
+ var v2 []string
+ if v2, changed = fastpathTV.DecSliceStringY(*v, d); changed {
+ *v = v2
+ }
+ case [][]byte:
+ fastpathTV.DecSliceBytesN(v, d)
+ case *[][]byte:
+ var v2 [][]byte
+ if v2, changed = fastpathTV.DecSliceBytesY(*v, d); changed {
+ *v = v2
+ }
+ case []float32:
+ fastpathTV.DecSliceFloat32N(v, d)
+ case *[]float32:
+ var v2 []float32
+ if v2, changed = fastpathTV.DecSliceFloat32Y(*v, d); changed {
+ *v = v2
+ }
+ case []float64:
+ fastpathTV.DecSliceFloat64N(v, d)
+ case *[]float64:
+ var v2 []float64
+ if v2, changed = fastpathTV.DecSliceFloat64Y(*v, d); changed {
+ *v = v2
+ }
+ case []uint8:
+ fastpathTV.DecSliceUint8N(v, d)
+ case *[]uint8:
+ var v2 []uint8
+ if v2, changed = fastpathTV.DecSliceUint8Y(*v, d); changed {
+ *v = v2
+ }
+ case []uint64:
+ fastpathTV.DecSliceUint64N(v, d)
+ case *[]uint64:
+ var v2 []uint64
+ if v2, changed = fastpathTV.DecSliceUint64Y(*v, d); changed {
+ *v = v2
+ }
+ case []int:
+ fastpathTV.DecSliceIntN(v, d)
+ case *[]int:
+ var v2 []int
+ if v2, changed = fastpathTV.DecSliceIntY(*v, d); changed {
+ *v = v2
+ }
+ case []int32:
+ fastpathTV.DecSliceInt32N(v, d)
+ case *[]int32:
+ var v2 []int32
+ if v2, changed = fastpathTV.DecSliceInt32Y(*v, d); changed {
+ *v = v2
+ }
+ case []int64:
+ fastpathTV.DecSliceInt64N(v, d)
+ case *[]int64:
+ var v2 []int64
+ if v2, changed = fastpathTV.DecSliceInt64Y(*v, d); changed {
+ *v = v2
+ }
+ case []bool:
+ fastpathTV.DecSliceBoolN(v, d)
+ case *[]bool:
+ var v2 []bool
+ if v2, changed = fastpathTV.DecSliceBoolY(*v, d); changed {
+ *v = v2
+ }
+ case map[string]interface{}:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapStringIntfL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[string]interface{}:
+ fastpathTV.DecMapStringIntfX(v, d)
+ case map[string]string:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapStringStringL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[string]string:
+ fastpathTV.DecMapStringStringX(v, d)
+ case map[string][]byte:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapStringBytesL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[string][]byte:
+ fastpathTV.DecMapStringBytesX(v, d)
+ case map[string]uint8:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapStringUint8L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[string]uint8:
+ fastpathTV.DecMapStringUint8X(v, d)
+ case map[string]uint64:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapStringUint64L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[string]uint64:
+ fastpathTV.DecMapStringUint64X(v, d)
+ case map[string]int:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapStringIntL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[string]int:
+ fastpathTV.DecMapStringIntX(v, d)
+ case map[string]int32:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapStringInt32L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[string]int32:
+ fastpathTV.DecMapStringInt32X(v, d)
+ case map[string]float64:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapStringFloat64L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[string]float64:
+ fastpathTV.DecMapStringFloat64X(v, d)
+ case map[string]bool:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapStringBoolL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[string]bool:
+ fastpathTV.DecMapStringBoolX(v, d)
+ case map[uint8]interface{}:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint8IntfL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint8]interface{}:
+ fastpathTV.DecMapUint8IntfX(v, d)
+ case map[uint8]string:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint8StringL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint8]string:
+ fastpathTV.DecMapUint8StringX(v, d)
+ case map[uint8][]byte:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint8BytesL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint8][]byte:
+ fastpathTV.DecMapUint8BytesX(v, d)
+ case map[uint8]uint8:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint8Uint8L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint8]uint8:
+ fastpathTV.DecMapUint8Uint8X(v, d)
+ case map[uint8]uint64:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint8Uint64L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint8]uint64:
+ fastpathTV.DecMapUint8Uint64X(v, d)
+ case map[uint8]int:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint8IntL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint8]int:
+ fastpathTV.DecMapUint8IntX(v, d)
+ case map[uint8]int32:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint8Int32L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint8]int32:
+ fastpathTV.DecMapUint8Int32X(v, d)
+ case map[uint8]float64:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint8Float64L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint8]float64:
+ fastpathTV.DecMapUint8Float64X(v, d)
+ case map[uint8]bool:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint8BoolL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint8]bool:
+ fastpathTV.DecMapUint8BoolX(v, d)
+ case map[uint64]interface{}:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64IntfL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint64]interface{}:
+ fastpathTV.DecMapUint64IntfX(v, d)
+ case map[uint64]string:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64StringL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint64]string:
+ fastpathTV.DecMapUint64StringX(v, d)
+ case map[uint64][]byte:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64BytesL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint64][]byte:
+ fastpathTV.DecMapUint64BytesX(v, d)
+ case map[uint64]uint8:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64Uint8L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint64]uint8:
+ fastpathTV.DecMapUint64Uint8X(v, d)
+ case map[uint64]uint64:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64Uint64L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint64]uint64:
+ fastpathTV.DecMapUint64Uint64X(v, d)
+ case map[uint64]int:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64IntL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint64]int:
+ fastpathTV.DecMapUint64IntX(v, d)
+ case map[uint64]int32:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64Int32L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint64]int32:
+ fastpathTV.DecMapUint64Int32X(v, d)
+ case map[uint64]float64:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64Float64L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint64]float64:
+ fastpathTV.DecMapUint64Float64X(v, d)
+ case map[uint64]bool:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64BoolL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[uint64]bool:
+ fastpathTV.DecMapUint64BoolX(v, d)
+ case map[int]interface{}:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapIntIntfL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int]interface{}:
+ fastpathTV.DecMapIntIntfX(v, d)
+ case map[int]string:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapIntStringL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int]string:
+ fastpathTV.DecMapIntStringX(v, d)
+ case map[int][]byte:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapIntBytesL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int][]byte:
+ fastpathTV.DecMapIntBytesX(v, d)
+ case map[int]uint8:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapIntUint8L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int]uint8:
+ fastpathTV.DecMapIntUint8X(v, d)
+ case map[int]uint64:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapIntUint64L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int]uint64:
+ fastpathTV.DecMapIntUint64X(v, d)
+ case map[int]int:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapIntIntL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int]int:
+ fastpathTV.DecMapIntIntX(v, d)
+ case map[int]int32:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapIntInt32L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int]int32:
+ fastpathTV.DecMapIntInt32X(v, d)
+ case map[int]float64:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapIntFloat64L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int]float64:
+ fastpathTV.DecMapIntFloat64X(v, d)
+ case map[int]bool:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapIntBoolL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int]bool:
+ fastpathTV.DecMapIntBoolX(v, d)
+ case map[int32]interface{}:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32IntfL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int32]interface{}:
+ fastpathTV.DecMapInt32IntfX(v, d)
+ case map[int32]string:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32StringL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int32]string:
+ fastpathTV.DecMapInt32StringX(v, d)
+ case map[int32][]byte:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32BytesL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int32][]byte:
+ fastpathTV.DecMapInt32BytesX(v, d)
+ case map[int32]uint8:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32Uint8L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int32]uint8:
+ fastpathTV.DecMapInt32Uint8X(v, d)
+ case map[int32]uint64:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32Uint64L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int32]uint64:
+ fastpathTV.DecMapInt32Uint64X(v, d)
+ case map[int32]int:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32IntL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int32]int:
+ fastpathTV.DecMapInt32IntX(v, d)
+ case map[int32]int32:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32Int32L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int32]int32:
+ fastpathTV.DecMapInt32Int32X(v, d)
+ case map[int32]float64:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32Float64L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int32]float64:
+ fastpathTV.DecMapInt32Float64X(v, d)
+ case map[int32]bool:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32BoolL(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[int32]bool:
+ fastpathTV.DecMapInt32BoolX(v, d)
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+// fastpathDecodeSetZeroTypeSwitch sets the value that iv points to back to
+// nil (its zero value) when iv is a pointer to one of the generated
+// fast-path slice or map types. It reports whether iv matched one of those
+// types; callers fall back to reflection-based zeroing when it returns false.
+func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
+	switch v := iv.(type) {
+	case *[]interface{}:
+		*v = nil
+	case *[]string:
+		*v = nil
+	case *[][]byte:
+		*v = nil
+	case *[]float32:
+		*v = nil
+	case *[]float64:
+		*v = nil
+	case *[]uint8:
+		*v = nil
+	case *[]uint64:
+		*v = nil
+	case *[]int:
+		*v = nil
+	case *[]int32:
+		*v = nil
+	case *[]int64:
+		*v = nil
+	case *[]bool:
+		*v = nil
+
+	case *map[string]interface{}:
+		*v = nil
+	case *map[string]string:
+		*v = nil
+	case *map[string][]byte:
+		*v = nil
+	case *map[string]uint8:
+		*v = nil
+	case *map[string]uint64:
+		*v = nil
+	case *map[string]int:
+		*v = nil
+	case *map[string]int32:
+		*v = nil
+	case *map[string]float64:
+		*v = nil
+	case *map[string]bool:
+		*v = nil
+	case *map[uint8]interface{}:
+		*v = nil
+	case *map[uint8]string:
+		*v = nil
+	case *map[uint8][]byte:
+		*v = nil
+	case *map[uint8]uint8:
+		*v = nil
+	case *map[uint8]uint64:
+		*v = nil
+	case *map[uint8]int:
+		*v = nil
+	case *map[uint8]int32:
+		*v = nil
+	case *map[uint8]float64:
+		*v = nil
+	case *map[uint8]bool:
+		*v = nil
+	case *map[uint64]interface{}:
+		*v = nil
+	case *map[uint64]string:
+		*v = nil
+	case *map[uint64][]byte:
+		*v = nil
+	case *map[uint64]uint8:
+		*v = nil
+	case *map[uint64]uint64:
+		*v = nil
+	case *map[uint64]int:
+		*v = nil
+	case *map[uint64]int32:
+		*v = nil
+	case *map[uint64]float64:
+		*v = nil
+	case *map[uint64]bool:
+		*v = nil
+	case *map[int]interface{}:
+		*v = nil
+	case *map[int]string:
+		*v = nil
+	case *map[int][]byte:
+		*v = nil
+	case *map[int]uint8:
+		*v = nil
+	case *map[int]uint64:
+		*v = nil
+	case *map[int]int:
+		*v = nil
+	case *map[int]int32:
+		*v = nil
+	case *map[int]float64:
+		*v = nil
+	case *map[int]bool:
+		*v = nil
+	case *map[int32]interface{}:
+		*v = nil
+	case *map[int32]string:
+		*v = nil
+	case *map[int32][]byte:
+		*v = nil
+	case *map[int32]uint8:
+		*v = nil
+	case *map[int32]uint64:
+		*v = nil
+	case *map[int32]int:
+		*v = nil
+	case *map[int32]int32:
+		*v = nil
+	case *map[int32]float64:
+		*v = nil
+	case *map[int32]bool:
+		*v = nil
+
+	default:
+		_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+		return false
+	}
+	return true
+}
+
+// -- -- fast path functions
+
+// fastpathDecSliceIntfR is the reflection entry point for []interface{}.
+// A pointer goes through DecSliceIntfY so a reallocated slice can be stored
+// back; an array is viewed as a slice (rvGetSlice4Array) and, like a plain
+// slice value, is decoded in place via DecSliceIntfN (no growth possible).
+func (d *Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) {
+	var v []interface{}
+	switch rv.Kind() {
+	case reflect.Ptr:
+		vp := rv2i(rv).(*[]interface{})
+		var changed bool
+		if v, changed = fastpathTV.DecSliceIntfY(*vp, d); changed {
+			*vp = v
+		}
+	case reflect.Array:
+		rvGetSlice4Array(rv, &v)
+		fastpathTV.DecSliceIntfN(v, d)
+	default:
+		fastpathTV.DecSliceIntfN(rv2i(rv).([]interface{}), d)
+	}
+}
+
+// DecSliceIntfX decodes into *vp, writing the result back through the
+// pointer only when the slice changed.
+func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) {
+	if v, changed := f.DecSliceIntfY(*vp, d); changed {
+		*vp = v
+	}
+}
+
+// DecSliceIntfY decodes a stream container into v, returning the resulting
+// slice and whether it differs from the input (nil-ed, truncated, grown, or
+// reallocated). It handles a nil container, an empty container, and streams
+// both with a known length (hasLen) and length-unknown ones terminated by a
+// break marker (d.checkBreak).
+func (fastpathT) DecSliceIntfY(v []interface{}, d *Decoder) (v2 []interface{}, changed bool) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		if v == nil {
+			return
+		}
+		return nil, true
+	}
+	if containerLenS == 0 {
+		if v == nil {
+			v = []interface{}{}
+		} else if len(v) != 0 {
+			v = v[:0]
+		}
+		slh.End()
+		return v, true
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen {
+		if containerLenS > cap(v) {
+			// 16 is the per-element size hint used to cap the upfront allocation
+			// (presumably sizeof(interface{}) from the generator template).
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]interface{}, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j == 0 && len(v) == 0 { // means hasLen == false
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+			v = make([]interface{}, uint(xlen))
+			changed = true
+		}
+		if j >= len(v) {
+			v = append(v, nil)
+			changed = true
+		}
+		slh.ElemContainerState(j)
+		d.decode(&v[uint(j)])
+	}
+	if j < len(v) {
+		// stream had fewer elements than v: truncate to what was read
+		v = v[:uint(j)]
+		changed = true
+	} else if j == 0 && v == nil {
+		v = []interface{}{}
+		changed = true
+	}
+	slh.End()
+	return v, changed
+}
+
+// DecSliceIntfN decodes in place into v without growing it: if the stream
+// has more elements than len(v), it reports via arrayCannotExpand and stops.
+// Used for arrays and non-pointer slice values.
+func (fastpathT) DecSliceIntfN(v []interface{}, d *Decoder) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		return
+	}
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+	hasLen := containerLenS > 0
+	for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j >= len(v) {
+			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+			return
+		}
+		slh.ElemContainerState(j)
+		d.decode(&v[uint(j)])
+	}
+	slh.End()
+}
+
+// fastpathDecSliceStringR is the reflection entry point for []string.
+// A pointer goes through DecSliceStringY so a reallocated slice can be
+// stored back; arrays and plain slice values decode in place via
+// DecSliceStringN.
+func (d *Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) {
+	var v []string
+	switch rv.Kind() {
+	case reflect.Ptr:
+		vp := rv2i(rv).(*[]string)
+		var changed bool
+		if v, changed = fastpathTV.DecSliceStringY(*vp, d); changed {
+			*vp = v
+		}
+	case reflect.Array:
+		rvGetSlice4Array(rv, &v)
+		fastpathTV.DecSliceStringN(v, d)
+	default:
+		fastpathTV.DecSliceStringN(rv2i(rv).([]string), d)
+	}
+}
+
+// DecSliceStringX decodes into *vp, writing the result back through the
+// pointer only when the slice changed.
+func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) {
+	if v, changed := f.DecSliceStringY(*vp, d); changed {
+		*vp = v
+	}
+}
+
+// DecSliceStringY decodes a stream container into v, returning the
+// resulting slice and whether it differs from the input (nil-ed, truncated,
+// grown, or reallocated). Handles nil/empty containers and both known-length
+// and break-terminated streams.
+func (fastpathT) DecSliceStringY(v []string, d *Decoder) (v2 []string, changed bool) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		if v == nil {
+			return
+		}
+		return nil, true
+	}
+	if containerLenS == 0 {
+		if v == nil {
+			v = []string{}
+		} else if len(v) != 0 {
+			v = v[:0]
+		}
+		slh.End()
+		return v, true
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen {
+		if containerLenS > cap(v) {
+			// 16 is the per-element size hint for capping the upfront allocation
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]string, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j == 0 && len(v) == 0 { // means hasLen == false
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16)
+			v = make([]string, uint(xlen))
+			changed = true
+		}
+		if j >= len(v) {
+			v = append(v, "")
+			changed = true
+		}
+		slh.ElemContainerState(j)
+		// stringZC: bytes-to-string conversion (name suggests zero-copy where
+		// the handle allows it — confirm in the helper)
+		v[uint(j)] = d.stringZC(d.d.DecodeStringAsBytes())
+	}
+	if j < len(v) {
+		v = v[:uint(j)]
+		changed = true
+	} else if j == 0 && v == nil {
+		v = []string{}
+		changed = true
+	}
+	slh.End()
+	return v, changed
+}
+
+// DecSliceStringN decodes in place into v without growing it; overflow of
+// len(v) is reported via arrayCannotExpand. Used for arrays and non-pointer
+// slice values.
+func (fastpathT) DecSliceStringN(v []string, d *Decoder) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		return
+	}
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+	hasLen := containerLenS > 0
+	for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j >= len(v) {
+			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+			return
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = d.stringZC(d.d.DecodeStringAsBytes())
+	}
+	slh.End()
+}
+
+// fastpathDecSliceBytesR is the reflection entry point for [][]byte.
+// A pointer goes through DecSliceBytesY so a reallocated slice can be
+// stored back; arrays and plain slice values decode in place via
+// DecSliceBytesN.
+func (d *Decoder) fastpathDecSliceBytesR(f *codecFnInfo, rv reflect.Value) {
+	var v [][]byte
+	switch rv.Kind() {
+	case reflect.Ptr:
+		vp := rv2i(rv).(*[][]byte)
+		var changed bool
+		if v, changed = fastpathTV.DecSliceBytesY(*vp, d); changed {
+			*vp = v
+		}
+	case reflect.Array:
+		rvGetSlice4Array(rv, &v)
+		fastpathTV.DecSliceBytesN(v, d)
+	default:
+		fastpathTV.DecSliceBytesN(rv2i(rv).([][]byte), d)
+	}
+}
+
+// DecSliceBytesX decodes into *vp, writing the result back through the
+// pointer only when the slice changed.
+func (f fastpathT) DecSliceBytesX(vp *[][]byte, d *Decoder) {
+	if v, changed := f.DecSliceBytesY(*vp, d); changed {
+		*vp = v
+	}
+}
+
+// DecSliceBytesY decodes a stream container into v, returning the resulting
+// slice and whether it differs from the input (nil-ed, truncated, grown, or
+// reallocated). Handles nil/empty containers and both known-length and
+// break-terminated streams.
+func (fastpathT) DecSliceBytesY(v [][]byte, d *Decoder) (v2 [][]byte, changed bool) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		if v == nil {
+			return
+		}
+		return nil, true
+	}
+	if containerLenS == 0 {
+		if v == nil {
+			v = [][]byte{}
+		} else if len(v) != 0 {
+			v = v[:0]
+		}
+		slh.End()
+		return v, true
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen {
+		if containerLenS > cap(v) {
+			// 24 is the per-element size hint for capping the upfront allocation
+			// (presumably sizeof a slice header from the generator template)
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 24)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([][]byte, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j == 0 && len(v) == 0 { // means hasLen == false
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 24)
+			v = make([][]byte, uint(xlen))
+			changed = true
+		}
+		if j >= len(v) {
+			v = append(v, nil)
+			changed = true
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = d.d.DecodeBytes([]byte{})
+	}
+	if j < len(v) {
+		v = v[:uint(j)]
+		changed = true
+	} else if j == 0 && v == nil {
+		v = [][]byte{}
+		changed = true
+	}
+	slh.End()
+	return v, changed
+}
+
+// DecSliceBytesN decodes in place into v without growing it; overflow of
+// len(v) is reported via arrayCannotExpand. Used for arrays and non-pointer
+// slice values.
+func (fastpathT) DecSliceBytesN(v [][]byte, d *Decoder) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		return
+	}
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+	hasLen := containerLenS > 0
+	for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j >= len(v) {
+			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+			return
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = d.d.DecodeBytes([]byte{})
+	}
+	slh.End()
+}
+
+// fastpathDecSliceFloat32R is the reflection entry point for []float32.
+// A pointer goes through DecSliceFloat32Y so a reallocated slice can be
+// stored back; arrays and plain slice values decode in place via
+// DecSliceFloat32N.
+func (d *Decoder) fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) {
+	var v []float32
+	switch rv.Kind() {
+	case reflect.Ptr:
+		vp := rv2i(rv).(*[]float32)
+		var changed bool
+		if v, changed = fastpathTV.DecSliceFloat32Y(*vp, d); changed {
+			*vp = v
+		}
+	case reflect.Array:
+		rvGetSlice4Array(rv, &v)
+		fastpathTV.DecSliceFloat32N(v, d)
+	default:
+		fastpathTV.DecSliceFloat32N(rv2i(rv).([]float32), d)
+	}
+}
+
+// DecSliceFloat32X decodes into *vp, writing the result back through the
+// pointer only when the slice changed.
+func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) {
+	if v, changed := f.DecSliceFloat32Y(*vp, d); changed {
+		*vp = v
+	}
+}
+
+// DecSliceFloat32Y decodes a stream container into v, returning the
+// resulting slice and whether it differs from the input (nil-ed, truncated,
+// grown, or reallocated). Handles nil/empty containers and both known-length
+// and break-terminated streams.
+func (fastpathT) DecSliceFloat32Y(v []float32, d *Decoder) (v2 []float32, changed bool) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		if v == nil {
+			return
+		}
+		return nil, true
+	}
+	if containerLenS == 0 {
+		if v == nil {
+			v = []float32{}
+		} else if len(v) != 0 {
+			v = v[:0]
+		}
+		slh.End()
+		return v, true
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen {
+		if containerLenS > cap(v) {
+			// 4 = sizeof(float32): per-element hint capping the upfront allocation
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]float32, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j == 0 && len(v) == 0 { // means hasLen == false
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			v = make([]float32, uint(xlen))
+			changed = true
+		}
+		if j >= len(v) {
+			v = append(v, 0)
+			changed = true
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = float32(d.decodeFloat32())
+	}
+	if j < len(v) {
+		v = v[:uint(j)]
+		changed = true
+	} else if j == 0 && v == nil {
+		v = []float32{}
+		changed = true
+	}
+	slh.End()
+	return v, changed
+}
+
+// DecSliceFloat32N decodes in place into v without growing it; overflow of
+// len(v) is reported via arrayCannotExpand. Used for arrays and non-pointer
+// slice values.
+func (fastpathT) DecSliceFloat32N(v []float32, d *Decoder) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		return
+	}
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+	hasLen := containerLenS > 0
+	for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j >= len(v) {
+			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+			return
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = float32(d.decodeFloat32())
+	}
+	slh.End()
+}
+
+// fastpathDecSliceFloat64R is the reflection entry point for []float64.
+// A pointer goes through DecSliceFloat64Y so a reallocated slice can be
+// stored back; arrays and plain slice values decode in place via
+// DecSliceFloat64N.
+func (d *Decoder) fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) {
+	var v []float64
+	switch rv.Kind() {
+	case reflect.Ptr:
+		vp := rv2i(rv).(*[]float64)
+		var changed bool
+		if v, changed = fastpathTV.DecSliceFloat64Y(*vp, d); changed {
+			*vp = v
+		}
+	case reflect.Array:
+		rvGetSlice4Array(rv, &v)
+		fastpathTV.DecSliceFloat64N(v, d)
+	default:
+		fastpathTV.DecSliceFloat64N(rv2i(rv).([]float64), d)
+	}
+}
+
+// DecSliceFloat64X decodes into *vp, writing the result back through the
+// pointer only when the slice changed.
+func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) {
+	if v, changed := f.DecSliceFloat64Y(*vp, d); changed {
+		*vp = v
+	}
+}
+
+// DecSliceFloat64Y decodes a stream container into v, returning the
+// resulting slice and whether it differs from the input (nil-ed, truncated,
+// grown, or reallocated). Handles nil/empty containers and both known-length
+// and break-terminated streams.
+func (fastpathT) DecSliceFloat64Y(v []float64, d *Decoder) (v2 []float64, changed bool) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		if v == nil {
+			return
+		}
+		return nil, true
+	}
+	if containerLenS == 0 {
+		if v == nil {
+			v = []float64{}
+		} else if len(v) != 0 {
+			v = v[:0]
+		}
+		slh.End()
+		return v, true
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen {
+		if containerLenS > cap(v) {
+			// 8 = sizeof(float64): per-element hint capping the upfront allocation
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]float64, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j == 0 && len(v) == 0 { // means hasLen == false
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			v = make([]float64, uint(xlen))
+			changed = true
+		}
+		if j >= len(v) {
+			v = append(v, 0)
+			changed = true
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = d.d.DecodeFloat64()
+	}
+	if j < len(v) {
+		v = v[:uint(j)]
+		changed = true
+	} else if j == 0 && v == nil {
+		v = []float64{}
+		changed = true
+	}
+	slh.End()
+	return v, changed
+}
+
+// DecSliceFloat64N decodes in place into v without growing it; overflow of
+// len(v) is reported via arrayCannotExpand. Used for arrays and non-pointer
+// slice values.
+func (fastpathT) DecSliceFloat64N(v []float64, d *Decoder) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		return
+	}
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+	hasLen := containerLenS > 0
+	for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j >= len(v) {
+			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+			return
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = d.d.DecodeFloat64()
+	}
+	slh.End()
+}
+
+// fastpathDecSliceUint8R is the reflection entry point for []uint8.
+// A pointer goes through DecSliceUint8Y so a reallocated slice can be
+// stored back; arrays and plain slice values decode in place via
+// DecSliceUint8N.
+func (d *Decoder) fastpathDecSliceUint8R(f *codecFnInfo, rv reflect.Value) {
+	var v []uint8
+	switch rv.Kind() {
+	case reflect.Ptr:
+		vp := rv2i(rv).(*[]uint8)
+		var changed bool
+		if v, changed = fastpathTV.DecSliceUint8Y(*vp, d); changed {
+			*vp = v
+		}
+	case reflect.Array:
+		rvGetSlice4Array(rv, &v)
+		fastpathTV.DecSliceUint8N(v, d)
+	default:
+		fastpathTV.DecSliceUint8N(rv2i(rv).([]uint8), d)
+	}
+}
+
+// DecSliceUint8X decodes into *vp, writing the result back through the
+// pointer only when the slice changed.
+func (f fastpathT) DecSliceUint8X(vp *[]uint8, d *Decoder) {
+	if v, changed := f.DecSliceUint8Y(*vp, d); changed {
+		*vp = v
+	}
+}
+
+// DecSliceUint8Y decodes a stream container into v. Unlike the other slice
+// fast paths, a []uint8 whose stream container is neither nil nor a map is
+// read wholesale as a byte string via decodeBytesInto; otherwise it falls
+// through to the generic element-by-element slice decode. Returns the
+// resulting slice and whether it differs from the input.
+func (fastpathT) DecSliceUint8Y(v []uint8, d *Decoder) (v2 []uint8, changed bool) {
+	switch d.d.ContainerType() {
+	case valueTypeNil, valueTypeMap:
+		break
+	default:
+		// three-index slice caps capacity at len(v), so decodeBytesInto cannot
+		// grow into (and clobber) shared backing storage beyond len(v)
+		v2 = d.decodeBytesInto(v[:len(v):len(v)])
+		changed = !(len(v2) > 0 && len(v2) == len(v) && &v2[0] == &v[0]) // not same slice
+		return
+	}
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		if v == nil {
+			return
+		}
+		return nil, true
+	}
+	if containerLenS == 0 {
+		if v == nil {
+			v = []uint8{}
+		} else if len(v) != 0 {
+			v = v[:0]
+		}
+		slh.End()
+		return v, true
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen {
+		if containerLenS > cap(v) {
+			// 1 = sizeof(uint8): per-element hint capping the upfront allocation
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]uint8, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j == 0 && len(v) == 0 { // means hasLen == false
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			v = make([]uint8, uint(xlen))
+			changed = true
+		}
+		if j >= len(v) {
+			v = append(v, 0)
+			changed = true
+		}
+		slh.ElemContainerState(j)
+		// chkOvf.UintV errors out if the decoded value overflows 8 bits
+		v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+	}
+	if j < len(v) {
+		v = v[:uint(j)]
+		changed = true
+	} else if j == 0 && v == nil {
+		v = []uint8{}
+		changed = true
+	}
+	slh.End()
+	return v, changed
+}
+
+// DecSliceUint8N decodes in place into v without growing it. The byte-string
+// shortcut applies here too, but since v cannot be reassigned the decoded
+// bytes are copied into v when decodeBytesInto returned a different slice.
+func (fastpathT) DecSliceUint8N(v []uint8, d *Decoder) {
+	switch d.d.ContainerType() {
+	case valueTypeNil, valueTypeMap:
+		break
+	default:
+		v2 := d.decodeBytesInto(v[:len(v):len(v)])
+		if !(len(v2) > 0 && len(v2) == len(v) && &v2[0] == &v[0]) { // not same slice
+			copy(v, v2)
+		}
+		return
+	}
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		return
+	}
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+	hasLen := containerLenS > 0
+	for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j >= len(v) {
+			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+			return
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+	}
+	slh.End()
+}
+
+// fastpathDecSliceUint64R is the reflection entry point for []uint64.
+// A pointer goes through DecSliceUint64Y so a reallocated slice can be
+// stored back; arrays and plain slice values decode in place via
+// DecSliceUint64N.
+func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) {
+	var v []uint64
+	switch rv.Kind() {
+	case reflect.Ptr:
+		vp := rv2i(rv).(*[]uint64)
+		var changed bool
+		if v, changed = fastpathTV.DecSliceUint64Y(*vp, d); changed {
+			*vp = v
+		}
+	case reflect.Array:
+		rvGetSlice4Array(rv, &v)
+		fastpathTV.DecSliceUint64N(v, d)
+	default:
+		fastpathTV.DecSliceUint64N(rv2i(rv).([]uint64), d)
+	}
+}
+
+// DecSliceUint64X decodes into *vp, writing the result back through the
+// pointer only when the slice changed.
+func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) {
+	if v, changed := f.DecSliceUint64Y(*vp, d); changed {
+		*vp = v
+	}
+}
+
+// DecSliceUint64Y decodes a stream container into v, returning the
+// resulting slice and whether it differs from the input (nil-ed, truncated,
+// grown, or reallocated). Handles nil/empty containers and both known-length
+// and break-terminated streams.
+func (fastpathT) DecSliceUint64Y(v []uint64, d *Decoder) (v2 []uint64, changed bool) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		if v == nil {
+			return
+		}
+		return nil, true
+	}
+	if containerLenS == 0 {
+		if v == nil {
+			v = []uint64{}
+		} else if len(v) != 0 {
+			v = v[:0]
+		}
+		slh.End()
+		return v, true
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen {
+		if containerLenS > cap(v) {
+			// 8 = sizeof(uint64): per-element hint capping the upfront allocation
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]uint64, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j == 0 && len(v) == 0 { // means hasLen == false
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			v = make([]uint64, uint(xlen))
+			changed = true
+		}
+		if j >= len(v) {
+			v = append(v, 0)
+			changed = true
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = d.d.DecodeUint64()
+	}
+	if j < len(v) {
+		v = v[:uint(j)]
+		changed = true
+	} else if j == 0 && v == nil {
+		v = []uint64{}
+		changed = true
+	}
+	slh.End()
+	return v, changed
+}
+
+// DecSliceUint64N decodes in place into v without growing it; overflow of
+// len(v) is reported via arrayCannotExpand. Used for arrays and non-pointer
+// slice values.
+func (fastpathT) DecSliceUint64N(v []uint64, d *Decoder) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		return
+	}
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+	hasLen := containerLenS > 0
+	for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j >= len(v) {
+			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+			return
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = d.d.DecodeUint64()
+	}
+	slh.End()
+}
+
+// fastpathDecSliceIntR is the reflection entry point for []int.
+// A pointer goes through DecSliceIntY so a reallocated slice can be stored
+// back; arrays and plain slice values decode in place via DecSliceIntN.
+func (d *Decoder) fastpathDecSliceIntR(f *codecFnInfo, rv reflect.Value) {
+	var v []int
+	switch rv.Kind() {
+	case reflect.Ptr:
+		vp := rv2i(rv).(*[]int)
+		var changed bool
+		if v, changed = fastpathTV.DecSliceIntY(*vp, d); changed {
+			*vp = v
+		}
+	case reflect.Array:
+		rvGetSlice4Array(rv, &v)
+		fastpathTV.DecSliceIntN(v, d)
+	default:
+		fastpathTV.DecSliceIntN(rv2i(rv).([]int), d)
+	}
+}
+
+// DecSliceIntX decodes into *vp, writing the result back through the
+// pointer only when the slice changed.
+func (f fastpathT) DecSliceIntX(vp *[]int, d *Decoder) {
+	if v, changed := f.DecSliceIntY(*vp, d); changed {
+		*vp = v
+	}
+}
+
+// DecSliceIntY decodes a stream container into v, returning the resulting
+// slice and whether it differs from the input (nil-ed, truncated, grown, or
+// reallocated). Handles nil/empty containers and both known-length and
+// break-terminated streams.
+func (fastpathT) DecSliceIntY(v []int, d *Decoder) (v2 []int, changed bool) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		if v == nil {
+			return
+		}
+		return nil, true
+	}
+	if containerLenS == 0 {
+		if v == nil {
+			v = []int{}
+		} else if len(v) != 0 {
+			v = v[:0]
+		}
+		slh.End()
+		return v, true
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen {
+		if containerLenS > cap(v) {
+			// 8: per-element size hint capping the upfront allocation
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]int, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j == 0 && len(v) == 0 { // means hasLen == false
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			v = make([]int, uint(xlen))
+			changed = true
+		}
+		if j >= len(v) {
+			v = append(v, 0)
+			changed = true
+		}
+		slh.ElemContainerState(j)
+		// chkOvf.IntV errors out if the decoded value overflows the
+		// platform int size (intBitsize)
+		v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+	}
+	if j < len(v) {
+		v = v[:uint(j)]
+		changed = true
+	} else if j == 0 && v == nil {
+		v = []int{}
+		changed = true
+	}
+	slh.End()
+	return v, changed
+}
+
+// DecSliceIntN decodes in place into v without growing it; overflow of
+// len(v) is reported via arrayCannotExpand. Used for arrays and non-pointer
+// slice values.
+func (fastpathT) DecSliceIntN(v []int, d *Decoder) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		return
+	}
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+	hasLen := containerLenS > 0
+	for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j >= len(v) {
+			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+			return
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+	}
+	slh.End()
+}
+
+// fastpathDecSliceInt32R is the reflection entry point for []int32.
+// A pointer goes through DecSliceInt32Y so a reallocated slice can be
+// stored back; arrays and plain slice values decode in place via
+// DecSliceInt32N.
+func (d *Decoder) fastpathDecSliceInt32R(f *codecFnInfo, rv reflect.Value) {
+	var v []int32
+	switch rv.Kind() {
+	case reflect.Ptr:
+		vp := rv2i(rv).(*[]int32)
+		var changed bool
+		if v, changed = fastpathTV.DecSliceInt32Y(*vp, d); changed {
+			*vp = v
+		}
+	case reflect.Array:
+		rvGetSlice4Array(rv, &v)
+		fastpathTV.DecSliceInt32N(v, d)
+	default:
+		fastpathTV.DecSliceInt32N(rv2i(rv).([]int32), d)
+	}
+}
+
+// DecSliceInt32X decodes into *vp, writing the result back through the
+// pointer only when the slice changed.
+func (f fastpathT) DecSliceInt32X(vp *[]int32, d *Decoder) {
+	if v, changed := f.DecSliceInt32Y(*vp, d); changed {
+		*vp = v
+	}
+}
+
+// DecSliceInt32Y decodes a stream container into v, returning the
+// resulting slice and whether it differs from the input (nil-ed, truncated,
+// grown, or reallocated). Handles nil/empty containers and both known-length
+// and break-terminated streams.
+func (fastpathT) DecSliceInt32Y(v []int32, d *Decoder) (v2 []int32, changed bool) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		if v == nil {
+			return
+		}
+		return nil, true
+	}
+	if containerLenS == 0 {
+		if v == nil {
+			v = []int32{}
+		} else if len(v) != 0 {
+			v = v[:0]
+		}
+		slh.End()
+		return v, true
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen {
+		if containerLenS > cap(v) {
+			// 4 = sizeof(int32): per-element hint capping the upfront allocation
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]int32, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j == 0 && len(v) == 0 { // means hasLen == false
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4)
+			v = make([]int32, uint(xlen))
+			changed = true
+		}
+		if j >= len(v) {
+			v = append(v, 0)
+			changed = true
+		}
+		slh.ElemContainerState(j)
+		// chkOvf.IntV errors out if the decoded value overflows 32 bits
+		v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+	}
+	if j < len(v) {
+		v = v[:uint(j)]
+		changed = true
+	} else if j == 0 && v == nil {
+		v = []int32{}
+		changed = true
+	}
+	slh.End()
+	return v, changed
+}
+
+// DecSliceInt32N decodes in place into v without growing it; overflow of
+// len(v) is reported via arrayCannotExpand. Used for arrays and non-pointer
+// slice values.
+func (fastpathT) DecSliceInt32N(v []int32, d *Decoder) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		return
+	}
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+	hasLen := containerLenS > 0
+	for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j >= len(v) {
+			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+			return
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+	}
+	slh.End()
+}
+
+// fastpathDecSliceInt64R is the reflection entry point for []int64.
+// A pointer goes through DecSliceInt64Y so a reallocated slice can be
+// stored back; arrays and plain slice values decode in place via
+// DecSliceInt64N.
+func (d *Decoder) fastpathDecSliceInt64R(f *codecFnInfo, rv reflect.Value) {
+	var v []int64
+	switch rv.Kind() {
+	case reflect.Ptr:
+		vp := rv2i(rv).(*[]int64)
+		var changed bool
+		if v, changed = fastpathTV.DecSliceInt64Y(*vp, d); changed {
+			*vp = v
+		}
+	case reflect.Array:
+		rvGetSlice4Array(rv, &v)
+		fastpathTV.DecSliceInt64N(v, d)
+	default:
+		fastpathTV.DecSliceInt64N(rv2i(rv).([]int64), d)
+	}
+}
+
+// DecSliceInt64X decodes into *vp, writing the result back through the
+// pointer only when the slice changed.
+func (f fastpathT) DecSliceInt64X(vp *[]int64, d *Decoder) {
+	if v, changed := f.DecSliceInt64Y(*vp, d); changed {
+		*vp = v
+	}
+}
+
+// DecSliceInt64Y decodes a stream container into v, returning the
+// resulting slice and whether it differs from the input (nil-ed, truncated,
+// grown, or reallocated). Handles nil/empty containers and both known-length
+// and break-terminated streams.
+func (fastpathT) DecSliceInt64Y(v []int64, d *Decoder) (v2 []int64, changed bool) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		if v == nil {
+			return
+		}
+		return nil, true
+	}
+	if containerLenS == 0 {
+		if v == nil {
+			v = []int64{}
+		} else if len(v) != 0 {
+			v = v[:0]
+		}
+		slh.End()
+		return v, true
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen {
+		if containerLenS > cap(v) {
+			// 8 = sizeof(int64): per-element hint capping the upfront allocation
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]int64, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j == 0 && len(v) == 0 { // means hasLen == false
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8)
+			v = make([]int64, uint(xlen))
+			changed = true
+		}
+		if j >= len(v) {
+			v = append(v, 0)
+			changed = true
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = d.d.DecodeInt64()
+	}
+	if j < len(v) {
+		v = v[:uint(j)]
+		changed = true
+	} else if j == 0 && v == nil {
+		v = []int64{}
+		changed = true
+	}
+	slh.End()
+	return v, changed
+}
+
+// DecSliceInt64N decodes in place into v without growing it; overflow of
+// len(v) is reported via arrayCannotExpand. Used for arrays and non-pointer
+// slice values.
+func (fastpathT) DecSliceInt64N(v []int64, d *Decoder) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		return
+	}
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+	hasLen := containerLenS > 0
+	for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j >= len(v) {
+			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+			return
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = d.d.DecodeInt64()
+	}
+	slh.End()
+}
+
+// fastpathDecSliceBoolR is the reflection entry point for []bool.
+// A pointer goes through DecSliceBoolY so a reallocated slice can be
+// stored back; arrays and plain slice values decode in place via
+// DecSliceBoolN.
+func (d *Decoder) fastpathDecSliceBoolR(f *codecFnInfo, rv reflect.Value) {
+	var v []bool
+	switch rv.Kind() {
+	case reflect.Ptr:
+		vp := rv2i(rv).(*[]bool)
+		var changed bool
+		if v, changed = fastpathTV.DecSliceBoolY(*vp, d); changed {
+			*vp = v
+		}
+	case reflect.Array:
+		rvGetSlice4Array(rv, &v)
+		fastpathTV.DecSliceBoolN(v, d)
+	default:
+		fastpathTV.DecSliceBoolN(rv2i(rv).([]bool), d)
+	}
+}
+
+// DecSliceBoolX decodes into *vp, writing the result back through the
+// pointer only when the slice changed.
+func (f fastpathT) DecSliceBoolX(vp *[]bool, d *Decoder) {
+	if v, changed := f.DecSliceBoolY(*vp, d); changed {
+		*vp = v
+	}
+}
+
+// DecSliceBoolY decodes a stream container into v, returning the resulting
+// slice and whether it differs from the input (nil-ed, truncated, grown, or
+// reallocated). Handles nil/empty containers and both known-length and
+// break-terminated streams.
+func (fastpathT) DecSliceBoolY(v []bool, d *Decoder) (v2 []bool, changed bool) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		if v == nil {
+			return
+		}
+		return nil, true
+	}
+	if containerLenS == 0 {
+		if v == nil {
+			v = []bool{}
+		} else if len(v) != 0 {
+			v = v[:0]
+		}
+		slh.End()
+		return v, true
+	}
+	hasLen := containerLenS > 0
+	var xlen int
+	if hasLen {
+		if containerLenS > cap(v) {
+			// 1 = sizeof(bool): per-element hint capping the upfront allocation
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			if xlen <= cap(v) {
+				v = v[:uint(xlen)]
+			} else {
+				v = make([]bool, uint(xlen))
+			}
+			changed = true
+		} else if containerLenS != len(v) {
+			v = v[:containerLenS]
+			changed = true
+		}
+	}
+	var j int
+	for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j == 0 && len(v) == 0 { // means hasLen == false
+			xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1)
+			v = make([]bool, uint(xlen))
+			changed = true
+		}
+		if j >= len(v) {
+			v = append(v, false)
+			changed = true
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = d.d.DecodeBool()
+	}
+	if j < len(v) {
+		v = v[:uint(j)]
+		changed = true
+	} else if j == 0 && v == nil {
+		v = []bool{}
+		changed = true
+	}
+	slh.End()
+	return v, changed
+}
+
+// DecSliceBoolN decodes in place into v without growing it; overflow of
+// len(v) is reported via arrayCannotExpand. Used for arrays and non-pointer
+// slice values.
+func (fastpathT) DecSliceBoolN(v []bool, d *Decoder) {
+	slh, containerLenS := d.decSliceHelperStart()
+	if slh.IsNil {
+		return
+	}
+	if containerLenS == 0 {
+		slh.End()
+		return
+	}
+	hasLen := containerLenS > 0
+	for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+		if j >= len(v) {
+			slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+			return
+		}
+		slh.ElemContainerState(j)
+		v[uint(j)] = d.d.DecodeBool()
+	}
+	slh.End()
+}
+// fastpathDecMapStringIntfR is the reflect-path entry for map[string]interface{}:
+// a *map target is allocated when nil, then filled via DecMapStringIntfL.
+func (d *Decoder) fastpathDecMapStringIntfR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[string]interface{})
+		if *vp == nil {
+			// 32 is the generator's per-entry size estimate, used to bound the initial cap
+			*vp = make(map[string]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 32))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapStringIntfL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapStringIntfL(rv2i(rv).(map[string]interface{}), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapStringIntfX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[string]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 32))
+		}
+		if containerLen != 0 {
+			f.DecMapStringIntfL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapStringIntfL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown (containerLen < 0).
+func (fastpathT) DecMapStringIntfL(v map[string]interface{}, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[string]interface{} given stream length: %v", containerLen)
+		return
+	}
+	// reuse the existing value for in-place decode unless reset options forbid it
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk string
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.stringZC(d.d.DecodeStringAsBytes())
+		d.mapElemValue()
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		v[mk] = mv
+	}
+}
+// fastpathDecMapStringStringR is the reflect-path entry for map[string]string:
+// a *map target is allocated when nil, then filled via DecMapStringStringL.
+func (d *Decoder) fastpathDecMapStringStringR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[string]string)
+		if *vp == nil {
+			*vp = make(map[string]string, decInferLen(containerLen, d.h.MaxInitLen, 32))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapStringStringL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapStringStringL(rv2i(rv).(map[string]string), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapStringStringX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapStringStringX(vp *map[string]string, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[string]string, decInferLen(containerLen, d.h.MaxInitLen, 32))
+		}
+		if containerLen != 0 {
+			f.DecMapStringStringL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapStringStringL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapStringStringL(v map[string]string, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[string]string given stream length: %v", containerLen)
+		return
+	}
+	var mk string
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.stringZC(d.d.DecodeStringAsBytes())
+		d.mapElemValue()
+		mv = d.stringZC(d.d.DecodeStringAsBytes())
+		v[mk] = mv
+	}
+}
+// fastpathDecMapStringBytesR is the reflect-path entry for map[string][]byte:
+// a *map target is allocated when nil, then filled via DecMapStringBytesL.
+func (d *Decoder) fastpathDecMapStringBytesR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[string][]byte)
+		if *vp == nil {
+			*vp = make(map[string][]byte, decInferLen(containerLen, d.h.MaxInitLen, 40))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapStringBytesL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapStringBytesL(rv2i(rv).(map[string][]byte), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapStringBytesX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapStringBytesX(vp *map[string][]byte, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[string][]byte, decInferLen(containerLen, d.h.MaxInitLen, 40))
+		}
+		if containerLen != 0 {
+			f.DecMapStringBytesL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapStringBytesL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapStringBytesL(v map[string][]byte, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[string][]byte given stream length: %v", containerLen)
+		return
+	}
+	// reuse the existing byte slice for in-place decode unless MapValueReset is set
+	mapGet := v != nil && !d.h.MapValueReset
+	var mk string
+	var mv []byte
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.stringZC(d.d.DecodeStringAsBytes())
+		d.mapElemValue()
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		mv = d.decodeBytesInto(mv)
+		v[mk] = mv
+	}
+}
+// fastpathDecMapStringUint8R is the reflect-path entry for map[string]uint8:
+// a *map target is allocated when nil, then filled via DecMapStringUint8L.
+func (d *Decoder) fastpathDecMapStringUint8R(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[string]uint8)
+		if *vp == nil {
+			*vp = make(map[string]uint8, decInferLen(containerLen, d.h.MaxInitLen, 17))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapStringUint8L(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapStringUint8L(rv2i(rv).(map[string]uint8), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapStringUint8X decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[string]uint8, decInferLen(containerLen, d.h.MaxInitLen, 17))
+		}
+		if containerLen != 0 {
+			f.DecMapStringUint8L(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapStringUint8L reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapStringUint8L(v map[string]uint8, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[string]uint8 given stream length: %v", containerLen)
+		return
+	}
+	var mk string
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.stringZC(d.d.DecodeStringAsBytes())
+		d.mapElemValue()
+		mv = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		v[mk] = mv
+	}
+}
+// fastpathDecMapStringUint64R is the reflect-path entry for map[string]uint64:
+// a *map target is allocated when nil, then filled via DecMapStringUint64L.
+func (d *Decoder) fastpathDecMapStringUint64R(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[string]uint64)
+		if *vp == nil {
+			*vp = make(map[string]uint64, decInferLen(containerLen, d.h.MaxInitLen, 24))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapStringUint64L(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapStringUint64L(rv2i(rv).(map[string]uint64), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapStringUint64X decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[string]uint64, decInferLen(containerLen, d.h.MaxInitLen, 24))
+		}
+		if containerLen != 0 {
+			f.DecMapStringUint64L(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapStringUint64L reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapStringUint64L(v map[string]uint64, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[string]uint64 given stream length: %v", containerLen)
+		return
+	}
+	var mk string
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.stringZC(d.d.DecodeStringAsBytes())
+		d.mapElemValue()
+		mv = d.d.DecodeUint64()
+		v[mk] = mv
+	}
+}
+// fastpathDecMapStringIntR is the reflect-path entry for map[string]int:
+// a *map target is allocated when nil, then filled via DecMapStringIntL.
+func (d *Decoder) fastpathDecMapStringIntR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[string]int)
+		if *vp == nil {
+			*vp = make(map[string]int, decInferLen(containerLen, d.h.MaxInitLen, 24))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapStringIntL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapStringIntL(rv2i(rv).(map[string]int), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapStringIntX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapStringIntX(vp *map[string]int, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[string]int, decInferLen(containerLen, d.h.MaxInitLen, 24))
+		}
+		if containerLen != 0 {
+			f.DecMapStringIntL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapStringIntL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapStringIntL(v map[string]int, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[string]int given stream length: %v", containerLen)
+		return
+	}
+	var mk string
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.stringZC(d.d.DecodeStringAsBytes())
+		d.mapElemValue()
+		mv = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+		v[mk] = mv
+	}
+}
+// fastpathDecMapStringInt32R is the reflect-path entry for map[string]int32:
+// a *map target is allocated when nil, then filled via DecMapStringInt32L.
+func (d *Decoder) fastpathDecMapStringInt32R(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[string]int32)
+		if *vp == nil {
+			*vp = make(map[string]int32, decInferLen(containerLen, d.h.MaxInitLen, 20))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapStringInt32L(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapStringInt32L(rv2i(rv).(map[string]int32), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapStringInt32X decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[string]int32, decInferLen(containerLen, d.h.MaxInitLen, 20))
+		}
+		if containerLen != 0 {
+			f.DecMapStringInt32L(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapStringInt32L reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapStringInt32L(v map[string]int32, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[string]int32 given stream length: %v", containerLen)
+		return
+	}
+	var mk string
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.stringZC(d.d.DecodeStringAsBytes())
+		d.mapElemValue()
+		mv = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+		v[mk] = mv
+	}
+}
+// fastpathDecMapStringFloat64R is the reflect-path entry for map[string]float64:
+// a *map target is allocated when nil, then filled via DecMapStringFloat64L.
+func (d *Decoder) fastpathDecMapStringFloat64R(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[string]float64)
+		if *vp == nil {
+			*vp = make(map[string]float64, decInferLen(containerLen, d.h.MaxInitLen, 24))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapStringFloat64L(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapStringFloat64L(rv2i(rv).(map[string]float64), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapStringFloat64X decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[string]float64, decInferLen(containerLen, d.h.MaxInitLen, 24))
+		}
+		if containerLen != 0 {
+			f.DecMapStringFloat64L(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapStringFloat64L reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapStringFloat64L(v map[string]float64, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[string]float64 given stream length: %v", containerLen)
+		return
+	}
+	var mk string
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.stringZC(d.d.DecodeStringAsBytes())
+		d.mapElemValue()
+		mv = d.d.DecodeFloat64()
+		v[mk] = mv
+	}
+}
+// fastpathDecMapStringBoolR is the reflect-path entry for map[string]bool:
+// a *map target is allocated when nil, then filled via DecMapStringBoolL.
+func (d *Decoder) fastpathDecMapStringBoolR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[string]bool)
+		if *vp == nil {
+			*vp = make(map[string]bool, decInferLen(containerLen, d.h.MaxInitLen, 17))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapStringBoolL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapStringBoolL(rv2i(rv).(map[string]bool), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapStringBoolX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[string]bool, decInferLen(containerLen, d.h.MaxInitLen, 17))
+		}
+		if containerLen != 0 {
+			f.DecMapStringBoolL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapStringBoolL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapStringBoolL(v map[string]bool, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[string]bool given stream length: %v", containerLen)
+		return
+	}
+	var mk string
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.stringZC(d.d.DecodeStringAsBytes())
+		d.mapElemValue()
+		mv = d.d.DecodeBool()
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint8IntfR is the reflect-path entry for map[uint8]interface{}:
+// a *map target is allocated when nil, then filled via DecMapUint8IntfL.
+func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint8]interface{})
+		if *vp == nil {
+			*vp = make(map[uint8]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 17))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint8IntfL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint8IntfL(rv2i(rv).(map[uint8]interface{}), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint8IntfX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint8]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 17))
+		}
+		if containerLen != 0 {
+			f.DecMapUint8IntfL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint8IntfL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint8IntfL(v map[uint8]interface{}, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint8]interface{} given stream length: %v", containerLen)
+		return
+	}
+	// reuse the existing value for in-place decode unless reset options forbid it
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint8
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		d.mapElemValue()
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint8StringR is the reflect-path entry for map[uint8]string:
+// a *map target is allocated when nil, then filled via DecMapUint8StringL.
+func (d *Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint8]string)
+		if *vp == nil {
+			*vp = make(map[uint8]string, decInferLen(containerLen, d.h.MaxInitLen, 17))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint8StringL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint8StringL(rv2i(rv).(map[uint8]string), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint8StringX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint8]string, decInferLen(containerLen, d.h.MaxInitLen, 17))
+		}
+		if containerLen != 0 {
+			f.DecMapUint8StringL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint8StringL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint8StringL(v map[uint8]string, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint8]string given stream length: %v", containerLen)
+		return
+	}
+	var mk uint8
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		d.mapElemValue()
+		mv = d.stringZC(d.d.DecodeStringAsBytes())
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint8BytesR is the reflect-path entry for map[uint8][]byte:
+// a *map target is allocated when nil, then filled via DecMapUint8BytesL.
+func (d *Decoder) fastpathDecMapUint8BytesR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint8][]byte)
+		if *vp == nil {
+			*vp = make(map[uint8][]byte, decInferLen(containerLen, d.h.MaxInitLen, 25))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint8BytesL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint8BytesL(rv2i(rv).(map[uint8][]byte), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint8BytesX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint8BytesX(vp *map[uint8][]byte, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint8][]byte, decInferLen(containerLen, d.h.MaxInitLen, 25))
+		}
+		if containerLen != 0 {
+			f.DecMapUint8BytesL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint8BytesL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint8BytesL(v map[uint8][]byte, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint8][]byte given stream length: %v", containerLen)
+		return
+	}
+	// reuse the existing byte slice for in-place decode unless MapValueReset is set
+	mapGet := v != nil && !d.h.MapValueReset
+	var mk uint8
+	var mv []byte
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		d.mapElemValue()
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		mv = d.decodeBytesInto(mv)
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint8Uint8R is the reflect-path entry for map[uint8]uint8:
+// a *map target is allocated when nil, then filled via DecMapUint8Uint8L.
+func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint8]uint8)
+		if *vp == nil {
+			*vp = make(map[uint8]uint8, decInferLen(containerLen, d.h.MaxInitLen, 2))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint8Uint8L(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint8Uint8L(rv2i(rv).(map[uint8]uint8), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint8Uint8X decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint8]uint8, decInferLen(containerLen, d.h.MaxInitLen, 2))
+		}
+		if containerLen != 0 {
+			f.DecMapUint8Uint8L(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint8Uint8L reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint8Uint8L(v map[uint8]uint8, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint8]uint8 given stream length: %v", containerLen)
+		return
+	}
+	var mk uint8
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		d.mapElemValue()
+		mv = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint8Uint64R is the reflect-path entry for map[uint8]uint64:
+// a *map target is allocated when nil, then filled via DecMapUint8Uint64L.
+func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint8]uint64)
+		if *vp == nil {
+			*vp = make(map[uint8]uint64, decInferLen(containerLen, d.h.MaxInitLen, 9))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint8Uint64L(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint8Uint64L(rv2i(rv).(map[uint8]uint64), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint8Uint64X decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint8]uint64, decInferLen(containerLen, d.h.MaxInitLen, 9))
+		}
+		if containerLen != 0 {
+			f.DecMapUint8Uint64L(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint8Uint64L reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint8Uint64L(v map[uint8]uint64, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint8]uint64 given stream length: %v", containerLen)
+		return
+	}
+	var mk uint8
+	var mv uint64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		d.mapElemValue()
+		mv = d.d.DecodeUint64()
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint8IntR is the reflect-path entry for map[uint8]int:
+// a *map target is allocated when nil, then filled via DecMapUint8IntL.
+func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint8]int)
+		if *vp == nil {
+			*vp = make(map[uint8]int, decInferLen(containerLen, d.h.MaxInitLen, 9))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint8IntL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint8IntL(rv2i(rv).(map[uint8]int), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint8IntX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint8]int, decInferLen(containerLen, d.h.MaxInitLen, 9))
+		}
+		if containerLen != 0 {
+			f.DecMapUint8IntL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint8IntL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint8IntL(v map[uint8]int, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint8]int given stream length: %v", containerLen)
+		return
+	}
+	var mk uint8
+	var mv int
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		d.mapElemValue()
+		mv = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint8Int32R is the reflect-path entry for map[uint8]int32:
+// a *map target is allocated when nil, then filled via DecMapUint8Int32L.
+func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint8]int32)
+		if *vp == nil {
+			*vp = make(map[uint8]int32, decInferLen(containerLen, d.h.MaxInitLen, 5))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint8Int32L(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint8Int32L(rv2i(rv).(map[uint8]int32), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint8Int32X decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint8]int32, decInferLen(containerLen, d.h.MaxInitLen, 5))
+		}
+		if containerLen != 0 {
+			f.DecMapUint8Int32L(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint8Int32L reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint8Int32L(v map[uint8]int32, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint8]int32 given stream length: %v", containerLen)
+		return
+	}
+	var mk uint8
+	var mv int32
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		d.mapElemValue()
+		mv = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint8Float64R is the reflect-path entry for map[uint8]float64:
+// a *map target is allocated when nil, then filled via DecMapUint8Float64L.
+func (d *Decoder) fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint8]float64)
+		if *vp == nil {
+			*vp = make(map[uint8]float64, decInferLen(containerLen, d.h.MaxInitLen, 9))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint8Float64L(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint8Float64L(rv2i(rv).(map[uint8]float64), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint8Float64X decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint8]float64, decInferLen(containerLen, d.h.MaxInitLen, 9))
+		}
+		if containerLen != 0 {
+			f.DecMapUint8Float64L(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint8Float64L reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint8Float64L(v map[uint8]float64, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint8]float64 given stream length: %v", containerLen)
+		return
+	}
+	var mk uint8
+	var mv float64
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		d.mapElemValue()
+		mv = d.d.DecodeFloat64()
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint8BoolR is the reflect-path entry for map[uint8]bool:
+// a *map target is allocated when nil, then filled via DecMapUint8BoolL.
+func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint8]bool)
+		if *vp == nil {
+			*vp = make(map[uint8]bool, decInferLen(containerLen, d.h.MaxInitLen, 2))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint8BoolL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint8BoolL(rv2i(rv).(map[uint8]bool), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint8BoolX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint8]bool, decInferLen(containerLen, d.h.MaxInitLen, 2))
+		}
+		if containerLen != 0 {
+			f.DecMapUint8BoolL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint8BoolL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint8BoolL(v map[uint8]bool, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint8]bool given stream length: %v", containerLen)
+		return
+	}
+	var mk uint8
+	var mv bool
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		d.mapElemValue()
+		mv = d.d.DecodeBool()
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint64IntfR is the reflect-path entry for map[uint64]interface{}:
+// a *map target is allocated when nil, then filled via DecMapUint64IntfL.
+func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint64]interface{})
+		if *vp == nil {
+			*vp = make(map[uint64]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 24))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint64IntfL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint64IntfL(rv2i(rv).(map[uint64]interface{}), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint64IntfX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint64]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 24))
+		}
+		if containerLen != 0 {
+			f.DecMapUint64IntfL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint64IntfL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint64IntfL(v map[uint64]interface{}, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint64]interface{} given stream length: %v", containerLen)
+		return
+	}
+	// reuse the existing value for in-place decode unless reset options forbid it
+	mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+	var mk uint64
+	var mv interface{}
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.d.DecodeUint64()
+		d.mapElemValue()
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		d.decode(&mv)
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint64StringR is the reflect-path entry for map[uint64]string:
+// a *map target is allocated when nil, then filled via DecMapUint64StringL.
+func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint64]string)
+		if *vp == nil {
+			*vp = make(map[uint64]string, decInferLen(containerLen, d.h.MaxInitLen, 24))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint64StringL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint64StringL(rv2i(rv).(map[uint64]string), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint64StringX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint64]string, decInferLen(containerLen, d.h.MaxInitLen, 24))
+		}
+		if containerLen != 0 {
+			f.DecMapUint64StringL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint64StringL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint64StringL(v map[uint64]string, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint64]string given stream length: %v", containerLen)
+		return
+	}
+	var mk uint64
+	var mv string
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.d.DecodeUint64()
+		d.mapElemValue()
+		mv = d.stringZC(d.d.DecodeStringAsBytes())
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint64BytesR is the reflect-path entry for map[uint64][]byte:
+// a *map target is allocated when nil, then filled via DecMapUint64BytesL.
+func (d *Decoder) fastpathDecMapUint64BytesR(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint64][]byte)
+		if *vp == nil {
+			*vp = make(map[uint64][]byte, decInferLen(containerLen, d.h.MaxInitLen, 32))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint64BytesL(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint64BytesL(rv2i(rv).(map[uint64][]byte), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint64BytesX decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint64BytesX(vp *map[uint64][]byte, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint64][]byte, decInferLen(containerLen, d.h.MaxInitLen, 32))
+		}
+		if containerLen != 0 {
+			f.DecMapUint64BytesL(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint64BytesL reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint64BytesL(v map[uint64][]byte, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint64][]byte given stream length: %v", containerLen)
+		return
+	}
+	// reuse the existing byte slice for in-place decode unless MapValueReset is set
+	mapGet := v != nil && !d.h.MapValueReset
+	var mk uint64
+	var mv []byte
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.d.DecodeUint64()
+		d.mapElemValue()
+		if mapGet {
+			mv = v[mk]
+		} else {
+			mv = nil
+		}
+		mv = d.decodeBytesInto(mv)
+		v[mk] = mv
+	}
+}
+// fastpathDecMapUint64Uint8R is the reflect-path entry for map[uint64]uint8:
+// a *map target is allocated when nil, then filled via DecMapUint64Uint8L.
+func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if rv.Kind() == reflect.Ptr {
+		vp, _ := rv2i(rv).(*map[uint64]uint8)
+		if *vp == nil {
+			*vp = make(map[uint64]uint8, decInferLen(containerLen, d.h.MaxInitLen, 9))
+		}
+		if containerLen != 0 {
+			fastpathTV.DecMapUint64Uint8L(*vp, containerLen, d)
+		}
+	} else if containerLen != 0 {
+		fastpathTV.DecMapUint64Uint8L(rv2i(rv).(map[uint64]uint8), containerLen, d)
+	}
+	d.mapEnd()
+}
+// DecMapUint64Uint8X decodes into *vp (allocating when nil); a nil stream container sets *vp to nil.
+func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) {
+	containerLen := d.mapStart(d.d.ReadMapStart())
+	if containerLen == containerLenNil {
+		*vp = nil
+	} else {
+		if *vp == nil {
+			*vp = make(map[uint64]uint8, decInferLen(containerLen, d.h.MaxInitLen, 9))
+		}
+		if containerLen != 0 {
+			f.DecMapUint64Uint8L(*vp, containerLen, d)
+		}
+		d.mapEnd()
+	}
+}
+// DecMapUint64Uint8L reads entries into the non-nil map v: containerLen of them,
+// or until the break marker when the stream length is unknown.
+func (fastpathT) DecMapUint64Uint8L(v map[uint64]uint8, containerLen int, d *Decoder) {
+	if v == nil {
+		d.errorf("cannot decode into nil map[uint64]uint8 given stream length: %v", containerLen)
+		return
+	}
+	var mk uint64
+	var mv uint8
+	hasLen := containerLen > 0
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+		d.mapElemKey()
+		mk = d.d.DecodeUint64()
+		d.mapElemValue()
+		mv = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+		v[mk] = mv
+	}
+}
+func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[uint64]uint64)
+ if *vp == nil {
+ *vp = make(map[uint64]uint64, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64Uint64L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapUint64Uint64L(rv2i(rv).(map[uint64]uint64), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[uint64]uint64, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ f.DecMapUint64Uint64L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapUint64Uint64L(v map[uint64]uint64, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[uint64]uint64 given stream length: %v", containerLen)
+ return
+ }
+ var mk uint64
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = d.d.DecodeUint64()
+ d.mapElemValue()
+ mv = d.d.DecodeUint64()
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[uint64]int)
+ if *vp == nil {
+ *vp = make(map[uint64]int, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64IntL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapUint64IntL(rv2i(rv).(map[uint64]int), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[uint64]int, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ f.DecMapUint64IntL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapUint64IntL(v map[uint64]int, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[uint64]int given stream length: %v", containerLen)
+ return
+ }
+ var mk uint64
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = d.d.DecodeUint64()
+ d.mapElemValue()
+ mv = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[uint64]int32)
+ if *vp == nil {
+ *vp = make(map[uint64]int32, decInferLen(containerLen, d.h.MaxInitLen, 12))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64Int32L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapUint64Int32L(rv2i(rv).(map[uint64]int32), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[uint64]int32, decInferLen(containerLen, d.h.MaxInitLen, 12))
+ }
+ if containerLen != 0 {
+ f.DecMapUint64Int32L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapUint64Int32L(v map[uint64]int32, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[uint64]int32 given stream length: %v", containerLen)
+ return
+ }
+ var mk uint64
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = d.d.DecodeUint64()
+ d.mapElemValue()
+ mv = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[uint64]float64)
+ if *vp == nil {
+ *vp = make(map[uint64]float64, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64Float64L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapUint64Float64L(rv2i(rv).(map[uint64]float64), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[uint64]float64, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ f.DecMapUint64Float64L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapUint64Float64L(v map[uint64]float64, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[uint64]float64 given stream length: %v", containerLen)
+ return
+ }
+ var mk uint64
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = d.d.DecodeUint64()
+ d.mapElemValue()
+ mv = d.d.DecodeFloat64()
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[uint64]bool)
+ if *vp == nil {
+ *vp = make(map[uint64]bool, decInferLen(containerLen, d.h.MaxInitLen, 9))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapUint64BoolL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapUint64BoolL(rv2i(rv).(map[uint64]bool), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[uint64]bool, decInferLen(containerLen, d.h.MaxInitLen, 9))
+ }
+ if containerLen != 0 {
+ f.DecMapUint64BoolL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapUint64BoolL(v map[uint64]bool, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[uint64]bool given stream length: %v", containerLen)
+ return
+ }
+ var mk uint64
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = d.d.DecodeUint64()
+ d.mapElemValue()
+ mv = d.d.DecodeBool()
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int]interface{})
+ if *vp == nil {
+ *vp = make(map[int]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 24))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapIntIntfL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapIntIntfL(rv2i(rv).(map[int]interface{}), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 24))
+ }
+ if containerLen != 0 {
+ f.DecMapIntIntfL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapIntIntfL(v map[int]interface{}, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int]interface{} given stream length: %v", containerLen)
+ return
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ d.mapElemValue()
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapIntStringR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int]string)
+ if *vp == nil {
+ *vp = make(map[int]string, decInferLen(containerLen, d.h.MaxInitLen, 24))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapIntStringL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapIntStringL(rv2i(rv).(map[int]string), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int]string, decInferLen(containerLen, d.h.MaxInitLen, 24))
+ }
+ if containerLen != 0 {
+ f.DecMapIntStringL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapIntStringL(v map[int]string, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int]string given stream length: %v", containerLen)
+ return
+ }
+ var mk int
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ d.mapElemValue()
+ mv = d.stringZC(d.d.DecodeStringAsBytes())
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapIntBytesR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int][]byte)
+ if *vp == nil {
+ *vp = make(map[int][]byte, decInferLen(containerLen, d.h.MaxInitLen, 32))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapIntBytesL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapIntBytesL(rv2i(rv).(map[int][]byte), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapIntBytesX(vp *map[int][]byte, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int][]byte, decInferLen(containerLen, d.h.MaxInitLen, 32))
+ }
+ if containerLen != 0 {
+ f.DecMapIntBytesL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapIntBytesL(v map[int][]byte, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int][]byte given stream length: %v", containerLen)
+ return
+ }
+ mapGet := v != nil && !d.h.MapValueReset
+ var mk int
+ var mv []byte
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ d.mapElemValue()
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ mv = d.decodeBytesInto(mv)
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int]uint8)
+ if *vp == nil {
+ *vp = make(map[int]uint8, decInferLen(containerLen, d.h.MaxInitLen, 9))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapIntUint8L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapIntUint8L(rv2i(rv).(map[int]uint8), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int]uint8, decInferLen(containerLen, d.h.MaxInitLen, 9))
+ }
+ if containerLen != 0 {
+ f.DecMapIntUint8L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapIntUint8L(v map[int]uint8, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int]uint8 given stream length: %v", containerLen)
+ return
+ }
+ var mk int
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ d.mapElemValue()
+ mv = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int]uint64)
+ if *vp == nil {
+ *vp = make(map[int]uint64, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapIntUint64L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapIntUint64L(rv2i(rv).(map[int]uint64), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int]uint64, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ f.DecMapIntUint64L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapIntUint64L(v map[int]uint64, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int]uint64 given stream length: %v", containerLen)
+ return
+ }
+ var mk int
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ d.mapElemValue()
+ mv = d.d.DecodeUint64()
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int]int)
+ if *vp == nil {
+ *vp = make(map[int]int, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapIntIntL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapIntIntL(rv2i(rv).(map[int]int), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int]int, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ f.DecMapIntIntL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapIntIntL(v map[int]int, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int]int given stream length: %v", containerLen)
+ return
+ }
+ var mk int
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ d.mapElemValue()
+ mv = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int]int32)
+ if *vp == nil {
+ *vp = make(map[int]int32, decInferLen(containerLen, d.h.MaxInitLen, 12))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapIntInt32L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapIntInt32L(rv2i(rv).(map[int]int32), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int]int32, decInferLen(containerLen, d.h.MaxInitLen, 12))
+ }
+ if containerLen != 0 {
+ f.DecMapIntInt32L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapIntInt32L(v map[int]int32, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int]int32 given stream length: %v", containerLen)
+ return
+ }
+ var mk int
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ d.mapElemValue()
+ mv = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int]float64)
+ if *vp == nil {
+ *vp = make(map[int]float64, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapIntFloat64L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapIntFloat64L(rv2i(rv).(map[int]float64), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int]float64, decInferLen(containerLen, d.h.MaxInitLen, 16))
+ }
+ if containerLen != 0 {
+ f.DecMapIntFloat64L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapIntFloat64L(v map[int]float64, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int]float64 given stream length: %v", containerLen)
+ return
+ }
+ var mk int
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ d.mapElemValue()
+ mv = d.d.DecodeFloat64()
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int]bool)
+ if *vp == nil {
+ *vp = make(map[int]bool, decInferLen(containerLen, d.h.MaxInitLen, 9))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapIntBoolL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapIntBoolL(rv2i(rv).(map[int]bool), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int]bool, decInferLen(containerLen, d.h.MaxInitLen, 9))
+ }
+ if containerLen != 0 {
+ f.DecMapIntBoolL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapIntBoolL(v map[int]bool, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int]bool given stream length: %v", containerLen)
+ return
+ }
+ var mk int
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ d.mapElemValue()
+ mv = d.d.DecodeBool()
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int32]interface{})
+ if *vp == nil {
+ *vp = make(map[int32]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 20))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32IntfL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapInt32IntfL(rv2i(rv).(map[int32]interface{}), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int32]interface{}, decInferLen(containerLen, d.h.MaxInitLen, 20))
+ }
+ if containerLen != 0 {
+ f.DecMapInt32IntfL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapInt32IntfL(v map[int32]interface{}, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int32]interface{} given stream length: %v", containerLen)
+ return
+ }
+ mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ var mk int32
+ var mv interface{}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ d.mapElemValue()
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ d.decode(&mv)
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int32]string)
+ if *vp == nil {
+ *vp = make(map[int32]string, decInferLen(containerLen, d.h.MaxInitLen, 20))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32StringL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapInt32StringL(rv2i(rv).(map[int32]string), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int32]string, decInferLen(containerLen, d.h.MaxInitLen, 20))
+ }
+ if containerLen != 0 {
+ f.DecMapInt32StringL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapInt32StringL(v map[int32]string, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int32]string given stream length: %v", containerLen)
+ return
+ }
+ var mk int32
+ var mv string
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ d.mapElemValue()
+ mv = d.stringZC(d.d.DecodeStringAsBytes())
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapInt32BytesR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int32][]byte)
+ if *vp == nil {
+ *vp = make(map[int32][]byte, decInferLen(containerLen, d.h.MaxInitLen, 28))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32BytesL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapInt32BytesL(rv2i(rv).(map[int32][]byte), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapInt32BytesX(vp *map[int32][]byte, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int32][]byte, decInferLen(containerLen, d.h.MaxInitLen, 28))
+ }
+ if containerLen != 0 {
+ f.DecMapInt32BytesL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapInt32BytesL(v map[int32][]byte, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int32][]byte given stream length: %v", containerLen)
+ return
+ }
+ mapGet := v != nil && !d.h.MapValueReset
+ var mk int32
+ var mv []byte
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ d.mapElemValue()
+ if mapGet {
+ mv = v[mk]
+ } else {
+ mv = nil
+ }
+ mv = d.decodeBytesInto(mv)
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int32]uint8)
+ if *vp == nil {
+ *vp = make(map[int32]uint8, decInferLen(containerLen, d.h.MaxInitLen, 5))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32Uint8L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapInt32Uint8L(rv2i(rv).(map[int32]uint8), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int32]uint8, decInferLen(containerLen, d.h.MaxInitLen, 5))
+ }
+ if containerLen != 0 {
+ f.DecMapInt32Uint8L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapInt32Uint8L(v map[int32]uint8, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int32]uint8 given stream length: %v", containerLen)
+ return
+ }
+ var mk int32
+ var mv uint8
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ d.mapElemValue()
+ mv = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int32]uint64)
+ if *vp == nil {
+ *vp = make(map[int32]uint64, decInferLen(containerLen, d.h.MaxInitLen, 12))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32Uint64L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapInt32Uint64L(rv2i(rv).(map[int32]uint64), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int32]uint64, decInferLen(containerLen, d.h.MaxInitLen, 12))
+ }
+ if containerLen != 0 {
+ f.DecMapInt32Uint64L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapInt32Uint64L(v map[int32]uint64, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int32]uint64 given stream length: %v", containerLen)
+ return
+ }
+ var mk int32
+ var mv uint64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ d.mapElemValue()
+ mv = d.d.DecodeUint64()
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int32]int)
+ if *vp == nil {
+ *vp = make(map[int32]int, decInferLen(containerLen, d.h.MaxInitLen, 12))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32IntL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapInt32IntL(rv2i(rv).(map[int32]int), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int32]int, decInferLen(containerLen, d.h.MaxInitLen, 12))
+ }
+ if containerLen != 0 {
+ f.DecMapInt32IntL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapInt32IntL(v map[int32]int, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int32]int given stream length: %v", containerLen)
+ return
+ }
+ var mk int32
+ var mv int
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ d.mapElemValue()
+ mv = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int32]int32)
+ if *vp == nil {
+ *vp = make(map[int32]int32, decInferLen(containerLen, d.h.MaxInitLen, 8))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32Int32L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapInt32Int32L(rv2i(rv).(map[int32]int32), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int32]int32, decInferLen(containerLen, d.h.MaxInitLen, 8))
+ }
+ if containerLen != 0 {
+ f.DecMapInt32Int32L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapInt32Int32L(v map[int32]int32, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int32]int32 given stream length: %v", containerLen)
+ return
+ }
+ var mk int32
+ var mv int32
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ d.mapElemValue()
+ mv = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int32]float64)
+ if *vp == nil {
+ *vp = make(map[int32]float64, decInferLen(containerLen, d.h.MaxInitLen, 12))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32Float64L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapInt32Float64L(rv2i(rv).(map[int32]float64), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int32]float64, decInferLen(containerLen, d.h.MaxInitLen, 12))
+ }
+ if containerLen != 0 {
+ f.DecMapInt32Float64L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapInt32Float64L(v map[int32]float64, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int32]float64 given stream length: %v", containerLen)
+ return
+ }
+ var mk int32
+ var mv float64
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ d.mapElemValue()
+ mv = d.d.DecodeFloat64()
+ v[mk] = mv
+ }
+}
+func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[int32]bool)
+ if *vp == nil {
+ *vp = make(map[int32]bool, decInferLen(containerLen, d.h.MaxInitLen, 5))
+ }
+ if containerLen != 0 {
+ fastpathTV.DecMapInt32BoolL(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.DecMapInt32BoolL(rv2i(rv).(map[int32]bool), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[int32]bool, decInferLen(containerLen, d.h.MaxInitLen, 5))
+ }
+ if containerLen != 0 {
+ f.DecMapInt32BoolL(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) DecMapInt32BoolL(v map[int32]bool, containerLen int, d *Decoder) {
+ if v == nil {
+ d.errorf("cannot decode into nil map[int32]bool given stream length: %v", containerLen)
+ return
+ }
+ var mk int32
+ var mv bool
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ mk = int32(chkOvf.IntV(d.d.DecodeInt64(), 32))
+ d.mapElemValue()
+ mv = d.d.DecodeBool()
+ v[mk] = mv
+ }
+}
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
new file mode 100644
index 000000000..56801ee5c
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl
@@ -0,0 +1,555 @@
+// +build !notfastpath
+// +build !codec.notfastpath
+
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from fast-path.go.tmpl - DO NOT EDIT.
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute the encode.go and decode.go, and create a dependency in there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently supported:
+//    - slice of all builtin types (numeric, bool, string, []byte)
+//    - maps of builtin types to builtin or interface{} type, EXCEPT FOR
+//      keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{}
+//      AND values of type int8/16/32, uint16/32
+//    This should provide adequate "typical" implementations.
+//
+// Note that fast track decode functions must handle values for which an address cannot be obtained.
+// For example:
+// m2 := map[string]int{}
+// p2 := []interface{}{m2}
+// // decoding into p2 will bomb if fast track functions do not treat like unaddressable.
+//
+
+{{/*
+fastpathEncMapStringUint64R (called by fastpath...switch)
+EncMapStringUint64V (called by codecgen)
+
+fastpathEncSliceBoolR: (called by fastpath...switch) (checks f.ti.mbs and calls one of them below)
+EncSliceBoolV (also called by codecgen)
+EncAsMapSliceBoolV (delegate when mapbyslice=true)
+
+fastpathDecSliceIntfR (called by fastpath...switch) (calls Y or N below depending on if it can be updated)
+DecSliceIntfX (called by codecgen) (calls Y below)
+DecSliceIntfY (delegate when slice CAN be updated)
+DecSliceIntfN (delegate when slice CANNOT be updated e.g. from array or non-addressable slice)
+
+fastpathDecMap...R (called by fastpath...switch) (calls L or X? below)
+DecMap...X (called by codecgen)
+DecMap...L (delegated to by both above)
+*/ -}}
+
+import (
+ "reflect"
+ "sort"
+)
+
+const fastpathEnabled = true
+
+{{/*
+const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v"
+*/ -}}
+
+type fastpathT struct {}
+
+var fastpathTV fastpathT
+
+type fastpathE struct {
+ {{/* rtid uintptr */ -}}
+ rt reflect.Type
+ encfn func(*Encoder, *codecFnInfo, reflect.Value)
+ decfn func(*Decoder, *codecFnInfo, reflect.Value)
+}
+
+type fastpathA [{{ .FastpathLen }}]fastpathE
+type fastpathARtid [{{ .FastpathLen }}]uintptr
+
+var fastpathAv fastpathA
+var fastpathAvRtid fastpathARtid
+
+type fastpathAslice struct{}
+
+func (fastpathAslice) Len() int { return {{ .FastpathLen }} }
+func (fastpathAslice) Less(i, j int) bool {
+ return fastpathAvRtid[uint(i)] < fastpathAvRtid[uint(j)]
+}
+func (fastpathAslice) Swap(i, j int) {
+ fastpathAvRtid[uint(i)], fastpathAvRtid[uint(j)] = fastpathAvRtid[uint(j)], fastpathAvRtid[uint(i)]
+ fastpathAv[uint(i)], fastpathAv[uint(j)] = fastpathAv[uint(j)], fastpathAv[uint(i)]
+}
+
+func fastpathAvIndex(rtid uintptr) int {
+ // use binary search to grab the index (adapted from sort/search.go)
+ // Note: we use goto (instead of for loop) so this can be inlined.
+ // h, i, j := 0, 0, {{ .FastpathLen }}
+ var h, i uint
+ var j uint = {{ .FastpathLen }}
+LOOP:
+ if i < j {
+ h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
+ if fastpathAvRtid[h] < rtid {
+ i = h + 1
+ } else {
+ j = h
+ }
+ goto LOOP
+ }
+ if i < {{ .FastpathLen }} && fastpathAvRtid[i] == rtid {
+ return int(i)
+ }
+ return -1
+}
+
+
+// due to possible initialization loop error, make fastpath in an init()
+func init() {
+ var i uint = 0
+ fn := func(v interface{},
+ fe func(*Encoder, *codecFnInfo, reflect.Value),
+ fd func(*Decoder, *codecFnInfo, reflect.Value)) {
+ xrt := reflect.TypeOf(v)
+ xptr := rt2id(xrt)
+ fastpathAvRtid[i] = xptr
+ fastpathAv[i] = fastpathE{xrt, fe, fd}
+ i++
+ }
+ {{/* do not register []byte in fast-path */}}
+ {{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
+ fn([]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R)
+ {{end}}{{end}}{{end}}
+
+ {{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
+ fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*Encoder).{{ .MethodNamePfx "fastpathEnc" false }}R, (*Decoder).{{ .MethodNamePfx "fastpathDec" false }}R)
+ {{end}}{{end}}{{end}}
+
+ sort.Sort(fastpathAslice{})
+}
+
+// -- encode
+
+// -- -- fast path type switch
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
+ case []{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
+ case *[]{{ .Elem }}:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
+ }
+{{end}}{{end}}{{end -}}
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
+ case map[{{ .MapKey }}]{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
+ case *map[{{ .MapKey }}]{{ .Elem }}:
+ if *v == nil {
+ e.e.EncodeNil()
+ } else {
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(*v, e)
+ }
+{{end}}{{end}}{{end -}}
+
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
+func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
+ var v []{{ .Elem }}
+ if rv.Kind() == reflect.Array {
+ rvGetSlice4Array(rv, &v)
+ } else {
+ v = rv2i(rv).([]{{ .Elem }})
+ }
+ if f.ti.mbs {
+ fastpathTV.{{ .MethodNamePfx "EncAsMap" false }}V(v, e)
+ } else {
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(v, e)
+ }
+}
+func (fastpathT) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *Encoder) {
+ {{/* if v == nil { e.e.EncodeNil(); return } */ -}}
+ {{ if eq .Elem "uint8" "byte" -}}
+ e.e.EncodeStringBytesRaw(v)
+ {{ else -}}
+ e.arrayStart(len(v))
+ for j := range v {
+ e.arrayElem()
+ {{ encmd .Elem "v[j]"}}
+ }
+ e.arrayEnd()
+ {{ end -}}
+}
+func (fastpathT) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *Encoder) {
+ {{/* if v == nil { e.e.EncodeNil() } else */ -}}
+ e.haltOnMbsOddLen(len(v))
+ {{/*
+ if len(v)&1 != 0 { // similar to &1==1 or %2 == 1
+ e.errorf(fastpathMapBySliceErrMsg, len(v))
+ }
+ */ -}}
+ e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
+ for j := range v {
+ if j&1 == 0 { // if j%2 == 0 {
+ e.mapElemKey()
+ } else {
+ e.mapElemValue()
+ }
+ {{ encmd .Elem "v[j]"}}
+ }
+ e.mapEnd()
+}
+{{end}}{{end}}{{end -}}
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
+func (e *Encoder) {{ .MethodNamePfx "fastpathEnc" false }}R(f *codecFnInfo, rv reflect.Value) {
+ fastpathTV.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e)
+}
+func (fastpathT) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *Encoder) {
+ {{/* if v == nil { e.e.EncodeNil(); return } */ -}}
+ e.mapStart(len(v))
+ if e.h.Canonical { {{/* need to figure out .NoCanonical */}}
+ {{if eq .MapKey "interface{}"}}{{/* out of band */ -}}
+ var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
+ e2 := NewEncoderBytes(&mksv, e.hh)
+ v2 := make([]bytesIntf, len(v))
+ var i, l uint {{/* put loop variables outside. seems currently needed for better perf */}}
+ var vp *bytesIntf
+ for k2 := range v {
+ l = uint(len(mksv))
+ e2.MustEncode(k2)
+ vp = &v2[i]
+ vp.v = mksv[l:]
+ vp.i = k2
+ i++
+ }
+ sort.Sort(bytesIntfSlice(v2))
+ for j := range v2 {
+ e.mapElemKey()
+ e.asis(v2[j].v)
+ e.mapElemValue()
+ e.encode(v[v2[j].i])
+ } {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
+ var i uint
+ for k := range v {
+ v2[i] = {{if eq $x .MapKey}}k{{else}}{{ $x }}(k){{end}}
+ i++
+ }
+ sort.Sort({{ sorttype .MapKey false}}(v2))
+ for _, k2 := range v2 {
+ e.mapElemKey()
+ {{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ $y := printf "%s(k2)" .MapKey }}{{if eq $x .MapKey }}{{ $y = "k2" }}{{end}}{{ encmd .MapKey $y }}{{end}}
+ e.mapElemValue()
+ {{ $y := printf "v[%s(k2)]" .MapKey }}{{if eq $x .MapKey }}{{ $y = "v[k2]" }}{{end}}{{ encmd .Elem $y }}
+ } {{end}}
+ } else {
+ for k2, v2 := range v {
+ e.mapElemKey()
+ {{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ encmd .MapKey "k2"}}{{end}}
+ e.mapElemValue()
+ {{ encmd .Elem "v2"}}
+ }
+ }
+ e.mapEnd()
+}
+{{end}}{{end}}{{end -}}
+
+// -- decode
+
+// -- -- fast path type switch
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
+ var changed bool
+ var containerLen int
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
+ case []{{ .Elem }}:
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}N(v, d)
+ case *[]{{ .Elem }}:
+ var v2 []{{ .Elem }}
+ if v2, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}Y(*v, d); changed {
+ *v = v2
+ }
+{{end}}{{end}}{{end -}}
+{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/*
+// maps only change if nil, and in that case, there's no point copying
+*/ -}}
+ case map[{{ .MapKey }}]{{ .Elem }}:
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen != containerLenNil {
+ if containerLen != 0 {
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}L(v, containerLen, d)
+ }
+ d.mapEnd()
+ }
+ case *map[{{ .MapKey }}]{{ .Elem }}:
+ {{/*
+ containerLen = d.mapStart(d.d.ReadMapStart())
+ if containerLen == 0 {
+ d.mapEnd()
+ } else if containerLen == containerLenNil {
+ *v = nil
+ } else {
+ if *v == nil {
+ *v = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}))
+ }
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}L(*v, containerLen, d)
+ }
+ // consider delegating fully to X - encoding *map is uncommon, so ok to pay small function call cost
+ */ -}}
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}X(v, d)
+{{end}}{{end}}{{end -}}
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
+ switch v := iv.(type) {
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
+ case *[]{{ .Elem }}:
+ *v = nil
+{{end}}{{end}}{{end}}
+{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
+ case *map[{{ .MapKey }}]{{ .Elem }}:
+ *v = nil
+{{end}}{{end}}{{end}}
+ default:
+ _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
+ return false
+ }
+ return true
+}
+
+// -- -- fast path functions
+{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
+{{/*
+Slices can change if they
+- did not come from an array
+- are addressable (from a ptr)
+- are settable (e.g. contained in an interface{})
+*/}}
+func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
+ {{/*
+ // seqTypeArray=true means that we are not getting a pointer, so no need to check that.
+ if f.seq != seqTypeArray && rv.Kind() == reflect.Ptr {
+ */ -}}
+ var v []{{ .Elem }}
+ switch rv.Kind() {
+ case reflect.Ptr:
+ vp := rv2i(rv).(*[]{{ .Elem }})
+ var changed bool
+ if v, changed = fastpathTV.{{ .MethodNamePfx "Dec" false }}Y(*vp, d); changed {
+ *vp = v
+ }
+ case reflect.Array:
+ rvGetSlice4Array(rv, &v)
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}N(v, d)
+ default:
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}N(rv2i(rv).([]{{ .Elem }}), d)
+ }
+}
+func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *Decoder) {
+ if v, changed := f.{{ .MethodNamePfx "Dec" false }}Y(*vp, d); changed { *vp = v }
+}
+func (fastpathT) {{ .MethodNamePfx "Dec" false }}Y(v []{{ .Elem }}, d *Decoder) (v2 []{{ .Elem }}, changed bool) {
+ {{ if eq .Elem "uint8" "byte" -}}
+ switch d.d.ContainerType() {
+ case valueTypeNil, valueTypeMap:
+ break
+ default:
+ v2 = d.decodeBytesInto(v[:len(v):len(v)])
+ changed = !(len(v2) > 0 && len(v2) == len(v) && &v2[0] == &v[0]) // not same slice
+ return
+ }
+ {{ end -}}
+ slh, containerLenS := d.decSliceHelperStart()
+ if slh.IsNil {
+ if v == nil { return }
+ return nil, true
+ }
+ if containerLenS == 0 {
+ if v == nil { v = []{{ .Elem }}{} } else if len(v) != 0 { v = v[:0] }
+ slh.End()
+ return v, true
+ }
+ hasLen := containerLenS > 0
+ var xlen int
+ if hasLen {
+ if containerLenS > cap(v) {
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }})
+ if xlen <= cap(v) {
+ v = v[:uint(xlen)]
+ } else {
+ v = make([]{{ .Elem }}, uint(xlen))
+ }
+ changed = true
+ } else if containerLenS != len(v) {
+ v = v[:containerLenS]
+ changed = true
+ }
+ }
+ var j int
+ for j = 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+ if j == 0 && len(v) == 0 { // means hasLen == false
+ xlen = decInferLen(containerLenS, d.h.MaxInitLen, {{ .Size }}) {{/* xlen = decDefSliceCap */}}
+ v = make([]{{ .Elem }}, uint(xlen))
+ changed = true
+ }
+ {{/* // if indefinite, etc, then expand the slice if necessary */ -}}
+ if j >= len(v) {
+ v = append(v, {{ zerocmd .Elem }})
+ changed = true
+ }
+ slh.ElemContainerState(j)
+ {{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem false }}{{ end }}
+ }
+ if j < len(v) {
+ v = v[:uint(j)]
+ changed = true
+ } else if j == 0 && v == nil {
+ v = []{{ .Elem }}{}
+ changed = true
+ }
+ slh.End()
+ return v, changed
+}
+func (fastpathT) {{ .MethodNamePfx "Dec" false }}N(v []{{ .Elem }}, d *Decoder) {
+ {{ if eq .Elem "uint8" "byte" -}}
+ switch d.d.ContainerType() {
+ case valueTypeNil, valueTypeMap:
+ break
+ default:
+ v2 := d.decodeBytesInto(v[:len(v):len(v)])
+ if !(len(v2) > 0 && len(v2) == len(v) && &v2[0] == &v[0]) { // not same slice
+ copy(v, v2)
+ }
+ return
+ }
+ {{ end -}}
+ slh, containerLenS := d.decSliceHelperStart()
+ if slh.IsNil {
+ return
+ }
+ if containerLenS == 0 {
+ slh.End()
+ return
+ }
+ hasLen := containerLenS > 0
+ for j := 0; (hasLen && j < containerLenS) || !(hasLen || d.checkBreak()); j++ {
+ {{/* // if indefinite, etc, then expand the slice if necessary */ -}}
+ if j >= len(v) {
+ slh.arrayCannotExpand(hasLen, len(v), j, containerLenS)
+ return
+ }
+ slh.ElemContainerState(j)
+ {{ if eq .Elem "interface{}" -}}
+ d.decode(&v[uint(j)])
+ {{- else -}}
+ v[uint(j)] = {{ decmd .Elem false }}
+ {{- end }}
+ }
+ slh.End()
+}
+{{end}}{{end}}{{end -}}
+
+{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
+{{/*
+Maps can change if they are
+- addressable (from a ptr)
+- settable (e.g. contained in an interface{})
+
+Also, these methods are called by decodeValue directly, after handling a TryNil.
+Consequently, there's no need to check for containerLenNil here.
+*/ -}}
+func (d *Decoder) {{ .MethodNamePfx "fastpathDec" false }}R(f *codecFnInfo, rv reflect.Value) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ {{/*
+ if containerLen == containerLenNil {
+ if rv.Kind() == reflect.Ptr {
+ *(rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})) = nil
+ }
+ return
+ }
+ */ -}}
+ if rv.Kind() == reflect.Ptr {
+ vp, _ := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
+ if *vp == nil {
+ *vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}))
+ }
+ if containerLen != 0 {
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
+ }
+ } else if containerLen != 0 {
+ fastpathTV.{{ .MethodNamePfx "Dec" false }}L(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), containerLen, d)
+ }
+ d.mapEnd()
+}
+func (f fastpathT) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *Decoder) {
+ containerLen := d.mapStart(d.d.ReadMapStart())
+ if containerLen == containerLenNil {
+ *vp = nil
+ } else {
+ if *vp == nil {
+ *vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.h.MaxInitLen, {{ .Size }}))
+ }
+ if containerLen != 0 {
+ f.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
+ }
+ d.mapEnd()
+ }
+}
+func (fastpathT) {{ .MethodNamePfx "Dec" false }}L(v map[{{ .MapKey }}]{{ .Elem }}, containerLen int, d *Decoder) {
+ {{/* No need to check if containerLen == containerLenNil, as that is checked by R and L above */ -}}
+ if v == nil {
+ d.errorf("cannot decode into nil map[{{ .MapKey }}]{{ .Elem }} given stream length: %v", containerLen)
+ {{/* d.swallowMapContents(containerLen) */ -}}
+ return
+ }
+ {{if eq .Elem "interface{}" }}mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset
+ {{else if eq .Elem "bytes" "[]byte" }}mapGet := v != nil && !d.h.MapValueReset
+ {{end -}}
+ var mk {{ .MapKey }}
+ var mv {{ .Elem }}
+ hasLen := containerLen > 0
+ for j := 0; (hasLen && j < containerLen) || !(hasLen || d.checkBreak()); j++ {
+ d.mapElemKey()
+ {{ if eq .MapKey "interface{}" }}mk = nil
+ d.decode(&mk)
+ if bv, bok := mk.([]byte); bok {
+ mk = d.stringZC(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
+ }{{ else }}mk = {{ decmd .MapKey true }}{{ end }}
+ d.mapElemValue()
+ {{ if eq .Elem "interface{}" "[]byte" "bytes" -}}
+ if mapGet { mv = v[mk] } else { mv = nil }
+ {{ end -}}
+ {{ if eq .Elem "interface{}" -}}
+ d.decode(&mv)
+ {{ else if eq .Elem "[]byte" "bytes" -}}
+ mv = d.decodeBytesInto(mv)
+ {{ else -}}
+ mv = {{ decmd .Elem false }}
+ {{ end -}}
+ v[mk] = mv
+ }
+}
+{{end}}{{end}}{{end}}
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go
new file mode 100644
index 000000000..3fcc8fd3a
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.not.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build notfastpath || codec.notfastpath
+// +build notfastpath codec.notfastpath
+
+package codec
+
+import "reflect"
+
+const fastpathEnabled = false
+
+// The generated fast-path code is very large, and adds a few seconds to the build time.
+// This causes test execution, execution of small tools which use codec, etc
+// to take a long time.
+//
+// To mitigate, we now support the notfastpath tag.
+// This tag disables fastpath during build, allowing for faster build, test execution,
+// short-program runs, etc.
+
+func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
+func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
+
+// func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
+// func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
+
+func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false }
+
+type fastpathT struct{}
+type fastpathE struct {
+ rtid uintptr
+ rt reflect.Type
+ encfn func(*Encoder, *codecFnInfo, reflect.Value)
+ decfn func(*Decoder, *codecFnInfo, reflect.Value)
+}
+type fastpathA [0]fastpathE
+
+func fastpathAvIndex(rtid uintptr) int { return -1 }
+
+var fastpathAv fastpathA
+var fastpathTV fastpathT
diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
new file mode 100644
index 000000000..d2caa0b66
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
@@ -0,0 +1,90 @@
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
+{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}
+{{if not isArray -}}
+var {{var "c"}} bool {{/* // changed */}}
+_ = {{var "c"}}
+if {{var "h"}}.IsNil {
+ if {{var "v"}} != nil {
+ {{var "v"}} = nil
+ {{var "c"}} = true
+ }
+} else {{end -}}
+if {{var "l"}} == 0 {
+ {{if isSlice -}}
+ if {{var "v"}} == nil {
+ {{var "v"}} = []{{ .Typ }}{}
+ {{var "c"}} = true
+ } else if len({{var "v"}}) != 0 {
+ {{var "v"}} = {{var "v"}}[:0]
+ {{var "c"}} = true
+ } {{else if isChan }}if {{var "v"}} == nil {
+ {{var "v"}} = make({{ .CTyp }}, 0)
+ {{var "c"}} = true
+ }
+ {{end -}}
+} else {
+ {{var "hl"}} := {{var "l"}} > 0
+ var {{var "rl"}} int
+ _ = {{var "rl"}}
+ {{if isSlice }} if {{var "hl"}} {
+ if {{var "l"}} > cap({{var "v"}}) {
+ {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ if {{var "rl"}} <= cap({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+ } else {
+ {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+ }
+ {{var "c"}} = true
+ } else if {{var "l"}} != len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "l"}}]
+ {{var "c"}} = true
+ }
+ }
+ {{end -}}
+ var {{var "j"}} int
+ {{/* // var {{var "dn"}} bool */ -}}
+ for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || z.DecCheckBreak()); {{var "j"}}++ { // bounds-check-elimination
+ {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
+ if {{var "hl"}} {
+ {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ } else {
+ {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
+ }
+ {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
+ {{var "c"}} = true
+ }
+ {{end -}}
+ {{var "h"}}.ElemContainerState({{var "j"}})
+ {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */ -}}
+ {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
+ {{ decLineVar $x -}}
+ {{var "v"}} <- {{ $x }}
+ {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
+ var {{var "db"}} bool
+ if {{var "j"}} >= len({{var "v"}}) {
+ {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
+ {{var "c"}} = true
+ {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
+ {{end -}}
+ }
+ if {{var "db"}} {
+ z.DecSwallow()
+ } else {
+ {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x -}}
+ }
+ {{end -}}
+ }
+ {{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
+ {{var "v"}} = {{var "v"}}[:{{var "j"}}]
+ {{var "c"}} = true
+ } else if {{var "j"}} == 0 && {{var "v"}} == nil {
+ {{var "v"}} = make([]{{ .Typ }}, 0)
+ {{var "c"}} = true
+ }
+ {{end -}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
+ *{{ .Varname }} = {{var "v"}}
+}
+{{end -}}
diff --git a/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
new file mode 100644
index 000000000..a7ce62b59
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
@@ -0,0 +1,58 @@
+{{var "v"}} := *{{ .Varname }}
+{{var "l"}} := z.DecReadMapStart()
+if {{var "l"}} == codecSelferDecContainerLenNil{{xs}} {
+ *{{ .Varname }} = nil
+} else {
+if {{var "v"}} == nil {
+ {{var "rl"}} := z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+ {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
+ *{{ .Varname }} = {{var "v"}}
+}
+{{ $mk := var "mk" -}}
+var {{ $mk }} {{ .KTyp }}
+var {{var "mv"}} {{ .Typ }}
+var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
+if z.DecBasicHandle().MapValueReset {
+ {{if decElemKindPtr}}{{var "mg"}} = true
+ {{else if decElemKindIntf}}if !z.DecBasicHandle().InterfaceReset { {{var "mg"}} = true }
+ {{else if not decElemKindImmutable}}{{var "mg"}} = true
+ {{end}} }
+if {{var "l"}} != 0 {
+ {{var "hl"}} := {{var "l"}} > 0
+ for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || z.DecCheckBreak()); {{var "j"}}++ {
+ z.DecReadMapElemKey()
+ {{ if eq .KTyp "string" -}}
+ {{ decLineVarK $mk -}}{{- /* decLineVarKStrZC $mk */ -}}
+ {{ else -}}
+ {{ decLineVarK $mk -}}
+ {{ end -}}
+ {{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */ -}}
+ if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+ {{var "mk"}} = z.DecStringZC({{var "bv"}})
+ }
+ {{ end -}}
+ {{if decElemKindPtr -}}
+ {{var "ms"}} = true
+ {{end -}}
+ if {{var "mg"}} {
+ {{if decElemKindPtr -}}
+ {{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{ $mk }}]
+ if {{var "mok"}} {
+ {{var "ms"}} = false
+ }
+ {{else -}}
+ {{var "mv"}} = {{var "v"}}[{{ $mk }}]
+ {{end -}}
+ } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+ z.DecReadMapElemValue()
+ {{var "mdn"}} = false
+ {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y -}}
+ if {{var "mdn"}} {
+ {{var "v"}}[{{ $mk }}] = {{decElemZero}}
+ } else {{if decElemKindPtr}} if {{var "ms"}} {{end}} {
+ {{var "v"}}[{{ $mk }}] = {{var "mv"}}
+ }
+}
+} // else len==0: leave as-is (do not clear map entries)
+z.DecReadMapEnd()
+}
diff --git a/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl
new file mode 100644
index 000000000..4249588a3
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl
@@ -0,0 +1,27 @@
+{{.Label}}:
+switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
+case timeout{{.Sfx}} == 0: // only consume available
+ for {
+ select {
+ case b{{.Sfx}} := <-{{.Chan}}:
+ {{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
+ default:
+ break {{.Label}}
+ }
+ }
+case timeout{{.Sfx}} > 0: // consume until timeout
+ tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
+ for {
+ select {
+ case b{{.Sfx}} := <-{{.Chan}}:
+ {{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+ case <-tt{{.Sfx}}.C:
+ // close(tt.C)
+ break {{.Label}}
+ }
+ }
+default: // consume until close
+ for b{{.Sfx}} := range {{.Chan}} {
+ {{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+ }
+}
diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
new file mode 100644
index 000000000..5643b6af9
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go
@@ -0,0 +1,277 @@
+// comment this out // + build ignore
+
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// GenVersion is the current version of codecgen.
+const GenVersion = 25
+
+// This file is used to generate helper code for codecgen.
+// The values here i.e. genHelper(En|De)coder are not to be used directly by
+// library users. They WILL change continuously and without notice.
+
+// GenHelper is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY or INDIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelper() (g genHelper) { return }
+
+type genHelper struct{}
+
+func (genHelper) Encoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
+ ge = genHelperEncoder{e: e}
+ ee = genHelperEncDriver{encDriver: e.e}
+ return
+}
+
+func (genHelper) Decoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
+ gd = genHelperDecoder{d: d}
+ dd = genHelperDecDriver{decDriver: d.d}
+ return
+}
+
+type genHelperEncDriver struct {
+ encDriver
+}
+
+type genHelperDecDriver struct {
+ decDriver
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+ M mustHdl
+ F fastpathT
+ e *Encoder
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+ C checkOverflow
+ F fastpathT
+ d *Decoder
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+ return f.e.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+ return f.e.be // f.e.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+ return f.e.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+ // f.e.encodeI(iv, false, false)
+ f.e.encodeValue(reflect.ValueOf(iv), nil)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+ bs, fnerr := iv.MarshalText()
+ f.e.marshalUtf8(bs, fnerr)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+ bs, fnerr := iv.MarshalJSON()
+ f.e.marshalAsis(bs, fnerr)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+ bs, fnerr := iv.MarshalBinary()
+ f.e.marshalRaw(bs, fnerr)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) Extension(v interface{}) (xfn *extTypeTagFn) {
+ return f.e.h.getExtForI(v)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
+ f.e.e.EncodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) WriteStr(s string) {
+ f.e.w().writestr(s)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteMapStart(length int) { f.e.mapStart(length) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteMapEnd() { f.e.mapEnd() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteArrayStart(length int) { f.e.arrayStart(length) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteArrayEnd() { f.e.arrayEnd() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteArrayElem() { f.e.arrayElem() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteMapElemKey() { f.e.mapElemKey() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteMapElemValue() { f.e.mapElemValue() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncEncodeComplex64(v complex64) { f.e.encodeComplex64(v) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncEncodeComplex128(v complex128) { f.e.encodeComplex128(v) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncEncode(v interface{}) { f.e.encode(v) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncEncodeMapNonNil(v interface{}) {
+ if skipFastpathTypeSwitchInDirectCall || !fastpathEncodeTypeSwitch(v, f.e) {
+ f.e.encodeValue(reflect.ValueOf(v), nil)
+ }
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
+ return f.d.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinary() bool {
+ return f.d.be // f.d.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
+
+// // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+// func (f genHelperDecoder) DecScratchBuffer() []byte {
+// return f.d.b[:]
+// }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
+ return &f.d.b
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+//
+// DecFallback decodes into iv via the reflection-based path. When chkPtr is
+// true it first verifies that iv is actually decodeable (e.g. an addressable,
+// non-nil destination), halting with a descriptive error otherwise.
+func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
+	rv := reflect.ValueOf(iv)
+	if chkPtr {
+		if x, _ := isDecodeable(rv); !x {
+			f.d.haltAsNotDecodeable(rv)
+		}
+	}
+	f.d.decodeValue(rv, nil)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
+ return f.d.decSliceHelperStart()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
+ f.d.structFieldNotFound(index, name)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
+ f.d.arrayCannotExpand(sliceLen, streamLen)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
+ halt.onerror(tm.UnmarshalText(f.d.d.DecodeStringAsBytes()))
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
+ f.d.jsonUnmarshalV(tm)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
+ halt.onerror(bm.UnmarshalBinary(f.d.d.DecodeBytes(nil)))
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) IsJSONHandle() bool {
+ return f.d.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) Extension(v interface{}) (xfn *extTypeTagFn) {
+ return f.d.h.getExtForI(v)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
+ f.d.d.DecodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
+ return decInferLen(clen, maxlen, unit)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadMapStart() int { return f.d.mapStart(f.d.d.ReadMapStart()) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadMapEnd() { f.d.mapEnd() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadArrayStart() int { return f.d.arrayStart(f.d.d.ReadArrayStart()) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadArrayEnd() { f.d.arrayEnd() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadArrayElem() { f.d.arrayElem() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadMapElemKey() { f.d.mapElemKey() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadMapElemValue() { f.d.mapElemValue() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecDecodeFloat32() float32 { return f.d.decodeFloat32() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecCheckBreak() bool { return f.d.checkBreak() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStringZC(v []byte) string { return f.d.stringZC(v) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecodeBytesInto(v []byte) []byte { return f.d.decodeBytesInto(v) }
diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
new file mode 100644
index 000000000..1f1339684
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
@@ -0,0 +1,249 @@
+// comment this out // + build ignore
+
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from gen-helper.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import (
+ "encoding"
+ "reflect"
+)
+
+// GenVersion is the current version of codecgen.
+const GenVersion = {{ .Version }}
+
+// This file is used to generate helper code for codecgen.
+// The values here i.e. genHelper(En|De)coder are not to be used directly by
+// library users. They WILL change continuously and without notice.
+
+{{/*
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+*/ -}}
+
+// GenHelper is exported so that it can be used externally by codecgen.
+// It returns the unexported genHelper type, through which generated code
+// obtains the encoder/decoder helper pairs below.
+//
+// Library users: DO NOT USE IT DIRECTLY or INDIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelper() (g genHelper) { return }
+
+// genHelper is the only gateway to genHelperEncoder/genHelperDecoder,
+// keeping those types unobtainable except via GenHelper().
+type genHelper struct {}
+
+// Encoder wraps an *Encoder into the helper pair used by generated encode code.
+func (genHelper) Encoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
+	ge = genHelperEncoder{e: e}
+	ee = genHelperEncDriver{encDriver: e.e}
+	return
+}
+
+// Decoder wraps a *Decoder into the helper pair used by generated decode code.
+func (genHelper) Decoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
+	gd = genHelperDecoder{d: d}
+	dd = genHelperDecDriver{decDriver: d.d}
+	return
+}
+
+// genHelperEncDriver exposes the active encDriver to generated code.
+type genHelperEncDriver struct {
+	encDriver
+}
+
+// genHelperDecDriver exposes the active decDriver to generated code.
+type genHelperDecDriver struct {
+	decDriver
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+ M mustHdl
+ F fastpathT
+ e *Encoder
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+ C checkOverflow
+ F fastpathT
+ d *Decoder
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+ return f.e.h
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+ return f.e.be // f.e.hh.isBinaryEncoding()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+ return f.e.js
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+ // f.e.encodeI(iv, false, false)
+ f.e.encodeValue(reflect.ValueOf(iv), nil)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
+ bs, fnerr := iv.MarshalText()
+ f.e.marshalUtf8(bs, fnerr)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
+ bs, fnerr := iv.MarshalJSON()
+ f.e.marshalAsis(bs, fnerr)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
+ bs, fnerr := iv.MarshalBinary()
+ f.e.marshalRaw(bs, fnerr)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) Extension(v interface{}) (xfn *extTypeTagFn) {
+ return f.e.h.getExtForI(v)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) {
+ f.e.e.EncodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) WriteStr(s string) {
+ f.e.w().writestr(s)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteMapStart(length int) { f.e.mapStart(length) }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteMapEnd() { f.e.mapEnd() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteArrayStart(length int) { f.e.arrayStart(length) }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteArrayEnd() { f.e.arrayEnd() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteArrayElem() { f.e.arrayElem() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteMapElemKey() { f.e.mapElemKey() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncWriteMapElemValue() { f.e.mapElemValue() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncEncodeComplex64(v complex64) { f.e.encodeComplex64(v) }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncEncodeComplex128(v complex128) { f.e.encodeComplex128(v) }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncEncode(v interface{}) { f.e.encode(v) }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncEncodeMapNonNil(v interface{}) {
+ if skipFastpathTypeSwitchInDirectCall || !fastpathEncodeTypeSwitch(v, f.e) {
+ f.e.encodeValue(reflect.ValueOf(v), nil)
+ }
+}
+
+// ---------------- DECODER FOLLOWS -----------------
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
+ return f.d.h
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinary() bool {
+ return f.d.be // f.d.hh.isBinaryEncoding()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSwallow() { f.d.swallow() }
+
+// // FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+// func (f genHelperDecoder) DecScratchBuffer() []byte {
+// return f.d.b[:]
+// }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte {
+ return &f.d.b
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
+ rv := reflect.ValueOf(iv)
+ if chkPtr {
+ if x, _ := isDecodeable(rv); !x {
+ f.d.haltAsNotDecodeable(rv)
+ }
+ }
+ f.d.decodeValue(rv, nil)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
+ return f.d.decSliceHelperStart()
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
+ f.d.structFieldNotFound(index, name)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
+ f.d.arrayCannotExpand(sliceLen, streamLen)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
+ halt.onerror(tm.UnmarshalText(f.d.d.DecodeStringAsBytes()))
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
+ f.d.jsonUnmarshalV(tm)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
+ halt.onerror(bm.UnmarshalBinary(f.d.d.DecodeBytes(nil)))
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) IsJSONHandle() bool {
+ return f.d.js
+}
+{{/*
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) I2Rtid(v interface{}) uintptr {
+ return i2rtid(v)
+}
+*/ -}}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) Extension(v interface{}) (xfn *extTypeTagFn) {
+ return f.d.h.getExtForI(v)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) {
+ f.d.d.DecodeExt(v, xfFn.rt, xfFn.tag, xfFn.ext)
+}
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
+ return decInferLen(clen, maxlen, unit)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadMapStart() int { return f.d.mapStart(f.d.d.ReadMapStart()) }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadMapEnd() { f.d.mapEnd() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadArrayStart() int { return f.d.arrayStart(f.d.d.ReadArrayStart()) }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadArrayEnd() { f.d.arrayEnd() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadArrayElem() { f.d.arrayElem() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadMapElemKey() { f.d.mapElemKey() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecReadMapElemValue() { f.d.mapElemValue() }
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecDecodeFloat32() float32 { return f.d.decodeFloat32() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecCheckBreak() bool { return f.d.checkBreak() }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecStringZC(v []byte) string { return f.d.stringZC(v) }
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperDecoder) DecodeBytesInto(v []byte) []byte { return f.d.decodeBytesInto(v) }
diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go
new file mode 100644
index 000000000..e72d00305
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen.generated.go
@@ -0,0 +1,192 @@
+// +build codecgen.exec
+
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
+
+// genDecMapTmpl is the text/template (mirrored from gen-dec-map.go.tmpl) that
+// codecgen expands to decode a stream into a map[K]V. It handles a nil
+// container in the stream, lazy map allocation sized via DecInferLen,
+// optional value reset (MapValueReset/InterfaceReset), and both
+// length-prefixed and check-break-terminated map encodings.
+const genDecMapTmpl = `
+{{var "v"}} := *{{ .Varname }}
+{{var "l"}} := z.DecReadMapStart()
+if {{var "l"}} == codecSelferDecContainerLenNil{{xs}} {
+	*{{ .Varname }} = nil
+} else {
+if {{var "v"}} == nil {
+	{{var "rl"}} := z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+	{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
+	*{{ .Varname }} = {{var "v"}}
+}
+{{ $mk := var "mk" -}}
+var {{ $mk }} {{ .KTyp }}
+var {{var "mv"}} {{ .Typ }}
+var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
+if z.DecBasicHandle().MapValueReset {
+	{{if decElemKindPtr}}{{var "mg"}} = true
+	{{else if decElemKindIntf}}if !z.DecBasicHandle().InterfaceReset { {{var "mg"}} = true }
+	{{else if not decElemKindImmutable}}{{var "mg"}} = true
+	{{end}} }
+if {{var "l"}} != 0 {
+	{{var "hl"}} := {{var "l"}} > 0
+	for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || z.DecCheckBreak()); {{var "j"}}++ {
+	z.DecReadMapElemKey()
+	{{ if eq .KTyp "string" -}}
+	{{ decLineVarK $mk -}}{{- /* decLineVarKStrZC $mk */ -}}
+	{{ else -}}
+	{{ decLineVarK $mk -}}
+	{{ end -}}
+	{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */ -}}
+	if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
+		{{var "mk"}} = z.DecStringZC({{var "bv"}})
+	}
+	{{ end -}}
+	{{if decElemKindPtr -}}
+	{{var "ms"}} = true
+	{{end -}}
+	if {{var "mg"}} {
+		{{if decElemKindPtr -}}
+		{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{ $mk }}]
+		if {{var "mok"}} {
+			{{var "ms"}} = false
+		}
+		{{else -}}
+		{{var "mv"}} = {{var "v"}}[{{ $mk }}]
+		{{end -}}
+	} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
+	z.DecReadMapElemValue()
+	{{var "mdn"}} = false
+	{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y -}}
+	if {{var "mdn"}} {
+		{{var "v"}}[{{ $mk }}] = {{decElemZero}}
+	} else {{if decElemKindPtr}} if {{var "ms"}} {{end}} {
+		{{var "v"}}[{{ $mk }}] = {{var "mv"}}
+	}
+}
+} // else len==0: leave as-is (do not clear map entries)
+z.DecReadMapEnd()
+}
+`
+
+// genDecListTmpl is the text/template (mirrored from gen-dec-array.go.tmpl)
+// that codecgen expands to decode a stream into a slice, array or channel.
+// It handles nil containers, zero-length containers, capacity reuse via
+// DecInferLen, and indefinite-length (check-break) streams.
+//
+// FIX: the DecArrayCannotExpand branch previously emitted `len(v)` with a
+// bare identifier; every other reference uses the generated variable
+// {{var "v"}} (e.g. the enclosing `if {{var "j"}} >= len({{var "v"}})`), so
+// the expanded code for fixed-size arrays referenced an undefined `v`.
+const genDecListTmpl = `
+{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
+{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}
+{{if not isArray -}}
+var {{var "c"}} bool {{/* // changed */}}
+_ = {{var "c"}}
+if {{var "h"}}.IsNil {
+	if {{var "v"}} != nil {
+		{{var "v"}} = nil
+		{{var "c"}} = true
+	}
+} else {{end -}}
+if {{var "l"}} == 0 {
+	{{if isSlice -}}
+	if {{var "v"}} == nil {
+		{{var "v"}} = []{{ .Typ }}{}
+		{{var "c"}} = true
+	} else if len({{var "v"}}) != 0 {
+		{{var "v"}} = {{var "v"}}[:0]
+		{{var "c"}} = true
+	} {{else if isChan }}if {{var "v"}} == nil {
+		{{var "v"}} = make({{ .CTyp }}, 0)
+		{{var "c"}} = true
+	}
+	{{end -}}
+} else {
+	{{var "hl"}} := {{var "l"}} > 0
+	var {{var "rl"}} int
+	_ = {{var "rl"}}
+	{{if isSlice }} if {{var "hl"}} {
+	if {{var "l"}} > cap({{var "v"}}) {
+		{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+		if {{var "rl"}} <= cap({{var "v"}}) {
+			{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
+		} else {
+			{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
+		}
+		{{var "c"}} = true
+	} else if {{var "l"}} != len({{var "v"}}) {
+		{{var "v"}} = {{var "v"}}[:{{var "l"}}]
+		{{var "c"}} = true
+	}
+	}
+	{{end -}}
+	var {{var "j"}} int
+	{{/* // var {{var "dn"}} bool */ -}}
+	for {{var "j"}} = 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || z.DecCheckBreak()); {{var "j"}}++ { // bounds-check-elimination
+	{{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil {
+		if {{var "hl"}} {
+			{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
+		} else {
+			{{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}}
+		}
+		{{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}})
+		{{var "c"}} = true
+	}
+	{{end -}}
+	{{var "h"}}.ElemContainerState({{var "j"}})
+	{{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */ -}}
+	{{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }}
+	{{ decLineVar $x -}}
+	{{var "v"}} <- {{ $x }}
+	{{else}}{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
+	var {{var "db"}} bool
+	if {{var "j"}} >= len({{var "v"}}) {
+		{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }})
+		{{var "c"}} = true
+		{{else}} z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1); {{var "db"}} = true
+		{{end -}}
+	}
+	if {{var "db"}} {
+		z.DecSwallow()
+	} else {
+		{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x -}}
+	}
+	{{end -}}
+	}
+	{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
+		{{var "v"}} = {{var "v"}}[:{{var "j"}}]
+		{{var "c"}} = true
+	} else if {{var "j"}} == 0 && {{var "v"}} == nil {
+		{{var "v"}} = make([]{{ .Typ }}, 0)
+		{{var "c"}} = true
+	}
+	{{end -}}
+}
+{{var "h"}}.End()
+{{if not isArray }}if {{var "c"}} {
+	*{{ .Varname }} = {{var "v"}}
+}
+{{end -}}
+`
+
+// genEncChanTmpl is the text/template (mirrored from gen-enc-chan.go.tmpl)
+// that codecgen expands to drain a channel into a slice before encoding it.
+// Strategy is driven by EncBasicHandle().ChanRecvTimeout:
+//   == 0: consume only immediately-available values (non-blocking select);
+//    > 0: consume until a timer of that duration fires;
+//    < 0: consume until the channel is closed.
+const genEncChanTmpl = `
+{{.Label}}:
+switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; {
+case timeout{{.Sfx}} == 0: // only consume available
+	for {
+		select {
+		case b{{.Sfx}} := <-{{.Chan}}:
+			{{ .Slice }} = append({{.Slice}}, b{{.Sfx}})
+		default:
+			break {{.Label}}
+		}
+	}
+case timeout{{.Sfx}} > 0: // consume until timeout
+	tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}})
+	for {
+		select {
+		case b{{.Sfx}} := <-{{.Chan}}:
+			{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+		case <-tt{{.Sfx}}.C:
+			// close(tt.C)
+			break {{.Label}}
+		}
+	}
+default: // consume until close
+	for b{{.Sfx}} := range {{.Chan}} {
+		{{.Slice}} = append({{.Slice}}, b{{.Sfx}})
+	}
+}
+`
diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go
new file mode 100644
index 000000000..8c5bbf201
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/gen.go
@@ -0,0 +1,2801 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build codecgen.exec
+// +build codecgen.exec
+
+package codec
+
+import (
+ "bytes"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "go/format"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+ // "ugorji.net/zz"
+ "unicode"
+ "unicode/utf8"
+)
+
+// ---------------------------------------------------
+// codecgen supports the full cycle of reflection-based codec:
+// - RawExt
+// - Raw
+// - Extensions
+// - (Binary|Text|JSON)(Unm|M)arshal
+// - generic by-kind
+//
+// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type.
+// In those areas, we try to only do reflection or interface-conversion when NECESSARY:
+// - Extensions, only if Extensions are configured.
+//
+// However, note following codecgen caveats:
+// - Canonical option.
+// If Canonical=true, codecgen'ed code will delegate encoding maps to reflection-based code.
+// This is due to the runtime work needed to marshal a map in canonical mode.
+// - CheckCircularRef option.
+// When encoding a struct, a circular reference can lead to a stack overflow.
+// If CheckCircularRef=true, codecgen'ed code will delegate encoding structs to reflection-based code.
+// - MissingFielder implementation.
+// If a type implements MissingFielder, a Selfer is not generated (with a warning message).
+// Statically reproducing the runtime work needed to extract the missing fields and marshal them along with the struct fields,
+// while handling the Canonical=true special case, was onerous to implement.
+//
+// During encode/decode, Selfer takes precedence.
+// A type implementing Selfer will know how to encode/decode itself statically.
+//
+// The following field types are supported:
+// array: [n]T
+// slice: []T
+// map: map[K]V
+// primitive: [u]int[n], float(32|64), bool, string
+// struct
+//
+// ---------------------------------------------------
+// Note that a Selfer cannot call (e|d).(En|De)code on itself,
+// as this will cause a circular reference, as (En|De)code will call Selfer methods.
+// Any type that implements Selfer must implement completely and not fallback to (En|De)code.
+//
+// In addition, code in this file manages the generation of fast-path implementations of
+// encode/decode of slices/maps of primitive keys/values.
+//
+// Users MUST re-generate their implementations whenever the code shape changes.
+// The generated code will panic if it was generated with a version older than the supporting library.
+// ---------------------------------------------------
+//
+// codec framework is very feature rich.
+// When encoding or decoding into an interface, it depends on the runtime type of the interface.
+// The type of the interface may be a named type, an extension, etc.
+// Consequently, we fallback to runtime codec for encoding/decoding interfaces.
+// In addition, we fallback for any value which cannot be guaranteed at runtime.
+// This allows us support ANY value, including any named types, specifically those which
+// do not implement our interfaces (e.g. Selfer).
+//
+// This explains some slowness compared to other code generation codecs (e.g. msgp).
+// This reduction in speed is only seen when your refers to interfaces,
+// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
+//
+// codecgen will panic if the file was generated with an old version of the library in use.
+//
+// Note:
+// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
+// This way, there isn't a function call overhead just to see that we should not enter a block of code.
+//
+// Note:
+// codecgen-generated code depends on the variables defined by fast-path.generated.go.
+// consequently, you cannot run with tags "codecgen codec.notfastpath".
+//
+// Note:
+// genInternalXXX functions are used for generating fast-path and other internally generated
+// files, and not for use in codecgen.
+
+// Size of a struct or value is not portable across machines, especially across 32-bit vs 64-bit
+// operating systems. This is due to types like int, uintptr, pointers, (and derived types like slice), etc
+// which use the natural word size on those machines, which may be 4 bytes (on 32-bit) or 8 bytes (on 64-bit).
+//
+// Within decInferLen calls, we may generate an explicit size of the entry.
+// We do this because decInferLen values are expected to be approximate,
+// and serve as a good hint on the size of the elements or key+value entry.
+//
+// Since development is done on 64-bit machines, the sizes will be roughly correctly
+// on 64-bit OS, and slightly larger than expected on 32-bit OS.
+// This is ok.
+//
+// For reference, look for 'Size' in fast-path.go.tmpl, gen-dec-(array|map).go.tmpl and gen.go (this file).
+
+// GenVersion is the current version of codecgen.
+//
+// MARKER: Increment this value each time codecgen changes fundamentally.
+// Also update codecgen/gen.go (minimumCodecVersion, genVersion, etc).
+// Fundamental changes are:
+// - helper methods change (signature change, new ones added, some removed, etc)
+// - codecgen command line changes
+//
+// v1: Initial Version
+// v2: -
+// v3: Changes for Kubernetes:
+// changes in signature of some unpublished helper methods and codecgen cmdline arguments.
+// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
+// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
+// v6: removed unsafe from gen, and now uses codecgen.exec tag
+// v7: -
+// v8: current - we now maintain compatibility with old generated code.
+// v9: skipped
+// v10: modified encDriver and decDriver interfaces.
+// v11: remove deprecated methods of encDriver and decDriver.
+// v12: removed deprecated methods from genHelper and changed container tracking logic
+// v13: 20190603 removed DecodeString - use DecodeStringAsBytes instead
+// v14: 20190611 refactored nil handling: TryDecodeAsNil -> selective TryNil, etc
+// v15: 20190626 encDriver.EncodeString handles StringToRaw flag inside handle
+// v16: 20190629 refactoring for v1.1.6
+// v17: 20200911 reduce number of types for which we generate fast path functions (v1.1.8)
+// v18: 20201004 changed definition of genHelper...Extension (to take interface{}) and eliminated I2Rtid method
+// v19: 20201115 updated codecgen cmdline flags and optimized output
+// v20: 20201120 refactored GenHelper to one exported function
+// v21: 20210104 refactored generated code to honor ZeroCopy=true for more efficiency
+// v22: 20210118 fixed issue in generated code when encoding a type which is also a codec.Selfer
+// v23: 20210203 changed slice/map types for which we generate fast-path functions
+// v24: 20210226 robust handling for Canonical|CheckCircularRef flags and MissingFielder implementations
+// v25: 20210406 pass base reflect.Type to side(En|De)code and (En|De)codeExt calls
+const genVersion = 25
+
+const (
+ genCodecPkg = "codec1978" // MARKER: keep in sync with codecgen/gen.go
+ genTempVarPfx = "yy"
+ genTopLevelVarName = "x"
+
+ // ignore canBeNil parameter, and always set to true.
+ // This is because nil can appear anywhere, so we should always check.
+ genAnythingCanBeNil = true
+
+ // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function;
+ // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals
+ // are not executed a lot.
+ //
+ // From testing, it didn't make much difference in runtime, so keep as true (one function only)
+ genUseOneFunctionForDecStructMap = true
+
+ // genStructCanonical configures whether we generate 2 paths based on Canonical flag
+ // when encoding struct fields.
+ genStructCanonical = false
+
+ // genFastpathCanonical configures whether we support Canonical in fast path.
+ // The savings is not much.
+ //
+ // MARKER: This MUST ALWAYS BE TRUE. fast-path.go.tmp doesn't handle it being false.
+ genFastpathCanonical = true
+
+ // genFastpathTrimTypes configures whether we trim uncommon fastpath types.
+ genFastpathTrimTypes = true
+
+ // genDecStructArrayInlineLoopCheck configures whether we create a next function
+ // for each iteration in the loop and call it, or just inline it.
+ //
+ // with inlining, we get better performance but about 10% larger files.
+ genDecStructArrayInlineLoopCheck = true
+)
+
+// genStructMapStyle selects how the struct-from-map decode function is
+// emitted; see genUseOneFunctionForDecStructMap above.
+type genStructMapStyle uint8
+// genStringDecAsBytes and genStringDecZC are marker string types used by the
+// generator to select the DecodeStringAsBytes / zero-copy string decode paths.
+type genStringDecAsBytes string
+type genStringDecZC string
+
+var genStringDecAsBytesTyp = reflect.TypeOf(genStringDecAsBytes(""))
+var genStringDecZCTyp = reflect.TypeOf(genStringDecZC(""))
+// genFormats lists the handle formats codecgen can generate format-specific code for.
+var genFormats = []string{"Json", "Cbor", "Msgpack", "Binc", "Simple"}
+
+const (
+	genStructMapStyleConsolidated genStructMapStyle = iota
+	genStructMapStyleLenPrefix
+	genStructMapStyleCheckBreak
+)
+
+var (
+	errGenAllTypesSamePkg        = errors.New("All types must be in the same package")
+	errGenExpectArrayOrMap       = errors.New("unexpected type - expecting array/map/slice")
+	errGenUnexpectedTypeFastpath = errors.New("fast-path: unexpected type - requires map or slice")
+
+	// NOTE(review): this alphabet repeats '_' (63 distinct chars, not 64).
+	// encoding/base64 only validates length, so this works — presumably the
+	// encoding is used one-way (never decoded); confirm at call sites.
+	genBase64enc  = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
+	genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`)
+)
+
+// genBuf is a minimal append-only byte buffer with chainable helpers,
+// used while assembling fragments of generated source text.
+type genBuf struct {
+	buf []byte
+}
+
+// sIf appends s when b is true, otherwise t, and returns x for chaining.
+func (x *genBuf) sIf(b bool, s, t string) *genBuf {
+	chosen := t
+	if b {
+		chosen = s
+	}
+	x.buf = append(x.buf, chosen...)
+	return x
+}
+// s appends a string to the buffer; returns x to allow chaining.
+func (x *genBuf) s(s string) *genBuf { x.buf = append(x.buf, s...); return x }
+// b appends a byte slice to the buffer; returns x to allow chaining.
+func (x *genBuf) b(s []byte) *genBuf { x.buf = append(x.buf, s...); return x }
+// v returns the accumulated buffer contents as a string.
+func (x *genBuf) v() string { return string(x.buf) }
+// f appends a fmt.Sprintf-formatted string to the buffer.
+func (x *genBuf) f(s string, args ...interface{}) { x.s(fmt.Sprintf(s, args...)) }
+// reset truncates the buffer in place, retaining its capacity for reuse.
+// Re-slicing a nil slice with [:0] is legal in Go and leaves it nil, so the
+// previous `if x.buf != nil` guard was redundant.
+func (x *genBuf) reset() {
+	x.buf = x.buf[:0]
+}
+
+// genRunner holds the mutable state of a single Gen run: the types to emit
+// Selfer implementations for, dedup sets keyed by type rtid, import tracking,
+// and the naming suffixes used to keep generated identifiers unique.
+type genRunner struct {
+	w io.Writer // output
+	c uint64    // counter used for generating varsfx
+	f uint64    // counter used for saying false (exact role unclear from here — see usage below)
+
+	t  []reflect.Type   // list of types to run selfer on
+	tc reflect.Type     // currently running selfer on this type
+	te map[uintptr]bool // types for which the encoder has been created
+	td map[uintptr]bool // types for which the decoder has been created
+	tz map[uintptr]bool // types for which GenIsZero has been created
+
+	cp string // codec import path
+
+	im  map[string]reflect.Type // imports to add
+	imn map[string]string       // package names of imports to add
+	imc uint64                  // counter for import numbers
+
+	is map[reflect.Type]struct{} // types seen during import search
+	bp string                    // base PkgPath, for which we are generating for
+
+	cpfx string // codec package prefix
+
+	ty map[reflect.Type]struct{} // types for which GenIsZero *should* be created
+	tm map[reflect.Type]struct{} // types for which enc/dec must be generated
+	ts []reflect.Type            // types for which enc/dec must be generated
+
+	xs string // top level variable/constant suffix
+	hn string // fn helper type name
+
+	// ti is the TypeInfos passed into Gen by the caller.
+	ti *TypeInfos
+	// rr *rand.Rand // random generator for file-specific types
+
+	// tri-state emission controls (nil presumably means "not forced" —
+	// confirm against codecgen's command-line flag handling).
+	jsonOnlyWhen, toArrayWhen, omitEmptyWhen *bool
+
+	nx bool // no extensions
+}
+
+type genIfClause struct {
+ hasIf bool
+}
+
+func (g *genIfClause) end(x *genRunner) {
+ if g.hasIf {
+ x.line("}")
+ }
+}
+
+func (g *genIfClause) c(last bool) (v string) {
+ if last {
+ if g.hasIf {
+ v = " } else { "
+ }
+ } else if g.hasIf {
+ v = " } else if "
+ } else {
+ v = "if "
+ g.hasIf = true
+ }
+ return
+}
+
// Gen will write a complete go file containing Selfer implementations for each
// type passed. All the types must be in the same package.
//
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool,
	jsonOnlyWhen, toArrayWhen, omitEmptyWhen *bool,
	ti *TypeInfos, types ...reflect.Type) (warnings []string) {
	// All types passed to this method do not have a codec.Selfer method implemented directly.
	// codecgen already checks the AST and skips any types that define the codec.Selfer methods.
	// Consequently, there's no need to check and trim them if they implement codec.Selfer

	if len(types) == 0 {
		return
	}
	x := genRunner{
		w:             w,
		t:             types,
		te:            make(map[uintptr]bool),
		td:            make(map[uintptr]bool),
		tz:            make(map[uintptr]bool),
		im:            make(map[string]reflect.Type),
		imn:           make(map[string]string),
		is:            make(map[reflect.Type]struct{}),
		tm:            make(map[reflect.Type]struct{}),
		ty:            make(map[reflect.Type]struct{}),
		ts:            []reflect.Type{},
		bp:            genImportPath(types[0]),
		xs:            uid,
		ti:            ti,
		jsonOnlyWhen:  jsonOnlyWhen,
		toArrayWhen:   toArrayWhen,
		omitEmptyWhen: omitEmptyWhen,

		nx: noExtensions,
	}
	if x.ti == nil {
		x.ti = defTypeInfos
	}
	if x.xs == "" {
		// no uid supplied: pick a random numeric suffix so generated
		// helper names do not collide across generated files.
		rr := rand.New(rand.NewSource(time.Now().UnixNano()))
		x.xs = strconv.FormatInt(rr.Int63n(9999), 10)
	}

	// gather imports first:
	x.cp = genImportPath(reflect.TypeOf(x))
	x.imn[x.cp] = genCodecPkg

	// iterate, check if all in same package, and remove any missingfielders
	for i := 0; i < len(x.t); {
		t := x.t[i]
		// xdebugf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
		if genImportPath(t) != x.bp {
			halt.onerror(errGenAllTypesSamePkg)
		}
		ti1 := x.ti.get(rt2id(t), t)
		if ti1.flagMissingFielder || ti1.flagMissingFielderPtr {
			// output diagnostic message - that nothing generated for this type
			warnings = append(warnings, fmt.Sprintf("type: '%v' not generated; implements codec.MissingFielder", t))
			// delete x.t[i] in place, keeping order
			copy(x.t[i:], x.t[i+1:])
			x.t = x.t[:len(x.t)-1]
			continue
		}
		x.genRefPkgs(t)
		i++
	}

	// ---- file header: build tags, package clause, imports ----
	x.line("// +build go1.6")
	if buildTags != "" {
		x.line("// +build " + buildTags)
	}
	x.line(`

// Code generated by codecgen - DO NOT EDIT.

`)
	x.line("package " + pkgName)
	x.line("")
	x.line("import (")
	if x.cp != x.bp {
		x.cpfx = genCodecPkg + "."
		x.linef("%s \"%s\"", genCodecPkg, x.cp)
	}
	// use a sorted set of im keys, so that we can get consistent output
	imKeys := make([]string, 0, len(x.im))
	for k := range x.im {
		imKeys = append(imKeys, k)
	}
	sort.Strings(imKeys)
	for _, k := range imKeys { // for k, _ := range x.im {
		if k == x.imn[k] {
			x.linef("\"%s\"", k)
		} else {
			x.linef("%s \"%s\"", x.imn[k], k)
		}
	}
	// add required packages
	for _, k := range [...]string{"runtime", "errors", "strconv"} { // "reflect", "fmt"
		if _, ok := x.im[k]; !ok {
			x.line("\"" + k + "\"")
		}
	}
	x.line(")")
	x.line("")

	// ---- shared constants/vars, suffixed with x.xs for uniqueness ----
	x.line("const (")
	x.linef("// ----- content types ----")
	x.linef("codecSelferCcUTF8%s = %v", x.xs, int64(cUTF8))
	x.linef("codecSelferCcRAW%s = %v", x.xs, int64(cRAW))
	x.linef("// ----- value types used ----")
	for _, vt := range [...]valueType{
		valueTypeArray, valueTypeMap, valueTypeString,
		valueTypeInt, valueTypeUint, valueTypeFloat,
		valueTypeNil,
	} {
		x.linef("codecSelferValueType%s%s = %v", vt.String(), x.xs, int64(vt))
	}

	x.linef("codecSelferBitsize%s = uint8(32 << (^uint(0) >> 63))", x.xs)
	x.linef("codecSelferDecContainerLenNil%s = %d", x.xs, int64(containerLenNil))
	x.line(")")
	x.line("var (")
	x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = " + "errors.New(`only encoded map or array can be decoded into a struct`)")
	x.line(")")
	x.line("")

	// helper type whose methods carry the generated fallback enc/dec functions
	x.hn = "codecSelfer" + x.xs
	x.line("type " + x.hn + " struct{}")
	x.line("")
	x.linef("func %sFalse() bool { return false }", x.hn)
	x.linef("func %sTrue() bool { return true }", x.hn)
	x.line("")
	x.varsfxreset()
	// init(): fail fast if the generated file does not match the runtime's GenVersion
	x.line("func init() {")
	x.linef("if %sGenVersion != %v {", x.cpfx, genVersion)
	x.line("_, file, _, _ := runtime.Caller(0)")
	x.linef("ver := strconv.FormatInt(int64(%sGenVersion), 10)", x.cpfx)
	x.outf(`panic(errors.New("codecgen version mismatch: current: %v, need " + ver + ". Re-generate file: " + file))`, genVersion)
	x.linef("}")
	if len(imKeys) > 0 {
		x.line("if false { // reference the types, but skip this branch at build/run time")
		for _, k := range imKeys {
			t := x.im[k]
			x.linef("var _ %s.%s", x.imn[k], t.Name())
		}
		x.line("} ") // close if false
	}
	x.line("}") // close init
	x.line("")

	// generate rest of type info
	for _, t := range x.t {
		x.tc = t
		x.linef("func (%s) codecSelferViaCodecgen() {}", x.genTypeName(t))
		x.selfer(true)
		x.selfer(false)
		x.tryGenIsZero(t)
	}

	// x.ts accumulates slice/map/chan types needing out-of-line helpers
	// (registered via registerXtraT while the selfers above were generated)
	for _, t := range x.ts {
		rtid := rt2id(t)
		// generate enc functions for all these slice/map types.
		x.varsfxreset()
		x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx)
		x.genRequiredMethodVars(true)
		switch t.Kind() {
		case reflect.Array, reflect.Slice, reflect.Chan:
			x.encListFallback("v", t)
		case reflect.Map:
			x.encMapFallback("v", t)
		default:
			halt.onerror(errGenExpectArrayOrMap)
		}
		x.line("}")
		x.line("")

		// generate dec functions for all these slice/map types.
		x.varsfxreset()
		x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx)
		x.genRequiredMethodVars(false)
		switch t.Kind() {
		case reflect.Array, reflect.Slice, reflect.Chan:
			x.decListFallback("v", rtid, t)
		case reflect.Map:
			x.decMapFallback("v", rtid, t)
		default:
			halt.onerror(errGenExpectArrayOrMap)
		}
		x.line("}")
		x.line("")
	}

	// emit IsCodecEmpty for types flagged during omitempty generation
	for t := range x.ty {
		x.tryGenIsZero(t)
		x.line("")
	}

	x.line("")
	return
}
+
+func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool {
+ // return varname != genTopLevelVarName && t != x.tc
+ // the only time we checkForSelfer is if we are not at the TOP of the generated code.
+ return varname != genTopLevelVarName
+}
+
+func (x *genRunner) arr2str(t reflect.Type, s string) string {
+ if t.Kind() == reflect.Array {
+ return s
+ }
+ return ""
+}
+
+func (x *genRunner) genRequiredMethodVars(encode bool) {
+ x.line("var h " + x.hn)
+ if encode {
+ x.line("z, r := " + x.cpfx + "GenHelper().Encoder(e)")
+ } else {
+ x.line("z, r := " + x.cpfx + "GenHelper().Decoder(d)")
+ }
+ x.line("_, _, _ = h, z, r")
+}
+
// genRefPkgs records the import required for t (if any), then recurses
// into t's element/key/field types so every package referenced by the
// generated code gets registered in x.im/x.imn.
func (x *genRunner) genRefPkgs(t reflect.Type) {
	if _, ok := x.is[t]; ok {
		return
	}
	x.is[t] = struct{}{} // mark seen first, so cyclic types terminate
	tpkg, tname := genImportPath(t), t.Name()
	// only named, exported types from a foreign package (not the target
	// package, not the codec package itself) need an import entry
	if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' {
		if _, ok := x.im[tpkg]; !ok {
			x.im[tpkg] = t
			if idx := strings.LastIndex(tpkg, "/"); idx < 0 {
				x.imn[tpkg] = tpkg
			} else {
				// synthesize a unique alias (pkgN_base) to avoid clashes
				// between different paths sharing the same base name
				x.imc++
				x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false)
			}
		}
	}
	switch t.Kind() {
	case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan:
		x.genRefPkgs(t.Elem())
	case reflect.Map:
		x.genRefPkgs(t.Elem())
		x.genRefPkgs(t.Key())
	case reflect.Struct:
		// only exported fields appear in generated code
		for i := 0; i < t.NumField(); i++ {
			if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' {
				x.genRefPkgs(t.Field(i).Type)
			}
		}
	}
}
+
+// sayFalse will either say "false" or use a function call that returns false.
+func (x *genRunner) sayFalse() string {
+ x.f++
+ if x.f%2 == 0 {
+ return x.hn + "False()"
+ }
+ return "false"
+}
+
// sayTrue will either say "true" or use a function call that returns true.
// (The original comment said "sayFalse" — a copy/paste slip; behavior is
// the mirror of sayFalse, sharing the same x.f alternation counter.)
func (x *genRunner) sayTrue() string {
	x.f++
	if x.f%2 == 0 {
		return x.hn + "True()"
	}
	return "true"
}
+
// varsfx returns the next unique numeric suffix for temporary variable
// names in the generated code.
func (x *genRunner) varsfx() string {
	x.c++
	return strconv.FormatUint(x.c, 10)
}
+
// varsfxreset restarts the temp-variable suffix counter; called at the
// start of each generated method so suffixes stay small and stable.
func (x *genRunner) varsfxreset() {
	x.c = 0
}
+
// out writes s verbatim to the output writer, halting on any write error.
func (x *genRunner) out(s string) {
	_, err := io.WriteString(x.w, s)
	genCheckErr(err)
}
+
// outf writes a fmt.Fprintf-formatted string to the output writer,
// halting on any write error.
func (x *genRunner) outf(s string, params ...interface{}) {
	_, err := fmt.Fprintf(x.w, s, params...)
	genCheckErr(err)
}
+
+func (x *genRunner) line(s string) {
+ x.out(s)
+ if len(s) == 0 || s[len(s)-1] != '\n' {
+ x.out("\n")
+ }
+}
+
+func (x *genRunner) lineIf(s string) {
+ if s != "" {
+ x.line(s)
+ }
+}
+
+func (x *genRunner) linef(s string, params ...interface{}) {
+ x.outf(s, params...)
+ if len(s) == 0 || s[len(s)-1] != '\n' {
+ x.out("\n")
+ }
+}
+
// genTypeName returns the source representation of t as it must appear in
// the generated file (qualified with an import alias when t lives in a
// different package).
func (x *genRunner) genTypeName(t reflect.Type) (n string) {
	// if the type has a PkgPath, which doesn't match the current package,
	// then include it.
	// We cannot depend on t.String() because it includes current package,
	// or t.PkgPath because it includes full import path,
	//
	var ptrPfx string
	for t.Kind() == reflect.Ptr {
		ptrPfx += "*"
		t = t.Elem()
	}
	if tn := t.Name(); tn != "" {
		// named type: use the (possibly package-qualified) name
		return ptrPfx + x.genTypeNamePrim(t)
	}
	// unnamed composite types: rebuild the literal, recursing on components
	switch t.Kind() {
	case reflect.Map:
		return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem())
	case reflect.Slice:
		return ptrPfx + "[]" + x.genTypeName(t.Elem())
	case reflect.Array:
		return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem())
	case reflect.Chan:
		return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem())
	default:
		if t == intfTyp {
			return ptrPfx + "interface{}"
		} else {
			return ptrPfx + x.genTypeNamePrim(t)
		}
	}
}
+
+func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) {
+ if t.Name() == "" {
+ return t.String()
+ } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) {
+ return t.Name()
+ } else {
+ return x.imn[genImportPath(t)] + "." + t.Name()
+ // return t.String() // best way to get the package name inclusive
+ }
+}
+
+func (x *genRunner) genZeroValueR(t reflect.Type) string {
+ // if t is a named type, w
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func,
+ reflect.Slice, reflect.Map, reflect.Invalid:
+ return "nil"
+ case reflect.Bool:
+ return "false"
+ case reflect.String:
+ return `""`
+ case reflect.Struct, reflect.Array:
+ return x.genTypeName(t) + "{}"
+ default: // all numbers
+ return "0"
+ }
+}
+
// genMethodNameT returns the helper-method name suffix for t, relative to
// the type currently being generated (x.tc).
func (x *genRunner) genMethodNameT(t reflect.Type) (s string) {
	return genMethodNameT(t, x.tc)
}
+
// tryGenIsZero generates an IsCodecEmpty method for struct type t (used by
// omitempty checks), unless t already implements it or one was generated
// earlier in this run. Returns true only when a method body was emitted.
func (x *genRunner) tryGenIsZero(t reflect.Type) (done bool) {
	if t.Kind() != reflect.Struct || t.Implements(isCodecEmptyerTyp) {
		return
	}

	rtid := rt2id(t)

	if _, ok := x.tz[rtid]; ok {
		// already generated: just clear the pending-request entry
		delete(x.ty, t)
		return
	}

	x.tz[rtid] = true
	delete(x.ty, t)

	ti := x.ti.get(rtid, t)
	tisfi := ti.sfi.source() // always use sequence from file. decStruct expects same thing.
	varname := genTopLevelVarName

	x.linef("func (%s *%s) IsCodecEmpty() bool {", varname, x.genTypeName(t))

	// emit one big OR-expression: non-empty if any field is non-empty
	anonSeen := make(map[reflect.Type]bool)
	var omitline genBuf
	for _, si := range tisfi {
		if si.path.parent != nil {
			// field promoted from an embedded struct: handle each embedded
			// root only once (its own IsCodecEmpty covers its fields)
			root := si.path.root()
			if anonSeen[root.typ] {
				continue
			}
			anonSeen[root.typ] = true
		}
		t2 := genOmitEmptyLinePreChecks(varname, t, si, &omitline, true)
		// if Ptr, we already checked if nil above
		if t2.Type.Kind() != reflect.Ptr {
			x.doEncOmitEmptyLine(t2, varname, &omitline)
			omitline.s(" || ")
		}
	}
	omitline.s(" false") // terminate the trailing " || "
	x.linef("return !(%s)", omitline.v())

	x.line("}")
	x.line("")
	return true
}
+
// selfer generates CodecEncodeSelf (encode=true) or CodecDecodeSelf plus the
// codecDecodeSelfFromMap*/codecDecodeSelfFromArray helpers (encode=false)
// for the type currently in x.tc.
func (x *genRunner) selfer(encode bool) {
	t := x.tc
	// ti := x.ti.get(rt2id(t), t)
	t0 := t
	// always make decode use a pointer receiver,
	// and structs/arrays always use a ptr receiver (encode|decode)
	isptr := !encode || t.Kind() == reflect.Array || (t.Kind() == reflect.Struct && t != timeTyp)
	x.varsfxreset()

	fnSigPfx := "func (" + genTopLevelVarName + " "
	if isptr {
		fnSigPfx += "*"
	}
	fnSigPfx += x.genTypeName(t)
	x.out(fnSigPfx)

	if isptr {
		t = reflect.PtrTo(t)
	}
	if encode {
		x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {")
		x.genRequiredMethodVars(true)
		if t0.Kind() == reflect.Struct {
			// circular-ref checking must go through the runtime encoder
			x.linef("if z.EncBasicHandle().CheckCircularRef { z.EncEncode(%s); return }", genTopLevelVarName)
		}
		x.encVar(genTopLevelVarName, t)
	} else {
		x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {")
		x.genRequiredMethodVars(false)
		// do not use decVar, as there is no need to check TryDecodeAsNil
		// or way to elegantly handle that, and also setting it to a
		// non-nil value doesn't affect the pointer passed.
		// x.decVar(genTopLevelVarName, t, false)
		x.dec(genTopLevelVarName, t0, true)
	}
	x.line("}")
	x.line("")

	// decode-side struct helpers below apply only to structs
	if encode || t0.Kind() != reflect.Struct {
		return
	}

	// write is containerMap
	if genUseOneFunctionForDecStructMap {
		x.out(fnSigPfx)
		x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {")
		x.genRequiredMethodVars(false)
		x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated)
		x.line("}")
		x.line("")
	} else {
		x.out(fnSigPfx)
		x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {")
		x.genRequiredMethodVars(false)
		x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix)
		x.line("}")
		x.line("")

		x.out(fnSigPfx)
		x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {")
		x.genRequiredMethodVars(false)
		x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak)
		x.line("}")
		x.line("")
	}

	// write containerArray
	x.out(fnSigPfx)
	x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {")
	x.genRequiredMethodVars(false)
	x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0)
	x.line("}")
	x.line("")

}
+
// xtraSM emits a call to the out-of-line helper method for a
// chan/array/slice/map type (used for chan, array, slice, map), then
// registers t so that helper actually gets generated later in Gen.
func (x *genRunner) xtraSM(varname string, t reflect.Type, ti *typeInfo, encode, isptr bool) {
	var ptrPfx, addrPfx string
	if isptr {
		ptrPfx = "*" // caller holds *T: deref when passing by value
	} else {
		addrPfx = "&" // caller holds T: take address for the decode side
	}
	if encode {
		x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), ptrPfx, x.genTypeName(t), varname)
	} else {
		x.linef("h.dec%s((*%s)(%s%s), d)", x.genMethodNameT(t), x.genTypeName(t), addrPfx, varname)
	}
	x.registerXtraT(t, ti)
}
+
// registerXtraT records t (a chan/slice/array/map type) in x.tm/x.ts so an
// out-of-line enc/dec helper is generated for it, recursing into element and
// key types. Types served by a fast-path are skipped.
func (x *genRunner) registerXtraT(t reflect.Type, ti *typeInfo) {
	// recursively register the types
	tk := t.Kind()
	if tk == reflect.Ptr {
		x.registerXtraT(t.Elem(), nil)
		return
	}
	if _, ok := x.tm[t]; ok {
		return // already registered
	}

	switch tk {
	case reflect.Chan, reflect.Slice, reflect.Array, reflect.Map:
	default:
		return
	}
	// only register the type if it will not default to a fast-path
	if ti == nil {
		ti = x.ti.get(rt2id(t), t)
	}
	if _, rtidu := genFastpathUnderlying(t, ti.rtid, ti); fastpathAvIndex(rtidu) != -1 {
		return
	}
	x.tm[t] = struct{}{}
	x.ts = append(x.ts, t)
	// check if this refers to any xtra types eg. a slice of array: add the array
	x.registerXtraT(t.Elem(), nil)
	if tk == reflect.Map {
		x.registerXtraT(t.Key(), nil)
	}
}
+
+// encVar will encode a variable.
+// The parameter, t, is the reflect.Type of the variable itself
+func (x *genRunner) encVar(varname string, t reflect.Type) {
+ var checkNil bool
+ // case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan:
+ // do not include checkNil for slice and maps, as we already checkNil below it
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Interface, reflect.Chan:
+ checkNil = true
+ }
+ x.encVarChkNil(varname, t, checkNil)
+}
+
// encVarChkNil emits the encode call for varname, optionally wrapped in a
// nil check, normalizing to the representation x.enc expects: structs and
// arrays (except time.Time) are passed by address, pointers are deref'd.
func (x *genRunner) encVarChkNil(varname string, t reflect.Type, checkNil bool) {
	if checkNil {
		x.linef("if %s == nil { r.EncodeNil() } else {", varname)
	}

	switch t.Kind() {
	case reflect.Ptr:
		telem := t.Elem()
		tek := telem.Kind()
		if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) {
			// *struct / *array: pass pointer straight through (enc wants *T)
			x.enc(varname, genNonPtr(t), true)
			break
		}
		// otherwise deref into a temp and encode by value
		i := x.varsfx()
		x.line(genTempVarPfx + i + " := *" + varname)
		x.enc(genTempVarPfx+i, genNonPtr(t), false)
	case reflect.Struct, reflect.Array:
		if t == timeTyp {
			// time.Time encodes by value
			x.enc(varname, t, false)
			break
		}
		// take address into a temp to avoid copying the struct/array
		i := x.varsfx()
		x.line(genTempVarPfx + i + " := &" + varname)
		x.enc(genTempVarPfx+i, t, true)
	default:
		x.enc(varname, t, false)
	}

	if checkNil {
		x.line("}")
	}
}
+
// enc will encode a variable (varname) of type t, where t represents T.
// if t is !time.Time and t is of kind reflect.Struct or reflect.Array, varname is of type *T
// (to prevent copying),
// else t is of type T
func (x *genRunner) enc(varname string, t reflect.Type, isptr bool) {
	rtid := rt2id(t)
	ti2 := x.ti.get(rtid, t)
	// We call CodecEncodeSelf if one of the following are honored:
	//   - the type already implements Selfer, call that
	//   - the type has a Selfer implementation just created, use that
	//   - the type is in the list of the ones we will generate for, but it is not currently being generated

	mi := x.varsfx()
	// tptr := reflect.PtrTo(t)
	// tk := t.Kind()

	// check if
	//   - type is time.Time, RawExt, Raw
	//   - the type implements (Text|JSON|Binary)(Unm|M)arshal

	// hasIf stitches the following candidate encodings into one emitted
	// if/else-if chain; the first matching branch at runtime wins.
	var hasIf genIfClause
	defer hasIf.end(x) // end if block (if necessary)

	var ptrPfx, addrPfx string
	if isptr {
		ptrPfx = "*"
	} else {
		addrPfx = "&"
	}

	if t == timeTyp {
		x.linef("%s z.EncBasicHandle().TimeBuiltin() { r.EncodeTime(%s%s)", hasIf.c(false), ptrPfx, varname)
		// return
	}
	if t == rawTyp {
		x.linef("%s z.EncRaw(%s%s)", hasIf.c(true), ptrPfx, varname)
		return
	}
	if t == rawExtTyp {
		x.linef("%s r.EncodeRawExt(%s%s)", hasIf.c(true), addrPfx, varname)
		return
	}
	// only check for extensions if extensions are configured,
	// and the type is named, and has a packagePath,
	// and this is not the CodecEncodeSelf or CodecDecodeSelf method (i.e. it is not a Selfer)
	if !x.nx && varname != genTopLevelVarName && t != genStringDecAsBytesTyp &&
		t != genStringDecZCTyp && genImportPath(t) != "" && t.Name() != "" {
		yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
		x.linef("%s %s := z.Extension(%s); %s != nil { z.EncExtension(%s, %s) ",
			hasIf.c(false), yy, varname, yy, varname, yy)
	}

	if x.checkForSelfer(t, varname) {
		if ti2.flagSelfer {
			x.linef("%s %s.CodecEncodeSelf(e)", hasIf.c(true), varname)
			return
		} else if ti2.flagSelferPtr {
			// Selfer on pointer receiver: take address into a temp first
			x.linef("%s %ssf%s := &%s", hasIf.c(true), genTempVarPfx, mi, varname)
			x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi)
			return
		}

		if _, ok := x.te[rtid]; ok {
			// encoder for this type was generated earlier in this run
			x.linef("%s %s.CodecEncodeSelf(e)", hasIf.c(true), varname)
			return
		}
	}

	inlist := false
	for _, t0 := range x.t {
		if t == t0 {
			inlist = true
			if x.checkForSelfer(t, varname) {
				x.linef("%s %s.CodecEncodeSelf(e)", hasIf.c(true), varname)
				return
			}
			break
		}
	}

	var rtidAdded bool
	if t == x.tc {
		x.te[rtid] = true
		rtidAdded = true
	}

	if ti2.flagBinaryMarshaler {
		x.linef("%s z.EncBinary() { z.EncBinaryMarshal(%s%v) ", hasIf.c(false), ptrPfx, varname)
	} else if ti2.flagBinaryMarshalerPtr {
		x.linef("%s z.EncBinary() { z.EncBinaryMarshal(%s%v) ", hasIf.c(false), addrPfx, varname)
	}

	if ti2.flagJsonMarshaler {
		x.linef("%s !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%s%v) ", hasIf.c(false), ptrPfx, varname)
	} else if ti2.flagJsonMarshalerPtr {
		x.linef("%s !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%s%v) ", hasIf.c(false), addrPfx, varname)
	} else if ti2.flagTextMarshaler {
		x.linef("%s !z.EncBinary() { z.EncTextMarshal(%s%v) ", hasIf.c(false), ptrPfx, varname)
	} else if ti2.flagTextMarshalerPtr {
		x.linef("%s !z.EncBinary() { z.EncTextMarshal(%s%v) ", hasIf.c(false), addrPfx, varname)
	}

	// final "else": encode by kind
	x.lineIf(hasIf.c(true))

	switch t.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		x.line("r.EncodeInt(int64(" + varname + "))")
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		x.line("r.EncodeUint(uint64(" + varname + "))")
	case reflect.Float32:
		x.line("r.EncodeFloat32(float32(" + varname + "))")
	case reflect.Float64:
		x.line("r.EncodeFloat64(float64(" + varname + "))")
	case reflect.Complex64:
		x.linef("z.EncEncodeComplex64(complex64(%s))", varname)
	case reflect.Complex128:
		x.linef("z.EncEncodeComplex128(complex128(%s))", varname)
	case reflect.Bool:
		x.line("r.EncodeBool(bool(" + varname + "))")
	case reflect.String:
		x.linef("r.EncodeString(string(%s))", varname)
	case reflect.Chan:
		x.xtraSM(varname, t, ti2, true, false)
		// x.encListFallback(varname, rtid, t)
	case reflect.Array:
		_, rtidu := genFastpathUnderlying(t, rtid, ti2)
		if fastpathAvIndex(rtidu) != -1 {
			// slice fast-path over the array's elements via [:]
			g := x.newFastpathGenV(ti2.key)
			x.linef("z.F.%sV((%s)(%s[:]), e)", g.MethodNamePfx("Enc", false), x.genTypeName(ti2.key), varname)
		} else {
			x.xtraSM(varname, t, ti2, true, true)
		}
	case reflect.Slice:
		// if nil, call dedicated function
		// if a []byte, call dedicated function
		// if a known fastpath slice, call dedicated function
		// else write encode function in-line.
		// - if elements are primitives or Selfers, call dedicated function on each member.
		// - else call Encoder.encode(XXX) on it.

		x.linef("if %s == nil { r.EncodeNil() } else {", varname)
		if rtid == uint8SliceTypId {
			x.line("r.EncodeStringBytesRaw([]byte(" + varname + "))")
		} else {
			tu, rtidu := genFastpathUnderlying(t, rtid, ti2)
			if fastpathAvIndex(rtidu) != -1 {
				g := x.newFastpathGenV(tu)
				if rtid == rtidu {
					x.linef("z.F.%sV(%s, e)", g.MethodNamePfx("Enc", false), varname)
				} else {
					// named type over a fastpath underlying: convert first
					x.linef("z.F.%sV((%s)(%s), e)", g.MethodNamePfx("Enc", false), x.genTypeName(tu), varname)
				}
			} else {
				x.xtraSM(varname, t, ti2, true, false)
			}
		}
		x.linef("} // end block: if %s slice == nil", varname)
	case reflect.Map:
		// if nil, call dedicated function
		// if a known fastpath map, call dedicated function
		// else write encode function in-line.
		// - if elements are primitives or Selfers, call dedicated function on each member.
		// - else call Encoder.encode(XXX) on it.
		x.linef("if %s == nil { r.EncodeNil() } else {", varname)
		tu, rtidu := genFastpathUnderlying(t, rtid, ti2)
		if fastpathAvIndex(rtidu) != -1 {
			g := x.newFastpathGenV(tu)
			if rtid == rtidu {
				x.linef("z.F.%sV(%s, e)", g.MethodNamePfx("Enc", false), varname)
			} else {
				x.linef("z.F.%sV((%s)(%s), e)", g.MethodNamePfx("Enc", false), x.genTypeName(tu), varname)
			}
		} else {
			x.xtraSM(varname, t, ti2, true, false)
		}
		x.linef("} // end block: if %s map == nil", varname)
	case reflect.Struct:
		if !inlist {
			delete(x.te, rtid)
			x.line("z.EncFallback(" + varname + ")")
			break
		}
		x.encStruct(varname, rtid, t)
	default:
		if rtidAdded {
			delete(x.te, rtid)
		}
		x.line("z.EncFallback(" + varname + ")")
	}
}
+
+func (x *genRunner) encZero(t reflect.Type) {
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x.line("r.EncodeInt(0)")
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ x.line("r.EncodeUint(0)")
+ case reflect.Float32:
+ x.line("r.EncodeFloat32(0)")
+ case reflect.Float64:
+ x.line("r.EncodeFloat64(0)")
+ case reflect.Complex64:
+ x.line("z.EncEncodeComplex64(0)")
+ case reflect.Complex128:
+ x.line("z.EncEncodeComplex128(0)")
+ case reflect.Bool:
+ x.line("r.EncodeBool(false)")
+ case reflect.String:
+ x.linef(`r.EncodeString("")`)
+ default:
+ x.line("r.EncodeNil()")
+ }
+}
+
// genOmitEmptyLinePreChecks walks the (possibly embedded) field path of si,
// appending "X != nil && " guards to omitline for every intermediate pointer,
// and returns the final struct field. With oneLevel it stops after the first
// path segment (used by tryGenIsZero, which handles embedded roots itself).
func genOmitEmptyLinePreChecks(varname string, t reflect.Type, si *structFieldInfo, omitline *genBuf, oneLevel bool) (t2 reflect.StructField) {
	// xdebug2f("calling genOmitEmptyLinePreChecks on: %v", t)
	t2typ := t
	varname3 := varname
	// go through the loop, record the t2 field explicitly,
	// and gather the omit line if embedded in pointers.
	fullpath := si.path.fullpath()
	for i, path := range fullpath {
		for t2typ.Kind() == reflect.Ptr {
			t2typ = t2typ.Elem()
		}
		t2 = t2typ.Field(int(path.index))
		t2typ = t2.Type
		varname3 = varname3 + "." + t2.Name
		// do not include actual field in the omit line.
		// that is done subsequently (right after - below).
		if i+1 < len(fullpath) && t2typ.Kind() == reflect.Ptr {
			omitline.s(varname3).s(" != nil && ")
		}
		if oneLevel {
			break
		}
	}
	return
}
+
// doEncOmitEmptyLine resets the sayFalse/sayTrue alternation counter before
// generating one omitempty check line for field t2 of varname.
func (x *genRunner) doEncOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) {
	x.f = 0
	x.encOmitEmptyLine(t2, varname, buf)
}
+
// encOmitEmptyLine appends to buf a boolean expression that is true when
// field t2 of varname is NON-empty (i.e. must be written).
func (x *genRunner) encOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) {
	// xdebugf("calling encOmitEmptyLine on: %v", t2.Type)
	// smartly check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc.
	// also, for maps/slices/arrays, check if len ! 0 (not if == zero value)
	varname2 := varname + "." + t2.Name
	switch t2.Type.Kind() {
	case reflect.Struct:
		rtid2 := rt2id(t2.Type)
		ti2 := x.ti.get(rtid2, t2.Type)
		// xdebugf(">>>> structfield: omitempty: type: %s, field: %s\n", t2.Type.Name(), t2.Name)
		if ti2.rtid == timeTypId {
			buf.s("!(").s(varname2).s(".IsZero())")
			break
		}
		if ti2.flagIsZeroerPtr || ti2.flagIsZeroer {
			buf.s("!(").s(varname2).s(".IsZero())")
			break
		}
		if t2.Type.Implements(isCodecEmptyerTyp) {
			buf.s("!(").s(varname2).s(".IsCodecEmpty())")
			break
		}
		_, ok := x.tz[rtid2]
		if ok {
			// IsCodecEmpty was generated earlier in this run
			buf.s("!(").s(varname2).s(".IsCodecEmpty())")
			break
		}
		// if we *should* create a IsCodecEmpty for it, but haven't yet, add it here
		// _, ok = x.ty[rtid2]
		if genImportPath(t2.Type) == x.bp {
			x.ty[t2.Type] = struct{}{}
			buf.s("!(").s(varname2).s(".IsCodecEmpty())")
			break
		}
		if ti2.flagComparable {
			buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type))
			break
		}
		// fmt.Printf("???? !!!! We shouldn't get to this point !!!! ???? - for type: %v\n", t2.Type)
		// last resort: OR together recursive checks on the exported fields
		// buf.s("(")
		buf.s(x.sayFalse()) // buf.s("false")
		for i, n := 0, t2.Type.NumField(); i < n; i++ {
			f := t2.Type.Field(i)
			if f.PkgPath != "" { // unexported
				continue
			}
			buf.s(" || ")
			x.encOmitEmptyLine(f, varname2, buf)
		}
		//buf.s(")")
	case reflect.Bool:
		buf.s("bool(").s(varname2).s(")")
	case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
		buf.s("len(").s(varname2).s(") != 0")
	default:
		buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type))
	}
}
+
// encStruct generates the struct-encoding body: either as an array (struct
// tag 'toArray' or handle's StructToArray) or as a map, honoring omitempty
// and (when enabled) canonical field ordering.
func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
	// Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. )
	// replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it
	//
	// if t === type currently running selfer on, do for all
	ti := x.ti.get(rtid, t)
	i := x.varsfx()
	// sepVarname := genTempVarPfx + "sep" + i
	numfieldsvar := genTempVarPfx + "q" + i
	ti2arrayvar := genTempVarPfx + "r" + i
	struct2arrvar := genTempVarPfx + "2arr" + i

	tisfi := ti.sfi.source() // always use sequence from file. decStruct expects same thing.

	// genFQN captures, per field: its fully-qualified access expression and
	// the nil-guard expression for any pointers along its embedded path.
	type genFQN struct {
		i       string
		fqname  string
		nilLine genBuf
		nilVar  string
		canNil  bool
		sf      reflect.StructField
	}

	genFQNs := make([]genFQN, len(tisfi))
	for j, si := range tisfi {
		q := &genFQNs[j]
		q.i = x.varsfx()
		q.nilVar = genTempVarPfx + "n" + q.i
		q.canNil = false
		q.fqname = varname
		{
			t2typ := t
			fullpath := si.path.fullpath()
			for _, path := range fullpath {
				for t2typ.Kind() == reflect.Ptr {
					t2typ = t2typ.Elem()
				}
				q.sf = t2typ.Field(int(path.index))
				t2typ = q.sf.Type
				q.fqname += "." + q.sf.Name
				if t2typ.Kind() == reflect.Ptr {
					if !q.canNil {
						q.nilLine.f("%s == nil", q.fqname)
						q.canNil = true
					} else {
						q.nilLine.f(" || %s == nil", q.fqname)
					}
				}
			}
		}
	}

	// x.line(sepVarname + " := !z.EncBinary()")
	x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar)
	// x.linef("_, _ = %s, %s", sepVarname, struct2arrvar)
	x.linef("_ = %s", struct2arrvar)
	x.linef("const %s bool = %v // struct tag has 'toArray'", ti2arrayvar, ti.toArray)

	// emit one bool per nil-able field, evaluated once up front
	for j := range genFQNs {
		q := &genFQNs[j]
		if q.canNil {
			x.linef("var %s bool = %s", q.nilVar, q.nilLine.v())
		}
	}

	// var nn int
	// due to omitEmpty, we need to calculate the
	// number of non-empty things we write out first.
	// This is required as we need to pre-determine the size of the container,
	// to support length-prefixing.
	omitEmptySometimes := x.omitEmptyWhen == nil
	omitEmptyAlways := (x.omitEmptyWhen != nil && *(x.omitEmptyWhen))
	// omitEmptyNever := (x.omitEmptyWhen != nil && !*(x.omitEmptyWhen))

	toArraySometimes := x.toArrayWhen == nil
	toArrayAlways := (x.toArrayWhen != nil && *(x.toArrayWhen))
	toArrayNever := (x.toArrayWhen != nil && !(*(x.toArrayWhen)))

	if (omitEmptySometimes && ti.anyOmitEmpty) || omitEmptyAlways {
		x.linef("var %s = [%v]bool{ // should field at this index be written?", numfieldsvar, len(tisfi))

		for _, si := range tisfi {
			if omitEmptySometimes && !si.path.omitEmpty {
				x.linef("true, // %s", si.encName) // si.fieldName)
				continue
			}
			var omitline genBuf
			t2 := genOmitEmptyLinePreChecks(varname, t, si, &omitline, false)
			x.doEncOmitEmptyLine(t2, varname, &omitline)
			x.linef("%s, // %s", omitline.v(), si.encName) // si.fieldName)
		}
		x.line("}")
		x.linef("_ = %s", numfieldsvar)
	}

	// ---- array representation ----
	if toArraySometimes {
		x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray
	}
	if toArraySometimes || toArrayAlways {
		x.linef("z.EncWriteArrayStart(%d)", len(tisfi))

		for j, si := range tisfi {
			doOmitEmptyCheck := (omitEmptySometimes && si.path.omitEmpty) || omitEmptyAlways
			q := &genFQNs[j]
			// if the type of the field is a Selfer, or one of the ones
			if q.canNil {
				x.linef("if %s { z.EncWriteArrayElem(); r.EncodeNil() } else { ", q.nilVar)
			}
			x.linef("z.EncWriteArrayElem()")
			if doOmitEmptyCheck {
				x.linef("if %s[%v] {", numfieldsvar, j)
			}
			x.encVarChkNil(q.fqname, q.sf.Type, false)
			if doOmitEmptyCheck {
				// empty field in array form: still must write a zero slot
				x.linef("} else {")
				x.encZero(q.sf.Type)
				x.linef("}")
			}
			if q.canNil {
				x.line("}")
			}
		}

		x.line("z.EncWriteArrayEnd()")
	}
	if toArraySometimes {
		x.linef("} else {") // if not ti.toArray
	}
	// ---- map representation ----
	if toArraySometimes || toArrayNever {
		if (omitEmptySometimes && ti.anyOmitEmpty) || omitEmptyAlways {
			// count the fields actually written, for the length prefix
			x.linef("var %snn%s int", genTempVarPfx, i)
			x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
			x.linef("z.EncWriteMapStart(%snn%s)", genTempVarPfx, i)
			x.linef("%snn%s = %v", genTempVarPfx, i, 0)
		} else {
			x.linef("z.EncWriteMapStart(%d)", len(tisfi))
		}

		// fn emits key/value pairs for the given field ordering
		fn := func(tisfi []*structFieldInfo) {
			for j, si := range tisfi {
				q := &genFQNs[j]
				doOmitEmptyCheck := (omitEmptySometimes && si.path.omitEmpty) || omitEmptyAlways
				if doOmitEmptyCheck {
					x.linef("if %s[%v] {", numfieldsvar, j)
				}
				x.linef("z.EncWriteMapElemKey()")

				// emulate EncStructFieldKey
				switch ti.keyType {
				case valueTypeInt:
					x.linef("r.EncodeInt(z.M.Int(strconv.ParseInt(`%s`, 10, 64)))", si.encName)
				case valueTypeUint:
					x.linef("r.EncodeUint(z.M.Uint(strconv.ParseUint(`%s`, 10, 64)))", si.encName)
				case valueTypeFloat:
					x.linef("r.EncodeFloat64(z.M.Float(strconv.ParseFloat(`%s`, 64)))", si.encName)
				default: // string
					if x.jsonOnlyWhen == nil {
						// ascii-alphanumeric names can be written pre-quoted for json
						if si.path.encNameAsciiAlphaNum {
							x.linef(`if z.IsJSONHandle() { z.WriteStr("\"%s\"") } else { `, si.encName)
						}
						x.linef("r.EncodeString(`%s`)", si.encName)
						if si.path.encNameAsciiAlphaNum {
							x.linef("}")
						}
					} else if *(x.jsonOnlyWhen) {
						if si.path.encNameAsciiAlphaNum {
							x.linef(`z.WriteStr("\"%s\"")`, si.encName)
						} else {
							x.linef("r.EncodeString(`%s`)", si.encName)
						}
					} else {
						x.linef("r.EncodeString(`%s`)", si.encName)
					}
				}
				x.line("z.EncWriteMapElemValue()")
				if q.canNil {
					x.line("if " + q.nilVar + " { r.EncodeNil() } else { ")
					x.encVarChkNil(q.fqname, q.sf.Type, false)
					x.line("}")
				} else {
					x.encVarChkNil(q.fqname, q.sf.Type, false)
				}
				if doOmitEmptyCheck {
					x.line("}")
				}
			}
		}

		if genStructCanonical {
			x.linef("if z.EncBasicHandle().Canonical {") // if Canonical block
			fn(ti.sfi.sorted())
			x.linef("} else {") // else !cononical block
			fn(ti.sfi.source())
			x.linef("}") // end if Canonical block
		} else {
			fn(tisfi)
		}

		x.line("z.EncWriteMapEnd()")
	}
	if toArraySometimes {
		x.linef("} ") // end if/else ti.toArray
	}
}
+
// encListFallback generates inline encode code for a slice, array or chan of
// type t, used when no fastpath function or Selfer implementation applies.
// Byte slices/arrays are written raw; channels are first drained into a
// temporary slice via genEncChanTmpl, then encoded like a slice.
func (x *genRunner) encListFallback(varname string, t reflect.Type) {
	x.linef("if %s == nil { r.EncodeNil(); return }", varname)
	elemBytes := t.Elem().Kind() == reflect.Uint8
	if t.AssignableTo(uint8SliceTyp) {
		// []byte (or assignable to it): encode the raw bytes directly.
		x.linef("r.EncodeStringBytesRaw([]byte(%s))", varname)
		return
	}
	if t.Kind() == reflect.Array && elemBytes {
		// [N]byte: slice the array and encode raw.
		x.linef("r.EncodeStringBytesRaw(((*[%d]byte)(%s))[:])", t.Len(), varname)
		return
	}
	i := x.varsfx()
	if t.Kind() == reflect.Chan {
		// Drain the channel into temp slice sch<i> using the chan template,
		// then fall through (or encode raw if the elements are bytes).
		type ts struct {
			Label, Chan, Slice, Sfx string
		}
		tm, err := template.New("").Parse(genEncChanTmpl)
		genCheckErr(err)
		x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
		x.linef("var sch%s []%s", i, x.genTypeName(t.Elem()))
		err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i})
		genCheckErr(err)
		if elemBytes {
			x.linef("r.EncodeStringBytesRaw([]byte(%s))", "sch"+i)
			x.line("}")
			return
		}
		// Encode the drained slice below instead of the channel itself.
		varname = "sch" + i
	}

	x.line("z.EncWriteArrayStart(len(" + varname + "))")

	// Iterate by index (not by value) so large elements are not copied
	// in the generated code.
	x.linef("for %sv%s := range %s {", genTempVarPfx, i, varname)
	x.linef("z.EncWriteArrayElem()")
	x.encVar(fmt.Sprintf("%s[%sv%s]", varname, genTempVarPfx, i), t.Elem())
	x.line("}")

	x.line("z.EncWriteArrayEnd()")
	if t.Kind() == reflect.Chan {
		x.line("}")
	}
}
+
+func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
+ x.linef("if %s == nil { r.EncodeNil()", varname)
+ x.linef("} else if z.EncBasicHandle().Canonical { z.EncEncodeMapNonNil(%s)", varname)
+ x.line("} else {")
+ i := x.varsfx()
+ x.linef("z.EncWriteMapStart(len(%s))", varname)
+ x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
+ x.linef("z.EncWriteMapElemKey()")
+ x.encVar(genTempVarPfx+"k"+i, t.Key())
+ x.line("z.EncWriteMapElemValue()")
+ x.encVar(genTempVarPfx+"v"+i, t.Elem())
+ x.line("}")
+ x.line("z.EncWriteMapEnd()")
+ x.line("}")
+}
+
// decVarInitPtr walks the (possibly embedded) field path of si within t,
// returning the full selector expression (varname3) and the final field (t2).
// Along the way it fills two optional code buffers:
//   - newbuf: statements that allocate any nil intermediate pointers, so the
//     leaf field can be decoded into;
//   - nilbuf: statements that reset the leaf (or set nilvar=true) when a nil
//     was decoded, guarded so intermediate nil pointers are not dereferenced.
func (x *genRunner) decVarInitPtr(varname, nilvar string, t reflect.Type, si *structFieldInfo,
	newbuf, nilbuf *genBuf) (varname3 string, t2 reflect.StructField) {
	//we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
	// t2 = t.FieldByIndex(si.is)
	varname3 = varname
	t2typ := t
	t2kind := t2typ.Kind()
	var nilbufed bool
	if si != nil {
		fullpath := si.path.fullpath()
		for _, path := range fullpath {
			// only one-level pointers can be seen in a type
			if t2typ.Kind() == reflect.Ptr {
				t2typ = t2typ.Elem()
			}
			t2 = t2typ.Field(int(path.index))
			t2typ = t2.Type
			varname3 = varname3 + "." + t2.Name
			t2kind = t2typ.Kind()
			if t2kind != reflect.Ptr {
				continue
			}
			// Field at this level is a pointer: emit allocation and
			// extend the nil-guard condition.
			if newbuf != nil {
				if len(newbuf.buf) > 0 {
					newbuf.s("\n")
				}
				newbuf.f("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
			}
			if nilbuf != nil {
				if !nilbufed {
					nilbuf.s("if ").s(varname3).s(" != nil")
					nilbufed = true
				} else {
					nilbuf.s(" && ").s(varname3).s(" != nil")
				}
			}
		}
	}
	if nilbuf != nil {
		if nilbufed {
			nilbuf.s(" { ").s("// remove the if-true\n")
		}
		if nilvar != "" {
			nilbuf.s(nilvar).s(" = true")
		} else if tk := t2typ.Kind(); tk == reflect.Ptr {
			// For a leaf pointer reached via a selector/index, just nil it;
			// for a bare local pointer, zero the pointed-to value instead.
			if strings.IndexByte(varname3, '.') != -1 || strings.IndexByte(varname3, '[') != -1 {
				nilbuf.s(varname3).s(" = nil")
			} else {
				nilbuf.s("*").s(varname3).s(" = ").s(x.genZeroValueR(t2typ.Elem()))
			}
		} else {
			nilbuf.s(varname3).s(" = ").s(x.genZeroValueR(t2typ))
		}
		if nilbufed {
			nilbuf.s("}")
		}
	}
	return
}
+
// decVarMain generates the core decode for a variable called varname, of type t.
// For non-pointer types it tries a direct primitive assignment first (only for
// unnamed types), falling back to dec(...). For pointer types it optionally
// allocates nil pointers (checkNotNil), derefs through any **T chains, then
// decodes into the pointed-to value. rand is a unique suffix for temp names.
func (x *genRunner) decVarMain(varname, rand string, t reflect.Type, checkNotNil bool) {
	// We only encode as nil if a nillable value.
	// This removes some of the wasted checks for TryDecodeAsNil.
	// We need to think about this more, to see what happens if omitempty, etc
	// cause a nil value to be stored when something is expected.
	// This could happen when decoding from a struct encoded as an array.
	// For that, decVar should be called with canNil=true, to force true as its value.
	var varname2 string
	if t.Kind() != reflect.Ptr {
		// Named types may implement Selfer/Unmarshaler, so only unnamed
		// (PkgPath == "") types take the primitive shortcut.
		if t.PkgPath() != "" || !x.decTryAssignPrimitive(varname, t, false) {
			x.dec(varname, t, false)
		}
	} else {
		if checkNotNil {
			x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem()))
		}
		// Ensure we set underlying ptr to a non-nil value (so we can deref to it later).
		// There's a chance of a **T in here which is nil.
		var ptrPfx string
		for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() {
			ptrPfx += "*"
			if checkNotNil {
				x.linef("if %s%s == nil { %s%s = new(%s)}", ptrPfx, varname, ptrPfx, varname, x.genTypeName(t))
			}
		}
		// Should we create temp var if a slice/map indexing? No. dec(...) can now handle it.

		if ptrPfx == "" {
			x.dec(varname, t, true)
		} else {
			// Multi-level pointer: deref into a temp var, then decode into it.
			varname2 = genTempVarPfx + "z" + rand
			x.line(varname2 + " := " + ptrPfx + varname)
			x.dec(varname2, t, true)
		}
	}
}
+
+// decVar takes a variable called varname, of type t
+func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, checkNotNil bool) {
+
+ // We only encode as nil if a nillable value.
+ // This removes some of the wasted checks for TryDecodeAsNil.
+ // We need to think about this more, to see what happens if omitempty, etc
+ // cause a nil value to be stored when something is expected.
+ // This could happen when decoding from a struct encoded as an array.
+ // For that, decVar should be called with canNil=true, to force true as its value.
+
+ i := x.varsfx()
+ if t.Kind() == reflect.Ptr {
+ var buf genBuf
+ x.decVarInitPtr(varname, nilvar, t, nil, nil, &buf)
+ x.linef("if r.TryNil() { %s } else {", buf.buf)
+ x.decVarMain(varname, i, t, checkNotNil)
+ x.line("} ")
+ } else {
+ x.decVarMain(varname, i, t, checkNotNil)
+ }
+}
+
+// dec will decode a variable (varname) of type t or ptrTo(t) if isptr==true.
+func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) {
+ // assumptions:
+ // - the varname is to a pointer already. No need to take address of it
+ // - t is always a baseType T (not a *T, etc).
+ rtid := rt2id(t)
+ ti2 := x.ti.get(rtid, t)
+
+ // check if
+ // - type is time.Time, Raw, RawExt
+ // - the type implements (Text|JSON|Binary)(Unm|M)arshal
+
+ mi := x.varsfx()
+
+ var hasIf genIfClause
+ defer hasIf.end(x)
+
+ var ptrPfx, addrPfx string
+ if isptr {
+ ptrPfx = "*"
+ } else {
+ addrPfx = "&"
+ }
+ if t == timeTyp {
+ x.linef("%s z.DecBasicHandle().TimeBuiltin() { %s%v = r.DecodeTime()", hasIf.c(false), ptrPfx, varname)
+ // return
+ }
+ if t == rawTyp {
+ x.linef("%s %s%v = z.DecRaw()", hasIf.c(true), ptrPfx, varname)
+ return
+ }
+
+ if t == rawExtTyp {
+ x.linef("%s r.DecodeExt(%s%v, 0, nil)", hasIf.c(true), addrPfx, varname)
+ return
+ }
+
+ // only check for extensions if extensions are configured,
+ // and the type is named, and has a packagePath,
+ // and this is not the CodecEncodeSelf or CodecDecodeSelf method (i.e. it is not a Selfer)
+ // xdebugf("genRunner.dec: varname: %v, t: %v, genImportPath: %v, t.Name: %v", varname, t, genImportPath(t), t.Name())
+ if !x.nx && varname != genTopLevelVarName && t != genStringDecAsBytesTyp &&
+ t != genStringDecZCTyp && genImportPath(t) != "" && t.Name() != "" {
+ // first check if extensions are configued, before doing the interface conversion
+ yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi)
+ x.linef("%s %s := z.Extension(%s); %s != nil { z.DecExtension(%s%s, %s) ", hasIf.c(false), yy, varname, yy, addrPfx, varname, yy)
+ }
+
+ if x.checkForSelfer(t, varname) {
+ if ti2.flagSelfer {
+ x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname)
+ return
+ }
+ if ti2.flagSelferPtr {
+ x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname)
+ return
+ }
+ if _, ok := x.td[rtid]; ok {
+ x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname)
+ return
+ }
+ }
+
+ inlist := false
+ for _, t0 := range x.t {
+ if t == t0 {
+ inlist = true
+ if x.checkForSelfer(t, varname) {
+ x.linef("%s %s.CodecDecodeSelf(d)", hasIf.c(true), varname)
+ return
+ }
+ break
+ }
+ }
+
+ var rtidAdded bool
+ if t == x.tc {
+ x.td[rtid] = true
+ rtidAdded = true
+ }
+
+ if ti2.flagBinaryUnmarshaler {
+ x.linef("%s z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", hasIf.c(false), ptrPfx, varname)
+ } else if ti2.flagBinaryUnmarshalerPtr {
+ x.linef("%s z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", hasIf.c(false), addrPfx, varname)
+ }
+ if ti2.flagJsonUnmarshaler {
+ x.linef("%s !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", hasIf.c(false), ptrPfx, varname)
+ } else if ti2.flagJsonUnmarshalerPtr {
+ x.linef("%s !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", hasIf.c(false), addrPfx, varname)
+ } else if ti2.flagTextUnmarshaler {
+ x.linef("%s !z.DecBinary() { z.DecTextUnmarshal(%s%v)", hasIf.c(false), ptrPfx, varname)
+ } else if ti2.flagTextUnmarshalerPtr {
+ x.linef("%s !z.DecBinary() { z.DecTextUnmarshal(%s%v)", hasIf.c(false), addrPfx, varname)
+ }
+
+ x.lineIf(hasIf.c(true))
+
+ if x.decTryAssignPrimitive(varname, t, isptr) {
+ return
+ }
+
+ switch t.Kind() {
+ case reflect.Chan:
+ x.xtraSM(varname, t, ti2, false, isptr)
+ case reflect.Array:
+ _, rtidu := genFastpathUnderlying(t, rtid, ti2)
+ if fastpathAvIndex(rtidu) != -1 {
+ g := x.newFastpathGenV(ti2.key)
+ x.linef("z.F.%sN((%s)(%s[:]), d)", g.MethodNamePfx("Dec", false), x.genTypeName(ti2.key), varname)
+ } else {
+ x.xtraSM(varname, t, ti2, false, isptr)
+ }
+ case reflect.Slice:
+ // if a []byte, call dedicated function
+ // if a known fastpath slice, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+
+ if rtid == uint8SliceTypId {
+ x.linef("%s%s = z.DecodeBytesInto(%s(%s[]byte)(%s))", ptrPfx, varname, ptrPfx, ptrPfx, varname)
+ } else {
+ tu, rtidu := genFastpathUnderlying(t, rtid, ti2)
+ if fastpathAvIndex(rtidu) != -1 {
+ g := x.newFastpathGenV(tu)
+ if rtid == rtidu {
+ x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname)
+ } else {
+ x.linef("z.F.%sX((*%s)(%s%s), d)", g.MethodNamePfx("Dec", false), x.genTypeName(tu), addrPfx, varname)
+ }
+ } else {
+ x.xtraSM(varname, t, ti2, false, isptr)
+ // x.decListFallback(varname, rtid, false, t)
+ }
+ }
+ case reflect.Map:
+ // if a known fastpath map, call dedicated function
+ // else write encode function in-line.
+ // - if elements are primitives or Selfers, call dedicated function on each member.
+ // - else call Encoder.encode(XXX) on it.
+
+ tu, rtidu := genFastpathUnderlying(t, rtid, ti2)
+ if fastpathAvIndex(rtidu) != -1 {
+ g := x.newFastpathGenV(tu)
+ if rtid == rtidu {
+ x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname)
+ } else {
+ x.linef("z.F.%sX((*%s)(%s%s), d)", g.MethodNamePfx("Dec", false), x.genTypeName(tu), addrPfx, varname)
+ }
+ } else {
+ x.xtraSM(varname, t, ti2, false, isptr)
+ }
+ case reflect.Struct:
+ if inlist {
+ // no need to create temp variable if isptr, or x.F or x[F]
+ if isptr || strings.IndexByte(varname, '.') != -1 || strings.IndexByte(varname, '[') != -1 {
+ x.decStruct(varname, rtid, t)
+ } else {
+ varname2 := genTempVarPfx + "j" + mi
+ x.line(varname2 + " := &" + varname)
+ x.decStruct(varname2, rtid, t)
+ }
+ } else {
+ // delete(x.td, rtid)
+ x.line("z.DecFallback(" + addrPfx + varname + ", false)")
+ }
+ default:
+ if rtidAdded {
+ delete(x.te, rtid)
+ }
+ x.line("z.DecFallback(" + addrPfx + varname + ", true)")
+ }
+}
+
// decTryAssignPrimitive emits a direct assignment decoding varname when t's
// kind is a primitive (ints, uints, floats, complex, bool, string), returning
// true if it handled the type. It returns false for any other kind, leaving
// the caller to handle it.
//
// This should only be used for exact primitives (ie un-named types).
// Named types may be implementations of Selfer, Unmarshaler, etc.
// They should be handled by dec(...)
func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type, isptr bool) (done bool) {
	var ptr string
	if isptr {
		ptr = "*"
	}
	switch t.Kind() {
	case reflect.Int:
		x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs)
	case reflect.Int8:
		x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 8))", ptr, varname, x.genTypeName(t))
	case reflect.Int16:
		x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 16))", ptr, varname, x.genTypeName(t))
	case reflect.Int32:
		x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 32))", ptr, varname, x.genTypeName(t))
	case reflect.Int64:
		x.linef("%s%s = (%s)(r.DecodeInt64())", ptr, varname, x.genTypeName(t))

	case reflect.Uint:
		x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs)
	case reflect.Uint8:
		x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 8))", ptr, varname, x.genTypeName(t))
	case reflect.Uint16:
		x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 16))", ptr, varname, x.genTypeName(t))
	case reflect.Uint32:
		x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 32))", ptr, varname, x.genTypeName(t))
	case reflect.Uint64:
		x.linef("%s%s = (%s)(r.DecodeUint64())", ptr, varname, x.genTypeName(t))
	case reflect.Uintptr:
		x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs)

	case reflect.Float32:
		x.linef("%s%s = (%s)(z.DecDecodeFloat32())", ptr, varname, x.genTypeName(t))
	case reflect.Float64:
		x.linef("%s%s = (%s)(r.DecodeFloat64())", ptr, varname, x.genTypeName(t))

	// complex values are decoded from their real component (imaginary = 0)
	case reflect.Complex64:
		x.linef("%s%s = (%s)(complex(z.DecDecodeFloat32(), 0))", ptr, varname, x.genTypeName(t))
	case reflect.Complex128:
		x.linef("%s%s = (%s)(complex(r.DecodeFloat64(), 0))", ptr, varname, x.genTypeName(t))

	case reflect.Bool:
		x.linef("%s%s = (%s)(r.DecodeBool())", ptr, varname, x.genTypeName(t))
	case reflect.String:
		// two sentinel string types select zero-copy variants of string decoding
		if t == genStringDecAsBytesTyp {
			x.linef("%s%s = r.DecodeStringAsBytes()", ptr, varname)
		} else if t == genStringDecZCTyp {
			x.linef("%s%s = (string)(z.DecStringZC(r.DecodeStringAsBytes()))", ptr, varname)
		} else {
			x.linef("%s%s = (%s)(z.DecStringZC(r.DecodeStringAsBytes()))", ptr, varname, x.genTypeName(t))
		}
	default:
		return false
	}
	return true
}
+
// decListFallback generates inline decode code for a slice, array or chan of
// type t, by executing genDecListTmpl with per-type helper funcs. Byte
// slices/arrays short-circuit to dedicated byte decoding. rtid is accepted
// for signature symmetry with callers but not used here.
func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
	if t.AssignableTo(uint8SliceTyp) {
		x.line("*" + varname + " = z.DecodeBytesInto(*((*[]byte)(" + varname + ")))")
		return
	}
	if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
		x.linef("r.DecodeBytes( ((*[%d]byte)(%s))[:])", t.Len(), varname)
		return
	}
	// tstruc carries the values genDecListTmpl interpolates.
	type tstruc struct {
		TempVar string
		Sfx string
		Rand string
		Varname string
		CTyp string
		Typ string
		Immutable bool
		Size int
	}
	telem := t.Elem()
	ts := tstruc{genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())}

	funcs := make(template.FuncMap)

	// decLineVar emits element-decode code as a template side effect.
	funcs["decLineVar"] = func(varname string) string {
		x.decVar(varname, "", telem, false, true)
		return ""
	}
	funcs["var"] = func(s string) string {
		return ts.TempVar + s + ts.Rand
	}
	funcs["xs"] = func() string {
		return ts.Sfx
	}
	funcs["zero"] = func() string {
		return x.genZeroValueR(telem)
	}
	funcs["isArray"] = func() bool {
		return t.Kind() == reflect.Array
	}
	funcs["isSlice"] = func() bool {
		return t.Kind() == reflect.Slice
	}
	funcs["isChan"] = func() bool {
		return t.Kind() == reflect.Chan
	}
	tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl)
	genCheckErr(err)
	genCheckErr(tm.Execute(x.w, &ts))
}
+
// decMapFallback generates inline decode code for a map of type t, by
// executing genDecMapTmpl with per-type helper funcs. rtid is accepted for
// signature symmetry with callers but not used here.
func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) {
	// tstruc carries the values genDecMapTmpl interpolates.
	type tstruc struct {
		TempVar string
		Sfx string
		Rand string
		Varname string
		KTyp string
		Typ string
		Size int
	}
	telem := t.Elem()
	tkey := t.Key()
	ts := tstruc{
		genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey),
		x.genTypeName(telem), int(telem.Size() + tkey.Size()),
	}

	funcs := make(template.FuncMap)
	funcs["decElemZero"] = func() string {
		return x.genZeroValueR(telem)
	}
	funcs["decElemKindImmutable"] = func() bool {
		return genIsImmutable(telem)
	}
	funcs["decElemKindPtr"] = func() bool {
		return telem.Kind() == reflect.Ptr
	}
	funcs["decElemKindIntf"] = func() bool {
		return telem.Kind() == reflect.Interface
	}
	// key decoders: as-bytes and zero-copy variants use sentinel string types
	funcs["decLineVarKStrBytes"] = func(varname string) string {
		x.decVar(varname, "", genStringDecAsBytesTyp, false, true)
		return ""
	}
	funcs["decLineVarKStrZC"] = func(varname string) string {
		x.decVar(varname, "", genStringDecZCTyp, false, true)
		return ""
	}
	funcs["decLineVarK"] = func(varname string) string {
		x.decVar(varname, "", tkey, false, true)
		return ""
	}
	funcs["decLineVar"] = func(varname, decodedNilVarname string) string {
		x.decVar(varname, decodedNilVarname, telem, false, true)
		return ""
	}
	funcs["var"] = func(s string) string {
		return ts.TempVar + s + ts.Rand
	}
	funcs["xs"] = func() string {
		return ts.Sfx
	}

	tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl)
	genCheckErr(err)
	genCheckErr(tm.Execute(x.w, &ts))
}
+
// decStructMapSwitch emits the switch over a decoded map key (kName) that
// dispatches to per-field decode code for struct type t; unknown keys go to
// DecStructFieldNotFound.
func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) {
	ti := x.ti.get(rtid, t)
	tisfi := ti.sfi.source() // always use sequence from file. decStruct expects same thing.
	x.line("switch string(" + kName + ") {")
	var newbuf, nilbuf genBuf
	for _, si := range tisfi {
		x.line("case \"" + si.encName + "\":")
		newbuf.reset()
		nilbuf.reset()
		// allocate nil embedded pointers before decoding; reset on decoded nil
		varname3, t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf)
		if len(newbuf.buf) > 0 {
			x.linef("if r.TryNil() { %s } else { %s", nilbuf.buf, newbuf.buf)
		}
		x.decVarMain(varname3, x.varsfx(), t2.Type, false)
		if len(newbuf.buf) > 0 {
			x.line("}")
		}
	}
	x.line("default:")
	// pass the slice here, so that the string will not escape, and maybe save allocation
	x.linef("z.DecStructFieldNotFound(-1, string(%s))", kName)
	x.linef("} // end switch %s", kName)
}
+
// decStructMap emits the loop that decodes struct t from a map container.
// style selects the loop condition: length-prefixed, check-break, or (default)
// a hybrid that handles both depending on whether the decoded length (in
// lenvarname) is known (>= 0).
func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) {
	tpfx := genTempVarPfx
	ti := x.ti.get(rtid, t)
	i := x.varsfx()
	kName := tpfx + "s" + i

	switch style {
	case genStructMapStyleLenPrefix:
		x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i)
	case genStructMapStyleCheckBreak:
		x.linef("for %sj%s := 0; !z.DecCheckBreak(); %sj%s++ {", tpfx, i, tpfx, i)
	default: // 0, otherwise.
		x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
		x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i)
		x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname)
		x.line("} else { if z.DecCheckBreak() { break }; }")
	}
	x.line("z.DecReadMapElemKey()")

	// emulate decstructfieldkey: read the key into kName as []byte, converting
	// numeric key types to their decimal text via the scratch buffer.
	switch ti.keyType {
	case valueTypeInt:
		x.linef("%s := strconv.AppendInt(z.DecScratchArrayBuffer()[:0], r.DecodeInt64(), 10)", kName)
	case valueTypeUint:
		x.linef("%s := strconv.AppendUint(z.DecScratchArrayBuffer()[:0], r.DecodeUint64(), 10)", kName)
	case valueTypeFloat:
		x.linef("%s := strconv.AppendFloat(z.DecScratchArrayBuffer()[:0], r.DecodeFloat64(), 'f', -1, 64)", kName)
	default: // string
		x.linef("%s := r.DecodeStringAsBytes()", kName)
	}

	x.line("z.DecReadMapElemValue()")
	x.decStructMapSwitch(kName, varname, rtid, t)

	x.line("} // end for " + tpfx + "j" + i)
}
+
// decStructArray emits the code that decodes struct t from an array container:
// one element per field in file order, then a trailing loop that drains and
// reports any extra elements. breakString is the statement emitted to leave
// the enclosing generated function (e.g. "return").
func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) {
	tpfx := genTempVarPfx
	i := x.varsfx()
	ti := x.ti.get(rtid, t)
	tisfi := ti.sfi.source() // always use sequence from file. decStruct expects same thing.
	x.linef("var %sj%s int", tpfx, i)
	x.linef("var %sb%s bool", tpfx, i) // break
	x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
	if !genDecStructArrayInlineLoopCheck {
		// emit the end-of-array check once as a closure, called before each field
		x.linef("var %sfn%s = func() bool { ", tpfx, i)
		x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = z.DecCheckBreak() };",
			tpfx, i, tpfx, i, tpfx, i,
			tpfx, i, lenvarname, tpfx, i)
		x.linef("if %sb%s { z.DecReadArrayEnd(); return true }; return false", tpfx, i)
		x.linef("} // end func %sfn%s", tpfx, i)
	}
	var newbuf, nilbuf genBuf
	for _, si := range tisfi {
		if genDecStructArrayInlineLoopCheck {
			// inline end-of-array check before each field
			x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = z.DecCheckBreak() }",
				tpfx, i, tpfx, i, tpfx, i,
				tpfx, i, lenvarname, tpfx, i)
			x.linef("if %sb%s { z.DecReadArrayEnd(); %s }", tpfx, i, breakString)
		} else {
			x.linef("if %sfn%s() { %s }", tpfx, i, breakString)
		}
		x.line("z.DecReadArrayElem()")
		newbuf.reset()
		nilbuf.reset()
		// allocate nil embedded pointers before decoding; reset on decoded nil
		varname3, t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf)
		if len(newbuf.buf) > 0 {
			x.linef("if r.TryNil() { %s } else { %s", nilbuf.buf, newbuf.buf)
		}
		x.decVarMain(varname3, x.varsfx(), t2.Type, false)
		if len(newbuf.buf) > 0 {
			x.line("}")
		}
	}
	// read remaining values and throw away.
	x.line("for {")
	x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = z.DecCheckBreak() }",
		tpfx, i, tpfx, i, tpfx, i,
		tpfx, i, lenvarname, tpfx, i)
	x.linef("if %sb%s { break }", tpfx, i)
	x.line("z.DecReadArrayElem()")
	x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
	x.line("}")
}
+
// decStruct emits the container-type dispatch for decoding struct t into
// varname: nil resets to the zero value, map/array containers delegate to the
// generated codecDecodeSelfFromMap/Array helpers, anything else panics.
// varname MUST be a ptr, or a struct field or a slice element.
func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
	i := x.varsfx()
	x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i)
	x.linef("if %sct%s == codecSelferValueTypeNil%s {", genTempVarPfx, i, x.xs)
	x.linef("*(%s) = %s{}", varname, x.genTypeName(t))
	x.linef("} else if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs)
	x.line(genTempVarPfx + "l" + i + " := z.DecReadMapStart()")
	x.linef("if %sl%s == 0 {", genTempVarPfx, i)
	if genUseOneFunctionForDecStructMap {
		x.line("} else { ")
		x.linef("%s.codecDecodeSelfFromMap(%sl%s, d)", varname, genTempVarPfx, i)
	} else {
		// split into length-prefixed vs check-break variants
		x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ")
		x.line(varname + ".codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)")
		x.line("} else {")
		x.line(varname + ".codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)")
	}
	x.line("}")
	x.line("z.DecReadMapEnd()")

	// else if container is array
	x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs)
	x.line(genTempVarPfx + "l" + i + " := z.DecReadArrayStart()")
	x.linef("if %sl%s != 0 {", genTempVarPfx, i)
	x.linef("%s.codecDecodeSelfFromArray(%sl%s, d)", varname, genTempVarPfx, i)
	x.line("}")
	x.line("z.DecReadArrayEnd()")
	// else panic
	x.line("} else { ")
	x.line("panic(errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + ")")
	x.line("} ")
}
+
+// --------
+
// fastpathGenV describes a type for which fastpath codec functions are
// generated.
type fastpathGenV struct {
	// fastpathGenV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice
	MapKey string
	Elem string
	Primitive string
	Size int
	NoCanonical bool
}
+
+func (x *genRunner) newFastpathGenV(t reflect.Type) (v fastpathGenV) {
+ v.NoCanonical = !genFastpathCanonical
+ switch t.Kind() {
+ case reflect.Slice, reflect.Array:
+ te := t.Elem()
+ v.Elem = x.genTypeName(te)
+ v.Size = int(te.Size())
+ case reflect.Map:
+ te := t.Elem()
+ tk := t.Key()
+ v.Elem = x.genTypeName(te)
+ v.MapKey = x.genTypeName(tk)
+ v.Size = int(te.Size() + tk.Size())
+ default:
+ halt.onerror(errGenUnexpectedTypeFastpath)
+ }
+ return
+}
+
+func (x *fastpathGenV) MethodNamePfx(prefix string, prim bool) string {
+ var name []byte
+ if prefix != "" {
+ name = append(name, prefix...)
+ }
+ if prim {
+ name = append(name, genTitleCaseName(x.Primitive)...)
+ } else {
+ if x.MapKey == "" {
+ name = append(name, "Slice"...)
+ } else {
+ name = append(name, "Map"...)
+ name = append(name, genTitleCaseName(x.MapKey)...)
+ }
+ name = append(name, genTitleCaseName(x.Elem)...)
+ }
+ return string(name)
+}
+
+// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise.
+//
+// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
+// where PkgPath returns the full path, including the vendoring pre-fix that should have been stripped.
+// We strip it here.
+func genImportPath(t reflect.Type) (s string) {
+ s = t.PkgPath()
+ if genCheckVendor {
+ // HACK: always handle vendoring. It should be typically on in go 1.6, 1.7
+ s = genStripVendor(s)
+ }
+ return
+}
+
// genGoIdentifier maps s to a valid Go identifier: letters and digits are
// kept, every other rune becomes '_'. When checkFirstChar is set and s does
// not start with a letter, a leading '_' is prepended.
// A go identifier is (letter|_)[letter|number|_]*
func genGoIdentifier(s string, checkFirstChar bool) string {
	var b strings.Builder
	b.Grow(len(s))
	for i, r := range s {
		if checkFirstChar && i == 0 && !unicode.IsLetter(r) {
			b.WriteByte('_')
		}
		// r must be unicode_letter, unicode_digit or _
		if unicode.IsLetter(r) || unicode.IsDigit(r) {
			b.WriteRune(r)
		} else {
			b.WriteByte('_')
		}
	}
	return b.String()
}
+
+func genNonPtr(t reflect.Type) reflect.Type {
+ for t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+func genFastpathUnderlying(t reflect.Type, rtid uintptr, ti *typeInfo) (tu reflect.Type, rtidu uintptr) {
+ tu = t
+ rtidu = rtid
+ if ti.flagHasPkgPath {
+ tu = ti.fastpathUnderlying
+ rtidu = rt2id(tu)
+ }
+ return
+}
+
// genTitleCaseName returns the identifier-friendly title-cased form of a type
// name: "Intf" for interface{}, "Bytes" for byte-slice spellings, otherwise s
// with its first byte upper-cased. s must be non-empty.
func genTitleCaseName(s string) string {
	switch s {
	case "interface{}", "interface {}":
		return "Intf"
	case "[]byte", "[]uint8", "bytes":
		return "Bytes"
	}
	return strings.ToUpper(s[:1]) + s[1:]
}
+
// genMethodNameT derives a method-name component for type t: pointer levels
// become "Ptrto" prefixes; named types in the same package as tRef use their
// bare name; other named types use a sanitized qualified name (dots replaced)
// or a base64-derived name when the qualified name is not identifier-safe;
// unnamed composite kinds recurse with "Map"/"Slice"/"Array<N>"/"Chan*"
// prefixes.
func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) {
	var ptrPfx string
	for t.Kind() == reflect.Ptr {
		ptrPfx += "Ptrto"
		t = t.Elem()
	}
	tstr := t.String()
	if tn := t.Name(); tn != "" {
		if tRef != nil && genImportPath(t) == genImportPath(tRef) {
			return ptrPfx + tn
		} else {
			if genQNameRegex.MatchString(tstr) {
				return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
			} else {
				return ptrPfx + genCustomTypeName(tstr)
			}
		}
	}
	switch t.Kind() {
	case reflect.Map:
		return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef)
	case reflect.Slice:
		return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef)
	case reflect.Array:
		return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef)
	case reflect.Chan:
		var cx string
		switch t.ChanDir() {
		case reflect.SendDir:
			cx = "ChanSend"
		case reflect.RecvDir:
			cx = "ChanRecv"
		default:
			cx = "Chan"
		}
		return ptrPfx + cx + genMethodNameT(t.Elem(), tRef)
	default:
		if t == intfTyp {
			return ptrPfx + "Interface"
		} else {
			if tRef != nil && genImportPath(t) == genImportPath(tRef) {
				if t.Name() != "" {
					return ptrPfx + t.Name()
				} else {
					return ptrPfx + genCustomTypeName(tstr)
				}
			} else {
				// best way to get the package name inclusive
				// return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
				// return ptrPfx + genBase64enc.EncodeToString([]byte(tstr))
				if t.Name() != "" && genQNameRegex.MatchString(tstr) {
					return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
				} else {
					return ptrPfx + genCustomTypeName(tstr)
				}
			}
		}
	}
}
+
+// genCustomNameForType base64encodes the t.String() value in such a way
+// that it can be used within a function name.
+func genCustomTypeName(tstr string) string {
+ len2 := genBase64enc.EncodedLen(len(tstr))
+ bufx := make([]byte, len2)
+ genBase64enc.Encode(bufx, []byte(tstr))
+ for i := len2 - 1; i >= 0; i-- {
+ if bufx[i] == '=' {
+ len2--
+ } else {
+ break
+ }
+ }
+ return string(bufx[:len2])
+}
+
// genIsImmutable reports whether t's kind is a scalar (per scalarBitset),
// i.e. a value that generated code may treat as immutable.
func genIsImmutable(t reflect.Type) (v bool) {
	return scalarBitset.isset(byte(t.Kind()))
}
+
// genInternal is the data passed to the templates that generate the
// fast-path and helper files for each supported format.
type genInternal struct {
	Version int
	Values []fastpathGenV
	Formats []string
}
+
+func (x genInternal) FastpathLen() (l int) {
+ for _, v := range x.Values {
+ // if v.Primitive == "" && !(v.MapKey == "" && v.Elem == "uint8") {
+ if v.Primitive == "" {
+ l++
+ }
+ }
+ return
+}
+
// genInternalZeroValue returns the Go source literal for the zero value of
// the type spelled s: "nil" for interfaces and byte slices, "false" for bool,
// an empty quoted string for string, and "0" for everything else.
func genInternalZeroValue(s string) string {
	switch s {
	case "interface{}", "interface {}", "[]byte", "[]uint8", "bytes":
		return "nil"
	case "bool":
		return "false"
	case "string":
		return `""`
	}
	return "0"
}
+
// genInternalNonZeroValueIdx counts, per value category, how many non-zero
// literals have been handed out, so successive calls cycle through the rows
// of genInternalNonZeroValueStrs.
var genInternalNonZeroValueIdx [6]uint64

// genInternalNonZeroValueStrs holds sample non-zero source literals, indexed
// as [row][category] where category is: 0=interface{}, 1=bool, 2=string,
// 3=bytes, 4=float/complex, 5=integer (see genInternalNonZeroValue).
var genInternalNonZeroValueStrs = [...][6]string{
	{`"string-is-an-interface-1"`, "true", `"some-string-1"`, `[]byte("some-string-1")`, "11.1", "111"},
	{`"string-is-an-interface-2"`, "false", `"some-string-2"`, `[]byte("some-string-2")`, "22.2", "77"},
	{`"string-is-an-interface-3"`, "true", `"some-string-3"`, `[]byte("some-string-3")`, "33.3e3", "127"},
}

// Note: last numbers must be in range: 0-127 (as they may be put into a int8, uint8, etc)
+
+func genInternalNonZeroValue(s string) string {
+ var i int
+ switch s {
+ case "interface{}", "interface {}":
+ i = 0
+ case "bool":
+ i = 1
+ case "string":
+ i = 2
+ case "bytes", "[]byte", "[]uint8":
+ i = 3
+ case "float32", "float64", "float", "double", "complex", "complex64", "complex128":
+ i = 4
+ default:
+ i = 5
+ }
+ genInternalNonZeroValueIdx[i]++
+ idx := genInternalNonZeroValueIdx[i]
+ slen := uint64(len(genInternalNonZeroValueStrs))
+ return genInternalNonZeroValueStrs[idx%slen][i] // return string, to remove ambiguity
+}
+
// genInternalEncCommandAsString returns the encode statement (as Go source)
// for a value named vname whose type is spelled s. Used for fastpath
// generation only; unknown types fall back to the generic e.encode call.
func genInternalEncCommandAsString(s string, vname string) string {
	switch s {
	case "uint", "uint8", "uint16", "uint32":
		return "e.e.EncodeUint(uint64(" + vname + "))"
	case "uint64":
		return "e.e.EncodeUint(" + vname + ")"
	case "int", "int8", "int16", "int32":
		return "e.e.EncodeInt(int64(" + vname + "))"
	case "int64":
		return "e.e.EncodeInt(" + vname + ")"
	case "[]byte", "[]uint8", "bytes":
		return "e.e.EncodeStringBytesRaw(" + vname + ")"
	case "string":
		return "e.e.EncodeString(" + vname + ")"
	case "float32":
		return "e.e.EncodeFloat32(" + vname + ")"
	case "float64":
		return "e.e.EncodeFloat64(" + vname + ")"
	case "bool":
		return "e.e.EncodeBool(" + vname + ")"
	}
	return "e.encode(" + vname + ")"
}
+
+// genInternalDecCommandAsString returns Go source text for the expression that
+// decodes a value of type s, including overflow-checked narrowing for the
+// smaller int/uint types. Unlike the encode counterpart, an unknown type is a
+// hard error (codegen halts). Registered as the template helper "decmd".
+// Note: used for fastpath only
+func genInternalDecCommandAsString(s string, mapkey bool) string {
+	switch s {
+	case "uint":
+		return "uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))"
+	case "uint8":
+		return "uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))"
+	case "uint16":
+		return "uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))"
+	case "uint32":
+		return "uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))"
+	case "uint64":
+		return "d.d.DecodeUint64()"
+	case "uintptr":
+		return "uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))"
+	case "int":
+		return "int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))"
+	case "int8":
+		return "int8(chkOvf.IntV(d.d.DecodeInt64(), 8))"
+	case "int16":
+		return "int16(chkOvf.IntV(d.d.DecodeInt64(), 16))"
+	case "int32":
+		return "int32(chkOvf.IntV(d.d.DecodeInt64(), 32))"
+	case "int64":
+		return "d.d.DecodeInt64()"
+
+	case "string":
+		// mapkey is currently ignored: stringZC is used for both keys and values.
+		// if mapkey {
+		// 	return "d.stringZC(d.d.DecodeStringAsBytes())"
+		// }
+		// return "string(d.d.DecodeStringAsBytes())"
+		return "d.stringZC(d.d.DecodeStringAsBytes())"
+	case "[]byte", "[]uint8", "bytes":
+		return "d.d.DecodeBytes([]byte{})"
+	case "float32":
+		return "float32(d.decodeFloat32())"
+	case "float64":
+		return "d.d.DecodeFloat64()"
+	case "complex64":
+		return "complex(d.decodeFloat32(), 0)" // streams carry only the real part
+	case "complex128":
+		return "complex(d.d.DecodeFloat64(), 0)"
+	case "bool":
+		return "d.d.DecodeBool()"
+	default:
+		halt.onerror(errors.New("gen internal: unknown type for decode: " + s))
+	}
+	return ""
+}
+
+// func genInternalSortType(s string, elem bool) string {
+// for _, v := range [...]string{
+// "int",
+// "uint",
+// "float",
+// "bool",
+// "string",
+// "bytes", "[]uint8", "[]byte",
+// } {
+// if v == "[]byte" || v == "[]uint8" {
+// v = "bytes"
+// }
+// if strings.HasPrefix(s, v) {
+// if v == "int" || v == "uint" || v == "float" {
+// v += "64"
+// }
+// if elem {
+// return v
+// }
+// return v + "Slice"
+// }
+// }
+// halt.onerror(errors.New("sorttype: unexpected type: " + s))
+// }
+
+// genInternalSortType maps a type name to the identifier used by the generated
+// sort helpers: the element type itself when elem is true, otherwise the
+// corresponding "<type>Slice" name. Registered as the template helper "sorttype".
+func genInternalSortType(s string, elem bool) string {
+	if elem {
+		return s
+	}
+	return s + "Slice"
+}
+
+// MARKER: keep in sync with codecgen/gen.go
+// genStripVendor returns the import path s with any vendoring prefix removed:
+// if s contains "/vendor/", the portion after its last occurrence is returned;
+// if s starts with "vendor/", that prefix is dropped; otherwise s is unchanged.
+func genStripVendor(s string) string {
+	// HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
+	// if s contains /vendor/ OR startsWith vendor/, then return everything after it.
+	const vendorStart = "vendor/"
+	const vendorInline = "/vendor/"
+	if i := strings.LastIndex(s, vendorInline); i >= 0 {
+		s = s[i+len(vendorInline):]
+	} else if strings.HasPrefix(s, vendorStart) {
+		s = s[len(vendorStart):]
+	}
+	return s
+}
+
+// var genInternalMu sync.Mutex
+// genInternalV and genInternalTmplFuncs are populated exactly once by
+// genInternalInit (guarded by genInternalOnce) and then read by genInternalGoFile.
+var genInternalV = genInternal{Version: genVersion}
+var genInternalTmplFuncs template.FuncMap
+var genInternalOnce sync.Once
+
+// genInternalInit populates genInternalV (the data handed to the fast-path
+// templates) and genInternalTmplFuncs (the template helper FuncMap).
+// It computes approximate per-type sizes for the target word size, selects the
+// primitive/slice/map type sets (trimmed to commonly-used types when
+// genFastpathTrimTypes is set), and appends one fastpathGenV entry per
+// primitive, per slice element type, and per map key/value combination.
+func genInternalInit() {
+	wordSizeBytes := int(intBitsize) / 8 // bytes per machine word on the build target
+
+	// typesizes approximates the in-memory size of each supported type;
+	// the sizes are recorded on the generated fastpathGenV entries.
+	typesizes := map[string]int{
+		"interface{}": 2 * wordSizeBytes,
+		"string":      2 * wordSizeBytes,
+		"[]byte":      3 * wordSizeBytes,
+		"uint":        1 * wordSizeBytes,
+		"uint8":       1,
+		"uint16":      2,
+		"uint32":      4,
+		"uint64":      8,
+		"uintptr":     1 * wordSizeBytes,
+		"int":         1 * wordSizeBytes,
+		"int8":        1,
+		"int16":       2,
+		"int32":       4,
+		"int64":       8,
+		"float32":     4,
+		"float64":     8,
+		"complex64":   8,
+		"complex128":  16,
+		"bool":        1,
+	}
+
+	// keep as slice, so it is in specific iteration order.
+	// Initial order was uint64, string, interface{}, int, int64, ...
+
+	var types = [...]string{
+		"interface{}",
+		"string",
+		"[]byte",
+		"float32",
+		"float64",
+		"uint",
+		"uint8",
+		"uint16",
+		"uint32",
+		"uint64",
+		"uintptr",
+		"int",
+		"int8",
+		"int16",
+		"int32",
+		"int64",
+		"bool",
+	}
+
+	var primitivetypes, slicetypes, mapkeytypes, mapvaltypes []string
+
+	// default: every category covers the full type list above
+	primitivetypes = types[:]
+	slicetypes = types[:]
+	mapkeytypes = types[:]
+	mapvaltypes = types[:]
+
+	if genFastpathTrimTypes {
+		// Note: we only create fast-paths for commonly used types.
+		// Consequently, things like int8, uint16, uint, etc are commented out.
+
+		slicetypes = genInternalFastpathSliceTypes()
+		mapkeytypes = genInternalFastpathMapKeyTypes()
+		mapvaltypes = genInternalFastpathMapValueTypes()
+	}
+
+	// var mapkeytypes [len(&types) - 1]string // skip bool
+	// copy(mapkeytypes[:], types[:])
+
+	// var mb []byte
+	// mb = append(mb, '|')
+	// for _, s := range mapkeytypes {
+	// 	mb = append(mb, s...)
+	// 	mb = append(mb, '|')
+	// }
+	// var mapkeytypestr = string(mb)
+
+	var gt = genInternal{Version: genVersion, Formats: genFormats}
+
+	// For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
+
+	for _, s := range primitivetypes {
+		gt.Values = append(gt.Values,
+			fastpathGenV{Primitive: s, Size: typesizes[s], NoCanonical: !genFastpathCanonical})
+	}
+	for _, s := range slicetypes {
+		// if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already.
+		// 	gt.Values = append(gt.Values, fastpathGenV{Elem: s, Size: typesizes[s]})
+		// }
+		gt.Values = append(gt.Values,
+			fastpathGenV{Elem: s, Size: typesizes[s], NoCanonical: !genFastpathCanonical})
+	}
+	for _, s := range mapkeytypes {
+		// if _, ok := typesizes[s]; !ok {
+		// if strings.Contains(mapkeytypestr, "|"+s+"|") {
+		// 	gt.Values = append(gt.Values, fastpathGenV{MapKey: s, Elem: s, Size: 2 * typesizes[s]})
+		// }
+		// one entry per (key type, value type) pair: the full cross product
+		for _, ms := range mapvaltypes {
+			gt.Values = append(gt.Values,
+				fastpathGenV{MapKey: s, Elem: ms, Size: typesizes[s] + typesizes[ms], NoCanonical: !genFastpathCanonical})
+		}
+	}
+
+	// template helpers referenced by the .tmpl files
+	funcs := make(template.FuncMap)
+	// funcs["haspfx"] = strings.HasPrefix
+	funcs["encmd"] = genInternalEncCommandAsString
+	funcs["decmd"] = genInternalDecCommandAsString
+	funcs["zerocmd"] = genInternalZeroValue
+	funcs["nonzerocmd"] = genInternalNonZeroValue
+	funcs["hasprefix"] = strings.HasPrefix
+	funcs["sorttype"] = genInternalSortType
+
+	genInternalV = gt
+	genInternalTmplFuncs = funcs
+}
+
+// genInternalGoFile is used to generate source files from templates.
+// It reads the template text from r, executes it against the shared
+// genInternal data (initialized once via genInternalOnce), runs the result
+// through gofmt (format.Source), and writes the formatted output to w.
+// If formatting fails, the unformatted output is written anyway so the
+// offending generated code can be inspected, and the error is returned.
+func genInternalGoFile(r io.Reader, w io.Writer) (err error) {
+	genInternalOnce.Do(genInternalInit)
+
+	gt := genInternalV
+
+	t := template.New("").Funcs(genInternalTmplFuncs)
+
+	tmplstr, err := ioutil.ReadAll(r)
+	if err != nil {
+		return
+	}
+
+	if t, err = t.Parse(string(tmplstr)); err != nil {
+		return
+	}
+
+	var out bytes.Buffer
+	err = t.Execute(&out, gt)
+	if err != nil {
+		return
+	}
+
+	bout, err := format.Source(out.Bytes())
+	if err != nil {
+		w.Write(out.Bytes()) // write out if error, so we can still see.
+		// w.Write(bout) // write out if error, as much as possible, so we can still see.
+		return
+	}
+	w.Write(bout)
+	return
+}
+
+// genInternalFastpathSliceTypes returns the element types for which slice
+// fast-path functions are generated when genFastpathTrimTypes is set.
+// Less common types are commented out to limit generated-code size.
+func genInternalFastpathSliceTypes() []string {
+	return []string{
+		"interface{}",
+		"string",
+		"[]byte",
+		"float32",
+		"float64",
+		// "uint",
+		// "uint8", // no need for fastpath of []uint8, as it is handled specially
+		"uint8", // keep fast-path, so it doesn't have to go through reflection
+		// "uint16",
+		// "uint32",
+		"uint64",
+		// "uintptr",
+		"int",
+		// "int8",
+		// "int16",
+		"int32", // rune
+		"int64",
+		"bool",
+	}
+}
+
+// genInternalFastpathMapKeyTypes returns the map key types for which map
+// fast-path functions are generated when genFastpathTrimTypes is set.
+func genInternalFastpathMapKeyTypes() []string {
+	return []string{
+		// "interface{}",
+		"string",
+		// "[]byte",
+		// "float32",
+		// "float64",
+		// "uint",
+		"uint8", // byte
+		// "uint16",
+		// "uint32",
+		"uint64", // used for keys
+		// "uintptr",
+		"int", // default number key
+		// "int8",
+		// "int16",
+		"int32", // rune
+		// "int64",
+		// "bool",
+	}
+}
+
+// genInternalFastpathMapValueTypes returns the map value types for which map
+// fast-path functions are generated when genFastpathTrimTypes is set.
+func genInternalFastpathMapValueTypes() []string {
+	return []string{
+		"interface{}",
+		"string",
+		"[]byte",
+		// "uint",
+		"uint8", // byte
+		// "uint16",
+		// "uint32",
+		"uint64", // used for keys, etc
+		// "uintptr",
+		"int", // default number
+		//"int8",
+		// "int16",
+		"int32", // rune (mostly used for unicode)
+		// "int64",
+		// "float32",
+		"float64",
+		"bool",
+	}
+}
+
+// sort-slice ...
+// generates sort implementations for
+// various slice types and combination slice+reflect.Value types.
+//
+// The combination slice+reflect.Value types are used
+// during canonical encode, and the others are used during fast-path
+// encoding of map keys.
+
+// genInternalSortableTypes returns the types
+// that are used for fast-path canonical's encoding of maps.
+//
+// For now, we only support the highest sizes for
+// int64, uint64, float64, bool, string, bytes.
+// It simply reuses the fast-path map-key type set.
+func genInternalSortableTypes() []string {
+	return genInternalFastpathMapKeyTypes()
+}
+
+// genInternalSortablePlusTypes returns the types
+// that are used for reflection-based canonical's encoding of maps.
+//
+// For now, we only support the highest sizes for
+// int64, uint64, float64, bool, string, bytes.
+// "time" and "bytes" are short names, expanded by genTypeForShortName.
+func genInternalSortablePlusTypes() []string {
+	return []string{
+		"string",
+		"float64",
+		"uint64",
+		// "uintptr",
+		"int64",
+		"bool",
+		"time",
+		"bytes",
+	}
+}
+
+// genTypeForShortName expands the short names used in the sort templates
+// ("time" -> "time.Time", "bytes" -> "[]byte"); all other names pass through
+// unchanged. Registered as the template helper "tshort".
+func genTypeForShortName(s string) string {
+	switch s {
+	case "time":
+		return "time.Time"
+	case "bytes":
+		return "[]byte"
+	}
+	return s
+}
+
+// genArgs builds a map from an alternating key/value argument list
+// (key1, val1, key2, val2, ...). Keys must be strings (a non-string key
+// panics on the type assertion). Registered as the template helper "args".
+func genArgs(args ...interface{}) map[string]interface{} {
+	m := make(map[string]interface{}, len(args)/2)
+	for i := 0; i < len(args); {
+		m[args[i].(string)] = args[i+1]
+		i += 2
+	}
+	return m
+}
+
+// genEndsWith reports whether s0 ends with any of the suffixes in sn.
+// Registered as the template helper "endswith".
+func genEndsWith(s0 string, sn ...string) bool {
+	for _, s := range sn {
+		if strings.HasSuffix(s0, s) {
+			return true
+		}
+	}
+	return false
+}
+
+// genCheckErr halts code generation (via halt.onerror) if err is non-nil.
+func genCheckErr(err error) {
+	halt.onerror(err)
+}
+
+// genRunSortTmpl2Go renders the sort-slice template file fnameIn into the Go
+// source file fnameOut, using the sort-specific helper FuncMap. Any failure
+// halts via genCheckErr; on a gofmt failure the raw (unformatted) output is
+// written first so the offending generated code can be inspected.
+func genRunSortTmpl2Go(fnameIn, fnameOut string) {
+	var err error
+
+	// helpers referenced by the sort-slice templates
+	funcs := make(template.FuncMap)
+	funcs["sortables"] = genInternalSortableTypes
+	funcs["sortablesplus"] = genInternalSortablePlusTypes
+	funcs["tshort"] = genTypeForShortName
+	funcs["endswith"] = genEndsWith
+	funcs["args"] = genArgs
+
+	t := template.New("").Funcs(funcs)
+	fin, err := os.Open(fnameIn)
+	genCheckErr(err)
+	defer fin.Close()
+	fout, err := os.Create(fnameOut)
+	genCheckErr(err)
+	defer fout.Close()
+	tmplstr, err := ioutil.ReadAll(fin)
+	genCheckErr(err)
+	t, err = t.Parse(string(tmplstr))
+	genCheckErr(err)
+	var out bytes.Buffer
+	err = t.Execute(&out, 0) // template data is unused; pass a placeholder
+	genCheckErr(err)
+	bout, err := format.Source(out.Bytes())
+	if err != nil {
+		fout.Write(out.Bytes()) // write out if error, so we can still see.
+	}
+	genCheckErr(err)
+	// write out if error, as much as possible, so we can still see.
+	_, err = fout.Write(bout)
+	genCheckErr(err)
+}
+
+// genRunTmpl2Go renders the fast-path template file fnameIn into the Go source
+// file fnameOut via genInternalGoFile. Any failure halts via genCheckErr.
+func genRunTmpl2Go(fnameIn, fnameOut string) {
+	// println("____ " + fnameIn + " --> " + fnameOut + " ______")
+	fin, err := os.Open(fnameIn)
+	genCheckErr(err)
+	defer fin.Close()
+	fout, err := os.Create(fnameOut)
+	genCheckErr(err)
+	defer fout.Close()
+	err = genInternalGoFile(fin, fout)
+	genCheckErr(err)
+}
+
+// --- some methods here for other types, which are only used in codecgen
+
+// root returns the topmost ancestor in the hierarchy (the node whose parent is nil)
+func (path *structFieldInfoPathNode) root() *structFieldInfoPathNode {
+TOP:
+	if path.parent != nil {
+		path = path.parent // walk up until a node with no parent remains
+		goto TOP
+	}
+	return path
+}
+
+// fullpath returns the chain of nodes from the root down to (and including)
+// this node, ordered root-first.
+func (path *structFieldInfoPathNode) fullpath() (p []*structFieldInfoPathNode) {
+	// this method is mostly called by a command-line tool - it's not optimized, and that's ok.
+	// it shouldn't be used in typical runtime use - as it does unnecessary allocation.
+	d := path.depth()
+	p = make([]*structFieldInfoPathNode, d)
+	for d--; d >= 0; d-- { // fill from the leaf backwards toward the root
+		p[d] = path
+		path = path.parent
+	}
+	return
+}
diff --git a/vendor/github.com/ugorji/go/codec/go.mod b/vendor/github.com/ugorji/go/codec/go.mod
new file mode 100644
index 000000000..7fcabb473
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/go.mod
@@ -0,0 +1,5 @@
+module github.com/ugorji/go/codec
+
+go 1.11
+
+require github.com/ugorji/go v1.2.6
diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go
new file mode 100644
index 000000000..25c5b0208
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go
@@ -0,0 +1,15 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build go1.5
+// +build go1.5
+
+package codec
+
+import "reflect"
+
+const reflectArrayOfSupported = true
+
+// reflectArrayOf wraps reflect.ArrayOf (available from go1.5; see build tags above).
+func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
+	return reflect.ArrayOf(count, elem)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go
new file mode 100644
index 000000000..a32dfd7de
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !go1.5
+// +build !go1.5
+
+package codec
+
+import (
+ "errors"
+ "reflect"
+)
+
+const reflectArrayOfSupported = false
+
+var errNoReflectArrayOf = errors.New("codec: reflect.ArrayOf unsupported by this go version")
+
+// reflectArrayOf always panics on this build: reflect.ArrayOf does not exist
+// before go1.5 (see build tags above); callers must check reflectArrayOfSupported.
+func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
+	panic(errNoReflectArrayOf)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_fmt_time_gte_go15.go b/vendor/github.com/ugorji/go/codec/goversion_fmt_time_gte_go15.go
new file mode 100644
index 000000000..688d6b62d
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_fmt_time_gte_go15.go
@@ -0,0 +1,13 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build go1.5
+// +build go1.5
+
+package codec
+
+import "time"
+
+// fmtTime appends t formatted per fmt to b, using time.Time.AppendFormat (go1.5+).
+func fmtTime(t time.Time, fmt string, b []byte) []byte {
+	return t.AppendFormat(b, fmt)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_fmt_time_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_fmt_time_lt_go15.go
new file mode 100644
index 000000000..a1b8b973e
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_fmt_time_lt_go15.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !go1.5
+// +build !go1.5
+
+package codec
+
+import "time"
+
+// fmtTime writes t formatted per fmt into b's backing array (pre-go1.5
+// fallback for time.Time.AppendFormat).
+// NOTE(review): the reslice b[:len(s)] assumes cap(b) >= len(formatted) —
+// it panics otherwise. Callers appear expected to pre-size b; confirm.
+func fmtTime(t time.Time, fmt string, b []byte) []byte {
+	s := t.Format(fmt)
+	b = b[:len(s)]
+	copy(b, s)
+	return b
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go110.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go110.go
new file mode 100644
index 000000000..805303172
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go110.go
@@ -0,0 +1,13 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !go1.10
+// +build !go1.10
+
+package codec
+
+import "reflect"
+
+// makeMapReflect creates a map of type t; the size hint is ignored because
+// reflect.MakeMapWithSize does not exist before go1.10 (see build tags above).
+func makeMapReflect(t reflect.Type, size int) reflect.Value {
+	return reflect.MakeMap(t)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_not_unsafe_gte_go110.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_not_unsafe_gte_go110.go
new file mode 100644
index 000000000..46f787db3
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_not_unsafe_gte_go110.go
@@ -0,0 +1,14 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build go1.10 && (safe || codec.safe || appengine)
+// +build go1.10
+// +build safe codec.safe appengine
+
+package codec
+
+import "reflect"
+
+// makeMapReflect creates a map of type t with the given size hint, via
+// reflect.MakeMapWithSize (safe build; see build tags above).
+func makeMapReflect(t reflect.Type, size int) reflect.Value {
+	return reflect.MakeMapWithSize(t, size)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_unsafe_gte_go110.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_unsafe_gte_go110.go
new file mode 100644
index 000000000..03c069f0f
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_unsafe_gte_go110.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build go1.10 && !safe && !codec.safe && !appengine
+// +build go1.10,!safe,!codec.safe,!appengine
+
+package codec
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// makeMapReflect builds a reflect.Value for a new map of type typ with the
+// given size hint, calling runtime.makemap directly (via go:linkname below)
+// and assembling the reflect.Value fields through unsafe pointer casts.
+func makeMapReflect(typ reflect.Type, size int) (rv reflect.Value) {
+	t := (*unsafeIntf)(unsafe.Pointer(&typ)).ptr // type word of the reflect.Type interface
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	urv.typ = t
+	urv.flag = uintptr(reflect.Map) // flag carries the Kind
+	urv.ptr = makemap(t, size, nil)
+	return
+}
+
+//go:linkname makemap runtime.makemap
+//go:noescape
+func makemap(typ unsafe.Pointer, size int, h unsafe.Pointer) unsafe.Pointer
diff --git a/vendor/github.com/ugorji/go/codec/goversion_maprange_gte_go112.go b/vendor/github.com/ugorji/go/codec/goversion_maprange_gte_go112.go
new file mode 100644
index 000000000..16c8921ba
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_maprange_gte_go112.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build go1.12 && (safe || codec.safe || appengine)
+// +build go1.12
+// +build safe codec.safe appengine
+
+package codec
+
+import "reflect"
+
+// mapIter adapts reflect.MapIter (go1.12+, safe build) to the package's map
+// iteration interface. When values is false, Value() is skipped and returns
+// the zero reflect.Value.
+type mapIter struct {
+	t *reflect.MapIter
+	m reflect.Value
+	values bool
+}
+
+// Next advances the iterator; it returns false once the map is exhausted.
+func (t *mapIter) Next() (r bool) {
+	return t.t.Next()
+}
+
+// Key returns the current map key.
+func (t *mapIter) Key() reflect.Value {
+	return t.t.Key()
+}
+
+// Value returns the current map value, or the zero Value if values is false.
+func (t *mapIter) Value() (r reflect.Value) {
+	if t.values {
+		return t.t.Value()
+	}
+	return
+}
+
+// Done is a no-op; nothing to release in this implementation.
+func (t *mapIter) Done() {}
+
+// mapRange initializes t to iterate over map m. The k and v parameters are
+// unused here; they exist to match the signature of the other build variants.
+func mapRange(t *mapIter, m, k, v reflect.Value, values bool) {
+	*t = mapIter{
+		m: m,
+		t: m.MapRange(),
+		values: values,
+	}
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_maprange_lt_go112.go b/vendor/github.com/ugorji/go/codec/goversion_maprange_lt_go112.go
new file mode 100644
index 000000000..85c8ea72f
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_maprange_lt_go112.go
@@ -0,0 +1,45 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build go1.7 && !go1.12 && (safe || codec.safe || appengine)
+// +build go1.7
+// +build !go1.12
+// +build safe codec.safe appengine
+
+package codec
+
+import "reflect"
+
+// mapIter iterates a map by snapshotting its keys up front (pre-go1.12
+// fallback; reflect.MapIter is unavailable). When values is false, Value()
+// is skipped and returns the zero reflect.Value.
+type mapIter struct {
+	m reflect.Value
+	keys []reflect.Value
+	j int
+	values bool
+}
+
+// Next advances to the next key; j starts at -1 so the first call lands on index 0.
+func (t *mapIter) Next() (r bool) {
+	t.j++
+	return t.j < len(t.keys)
+}
+
+// Key returns the current key from the snapshot.
+func (t *mapIter) Key() reflect.Value {
+	return t.keys[t.j]
+}
+
+// Value looks up the current key in the map, or returns zero if values is false.
+func (t *mapIter) Value() (r reflect.Value) {
+	if t.values {
+		return t.m.MapIndex(t.keys[t.j])
+	}
+	return
+}
+
+// Done is a no-op; nothing to release in this implementation.
+func (t *mapIter) Done() {}
+
+// mapRange initializes t to iterate over map m. The k and v parameters are
+// unused here; they exist to match the signature of the other build variants.
+func mapRange(t *mapIter, m, k, v reflect.Value, values bool) {
+	*t = mapIter{
+		m: m,
+		keys: m.MapKeys(),
+		values: values,
+		j: -1,
+	}
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go
new file mode 100644
index 000000000..c894a30c1
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go
@@ -0,0 +1,9 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build go1.10
+// +build go1.10
+
+package codec
+
+const allowSetUnexportedEmbeddedPtr = false
diff --git a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go
new file mode 100644
index 000000000..1476eac01
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go
@@ -0,0 +1,9 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !go1.10
+// +build !go1.10
+
+package codec
+
+const allowSetUnexportedEmbeddedPtr = true
diff --git a/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go
new file mode 100644
index 000000000..c093eebd2
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go
@@ -0,0 +1,22 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !go1.4
+// +build !go1.4
+
+package codec
+
+import "errors"
+
+// This codec package will only work for go1.4 and above.
+// This is for the following reasons:
+// - go 1.4 was released in 2014
+// - go runtime is written fully in go
+// - interface only holds pointers
+// - reflect.Value is stabilized as 3 words
+
+var errCodecSupportedOnlyFromGo14 = errors.New("codec: go 1.3 and below are not supported")
+
+// init fails fast on unsupported toolchains: this file only builds for !go1.4
+// (see build tags above), so merely importing the package panics there.
+func init() {
+	panic(errCodecSupportedOnlyFromGo14)
+}
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go
new file mode 100644
index 000000000..e1dfce4a7
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build go1.5 && !go1.6
+// +build go1.5,!go1.6
+
+package codec
+
+import "os"
+
+var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go
new file mode 100644
index 000000000..5cb4564d2
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build go1.6 && !go1.7
+// +build go1.6,!go1.7
+
+package codec
+
+import "os"
+
+var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go
new file mode 100644
index 000000000..82ef3ef88
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go
@@ -0,0 +1,9 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build go1.7
+// +build go1.7
+
+package codec
+
+const genCheckVendor = true
diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go
new file mode 100644
index 000000000..10274048a
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go
@@ -0,0 +1,9 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !go1.5
+// +build !go1.5
+
+package codec
+
+var genCheckVendor = false
diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go
new file mode 100644
index 000000000..68025c5d8
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper.go
@@ -0,0 +1,2972 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// Contains code shared by both encode and decode.
+
+// Some shared ideas around encoding/decoding
+// ------------------------------------------
+//
+// If an interface{} is passed, we first do a type assertion to see if it is
+// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
+//
+// If we start with a reflect.Value, we are already in reflect.Value land and
+// will try to grab the function for the underlying Type and directly call that function.
+// This is more performant than calling reflect.Value.Interface().
+//
+// This still helps us bypass many layers of reflection, and give best performance.
+//
+// Containers
+// ------------
+// Containers in the stream are either associative arrays (key-value pairs) or
+// regular arrays (indexed by incrementing integers).
+//
+// Some streams support indefinite-length containers, and use a breaking
+// byte-sequence to denote that the container has come to an end.
+//
+// Some streams also are text-based, and use explicit separators to denote the
+// end/beginning of different values.
+//
+// Philosophy
+// ------------
+// On decode, this codec will update containers appropriately:
+// - If struct, update fields from stream into fields of struct.
+// If field in stream not found in struct, handle appropriately (based on option).
+// If a struct field has no corresponding value in the stream, leave it AS IS.
+// If nil in stream, set value to nil/zero value.
+// - If map, update map from stream.
+// If the stream value is NIL, set the map to nil.
+// - if slice, try to update up to length of array in stream.
+// if container len is less than stream array length,
+// and container cannot be expanded, handled (based on option).
+// This means you can decode 4-element stream array into 1-element array.
+//
+// ------------------------------------
+// On encode, user can specify omitEmpty. This means that the value will be omitted
+// if the zero value. The problem may occur during decode, where omitted values do not affect
+// the value being decoded into. This means that if decoding into a struct with an
+// int field with current value=5, and the field is omitted in the stream, then after
+// decoding, the value will still be 5 (not 0).
+// omitEmpty only works if you guarantee that you always decode into zero-values.
+//
+// ------------------------------------
+// We could have truncated a map to remove keys not available in the stream,
+// or set values in the struct which are not in the stream to their zero values.
+// We decided against it because there is no efficient way to do it.
+// We may introduce it as an option later.
+// However, that will require enabling it for both runtime and code generation modes.
+//
+// To support truncate, we need to do 2 passes over the container:
+// map
+// - first collect all keys (e.g. in k1)
+// - for each key in stream, mark k1 that the key should not be removed
+// - after updating map, do second pass and call delete for all keys in k1 which are not marked
+// struct:
+// - for each field, track the *typeInfo s1
+// - iterate through all s1, and for each one not marked, set value to zero
+// - this involves checking the possible anonymous fields which are nil ptrs.
+// too much work.
+//
+// ------------------------------------------
+// Error Handling is done within the library using panic.
+//
+// This way, the code doesn't have to keep checking if an error has happened,
+// and we don't have to keep sending the error value along with each call
+// or storing it in the En|Decoder and checking it constantly along the way.
+//
+// We considered storing the error in En|Decoder.
+// - once it has its err field set, it cannot be used again.
+// - panicking will be optional, controlled by const flag.
+// - code should always check error first and return early.
+//
+// We eventually decided against it as it makes the code clumsier to always
+// check for these error conditions.
+//
+// ------------------------------------------
+// We use sync.Pool only for the aid of long-lived objects shared across multiple goroutines.
+// Encoder, Decoder, enc|decDriver, reader|writer, etc do not fall into this bucket.
+//
+// Also, GC is much better now, eliminating some of the reasons to use a shared pool structure.
+// Instead, the short-lived objects use free-lists that live as long as the object exists.
+//
+// ------------------------------------------
+// Performance is affected by the following:
+// - Bounds Checking
+// - Inlining
+// - Pointer chasing
+// This package tries hard to manage the performance impact of these.
+//
+// ------------------------------------------
+// To alleviate performance due to pointer-chasing:
+// - Prefer non-pointer values in a struct field
+// - Refer to these directly within helper classes
+// e.g. json.go refers directly to d.d.decRd
+//
+// We made the changes to embed En/Decoder in en/decDriver,
+// but we had to explicitly reference the fields as opposed to using a function
+// to get the better performance that we were looking for.
+// For example, we explicitly call d.d.decRd.fn() instead of d.d.r().fn().
+//
+// ------------------------------------------
+// Bounds Checking
+// - Allow bytesDecReader to incur "bounds check error", and
+// recover that as an io.EOF.
+// This allows the bounds check branch to always be taken by the branch predictor,
+// giving better performance (in theory), while ensuring that the code is shorter.
+//
+// ------------------------------------------
+// Escape Analysis
+// - Prefer to return non-pointers if the value is used right away.
+// Newly allocated values returned as pointers will be heap-allocated as they escape.
+//
+// Prefer functions and methods that
+// - take no parameters and
+// - return no results and
+// - do not allocate.
+// These are optimized by the runtime.
+// For example, in json, we have dedicated functions for ReadMapElemKey, etc
+// which do not delegate to readDelim, as readDelim takes a parameter.
+// The difference in runtime was as much as 5%.
+//
+// ------------------------------------------
+// Handling Nil
+// - In dynamic (reflection) mode, decodeValue and encodeValue handle nil at the top
+// - Consequently, methods used with them as a parent in the chain e.g. kXXX
+// methods do not handle nil.
+// - Fastpath methods also do not handle nil.
+// The switch called in (en|de)code(...) handles it so the dependent calls don't have to.
+// - codecgen will handle nil before calling into the library for further work also.
+//
+// ------------------------------------------
+// Passing reflect.Kind to functions that take a reflect.Value
+// - Note that reflect.Value.Kind() is very cheap, as it's fundamentally a binary AND of 2 numbers
+//
+// ------------------------------------------
+// Transient values during decoding
+//
+// With reflection, the stack is not used. Consequently, values which may be stack-allocated in
+// normal use will cause a heap allocation when using reflection.
+//
+// There are cases where we know that a value is transient, and we just need to decode into it
+// temporarily so we can right away use its value for something else.
+//
+// In these situations, we can elide the heap allocation by being deliberate with use of a pre-cached
+// scratch memory or scratch value.
+//
+// We use this for situations:
+// - decode into a temp value x, and then set x into an interface
+// - decode into a temp value, for use as a map key, to lookup up a map value
+// - decode into a temp value, for use as a map value, to set into a map
+// - decode into a temp value, for sending into a channel
+//
+// By definition, Transient values are NEVER pointer-shaped values,
+// like pointer, func, map, chan. Using transient for pointer-shaped values
+// can lead to data corruption when GC tries to follow what it saw as a pointer at one point.
+//
+// In general, transient values are values which can be decoded as an atomic value
+// using a single call to the decDriver. This naturally includes bool or numeric types.
+//
+// Note that some values which "contain" pointers, specifically string and slice,
+// can also be transient. In the case of string, it is decoded as an atomic value.
+// In the case of a slice, decoding into its elements always uses an addressable
+// value in memory ie we grow the slice, and then decode directly into the memory
+// address corresponding to that index in the slice.
+//
+// To handle these string and slice values, we have to use a scratch value
+// which has the same shape of a string or slice.
+//
+// Consequently, the full range of types which can be transient is:
+// - numbers
+// - bool
+// - string
+// - slice
+//
+// but for string and slice, we MUST use a scratch space with that element
+// being defined as an unsafe.Pointer to start with.
+//
+// We have to be careful with maps. Because we iterate map keys and values during a range,
+// we must have 2 variants of the scratch space/value for maps and keys separately.
+//
+// These are the TransientAddrK and TransientAddr2K methods of decPerType.
+
+import (
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode/utf8"
+)
+
+// if debugging is true, then
+// - within Encode/Decode, do not recover from panic's
+// - etc
+//
+// Note: Negative tests that check for errors will fail, so only use this
+// when debugging, and run only one test at a time preferably.
+//
+// Note: RPC tests especially fail, as they depend on getting the error from an Encode/Decode call.
+const debugging = false
+
+const (
+ // containerLenUnknown is length returned from Read(Map|Array)Len
+	// when a format doesn't know the length a priori.
+ // For example, json doesn't pre-determine the length of a container (sequence/map).
+ containerLenUnknown = -1
+
+ // containerLenNil is length returned from Read(Map|Array)Len
+ // when a 'nil' was encountered in the stream.
+ containerLenNil = math.MinInt32
+
+ // [N]byte is handled by converting to []byte first,
+ // and sending to the dedicated fast-path function for []byte.
+ //
+ // Code exists in case our understanding is wrong.
+ // keep the defensive code behind this flag, so we can remove/hide it if needed.
+ // For now, we enable the defensive code (ie set it to true).
+ handleBytesWithinKArray = true
+
+ // Support encoding.(Binary|Text)(Unm|M)arshaler.
+ // This constant flag will enable or disable it.
+ supportMarshalInterfaces = true
+
+ // bytesFreeListNoCache is used for debugging, when we want to skip using a cache of []byte.
+ bytesFreeListNoCache = false
+
+ // size of the cacheline: defaulting to value for archs: amd64, arm64, 386
+ // should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
+ cacheLineSize = 64
+
+ wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
+ wordSize = wordSizeBits / 8
+
+ // MARKER: determines whether to skip calling fastpath(En|De)codeTypeSwitch.
+ // Calling the fastpath switch in encode() or decode() could be redundant,
+ // as we still have to introspect it again within fnLoad
+ // to determine the function to use for values of that type.
+ skipFastpathTypeSwitchInDirectCall = false
+)
+
+const cpu32Bit = ^uint(0)>>32 == 0
+
+type rkind byte
+
+const (
+ rkindPtr = rkind(reflect.Ptr)
+ rkindString = rkind(reflect.String)
+ rkindChan = rkind(reflect.Chan)
+)
+
+type mapKeyFastKind uint8
+
+const (
+ mapKeyFastKind32 = iota + 1
+ mapKeyFastKind32ptr
+ mapKeyFastKind64
+ mapKeyFastKind64ptr
+ mapKeyFastKindStr
+)
+
+var (
+ // use a global mutex to ensure each Handle is initialized.
+ // We do this, so we don't have to store the basicHandle mutex
+ // directly in BasicHandle, so it can be shallow-copied.
+ handleInitMu sync.Mutex
+
+ must mustHdl
+ halt panicHdl
+
+ digitCharBitset bitset256
+ numCharBitset bitset256
+ whitespaceCharBitset bitset256
+ asciiAlphaNumBitset bitset256
+
+ // numCharWithExpBitset64 bitset64
+ // numCharNoExpBitset64 bitset64
+ // whitespaceCharBitset64 bitset64
+ //
+ // // hasptrBitset sets bit for all kinds which always have internal pointers
+ // hasptrBitset bitset32
+
+ // refBitset sets bit for all kinds which are direct internal references
+ refBitset bitset32
+
+ // isnilBitset sets bit for all kinds which can be compared to nil
+ isnilBitset bitset32
+
+ // numBoolBitset sets bit for all number and bool kinds
+ numBoolBitset bitset32
+
+ // numBoolStrSliceBitset sets bits for all kinds which are numbers, bool, strings and slices
+ numBoolStrSliceBitset bitset32
+
+ // scalarBitset sets bit for all kinds which are scalars/primitives and thus immutable
+ scalarBitset bitset32
+
+ mapKeyFastKindVals [32]mapKeyFastKind
+
+ // codecgen is set to true by codecgen, so that tests, etc can use this information as needed.
+ codecgen bool
+
+ oneByteArr [1]byte
+ zeroByteSlice = oneByteArr[:0:0]
+
+ eofReader devNullReader
+)
+
+var (
+ errMapTypeNotMapKind = errors.New("MapType MUST be of Map Kind")
+ errSliceTypeNotSliceKind = errors.New("SliceType MUST be of Slice Kind")
+
+ errExtFnWriteExtUnsupported = errors.New("BytesExt.WriteExt is not supported")
+ errExtFnReadExtUnsupported = errors.New("BytesExt.ReadExt is not supported")
+ errExtFnConvertExtUnsupported = errors.New("InterfaceExt.ConvertExt is not supported")
+ errExtFnUpdateExtUnsupported = errors.New("InterfaceExt.UpdateExt is not supported")
+
+ errPanicUndefined = errors.New("panic: undefined error")
+
+ errHandleInited = errors.New("cannot modify initialized Handle")
+
+ errNoFormatHandle = errors.New("no handle (cannot identify format)")
+)
+
+var pool4tiload = sync.Pool{
+ New: func() interface{} {
+ return &typeInfoLoad{
+ etypes: make([]uintptr, 0, 4),
+ sfis: make([]structFieldInfo, 0, 4),
+ sfiNames: make(map[string]uint16, 4),
+ }
+ },
+}
+
+func init() {
+ xx := func(f mapKeyFastKind, k ...reflect.Kind) {
+ for _, v := range k {
+ mapKeyFastKindVals[byte(v)&31] = f // 'v % 32' equal to 'v & 31'
+ }
+ }
+
+ var f mapKeyFastKind
+
+ f = mapKeyFastKind64
+ if wordSizeBits == 32 {
+ f = mapKeyFastKind32
+ }
+ xx(f, reflect.Int, reflect.Uint, reflect.Uintptr)
+
+ f = mapKeyFastKind64ptr
+ if wordSizeBits == 32 {
+ f = mapKeyFastKind32ptr
+ }
+ xx(f, reflect.Ptr)
+
+ xx(mapKeyFastKindStr, reflect.String)
+ xx(mapKeyFastKind32, reflect.Uint32, reflect.Int32, reflect.Float32)
+ xx(mapKeyFastKind64, reflect.Uint64, reflect.Int64, reflect.Float64)
+
+ numBoolBitset.
+ set(byte(reflect.Bool)).
+ set(byte(reflect.Int)).
+ set(byte(reflect.Int8)).
+ set(byte(reflect.Int16)).
+ set(byte(reflect.Int32)).
+ set(byte(reflect.Int64)).
+ set(byte(reflect.Uint)).
+ set(byte(reflect.Uint8)).
+ set(byte(reflect.Uint16)).
+ set(byte(reflect.Uint32)).
+ set(byte(reflect.Uint64)).
+ set(byte(reflect.Uintptr)).
+ set(byte(reflect.Float32)).
+ set(byte(reflect.Float64)).
+ set(byte(reflect.Complex64)).
+ set(byte(reflect.Complex128))
+
+ numBoolStrSliceBitset = numBoolBitset
+
+ numBoolStrSliceBitset.
+ set(byte(reflect.String)).
+ set(byte(reflect.Slice))
+
+ scalarBitset = numBoolBitset
+
+ scalarBitset.
+ set(byte(reflect.String))
+
+ // MARKER: reflect.Array is not a scalar, as its contents can be modified.
+
+ refBitset.
+ set(byte(reflect.Map)).
+ set(byte(reflect.Ptr)).
+ set(byte(reflect.Func)).
+ set(byte(reflect.Chan)).
+ set(byte(reflect.UnsafePointer))
+
+ isnilBitset = refBitset
+
+ isnilBitset.
+ set(byte(reflect.Interface)).
+ set(byte(reflect.Slice))
+
+ // hasptrBitset = isnilBitset
+ //
+ // hasptrBitset.
+ // set(byte(reflect.String))
+
+ for i := byte(0); i <= utf8.RuneSelf; i++ {
+ if (i >= '0' && i <= '9') || (i >= 'a' && i <= 'z') || (i >= 'A' && i <= 'Z') {
+ asciiAlphaNumBitset.set(i)
+ }
+ switch i {
+ case ' ', '\t', '\r', '\n':
+ whitespaceCharBitset.set(i)
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ digitCharBitset.set(i)
+ numCharBitset.set(i)
+ case '.', '+', '-':
+ numCharBitset.set(i)
+ case 'e', 'E':
+ numCharBitset.set(i)
+ }
+ }
+}
+
+// driverStateManager supports the runtime state of an (enc|dec)Driver.
+//
+// During a side(En|De)code call, we can capture the state, reset it,
+// and then restore it later to continue the primary encoding/decoding.
+type driverStateManager interface {
+ resetState()
+ captureState() interface{}
+ restoreState(state interface{})
+}
+
+type bdAndBdread struct {
+ bdRead bool
+ bd byte
+}
+
+func (x bdAndBdread) captureState() interface{} { return x }
+func (x *bdAndBdread) resetState() { x.bd, x.bdRead = 0, false }
+func (x *bdAndBdread) reset() { x.resetState() }
+func (x *bdAndBdread) restoreState(v interface{}) { *x = v.(bdAndBdread) }
+
+type clsErr struct {
+ err error // error on closing
+ closed bool // is it closed?
+}
+
+type charEncoding uint8
+
+const (
+ _ charEncoding = iota // make 0 unset
+ cUTF8
+ cUTF16LE
+ cUTF16BE
+ cUTF32LE
+ cUTF32BE
+ // Deprecated: not a true char encoding value
+ cRAW charEncoding = 255
+)
+
+// valueType is the stream type
+type valueType uint8
+
+const (
+ valueTypeUnset valueType = iota
+ valueTypeNil
+ valueTypeInt
+ valueTypeUint
+ valueTypeFloat
+ valueTypeBool
+ valueTypeString
+ valueTypeSymbol
+ valueTypeBytes
+ valueTypeMap
+ valueTypeArray
+ valueTypeTime
+ valueTypeExt
+
+ // valueTypeInvalid = 0xff
+)
+
+var valueTypeStrings = [...]string{
+ "Unset",
+ "Nil",
+ "Int",
+ "Uint",
+ "Float",
+ "Bool",
+ "String",
+ "Symbol",
+ "Bytes",
+ "Map",
+ "Array",
+ "Timestamp",
+ "Ext",
+}
+
+func (x valueType) String() string {
+ if int(x) < len(valueTypeStrings) {
+ return valueTypeStrings[x]
+ }
+ return strconv.FormatInt(int64(x), 10)
+}
+
+// note that containerMapStart and containerArrayStart are not sent.
+// This is because the ReadXXXStart and EncodeXXXStart already does these.
+type containerState uint8
+
+const (
+ _ containerState = iota
+
+ containerMapStart
+ containerMapKey
+ containerMapValue
+ containerMapEnd
+ containerArrayStart
+ containerArrayElem
+ containerArrayEnd
+)
+
+// do not recurse if a containing type refers to an embedded type
+// which refers back to its containing type (via a pointer).
+// The second time this back-reference happens, break out,
+// so as not to cause an infinite loop.
+const rgetMaxRecursion = 2
+
+// fauxUnion is used to keep track of the primitives decoded.
+//
+// Without it, we would have to decode each primitive and wrap it
+// in an interface{}, causing an allocation.
+// In this model, the primitives are decoded in a "pseudo-atomic" fashion,
+// so we can rest assured that no other decoding happens while these
+// primitives are being decoded.
+//
+// maps and arrays are not handled by this mechanism.
+type fauxUnion struct {
+ // r RawExt // used for RawExt, uint, []byte.
+
+ // primitives below
+ u uint64
+ i int64
+ f float64
+ l []byte
+ s string
+
+ // ---- cpu cache line boundary?
+ t time.Time
+ b bool
+
+ // state
+ v valueType
+}
+
+// typeInfoLoad is a transient object used while loading up a typeInfo.
+type typeInfoLoad struct {
+ etypes []uintptr
+ sfis []structFieldInfo
+ sfiNames map[string]uint16
+}
+
+func (x *typeInfoLoad) reset() {
+ x.etypes = x.etypes[:0]
+ x.sfis = x.sfis[:0]
+ for k := range x.sfiNames { // optimized to zero the map
+ delete(x.sfiNames, k)
+ }
+}
+
+// mirror json.Marshaler and json.Unmarshaler here,
+// so we don't import the encoding/json package
+
+type jsonMarshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+type jsonUnmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+type isZeroer interface {
+ IsZero() bool
+}
+
+type isCodecEmptyer interface {
+ IsCodecEmpty() bool
+}
+
+type codecError struct {
+ err error
+ name string
+ pos int
+ encode bool
+}
+
+func (e *codecError) Cause() error {
+ return e.err
+}
+
+func (e *codecError) Error() string {
+ if e.encode {
+ return fmt.Sprintf("%s encode error: %v", e.name, e.err)
+ }
+ return fmt.Sprintf("%s decode error [pos %d]: %v", e.name, e.pos, e.err)
+}
+
+func wrapCodecErr(in error, name string, numbytesread int, encode bool) (out error) {
+ x, ok := in.(*codecError)
+ if ok && x.pos == numbytesread && x.name == name && x.encode == encode {
+ return in
+ }
+ return &codecError{in, name, numbytesread, encode}
+}
+
+var (
+ bigen bigenHelper
+
+ bigenstd = binary.BigEndian
+
+ structInfoFieldName = "_struct"
+
+ mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
+ mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
+ intfSliceTyp = reflect.TypeOf([]interface{}(nil))
+ intfTyp = intfSliceTyp.Elem()
+
+ reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem()
+
+ stringTyp = reflect.TypeOf("")
+ timeTyp = reflect.TypeOf(time.Time{})
+ rawExtTyp = reflect.TypeOf(RawExt{})
+ rawTyp = reflect.TypeOf(Raw{})
+ uintptrTyp = reflect.TypeOf(uintptr(0))
+ uint8Typ = reflect.TypeOf(uint8(0))
+ uint8SliceTyp = reflect.TypeOf([]uint8(nil))
+ uintTyp = reflect.TypeOf(uint(0))
+ intTyp = reflect.TypeOf(int(0))
+
+ mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
+
+ binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
+ binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
+
+ textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+ jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
+ jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
+
+ selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()
+ missingFielderTyp = reflect.TypeOf((*MissingFielder)(nil)).Elem()
+ iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem()
+ isCodecEmptyerTyp = reflect.TypeOf((*isCodecEmptyer)(nil)).Elem()
+ isSelferViaCodecgenerTyp = reflect.TypeOf((*isSelferViaCodecgener)(nil)).Elem()
+
+ uint8TypId = rt2id(uint8Typ)
+ uint8SliceTypId = rt2id(uint8SliceTyp)
+ rawExtTypId = rt2id(rawExtTyp)
+ rawTypId = rt2id(rawTyp)
+ intfTypId = rt2id(intfTyp)
+ timeTypId = rt2id(timeTyp)
+ stringTypId = rt2id(stringTyp)
+
+ mapStrIntfTypId = rt2id(mapStrIntfTyp)
+ mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
+ intfSliceTypId = rt2id(intfSliceTyp)
+ // mapBySliceTypId = rt2id(mapBySliceTyp)
+
+ intBitsize = uint8(intTyp.Bits())
+ uintBitsize = uint8(uintTyp.Bits())
+
+ // bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+ bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+ chkOvf checkOverflow
+)
+
+var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
+
+// SelfExt is a sentinel extension signifying that types
+// registered with it SHOULD be encoded and decoded
+// based on the native mode of the format.
+//
+// This allows users to define a tag for an extension,
+// but signify that the types should be encoded/decoded as the native encoding.
+// This way, users need not also define how to encode or decode the extension.
+var SelfExt = &extFailWrapper{}
+
+// Selfer defines methods by which a value can encode or decode itself.
+//
+// Any type which implements Selfer will be able to encode or decode itself.
+// Consequently, during (en|de)code, this takes precedence over
+// (text|binary)(M|Unm)arshal or extension support.
+//
+// By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself.
+// If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error.
+// For example, the snippet below will cause such an error.
+// type testSelferRecur struct{}
+// func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) }
+// func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) }
+//
+// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
+// This is because, during each decode, we first check if the next set of bytes
+// represent nil, and if so, we just set the value to nil.
+type Selfer interface {
+ CodecEncodeSelf(*Encoder)
+ CodecDecodeSelf(*Decoder)
+}
+
+type isSelferViaCodecgener interface {
+ codecSelferViaCodecgen()
+}
+
+// MissingFielder defines the interface allowing structs to internally decode or encode
+// values which do not map to struct fields.
+//
+// We expect that this interface is bound to a pointer type (so the mutation function works).
+//
+// A use-case is if a version of a type unexports a field, but you want compatibility between
+// both versions during encoding and decoding.
+//
+// Note that the interface is completely ignored during codecgen.
+type MissingFielder interface {
+ // CodecMissingField is called to set a missing field and value pair.
+ //
+ // It returns true if the missing field was set on the struct.
+ CodecMissingField(field []byte, value interface{}) bool
+
+ // CodecMissingFields returns the set of fields which are not struct fields.
+ //
+ // Note that the returned map may be mutated by the caller.
+ CodecMissingFields() map[string]interface{}
+}
+
+// MapBySlice is a tag interface that denotes the slice or array value should encode as a map
+// in the stream, and can be decoded from a map in the stream.
+//
+// The slice or array must contain a sequence of key-value pairs.
+// The length of the slice or array must be even (fully divisible by 2).
+//
+// This affords storing a map in a specific sequence in the stream.
+//
+// Example usage:
+// type T1 []string // or []int or []Point or any other "slice" type
+//      func (_ T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
+// type T2 struct { KeyValues T1 }
+//
+// var kvs = []string{"one", "1", "two", "2", "three", "3"}
+// var v2 = T2{ KeyValues: T1(kvs) }
+// // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} }
+//
+// The support of MapBySlice affords the following:
+// - A slice or array type which implements MapBySlice will be encoded as a map
+// - A slice can be decoded from a map in the stream
+type MapBySlice interface {
+ MapBySlice()
+}
+
+// basicHandleRuntimeState holds onto all BasicHandle runtime and cached config information.
+//
+// Storing this outside BasicHandle allows us to create shallow copies of a Handle,
+// which can be used e.g. when we need to modify config fields temporarily.
+// Shallow copies are used within tests, so we can modify some config fields for a test
+// temporarily when running tests in parallel, without running the risk that a test executing
+// in parallel with other tests sees transient modified values not meant for it.
+type basicHandleRuntimeState struct {
+ // these are used during runtime.
+ // At init time, they should have nothing in them.
+ rtidFns atomicRtidFnSlice
+ rtidFnsNoExt atomicRtidFnSlice
+
+ // Note: basicHandleRuntimeState is not comparable, due to these slices here (extHandle, intf2impls).
+ // If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
+	// These slices are used all the time, so keep as slices (not pointers).
+
+ extHandle
+
+ intf2impls
+
+ mu sync.Mutex
+
+ jsonHandle bool
+ binaryHandle bool
+
+ // timeBuiltin is initialized from TimeNotBuiltin, and used internally.
+ // once initialized, it cannot be changed, as the function for encoding/decoding time.Time
+ // will have been cached and the TimeNotBuiltin value will not be consulted thereafter.
+ timeBuiltin bool
+ _ bool // padding
+}
+
+// BasicHandle encapsulates the common options and extension functions.
+//
+// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
+type BasicHandle struct {
+ // BasicHandle is always a part of a different type.
+ // It doesn't have to fit into it own cache lines.
+
+ // TypeInfos is used to get the type info for any type.
+ //
+ // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
+ TypeInfos *TypeInfos
+
+ *basicHandleRuntimeState
+
+ // ---- cache line
+
+ DecodeOptions
+
+ // ---- cache line
+
+ EncodeOptions
+
+ RPCOptions
+
+ // TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
+ //
+ // All Handlers should know how to encode/decode time.Time as part of the core
+ // format specification, or as a standard extension defined by the format.
+ //
+ // However, users can elect to handle time.Time as a custom extension, or via the
+ // standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface.
+ // To elect this behavior, users can set TimeNotBuiltin=true.
+ //
+ // Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior
+ // (for Cbor and Msgpack), where time.Time was not a builtin supported type.
+ //
+ // Note: DO NOT CHANGE AFTER FIRST USE.
+ //
+ // Once a Handle has been initialized (used), do not modify this option. It will be ignored.
+ TimeNotBuiltin bool
+
+ // ExplicitRelease configures whether Release() is implicitly called after an encode or
+ // decode call.
+ //
+ // If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...)
+ // on it or calling (Must)Encode repeatedly into a given []byte or io.Writer,
+ // then you do not want it to be implicitly closed after each Encode/Decode call.
+ // Doing so will unnecessarily return resources to the shared pool, only for you to
+ // grab them right after again to do another Encode/Decode call.
+ //
+ // Instead, you configure ExplicitRelease=true, and you explicitly call Release() when
+ // you are truly done.
+ //
+ // As an alternative, you can explicitly set a finalizer - so its resources
+ // are returned to the shared pool before it is garbage-collected. Do it as below:
+ // runtime.SetFinalizer(e, (*Encoder).Release)
+ // runtime.SetFinalizer(d, (*Decoder).Release)
+ //
+	// Deprecated: This is no longer used as pools are only used for long-lived objects
+ // which are shared across goroutines.
+ // Setting this value has no effect. It is maintained for backward compatibility.
+ ExplicitRelease bool
+
+ // ---- cache line
+ inited uint32 // holds if inited, and also handle flags (binary encoding, json handler, etc)
+
+}
+
+// initHandle does a one-time initialization of the handle.
+// After this is run, do not modify the Handle, as some modifications are ignored
+// e.g. extensions, registered interfaces, TimeNotBuiltIn, etc
+func initHandle(hh Handle) {
+ x := hh.getBasicHandle()
+
+ // MARKER: We need to simulate once.Do, to ensure no data race within the block.
+ // Consequently, below would not work.
+ //
+ // if atomic.CompareAndSwapUint32(&x.inited, 0, 1) {
+ // x.be = hh.isBinary()
+ // x.js = hh.isJson
+ // x.n = hh.Name()[0]
+ // }
+
+ // simulate once.Do using our own stored flag and mutex as a CompareAndSwap
+ // is not sufficient, since a race condition can occur within init(Handle) function.
+ // init is made noinline, so that this function can be inlined by its caller.
+ if atomic.LoadUint32(&x.inited) == 0 {
+ x.initHandle(hh)
+ }
+}
+
+func (x *BasicHandle) basicInit() {
+ x.rtidFns.store(nil)
+ x.rtidFnsNoExt.store(nil)
+ x.timeBuiltin = !x.TimeNotBuiltin
+}
+
+func (x *BasicHandle) init() {}
+
+func (x *BasicHandle) isInited() bool {
+ return atomic.LoadUint32(&x.inited) != 0
+}
+
+// clearInited: DANGEROUS - only use in testing, etc
+func (x *BasicHandle) clearInited() {
+ atomic.StoreUint32(&x.inited, 0)
+}
+
+// TimeBuiltin returns whether time.Time OOTB support is used,
+// based on the initial configuration of TimeNotBuiltin
+func (x *basicHandleRuntimeState) TimeBuiltin() bool {
+ return x.timeBuiltin
+}
+
+func (x *basicHandleRuntimeState) isJs() bool {
+ return x.jsonHandle
+}
+
+func (x *basicHandleRuntimeState) isBe() bool {
+ return x.binaryHandle
+}
+
+// setExt registers ext under tag for the named (non-pointer, non-interface)
+// type rt, updating an existing registration in place if one exists.
+// Pointer types are dereferenced to their base type first.
+func (x *basicHandleRuntimeState) setExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
+	rk := rt.Kind()
+	for rk == reflect.Ptr {
+		rt = rt.Elem()
+		rk = rt.Kind()
+	}
+
+	if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
+		return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
+	}
+
+	rtid := rt2id(rt)
+	switch rtid {
+	case timeTypId, rawTypId, rawExtTypId:
+		// these are all natively supported type, so they cannot have an extension.
+		// However, we do not return an error for these, as we do not document that.
+		// Instead, we silently treat as a no-op, and return.
+		return
+	}
+	// update in place if the type is already registered
+	for i := range x.extHandle {
+		v := &x.extHandle[i]
+		if v.rtid == rtid {
+			v.tag, v.ext = tag, ext
+			return
+		}
+	}
+	// also record the pointer-type id, so getExt can match either T or *T
+	rtidptr := rt2id(reflect.PtrTo(rt))
+	x.extHandle = append(x.extHandle, extTypeTagFn{rtid, rtidptr, rt, tag, ext})
+	return
+}
+
+// initHandle should be called only from codec.initHandle global function.
+// make it uninlineable, as it is called at most once for each handle.
+//
+// This is the slow path of a double-checked lock: the caller already
+// observed inited==0 via an atomic load; we re-check under handleInitMu
+// so that exactly one goroutine performs the initialization.
+//go:noinline
+func (x *BasicHandle) initHandle(hh Handle) {
+	handleInitMu.Lock()
+	defer handleInitMu.Unlock() // use defer, as halt may panic below
+	if x.inited == 0 {
+		if x.basicHandleRuntimeState == nil {
+			x.basicHandleRuntimeState = new(basicHandleRuntimeState)
+		}
+		x.jsonHandle = hh.isJson()
+		x.binaryHandle = hh.isBinary()
+		// ensure MapType and SliceType are of correct type
+		if x.MapType != nil && x.MapType.Kind() != reflect.Map {
+			halt.onerror(errMapTypeNotMapKind)
+		}
+		if x.SliceType != nil && x.SliceType.Kind() != reflect.Slice {
+			halt.onerror(errSliceTypeNotSliceKind)
+		}
+		x.basicInit()
+		hh.init()
+		// store inited last, so readers that observe inited!=0 also
+		// observe the fully initialized state written above
+		atomic.StoreUint32(&x.inited, 1)
+	}
+}
+
+// getBasicHandle returns the BasicHandle itself, satisfying the Handle interface.
+func (x *BasicHandle) getBasicHandle() *BasicHandle {
+	return x
+}
+
+// typeInfos returns the configured TypeInfos, falling back to the
+// package-level default when none was set on the handle.
+func (x *BasicHandle) typeInfos() *TypeInfos {
+	if x.TypeInfos != nil {
+		return x.TypeInfos
+	}
+	return defTypeInfos
+}
+
+// getTypeInfo returns the cached (or freshly computed) typeInfo for rt.
+func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+	return x.typeInfos().get(rtid, rt)
+}
+
+// findRtidFn binary-searches s (kept sorted by rtid) for rtid.
+// It returns the insertion index i, and fn non-nil iff an exact match exists.
+func findRtidFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) {
+	// binary search. adapted from sort/search.go.
+	// Note: we use goto (instead of for loop) so this can be inlined.
+
+	// h, i, j := 0, 0, len(s)
+	var h uint // var h, i uint
+	var j = uint(len(s))
+LOOP:
+	if i < j {
+		h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
+		if s[h].rtid < rtid {
+			i = h + 1
+		} else {
+			j = h
+		}
+		goto LOOP
+	}
+	if i < uint(len(s)) && s[i].rtid == rtid {
+		fn = s[i].fn
+	}
+	return
+}
+
+// fn returns the codecFn for rt, honoring registered extensions.
+func (x *BasicHandle) fn(rt reflect.Type) (fn *codecFn) {
+	return x.fnVia(rt, x.typeInfos(), &x.rtidFns, x.CheckCircularRef, true)
+}
+
+// fnNoExt returns the codecFn for rt, bypassing registered extensions.
+func (x *BasicHandle) fnNoExt(rt reflect.Type) (fn *codecFn) {
+	return x.fnVia(rt, x.typeInfos(), &x.rtidFnsNoExt, x.CheckCircularRef, false)
+}
+
+// fnVia returns the codecFn for rt, consulting the cache held in fs and
+// computing+caching it (under x.mu) on a miss. The cache is a sorted slice
+// maintained copy-on-write so lock-free readers are always safe.
+func (x *basicHandleRuntimeState) fnVia(rt reflect.Type, tinfos *TypeInfos, fs *atomicRtidFnSlice, checkCircularRef, checkExt bool) (fn *codecFn) {
+	rtid := rt2id(rt)
+	sp := fs.load()
+	if sp != nil {
+		if _, fn = findRtidFn(sp, rtid); fn != nil {
+			return
+		}
+	}
+
+	// compute outside the lock; duplicated work on a race is acceptable
+	fn = x.fnLoad(rt, rtid, tinfos, checkCircularRef, checkExt)
+	x.mu.Lock()
+	sp = fs.load()
+	// since this is an atomic load/store, we MUST use a different array each time,
+	// else we have a data race when a store is happening simultaneously with a findRtidFn call.
+	if sp == nil {
+		sp = []codecRtidFn{{rtid, fn}}
+		fs.store(sp)
+	} else {
+		idx, fn2 := findRtidFn(sp, rtid)
+		if fn2 == nil {
+			// insert at idx to keep the slice sorted by rtid
+			sp2 := make([]codecRtidFn, len(sp)+1)
+			copy(sp2[idx+1:], sp[idx:])
+			copy(sp2, sp[:idx])
+			sp2[idx] = codecRtidFn{rtid, fn}
+			fs.store(sp2)
+		}
+	}
+	x.mu.Unlock()
+	return
+}
+
+// fnloadFastpathUnderlying looks up the fastpath entry for the underlying
+// type of a named slice/map/array. It returns nil,nil when no fastpath
+// function exists. For arrays, u is the equivalently-sized array type
+// (fastpath entries themselves are keyed by slice/map types).
+func fnloadFastpathUnderlying(ti *typeInfo) (f *fastpathE, u reflect.Type) {
+	var rtid uintptr
+	var idx int
+	rtid = rt2id(ti.fastpathUnderlying)
+	idx = fastpathAvIndex(rtid)
+	if idx == -1 {
+		return
+	}
+	f = &fastpathAv[idx]
+	if uint8(reflect.Array) == ti.kind {
+		u = reflectArrayOf(ti.rt.Len(), ti.elem)
+	} else {
+		u = f.rt
+	}
+	return
+}
+
+// fnLoad computes the encode/decode function pair (codecFn) for rt.
+// Dispatch priority: built-in types (time/Raw/RawExt) -> registered
+// extensions -> Selfer -> Binary/Json/Text (un)marshaler interfaces ->
+// fastpath functions for map/slice/array -> reflection-based kind handlers.
+func (x *basicHandleRuntimeState) fnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, checkCircularRef, checkExt bool) (fn *codecFn) {
+	fn = new(codecFn)
+	fi := &(fn.i)
+	ti := tinfos.get(rtid, rt)
+	fi.ti = ti
+	rk := reflect.Kind(ti.kind)
+
+	// anything can be an extension except the built-in ones: time, raw and rawext.
+	// ensure we check for these types, then if extension, before checking if
+	// it implements one of the pre-declared interfaces.
+
+	fi.addrDf = true
+	// fi.addrEf = true
+
+	if rtid == timeTypId && x.timeBuiltin {
+		fn.fe = (*Encoder).kTime
+		fn.fd = (*Decoder).kTime
+	} else if rtid == rawTypId {
+		fn.fe = (*Encoder).raw
+		fn.fd = (*Decoder).raw
+	} else if rtid == rawExtTypId {
+		fn.fe = (*Encoder).rawExt
+		fn.fd = (*Decoder).rawExt
+		fi.addrD = true
+		fi.addrE = true
+	} else if xfFn := x.getExt(rtid, checkExt); xfFn != nil {
+		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
+		fn.fe = (*Encoder).ext
+		fn.fd = (*Decoder).ext
+		fi.addrD = true
+		if rk == reflect.Struct || rk == reflect.Array {
+			fi.addrE = true
+		}
+	} else if (ti.flagSelfer || ti.flagSelferPtr) &&
+		!(checkCircularRef && ti.flagSelferViaCodecgen && ti.kind == byte(reflect.Struct)) {
+		// do not use Selfer generated by codecgen if it is a struct and CheckCircularRef=true
+		fn.fe = (*Encoder).selferMarshal
+		fn.fd = (*Decoder).selferUnmarshal
+		fi.addrD = ti.flagSelferPtr
+		fi.addrE = ti.flagSelferPtr
+	} else if supportMarshalInterfaces && x.isBe() &&
+		(ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) &&
+		(ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) {
+		fn.fe = (*Encoder).binaryMarshal
+		fn.fd = (*Decoder).binaryUnmarshal
+		fi.addrD = ti.flagBinaryUnmarshalerPtr
+		fi.addrE = ti.flagBinaryMarshalerPtr
+	} else if supportMarshalInterfaces && !x.isBe() && x.isJs() &&
+		(ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) &&
+		(ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) {
+		//If JSON, we should check JSONMarshal before textMarshal
+		fn.fe = (*Encoder).jsonMarshal
+		fn.fd = (*Decoder).jsonUnmarshal
+		fi.addrD = ti.flagJsonUnmarshalerPtr
+		fi.addrE = ti.flagJsonMarshalerPtr
+	} else if supportMarshalInterfaces && !x.isBe() &&
+		(ti.flagTextMarshaler || ti.flagTextMarshalerPtr) &&
+		(ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) {
+		fn.fe = (*Encoder).textMarshal
+		fn.fd = (*Decoder).textUnmarshal
+		fi.addrD = ti.flagTextUnmarshalerPtr
+		fi.addrE = ti.flagTextMarshalerPtr
+	} else {
+		if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) {
+			// by default (without using unsafe),
+			// if an array is not addressable, converting from an array to a slice
+			// requires an allocation (see helper_not_unsafe.go: func rvGetSlice4Array).
+			//
+			// (Non-addressable arrays mostly occur as keys/values from a map).
+			//
+			// However, fastpath functions are mostly for slices of numbers or strings,
+			// which are small by definition and thus allocation should be fast/cheap in time.
+			//
+			// Consequently, the value of doing this quick allocation to elide the overhead cost of
+			// non-optimized (not-unsafe) reflection is a fair price.
+			var rtid2 uintptr
+			if !ti.flagHasPkgPath { // un-named type (slice or map or array)
+				rtid2 = rtid
+				if rk == reflect.Array {
+					rtid2 = rt2id(ti.key) // ti.key for arrays = reflect.SliceOf(ti.elem)
+				}
+				if idx := fastpathAvIndex(rtid2); idx != -1 {
+					fn.fe = fastpathAv[idx].encfn
+					fn.fd = fastpathAv[idx].decfn
+					fi.addrD = true
+					fi.addrDf = false
+					if rk == reflect.Array {
+						fi.addrD = false // decode directly into array value (slice made from it)
+					}
+				}
+			} else { // named type (with underlying type of map or slice or array)
+				// try to use mapping for underlying type
+				xfe, xrt := fnloadFastpathUnderlying(ti)
+				if xfe != nil {
+					xfnf := xfe.encfn
+					xfnf2 := xfe.decfn
+					if rk == reflect.Array {
+						fi.addrD = false // decode directly into array value (slice made from it)
+						fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
+							xfnf2(d, xf, rvConvert(xrv, xrt))
+						}
+					} else {
+						fi.addrD = true
+						fi.addrDf = false // meaning it can be an address(ptr) or a value
+						xptr2rt := reflect.PtrTo(xrt)
+						fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
+							if xrv.Kind() == reflect.Ptr {
+								xfnf2(d, xf, rvConvert(xrv, xptr2rt))
+							} else {
+								xfnf2(d, xf, rvConvert(xrv, xrt))
+							}
+						}
+					}
+					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
+						xfnf(e, xf, rvConvert(xrv, xrt))
+					}
+				}
+			}
+		}
+		// no fastpath applied: fall back to the reflection-based kind handlers
+		if fn.fe == nil && fn.fd == nil {
+			switch rk {
+			case reflect.Bool:
+				fn.fe = (*Encoder).kBool
+				fn.fd = (*Decoder).kBool
+			case reflect.String:
+				// Do not use different functions based on StringToRaw option, as that will statically
+				// set the function for a string type, and if the Handle is modified thereafter,
+				// behaviour is non-deterministic
+				// i.e. DO NOT DO:
+				//   if x.StringToRaw {
+				//   	fn.fe = (*Encoder).kStringToRaw
+				//   } else {
+				//   	fn.fe = (*Encoder).kStringEnc
+				//   }
+
+				fn.fe = (*Encoder).kString
+				fn.fd = (*Decoder).kString
+			case reflect.Int:
+				fn.fd = (*Decoder).kInt
+				fn.fe = (*Encoder).kInt
+			case reflect.Int8:
+				fn.fe = (*Encoder).kInt8
+				fn.fd = (*Decoder).kInt8
+			case reflect.Int16:
+				fn.fe = (*Encoder).kInt16
+				fn.fd = (*Decoder).kInt16
+			case reflect.Int32:
+				fn.fe = (*Encoder).kInt32
+				fn.fd = (*Decoder).kInt32
+			case reflect.Int64:
+				fn.fe = (*Encoder).kInt64
+				fn.fd = (*Decoder).kInt64
+			case reflect.Uint:
+				fn.fd = (*Decoder).kUint
+				fn.fe = (*Encoder).kUint
+			case reflect.Uint8:
+				fn.fe = (*Encoder).kUint8
+				fn.fd = (*Decoder).kUint8
+			case reflect.Uint16:
+				fn.fe = (*Encoder).kUint16
+				fn.fd = (*Decoder).kUint16
+			case reflect.Uint32:
+				fn.fe = (*Encoder).kUint32
+				fn.fd = (*Decoder).kUint32
+			case reflect.Uint64:
+				fn.fe = (*Encoder).kUint64
+				fn.fd = (*Decoder).kUint64
+			case reflect.Uintptr:
+				fn.fe = (*Encoder).kUintptr
+				fn.fd = (*Decoder).kUintptr
+			case reflect.Float32:
+				fn.fe = (*Encoder).kFloat32
+				fn.fd = (*Decoder).kFloat32
+			case reflect.Float64:
+				fn.fe = (*Encoder).kFloat64
+				fn.fd = (*Decoder).kFloat64
+			case reflect.Complex64:
+				fn.fe = (*Encoder).kComplex64
+				fn.fd = (*Decoder).kComplex64
+			case reflect.Complex128:
+				fn.fe = (*Encoder).kComplex128
+				fn.fd = (*Decoder).kComplex128
+			case reflect.Chan:
+				fn.fe = (*Encoder).kChan
+				fn.fd = (*Decoder).kChan
+			case reflect.Slice:
+				fn.fe = (*Encoder).kSlice
+				fn.fd = (*Decoder).kSlice
+			case reflect.Array:
+				fi.addrD = false // decode directly into array value (slice made from it)
+				fn.fe = (*Encoder).kArray
+				fn.fd = (*Decoder).kArray
+			case reflect.Struct:
+				if ti.anyOmitEmpty ||
+					ti.flagMissingFielder ||
+					ti.flagMissingFielderPtr {
+					fn.fe = (*Encoder).kStruct
+				} else {
+					fn.fe = (*Encoder).kStructNoOmitempty
+				}
+				fn.fd = (*Decoder).kStruct
+			case reflect.Map:
+				fn.fe = (*Encoder).kMap
+				fn.fd = (*Decoder).kMap
+			case reflect.Interface:
+				// encode: reflect.Interface are handled already by preEncodeValue
+				fn.fd = (*Decoder).kInterface
+				fn.fe = (*Encoder).kErr
+			default:
+				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
+				fn.fe = (*Encoder).kErr
+				fn.fd = (*Decoder).kErr
+			}
+		}
+	}
+	return
+}
+
+// Handle defines a specific encoding format. It also stores any runtime state
+// used during an Encoding or Decoding session e.g. stored state about Types, etc.
+//
+// Once a handle is configured, it can be shared across multiple Encoders and Decoders.
+//
+// Note that a Handle is NOT safe for concurrent modification.
+//
+// A Handle also should not be modified after it is configured and has
+// been used at least once. This is because stored state may be out of sync with the
+// new configuration, and a data race can occur when multiple goroutines access it.
+// i.e. multiple Encoders or Decoders in different goroutines.
+//
+// Consequently, the typical usage model is that a Handle is pre-configured
+// before first time use, and not modified while in use.
+// Such a pre-configured Handle is safe for concurrent access.
+type Handle interface {
+	Name() string
+	// getBasicHandle returns the embedded BasicHandle of this format handle
+	getBasicHandle() *BasicHandle
+	newEncDriver() encDriver
+	newDecDriver() decDriver
+	isBinary() bool
+	isJson() bool // json is special for now, so track it
+	// desc describes the current byte descriptor, or returns "unknown[XXX]" if not understood.
+	desc(bd byte) string
+	// init initializes the handle based on handle-specific info (beyond what is in BasicHandle)
+	init()
+}
+
+// Raw represents raw formatted bytes.
+// We "blindly" store it during encode and retrieve the raw bytes during decode.
+// Note: it is dangerous during encode, so we may gate the behaviour
+// behind an Encode flag which must be explicitly set.
+type Raw []byte
+
+// RawExt represents raw unprocessed extension data.
+// Some codecs will decode extension data as a *RawExt
+// if there is no registered extension for the tag.
+//
+// Only one of Data or Value is nil.
+// If Data is nil, then the content of the RawExt is in the Value.
+type RawExt struct {
+	// Tag is the extension tag this raw data was stored under.
+	Tag uint64
+	// Data is the []byte which represents the raw ext. If nil, ext is exposed in Value.
+	// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types
+	Data []byte
+	// Value represents the extension, if Data is nil.
+	// Value is used by codecs (e.g. cbor, json) which leverage the format to do
+	// custom serialization of the types.
+	Value interface{}
+}
+
+// setData stores xbs into re.Data: aliasing the input directly when
+// zerocopy is true, else copying it into re.Data's reused backing array.
+func (re *RawExt) setData(xbs []byte, zerocopy bool) {
+	if !zerocopy {
+		re.Data = append(re.Data[:0], xbs...)
+		return
+	}
+	re.Data = xbs
+}
+
+// BytesExt handles custom (de)serialization of types to/from []byte.
+// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
+type BytesExt interface {
+	// WriteExt converts a value to a []byte.
+	//
+	// Note: v is a pointer iff the registered extension type is a struct or array kind.
+	WriteExt(v interface{}) []byte
+
+	// ReadExt updates a value from a []byte.
+	//
+	// Note: dst is always a pointer kind to the registered extension type.
+	ReadExt(dst interface{}, src []byte)
+}
+
+// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
+// The Encoder or Decoder will then handle the further (de)serialization of that known type.
+//
+// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types.
+type InterfaceExt interface {
+	// ConvertExt converts a value into a simpler interface for easy encoding
+	// e.g. convert time.Time to int64.
+	//
+	// Note: v is a pointer iff the registered extension type is a struct or array kind.
+	ConvertExt(v interface{}) interface{}
+
+	// UpdateExt updates a value from a simpler interface for easy decoding
+	// e.g. convert int64 to time.Time.
+	//
+	// Note: dst is always a pointer kind to the registered extension type.
+	UpdateExt(dst interface{}, src interface{})
+}
+
+// Ext handles custom (de)serialization of custom types / extensions.
+// It is the union of the byte-oriented and interface-oriented extension APIs.
+type Ext interface {
+	BytesExt
+	InterfaceExt
+}
+
+// addExtWrapper is a wrapper implementation to support former AddExt exported method.
+// It adapts a pair of encode/decode functions into the full Ext interface.
+type addExtWrapper struct {
+	encFn func(reflect.Value) ([]byte, error)
+	decFn func(reflect.Value, []byte) error
+}
+
+// WriteExt encodes v via the wrapped encode function, halting on error.
+func (x addExtWrapper) WriteExt(v interface{}) []byte {
+	bs, err := x.encFn(reflect.ValueOf(v))
+	halt.onerror(err)
+	return bs
+}
+
+// ReadExt decodes bs into v via the wrapped decode function, halting on error.
+func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
+	halt.onerror(x.decFn(reflect.ValueOf(v), bs))
+}
+
+// ConvertExt satisfies InterfaceExt by delegating to WriteExt.
+func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
+	return x.WriteExt(v)
+}
+
+// UpdateExt satisfies InterfaceExt by delegating to ReadExt.
+func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
+	x.ReadExt(dest, v.([]byte))
+}
+
+// bytesExtFailer implements BytesExt by halting on every call.
+// It is embedded where the byte-oriented API is intentionally unsupported.
+type bytesExtFailer struct{}
+
+func (bytesExtFailer) WriteExt(v interface{}) []byte {
+	halt.onerror(errExtFnWriteExtUnsupported)
+	return nil
+}
+func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
+	halt.onerror(errExtFnReadExtUnsupported)
+}
+
+// interfaceExtFailer implements InterfaceExt by halting on every call.
+type interfaceExtFailer struct{}
+
+func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
+	halt.onerror(errExtFnConvertExtUnsupported)
+	return nil
+}
+func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
+	halt.onerror(errExtFnUpdateExtUnsupported)
+}
+
+// bytesExtWrapper lifts a BytesExt into a full Ext (interface calls halt).
+type bytesExtWrapper struct {
+	interfaceExtFailer
+	BytesExt
+}
+
+// interfaceExtWrapper lifts an InterfaceExt into a full Ext (byte calls halt).
+type interfaceExtWrapper struct {
+	bytesExtFailer
+	InterfaceExt
+}
+
+// extFailWrapper is an Ext whose every method halts.
+type extFailWrapper struct {
+	bytesExtFailer
+	interfaceExtFailer
+}
+
+// binaryEncodingType is embedded by binary-format handles to answer
+// isBinary/isJson for the Handle interface.
+type binaryEncodingType struct{}
+
+func (binaryEncodingType) isBinary() bool { return true }
+func (binaryEncodingType) isJson() bool   { return false }
+
+// textEncodingType is embedded by text-format (non-json) handles.
+type textEncodingType struct{}
+
+func (textEncodingType) isBinary() bool { return false }
+func (textEncodingType) isJson() bool   { return false }
+
+// notJsonType is embedded by handles that are not json.
+type notJsonType struct{}
+
+func (notJsonType) isJson() bool { return false }
+
+// noBuiltInTypes is embedded into many types which do not support builtins
+// e.g. msgpack, simple, cbor.
+
+type noBuiltInTypes struct{}
+
+func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
+
+// bigenHelper handles ByteOrder operations directly using
+// arrays of bytes (not slice of bytes).
+//
+// Since byteorder operations are very common for encoding and decoding
+// numbers, lengths, etc - it is imperative that this operation is as
+// fast as possible. Removing indirection (pointer chasing) to look
+// at up to 8 bytes helps a lot here.
+//
+// For times where it is expedient to use a slice, delegate to the
+// bigenstd (equal to the binary.BigEndian value).
+//
+// retrofitted from stdlib: encoding/binary/BigEndian (ByteOrder)
+type bigenHelper struct{}
+
+// PutUint16 returns v encoded as 2 big-endian bytes.
+func (z bigenHelper) PutUint16(v uint16) (b [2]byte) {
+	return [...]byte{
+		byte(v >> 8),
+		byte(v),
+	}
+}
+
+// PutUint32 returns v encoded as 4 big-endian bytes.
+func (z bigenHelper) PutUint32(v uint32) (b [4]byte) {
+	return [...]byte{
+		byte(v >> 24),
+		byte(v >> 16),
+		byte(v >> 8),
+		byte(v),
+	}
+}
+
+// PutUint64 returns v encoded as 8 big-endian bytes.
+func (z bigenHelper) PutUint64(v uint64) (b [8]byte) {
+	return [...]byte{
+		byte(v >> 56),
+		byte(v >> 48),
+		byte(v >> 40),
+		byte(v >> 32),
+		byte(v >> 24),
+		byte(v >> 16),
+		byte(v >> 8),
+		byte(v),
+	}
+}
+
+// Uint16 decodes 2 big-endian bytes into a uint16.
+func (z bigenHelper) Uint16(b [2]byte) (v uint16) {
+	return uint16(b[1]) |
+		uint16(b[0])<<8
+}
+
+// Uint32 decodes 4 big-endian bytes into a uint32.
+func (z bigenHelper) Uint32(b [4]byte) (v uint32) {
+	return uint32(b[3]) |
+		uint32(b[2])<<8 |
+		uint32(b[1])<<16 |
+		uint32(b[0])<<24
+}
+
+// Uint64 decodes 8 big-endian bytes into a uint64.
+func (z bigenHelper) Uint64(b [8]byte) (v uint64) {
+	return uint64(b[7]) |
+		uint64(b[6])<<8 |
+		uint64(b[5])<<16 |
+		uint64(b[4])<<24 |
+		uint64(b[3])<<32 |
+		uint64(b[2])<<40 |
+		uint64(b[1])<<48 |
+		uint64(b[0])<<56
+}
+
+// writeUint16 writes v big-endian to the encode writer.
+func (z bigenHelper) writeUint16(w *encWr, v uint16) {
+	x := z.PutUint16(v)
+	w.writen2(x[0], x[1])
+}
+
+// writeUint32 writes v big-endian to the encode writer.
+func (z bigenHelper) writeUint32(w *encWr, v uint32) {
+	w.writen4(z.PutUint32(v))
+}
+
+// writeUint64 writes v big-endian to the encode writer.
+func (z bigenHelper) writeUint64(w *encWr, v uint64) {
+	w.writen8(z.PutUint64(v))
+}
+
+// extTypeTagFn is one extension registration: the type (by id, by pointer-type
+// id, and by reflect.Type), the tag it is stored under, and the Ext handler.
+type extTypeTagFn struct {
+	rtid    uintptr
+	rtidptr uintptr
+	rt      reflect.Type
+	tag     uint64
+	ext     Ext
+}
+
+// extHandle is the ordered list of extension registrations on a handle.
+type extHandle []extTypeTagFn
+
+// AddExt registers an encode and decode function for a reflect.Type.
+// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
+//
+// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+func (x *BasicHandle) AddExt(rt reflect.Type, tag byte,
+	encfn func(reflect.Value) ([]byte, error),
+	decfn func(reflect.Value, []byte) error) (err error) {
+	if encfn == nil || decfn == nil {
+		return x.SetExt(rt, uint64(tag), nil)
+	}
+	return x.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
+}
+
+// SetExt will set the extension for a tag and reflect.Type.
+// Note that the type must be a named type, and specifically not a pointer or Interface.
+// An error is returned if that is not honored.
+// To Deregister an ext, call SetExt with nil Ext.
+//
+// It is an error to call SetExt after the handle has been initialized (used).
+//
+// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
+func (x *BasicHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
+	if x.isInited() {
+		return errHandleInited
+	}
+	if x.basicHandleRuntimeState == nil {
+		x.basicHandleRuntimeState = new(basicHandleRuntimeState)
+	}
+	return x.basicHandleRuntimeState.setExt(rt, tag, ext)
+}
+
+// getExtForI returns the extension registration for the dynamic type of x, if any.
+func (o extHandle) getExtForI(x interface{}) (v *extTypeTagFn) {
+	if len(o) > 0 {
+		v = o.getExt(i2rtid(x), true)
+	}
+	return
+}
+
+// getExt returns the registration whose base or pointer type id matches rtid.
+// When check is false, lookup is skipped entirely and nil is returned.
+func (o extHandle) getExt(rtid uintptr, check bool) (v *extTypeTagFn) {
+	if !check {
+		return
+	}
+	for i := range o {
+		v = &o[i]
+		if v.rtid == rtid || v.rtidptr == rtid {
+			return
+		}
+	}
+	return nil
+}
+
+// getExtForTag returns the registration stored under tag, or nil.
+func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
+	for i := range o {
+		v = &o[i]
+		if v.tag == tag {
+			return
+		}
+	}
+	return nil
+}
+
+// intf2impl maps an interface type (by id) to a concrete implementing type.
+type intf2impl struct {
+	rtid uintptr // for intf
+	impl reflect.Type
+}
+
+type intf2impls []intf2impl
+
+// Intf2Impl maps an interface to an implementing type.
+// This allows us to support inferring the concrete type
+// and populating it when passed an interface.
+// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
+//
+// Passing a nil impl will clear the mapping.
+func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) {
+	if impl != nil && !impl.Implements(intf) {
+		return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf)
+	}
+	rtid := rt2id(intf)
+	o2 := *o
+	for i := range o2 {
+		v := &o2[i]
+		if v.rtid == rtid {
+			v.impl = impl
+			return
+		}
+	}
+	*o = append(o2, intf2impl{rtid, impl})
+	return
+}
+
+// intf2impl returns a new addressable value of the mapped concrete type
+// for the given interface type id, or the zero Value if no mapping exists.
+func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) {
+	for i := range o {
+		v := &o[i]
+		if v.rtid == rtid {
+			if v.impl == nil {
+				return
+			}
+			vkind := v.impl.Kind()
+			if vkind == reflect.Ptr {
+				return reflect.New(v.impl.Elem())
+			}
+			return rvZeroAddrK(v.impl, vkind)
+		}
+	}
+	return
+}
+
+// structFieldInfoPathNode is a node in a tree, which allows us easily
+// walk the anonymous path.
+//
+// In the typical case, the node is not embedded/anonymous, and thus the parent
+// will be nil and this information becomes a value (not needing any indirection).
+type structFieldInfoPathNode struct {
+	parent *structFieldInfoPathNode
+
+	offset   uint16
+	index    uint16
+	kind     uint8
+	numderef uint8
+
+	// encNameAsciiAlphaNum and omitEmpty should be in structFieldInfo,
+	// but are kept here for tighter packaging.
+
+	encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers
+	omitEmpty            bool
+
+	typ reflect.Type
+}
+
+// depth returns number of valid nodes in the hierarchy
+func (path *structFieldInfoPathNode) depth() (d int) {
+TOP:
+	if path != nil {
+		d++
+		path = path.parent
+		goto TOP
+	}
+	return
+}
+
+// field returns the field of the struct, walking the embedded/anonymous
+// chain via parent nodes. It returns the zero Value if a nil pointer is
+// encountered anywhere along the path.
+func (path *structFieldInfoPathNode) field(v reflect.Value) (rv2 reflect.Value) {
+	if parent := path.parent; parent != nil {
+		v = parent.field(v)
+		for j, k := uint8(0), parent.numderef; j < k; j++ {
+			if rvIsNil(v) {
+				return
+			}
+			v = v.Elem()
+		}
+	}
+	return path.rvField(v)
+}
+
+// fieldAlloc returns the field of the struct.
+// It allocates if a nil value was seen while searching.
+func (path *structFieldInfoPathNode) fieldAlloc(v reflect.Value) (rv2 reflect.Value) {
+	if parent := path.parent; parent != nil {
+		v = parent.fieldAlloc(v)
+		for j, k := uint8(0), parent.numderef; j < k; j++ {
+			if rvIsNil(v) {
+				rvSetDirect(v, reflect.New(rvType(v).Elem()))
+			}
+			v = v.Elem()
+		}
+	}
+	return path.rvField(v)
+}
+
+// structFieldInfo describes one encodable struct field: its encoded name
+// and the path (possibly through embedded structs) to reach it.
+type structFieldInfo struct {
+	encName string // encode name
+
+	// encNameHash uintptr
+
+	// fieldName string // currently unused
+
+	// encNameAsciiAlphaNum and omitEmpty should be here,
+	// but are stored in structFieldInfoPathNode for tighter packaging.
+
+	path structFieldInfoPathNode
+}
+
+// parseStructInfo parses a struct-level tag of the form "name,opt,opt,...",
+// returning the toarray/omitempty flags and the field-name key type
+// (defaulting to string). The leading name element is ignored; a tag with
+// no comma-separated options yields the defaults.
+func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
+	keytype = valueTypeString // default
+	if stag == "" {
+		return
+	}
+	ss := strings.Split(stag, ",")
+	if len(ss) < 2 {
+		return
+	}
+	for _, s := range ss[1:] {
+		switch s {
+		case "omitempty":
+			omitEmpty = true
+		case "toarray":
+			toArray = true
+		case "int":
+			keytype = valueTypeInt
+		case "uint":
+			keytype = valueTypeUint
+		case "float":
+			keytype = valueTypeFloat
+			// case "bool":
+			// 	keytype = valueTypeBool
+		case "string":
+			keytype = valueTypeString
+		}
+	}
+	return
+}
+
+// parseTag parses a field-level tag of the form "name,opt,...", setting the
+// encoded name (if non-empty) and the omitempty flag on si.
+func (si *structFieldInfo) parseTag(stag string) {
+	if stag == "" {
+		return
+	}
+	for i, s := range strings.Split(stag, ",") {
+		if i == 0 {
+			if s != "" {
+				si.encName = s
+			}
+		} else {
+			switch s {
+			case "omitempty":
+				si.path.omitEmpty = true
+			}
+		}
+	}
+}
+
+// sfiSortedByEncName implements sort.Interface, ordering fields by encoded name.
+type sfiSortedByEncName []*structFieldInfo
+
+func (p sfiSortedByEncName) Len() int           { return len(p) }
+func (p sfiSortedByEncName) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName }
+
+// typeInfo4Container holds information that is only available for
+// containers like map, array, chan, slice.
+type typeInfo4Container struct {
+	elem reflect.Type
+	// key is:
+	// - if map kind: map key
+	// - if array kind: sliceOf(elem)
+	// - if chan kind: sliceOf(elem)
+	key reflect.Type
+
+	// fastpathUnderlying is underlying type of a named slice/map/array, as defined by go spec,
+	// that is used by fastpath where we defined fastpath functions for the underlying type.
+	//
+	// for a map, it's a map; for a slice or array, it's a slice; else its nil.
+	fastpathUnderlying reflect.Type
+
+	tikey  *typeInfo
+	tielem *typeInfo
+}
+
+// typeInfo keeps static (non-changing readonly) information
+// about each (non-ptr) type referenced in the encode/decode sequence.
+//
+// During an encode/decode sequence, we work as below:
+//   - If base is a built in type, en/decode base value
+//   - If base is registered as an extension, en/decode base value
+//   - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
+//   - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
+//   - Else decode appropriately based on the reflect.Kind
+type typeInfo struct {
+	rt  reflect.Type
+	ptr reflect.Type
+
+	// pkgpath string
+
+	rtid uintptr
+
+	numMeth uint16 // number of methods
+	kind    uint8
+	chandir uint8
+
+	anyOmitEmpty bool      // true if a struct, and any of the fields are tagged "omitempty"
+	toArray      bool      // whether this (struct) type should be encoded as an array
+	keyType      valueType // if struct, how is the field name stored in a stream? default is string
+	mbs          bool      // base type (T or *T) is a MapBySlice
+
+	sfi4Name map[string]*structFieldInfo // map. used for finding sfi given a name
+
+	*typeInfo4Container
+
+	// ---- cpu cache line boundary?
+
+	size, keysize, elemsize uint32
+
+	keykind, elemkind uint8
+
+	flagHasPkgPath   bool // Type.PackagePath != ""
+	flagCustom       bool // does this have custom implementation?
+	flagComparable   bool
+	flagCanTransient bool
+
+	flagSelferViaCodecgen bool
+
+	// custom implementation flags: whether the type (or its pointer type)
+	// implements each of the recognized codec/stdlib interfaces
+	flagIsZeroer    bool
+	flagIsZeroerPtr bool
+
+	flagIsCodecEmptyer    bool
+	flagIsCodecEmptyerPtr bool
+
+	flagBinaryMarshaler    bool
+	flagBinaryMarshalerPtr bool
+
+	flagBinaryUnmarshaler    bool
+	flagBinaryUnmarshalerPtr bool
+
+	flagTextMarshaler    bool
+	flagTextMarshalerPtr bool
+
+	flagTextUnmarshaler    bool
+	flagTextUnmarshalerPtr bool
+
+	flagJsonMarshaler    bool
+	flagJsonMarshalerPtr bool
+
+	flagJsonUnmarshaler    bool
+	flagJsonUnmarshalerPtr bool
+
+	flagSelfer    bool
+	flagSelferPtr bool
+
+	flagMissingFielder    bool
+	flagMissingFielderPtr bool
+
+	infoFieldOmitempty bool
+
+	sfi structFieldInfos
+}
+
+// siForEncName returns the field info for the given encoded name (or nil).
+// The map index with string(name) is optimized by the compiler to avoid an allocation.
+func (ti *typeInfo) siForEncName(name []byte) (si *structFieldInfo) {
+	return ti.sfi4Name[string(name)]
+}
+
+// resolve de-duplicates fields with the same encoded name, keeping the one
+// at the shallowest embedding depth. Losers are marked by clearing encName.
+// It returns the number of fields that remain valid.
+func (ti *typeInfo) resolve(x []structFieldInfo, ss map[string]uint16) (n int) {
+	n = len(x)
+
+	for i := range x {
+		ui := uint16(i)
+		xn := x[i].encName
+		j, ok := ss[xn]
+		if ok {
+			i2clear := ui                          // index to be cleared
+			if x[i].path.depth() < x[j].path.depth() { // this one is shallower
+				ss[xn] = ui
+				i2clear = j
+			}
+			if x[i2clear].encName != "" {
+				x[i2clear].encName = ""
+				n--
+			}
+		} else {
+			ss[xn] = ui
+		}
+	}
+
+	return
+}
+
+// init finalizes the struct field information: it drops fields cleared by
+// resolve, records whether any field is omitempty, and stores both the
+// source-order (y) and name-sorted (z) views plus the name lookup map.
+func (ti *typeInfo) init(x []structFieldInfo, n int) {
+	var anyOmitEmpty bool
+
+	// remove all the nils (non-ready)
+	m := make(map[string]*structFieldInfo, n)
+	w := make([]structFieldInfo, n)
+	y := make([]*structFieldInfo, n+n)
+	z := y[n:]
+	y = y[:n]
+	n = 0
+	for i := range x {
+		if x[i].encName == "" {
+			continue
+		}
+		if !anyOmitEmpty && x[i].path.omitEmpty {
+			anyOmitEmpty = true
+		}
+		w[n] = x[i]
+		y[n] = &w[n]
+		m[x[i].encName] = &w[n]
+		n++
+	}
+	if n != len(y) {
+		halt.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", ti.rt, len(y), len(x), n)
+	}
+
+	copy(z, y)
+	sort.Sort(sfiSortedByEncName(z))
+
+	ti.anyOmitEmpty = anyOmitEmpty
+	ti.sfi.load(y, z)
+	ti.sfi4Name = m
+}
+
+// Handling flagCanTransient
+//
+// We support transient optimization if the kind of the type is
+// a number, bool, string, or slice.
+// In addition, we also support if the kind is struct or array,
+// and the type does not contain any pointers recursively).
+//
+// Noteworthy that all reference types (string, slice, func, map, ptr, interface, etc) have pointers.
+//
+// If using transient for a type with a pointer, there is the potential for data corruption
+// when GC tries to follow a "transient" pointer which may become a non-pointer soon after.
+//
+
+// isCanTransient reports whether a value of type t (kind k) is safe for the
+// transient optimization: a kind in the allowed bitset, or an array/struct
+// composed (recursively) only of such kinds.
+func isCanTransient(t reflect.Type, k reflect.Kind) (v bool) {
+	var bs *bitset32
+	if transientValueHasStringSlice {
+		bs = &numBoolStrSliceBitset
+	} else {
+		bs = &numBoolBitset
+	}
+	if bs.isset(byte(k)) {
+		v = true
+	} else if k == reflect.Array {
+		elem := t.Elem()
+		v = isCanTransient(elem, elem.Kind())
+	} else if k == reflect.Struct {
+		v = true
+		for j, jlen := 0, t.NumField(); j < jlen; j++ {
+			f := t.Field(j)
+			if !isCanTransient(f.Type, f.Type.Kind()) {
+				v = false
+				return
+			}
+		}
+	} else {
+		v = false
+	}
+	return
+}
+
+// doSetFlagCanTransient computes and stores ti.flagCanTransient, applying
+// the transientSizeMax cap (when non-zero) before the kind-based check.
+func (ti *typeInfo) doSetFlagCanTransient() {
+	if transientSizeMax > 0 {
+		ti.flagCanTransient = ti.size <= transientSizeMax
+	} else {
+		ti.flagCanTransient = true
+	}
+	if ti.flagCanTransient {
+		// if ti kind is a num, bool, string or slice, then it is flagCanTransient
+		if !numBoolStrSliceBitset.isset(ti.kind) {
+			ti.flagCanTransient = isCanTransient(ti.rt, reflect.Kind(ti.kind))
+		}
+	}
+}
+
+// rtid2ti pairs a type id with its computed typeInfo, for the sorted cache slice.
+type rtid2ti struct {
+	rtid uintptr
+	ti   *typeInfo
+}
+
+// TypeInfos caches typeInfo for each type on first inspection.
+//
+// It is configured with a set of tag keys, which are used to get
+// configuration for the type.
+type TypeInfos struct {
+	infos atomicTypeInfoSlice
+	mu    sync.Mutex
+	_     uint64 // padding (cache-aligned)
+	tags  []string
+	_     uint64 // padding (cache-aligned)
+}
+
+// NewTypeInfos creates a TypeInfos given a set of struct tags keys.
+//
+// This allows users customize the struct tag keys which contain configuration
+// of their types.
+func NewTypeInfos(tags []string) *TypeInfos {
+	return &TypeInfos{tags: tags}
+}
+
+// structTag returns the first non-empty value among the configured tag keys.
+func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
+	// check for tags: codec, json, in that order.
+	// this allows seamless support for many configured structs.
+	for _, x := range x.tags {
+		s = t.Get(x)
+		if s != "" {
+			return s
+		}
+	}
+	return
+}
+
+// findTypeInfo binary-searches s (kept sorted by rtid) for rtid.
+// It returns the insertion index i, and ti non-nil iff an exact match exists.
+func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) {
+	// binary search. adapted from sort/search.go.
+	// Note: we use goto (instead of for loop) so this can be inlined.
+
+	var h uint
+	var j = uint(len(s))
+LOOP:
+	if i < j {
+		h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
+		if s[h].rtid < rtid {
+			i = h + 1
+		} else {
+			j = h
+		}
+		goto LOOP
+	}
+	if i < uint(len(s)) && s[i].rtid == rtid {
+		ti = s[i].ti
+	}
+	return
+}
+
+// get returns the typeInfo for rt, computing and caching it on first use.
+func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+	if pti = x.find(rtid); pti == nil {
+		pti = x.load(rt)
+	}
+	return
+}
+
+// find returns the cached typeInfo for rtid, or nil on a cache miss.
+func (x *TypeInfos) find(rtid uintptr) (pti *typeInfo) {
+	sp := x.infos.load()
+	if sp != nil {
+		_, pti = findTypeInfo(sp, rtid)
+	}
+	return
+}
+
// load computes the typeInfo for rt (which must not be a pointer kind),
// inserts it into the sorted cache under x.mu, and returns it.
//
// The computation itself runs outside the lock, so two goroutines may
// redundantly compute the same typeInfo; the insert is reconciled under
// the lock and duplication is harmless.
func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) {
	rk := rt.Kind()

	if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
		halt.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
	}

	rtid := rt2id(rt)

	// do not hold lock while computing this.
	// it may lead to duplication, but that's ok.
	ti := typeInfo{
		rt:      rt,
		ptr:     reflect.PtrTo(rt),
		rtid:    rtid,
		kind:    uint8(rk),
		size:    uint32(rt.Size()),
		numMeth: uint16(rt.NumMethod()),
		keyType: valueTypeString, // default it - so it's never 0

		// pkgpath: rt.PkgPath(),
		flagHasPkgPath: rt.PkgPath() != "",
	}

	// bset sets custom implementation flags; flagCustom records that
	// the type customizes at least one marshaling interface.
	bset := func(when bool, b *bool) {
		if when {
			*b = true
			ti.flagCustom = true
		}
	}

	var b1, b2 bool

	// probe each supported (un)marshaling interface once,
	// for both the base type and its pointer type.
	b1, b2 = implIntf(rt, binaryMarshalerTyp)
	bset(b1, &ti.flagBinaryMarshaler)
	bset(b2, &ti.flagBinaryMarshalerPtr)
	b1, b2 = implIntf(rt, binaryUnmarshalerTyp)
	bset(b1, &ti.flagBinaryUnmarshaler)
	bset(b2, &ti.flagBinaryUnmarshalerPtr)
	b1, b2 = implIntf(rt, textMarshalerTyp)
	bset(b1, &ti.flagTextMarshaler)
	bset(b2, &ti.flagTextMarshalerPtr)
	b1, b2 = implIntf(rt, textUnmarshalerTyp)
	bset(b1, &ti.flagTextUnmarshaler)
	bset(b2, &ti.flagTextUnmarshalerPtr)
	b1, b2 = implIntf(rt, jsonMarshalerTyp)
	bset(b1, &ti.flagJsonMarshaler)
	bset(b2, &ti.flagJsonMarshalerPtr)
	b1, b2 = implIntf(rt, jsonUnmarshalerTyp)
	bset(b1, &ti.flagJsonUnmarshaler)
	bset(b2, &ti.flagJsonUnmarshalerPtr)
	b1, b2 = implIntf(rt, selferTyp)
	bset(b1, &ti.flagSelfer)
	bset(b2, &ti.flagSelferPtr)
	b1, b2 = implIntf(rt, missingFielderTyp)
	bset(b1, &ti.flagMissingFielder)
	bset(b2, &ti.flagMissingFielderPtr)
	b1, b2 = implIntf(rt, iszeroTyp)
	bset(b1, &ti.flagIsZeroer)
	bset(b2, &ti.flagIsZeroerPtr)
	b1, b2 = implIntf(rt, isCodecEmptyerTyp)
	bset(b1, &ti.flagIsCodecEmptyer)
	bset(b2, &ti.flagIsCodecEmptyerPtr)

	b1, b2 = implIntf(rt, isSelferViaCodecgenerTyp)
	ti.flagSelferViaCodecgen = b1 || b2

	b1 = rt.Comparable()
	// bset(b1, &ti.flagComparable)
	ti.flagComparable = b1

	ti.doSetFlagCanTransient()

	// kind-specific setup: struct fields, or container key/elem info.
	var tt reflect.Type
	switch rk {
	case reflect.Struct:
		var omitEmpty bool
		if f, ok := rt.FieldByName(structInfoFieldName); ok {
			ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
			ti.infoFieldOmitempty = omitEmpty
		} else {
			ti.keyType = valueTypeString
		}
		// use a pooled typeInfoLoad as scratch space while walking fields
		pp, pi := &pool4tiload, pool4tiload.Get()
		pv := pi.(*typeInfoLoad)
		pv.reset()
		pv.etypes = append(pv.etypes, ti.rtid)
		x.rget(rt, rtid, nil, pv, omitEmpty)
		n := ti.resolve(pv.sfis, pv.sfiNames)
		ti.init(pv.sfis, n)
		pp.Put(pi)
	case reflect.Map:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.elem = rt.Elem()
		// deref pointer elem/key types before caching their typeInfo
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		ti.key = rt.Key()
		for tt = ti.key; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tikey = x.get(rt2id(tt), tt)
		ti.keykind = uint8(ti.key.Kind())
		ti.keysize = uint32(ti.key.Size())
		if ti.flagHasPkgPath {
			ti.fastpathUnderlying = reflect.MapOf(ti.key, ti.elem)
		}
	case reflect.Slice:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.mbs, b2 = implIntf(rt, mapBySliceTyp)
		if !ti.mbs && b2 {
			ti.mbs = b2
		}
		ti.elem = rt.Elem()
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		if ti.flagHasPkgPath {
			ti.fastpathUnderlying = reflect.SliceOf(ti.elem)
		}
	case reflect.Chan:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.elem = rt.Elem()
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		ti.chandir = uint8(rt.ChanDir())
		ti.key = reflect.SliceOf(ti.elem)
		ti.keykind = uint8(reflect.Slice)
	case reflect.Array:
		ti.typeInfo4Container = new(typeInfo4Container)
		ti.mbs, b2 = implIntf(rt, mapBySliceTyp)
		if !ti.mbs && b2 {
			ti.mbs = b2
		}
		ti.elem = rt.Elem()
		ti.elemkind = uint8(ti.elem.Kind())
		ti.elemsize = uint32(ti.elem.Size())
		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
		}
		ti.tielem = x.get(rt2id(tt), tt)
		ti.key = reflect.SliceOf(ti.elem)
		ti.keykind = uint8(reflect.Slice)
		ti.keysize = uint32(ti.key.Size())
		if ti.flagHasPkgPath {
			ti.fastpathUnderlying = ti.key
		}

	// MARKER: reflect.Ptr cannot happen here, as we halt early if reflect.Ptr passed in
	// case reflect.Ptr:
	// 	ti.elem = rt.Elem()
	// 	ti.elemkind = uint8(ti.elem.Kind())
	// 	ti.elemsize = uint32(ti.elem.Size())
	}

	x.mu.Lock()
	sp := x.infos.load()
	// since this is an atomic load/store, we MUST use a different array each time,
	// else we have a data race when a store is happening simultaneously with a findRtidFn call.
	if sp == nil {
		pti = &ti
		sp = []rtid2ti{{rtid, pti}}
		x.infos.store(sp)
	} else {
		var idx uint
		idx, pti = findTypeInfo(sp, rtid)
		if pti == nil {
			// not inserted while we computed: splice into a fresh sorted copy
			pti = &ti
			sp2 := make([]rtid2ti, len(sp)+1)
			copy(sp2[idx+1:], sp[idx:])
			copy(sp2, sp[:idx])
			sp2[idx] = rtid2ti{rtid, pti}
			x.infos.store(sp2)
		}
	}
	x.mu.Unlock()
	return
}
+
// rget recursively gathers structFieldInfo entries for the fields of rt
// (including inlined anonymous/embedded structs) into pv.sfis.
// path tracks the chain of embedded fields leading to rt; omitEmpty, when
// true, applies omitempty to every gathered field.
func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr,
	path *structFieldInfoPathNode, pv *typeInfoLoad, omitEmpty bool) {
	// Read up fields and store how to access the value.
	//
	// It uses go's rules for message selectors,
	// which say that the field with the shallowest depth is selected.
	//
	// Note: we consciously use slices, not a map, to simulate a set.
	// Typically, types have < 16 fields,
	// and iteration using equals is faster than maps there
	flen := rt.NumField()
LOOP:
	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
		f := rt.Field(int(j))
		fkind := f.Type.Kind()

		// skip if a func type, or is unexported, or structTag value == "-"
		switch fkind {
		case reflect.Func, reflect.UnsafePointer:
			continue LOOP
		}

		isUnexported := f.PkgPath != ""
		if isUnexported && !f.Anonymous {
			continue
		}
		stag := x.structTag(f.Tag)
		if stag == "-" {
			continue
		}
		var si structFieldInfo

		// numderef counts pointer indirections to reach the field's base type
		var numderef uint8 = 0
		for xft := f.Type; xft.Kind() == reflect.Ptr; xft = xft.Elem() {
			numderef++
		}

		var parsed bool
		// if anonymous and no struct tag (or it's blank),
		// and a struct (or pointer to struct), inline it.
		if f.Anonymous && fkind != reflect.Interface {
			// ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface
			ft := f.Type
			isPtr := ft.Kind() == reflect.Ptr
			for ft.Kind() == reflect.Ptr {
				ft = ft.Elem()
			}
			isStruct := ft.Kind() == reflect.Struct

			// Ignore embedded fields of unexported non-struct types.
			// Also, from go1.10, ignore pointers to unexported struct types
			// because unmarshal cannot assign a new struct to an unexported field.
			// See https://golang.org/issue/21357
			if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) {
				continue
			}
			doInline := stag == ""
			if !doInline {
				si.parseTag(stag)
				parsed = true
				doInline = si.encName == "" // si.isZero()
			}
			if doInline && isStruct {
				// if etypes contains this, don't call rget again (as fields are already seen here)
				ftid := rt2id(ft)
				// We cannot recurse forever, but we need to track other field depths.
				// So - we break if we see a type twice (not the first time).
				// This should be sufficient to handle an embedded type that refers to its
				// owning type, which then refers to its embedded type.
				processIt := true
				numk := 0
				for _, k := range pv.etypes {
					if k == ftid {
						numk++
						if numk == rgetMaxRecursion {
							processIt = false
							break
						}
					}
				}
				if processIt {
					pv.etypes = append(pv.etypes, ftid)
					// extend the path to record how this embedded struct is reached
					path2 := &structFieldInfoPathNode{
						parent:   path,
						typ:      f.Type,
						offset:   uint16(f.Offset),
						index:    j,
						kind:     uint8(fkind),
						numderef: numderef,
					}
					x.rget(ft, ftid, path2, pv, omitEmpty)
				}
				continue
			}
		}

		// after the anonymous dance: if an unexported field, skip
		if isUnexported || f.Name == "" { // f.Name cannot be "", but defensively handle it
			continue
		}

		si.path = structFieldInfoPathNode{
			parent:   path,
			typ:      f.Type,
			offset:   uint16(f.Offset),
			index:    j,
			kind:     uint8(fkind),
			numderef: numderef,
			// set asciiAlphaNum to true (default); checked and may be set to false below
			encNameAsciiAlphaNum: true,
			// note: omitEmpty might have been set in an earlier parseTag call, etc - so carry it forward
			omitEmpty: si.path.omitEmpty,
		}

		if !parsed {
			si.encName = f.Name
			si.parseTag(stag)
			parsed = true
		} else if si.encName == "" {
			si.encName = f.Name
		}

		// si.encNameHash = maxUintptr() // hashShortString(bytesView(si.encName))

		if omitEmpty {
			si.path.omitEmpty = true
		}

		// flag names needing escaping/quoting during encode
		for i := len(si.encName) - 1; i >= 0; i-- { // bounds-check elimination
			if !asciiAlphaNumBitset.isset(si.encName[i]) {
				si.path.encNameAsciiAlphaNum = false
				break
			}
		}

		pv.sfis = append(pv.sfis, si)
	}
}
+
+func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) {
+ // return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp)
+
+ // if I's method is defined on T (ie T implements I), then *T implements I.
+ // The converse is not true.
+
+ // Type.Implements can be expensive, as it does a simulataneous linear search across 2 lists
+ // with alphanumeric string comparisons.
+ // If we can avoid running one of these 2 calls, we should.
+
+ base = rt.Implements(iTyp)
+ if base {
+ indir = true
+ } else {
+ indir = reflect.PtrTo(rt).Implements(iTyp)
+ }
+ return
+}
+
// isSliceBoundsError reports whether s looks like the message of a
// runtime index/slice out-of-range error.
func isSliceBoundsError(s string) bool {
	for _, sub := range []string{"index out of range", "slice bounds out of range"} {
		if strings.Contains(s, sub) {
			return true
		}
	}
	return false
}
+
// sprintf is a tiny indirection over fmt.Sprintf, keeping call sites
// within the package short.
func sprintf(format string, v ...interface{}) string {
	return fmt.Sprintf(format, v...)
}
+
// panicValToErr translates a recovered panic value v into *err.
//
// A runtime slice-bounds panic while a Decoder is reading from a []byte
// is mapped to io.EOF (input ran out); well-known sentinel errors are
// bubbled up unchanged; anything else is wrapped via h.wrapErr.
// If v is already the current *err, nothing is done.
func panicValToErr(h errDecorator, v interface{}, err *error) {
	if v == *err {
		return
	}
	switch xerr := v.(type) {
	case nil:
	case runtime.Error:
		d, dok := h.(*Decoder)
		if dok && d.bytes && isSliceBoundsError(xerr.Error()) {
			*err = io.EOF
		} else {
			h.wrapErr(xerr, err)
		}
	case error:
		switch xerr {
		case nil:
		case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
			// treat as special (bubble up)
			*err = xerr
		default:
			h.wrapErr(xerr, err)
		}
	default:
		// we don't expect this to happen (as this library always panics with an error)
		h.wrapErr(fmt.Errorf("%v", v), err)
	}
}
+
// usableByteSlice returns a []byte of length slen, reusing bs when its
// capacity suffices. changed reports whether a slice other than (a
// reslice of) bs was returned.
func usableByteSlice(bs []byte, slen int) (out []byte, changed bool) {
	switch {
	case slen <= 0:
		return []byte{}, true
	case cap(bs) >= slen:
		return bs[:slen], false
	default:
		return make([]byte, slen), true
	}
}
+
// mapKeyFastKindFor returns the mapKeyFastKind for kind k.
// k&31 keeps the index within the 32-entry table without a bounds check
// (reflect.Kind values fit in 0..31).
func mapKeyFastKindFor(k reflect.Kind) mapKeyFastKind {
	return mapKeyFastKindVals[k&31]
}
+
+// ----
+
// codecFnInfo holds the precomputed inputs that a codecFn's
// encode/decode functions need at call time.
type codecFnInfo struct {
	ti     *typeInfo
	xfFn   Ext    // extension handler, when the type is registered as an extension
	xfTag  uint64 // tag associated with the extension
	addrD  bool   // decode wants the value's address
	addrDf bool // force: if addrD, then decode function MUST take a ptr
	addrE  bool // encode wants the value's address
	// addrEf bool // force: if addrE, then encode function MUST take a ptr
}

// codecFn encapsulates the captured variables and the encode function.
// This way, we only do some calculations once, and pass to the
// code block that should be called (encapsulated in a function)
// instead of executing the checks every time.
type codecFn struct {
	i  codecFnInfo
	fe func(*Encoder, *codecFnInfo, reflect.Value) // encode implementation
	fd func(*Decoder, *codecFnInfo, reflect.Value) // decode implementation
	// _ [1]uint64 // padding (cache-aligned)
}

// codecRtidFn pairs a type's runtime id with its codecFn, for use in a
// sorted lookup slice (searched like rtid2ti).
type codecRtidFn struct {
	rtid uintptr
	fn   *codecFn
}
+
// makeExt normalizes any supported extension flavor (Ext, BytesExt,
// InterfaceExt) into an Ext, wrapping where necessary.
// An unsupported value yields an extFailWrapper.
func makeExt(ext interface{}) Ext {
	switch t := ext.(type) {
	case Ext:
		return t
	case BytesExt:
		return &bytesExtWrapper{BytesExt: t}
	case InterfaceExt:
		return &interfaceExtWrapper{InterfaceExt: t}
	}
	return &extFailWrapper{}
}
+
// baseRV returns the reflect.Value for v, dereferencing pointers until
// a non-pointer value is reached.
//
// use reflect.ValueOf, not rv4i, as of go 1.16beta, rv4i was not inlineable.
func baseRV(v interface{}) (rv reflect.Value) {
	rv = reflect.ValueOf(v)
	for rv.Kind() == reflect.Ptr {
		rv = rv.Elem()
	}
	return
}
+
+// ----
+
+// these "checkOverflow" functions must be inlinable, and not call anybody.
+// Overflow means that the value cannot be represented without wrapping/overflow.
+// Overflow=false does not mean that the value can be represented without losing precision
+// (especially for floating point).
+
// checkOverflow bundles inlineable checks for whether a value can be
// represented in a narrower numeric type without wrapping.
// Overflow=false does not mean the value can be represented without
// losing precision (especially for floating point).
type checkOverflow struct{}

// Float32 reports whether v's magnitude overflows a float32.
func (checkOverflow) Float32(v float64) (overflow bool) {
	if v < 0 {
		v = -v
	}
	return math.MaxFloat32 < v && v <= math.MaxFloat64
}

// Uint reports whether v does not fit in an unsigned int of bitsize bits.
func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
	// shifting out then back loses exactly the bits that don't fit
	return v != 0 && v != (v<<(64-bitsize))>>(64-bitsize)
}

// Int reports whether v does not fit in a signed int of bitsize bits.
func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
	// arithmetic shift back preserves sign, so round-trip equality == fits
	return v != 0 && v != (v<<(64-bitsize))>>(64-bitsize)
}

// Uint2Int reports whether magnitude v (negative when neg) overflows an int64.
func (checkOverflow) Uint2Int(v uint64, neg bool) (overflow bool) {
	if neg {
		return v > 1<<63
	}
	return v >= 1<<63
}

// SignedInt reports whether the uint64 bit pattern v overflows when
// interpreted as a signed int64 (e.g. -127 to 128 for int8).
func (checkOverflow) SignedInt(v uint64) (overflow bool) {
	mag := v & 0x7fffffffffffffff
	if v>>63 == 0 { // positive
		return mag > math.MaxInt64
	}
	return mag > math.MaxInt64-1 // one extra value on the negative side
}
+
// Float32V returns v, halting (panicking via halt) if it overflows a float32.
func (x checkOverflow) Float32V(v float64) float64 {
	if x.Float32(v) {
		halt.errorf("float32 overflow: %v", v)
	}
	return v
}

// UintV returns v, halting if it overflows an unsigned int of bitsize bits.
func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
	if x.Uint(v, bitsize) {
		halt.errorf("uint64 overflow: %v", v)
	}
	return v
}

// IntV returns v, halting if it overflows a signed int of bitsize bits.
func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
	if x.Int(v, bitsize) {
		halt.errorf("int64 overflow: %v", v)
	}
	return v
}

// SignedIntV converts v to int64, halting if the conversion overflows.
func (x checkOverflow) SignedIntV(v uint64) int64 {
	if x.SignedInt(v) {
		halt.errorf("uint64 to int64 overflow: %v", v)
	}
	return int64(v)
}
+
+// ------------------ FLOATING POINT -----------------
+
// isNaN64 reports whether f is NaN (the only value not equal to itself).
func isNaN64(f float64) bool { return f != f }

// isWhitespaceChar reports whether v is treated as whitespace.
// NOTE: v < 33 accepts every ASCII control byte plus space, a deliberate
// (faster) superset of just space/\n/\t/\r — see the alternatives below.
func isWhitespaceChar(v byte) bool {
	// these are in order of speed below ...

	return v < 33
	// return v < 33 && whitespaceCharBitset64.isset(v)
	// return v < 33 && (v == ' ' || v == '\n' || v == '\t' || v == '\r')
	// return v == ' ' || v == '\n' || v == '\t' || v == '\r'
	// return whitespaceCharBitset.isset(v)
}

// isNumberChar reports whether v can appear in a number token,
// per the package-level numCharBitset.
func isNumberChar(v byte) bool {
	// these are in order of speed below ...

	return numCharBitset.isset(v)
	// return v < 64 && numCharNoExpBitset64.isset(v) || v == 'e' || v == 'E'
	// return v > 42 && v < 102 && numCharWithExpBitset64.isset(v-42)
}
+
+// -----------------------
+
// ioFlusher is satisfied by buffered writers that can flush (e.g. *bufio.Writer).
type ioFlusher interface {
	Flush() error
}

// ioBuffered is satisfied by buffered readers that report how many
// bytes are buffered (e.g. *bufio.Reader).
type ioBuffered interface {
	Buffered() int
}

// sfiRv pairs a struct field's info with the field's reflect.Value,
// as gathered during struct encoding.
type sfiRv struct {
	v *structFieldInfo
	r reflect.Value
}
+
+// ------
+
+// bitset types are better than [256]bool, because they permit the whole
+// bitset array being on a single cache line and use less memory.
+//
+// Also, since pos is a byte (0-255), there's no bounds checks on indexing (cheap).
+//
+// We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduces
+// bounds checking, so we discarded them, and everyone uses bitset256.
+//
+// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
+// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7
+//
+// Note that using >> or & is faster than using / or %, as division is quite expensive if not optimized.
+
+// MARKER:
+// We noticed a little performance degradation when using bitset256 as [32]byte (or bitset32 as uint32).
+// For example, json encoding went from 188K ns/op to 168K ns/op (~ 10% reduction).
+// Consequently, we are using a [NNN]bool for bitsetNNN.
+// To eliminate bounds-checking, we use x % v as that is guaranteed to be within bounds.
+
+// ----
// bitset32 is a set over the values 0..31.
// A [32]bool (rather than a packed uint32) benchmarked faster here, and
// indexing with pos%32 is provably in-bounds, so no bounds check is emitted.
type bitset32 [32]bool

// set records pos (mod 32) and returns x, allowing chained calls.
func (x *bitset32) set(pos byte) *bitset32 {
	x[pos%32] = true // pos%32 === pos&31; always in-bounds
	return x
}

// isset reports whether pos (mod 32) is in the set.
func (x *bitset32) isset(pos byte) bool {
	return x[pos%32]
}

// bitset256 is a set over all byte values; a byte index is always in-bounds.
type bitset256 [256]bool

// set records pos and returns x, allowing chained calls.
func (x *bitset256) set(pos byte) *bitset256 {
	x[pos] = true
	return x
}

// isset reports whether pos is in the set.
func (x *bitset256) isset(pos byte) bool {
	return x[pos]
}
+
+// ------------
+
// panicHdl centralizes the package's "halt on error" behavior.
type panicHdl struct{}

// onerror will panic if err is defined (not nil)
func (panicHdl) onerror(err error) {
	if err != nil {
		panic(err)
	}
}

// errorf will always panic, using the parameters passed.
//
// Note: it is ok to pass in a stringView, as it will just pass it directly
// to a fmt.Sprintf call and not hold onto it.
//
//go:noinline
func (panicHdl) errorf(format string, params ...interface{}) {
	if format == "" {
		panic(errPanicUndefined)
	}
	if len(params) == 0 {
		// avoid fmt.Errorf cost when there is nothing to format
		panic(errors.New(format))
	}
	panic(fmt.Errorf(format, params...))
}
+
+// ----------------------------------------------------
+
// errDecorator decorates an error before it is handed back to callers
// (e.g. to add encoder/decoder context).
type errDecorator interface {
	wrapErr(in error, out *error)
}

// errDecoratorDef is the default errDecorator: it stores the error unchanged.
type errDecoratorDef struct{}

func (errDecoratorDef) wrapErr(v error, e *error) { *e = v }
+
+// ----------------------------------------------------
+
// mustHdl converts (value, error) pairs into bare values,
// halting (panicking via halt) when the error is non-nil.
type mustHdl struct{}

func (mustHdl) String(s string, err error) string {
	halt.onerror(err)
	return s
}
func (mustHdl) Int(s int64, err error) int64 {
	halt.onerror(err)
	return s
}
func (mustHdl) Uint(s uint64, err error) uint64 {
	halt.onerror(err)
	return s
}
func (mustHdl) Float(s float64, err error) float64 {
	halt.onerror(err)
	return s
}
+
+// -------------------
+
// freelistCapacity returns the smallest power-of-2 capacity, starting
// at 8, that is strictly greater than length.
func freelistCapacity(length int) (capacity int) {
	capacity = 8
	for capacity <= length {
		capacity *= 2
	}
	return capacity
}
+
+// bytesFreelist is a list of byte buffers, sorted by cap.
+//
+// In anecdotal testing (running go test -tsd 1..6), we couldn't get
+// the length ofthe list > 4 at any time. So we believe a linear search
+// without bounds checking is sufficient.
+//
+// Typical usage model:
+// peek may go together with put, iff pop=true. peek gets largest byte slice temporarily.
+// check is used to switch a []byte if necessary
+// get/put go together
+//
+// Given that folks may get a []byte, and then append to it a lot which may re-allocate
+// a new []byte, we should try to return both (one received from blist and new one allocated).
+//
+// Typical usage model for get/put, when we don't know whether we may need more than requested
+// v0 := blist.get()
+// v1 := v0
+// ... use v1 ...
+// blist.put(v1)
+// if byteSliceAddr(v0) != byteSliceAddr(v1) {
+// blist.put(v0)
+// }
+//
// bytesFreelist is a list of reusable byte buffers, kept sorted by cap
// ascending (see the usage notes in the comment block above).
type bytesFreelist [][]byte

// peek returns a slice of possibly non-zero'ed bytes, with len=0,
// and with the largest capacity from the list (growing the last entry
// if it is smaller than length). If pop is true, the returned buffer
// is also removed from the list.
func (x *bytesFreelist) peek(length int, pop bool) (out []byte) {
	if bytesFreeListNoCache {
		return make([]byte, 0, freelistCapacity(length))
	}
	y := *x
	if len(y) > 0 {
		out = y[len(y)-1] // largest-cap entry is last (list sorted by cap)
	}
	// start buf with a minimum of 64 bytes
	const minLenBytes = 64
	if length < minLenBytes {
		length = minLenBytes
	}
	if cap(out) < length {
		out = make([]byte, 0, freelistCapacity(length))
		y = append(y, out)
		*x = y
	}
	if pop && len(y) > 0 {
		y = y[:len(y)-1]
		*x = y
	}
	return
}

// get returns a slice of possibly non-zero'ed bytes, with len=0,
// and with cap >= length requested, removing it from the list.
// A fresh buffer is allocated when no cached one is big enough.
func (x *bytesFreelist) get(length int) (out []byte) {
	if bytesFreeListNoCache {
		return make([]byte, 0, freelistCapacity(length))
	}
	y := *x
	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, v := range y {
	for i := 0; i < len(y); i++ {
		v := y[i]
		if cap(v) >= length {
			// remove entry i, shifting the rest left
			// *x = append(y[:i], y[i+1:]...)
			copy(y[i:], y[i+1:])
			*x = y[:len(y)-1]
			return v
		}
	}
	return make([]byte, 0, freelistCapacity(length))
}

// put returns v to the list, inserting it so the list stays sorted by
// cap ascending. Zero-cap slices are not cached.
func (x *bytesFreelist) put(v []byte) {
	if bytesFreeListNoCache || cap(v) == 0 {
		return
	}
	if len(v) != 0 {
		v = v[:0]
	}
	// append the new value, then try to put it in a better position
	y := append(*x, v)
	*x = y
	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, z := range y[:len(y)-1] {
	for i := 0; i < len(y)-1; i++ {
		z := y[i]
		if cap(z) > cap(v) {
			// shift larger entries right and slot v in at i
			copy(y[i+1:], y[i:])
			y[i] = v
			return
		}
	}
}

// check returns v (len 0) if its capacity suffices for length, else a
// suitable buffer from the list (v is traded back in).
func (x *bytesFreelist) check(v []byte, length int) (out []byte) {
	// ensure inlineable, by moving slow-path out to its own function
	if cap(v) >= length {
		return v[:0]
	}
	return x.checkPutGet(v, length)
}

// checkPutGet is the slow path of check: put v back and get a buffer
// with cap >= length, in one pass over the (cap-sorted) list.
func (x *bytesFreelist) checkPutGet(v []byte, length int) []byte {
	// checkPutGet broken out into its own function, so check is inlineable in general case
	const useSeparateCalls = false

	if useSeparateCalls {
		x.put(v)
		return x.get(length)
	}

	if bytesFreeListNoCache {
		return make([]byte, 0, freelistCapacity(length))
	}

	// assume cap(v) < length, so put must happen before get
	y := *x
	var put = cap(v) == 0 // if empty, consider it already put
	if !put {
		y = append(y, v)
		*x = y
	}
	for i := 0; i < len(y); i++ {
		z := y[i]
		if put {
			// v already inserted: now looking for a big-enough buffer to take
			if cap(z) >= length {
				copy(y[i:], y[i+1:])
				y = y[:len(y)-1]
				*x = y
				return z
			}
		} else {
			// still inserting v at its sorted position
			if cap(z) > cap(v) {
				copy(y[i+1:], y[i:])
				y[i] = v
				put = true
			}
		}
	}
	return make([]byte, 0, freelistCapacity(length))
}
+
+// -------------------------
+
+// sfiRvFreelist is used by Encoder for encoding structs,
+// where we have to gather the fields first and then
+// analyze them for omitEmpty, before knowing the length of the array/map to encode.
+//
+// Typically, the length here will depend on the number of cycles e.g.
+// if type T1 has reference to T1, or T1 has reference to type T2 which has reference to T1.
+//
+// In the general case, the length of this list at most times is 1,
+// so linear search is fine.
// sfiRvFreelist is a freelist of []sfiRv scratch slices, kept sorted by
// cap ascending (same shape as bytesFreelist; see the notes above).
type sfiRvFreelist [][]sfiRv

// get removes and returns a cached slice with cap >= length (len 0),
// or allocates a fresh one.
func (x *sfiRvFreelist) get(length int) (out []sfiRv) {
	y := *x

	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, v := range y {
	for i := 0; i < len(y); i++ {
		v := y[i]
		if cap(v) >= length {
			// remove entry i, shifting the rest left
			// *x = append(y[:i], y[i+1:]...)
			copy(y[i:], y[i+1:])
			*x = y[:len(y)-1]
			return v
		}
	}
	return make([]sfiRv, 0, freelistCapacity(length))
}

// put returns v to the list, inserting it so the list stays sorted by
// cap ascending.
func (x *sfiRvFreelist) put(v []sfiRv) {
	if len(v) != 0 {
		v = v[:0]
	}
	// append the new value, then try to put it in a better position
	y := append(*x, v)
	*x = y
	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
	// for i, z := range y[:len(y)-1] {
	for i := 0; i < len(y)-1; i++ {
		z := y[i]
		if cap(z) > cap(v) {
			// shift larger entries right and slot v in at i
			copy(y[i+1:], y[i:])
			y[i] = v
			return
		}
	}
}
+
+// ---- multiple interner implementations ----
+
+// Hard to tell which is most performant:
+// - use a map[string]string - worst perf, no collisions, and unlimited entries
+// - use a linear search with move to front heuristics - no collisions, and maxed at 64 entries
+// - use a computationally-intensive hash - best performance, some collisions, maxed at 64 entries
+
const (
	internMaxStrLen = 16     // if more than 16 bytes, faster to copy than compare bytes
	internCap       = 64 * 2 // 64 uses 1K bytes RAM, so 128 (anecdotal sweet spot) uses 2K bytes
)

// internerMap interns short strings decoded from []byte, so repeated
// keys share a single string allocation.
// (Of the candidate designs, a map has no collisions and unlimited
// entries, at the cost of being the slowest.)
type internerMap map[string]string

// init allocates the map with the configured capacity.
func (x *internerMap) init() {
	*x = make(map[string]string, internCap)
}

// string returns the interned string for v, allocating and caching it
// on first sight.
func (x internerMap) string(v []byte) (s string) {
	s, ok := x[string(v)] // lookup with string(v) key does not allocate, per go implementation
	if ok {
		return s
	}
	s = string(v) // new allocation here
	x[s] = s
	return s
}
diff --git a/vendor/github.com/ugorji/go/codec/helper.s b/vendor/github.com/ugorji/go/codec/helper.s
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper.s
diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go
new file mode 100644
index 000000000..e646249c7
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_internal.go
@@ -0,0 +1,147 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
// maxArrayLen is the size of uint, which determines
// the maximum length of any array.
// (32<<(^uint(0)>>63)) is 32 on 32-bit and 64 on 64-bit platforms,
// so this evaluates to MaxInt for the platform's int width.
const maxArrayLen = 1<<((32<<(^uint(0)>>63))-1) - 1
+
+// All non-std package dependencies live in this file,
+// so porting to different environment is easy (just update functions).
+
// pruneSignExt returns how many leading sign-extension bytes of the
// big-endian integer v can be dropped without changing its value:
// 0x00 bytes for a positive number (pos), 0xff for a negative one.
// The byte following each dropped byte must agree in sign bit, and at
// least one byte is always retained.
func pruneSignExt(v []byte, pos bool) (n int) {
	if len(v) < 2 {
		return 0
	}
	if pos {
		if v[0] != 0 {
			return 0
		}
		for v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0) {
			n++
		}
		return n
	}
	if v[0] != 0xff {
		return 0
	}
	for v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0) {
		n++
	}
	return n
}
+
// halfFloatToFloatBits converts an IEEE-754 half-precision bit pattern
// into the equivalent single-precision (float32) bit pattern.
//
// retrofitted from:
// - OGRE (Object-Oriented Graphics Rendering Engine)
//   function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html
func halfFloatToFloatBits(h uint16) (f uint32) {
	sign := uint32(h>>15) << 31
	exp := int32((h >> 10) & 0x1f)
	mant := uint32(h & 0x03ff)

	switch {
	case exp == 0:
		if mant == 0 { // plus or minus 0
			return sign
		}
		// Denormalized number -- renormalize it
		for mant&0x0400 == 0 {
			mant <<= 1
			exp--
		}
		exp++
		mant &^= 0x0400
	case exp == 31:
		if mant == 0 { // Inf
			return sign | 0x7f800000
		}
		return sign | 0x7f800000 | (mant << 13) // NaN
	}
	// rebias exponent from 15 to 127 (+112), widen mantissa 10 -> 23 bits
	return sign | (uint32(exp+112) << 23) | (mant << 13)
}
+
// floatToHalfFloatBits converts a single-precision (float32) bit
// pattern into the nearest IEEE-754 half-precision bit pattern,
// truncating excess mantissa bits and saturating overflow to Inf.
//
// retrofitted from:
// - OGRE (Object-Oriented Graphics Rendering Engine)
//   function: halfToFloatI https://www.ogre3d.org/docs/api/1.9/_ogre_bitwise_8h_source.html
// - http://www.java2s.com/example/java-utility-method/float-to/floattohalf-float-f-fae00.html
func floatToHalfFloatBits(i uint32) (h uint16) {
	sign := (i >> 16) & 0x8000
	exp := int32(((i >> 23) & 0xff) - (127 - 15))
	mant := i & 0x7fffff

	var out uint32
	switch {
	case exp <= 0:
		if exp < -10 { // too small: flush to (signed) zero
			out = sign // track -0 vs +0
		} else {
			// subnormal half: shift mantissa (with implicit leading bit) down
			mant = (mant | 0x800000) >> uint32(1-exp)
			out = sign | (mant >> 13)
		}
	case exp == 0xff-(127-15): // source was Inf or NaN
		if mant == 0 { // Inf
			out = sign | 0x7c00
		} else { // NaN
			mant >>= 13
			var nz uint32
			if mant == 0 {
				nz = 1 // keep NaN from collapsing to Inf
			}
			out = sign | 0x7c00 | mant | nz
		}
	case exp > 30: // Overflow: saturate to Inf
		out = sign | 0x7c00
	default:
		out = sign | (uint32(exp) << 10) | (mant >> 13)
	}
	return uint16(out)
}
+
// growCap will return a new capacity for a slice, given the following:
//   - oldCap: current capacity
//   - unit: in-memory size of an element
//   - num: number of elements to add
//
// Growth is 2x up to a unit-size-dependent threshold, then 1.5x, and
// the result is rounded up so newCap*unit is a multiple of 64 bytes
// (a cache line).
func growCap(oldCap, unit, num uint) (newCap uint) {
	// appendslice logic (if cap < 1024, *2, else *1.25):
	// leads to many copy calls, especially when copying bytes.
	// bytes.Buffer model (2*cap + n): much better for bytes.
	// smarter way is to take the byte-size of the appended element(type) into account

	// maintain 1 thresholds:
	// t1: if cap <= t1, newcap = 2x
	//     else newcap = 1.5x
	//
	// t1 is always >= 1024.
	// This means that, if unit size >= 16, then always do 2x or 1.5x (ie t1, t2, t3 are all same)
	//
	// With this, appending for bytes increase by:
	//     100% up to 4K
	//     50% beyond that

	// unit can be 0 e.g. for struct{}{}; handle that appropriately
	maxCap := num + (oldCap * 3 / 2)
	if unit == 0 || maxCap > maxArrayLen || maxCap < oldCap { // handle wraparound, etc
		return maxArrayLen
	}

	var t1 uint = 1024 // default thresholds for large values
	if unit <= 4 {
		t1 = 8 * 1024
	} else if unit <= 16 {
		t1 = 2 * 1024
	}

	newCap = 2 + num
	if oldCap > 0 {
		if oldCap <= t1 { // [0,t1]
			newCap = num + (oldCap * 2)
		} else { // (t1,infinity]
			newCap = maxCap
		}
	}

	// ensure newCap takes multiples of a cache line (size is a multiple of 64)
	// NOTE: t1 is reused below as the byte size, no longer a threshold
	t1 = newCap * unit
	if t2 := t1 % 64; t2 != 0 {
		t1 += 64 - t2
		newCap = t1 / unit
	}

	return
}
diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
new file mode 100644
index 000000000..57a3fe26d
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
@@ -0,0 +1,670 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !go1.9 || safe || codec.safe || appengine
+// +build !go1.9 safe codec.safe appengine
+
+package codec
+
+import (
+ // "hash/adler32"
+ "math"
+ "reflect"
+ "sync/atomic"
+ "time"
+)
+
+// This file has safe variants of some helper functions.
+// MARKER: See helper_unsafe.go for the usage documentation.
+
+const safeMode = true
+
+const transientSizeMax = 0
+const transientValueHasStringSlice = true
+
+func stringView(v []byte) string {
+ return string(v)
+}
+
+func bytesView(v string) []byte {
+ return []byte(v)
+}
+
+func byteSliceSameData(v1 []byte, v2 []byte) bool {
+ return cap(v1) != 0 && cap(v2) != 0 && &(v1[:1][0]) == &(v2[:1][0])
+}
+
+func okBytes3(b []byte) (v [4]byte) {
+ copy(v[1:], b)
+ return
+}
+
+func okBytes4(b []byte) (v [4]byte) {
+ copy(v[:], b)
+ return
+}
+
+func okBytes8(b []byte) (v [8]byte) {
+ copy(v[:], b)
+ return
+}
+
+func isNil(v interface{}) (rv reflect.Value, isnil bool) {
+ rv = reflect.ValueOf(v)
+ if isnilBitset.isset(byte(rv.Kind())) {
+ isnil = rv.IsNil()
+ }
+ return
+}
+
+func eq4i(i0, i1 interface{}) bool {
+ return i0 == i1
+}
+
+func rv4iptr(i interface{}) reflect.Value { return reflect.ValueOf(i) }
+func rv4istr(i interface{}) reflect.Value { return reflect.ValueOf(i) }
+
+// func rv4i(i interface{}) reflect.Value { return reflect.ValueOf(i) }
+// func rv4iK(i interface{}, kind byte, isref bool) reflect.Value { return reflect.ValueOf(i) }
+
+func rv2i(rv reflect.Value) interface{} {
+ return rv.Interface()
+}
+
+func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value {
+ return rv.Addr()
+}
+
+func rvIsNil(rv reflect.Value) bool {
+ return rv.IsNil()
+}
+
+func rvSetSliceLen(rv reflect.Value, length int) {
+ rv.SetLen(length)
+}
+
+func rvZeroAddrK(t reflect.Type, k reflect.Kind) reflect.Value {
+ return reflect.New(t).Elem()
+}
+
+func rvZeroK(t reflect.Type, k reflect.Kind) reflect.Value {
+ return reflect.Zero(t)
+}
+
+func rvConvert(v reflect.Value, t reflect.Type) (rv reflect.Value) {
+ // Note that reflect.Value.Convert(...) will make a copy if it is addressable.
+ // Since we decode into the passed value, we must try to convert the addressable value..
+ if v.CanAddr() {
+ return v.Addr().Convert(reflect.PtrTo(t)).Elem()
+ }
+ return v.Convert(t)
+}
+
+func rt2id(rt reflect.Type) uintptr {
+ return reflect.ValueOf(rt).Pointer()
+}
+
+func i2rtid(i interface{}) uintptr {
+ return reflect.ValueOf(reflect.TypeOf(i)).Pointer()
+}
+
+// --------------------------
+
+func isEmptyValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
+ switch v.Kind() {
+ case reflect.Invalid:
+ return true
+ case reflect.Array, reflect.String:
+ return v.Len() == 0
+ case reflect.Map, reflect.Slice, reflect.Chan:
+ return v.IsNil() || v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Complex64, reflect.Complex128:
+ c := v.Complex()
+ return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Func, reflect.UnsafePointer:
+ return v.IsNil()
+ case reflect.Interface, reflect.Ptr:
+ isnil := v.IsNil()
+ if recursive && !isnil {
+ return isEmptyValue(v.Elem(), tinfos, recursive)
+ }
+ return isnil
+ case reflect.Struct:
+ return isEmptyStruct(v, tinfos, recursive)
+ }
+ return false
+}
+
+// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty:
+// - does it implement IsZero() bool
+//   - is it comparable, and can I compare directly using ==
+// - if checkStruct, then walk through the encodable fields
+// and check if they are empty or not.
+func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
+ // v is a struct kind - no need to check again.
+ // We only check isZero on a struct kind, to reduce the amount of times
+ // that we lookup the rtid and typeInfo for each type as we walk the tree.
+
+ vt := rvType(v)
+ rtid := rt2id(vt)
+ if tinfos == nil {
+ tinfos = defTypeInfos
+ }
+ ti := tinfos.get(rtid, vt)
+ if ti.rtid == timeTypId {
+ return rv2i(v).(time.Time).IsZero()
+ }
+ if ti.flagIsZeroer {
+ return rv2i(v).(isZeroer).IsZero()
+ }
+ if ti.flagIsZeroerPtr && v.CanAddr() {
+ return rv2i(v.Addr()).(isZeroer).IsZero()
+ }
+ if ti.flagIsCodecEmptyer {
+ return rv2i(v).(isCodecEmptyer).IsCodecEmpty()
+ }
+ if ti.flagIsCodecEmptyerPtr && v.CanAddr() {
+ return rv2i(v.Addr()).(isCodecEmptyer).IsCodecEmpty()
+ }
+ if ti.flagComparable {
+ return rv2i(v) == rv2i(rvZeroK(vt, reflect.Struct))
+ }
+ if !recursive {
+ return false
+ }
+ // We only care about what we can encode/decode,
+ // so that is what we use to check omitEmpty.
+ for _, si := range ti.sfi.source() {
+ sfv := si.path.field(v)
+ if sfv.IsValid() && !isEmptyValue(sfv, tinfos, recursive) {
+ return false
+ }
+ }
+ return true
+}
+
+// --------------------------
+
+type perTypeElem struct {
+ t reflect.Type
+ rtid uintptr
+ zero reflect.Value
+ addr [2]reflect.Value
+}
+
+func (x *perTypeElem) get(index uint8) (v reflect.Value) {
+ v = x.addr[index%2]
+ if v.IsValid() {
+ v.Set(x.zero)
+ } else {
+ v = reflect.New(x.t).Elem()
+ x.addr[index%2] = v
+ }
+ return
+}
+
+type perType struct {
+ v []perTypeElem
+}
+
+type decPerType struct {
+ perType
+}
+
+type encPerType struct {
+ perType
+}
+
+func (x *perType) elem(t reflect.Type) *perTypeElem {
+ rtid := rt2id(t)
+ var h, i uint
+ var j = uint(len(x.v))
+LOOP:
+ if i < j {
+ h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
+ if x.v[h].rtid < rtid {
+ i = h + 1
+ } else {
+ j = h
+ }
+ goto LOOP
+ }
+ if i < uint(len(x.v)) {
+ if x.v[i].rtid != rtid {
+ x.v = append(x.v, perTypeElem{})
+ copy(x.v[i+1:], x.v[i:])
+ x.v[i] = perTypeElem{t: t, rtid: rtid, zero: reflect.Zero(t)}
+ }
+ } else {
+ x.v = append(x.v, perTypeElem{t: t, rtid: rtid, zero: reflect.Zero(t)})
+ }
+ return &x.v[i]
+}
+
+func (x *perType) TransientAddrK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
+ return x.elem(t).get(0)
+}
+
+func (x *perType) TransientAddr2K(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
+ return x.elem(t).get(1)
+}
+
+func (x *perType) AddressableRO(v reflect.Value) (rv reflect.Value) {
+ rv = x.elem(v.Type()).get(0)
+ rvSetDirect(rv, v)
+ return
+}
+
+// --------------------------
+type structFieldInfos struct {
+ c []*structFieldInfo
+ s []*structFieldInfo
+}
+
+func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
+ x.c = source
+ x.s = sorted
+}
+
+func (x *structFieldInfos) sorted() (v []*structFieldInfo) { return x.s }
+func (x *structFieldInfos) source() (v []*structFieldInfo) { return x.c }
+
+type atomicClsErr struct {
+ v atomic.Value
+}
+
+func (x *atomicClsErr) load() (e clsErr) {
+ if i := x.v.Load(); i != nil {
+ e = i.(clsErr)
+ }
+ return
+}
+
+func (x *atomicClsErr) store(p clsErr) {
+ x.v.Store(p)
+}
+
+// --------------------------
+type atomicTypeInfoSlice struct {
+ v atomic.Value
+}
+
+func (x *atomicTypeInfoSlice) load() (e []rtid2ti) {
+ if i := x.v.Load(); i != nil {
+ e = i.([]rtid2ti)
+ }
+ return
+}
+
+func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
+ x.v.Store(p)
+}
+
+// --------------------------
+type atomicRtidFnSlice struct {
+ v atomic.Value
+}
+
+func (x *atomicRtidFnSlice) load() (e []codecRtidFn) {
+ if i := x.v.Load(); i != nil {
+ e = i.([]codecRtidFn)
+ }
+ return
+}
+
+func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
+ x.v.Store(p)
+}
+
+// --------------------------
+func (n *fauxUnion) ru() reflect.Value {
+ return reflect.ValueOf(&n.u).Elem()
+}
+func (n *fauxUnion) ri() reflect.Value {
+ return reflect.ValueOf(&n.i).Elem()
+}
+func (n *fauxUnion) rf() reflect.Value {
+ return reflect.ValueOf(&n.f).Elem()
+}
+func (n *fauxUnion) rl() reflect.Value {
+ return reflect.ValueOf(&n.l).Elem()
+}
+func (n *fauxUnion) rs() reflect.Value {
+ return reflect.ValueOf(&n.s).Elem()
+}
+func (n *fauxUnion) rt() reflect.Value {
+ return reflect.ValueOf(&n.t).Elem()
+}
+func (n *fauxUnion) rb() reflect.Value {
+ return reflect.ValueOf(&n.b).Elem()
+}
+
+// --------------------------
+func rvSetBytes(rv reflect.Value, v []byte) {
+ rv.SetBytes(v)
+}
+
+func rvSetString(rv reflect.Value, v string) {
+ rv.SetString(v)
+}
+
+func rvSetBool(rv reflect.Value, v bool) {
+ rv.SetBool(v)
+}
+
+func rvSetTime(rv reflect.Value, v time.Time) {
+ rv.Set(reflect.ValueOf(v))
+}
+
+func rvSetFloat32(rv reflect.Value, v float32) {
+ rv.SetFloat(float64(v))
+}
+
+func rvSetFloat64(rv reflect.Value, v float64) {
+ rv.SetFloat(v)
+}
+
+func rvSetComplex64(rv reflect.Value, v complex64) {
+ rv.SetComplex(complex128(v))
+}
+
+func rvSetComplex128(rv reflect.Value, v complex128) {
+ rv.SetComplex(v)
+}
+
+func rvSetInt(rv reflect.Value, v int) {
+ rv.SetInt(int64(v))
+}
+
+func rvSetInt8(rv reflect.Value, v int8) {
+ rv.SetInt(int64(v))
+}
+
+func rvSetInt16(rv reflect.Value, v int16) {
+ rv.SetInt(int64(v))
+}
+
+func rvSetInt32(rv reflect.Value, v int32) {
+ rv.SetInt(int64(v))
+}
+
+func rvSetInt64(rv reflect.Value, v int64) {
+ rv.SetInt(v)
+}
+
+func rvSetUint(rv reflect.Value, v uint) {
+ rv.SetUint(uint64(v))
+}
+
+func rvSetUintptr(rv reflect.Value, v uintptr) {
+ rv.SetUint(uint64(v))
+}
+
+func rvSetUint8(rv reflect.Value, v uint8) {
+ rv.SetUint(uint64(v))
+}
+
+func rvSetUint16(rv reflect.Value, v uint16) {
+ rv.SetUint(uint64(v))
+}
+
+func rvSetUint32(rv reflect.Value, v uint32) {
+ rv.SetUint(uint64(v))
+}
+
+func rvSetUint64(rv reflect.Value, v uint64) {
+ rv.SetUint(v)
+}
+
+// ----------------
+
+func rvSetDirect(rv reflect.Value, v reflect.Value) {
+ rv.Set(v)
+}
+
+func rvSetDirectZero(rv reflect.Value) {
+ rv.Set(reflect.Zero(rv.Type()))
+}
+
+// func rvSet(rv reflect.Value, v reflect.Value) {
+// rv.Set(v)
+// }
+
+func rvSetIntf(rv reflect.Value, v reflect.Value) {
+ rv.Set(v)
+}
+
+func rvSetZero(rv reflect.Value) {
+ rv.Set(reflect.Zero(rv.Type()))
+}
+
+func rvSlice(rv reflect.Value, length int) reflect.Value {
+ return rv.Slice(0, length)
+}
+
+func rvMakeSlice(rv reflect.Value, ti *typeInfo, xlen, xcap int) (v reflect.Value, set bool) {
+ v = reflect.MakeSlice(ti.rt, xlen, xcap)
+ if rv.Len() > 0 {
+ reflect.Copy(v, rv)
+ }
+ return
+}
+
+func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value, newcap int, set bool) {
+ newcap = int(growCap(uint(cap), uint(ti.elemsize), uint(incr)))
+ v = reflect.MakeSlice(ti.rt, newcap, newcap)
+ if rv.Len() > 0 {
+ reflect.Copy(v, rv)
+ }
+ return
+}
+
+// ----------------
+
+func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value {
+ return rv.Index(i)
+}
+
+func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value {
+ return rv.Index(i)
+}
+
+func rvSliceZeroCap(t reflect.Type) (v reflect.Value) {
+ return reflect.MakeSlice(t, 0, 0)
+}
+
+func rvLenSlice(rv reflect.Value) int {
+ return rv.Len()
+}
+
+func rvCapSlice(rv reflect.Value) int {
+ return rv.Cap()
+}
+
+func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
+ l := rv.Len()
+ if scratch == nil || rv.CanAddr() {
+ return rv.Slice(0, l).Bytes()
+ }
+
+ if l <= cap(scratch) {
+ bs = scratch[:l]
+ } else {
+ bs = make([]byte, l)
+ }
+ reflect.Copy(reflect.ValueOf(bs), rv)
+ return
+}
+
+func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
+ v = rvZeroAddrK(reflectArrayOf(rvLenSlice(rv), rvType(rv).Elem()), reflect.Array)
+ reflect.Copy(v, rv)
+ return
+}
+
+func rvGetSlice4Array(rv reflect.Value, v interface{}) {
+ // v is a pointer to a slice to be populated
+
+ // rv.Slice fails if address is not addressable, which can occur during encoding.
+ // Consequently, check if non-addressable, and if so, make new slice and copy into it first.
+ // MARKER: this *may* cause allocation if non-addressable, unfortunately.
+
+ rve := reflect.ValueOf(v).Elem()
+ l := rv.Len()
+ if rv.CanAddr() {
+ rve.Set(rv.Slice(0, l))
+ } else {
+ rvs := reflect.MakeSlice(rve.Type(), l, l)
+ reflect.Copy(rvs, rv)
+ rve.Set(rvs)
+ }
+ // reflect.ValueOf(v).Elem().Set(rv.Slice(0, rv.Len()))
+}
+
+func rvCopySlice(dest, src reflect.Value, _ reflect.Type) {
+ reflect.Copy(dest, src)
+}
+
+// ------------
+
+func rvGetBool(rv reflect.Value) bool {
+ return rv.Bool()
+}
+
+func rvGetBytes(rv reflect.Value) []byte {
+ return rv.Bytes()
+}
+
+func rvGetTime(rv reflect.Value) time.Time {
+ return rv2i(rv).(time.Time)
+}
+
+func rvGetString(rv reflect.Value) string {
+ return rv.String()
+}
+
+func rvGetFloat64(rv reflect.Value) float64 {
+ return rv.Float()
+}
+
+func rvGetFloat32(rv reflect.Value) float32 {
+ return float32(rv.Float())
+}
+
+func rvGetComplex64(rv reflect.Value) complex64 {
+ return complex64(rv.Complex())
+}
+
+func rvGetComplex128(rv reflect.Value) complex128 {
+ return rv.Complex()
+}
+
+func rvGetInt(rv reflect.Value) int {
+ return int(rv.Int())
+}
+
+func rvGetInt8(rv reflect.Value) int8 {
+ return int8(rv.Int())
+}
+
+func rvGetInt16(rv reflect.Value) int16 {
+ return int16(rv.Int())
+}
+
+func rvGetInt32(rv reflect.Value) int32 {
+ return int32(rv.Int())
+}
+
+func rvGetInt64(rv reflect.Value) int64 {
+ return rv.Int()
+}
+
+func rvGetUint(rv reflect.Value) uint {
+ return uint(rv.Uint())
+}
+
+func rvGetUint8(rv reflect.Value) uint8 {
+ return uint8(rv.Uint())
+}
+
+func rvGetUint16(rv reflect.Value) uint16 {
+ return uint16(rv.Uint())
+}
+
+func rvGetUint32(rv reflect.Value) uint32 {
+ return uint32(rv.Uint())
+}
+
+func rvGetUint64(rv reflect.Value) uint64 {
+ return rv.Uint()
+}
+
+func rvGetUintptr(rv reflect.Value) uintptr {
+ return uintptr(rv.Uint())
+}
+
+func rvLenMap(rv reflect.Value) int {
+ return rv.Len()
+}
+
+// func rvLenArray(rv reflect.Value) int { return rv.Len() }
+
+// ------------ map range and map indexing ----------
+
+func mapStoresElemIndirect(elemsize uintptr) bool { return false }
+
+func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, _, _ bool) {
+ m.SetMapIndex(k, v)
+}
+
+func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, _, _ bool) (vv reflect.Value) {
+ return m.MapIndex(k)
+}
+
+// func mapDelete(m, k reflect.Value) {
+// m.SetMapIndex(k, reflect.Value{})
+// }
+
+func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (r reflect.Value) {
+ return // reflect.New(t).Elem()
+}
+
+// ---------- ENCODER optimized ---------------
+
+func (e *Encoder) jsondriver() *jsonEncDriver {
+ return e.e.(*jsonEncDriver)
+}
+
+// ---------- DECODER optimized ---------------
+
+func (d *Decoder) checkBreak() bool {
+ return d.d.CheckBreak()
+}
+
+func (d *Decoder) jsondriver() *jsonDecDriver {
+ return d.d.(*jsonDecDriver)
+}
+
+func (d *Decoder) stringZC(v []byte) (s string) {
+ return d.string(v)
+}
+
+func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string {
+ return d.string(*kstr2bs)
+}
+
+// ---------- structFieldInfo optimized ---------------
+
+func (n *structFieldInfoPathNode) rvField(v reflect.Value) reflect.Value {
+ return v.Field(int(n.index))
+}
+
+// ---------- others ---------------
diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe_not_gc.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe_not_gc.go
new file mode 100644
index 000000000..e3fdc8854
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe_not_gc.go
@@ -0,0 +1,19 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !go1.9 || safe || codec.safe || appengine || !gc
+// +build !go1.9 safe codec.safe appengine !gc
+
+package codec
+
+import "reflect"
+
+// This file contains safe versions of the code where the unsafe versions are not supported
+// in either gccgo or gollvm.
+//
+// - rvType:
+// reflect.toType is not supported in gccgo, gollvm.
+
+func rvType(rv reflect.Value) reflect.Type {
+ return rv.Type()
+}
diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
new file mode 100644
index 000000000..352cf4b3c
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
@@ -0,0 +1,1301 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !safe && !codec.safe && !appengine && go1.9
+// +build !safe,!codec.safe,!appengine,go1.9
+
+// minimum of go 1.9 is needed, as that is the minimum for all features and linked functions we need
+// - typedmemclr was introduced in go 1.8
+// - mapassign_fastXXX was introduced in go 1.9
+// etc
+
+package codec
+
+import (
+ "reflect"
+ _ "runtime" // needed for go linkname(s)
+ "sync/atomic"
+ "time"
+ "unsafe"
+)
+
+// This file has unsafe variants of some helper functions.
+// MARKER: See helper_unsafe.go for the usage documentation.
+
+// There are a number of helper_*unsafe*.go files.
+//
+// - helper_unsafe
+// unsafe variants of dependent functions
+// - helper_unsafe_compiler_gc (gc)
+// unsafe variants of dependent functions which cannot be shared with gollvm or gccgo
+// - helper_not_unsafe_not_gc (gccgo/gollvm or safe)
+// safe variants of functions in helper_unsafe_compiler_gc
+// - helper_not_unsafe (safe)
+// safe variants of functions in helper_unsafe
+// - helper_unsafe_compiler_not_gc (gccgo, gollvm)
+// unsafe variants of functions/variables which non-standard compilers need
+//
+// This way, we can judiciously use build tags to include the right set of files
+// for any compiler, and make it run optimally in unsafe mode.
+//
+// As of March 2021, we cannot differentiate whether running with gccgo or gollvm
+// using a build constraint, as both satisfy 'gccgo' build tag.
+// Consequently, we must use the lowest common denominator to support both.
+
+// For reflect.Value code, we decided to do the following:
+// - if we know the kind, we can elide conditional checks for
+// - SetXXX (Int, Uint, String, Bool, etc)
+// - SetLen
+//
+// We can also optimize
+// - IsNil
+
+// MARKER: Some functions here will not be hit during code coverage runs due to optimizations, e.g.
+// - rvCopySlice: decode calls it if rvGrowSlice didn't set the new slice into the pointer to the orig slice.
+// however, helper_unsafe sets it, so there's no need to call rvCopySlice later
+// - rvSlice: same as above
+// - rvGetArray4Bytes: only called within kArray for []byte, but that is now handled
+// within the fast-path directly
+
+const safeMode = false
+
+// helperUnsafeDirectAssignMapEntry says that we should not copy the pointer in the map
+// to another value during mapRange/iteration and mapGet calls, but directly assign it.
+//
+// The only callers of mapRange/iteration is encode.
+// Here, we just walk through the values and encode them
+//
+// The only caller of mapGet is decode.
+// Here, it does a Get if the underlying value is a pointer, and decodes into that.
+//
+// For both users, we are very careful NOT to modify or keep the pointers around.
+// Consequently, it is ok to take advantage of the fact that the map is not modified
+// during an iteration and we can just "peek" at the internal value in the map and use it.
+const helperUnsafeDirectAssignMapEntry = true
+
+// MARKER: keep in sync with GO_ROOT/src/reflect/value.go
+const (
+ unsafeFlagStickyRO = 1 << 5
+ unsafeFlagEmbedRO = 1 << 6
+ unsafeFlagIndir = 1 << 7
+ unsafeFlagAddr = 1 << 8
+ unsafeFlagRO = unsafeFlagStickyRO | unsafeFlagEmbedRO
+ // unsafeFlagKindMask = (1 << 5) - 1 // 5 bits for 27 kinds (up to 31)
+ // unsafeTypeKindDirectIface = 1 << 5
+)
+
+// transientSizeMax below is used in TransientAddr as the backing storage.
+//
+// Must be >= 16 as the maximum size is a complex128 (or string on 64-bit machines).
+const transientSizeMax = 64
+
+// should struct/array support internal strings and slices?
+const transientValueHasStringSlice = false
+
+type unsafeString struct {
+ Data unsafe.Pointer
+ Len int
+}
+
+type unsafeSlice struct {
+ Data unsafe.Pointer
+ Len int
+ Cap int
+}
+
+type unsafeIntf struct {
+ typ unsafe.Pointer
+ ptr unsafe.Pointer
+}
+
+type unsafeReflectValue struct {
+ unsafeIntf
+ flag uintptr
+}
+
+// keep in sync with stdlib runtime/type.go
+type unsafeRuntimeType struct {
+ size uintptr
+ // ... many other fields here
+}
+
+// unsafeZeroAddr and unsafeZeroSlice points to a read-only block of memory
+// used for setting a zero value for most types or creating a read-only
+// zero value for a given type.
+var (
+ unsafeZeroAddr = unsafe.Pointer(&unsafeZeroArr[0])
+ unsafeZeroSlice = unsafeSlice{unsafeZeroAddr, 0, 0}
+)
+
+// We use a scratch memory and an unsafeSlice for transient values:
+//
+// unsafeSlice is used for standalone strings and slices (outside an array or struct).
+// scratch memory is used for other kinds, based on contract below:
+// - numbers, bool are always transient
+// - structs and arrays are transient iff they have no pointers i.e.
+// no string, slice, chan, func, interface, map, etc only numbers and bools.
+// - slices and strings are transient (using the unsafeSlice)
+
+type unsafePerTypeElem struct {
+ arr [transientSizeMax]byte // for bool, number, struct, array kinds
+ slice unsafeSlice // for string and slice kinds
+}
+
+func (x *unsafePerTypeElem) addrFor(k reflect.Kind) unsafe.Pointer {
+ if k == reflect.String || k == reflect.Slice {
+ x.slice = unsafeSlice{} // memclr
+ return unsafe.Pointer(&x.slice)
+ }
+ x.arr = [transientSizeMax]byte{} // memclr
+ return unsafe.Pointer(&x.arr)
+}
+
+type perType struct {
+ elems [2]unsafePerTypeElem
+}
+
+type decPerType struct {
+ perType
+}
+
+type encPerType struct{}
+
+// TransientAddrK is used for getting a *transient* value to be decoded into,
+// which will right away be used for something else.
+//
+// See notes in helper.go about "Transient values during decoding"
+
+func (x *perType) TransientAddrK(t reflect.Type, k reflect.Kind) reflect.Value {
+ return rvZeroAddrTransientAnyK(t, k, x.elems[0].addrFor(k))
+}
+
+func (x *perType) TransientAddr2K(t reflect.Type, k reflect.Kind) reflect.Value {
+ return rvZeroAddrTransientAnyK(t, k, x.elems[1].addrFor(k))
+}
+
+func (encPerType) AddressableRO(v reflect.Value) reflect.Value {
+ return rvAddressableReadonly(v)
+}
+
+// stringView returns a view of the []byte as a string.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func stringView(v []byte) string {
+ return *(*string)(unsafe.Pointer(&v))
+}
+
+// bytesView returns a view of the string as a []byte.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
+// In regular safe mode, it is an allocation and copy.
+func bytesView(v string) (b []byte) {
+ sx := (*unsafeString)(unsafe.Pointer(&v))
+ bx := (*unsafeSlice)(unsafe.Pointer(&b))
+ bx.Data, bx.Len, bx.Cap = sx.Data, sx.Len, sx.Len
+ return
+}
+
+func byteSliceSameData(v1 []byte, v2 []byte) bool {
+ return (*unsafeSlice)(unsafe.Pointer(&v1)).Data == (*unsafeSlice)(unsafe.Pointer(&v2)).Data
+}
+
+// MARKER: okBytesN functions will copy N bytes into the top slots of the return array.
+// These functions expect that the bounds are valid, and have been checked before this is called.
+// copy(...) does a number of checks which are unnecessary in this situation when in bounds.
+
+func okBytes3(b []byte) (v [4]byte) {
+ *(*[3]byte)(unsafe.Pointer(&v[1])) = *((*[3]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
+ return
+}
+
+func okBytes4(b []byte) [4]byte {
+ return *((*[4]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
+}
+
+func okBytes8(b []byte) [8]byte {
+ return *((*[8]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
+}
+
+// isNil says whether the value v is nil.
+// This applies to references like map/ptr/unsafepointer/chan/func,
+// and non-reference values like interface/slice.
+func isNil(v interface{}) (rv reflect.Value, isnil bool) {
+ var ui = (*unsafeIntf)(unsafe.Pointer(&v))
+ isnil = ui.ptr == nil
+ if !isnil {
+ rv, isnil = unsafeIsNilIntfOrSlice(ui, v)
+ }
+ return
+}
+
+func unsafeIsNilIntfOrSlice(ui *unsafeIntf, v interface{}) (rv reflect.Value, isnil bool) {
+ rv = reflect.ValueOf(v) // reflect.ValueOf is currently not inline'able - so call it directly
+ tk := rv.Kind()
+ isnil = (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.ptr) == nil
+ return
+}
+
+// return the pointer for a reference (map/chan/func/pointer/unsafe.Pointer).
+// true references (map, func, chan, ptr - NOT slice) may be double-referenced? as flagIndir
+//
+// Assumes that v is a reference (map/func/chan/ptr/unsafe.Pointer)
+func rvRefPtr(v *unsafeReflectValue) unsafe.Pointer {
+ if v.flag&unsafeFlagIndir != 0 {
+ return *(*unsafe.Pointer)(v.ptr)
+ }
+ return v.ptr
+}
+
+func eq4i(i0, i1 interface{}) bool {
+ v0 := (*unsafeIntf)(unsafe.Pointer(&i0))
+ v1 := (*unsafeIntf)(unsafe.Pointer(&i1))
+ return v0.typ == v1.typ && v0.ptr == v1.ptr
+}
+
+func rv4iptr(i interface{}) (v reflect.Value) {
+ // Main advantage here is that it is inlined, nothing escapes to heap, i is never nil
+ uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+ uv.unsafeIntf = *(*unsafeIntf)(unsafe.Pointer(&i))
+ uv.flag = uintptr(rkindPtr)
+ return
+}
+
+func rv4istr(i interface{}) (v reflect.Value) {
+ // Main advantage here is that it is inlined, nothing escapes to heap, i is never nil
+ uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+ uv.unsafeIntf = *(*unsafeIntf)(unsafe.Pointer(&i))
+ uv.flag = uintptr(rkindString) | unsafeFlagIndir
+ return
+}
+
+func rv2i(rv reflect.Value) (i interface{}) {
+	// We tap into implementation details from
+ // the source go stdlib reflect/value.go, and trims the implementation.
+ //
+ // e.g.
+ // - a map/ptr is a reference, thus flagIndir is not set on it
+ // - an int/slice is not a reference, thus flagIndir is set on it
+
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ if refBitset.isset(byte(rv.Kind())) && urv.flag&unsafeFlagIndir != 0 {
+ urv.ptr = *(*unsafe.Pointer)(urv.ptr)
+ }
+ return *(*interface{})(unsafe.Pointer(&urv.unsafeIntf))
+}
+
+func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ urv.flag = (urv.flag & unsafeFlagRO) | uintptr(reflect.Ptr)
+ urv.typ = ((*unsafeIntf)(unsafe.Pointer(&ptrType))).ptr
+ return rv
+}
+
+func rvIsNil(rv reflect.Value) bool {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ if urv.flag&unsafeFlagIndir != 0 {
+ return *(*unsafe.Pointer)(urv.ptr) == nil
+ }
+ return urv.ptr == nil
+}
+
+func rvSetSliceLen(rv reflect.Value, length int) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ (*unsafeString)(urv.ptr).Len = length
+}
+
+func rvZeroAddrK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
+ urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
+ urv.ptr = unsafeNew(urv.typ)
+ return
+}
+
+func rvZeroAddrTransientAnyK(t reflect.Type, k reflect.Kind, addr unsafe.Pointer) (rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
+ urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
+ urv.ptr = addr
+ return
+}
+
+func rvZeroK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+ urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
+ if refBitset.isset(byte(k)) {
+ urv.flag = uintptr(k)
+ } else if rtsize2(urv.typ) <= uintptr(len(unsafeZeroArr)) {
+ urv.flag = uintptr(k) | unsafeFlagIndir
+ urv.ptr = unsafeZeroAddr
+ } else { // meaning struct or array
+ urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
+ urv.ptr = unsafeNew(urv.typ)
+ }
+ return
+}
+
+// rvConvert will convert a value to a different type directly,
+// ensuring that they still point to the same underlying value.
+func rvConvert(v reflect.Value, t reflect.Type) reflect.Value {
+ uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+ uv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
+ return v
+}
+
+// rvAddressableReadonly returns an addressable reflect.Value.
+//
+// Use it within encode calls, when you just want to "read" the underlying ptr
+// without modifying the value.
+//
+// Note that it cannot be used for r/w use, as those non-addressable values
+// may have been stored in read-only memory, and trying to write the pointer
+// may cause a segfault.
+func rvAddressableReadonly(v reflect.Value) reflect.Value {
+ // hack to make an addressable value out of a non-addressable one.
+ // Assume folks calling it are passing a value that can be addressable, but isn't.
+ // This assumes that the flagIndir is already set on it.
+ // so we just set the flagAddr bit on the flag (and do not set the flagIndir).
+
+ uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+ uv.flag = uv.flag | unsafeFlagAddr // | unsafeFlagIndir
+
+ return v
+}
+
+func rtsize2(rt unsafe.Pointer) uintptr {
+ return ((*unsafeRuntimeType)(rt)).size
+}
+
+func rt2id(rt reflect.Type) uintptr {
+ return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).ptr)
+}
+
+func i2rtid(i interface{}) uintptr {
+ return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ)
+}
+
+// --------------------------
+
+func unsafeCmpZero(ptr unsafe.Pointer, size int) bool {
+ var s1 = unsafeString{ptr, size}
+ var s2 = unsafeString{unsafeZeroAddr, size}
+ if size > len(unsafeZeroArr) {
+ arr := make([]byte, size)
+ s2.Data = unsafe.Pointer(&arr[0])
+ }
+ return *(*string)(unsafe.Pointer(&s1)) == *(*string)(unsafe.Pointer(&s2)) // memcmp
+}
+
+func isEmptyValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
+ urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+ if urv.flag == 0 {
+ return true
+ }
+ if recursive {
+ return isEmptyValueFallbackRecur(urv, v, tinfos)
+ }
+ return unsafeCmpZero(urv.ptr, int(rtsize2(urv.typ)))
+}
+
+// isEmptyValueFallbackRecur reports whether v holds the zero/empty value
+// for its kind, reading the value's raw memory directly via urv.
+// It recurses through non-nil pointers and interfaces (via isEmptyValue),
+// and compares a struct's whole memory footprint against zero bytes.
+func isEmptyValueFallbackRecur(urv *unsafeReflectValue, v reflect.Value, tinfos *TypeInfos) bool {
+	const recursive = true
+
+	switch v.Kind() {
+	case reflect.Invalid:
+		return true
+	case reflect.String:
+		return (*unsafeString)(urv.ptr).Len == 0
+	case reflect.Slice:
+		return (*unsafeSlice)(urv.ptr).Len == 0
+	case reflect.Bool:
+		return !*(*bool)(urv.ptr)
+	case reflect.Int:
+		return *(*int)(urv.ptr) == 0
+	case reflect.Int8:
+		return *(*int8)(urv.ptr) == 0
+	case reflect.Int16:
+		return *(*int16)(urv.ptr) == 0
+	case reflect.Int32:
+		return *(*int32)(urv.ptr) == 0
+	case reflect.Int64:
+		return *(*int64)(urv.ptr) == 0
+	case reflect.Uint:
+		return *(*uint)(urv.ptr) == 0
+	case reflect.Uint8:
+		return *(*uint8)(urv.ptr) == 0
+	case reflect.Uint16:
+		return *(*uint16)(urv.ptr) == 0
+	case reflect.Uint32:
+		return *(*uint32)(urv.ptr) == 0
+	case reflect.Uint64:
+		return *(*uint64)(urv.ptr) == 0
+	case reflect.Uintptr:
+		return *(*uintptr)(urv.ptr) == 0
+	case reflect.Float32:
+		return *(*float32)(urv.ptr) == 0
+	case reflect.Float64:
+		return *(*float64)(urv.ptr) == 0
+	case reflect.Complex64:
+		// complex64 is 8 bytes (two float32s); compare raw memory to zero
+		return unsafeCmpZero(urv.ptr, 8)
+	case reflect.Complex128:
+		// complex128 is 16 bytes (two float64s)
+		return unsafeCmpZero(urv.ptr, 16)
+	case reflect.Struct:
+		// return isEmptyStruct(v, tinfos, recursive)
+		if tinfos == nil {
+			tinfos = defTypeInfos
+		}
+		ti := tinfos.find(uintptr(urv.typ))
+		if ti == nil {
+			ti = tinfos.load(rvType(v))
+		}
+		// a struct is empty iff every byte of its memory is zero
+		return unsafeCmpZero(urv.ptr, int(ti.size))
+	case reflect.Interface, reflect.Ptr:
+		// isnil := urv.ptr == nil // (not sufficient, as a pointer value encodes the type)
+		isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
+		if recursive && !isnil {
+			return isEmptyValue(v.Elem(), tinfos, recursive)
+		}
+		return isnil
+	case reflect.UnsafePointer:
+		return urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
+	case reflect.Chan:
+		return urv.ptr == nil || len_chan(rvRefPtr(urv)) == 0
+	case reflect.Map:
+		return urv.ptr == nil || len_map(rvRefPtr(urv)) == 0
+	case reflect.Array:
+		return v.Len() == 0
+	}
+	// remaining kinds (e.g. reflect.Func) are never treated as empty
+	return false
+}
+
+// --------------------------
+
+// structFieldInfos holds the source and sorted []*structFieldInfo slices
+// as raw data pointers plus a shared length (both slices have equal length),
+// reconstructing the slice headers on demand in source()/sorted().
+type structFieldInfos struct {
+	c unsafe.Pointer // source
+	s unsafe.Pointer // sorted
+	length int
+}
+
+// load captures the data pointers and (common) length of both slices.
+func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
+	s := (*unsafeSlice)(unsafe.Pointer(&sorted))
+	x.s = s.Data
+	x.length = s.Len
+	s = (*unsafeSlice)(unsafe.Pointer(&source))
+	x.c = s.Data
+}
+
+// sorted rebuilds the sorted slice header from the stored pointer/length.
+func (x *structFieldInfos) sorted() (v []*structFieldInfo) {
+	*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.s, x.length, x.length}
+	// s := (*unsafeSlice)(unsafe.Pointer(&v))
+	// s.Data = x.sorted0
+	// s.Len = x.length
+	// s.Cap = s.Len
+	return
+}
+
+// source rebuilds the source-order slice header from the stored pointer/length.
+func (x *structFieldInfos) source() (v []*structFieldInfo) {
+	*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.c, x.length, x.length}
+	return
+}
+
+// atomicXXX is expected to be 2 words (for symmetry with atomic.Value)
+//
+// Note that we do not atomically load/store length and data pointer separately,
+// as this could lead to some races. Instead, we atomically load/store cappedSlice.
+//
+// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly.
+
+// ----------------------
+type atomicTypeInfoSlice struct {
+	v unsafe.Pointer // *[]rtid2ti
+}
+
+// load atomically fetches the stored slice; returns nil if never stored.
+func (x *atomicTypeInfoSlice) load() (s []rtid2ti) {
+	x2 := atomic.LoadPointer(&x.v)
+	if x2 != nil {
+		s = *(*[]rtid2ti)(x2)
+	}
+	return
+}
+
+// store atomically publishes a pointer to (a copy of) the slice header p.
+func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
+	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
+}
+
+// MARKER: in safe mode, atomicXXX are atomic.Value, which contains an interface{}.
+// This is 2 words.
+// consider padding atomicXXX here with a uintptr, so they fit into 2 words also.
+
+// --------------------------
+type atomicRtidFnSlice struct {
+	v unsafe.Pointer // *[]codecRtidFn
+}
+
+// load atomically fetches the stored slice; returns nil if never stored.
+func (x *atomicRtidFnSlice) load() (s []codecRtidFn) {
+	x2 := atomic.LoadPointer(&x.v)
+	if x2 != nil {
+		s = *(*[]codecRtidFn)(x2)
+	}
+	return
+}
+
+// store atomically publishes a pointer to (a copy of) the slice header p.
+func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
+	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
+}
+
+// --------------------------
+type atomicClsErr struct {
+	v unsafe.Pointer // *clsErr
+}
+
+// load atomically fetches the stored clsErr (zero value if never stored).
+func (x *atomicClsErr) load() (e clsErr) {
+	x2 := (*clsErr)(atomic.LoadPointer(&x.v))
+	if x2 != nil {
+		e = *x2
+	}
+	return
+}
+
+// store atomically publishes a pointer to (a copy of) p.
+func (x *atomicClsErr) store(p clsErr) {
+	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
+}
+
+// --------------------------
+
+// to create a reflect.Value for each member field of fauxUnion,
+// we first create a global fauxUnion, and create reflect.Value
+// for them all.
+// This way, we have the flags and type in the reflect.Value.
+// Then, when a reflect.Value is called, we just copy it,
+// update the ptr to the fauxUnion's, and return it.
+
+// unsafeDecNakedWrapper pairs a fauxUnion with pre-built reflect.Values
+// for each of its primitive fields (see the comment above).
+type unsafeDecNakedWrapper struct {
+	fauxUnion
+	ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above
+}
+
+// init builds the cached reflect.Values once, each pointing at n's own field,
+// so they carry the correct type and flag bits for that field.
+func (n *unsafeDecNakedWrapper) init() {
+	n.ru = rv4iptr(&n.u).Elem()
+	n.ri = rv4iptr(&n.i).Elem()
+	n.rf = rv4iptr(&n.f).Elem()
+	n.rl = rv4iptr(&n.l).Elem()
+	n.rs = rv4iptr(&n.s).Elem()
+	n.rt = rv4iptr(&n.t).Elem()
+	n.rb = rv4iptr(&n.b).Elem()
+	// n.rr[] = reflect.ValueOf(&n.)
+}
+
+// defUnsafeDecNakedWrapper is the global prototype whose reflect.Values
+// are copied (and re-pointed) by the fauxUnion accessors below.
+var defUnsafeDecNakedWrapper unsafeDecNakedWrapper
+
+func init() {
+	defUnsafeDecNakedWrapper.init()
+}
+
+// Each accessor below copies the prototype reflect.Value (which already has
+// the right type and flags) and just repoints its ptr at n's field.
+func (n *fauxUnion) ru() (v reflect.Value) {
+	v = defUnsafeDecNakedWrapper.ru
+	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.u)
+	return
+}
+func (n *fauxUnion) ri() (v reflect.Value) {
+	v = defUnsafeDecNakedWrapper.ri
+	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.i)
+	return
+}
+func (n *fauxUnion) rf() (v reflect.Value) {
+	v = defUnsafeDecNakedWrapper.rf
+	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.f)
+	return
+}
+func (n *fauxUnion) rl() (v reflect.Value) {
+	v = defUnsafeDecNakedWrapper.rl
+	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.l)
+	return
+}
+func (n *fauxUnion) rs() (v reflect.Value) {
+	v = defUnsafeDecNakedWrapper.rs
+	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.s)
+	return
+}
+func (n *fauxUnion) rt() (v reflect.Value) {
+	v = defUnsafeDecNakedWrapper.rt
+	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.t)
+	return
+}
+func (n *fauxUnion) rb() (v reflect.Value) {
+	v = defUnsafeDecNakedWrapper.rb
+	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.b)
+	return
+}
+
+// --------------------------
+// The rvSet* functions below write v directly into rv's underlying memory.
+// They assume rv is addressable and of the matching kind (so urv.ptr is a
+// valid pointer to a value of that type), bypassing reflect.Value.Set.
+func rvSetBytes(rv reflect.Value, v []byte) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*[]byte)(urv.ptr) = v
+}
+
+func rvSetString(rv reflect.Value, v string) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*string)(urv.ptr) = v
+}
+
+func rvSetBool(rv reflect.Value, v bool) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*bool)(urv.ptr) = v
+}
+
+func rvSetTime(rv reflect.Value, v time.Time) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*time.Time)(urv.ptr) = v
+}
+
+func rvSetFloat32(rv reflect.Value, v float32) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*float32)(urv.ptr) = v
+}
+
+func rvSetFloat64(rv reflect.Value, v float64) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*float64)(urv.ptr) = v
+}
+
+func rvSetComplex64(rv reflect.Value, v complex64) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*complex64)(urv.ptr) = v
+}
+
+func rvSetComplex128(rv reflect.Value, v complex128) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*complex128)(urv.ptr) = v
+}
+
+func rvSetInt(rv reflect.Value, v int) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*int)(urv.ptr) = v
+}
+
+func rvSetInt8(rv reflect.Value, v int8) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*int8)(urv.ptr) = v
+}
+
+func rvSetInt16(rv reflect.Value, v int16) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*int16)(urv.ptr) = v
+}
+
+func rvSetInt32(rv reflect.Value, v int32) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*int32)(urv.ptr) = v
+}
+
+func rvSetInt64(rv reflect.Value, v int64) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*int64)(urv.ptr) = v
+}
+
+func rvSetUint(rv reflect.Value, v uint) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uint)(urv.ptr) = v
+}
+
+func rvSetUintptr(rv reflect.Value, v uintptr) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uintptr)(urv.ptr) = v
+}
+
+func rvSetUint8(rv reflect.Value, v uint8) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uint8)(urv.ptr) = v
+}
+
+func rvSetUint16(rv reflect.Value, v uint16) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uint16)(urv.ptr) = v
+}
+
+func rvSetUint32(rv reflect.Value, v uint32) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uint32)(urv.ptr) = v
+}
+
+func rvSetUint64(rv reflect.Value, v uint64) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	*(*uint64)(urv.ptr) = v
+}
+
+// ----------------
+
+// rvSetZero is rv.Set(reflect.Zero(rv.Type()) for all kinds (including reflect.Interface).
+// rvSetZero is rv.Set(reflect.Zero(rv.Type()) for all kinds (including reflect.Interface).
+func rvSetZero(rv reflect.Value) {
+	rvSetDirectZero(rv)
+}
+
+// rvSetIntf sets v into rv via the regular reflect path (handles interfaces).
+func rvSetIntf(rv reflect.Value, v reflect.Value) {
+	rv.Set(v)
+}
+
+// rvSetDirect is rv.Set for all kinds except reflect.Interface.
+//
+// Callers MUST not pass a value of kind reflect.Interface, as it may cause unexpected segfaults.
+func rvSetDirect(rv reflect.Value, v reflect.Value) {
+	// MARKER: rv.Set for kind reflect.Interface may do a separate allocation if a scalar value.
+	// The book-keeping is onerous, so we just do the simple ones where a memmove is sufficient.
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+	if uv.flag&unsafeFlagIndir == 0 {
+		// v's value is stored directly in the ptr word; copy the word itself
+		*(*unsafe.Pointer)(urv.ptr) = uv.ptr
+	} else if uv.ptr == unsafeZeroAddr {
+		// v is the zero value; clear the destination (if not already zero)
+		if urv.ptr != unsafeZeroAddr {
+			typedmemclr(urv.typ, urv.ptr)
+		}
+	} else {
+		// general case: typed memmove respects write barriers for pointer-bearing types
+		typedmemmove(urv.typ, urv.ptr, uv.ptr)
+	}
+}
+
+// rvSetDirectZero is rv.Set(reflect.Zero(rv.Type()) for all kinds except reflect.Interface.
+func rvSetDirectZero(rv reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	if urv.ptr != unsafeZeroAddr {
+		typedmemclr(urv.typ, urv.ptr)
+	}
+}
+
+// rvMakeSlice updates the slice to point to a new array.
+// It copies data from old slice to new slice.
+// It returns set=true iff it updates it, else it just returns a new slice pointing to a newly made array.
+// rvMakeSlice updates the slice to point to a new array.
+// It copies data from old slice to new slice.
+// It returns set=true iff it updates it, else it just returns a new slice pointing to a newly made array.
+func rvMakeSlice(rv reflect.Value, ti *typeInfo, xlen, xcap int) (_ reflect.Value, set bool) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	ux := (*unsafeSlice)(urv.ptr)
+	t := ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
+	s := unsafeSlice{newarray(t, xcap), xlen, xcap}
+	if ux.Len > 0 {
+		typedslicecopy(t, s, *ux)
+	}
+	// update the slice header in place; rv still refers to it
+	*ux = s
+	return rv, true
+}
+
+// rvSlice returns a sub-slice of the slice given new length,
+// without modifying passed in value.
+// It is typically called when we know that SetLen(...) cannot be done.
+func rvSlice(rv reflect.Value, length int) reflect.Value {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	var x []struct{}
+	ux := (*unsafeSlice)(unsafe.Pointer(&x))
+	// copy the original header, shorten it, and point rv at the copy
+	*ux = *(*unsafeSlice)(urv.ptr)
+	ux.Len = length
+	urv.ptr = unsafe.Pointer(ux)
+	return rv
+}
+
+// rvGrowSlice updates the slice to point to a new array with the cap incremented, and len set to the new cap value.
+// It copies data from old slice to new slice.
+// It returns set=true iff it updates it, else it just returns a new slice pointing to a newly made array.
+func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value, newcap int, set bool) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	ux := (*unsafeSlice)(urv.ptr)
+	t := ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
+	*ux = unsafeGrowslice(t, *ux, cap, incr)
+	ux.Len = ux.Cap
+	return rv, ux.Cap, true
+}
+
+// ------------
+
+// rvSliceIndex returns an addressable reflect.Value for element i of the
+// slice rv, computed directly from the slice's data pointer and elemsize.
+func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+	uv.ptr = unsafe.Pointer(uintptr(((*unsafeSlice)(urv.ptr)).Data) + uintptr(int(ti.elemsize)*i))
+	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
+	uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
+	return
+}
+
+// rvSliceZeroCap returns a reflect.Value for an empty (len=0, cap=0) slice of type t.
+func rvSliceZeroCap(t reflect.Type) (v reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
+	urv.flag = uintptr(reflect.Slice) | unsafeFlagIndir
+	urv.ptr = unsafe.Pointer(&unsafeZeroSlice)
+	return
+}
+
+// rvLenSlice reads the slice length straight from its header.
+func rvLenSlice(rv reflect.Value) int {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return (*unsafeSlice)(urv.ptr).Len
+}
+
+// rvCapSlice reads the slice capacity straight from its header.
+func rvCapSlice(rv reflect.Value) int {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return (*unsafeSlice)(urv.ptr).Cap
+}
+
+// rvArrayIndex returns an addressable reflect.Value for element i of the
+// array rv (urv.ptr points at the array's first element).
+func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+	uv.ptr = unsafe.Pointer(uintptr(urv.ptr) + uintptr(int(ti.elemsize)*i))
+	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
+	uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
+	return
+}
+
+// if scratch is nil, then return a writable view (assuming canAddr=true)
+func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	bx := (*unsafeSlice)(unsafe.Pointer(&bs))
+	bx.Data = urv.ptr
+	bx.Len = rv.Len()
+	bx.Cap = bx.Len
+	return
+}
+
+func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
+	// It is possible that this slice is based off an array with a larger
+	// len that we want (where array len == slice cap).
+	// However, it is ok to create an array type that is a subset of the full
+	// e.g. full slice is based off a *[16]byte, but we can create a *[4]byte
+	// off of it. That is ok.
+	//
+	// Consequently, we use rvLenSlice, not rvCapSlice.
+
+	t := reflectArrayOf(rvLenSlice(rv), rvType(rv).Elem())
+	// v = rvZeroAddrK(t, reflect.Array)
+
+	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+	uv.flag = uintptr(reflect.Array) | unsafeFlagIndir | unsafeFlagAddr
+	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
+
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	uv.ptr = *(*unsafe.Pointer)(urv.ptr) // slice rv has a ptr to the slice.
+
+	return
+}
+
+// rvGetSlice4Array populates *v (a pointer to a slice) with a view over the array rv.
+func rvGetSlice4Array(rv reflect.Value, v interface{}) {
+	// v is a pointer to a slice to be populated
+	uv := (*unsafeIntf)(unsafe.Pointer(&v))
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+
+	s := (*unsafeSlice)(uv.ptr)
+	s.Data = urv.ptr
+	s.Len = rv.Len()
+	s.Cap = s.Len
+}
+
+// rvCopySlice copies src into dest (like the copy builtin), using the
+// typed runtime copy so write barriers are honored for pointer elements.
+func rvCopySlice(dest, src reflect.Value, elemType reflect.Type) {
+	typedslicecopy((*unsafeIntf)(unsafe.Pointer(&elemType)).ptr,
+		*(*unsafeSlice)((*unsafeReflectValue)(unsafe.Pointer(&dest)).ptr),
+		*(*unsafeSlice)((*unsafeReflectValue)(unsafe.Pointer(&src)).ptr))
+}
+
+// ------------
+
+// The rvGet* functions below read the value straight out of rv's memory,
+// assuming rv is of the matching kind (so v.ptr points at a value of that type).
+func rvGetBool(rv reflect.Value) bool {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*bool)(v.ptr)
+}
+
+func rvGetBytes(rv reflect.Value) []byte {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*[]byte)(v.ptr)
+}
+
+func rvGetTime(rv reflect.Value) time.Time {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*time.Time)(v.ptr)
+}
+
+func rvGetString(rv reflect.Value) string {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*string)(v.ptr)
+}
+
+func rvGetFloat64(rv reflect.Value) float64 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*float64)(v.ptr)
+}
+
+func rvGetFloat32(rv reflect.Value) float32 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*float32)(v.ptr)
+}
+
+func rvGetComplex64(rv reflect.Value) complex64 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*complex64)(v.ptr)
+}
+
+func rvGetComplex128(rv reflect.Value) complex128 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*complex128)(v.ptr)
+}
+
+func rvGetInt(rv reflect.Value) int {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*int)(v.ptr)
+}
+
+func rvGetInt8(rv reflect.Value) int8 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*int8)(v.ptr)
+}
+
+func rvGetInt16(rv reflect.Value) int16 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*int16)(v.ptr)
+}
+
+func rvGetInt32(rv reflect.Value) int32 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*int32)(v.ptr)
+}
+
+func rvGetInt64(rv reflect.Value) int64 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*int64)(v.ptr)
+}
+
+func rvGetUint(rv reflect.Value) uint {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*uint)(v.ptr)
+}
+
+func rvGetUint8(rv reflect.Value) uint8 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*uint8)(v.ptr)
+}
+
+func rvGetUint16(rv reflect.Value) uint16 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*uint16)(v.ptr)
+}
+
+func rvGetUint32(rv reflect.Value) uint32 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*uint32)(v.ptr)
+}
+
+func rvGetUint64(rv reflect.Value) uint64 {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*uint64)(v.ptr)
+}
+
+func rvGetUintptr(rv reflect.Value) uintptr {
+	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	return *(*uintptr)(v.ptr)
+}
+
+// rvLenMap returns the number of entries in the map held by rv.
+func rvLenMap(rv reflect.Value) int {
+	// maplen is not inlined, because as of go1.16beta, go:linkname's are not inlined.
+	// thus, faster to call rv.Len() directly.
+	//
+	// MARKER: review after https://github.com/golang/go/issues/20019 fixed.
+
+	// return rv.Len()
+
+	return len_map(rvRefPtr((*unsafeReflectValue)(unsafe.Pointer(&rv))))
+}
+
+// Note: it is hard to find len(...) of an array type,
+// as that is a field in the arrayType representing the array, and hard to introspect.
+//
+// func rvLenArray(rv reflect.Value) int { return rv.Len() }
+
+// ------------ map range and map indexing ----------
+
+// regular calls to map via reflection: MapKeys, MapIndex, MapRange/MapIter etc
+// will always allocate for each map key or value.
+//
+// It is more performant to provide a value that the map entry is set into,
+// and that elides the allocation.
+
+// go 1.4+ has runtime/hashmap.go or runtime/map.go which has a
+// hIter struct with the first 2 values being key and value
+// of the current iteration.
+//
+// This *hIter is passed to mapiterinit, mapiternext, mapiterkey, mapiterelem.
+// We bypass the reflect wrapper functions and just use the *hIter directly.
+//
+// Though *hIter has many fields, we only care about the first 2.
+//
+// We directly embed this in unsafeMapIter below
+//
+// hiter is typically about 12 words, but we just fill up unsafeMapIter to 32 words,
+// so it fills multiple cache lines and can give some extra space to accommodate small growth.
+
+// unsafeMapIter iterates a map via the runtime's hiter, avoiding the
+// per-entry allocations of reflect's MapKeys/MapIndex/MapRange.
+type unsafeMapIter struct {
+	mtyp, mptr unsafe.Pointer
+	k, v reflect.Value
+	kisref bool
+	visref bool
+	mapvalues bool
+	done bool
+	started bool
+	_ [3]byte // padding
+	it struct {
+		key unsafe.Pointer
+		value unsafe.Pointer
+		_ [20]uintptr // padding for other fields (to make up 32 words for enclosing struct)
+	}
+}
+
+// Next advances the iterator, filling t.k (and t.v if mapvalues) with the
+// current entry. It returns false when iteration is finished.
+func (t *unsafeMapIter) Next() (r bool) {
+	if t == nil || t.done {
+		return
+	}
+	// the first call after mapiterinit already has an entry loaded
+	if t.started {
+		mapiternext((unsafe.Pointer)(&t.it))
+	} else {
+		t.started = true
+	}
+
+	// a nil key signals end of iteration
+	t.done = t.it.key == nil
+	if t.done {
+		return
+	}
+
+	// for reference kinds (or when direct assignment is allowed), point at the
+	// map's entry directly; otherwise copy the entry into our own storage.
+	if helperUnsafeDirectAssignMapEntry || t.kisref {
+		(*unsafeReflectValue)(unsafe.Pointer(&t.k)).ptr = t.it.key
+	} else {
+		k := (*unsafeReflectValue)(unsafe.Pointer(&t.k))
+		typedmemmove(k.typ, k.ptr, t.it.key)
+	}
+
+	if t.mapvalues {
+		if helperUnsafeDirectAssignMapEntry || t.visref {
+			(*unsafeReflectValue)(unsafe.Pointer(&t.v)).ptr = t.it.value
+		} else {
+			v := (*unsafeReflectValue)(unsafe.Pointer(&t.v))
+			typedmemmove(v.typ, v.ptr, t.it.value)
+		}
+	}
+
+	return true
+}
+
+// Key returns the current entry's key (valid after a true Next).
+func (t *unsafeMapIter) Key() (r reflect.Value) {
+	return t.k
+}
+
+// Value returns the current entry's value (valid only if mapvalues was set).
+func (t *unsafeMapIter) Value() (r reflect.Value) {
+	return t.v
+}
+
+// Done is a no-op; nothing to release in unsafe mode.
+func (t *unsafeMapIter) Done() {}
+
+type mapIter struct {
+	unsafeMapIter
+}
+
+// mapRange initializes t to iterate map m, using k (and v if mapvalues)
+// as reusable destination values (typically from mapAddrLoopvarRV).
+func mapRange(t *mapIter, m, k, v reflect.Value, mapvalues bool) {
+	if rvIsNil(m) {
+		t.done = true
+		return
+	}
+	t.done = false
+	t.started = false
+	t.mapvalues = mapvalues
+
+	// var urv *unsafeReflectValue
+
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&m))
+	t.mtyp = urv.typ
+	t.mptr = rvRefPtr(urv)
+
+	// t.it = (*unsafeMapHashIter)(reflect_mapiterinit(t.mtyp, t.mptr))
+	mapiterinit(t.mtyp, t.mptr, unsafe.Pointer(&t.it))
+
+	t.k = k
+	t.kisref = refBitset.isset(byte(k.Kind()))
+
+	if mapvalues {
+		t.v = v
+		t.visref = refBitset.isset(byte(v.Kind()))
+	} else {
+		t.v = reflect.Value{}
+	}
+}
+
+// unsafeMapKVPtr returns the pointer if flagIndir, else it returns a pointer to the pointer.
+// It is needed as maps always keep a reference to the underlying value.
+// unsafeMapKVPtr returns the pointer if flagIndir, else it returns a pointer to the pointer.
+// It is needed as maps always keep a reference to the underlying value.
+func unsafeMapKVPtr(urv *unsafeReflectValue) unsafe.Pointer {
+	if urv.flag&unsafeFlagIndir == 0 {
+		return unsafe.Pointer(&urv.ptr)
+	}
+	return urv.ptr
+}
+
+// func mapDelete(m, k reflect.Value) {
+// 	var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
+// 	var kptr = unsafeMapKVPtr(urv)
+// 	urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
+// 	mapdelete(urv.typ, rv2ptr(urv), kptr)
+// }
+
+// return an addressable reflect value that can be used in mapRange and mapGet operations.
+//
+// all calls to mapGet or mapRange will call here to get an addressable reflect.Value.
+func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
+	// return rvZeroAddrK(t, k)
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
+	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
+	// since we always set the ptr when helperUnsafeDirectAssignMapEntry=true,
+	// we should only allocate if it is not true
+	if !helperUnsafeDirectAssignMapEntry {
+		urv.ptr = unsafeNew(urv.typ)
+	}
+	return
+}
+
+// ---------- ENCODER optimized ---------------
+
+// jsondriver returns the concrete *jsonEncDriver behind e.e,
+// extracted from the interface word without a type assertion.
+func (e *Encoder) jsondriver() *jsonEncDriver {
+	return (*jsonEncDriver)((*unsafeIntf)(unsafe.Pointer(&e.e)).ptr)
+}
+
+// zerocopystate reports whether the last bytes read can be safely viewed
+// without copying (zero-copy decoding is active and permitted).
+func (d *Decoder) zerocopystate() bool {
+	return d.decByteState == decByteStateZerocopy && d.h.ZeroCopy
+}
+
+// stringZC converts v to a string, avoiding the copy when zero-copy is allowed.
+func (d *Decoder) stringZC(v []byte) (s string) {
+	if d.zerocopystate() {
+		return stringView(v)
+	}
+	return d.string(v)
+}
+
+// mapKeyString returns a string view of *kstr2bs for use as a map key,
+// first copying it into *kstrbs if the bytes live in a reusable buffer.
+func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string {
+	if !d.zerocopystate() {
+		*callFnRvk = true
+		if d.decByteState == decByteStateReuseBuf {
+			*kstrbs = append((*kstrbs)[:0], (*kstr2bs)...)
+			*kstr2bs = *kstrbs
+		}
+	}
+	return stringView(*kstr2bs)
+}
+
+// ---------- DECODER optimized ---------------
+
+// checkBreak delegates to the format driver's CheckBreak.
+func (d *Decoder) checkBreak() bool {
+	// MARKER: jsonDecDriver.CheckBreak() costs over 80, and this isn't inlined.
+	// Consequently, there's no benefit in incurring the cost of this
+	// wrapping function checkBreak.
+	//
+	// It is faster to just call the interface method directly.
+
+	// if d.js {
+	// 	return d.jsondriver().CheckBreak()
+	// }
+	// if d.cbor {
+	// 	return d.cbordriver().CheckBreak()
+	// }
+	return d.d.CheckBreak()
+}
+
+// jsondriver returns the concrete *jsonDecDriver behind d.d,
+// extracted from the interface word without a type assertion.
+func (d *Decoder) jsondriver() *jsonDecDriver {
+	return (*jsonDecDriver)((*unsafeIntf)(unsafe.Pointer(&d.d)).ptr)
+}
+
+// ---------- structFieldInfo optimized ---------------
+
+// rvField returns the reflect.Value for this field of struct v, computed
+// directly from the field's offset and type (no reflect.Value.Field call).
+func (n *structFieldInfoPathNode) rvField(v reflect.Value) (rv reflect.Value) {
+	// we already know this is exported, and maybe embedded (based on what si says)
+	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
+	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+	// clear flagEmbedRO if necessary, and inherit permission bits from v
+	urv.flag = uv.flag&(unsafeFlagStickyRO|unsafeFlagIndir|unsafeFlagAddr) | uintptr(n.kind)
+	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&n.typ))).ptr
+	urv.ptr = unsafe.Pointer(uintptr(uv.ptr) + uintptr(n.offset))
+	return
+}
+
+// runtime chan and map are designed such that the first field is the count.
+// len builtin uses this to get the length of a chan/map easily.
+// leverage this knowledge, since maplen and chanlen functions from runtime package
+// are go:linkname'd here, and thus not inlined as of go1.16beta
+
+// len_map_chan reads the count field, which is the first word of both the
+// runtime's map (hmap) and chan (hchan) structures (see comment above).
+func len_map_chan(m unsafe.Pointer) int {
+	if m == nil {
+		return 0
+	}
+	return *((*int)(m))
+}
+
+// len_map returns the number of entries in the map pointed to by m.
+func len_map(m unsafe.Pointer) int {
+	// return maplen(m)
+	return len_map_chan(m)
+}
+
+// len_chan returns the number of elements queued in the chan pointed to by m.
+func len_chan(m unsafe.Pointer) int {
+	// return chanlen(m)
+	return len_map_chan(m)
+}
+
+// unsafeNew allocates a zeroed value of the given runtime type.
+func unsafeNew(typ unsafe.Pointer) unsafe.Pointer {
+	return mallocgc(rtsize2(typ), typ, true)
+}
+
+// ---------- go linknames (LINKED to runtime/reflect) ---------------
+
+// MARKER: always check that these linknames match subsequent versions of go
+//
+// Note that as of Jan 2021 (go 1.16 release), go:linkname(s) are not inlined
+// outside of the standard library use (e.g. within sync, reflect, etc).
+// If these link'ed functions were normally inlined, calling them here would
+// not necessarily give a performance boost, due to function overhead.
+//
+// However, it seems most of these functions are not inlined anyway,
+// as only maplen, chanlen and mapaccess are small enough to get inlined.
+//
+// We checked this by going into $GOROOT/src/runtime and running:
+// $ go build -tags codec.notfastpath -gcflags "-m=2"
+
+// reflect.{unsafe_New, unsafe_NewArray} are not supported in gollvm,
+// failing with "error: undefined reference" error.
+// however, runtime.{mallocgc, newarray} are supported, so use that instead.
+
+// mallocgc allocates size bytes of type typ (zeroed if needzero).
+//go:linkname mallocgc runtime.mallocgc
+//go:noescape
+func mallocgc(size uintptr, typ unsafe.Pointer, needzero bool) unsafe.Pointer
+
+// newarray allocates a zeroed array of n elements of type typ.
+//go:linkname newarray runtime.newarray
+//go:noescape
+func newarray(typ unsafe.Pointer, n int) unsafe.Pointer
+
+// mapiterinit initializes the hiter at it for iterating map m of type typ.
+//go:linkname mapiterinit runtime.mapiterinit
+//go:noescape
+func mapiterinit(typ unsafe.Pointer, m unsafe.Pointer, it unsafe.Pointer)
+
+// mapiternext advances the hiter at it to the next entry.
+//go:linkname mapiternext runtime.mapiternext
+//go:noescape
+func mapiternext(it unsafe.Pointer) (key unsafe.Pointer)
+
+//go:linkname mapdelete runtime.mapdelete
+//go:noescape
+func mapdelete(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer)
+
+// mapassign returns a pointer to the (possibly new) value slot for key.
+//go:linkname mapassign runtime.mapassign
+//go:noescape
+func mapassign(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
+
+// mapaccess2 returns a pointer to the value slot for key, and whether it exists.
+//go:linkname mapaccess2 runtime.mapaccess2
+//go:noescape
+func mapaccess2(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer, ok bool)
+
+// reflect.typed{memmove, memclr, slicecopy} will handle checking if the type has pointers or not,
+// and if a writeBarrier is needed, before delegating to the right method in the runtime.
+//
+// This is why we use the functions in reflect, and not the ones in runtime directly.
+// Calling runtime.XXX here will lead to memory issues.
+
+//go:linkname typedslicecopy reflect.typedslicecopy
+//go:noescape
+func typedslicecopy(elemType unsafe.Pointer, dst, src unsafeSlice) int
+
+//go:linkname typedmemmove reflect.typedmemmove
+//go:noescape
+func typedmemmove(typ unsafe.Pointer, dst, src unsafe.Pointer)
+
+//go:linkname typedmemclr reflect.typedmemclr
+//go:noescape
+func typedmemclr(typ unsafe.Pointer, dst unsafe.Pointer)
diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_gc.go b/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_gc.go
new file mode 100644
index 000000000..1cbce9d22
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_gc.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !safe && !codec.safe && !appengine && go1.9 && gc
+// +build !safe,!codec.safe,!appengine,go1.9,gc
+
+package codec
+
+import (
+ "reflect"
+ _ "runtime" // needed for go linkname(s)
+ "unsafe"
+)
+
+// keep in sync with
+// $GOROOT/src/cmd/compile/internal/gc/reflect.go: MAXKEYSIZE, MAXELEMSIZE
+// $GOROOT/src/runtime/map.go: maxKeySize, maxElemSize
+// $GOROOT/src/reflect/type.go: maxKeySize, maxElemSize
+//
+// We use these to determine whether the type is stored indirectly in the map or not.
+const (
+	// mapMaxKeySize = 128
+	mapMaxElemSize = 128
+)
+
+// unsafeGrowslice grows old to hold cap+incr elements via runtime.growslice
+// (available under the gc compiler).
+func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (v unsafeSlice) {
+	return growslice(typ, old, cap+incr)
+}
+
+// rvType returns rv's reflect.Type from its raw type pointer,
+// avoiding the extra work in reflect.Value.Type().
+func rvType(rv reflect.Value) reflect.Type {
+	return rvPtrToType(((*unsafeReflectValue)(unsafe.Pointer(&rv))).typ) // rv.Type()
+}
+
+// mapStoresElemIndirect tells if the element type is stored indirectly in the map.
+//
+// This is used to determine valIsIndirect which is passed into mapSet/mapGet calls.
+//
+// If valIsIndirect doesn't matter, then just return false and ignore the value
+// passed in mapGet/mapSet calls
+func mapStoresElemIndirect(elemsize uintptr) bool {
+	return elemsize > mapMaxElemSize
+}
+
+// mapSet stores v under key k in map m, using the runtime's mapassign
+// (or a mapassign_fastXXX variant selected by keyFastKind) and then
+// copying the value into the returned slot.
+func mapSet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, valIsRef bool) {
+	var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
+	var kptr = unsafeMapKVPtr(urv)
+	urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
+	var vtyp = urv.typ
+	var vptr = unsafeMapKVPtr(urv)
+
+	urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
+	mptr := rvRefPtr(urv)
+
+	var vvptr unsafe.Pointer
+
+	// mapassign_fastXXX don't take indirect into account.
+	// It was hard to infer what makes it work all the time.
+	// Sometimes, we got vvptr == nil when we dereferenced vvptr (if valIsIndirect).
+	// Consequently, only use fastXXX functions if !valIsIndirect
+
+	if valIsIndirect {
+		vvptr = mapassign(urv.typ, mptr, kptr)
+		typedmemmove(vtyp, vvptr, vptr)
+		// reflect_mapassign(urv.typ, mptr, kptr, vptr)
+		return
+	}
+
+	switch keyFastKind {
+	case mapKeyFastKind32:
+		vvptr = mapassign_fast32(urv.typ, mptr, *(*uint32)(kptr))
+	case mapKeyFastKind32ptr:
+		vvptr = mapassign_fast32ptr(urv.typ, mptr, *(*unsafe.Pointer)(kptr))
+	case mapKeyFastKind64:
+		vvptr = mapassign_fast64(urv.typ, mptr, *(*uint64)(kptr))
+	case mapKeyFastKind64ptr:
+		vvptr = mapassign_fast64ptr(urv.typ, mptr, *(*unsafe.Pointer)(kptr))
+	case mapKeyFastKindStr:
+		vvptr = mapassign_faststr(urv.typ, mptr, *(*string)(kptr))
+	default:
+		vvptr = mapassign(urv.typ, mptr, kptr)
+	}
+
+	// if keyFastKind != 0 && valIsIndirect {
+	// 	vvptr = *(*unsafe.Pointer)(vvptr)
+	// }
+
+	typedmemmove(vtyp, vvptr, vptr)
+}
+
+// mapGet looks up key k in map m and, if found, loads the entry into v
+// (either aliasing the map's slot or copying it, as appropriate),
+// returning v. It returns the zero Value if the key is absent.
+func mapGet(m, k, v reflect.Value, keyFastKind mapKeyFastKind, valIsIndirect, valIsRef bool) (_ reflect.Value) {
+	var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
+	var kptr = unsafeMapKVPtr(urv)
+	urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
+	mptr := rvRefPtr(urv)
+
+	var vvptr unsafe.Pointer
+	var ok bool
+
+	// Note that mapaccess2_fastXXX functions do not check if the value needs to be copied.
+	// if they do, we should dereference the pointer and return that
+
+	switch keyFastKind {
+	case mapKeyFastKind32, mapKeyFastKind32ptr:
+		vvptr, ok = mapaccess2_fast32(urv.typ, mptr, *(*uint32)(kptr))
+	case mapKeyFastKind64, mapKeyFastKind64ptr:
+		vvptr, ok = mapaccess2_fast64(urv.typ, mptr, *(*uint64)(kptr))
+	case mapKeyFastKindStr:
+		vvptr, ok = mapaccess2_faststr(urv.typ, mptr, *(*string)(kptr))
+	default:
+		vvptr, ok = mapaccess2(urv.typ, mptr, kptr)
+	}
+
+	if !ok {
+		return
+	}
+
+	urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
+
+	if keyFastKind != 0 && valIsIndirect {
+		// fast-path accessors return the slot holding the pointer; dereference it
+		urv.ptr = *(*unsafe.Pointer)(vvptr)
+	} else if helperUnsafeDirectAssignMapEntry || valIsRef {
+		// alias the map's entry directly
+		urv.ptr = vvptr
+	} else {
+		// copy the entry into v's own storage
+		typedmemmove(urv.typ, urv.ptr, vvptr)
+	}
+
+	return v
+}
+
+// unsafeZeroArr is linked to the runtime's shared zero-value region.
+//go:linkname unsafeZeroArr runtime.zeroVal
+var unsafeZeroArr [1024]byte
+
+// rvPtrToType converts a raw *rtype pointer back to a reflect.Type.
+//go:linkname rvPtrToType reflect.toType
+//go:noescape
+func rvPtrToType(typ unsafe.Pointer) reflect.Type
+
+// growslice returns a slice of type typ with at least the requested cap,
+// with old's elements copied in.
+//go:linkname growslice runtime.growslice
+//go:noescape
+func growslice(typ unsafe.Pointer, old unsafeSlice, cap int) unsafeSlice
+
+//go:linkname mapassign_fast32 runtime.mapassign_fast32
+//go:noescape
+func mapassign_fast32(typ unsafe.Pointer, m unsafe.Pointer, key uint32) unsafe.Pointer
+
+//go:linkname mapassign_fast32ptr runtime.mapassign_fast32ptr
+//go:noescape
+func mapassign_fast32ptr(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
+
+//go:linkname mapassign_fast64 runtime.mapassign_fast64
+//go:noescape
+func mapassign_fast64(typ unsafe.Pointer, m unsafe.Pointer, key uint64) unsafe.Pointer
+
+//go:linkname mapassign_fast64ptr runtime.mapassign_fast64ptr
+//go:noescape
+func mapassign_fast64ptr(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
+
+//go:linkname mapassign_faststr runtime.mapassign_faststr
+//go:noescape
+func mapassign_faststr(typ unsafe.Pointer, m unsafe.Pointer, s string) unsafe.Pointer
+
+//go:linkname mapaccess2_fast32 runtime.mapaccess2_fast32
+//go:noescape
+func mapaccess2_fast32(typ unsafe.Pointer, m unsafe.Pointer, key uint32) (val unsafe.Pointer, ok bool)
+
+//go:linkname mapaccess2_fast64 runtime.mapaccess2_fast64
+//go:noescape
+func mapaccess2_fast64(typ unsafe.Pointer, m unsafe.Pointer, key uint64) (val unsafe.Pointer, ok bool)
+
+//go:linkname mapaccess2_faststr runtime.mapaccess2_faststr
+//go:noescape
+func mapaccess2_faststr(typ unsafe.Pointer, m unsafe.Pointer, key string) (val unsafe.Pointer, ok bool)
diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_not_gc.go b/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_not_gc.go
new file mode 100644
index 000000000..bd9fdedb6
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_not_gc.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !safe && !codec.safe && !appengine && go1.9 && !gc
+// +build !safe,!codec.safe,!appengine,go1.9,!gc
+
+package codec
+
+import (
+ "reflect"
+ _ "runtime" // needed for go linkname(s)
+ "unsafe"
+)
+
+var unsafeZeroArr [1024]byte
+
+// runtime.growslice does not work with gccgo, failing with "growslice: cap out of range" error.
+// consequently, we just call newarray followed by typedslicecopy directly.
+
// unsafeGrowslice grows the slice described by the raw header `old` (element
// type described by typ) by incr elements, returning the new header.
// On this non-gc build, runtime.growslice is unusable (fails with
// "growslice: cap out of range"), so it allocates via newarray and copies
// with typedslicecopy instead.
func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (v unsafeSlice) {
	size := rtsize2(typ)
	if size == 0 {
		// zero-size elements need no backing store: point at the shared zero array.
		return unsafeSlice{unsafe.Pointer(&unsafeZeroArr[0]), old.Len, cap + incr}
	}
	newcap := int(growCap(uint(cap), uint(size), uint(incr)))
	v = unsafeSlice{Data: newarray(typ, newcap), Len: old.Len, Cap: newcap}
	if old.Len > 0 {
		// typedslicecopy keeps the GC informed about pointers in the elements.
		typedslicecopy(typ, v, old)
	}
	// memmove(v.Data, old.Data, size*uintptr(old.Len))
	return
}
+
+// func unsafeNew(t reflect.Type, typ unsafe.Pointer) unsafe.Pointer {
+// rv := reflect.New(t)
+// return ((*unsafeReflectValue)(unsafe.Pointer(&rv))).ptr
+// }
+
+// runtime.{mapassign_fastXXX, mapaccess2_fastXXX} are not supported in gollvm,
+// failing with "error: undefined reference" error.
+// so we just use runtime.{mapassign, mapaccess2} directly
+
// mapStoresElemIndirect reports whether map entries of the given element
// size are stored indirectly. On this (non-gc compiler) build it is
// unconditionally false.
func mapStoresElemIndirect(elemsize uintptr) bool {
	return false
}
+
// mapSet stores v under key k in map m using runtime.mapassign directly.
// The fast-path mapassign_fastXXX variants are unavailable here (gollvm fails
// with "undefined reference"), so the fast-kind parameters are ignored.
func mapSet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) {
	// extract raw key pointer from the reflect.Value header
	var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
	var kptr = unsafeMapKVPtr(urv)
	// extract raw value pointer and its type descriptor
	urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
	var vtyp = urv.typ
	var vptr = unsafeMapKVPtr(urv)

	urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
	mptr := rvRefPtr(urv)

	// mapassign returns the slot for the key; copy the value into it.
	vvptr := mapassign(urv.typ, mptr, kptr)
	typedmemmove(vtyp, vvptr, vptr)
}
+
// mapGet looks up key k in map m via runtime.mapaccess2, writing the result
// into v and returning it. Returns the zero reflect.Value when the key is
// absent. The fast-kind parameters are ignored (fast paths unsupported on
// this build — see mapSet).
func mapGet(m, k, v reflect.Value, _ mapKeyFastKind, _, valIsRef bool) (_ reflect.Value) {
	var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
	var kptr = unsafeMapKVPtr(urv)
	urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
	mptr := rvRefPtr(urv)

	vvptr, ok := mapaccess2(urv.typ, mptr, kptr)

	if !ok {
		// key not present: return the zero reflect.Value
		return
	}

	urv = (*unsafeReflectValue)(unsafe.Pointer(&v))

	if helperUnsafeDirectAssignMapEntry || valIsRef {
		// alias the map entry directly into v's header
		urv.ptr = vvptr
	} else {
		// copy the entry out so v does not alias map internals
		typedmemmove(urv.typ, urv.ptr, vvptr)
	}

	return v
}
diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go
new file mode 100644
index 000000000..8bd151f90
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/json.go
@@ -0,0 +1,1457 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// By default, this json support uses base64 encoding for bytes, because you cannot
+// store and read any arbitrary string in json (only unicode).
+// However, the user can configre how to encode/decode bytes.
+//
+// This library specifically supports UTF-8 for encoding and decoding only.
+//
+// Note that the library will happily encode/decode things which are not valid
+// json e.g. a map[int64]string. We do it for consistency. With valid json,
+// we will encode and decode appropriately.
+// Users can specify their map type if necessary to force it.
+//
+// We cannot use strconv.(Q|Unq)uote because json quotes/unquotes differently.
+
+import (
+ "bytes"
+ "encoding/base64"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+//--------------------------------
+
+var jsonLiterals = [...]byte{
+ '"', 't', 'r', 'u', 'e', '"',
+ '"', 'f', 'a', 'l', 's', 'e', '"',
+ '"', 'n', 'u', 'l', 'l', '"',
+}
+
+const (
+ jsonLitTrueQ = 0
+ jsonLitTrue = 1
+ jsonLitFalseQ = 6
+ jsonLitFalse = 7
+ jsonLitNullQ = 13
+ jsonLitNull = 14
+)
+
+var (
+ // jsonLiteralTrueQ = jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6]
+ // jsonLiteralFalseQ = jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7]
+ // jsonLiteralNullQ = jsonLiterals[jsonLitNullQ : jsonLitNullQ+6]
+
+ jsonLiteralTrue = jsonLiterals[jsonLitTrue : jsonLitTrue+4]
+ jsonLiteralFalse = jsonLiterals[jsonLitFalse : jsonLitFalse+5]
+ jsonLiteralNull = jsonLiterals[jsonLitNull : jsonLitNull+4]
+
+ // these are used, after consuming the first char
+ jsonLiteral4True = jsonLiterals[jsonLitTrue+1 : jsonLitTrue+4]
+ jsonLiteral4False = jsonLiterals[jsonLitFalse+1 : jsonLitFalse+5]
+ jsonLiteral4Null = jsonLiterals[jsonLitNull+1 : jsonLitNull+4]
+)
+
+const (
+ jsonU4Chk2 = '0'
+ jsonU4Chk1 = 'a' - 10
+ jsonU4Chk0 = 'A' - 10
+)
+
+const (
+ // If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
+ // - If we see first character of null, false or true,
+ // do not validate subsequent characters.
+ // - e.g. if we see a n, assume null and skip next 3 characters,
+ // and do not validate they are ull.
+ // P.S. Do not expect a significant decoding boost from this.
+ jsonValidateSymbols = true
+
+ // jsonEscapeMultiByteUnicodeSep controls whether some unicode characters
+ // that are valid json but may bomb in some contexts are escaped during encoeing.
+ //
+ // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
+ // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally.
+ jsonEscapeMultiByteUnicodeSep = true
+
+ // jsonManualInlineDecRdInHotZones controls whether we manually inline some decReader calls.
+ //
+ // encode performance is at par with libraries that just iterate over bytes directly,
+ // because encWr (with inlined bytesEncAppender calls) is inlined.
+ // Conversely, decode performance suffers because decRd (with inlined bytesDecReader calls)
+ // isn't inlinable.
+ //
+ // To improve decode performamnce from json:
+ // - readn1 is only called for \u
+ // - consequently, to optimize json decoding, we specifically need inlining
+ // for bytes use-case of some other decReader methods:
+ // - jsonReadAsisChars, skipWhitespace (advance) and jsonReadNum
+ // - AND THEN readn3, readn4 (for ull, rue and alse).
+ // - (readn1 is only called when a char is escaped).
+ // - without inlining, we still pay the cost of a method invocationK, and this dominates time
+ // - To mitigate, we manually inline in hot zones
+ // *excluding places where used sparingly (e.g. nextValueBytes, and other atypical cases)*.
+ // - jsonReadAsisChars *only* called in: appendStringAsBytes
+ // - advance called: everywhere
+ // - jsonReadNum: decNumBytes, DecodeNaked
+ // - From running go test (our anecdotal findings):
+ // - calling jsonReadAsisChars in appendStringAsBytes: 23431
+ // - calling jsonReadNum in decNumBytes: 15251
+ // - calling jsonReadNum in DecodeNaked: 612
+ // Consequently, we manually inline jsonReadAsisChars (in appendStringAsBytes)
+ // and jsonReadNum (in decNumbytes)
+ jsonManualInlineDecRdInHotZones = true
+
+ jsonSpacesOrTabsLen = 128
+
+ // jsonAlwaysReturnInternString = false
+)
+
+var (
+ // jsonTabs and jsonSpaces are used as caches for indents
+ jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte
+
+ jsonCharHtmlSafeSet bitset256
+ jsonCharSafeSet bitset256
+)
+
// init fills the indentation caches and the character-safety bitsets used by
// the JSON string encoder.
func init() {
	var i byte
	for i = 0; i < jsonSpacesOrTabsLen; i++ {
		jsonSpaces[i] = ' '
		jsonTabs[i] = '\t'
	}

	// populate the safe values as true: note: ASCII control characters are (0-31)
	// jsonCharSafeSet: all true except (0-31) " \
	// jsonCharHtmlSafeSet: all true except (0-31) " \ < > &
	for i = 32; i < utf8.RuneSelf; i++ {
		switch i {
		case '"', '\\':
			// never safe: must be escaped in both modes
		case '<', '>', '&':
			// safe for plain JSON but not for HTML-safe output
			jsonCharSafeSet.set(i) // = true
		default:
			jsonCharSafeSet.set(i)
			jsonCharHtmlSafeSet.set(i)
		}
	}
}
+
+// ----------------
+
+type jsonEncState struct {
+ di int8 // indent per: if negative, use tabs
+ d bool // indenting?
+ dl uint16 // indent level
+}
+
+func (x jsonEncState) captureState() interface{} { return x }
+func (x *jsonEncState) restoreState(v interface{}) { *x = v.(jsonEncState) }
+
+type jsonEncDriver struct {
+ noBuiltInTypes
+ h *JsonHandle
+
+ // se interfaceExtWrapper
+
+ // ---- cpu cache line boundary?
+ jsonEncState
+
+ ks bool // map key as string
+ is byte // integer as string
+
+ typical bool
+ rawext bool // rawext configured on the handle
+
+ s *bitset256 // safe set for characters (taking h.HTMLAsIs into consideration)
+
+ // buf *[]byte // used mostly for encoding []byte
+
+ // scratch buffer for: encode time, numbers, etc
+ //
+ // RFC3339Nano uses 35 chars: 2006-01-02T15:04:05.999999999Z07:00
+ // MaxUint64 uses 20 chars: 18446744073709551615
+ // floats are encoded using: f/e fmt, and -1 precision, or 1 if no fractions.
+ // This means we are limited by the number of characters for the
+ // mantissa (up to 17), exponent (up to 3), signs (up to 3), dot (up to 1), E (up to 1)
+ // for a total of 24 characters.
+ // -xxx.yyyyyyyyyyyye-zzz
+ // Consequently, 35 characters should be sufficient for encoding time, integers or floats.
+ // We use up all the remaining bytes to make this use full cache lines.
+ b [56]byte
+
+ e Encoder
+}
+
+func (e *jsonEncDriver) encoder() *Encoder { return &e.e }
+
+func (e *jsonEncDriver) writeIndent() {
+ e.e.encWr.writen1('\n')
+ x := int(e.di) * int(e.dl)
+ if e.di < 0 {
+ x = -x
+ for x > jsonSpacesOrTabsLen {
+ e.e.encWr.writeb(jsonTabs[:])
+ x -= jsonSpacesOrTabsLen
+ }
+ e.e.encWr.writeb(jsonTabs[:x])
+ } else {
+ for x > jsonSpacesOrTabsLen {
+ e.e.encWr.writeb(jsonSpaces[:])
+ x -= jsonSpacesOrTabsLen
+ }
+ e.e.encWr.writeb(jsonSpaces[:x])
+ }
+}
+
// WriteArrayElem writes the ',' separator before an array element
// (skipped for the very first element), plus indentation when pretty-printing.
func (e *jsonEncDriver) WriteArrayElem() {
	if e.e.c != containerArrayStart {
		e.e.encWr.writen1(',')
	}
	if e.d {
		e.writeIndent()
	}
}

// WriteMapElemKey writes the ',' separator before a map key
// (skipped for the very first key), plus indentation when pretty-printing.
func (e *jsonEncDriver) WriteMapElemKey() {
	if e.e.c != containerMapStart {
		e.e.encWr.writen1(',')
	}
	if e.d {
		e.writeIndent()
	}
}

// WriteMapElemValue writes the ':' between a map key and its value,
// followed by a space when pretty-printing.
func (e *jsonEncDriver) WriteMapElemValue() {
	if e.d {
		e.e.encWr.writen2(':', ' ')
	} else {
		e.e.encWr.writen1(':')
	}
}
+
+func (e *jsonEncDriver) EncodeNil() {
+ // We always encode nil as just null (never in quotes)
+ // This allows us to easily decode if a nil in the json stream
+ // ie if initial token is n.
+
+ // e.e.encWr.writeb(jsonLiteralNull)
+ e.e.encWr.writen4([4]byte{'n', 'u', 'l', 'l'})
+}
+
+func (e *jsonEncDriver) EncodeTime(t time.Time) {
+ // Do NOT use MarshalJSON, as it allocates internally.
+ // instead, we call AppendFormat directly, using our scratch buffer (e.b)
+
+ if t.IsZero() {
+ e.EncodeNil()
+ } else {
+ e.b[0] = '"'
+ b := fmtTime(t, time.RFC3339Nano, e.b[1:1])
+ e.b[len(b)+1] = '"'
+ e.e.encWr.writeb(e.b[:len(b)+2])
+ }
+}
+
+func (e *jsonEncDriver) EncodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
+ if ext == SelfExt {
+ e.e.encodeValue(baseRV(rv), e.h.fnNoExt(basetype))
+ } else if v := ext.ConvertExt(rv); v == nil {
+ e.EncodeNil()
+ } else {
+ e.e.encode(v)
+ }
+}
+
+func (e *jsonEncDriver) EncodeRawExt(re *RawExt) {
+ // only encodes re.Value (never re.Data)
+ if re.Value == nil {
+ e.EncodeNil()
+ } else {
+ e.e.encode(re.Value)
+ }
+}
+
+func (e *jsonEncDriver) EncodeBool(b bool) {
+ // Use writen with an array instead of writeb with a slice
+ // i.e. in place of e.e.encWr.writeb(jsonLiteralTrueQ)
+ // OR jsonLiteralTrue, jsonLiteralFalse, jsonLiteralFalseQ, etc
+
+ if e.ks && e.e.c == containerMapKey {
+ if b {
+ e.e.encWr.writen4([4]byte{'"', 't', 'r', 'u'})
+ e.e.encWr.writen2('e', '"')
+ } else {
+ e.e.encWr.writen4([4]byte{'"', 'f', 'a', 'l'})
+ e.e.encWr.writen2('s', 'e')
+ e.e.encWr.writen1('"')
+ }
+ } else {
+ if b {
+ e.e.encWr.writen4([4]byte{'t', 'r', 'u', 'e'})
+ } else {
+ e.e.encWr.writen4([4]byte{'f', 'a', 'l', 's'})
+ e.e.encWr.writen1('e')
+ }
+ }
+}
+
+func (e *jsonEncDriver) encodeFloat(f float64, bitsize, fmt byte, prec int8) {
+ var blen uint
+ if e.ks && e.e.c == containerMapKey {
+ blen = 2 + uint(len(strconv.AppendFloat(e.b[1:1], f, fmt, int(prec), int(bitsize))))
+ // _ = e.b[:blen]
+ e.b[0] = '"'
+ e.b[blen-1] = '"'
+ e.e.encWr.writeb(e.b[:blen])
+ } else {
+ e.e.encWr.writeb(strconv.AppendFloat(e.b[:0], f, fmt, int(prec), int(bitsize)))
+ }
+}
+
+func (e *jsonEncDriver) EncodeFloat64(f float64) {
+ if math.IsNaN(f) || math.IsInf(f, 0) {
+ e.EncodeNil()
+ return
+ }
+ fmt, prec := jsonFloatStrconvFmtPrec64(f)
+ e.encodeFloat(f, 64, fmt, prec)
+}
+
+func (e *jsonEncDriver) EncodeFloat32(f float32) {
+ if math.IsNaN(float64(f)) || math.IsInf(float64(f), 0) {
+ e.EncodeNil()
+ return
+ }
+ fmt, prec := jsonFloatStrconvFmtPrec32(f)
+ e.encodeFloat(float64(f), 32, fmt, prec)
+}
+
// encodeUint writes u in decimal, with a leading '-' when neg, and surrounded
// by double quotes when quotes. Digits are emitted two at a time from a
// lookup table into the scratch buffer, filling it from the right.
// Only called on 64-bit platforms (see the cpu32Bit branches in callers).
func (e *jsonEncDriver) encodeUint(neg bool, quotes bool, u uint64) {
	// copied mostly from std library: strconv
	// this should only be called on 64bit OS.

	// smallsString holds the two-digit decimal representation of 0..99.
	const smallsString = "00010203040506070809" +
		"10111213141516171819" +
		"20212223242526272829" +
		"30313233343536373839" +
		"40414243444546474849" +
		"50515253545556575859" +
		"60616263646566676869" +
		"70717273747576777879" +
		"80818283848586878889" +
		"90919293949596979899"

	// typically, 19 or 20 bytes sufficient for decimal encoding a uint64
	// var a [24]byte
	var a = e.b[0:24]
	var i = uint8(len(a))

	if quotes {
		i--
		a[i] = '"' // closing quote (buffer fills right-to-left)
	}
	// u guaranteed to fit into a uint (as we are not 32bit OS)
	var is uint
	var us = uint(u)
	for us >= 100 {
		is = us % 100 * 2
		us /= 100
		i -= 2
		a[i+1] = smallsString[is+1]
		a[i+0] = smallsString[is+0]
	}

	// us < 100: emit the final one or two digits
	is = us * 2
	i--
	a[i] = smallsString[is+1]
	if us >= 10 {
		i--
		a[i] = smallsString[is]
	}
	if neg {
		i--
		a[i] = '-'
	}
	if quotes {
		i--
		a[i] = '"' // opening quote
	}
	e.e.encWr.writeb(a[i:])
}
+
// EncodeInt writes v, quoting it when required: e.is == 'A' (always quote
// integers), e.is == 'L' with |v| above 1<<53 (presumably the float64
// exact-integer limit — where JS consumers would lose precision), or when v
// is a map key and string keys are requested.
func (e *jsonEncDriver) EncodeInt(v int64) {
	quotes := e.is == 'A' || e.is == 'L' && (v > 1<<53 || v < -(1<<53)) ||
		(e.ks && e.e.c == containerMapKey)

	if cpu32Bit {
		// encodeUint requires a 64-bit platform; fall back to strconv here.
		if quotes {
			blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10))
			e.b[0] = '"'
			e.b[blen-1] = '"'
			e.e.encWr.writeb(e.b[:blen])
		} else {
			e.e.encWr.writeb(strconv.AppendInt(e.b[:0], v, 10))
		}
		return
	}

	if v < 0 {
		e.encodeUint(true, quotes, uint64(-v))
	} else {
		e.encodeUint(false, quotes, uint64(v))
	}
}
+
// EncodeUint writes v, quoting it under the same conditions as EncodeInt
// ('A' always, 'L' above 1<<53, or a map key with string keys requested).
func (e *jsonEncDriver) EncodeUint(v uint64) {
	quotes := e.is == 'A' || e.is == 'L' && v > 1<<53 || (e.ks && e.e.c == containerMapKey)

	if cpu32Bit {
		// use strconv directly, as optimized encodeUint only works on 64-bit alone
		if quotes {
			blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10))
			e.b[0] = '"'
			e.b[blen-1] = '"'
			e.e.encWr.writeb(e.b[:blen])
		} else {
			e.e.encWr.writeb(strconv.AppendUint(e.b[:0], v, 10))
		}
		return
	}

	e.encodeUint(false, quotes, v)
}
+
+func (e *jsonEncDriver) EncodeString(v string) {
+ if e.h.StringToRaw {
+ e.EncodeStringBytesRaw(bytesView(v))
+ return
+ }
+ e.quoteStr(v)
+}
+
// EncodeStringBytesRaw writes v as a base64 (StdEncoding) quoted string.
// nil encodes as null; a configured RawBytesExt takes precedence over base64.
func (e *jsonEncDriver) EncodeStringBytesRaw(v []byte) {
	// if encoding raw bytes and RawBytesExt is configured, use it to encode
	if v == nil {
		e.EncodeNil()
		return
	}

	if e.rawext {
		iv := e.h.RawBytesExt.ConvertExt(v)
		if iv == nil {
			e.EncodeNil()
		} else {
			e.e.encode(iv)
		}
		return
	}

	// +2 for the surrounding double quotes
	slen := base64.StdEncoding.EncodedLen(len(v)) + 2

	// bs := e.e.blist.check(*e.buf, n)[:slen]
	// *e.buf = bs

	// borrow a scratch buffer from the encoder's buffer pool
	bs := e.e.blist.peek(slen, false)[:slen]

	bs[0] = '"'
	base64.StdEncoding.Encode(bs[1:], v)
	bs[len(bs)-1] = '"'
	e.e.encWr.writeb(bs)
}
+
+// indent is done as below:
+// - newline and indent are added before each mapKey or arrayElem
+// - newline and indent are added before each ending,
+// except there was no entry (so we can have {} or [])
+
+func (e *jsonEncDriver) WriteArrayStart(length int) {
+ if e.d {
+ e.dl++
+ }
+ e.e.encWr.writen1('[')
+}
+
+func (e *jsonEncDriver) WriteArrayEnd() {
+ if e.d {
+ e.dl--
+ e.writeIndent()
+ }
+ e.e.encWr.writen1(']')
+}
+
+func (e *jsonEncDriver) WriteMapStart(length int) {
+ if e.d {
+ e.dl++
+ }
+ e.e.encWr.writen1('{')
+}
+
+func (e *jsonEncDriver) WriteMapEnd() {
+ if e.d {
+ e.dl--
+ if e.e.c != containerMapStart {
+ e.writeIndent()
+ }
+ }
+ e.e.encWr.writen1('}')
+}
+
// quoteStr writes s as a double-quoted JSON string, escaping per e.s (the
// handle's safe-character bitset): control chars, quote, backslash, and —
// unless HTMLCharsAsIs — < > &. Safe runs are written in bulk; only the
// characters needing escapes interrupt the run.
func (e *jsonEncDriver) quoteStr(s string) {
	// adapted from std pkg encoding/json
	const hex = "0123456789abcdef"
	w := e.e.w()
	w.writen1('"')
	// start marks the beginning of the current run of safe bytes;
	// the run [start:i) is flushed whenever an escape is needed.
	var i, start uint
	for i < uint(len(s)) {
		// encode all bytes < 0x20 (except \r, \n).
		// also encode < > & to prevent security holes when served to some browsers.

		// We optimize for ascii, by assumining that most characters are in the BMP
		// and natively consumed by json without much computation.

		// if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
		// if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) {
		if e.s.isset(s[i]) {
			i++
			continue
		}
		// b := s[i]
		if s[i] < utf8.RuneSelf {
			// single-byte char needing an escape: flush the safe run first
			if start < i {
				w.writestr(s[start:i])
			}
			switch s[i] {
			case '\\', '"':
				w.writen2('\\', s[i])
			case '\n':
				w.writen2('\\', 'n')
			case '\r':
				w.writen2('\\', 'r')
			case '\b':
				w.writen2('\\', 'b')
			case '\f':
				w.writen2('\\', 'f')
			case '\t':
				w.writen2('\\', 't')
			default:
				// other control chars (and < > & in html-safe mode): \u00XX
				w.writestr(`\u00`)
				w.writen2(hex[s[i]>>4], hex[s[i]&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRuneInString(s[i:])
		if c == utf8.RuneError && size == 1 { // meaning invalid encoding (so output as-is)
			if start < i {
				w.writestr(s[start:i])
			}
			w.writestr(`\uFFFD`)
			i++
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
		// Both technically valid JSON, but bomb on JSONP, so fix here *unconditionally*.
		if jsonEscapeMultiByteUnicodeSep && (c == '\u2028' || c == '\u2029') {
			if start < i {
				w.writestr(s[start:i])
			}
			w.writestr(`\u202`)
			w.writen1(hex[c&0xF])
			i += uint(size)
			start = i
			continue
		}
		// any other multi-byte rune is emitted as-is (part of the safe run)
		i += uint(size)
	}
	if start < uint(len(s)) {
		w.writestr(s[start:])
	}
	w.writen1('"')
}
+
+func (e *jsonEncDriver) atEndOfEncode() {
+ if e.h.TermWhitespace {
+ var c byte = ' ' // default is that scalar is written, so output space
+ if e.e.c != 0 {
+ c = '\n' // for containers (map/list), output a newline
+ }
+ e.e.encWr.writen1(c)
+ }
+}
+
+// ----------
+
+type jsonDecState struct {
+ rawext bool // rawext configured on the handle
+
+ tok uint8 // used to store the token read right after skipWhiteSpace
+ _ bool // found null
+ _ byte // padding
+ bstr [4]byte // scratch used for string \UXXX parsing
+
+ // scratch buffer used for base64 decoding (DecodeBytes in reuseBuf mode),
+ // or reading doubleQuoted string (DecodeStringAsBytes, DecodeNaked)
+ buf *[]byte
+}
+
+func (x jsonDecState) captureState() interface{} { return x }
+func (x *jsonDecState) restoreState(v interface{}) { *x = v.(jsonDecState) }
+
+type jsonDecDriver struct {
+ noBuiltInTypes
+ decDriverNoopNumberHelper
+ h *JsonHandle
+
+ jsonDecState
+
+ // se interfaceExtWrapper
+
+ // ---- cpu cache line boundary?
+
+ d Decoder
+}
+
+func (d *jsonDecDriver) descBd() (s string) { panic("descBd unsupported") }
+
+func (d *jsonDecDriver) decoder() *Decoder {
+ return &d.d
+}
+
+func (d *jsonDecDriver) ReadMapStart() int {
+ d.advance()
+ if d.tok == 'n' {
+ d.readLit4Null(d.d.decRd.readn3())
+ return containerLenNil
+ }
+ if d.tok != '{' {
+ d.d.errorf("read map - expect char '%c' but got char '%c'", '{', d.tok)
+ }
+ d.tok = 0
+ return containerLenUnknown
+}
+
+func (d *jsonDecDriver) ReadArrayStart() int {
+ d.advance()
+ if d.tok == 'n' {
+ d.readLit4Null(d.d.decRd.readn3())
+ return containerLenNil
+ }
+ if d.tok != '[' {
+ d.d.errorf("read array - expect char '%c' but got char '%c'", '[', d.tok)
+ }
+ d.tok = 0
+ return containerLenUnknown
+}
+
+func (d *jsonDecDriver) CheckBreak() bool {
+ d.advance()
+ return d.tok == '}' || d.tok == ']'
+}
+
+func (d *jsonDecDriver) ReadArrayElem() {
+ const xc uint8 = ','
+ if d.d.c != containerArrayStart {
+ d.advance()
+ if d.tok != xc {
+ d.readDelimError(xc)
+ }
+ d.tok = 0
+ }
+}
+
+func (d *jsonDecDriver) ReadArrayEnd() {
+ const xc uint8 = ']'
+ d.advance()
+ if d.tok != xc {
+ d.readDelimError(xc)
+ }
+ d.tok = 0
+}
+
+func (d *jsonDecDriver) ReadMapElemKey() {
+ const xc uint8 = ','
+ if d.d.c != containerMapStart {
+ d.advance()
+ if d.tok != xc {
+ d.readDelimError(xc)
+ }
+ d.tok = 0
+ }
+}
+
+func (d *jsonDecDriver) ReadMapElemValue() {
+ const xc uint8 = ':'
+ d.advance()
+ if d.tok != xc {
+ d.readDelimError(xc)
+ }
+ d.tok = 0
+}
+
+func (d *jsonDecDriver) ReadMapEnd() {
+ const xc uint8 = '}'
+ d.advance()
+ if d.tok != xc {
+ d.readDelimError(xc)
+ }
+ d.tok = 0
+}
+
+func (d *jsonDecDriver) readDelimError(xc uint8) {
+ d.d.errorf("read json delimiter - expect char '%c' but got char '%c'", xc, d.tok)
+}
+
// MARKER: readLit4XXX takes the readn(3|4) as a parameter so they can be inlined.
// We pass the array directly to errorf, as passing slice pushes past inlining threshold,
// and passing slice also might cause allocation of the bs array on the heap.

// readLit4True validates the "rue" tail of a true literal; the caller has
// already consumed the leading 't'. bs comes from readn3, so bs[0] is 0 padding.
func (d *jsonDecDriver) readLit4True(bs [4]byte) {
	// bs := d.d.decRd.readn3()
	d.tok = 0
	if jsonValidateSymbols && bs != [...]byte{0, 'r', 'u', 'e'} { // !Equal jsonLiteral4True
		// d.d.errorf("expecting %s: got %s", jsonLiteral4True, bs[:])
		d.d.errorf("expecting true: got t%s", bs)
	}
}

// readLit4False validates the "alse" tail of a false literal; the caller has
// already consumed the leading 'f'. bs comes from readn4 (all four bytes used).
func (d *jsonDecDriver) readLit4False(bs [4]byte) {
	// bs := d.d.decRd.readn4()
	d.tok = 0
	if jsonValidateSymbols && bs != [4]byte{'a', 'l', 's', 'e'} { // !Equal jsonLiteral4False
		// d.d.errorf("expecting %s: got %s", jsonLiteral4False, bs)
		d.d.errorf("expecting false: got f%s", bs)
	}
}

// readLit4Null validates the "ull" tail of a null literal; the caller has
// already consumed the leading 'n'. bs comes from readn3, so bs[0] is 0 padding.
func (d *jsonDecDriver) readLit4Null(bs [4]byte) {
	// bs := d.d.decRd.readn3() // readx(3)
	d.tok = 0
	if jsonValidateSymbols && bs != [...]byte{0, 'u', 'l', 'l'} { // !Equal jsonLiteral4Null
		// d.d.errorf("expecting %s: got %s", jsonLiteral4Null, bs[:])
		d.d.errorf("expecting null: got n%s", bs)
	}
}
+
+func (d *jsonDecDriver) advance() {
+ if d.tok == 0 {
+ d.tok = d.d.decRd.skipWhitespace() // skip(&whitespaceCharBitset)
+ }
+}
+
+func (d *jsonDecDriver) nextValueBytes(v []byte) []byte {
+ v, cursor := d.nextValueBytesR(v)
+ decNextValueBytesHelper{d: &d.d}.bytesRdV(&v, cursor)
+ return v
+}
+
+func (d *jsonDecDriver) nextValueBytesR(v0 []byte) (v []byte, cursor uint) {
+ v = v0
+ var h = decNextValueBytesHelper{d: &d.d}
+ dr := &d.d.decRd
+
+ consumeString := func() {
+ TOP:
+ bs := dr.jsonReadAsisChars()
+ h.appendN(&v, bs...)
+ if bs[len(bs)-1] != '"' {
+ // last char is '\', so consume next one and try again
+ h.append1(&v, dr.readn1())
+ goto TOP
+ }
+ }
+
+ d.advance() // ignore leading whitespace
+ cursor = d.d.rb.c - 1 // cursor starts just before non-whitespace token
+
+ switch d.tok {
+ default:
+ h.appendN(&v, dr.jsonReadNum()...)
+ case 'n':
+ d.readLit4Null(d.d.decRd.readn3())
+ h.appendN(&v, jsonLiteralNull...)
+ case 'f':
+ d.readLit4False(d.d.decRd.readn4())
+ h.appendN(&v, jsonLiteralFalse...)
+ case 't':
+ d.readLit4True(d.d.decRd.readn3())
+ h.appendN(&v, jsonLiteralTrue...)
+ case '"':
+ h.append1(&v, '"')
+ consumeString()
+ case '{', '[':
+ var elem struct{}
+ var stack []struct{}
+
+ stack = append(stack, elem)
+
+ h.append1(&v, d.tok)
+
+ for len(stack) != 0 {
+ c := dr.readn1()
+ h.append1(&v, c)
+ switch c {
+ case '"':
+ consumeString()
+ case '{', '[':
+ stack = append(stack, elem)
+ case '}', ']':
+ stack = stack[:len(stack)-1]
+ }
+ }
+ }
+ d.tok = 0
+ return
+}
+
+func (d *jsonDecDriver) TryNil() bool {
+ d.advance()
+ // we shouldn't try to see if quoted "null" was here, right?
+ // only the plain string: `null` denotes a nil (ie not quotes)
+ if d.tok == 'n' {
+ d.readLit4Null(d.d.decRd.readn3())
+ return true
+ }
+ return false
+}
+
// DecodeBool reads a JSON boolean. null decodes to false (the zero value).
// In map-key position a quoted "true"/"false" is accepted; the surrounding
// quotes are consumed here.
func (d *jsonDecDriver) DecodeBool() (v bool) {
	d.advance()
	if d.tok == 'n' {
		d.readLit4Null(d.d.decRd.readn3())
		return
	}
	// quoted bool only recognized in map-key position
	fquot := d.d.c == containerMapKey && d.tok == '"'
	if fquot {
		d.tok = d.d.decRd.readn1() // step past the opening quote
	}
	switch d.tok {
	case 'f':
		d.readLit4False(d.d.decRd.readn4())
		// v = false
	case 't':
		d.readLit4True(d.d.decRd.readn3())
		v = true
	default:
		d.d.errorf("decode bool: got first char %c", d.tok)
		// v = false // "unreachable"
	}
	if fquot {
		d.d.decRd.readn1() // consume the closing quote
	}
	return
}
+
+func (d *jsonDecDriver) DecodeTime() (t time.Time) {
+ // read string, and pass the string into json.unmarshal
+ d.advance()
+ if d.tok == 'n' {
+ d.readLit4Null(d.d.decRd.readn3())
+ return
+ }
+ d.ensureReadingString()
+ bs := d.readUnescapedString()
+ t, err := time.Parse(time.RFC3339, stringView(bs))
+ d.d.onerror(err)
+ return
+}
+
+func (d *jsonDecDriver) ContainerType() (vt valueType) {
+ // check container type by checking the first char
+ d.advance()
+
+ // optimize this, so we don't do 4 checks but do one computation.
+ // return jsonContainerSet[d.tok]
+
+ // ContainerType is mostly called for Map and Array,
+ // so this conditional is good enough (max 2 checks typically)
+ if d.tok == '{' {
+ return valueTypeMap
+ } else if d.tok == '[' {
+ return valueTypeArray
+ } else if d.tok == 'n' {
+ d.readLit4Null(d.d.decRd.readn3())
+ return valueTypeNil
+ } else if d.tok == '"' {
+ return valueTypeString
+ }
+ return valueTypeUnset
+}
+
// decNumBytes returns the raw bytes of the next number token. A quoted
// number reads up to the closing '"'; null yields nil bytes. For the bare
// number case, jsonReadNum is manually dispatched per reader kind so the
// bytes fast path can inline (see jsonManualInlineDecRdInHotZones).
func (d *jsonDecDriver) decNumBytes() (bs []byte) {
	d.advance()
	dr := &d.d.decRd
	if d.tok == '"' {
		bs = dr.readUntil('"')
	} else if d.tok == 'n' {
		d.readLit4Null(d.d.decRd.readn3())
	} else {
		if jsonManualInlineDecRdInHotZones {
			if dr.bytes {
				bs = dr.rb.jsonReadNum()
			} else if dr.bufio {
				bs = dr.bi.jsonReadNum()
			} else {
				bs = dr.ri.jsonReadNum()
			}
		} else {
			bs = dr.jsonReadNum()
		}
	}
	d.tok = 0
	return
}
+
// DecodeUint64 parses the next number token as a uint64, erroring on
// negative or malformed input.
func (d *jsonDecDriver) DecodeUint64() (u uint64) {
	b := d.decNumBytes()
	u, neg, ok := parseInteger_bytes(b)
	if neg {
		d.d.errorf("negative number cannot be decoded as uint64")
	}
	if !ok {
		d.d.onerror(strconvParseErr(b, "ParseUint"))
	}
	return
}

// DecodeInt64 parses the next number token as an int64, erroring on
// malformed input or overflow.
func (d *jsonDecDriver) DecodeInt64() (v int64) {
	b := d.decNumBytes()
	u, neg, ok := parseInteger_bytes(b)
	if !ok {
		d.d.onerror(strconvParseErr(b, "ParseInt"))
	}
	// reject magnitudes that cannot be represented in int64
	if chkOvf.Uint2Int(u, neg) {
		d.d.errorf("overflow decoding number from %s", b)
	}
	if neg {
		v = -int64(u)
	} else {
		v = int64(u)
	}
	return
}
+
+func (d *jsonDecDriver) DecodeFloat64() (f float64) {
+ var err error
+ bs := d.decNumBytes()
+ if len(bs) == 0 {
+ return
+ }
+ f, err = parseFloat64(bs)
+ d.d.onerror(err)
+ return
+}
+
+func (d *jsonDecDriver) DecodeFloat32() (f float32) {
+ var err error
+ bs := d.decNumBytes()
+ if len(bs) == 0 {
+ return
+ }
+ f, err = parseFloat32(bs)
+ d.d.onerror(err)
+ return
+}
+
+func (d *jsonDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
+ d.advance()
+ if d.tok == 'n' {
+ d.readLit4Null(d.d.decRd.readn3())
+ return
+ }
+ if ext == nil {
+ re := rv.(*RawExt)
+ re.Tag = xtag
+ d.d.decode(&re.Value)
+ } else if ext == SelfExt {
+ d.d.decodeValue(baseRV(rv), d.h.fnNoExt(basetype))
+ } else {
+ d.d.interfaceExtConvertAndDecode(rv, ext)
+ }
+}
+
+func (d *jsonDecDriver) decBytesFromArray(bs []byte) []byte {
+ if bs == nil {
+ bs = []byte{}
+ } else {
+ bs = bs[:0]
+ }
+ d.tok = 0
+ bs = append(bs, uint8(d.DecodeUint64()))
+ d.tok = d.d.decRd.skipWhitespace() // skip(&whitespaceCharBitset)
+ for d.tok != ']' {
+ if d.tok != ',' {
+ d.d.errorf("read array element - expect char '%c' but got char '%c'", ',', d.tok)
+ }
+ d.tok = 0
+ bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8)))
+ d.tok = d.d.decRd.skipWhitespace() // skip(&whitespaceCharBitset)
+ }
+ d.tok = 0
+ return bs
+}
+
+func (d *jsonDecDriver) DecodeBytes(bs []byte) (bsOut []byte) {
+ d.d.decByteState = decByteStateNone
+ d.advance()
+ if d.tok == 'n' {
+ d.readLit4Null(d.d.decRd.readn3())
+ return nil
+ }
+ // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode.
+ if d.rawext {
+ bsOut = bs
+ d.d.interfaceExtConvertAndDecode(&bsOut, d.h.RawBytesExt)
+ return
+ }
+ // check if an "array" of uint8's (see ContainerType for how to infer if an array)
+ if d.tok == '[' {
+ // bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
+ if bs == nil {
+ d.d.decByteState = decByteStateReuseBuf
+ bs = d.d.b[:]
+ }
+ return d.decBytesFromArray(bs)
+ }
+
+ // base64 encodes []byte{} as "", and we encode nil []byte as null.
+ // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}.
+
+ d.ensureReadingString()
+ bs1 := d.readUnescapedString()
+ slen := base64.StdEncoding.DecodedLen(len(bs1))
+ if slen == 0 {
+ bsOut = []byte{}
+ } else if slen <= cap(bs) {
+ bsOut = bs[:slen]
+ } else if bs == nil {
+ d.d.decByteState = decByteStateReuseBuf
+ bsOut = d.d.blist.check(*d.buf, slen)[:slen]
+ *d.buf = bsOut
+ } else {
+ bsOut = make([]byte, slen)
+ }
+ slen2, err := base64.StdEncoding.Decode(bsOut, bs1)
+ if err != nil {
+ d.d.errorf("error decoding base64 binary '%s': %v", bs1, err)
+ }
+ if slen != slen2 {
+ bsOut = bsOut[:slen2]
+ }
+ return
+}
+
+func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) {
+ d.d.decByteState = decByteStateNone
+ d.advance()
+
+ // common case
+ if d.tok == '"' {
+ return d.dblQuoteStringAsBytes()
+ }
+
+ // handle non-string scalar: null, true, false or a number
+ switch d.tok {
+ case 'n':
+ d.readLit4Null(d.d.decRd.readn3())
+ return nil // []byte{}
+ case 'f':
+ d.readLit4False(d.d.decRd.readn4())
+ return jsonLiteralFalse
+ case 't':
+ d.readLit4True(d.d.decRd.readn3())
+ return jsonLiteralTrue
+ }
+
+ // try to parse a valid number
+ d.tok = 0
+ return d.d.decRd.jsonReadNum()
+}
+
+func (d *jsonDecDriver) ensureReadingString() {
+ if d.tok != '"' {
+ d.d.errorf("expecting string starting with '\"'; got '%c'", d.tok)
+ }
+}
+
+func (d *jsonDecDriver) readUnescapedString() (bs []byte) {
+ // d.ensureReadingString()
+ bs = d.d.decRd.readUntil('"')
+ d.tok = 0
+ return
+}
+
+func (d *jsonDecDriver) dblQuoteStringAsBytes() (buf []byte) {
+ d.d.decByteState = decByteStateNone
+ // use a local buf variable, so we don't do pointer chasing within loop
+ buf = (*d.buf)[:0]
+ dr := &d.d.decRd
+ d.tok = 0
+
+ var bs []byte
+ var c byte
+ var firstTime bool = true
+
+ for {
+ if firstTime {
+ firstTime = false
+ if dr.bytes {
+ bs = dr.rb.jsonReadAsisChars()
+ if bs[len(bs)-1] == '"' {
+ d.d.decByteState = decByteStateZerocopy
+ return bs[:len(bs)-1]
+ }
+ goto APPEND
+ }
+ }
+
+ if jsonManualInlineDecRdInHotZones {
+ if dr.bytes {
+ bs = dr.rb.jsonReadAsisChars()
+ } else if dr.bufio {
+ bs = dr.bi.jsonReadAsisChars()
+ } else {
+ bs = dr.ri.jsonReadAsisChars()
+ }
+ } else {
+ bs = dr.jsonReadAsisChars()
+ }
+
+ APPEND:
+ buf = append(buf, bs[:len(bs)-1]...)
+ c = bs[len(bs)-1]
+
+ if c == '"' {
+ break
+ }
+
+ // c is now '\'
+ c = dr.readn1()
+
+ switch c {
+ case '"', '\\', '/', '\'':
+ buf = append(buf, c)
+ case 'b':
+ buf = append(buf, '\b')
+ case 'f':
+ buf = append(buf, '\f')
+ case 'n':
+ buf = append(buf, '\n')
+ case 'r':
+ buf = append(buf, '\r')
+ case 't':
+ buf = append(buf, '\t')
+ case 'u':
+ buf = append(buf, d.bstr[:utf8.EncodeRune(d.bstr[:], d.appendStringAsBytesSlashU())]...)
+ default:
+ *d.buf = buf
+ d.d.errorf("unsupported escaped value: %c", c)
+ }
+ }
+ *d.buf = buf
+ d.d.decByteState = decByteStateReuseBuf
+ return
+}
+
+func (d *jsonDecDriver) appendStringAsBytesSlashU() (r rune) {
+ var rr uint32
+ var csu [2]byte
+ var cs [4]byte = d.d.decRd.readn4()
+ if rr = jsonSlashURune(cs); rr == unicode.ReplacementChar {
+ return unicode.ReplacementChar
+ }
+ r = rune(rr)
+ if utf16.IsSurrogate(r) {
+ csu = d.d.decRd.readn2()
+ cs = d.d.decRd.readn4()
+ if csu[0] == '\\' && csu[1] == 'u' {
+ if rr = jsonSlashURune(cs); rr == unicode.ReplacementChar {
+ return unicode.ReplacementChar
+ }
+ return utf16.DecodeRune(r, rune(rr))
+ }
+ return unicode.ReplacementChar
+ }
+ return
+}
+
// jsonSlashURune converts the 4 hex digits following a \u escape into the
// corresponding UTF-16 code unit. It returns unicode.ReplacementChar if any
// byte is not a valid hex digit.
func jsonSlashURune(cs [4]byte) (rr uint32) {
	// explicit comparisons (rather than a lookup table) avoid memory loads
	// and array bounds checks on this hot path.
	for _, c := range cs {
		var d uint32
		switch {
		case c >= '0' && c <= '9':
			d = uint32(c - '0')
		case c >= 'a' && c <= 'f':
			d = uint32(c-'a') + 10
		case c >= 'A' && c <= 'F':
			d = uint32(c-'A') + 10
		default:
			return unicode.ReplacementChar
		}
		rr = rr<<4 | d
	}
	return
}
+
+func (d *jsonDecDriver) nakedNum(z *fauxUnion, bs []byte) (err error) {
+ // Note: nakedNum is NEVER called with a zero-length []byte
+ if d.h.PreferFloat {
+ z.v = valueTypeFloat
+ z.f, err = parseFloat64(bs)
+ } else {
+ err = parseNumber(bs, z, d.h.SignedInteger)
+ }
+ return
+}
+
// DecodeNaked decodes the next json value into the faux union, inferring the
// value type from the first token in the stream.
//
// Map ('{') and array ('[') starts are NOT consumed here: kInterfaceNaked
// will call ReadMapStart/ReadArrayStart itself.
func (d *jsonDecDriver) DecodeNaked() {
	z := d.d.naked()

	d.advance()
	var bs []byte
	switch d.tok {
	case 'n':
		d.readLit4Null(d.d.decRd.readn3())
		z.v = valueTypeNil
	case 'f':
		d.readLit4False(d.d.decRd.readn4())
		z.v = valueTypeBool
		z.b = false
	case 't':
		d.readLit4True(d.d.decRd.readn3())
		z.v = valueTypeBool
		z.b = true
	case '{':
		z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart
	case '[':
		z.v = valueTypeArray // don't consume. kInterfaceNaked will call ReadArrayStart
	case '"':
		// if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first
		bs = d.dblQuoteStringAsBytes()
		if len(bs) > 0 && d.d.c == containerMapKey && d.h.MapKeyAsString {
			if bytes.Equal(bs, jsonLiteralNull) {
				z.v = valueTypeNil
			} else if bytes.Equal(bs, jsonLiteralTrue) {
				z.v = valueTypeBool
				z.b = true
			} else if bytes.Equal(bs, jsonLiteralFalse) {
				z.v = valueTypeBool
				z.b = false
			} else {
				// check if a number: float, int or uint
				if err := d.nakedNum(z, bs); err != nil {
					// not a number after all: fall back to a plain string
					z.v = valueTypeString
					z.s = d.d.stringZC(bs)
				}
			}
		} else {
			z.v = valueTypeString
			z.s = d.d.stringZC(bs)
		}
	default: // number
		bs = d.d.decRd.jsonReadNum()
		d.tok = 0
		if len(bs) == 0 {
			d.d.errorf("decode number from empty string")
		}
		if err := d.nakedNum(z, bs); err != nil {
			d.d.errorf("decode number from %s: %v", bs, err)
		}
	}
}
+
+//----------------------
+
+// JsonHandle is a handle for JSON encoding format.
+//
+// Json is comprehensively supported:
+// - decodes numbers into interface{} as int, uint or float64
+// based on how the number looks and some config parameters e.g. PreferFloat, SignedInt, etc.
+// - decode integers from float formatted numbers e.g. 1.27e+8
+// - decode any json value (numbers, bool, etc) from quoted strings
+// - configurable way to encode/decode []byte .
+// by default, encodes and decodes []byte using base64 Std Encoding
+// - UTF-8 support for encoding and decoding
+//
+// It has better performance than the json library in the standard library,
+// by leveraging the performance improvements of the codec library.
+//
+// In addition, it doesn't read more bytes than necessary during a decode, which allows
+// reading multiple values from a stream containing json and non-json content.
+// For example, a user can read a json value, then a cbor value, then a msgpack value,
+// all from the same stream in sequence.
+//
+// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are
+// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD.
+//
+// Note also that the float values for NaN, +Inf or -Inf are encoded as null,
+// as suggested by NOTE 4 of the ECMA-262 ECMAScript Language Specification 5.1 edition.
+// see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf .
type JsonHandle struct {
	textEncodingType
	BasicHandle

	// Indent indicates how a value is encoded.
	// - If positive, indent by that number of spaces.
	// - If negative, indent by that number of tabs.
	Indent int8

	// IntegerAsString controls how integers (signed and unsigned) are encoded.
	//
	// Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
	// Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
	// This can be mitigated by configuring how to encode integers.
	//
	// IntegerAsString interprets the following values:
	// - if 'L', then encode integers > 2^53 as a json string.
	// - if 'A', then encode all integers as a json string
	// containing the exact integer representation as a decimal.
	// - else encode all integers as a json number (default)
	IntegerAsString byte

	// HTMLCharsAsIs controls how to encode some special characters to html: < > &
	//
	// By default, we encode them as \uXXX
	// to prevent security holes when served from some browsers.
	HTMLCharsAsIs bool

	// PreferFloat says that we will default to decoding a number as a float.
	// If not set, we will examine the characters of the number and decode as an
	// integer type if it doesn't have any of the characters [.eE].
	PreferFloat bool

	// TermWhitespace says that we add a whitespace character
	// at the end of an encoding.
	//
	// The whitespace is important, especially if using numbers in a context
	// where multiple items are written to a stream.
	TermWhitespace bool

	// MapKeyAsString says to encode all map keys as strings.
	//
	// Use this to enforce strict json output.
	// The only caveat is that nil value is ALWAYS written as null (never as "null")
	MapKeyAsString bool

	// _ uint64 // padding (cache line)

	// Note: below, we store hardly-used items e.g. RawBytesExt.
	// These values below may straddle a cache line, but they are hardly-used,
	// so shouldn't contribute to false-sharing except in rare cases.

	// RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
	// If not configured, raw bytes are encoded to/from base64 text.
	RawBytesExt InterfaceExt
}
+
// isJson reports that this is the json handle, enabling json-specific
// behavior in the shared encoder/decoder machinery.
func (h *JsonHandle) isJson() bool { return true }

// Name returns the name of the handle: json
func (h *JsonHandle) Name() string { return "json" }

// desc describes a stream byte for error messages; for json, the byte is
// its own description.
func (h *JsonHandle) desc(bd byte) string { return string(bd) }

// typical reports whether the handle uses the default settings that permit
// the fastest encoding path (no indent, no string-forced map keys/integers).
func (h *JsonHandle) typical() bool {
	return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L'
}
+
// newEncDriver returns a json encDriver, initialized and reset, with its
// back-pointers into the shared Encoder wired up.
func (h *JsonHandle) newEncDriver() encDriver {
	var e = &jsonEncDriver{h: h}
	// var x []byte
	// e.buf = &x
	e.e.e = e
	e.e.js = true
	e.e.init(h)
	e.reset()
	return e
}
+
// newDecDriver returns a json decDriver, initialized and reset, with a
// scratch buffer and back-pointers into the shared Decoder wired up.
func (h *JsonHandle) newDecDriver() decDriver {
	var d = &jsonDecDriver{h: h}
	var x []byte
	d.buf = &x
	d.d.d = d
	d.d.js = true
	d.d.jsms = h.MapKeyAsString
	d.d.init(h)
	d.reset()
	return d
}
+
// resetState clears per-stream encode state (the indentation depth level).
func (e *jsonEncDriver) resetState() {
	e.dl = 0
}
+
+func (e *jsonEncDriver) reset() {
+ e.resetState()
+ // (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b)
+ // cache values from the handle
+ e.typical = e.h.typical()
+ if e.h.HTMLCharsAsIs {
+ e.s = &jsonCharSafeSet
+ } else {
+ e.s = &jsonCharHtmlSafeSet
+ }
+ e.rawext = e.h.RawBytesExt != nil
+ e.di = int8(e.h.Indent)
+ e.d = e.h.Indent != 0
+ e.ks = e.h.MapKeyAsString
+ e.is = e.h.IntegerAsString
+}
+
// resetState clears per-stream decode state: it (re)acquires the scratch
// buffer (at least 256 bytes) and clears the peeked token.
func (d *jsonDecDriver) resetState() {
	*d.buf = d.d.blist.check(*d.buf, 256)
	d.tok = 0
}

// reset prepares the driver for a new decode stream, re-caching the
// handle's RawBytesExt configuration.
func (d *jsonDecDriver) reset() {
	d.resetState()
	d.rawext = d.h.RawBytesExt != nil
}
+
+func jsonFloatStrconvFmtPrec64(f float64) (fmt byte, prec int8) {
+ fmt = 'f'
+ prec = -1
+ fbits := math.Float64bits(f)
+ abs := math.Float64frombits(fbits &^ (1 << 63))
+ if abs == 0 || abs == 1 {
+ prec = 1
+ } else if abs < 1e-6 || abs >= 1e21 {
+ fmt = 'e'
+ } else if noFrac64(fbits) {
+ prec = 1
+ }
+ return
+}
+
+func jsonFloatStrconvFmtPrec32(f float32) (fmt byte, prec int8) {
+ fmt = 'f'
+ prec = -1
+ // directly handle Modf (to get fractions) and Abs (to get absolute)
+ fbits := math.Float32bits(f)
+ abs := math.Float32frombits(fbits &^ (1 << 31))
+ if abs == 0 || abs == 1 {
+ prec = 1
+ } else if abs < 1e-6 || abs >= 1e21 {
+ fmt = 'e'
+ } else if noFrac32(fbits) {
+ prec = 1
+ }
+ return
+}
+
// compile-time checks that the json driver types implement the required
// codec driver interfaces.
var _ decDriverContainerTracker = (*jsonDecDriver)(nil)
var _ encDriverContainerTracker = (*jsonEncDriver)(nil)
var _ decDriver = (*jsonDecDriver)(nil)

var _ encDriver = (*jsonEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl
new file mode 100644
index 000000000..53198064d
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl
@@ -0,0 +1,235 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+//go:build !codec.notmammoth
// +build !codec.notmammoth
+
+// Code generated from mammoth-test.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import "testing"
+import "fmt"
+import "reflect"
+
+// TestMammoth has all the different paths optimized in fast-path
+// It has all the primitives, slices and maps.
+//
+// For each of those types, it has a pointer and a non-pointer field.
+
// init references fmt.Printf so the fmt import stays used regardless of
// what the template generates.
func init() { _ = fmt.Printf } // so we can include fmt as needed
+
type TestMammoth struct {

// primitives: one value field and one pointer field per type
{{range .Values }}{{if .Primitive -}}
{{ .MethodNamePfx "F" true }} {{ .Primitive }}
{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
{{end}}{{end}}

// slices: value, pointer, and a length-4 array per element type
{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}}
{{ .MethodNamePfx "F" false }} []{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
{{ .MethodNamePfx "Farr4" false }} [4]{{ .Elem }}
{{end}}{{end}}{{end}}

// maps: value and pointer per key/element combination
{{range .Values }}{{if not .Primitive }}{{if .MapKey -}}
{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}

}
+
// named slice types implementing MapBySlice: they encode as maps whose
// key/value pairs alternate in the slice.
{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}}
type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }}
func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { }
{{end}}{{end}}{{end}}

// named map types, exercising the codegen paths for named maps.
{{range .Values }}{{if not .Primitive }}{{if .MapKey -}}
type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}
+
// __doTestMammothSlices round-trips every fast-path slice element type
// through encode/decode with handle h: nil/empty/non-empty sources, value
// and pointer targets, arrays, non-addressable destinations, and the
// MapBySlice (typMbs) variants.
func __doTestMammothSlices(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey -}}
	var v{{$i}}va [8]{{ .Elem }}
	for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } {
	{{/*
	// fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v)
	// - encode value to some []byte
	// - decode into a length-wise-equal []byte
	// - check if equal to initial slice
	// - encode ptr to the value
	// - check if encode bytes are same
	// - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice
	// - decode into non-addressable slice of equal length, then larger len
	// - for each decode, compare elem-by-elem to the original slice
	// -
	// - rinse and repeat for a MapBySlice version
	// -
	*/ -}}
	var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }}
	var bs{{$i}} []byte
	v{{$i}}v1 = v
	bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}")
	if v == nil {
		v{{$i}}v2 = make([]{{ .Elem }}, 2)
		testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
		testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}") // should not change
		testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change
		v{{$i}}v2 = make([]{{ .Elem }}, 2)
		testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
		testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}-noaddr") // should not change
		testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change
	} else {
		v{{$i}}v2 = make([]{{ .Elem }}, len(v))
		testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
		testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}")
		v{{$i}}v2 = make([]{{ .Elem }}, len(v))
		testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
		testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-noaddr")
	}
	testReleaseBytes(bs{{$i}})
	// ... now encode a pointer, and decode into various slice/array targets
	bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p")
	v{{$i}}v2 = nil
	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p")
	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p")
	v{{$i}}va = [8]{{ .Elem }}{} // clear the array
	testUnmarshalErr(&v{{$i}}va, bs{{$i}}, h, t, "dec-array-v{{$i}}-p-1")
	if v{{$i}}v1 == nil && v{{$i}}v2 == nil { v{{$i}}v2 = []{{ .Elem }}{} } // so we can compare to zero len slice below
	testDeepEqualErr(v{{$i}}va[:len(v{{$i}}v2)], v{{$i}}v2, t, "equal-array-v{{$i}}-p-1")
	v{{$i}}va = [8]{{ .Elem }}{} // clear the array
	v{{$i}}v2 = v{{$i}}va[:1:1]
	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1")
	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-1")
	v{{$i}}va = [8]{{ .Elem }}{} // clear the array
	v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)]
	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len")
	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-len")
	v{{$i}}va = [8]{{ .Elem }}{} // clear the array
	v{{$i}}v2 = v{{$i}}va[:]
	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap")
	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-slice-v{{$i}}-p-cap")
	if len(v{{$i}}v1) > 1 {
		v{{$i}}va = [8]{{ .Elem }}{} // clear the array
		testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr")
		testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-len-noaddr")
		v{{$i}}va = [8]{{ .Elem }}{} // clear the array
		testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr")
		testDeepEqualErr(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], t, "equal-slice-v{{$i}}-p-cap-noaddr")
	}
	testReleaseBytes(bs{{$i}})
	// ... rinse and repeat for the MapBySlice (typMbs) variant
	var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }}
	v{{$i}}v2 = nil
	if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
	v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1)
	v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
	if v != nil {
		bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom")
		testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom")
		testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom")
		testReleaseBytes(bs{{$i}})
	}
	bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p")
	v{{$i}}v2 = nil
	v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
	testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p")
	testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-slice-v{{$i}}-custom-p")
	testReleaseBytes(bs{{$i}})
	}
{{end}}{{end}}{{end}}
}
+
// __doTestMammothMaps round-trips every fast-path map type through
// encode/decode with handle h: nil/empty/non-empty sources, value and
// pointer targets, non-addressable destinations, and the named-map
// (typMap) variants.
func __doTestMammothMaps(t *testing.T, h Handle) {
{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey -}}
	for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } {
	// fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v)
	var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }}
	var bs{{$i}} []byte
	v{{$i}}v1 = v
	bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}")
	if v != nil {
	{{/* NOTE(review): the `v == nil` arms below are dead code inside this `v != nil` guard. */ -}}
	if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
	testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}")
	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}")
	if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
	testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value
	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-noaddr")
	}
	if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-len")
	testReleaseBytes(bs{{$i}})
	bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p")
	v{{$i}}v2 = nil
	testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil")
	testDeepEqualErr(v{{$i}}v1, v{{$i}}v2, t, "equal-map-v{{$i}}-p-nil")
	testReleaseBytes(bs{{$i}})
	// ... rinse and repeat for the named-map (typMap) variant
	if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
	var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }}
	v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1)
	v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2)
	if v != nil {
	bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom")
	testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
	testDeepEqualErr(v{{$i}}v3, v{{$i}}v4, t, "equal-map-v{{$i}}-p-len")
	testReleaseBytes(bs{{$i}})
	}
	}
{{end}}{{end}}{{end}}

}
+
// doTestMammothMapsAndSlices runs the slice and map round-trip suites
// against h. For msgpack, RawToString is forced on for the duration (and
// restored after) — presumably so decoded raw bytes compare equal to the
// original strings; confirm against the msgpack handle docs.
func doTestMammothMapsAndSlices(t *testing.T, h Handle) {
	defer testSetup(t, &h)()
	if mh, ok := h.(*MsgpackHandle); ok {
		defer func(b bool) { mh.RawToString = b }(mh.RawToString)
		mh.RawToString = true
	}
	__doTestMammothSlices(t, h)
	__doTestMammothMaps(t, h)
}
+
// doTestMammoth round-trips a randomly-filled TestMammoth — and, unless
// running with -short, a TestMammoth2Wrapper — through handle h, checking
// deep equality of the decoded value.
func doTestMammoth(t *testing.T, h Handle) {
	defer testSetup(t, &h)()
	if mh, ok := h.(*MsgpackHandle); ok {
		// force RawToString on for msgpack (restored on return)
		defer func(b bool) { mh.RawToString = b }(mh.RawToString)
		mh.RawToString = true
	}

	name := h.Name()
	var b []byte

	var m, m2 TestMammoth
	testRandomFillRV(reflect.ValueOf(&m).Elem())
	b = testMarshalErr(&m, h, t, "mammoth-"+name)

	testUnmarshalErr(&m2, b, h, t, "mammoth-"+name)
	testDeepEqualErr(&m, &m2, t, "mammoth-"+name)
	testReleaseBytes(b)

	if testing.Short() {
		t.Skipf("skipping rest of mammoth test in -short mode")
	}

	var mm, mm2 TestMammoth2Wrapper
	testRandomFillRV(reflect.ValueOf(&mm).Elem())
	b = testMarshalErr(&mm, h, t, "mammoth2-"+name)
	// os.Stderr.Write([]byte("\n\n\n\n" + string(b) + "\n\n\n\n"))
	testUnmarshalErr(&mm2, b, h, t, "mammoth2-"+name)
	testDeepEqualErr(&mm, &mm2, t, "mammoth2-"+name)
	// testMammoth2(t, name, h)
	testReleaseBytes(b)
}
+
// per-format entry points: one TestXxxMammoth and one
// TestXxxMammothMapsAndSlices is generated for each format in .Formats.
{{range $i, $e := .Formats -}}
func Test{{ . }}Mammoth(t *testing.T) {
	doTestMammoth(t, test{{ . }}H)
}
{{end}}
{{range $i, $e := .Formats -}}
func Test{{ . }}MammothMapsAndSlices(t *testing.T) {
	doTestMammothMapsAndSlices(t, test{{ . }}H)
}
{{end}}
diff --git a/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl b/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl
new file mode 100644
index 000000000..9fe56ec7f
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl
@@ -0,0 +1,101 @@
+// +build !codec.notmammoth
+
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from mammoth2-test.go.tmpl - DO NOT EDIT.
+
+package codec
+
+// Increase codecoverage by covering all the codecgen paths, in fast-path and gen-helper.go....
+//
+// Note: even though this is built based on fast-path and gen-helper, we will run these tests
+// in all modes, including notfastpath, etc.
+//
+// Add test file for creating a mammoth generated file as _mammoth_generated.go
+// - generate a second mammoth files in a different file: mammoth2_generated_test.go
+// mammoth-test.go.tmpl will do this
+// - run codecgen on it, into mammoth2_codecgen_generated_test.go (no build tags)
+// - as part of TestMammoth, run it also
+// - this will cover all the codecgen, gen-helper, etc in one full run
+// - check in mammoth* files into github also
+//
+// Now, add some types:
+// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it
+// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types
+// - this wrapper object is what we work encode/decode (so that the codecgen methods are called)
+
+
+// import "encoding/binary"
+
+import "fmt"
+
// TestMammoth2 mirrors TestMammoth: value and pointer fields for every
// primitive, slice and map type covered by fast-path/gen-helper.
type TestMammoth2 struct {

{{range .Values }}{{if .Primitive }}{{/*
*/}}{{ .MethodNamePfx "F" true }} {{ .Primitive }}
{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
{{end}}{{end}}

{{range .Values }}{{if not .Primitive }}{{if not .MapKey }}{{/*
*/}}{{ .MethodNamePfx "F" false }} []{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
{{end}}{{end}}{{end}}

{{range .Values }}{{if not .Primitive }}{{if .MapKey }}{{/*
*/}}{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
{{end}}{{end}}{{end}}

}
+
+// -----------
+
// testMammoth2Binary exercises the BinaryMarshaler/BinaryUnmarshaler path:
// it round-trips as an 8-byte big-endian unsigned integer.
type testMammoth2Binary uint64
func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) {
data = make([]byte, 8)
bigenstd.PutUint64(data, uint64(x))
return
}
// NOTE(review): assumes len(data) >= 8; shorter input would panic — confirm callers.
func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) {
*x = testMammoth2Binary(bigenstd.Uint64(data))
return
}

// testMammoth2Text exercises the TextMarshaler/TextUnmarshaler path:
// it round-trips as the binary (%b) text form of the integer.
type testMammoth2Text uint64
func (x testMammoth2Text) MarshalText() (data []byte, err error) {
data = []byte(fmt.Sprintf("%b", uint64(x)))
return
}
func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x))
return
}

// testMammoth2Json exercises the json Marshaler/Unmarshaler path:
// it round-trips as the decimal (%v) form of the integer.
type testMammoth2Json uint64
func (x testMammoth2Json) MarshalJSON() (data []byte, err error) {
data = []byte(fmt.Sprintf("%v", uint64(x)))
return
}
func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) {
_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x))
return
}
+
// testMammoth2Basic is a fixed-size array type, exercising array codegen.
type testMammoth2Basic [4]uint64

// TestMammoth2Wrapper wraps TestMammoth2 together with the custom
// marshaler types above — directly and inside maps, slices and arrays —
// so a single encode/decode of the wrapper touches every codecgen path.
type TestMammoth2Wrapper struct {
	V TestMammoth2
	T testMammoth2Text
	B testMammoth2Binary
	J testMammoth2Json
	C testMammoth2Basic
	M map[testMammoth2Basic]TestMammoth2
	L []TestMammoth2
	A [4]int64

	Tcomplex128 complex128
	Tcomplex64 complex64
	Tbytes []uint8
	Tpbytes *[]uint8
}
diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go
new file mode 100644
index 000000000..df78885a9
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/msgpack.go
@@ -0,0 +1,1229 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+/*
+Msgpack-c implementation powers the c, c++, python, ruby, etc libraries.
+We need to maintain compatibility with it and how it encodes integer values
+without caring about the type.
+
+For compatibility with behaviour of msgpack-c reference implementation:
+ - Go intX (>0) and uintX
+ IS ENCODED AS
+ msgpack +ve fixnum, unsigned
+ - Go intX (<0)
+ IS ENCODED AS
+ msgpack -ve fixnum, signed
+*/
+
+package codec
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "net/rpc"
+ "reflect"
+ "time"
+)
+
// msgpack format descriptor bytes.
// see: https://github.com/msgpack/msgpack/blob/master/spec.md
const (
	mpPosFixNumMin byte = 0x00
	mpPosFixNumMax byte = 0x7f
	mpFixMapMin byte = 0x80
	mpFixMapMax byte = 0x8f
	mpFixArrayMin byte = 0x90
	mpFixArrayMax byte = 0x9f
	mpFixStrMin byte = 0xa0
	mpFixStrMax byte = 0xbf
	mpNil byte = 0xc0
	_ byte = 0xc1 // 0xc1 is never used
	mpFalse byte = 0xc2
	mpTrue byte = 0xc3
	mpFloat byte = 0xca
	mpDouble byte = 0xcb
	mpUint8 byte = 0xcc
	mpUint16 byte = 0xcd
	mpUint32 byte = 0xce
	mpUint64 byte = 0xcf
	mpInt8 byte = 0xd0
	mpInt16 byte = 0xd1
	mpInt32 byte = 0xd2
	mpInt64 byte = 0xd3

	// extensions below
	mpBin8 byte = 0xc4
	mpBin16 byte = 0xc5
	mpBin32 byte = 0xc6
	mpExt8 byte = 0xc7
	mpExt16 byte = 0xc8
	mpExt32 byte = 0xc9
	mpFixExt1 byte = 0xd4
	mpFixExt2 byte = 0xd5
	mpFixExt4 byte = 0xd6
	mpFixExt8 byte = 0xd7
	mpFixExt16 byte = 0xd8

	mpStr8 byte = 0xd9 // new
	mpStr16 byte = 0xda
	mpStr32 byte = 0xdb

	mpArray16 byte = 0xdc
	mpArray32 byte = 0xdd

	mpMap16 byte = 0xde
	mpMap32 byte = 0xdf

	mpNegFixNumMin byte = 0xe0
	mpNegFixNumMax byte = 0xff
)

// mpTimeExtTag is the msgpack timestamp extension tag (-1);
// mpTimeExtTagU is its byte form as written on the wire (0xff).
var mpTimeExtTag int8 = -1
var mpTimeExtTagU = uint8(mpTimeExtTag)
+
+var mpdescNames = map[byte]string{
+ mpNil: "nil",
+ mpFalse: "false",
+ mpTrue: "true",
+ mpFloat: "float",
+ mpDouble: "float",
+ mpUint8: "uuint",
+ mpUint16: "uint",
+ mpUint32: "uint",
+ mpUint64: "uint",
+ mpInt8: "int",
+ mpInt16: "int",
+ mpInt32: "int",
+ mpInt64: "int",
+
+ mpStr8: "string|bytes",
+ mpStr16: "string|bytes",
+ mpStr32: "string|bytes",
+
+ mpBin8: "bytes",
+ mpBin16: "bytes",
+ mpBin32: "bytes",
+
+ mpArray16: "array",
+ mpArray32: "array",
+
+ mpMap16: "map",
+ mpMap32: "map",
+}
+
+func mpdesc(bd byte) (s string) {
+ s = mpdescNames[bd]
+ if s == "" {
+ switch {
+ case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax,
+ bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+ s = "int"
+ case bd >= mpFixStrMin && bd <= mpFixStrMax:
+ s = "string|bytes"
+ case bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+ s = "array"
+ case bd >= mpFixMapMin && bd <= mpFixMapMax:
+ s = "map"
+ case bd >= mpFixExt1 && bd <= mpFixExt16,
+ bd >= mpExt8 && bd <= mpExt32:
+ s = "ext"
+ default:
+ s = "unknown"
+ }
+ }
+ return
+}
+
// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
// that the backend RPC service takes multiple arguments, which have been arranged
// in sequence in the slice.
//
// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
// array of 1 element).
type MsgpackSpecRpcMultiArgs []interface{}

// A MsgpackContainer type specifies the different types of msgpackContainers.
//
// fixCutoff is the length below which the single-byte "fix" form is used
// (0 means there is no fix form); bFixMin is the base descriptor byte for
// that form; b8/b16/b32 are the descriptors for 8-/16-/32-bit length
// prefixes (b8 == 0 means there is no 8-bit form).
type msgpackContainerType struct {
	fixCutoff, bFixMin, b8, b16, b32 byte
	// hasFixMin, has8, has8Always bool
}

var (
	// legacy raw strings have no str8 form
	msgpackContainerRawLegacy = msgpackContainerType{
		32, mpFixStrMin, 0, mpStr16, mpStr32,
	}
	msgpackContainerStr = msgpackContainerType{
		32, mpFixStrMin, mpStr8, mpStr16, mpStr32, // true, true, false,
	}
	msgpackContainerBin = msgpackContainerType{
		0, 0, mpBin8, mpBin16, mpBin32, // false, true, true,
	}
	msgpackContainerList = msgpackContainerType{
		16, mpFixArrayMin, 0, mpArray16, mpArray32, // true, false, false,
	}
	msgpackContainerMap = msgpackContainerType{
		16, mpFixMapMin, 0, mpMap16, mpMap32, // true, false, false,
	}
)
+
+//---------------------------------------------
+
// msgpackEncDriver is the encDriver implementation for the msgpack format.
type msgpackEncDriver struct {
	noBuiltInTypes
	encDriverNoopContainerWriter
	encDriverNoState
	h *MsgpackHandle // handle, for configuration
	// x [8]byte
	e Encoder // embedded encoder (shared state)
}

// encoder returns the embedded Encoder.
func (e *msgpackEncDriver) encoder() *Encoder {
	return &e.e
}

// EncodeNil writes the msgpack nil descriptor.
func (e *msgpackEncDriver) EncodeNil() {
	e.e.encWr.writen1(mpNil)
}
+
// EncodeInt encodes i using the smallest msgpack representation that holds it.
// If PositiveIntUnsigned is set, non-negative values are delegated to EncodeUint.
// Values in [-32, 127] use a single-byte fixnum unless NoFixedNum is set.
func (e *msgpackEncDriver) EncodeInt(i int64) {
	if e.h.PositiveIntUnsigned && i >= 0 {
		e.EncodeUint(uint64(i))
	} else if i > math.MaxInt8 {
		if i <= math.MaxInt16 {
			e.e.encWr.writen1(mpInt16)
			bigen.writeUint16(e.e.w(), uint16(i))
		} else if i <= math.MaxInt32 {
			e.e.encWr.writen1(mpInt32)
			bigen.writeUint32(e.e.w(), uint32(i))
		} else {
			e.e.encWr.writen1(mpInt64)
			bigen.writeUint64(e.e.w(), uint64(i))
		}
	} else if i >= -32 {
		// i in [-32, MaxInt8]: a positive or negative fixnum is the byte itself
		if e.h.NoFixedNum {
			e.e.encWr.writen2(mpInt8, byte(i))
		} else {
			e.e.encWr.writen1(byte(i))
		}
	} else if i >= math.MinInt8 {
		e.e.encWr.writen2(mpInt8, byte(i))
	} else if i >= math.MinInt16 {
		e.e.encWr.writen1(mpInt16)
		bigen.writeUint16(e.e.w(), uint16(i))
	} else if i >= math.MinInt32 {
		e.e.encWr.writen1(mpInt32)
		bigen.writeUint32(e.e.w(), uint32(i))
	} else {
		e.e.encWr.writen1(mpInt64)
		bigen.writeUint64(e.e.w(), uint64(i))
	}
}
+
// EncodeUint encodes i using the smallest msgpack unsigned representation.
// Values <= MaxInt8 use a single-byte positive fixnum unless NoFixedNum is set.
func (e *msgpackEncDriver) EncodeUint(i uint64) {
	if i <= math.MaxInt8 {
		if e.h.NoFixedNum {
			e.e.encWr.writen2(mpUint8, byte(i))
		} else {
			e.e.encWr.writen1(byte(i))
		}
	} else if i <= math.MaxUint8 {
		e.e.encWr.writen2(mpUint8, byte(i))
	} else if i <= math.MaxUint16 {
		e.e.encWr.writen1(mpUint16)
		bigen.writeUint16(e.e.w(), uint16(i))
	} else if i <= math.MaxUint32 {
		e.e.encWr.writen1(mpUint32)
		bigen.writeUint32(e.e.w(), uint32(i))
	} else {
		e.e.encWr.writen1(mpUint64)
		bigen.writeUint64(e.e.w(), uint64(i))
	}
}
+
// EncodeBool writes the msgpack true/false descriptor.
func (e *msgpackEncDriver) EncodeBool(b bool) {
	if b {
		e.e.encWr.writen1(mpTrue)
	} else {
		e.e.encWr.writen1(mpFalse)
	}
}

// EncodeFloat32 writes the msgpack float32 representation (descriptor + 4 bytes).
func (e *msgpackEncDriver) EncodeFloat32(f float32) {
	e.e.encWr.writen1(mpFloat)
	bigen.writeUint32(e.e.w(), math.Float32bits(f))
}

// EncodeFloat64 writes the msgpack float64 representation (descriptor + 8 bytes).
func (e *msgpackEncDriver) EncodeFloat64(f float64) {
	e.e.encWr.writen1(mpDouble)
	bigen.writeUint64(e.e.w(), math.Float64bits(f))
}
+
// EncodeTime encodes t with the msgpack timestamp extension (tag -1),
// choosing the smallest payload: 4 bytes (seconds fit 32 bits, no nanos),
// 8 bytes (nanos<<34 | 34-bit seconds), or 12 bytes (32-bit nanos + 64-bit
// signed seconds). If WriteExt is off, the same payload is written as
// legacy raw bytes. The zero time is encoded as nil.
func (e *msgpackEncDriver) EncodeTime(t time.Time) {
	if t.IsZero() {
		e.EncodeNil()
		return
	}
	t = t.UTC()
	sec, nsec := t.Unix(), uint64(t.Nanosecond())
	var data64 uint64
	var l = 4
	if sec >= 0 && sec>>34 == 0 {
		// seconds fit in 34 bits: pack nsec (30 bits) above sec (34 bits)
		data64 = (nsec << 34) | uint64(sec)
		if data64&0xffffffff00000000 != 0 {
			l = 8
		}
	} else {
		l = 12
	}
	if e.h.WriteExt {
		e.encodeExtPreamble(mpTimeExtTagU, l)
	} else {
		e.writeContainerLen(msgpackContainerRawLegacy, l)
	}
	switch l {
	case 4:
		bigen.writeUint32(e.e.w(), uint32(data64))
	case 8:
		bigen.writeUint64(e.e.w(), data64)
	case 12:
		bigen.writeUint32(e.e.w(), uint32(nsec))
		bigen.writeUint64(e.e.w(), uint64(sec))
	}
}
+
// EncodeExt encodes v as a msgpack ext value with tag xtag.
// For SelfExt, v is encoded with this same handle into a pooled buffer;
// otherwise ext.WriteExt supplies the payload. A nil payload encodes as nil.
// If WriteExt is off, the payload is written as raw bytes (tag is lost).
func (e *msgpackEncDriver) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
	var bs0, bs []byte
	if ext == SelfExt {
		bs0 = e.e.blist.get(1024)
		bs = bs0
		e.e.sideEncode(v, basetype, &bs)
	} else {
		bs = ext.WriteExt(v)
	}
	if bs == nil {
		e.EncodeNil()
		goto END
	}
	if e.h.WriteExt {
		e.encodeExtPreamble(uint8(xtag), len(bs))
		e.e.encWr.writeb(bs)
	} else {
		e.EncodeStringBytesRaw(bs)
	}
END:
	if ext == SelfExt {
		// return pooled buffers; sideEncode may have grown bs away from bs0
		e.e.blist.put(bs)
		if !byteSliceSameData(bs0, bs) {
			e.e.blist.put(bs0)
		}
	}
}
+
// EncodeRawExt writes a pre-encoded ext value: its tag and raw payload bytes.
func (e *msgpackEncDriver) EncodeRawExt(re *RawExt) {
	e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
	e.e.encWr.writeb(re.Data)
}
+
+func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
+ if l == 1 {
+ e.e.encWr.writen2(mpFixExt1, xtag)
+ } else if l == 2 {
+ e.e.encWr.writen2(mpFixExt2, xtag)
+ } else if l == 4 {
+ e.e.encWr.writen2(mpFixExt4, xtag)
+ } else if l == 8 {
+ e.e.encWr.writen2(mpFixExt8, xtag)
+ } else if l == 16 {
+ e.e.encWr.writen2(mpFixExt16, xtag)
+ } else if l < 256 {
+ e.e.encWr.writen2(mpExt8, byte(l))
+ e.e.encWr.writen1(xtag)
+ } else if l < 65536 {
+ e.e.encWr.writen1(mpExt16)
+ bigen.writeUint16(e.e.w(), uint16(l))
+ e.e.encWr.writen1(xtag)
+ } else {
+ e.e.encWr.writen1(mpExt32)
+ bigen.writeUint32(e.e.w(), uint32(l))
+ e.e.encWr.writen1(xtag)
+ }
+}
+
// WriteArrayStart writes the descriptor and length prefix for an array.
func (e *msgpackEncDriver) WriteArrayStart(length int) {
	e.writeContainerLen(msgpackContainerList, length)
}

// WriteMapStart writes the descriptor and length prefix for a map.
func (e *msgpackEncDriver) WriteMapStart(length int) {
	e.writeContainerLen(msgpackContainerMap, length)
}
+
// EncodeString encodes s, choosing the container family from the handle:
// bin if WriteExt && StringToRaw, str if WriteExt, else legacy raw.
func (e *msgpackEncDriver) EncodeString(s string) {
	var ct msgpackContainerType
	if e.h.WriteExt {
		if e.h.StringToRaw {
			ct = msgpackContainerBin
		} else {
			ct = msgpackContainerStr
		}
	} else {
		ct = msgpackContainerRawLegacy
	}
	e.writeContainerLen(ct, len(s))
	if len(s) > 0 {
		e.e.encWr.writestr(s)
	}
}

// EncodeStringBytesRaw encodes bs as bin (WriteExt) or legacy raw bytes.
// A nil slice encodes as nil.
func (e *msgpackEncDriver) EncodeStringBytesRaw(bs []byte) {
	if bs == nil {
		e.EncodeNil()
		return
	}
	if e.h.WriteExt {
		e.writeContainerLen(msgpackContainerBin, len(bs))
	} else {
		e.writeContainerLen(msgpackContainerRawLegacy, len(bs))
	}
	if len(bs) > 0 {
		e.e.encWr.writeb(bs)
	}
}

// writeContainerLen writes the descriptor byte and length prefix for a
// container of length l, using the smallest form ct supports:
// single-byte fix form, then 8-, 16- or 32-bit length prefix.
func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
	if ct.fixCutoff > 0 && l < int(ct.fixCutoff) {
		e.e.encWr.writen1(ct.bFixMin | byte(l))
	} else if ct.b8 > 0 && l < 256 {
		e.e.encWr.writen2(ct.b8, uint8(l))
	} else if l < 65536 {
		e.e.encWr.writen1(ct.b16)
		bigen.writeUint16(e.e.w(), uint16(l))
	} else {
		e.e.encWr.writen1(ct.b32)
		bigen.writeUint32(e.e.w(), uint32(l))
	}
}
+
+//---------------------------------------------
+
// msgpackDecDriver is the decDriver implementation for the msgpack format.
type msgpackDecDriver struct {
	decDriverNoopContainerReader
	decDriverNoopNumberHelper
	h *MsgpackHandle // handle, for configuration
	bdAndBdread
	_ bool
	noBuiltInTypes
	d Decoder // embedded decoder (shared state)
}

// decoder returns the embedded Decoder.
func (d *msgpackDecDriver) decoder() *Decoder {
	return &d.d
}
+
+// Note: This returns either a primitive (int, bool, etc) for non-containers,
+// or a containerType, or a specific type denoting nil or extension.
+// It is called when a nil interface{} is passed, leaving it up to the DecDriver
+// to introspect the stream and decide how best to decode.
+// It deciphers the value by looking at the stream first.
+func (d *msgpackDecDriver) DecodeNaked() {
+ if !d.bdRead {
+ d.readNextBd()
+ }
+ bd := d.bd
+ n := d.d.naked()
+ var decodeFurther bool
+
+ switch bd {
+ case mpNil:
+ n.v = valueTypeNil
+ d.bdRead = false
+ case mpFalse:
+ n.v = valueTypeBool
+ n.b = false
+ case mpTrue:
+ n.v = valueTypeBool
+ n.b = true
+
+ case mpFloat:
+ n.v = valueTypeFloat
+ n.f = float64(math.Float32frombits(bigen.Uint32(d.d.decRd.readn4())))
+ case mpDouble:
+ n.v = valueTypeFloat
+ n.f = math.Float64frombits(bigen.Uint64(d.d.decRd.readn8()))
+
+ case mpUint8:
+ n.v = valueTypeUint
+ n.u = uint64(d.d.decRd.readn1())
+ case mpUint16:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint16(d.d.decRd.readn2()))
+ case mpUint32:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint32(d.d.decRd.readn4()))
+ case mpUint64:
+ n.v = valueTypeUint
+ n.u = uint64(bigen.Uint64(d.d.decRd.readn8()))
+
+ case mpInt8:
+ n.v = valueTypeInt
+ n.i = int64(int8(d.d.decRd.readn1()))
+ case mpInt16:
+ n.v = valueTypeInt
+ n.i = int64(int16(bigen.Uint16(d.d.decRd.readn2())))
+ case mpInt32:
+ n.v = valueTypeInt
+ n.i = int64(int32(bigen.Uint32(d.d.decRd.readn4())))
+ case mpInt64:
+ n.v = valueTypeInt
+ n.i = int64(int64(bigen.Uint64(d.d.decRd.readn8())))
+
+ default:
+ switch {
+ case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
+ // positive fixnum (always signed)
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
+ case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
+ // negative fixnum
+ n.v = valueTypeInt
+ n.i = int64(int8(bd))
+ case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
+ d.d.fauxUnionReadRawBytes(d.h.WriteExt)
+ // if d.h.WriteExt || d.h.RawToString {
+ // n.v = valueTypeString
+ // n.s = d.d.stringZC(d.DecodeStringAsBytes())
+ // } else {
+ // n.v = valueTypeBytes
+ // n.l = d.DecodeBytes([]byte{})
+ // }
+ case bd == mpBin8, bd == mpBin16, bd == mpBin32:
+ d.d.fauxUnionReadRawBytes(false)
+ case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+ n.v = valueTypeArray
+ decodeFurther = true
+ case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
+ n.v = valueTypeMap
+ decodeFurther = true
+ case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
+ n.v = valueTypeExt
+ clen := d.readExtLen()
+ n.u = uint64(d.d.decRd.readn1())
+ if n.u == uint64(mpTimeExtTagU) {
+ n.v = valueTypeTime
+ n.t = d.decodeTime(clen)
+ } else if d.d.bytes {
+ n.l = d.d.decRd.rb.readx(uint(clen))
+ } else {
+ n.l = decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, d.d.b[:])
+ }
+ default:
+ d.d.errorf("cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd))
+ }
+ }
+ if !decodeFurther {
+ d.bdRead = false
+ }
+ if n.v == valueTypeUint && d.h.SignedInteger {
+ n.v = valueTypeInt
+ n.i = int64(n.u)
+ }
+}
+
// nextValueBytes appends the raw encoded bytes of the next value in the
// stream to v0, returning the (possibly re-sliced) result.
func (d *msgpackDecDriver) nextValueBytes(v0 []byte) (v []byte) {
	if !d.bdRead {
		d.readNextBd()
	}
	v = v0
	var h = decNextValueBytesHelper{d: &d.d}
	var cursor = d.d.rb.c - 1 // position of the descriptor byte already consumed
	h.append1(&v, d.bd)
	v = d.nextValueBytesBdReadR(v)
	d.bdRead = false
	h.bytesRdV(&v, cursor)
	return
}

// nextValueBytesR reads the next descriptor byte, then delegates to
// nextValueBytesBdReadR. Used for recursing into container elements.
func (d *msgpackDecDriver) nextValueBytesR(v0 []byte) (v []byte) {
	d.readNextBd()
	v = v0
	var h = decNextValueBytesHelper{d: &d.d}
	h.append1(&v, d.bd)
	return d.nextValueBytesBdReadR(v)
}
+
+func (d *msgpackDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
+ v = v0
+ var h = decNextValueBytesHelper{d: &d.d}
+
+ bd := d.bd
+
+ var clen uint
+
+ switch bd {
+ case mpNil, mpFalse, mpTrue: // pass
+ case mpUint8, mpInt8:
+ h.append1(&v, d.d.decRd.readn1())
+ case mpUint16, mpInt16:
+ h.appendN(&v, d.d.decRd.readx(2)...)
+ case mpFloat, mpUint32, mpInt32:
+ h.appendN(&v, d.d.decRd.readx(4)...)
+ case mpDouble, mpUint64, mpInt64:
+ h.appendN(&v, d.d.decRd.readx(8)...)
+ case mpStr8, mpBin8:
+ clen = uint(d.d.decRd.readn1())
+ h.append1(&v, byte(clen))
+ h.appendN(&v, d.d.decRd.readx(clen)...)
+ case mpStr16, mpBin16:
+ x := d.d.decRd.readn2()
+ h.appendN(&v, x[:]...)
+ clen = uint(bigen.Uint16(x))
+ h.appendN(&v, d.d.decRd.readx(clen)...)
+ case mpStr32, mpBin32:
+ x := d.d.decRd.readn4()
+ h.appendN(&v, x[:]...)
+ clen = uint(bigen.Uint32(x))
+ h.appendN(&v, d.d.decRd.readx(clen)...)
+ case mpFixExt1:
+ h.append1(&v, d.d.decRd.readn1()) // tag
+ h.append1(&v, d.d.decRd.readn1())
+ case mpFixExt2:
+ h.append1(&v, d.d.decRd.readn1()) // tag
+ h.appendN(&v, d.d.decRd.readx(2)...)
+ case mpFixExt4:
+ h.append1(&v, d.d.decRd.readn1()) // tag
+ h.appendN(&v, d.d.decRd.readx(4)...)
+ case mpFixExt8:
+ h.append1(&v, d.d.decRd.readn1()) // tag
+ h.appendN(&v, d.d.decRd.readx(8)...)
+ case mpFixExt16:
+ h.append1(&v, d.d.decRd.readn1()) // tag
+ h.appendN(&v, d.d.decRd.readx(16)...)
+ case mpExt8:
+ clen = uint(d.d.decRd.readn1())
+ h.append1(&v, uint8(clen))
+ h.append1(&v, d.d.decRd.readn1()) // tag
+ h.appendN(&v, d.d.decRd.readx(clen)...)
+ case mpExt16:
+ x := d.d.decRd.readn2()
+ clen = uint(bigen.Uint16(x))
+ h.appendN(&v, x[:]...)
+ h.append1(&v, d.d.decRd.readn1()) // tag
+ h.appendN(&v, d.d.decRd.readx(clen)...)
+ case mpExt32:
+ x := d.d.decRd.readn4()
+ clen = uint(bigen.Uint32(x))
+ h.appendN(&v, x[:]...)
+ h.append1(&v, d.d.decRd.readn1()) // tag
+ h.appendN(&v, d.d.decRd.readx(clen)...)
+ case mpArray16:
+ x := d.d.decRd.readn2()
+ clen = uint(bigen.Uint16(x))
+ h.appendN(&v, x[:]...)
+ for i := uint(0); i < clen; i++ {
+ v = d.nextValueBytesR(v)
+ }
+ case mpArray32:
+ x := d.d.decRd.readn4()
+ clen = uint(bigen.Uint32(x))
+ h.appendN(&v, x[:]...)
+ for i := uint(0); i < clen; i++ {
+ v = d.nextValueBytesR(v)
+ }
+ case mpMap16:
+ x := d.d.decRd.readn2()
+ clen = uint(bigen.Uint16(x))
+ h.appendN(&v, x[:]...)
+ for i := uint(0); i < clen; i++ {
+ v = d.nextValueBytesR(v)
+ v = d.nextValueBytesR(v)
+ }
+ case mpMap32:
+ x := d.d.decRd.readn4()
+ clen = uint(bigen.Uint32(x))
+ h.appendN(&v, x[:]...)
+ for i := uint(0); i < clen; i++ {
+ v = d.nextValueBytesR(v)
+ v = d.nextValueBytesR(v)
+ }
+ default:
+ switch {
+ case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: // pass
+ case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: // pass
+ case bd >= mpFixStrMin && bd <= mpFixStrMax:
+ clen = uint(mpFixStrMin ^ bd)
+ h.appendN(&v, d.d.decRd.readx(clen)...)
+ case bd >= mpFixArrayMin && bd <= mpFixArrayMax:
+ clen = uint(mpFixArrayMin ^ bd)
+ for i := uint(0); i < clen; i++ {
+ v = d.nextValueBytesR(v)
+ }
+ case bd >= mpFixMapMin && bd <= mpFixMapMax:
+ clen = uint(mpFixMapMin ^ bd)
+ for i := uint(0); i < clen; i++ {
+ v = d.nextValueBytesR(v)
+ v = d.nextValueBytesR(v)
+ }
+ default:
+ d.d.errorf("nextValueBytes: cannot infer value: %s: Ox%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd))
+ }
+ }
+ return
+}
+
// decFloat4Int32 reads a float32 from the stream for assignment to an
// integer, erroring if it has a fractional part.
func (d *msgpackDecDriver) decFloat4Int32() (f float32) {
	fbits := bigen.Uint32(d.d.decRd.readn4())
	f = math.Float32frombits(fbits)
	if !noFrac32(fbits) {
		d.d.errorf("assigning integer value from float32 with a fraction: %v", f)
	}
	return
}

// decFloat4Int64 reads a float64 from the stream for assignment to an
// integer, erroring if it has a fractional part.
func (d *msgpackDecDriver) decFloat4Int64() (f float64) {
	fbits := bigen.Uint64(d.d.decRd.readn8())
	f = math.Float64frombits(fbits)
	if !noFrac64(fbits) {
		d.d.errorf("assigning integer value from float64 with a fraction: %v", f)
	}
	return
}
+
// int can be decoded from msgpack type: intXXX or uintXXX

// DecodeInt64 decodes a signed integer from any intX/uintX descriptor,
// a fixnum, or a float/double with no fractional part.
func (d *msgpackDecDriver) DecodeInt64() (i int64) {
	if d.advanceNil() {
		return
	}
	switch d.bd {
	case mpUint8:
		i = int64(uint64(d.d.decRd.readn1()))
	case mpUint16:
		i = int64(uint64(bigen.Uint16(d.d.decRd.readn2())))
	case mpUint32:
		i = int64(uint64(bigen.Uint32(d.d.decRd.readn4())))
	case mpUint64:
		i = int64(bigen.Uint64(d.d.decRd.readn8()))
	case mpInt8:
		i = int64(int8(d.d.decRd.readn1()))
	case mpInt16:
		i = int64(int16(bigen.Uint16(d.d.decRd.readn2())))
	case mpInt32:
		i = int64(int32(bigen.Uint32(d.d.decRd.readn4())))
	case mpInt64:
		i = int64(bigen.Uint64(d.d.decRd.readn8()))
	case mpFloat:
		i = int64(d.decFloat4Int32())
	case mpDouble:
		i = int64(d.decFloat4Int64())
	default:
		switch {
		case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
			i = int64(int8(d.bd))
		case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
			i = int64(int8(d.bd))
		default:
			d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
		}
	}
	d.bdRead = false
	return
}
+
// uint can be decoded from msgpack type: intXXX or uintXXX

// DecodeUint64 decodes an unsigned integer, accepting signed descriptors
// and non-fractional floats only when the value is non-negative.
func (d *msgpackDecDriver) DecodeUint64() (ui uint64) {
	if d.advanceNil() {
		return
	}
	switch d.bd {
	case mpUint8:
		ui = uint64(d.d.decRd.readn1())
	case mpUint16:
		ui = uint64(bigen.Uint16(d.d.decRd.readn2()))
	case mpUint32:
		ui = uint64(bigen.Uint32(d.d.decRd.readn4()))
	case mpUint64:
		ui = bigen.Uint64(d.d.decRd.readn8())
	case mpInt8:
		if i := int64(int8(d.d.decRd.readn1())); i >= 0 {
			ui = uint64(i)
		} else {
			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
		}
	case mpInt16:
		if i := int64(int16(bigen.Uint16(d.d.decRd.readn2()))); i >= 0 {
			ui = uint64(i)
		} else {
			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
		}
	case mpInt32:
		if i := int64(int32(bigen.Uint32(d.d.decRd.readn4()))); i >= 0 {
			ui = uint64(i)
		} else {
			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
		}
	case mpInt64:
		if i := int64(bigen.Uint64(d.d.decRd.readn8())); i >= 0 {
			ui = uint64(i)
		} else {
			d.d.errorf("assigning negative signed value: %v, to unsigned type", i)
		}
	case mpFloat:
		if f := d.decFloat4Int32(); f >= 0 {
			ui = uint64(f)
		} else {
			d.d.errorf("assigning negative float value: %v, to unsigned type", f)
		}
	case mpDouble:
		if f := d.decFloat4Int64(); f >= 0 {
			ui = uint64(f)
		} else {
			d.d.errorf("assigning negative float value: %v, to unsigned type", f)
		}
	default:
		switch {
		case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
			ui = uint64(d.bd)
		case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
			d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd))
		default:
			d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
		}
	}
	d.bdRead = false
	return
}
+
// float can either be decoded from msgpack type: float, double or intX

// DecodeFloat64 decodes a float64 from float/double descriptors, falling
// back to integer decoding for intX/uintX/fixnum values.
func (d *msgpackDecDriver) DecodeFloat64() (f float64) {
	if d.advanceNil() {
		return
	}
	if d.bd == mpFloat {
		f = float64(math.Float32frombits(bigen.Uint32(d.d.decRd.readn4())))
	} else if d.bd == mpDouble {
		f = math.Float64frombits(bigen.Uint64(d.d.decRd.readn8()))
	} else {
		f = float64(d.DecodeInt64())
	}
	d.bdRead = false
	return
}

// bool can be decoded from bool, fixnum 0 or 1.

// DecodeBool decodes a boolean, also accepting the fixnums 0 and 1.
func (d *msgpackDecDriver) DecodeBool() (b bool) {
	if d.advanceNil() {
		return
	}
	if d.bd == mpFalse || d.bd == 0 {
		// b = false
	} else if d.bd == mpTrue || d.bd == 1 {
		b = true
	} else {
		d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd))
	}
	d.bdRead = false
	return
}
+
// DecodeBytes decodes a byte slice from a bin/str container, or from an
// array of uint8 values. Depending on mode, it may return a zero-copy view
// into the input, reuse the decoder scratch buffer, or fill/allocate bs.
func (d *msgpackDecDriver) DecodeBytes(bs []byte) (bsOut []byte) {
	d.d.decByteState = decByteStateNone
	if d.advanceNil() {
		return
	}

	bd := d.bd
	var clen int
	if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
		clen = d.readContainerLen(msgpackContainerBin) // binary
	} else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 ||
		(bd >= mpFixStrMin && bd <= mpFixStrMax) {
		clen = d.readContainerLen(msgpackContainerStr) // string/raw
	} else if bd == mpArray16 || bd == mpArray32 ||
		(bd >= mpFixArrayMin && bd <= mpFixArrayMax) {
		// check if an "array" of uint8's
		if bs == nil {
			d.d.decByteState = decByteStateReuseBuf
			bs = d.d.b[:]
		}
		// bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d)
		slen := d.ReadArrayStart()
		var changed bool
		if bs, changed = usableByteSlice(bs, slen); changed {
			d.d.decByteState = decByteStateNone
		}
		// decode each array element as a uint8
		for i := 0; i < len(bs); i++ {
			bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8))
		}
		return bs
	} else {
		d.d.errorf("invalid byte descriptor for decoding bytes, got: 0x%x", d.bd)
	}

	d.bdRead = false
	if d.d.zerocopy() {
		d.d.decByteState = decByteStateZerocopy
		return d.d.decRd.rb.readx(uint(clen))
	}
	if bs == nil {
		d.d.decByteState = decByteStateReuseBuf
		bs = d.d.b[:]
	}
	return decByteSlice(d.d.r(), clen, d.h.MaxInitLen, bs)
}

// DecodeStringAsBytes decodes a string value, returned as a byte slice.
func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) {
	return d.DecodeBytes(nil)
}
+
// descBd returns a human-readable description of the current descriptor byte.
func (d *msgpackDecDriver) descBd() string {
	return sprintf("%v (%s)", d.bd, mpdesc(d.bd))
}

// readNextBd reads the next descriptor byte and marks it as pending.
func (d *msgpackDecDriver) readNextBd() {
	d.bd = d.d.decRd.readn1()
	d.bdRead = true
}

// advanceNil ensures a descriptor byte is loaded and consumes it if it is nil,
// reporting whether a nil was seen.
func (d *msgpackDecDriver) advanceNil() (null bool) {
	if !d.bdRead {
		d.readNextBd()
	}
	if d.bd == mpNil {
		d.bdRead = false
		return true // null = true
	}
	return
}

// TryNil consumes a nil value if it is next in the stream.
func (d *msgpackDecDriver) TryNil() (v bool) {
	return d.advanceNil()
}
+
// ContainerType peeks at the next descriptor byte and classifies it as
// nil, bytes, string, array, map, or unset (anything else). Only a nil
// descriptor is consumed.
func (d *msgpackDecDriver) ContainerType() (vt valueType) {
	if !d.bdRead {
		d.readNextBd()
	}
	bd := d.bd
	if bd == mpNil {
		d.bdRead = false
		return valueTypeNil
	} else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
		return valueTypeBytes
	} else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 ||
		(bd >= mpFixStrMin && bd <= mpFixStrMax) {
		if d.h.WriteExt || d.h.RawToString { // UTF-8 string (new spec)
			return valueTypeString
		}
		return valueTypeBytes // raw (old spec)
	} else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) {
		return valueTypeArray
	} else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
		return valueTypeMap
	}
	return valueTypeUnset
}
+
// readContainerLen decodes the length for the given container type from the
// current descriptor byte: 8/16/32-bit length forms, or the fix-form where
// the length is packed into the descriptor byte itself.
func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) {
	bd := d.bd
	if bd == ct.b8 {
		clen = int(d.d.decRd.readn1())
	} else if bd == ct.b16 {
		clen = int(bigen.Uint16(d.d.decRd.readn2()))
	} else if bd == ct.b32 {
		clen = int(bigen.Uint32(d.d.decRd.readn4()))
	} else if (ct.bFixMin & bd) == ct.bFixMin {
		clen = int(ct.bFixMin ^ bd)
	} else {
		d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
	}
	d.bdRead = false
	return
}

// ReadMapStart reads a map header, returning containerLenNil for a nil map.
func (d *msgpackDecDriver) ReadMapStart() int {
	if d.advanceNil() {
		return containerLenNil
	}
	return d.readContainerLen(msgpackContainerMap)
}

// ReadArrayStart reads an array header, returning containerLenNil for a nil array.
func (d *msgpackDecDriver) ReadArrayStart() int {
	if d.advanceNil() {
		return containerLenNil
	}
	return d.readContainerLen(msgpackContainerList)
}
+
// readExtLen returns the payload length for the current ext descriptor:
// fixed for fixext1..16, or read from the stream for ext8/16/32.
func (d *msgpackDecDriver) readExtLen() (clen int) {
	switch d.bd {
	case mpFixExt1:
		clen = 1
	case mpFixExt2:
		clen = 2
	case mpFixExt4:
		clen = 4
	case mpFixExt8:
		clen = 8
	case mpFixExt16:
		clen = 16
	case mpExt8:
		clen = int(d.d.decRd.readn1())
	case mpExt16:
		clen = int(bigen.Uint16(d.d.decRd.readn2()))
	case mpExt32:
		clen = int(bigen.Uint32(d.d.decRd.readn4()))
	default:
		d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd)
	}
	return
}
+
// DecodeTime decodes a time.Time from the msgpack timestamp extension
// (4, 8 or 12 byte payloads) or from raw string/binary bytes.
func (d *msgpackDecDriver) DecodeTime() (t time.Time) {
	// decode time from string bytes or ext
	if d.advanceNil() {
		return
	}
	bd := d.bd
	var clen int
	if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
		clen = d.readContainerLen(msgpackContainerBin) // binary
	} else if bd == mpStr8 || bd == mpStr16 || bd == mpStr32 ||
		(bd >= mpFixStrMin && bd <= mpFixStrMax) {
		clen = d.readContainerLen(msgpackContainerStr) // string/raw
	} else {
		// expect to see mpFixExt4,-1 OR mpFixExt8,-1 OR mpExt8,12,-1
		d.bdRead = false
		b2 := d.d.decRd.readn1()
		if d.bd == mpFixExt4 && b2 == mpTimeExtTagU {
			clen = 4
		} else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU {
			clen = 8
		} else if d.bd == mpExt8 && b2 == 12 && d.d.decRd.readn1() == mpTimeExtTagU {
			clen = 12
		} else {
			d.d.errorf("invalid stream for decoding time as extension: got 0x%x, 0x%x", d.bd, b2)
		}
	}
	return d.decodeTime(clen)
}

// decodeTime decodes the msgpack timestamp payload of length clen:
// 4 bytes = seconds; 8 bytes = 34-bit seconds + 30-bit nanoseconds packed
// into a uint64; 12 bytes = 32-bit nanoseconds then 64-bit seconds.
func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) {
	d.bdRead = false
	switch clen {
	case 4:
		t = time.Unix(int64(bigen.Uint32(d.d.decRd.readn4())), 0).UTC()
	case 8:
		tv := bigen.Uint64(d.d.decRd.readn8())
		t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC()
	case 12:
		nsec := bigen.Uint32(d.d.decRd.readn4())
		sec := bigen.Uint64(d.d.decRd.readn8())
		t = time.Unix(int64(sec), int64(nsec)).UTC()
	default:
		d.d.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen)
	}
	return
}
+
// DecodeExt decodes an extension value into rv, dispatching to a RawExt,
// the SelfExt side-decoder, or the provided Ext implementation.
// msgpack ext tags are a single byte, so xtag must fit in 0xff.
func (d *msgpackDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
	if xtag > 0xff {
		d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
	}
	if d.advanceNil() {
		return
	}
	xbs, realxtag1, zerocopy := d.decodeExtV(ext != nil, uint8(xtag))
	realxtag := uint64(realxtag1)
	if ext == nil {
		re := rv.(*RawExt)
		re.Tag = realxtag
		re.setData(xbs, zerocopy)
	} else if ext == SelfExt {
		d.d.sideDecode(rv, basetype, xbs)
	} else {
		ext.ReadExt(rv, xbs)
	}
}

// decodeExtV reads the payload of an extension (or raw bytes/string,
// which are accepted in place of an ext). When verifyTag is set, the tag
// read from the stream must match the expected tag.
func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xbs []byte, xtag byte, zerocopy bool) {
	xbd := d.bd
	if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 {
		xbs = d.DecodeBytes(nil)
	} else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 ||
		(xbd >= mpFixStrMin && xbd <= mpFixStrMax) {
		xbs = d.DecodeStringAsBytes()
	} else {
		clen := d.readExtLen()
		xtag = d.d.decRd.readn1()
		if verifyTag && xtag != tag {
			d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag)
		}
		if d.d.bytes {
			// decoding off a byte slice: return a zero-copy view
			xbs = d.d.decRd.rb.readx(uint(clen))
			zerocopy = true
		} else {
			xbs = decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, d.d.b[:])
		}
	}
	d.bdRead = false
	return
}
+
//--------------------------------------------------

// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
type MsgpackHandle struct {
	binaryEncodingType
	BasicHandle

	// NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum.
	NoFixedNum bool

	// WriteExt controls whether the new spec is honored.
	//
	// With WriteExt=true, we can encode configured extensions with extension tags
	// and encode string/[]byte/extensions in a way compatible with the new spec
	// but incompatible with the old spec.
	//
	// For compatibility with the old spec, set WriteExt=false.
	//
	// With WriteExt=false:
	//    configured extensions are serialized as raw bytes (not msgpack extensions).
	//    reserved byte descriptors like Str8 and those enabling the new msgpack Binary type
	//    are not encoded.
	WriteExt bool

	// PositiveIntUnsigned says to encode positive integers as unsigned.
	PositiveIntUnsigned bool
}
+
// Name returns the name of the handle: msgpack
func (h *MsgpackHandle) Name() string { return "msgpack" }

// desc describes a descriptor byte for error messages.
func (h *MsgpackHandle) desc(bd byte) string { return mpdesc(bd) }

// newEncDriver creates and initializes a msgpack encode driver for this handle.
func (h *MsgpackHandle) newEncDriver() encDriver {
	var e = &msgpackEncDriver{h: h}
	e.e.e = e
	e.e.init(h)
	e.reset()
	return e
}

// newDecDriver creates and initializes a msgpack decode driver for this handle.
func (h *MsgpackHandle) newDecDriver() decDriver {
	d := &msgpackDecDriver{h: h}
	d.d.d = d
	d.d.init(h)
	d.reset()
	return d
}
+
//--------------------------------------------------

// msgpackSpecRpcCodec implements net/rpc client and server codecs using the
// msgpack-rpc wire protocol (four-element arrays: [type, msgid, method/error, body]).
type msgpackSpecRpcCodec struct {
	rpcCodec
}

// /////////////// Spec RPC Codec ///////////////////

// WriteRequest encodes an rpc request as [0, seq, method, args].
func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
	// WriteRequest can write to both a Go service, and other services that do
	// not abide by the 1 argument rule of a Go service.
	// We discriminate based on if the body is a MsgpackSpecRpcMultiArgs
	var bodyArr []interface{}
	if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
		bodyArr = ([]interface{})(m)
	} else {
		bodyArr = []interface{}{body}
	}
	r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
	return c.write(r2)
}

// WriteResponse encodes an rpc response as [1, seq, error, body];
// on error, the body is omitted (nil).
func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
	var moe interface{}
	if r.Error != "" {
		moe = r.Error
	}
	if moe != nil && body != nil {
		body = nil
	}
	r2 := []interface{}{1, uint32(r.Seq), moe, body}
	return c.write(r2)
}

// ReadResponseHeader parses a response header (type byte 1).
func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
	return c.parseCustomHeader(1, &r.Seq, &r.Error)
}

// ReadRequestHeader parses a request header (type byte 0).
func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
	return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
}

// ReadRequestBody decodes the request body; a nil body reads and discards it.
func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
	if body == nil { // read and discard
		return c.read(nil)
	}
	bodyArr := []interface{}{body}
	return c.read(&bodyArr)
}
+
// parseCustomHeader reads the first three elements of a msgpack-rpc message
// ([type, msgid, method-or-error]) by hand, leaving the body on the stream
// so it can be decoded separately later.
func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
	if cls := c.cls.load(); cls.closed {
		return io.EOF
	}

	// We read the response header by hand
	// so that the body can be decoded on its own from the stream at a later time.

	const fia byte = 0x94 // four item array descriptor value

	// read exactly one byte (the array descriptor), retrying zero-length reads
	var ba [1]byte
	var n int
	for {
		n, err = c.r.Read(ba[:])
		if err != nil {
			return
		}
		if n == 1 {
			break
		}
	}

	var b = ba[0]
	if b != fia {
		err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b))
	} else {
		err = c.read(&b)
		if err == nil {
			if b != expectTypeByte {
				err = fmt.Errorf("%s - expecting %v but got %x/%s", msgBadDesc, expectTypeByte, b, mpdesc(b))
			} else {
				err = c.read(msgid)
				if err == nil {
					err = c.read(methodOrError)
				}
			}
		}
	}
	return
}
+
//--------------------------------------------------

// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol
// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
type msgpackSpecRpc struct{}

// MsgpackSpecRpc implements Rpc using the communication protocol defined in
// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
//
// See GoRpc documentation, for information on buffering for better performance.
var MsgpackSpecRpc msgpackSpecRpc

// ServerCodec returns an rpc.ServerCodec speaking msgpack-rpc over conn.
func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
	return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}

// ClientCodec returns an rpc.ClientCodec speaking msgpack-rpc over conn.
func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
	return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}

// compile-time interface satisfaction checks
var _ decDriver = (*msgpackDecDriver)(nil)
var _ encDriver = (*msgpackEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/reader.go b/vendor/github.com/ugorji/go/codec/reader.go
new file mode 100644
index 000000000..802938c7d
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/reader.go
@@ -0,0 +1,816 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import "io"
+
// decReader abstracts the reading source, allowing implementations that can
// read from an io.Reader or directly off a byte slice with zero-copying.
//
// Implementations report failures via halt/panic, not error returns.
type decReader interface {
	// readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR
	// just return a view of the []byte being decoded from.
	readx(n uint) []byte
	readb([]byte)

	readn1() byte
	readn2() [2]byte
	// readn3 will read 3 bytes into the top-most elements of a 4-byte array
	readn3() [4]byte
	readn4() [4]byte
	readn8() [8]byte
	// readn1eof() (v uint8, eof bool)

	// // read up to 8 bytes at a time
	// readn(num uint8) (v [8]byte)

	numread() uint // number of bytes read

	// skip any whitespace characters, and return the first non-matching byte
	skipWhitespace() (token byte)

	// jsonReadNum will include last read byte in first element of slice,
	// and continue numeric characters until it sees a non-numeric char
	// or EOF. If it sees a non-numeric character, it will unread that.
	jsonReadNum() []byte

	// jsonReadAsisChars will read json plain characters (anything but " or \)
	// and return a slice terminated by a non-json asis character.
	jsonReadAsisChars() []byte

	// skip will skip any byte that matches, and return the first non-matching byte
	// skip(accept *bitset256) (token byte)

	// readTo will read any byte that matches, stopping once no-longer matching.
	// readTo(accept *bitset256) (out []byte)

	// readUntil will read, only stopping once it matches the 'stop' byte (which it excludes).
	readUntil(stop byte) (out []byte)
}
+
// ------------------------------------------------

// unreadByteStatus tracks whether the last read byte may be unread/re-read.
type unreadByteStatus uint8

// unreadByteStatus goes from
// undefined (when initialized) -- (read) --> canUnread -- (unread) --> canRead ...
const (
	unreadByteUndefined unreadByteStatus = iota
	unreadByteCanRead
	unreadByteCanUnread
)

// --------------------

// ioDecReaderCommon holds state shared by the unbuffered and buffered
// io.Reader-backed decReader implementations.
type ioDecReaderCommon struct {
	r io.Reader // the reader passed in

	n uint // num read

	l  byte             // last byte
	ls unreadByteStatus // last byte status

	b [6]byte // tiny buffer for reading single bytes

	blist *bytesFreelist

	bufr []byte // buffer for readTo/readUntil
}
+
// reset re-initializes the reader state for a new io.Reader, obtaining a
// scratch buffer from the freelist.
func (z *ioDecReaderCommon) reset(r io.Reader, blist *bytesFreelist) {
	z.blist = blist
	z.r = r
	z.ls = unreadByteUndefined
	z.l, z.n = 0, 0
	z.bufr = z.blist.check(z.bufr, 256)
}

// numread returns the total number of bytes read so far.
func (z *ioDecReaderCommon) numread() uint {
	return z.n
}
+
// ------------------------------------------

// ioDecReader is a decReader that reads off an io.Reader.
//
// It also has a fallback implementation of ByteScanner if needed.
type ioDecReader struct {
	ioDecReaderCommon

	br io.ByteScanner // non-nil iff the underlying reader supports byte scanning

	x [64 + 48]byte // for: get struct field name, swallow valueTypeBytes, etc
}

// reset re-initializes for a new reader, capturing its ByteScanner if available.
func (z *ioDecReader) reset(r io.Reader, blist *bytesFreelist) {
	z.ioDecReaderCommon.reset(r, blist)

	z.br, _ = r.(io.ByteScanner)
}
+
// Read implements io.Reader, first yielding a previously-unread byte (if any),
// then delegating to the underlying reader. It tracks the last byte read so a
// subsequent UnreadByte can succeed even without a native ByteScanner.
func (z *ioDecReader) Read(p []byte) (n int, err error) {
	if len(p) == 0 {
		return
	}
	var firstByte bool
	if z.ls == unreadByteCanRead {
		// deliver the unread byte before touching the underlying reader
		z.ls = unreadByteCanUnread
		p[0] = z.l
		if len(p) == 1 {
			n = 1
			return
		}
		firstByte = true
		p = p[1:]
	}
	n, err = z.r.Read(p)
	if n > 0 {
		if err == io.EOF && n == len(p) {
			err = nil // read was successful, so postpone EOF (till next time)
		}
		z.l = p[n-1]
		z.ls = unreadByteCanUnread
	}
	if firstByte {
		n++
	}
	return
}
+
// ReadByte implements io.ByteReader, preferring the underlying ByteScanner
// and falling back to a one-byte Read through the tiny scratch buffer.
func (z *ioDecReader) ReadByte() (c byte, err error) {
	if z.br != nil {
		c, err = z.br.ReadByte()
		if err == nil {
			z.l = c
			z.ls = unreadByteCanUnread
		}
		return
	}

	n, err := z.Read(z.b[:1])
	if n == 1 {
		c = z.b[0]
		if err == io.EOF {
			err = nil // read was successful, so postpone EOF (till next time)
		}
	}
	return
}

// UnreadByte implements io.ByteScanner, delegating when possible, else using
// the tracked last-byte state; only one byte of pushback is supported.
func (z *ioDecReader) UnreadByte() (err error) {
	if z.br != nil {
		err = z.br.UnreadByte()
		if err == nil {
			z.ls = unreadByteCanRead
		}
		return
	}

	switch z.ls {
	case unreadByteCanUnread:
		z.ls = unreadByteCanRead
	case unreadByteCanRead:
		err = errDecUnreadByteLastByteNotRead
	case unreadByteUndefined:
		err = errDecUnreadByteNothingToRead
	default:
		err = errDecUnreadByteUnknown
	}
	return
}
+
// readn2 reads exactly 2 bytes.
func (z *ioDecReader) readn2() (bs [2]byte) {
	z.readb(bs[:])
	return
}

// readn3 reads 3 bytes into the top-most elements of a 4-byte array.
func (z *ioDecReader) readn3() (bs [4]byte) {
	z.readb(bs[1:])
	return
}

// readn4 reads exactly 4 bytes.
func (z *ioDecReader) readn4() (bs [4]byte) {
	z.readb(bs[:])
	return
}

// readn8 reads exactly 8 bytes.
func (z *ioDecReader) readn8() (bs [8]byte) {
	z.readb(bs[:])
	return
}
+
// readx reads exactly n bytes, serving small reads out of the fixed scratch
// array z.x and allocating only for larger ones. Halts on read error.
func (z *ioDecReader) readx(n uint) (bs []byte) {
	if n == 0 {
		return
	}
	if n < uint(len(z.x)) {
		bs = z.x[:n]
	} else {
		bs = make([]byte, n)
	}
	_, err := readFull(z.r, bs)
	halt.onerror(err)
	z.n += uint(len(bs))
	return
}

// readb fills bs completely from the reader, halting on error.
func (z *ioDecReader) readb(bs []byte) {
	if len(bs) == 0 {
		return
	}
	_, err := readFull(z.r, bs)
	halt.onerror(err)
	z.n += uint(len(bs))
}
+
// readn1 reads one byte, halting on any error (including EOF).
func (z *ioDecReader) readn1() (b uint8) {
	b, err := z.ReadByte()
	halt.onerror(err)
	z.n++
	return
}

// readn1eof reads one byte, reporting EOF via the eof flag instead of halting;
// any other error still halts.
func (z *ioDecReader) readn1eof() (b uint8, eof bool) {
	b, err := z.ReadByte()
	if err == nil {
		z.n++
	} else if err == io.EOF {
		eof = true
	} else {
		halt.onerror(err)
	}
	return
}
+
// jsonReadNum re-reads the last byte and accumulates numeric characters into
// bufr until a non-numeric byte (which is unread) or EOF.
func (z *ioDecReader) jsonReadNum() (bs []byte) {
	z.unreadn1()
	z.bufr = z.bufr[:0]
LOOP:
	i, eof := z.readn1eof()
	if eof {
		return z.bufr
	}
	if isNumberChar(i) {
		z.bufr = append(z.bufr, i)
		goto LOOP
	}
	z.unreadn1()
	return z.bufr
}

// jsonReadAsisChars accumulates bytes into bufr until (and including) the
// first '"' or '\\' terminator.
func (z *ioDecReader) jsonReadAsisChars() (bs []byte) {
	z.bufr = z.bufr[:0]
LOOP:
	i := z.readn1()
	z.bufr = append(z.bufr, i)
	if i == '"' || i == '\\' {
		return z.bufr
	}
	goto LOOP
}

// skipWhitespace reads past whitespace, returning the first non-whitespace byte.
func (z *ioDecReader) skipWhitespace() (token byte) {
LOOP:
	token = z.readn1()
	if isWhitespaceChar(token) {
		goto LOOP
	}
	return
}

// readUntil reads bytes into bufr until the stop byte, returning the bytes
// read excluding the stop byte itself.
func (z *ioDecReader) readUntil(stop byte) []byte {
	z.bufr = z.bufr[:0]
LOOP:
	token := z.readn1()
	z.bufr = append(z.bufr, token)
	if token == stop {
		return z.bufr[:len(z.bufr)-1]
	}
	goto LOOP
}

// unreadn1 pushes back the last byte read, halting if that is not possible.
func (z *ioDecReader) unreadn1() {
	err := z.UnreadByte()
	halt.onerror(err)
	z.n--
}
+
// ------------------------------------

// bufioDecReader is a buffered decReader over an io.Reader, maintaining an
// internal buffer and cursor.
type bufioDecReader struct {
	ioDecReaderCommon

	c   uint // cursor
	buf []byte
}

// reset re-initializes the buffered reader, obtaining a buffer of at least
// bufsize from the freelist only when the retained one is too small.
func (z *bufioDecReader) reset(r io.Reader, bufsize int, blist *bytesFreelist) {
	z.ioDecReaderCommon.reset(r, blist)
	z.c = 0
	if cap(z.buf) < bufsize {
		z.buf = blist.get(bufsize)
	} else {
		z.buf = z.buf[:0]
	}
}
+
+func (z *bufioDecReader) readb(p []byte) {
+ var n = uint(copy(p, z.buf[z.c:]))
+ z.n += n
+ z.c += n
+ if len(p) != int(n) {
+ z.readbFill(p, n, true, false)
+ }
+}
+
+func readbFillHandleErr(err error, must, eof bool) (isEOF bool) {
+ if err == io.EOF {
+ isEOF = true
+ }
+ if must && !(eof && isEOF) {
+ halt.onerror(err)
+ }
+ return
+}
+
+func (z *bufioDecReader) readbFill(p0 []byte, n uint, must, eof bool) (isEOF bool, err error) {
+ // at this point, there's nothing in z.buf to read (z.buf is fully consumed)
+ var p []byte
+ if p0 != nil {
+ p = p0[n:]
+ }
+ var n2 uint
+ if len(p) > cap(z.buf) {
+ n2, err = readFull(z.r, p)
+ if err != nil {
+ isEOF = readbFillHandleErr(err, must, eof)
+ return
+ }
+ n += n2
+ z.n += n2
+ // always keep last byte in z.buf
+ z.buf = z.buf[:1]
+ z.buf[0] = p[len(p)-1]
+ z.c = 1
+ return
+ }
+ // z.c is now 0, and len(p) <= cap(z.buf)
+ var n1 int
+LOOP:
+ // for len(p) > 0 && z.err == nil {
+ z.buf = z.buf[0:cap(z.buf)]
+ n1, err = z.r.Read(z.buf)
+ n2 = uint(n1)
+ if n2 == 0 && err != nil {
+ isEOF = readbFillHandleErr(err, must, eof)
+ return
+ }
+ err = nil
+ z.buf = z.buf[:n2]
+ z.c = 0
+ if len(p) > 0 {
+ n2 = uint(copy(p, z.buf))
+ z.c = n2
+ n += n2
+ z.n += n2
+ p = p[n2:]
+ if len(p) > 0 {
+ goto LOOP
+ }
+ if z.c == 0 {
+ z.buf = z.buf[:1]
+ z.buf[0] = p[len(p)-1]
+ z.c = 1
+ }
+ }
+ return
+}
+
// readn1 returns the next buffered byte, refilling the buffer if empty.
func (z *bufioDecReader) readn1() (b byte) {
	if z.c >= uint(len(z.buf)) {
		z.readbFill(nil, 0, true, false)
	}
	b = z.buf[z.c]
	z.c++
	z.n++
	return
}

// readn1eof is like readn1 but reports EOF via the eof flag instead of halting.
func (z *bufioDecReader) readn1eof() (b byte, eof bool) {
	if z.c >= uint(len(z.buf)) {
		eof, _ = z.readbFill(nil, 0, true, true)
		if eof {
			return
		}
	}
	b = z.buf[z.c]
	z.c++
	z.n++
	return
}

// unreadn1 steps the cursor back one byte; halts if nothing was read from the
// current buffer.
func (z *bufioDecReader) unreadn1() {
	if z.c == 0 {
		halt.onerror(errDecUnreadByteNothingToRead)
	}
	z.c--
	z.n--
}
+
// readn2 reads exactly 2 bytes.
func (z *bufioDecReader) readn2() (bs [2]byte) {
	z.readb(bs[:])
	return
}

// readn3 reads 3 bytes into the top-most elements of a 4-byte array.
func (z *bufioDecReader) readn3() (bs [4]byte) {
	z.readb(bs[1:])
	return
}

// readn4 reads exactly 4 bytes.
func (z *bufioDecReader) readn4() (bs [4]byte) {
	z.readb(bs[:])
	return
}

// readn8 reads exactly 8 bytes.
func (z *bufioDecReader) readn8() (bs [8]byte) {
	z.readb(bs[:])
	return
}
+
// readx returns exactly n bytes: a zero-copy view of the internal buffer when
// fully buffered, else a freshly allocated slice filled via readbFill.
func (z *bufioDecReader) readx(n uint) (bs []byte) {
	if n == 0 {
		// return
	} else if z.c+n <= uint(len(z.buf)) {
		bs = z.buf[z.c : z.c+n]
		z.n += n
		z.c += n
	} else {
		bs = make([]byte, n)
		// n no longer used - can reuse
		n = uint(copy(bs, z.buf[z.c:]))
		z.n += n
		z.c += n
		z.readbFill(bs, n, true, false)
	}
	return
}
+
// jsonReadNum un-reads the current byte, then accumulates subsequent
// JSON number characters into z.bufr, un-reading the terminator
// (or stopping silently at EOF).
func (z *bufioDecReader) jsonReadNum() (bs []byte) {
	z.unreadn1()
	z.bufr = z.bufr[:0]
LOOP:
	i, eof := z.readn1eof()
	if eof {
		return z.bufr
	}
	if isNumberChar(i) {
		z.bufr = append(z.bufr, i)
		goto LOOP
	}
	z.unreadn1()
	return z.bufr
}
+
// jsonReadAsisChars accumulates bytes into z.bufr up to and including
// the next '"' or '\\' — the two bytes that matter when scanning a
// JSON string body.
func (z *bufioDecReader) jsonReadAsisChars() (bs []byte) {
	z.bufr = z.bufr[:0]
LOOP:
	i := z.readn1()
	z.bufr = append(z.bufr, i)
	if i == '"' || i == '\\' {
		return z.bufr
	}
	goto LOOP
}
+
// skipWhitespace consumes whitespace and returns the first
// non-whitespace token (also consumed). Falls back to
// skipFillWhitespace when the buffer runs out.
func (z *bufioDecReader) skipWhitespace() (token byte) {
	i := z.c
LOOP:
	if i < uint(len(z.buf)) {
		// inline z.skipLoopFn(i) and refactor, so cost is within inline budget
		token = z.buf[i]
		i++
		if isWhitespaceChar(token) {
			goto LOOP
		}
		// NOTE(review): the adjustment is i-2-z.c, not i-z.c; presumably z.n
		// deliberately excludes the returned token — confirm against the
		// accounting in readUntil/skipFillWhitespace.
		z.n += i - 2 - z.c
		z.c = i
		return
	}
	return z.skipFillWhitespace()
}
+
// skipFillWhitespace refills the buffer repeatedly until a
// non-whitespace byte is found, halting on a zero-length read error.
func (z *bufioDecReader) skipFillWhitespace() (token byte) {
	z.n += uint(len(z.buf)) - z.c
	var i, n2 int
	var err error
	for {
		z.c = 0
		z.buf = z.buf[0:cap(z.buf)]
		n2, err = z.r.Read(z.buf)
		if n2 == 0 {
			halt.onerror(err)
		}
		z.buf = z.buf[:n2]
		for i, token = range z.buf {
			if !isWhitespaceChar(token) {
				z.n += (uint(i) - z.c) - 1
				z.loopFn(uint(i + 1))
				return
			}
		}
		z.n += uint(n2)
	}
}

// loopFn positions the cursor. Kept as a tiny method so callers stay
// within the compiler's inlining budget.
func (z *bufioDecReader) loopFn(i uint) {
	z.c = i
}
+
// readUntil returns the bytes up to (excluding) the stop byte, which is
// consumed. Falls back to readUntilFill when stop is not in the buffer.
func (z *bufioDecReader) readUntil(stop byte) (out []byte) {
	i := z.c
LOOP:
	if i < uint(len(z.buf)) {
		if z.buf[i] == stop {
			z.n += (i - z.c) - 1
			i++
			out = z.buf[z.c:i]
			z.c = i
			goto FINISH
		}
		i++
		goto LOOP
	}
	out = z.readUntilFill(stop)
FINISH:
	// both paths include the stop byte in out; trim it from the result
	return out[:len(out)-1]
}
+
// readUntilFill accumulates bytes (including the stop byte) into z.bufr
// across buffer refills, halting on a zero-length read error.
func (z *bufioDecReader) readUntilFill(stop byte) []byte {
	z.bufr = z.bufr[:0]
	z.n += uint(len(z.buf)) - z.c
	z.bufr = append(z.bufr, z.buf[z.c:]...)
	for {
		z.c = 0
		z.buf = z.buf[0:cap(z.buf)]
		n1, err := z.r.Read(z.buf)
		if n1 == 0 {
			halt.onerror(err)
		}
		n2 := uint(n1)
		z.buf = z.buf[:n2]
		for i, token := range z.buf {
			if token == stop {
				z.n += (uint(i) - z.c) - 1
				z.bufr = append(z.bufr, z.buf[z.c:i+1]...)
				z.loopFn(uint(i + 1))
				return z.bufr
			}
		}
		z.bufr = append(z.bufr, z.buf...)
		z.n += n2
	}
}
+
+// ------------------------------------
+
+// bytesDecReader is a decReader that reads off a byte slice with zero copying
+//
+// Note: we do not try to convert index'ing out of bounds to an io.EOF.
+// instead, we let it bubble up to the exported Encode/Decode method
+// and recover it as an io.EOF.
+//
+// see panicValToErr(...) function in helper.go.
type bytesDecReader struct {
	b []byte // data being decoded
	c uint   // cursor: index of the next byte to read
}

// reset points the reader at in, with capacity capped to len(in) so
// reslicing cannot read past the provided data.
func (z *bytesDecReader) reset(in []byte) {
	z.b = in[:len(in):len(in)] // reslicing must not go past capacity
	z.c = 0
}

// numread reports how many bytes have been consumed.
func (z *bytesDecReader) numread() uint {
	return z.c
}
+
+// Note: slicing from a non-constant start position is more expensive,
+// as more computation is required to decipher the pointer start position.
+// However, we do it only once, and it's better than reslicing both z.b and return value.
+
// readx returns the next n bytes as a zero-copy sub-slice of z.b.
// Reading past the end panics with an index error (recovered higher up
// and surfaced as io.EOF — see the type comment).
func (z *bytesDecReader) readx(n uint) (bs []byte) {
	x := z.c + n
	bs = z.b[z.c:x]
	z.c = x
	return
}

// readb copies the next len(bs) bytes into bs.
func (z *bytesDecReader) readb(bs []byte) {
	copy(bs, z.readx(uint(len(bs))))
}
+
+// MARKER: do not use this - as it calls into memmove (as the size of data to move is unknown)
+// func (z *bytesDecReader) readnn(bs []byte, n uint) {
+// x := z.c
+// copy(bs, z.b[x:x+n])
+// z.c += n
+// }
+
+// func (z *bytesDecReader) readn(num uint8) (bs [8]byte) {
+// x := z.c + uint(num)
+// copy(bs[:], z.b[z.c:x]) // slice z.b completely, so we get bounds error if past
+// z.c = x
+// return
+// }
+
+// func (z *bytesDecReader) readn1() uint8 {
+// z.c++
+// return z.b[z.c-1]
+// }
+
// readn1 returns the next byte and advances the cursor.
func (z *bytesDecReader) readn1() (v uint8) {
	v = z.b[z.c]
	z.c++
	return
}
+
+// MARKER: for readn{2,3,4,8}, ensure you slice z.b completely so we get bounds error if past end.
+
func (z *bytesDecReader) readn2() (bs [2]byte) {
	// copy(bs[:], z.b[z.c:z.c+2])
	// index the far byte first — likely lets the compiler elide the
	// second bounds check, and gives a bounds error if past the end
	bs[1] = z.b[z.c+1]
	bs[0] = z.b[z.c]
	z.c += 2
	return
}

// readn3 fills only the low 3 bytes of a [4]byte (bs[0] stays zero).
func (z *bytesDecReader) readn3() (bs [4]byte) {
	// copy(bs[1:], z.b[z.c:z.c+3])
	bs = okBytes3(z.b[z.c : z.c+3])
	z.c += 3
	return
}

func (z *bytesDecReader) readn4() (bs [4]byte) {
	// copy(bs[:], z.b[z.c:z.c+4])
	bs = okBytes4(z.b[z.c : z.c+4])
	z.c += 4
	return
}

func (z *bytesDecReader) readn8() (bs [8]byte) {
	// copy(bs[:], z.b[z.c:z.c+8])
	bs = okBytes8(z.b[z.c : z.c+8])
	z.c += 8
	return
}
+
// jsonReadNum steps back to re-include the byte the caller already
// read, then returns the maximal run of JSON number characters
// starting there (zero copy).
func (z *bytesDecReader) jsonReadNum() []byte {
	z.c--
	i := z.c
LOOP:
	if i < uint(len(z.b)) && isNumberChar(z.b[i]) {
		i++
		goto LOOP
	}
	// swap: advance the cursor to i, and keep the old cursor as the start
	z.c, i = i, z.c
	return z.b[i:z.c]
}
+
// jsonReadAsisChars returns the bytes up to and including the next
// '"' or '\\' (zero copy). Panics past end of input (surfaced as EOF).
func (z *bytesDecReader) jsonReadAsisChars() []byte {
	i := z.c
LOOP:
	token := z.b[i]
	i++
	if token == '"' || token == '\\' {
		// swap: advance the cursor, keep the old cursor as the slice start
		z.c, i = i, z.c
		return z.b[i:z.c]
	}
	goto LOOP
}
+
// skipWhitespace consumes whitespace and returns the first
// non-whitespace byte, which is consumed as well (c = i + 1).
func (z *bytesDecReader) skipWhitespace() (token byte) {
	i := z.c
LOOP:
	token = z.b[i]
	if isWhitespaceChar(token) {
		i++
		goto LOOP
	}
	z.c = i + 1
	return
}
+
// readUntil returns the bytes before the stop byte (zero copy),
// consuming the stop byte too. Panics past end of input.
func (z *bytesDecReader) readUntil(stop byte) (out []byte) {
	i := z.c
LOOP:
	if z.b[i] == stop {
		out = z.b[z.c:i]
		z.c = i + 1
		return
	}
	i++
	goto LOOP
}
+
+// --------------
+
// decRd bundles the three concrete decReader implementations plus
// per-handle flags, so the Decoder can branch to the active one cheaply
// (see the performance commentary below).
type decRd struct {
	mtr bool // is maptype a known type?
	str bool // is slicetype a known type?

	be   bool // is binary encoding
	js   bool // is json handle
	jsms bool // is json handle, and MapKeyAsString
	cbor bool // is cbor handle

	bytes bool // is bytes reader
	bufio bool // is this a bufioDecReader?

	rb bytesDecReader
	ri *ioDecReader
	bi *bufioDecReader

	// embedded decReader handles every method not overridden below;
	// it is set to whichever of rb/ri/bi is active.
	decReader
}
+
+// From out benchmarking, we see the following in terms of performance:
+//
+// - interface calls
+// - branch that can inline what it calls
+//
+// the if/else-if/else block is expensive to inline.
+// Each node of this construct costs a lot and dominates the budget.
+// Best to only do an if fast-path else block (so fast-path is inlined).
+// This is irrespective of inlineExtraCallCost set in $GOROOT/src/cmd/compile/internal/gc/inl.go
+//
+// In decRd methods below, we delegate all IO functions into their own methods.
+// This allows for the inlining of the common path when z.bytes=true.
+// Go 1.12+ supports inlining methods with up to 1 inlined function (or 2 if no other constructs).
+//
+// However, up through Go 1.13, decRd's readXXX, skip and unreadXXX methods are not inlined.
+// Consequently, there is no benefit to do the xxxIO methods for decRd at this time.
+// Instead, we have a if/else-if/else block so that IO calls do not have to jump through
+// a second unnecessary function call.
+//
+// If golang inlining gets better and bytesDecReader methods can be inlined,
+// then we can revert to using these 2 functions so the bytesDecReader
+// methods are inlined and the IO paths call out to a function.
+//
+// decRd is designed to embed a decReader, and then re-implement some of the decReader
+// methods using a conditional branch. We only override the ones that have a bytes version
+// that is small enough to be inlined. We use ./run.sh -z to check.
+// Right now, only numread and readn1 can be inlined.
+
// numread reports bytes consumed, dispatching on the active reader.
// The if/else-if/else shape (rather than a method table) is deliberate:
// see the inlining commentary above this block.
func (z *decRd) numread() uint {
	if z.bytes {
		return z.rb.numread()
	} else if z.bufio {
		return z.bi.numread()
	} else {
		return z.ri.numread()
	}
}
+
// readn1 reads one byte, with the bytes-reader fast path manually
// inlined so this method itself stays inlinable.
func (z *decRd) readn1() (v uint8) {
	if z.bytes {
		// MARKER: manually inline, else this function is not inlined.
		// Keep in sync with bytesDecReader.readn1
		// return z.rb.readn1()
		v = z.rb.b[z.rb.c]
		z.rb.c++
	} else {
		v = z.readn1IO()
	}
	return
}

// readn1IO is the out-of-line IO path for readn1.
func (z *decRd) readn1IO() uint8 {
	if z.bufio {
		return z.bi.readn1()
	}
	return z.ri.readn1()
}
+
+type devNullReader struct{}
+
+func (devNullReader) Read(p []byte) (int, error) { return 0, io.EOF }
+func (devNullReader) Close() error { return nil }
+
+func readFull(r io.Reader, bs []byte) (n uint, err error) {
+ var nn int
+ for n < uint(len(bs)) && err == nil {
+ nn, err = r.Read(bs[n:])
+ if nn > 0 {
+ if err == io.EOF {
+ // leave EOF for next time
+ err = nil
+ }
+ n += uint(nn)
+ }
+ }
+ // do not do this below - it serves no purpose
+ // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF }
+ return
+}
+
+var _ decReader = (*decRd)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/register_ext.go b/vendor/github.com/ugorji/go/codec/register_ext.go
new file mode 100644
index 000000000..65e455377
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/register_ext.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import "reflect"
+
+// This file exists, so that the files for specific formats do not all import reflect.
+// This just helps us ensure that reflect package is isolated to a few files.
+
// SetInterfaceExt registers an InterfaceExt for the given type and tag on the JSON handle.
func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
	return h.SetExt(rt, tag, makeExt(ext))
}

// SetInterfaceExt registers an InterfaceExt for the given type and tag on the CBOR handle.
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
	return h.SetExt(rt, tag, makeExt(ext))
}

// SetBytesExt registers a BytesExt for the given type and tag on the Msgpack handle.
func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
	return h.SetExt(rt, tag, makeExt(ext))
}

// SetBytesExt registers a BytesExt for the given type and tag on the Simple handle.
func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
	return h.SetExt(rt, tag, makeExt(ext))
}

// SetBytesExt registers a BytesExt for the given type and tag on the Binc handle.
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
	return h.SetExt(rt, tag, makeExt(ext))
}
+
+// func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+// return h.SetExt(rt, tag, &interfaceExtWrapper{InterfaceExt: ext})
+// }
diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go
new file mode 100644
index 000000000..cf72377c7
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/rpc.go
@@ -0,0 +1,232 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "net/rpc"
+)
+
var (
	errRpcIsClosed = errors.New("rpc - connection has been closed")
	errRpcNoConn   = errors.New("rpc - no connection")

	// rpcSpaceArr is the one-byte space separator written after each
	// JSON-encoded value (see rpcCodec.write).
	rpcSpaceArr = [1]byte{' '}
)
+
// Rpc provides a rpc Server or Client Codec for rpc communication.
type Rpc interface {
	ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
	ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
}

// RPCOptions holds options specific to rpc functionality
type RPCOptions struct {
	// RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls.
	//
	// Set RPCNoBuffer=true to turn buffering off.
	// Buffering can still be done if buffered connections are passed in, or
	// buffering is configured on the handle.
	RPCNoBuffer bool
}
+
// rpcCodec defines the struct members and common methods shared by the
// server and client codec implementations.
type rpcCodec struct {
	c io.Closer
	r io.Reader
	w io.Writer
	f ioFlusher // non-nil when w can (or was wrapped to) flush

	dec *Decoder
	enc *Encoder
	h   Handle

	cls atomicClsErr // closed flag + cached close error
}
+
// newRPCCodec builds a codec where conn serves as reader, writer and closer.
func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
	return newRPCCodec2(conn, conn, conn, h)
}
+
+func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec {
+ bh := h.getBasicHandle()
+ // if the writer can flush, ensure we leverage it, else
+ // we may hang waiting on read if write isn't flushed.
+ // var f ioFlusher
+ f, ok := w.(ioFlusher)
+ if !bh.RPCNoBuffer {
+ if bh.WriterBufferSize <= 0 {
+ if !ok { // a flusher means there's already a buffer
+ bw := bufio.NewWriter(w)
+ f, w = bw, bw
+ }
+ }
+ if bh.ReaderBufferSize <= 0 {
+ if _, ok = w.(ioBuffered); !ok {
+ r = bufio.NewReader(r)
+ }
+ }
+ }
+ return rpcCodec{
+ c: c,
+ w: w,
+ r: r,
+ f: f,
+ h: h,
+ enc: NewEncoder(w, h),
+ dec: NewDecoder(r, h),
+ }
+}
+
// write encodes each value in obj in sequence, then flushes (if a
// flusher is configured) before returning. The first encode or write
// error aborts the sequence; a flush error is reported only if no
// earlier error occurred.
func (c *rpcCodec) write(obj ...interface{}) (err error) {
	err = c.ready()
	if err != nil {
		return
	}
	if c.f != nil {
		defer func() {
			flushErr := c.f.Flush()
			if flushErr != nil && err == nil {
				err = flushErr
			}
		}()
	}

	for _, o := range obj {
		err = c.enc.Encode(o)
		if err != nil {
			return
		}
		// defensive: ensure a space is always written after each encoding,
		// in case the value was a number, and encoding a value right after
		// without a space will lead to invalid output.
		if c.h.isJson() {
			_, err = c.w.Write(rpcSpaceArr[:])
			if err != nil {
				return
			}
		}
	}
	return
}
+
// read decodes the next value from the stream into obj. A nil obj
// means read-and-discard, keeping the stream position in sync.
func (c *rpcCodec) read(obj interface{}) (err error) {
	err = c.ready()
	if err == nil {
		// If nil is passed in, we should read and discard
		if obj == nil {
			// return c.dec.Decode(&obj)
			err = c.dec.swallowErr()
		} else {
			err = c.dec.Decode(obj)
		}
	}
	return
}
+
// Close closes the underlying connection exactly once, caching the
// close error; later calls return the cached error.
func (c *rpcCodec) Close() (err error) {
	if c.c != nil {
		cls := c.cls.load()
		if !cls.closed {
			cls.err = c.c.Close()
			cls.closed = true
			c.cls.store(cls)
		}
		err = cls.err
	}
	return
}

// ready returns an error when there is no connection, or the
// connection has already been closed.
func (c *rpcCodec) ready() (err error) {
	if c.c == nil {
		err = errRpcNoConn
	} else {
		cls := c.cls.load()
		if cls.closed {
			if err = cls.err; err == nil {
				err = errRpcIsClosed
			}
		}
	}
	return
}
+
// ReadResponseBody implements rpc.ClientCodec.
func (c *rpcCodec) ReadResponseBody(body interface{}) error {
	return c.read(body)
}

// -------------------------------------

// goRpcCodec adapts rpcCodec to both rpc.ServerCodec and rpc.ClientCodec.
type goRpcCodec struct {
	rpcCodec
}

// WriteRequest implements rpc.ClientCodec.
func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
	return c.write(r, body)
}

// WriteResponse implements rpc.ServerCodec.
func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
	return c.write(r, body)
}

// ReadResponseHeader implements rpc.ClientCodec.
func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
	return c.read(r)
}

// ReadRequestHeader implements rpc.ServerCodec.
func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
	return c.read(r)
}

// ReadRequestBody implements rpc.ServerCodec.
func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
	return c.read(body)
}
+
+// -------------------------------------
+
// goRpc is the implementation of Rpc that uses the communication protocol
// as defined in net/rpc package.
type goRpc struct{}

// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
//
// Note: network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered.
//
// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle.
// This ensures we use an adequate buffer during reading and writing.
// If not configured, we will internally initialize and use a buffer during reads and writes.
// This can be turned off via the RPCNoBuffer option on the Handle.
//   var handle codec.JsonHandle
//   handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer
//
// Example 1: one way of configuring buffering explicitly:
//   var handle codec.JsonHandle // codec handle
//   handle.ReaderBufferSize = 1024
//   handle.WriterBufferSize = 1024
//   var conn io.ReadWriteCloser // connection got from a socket
//   var serverCodec = GoRpc.ServerCodec(conn, handle)
//   var clientCodec = GoRpc.ClientCodec(conn, handle)
//
// Example 2: you can also explicitly create a buffered connection yourself,
// and not worry about configuring the buffer sizes in the Handle.
//   var handle codec.Handle     // codec handle
//   var conn io.ReadWriteCloser // connection got from a socket
//   var bufconn = struct {      // bufconn here is a buffered io.ReadWriteCloser
//       io.Closer
//       *bufio.Reader
//       *bufio.Writer
//   }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)}
//   var serverCodec = GoRpc.ServerCodec(bufconn, handle)
//   var clientCodec = GoRpc.ClientCodec(bufconn, handle)
//
var GoRpc goRpc

// ServerCodec returns an rpc.ServerCodec speaking the net/rpc protocol over conn.
func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
	return &goRpcCodec{newRPCCodec(conn, h)}
}

// ClientCodec returns an rpc.ClientCodec speaking the net/rpc protocol over conn.
func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
	return &goRpcCodec{newRPCCodec(conn, h)}
}
diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go
new file mode 100644
index 000000000..c3c09f1cf
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/simple.go
@@ -0,0 +1,747 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+ "math"
+ "reflect"
+ "time"
+)
+
// Descriptor bytes (bd) for the simple format. Every encoded value is
// preceded by one of these.
const (
	_ uint8 = iota
	simpleVdNil     = 1
	simpleVdFalse   = 2
	simpleVdTrue    = 3
	simpleVdFloat32 = 4
	simpleVdFloat64 = 5

	// each lasts for 4 (ie n, n+1, n+2, n+3): the offset selects a
	// 1, 2, 4 or 8 byte payload
	simpleVdPosInt = 8
	simpleVdNegInt = 12

	simpleVdTime = 24

	// containers: each base code reserves a run of 8 (n .. n+7);
	// n..n+4 are currently used, selecting a 0, 1, 2, 4 or 8 byte
	// length prefix (see decLen)
	simpleVdString    = 216
	simpleVdByteArray = 224
	simpleVdArray     = 232
	simpleVdMap       = 240
	simpleVdExt       = 248
)

// simpledescNames maps a base descriptor byte to a human-readable
// name, used when formatting decode errors.
var simpledescNames = map[byte]string{
	simpleVdNil:     "null",
	simpleVdFalse:   "false",
	simpleVdTrue:    "true",
	simpleVdFloat32: "float32",
	simpleVdFloat64: "float64",

	simpleVdPosInt: "+int",
	simpleVdNegInt: "-int",

	simpleVdTime: "time",

	simpleVdString:    "string",
	simpleVdByteArray: "binary",
	simpleVdArray:     "array",
	simpleVdMap:       "map",
	simpleVdExt:       "ext",
}
+
+func simpledesc(bd byte) (s string) {
+ s = simpledescNames[bd]
+ if s == "" {
+ s = "unknown"
+ }
+ return
+}
+
// simpleEncDriver implements encDriver for the simple format.
type simpleEncDriver struct {
	noBuiltInTypes
	encDriverNoopContainerWriter
	encDriverNoState
	h *SimpleHandle
	// b [8]byte
	e Encoder
}

// encoder returns the Encoder this driver is bound to.
func (e *simpleEncDriver) encoder() *Encoder {
	return &e.e
}

// EncodeNil writes the one-byte nil descriptor.
func (e *simpleEncDriver) EncodeNil() {
	e.e.encWr.writen1(simpleVdNil)
}
+
+func (e *simpleEncDriver) EncodeBool(b bool) {
+ if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && !b {
+ e.EncodeNil()
+ return
+ }
+ if b {
+ e.e.encWr.writen1(simpleVdTrue)
+ } else {
+ e.e.encWr.writen1(simpleVdFalse)
+ }
+}
+
// EncodeFloat32 writes descriptor + 4-byte big-endian IEEE-754 bits
// (or nil, under EncZeroValuesAsNil outside a map key).
func (e *simpleEncDriver) EncodeFloat32(f float32) {
	if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 {
		e.EncodeNil()
		return
	}
	e.e.encWr.writen1(simpleVdFloat32)
	bigen.writeUint32(e.e.w(), math.Float32bits(f))
}

// EncodeFloat64 writes descriptor + 8-byte big-endian IEEE-754 bits
// (or nil, under EncZeroValuesAsNil outside a map key).
func (e *simpleEncDriver) EncodeFloat64(f float64) {
	if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 {
		e.EncodeNil()
		return
	}
	e.e.encWr.writen1(simpleVdFloat64)
	bigen.writeUint64(e.e.w(), math.Float64bits(f))
}

// EncodeInt writes v as a signed magnitude: negative values use the
// NegInt descriptor family. uint64(-v) is correct even for
// math.MinInt64, where -v wraps but converts to the right magnitude.
func (e *simpleEncDriver) EncodeInt(v int64) {
	if v < 0 {
		e.encUint(uint64(-v), simpleVdNegInt)
	} else {
		e.encUint(uint64(v), simpleVdPosInt)
	}
}

// EncodeUint writes v with the PosInt descriptor family.
func (e *simpleEncDriver) EncodeUint(v uint64) {
	e.encUint(v, simpleVdPosInt)
}

// encUint writes v in the smallest of 1/2/4/8 bytes, selecting the
// width via the descriptor offset bd+0..bd+3.
func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
	if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == 0 {
		e.EncodeNil()
		return
	}
	if v <= math.MaxUint8 {
		e.e.encWr.writen2(bd, uint8(v))
	} else if v <= math.MaxUint16 {
		e.e.encWr.writen1(bd + 1)
		bigen.writeUint16(e.e.w(), uint16(v))
	} else if v <= math.MaxUint32 {
		e.e.encWr.writen1(bd + 2)
		bigen.writeUint32(e.e.w(), uint32(v))
	} else { // if v <= math.MaxUint64 {
		e.e.encWr.writen1(bd + 3)
		bigen.writeUint64(e.e.w(), v)
	}
}
+
// encLen writes a container descriptor plus its length: bd alone for
// length 0, else bd+1..bd+4 selecting a 1/2/4/8-byte length prefix.
func (e *simpleEncDriver) encLen(bd byte, length int) {
	if length == 0 {
		e.e.encWr.writen1(bd)
	} else if length <= math.MaxUint8 {
		e.e.encWr.writen1(bd + 1)
		e.e.encWr.writen1(uint8(length))
	} else if length <= math.MaxUint16 {
		e.e.encWr.writen1(bd + 2)
		bigen.writeUint16(e.e.w(), uint16(length))
	} else if int64(length) <= math.MaxUint32 {
		e.e.encWr.writen1(bd + 3)
		bigen.writeUint32(e.e.w(), uint32(length))
	} else {
		e.e.encWr.writen1(bd + 4)
		bigen.writeUint64(e.e.w(), uint64(length))
	}
}
+
// EncodeExt writes an extension value as [ext-descriptor][len][tag][bytes].
// SelfExt values are first encoded via sideEncode into a pooled buffer,
// which is returned to the pool on exit.
func (e *simpleEncDriver) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
	var bs0, bs []byte
	if ext == SelfExt {
		bs0 = e.e.blist.get(1024)
		bs = bs0
		e.e.sideEncode(v, basetype, &bs)
	} else {
		bs = ext.WriteExt(v)
	}
	if bs == nil {
		e.EncodeNil()
		goto END
	}
	e.encodeExtPreamble(uint8(xtag), len(bs))
	e.e.encWr.writeb(bs)
END:
	if ext == SelfExt {
		e.e.blist.put(bs)
		// sideEncode may have grown bs past bs0; return both if distinct
		if !byteSliceSameData(bs0, bs) {
			e.e.blist.put(bs0)
		}
	}
}

// EncodeRawExt writes a pre-encoded RawExt payload verbatim.
func (e *simpleEncDriver) EncodeRawExt(re *RawExt) {
	e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
	e.e.encWr.writeb(re.Data)
}

// encodeExtPreamble writes the ext descriptor+length, then the tag byte.
func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
	e.encLen(simpleVdExt, length)
	e.e.encWr.writen1(xtag)
}
+
// WriteArrayStart writes the array descriptor and element count.
func (e *simpleEncDriver) WriteArrayStart(length int) {
	e.encLen(simpleVdArray, length)
}

// WriteMapStart writes the map descriptor and entry count.
func (e *simpleEncDriver) WriteMapStart(length int) {
	e.encLen(simpleVdMap, length)
}
+
// EncodeString writes v as a string (or as raw bytes when StringToRaw
// is set). An empty string may be written as nil under EncZeroValuesAsNil.
func (e *simpleEncDriver) EncodeString(v string) {
	if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == "" {
		e.EncodeNil()
		return
	}
	if e.h.StringToRaw {
		e.encLen(simpleVdByteArray, len(v))
	} else {
		e.encLen(simpleVdString, len(v))
	}
	e.e.encWr.writestr(v)
}

// EncodeStringBytesRaw writes v as a byte array; a nil slice always
// encodes as nil.
func (e *simpleEncDriver) EncodeStringBytesRaw(v []byte) {
	// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil {
	if v == nil {
		e.EncodeNil()
		return
	}
	e.encLen(simpleVdByteArray, len(v))
	e.e.encWr.writeb(v)
}

// EncodeTime writes t via time.Time.MarshalBinary as
// [time-descriptor][len][bytes]; the zero time always encodes as nil.
func (e *simpleEncDriver) EncodeTime(t time.Time) {
	// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() {
	if t.IsZero() {
		e.EncodeNil()
		return
	}
	v, err := t.MarshalBinary()
	e.e.onerror(err)
	e.e.encWr.writen2(simpleVdTime, uint8(len(v)))
	e.e.encWr.writeb(v)
}
+
+//------------------------------------
+
// simpleDecDriver implements decDriver for the simple format.
type simpleDecDriver struct {
	h *SimpleHandle
	bdAndBdread // current descriptor byte (bd) + whether it has been read
	_ bool
	noBuiltInTypes
	decDriverNoopContainerReader
	decDriverNoopNumberHelper
	d Decoder
}

// decoder returns the Decoder this driver is bound to.
func (d *simpleDecDriver) decoder() *Decoder {
	return &d.d
}

// descBd formats the current descriptor byte for error messages.
func (d *simpleDecDriver) descBd() string {
	return sprintf("%v (%s)", d.bd, simpledesc(d.bd))
}

// readNextBd reads the next descriptor byte and marks it consumed.
func (d *simpleDecDriver) readNextBd() {
	d.bd = d.d.decRd.readn1()
	d.bdRead = true
}

// advanceNil ensures a descriptor is loaded and reports (and consumes)
// a nil descriptor if present.
func (d *simpleDecDriver) advanceNil() (null bool) {
	if !d.bdRead {
		d.readNextBd()
	}
	if d.bd == simpleVdNil {
		d.bdRead = false
		return true // null = true
	}
	return
}
+
// ContainerType classifies the next value by its descriptor byte,
// consuming the descriptor only when it is nil.
func (d *simpleDecDriver) ContainerType() (vt valueType) {
	if !d.bdRead {
		d.readNextBd()
	}
	switch d.bd {
	case simpleVdNil:
		d.bdRead = false
		return valueTypeNil
	case simpleVdByteArray, simpleVdByteArray + 1,
		simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
		return valueTypeBytes
	case simpleVdString, simpleVdString + 1,
		simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
		return valueTypeString
	case simpleVdArray, simpleVdArray + 1,
		simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
		return valueTypeArray
	case simpleVdMap, simpleVdMap + 1,
		simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
		return valueTypeMap
	}
	return valueTypeUnset
}

// TryNil consumes a nil descriptor if that is what comes next.
func (d *simpleDecDriver) TryNil() bool {
	return d.advanceNil()
}
+
// decFloat reads a float payload for the current descriptor;
// ok=false when the descriptor is not a float.
func (d *simpleDecDriver) decFloat() (f float64, ok bool) {
	ok = true
	switch d.bd {
	case simpleVdFloat32:
		f = float64(math.Float32frombits(bigen.Uint32(d.d.decRd.readn4())))
	case simpleVdFloat64:
		f = math.Float64frombits(bigen.Uint64(d.d.decRd.readn8()))
	default:
		ok = false
	}
	return
}

// decInteger reads an integer payload (1/2/4/8 bytes per the
// descriptor offset), reporting sign via neg; ok=false when the
// descriptor is not an integer.
func (d *simpleDecDriver) decInteger() (ui uint64, neg, ok bool) {
	ok = true
	switch d.bd {
	case simpleVdPosInt:
		ui = uint64(d.d.decRd.readn1())
	case simpleVdPosInt + 1:
		ui = uint64(bigen.Uint16(d.d.decRd.readn2()))
	case simpleVdPosInt + 2:
		ui = uint64(bigen.Uint32(d.d.decRd.readn4()))
	case simpleVdPosInt + 3:
		ui = uint64(bigen.Uint64(d.d.decRd.readn8()))
	case simpleVdNegInt:
		ui = uint64(d.d.decRd.readn1())
		neg = true
	case simpleVdNegInt + 1:
		ui = uint64(bigen.Uint16(d.d.decRd.readn2()))
		neg = true
	case simpleVdNegInt + 2:
		ui = uint64(bigen.Uint32(d.d.decRd.readn4()))
		neg = true
	case simpleVdNegInt + 3:
		ui = uint64(bigen.Uint64(d.d.decRd.readn8()))
		neg = true
	default:
		ok = false
		// d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
	}
	// DO NOT do this check below, because callers may only want the unsigned value:
	//
	//	if ui > math.MaxInt64 {
	//	   d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
	//		return
	//	}
	return
}
+
// DecodeInt64 decodes a signed integer (zero on nil), erroring via the
// shared number helper on wrong descriptor or overflow.
func (d *simpleDecDriver) DecodeInt64() (i int64) {
	if d.advanceNil() {
		return
	}
	i = decNegintPosintFloatNumberHelper{&d.d}.int64(d.decInteger())
	d.bdRead = false
	return
}

// DecodeUint64 decodes an unsigned integer (zero on nil).
func (d *simpleDecDriver) DecodeUint64() (ui uint64) {
	if d.advanceNil() {
		return
	}
	ui = decNegintPosintFloatNumberHelper{&d.d}.uint64(d.decInteger())
	d.bdRead = false
	return
}

// DecodeFloat64 decodes a float (zero on nil).
func (d *simpleDecDriver) DecodeFloat64() (f float64) {
	if d.advanceNil() {
		return
	}
	f = decNegintPosintFloatNumberHelper{&d.d}.float64(d.decFloat())
	d.bdRead = false
	return
}

// bool can be decoded from bool only (single byte).
func (d *simpleDecDriver) DecodeBool() (b bool) {
	if d.advanceNil() {
		return
	}
	if d.bd == simpleVdFalse {
	} else if d.bd == simpleVdTrue {
		b = true
	} else {
		d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd)
	}
	d.bdRead = false
	return
}
+
// ReadMapStart returns the entry count of the next map,
// or containerLenNil for a nil value.
func (d *simpleDecDriver) ReadMapStart() (length int) {
	if d.advanceNil() {
		return containerLenNil
	}
	d.bdRead = false
	return d.decLen()
}

// ReadArrayStart returns the element count of the next array,
// or containerLenNil for a nil value.
func (d *simpleDecDriver) ReadArrayStart() (length int) {
	if d.advanceNil() {
		return containerLenNil
	}
	d.bdRead = false
	return d.decLen()
}

// uint2Len converts a decoded length to int, erroring on overflow.
func (d *simpleDecDriver) uint2Len(ui uint64) int {
	if chkOvf.Uint(ui, intBitsize) {
		d.d.errorf("overflow integer: %v", ui)
	}
	return int(ui)
}

// decLen reads the container length encoded per the descriptor's low
// 3 bits: 0 => zero length; 1..4 => 1/2/4/8-byte big-endian length.
func (d *simpleDecDriver) decLen() int {
	switch d.bd & 7 { // d.bd % 8 {
	case 0:
		return 0
	case 1:
		return int(d.d.decRd.readn1())
	case 2:
		return int(bigen.Uint16(d.d.decRd.readn2()))
	case 3:
		return d.uint2Len(uint64(bigen.Uint32(d.d.decRd.readn4())))
	case 4:
		return d.uint2Len(bigen.Uint64(d.d.decRd.readn8()))
	}
	d.d.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
	return -1
}
+
// DecodeStringAsBytes decodes the next string/bytes value as a byte slice.
func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) {
	return d.DecodeBytes(nil)
}

// DecodeBytes decodes a byte-array value into bs (allocating, reusing
// the decoder scratch buffer, or zero-copying from a bytes reader).
// An array of uint8 values is also accepted and decoded element-wise.
func (d *simpleDecDriver) DecodeBytes(bs []byte) (bsOut []byte) {
	d.d.decByteState = decByteStateNone
	if d.advanceNil() {
		return
	}
	// check if an "array" of uint8's (see ContainerType for how to infer if an array)
	if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 {
		if bs == nil {
			d.d.decByteState = decByteStateReuseBuf
			bs = d.d.b[:]
		}
		slen := d.ReadArrayStart()
		var changed bool
		if bs, changed = usableByteSlice(bs, slen); changed {
			d.d.decByteState = decByteStateNone
		}
		for i := 0; i < len(bs); i++ {
			bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8))
		}
		return bs
	}

	clen := d.decLen()
	d.bdRead = false
	if d.d.zerocopy() {
		d.d.decByteState = decByteStateZerocopy
		return d.d.decRd.rb.readx(uint(clen))
	}
	if bs == nil {
		d.d.decByteState = decByteStateReuseBuf
		bs = d.d.b[:]
	}
	return decByteSlice(d.d.r(), clen, d.d.h.MaxInitLen, bs)
}
+
// DecodeTime decodes a time.Time written as [time-descriptor][len][bytes]
// via time.Time.UnmarshalBinary; nil decodes to the zero time.
func (d *simpleDecDriver) DecodeTime() (t time.Time) {
	if d.advanceNil() {
		return
	}
	if d.bd != simpleVdTime {
		d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd)
	}
	d.bdRead = false
	clen := uint(d.d.decRd.readn1())
	b := d.d.decRd.readx(clen)
	d.d.onerror((&t).UnmarshalBinary(b))
	return
}

// DecodeExt decodes an extension value into rv: into a *RawExt when
// ext is nil, via sideDecode for SelfExt, or via ext.ReadExt otherwise.
// Tags above 0xff are invalid in this format.
func (d *simpleDecDriver) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
	if xtag > 0xff {
		d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag)
	}
	if d.advanceNil() {
		return
	}
	xbs, realxtag1, zerocopy := d.decodeExtV(ext != nil, uint8(xtag))
	realxtag := uint64(realxtag1)
	if ext == nil {
		re := rv.(*RawExt)
		re.Tag = realxtag
		re.setData(xbs, zerocopy)
	} else if ext == SelfExt {
		d.d.sideDecode(rv, basetype, xbs)
	} else {
		ext.ReadExt(rv, xbs)
	}
}
+
// decodeExtV reads an extension payload (tag byte + bytes), verifying
// the tag when requested. A plain byte-array value is also accepted,
// in which case xtag is left zero. zerocopy reports whether xbs
// aliases the input buffer.
func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xbs []byte, xtag byte, zerocopy bool) {
	switch d.bd {
	case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
		l := d.decLen()
		xtag = d.d.decRd.readn1()
		if verifyTag && xtag != tag {
			d.d.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag)
		}
		if d.d.bytes {
			xbs = d.d.decRd.rb.readx(uint(l))
			zerocopy = true
		} else {
			xbs = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
		}
	case simpleVdByteArray, simpleVdByteArray + 1,
		simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
		xbs = d.DecodeBytes(nil)
	default:
		d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd)
	}
	d.bdRead = false
	return
}
+
// DecodeNaked decodes the next value into the decoder's fauxUnion,
// classifying it by descriptor. For containers (array/map), only the
// kind is recorded and decodeFurther is implied: the descriptor stays
// marked as read so the caller can continue decoding the contents.
func (d *simpleDecDriver) DecodeNaked() {
	if !d.bdRead {
		d.readNextBd()
	}

	n := d.d.naked()
	var decodeFurther bool

	switch d.bd {
	case simpleVdNil:
		n.v = valueTypeNil
	case simpleVdFalse:
		n.v = valueTypeBool
		n.b = false
	case simpleVdTrue:
		n.v = valueTypeBool
		n.b = true
	case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
		// positive ints surface as uint unless the handle prefers signed
		if d.h.SignedInteger {
			n.v = valueTypeInt
			n.i = d.DecodeInt64()
		} else {
			n.v = valueTypeUint
			n.u = d.DecodeUint64()
		}
	case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
		n.v = valueTypeInt
		n.i = d.DecodeInt64()
	case simpleVdFloat32:
		n.v = valueTypeFloat
		n.f = d.DecodeFloat64()
	case simpleVdFloat64:
		n.v = valueTypeFloat
		n.f = d.DecodeFloat64()
	case simpleVdTime:
		n.v = valueTypeTime
		n.t = d.DecodeTime()
	case simpleVdString, simpleVdString + 1,
		simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
		n.v = valueTypeString
		n.s = d.d.stringZC(d.DecodeStringAsBytes())
	case simpleVdByteArray, simpleVdByteArray + 1,
		simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
		d.d.fauxUnionReadRawBytes(false)
	case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
		n.v = valueTypeExt
		l := d.decLen()
		n.u = uint64(d.d.decRd.readn1()) // ext tag
		if d.d.bytes {
			n.l = d.d.decRd.rb.readx(uint(l))
		} else {
			n.l = decByteSlice(d.d.r(), l, d.d.h.MaxInitLen, d.d.b[:])
		}
	case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2,
		simpleVdArray + 3, simpleVdArray + 4:
		n.v = valueTypeArray
		decodeFurther = true
	case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
		n.v = valueTypeMap
		decodeFurther = true
	default:
		d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd)
	}

	if !decodeFurther {
		d.bdRead = false
	}
}
+
// nextValueBytes appends the raw encoded bytes of the next value to v0
// and returns the result. On a bytes reader, bytesRdV reslices directly
// from the input starting at the saved cursor instead of copying.
func (d *simpleDecDriver) nextValueBytes(v0 []byte) (v []byte) {
	if !d.bdRead {
		d.readNextBd()
	}
	v = v0
	var h = decNextValueBytesHelper{d: &d.d}
	var cursor = d.d.rb.c - 1 // descriptor byte already consumed
	h.append1(&v, d.bd)
	v = d.nextValueBytesBdReadR(v)
	d.bdRead = false
	h.bytesRdV(&v, cursor)
	return
}

// nextValueBytesR is the recursive form used for container elements:
// it reads a fresh descriptor and appends that value's bytes.
func (d *simpleDecDriver) nextValueBytesR(v0 []byte) (v []byte) {
	d.readNextBd()
	v = v0
	var h = decNextValueBytesHelper{d: &d.d}
	h.append1(&v, d.bd)
	return d.nextValueBytesBdReadR(v)
}
+
// nextValueBytesBdReadR appends the remaining bytes of the value whose
// descriptor (d.bd) has already been read and appended. Fixed-width
// payloads are copied verbatim; containers recurse per element; string,
// byte-array and ext payloads are copied by their decoded length.
func (d *simpleDecDriver) nextValueBytesBdReadR(v0 []byte) (v []byte) {
	v = v0
	var h = decNextValueBytesHelper{d: &d.d}

	c := d.bd

	var length uint

	switch c {
	case simpleVdNil, simpleVdFalse, simpleVdTrue, simpleVdString, simpleVdByteArray:
		// pass - no payload (zero-length string/bytes have no length prefix)
	case simpleVdPosInt, simpleVdNegInt:
		h.append1(&v, d.d.decRd.readn1())
	case simpleVdPosInt + 1, simpleVdNegInt + 1:
		h.appendN(&v, d.d.decRd.readx(2)...)
	case simpleVdPosInt + 2, simpleVdNegInt + 2, simpleVdFloat32:
		h.appendN(&v, d.d.decRd.readx(4)...)
	case simpleVdPosInt + 3, simpleVdNegInt + 3, simpleVdFloat64:
		h.appendN(&v, d.d.decRd.readx(8)...)
	case simpleVdTime:
		// time: 1-byte length (reusing c), then that many bytes
		c = d.d.decRd.readn1()
		h.append1(&v, c)
		h.appendN(&v, d.d.decRd.readx(uint(c))...)

	default:
		// a container or ext: read the length prefix per the low 3 bits,
		// appending the prefix bytes as we go
		switch c & 7 { // c % 8 {
		case 0:
			length = 0
		case 1:
			b := d.d.decRd.readn1()
			length = uint(b)
			h.append1(&v, b)
		case 2:
			x := d.d.decRd.readn2()
			length = uint(bigen.Uint16(x))
			h.appendN(&v, x[:]...)
		case 3:
			x := d.d.decRd.readn4()
			length = uint(bigen.Uint32(x))
			h.appendN(&v, x[:]...)
		case 4:
			x := d.d.decRd.readn8()
			length = uint(bigen.Uint64(x))
			h.appendN(&v, x[:]...)
		}

		bExt := c >= simpleVdExt && c <= simpleVdExt+7
		bStr := c >= simpleVdString && c <= simpleVdString+7
		bByteArray := c >= simpleVdByteArray && c <= simpleVdByteArray+7
		bArray := c >= simpleVdArray && c <= simpleVdArray+7
		bMap := c >= simpleVdMap && c <= simpleVdMap+7

		if !(bExt || bStr || bByteArray || bArray || bMap) {
			d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, c)
		}

		if bExt {
			h.append1(&v, d.d.decRd.readn1()) // tag
		}

		if length == 0 {
			break
		}

		if bArray {
			for i := uint(0); i < length; i++ {
				v = d.nextValueBytesR(v)
			}
		} else if bMap {
			for i := uint(0); i < length; i++ {
				v = d.nextValueBytesR(v) // key
				v = d.nextValueBytesR(v) // value
			}
		} else {
			h.appendN(&v, d.d.decRd.readx(length)...)
		}
	}
	return
}
+
//------------------------------------

// SimpleHandle is a Handle for a very simple encoding format.
//
// simple is a simplistic codec similar to binc, but not as compact.
//   - Encoding of a value is always preceded by the descriptor byte (bd)
//   - True, false, nil are encoded fully in 1 byte (the descriptor)
//   - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
//     There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
//   - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
//   - Length of containers (strings, bytes, array, map, extensions)
//     are encoded in 0, 1, 2, 4 or 8 bytes.
//     Zero-length containers have no length encoded.
//     For others, the number of bytes is given by pow(2, bd%3)
//   - maps are encoded as [bd] [length] [[key][value]]...
//   - arrays are encoded as [bd] [length] [value]...
//   - extensions are encoded as [bd] [length] [tag] [byte]...
//   - strings/bytearrays are encoded as [bd] [length] [byte]...
//   - time.Time are encoded as [bd] [length] [byte]...
//
// The full spec will be published soon.
type SimpleHandle struct {
	binaryEncodingType
	BasicHandle
	// EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
	EncZeroValuesAsNil bool
}

// Name returns the name of the handle: simple
func (h *SimpleHandle) Name() string { return "simple" }

// desc returns a readable name for a descriptor byte (for error messages).
func (h *SimpleHandle) desc(bd byte) string { return simpledesc(bd) }

// newEncDriver builds a fresh encode driver bound to this handle.
func (h *SimpleHandle) newEncDriver() encDriver {
	var e = &simpleEncDriver{h: h}
	e.e.e = e
	e.e.init(h)
	e.reset()
	return e
}

// newDecDriver builds a fresh decode driver bound to this handle.
func (h *SimpleHandle) newDecDriver() decDriver {
	d := &simpleDecDriver{h: h}
	d.d.d = d
	d.d.init(h)
	d.reset()
	return d
}

// compile-time interface conformance checks
var _ decDriver = (*simpleDecDriver)(nil)
var _ encDriver = (*simpleEncDriver)(nil)
diff --git a/vendor/github.com/ugorji/go/codec/sort-slice.generated.go b/vendor/github.com/ugorji/go/codec/sort-slice.generated.go
new file mode 100644
index 000000000..0c43bdaae
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/sort-slice.generated.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from sort-slice.go.tmpl - DO NOT EDIT.
+
+package codec
+
+import "time"
+import "reflect"
+import "bytes"
+
+type stringSlice []string
+
+func (p stringSlice) Len() int { return len(p) }
+func (p stringSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p stringSlice) Less(i, j int) bool {
+ return p[uint(i)] < p[uint(j)]
+}
+
+type uint8Slice []uint8
+
+func (p uint8Slice) Len() int { return len(p) }
+func (p uint8Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p uint8Slice) Less(i, j int) bool {
+ return p[uint(i)] < p[uint(j)]
+}
+
+type uint64Slice []uint64
+
+func (p uint64Slice) Len() int { return len(p) }
+func (p uint64Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p uint64Slice) Less(i, j int) bool {
+ return p[uint(i)] < p[uint(j)]
+}
+
+type intSlice []int
+
+func (p intSlice) Len() int { return len(p) }
+func (p intSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p intSlice) Less(i, j int) bool {
+ return p[uint(i)] < p[uint(j)]
+}
+
+type int32Slice []int32
+
+func (p int32Slice) Len() int { return len(p) }
+func (p int32Slice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p int32Slice) Less(i, j int) bool {
+ return p[uint(i)] < p[uint(j)]
+}
+
+type stringRv struct {
+ v string
+ r reflect.Value
+}
+type stringRvSlice []stringRv
+
+func (p stringRvSlice) Len() int { return len(p) }
+func (p stringRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p stringRvSlice) Less(i, j int) bool {
+ return p[uint(i)].v < p[uint(j)].v
+}
+
+type stringIntf struct {
+ v string
+ i interface{}
+}
+type stringIntfSlice []stringIntf
+
+func (p stringIntfSlice) Len() int { return len(p) }
+func (p stringIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p stringIntfSlice) Less(i, j int) bool {
+ return p[uint(i)].v < p[uint(j)].v
+}
+
+type float64Rv struct {
+ v float64
+ r reflect.Value
+}
+type float64RvSlice []float64Rv
+
+func (p float64RvSlice) Len() int { return len(p) }
+func (p float64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p float64RvSlice) Less(i, j int) bool {
+ return p[uint(i)].v < p[uint(j)].v || isNaN64(p[uint(i)].v) && !isNaN64(p[uint(j)].v)
+}
+
+type uint64Rv struct {
+ v uint64
+ r reflect.Value
+}
+type uint64RvSlice []uint64Rv
+
+func (p uint64RvSlice) Len() int { return len(p) }
+func (p uint64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p uint64RvSlice) Less(i, j int) bool {
+ return p[uint(i)].v < p[uint(j)].v
+}
+
+type int64Rv struct {
+ v int64
+ r reflect.Value
+}
+type int64RvSlice []int64Rv
+
+func (p int64RvSlice) Len() int { return len(p) }
+func (p int64RvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p int64RvSlice) Less(i, j int) bool {
+ return p[uint(i)].v < p[uint(j)].v
+}
+
+type boolRv struct {
+ v bool
+ r reflect.Value
+}
+type boolRvSlice []boolRv
+
+func (p boolRvSlice) Len() int { return len(p) }
+func (p boolRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p boolRvSlice) Less(i, j int) bool {
+ return !p[uint(i)].v && p[uint(j)].v
+}
+
+type timeRv struct {
+ v time.Time
+ r reflect.Value
+}
+type timeRvSlice []timeRv
+
+func (p timeRvSlice) Len() int { return len(p) }
+func (p timeRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p timeRvSlice) Less(i, j int) bool {
+ return p[uint(i)].v.Before(p[uint(j)].v)
+}
+
+type bytesRv struct {
+ v []byte
+ r reflect.Value
+}
+type bytesRvSlice []bytesRv
+
+func (p bytesRvSlice) Len() int { return len(p) }
+func (p bytesRvSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p bytesRvSlice) Less(i, j int) bool {
+ return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1
+}
+
+type bytesIntf struct {
+ v []byte
+ i interface{}
+}
+type bytesIntfSlice []bytesIntf
+
+func (p bytesIntfSlice) Len() int { return len(p) }
+func (p bytesIntfSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p bytesIntfSlice) Less(i, j int) bool {
+ return bytes.Compare(p[uint(i)].v, p[uint(j)].v) == -1
+}
diff --git a/vendor/github.com/ugorji/go/codec/sort-slice.go.tmpl b/vendor/github.com/ugorji/go/codec/sort-slice.go.tmpl
new file mode 100644
index 000000000..81bf4b488
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/sort-slice.go.tmpl
@@ -0,0 +1,66 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from sort-slice.go.tmpl - DO NOT EDIT.
+
+{{/*
+xxxSlice
+xxxIntf
+xxxIntfSlice
+xxxRv
+xxxRvSlice
+
+I'm now going to create them for
+- sortables
+- sortablesplus
+
+With the parameters passed in sortables or sortablesplus,
+'time, 'bytes' are special, and correspond to time.Time and []byte respectively.
+*/}}
+
+package codec
+
+import "time"
+import "reflect"
+import "bytes"
+
+{{/* func init() { _ = time.Unix } */}}
+
+{{define "T"}}
+func (p {{ .Type }}) Len() int { return len(p) }
+func (p {{ .Type }}) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
+func (p {{ .Type }}) Less(i, j int) bool {
+ {{ if eq .Kind "bool" }} return !p[uint(i)]{{.V}} && p[uint(j)]{{.V}}
+ {{ else if eq .Kind "float32" }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}} || isNaN32(p[uint(i)]{{.V}}) && !isNaN32(p[uint(j)]{{.V}})
+ {{ else if eq .Kind "float64" }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}} || isNaN64(p[uint(i)]{{.V}}) && !isNaN64(p[uint(j)]{{.V}})
+ {{ else if eq .Kind "time" }} return p[uint(i)]{{.V}}.Before(p[uint(j)]{{.V}})
+ {{ else if eq .Kind "bytes" }} return bytes.Compare(p[uint(i)]{{.V}}, p[uint(j)]{{.V}}) == -1
+ {{ else }} return p[uint(i)]{{.V}} < p[uint(j)]{{.V}}
+ {{ end -}}
+}
+{{end}}
+
+{{range $i, $v := sortables }}{{ $t := tshort $v }}
+type {{ $v }}Slice []{{ $t }}
+{{template "T" args "Kind" $v "Type" (print $v "Slice") "V" ""}}
+{{end}}
+
+{{range $i, $v := sortablesplus }}{{ $t := tshort $v }}
+
+type {{ $v }}Rv struct {
+ v {{ $t }}
+ r reflect.Value
+}
+type {{ $v }}RvSlice []{{ $v }}Rv
+{{template "T" args "Kind" $v "Type" (print $v "RvSlice") "V" ".v"}}
+
+{{if eq $v "bytes" "string" -}}
+type {{ $v }}Intf struct {
+ v {{ $t }}
+ i interface{}
+}
+type {{ $v }}IntfSlice []{{ $v }}Intf
+{{template "T" args "Kind" $v "Type" (print $v "IntfSlice") "V" ".v"}}
+{{end}}
+
+{{end}}
diff --git a/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
new file mode 100644
index 000000000..902858671
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
@@ -0,0 +1,639 @@
+[
+ {
+ "cbor": "AA==",
+ "hex": "00",
+ "roundtrip": true,
+ "decoded": 0
+ },
+ {
+ "cbor": "AQ==",
+ "hex": "01",
+ "roundtrip": true,
+ "decoded": 1
+ },
+ {
+ "cbor": "Cg==",
+ "hex": "0a",
+ "roundtrip": true,
+ "decoded": 10
+ },
+ {
+ "cbor": "Fw==",
+ "hex": "17",
+ "roundtrip": true,
+ "decoded": 23
+ },
+ {
+ "cbor": "GBg=",
+ "hex": "1818",
+ "roundtrip": true,
+ "decoded": 24
+ },
+ {
+ "cbor": "GBk=",
+ "hex": "1819",
+ "roundtrip": true,
+ "decoded": 25
+ },
+ {
+ "cbor": "GGQ=",
+ "hex": "1864",
+ "roundtrip": true,
+ "decoded": 100
+ },
+ {
+ "cbor": "GQPo",
+ "hex": "1903e8",
+ "roundtrip": true,
+ "decoded": 1000
+ },
+ {
+ "cbor": "GgAPQkA=",
+ "hex": "1a000f4240",
+ "roundtrip": true,
+ "decoded": 1000000
+ },
+ {
+ "cbor": "GwAAAOjUpRAA",
+ "hex": "1b000000e8d4a51000",
+ "roundtrip": true,
+ "decoded": 1000000000000
+ },
+ {
+ "cbor": "G///////////",
+ "hex": "1bffffffffffffffff",
+ "roundtrip": true,
+ "decoded": 18446744073709551615
+ },
+ {
+ "cbor": "wkkBAAAAAAAAAAA=",
+ "hex": "c249010000000000000000",
+ "roundtrip": true,
+ "decoded": 18446744073709551616
+ },
+ {
+ "cbor": "O///////////",
+ "hex": "3bffffffffffffffff",
+ "roundtrip": true,
+ "decoded": -18446744073709551616,
+ "skip": true
+ },
+ {
+ "cbor": "w0kBAAAAAAAAAAA=",
+ "hex": "c349010000000000000000",
+ "roundtrip": true,
+ "decoded": -18446744073709551617
+ },
+ {
+ "cbor": "IA==",
+ "hex": "20",
+ "roundtrip": true,
+ "decoded": -1
+ },
+ {
+ "cbor": "KQ==",
+ "hex": "29",
+ "roundtrip": true,
+ "decoded": -10
+ },
+ {
+ "cbor": "OGM=",
+ "hex": "3863",
+ "roundtrip": true,
+ "decoded": -100
+ },
+ {
+ "cbor": "OQPn",
+ "hex": "3903e7",
+ "roundtrip": true,
+ "decoded": -1000
+ },
+ {
+ "cbor": "+QAA",
+ "hex": "f90000",
+ "roundtrip": true,
+ "decoded": 0.0
+ },
+ {
+ "cbor": "+YAA",
+ "hex": "f98000",
+ "roundtrip": true,
+ "decoded": -0.0
+ },
+ {
+ "cbor": "+TwA",
+ "hex": "f93c00",
+ "roundtrip": true,
+ "decoded": 1.0
+ },
+ {
+ "cbor": "+z/xmZmZmZma",
+ "hex": "fb3ff199999999999a",
+ "roundtrip": true,
+ "decoded": 1.1
+ },
+ {
+ "cbor": "+T4A",
+ "hex": "f93e00",
+ "roundtrip": true,
+ "decoded": 1.5
+ },
+ {
+ "cbor": "+Xv/",
+ "hex": "f97bff",
+ "roundtrip": true,
+ "decoded": 65504.0
+ },
+ {
+ "cbor": "+kfDUAA=",
+ "hex": "fa47c35000",
+ "roundtrip": true,
+ "decoded": 100000.0
+ },
+ {
+ "cbor": "+n9///8=",
+ "hex": "fa7f7fffff",
+ "roundtrip": true,
+ "decoded": 3.4028234663852886e+38
+ },
+ {
+ "cbor": "+3435DyIAHWc",
+ "hex": "fb7e37e43c8800759c",
+ "roundtrip": true,
+ "decoded": 1.0e+300
+ },
+ {
+ "cbor": "+QAB",
+ "hex": "f90001",
+ "roundtrip": true,
+ "decoded": 5.960464477539063e-08
+ },
+ {
+ "cbor": "+QQA",
+ "hex": "f90400",
+ "roundtrip": true,
+ "decoded": 6.103515625e-05
+ },
+ {
+ "cbor": "+cQA",
+ "hex": "f9c400",
+ "roundtrip": true,
+ "decoded": -4.0
+ },
+ {
+ "cbor": "+8AQZmZmZmZm",
+ "hex": "fbc010666666666666",
+ "roundtrip": true,
+ "decoded": -4.1
+ },
+ {
+ "cbor": "+XwA",
+ "hex": "f97c00",
+ "roundtrip": true,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+X4A",
+ "hex": "f97e00",
+ "roundtrip": true,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+fwA",
+ "hex": "f9fc00",
+ "roundtrip": true,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "+n+AAAA=",
+ "hex": "fa7f800000",
+ "roundtrip": false,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+n/AAAA=",
+ "hex": "fa7fc00000",
+ "roundtrip": false,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+v+AAAA=",
+ "hex": "faff800000",
+ "roundtrip": false,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "+3/wAAAAAAAA",
+ "hex": "fb7ff0000000000000",
+ "roundtrip": false,
+ "diagnostic": "Infinity"
+ },
+ {
+ "cbor": "+3/4AAAAAAAA",
+ "hex": "fb7ff8000000000000",
+ "roundtrip": false,
+ "diagnostic": "NaN"
+ },
+ {
+ "cbor": "+//wAAAAAAAA",
+ "hex": "fbfff0000000000000",
+ "roundtrip": false,
+ "diagnostic": "-Infinity"
+ },
+ {
+ "cbor": "9A==",
+ "hex": "f4",
+ "roundtrip": true,
+ "decoded": false
+ },
+ {
+ "cbor": "9Q==",
+ "hex": "f5",
+ "roundtrip": true,
+ "decoded": true
+ },
+ {
+ "cbor": "9g==",
+ "hex": "f6",
+ "roundtrip": true,
+ "decoded": null
+ },
+ {
+ "cbor": "9w==",
+ "hex": "f7",
+ "roundtrip": true,
+ "diagnostic": "undefined"
+ },
+ {
+ "cbor": "8A==",
+ "hex": "f0",
+ "roundtrip": true,
+ "diagnostic": "simple(16)"
+ },
+ {
+ "cbor": "+Bg=",
+ "hex": "f818",
+ "roundtrip": true,
+ "diagnostic": "simple(24)"
+ },
+ {
+ "cbor": "+P8=",
+ "hex": "f8ff",
+ "roundtrip": true,
+ "diagnostic": "simple(255)"
+ },
+ {
+ "cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==",
+ "hex": "c074323031332d30332d32315432303a30343a30305a",
+ "roundtrip": true,
+ "diagnostic": "0(\"2013-03-21T20:04:00Z\")"
+ },
+ {
+ "cbor": "wRpRS2ew",
+ "hex": "c11a514b67b0",
+ "roundtrip": true,
+ "diagnostic": "1(1363896240)"
+ },
+ {
+ "cbor": "wftB1FLZ7CAAAA==",
+ "hex": "c1fb41d452d9ec200000",
+ "roundtrip": true,
+ "diagnostic": "1(1363896240.5)"
+ },
+ {
+ "cbor": "10QBAgME",
+ "hex": "d74401020304",
+ "roundtrip": true,
+ "diagnostic": "23(h'01020304')"
+ },
+ {
+ "cbor": "2BhFZElFVEY=",
+ "hex": "d818456449455446",
+ "roundtrip": true,
+ "diagnostic": "24(h'6449455446')"
+ },
+ {
+ "cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==",
+ "hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d",
+ "roundtrip": true,
+ "diagnostic": "32(\"http://www.example.com\")"
+ },
+ {
+ "cbor": "QA==",
+ "hex": "40",
+ "roundtrip": true,
+ "diagnostic": "h''"
+ },
+ {
+ "cbor": "RAECAwQ=",
+ "hex": "4401020304",
+ "roundtrip": true,
+ "diagnostic": "h'01020304'"
+ },
+ {
+ "cbor": "YA==",
+ "hex": "60",
+ "roundtrip": true,
+ "decoded": ""
+ },
+ {
+ "cbor": "YWE=",
+ "hex": "6161",
+ "roundtrip": true,
+ "decoded": "a"
+ },
+ {
+ "cbor": "ZElFVEY=",
+ "hex": "6449455446",
+ "roundtrip": true,
+ "decoded": "IETF"
+ },
+ {
+ "cbor": "YiJc",
+ "hex": "62225c",
+ "roundtrip": true,
+ "decoded": "\"\\"
+ },
+ {
+ "cbor": "YsO8",
+ "hex": "62c3bc",
+ "roundtrip": true,
+ "decoded": "ü"
+ },
+ {
+ "cbor": "Y+awtA==",
+ "hex": "63e6b0b4",
+ "roundtrip": true,
+ "decoded": "水"
+ },
+ {
+ "cbor": "ZPCQhZE=",
+ "hex": "64f0908591",
+ "roundtrip": true,
+ "decoded": "𐅑"
+ },
+ {
+ "cbor": "gA==",
+ "hex": "80",
+ "roundtrip": true,
+ "decoded": [
+
+ ]
+ },
+ {
+ "cbor": "gwECAw==",
+ "hex": "83010203",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ 2,
+ 3
+ ]
+ },
+ {
+ "cbor": "gwGCAgOCBAU=",
+ "hex": "8301820203820405",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=",
+ "hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819",
+ "roundtrip": true,
+ "decoded": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25
+ ]
+ },
+ {
+ "cbor": "oA==",
+ "hex": "a0",
+ "roundtrip": true,
+ "decoded": {
+ }
+ },
+ {
+ "cbor": "ogECAwQ=",
+ "hex": "a201020304",
+ "roundtrip": true,
+ "skip": true,
+ "diagnostic": "{1: 2, 3: 4}"
+ },
+ {
+ "cbor": "omFhAWFiggID",
+ "hex": "a26161016162820203",
+ "roundtrip": true,
+ "decoded": {
+ "a": 1,
+ "b": [
+ 2,
+ 3
+ ]
+ }
+ },
+ {
+ "cbor": "gmFhoWFiYWM=",
+ "hex": "826161a161626163",
+ "roundtrip": true,
+ "decoded": [
+ "a",
+ {
+ "b": "c"
+ }
+ ]
+ },
+ {
+ "cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF",
+ "hex": "a56161614161626142616361436164614461656145",
+ "roundtrip": true,
+ "decoded": {
+ "a": "A",
+ "b": "B",
+ "c": "C",
+ "d": "D",
+ "e": "E"
+ }
+ },
+ {
+ "cbor": "X0IBAkMDBAX/",
+ "hex": "5f42010243030405ff",
+ "roundtrip": false,
+ "skip": true,
+ "diagnostic": "(_ h'0102', h'030405')"
+ },
+ {
+ "cbor": "f2VzdHJlYWRtaW5n/w==",
+ "hex": "7f657374726561646d696e67ff",
+ "roundtrip": false,
+ "decoded": "streaming"
+ },
+ {
+ "cbor": "n/8=",
+ "hex": "9fff",
+ "roundtrip": false,
+ "decoded": [
+
+ ]
+ },
+ {
+ "cbor": "nwGCAgOfBAX//w==",
+ "hex": "9f018202039f0405ffff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "nwGCAgOCBAX/",
+ "hex": "9f01820203820405ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "gwGCAgOfBAX/",
+ "hex": "83018202039f0405ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "gwGfAgP/ggQF",
+ "hex": "83019f0203ff820405",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ [
+ 2,
+ 3
+ ],
+ [
+ 4,
+ 5
+ ]
+ ]
+ },
+ {
+ "cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=",
+ "hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff",
+ "roundtrip": false,
+ "decoded": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25
+ ]
+ },
+ {
+ "cbor": "v2FhAWFinwID//8=",
+ "hex": "bf61610161629f0203ffff",
+ "roundtrip": false,
+ "decoded": {
+ "a": 1,
+ "b": [
+ 2,
+ 3
+ ]
+ }
+ },
+ {
+ "cbor": "gmFhv2FiYWP/",
+ "hex": "826161bf61626163ff",
+ "roundtrip": false,
+ "decoded": [
+ "a",
+ {
+ "b": "c"
+ }
+ ]
+ },
+ {
+ "cbor": "v2NGdW71Y0FtdCH/",
+ "hex": "bf6346756ef563416d7421ff",
+ "roundtrip": false,
+ "decoded": {
+ "Fun": true,
+ "Amt": -2
+ }
+ }
+]
diff --git a/vendor/github.com/ugorji/go/codec/test.py b/vendor/github.com/ugorji/go/codec/test.py
new file mode 100644
index 000000000..f00ff5946
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/test.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+
+# This will create golden files in a directory passed to it.
+# A Test calls this internally to create the golden files
+# So it can process them (so we don't have to check in the files).
+
+# Ensure msgpack-python and cbor are installed first, using:
+# sudo apt install python-dev (may not be necessary)
+# sudo apt install python-pip # or python3-pip
+# pip install --user msgpack-python msgpack-rpc-python cbor
+
+# Ensure all "string" keys are utf strings (else encoded as bytes)
+
+from __future__ import print_function
+import cbor, msgpack, msgpackrpc, sys, os, threading
+
+mylocaladdr="127.0.0.1" # localhost.localdomain localhost 127.0.0.1
+
+def get_test_data_list():
+ # get list with all primitive types, and a combo type
+ l0 = [
+ -8,
+ -1616,
+ -32323232,
+ -6464646464646464,
+ 192,
+ 1616,
+ 32323232,
+ 6464646464646464,
+ 192,
+ -3232.0,
+ -6464646464.0,
+ 3232.0,
+ 6464.0,
+ 6464646464.0,
+ 160.0,
+ 1616.0,
+ False,
+ True,
+ u"null",
+ None,
+ u"some&day>some<day",
+ 1328176922000002000,
+ u"",
+ -2206187877999998000,
+ u"bytestring",
+ 270,
+ u"none",
+ -2013855847999995777,
+ #-6795364578871345152,
+ ]
+ l1 = [
+ { "true": True,
+ "false": False },
+ { "true": u"True",
+ "false": False,
+ "uint16(1616)": 1616 },
+ { "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
+ "int32":32323232, "bool": True,
+ "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
+ "SHORT STRING": u"1234567890" },
+ { True: "true", 138: False, "false": 200 }
+ ]
+
+ l = []
+ l.extend(l0)
+ l.append(l0)
+ l.append(1)
+ l.extend(l1)
+ return l
+
+def build_test_data(destdir):
+ l = get_test_data_list()
+ for i in range(len(l)):
+ # packer = msgpack.Packer()
+ serialized = msgpack.dumps(l[i])
+ with open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') as f:
+ f.write(serialized)
+ serialized = cbor.dumps(l[i])
+ with open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') as f:
+ f.write(serialized)
+
+def doRpcServer(port, stopTimeSec):
+ class EchoHandler(object):
+ def Echo123(self, msg1, msg2, msg3):
+ return ("1:%s 2:%s 3:%s" % (msg1.decode("utf-8"), msg2.decode("utf-8"), msg3.decode("utf-8")))
+ def EchoStruct(self, msg):
+ return ("%s" % msg)
+
+ addr = msgpackrpc.Address(mylocaladdr, port)
+ server = msgpackrpc.Server(EchoHandler())
+ server.listen(addr)
+ # run thread to stop it after stopTimeSec seconds if > 0
+ if stopTimeSec > 0:
+ def myStopRpcServer():
+ server.stop()
+ t = threading.Timer(stopTimeSec, myStopRpcServer)
+ t.start()
+ server.start()
+
+def doRpcClientToPythonSvc(port):
+ address = msgpackrpc.Address(mylocaladdr, port)
+ client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+ print(client.call("Echo123", "A1", "B2", "C3"))
+ print(client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}))
+
+# def doCheckSocket(port):
+# print(">>>> port: ", port, " <<<<<")
+# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+# result = sock.connect_ex(('127.0.0.1', port))
+# if result == 0:
+# print("\t>>>> Port is open")
+# else:
+# print("\t>>>> Port is not open")
+# sock.close()
+
+def doRpcClientToGoSvc(port):
+ # doCheckSocket(port)
+ address = msgpackrpc.Address(mylocaladdr, port)
+ client = msgpackrpc.Client(address, unpack_encoding='utf-8')
+ print(client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]))
+ print(client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}))
+
+def doMain(args):
+ if len(args) == 2 and args[0] == "testdata":
+ build_test_data(args[1])
+ elif len(args) == 3 and args[0] == "rpc-server":
+ doRpcServer(int(args[1]), int(args[2]))
+ elif len(args) == 2 and args[0] == "rpc-client-python-service":
+ doRpcClientToPythonSvc(int(args[1]))
+ elif len(args) == 2 and args[0] == "rpc-client-go-service":
+ doRpcClientToGoSvc(int(args[1]))
+ else:
+ print("Usage: test.py " +
+ "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
+
+if __name__ == "__main__":
+ doMain(sys.argv[1:])
diff --git a/vendor/github.com/ugorji/go/codec/writer.go b/vendor/github.com/ugorji/go/codec/writer.go
new file mode 100644
index 000000000..4d2c9fe10
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/writer.go
@@ -0,0 +1,289 @@
+// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import "io"
+
+// encWriter abstracts writing to a byte array or to an io.Writer.
+type encWriter interface {
+ writeb([]byte)
+ writestr(string)
+ writeqstr(string) // write string wrapped in quotes ie "..."
+ writen1(byte)
+
+ // add convenience functions for writing 2,4
+ writen2(byte, byte)
+ writen4([4]byte)
+ writen8([8]byte)
+
+ end()
+}
+
+// ---------------------------------------------
+
+type bufioEncWriter struct {
+ w io.Writer
+
+ buf []byte
+
+ n int
+
+ b [16]byte // scratch buffer and padding (cache-aligned)
+}
+
+func (z *bufioEncWriter) reset(w io.Writer, bufsize int, blist *bytesFreelist) {
+ z.w = w
+ z.n = 0
+ if bufsize <= 0 {
+ bufsize = defEncByteBufSize
+ }
+	// bufsize must be >= 8, to accommodate writen methods (where n <= 8)
+ if bufsize <= 8 {
+ bufsize = 8
+ }
+ if cap(z.buf) < bufsize {
+ if len(z.buf) > 0 && &z.buf[0] != &z.b[0] {
+ blist.put(z.buf)
+ }
+ if len(z.b) > bufsize {
+ z.buf = z.b[:]
+ } else {
+ z.buf = blist.get(bufsize)
+ }
+ }
+ z.buf = z.buf[:cap(z.buf)]
+}
+
+func (z *bufioEncWriter) flushErr() (err error) {
+ n, err := z.w.Write(z.buf[:z.n])
+ z.n -= n
+ if z.n > 0 {
+ if err == nil {
+ err = io.ErrShortWrite
+ }
+ if n > 0 {
+ copy(z.buf, z.buf[n:z.n+n])
+ }
+ }
+ return err
+}
+
+func (z *bufioEncWriter) flush() {
+ halt.onerror(z.flushErr())
+}
+
+func (z *bufioEncWriter) writeb(s []byte) {
+LOOP:
+ a := len(z.buf) - z.n
+ if len(s) > a {
+ z.n += copy(z.buf[z.n:], s[:a])
+ s = s[a:]
+ z.flush()
+ goto LOOP
+ }
+ z.n += copy(z.buf[z.n:], s)
+}
+
+func (z *bufioEncWriter) writestr(s string) {
+ // z.writeb(bytesView(s)) // inlined below
+LOOP:
+ a := len(z.buf) - z.n
+ if len(s) > a {
+ z.n += copy(z.buf[z.n:], s[:a])
+ s = s[a:]
+ z.flush()
+ goto LOOP
+ }
+ z.n += copy(z.buf[z.n:], s)
+}
+
+func (z *bufioEncWriter) writeqstr(s string) {
+ // z.writen1('"')
+ // z.writestr(s)
+ // z.writen1('"')
+
+ if z.n+len(s)+2 > len(z.buf) {
+ z.flush()
+ }
+ z.buf[z.n] = '"'
+ z.n++
+LOOP:
+ a := len(z.buf) - z.n
+ if len(s)+1 > a {
+ z.n += copy(z.buf[z.n:], s[:a])
+ s = s[a:]
+ z.flush()
+ goto LOOP
+ }
+ z.n += copy(z.buf[z.n:], s)
+ z.buf[z.n] = '"'
+ z.n++
+}
+
+func (z *bufioEncWriter) writen1(b1 byte) {
+ if 1 > len(z.buf)-z.n {
+ z.flush()
+ }
+ z.buf[z.n] = b1
+ z.n++
+}
+func (z *bufioEncWriter) writen2(b1, b2 byte) {
+ if 2 > len(z.buf)-z.n {
+ z.flush()
+ }
+ z.buf[z.n+1] = b2
+ z.buf[z.n] = b1
+ z.n += 2
+}
+func (z *bufioEncWriter) writen4(b [4]byte) {
+ if 4 > len(z.buf)-z.n {
+ z.flush()
+ }
+ copy(z.buf[z.n:], b[:])
+ z.n += 4
+}
+
+func (z *bufioEncWriter) writen8(b [8]byte) {
+ if 8 > len(z.buf)-z.n {
+ z.flush()
+ }
+ copy(z.buf[z.n:], b[:])
+ z.n += 8
+}
+
+func (z *bufioEncWriter) endErr() (err error) {
+ if z.n > 0 {
+ err = z.flushErr()
+ }
+ return
+}
+
+// ---------------------------------------------
+
+// bytesEncAppender implements encWriter and can write to a byte slice.
+type bytesEncAppender struct {
+ b []byte
+ out *[]byte
+}
+
+func (z *bytesEncAppender) writeb(s []byte) {
+ z.b = append(z.b, s...)
+}
+func (z *bytesEncAppender) writestr(s string) {
+ z.b = append(z.b, s...)
+}
+func (z *bytesEncAppender) writeqstr(s string) {
+ z.b = append(append(append(z.b, '"'), s...), '"')
+ // z.b = append(z.b, '"')
+ // z.b = append(z.b, s...)
+ // z.b = append(z.b, '"')
+}
+func (z *bytesEncAppender) writen1(b1 byte) {
+ z.b = append(z.b, b1)
+}
+func (z *bytesEncAppender) writen2(b1, b2 byte) {
+ z.b = append(z.b, b1, b2)
+}
+func (z *bytesEncAppender) writen4(b [4]byte) {
+ z.b = append(z.b, b[:]...)
+ // z.b = append(z.b, b[0], b[1], b[2], b[3]) // prevents inlining encWr.writen4
+}
+func (z *bytesEncAppender) writen8(b [8]byte) {
+ z.b = append(z.b, b[:]...)
+ // z.b = append(z.b, b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]) // prevents inlining encWr.writen4
+}
+func (z *bytesEncAppender) endErr() error {
+ *(z.out) = z.b
+ return nil
+}
+func (z *bytesEncAppender) reset(in []byte, out *[]byte) {
+ z.b = in[:0]
+ z.out = out
+}
+
+// --------------------------------------------------
+
+type encWr struct {
+ bytes bool // encoding to []byte
+ js bool // is json encoder?
+ be bool // is binary encoder?
+
+ c containerState
+
+ calls uint16
+ seq uint16 // sequencer (e.g. used by binc for symbols, etc)
+ wb bytesEncAppender
+ wf *bufioEncWriter
+}
+
+// MARKER: manually inline bytesEncAppender.writenx/writeqstr methods,
+// as calling them causes encWr.writenx/writeqstr methods to not be inlined (cost > 80).
+//
+// i.e. e.g. instead of writing z.wb.writen2(b1, b2), use z.wb.b = append(z.wb.b, b1, b2)
+
+func (z *encWr) writeb(s []byte) {
+ if z.bytes {
+ z.wb.writeb(s)
+ } else {
+ z.wf.writeb(s)
+ }
+}
+func (z *encWr) writeqstr(s string) {
+ if z.bytes {
+ // MARKER: z.wb.writeqstr(s)
+ z.wb.b = append(append(append(z.wb.b, '"'), s...), '"')
+ } else {
+ z.wf.writeqstr(s)
+ }
+}
+func (z *encWr) writestr(s string) {
+ if z.bytes {
+ z.wb.writestr(s)
+ } else {
+ z.wf.writestr(s)
+ }
+}
+func (z *encWr) writen1(b1 byte) {
+ if z.bytes {
+ z.wb.writen1(b1)
+ } else {
+ z.wf.writen1(b1)
+ }
+}
+
+func (z *encWr) writen2(b1, b2 byte) {
+ if z.bytes {
+ // MARKER: z.wb.writen2(b1, b2)
+ z.wb.b = append(z.wb.b, b1, b2)
+ } else {
+ z.wf.writen2(b1, b2)
+ }
+}
+func (z *encWr) writen4(b [4]byte) {
+ if z.bytes {
+ z.wb.writen4(b)
+ } else {
+ z.wf.writen4(b)
+ }
+}
+func (z *encWr) writen8(b [8]byte) {
+ if z.bytes {
+ z.wb.writen8(b)
+ } else {
+ z.wf.writen8(b)
+ }
+}
+
+func (z *encWr) endErr() error {
+ if z.bytes {
+ return z.wb.endErr()
+ }
+ return z.wf.endErr()
+}
+
+func (z *encWr) end() {
+ halt.onerror(z.endErr())
+}
+
+var _ encWriter = (*encWr)(nil)