path: root/vendor/github.com/bytedance/sonic/encoder
Diffstat (limited to 'vendor/github.com/bytedance/sonic/encoder')
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/asm.s                        0
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go116.go  1198
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go117.go  1201
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/compiler.go                885
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/debug_go116.go              66
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/debug_go117.go             205
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/encoder.go                 311
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/errors.go                   65
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/mapiter.go                 199
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/pools.go                   194
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/primitives.go              168
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/sort.go                    206
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/stream.go                   84
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/stubs_go116.go              65
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/stubs_go117.go              66
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/stubs_go120.go              66
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/types.go                    47
-rw-r--r--  vendor/github.com/bytedance/sonic/encoder/utils.go                    52
18 files changed, 5078 insertions, 0 deletions
diff --git a/vendor/github.com/bytedance/sonic/encoder/asm.s b/vendor/github.com/bytedance/sonic/encoder/asm.s
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/asm.s
diff --git a/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go116.go b/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go116.go
new file mode 100644
index 000000000..9b5978431
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go116.go
@@ -0,0 +1,1198 @@
+// +build go1.15,!go1.17
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `fmt`
+ `reflect`
+ `strconv`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/cpu`
+ `github.com/bytedance/sonic/internal/jit`
+ `github.com/bytedance/sonic/internal/native/types`
+ `github.com/twitchyliquid64/golang-asm/obj`
+ `github.com/twitchyliquid64/golang-asm/obj/x86`
+
+ `github.com/bytedance/sonic/internal/native`
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+/** Register Allocations
+ *
+ * State Registers:
+ *
+ * %rbx : stack base
+ * %rdi : result pointer
+ * %rsi : result length
+ * %rdx : result capacity
+ * %r12 : sp->p
+ * %r13 : sp->q
+ * %r14 : sp->x
+ * %r15 : sp->f
+ *
+ * Error Registers:
+ *
+ * %r10 : error type register
+ * %r11 : error pointer register
+ */
+
+/** Function Prototype & Stack Map
+ *
+ * func (buf *[]byte, p unsafe.Pointer, sb *_Stack, fv uint64) (err error)
+ *
+ * buf : (FP)
+ * p : 8(FP)
+ * sb : 16(FP)
+ * fv : 24(FP)
+ * err.vt : 32(FP)
+ * err.vp : 40(FP)
+ */
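+
+// Editor's note: an illustrative sketch, not part of the vendored source.
+// At the Go level the generated stub is used through a function value of
+// roughly the shape below; the name encoderSketch is hypothetical, and the
+// real _Encoder type is defined elsewhere in this package.
+type encoderSketch func(buf *[]byte, p unsafe.Pointer, sb *_Stack, fv uint64) error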
+
+const (
+ _S_cond = iota
+ _S_init
+)
+
+const (
+ _FP_args = 48 // 48 bytes for passing arguments to this function
+ _FP_fargs = 64 // 64 bytes for passing arguments to other Go functions
+ _FP_saves = 64 // 64 bytes for saving the registers before CALL instructions
+ _FP_locals = 24 // 24 bytes for local variables
+)
+
+const (
+ _FP_offs = _FP_fargs + _FP_saves + _FP_locals
+ _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer
+ _FP_base = _FP_size + 8 // 8 bytes for the return address
+)
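+
+// Editor's note: an illustrative sketch, not part of the vendored source.
+// Spelled out, the frame layout above fixes every offset from SP:
+// _FP_offs = 64 + 64 + 24 = 152, _FP_size = 160 and _FP_base = 168, so the
+// first argument slot (_ARG_rb below) lives at 168(SP). These hypothetical
+// constants only restate that arithmetic.
+const (
+    _frameOffsSketch = _FP_fargs + _FP_saves + _FP_locals // 152
+    _frameSizeSketch = _frameOffsSketch + 8               // 160, matches _FP_size
+    _frameBaseSketch = _frameSizeSketch + 8               // 168, matches _FP_base
+)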
+
+const (
+ _FM_exp32 = 0x7f800000
+ _FM_exp64 = 0x7ff0000000000000
+)
+
+const (
+ _IM_null = 0x6c6c756e // 'null'
+ _IM_true = 0x65757274 // 'true'
+ _IM_fals = 0x736c6166 // 'fals' ('false' without the 'e')
+ _IM_open = 0x00225c22 // '"\"∅'
+ _IM_array = 0x5d5b // '[]'
+ _IM_object = 0x7d7b // '{}'
+ _IM_mulv = -0x5555555555555555
+)
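+
+// Editor's note: an illustrative sketch, not part of the vendored source.
+// The _IM_* immediates are little-endian encodings of short ASCII literals,
+// which is what lets a single MOVL/MOVW store the whole text. This
+// hypothetical helper rebuilds _IM_null from its string form.
+func immNullSketch() uint32 {
+    s := "null" // 'n'=0x6e 'u'=0x75 'l'=0x6c 'l'=0x6c
+    // least-significant byte first, hence 0x6c6c756e == _IM_null
+    return uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 | uint32(s[3])<<24
+}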
+
+const (
+ _LB_more_space = "_more_space"
+ _LB_more_space_return = "_more_space_return_"
+)
+
+const (
+ _LB_error = "_error"
+ _LB_error_too_deep = "_error_too_deep"
+ _LB_error_invalid_number = "_error_invalid_number"
+ _LB_error_nan_or_infinite = "_error_nan_or_infinite"
+ _LB_panic = "_panic"
+)
+
+var (
+ _AX = jit.Reg("AX")
+ _CX = jit.Reg("CX")
+ _DX = jit.Reg("DX")
+ _DI = jit.Reg("DI")
+ _SI = jit.Reg("SI")
+ _BP = jit.Reg("BP")
+ _SP = jit.Reg("SP")
+ _R8 = jit.Reg("R8")
+)
+
+var (
+ _X0 = jit.Reg("X0")
+ _Y0 = jit.Reg("Y0")
+)
+
+var (
+ _ST = jit.Reg("BX")
+ _RP = jit.Reg("DI")
+ _RL = jit.Reg("SI")
+ _RC = jit.Reg("DX")
+)
+
+var (
+ _LR = jit.Reg("R9")
+ _R10 = jit.Reg("R10") // used for gcWriteBarrier
+ _ET = jit.Reg("R10")
+ _EP = jit.Reg("R11")
+)
+
+var (
+ _SP_p = jit.Reg("R12")
+ _SP_q = jit.Reg("R13")
+ _SP_x = jit.Reg("R14")
+ _SP_f = jit.Reg("R15")
+)
+
+var (
+ _ARG_rb = jit.Ptr(_SP, _FP_base)
+ _ARG_vp = jit.Ptr(_SP, _FP_base + 8)
+ _ARG_sb = jit.Ptr(_SP, _FP_base + 16)
+ _ARG_fv = jit.Ptr(_SP, _FP_base + 24)
+)
+
+var (
+ _RET_et = jit.Ptr(_SP, _FP_base + 32)
+ _RET_ep = jit.Ptr(_SP, _FP_base + 40)
+)
+
+var (
+ _VAR_sp = jit.Ptr(_SP, _FP_fargs + _FP_saves)
+ _VAR_dn = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8)
+ _VAR_vp = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16)
+)
+
+var (
+ _REG_ffi = []obj.Addr{_RP, _RL, _RC}
+ _REG_enc = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RL}
+ _REG_jsr = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _LR}
+ _REG_all = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RP, _RL, _RC}
+)
+
+type _Assembler struct {
+ jit.BaseAssembler
+ p _Program
+ x int
+ name string
+}
+
+func newAssembler(p _Program) *_Assembler {
+ return new(_Assembler).Init(p)
+}
+
+/** Assembler Interface **/
+func (self *_Assembler) Load() _Encoder {
+ return ptoenc(self.BaseAssembler.Load("encode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs))
+}
+
+func (self *_Assembler) Init(p _Program) *_Assembler {
+ self.p = p
+ self.BaseAssembler.Init(self.compile)
+ return self
+}
+
+func (self *_Assembler) compile() {
+ self.prologue()
+ self.instrs()
+ self.epilogue()
+ self.builtins()
+}
+
+/** Assembler Stages **/
+
+var _OpFuncTab = [256]func(*_Assembler, *_Instr) {
+ _OP_null : (*_Assembler)._asm_OP_null,
+ _OP_empty_arr : (*_Assembler)._asm_OP_empty_arr,
+ _OP_empty_obj : (*_Assembler)._asm_OP_empty_obj,
+ _OP_bool : (*_Assembler)._asm_OP_bool,
+ _OP_i8 : (*_Assembler)._asm_OP_i8,
+ _OP_i16 : (*_Assembler)._asm_OP_i16,
+ _OP_i32 : (*_Assembler)._asm_OP_i32,
+ _OP_i64 : (*_Assembler)._asm_OP_i64,
+ _OP_u8 : (*_Assembler)._asm_OP_u8,
+ _OP_u16 : (*_Assembler)._asm_OP_u16,
+ _OP_u32 : (*_Assembler)._asm_OP_u32,
+ _OP_u64 : (*_Assembler)._asm_OP_u64,
+ _OP_f32 : (*_Assembler)._asm_OP_f32,
+ _OP_f64 : (*_Assembler)._asm_OP_f64,
+ _OP_str : (*_Assembler)._asm_OP_str,
+ _OP_bin : (*_Assembler)._asm_OP_bin,
+ _OP_quote : (*_Assembler)._asm_OP_quote,
+ _OP_number : (*_Assembler)._asm_OP_number,
+ _OP_eface : (*_Assembler)._asm_OP_eface,
+ _OP_iface : (*_Assembler)._asm_OP_iface,
+ _OP_byte : (*_Assembler)._asm_OP_byte,
+ _OP_text : (*_Assembler)._asm_OP_text,
+ _OP_deref : (*_Assembler)._asm_OP_deref,
+ _OP_index : (*_Assembler)._asm_OP_index,
+ _OP_load : (*_Assembler)._asm_OP_load,
+ _OP_save : (*_Assembler)._asm_OP_save,
+ _OP_drop : (*_Assembler)._asm_OP_drop,
+ _OP_drop_2 : (*_Assembler)._asm_OP_drop_2,
+ _OP_recurse : (*_Assembler)._asm_OP_recurse,
+ _OP_is_nil : (*_Assembler)._asm_OP_is_nil,
+ _OP_is_nil_p1 : (*_Assembler)._asm_OP_is_nil_p1,
+ _OP_is_zero_1 : (*_Assembler)._asm_OP_is_zero_1,
+ _OP_is_zero_2 : (*_Assembler)._asm_OP_is_zero_2,
+ _OP_is_zero_4 : (*_Assembler)._asm_OP_is_zero_4,
+ _OP_is_zero_8 : (*_Assembler)._asm_OP_is_zero_8,
+ _OP_is_zero_map : (*_Assembler)._asm_OP_is_zero_map,
+ _OP_goto : (*_Assembler)._asm_OP_goto,
+ _OP_map_iter : (*_Assembler)._asm_OP_map_iter,
+ _OP_map_stop : (*_Assembler)._asm_OP_map_stop,
+ _OP_map_check_key : (*_Assembler)._asm_OP_map_check_key,
+ _OP_map_write_key : (*_Assembler)._asm_OP_map_write_key,
+ _OP_map_value_next : (*_Assembler)._asm_OP_map_value_next,
+ _OP_slice_len : (*_Assembler)._asm_OP_slice_len,
+ _OP_slice_next : (*_Assembler)._asm_OP_slice_next,
+ _OP_marshal : (*_Assembler)._asm_OP_marshal,
+ _OP_marshal_p : (*_Assembler)._asm_OP_marshal_p,
+ _OP_marshal_text : (*_Assembler)._asm_OP_marshal_text,
+ _OP_marshal_text_p : (*_Assembler)._asm_OP_marshal_text_p,
+ _OP_cond_set : (*_Assembler)._asm_OP_cond_set,
+ _OP_cond_testc : (*_Assembler)._asm_OP_cond_testc,
+}
+
+func (self *_Assembler) instr(v *_Instr) {
+ if fn := _OpFuncTab[v.op()]; fn != nil {
+ fn(self, v)
+ } else {
+ panic(fmt.Sprintf("invalid opcode: %d", v.op()))
+ }
+}
+
+func (self *_Assembler) instrs() {
+ for i, v := range self.p {
+ self.Mark(i)
+ self.instr(&v)
+ self.debug_instr(i, &v)
+ }
+}
+
+func (self *_Assembler) builtins() {
+ self.more_space()
+ self.error_too_deep()
+ self.error_invalid_number()
+ self.error_nan_or_infinite()
+ self.go_panic()
+}
+
+func (self *_Assembler) epilogue() {
+ self.Mark(len(self.p))
+ self.Emit("XORL", _ET, _ET)
+ self.Emit("XORL", _EP, _EP)
+ self.Link(_LB_error)
+ self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX
+ self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX)
+ self.Emit("MOVQ", _ET, _RET_et) // MOVQ ET, et<>+24(FP)
+ self.Emit("MOVQ", _EP, _RET_ep) // MOVQ EP, ep<>+32(FP)
+ self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP
+ self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP
+ self.Emit("RET") // RET
+}
+
+func (self *_Assembler) prologue() {
+ self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP
+ self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP)
+ self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP
+ self.load_buffer() // LOAD {buf}
+ self.Emit("MOVQ", _ARG_vp, _SP_p) // MOVQ vp<>+8(FP), SP.p
+ self.Emit("MOVQ", _ARG_sb, _ST) // MOVQ sb<>+16(FP), ST
+ self.Emit("XORL", _SP_x, _SP_x) // XORL SP.x, SP.x
+ self.Emit("XORL", _SP_f, _SP_f) // XORL SP.f, SP.f
+ self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q
+}
+
+/** Assembler Inline Functions **/
+
+func (self *_Assembler) xsave(reg ...obj.Addr) {
+ for i, v := range reg {
+ if i > _FP_saves / 8 - 1 {
+ panic("too many registers to save")
+ } else {
+ self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8))
+ }
+ }
+}
+
+func (self *_Assembler) xload(reg ...obj.Addr) {
+ for i, v := range reg {
+ if i > _FP_saves / 8 - 1 {
+ panic("too many registers to load")
+ } else {
+ self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v)
+ }
+ }
+}
+
+func (self *_Assembler) rbuf_di() {
+ if _RP.Reg != x86.REG_DI {
+ panic("register allocation messed up: RP != DI")
+ } else {
+ self.Emit("ADDQ", _RL, _RP)
+ }
+}
+
+func (self *_Assembler) store_int(nd int, fn obj.Addr, ins string) {
+ self.check_size(nd)
+ self.save_c() // SAVE $C_regs
+ self.rbuf_di() // MOVQ RP, DI
+ self.Emit(ins, jit.Ptr(_SP_p, 0), _SI) // $ins (SP.p), SI
+ self.call_c(fn) // CALL_C $fn
+ self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL
+}
+
+func (self *_Assembler) store_str(s string) {
+ i := 0
+ m := rt.Str2Mem(s)
+
+ /* 8-byte stores */
+ for i <= len(m) - 8 {
+ self.Emit("MOVQ", jit.Imm(rt.Get64(m[i:])), _AX) // MOVQ $s[i:], AX
+ self.Emit("MOVQ", _AX, jit.Sib(_RP, _RL, 1, int64(i))) // MOVQ AX, i(RP)(RL)
+ i += 8
+ }
+
+ /* 4-byte stores */
+ if i <= len(m) - 4 {
+ self.Emit("MOVL", jit.Imm(int64(rt.Get32(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVL $s[i:], i(RP)(RL)
+ i += 4
+ }
+
+ /* 2-byte stores */
+ if i <= len(m) - 2 {
+ self.Emit("MOVW", jit.Imm(int64(rt.Get16(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVW $s[i:], i(RP)(RL)
+ i += 2
+ }
+
+ /* last byte */
+ if i < len(m) {
+ self.Emit("MOVB", jit.Imm(int64(m[i])), jit.Sib(_RP, _RL, 1, int64(i))) // MOVB $s[i:], i(RP)(RL)
+ }
+}
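+
+// Editor's note: an illustrative sketch, not part of the vendored source.
+// storeStrRef (hypothetical name) is a plain-Go rendering of the store
+// sequence that store_str emits: copy s into buf at offset n using the
+// widest stores first, 8 bytes at a time, then 4, 2, and a final byte.
+func storeStrRef(buf []byte, n int, s string) {
+    i := 0
+    for ; i+8 <= len(s); i += 8 { // 8-byte chunks (MOVQ)
+        copy(buf[n+i:], s[i:i+8])
+    }
+    if i+4 <= len(s) { // at most one 4-byte chunk (MOVL)
+        copy(buf[n+i:], s[i:i+4])
+        i += 4
+    }
+    if i+2 <= len(s) { // at most one 2-byte chunk (MOVW)
+        copy(buf[n+i:], s[i:i+2])
+        i += 2
+    }
+    if i < len(s) { // trailing byte (MOVB)
+        buf[n+i] = s[i]
+    }
+}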
+
+func (self *_Assembler) check_size(n int) {
+ self.check_size_rl(jit.Ptr(_RL, int64(n)))
+}
+
+func (self *_Assembler) check_size_r(r obj.Addr, d int) {
+ self.check_size_rl(jit.Sib(_RL, r, 1, int64(d)))
+}
+
+func (self *_Assembler) check_size_rl(v obj.Addr) {
+ idx := self.x
+ key := _LB_more_space_return + strconv.Itoa(idx)
+
+ /* the following code relies on LR == R9 to work */
+ if _LR.Reg != x86.REG_R9 {
+ panic("register allocation messed up: LR != R9")
+ }
+
+ /* check for buffer capacity */
+ self.x++
+ self.Emit("LEAQ", v, _AX) // LEAQ $v, AX
+ self.Emit("CMPQ", _AX, _RC) // CMPQ AX, RC
+ self.Sjmp("JBE" , key) // JBE _more_space_return_{n}
+ self.slice_grow_ax(key) // GROW $key
+ self.Link(key) // _more_space_return_{n}:
+}
+
+func (self *_Assembler) slice_grow_ax(ret string) {
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ ?(PC), R9
+ self.Sref(ret, 4) // .... &ret
+ self.Sjmp("JMP" , _LB_more_space) // JMP _more_space
+}
+
+/** State Stack Helpers **/
+
+const (
+ _StateSize = int64(unsafe.Sizeof(_State{}))
+ _StackLimit = _MaxStack * _StateSize
+)
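+
+// Editor's note: an illustrative sketch, not part of the vendored source.
+// The real _State is defined elsewhere in this package; the field names and
+// types below are assumptions, chosen only to match the 8/16/24/32 offsets
+// used by save_state / drop_state and the 32-byte _StateSize they imply.
+type stateSketch struct {
+    x uint64         // saved SP.x -> 8(ST)(CX)
+    f uint64         // saved SP.f -> 16(ST)(CX)
+    p unsafe.Pointer // saved SP.p -> 24(ST)(CX)
+    q unsafe.Pointer // saved SP.q -> 32(ST)(CX)
+}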
+
+func (self *_Assembler) save_state() {
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX
+ self.Emit("LEAQ", jit.Ptr(_CX, _StateSize), _R8) // LEAQ _StateSize(CX), R8
+ self.Emit("CMPQ", _R8, jit.Imm(_StackLimit)) // CMPQ R8, $_StackLimit
+ self.Sjmp("JAE" , _LB_error_too_deep) // JA _error_too_deep
+ self.Emit("MOVQ", _SP_x, jit.Sib(_ST, _CX, 1, 8)) // MOVQ SP.x, 8(ST)(CX)
+ self.Emit("MOVQ", _SP_f, jit.Sib(_ST, _CX, 1, 16)) // MOVQ SP.f, 16(ST)(CX)
+ self.WriteRecNotAX(0, _SP_p, jit.Sib(_ST, _CX, 1, 24)) // MOVQ SP.p, 24(ST)(CX)
+ self.WriteRecNotAX(1, _SP_q, jit.Sib(_ST, _CX, 1, 32)) // MOVQ SP.q, 32(ST)(CX)
+ self.Emit("MOVQ", _R8, jit.Ptr(_ST, 0)) // MOVQ R8, (ST)
+}
+
+func (self *_Assembler) drop_state(decr int64) {
+ self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("SUBQ" , jit.Imm(decr), _AX) // SUBQ $decr, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST)
+ self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _SP_x) // MOVQ 8(ST)(AX), SP.x
+ self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 16), _SP_f) // MOVQ 16(ST)(AX), SP.f
+ self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 24), _SP_p) // MOVQ 24(ST)(AX), SP.p
+ self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 32), _SP_q) // MOVQ 32(ST)(AX), SP.q
+ self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
+ self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX)
+ self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 24)) // MOVOU X0, 24(ST)(AX)
+}
+
+/** Buffer Helpers **/
+
+func (self *_Assembler) add_char(ch byte) {
+ self.Emit("MOVB", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVB $ch, (RP)(RL)
+ self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL
+}
+
+func (self *_Assembler) add_long(ch uint32, n int64) {
+ self.Emit("MOVL", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVL $ch, (RP)(RL)
+ self.Emit("ADDQ", jit.Imm(n), _RL) // ADDQ $n, RL
+}
+
+func (self *_Assembler) add_text(ss string) {
+ self.store_str(ss) // TEXT $ss
+ self.Emit("ADDQ", jit.Imm(int64(len(ss))), _RL) // ADDQ ${len(ss)}, RL
+}
+
+func (self *_Assembler) prep_buffer() {
+ self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX
+ self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+}
+
+func (self *_Assembler) prep_buffer_c() {
+ self.Emit("MOVQ", _ARG_rb, _DI) // MOVQ rb<>+0(FP), DI
+ self.Emit("MOVQ", _RL, jit.Ptr(_DI, 8)) // MOVQ RL, 8(DI)
+}
+
+func (self *_Assembler) save_buffer() {
+ self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX
+ self.Emit("MOVQ", _RP, jit.Ptr(_CX, 0)) // MOVQ RP, (CX)
+ self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX)
+ self.Emit("MOVQ", _RC, jit.Ptr(_CX, 16)) // MOVQ RC, 16(CX)
+}
+
+func (self *_Assembler) load_buffer() {
+ self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX
+ self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX), RP
+ self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX), RL
+ self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), RC
+}
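+
+// Editor's note: an illustrative sketch, not part of the vendored source.
+// The buffer helpers above access *rb through its raw slice header: data
+// pointer at +0, length at +8, capacity at +16, mirrored by RP / RL / RC.
+// The layout below is the usual amd64 slice header, stated as an assumption.
+type bufHeaderSketch struct {
+    Data unsafe.Pointer // (rb)   -> RP
+    Len  int            // 8(rb)  -> RL
+    Cap  int            // 16(rb) -> RC
+}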
+
+/** Function Interface Helpers **/
+
+func (self *_Assembler) call(pc obj.Addr) {
+ self.Emit("MOVQ", pc, _AX) // MOVQ $pc, AX
+ self.Rjmp("CALL", _AX) // CALL AX
+}
+
+func (self *_Assembler) save_c() {
+ self.xsave(_REG_ffi...) // SAVE $REG_ffi
+}
+
+func (self *_Assembler) call_c(pc obj.Addr) {
+ self.call(pc) // CALL $pc
+ self.xload(_REG_ffi...) // LOAD $REG_ffi
+}
+
+func (self *_Assembler) call_go(pc obj.Addr) {
+ self.xsave(_REG_all...) // SAVE $REG_all
+ self.call(pc) // CALL $pc
+ self.xload(_REG_all...) // LOAD $REG_all
+}
+
+func (self *_Assembler) call_encoder(pc obj.Addr) {
+ self.xsave(_REG_enc...) // SAVE $REG_enc
+ self.call(pc) // CALL $pc
+ self.xload(_REG_enc...) // LOAD $REG_enc
+ self.load_buffer() // LOAD {buf}
+}
+
+func (self *_Assembler) call_marshaler(fn obj.Addr, it *rt.GoType, vt reflect.Type) {
+ switch vt.Kind() {
+ case reflect.Interface : self.call_marshaler_i(fn, it)
+ case reflect.Ptr, reflect.Map: self.call_marshaler_v(fn, it, vt, true)
+ default : self.call_marshaler_v(fn, it, vt, false)
+ }
+}
+
+func (self *_Assembler) call_marshaler_i(fn obj.Addr, it *rt.GoType) {
+ self.Emit("MOVQ" , jit.Gtype(it), _AX) // MOVQ $it, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _CX) // MOVQ 8(SP.p), CX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JZ" , "_null_{n}") // JZ _null_{n}
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
+ self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 16)) // MOVQ CX, 16(SP)
+ self.call_go(_F_assertI2I) // CALL_GO assertI2I
+ self.prep_buffer() // MOVE {buf}, (SP)
+ self.Emit("MOVOU", jit.Ptr(_SP, 24), _X0) // MOVOU 24(SP), X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP)
+ self.Emit("MOVQ", _ARG_fv, _CX) // MOVQ ARG.fv, CX
+ self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24)) // MOVQ CX, 24(SP)
+ self.call_encoder(fn) // CALL $fn
+ self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+ self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n}
+ self.Link("_null_{n}") // _null_{n}:
+ self.check_size(4) // SIZE $4
+ self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1)
+ self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL
+ self.Link("_done_{n}") // _done_{n}:
+}
+
+func (self *_Assembler) call_marshaler_v(fn obj.Addr, it *rt.GoType, vt reflect.Type, deref bool) {
+ self.prep_buffer() // MOVE {buf}, (SP)
+ self.Emit("MOVQ", jit.Itab(it, vt), _AX) // MOVQ $(itab(it, vt)), AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
+
+ /* dereference the pointer if needed */
+ if !deref {
+ self.Emit("MOVQ", _SP_p, jit.Ptr(_SP, 16)) // MOVQ SP.p, 16(SP)
+ } else {
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP)
+ }
+
+ /* call the encoder, and perform error checks */
+ self.Emit("MOVQ", _ARG_fv, _CX) // MOVQ ARG.fv, CX
+ self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24)) // MOVQ CX, 24(SP)
+ self.call_encoder(fn) // CALL $fn
+ self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+}
+
+/** Builtin: _more_space **/
+
+var (
+ _T_byte = jit.Type(byteType)
+ _F_growslice = jit.Func(growslice)
+)
+
+func (self *_Assembler) more_space() {
+ self.Link(_LB_more_space)
+ self.Emit("MOVQ", _T_byte, jit.Ptr(_SP, 0)) // MOVQ $_T_byte, (SP)
+ self.Emit("MOVQ", _RP, jit.Ptr(_SP, 8)) // MOVQ RP, 8(SP)
+ self.Emit("MOVQ", _RL, jit.Ptr(_SP, 16)) // MOVQ RL, 16(SP)
+ self.Emit("MOVQ", _RC, jit.Ptr(_SP, 24)) // MOVQ RC, 24(SP)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP)
+ self.xsave(_REG_jsr...) // SAVE $REG_jsr
+ self.call(_F_growslice) // CALL $pc
+ self.xload(_REG_jsr...) // LOAD $REG_jsr
+ self.Emit("MOVQ", jit.Ptr(_SP, 40), _RP) // MOVQ 40(SP), RP
+ self.Emit("MOVQ", jit.Ptr(_SP, 48), _RL) // MOVQ 48(SP), RL
+ self.Emit("MOVQ", jit.Ptr(_SP, 56), _RC) // MOVQ 56(SP), RC
+ self.save_buffer() // SAVE {buf}
+ self.Rjmp("JMP" , _LR) // JMP LR
+}
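+
+// Editor's note: an illustrative sketch, not part of the vendored source.
+// _more_space passes the current (ptr, len, cap) plus the required new
+// length in AX to runtime.growslice and reloads the returned triple. Seen
+// from check_size, the net effect is roughly this hypothetical helper.
+func moreSpaceSketch(buf []byte, need int) []byte {
+    for cap(buf)-len(buf) < need { // grow until the pending write fits
+        buf = append(buf, 0)[:len(buf)]
+    }
+    return buf
+}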
+
+/** Builtin Errors **/
+
+var (
+ _V_ERR_too_deep = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_too_deep))))
+ _V_ERR_nan_or_infinite = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_nan_or_infinite))))
+ _I_json_UnsupportedValueError = jit.Itab(rt.UnpackType(errorType), jsonUnsupportedValueType)
+)
+
+func (self *_Assembler) error_too_deep() {
+ self.Link(_LB_error_too_deep)
+ self.Emit("MOVQ", _V_ERR_too_deep, _EP) // MOVQ $_V_ERR_too_deep, EP
+ self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValuError, ET
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) error_invalid_number() {
+ self.Link(_LB_error_invalid_number)
+ self.call_go(_F_error_number) // CALL_GO error_number
+ self.Emit("MOVQ", jit.Ptr(_SP, 16), _ET) // MOVQ 16(SP), ET
+ self.Emit("MOVQ", jit.Ptr(_SP, 24), _EP) // MOVQ 24(SP), EP
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) error_nan_or_infinite() {
+ self.Link(_LB_error_nan_or_infinite)
+ self.Emit("MOVQ", _V_ERR_nan_or_infinite, _EP) // MOVQ $_V_ERR_nan_or_infinite, EP
+ self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValuError, ET
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+/** String Encoding Routine **/
+
+var (
+ _F_quote = jit.Imm(int64(native.S_quote))
+ _F_panic = jit.Func(goPanic)
+)
+
+func (self *_Assembler) go_panic() {
+ self.Link(_LB_panic)
+ self.Emit("MOVQ", _SP_p, jit.Ptr(_SP, 8))
+ self.call_go(_F_panic)
+}
+
+func (self *_Assembler) encode_string(doubleQuote bool) {
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JZ" , "_str_empty_{n}") // JZ _str_empty_{n}
+ self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0))
+ self.Sjmp("JNE" , "_str_next_{n}")
+ self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), jit.Ptr(_SP, 0))
+ self.Sjmp("JMP", _LB_panic)
+ self.Link("_str_next_{n}")
+
+ /* opening quote, check for double quote */
+ if !doubleQuote {
+ self.check_size_r(_AX, 2) // SIZE $2
+ self.add_char('"') // CHAR $'"'
+ } else {
+ self.check_size_r(_AX, 6) // SIZE $6
+ self.add_long(_IM_open, 3) // TEXT $`"\"`
+ }
+
+ /* quoting loop */
+ self.Emit("XORL", _AX, _AX) // XORL AX, AX
+ self.Emit("MOVQ", _AX, _VAR_sp) // MOVQ AX, sp
+ self.Link("_str_loop_{n}") // _str_loop_{n}:
+ self.save_c() // SAVE $REG_ffi
+
+ /* load the output buffer first, and then input buffer,
+ * because the parameter registers collide with RP / RL / RC */
+ self.Emit("MOVQ", _RC, _CX) // MOVQ RC, CX
+ self.Emit("SUBQ", _RL, _CX) // SUBQ RL, CX
+ self.Emit("MOVQ", _CX, _VAR_dn) // MOVQ CX, dn
+ self.Emit("LEAQ", jit.Sib(_RP, _RL, 1, 0), _DX) // LEAQ (RP)(RL), DX
+ self.Emit("LEAQ", _VAR_dn, _CX) // LEAQ dn, CX
+ self.Emit("MOVQ", _VAR_sp, _AX) // MOVQ sp, AX
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _DI) // MOVQ (SP.p), DI
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _SI) // MOVQ 8(SP.p), SI
+ self.Emit("ADDQ", _AX, _DI) // ADDQ AX, DI
+ self.Emit("SUBQ", _AX, _SI) // SUBQ AX, SI
+
+ /* set the flags based on `doubleQuote` */
+ if !doubleQuote {
+ self.Emit("XORL", _R8, _R8) // XORL R8, R8
+ } else {
+ self.Emit("MOVL", jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8
+ }
+
+ /* call the native quoter */
+ self.call_c(_F_quote) // CALL quote
+ self.Emit("ADDQ" , _VAR_dn, _RL) // ADDQ dn, RL
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , "_str_space_{n}") // JS _str_space_{n}
+
+ /* close the string, check for double quote */
+ if !doubleQuote {
+ self.check_size(1) // SIZE $1
+ self.add_char('"') // CHAR $'"'
+ self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n}
+ } else {
+ self.check_size(3) // SIZE $3
+ self.add_text("\\\"\"") // TEXT $'\""'
+ self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n}
+ }
+
+ /* not enough space to contain the quoted string */
+ self.Link("_str_space_{n}") // _str_space_{n}:
+ self.Emit("NOTQ", _AX) // NOTQ AX
+ self.Emit("ADDQ", _AX, _VAR_sp) // ADDQ AX, sp
+ self.Emit("LEAQ", jit.Sib(_RC, _RC, 1, 0), _AX) // LEAQ (RC)(RC), AX
+ self.slice_grow_ax("_str_loop_{n}") // GROW _str_loop_{n}
+
+ /* empty string, check for double quote */
+ if !doubleQuote {
+ self.Link("_str_empty_{n}") // _str_empty_{n}:
+ self.check_size(2) // SIZE $2
+ self.add_text("\"\"") // TEXT $'""'
+ self.Link("_str_end_{n}") // _str_end_{n}:
+ } else {
+ self.Link("_str_empty_{n}") // _str_empty_{n}:
+ self.check_size(6) // SIZE $6
+ self.add_text("\"\\\"\\\"\"") // TEXT $'"\"\""'
+ self.Link("_str_end_{n}") // _str_end_{n}:
+ }
+}
+
+/** OpCode Assembler Functions **/
+
+var (
+ _T_json_Marshaler = rt.UnpackType(jsonMarshalerType)
+ _T_encoding_TextMarshaler = rt.UnpackType(encodingTextMarshalerType)
+)
+
+var (
+ _F_f64toa = jit.Imm(int64(native.S_f64toa))
+ _F_f32toa = jit.Imm(int64(native.S_f32toa))
+ _F_i64toa = jit.Imm(int64(native.S_i64toa))
+ _F_u64toa = jit.Imm(int64(native.S_u64toa))
+ _F_b64encode = jit.Imm(int64(_subr__b64encode))
+)
+
+var (
+ _F_memmove = jit.Func(memmove)
+ _F_error_number = jit.Func(error_number)
+ _F_isValidNumber = jit.Func(isValidNumber)
+)
+
+var (
+ _F_iteratorStop = jit.Func(iteratorStop)
+ _F_iteratorNext = jit.Func(iteratorNext)
+ _F_iteratorStart = jit.Func(iteratorStart)
+)
+
+var (
+ _F_encodeTypedPointer obj.Addr
+ _F_encodeJsonMarshaler obj.Addr
+ _F_encodeTextMarshaler obj.Addr
+)
+
+const (
+ _MODE_AVX2 = 1 << 2
+)
+
+func init() {
+ _F_encodeTypedPointer = jit.Func(encodeTypedPointer)
+ _F_encodeJsonMarshaler = jit.Func(encodeJsonMarshaler)
+ _F_encodeTextMarshaler = jit.Func(encodeTextMarshaler)
+}
+
+func (self *_Assembler) _asm_OP_null(_ *_Instr) {
+ self.check_size(4)
+ self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1)
+ self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL
+}
+
+func (self *_Assembler) _asm_OP_empty_arr(_ *_Instr) {
+ self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv)
+ self.Sjmp("JC", "_empty_arr_{n}")
+ self._asm_OP_null(nil)
+ self.Sjmp("JMP", "_empty_arr_end_{n}")
+ self.Link("_empty_arr_{n}")
+ self.check_size(2)
+ self.Emit("MOVW", jit.Imm(_IM_array), jit.Sib(_RP, _RL, 1, 0))
+ self.Emit("ADDQ", jit.Imm(2), _RL)
+ self.Link("_empty_arr_end_{n}")
+}
+
+func (self *_Assembler) _asm_OP_empty_obj(_ *_Instr) {
+ self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv)
+ self.Sjmp("JC", "_empty_obj_{n}")
+ self._asm_OP_null(nil)
+ self.Sjmp("JMP", "_empty_obj_end_{n}")
+ self.Link("_empty_obj_{n}")
+ self.check_size(2)
+ self.Emit("MOVW", jit.Imm(_IM_object), jit.Sib(_RP, _RL, 1, 0))
+ self.Emit("ADDQ", jit.Imm(2), _RL)
+ self.Link("_empty_obj_end_{n}")
+}
+
+func (self *_Assembler) _asm_OP_bool(_ *_Instr) {
+ self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0
+ self.Sjmp("JE" , "_false_{n}") // JE _false_{n}
+ self.check_size(4) // SIZE $4
+ self.Emit("MOVL", jit.Imm(_IM_true), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'true', (RP)(RL*1)
+ self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL
+ self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n}
+ self.Link("_false_{n}") // _false_{n}:
+ self.check_size(5) // SIZE $5
+ self.Emit("MOVL", jit.Imm(_IM_fals), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'fals', (RP)(RL*1)
+ self.Emit("MOVB", jit.Imm('e'), jit.Sib(_RP, _RL, 1, 4)) // MOVB $'e', 4(RP)(RL*1)
+ self.Emit("ADDQ", jit.Imm(5), _RL) // ADDQ $5, RL
+ self.Link("_end_{n}") // _end_{n}:
+}
+
+func (self *_Assembler) _asm_OP_i8(_ *_Instr) {
+ self.store_int(4, _F_i64toa, "MOVBQSX")
+}
+
+func (self *_Assembler) _asm_OP_i16(_ *_Instr) {
+ self.store_int(6, _F_i64toa, "MOVWQSX")
+}
+
+func (self *_Assembler) _asm_OP_i32(_ *_Instr) {
+ self.store_int(17, _F_i64toa, "MOVLQSX")
+}
+
+func (self *_Assembler) _asm_OP_i64(_ *_Instr) {
+ self.store_int(21, _F_i64toa, "MOVQ")
+}
+
+func (self *_Assembler) _asm_OP_u8(_ *_Instr) {
+ self.store_int(3, _F_u64toa, "MOVBQZX")
+}
+
+func (self *_Assembler) _asm_OP_u16(_ *_Instr) {
+ self.store_int(5, _F_u64toa, "MOVWQZX")
+}
+
+func (self *_Assembler) _asm_OP_u32(_ *_Instr) {
+ self.store_int(16, _F_u64toa, "MOVLQZX")
+}
+
+func (self *_Assembler) _asm_OP_u64(_ *_Instr) {
+ self.store_int(20, _F_u64toa, "MOVQ")
+}
+
+func (self *_Assembler) _asm_OP_f32(_ *_Instr) {
+ self.check_size(32)
+ self.Emit("MOVL" , jit.Ptr(_SP_p, 0), _AX) // MOVL (SP.p), AX
+ self.Emit("ANDL" , jit.Imm(_FM_exp32), _AX) // ANDL $_FM_exp32, AX
+ self.Emit("XORL" , jit.Imm(_FM_exp32), _AX) // XORL $_FM_exp32, AX
+ self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite
+ self.save_c() // SAVE $C_regs
+ self.rbuf_di() // MOVQ RP, DI
+ self.Emit("MOVSS" , jit.Ptr(_SP_p, 0), _X0) // MOVSS (SP.p), X0
+ self.call_c(_F_f32toa) // CALL_C f32toa
+ self.Emit("ADDQ" , _AX, _RL) // ADDQ AX, RL
+}
+
+func (self *_Assembler) _asm_OP_f64(_ *_Instr) {
+ self.check_size(32)
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+ self.Emit("MOVQ" , jit.Imm(_FM_exp64), _CX) // MOVQ $_FM_exp64, CX
+ self.Emit("ANDQ" , _CX, _AX) // ANDQ CX, AX
+ self.Emit("XORQ" , _CX, _AX) // XORQ CX, AX
+ self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite
+ self.save_c() // SAVE $C_regs
+ self.rbuf_di() // MOVQ RP, DI
+ self.Emit("MOVSD" , jit.Ptr(_SP_p, 0), _X0) // MOVSD (SP.p), X0
+ self.call_c(_F_f64toa) // CALL_C f64toa
+ self.Emit("ADDQ" , _AX, _RL) // ADDQ AX, RL
+}
+
+func (self *_Assembler) _asm_OP_str(_ *_Instr) {
+ self.encode_string(false)
+}
+
+func (self *_Assembler) _asm_OP_bin(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX
+ self.Emit("ADDQ", jit.Imm(2), _AX) // ADDQ $2, AX
+ self.Emit("MOVQ", jit.Imm(_IM_mulv), _CX) // MOVQ $_MF_mulv, CX
+ self.Emit("MOVQ", _DX, _R8) // MOVQ DX, R8
+ self.From("MULQ", _CX) // MULQ CX
+ self.Emit("LEAQ", jit.Sib(_DX, _DX, 1, 1), _AX) // LEAQ 1(DX)(DX), AX
+ self.Emit("ORQ" , jit.Imm(2), _AX) // ORQ $2, AX
+ self.Emit("MOVQ", _R8, _DX) // MOVQ R8, DX
+ self.check_size_r(_AX, 0) // SIZE AX
+ self.add_char('"') // CHAR $'"'
+ self.save_c() // SAVE $REG_ffi
+ self.prep_buffer_c() // MOVE {buf}, DI
+ self.Emit("MOVQ", _SP_p, _SI) // MOVQ SP.p, SI
+
+ /* check for AVX2 support */
+ if !cpu.HasAVX2 {
+ self.Emit("XORL", _DX, _DX) // XORL DX, DX
+ } else {
+ self.Emit("MOVL", jit.Imm(_MODE_AVX2), _DX) // MOVL $_MODE_AVX2, DX
+ }
+
+ /* call the encoder */
+ self.call_c(_F_b64encode) // CALL b64encode
+ self.load_buffer() // LOAD {buf}
+ self.add_char('"') // CHAR $'"'
+}
+
+func (self *_Assembler) _asm_OP_quote(_ *_Instr) {
+ self.encode_string(true)
+}
+
+func (self *_Assembler) _asm_OP_number(_ *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _CX) // MOVQ (SP.p), CX
+ self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX
+ self.Sjmp("JZ" , "_empty_{n}") // JZ _empty_{n}
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNZ" , "_number_next_{n}")
+ self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), jit.Ptr(_SP, 0))
+ self.Sjmp("JMP", _LB_panic)
+ self.Link("_number_next_{n}")
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+ self.call_go(_F_isValidNumber) // CALL_GO isValidNumber
+ self.Emit("CMPB" , jit.Ptr(_SP, 16), jit.Imm(0)) // CMPB 16(SP), $0
+ self.Sjmp("JE" , _LB_error_invalid_number) // JE _error_invalid_number
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX
+ self.check_size_r(_AX, 0) // SIZE AX
+ self.Emit("LEAQ" , jit.Sib(_RP, _RL, 1, 0), _AX) // LEAQ (RP)(RL), AX
+ self.Emit("ADDQ" , jit.Ptr(_SP_p, 8), _RL) // ADDQ 8(SP.p), RL
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVOU", jit.Ptr(_SP_p, 0), _X0) // MOVOU (SP.p), X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP)
+ self.call_go(_F_memmove) // CALL_GO memmove
+ self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n}
+ self.Link("_empty_{n}") // _empty_{n}:
+ self.check_size(1) // SIZE $1
+ self.add_char('0') // CHAR $'0'
+ self.Link("_done_{n}") // _done_{n}:
+}
+
+func (self *_Assembler) _asm_OP_eface(_ *_Instr) {
+ self.prep_buffer() // MOVE {buf}, (SP)
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
+ self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _AX) // LEAQ 8(SP.p), AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP)
+ self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 24)) // MOVQ ST, 24(SP)
+ self.Emit("MOVQ" , _ARG_fv, _AX) // MOVQ fv, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP)
+ self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _ET) // MOVQ 40(SP), ET
+ self.Emit("MOVQ" , jit.Ptr(_SP, 48), _EP) // MOVQ 48(SP), EP
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+}
+
+func (self *_Assembler) _asm_OP_iface(_ *_Instr) {
+ self.prep_buffer() // MOVE {buf}, (SP)
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+ self.Emit("MOVQ" , jit.Ptr(_AX, 8), _AX) // MOVQ 8(AX), AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
+ self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _AX) // LEAQ 8(SP.p), AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP)
+ self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 24)) // MOVQ ST, 24(SP)
+ self.Emit("MOVQ" , _ARG_fv, _AX) // MOVQ fv, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP)
+ self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _ET) // MOVQ 40(SP), ET
+ self.Emit("MOVQ" , jit.Ptr(_SP, 48), _EP) // MOVQ 48(SP), EP
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+}
+
+func (self *_Assembler) _asm_OP_byte(p *_Instr) {
+ self.check_size(1)
+ self.Emit("MOVB", jit.Imm(p.i64()), jit.Sib(_RP, _RL, 1, 0)) // MOVL p.vi(), (RP)(RL*1)
+ self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL
+}
+
+func (self *_Assembler) _asm_OP_text(p *_Instr) {
+ self.check_size(len(p.vs())) // SIZE ${len(p.vs())}
+ self.add_text(p.vs()) // TEXT ${p.vs()}
+}
+
+func (self *_Assembler) _asm_OP_deref(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p
+}
+
+func (self *_Assembler) _asm_OP_index(p *_Instr) {
+ self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ $p.vi(), AX
+ self.Emit("ADDQ", _AX, _SP_p) // ADDQ AX, SP.p
+}
+
+func (self *_Assembler) _asm_OP_load(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -24), _SP_x) // MOVQ -24(ST)(AX), SP.x
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -8), _SP_p) // MOVQ -8(ST)(AX), SP.p
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _SP_q) // MOVQ (ST)(AX), SP.q
+}
+
+func (self *_Assembler) _asm_OP_save(_ *_Instr) {
+ self.save_state()
+}
+
+func (self *_Assembler) _asm_OP_drop(_ *_Instr) {
+ self.drop_state(_StateSize)
+}
+
+func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) {
+ self.drop_state(_StateSize * 2) // DROP $(_StateSize * 2)
+ self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 56)) // MOVOU X0, 56(ST)(AX)
+}
+
+func (self *_Assembler) _asm_OP_recurse(p *_Instr) {
+ self.prep_buffer() // MOVE {buf}, (SP)
+ vt, pv := p.vp()
+ self.Emit("MOVQ", jit.Type(vt), _AX) // MOVQ $(type(p.vt())), AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
+
+ /* check for indirection */
+ if !rt.UnpackType(vt).Indirect() {
+ self.Emit("MOVQ", _SP_p, _AX) // MOVQ SP.p, AX
+ } else {
+ self.Emit("MOVQ", _SP_p, _VAR_vp) // MOVQ SP.p, 48(SP)
+ self.Emit("LEAQ", _VAR_vp, _AX) // LEAQ 48(SP), AX
+ }
+
+ /* call the encoder */
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP)
+ self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 24)) // MOVQ ST, 24(SP)
+ self.Emit("MOVQ" , _ARG_fv, _AX) // MOVQ fv, AX
+ if pv {
+ self.Emit("BTCQ", jit.Imm(bitPointerValue), _AX) // BTCQ $1, AX
+ }
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP)
+ self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _ET) // MOVQ 40(SP), ET
+ self.Emit("MOVQ" , jit.Ptr(_SP, 48), _EP) // MOVQ 48(SP), EP
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+}
+
+func (self *_Assembler) _asm_OP_is_nil(p *_Instr) {
+ self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_nil_p1(p *_Instr) {
+ self.Emit("CMPQ", jit.Ptr(_SP_p, 8), jit.Imm(0)) // CMPQ 8(SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_zero_1(p *_Instr) {
+ self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_zero_2(p *_Instr) {
+ self.Emit("CMPW", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPW (SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_zero_4(p *_Instr) {
+ self.Emit("CMPL", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPL (SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_zero_8(p *_Instr) {
+ self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_zero_map(p *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Xjmp("JZ" , p.vi()) // JZ p.vi()
+ self.Emit("CMPQ" , jit.Ptr(_AX, 0), jit.Imm(0)) // CMPQ (AX), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_goto(p *_Instr) {
+ self.Xjmp("JMP", p.vi())
+}
+
+func (self *_Assembler) _asm_OP_map_iter(p *_Instr) {
+ self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ $p.vt(), AX
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _CX) // MOVQ (SP.p), CX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+ self.Emit("MOVQ" , _ARG_fv, _AX) // MOVQ fv, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP)
+ self.call_go(_F_iteratorStart) // CALL_GO iteratorStart
+ self.Emit("MOVQ" , jit.Ptr(_SP, 24), _SP_q) // MOVQ 24(SP), SP.q
+ self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+}
+
+func (self *_Assembler) _asm_OP_map_stop(_ *_Instr) {
+ self.Emit("MOVQ", _SP_q, jit.Ptr(_SP, 0)) // MOVQ SP.q, 0(SP)
+ self.call_go(_F_iteratorStop) // CALL_GO iteratorStop
+ self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q
+}
+
+func (self *_Assembler) _asm_OP_map_check_key(p *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_SP_q, 0), _SP_p) // MOVQ (SP.q), SP.p
+ self.Emit("TESTQ", _SP_p, _SP_p) // TESTQ SP.p, SP.p
+ self.Xjmp("JZ" , p.vi()) // JNZ p.vi()
+}
+
+func (self *_Assembler) _asm_OP_map_write_key(p *_Instr) {
+ self.Emit("BTQ", jit.Imm(bitSortMapKeys), _ARG_fv) // BTQ ${SortMapKeys}, fv
+ self.Sjmp("JNC", "_unordered_key_{n}") // JNC _unordered_key_{n}
+ self.encode_string(false) // STR $false
+ self.Xjmp("JMP", p.vi()) // JMP ${p.vi()}
+ self.Link("_unordered_key_{n}") // _unordered_key_{n}:
+}
+
+func (self *_Assembler) _asm_OP_map_value_next(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_SP_q, 8), _SP_p) // MOVQ 8(SP.q), SP.p
+ self.Emit("MOVQ", _SP_q, jit.Ptr(_SP, 0)) // MOVQ SP.q, (SP)
+ self.call_go(_F_iteratorNext) // CALL_GO iteratorNext
+}
+
+func (self *_Assembler) _asm_OP_slice_len(_ *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _SP_x) // MOVQ 8(SP.p), SP.x
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p
+ self.Emit("ORQ" , jit.Imm(1 << _S_init), _SP_f) // ORQ $(1<<_S_init), SP.f
+}
+
+func (self *_Assembler) _asm_OP_slice_next(p *_Instr) {
+ self.Emit("TESTQ" , _SP_x, _SP_x) // TESTQ SP.x, SP.x
+ self.Xjmp("JZ" , p.vi()) // JZ p.vi()
+ self.Emit("SUBQ" , jit.Imm(1), _SP_x) // SUBQ $1, SP.x
+ self.Emit("BTRQ" , jit.Imm(_S_init), _SP_f) // BTRQ $_S_init, SP.f
+ self.Emit("LEAQ" , jit.Ptr(_SP_p, int64(p.vlen())), _AX) // LEAQ $(p.vlen())(SP.p), AX
+ self.Emit("CMOVQCC", _AX, _SP_p) // CMOVQNC AX, SP.p
+}
+
+func (self *_Assembler) _asm_OP_marshal(p *_Instr) {
+ self.call_marshaler(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt())
+}
+
+func (self *_Assembler) _asm_OP_marshal_p(p *_Instr) {
+ if p.vk() != reflect.Ptr {
+ panic("marshal_p: invalid type")
+ } else {
+ self.call_marshaler_v(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt(), false)
+ }
+}
+
+func (self *_Assembler) _asm_OP_marshal_text(p *_Instr) {
+ self.call_marshaler(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt())
+}
+
+func (self *_Assembler) _asm_OP_marshal_text_p(p *_Instr) {
+ if p.vk() != reflect.Ptr {
+ panic("marshal_text_p: invalid type")
+ } else {
+ self.call_marshaler_v(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt(), false)
+ }
+}
+
+func (self *_Assembler) _asm_OP_cond_set(_ *_Instr) {
+ self.Emit("ORQ", jit.Imm(1 << _S_cond), _SP_f) // ORQ $(1<<_S_cond), SP.f
+}
+
+func (self *_Assembler) _asm_OP_cond_testc(p *_Instr) {
+ self.Emit("BTRQ", jit.Imm(_S_cond), _SP_f) // BTRQ $_S_cond, SP.f
+ self.Xjmp("JC" , p.vi())
+}
+
+func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) {
+ self.Emit("MOVQ", jit.Imm(int64(p2.op())), jit.Ptr(_SP, 16))// MOVQ $(p2.op()), 16(SP)
+ self.Emit("MOVQ", jit.Imm(int64(p1.op())), jit.Ptr(_SP, 8)) // MOVQ $(p1.op()), 8(SP)
+ self.Emit("MOVQ", jit.Imm(int64(i)), jit.Ptr(_SP, 0)) // MOVQ $(i), (SP)
+ self.call_go(_F_println)
+}
+
+var (
+ _V_writeBarrier = jit.Imm(int64(_runtime_writeBarrier))
+
+ _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX)
+)
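+
+// Editor's note: an illustrative sketch, not part of the vendored source.
+// WriteRecNotAX below hand-rolls what an ordinary Go pointer store does
+// implicitly: check runtime.writeBarrier and, when enabled, route the store
+// through the GC write-barrier stub. In plain Go the equivalent is just an
+// assignment, as in this hypothetical helper.
+func writeRecSketch(rec *unsafe.Pointer, ptr unsafe.Pointer) {
+    *rec = ptr // the compiler inserts the same barrier check automatically
+}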
+
+func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr) {
+ if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX {
+ panic("rec contains AX!")
+ }
+ self.Emit("MOVQ", _V_writeBarrier, _R10)
+ self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0))
+ self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", ptr, _AX)
+ self.xsave(_DI)
+ self.Emit("LEAQ", rec, _DI)
+ self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX
+ self.Rjmp("CALL", _R10)
+ self.xload(_DI)
+ self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", ptr, rec)
+ self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+}
\ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go117.go b/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go117.go
new file mode 100644
index 000000000..8cd83e868
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/assembler_amd64_go117.go
@@ -0,0 +1,1201 @@
+//go:build go1.17 && !go1.21
+// +build go1.17,!go1.21
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `fmt`
+ `reflect`
+ `strconv`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/cpu`
+ `github.com/bytedance/sonic/internal/jit`
+ `github.com/bytedance/sonic/internal/native/types`
+ `github.com/twitchyliquid64/golang-asm/obj`
+ `github.com/twitchyliquid64/golang-asm/obj/x86`
+
+ `github.com/bytedance/sonic/internal/native`
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+/** Register Allocations
+ *
+ * State Registers:
+ *
+ * %r15 : stack base
+ * %rdi : result pointer
+ * %rsi : result length
+ * %rdx : result capacity
+ * %r10 : sp->p
+ * %r11 : sp->q
+ * %r12 : sp->x
+ * %r13 : sp->f
+ *
+ * Error Registers:
+ *
+ * %rax : error type register
+ * %rbx : error pointer register
+ */
+
+/** Function Prototype & Stack Map
+ *
+ * func (buf *[]byte, p unsafe.Pointer, sb *_Stack, fv uint64) (err error)
+ *
+ * buf : (FP)
+ * p : 8(FP)
+ * sb : 16(FP)
+ * fv : 24(FP)
+ * err.vt : 32(FP)
+ * err.vp : 40(FP)
+ */
+
+const (
+ _S_cond = iota
+ _S_init
+)
+
+const (
+ _FP_args = 32 // 32 bytes for spill registers of arguments
+ _FP_fargs = 40 // 40 bytes for passing arguments to other Go functions
+ _FP_saves = 64 // 64 bytes for saving the registers before CALL instructions
+ _FP_locals = 24 // 24 bytes for local variables
+)
+
+const (
+ _FP_loffs = _FP_fargs + _FP_saves
+ _FP_offs = _FP_loffs + _FP_locals
+ // _FP_offs = _FP_loffs + _FP_locals + _FP_debug
+ _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer
+ _FP_base = _FP_size + 8 // 8 bytes for the return address
+)
+
+const (
+ _FM_exp32 = 0x7f800000
+ _FM_exp64 = 0x7ff0000000000000
+)
+
+const (
+ _IM_null = 0x6c6c756e // 'null'
+ _IM_true = 0x65757274 // 'true'
+ _IM_fals = 0x736c6166 // 'fals' ('false' without the 'e')
+ _IM_open = 0x00225c22 // '"\"∅'
+ _IM_array = 0x5d5b // '[]'
+ _IM_object = 0x7d7b // '{}'
+ _IM_mulv = -0x5555555555555555
+)
+
+const (
+ _LB_more_space = "_more_space"
+ _LB_more_space_return = "_more_space_return_"
+)
+
+const (
+ _LB_error = "_error"
+ _LB_error_too_deep = "_error_too_deep"
+ _LB_error_invalid_number = "_error_invalid_number"
+ _LB_error_nan_or_infinite = "_error_nan_or_infinite"
+ _LB_panic = "_panic"
+)
+
+var (
+ _AX = jit.Reg("AX")
+ _BX = jit.Reg("BX")
+ _CX = jit.Reg("CX")
+ _DX = jit.Reg("DX")
+ _DI = jit.Reg("DI")
+ _SI = jit.Reg("SI")
+ _BP = jit.Reg("BP")
+ _SP = jit.Reg("SP")
+ _R8 = jit.Reg("R8")
+ _R9 = jit.Reg("R9")
+)
+
+var (
+ _X0 = jit.Reg("X0")
+ _Y0 = jit.Reg("Y0")
+)
+
+var (
+ _ST = jit.Reg("R15") // can't use R14 since it's always scratched by Go...
+ _RP = jit.Reg("DI")
+ _RL = jit.Reg("SI")
+ _RC = jit.Reg("DX")
+)
+
+var (
+ _LR = jit.Reg("R9")
+ _ET = jit.Reg("AX")
+ _EP = jit.Reg("BX")
+)
+
+var (
+ _SP_p = jit.Reg("R10") // saved on BX when call_c
+ _SP_q = jit.Reg("R11") // saved on BP when call_c
+ _SP_x = jit.Reg("R12")
+ _SP_f = jit.Reg("R13")
+)
+
+var (
+ _ARG_rb = jit.Ptr(_SP, _FP_base)
+ _ARG_vp = jit.Ptr(_SP, _FP_base + 8)
+ _ARG_sb = jit.Ptr(_SP, _FP_base + 16)
+ _ARG_fv = jit.Ptr(_SP, _FP_base + 24)
+)
+
+var (
+ _RET_et = _ET
+ _RET_ep = _EP
+)
+
+var (
+ _VAR_sp = jit.Ptr(_SP, _FP_fargs + _FP_saves)
+ _VAR_dn = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8)
+ _VAR_vp = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16)
+)
+
+var (
+ _REG_ffi = []obj.Addr{ _RP, _RL, _RC}
+ _REG_b64 = []obj.Addr{_SP_p, _SP_q}
+
+ _REG_all = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RP, _RL, _RC}
+ _REG_ms = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _LR}
+ _REG_enc = []obj.Addr{_ST, _SP_x, _SP_f, _SP_p, _SP_q, _RL}
+)
+
+type _Assembler struct {
+ jit.BaseAssembler
+ p _Program
+ x int
+ name string
+}
+
+func newAssembler(p _Program) *_Assembler {
+ return new(_Assembler).Init(p)
+}
+
+/** Assembler Interface **/
+
+func (self *_Assembler) Load() _Encoder {
+ return ptoenc(self.BaseAssembler.Load("encode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs))
+}
+
+func (self *_Assembler) Init(p _Program) *_Assembler {
+ self.p = p
+ self.BaseAssembler.Init(self.compile)
+ return self
+}
+
+func (self *_Assembler) compile() {
+ self.prologue()
+ self.instrs()
+ self.epilogue()
+ self.builtins()
+}
+
+/** Assembler Stages **/
+
+var _OpFuncTab = [256]func(*_Assembler, *_Instr) {
+ _OP_null : (*_Assembler)._asm_OP_null,
+ _OP_empty_arr : (*_Assembler)._asm_OP_empty_arr,
+ _OP_empty_obj : (*_Assembler)._asm_OP_empty_obj,
+ _OP_bool : (*_Assembler)._asm_OP_bool,
+ _OP_i8 : (*_Assembler)._asm_OP_i8,
+ _OP_i16 : (*_Assembler)._asm_OP_i16,
+ _OP_i32 : (*_Assembler)._asm_OP_i32,
+ _OP_i64 : (*_Assembler)._asm_OP_i64,
+ _OP_u8 : (*_Assembler)._asm_OP_u8,
+ _OP_u16 : (*_Assembler)._asm_OP_u16,
+ _OP_u32 : (*_Assembler)._asm_OP_u32,
+ _OP_u64 : (*_Assembler)._asm_OP_u64,
+ _OP_f32 : (*_Assembler)._asm_OP_f32,
+ _OP_f64 : (*_Assembler)._asm_OP_f64,
+ _OP_str : (*_Assembler)._asm_OP_str,
+ _OP_bin : (*_Assembler)._asm_OP_bin,
+ _OP_quote : (*_Assembler)._asm_OP_quote,
+ _OP_number : (*_Assembler)._asm_OP_number,
+ _OP_eface : (*_Assembler)._asm_OP_eface,
+ _OP_iface : (*_Assembler)._asm_OP_iface,
+ _OP_byte : (*_Assembler)._asm_OP_byte,
+ _OP_text : (*_Assembler)._asm_OP_text,
+ _OP_deref : (*_Assembler)._asm_OP_deref,
+ _OP_index : (*_Assembler)._asm_OP_index,
+ _OP_load : (*_Assembler)._asm_OP_load,
+ _OP_save : (*_Assembler)._asm_OP_save,
+ _OP_drop : (*_Assembler)._asm_OP_drop,
+ _OP_drop_2 : (*_Assembler)._asm_OP_drop_2,
+ _OP_recurse : (*_Assembler)._asm_OP_recurse,
+ _OP_is_nil : (*_Assembler)._asm_OP_is_nil,
+ _OP_is_nil_p1 : (*_Assembler)._asm_OP_is_nil_p1,
+ _OP_is_zero_1 : (*_Assembler)._asm_OP_is_zero_1,
+ _OP_is_zero_2 : (*_Assembler)._asm_OP_is_zero_2,
+ _OP_is_zero_4 : (*_Assembler)._asm_OP_is_zero_4,
+ _OP_is_zero_8 : (*_Assembler)._asm_OP_is_zero_8,
+ _OP_is_zero_map : (*_Assembler)._asm_OP_is_zero_map,
+ _OP_goto : (*_Assembler)._asm_OP_goto,
+ _OP_map_iter : (*_Assembler)._asm_OP_map_iter,
+ _OP_map_stop : (*_Assembler)._asm_OP_map_stop,
+ _OP_map_check_key : (*_Assembler)._asm_OP_map_check_key,
+ _OP_map_write_key : (*_Assembler)._asm_OP_map_write_key,
+ _OP_map_value_next : (*_Assembler)._asm_OP_map_value_next,
+ _OP_slice_len : (*_Assembler)._asm_OP_slice_len,
+ _OP_slice_next : (*_Assembler)._asm_OP_slice_next,
+ _OP_marshal : (*_Assembler)._asm_OP_marshal,
+ _OP_marshal_p : (*_Assembler)._asm_OP_marshal_p,
+ _OP_marshal_text : (*_Assembler)._asm_OP_marshal_text,
+ _OP_marshal_text_p : (*_Assembler)._asm_OP_marshal_text_p,
+ _OP_cond_set : (*_Assembler)._asm_OP_cond_set,
+ _OP_cond_testc : (*_Assembler)._asm_OP_cond_testc,
+}
+
+func (self *_Assembler) instr(v *_Instr) {
+ if fn := _OpFuncTab[v.op()]; fn != nil {
+ fn(self, v)
+ } else {
+ panic(fmt.Sprintf("invalid opcode: %d", v.op()))
+ }
+}
+
+func (self *_Assembler) instrs() {
+ for i, v := range self.p {
+ self.Mark(i)
+ self.instr(&v)
+ self.debug_instr(i, &v)
+ }
+}
+
+func (self *_Assembler) builtins() {
+ self.more_space()
+ self.error_too_deep()
+ self.error_invalid_number()
+ self.error_nan_or_infinite()
+ self.go_panic()
+}
+
+func (self *_Assembler) epilogue() {
+ self.Mark(len(self.p))
+ self.Emit("XORL", _ET, _ET)
+ self.Emit("XORL", _EP, _EP)
+ self.Link(_LB_error)
+ self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX
+ self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX)
+ self.Emit("MOVQ", jit.Imm(0), _ARG_rb) // MOVQ AX, rb<>+0(FP)
+ self.Emit("MOVQ", jit.Imm(0), _ARG_vp) // MOVQ BX, vp<>+8(FP)
+ self.Emit("MOVQ", jit.Imm(0), _ARG_sb) // MOVQ CX, sb<>+16(FP)
+ self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP
+ self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP
+ self.Emit("RET") // RET
+}
+
+func (self *_Assembler) prologue() {
+ self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP
+ self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP)
+ self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP
+ self.Emit("MOVQ", _AX, _ARG_rb) // MOVQ AX, rb<>+0(FP)
+ self.Emit("MOVQ", _BX, _ARG_vp) // MOVQ BX, vp<>+8(FP)
+ self.Emit("MOVQ", _CX, _ARG_sb) // MOVQ CX, sb<>+16(FP)
+ self.Emit("MOVQ", _DI, _ARG_fv) // MOVQ DI, rb<>+24(FP)
+ self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX) , DI
+ self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX) , SI
+ self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), DX
+ self.Emit("MOVQ", _BX, _SP_p) // MOVQ BX, R10
+ self.Emit("MOVQ", _CX, _ST) // MOVQ CX, R8
+ self.Emit("XORL", _SP_x, _SP_x) // XORL R10, R12
+ self.Emit("XORL", _SP_f, _SP_f) // XORL R11, R13
+ self.Emit("XORL", _SP_q, _SP_q) // XORL R13, R11
+}
+
+/** Assembler Inline Functions **/
+
+func (self *_Assembler) xsave(reg ...obj.Addr) {
+ for i, v := range reg {
+ if i > _FP_saves / 8 - 1 {
+ panic("too many registers to save")
+ } else {
+ self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8))
+ }
+ }
+}
+
+func (self *_Assembler) xload(reg ...obj.Addr) {
+ for i, v := range reg {
+ if i > _FP_saves / 8 - 1 {
+ panic("too many registers to load")
+ } else {
+ self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v)
+ }
+ }
+}
+
+func (self *_Assembler) rbuf_di() {
+ if _RP.Reg != x86.REG_DI {
+ panic("register allocation messed up: RP != DI")
+ } else {
+ self.Emit("ADDQ", _RL, _RP)
+ }
+}
+
+func (self *_Assembler) store_int(nd int, fn obj.Addr, ins string) {
+ self.check_size(nd)
+ self.save_c() // SAVE $C_regs
+ self.rbuf_di() // MOVQ RP, DI
+ self.Emit(ins, jit.Ptr(_SP_p, 0), _SI) // $ins (SP.p), SI
+ self.call_c(fn) // CALL_C $fn
+ self.Emit("ADDQ", _AX, _RL) // ADDQ AX, RL
+}
+
+func (self *_Assembler) store_str(s string) {
+ i := 0
+ m := rt.Str2Mem(s)
+
+ /* 8-byte stores */
+ for i <= len(m) - 8 {
+ self.Emit("MOVQ", jit.Imm(rt.Get64(m[i:])), _AX) // MOVQ $s[i:], AX
+ self.Emit("MOVQ", _AX, jit.Sib(_RP, _RL, 1, int64(i))) // MOVQ AX, i(RP)(RL)
+ i += 8
+ }
+
+ /* 4-byte stores */
+ if i <= len(m) - 4 {
+ self.Emit("MOVL", jit.Imm(int64(rt.Get32(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVL $s[i:], i(RP)(RL)
+ i += 4
+ }
+
+ /* 2-byte stores */
+ if i <= len(m) - 2 {
+ self.Emit("MOVW", jit.Imm(int64(rt.Get16(m[i:]))), jit.Sib(_RP, _RL, 1, int64(i))) // MOVW $s[i:], i(RP)(RL)
+ i += 2
+ }
+
+ /* last byte */
+ if i < len(m) {
+ self.Emit("MOVB", jit.Imm(int64(m[i])), jit.Sib(_RP, _RL, 1, int64(i))) // MOVB $s[i:], i(RP)(RL)
+ }
+}
+
+func (self *_Assembler) check_size(n int) {
+ self.check_size_rl(jit.Ptr(_RL, int64(n)))
+}
+
+func (self *_Assembler) check_size_r(r obj.Addr, d int) {
+ self.check_size_rl(jit.Sib(_RL, r, 1, int64(d)))
+}
+
+func (self *_Assembler) check_size_rl(v obj.Addr) {
+ idx := self.x
+ key := _LB_more_space_return + strconv.Itoa(idx)
+
+ /* the following code relies on LR == R9 to work */
+ if _LR.Reg != x86.REG_R9 {
+ panic("register allocation messed up: LR != R9")
+ }
+
+ /* check for buffer capacity */
+ self.x++
+ self.Emit("LEAQ", v, _AX) // LEAQ $v, AX
+ self.Emit("CMPQ", _AX, _RC) // CMPQ AX, RC
+ self.Sjmp("JBE" , key) // JBE _more_space_return_{n}
+ self.slice_grow_ax(key) // GROW $key
+ self.Link(key) // _more_space_return_{n}:
+}
+
+func (self *_Assembler) slice_grow_ax(ret string) {
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ ?(PC), R9
+ self.Sref(ret, 4) // .... &ret
+ self.Sjmp("JMP" , _LB_more_space) // JMP _more_space
+}
+
+/** State Stack Helpers **/
+
+const (
+ _StateSize = int64(unsafe.Sizeof(_State{}))
+ _StackLimit = _MaxStack * _StateSize
+)
+
+func (self *_Assembler) save_state() {
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX
+ self.Emit("LEAQ", jit.Ptr(_CX, _StateSize), _R9) // LEAQ _StateSize(CX), R9
+ self.Emit("CMPQ", _R9, jit.Imm(_StackLimit)) // CMPQ R9, $_StackLimit
+ self.Sjmp("JAE" , _LB_error_too_deep) // JA _error_too_deep
+ self.Emit("MOVQ", _SP_x, jit.Sib(_ST, _CX, 1, 8)) // MOVQ SP.x, 8(ST)(CX)
+ self.Emit("MOVQ", _SP_f, jit.Sib(_ST, _CX, 1, 16)) // MOVQ SP.f, 16(ST)(CX)
+ self.WriteRecNotAX(0, _SP_p, jit.Sib(_ST, _CX, 1, 24)) // MOVQ SP.p, 24(ST)(CX)
+ self.WriteRecNotAX(1, _SP_q, jit.Sib(_ST, _CX, 1, 32)) // MOVQ SP.q, 32(ST)(CX)
+ self.Emit("MOVQ", _R9, jit.Ptr(_ST, 0)) // MOVQ R9, (ST)
+}
+
+func (self *_Assembler) drop_state(decr int64) {
+ self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("SUBQ" , jit.Imm(decr), _AX) // SUBQ $decr, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST)
+ self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _SP_x) // MOVQ 8(ST)(AX), SP.x
+ self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 16), _SP_f) // MOVQ 16(ST)(AX), SP.f
+ self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 24), _SP_p) // MOVQ 24(ST)(AX), SP.p
+ self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 32), _SP_q) // MOVQ 32(ST)(AX), SP.q
+ self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
+ self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX)
+ self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 24)) // MOVOU X0, 24(ST)(AX)
+}
+
+/** Buffer Helpers **/
+
+func (self *_Assembler) add_char(ch byte) {
+ self.Emit("MOVB", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVB $ch, (RP)(RL)
+ self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL
+}
+
+func (self *_Assembler) add_long(ch uint32, n int64) {
+ self.Emit("MOVL", jit.Imm(int64(ch)), jit.Sib(_RP, _RL, 1, 0)) // MOVL $ch, (RP)(RL)
+ self.Emit("ADDQ", jit.Imm(n), _RL) // ADDQ $n, RL
+}
+
+func (self *_Assembler) add_text(ss string) {
+ self.store_str(ss) // TEXT $ss
+ self.Emit("ADDQ", jit.Imm(int64(len(ss))), _RL) // ADDQ ${len(ss)}, RL
+}
+
+// get *buf at AX
+func (self *_Assembler) prep_buffer_AX() {
+ self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX
+ self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX)
+}
+
+func (self *_Assembler) save_buffer() {
+ self.Emit("MOVQ", _ARG_rb, _CX) // MOVQ rb<>+0(FP), CX
+ self.Emit("MOVQ", _RP, jit.Ptr(_CX, 0)) // MOVQ RP, (CX)
+ self.Emit("MOVQ", _RL, jit.Ptr(_CX, 8)) // MOVQ RL, 8(CX)
+ self.Emit("MOVQ", _RC, jit.Ptr(_CX, 16)) // MOVQ RC, 16(CX)
+}
+
+// get *buf at AX
+func (self *_Assembler) load_buffer_AX() {
+ self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX
+ self.Emit("MOVQ", jit.Ptr(_AX, 0), _RP) // MOVQ (AX), RP
+ self.Emit("MOVQ", jit.Ptr(_AX, 8), _RL) // MOVQ 8(AX), RL
+ self.Emit("MOVQ", jit.Ptr(_AX, 16), _RC) // MOVQ 16(AX), RC
+}
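
The three buffer registers simply shadow the output slice header: RP is the data pointer, RL the length and RC the capacity, which is why prep_buffer_AX / save_buffer / load_buffer_AX read and write offsets 0, 8 and 16 of *buf. A small, hedged illustration of that correspondence:

    import (
        "fmt"
        "unsafe"
    )

    // sliceHeader has the same in-memory layout as a []byte header on amd64.
    type sliceHeader struct {
        Data unsafe.Pointer // offset 0  -> RP
        Len  int            // offset 8  -> RL
        Cap  int            // offset 16 -> RC
    }

    func inspect(buf *[]byte) {
        h := (*sliceHeader)(unsafe.Pointer(buf))
        fmt.Printf("RP=%p RL=%d RC=%d\n", h.Data, h.Len, h.Cap)
    }
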
+
+/** Function Interface Helpers **/
+
+func (self *_Assembler) call(pc obj.Addr) {
+ self.Emit("MOVQ", pc, _LR) // MOVQ $pc, AX
+ self.Rjmp("CALL", _LR) // CALL AX
+}
+
+func (self *_Assembler) save_c() {
+ self.xsave(_REG_ffi...) // SAVE $REG_ffi
+}
+
+func (self *_Assembler) call_b64(pc obj.Addr) {
+ self.xsave(_REG_b64...) // SAVE $REG_b64
+ self.call(pc) // CALL $pc
+ self.xload(_REG_b64...) // LOAD $REG_b64
+}
+
+func (self *_Assembler) call_c(pc obj.Addr) {
+ self.Emit("XCHGQ", _SP_p, _BX)
+ self.Emit("XCHGQ", _SP_q, _BP)
+ self.call(pc) // CALL $pc
+ self.xload(_REG_ffi...) // LOAD $REG_ffi
+ self.Emit("XCHGQ", _SP_p, _BX)
+ self.Emit("XCHGQ", _SP_q, _BP)
+}
+
+func (self *_Assembler) call_go(pc obj.Addr) {
+ self.xsave(_REG_all...) // SAVE $REG_all
+ self.call(pc) // CALL $pc
+ self.xload(_REG_all...) // LOAD $REG_all
+}
+
+func (self *_Assembler) call_more_space(pc obj.Addr) {
+ self.xsave(_REG_ms...) // SAVE $REG_ms
+ self.call(pc) // CALL $pc
+ self.xload(_REG_ms...) // LOAD $REG_ms
+}
+
+func (self *_Assembler) call_encoder(pc obj.Addr) {
+ self.xsave(_REG_enc...) // SAVE $REG_enc
+ self.call(pc) // CALL $pc
+ self.xload(_REG_enc...) // LOAD $REG_enc
+}
+
+func (self *_Assembler) call_marshaler(fn obj.Addr, it *rt.GoType, vt reflect.Type) {
+ switch vt.Kind() {
+ case reflect.Interface : self.call_marshaler_i(fn, it)
+ case reflect.Ptr, reflect.Map : self.call_marshaler_v(fn, it, vt, true)
+ default : self.call_marshaler_v(fn, it, vt, false)
+ }
+}
+
+func (self *_Assembler) call_marshaler_i(fn obj.Addr, it *rt.GoType) {
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JZ" , "_null_{n}") // JZ _null_{n}
+ self.Emit("MOVQ" , _AX, _BX) // MOVQ AX, BX
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _CX) // MOVQ 8(SP.p), CX
+ self.Emit("MOVQ" , jit.Gtype(it), _AX) // MOVQ $it, AX
+ self.call_go(_F_assertI2I) // CALL_GO assertI2I
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JZ" , "_null_{n}") // JZ _null_{n}
+ self.Emit("MOVQ", _BX, _CX) // MOVQ BX, CX
+ self.Emit("MOVQ", _AX, _BX) // MOVQ AX, BX
+ self.prep_buffer_AX()
+ self.Emit("MOVQ", _ARG_fv, _DI) // MOVQ ARG.fv, DI
+ self.call_go(fn) // CALL $fn
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+ self.load_buffer_AX()
+ self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n}
+ self.Link("_null_{n}") // _null_{n}:
+ self.check_size(4) // SIZE $4
+ self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1)
+ self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL
+ self.Link("_done_{n}") // _done_{n}:
+}
+
+func (self *_Assembler) call_marshaler_v(fn obj.Addr, it *rt.GoType, vt reflect.Type, deref bool) {
+ self.prep_buffer_AX() // MOVE {buf}, (SP)
+ self.Emit("MOVQ", jit.Itab(it, vt), _BX) // MOVQ $(itab(it, vt)), BX
+
+ /* dereference the pointer if needed */
+ if !deref {
+ self.Emit("MOVQ", _SP_p, _CX) // MOVQ SP.p, CX
+ } else {
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _CX) // MOVQ 0(SP.p), CX
+ }
+
+ /* call the encoder, and perform error checks */
+ self.Emit("MOVQ", _ARG_fv, _DI) // MOVQ ARG.fv, DI
+ self.call_go(fn) // CALL $fn
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+ self.load_buffer_AX()
+}
+
+/** Builtin: _more_space **/
+
+var (
+ _T_byte = jit.Type(byteType)
+ _F_growslice = jit.Func(growslice)
+)
+
+// AX must hold the requested size n
+func (self *_Assembler) more_space() {
+ self.Link(_LB_more_space)
+ self.Emit("MOVQ", _RP, _BX) // MOVQ DI, BX
+ self.Emit("MOVQ", _RL, _CX) // MOVQ SI, CX
+ self.Emit("MOVQ", _RC, _DI) // MOVQ DX, DI
+ self.Emit("MOVQ", _AX, _SI) // MOVQ AX, SI
+ self.Emit("MOVQ", _T_byte, _AX) // MOVQ $_T_byte, AX
+ self.call_more_space(_F_growslice) // CALL $pc
+ self.Emit("MOVQ", _AX, _RP) // MOVQ AX, DI
+ self.Emit("MOVQ", _BX, _RL) // MOVQ BX, SI
+ self.Emit("MOVQ", _CX, _RC) // MOVQ CX, DX
+ self.save_buffer() // SAVE {buf}
+ self.Rjmp("JMP" , _LR) // JMP LR
+}
+
+/** Builtin Errors **/
+
+var (
+ _V_ERR_too_deep = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_too_deep))))
+ _V_ERR_nan_or_infinite = jit.Imm(int64(uintptr(unsafe.Pointer(_ERR_nan_or_infinite))))
+ _I_json_UnsupportedValueError = jit.Itab(rt.UnpackType(errorType), jsonUnsupportedValueType)
+)
+
+func (self *_Assembler) error_too_deep() {
+ self.Link(_LB_error_too_deep)
+ self.Emit("MOVQ", _V_ERR_too_deep, _EP) // MOVQ $_V_ERR_too_deep, EP
+ self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValuError, ET
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) error_invalid_number() {
+ self.Link(_LB_error_invalid_number)
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _AX) // MOVQ 0(SP), AX
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _BX) // MOVQ 8(SP), BX
+ self.call_go(_F_error_number) // CALL_GO error_number
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) error_nan_or_infinite() {
+ self.Link(_LB_error_nan_or_infinite)
+ self.Emit("MOVQ", _V_ERR_nan_or_infinite, _EP) // MOVQ $_V_ERR_nan_or_infinite, EP
+ self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ $_I_json_UnsupportedValuError, ET
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+/** String Encoding Routine **/
+
+var (
+ _F_quote = jit.Imm(int64(native.S_quote))
+ _F_panic = jit.Func(goPanic)
+)
+
+func (self *_Assembler) go_panic() {
+ self.Link(_LB_panic)
+ self.Emit("MOVQ", _SP_p, _BX)
+ self.call_go(_F_panic)
+}
+
+func (self *_Assembler) encode_string(doubleQuote bool) {
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JZ" , "_str_empty_{n}") // JZ _str_empty_{n}
+ self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0))
+ self.Sjmp("JNE" , "_str_next_{n}")
+ self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), _AX)
+ self.Sjmp("JMP", _LB_panic)
+ self.Link("_str_next_{n}")
+
+ /* opening quote, check for double quote */
+ if !doubleQuote {
+ self.check_size_r(_AX, 2) // SIZE $2
+ self.add_char('"') // CHAR $'"'
+ } else {
+ self.check_size_r(_AX, 6) // SIZE $6
+ self.add_long(_IM_open, 3) // TEXT $`"\"`
+ }
+
+ /* quoting loop */
+ self.Emit("XORL", _AX, _AX) // XORL AX, AX
+ self.Emit("MOVQ", _AX, _VAR_sp) // MOVQ AX, sp
+ self.Link("_str_loop_{n}") // _str_loop_{n}:
+ self.save_c() // SAVE $REG_ffi
+
+ /* load the output buffer first, and then input buffer,
+ * because the parameter registers collide with RP / RL / RC */
+ self.Emit("MOVQ", _RC, _CX) // MOVQ RC, CX
+ self.Emit("SUBQ", _RL, _CX) // SUBQ RL, CX
+ self.Emit("MOVQ", _CX, _VAR_dn) // MOVQ CX, dn
+ self.Emit("LEAQ", jit.Sib(_RP, _RL, 1, 0), _DX) // LEAQ (RP)(RL), DX
+ self.Emit("LEAQ", _VAR_dn, _CX) // LEAQ dn, CX
+ self.Emit("MOVQ", _VAR_sp, _AX) // MOVQ sp, AX
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _DI) // MOVQ (SP.p), DI
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _SI) // MOVQ 8(SP.p), SI
+ self.Emit("ADDQ", _AX, _DI) // ADDQ AX, DI
+ self.Emit("SUBQ", _AX, _SI) // SUBQ AX, SI
+
+ /* set the flags based on `doubleQuote` */
+ if !doubleQuote {
+ self.Emit("XORL", _R8, _R8) // XORL R8, R8
+ } else {
+ self.Emit("MOVL", jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8
+ }
+
+ /* call the native quoter */
+ self.call_c(_F_quote) // CALL quote
+ self.Emit("ADDQ" , _VAR_dn, _RL) // ADDQ dn, RL
+
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , "_str_space_{n}") // JS _str_space_{n}
+
+ /* close the string, check for double quote */
+ if !doubleQuote {
+ self.check_size(1) // SIZE $1
+ self.add_char('"') // CHAR $'"'
+ self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n}
+ } else {
+ self.check_size(3) // SIZE $3
+ self.add_text("\\\"\"") // TEXT $'\""'
+ self.Sjmp("JMP", "_str_end_{n}") // JMP _str_end_{n}
+ }
+
+ /* not enough space to contain the quoted string */
+ self.Link("_str_space_{n}") // _str_space_{n}:
+ self.Emit("NOTQ", _AX) // NOTQ AX
+ self.Emit("ADDQ", _AX, _VAR_sp) // ADDQ AX, sp
+ self.Emit("LEAQ", jit.Sib(_RC, _RC, 1, 0), _AX) // LEAQ (RC)(RC), AX
+ self.slice_grow_ax("_str_loop_{n}") // GROW _str_loop_{n}
+
+ /* empty string, check for double quote */
+ if !doubleQuote {
+ self.Link("_str_empty_{n}") // _str_empty_{n}:
+ self.check_size(2) // SIZE $2
+ self.add_text("\"\"") // TEXT $'""'
+ self.Link("_str_end_{n}") // _str_end_{n}:
+ } else {
+ self.Link("_str_empty_{n}") // _str_empty_{n}:
+ self.check_size(6) // SIZE $6
+ self.add_text("\"\\\"\\\"\"") // TEXT $'"\"\""'
+ self.Link("_str_end_{n}") // _str_end_{n}:
+ }
+}
+
+/** OpCode Assembler Functions **/
+
+var (
+ _T_json_Marshaler = rt.UnpackType(jsonMarshalerType)
+ _T_encoding_TextMarshaler = rt.UnpackType(encodingTextMarshalerType)
+)
+
+var (
+ _F_f64toa = jit.Imm(int64(native.S_f64toa))
+ _F_f32toa = jit.Imm(int64(native.S_f32toa))
+ _F_i64toa = jit.Imm(int64(native.S_i64toa))
+ _F_u64toa = jit.Imm(int64(native.S_u64toa))
+ _F_b64encode = jit.Imm(int64(_subr__b64encode))
+)
+
+var (
+ _F_memmove = jit.Func(memmove)
+ _F_error_number = jit.Func(error_number)
+ _F_isValidNumber = jit.Func(isValidNumber)
+)
+
+var (
+ _F_iteratorStop = jit.Func(iteratorStop)
+ _F_iteratorNext = jit.Func(iteratorNext)
+ _F_iteratorStart = jit.Func(iteratorStart)
+)
+
+var (
+ _F_encodeTypedPointer obj.Addr
+ _F_encodeJsonMarshaler obj.Addr
+ _F_encodeTextMarshaler obj.Addr
+)
+
+const (
+ _MODE_AVX2 = 1 << 2
+)
+
+func init() {
+ _F_encodeTypedPointer = jit.Func(encodeTypedPointer)
+ _F_encodeJsonMarshaler = jit.Func(encodeJsonMarshaler)
+ _F_encodeTextMarshaler = jit.Func(encodeTextMarshaler)
+}
+
+func (self *_Assembler) _asm_OP_null(_ *_Instr) {
+ self.check_size(4)
+ self.Emit("MOVL", jit.Imm(_IM_null), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'null', (RP)(RL*1)
+ self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL
+}
+
+func (self *_Assembler) _asm_OP_empty_arr(_ *_Instr) {
+ self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv)
+ self.Sjmp("JC", "_empty_arr_{n}")
+ self._asm_OP_null(nil)
+ self.Sjmp("JMP", "_empty_arr_end_{n}")
+ self.Link("_empty_arr_{n}")
+ self.check_size(2)
+ self.Emit("MOVW", jit.Imm(_IM_array), jit.Sib(_RP, _RL, 1, 0))
+ self.Emit("ADDQ", jit.Imm(2), _RL)
+ self.Link("_empty_arr_end_{n}")
+}
+
+func (self *_Assembler) _asm_OP_empty_obj(_ *_Instr) {
+ self.Emit("BTQ", jit.Imm(int64(bitNoNullSliceOrMap)), _ARG_fv)
+ self.Sjmp("JC", "_empty_obj_{n}")
+ self._asm_OP_null(nil)
+ self.Sjmp("JMP", "_empty_obj_end_{n}")
+ self.Link("_empty_obj_{n}")
+ self.check_size(2)
+ self.Emit("MOVW", jit.Imm(_IM_object), jit.Sib(_RP, _RL, 1, 0))
+ self.Emit("ADDQ", jit.Imm(2), _RL)
+ self.Link("_empty_obj_end_{n}")
+}
+
+func (self *_Assembler) _asm_OP_bool(_ *_Instr) {
+ self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0
+ self.Sjmp("JE" , "_false_{n}") // JE _false_{n}
+ self.check_size(4) // SIZE $4
+ self.Emit("MOVL", jit.Imm(_IM_true), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'true', (RP)(RL*1)
+ self.Emit("ADDQ", jit.Imm(4), _RL) // ADDQ $4, RL
+ self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n}
+ self.Link("_false_{n}") // _false_{n}:
+ self.check_size(5) // SIZE $5
+ self.Emit("MOVL", jit.Imm(_IM_fals), jit.Sib(_RP, _RL, 1, 0)) // MOVL $'fals', (RP)(RL*1)
+ self.Emit("MOVB", jit.Imm('e'), jit.Sib(_RP, _RL, 1, 4)) // MOVB $'e', 4(RP)(RL*1)
+ self.Emit("ADDQ", jit.Imm(5), _RL) // ADDQ $5, RL
+ self.Link("_end_{n}") // _end_{n}:
+}
+
+func (self *_Assembler) _asm_OP_i8(_ *_Instr) {
+ self.store_int(4, _F_i64toa, "MOVBQSX")
+}
+
+func (self *_Assembler) _asm_OP_i16(_ *_Instr) {
+ self.store_int(6, _F_i64toa, "MOVWQSX")
+}
+
+func (self *_Assembler) _asm_OP_i32(_ *_Instr) {
+ self.store_int(17, _F_i64toa, "MOVLQSX")
+}
+
+func (self *_Assembler) _asm_OP_i64(_ *_Instr) {
+ self.store_int(21, _F_i64toa, "MOVQ")
+}
+
+func (self *_Assembler) _asm_OP_u8(_ *_Instr) {
+ self.store_int(3, _F_u64toa, "MOVBQZX")
+}
+
+func (self *_Assembler) _asm_OP_u16(_ *_Instr) {
+ self.store_int(5, _F_u64toa, "MOVWQZX")
+}
+
+func (self *_Assembler) _asm_OP_u32(_ *_Instr) {
+ self.store_int(16, _F_u64toa, "MOVLQZX")
+}
+
+func (self *_Assembler) _asm_OP_u64(_ *_Instr) {
+ self.store_int(20, _F_u64toa, "MOVQ")
+}
+
+func (self *_Assembler) _asm_OP_f32(_ *_Instr) {
+ self.check_size(32)
+ self.Emit("MOVL" , jit.Ptr(_SP_p, 0), _AX) // MOVL (SP.p), AX
+ self.Emit("ANDL" , jit.Imm(_FM_exp32), _AX) // ANDL $_FM_exp32, AX
+ self.Emit("XORL" , jit.Imm(_FM_exp32), _AX) // XORL $_FM_exp32, AX
+ self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite
+ self.save_c() // SAVE $C_regs
+ self.rbuf_di() // MOVQ RP, DI
+ self.Emit("MOVSS" , jit.Ptr(_SP_p, 0), _X0) // MOVSS (SP.p), X0
+ self.call_c(_F_f32toa) // CALL_C f32toa
+ self.Emit("ADDQ" , _AX, _RL) // ADDQ AX, RL
+}
+
+func (self *_Assembler) _asm_OP_f64(_ *_Instr) {
+ self.check_size(32)
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+ self.Emit("MOVQ" , jit.Imm(_FM_exp64), _CX) // MOVQ $_FM_exp64, CX
+ self.Emit("ANDQ" , _CX, _AX) // ANDQ CX, AX
+ self.Emit("XORQ" , _CX, _AX) // XORQ CX, AX
+ self.Sjmp("JZ" , _LB_error_nan_or_infinite) // JZ _error_nan_or_infinite
+ self.save_c() // SAVE $C_regs
+ self.rbuf_di() // MOVQ RP, DI
+ self.Emit("MOVSD" , jit.Ptr(_SP_p, 0), _X0) // MOVSD (SP.p), X0
+ self.call_c(_F_f64toa) // CALL_C f64toa
+ self.Emit("ADDQ" , _AX, _RL) // ADDQ AX, RL
+}
+
+func (self *_Assembler) _asm_OP_str(_ *_Instr) {
+ self.encode_string(false)
+}
+
+func (self *_Assembler) _asm_OP_bin(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _AX) // MOVQ 8(SP.p), AX
+ self.Emit("ADDQ", jit.Imm(2), _AX) // ADDQ $2, AX
+ self.Emit("MOVQ", jit.Imm(_IM_mulv), _CX) // MOVQ $_MF_mulv, CX
+ self.Emit("MOVQ", _DX, _BX) // MOVQ DX, BX
+ self.From("MULQ", _CX) // MULQ CX
+ self.Emit("LEAQ", jit.Sib(_DX, _DX, 1, 1), _AX) // LEAQ 1(DX)(DX), AX
+ self.Emit("ORQ" , jit.Imm(2), _AX) // ORQ $2, AX
+ self.Emit("MOVQ", _BX, _DX) // MOVQ BX, DX
+ self.check_size_r(_AX, 0) // SIZE AX
+ self.add_char('"') // CHAR $'"'
+ self.Emit("MOVQ", _ARG_rb, _DI) // MOVQ rb<>+0(FP), DI
+ self.Emit("MOVQ", _RL, jit.Ptr(_DI, 8)) // MOVQ SI, 8(DI)
+ self.Emit("MOVQ", _SP_p, _SI) // MOVQ SP.p, SI
+
+ /* check for AVX2 support */
+ if !cpu.HasAVX2 {
+ self.Emit("XORL", _DX, _DX) // XORL DX, DX
+ } else {
+ self.Emit("MOVL", jit.Imm(_MODE_AVX2), _DX) // MOVL $_MODE_AVX2, DX
+ }
+
+ /* call the encoder */
+ self.call_b64(_F_b64encode) // CALL b64encode
+ self.load_buffer_AX() // LOAD {buf}
+ self.add_char('"') // CHAR $'"'
+}
+
+func (self *_Assembler) _asm_OP_quote(_ *_Instr) {
+ self.encode_string(true)
+}
+
+func (self *_Assembler) _asm_OP_number(_ *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _BX) // MOVQ (SP.p), BX
+ self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX
+ self.Sjmp("JZ" , "_empty_{n}")
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNZ" , "_number_next_{n}")
+ self.Emit("MOVQ", jit.Imm(int64(panicNilPointerOfNonEmptyString)), _AX)
+ self.Sjmp("JMP", _LB_panic)
+ self.Link("_number_next_{n}")
+ self.call_go(_F_isValidNumber) // CALL_GO isValidNumber
+ self.Emit("CMPB" , _AX, jit.Imm(0)) // CMPB AX, $0
+ self.Sjmp("JE" , _LB_error_invalid_number) // JE _error_invalid_number
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _BX) // MOVQ (SP.p), BX
+ self.check_size_r(_BX, 0) // SIZE BX
+ self.Emit("LEAQ" , jit.Sib(_RP, _RL, 1, 0), _AX) // LEAQ (RP)(RL), AX
+ self.Emit("ADDQ" , jit.Ptr(_SP_p, 8), _RL) // ADDQ 8(SP.p), RL
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _BX) // MOVOU (SP.p), BX
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 8), _CX) // MOVOU X0, 8(SP)
+ self.call_go(_F_memmove) // CALL_GO memmove
+ self.Emit("MOVQ", _ARG_rb, _AX) // MOVQ rb<>+0(FP), AX
+ self.Emit("MOVQ", _RL, jit.Ptr(_AX, 8)) // MOVQ RL, 8(AX)
+ self.Sjmp("JMP" , "_done_{n}") // JMP _done_{n}
+ self.Link("_empty_{n}") // _empty_{n}
+ self.check_size(1) // SIZE $1
+ self.add_char('0') // CHAR $'0'
+ self.Link("_done_{n}") // _done_{n}:
+}
+
+func (self *_Assembler) _asm_OP_eface(_ *_Instr) {
+ self.prep_buffer_AX() // MOVE {buf}, AX
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _BX) // MOVQ (SP.p), BX
+ self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _CX) // LEAQ 8(SP.p), CX
+ self.Emit("MOVQ" , _ST, _DI) // MOVQ ST, DI
+ self.Emit("MOVQ" , _ARG_fv, _SI) // MOVQ fv, AX
+ self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+ self.load_buffer_AX()
+}
+
+func (self *_Assembler) _asm_OP_iface(_ *_Instr) {
+ self.prep_buffer_AX() // MOVE {buf}, AX
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _CX) // MOVQ (SP.p), CX
+ self.Emit("MOVQ" , jit.Ptr(_CX, 8), _BX) // MOVQ 8(CX), BX
+ self.Emit("LEAQ" , jit.Ptr(_SP_p, 8), _CX) // LEAQ 8(SP.p), CX
+ self.Emit("MOVQ" , _ST, _DI) // MOVQ ST, DI
+ self.Emit("MOVQ" , _ARG_fv, _SI) // MOVQ fv, AX
+ self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+ self.load_buffer_AX()
+}
+
+func (self *_Assembler) _asm_OP_byte(p *_Instr) {
+ self.check_size(1)
+ self.Emit("MOVB", jit.Imm(p.i64()), jit.Sib(_RP, _RL, 1, 0)) // MOVL p.vi(), (RP)(RL*1)
+ self.Emit("ADDQ", jit.Imm(1), _RL) // ADDQ $1, RL
+}
+
+func (self *_Assembler) _asm_OP_text(p *_Instr) {
+ self.check_size(len(p.vs())) // SIZE ${len(p.vs())}
+ self.add_text(p.vs()) // TEXT ${p.vs()}
+}
+
+func (self *_Assembler) _asm_OP_deref(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p
+}
+
+func (self *_Assembler) _asm_OP_index(p *_Instr) {
+ self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ $p.vi(), AX
+ self.Emit("ADDQ", _AX, _SP_p) // ADDQ AX, SP.p
+}
+
+func (self *_Assembler) _asm_OP_load(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -24), _SP_x) // MOVQ -24(ST)(AX), SP.x
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, -8), _SP_p) // MOVQ -8(ST)(AX), SP.p
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _SP_q) // MOVQ (ST)(AX), SP.q
+}
+
+func (self *_Assembler) _asm_OP_save(_ *_Instr) {
+ self.save_state()
+}
+
+func (self *_Assembler) _asm_OP_drop(_ *_Instr) {
+ self.drop_state(_StateSize)
+}
+
+func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) {
+ self.drop_state(_StateSize * 2) // DROP $(_StateSize * 2)
+ self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 56)) // MOVOU X0, 56(ST)(AX)
+}
+
+func (self *_Assembler) _asm_OP_recurse(p *_Instr) {
+ self.prep_buffer_AX() // MOVE {buf}, (SP)
+ vt, pv := p.vp()
+ self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ $(type(p.vt())), BX
+
+ /* check for indirection */
+ if !rt.UnpackType(vt).Indirect() {
+ self.Emit("MOVQ", _SP_p, _CX) // MOVQ SP.p, CX
+ } else {
+ self.Emit("MOVQ", _SP_p, _VAR_vp) // MOVQ SP.p, VAR.vp
+ self.Emit("LEAQ", _VAR_vp, _CX) // LEAQ VAR.vp, CX
+ }
+
+ /* call the encoder */
+ self.Emit("MOVQ" , _ST, _DI) // MOVQ ST, DI
+ self.Emit("MOVQ" , _ARG_fv, _SI) // MOVQ $fv, SI
+ if pv {
+ self.Emit("BTCQ", jit.Imm(bitPointerValue), _SI) // BTCQ $1, SI
+ }
+ self.call_encoder(_F_encodeTypedPointer) // CALL encodeTypedPointer
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+ self.load_buffer_AX()
+}
+
+func (self *_Assembler) _asm_OP_is_nil(p *_Instr) {
+ self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_nil_p1(p *_Instr) {
+ self.Emit("CMPQ", jit.Ptr(_SP_p, 8), jit.Imm(0)) // CMPQ 8(SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_zero_1(p *_Instr) {
+ self.Emit("CMPB", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPB (SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_zero_2(p *_Instr) {
+ self.Emit("CMPW", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPW (SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_zero_4(p *_Instr) {
+ self.Emit("CMPL", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPL (SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_zero_8(p *_Instr) {
+ self.Emit("CMPQ", jit.Ptr(_SP_p, 0), jit.Imm(0)) // CMPQ (SP.p), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_is_zero_map(p *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _AX) // MOVQ (SP.p), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Xjmp("JZ" , p.vi()) // JZ p.vi()
+ self.Emit("CMPQ" , jit.Ptr(_AX, 0), jit.Imm(0)) // CMPQ (AX), $0
+ self.Xjmp("JE" , p.vi()) // JE p.vi()
+}
+
+func (self *_Assembler) _asm_OP_goto(p *_Instr) {
+ self.Xjmp("JMP", p.vi())
+}
+
+func (self *_Assembler) _asm_OP_map_iter(p *_Instr) {
+ self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ $p.vt(), AX
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _BX) // MOVQ (SP.p), BX
+ self.Emit("MOVQ" , _ARG_fv, _CX) // MOVQ fv, CX
+ self.call_go(_F_iteratorStart) // CALL_GO iteratorStart
+ self.Emit("MOVQ" , _AX, _SP_q) // MOVQ AX, SP.q
+ self.Emit("MOVQ" , _BX, _ET) // MOVQ 32(SP), ET
+ self.Emit("MOVQ" , _CX, _EP) // MOVQ 40(SP), EP
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+}
+
+func (self *_Assembler) _asm_OP_map_stop(_ *_Instr) {
+ self.Emit("MOVQ", _SP_q, _AX) // MOVQ SP.q, AX
+ self.call_go(_F_iteratorStop) // CALL_GO iteratorStop
+ self.Emit("XORL", _SP_q, _SP_q) // XORL SP.q, SP.q
+}
+
+func (self *_Assembler) _asm_OP_map_check_key(p *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_SP_q, 0), _SP_p) // MOVQ (SP.q), SP.p
+ self.Emit("TESTQ", _SP_p, _SP_p) // TESTQ SP.p, SP.p
+ self.Xjmp("JZ" , p.vi()) // JNZ p.vi()
+}
+
+func (self *_Assembler) _asm_OP_map_write_key(p *_Instr) {
+ self.Emit("BTQ", jit.Imm(bitSortMapKeys), _ARG_fv) // BTQ ${SortMapKeys}, fv
+ self.Sjmp("JNC", "_unordered_key_{n}") // JNC _unordered_key_{n}
+ self.encode_string(false) // STR $false
+ self.Xjmp("JMP", p.vi()) // JMP ${p.vi()}
+ self.Link("_unordered_key_{n}") // _unordered_key_{n}:
+}
+
+func (self *_Assembler) _asm_OP_map_value_next(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_SP_q, 8), _SP_p) // MOVQ 8(SP.q), SP.p
+ self.Emit("MOVQ", _SP_q, _AX) // MOVQ SP.q, AX
+ self.call_go(_F_iteratorNext) // CALL_GO iteratorNext
+}
+
+func (self *_Assembler) _asm_OP_slice_len(_ *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 8), _SP_x) // MOVQ 8(SP.p), SP.x
+ self.Emit("MOVQ" , jit.Ptr(_SP_p, 0), _SP_p) // MOVQ (SP.p), SP.p
+ self.Emit("ORQ" , jit.Imm(1 << _S_init), _SP_f) // ORQ $(1<<_S_init), SP.f
+}
+
+func (self *_Assembler) _asm_OP_slice_next(p *_Instr) {
+ self.Emit("TESTQ" , _SP_x, _SP_x) // TESTQ SP.x, SP.x
+ self.Xjmp("JZ" , p.vi()) // JZ p.vi()
+ self.Emit("SUBQ" , jit.Imm(1), _SP_x) // SUBQ $1, SP.x
+ self.Emit("BTRQ" , jit.Imm(_S_init), _SP_f) // BTRQ $_S_init, SP.f
+ self.Emit("LEAQ" , jit.Ptr(_SP_p, int64(p.vlen())), _AX) // LEAQ $(p.vlen())(SP.p), AX
+ self.Emit("CMOVQCC", _AX, _SP_p) // CMOVQNC AX, SP.p
+}
+
+func (self *_Assembler) _asm_OP_marshal(p *_Instr) {
+ self.call_marshaler(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt())
+}
+
+func (self *_Assembler) _asm_OP_marshal_p(p *_Instr) {
+ if p.vk() != reflect.Ptr {
+ panic("marshal_p: invalid type")
+ } else {
+ self.call_marshaler_v(_F_encodeJsonMarshaler, _T_json_Marshaler, p.vt(), false)
+ }
+}
+
+func (self *_Assembler) _asm_OP_marshal_text(p *_Instr) {
+ self.call_marshaler(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt())
+}
+
+func (self *_Assembler) _asm_OP_marshal_text_p(p *_Instr) {
+ if p.vk() != reflect.Ptr {
+ panic("marshal_text_p: invalid type")
+ } else {
+ self.call_marshaler_v(_F_encodeTextMarshaler, _T_encoding_TextMarshaler, p.vt(), false)
+ }
+}
+
+func (self *_Assembler) _asm_OP_cond_set(_ *_Instr) {
+ self.Emit("ORQ", jit.Imm(1 << _S_cond), _SP_f) // ORQ $(1<<_S_cond), SP.f
+}
+
+func (self *_Assembler) _asm_OP_cond_testc(p *_Instr) {
+ self.Emit("BTRQ", jit.Imm(_S_cond), _SP_f) // BTRQ $_S_cond, SP.f
+ self.Xjmp("JC" , p.vi())
+}
+
+func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) {
+ self.Emit("MOVQ", jit.Imm(int64(p2.op())), _CX) // MOVQ $(p2.op()), AX
+ self.Emit("MOVQ", jit.Imm(int64(p1.op())), _BX) // MOVQ $(p1.op()), BX
+ self.Emit("MOVQ", jit.Imm(int64(i)), _AX) // MOVQ $(i), CX
+ self.call_go(_F_println)
+}
+
+var (
+ _V_writeBarrier = jit.Imm(int64(uintptr(unsafe.Pointer(&_runtime_writeBarrier))))
+
+ _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX)
+)
+
+func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr) {
+ if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX {
+ panic("rec contains AX!")
+ }
+ self.Emit("MOVQ", _V_writeBarrier, _BX)
+ self.Emit("CMPL", jit.Ptr(_BX, 0), jit.Imm(0))
+ self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.xsave(_DI)
+ self.Emit("MOVQ", ptr, _AX)
+ self.Emit("LEAQ", rec, _DI)
+ self.Emit("MOVQ", _F_gcWriteBarrierAX, _BX) // MOVQ ${fn}, AX
+ self.Rjmp("CALL", _BX)
+ self.xload(_DI)
+ self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", ptr, rec)
+ self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/encoder/compiler.go b/vendor/github.com/bytedance/sonic/encoder/compiler.go
new file mode 100644
index 000000000..a949c90f7
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/compiler.go
@@ -0,0 +1,885 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `fmt`
+ `reflect`
+ `strconv`
+ `strings`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/resolver`
+ `github.com/bytedance/sonic/internal/rt`
+ `github.com/bytedance/sonic/option`
+)
+
+type _Op uint8
+
+const (
+ _OP_null _Op = iota + 1
+ _OP_empty_arr
+ _OP_empty_obj
+ _OP_bool
+ _OP_i8
+ _OP_i16
+ _OP_i32
+ _OP_i64
+ _OP_u8
+ _OP_u16
+ _OP_u32
+ _OP_u64
+ _OP_f32
+ _OP_f64
+ _OP_str
+ _OP_bin
+ _OP_quote
+ _OP_number
+ _OP_eface
+ _OP_iface
+ _OP_byte
+ _OP_text
+ _OP_deref
+ _OP_index
+ _OP_load
+ _OP_save
+ _OP_drop
+ _OP_drop_2
+ _OP_recurse
+ _OP_is_nil
+ _OP_is_nil_p1
+ _OP_is_zero_1
+ _OP_is_zero_2
+ _OP_is_zero_4
+ _OP_is_zero_8
+ _OP_is_zero_map
+ _OP_goto
+ _OP_map_iter
+ _OP_map_stop
+ _OP_map_check_key
+ _OP_map_write_key
+ _OP_map_value_next
+ _OP_slice_len
+ _OP_slice_next
+ _OP_marshal
+ _OP_marshal_p
+ _OP_marshal_text
+ _OP_marshal_text_p
+ _OP_cond_set
+ _OP_cond_testc
+)
+
+const (
+ _INT_SIZE = 32 << (^uint(0) >> 63)
+ _PTR_SIZE = 32 << (^uintptr(0) >> 63)
+ _PTR_BYTE = unsafe.Sizeof(uintptr(0))
+)
+
+const (
+ _MAX_ILBUF = 100000 // cutoff at 100k of IL instructions
+ _MAX_FIELDS = 50 // cutoff at 50-field structs
+)
+
+var _OpNames = [256]string {
+ _OP_null : "null",
+ _OP_empty_arr : "empty_arr",
+ _OP_empty_obj : "empty_obj",
+ _OP_bool : "bool",
+ _OP_i8 : "i8",
+ _OP_i16 : "i16",
+ _OP_i32 : "i32",
+ _OP_i64 : "i64",
+ _OP_u8 : "u8",
+ _OP_u16 : "u16",
+ _OP_u32 : "u32",
+ _OP_u64 : "u64",
+ _OP_f32 : "f32",
+ _OP_f64 : "f64",
+ _OP_str : "str",
+ _OP_bin : "bin",
+ _OP_quote : "quote",
+ _OP_number : "number",
+ _OP_eface : "eface",
+ _OP_iface : "iface",
+ _OP_byte : "byte",
+ _OP_text : "text",
+ _OP_deref : "deref",
+ _OP_index : "index",
+ _OP_load : "load",
+ _OP_save : "save",
+ _OP_drop : "drop",
+ _OP_drop_2 : "drop_2",
+ _OP_recurse : "recurse",
+ _OP_is_nil : "is_nil",
+ _OP_is_nil_p1 : "is_nil_p1",
+ _OP_is_zero_1 : "is_zero_1",
+ _OP_is_zero_2 : "is_zero_2",
+ _OP_is_zero_4 : "is_zero_4",
+ _OP_is_zero_8 : "is_zero_8",
+ _OP_is_zero_map : "is_zero_map",
+ _OP_goto : "goto",
+ _OP_map_iter : "map_iter",
+ _OP_map_stop : "map_stop",
+ _OP_map_check_key : "map_check_key",
+ _OP_map_write_key : "map_write_key",
+ _OP_map_value_next : "map_value_next",
+ _OP_slice_len : "slice_len",
+ _OP_slice_next : "slice_next",
+ _OP_marshal : "marshal",
+ _OP_marshal_p : "marshal_p",
+ _OP_marshal_text : "marshal_text",
+ _OP_marshal_text_p : "marshal_text_p",
+ _OP_cond_set : "cond_set",
+ _OP_cond_testc : "cond_testc",
+}
+
+func (self _Op) String() string {
+ if ret := _OpNames[self]; ret != "" {
+ return ret
+ } else {
+ return "<invalid>"
+ }
+}
+
+func _OP_int() _Op {
+ switch _INT_SIZE {
+ case 32: return _OP_i32
+ case 64: return _OP_i64
+ default: panic("unsupported int size")
+ }
+}
+
+func _OP_uint() _Op {
+ switch _INT_SIZE {
+ case 32: return _OP_u32
+ case 64: return _OP_u64
+ default: panic("unsupported uint size")
+ }
+}
+
+func _OP_uintptr() _Op {
+ switch _PTR_SIZE {
+ case 32: return _OP_u32
+ case 64: return _OP_u64
+ default: panic("unsupported pointer size")
+ }
+}
+
+func _OP_is_zero_ints() _Op {
+ switch _INT_SIZE {
+ case 32: return _OP_is_zero_4
+ case 64: return _OP_is_zero_8
+ default: panic("unsupported integer size")
+ }
+}
+
+type _Instr struct {
+ u uint64 // union {op: 8, _: 8, vi: 48}, vi may be an int or len(str)
+ p unsafe.Pointer // may be GoString.Ptr or *GoType
+}
+
+func packOp(op _Op) uint64 {
+ return uint64(op) << 56
+}
+
+func newInsOp(op _Op) _Instr {
+ return _Instr{u: packOp(op)}
+}
+
+func newInsVi(op _Op, vi int) _Instr {
+ return _Instr{u: packOp(op) | rt.PackInt(vi)}
+}
+
+func newInsVs(op _Op, vs string) _Instr {
+ return _Instr {
+ u: packOp(op) | rt.PackInt(len(vs)),
+ p: (*rt.GoString)(unsafe.Pointer(&vs)).Ptr,
+ }
+}
+
+func newInsVt(op _Op, vt reflect.Type) _Instr {
+ return _Instr {
+ u: packOp(op),
+ p: unsafe.Pointer(rt.UnpackType(vt)),
+ }
+}
+
+func newInsVp(op _Op, vt reflect.Type, pv bool) _Instr {
+ i := 0
+ if pv {
+ i = 1
+ }
+ return _Instr {
+ u: packOp(op) | rt.PackInt(i),
+ p: unsafe.Pointer(rt.UnpackType(vt)),
+ }
+}
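
A hedged illustration of the _Instr.u layout noted above ("union {op: 8, _: 8, vi: 48}"): the opcode lives in the top byte and the integer operand in the low 48 bits. pack48/unpack48 below are hypothetical stand-ins for rt.PackInt / rt.UnpackInt, assumed to keep vi in the low 48 bits.

    const opShift = 56

    func pack48(op uint8, vi int) uint64 {
        return uint64(op)<<opShift | (uint64(vi) & ((1 << 48) - 1))
    }

    func unpackOp(u uint64) uint8 { return uint8(u >> opShift) }
    func unpack48(u uint64) int   { return int(u & ((1 << 48) - 1)) }
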
+
+func (self _Instr) op() _Op {
+ return _Op(self.u >> 56)
+}
+
+func (self _Instr) vi() int {
+ return rt.UnpackInt(self.u)
+}
+
+func (self _Instr) vf() uint8 {
+ return (*rt.GoType)(self.p).KindFlags
+}
+
+func (self _Instr) vs() (v string) {
+ (*rt.GoString)(unsafe.Pointer(&v)).Ptr = self.p
+ (*rt.GoString)(unsafe.Pointer(&v)).Len = self.vi()
+ return
+}
+
+func (self _Instr) vk() reflect.Kind {
+ return (*rt.GoType)(self.p).Kind()
+}
+
+func (self _Instr) vt() reflect.Type {
+ return (*rt.GoType)(self.p).Pack()
+}
+
+func (self _Instr) vp() (vt reflect.Type, pv bool) {
+ return (*rt.GoType)(self.p).Pack(), rt.UnpackInt(self.u) == 1
+}
+
+func (self _Instr) i64() int64 {
+ return int64(self.vi())
+}
+
+func (self _Instr) vlen() int {
+ return int((*rt.GoType)(self.p).Size)
+}
+
+func (self _Instr) isBranch() bool {
+ switch self.op() {
+ case _OP_goto : fallthrough
+ case _OP_is_nil : fallthrough
+ case _OP_is_nil_p1 : fallthrough
+ case _OP_is_zero_1 : fallthrough
+ case _OP_is_zero_2 : fallthrough
+ case _OP_is_zero_4 : fallthrough
+ case _OP_is_zero_8 : fallthrough
+ case _OP_map_check_key : fallthrough
+ case _OP_map_write_key : fallthrough
+ case _OP_slice_next : fallthrough
+ case _OP_cond_testc : return true
+ default : return false
+ }
+}
+
+func (self _Instr) disassemble() string {
+ switch self.op() {
+ case _OP_byte : return fmt.Sprintf("%-18s%s", self.op().String(), strconv.QuoteRune(rune(self.vi())))
+ case _OP_text : return fmt.Sprintf("%-18s%s", self.op().String(), strconv.Quote(self.vs()))
+ case _OP_index : return fmt.Sprintf("%-18s%d", self.op().String(), self.vi())
+ case _OP_recurse : fallthrough
+ case _OP_map_iter : fallthrough
+ case _OP_marshal : fallthrough
+ case _OP_marshal_p : fallthrough
+ case _OP_marshal_text : fallthrough
+ case _OP_marshal_text_p : return fmt.Sprintf("%-18s%s", self.op().String(), self.vt())
+ case _OP_goto : fallthrough
+ case _OP_is_nil : fallthrough
+ case _OP_is_nil_p1 : fallthrough
+ case _OP_is_zero_1 : fallthrough
+ case _OP_is_zero_2 : fallthrough
+ case _OP_is_zero_4 : fallthrough
+ case _OP_is_zero_8 : fallthrough
+ case _OP_is_zero_map : fallthrough
+ case _OP_cond_testc : fallthrough
+ case _OP_map_check_key : fallthrough
+ case _OP_map_write_key : return fmt.Sprintf("%-18sL_%d", self.op().String(), self.vi())
+ case _OP_slice_next : return fmt.Sprintf("%-18sL_%d, %s", self.op().String(), self.vi(), self.vt())
+ default : return self.op().String()
+ }
+}
+
+type (
+ _Program []_Instr
+)
+
+func (self _Program) pc() int {
+ return len(self)
+}
+
+func (self _Program) tag(n int) {
+ if n >= _MaxStack {
+ panic("type nesting too deep")
+ }
+}
+
+func (self _Program) pin(i int) {
+ v := &self[i]
+ v.u &= 0xffff000000000000
+ v.u |= rt.PackInt(self.pc())
+}
+
+func (self _Program) rel(v []int) {
+ for _, i := range v {
+ self.pin(i)
+ }
+}
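
pc/pin/rel implement simple forward-branch back-patching: emit a branch whose target is not yet known, remember its index, and pin it once the destination pc is reached. A minimal, illustrative usage sketch with the helpers defined in this file:

    // sketchNilCheck shows the pin() pattern; it is illustrative only.
    func sketchNilCheck(p *_Program) {
        i := p.pc()       // index of the branch to patch later
        p.add(_OP_is_nil) // target still unknown here
        p.add(_OP_str)    // non-nil path
        p.pin(i)          // is_nil now jumps to the current pc
    }
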
+
+func (self *_Program) add(op _Op) {
+ *self = append(*self, newInsOp(op))
+}
+
+func (self *_Program) key(op _Op) {
+ *self = append(*self,
+ newInsVi(_OP_byte, '"'),
+ newInsOp(op),
+ newInsVi(_OP_byte, '"'),
+ )
+}
+
+func (self *_Program) int(op _Op, vi int) {
+ *self = append(*self, newInsVi(op, vi))
+}
+
+func (self *_Program) str(op _Op, vs string) {
+ *self = append(*self, newInsVs(op, vs))
+}
+
+func (self *_Program) rtt(op _Op, vt reflect.Type) {
+ *self = append(*self, newInsVt(op, vt))
+}
+
+func (self *_Program) vp(op _Op, vt reflect.Type, pv bool) {
+ *self = append(*self, newInsVp(op, vt, pv))
+}
+
+func (self _Program) disassemble() string {
+ nb := len(self)
+ tab := make([]bool, nb + 1)
+ ret := make([]string, 0, nb + 1)
+
+ /* prescan to get all the labels */
+ for _, ins := range self {
+ if ins.isBranch() {
+ tab[ins.vi()] = true
+ }
+ }
+
+ /* disassemble each instruction */
+ for i, ins := range self {
+ if !tab[i] {
+ ret = append(ret, "\t" + ins.disassemble())
+ } else {
+ ret = append(ret, fmt.Sprintf("L_%d:\n\t%s", i, ins.disassemble()))
+ }
+ }
+
+ /* add the last label, if needed */
+ if tab[nb] {
+ ret = append(ret, fmt.Sprintf("L_%d:", nb))
+ }
+
+ /* add an "end" indicator, and join all the strings */
+ return strings.Join(append(ret, "\tend"), "\n")
+}
+
+type _Compiler struct {
+ opts option.CompileOptions
+ pv bool
+ tab map[reflect.Type]bool
+ rec map[reflect.Type]uint8
+}
+
+func newCompiler() *_Compiler {
+ return &_Compiler {
+ opts: option.DefaultCompileOptions(),
+ tab: map[reflect.Type]bool{},
+ rec: map[reflect.Type]uint8{},
+ }
+}
+
+func (self *_Compiler) apply(opts option.CompileOptions) *_Compiler {
+ self.opts = opts
+ if self.opts.RecursiveDepth > 0 {
+ self.rec = map[reflect.Type]uint8{}
+ }
+ return self
+}
+
+func (self *_Compiler) rescue(ep *error) {
+ if val := recover(); val != nil {
+ if err, ok := val.(error); ok {
+ *ep = err
+ } else {
+ panic(val)
+ }
+ }
+}
+
+func (self *_Compiler) compile(vt reflect.Type, pv bool) (ret _Program, err error) {
+ defer self.rescue(&err)
+ self.compileOne(&ret, 0, vt, pv)
+ return
+}
+
+func (self *_Compiler) compileOne(p *_Program, sp int, vt reflect.Type, pv bool) {
+ if self.tab[vt] {
+ p.vp(_OP_recurse, vt, pv)
+ } else {
+ self.compileRec(p, sp, vt, pv)
+ }
+}
+
+func (self *_Compiler) compileRec(p *_Program, sp int, vt reflect.Type, pv bool) {
+ pr := self.pv
+ pt := reflect.PtrTo(vt)
+
+ /* check for addressable `json.Marshaler` with pointer receiver */
+ if pv && pt.Implements(jsonMarshalerType) {
+ p.rtt(_OP_marshal_p, pt)
+ return
+ }
+
+ /* check for `json.Marshaler` */
+ if vt.Implements(jsonMarshalerType) {
+ self.compileMarshaler(p, _OP_marshal, vt, jsonMarshalerType)
+ return
+ }
+
+ /* check for addressable `encoding.TextMarshaler` with pointer receiver */
+ if pv && pt.Implements(encodingTextMarshalerType) {
+ p.rtt(_OP_marshal_text_p, pt)
+ return
+ }
+
+ /* check for `encoding.TextMarshaler` */
+ if vt.Implements(encodingTextMarshalerType) {
+ self.compileMarshaler(p, _OP_marshal_text, vt, encodingTextMarshalerType)
+ return
+ }
+
+ /* enter the recursion, and compile the type */
+ self.pv = pv
+ self.tab[vt] = true
+ self.compileOps(p, sp, vt)
+
+ /* exit the recursion */
+ self.pv = pr
+ delete(self.tab, vt)
+}
+
+func (self *_Compiler) compileOps(p *_Program, sp int, vt reflect.Type) {
+ switch vt.Kind() {
+ case reflect.Bool : p.add(_OP_bool)
+ case reflect.Int : p.add(_OP_int())
+ case reflect.Int8 : p.add(_OP_i8)
+ case reflect.Int16 : p.add(_OP_i16)
+ case reflect.Int32 : p.add(_OP_i32)
+ case reflect.Int64 : p.add(_OP_i64)
+ case reflect.Uint : p.add(_OP_uint())
+ case reflect.Uint8 : p.add(_OP_u8)
+ case reflect.Uint16 : p.add(_OP_u16)
+ case reflect.Uint32 : p.add(_OP_u32)
+ case reflect.Uint64 : p.add(_OP_u64)
+ case reflect.Uintptr : p.add(_OP_uintptr())
+ case reflect.Float32 : p.add(_OP_f32)
+ case reflect.Float64 : p.add(_OP_f64)
+ case reflect.String : self.compileString (p, vt)
+ case reflect.Array : self.compileArray (p, sp, vt.Elem(), vt.Len())
+ case reflect.Interface : self.compileInterface (p, vt)
+ case reflect.Map : self.compileMap (p, sp, vt)
+ case reflect.Ptr : self.compilePtr (p, sp, vt.Elem())
+ case reflect.Slice : self.compileSlice (p, sp, vt.Elem())
+ case reflect.Struct : self.compileStruct (p, sp, vt)
+ default : panic (error_type(vt))
+ }
+}
+
+func (self *_Compiler) compileNil(p *_Program, sp int, vt reflect.Type, nil_op _Op, fn func(*_Program, int, reflect.Type)) {
+ x := p.pc()
+ p.add(_OP_is_nil)
+ fn(p, sp, vt)
+ e := p.pc()
+ p.add(_OP_goto)
+ p.pin(x)
+ p.add(nil_op)
+ p.pin(e)
+}
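
compileNil therefore always produces the same skeleton: an is_nil branch around the real body, a goto past the nil opcode, and the nil opcode itself. For a *string field the result looks roughly like the listing below, written in the L_n notation of _Program.disassemble(); it is an illustrative reconstruction, not actual disassembler output.

    	is_nil            L_6      (x: taken when (SP.p) == nil)
    	save                       (body emitted by compilePtrBody)
    	deref
    	str
    	drop
    	goto              L_7      (e: skip the nil opcode)
    L_6:
    	null                       (the nil_op passed to compileNil)
    L_7:
    	end
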
+
+func (self *_Compiler) compilePtr(p *_Program, sp int, vt reflect.Type) {
+ self.compileNil(p, sp, vt, _OP_null, self.compilePtrBody)
+}
+
+func (self *_Compiler) compilePtrBody(p *_Program, sp int, vt reflect.Type) {
+ p.tag(sp)
+ p.add(_OP_save)
+ p.add(_OP_deref)
+ self.compileOne(p, sp + 1, vt, true)
+ p.add(_OP_drop)
+}
+
+func (self *_Compiler) compileMap(p *_Program, sp int, vt reflect.Type) {
+ self.compileNil(p, sp, vt, _OP_empty_obj, self.compileMapBody)
+}
+
+func (self *_Compiler) compileMapBody(p *_Program, sp int, vt reflect.Type) {
+ p.tag(sp + 1)
+ p.int(_OP_byte, '{')
+ p.add(_OP_save)
+ p.rtt(_OP_map_iter, vt)
+ p.add(_OP_save)
+ i := p.pc()
+ p.add(_OP_map_check_key)
+ u := p.pc()
+ p.add(_OP_map_write_key)
+ self.compileMapBodyKey(p, vt.Key())
+ p.pin(u)
+ p.int(_OP_byte, ':')
+ p.add(_OP_map_value_next)
+ self.compileOne(p, sp + 2, vt.Elem(), false)
+ j := p.pc()
+ p.add(_OP_map_check_key)
+ p.int(_OP_byte, ',')
+ v := p.pc()
+ p.add(_OP_map_write_key)
+ self.compileMapBodyKey(p, vt.Key())
+ p.pin(v)
+ p.int(_OP_byte, ':')
+ p.add(_OP_map_value_next)
+ self.compileOne(p, sp + 2, vt.Elem(), false)
+ p.int(_OP_goto, j)
+ p.pin(i)
+ p.pin(j)
+ p.add(_OP_map_stop)
+ p.add(_OP_drop_2)
+ p.int(_OP_byte, '}')
+}
+
+func (self *_Compiler) compileMapBodyKey(p *_Program, vk reflect.Type) {
+ if !vk.Implements(encodingTextMarshalerType) {
+ self.compileMapBodyTextKey(p, vk)
+ } else {
+ self.compileMapBodyUtextKey(p, vk)
+ }
+}
+
+func (self *_Compiler) compileMapBodyTextKey(p *_Program, vk reflect.Type) {
+ switch vk.Kind() {
+ case reflect.Invalid : panic("map key is nil")
+ case reflect.Bool : p.key(_OP_bool)
+ case reflect.Int : p.key(_OP_int())
+ case reflect.Int8 : p.key(_OP_i8)
+ case reflect.Int16 : p.key(_OP_i16)
+ case reflect.Int32 : p.key(_OP_i32)
+ case reflect.Int64 : p.key(_OP_i64)
+ case reflect.Uint : p.key(_OP_uint())
+ case reflect.Uint8 : p.key(_OP_u8)
+ case reflect.Uint16 : p.key(_OP_u16)
+ case reflect.Uint32 : p.key(_OP_u32)
+ case reflect.Uint64 : p.key(_OP_u64)
+ case reflect.Uintptr : p.key(_OP_uintptr())
+ case reflect.Float32 : p.key(_OP_f32)
+ case reflect.Float64 : p.key(_OP_f64)
+ case reflect.String : self.compileString(p, vk)
+ default : panic(error_type(vk))
+ }
+}
+
+func (self *_Compiler) compileMapBodyUtextKey(p *_Program, vk reflect.Type) {
+ if vk.Kind() != reflect.Ptr {
+ p.rtt(_OP_marshal_text, vk)
+ } else {
+ self.compileMapBodyUtextPtr(p, vk)
+ }
+}
+
+func (self *_Compiler) compileMapBodyUtextPtr(p *_Program, vk reflect.Type) {
+ i := p.pc()
+ p.add(_OP_is_nil)
+ p.rtt(_OP_marshal_text, vk)
+ j := p.pc()
+ p.add(_OP_goto)
+ p.pin(i)
+ p.str(_OP_text, "\"\"")
+ p.pin(j)
+}
+
+func (self *_Compiler) compileSlice(p *_Program, sp int, vt reflect.Type) {
+ self.compileNil(p, sp, vt, _OP_empty_arr, self.compileSliceBody)
+}
+
+func (self *_Compiler) compileSliceBody(p *_Program, sp int, vt reflect.Type) {
+ if isSimpleByte(vt) {
+ p.add(_OP_bin)
+ } else {
+ self.compileSliceArray(p, sp, vt)
+ }
+}
+
+func (self *_Compiler) compileSliceArray(p *_Program, sp int, vt reflect.Type) {
+ p.tag(sp)
+ p.int(_OP_byte, '[')
+ p.add(_OP_save)
+ p.add(_OP_slice_len)
+ i := p.pc()
+ p.rtt(_OP_slice_next, vt)
+ self.compileOne(p, sp + 1, vt, true)
+ j := p.pc()
+ p.rtt(_OP_slice_next, vt)
+ p.int(_OP_byte, ',')
+ self.compileOne(p, sp + 1, vt, true)
+ p.int(_OP_goto, j)
+ p.pin(i)
+ p.pin(j)
+ p.add(_OP_drop)
+ p.int(_OP_byte, ']')
+}
+
+func (self *_Compiler) compileArray(p *_Program, sp int, vt reflect.Type, nb int) {
+ p.tag(sp)
+ p.int(_OP_byte, '[')
+ p.add(_OP_save)
+
+ /* first item */
+ if nb != 0 {
+ self.compileOne(p, sp + 1, vt, self.pv)
+ p.add(_OP_load)
+ }
+
+ /* remaining items */
+ for i := 1; i < nb; i++ {
+ p.int(_OP_byte, ',')
+ p.int(_OP_index, i * int(vt.Size()))
+ self.compileOne(p, sp + 1, vt, self.pv)
+ p.add(_OP_load)
+ }
+
+ /* end of array */
+ p.add(_OP_drop)
+ p.int(_OP_byte, ']')
+}
+
+func (self *_Compiler) compileString(p *_Program, vt reflect.Type) {
+ if vt != jsonNumberType {
+ p.add(_OP_str)
+ } else {
+ p.add(_OP_number)
+ }
+}
+
+func (self *_Compiler) compileStruct(p *_Program, sp int, vt reflect.Type) {
+ if sp >= self.opts.MaxInlineDepth || p.pc() >= _MAX_ILBUF || (sp > 0 && vt.NumField() >= _MAX_FIELDS) {
+ p.vp(_OP_recurse, vt, self.pv)
+ if self.opts.RecursiveDepth > 0 {
+ if self.pv {
+ self.rec[vt] = 1
+ } else {
+ self.rec[vt] = 0
+ }
+ }
+ } else {
+ self.compileStructBody(p, sp, vt)
+ }
+}
+
+func (self *_Compiler) compileStructBody(p *_Program, sp int, vt reflect.Type) {
+ p.tag(sp)
+ p.int(_OP_byte, '{')
+ p.add(_OP_save)
+ p.add(_OP_cond_set)
+
+ /* compile each field */
+ for _, fv := range resolver.ResolveStruct(vt) {
+ var s []int
+ var o resolver.Offset
+
+ /* "omitempty" for arrays */
+ if fv.Type.Kind() == reflect.Array {
+ if fv.Type.Len() == 0 && (fv.Opts & resolver.F_omitempty) != 0 {
+ continue
+ }
+ }
+
+ /* index to the field */
+ for _, o = range fv.Path {
+ if p.int(_OP_index, int(o.Size)); o.Kind == resolver.F_deref {
+ s = append(s, p.pc())
+ p.add(_OP_is_nil)
+ p.add(_OP_deref)
+ }
+ }
+
+ /* check for "omitempty" option */
+ if fv.Type.Kind() != reflect.Struct && fv.Type.Kind() != reflect.Array && (fv.Opts & resolver.F_omitempty) != 0 {
+ s = append(s, p.pc())
+ self.compileStructFieldZero(p, fv.Type)
+ }
+
+ /* add the comma if not the first element */
+ i := p.pc()
+ p.add(_OP_cond_testc)
+ p.int(_OP_byte, ',')
+ p.pin(i)
+
+ /* compile the key and value */
+ ft := fv.Type
+ p.str(_OP_text, Quote(fv.Name) + ":")
+
+ /* check for "stringnize" option */
+ if (fv.Opts & resolver.F_stringize) == 0 {
+ self.compileOne(p, sp + 1, ft, self.pv)
+ } else {
+ self.compileStructFieldStr(p, sp + 1, ft)
+ }
+
+ /* patch the skipping jumps and reload the struct pointer */
+ p.rel(s)
+ p.add(_OP_load)
+ }
+
+ /* end of object */
+ p.add(_OP_drop)
+ p.int(_OP_byte, '}')
+}
+
+func (self *_Compiler) compileStructFieldStr(p *_Program, sp int, vt reflect.Type) {
+ pc := -1
+ ft := vt
+ sv := false
+
+ /* dereference the pointer if needed */
+ if ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+
+ /* check if it can be stringized */
+ switch ft.Kind() {
+ case reflect.Bool : sv = true
+ case reflect.Int : sv = true
+ case reflect.Int8 : sv = true
+ case reflect.Int16 : sv = true
+ case reflect.Int32 : sv = true
+ case reflect.Int64 : sv = true
+ case reflect.Uint : sv = true
+ case reflect.Uint8 : sv = true
+ case reflect.Uint16 : sv = true
+ case reflect.Uint32 : sv = true
+ case reflect.Uint64 : sv = true
+ case reflect.Uintptr : sv = true
+ case reflect.Float32 : sv = true
+ case reflect.Float64 : sv = true
+ case reflect.String : sv = true
+ }
+
+ /* if it's not, ignore the "string" and follow the regular path */
+ if !sv {
+ self.compileOne(p, sp, vt, self.pv)
+ return
+ }
+
+ /* dereference the pointer */
+ if vt.Kind() == reflect.Ptr {
+ pc = p.pc()
+ vt = vt.Elem()
+ p.add(_OP_is_nil)
+ p.add(_OP_deref)
+ }
+
+ /* special case of a double-quoted string */
+ if ft != jsonNumberType && ft.Kind() == reflect.String {
+ p.add(_OP_quote)
+ } else {
+ self.compileStructFieldQuoted(p, sp, vt)
+ }
+
+ /* the "null" case of the pointer */
+ if pc != -1 {
+ e := p.pc()
+ p.add(_OP_goto)
+ p.pin(pc)
+ p.add(_OP_null)
+ p.pin(e)
+ }
+}
+
+func (self *_Compiler) compileStructFieldZero(p *_Program, vt reflect.Type) {
+ switch vt.Kind() {
+ case reflect.Bool : p.add(_OP_is_zero_1)
+ case reflect.Int : p.add(_OP_is_zero_ints())
+ case reflect.Int8 : p.add(_OP_is_zero_1)
+ case reflect.Int16 : p.add(_OP_is_zero_2)
+ case reflect.Int32 : p.add(_OP_is_zero_4)
+ case reflect.Int64 : p.add(_OP_is_zero_8)
+ case reflect.Uint : p.add(_OP_is_zero_ints())
+ case reflect.Uint8 : p.add(_OP_is_zero_1)
+ case reflect.Uint16 : p.add(_OP_is_zero_2)
+ case reflect.Uint32 : p.add(_OP_is_zero_4)
+ case reflect.Uint64 : p.add(_OP_is_zero_8)
+ case reflect.Uintptr : p.add(_OP_is_nil)
+ case reflect.Float32 : p.add(_OP_is_zero_4)
+ case reflect.Float64 : p.add(_OP_is_zero_8)
+ case reflect.String : p.add(_OP_is_nil_p1)
+ case reflect.Interface : p.add(_OP_is_nil_p1)
+ case reflect.Map : p.add(_OP_is_zero_map)
+ case reflect.Ptr : p.add(_OP_is_nil)
+ case reflect.Slice : p.add(_OP_is_nil_p1)
+ default : panic(error_type(vt))
+ }
+}
+
+func (self *_Compiler) compileStructFieldQuoted(p *_Program, sp int, vt reflect.Type) {
+ p.int(_OP_byte, '"')
+ self.compileOne(p, sp, vt, self.pv)
+ p.int(_OP_byte, '"')
+}
+
+func (self *_Compiler) compileInterface(p *_Program, vt reflect.Type) {
+ x := p.pc()
+ p.add(_OP_is_nil_p1)
+
+ /* ifaces and efaces are encoded differently */
+ if vt.NumMethod() == 0 {
+ p.add(_OP_eface)
+ } else {
+ p.add(_OP_iface)
+ }
+
+ /* the "null" value */
+ e := p.pc()
+ p.add(_OP_goto)
+ p.pin(x)
+ p.add(_OP_null)
+ p.pin(e)
+}
+
+func (self *_Compiler) compileMarshaler(p *_Program, op _Op, vt reflect.Type, mt reflect.Type) {
+ pc := p.pc()
+ vk := vt.Kind()
+
+ /* direct receiver */
+ if vk != reflect.Ptr {
+ p.rtt(op, vt)
+ return
+ }
+
+ /* value receiver with a pointer type, check for nil before calling the marshaler */
+ p.add(_OP_is_nil)
+ p.rtt(op, vt)
+ i := p.pc()
+ p.add(_OP_goto)
+ p.pin(pc)
+ p.add(_OP_null)
+ p.pin(i)
+}
diff --git a/vendor/github.com/bytedance/sonic/encoder/debug_go116.go b/vendor/github.com/bytedance/sonic/encoder/debug_go116.go
new file mode 100644
index 000000000..4bc9c15c1
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/debug_go116.go
@@ -0,0 +1,66 @@
+// +build go1.15,!go1.17
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `os`
+ `strings`
+ `runtime`
+ `runtime/debug`
+
+ `github.com/bytedance/sonic/internal/jit`
+)
+
+var (
+ debugSyncGC = os.Getenv("SONIC_SYNC_GC") != ""
+ debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == ""
+)
+
+var (
+ _Instr_End _Instr = newInsOp(_OP_null)
+
+ _F_gc = jit.Func(runtime.GC)
+ _F_force_gc = jit.Func(debug.FreeOSMemory)
+ _F_println = jit.Func(println_wrapper)
+)
+
+func println_wrapper(i int, op1 int, op2 int){
+ println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2])
+}
+
+func (self *_Assembler) force_gc() {
+ self.call_go(_F_gc)
+ self.call_go(_F_force_gc)
+}
+
+func (self *_Assembler) debug_instr(i int, v *_Instr) {
+ if debugSyncGC {
+ if (i+1 == len(self.p)) {
+ self.print_gc(i, v, &_Instr_End)
+ } else {
+ next := &(self.p[i+1])
+ self.print_gc(i, v, next)
+ name := _OpNames[next.op()]
+ if strings.Contains(name, "save") {
+ return
+ }
+ }
+ self.force_gc()
+ }
+}
diff --git a/vendor/github.com/bytedance/sonic/encoder/debug_go117.go b/vendor/github.com/bytedance/sonic/encoder/debug_go117.go
new file mode 100644
index 000000000..e1016de32
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/debug_go117.go
@@ -0,0 +1,205 @@
+// +build go1.17,!go1.21
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `fmt`
+ `os`
+ `runtime`
+ `strings`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/jit`
+ `github.com/twitchyliquid64/golang-asm/obj`
+)
+
+const _FP_debug = 128
+
+var (
+ debugSyncGC = os.Getenv("SONIC_SYNC_GC") != ""
+ debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == ""
+ debugCheckPtr = os.Getenv("SONIC_CHECK_POINTER") != ""
+)
+
+var (
+ _Instr_End = newInsOp(_OP_is_nil)
+
+ _F_gc = jit.Func(gc)
+ _F_println = jit.Func(println_wrapper)
+ _F_print = jit.Func(print)
+)
+
+func (self *_Assembler) dsave(r ...obj.Addr) {
+ for i, v := range r {
+ if i > _FP_debug / 8 - 1 {
+ panic("too many registers to save")
+ } else {
+ self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + _FP_saves + _FP_locals + int64(i) * 8))
+ }
+ }
+}
+
+func (self *_Assembler) dload(r ...obj.Addr) {
+ for i, v := range r {
+ if i > _FP_debug / 8 - 1 {
+ panic("too many registers to load")
+ } else {
+ self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + _FP_saves + _FP_locals + int64(i) * 8), v)
+ }
+ }
+}
+
+func println_wrapper(i int, op1 int, op2 int){
+ println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2])
+}
+
+func print(i int){
+ println(i)
+}
+
+func gc() {
+ if !debugSyncGC {
+ return
+ }
+ runtime.GC()
+ // debug.FreeOSMemory()
+}
+
+func (self *_Assembler) dcall(fn obj.Addr) {
+ self.Emit("MOVQ", fn, _R10) // MOVQ ${fn}, R10
+ self.Rjmp("CALL", _R10) // CALL R10
+}
+
+func (self *_Assembler) debug_gc() {
+ if !debugSyncGC {
+ return
+ }
+ self.dsave(_REG_debug...)
+ self.dcall(_F_gc)
+ self.dload(_REG_debug...)
+}
+
+func (self *_Assembler) debug_instr(i int, v *_Instr) {
+ if debugSyncGC {
+ if i+1 == len(self.p) {
+ self.print_gc(i, v, &_Instr_End)
+ } else {
+ next := &(self.p[i+1])
+ self.print_gc(i, v, next)
+ name := _OpNames[next.op()]
+ if strings.Contains(name, "save") {
+ return
+ }
+ }
+ // self.debug_gc()
+ }
+}
+
+//go:noescape
+//go:linkname checkptrBase runtime.checkptrBase
+func checkptrBase(p unsafe.Pointer) uintptr
+
+//go:noescape
+//go:linkname findObject runtime.findObject
+func findObject(p, refBase, refOff uintptr) (base uintptr, s unsafe.Pointer, objIndex uintptr)
+
+var (
+ _F_checkptr = jit.Func(checkptr)
+ _F_printptr = jit.Func(printptr)
+)
+
+var (
+ _R10 = jit.Reg("R10")
+)
+var _REG_debug = []obj.Addr {
+ jit.Reg("AX"),
+ jit.Reg("BX"),
+ jit.Reg("CX"),
+ jit.Reg("DX"),
+ jit.Reg("DI"),
+ jit.Reg("SI"),
+ jit.Reg("BP"),
+ jit.Reg("SP"),
+ jit.Reg("R8"),
+ jit.Reg("R9"),
+ jit.Reg("R10"),
+ jit.Reg("R11"),
+ jit.Reg("R12"),
+ jit.Reg("R13"),
+ jit.Reg("R14"),
+ jit.Reg("R15"),
+}
+
+func checkptr(ptr uintptr) {
+ if ptr == 0 {
+ return
+ }
+ fmt.Printf("pointer: %x\n", ptr)
+ f := checkptrBase(unsafe.Pointer(uintptr(ptr)))
+ if f == 0 {
+ fmt.Printf("! unknown-based pointer: %x\n", ptr)
+ } else if f == 1 {
+ fmt.Printf("! stack pointer: %x\n", ptr)
+ } else {
+ fmt.Printf("base: %x\n", f)
+ }
+ findobj(ptr)
+}
+
+func findobj(ptr uintptr) {
+ base, s, objIndex := findObject(ptr, 0, 0)
+ if s != nil && base == 0 {
+ fmt.Printf("! invalid pointer: %x\n", ptr)
+ }
+ fmt.Printf("objIndex: %d\n", objIndex)
+}
+
+func (self *_Assembler) check_ptr(ptr obj.Addr, lea bool) {
+ if !debugCheckPtr {
+ return
+ }
+
+ self.dsave(_REG_debug...)
+ if lea {
+ self.Emit("LEAQ", ptr, _R10)
+ } else {
+ self.Emit("MOVQ", ptr, _R10)
+ }
+ self.Emit("MOVQ", _R10, jit.Ptr(_SP, 0))
+ self.dcall(_F_checkptr)
+ self.dload(_REG_debug...)
+}
+
+func printptr(i int, ptr uintptr) {
+ fmt.Printf("[%d] ptr: %x\n", i, ptr)
+}
+
+func (self *_Assembler) print_ptr(i int, ptr obj.Addr, lea bool) {
+ self.dsave(_REG_debug...)
+ if lea {
+ self.Emit("LEAQ", ptr, _R10)
+ } else {
+ self.Emit("MOVQ", ptr, _R10)
+ }
+
+ self.Emit("MOVQ", jit.Imm(int64(i)), _AX)
+ self.Emit("MOVQ", _R10, _BX)
+ self.dcall(_F_printptr)
+ self.dload(_REG_debug...)
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/encoder/encoder.go b/vendor/github.com/bytedance/sonic/encoder/encoder.go
new file mode 100644
index 000000000..7a1330166
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/encoder.go
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `bytes`
+ `encoding/json`
+ `reflect`
+ `runtime`
+
+ `github.com/bytedance/sonic/internal/native`
+ `github.com/bytedance/sonic/internal/native/types`
+ `github.com/bytedance/sonic/internal/rt`
+ `github.com/bytedance/sonic/utf8`
+ `github.com/bytedance/sonic/option`
+)
+
+// Options is a set of encoding options.
+type Options uint64
+
+const (
+ bitSortMapKeys = iota
+ bitEscapeHTML
+ bitCompactMarshaler
+ bitNoQuoteTextMarshaler
+ bitNoNullSliceOrMap
+ bitValidateString
+
+ // used for recursive compile
+ bitPointerValue = 63
+)
+
+const (
+ // SortMapKeys indicates that the keys of a map need to be sorted
+ // before serializing into JSON.
+ // WARNING: This hurts performance A LOT, USE WITH CARE.
+ SortMapKeys Options = 1 << bitSortMapKeys
+
+ // EscapeHTML indicates encoder to escape all HTML characters
+ // after serializing into JSON (see https://pkg.go.dev/encoding/json#HTMLEscape).
+ // WARNING: This hurts performance A LOT, USE WITH CARE.
+ EscapeHTML Options = 1 << bitEscapeHTML
+
+ // CompactMarshaler indicates that the output JSON from json.Marshaler
+ // is always compact and needs no validation
+ CompactMarshaler Options = 1 << bitCompactMarshaler
+
+ // NoQuoteTextMarshaler indicates that the output text from encoding.TextMarshaler
+ // is always an escaped string and needs no quoting
+ NoQuoteTextMarshaler Options = 1 << bitNoQuoteTextMarshaler
+
+ // NoNullSliceOrMap indicates that an empty slice or map is encoded as '[]' or '{}',
+ // instead of 'null'
+ NoNullSliceOrMap Options = 1 << bitNoNullSliceOrMap
+
+ // ValidateString indicates that encoder should validate the input string
+ // before encoding it into JSON.
+ ValidateString Options = 1 << bitValidateString
+
+ // CompatibleWithStd is used to be compatible with std encoder.
+ CompatibleWithStd Options = SortMapKeys | EscapeHTML | CompactMarshaler
+)
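+
+// A minimal sketch of how these flags are meant to be combined (the variable
+// name below is illustrative, not part of the original API): Options is a
+// plain bit set, so individual flags are OR-ed together before being passed
+// to Encode or stored in an Encoder.
+var exampleStrictOpts = SortMapKeys | CompactMarshaler | ValidateString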
+
+// Encoder represents a specific set of encoder configurations.
+type Encoder struct {
+ Opts Options
+ prefix string
+ indent string
+}
+
+// Encode returns the JSON encoding of v.
+func (self *Encoder) Encode(v interface{}) ([]byte, error) {
+ if self.indent != "" || self.prefix != "" {
+ return EncodeIndented(v, self.prefix, self.indent, self.Opts)
+ }
+ return Encode(v, self.Opts)
+}
+
+// SortKeys enables the SortMapKeys option.
+func (self *Encoder) SortKeys() *Encoder {
+ self.Opts |= SortMapKeys
+ return self
+}
+
+// SetEscapeHTML specifies whether the EscapeHTML option is enabled
+func (self *Encoder) SetEscapeHTML(f bool) {
+ if f {
+ self.Opts |= EscapeHTML
+ } else {
+ self.Opts &= ^EscapeHTML
+ }
+}
+
+// SetValidateString specifies whether the ValidateString option is enabled
+func (self *Encoder) SetValidateString(f bool) {
+ if f {
+ self.Opts |= ValidateString
+ } else {
+ self.Opts &= ^ValidateString
+ }
+}
+
+// SetCompactMarshaler specifies whether the CompactMarshaler option is enabled
+func (self *Encoder) SetCompactMarshaler(f bool) {
+ if f {
+ self.Opts |= CompactMarshaler
+ } else {
+ self.Opts &= ^CompactMarshaler
+ }
+}
+
+// SetNoQuoteTextMarshaler specifies whether the NoQuoteTextMarshaler option is enabled
+func (self *Encoder) SetNoQuoteTextMarshaler(f bool) {
+ if f {
+ self.Opts |= NoQuoteTextMarshaler
+ } else {
+ self.Opts &= ^NoQuoteTextMarshaler
+ }
+}
+
+// SetIndent instructs the encoder to format each subsequent encoded
+// value as if indented by the package-level function EncodeIndented().
+// Calling SetIndent("", "") disables indentation.
+func (enc *Encoder) SetIndent(prefix, indent string) {
+ enc.prefix = prefix
+ enc.indent = indent
+}
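+
+// exampleEncoderConfig is an illustrative sketch (the function name and the
+// encoded value are made up) of the configuration-style API above: build an
+// Encoder, toggle options through the setters, then call Encode.
+func exampleEncoderConfig() ([]byte, error) {
+    enc := Encoder{Opts: CompatibleWithStd}
+    enc.SortKeys()
+    enc.SetEscapeHTML(false)
+    enc.SetIndent("", "    ")
+    return enc.Encode(map[string]int{"a": 1, "b": 2})
+}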
+
+// Quote returns the JSON-quoted version of s.
+func Quote(s string) string {
+ var n int
+ var p []byte
+
+ /* check for empty string */
+ if s == "" {
+ return `""`
+ }
+
+ /* allocate space for result */
+ n = len(s) + 2
+ p = make([]byte, 0, n)
+
+ /* call the encoder */
+ _ = encodeString(&p, s)
+ return rt.Mem2Str(p)
+}
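+
+// exampleQuote is a tiny hypothetical use of Quote: interior quotes are
+// escaped and the result is wrapped in double quotes.
+func exampleQuote() string {
+    return Quote(`say "hi"`) // yields `"say \"hi\""`
+}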
+
+// Encode returns the JSON encoding of val, encoded with opts.
+func Encode(val interface{}, opts Options) ([]byte, error) {
+ buf := newBytes()
+ err := EncodeInto(&buf, val, opts)
+
+ /* check for errors */
+ if err != nil {
+ freeBytes(buf)
+ return nil, err
+ }
+
+ /* EscapeHTML and ValidateString may replace the pool buffer in EncodeInto, so return it directly */
+ if opts & EscapeHTML != 0 || opts & ValidateString != 0 {
+ return buf, nil
+ }
+
+ /* make a copy of the result */
+ ret := make([]byte, len(buf))
+ copy(ret, buf)
+
+ /* return the buffer into pool */
+ freeBytes(buf)
+ return ret, nil
+}
+
+// EncodeInto is like Encode but uses a user-supplied buffer instead of allocating
+// a new one.
+func EncodeInto(buf *[]byte, val interface{}, opts Options) error {
+ stk := newStack()
+ efv := rt.UnpackEface(val)
+ err := encodeTypedPointer(buf, efv.Type, &efv.Value, stk, uint64(opts))
+
+ /* return the stack into pool */
+ if err != nil {
+ resetStack(stk)
+ }
+ freeStack(stk)
+
+ /* EscapeHTML needs to allocate a new buffer */
+ if opts & EscapeHTML != 0 {
+ dest := HTMLEscape(nil, *buf)
+ freeBytes(*buf) // return the original buffer to the pool
+ *buf = dest
+ }
+
+ if opts & ValidateString != 0 && !utf8.Validate(*buf) {
+ dest := utf8.CorrectWith(nil, *buf, `\ufffd`)
+ freeBytes(*buf) // return the original buffer to the pool
+ *buf = dest
+ }
+
+ /* keep the arguments alive until the encoder has finished */
+ runtime.KeepAlive(buf)
+ runtime.KeepAlive(efv)
+ return err
+}
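+
+// exampleEncodeInto is a minimal sketch (the function name and value are made
+// up) of the buffer-reuse pattern EncodeInto enables: the caller owns buf, so
+// it can be pooled or reused across calls instead of allocating per encode.
+func exampleEncodeInto() ([]byte, error) {
+    buf := make([]byte, 0, 256)
+    if err := EncodeInto(&buf, map[string]bool{"ok": true}, SortMapKeys); err != nil {
+        return nil, err
+    }
+    return buf, nil
+}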
+
+var typeByte = rt.UnpackType(reflect.TypeOf(byte(0)))
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst []byte, src []byte) []byte {
+ return htmlEscape(dst, src)
+}
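+
+// exampleHTMLEscape is an illustrative call to HTMLEscape: the HTML-sensitive
+// characters inside the string literal are rewritten to their \u escapes so
+// the JSON can be embedded in a <script> tag. The input is made up.
+func exampleHTMLEscape() []byte {
+    // expected output: {"msg":"\u003cb\u003e\u0026\u003c/b\u003e"}
+    return HTMLEscape(nil, []byte(`{"msg":"<b>&</b>"}`))
+}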
+
+// EncodeIndented is like Encode but applies Indent to format the output.
+// Each JSON element in the output will begin on a new line beginning with prefix
+// followed by one or more copies of indent according to the indentation nesting.
+func EncodeIndented(val interface{}, prefix string, indent string, opts Options) ([]byte, error) {
+ var err error
+ var out []byte
+ var buf *bytes.Buffer
+
+ /* encode into the buffer */
+ out = newBytes()
+ err = EncodeInto(&out, val, opts)
+
+ /* check for errors */
+ if err != nil {
+ freeBytes(out)
+ return nil, err
+ }
+
+ /* indent the JSON */
+ buf = newBuffer()
+ err = json.Indent(buf, out, prefix, indent)
+
+ /* check for errors */
+ if err != nil {
+ freeBytes(out)
+ freeBuffer(buf)
+ return nil, err
+ }
+
+ /* copy to the result buffer */
+ ret := make([]byte, buf.Len())
+ copy(ret, buf.Bytes())
+
+ /* return the buffers into pool */
+ freeBytes(out)
+ freeBuffer(buf)
+ return ret, nil
+}
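+
+// exampleEncodeIndented is a short usage sketch (names and values are
+// illustrative): the encoded bytes are re-formatted through encoding/json.Indent
+// as shown above.
+func exampleEncodeIndented() ([]byte, error) {
+    return EncodeIndented(map[string]int{"a": 1}, "", "  ", SortMapKeys)
+}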
+
+// Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in
+// order to reduce the first-hit latency.
+//
+// Opts are the compile options; for example, "option.WithCompileRecursiveDepth" sets
+// the depth of recursive compilation for nested struct types.
+func Pretouch(vt reflect.Type, opts ...option.CompileOption) error {
+ cfg := option.DefaultCompileOptions()
+ for _, opt := range opts {
+ opt(&cfg)
+ }
+ return pretouchRec(map[reflect.Type]uint8{vt: 0}, cfg)
+}
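+
+// examplePretouch is a hypothetical warm-up sketch: Pretouch is called once at
+// startup for a hot type so its encoder is compiled before the first real
+// Marshal. The struct type and recursion depth below are made up.
+func examplePretouch() error {
+    type payload struct {
+        Name string `json:"name"`
+        ID   int64  `json:"id"`
+    }
+    return Pretouch(reflect.TypeOf(payload{}), option.WithCompileRecursiveDepth(2))
+}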
+
+// Valid reports whether data contains exactly one valid JSON value. On
+// success, start is the position of its first non-blank character; otherwise
+// start is the position of the invalid character.
+//
+// Note: it does not check for invalid UTF-8 characters.
+func Valid(data []byte) (ok bool, start int) {
+ n := len(data)
+ if n == 0 {
+ return false, -1
+ }
+ s := rt.Mem2Str(data)
+ p := 0
+ m := types.NewStateMachine()
+ ret := native.ValidateOne(&s, &p, m)
+ types.FreeStateMachine(m)
+
+ if ret < 0 {
+ return false, p-1
+ }
+
+ /* check for trailing spaces */
+ for ; p < n; p++ {
+ if (types.SPACE_MASK & (1 << data[p])) == 0 {
+ return false, p
+ }
+ }
+
+ return true, ret
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/encoder/errors.go b/vendor/github.com/bytedance/sonic/encoder/errors.go
new file mode 100644
index 000000000..ac6848a5b
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/errors.go
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `encoding/json`
+ `fmt`
+ `reflect`
+ `strconv`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+var _ERR_too_deep = &json.UnsupportedValueError {
+ Str : "Value nesting too deep",
+ Value : reflect.ValueOf("..."),
+}
+
+var _ERR_nan_or_infinite = &json.UnsupportedValueError {
+ Str : "NaN or ±Infinite",
+ Value : reflect.ValueOf("NaN or ±Infinite"),
+}
+
+func error_type(vtype reflect.Type) error {
+ return &json.UnsupportedTypeError{Type: vtype}
+}
+
+func error_number(number json.Number) error {
+ return &json.UnsupportedValueError {
+ Str : "invalid number literal: " + strconv.Quote(string(number)),
+ Value : reflect.ValueOf(number),
+ }
+}
+
+func error_marshaler(ret []byte, pos int) error {
+ return fmt.Errorf("invalid Marshaler output json syntax at %d: %q", pos, ret)
+}
+
+const (
+ panicNilPointerOfNonEmptyString int = 1 + iota
+)
+
+func goPanic(code int, val unsafe.Pointer) {
+ switch(code){
+ case panicNilPointerOfNonEmptyString:
+ panic(fmt.Sprintf("val: %#v has nil pointer while its length is not zero!", (*rt.GoString)(val)))
+ default:
+ panic("encoder error!")
+ }
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/encoder/mapiter.go b/vendor/github.com/bytedance/sonic/encoder/mapiter.go
new file mode 100644
index 000000000..8a322b3af
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/mapiter.go
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ "encoding"
+ "reflect"
+ "sync"
+ "unsafe"
+
+ "github.com/bytedance/sonic/internal/native"
+ "github.com/bytedance/sonic/internal/rt"
+)
+
+type _MapPair struct {
+ k string // when the map key is an integer, k points into m
+ v unsafe.Pointer
+ m [32]byte
+}
+
+type _MapIterator struct {
+ it rt.GoMapIterator // must be the first field
+ kv rt.GoSlice // slice of _MapPair
+ ki int
+}
+
+var (
+ iteratorPool = sync.Pool{}
+ iteratorPair = rt.UnpackType(reflect.TypeOf(_MapPair{}))
+)
+
+func init() {
+ if unsafe.Offsetof(_MapIterator{}.it) != 0 {
+ panic("_MapIterator.it is not the first field")
+ }
+}
+
+
+func newIterator() *_MapIterator {
+ if v := iteratorPool.Get(); v == nil {
+ return new(_MapIterator)
+ } else {
+ return resetIterator(v.(*_MapIterator))
+ }
+}
+
+func resetIterator(p *_MapIterator) *_MapIterator {
+ p.ki = 0
+ p.it = rt.GoMapIterator{}
+ p.kv.Len = 0
+ return p
+}
+
+func (self *_MapIterator) at(i int) *_MapPair {
+ return (*_MapPair)(unsafe.Pointer(uintptr(self.kv.Ptr) + uintptr(i) * unsafe.Sizeof(_MapPair{})))
+}
+
+func (self *_MapIterator) add() (p *_MapPair) {
+ p = self.at(self.kv.Len)
+ self.kv.Len++
+ return
+}
+
+func (self *_MapIterator) data() (p []_MapPair) {
+ *(*rt.GoSlice)(unsafe.Pointer(&p)) = self.kv
+ return
+}
+
+func (self *_MapIterator) append(t *rt.GoType, k unsafe.Pointer, v unsafe.Pointer) (err error) {
+ p := self.add()
+ p.v = v
+
+ /* check for strings */
+ if tk := t.Kind(); tk != reflect.String {
+ return self.appendGeneric(p, t, tk, k)
+ }
+
+ /* fast path for strings */
+ p.k = *(*string)(k)
+ return nil
+}
+
+func (self *_MapIterator) appendGeneric(p *_MapPair, t *rt.GoType, v reflect.Kind, k unsafe.Pointer) error {
+ switch v {
+ case reflect.Int : p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int)(k)))]) ; return nil
+ case reflect.Int8 : p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int8)(k)))]) ; return nil
+ case reflect.Int16 : p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int16)(k)))]) ; return nil
+ case reflect.Int32 : p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], int64(*(*int32)(k)))]) ; return nil
+ case reflect.Int64 : p.k = rt.Mem2Str(p.m[:native.I64toa(&p.m[0], *(*int64)(k))]) ; return nil
+ case reflect.Uint : p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint)(k)))]) ; return nil
+ case reflect.Uint8 : p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint8)(k)))]) ; return nil
+ case reflect.Uint16 : p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint16)(k)))]) ; return nil
+ case reflect.Uint32 : p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uint32)(k)))]) ; return nil
+ case reflect.Uint64 : p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], *(*uint64)(k))]) ; return nil
+ case reflect.Uintptr : p.k = rt.Mem2Str(p.m[:native.U64toa(&p.m[0], uint64(*(*uintptr)(k)))]) ; return nil
+ case reflect.Interface : return self.appendInterface(p, t, k)
+ case reflect.Struct, reflect.Ptr : return self.appendConcrete(p, t, k)
+ default : panic("unexpected map key type")
+ }
+}
+
+func (self *_MapIterator) appendConcrete(p *_MapPair, t *rt.GoType, k unsafe.Pointer) (err error) {
+ // the compiler has already checked that the type implements the encoding.TextMarshaler interface
+ if !t.Indirect() {
+ k = *(*unsafe.Pointer)(k)
+ }
+ eface := rt.GoEface{Value: k, Type: t}.Pack()
+ out, err := eface.(encoding.TextMarshaler).MarshalText()
+ if err != nil {
+ return err
+ }
+ p.k = rt.Mem2Str(out)
+ return
+}
+
+func (self *_MapIterator) appendInterface(p *_MapPair, t *rt.GoType, k unsafe.Pointer) (err error) {
+ if len(rt.IfaceType(t).Methods) == 0 {
+ panic("unexpected map key type")
+ } else if p.k, err = asText(k); err == nil {
+ return nil
+ } else {
+ return
+ }
+}
+
+func iteratorStop(p *_MapIterator) {
+ iteratorPool.Put(p)
+}
+
+func iteratorNext(p *_MapIterator) {
+ i := p.ki
+ t := &p.it
+
+ /* check for unordered iteration */
+ if i < 0 {
+ mapiternext(t)
+ return
+ }
+
+ /* check for end of iteration */
+ if p.ki >= p.kv.Len {
+ t.K = nil
+ t.V = nil
+ return
+ }
+
+ /* update the key-value pair, and increase the pointer */
+ t.K = unsafe.Pointer(&p.at(p.ki).k)
+ t.V = p.at(p.ki).v
+ p.ki++
+}
+
+func iteratorStart(t *rt.GoMapType, m *rt.GoMap, fv uint64) (*_MapIterator, error) {
+ it := newIterator()
+ mapiterinit(t, m, &it.it)
+
+ /* check for key-sorting; an empty map doesn't need sorting */
+ if m.Count == 0 || (fv & uint64(SortMapKeys)) == 0 {
+ it.ki = -1
+ return it, nil
+ }
+
+ /* pre-allocate space if needed */
+ if m.Count > it.kv.Cap {
+ it.kv = growslice(iteratorPair, it.kv, m.Count)
+ }
+
+ /* dump all the key-value pairs */
+ for ; it.it.K != nil; mapiternext(&it.it) {
+ if err := it.append(t.Key, it.it.K, it.it.V); err != nil {
+ iteratorStop(it)
+ return nil, err
+ }
+ }
+
+ /* sort the keys; a map with only one item doesn't need sorting */
+ if it.ki = 1; m.Count > 1 {
+ radixQsort(it.data(), 0, maxDepth(it.kv.Len))
+ }
+
+ /* load the first pair into iterator */
+ it.it.V = it.at(0).v
+ it.it.K = unsafe.Pointer(&it.at(0).k)
+ return it, nil
+}
diff --git a/vendor/github.com/bytedance/sonic/encoder/pools.go b/vendor/github.com/bytedance/sonic/encoder/pools.go
new file mode 100644
index 000000000..600605d7c
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/pools.go
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `bytes`
+ `sync`
+ `unsafe`
+ `errors`
+ `reflect`
+
+ `github.com/bytedance/sonic/internal/caching`
+ `github.com/bytedance/sonic/option`
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+const (
+ _MaxStack = 4096 // 4k states
+ _MaxBuffer = 1048576 // 1MB buffer size
+
+ _StackSize = unsafe.Sizeof(_Stack{})
+)
+
+var (
+ bytesPool = sync.Pool{}
+ stackPool = sync.Pool{}
+ bufferPool = sync.Pool{}
+ programCache = caching.CreateProgramCache()
+)
+
+type _State struct {
+ x int
+ f uint64
+ p unsafe.Pointer
+ q unsafe.Pointer
+}
+
+type _Stack struct {
+ sp uint64
+ sb [_MaxStack]_State
+}
+
+type _Encoder func(
+ rb *[]byte,
+ vp unsafe.Pointer,
+ sb *_Stack,
+ fv uint64,
+) error
+
+var _KeepAlive struct {
+ rb *[]byte
+ vp unsafe.Pointer
+ sb *_Stack
+ fv uint64
+ err error
+ frame [_FP_offs]byte
+}
+
+var errCallShadow = errors.New("DON'T CALL THIS!")
+
+// Fake version of _Encoder, used only to export its stack map as _Encoder's
+func _Encoder_Shadow(rb *[]byte, vp unsafe.Pointer, sb *_Stack, fv uint64) (err error) {
+ // align to assembler_amd64.go: _FP_offs
+ var frame [_FP_offs]byte
+
+ // must keep all args and frames noticeable to GC
+ _KeepAlive.rb = rb
+ _KeepAlive.vp = vp
+ _KeepAlive.sb = sb
+ _KeepAlive.fv = fv
+ _KeepAlive.err = err
+ _KeepAlive.frame = frame
+
+ return errCallShadow
+}
+
+func newBytes() []byte {
+ if ret := bytesPool.Get(); ret != nil {
+ return ret.([]byte)
+ } else {
+ return make([]byte, 0, _MaxBuffer)
+ }
+}
+
+func newStack() *_Stack {
+ if ret := stackPool.Get(); ret == nil {
+ return new(_Stack)
+ } else {
+ return ret.(*_Stack)
+ }
+}
+
+func resetStack(p *_Stack) {
+ memclrNoHeapPointers(unsafe.Pointer(p), _StackSize)
+}
+
+func newBuffer() *bytes.Buffer {
+ if ret := bufferPool.Get(); ret != nil {
+ return ret.(*bytes.Buffer)
+ } else {
+ return bytes.NewBuffer(make([]byte, 0, _MaxBuffer))
+ }
+}
+
+func freeBytes(p []byte) {
+ p = p[:0]
+ bytesPool.Put(p)
+}
+
+func freeStack(p *_Stack) {
+ p.sp = 0
+ stackPool.Put(p)
+}
+
+func freeBuffer(p *bytes.Buffer) {
+ p.Reset()
+ bufferPool.Put(p)
+}
+
+func makeEncoder(vt *rt.GoType, ex ...interface{}) (interface{}, error) {
+ if pp, err := newCompiler().compile(vt.Pack(), ex[0].(bool)); err != nil {
+ return nil, err
+ } else {
+ as := newAssembler(pp)
+ as.name = vt.String()
+ return as.Load(), nil
+ }
+}
+
+func findOrCompile(vt *rt.GoType, pv bool) (_Encoder, error) {
+ if val := programCache.Get(vt); val != nil {
+ return val.(_Encoder), nil
+ } else if ret, err := programCache.Compute(vt, makeEncoder, pv); err == nil {
+ return ret.(_Encoder), nil
+ } else {
+ return nil, err
+ }
+}
+
+func pretouchType(_vt reflect.Type, opts option.CompileOptions, v uint8) (map[reflect.Type]uint8, error) {
+ /* compile function */
+ compiler := newCompiler().apply(opts)
+ encoder := func(vt *rt.GoType, ex ...interface{}) (interface{}, error) {
+ if pp, err := compiler.compile(_vt, ex[0].(bool)); err != nil {
+ return nil, err
+ } else {
+ as := newAssembler(pp)
+ as.name = vt.String()
+ return as.Load(), nil
+ }
+ }
+
+ /* find or compile */
+ vt := rt.UnpackType(_vt)
+ if val := programCache.Get(vt); val != nil {
+ return nil, nil
+ } else if _, err := programCache.Compute(vt, encoder, v == 1); err == nil {
+ return compiler.rec, nil
+ } else {
+ return nil, err
+ }
+}
+
+func pretouchRec(vtm map[reflect.Type]uint8, opts option.CompileOptions) error {
+ if opts.RecursiveDepth < 0 || len(vtm) == 0 {
+ return nil
+ }
+ next := make(map[reflect.Type]uint8)
+ for vt, v := range vtm {
+ sub, err := pretouchType(vt, opts, v)
+ if err != nil {
+ return err
+ }
+ for svt, v := range sub {
+ next[svt] = v
+ }
+ }
+ opts.RecursiveDepth -= 1
+ return pretouchRec(next, opts)
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/encoder/primitives.go b/vendor/github.com/bytedance/sonic/encoder/primitives.go
new file mode 100644
index 000000000..78fb29ff6
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/primitives.go
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `encoding`
+ `encoding/json`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/jit`
+ `github.com/bytedance/sonic/internal/native`
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+/** Encoder Primitives **/
+
+func encodeNil(rb *[]byte) error {
+ *rb = append(*rb, 'n', 'u', 'l', 'l')
+ return nil
+}
+
+func encodeString(buf *[]byte, val string) error {
+ var sidx int
+ var pbuf *rt.GoSlice
+ var pstr *rt.GoString
+
+ /* opening quote */
+ *buf = append(*buf, '"')
+ pbuf = (*rt.GoSlice)(unsafe.Pointer(buf))
+ pstr = (*rt.GoString)(unsafe.Pointer(&val))
+
+ /* encode with native library */
+ for sidx < pstr.Len {
+ sn := pstr.Len - sidx
+ dn := pbuf.Cap - pbuf.Len
+ sp := padd(pstr.Ptr, sidx)
+ dp := padd(pbuf.Ptr, pbuf.Len)
+ nb := native.Quote(sp, sn, dp, &dn, 0)
+
+ /* check for errors */
+ if pbuf.Len += dn; nb >= 0 {
+ break
+ }
+
+ /* not enough space, grow the slice and try again */
+ sidx += ^nb
+ *pbuf = growslice(rt.UnpackType(byteType), *pbuf, pbuf.Cap * 2)
+ }
+
+ /* closing quote */
+ *buf = append(*buf, '"')
+ return nil
+}
+
+func encodeTypedPointer(buf *[]byte, vt *rt.GoType, vp *unsafe.Pointer, sb *_Stack, fv uint64) error {
+ if vt == nil {
+ return encodeNil(buf)
+ } else if fn, err := findOrCompile(vt, (fv&(1<<bitPointerValue)) != 0); err != nil {
+ return err
+ } else if vt.Indirect() {
+ rt.MoreStack(_FP_size + native.MaxFrameSize)
+ rt.StopProf()
+ err := fn(buf, *vp, sb, fv)
+ rt.StartProf()
+ return err
+ } else {
+ rt.MoreStack(_FP_size + native.MaxFrameSize)
+ rt.StopProf()
+ err := fn(buf, unsafe.Pointer(vp), sb, fv)
+ rt.StartProf()
+ return err
+ }
+}
+
+func encodeJsonMarshaler(buf *[]byte, val json.Marshaler, opt Options) error {
+ if ret, err := val.MarshalJSON(); err != nil {
+ return err
+ } else {
+ if opt & CompactMarshaler != 0 {
+ return compact(buf, ret)
+ }
+ if ok, s := Valid(ret); !ok {
+ return error_marshaler(ret, s)
+ }
+ *buf = append(*buf, ret...)
+ return nil
+ }
+}
+
+func encodeTextMarshaler(buf *[]byte, val encoding.TextMarshaler, opt Options) error {
+ if ret, err := val.MarshalText(); err != nil {
+ return err
+ } else {
+ if opt & NoQuoteTextMarshaler != 0 {
+ *buf = append(*buf, ret...)
+ return nil
+ }
+ return encodeString(buf, rt.Mem2Str(ret) )
+ }
+}
+
+func htmlEscape(dst []byte, src []byte) []byte {
+ var sidx int
+
+ dst = append(dst, src[:0]...) // avoid an explicit nil check on dst
+ sbuf := (*rt.GoSlice)(unsafe.Pointer(&src))
+ dbuf := (*rt.GoSlice)(unsafe.Pointer(&dst))
+
+ /* grow dst if it is shorter */
+ if cap(dst) - len(dst) < len(src) + native.BufPaddingSize {
+ cap := len(src) * 3 / 2 + native.BufPaddingSize
+ *dbuf = growslice(typeByte, *dbuf, cap)
+ }
+
+ for sidx < sbuf.Len {
+ sp := padd(sbuf.Ptr, sidx)
+ dp := padd(dbuf.Ptr, dbuf.Len)
+
+ sn := sbuf.Len - sidx
+ dn := dbuf.Cap - dbuf.Len
+ nb := native.HTMLEscape(sp, sn, dp, &dn)
+
+ /* check for errors */
+ if dbuf.Len += dn; nb >= 0 {
+ break
+ }
+
+ /* not enough space, grow the slice and try again */
+ sidx += ^nb
+ *dbuf = growslice(typeByte, *dbuf, dbuf.Cap * 2)
+ }
+ return dst
+}
+
+var (
+ argPtrs = []bool { true, true, true, false }
+ localPtrs = []bool{}
+)
+
+var (
+ _F_assertI2I = jit.Func(assertI2I)
+)
+
+func asText(v unsafe.Pointer) (string, error) {
+ text := assertI2I(_T_encoding_TextMarshaler, *(*rt.GoIface)(v))
+ r, e := (*(*encoding.TextMarshaler)(unsafe.Pointer(&text))).MarshalText()
+ return rt.Mem2Str(r), e
+}
+
+func asJson(v unsafe.Pointer) (string, error) {
+ text := assertI2I(_T_json_Marshaler, *(*rt.GoIface)(v))
+ r, e := (*(*json.Marshaler)(unsafe.Pointer(&text))).MarshalJSON()
+ return rt.Mem2Str(r), e
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/encoder/sort.go b/vendor/github.com/bytedance/sonic/encoder/sort.go
new file mode 100644
index 000000000..b1a67598b
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/sort.go
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+// Algorithm: 3-way radix quicksort; d is the index of the byte (digit) currently being compared.
+// Reference: https://algs4.cs.princeton.edu/51radix/Quick3string.java.html
+func radixQsort(kvs []_MapPair, d, maxDepth int) {
+ for len(kvs) > 11 {
+ // To avoid the worst case of quickSort (time: O(n^2)), use introsort here.
+ // Reference: https://en.wikipedia.org/wiki/Introsort and
+ // https://github.com/golang/go/issues/467
+ if maxDepth == 0 {
+ heapSort(kvs, 0, len(kvs))
+ return
+ }
+ maxDepth--
+
+ p := pivot(kvs, d)
+ lt, i, gt := 0, 0, len(kvs)
+ for i < gt {
+ c := byteAt(kvs[i].k, d)
+ if c < p {
+ swap(kvs, lt, i)
+ i++
+ lt++
+ } else if c > p {
+ gt--
+ swap(kvs, i, gt)
+ } else {
+ i++
+ }
+ }
+
+ // kvs[0:lt] < v = kvs[lt:gt] < kvs[gt:len(kvs)]
+ // Naive implementation:
+ // radixQsort(kvs[:lt], d, maxDepth)
+ // if p > -1 {
+ // radixQsort(kvs[lt:gt], d+1, maxDepth)
+ // }
+ // radixQsort(kvs[gt:], d, maxDepth)
+ // Optimize as follows: make recursive calls only for the smaller parts.
+ // Reference: https://www.geeksforgeeks.org/quicksort-tail-call-optimization-reducing-worst-case-space-log-n/
+ if p == -1 {
+ if lt > len(kvs) - gt {
+ radixQsort(kvs[gt:], d, maxDepth)
+ kvs = kvs[:lt]
+ } else {
+ radixQsort(kvs[:lt], d, maxDepth)
+ kvs = kvs[gt:]
+ }
+ } else {
+ ml := maxThree(lt, gt-lt, len(kvs)-gt)
+ if ml == lt {
+ radixQsort(kvs[lt:gt], d+1, maxDepth)
+ radixQsort(kvs[gt:], d, maxDepth)
+ kvs = kvs[:lt]
+ } else if ml == gt-lt {
+ radixQsort(kvs[:lt], d, maxDepth)
+ radixQsort(kvs[gt:], d, maxDepth)
+ kvs = kvs[lt:gt]
+ d += 1
+ } else {
+ radixQsort(kvs[:lt], d, maxDepth)
+ radixQsort(kvs[lt:gt], d+1, maxDepth)
+ kvs = kvs[gt:]
+ }
+ }
+ }
+ insertRadixSort(kvs, d)
+}
+
+func insertRadixSort(kvs []_MapPair, d int) {
+ for i := 1; i < len(kvs); i++ {
+ for j := i; j > 0 && lessFrom(kvs[j].k, kvs[j-1].k, d); j-- {
+ swap(kvs, j, j-1)
+ }
+ }
+}
+
+func pivot(kvs []_MapPair, d int) int {
+ m := len(kvs) >> 1
+ if len(kvs) > 40 {
+ // Tukey's ``Ninther'': the median of three medians of three.
+ t := len(kvs) / 8
+ return medianThree(
+ medianThree(byteAt(kvs[0].k, d), byteAt(kvs[t].k, d), byteAt(kvs[2*t].k, d)),
+ medianThree(byteAt(kvs[m].k, d), byteAt(kvs[m-t].k, d), byteAt(kvs[m+t].k, d)),
+ medianThree(byteAt(kvs[len(kvs)-1].k, d),
+ byteAt(kvs[len(kvs)-1-t].k, d),
+ byteAt(kvs[len(kvs)-1-2*t].k, d)))
+ }
+ return medianThree(byteAt(kvs[0].k, d), byteAt(kvs[m].k, d), byteAt(kvs[len(kvs)-1].k, d))
+}
+
+func medianThree(i, j, k int) int {
+ if i > j {
+ i, j = j, i
+ } // i < j
+ if k < i {
+ return i
+ }
+ if k > j {
+ return j
+ }
+ return k
+}
+
+func maxThree(i, j, k int) int {
+ max := i
+ if max < j {
+ max = j
+ }
+ if max < k {
+ max = k
+ }
+ return max
+}
+
+// maxDepth returns a threshold at which quicksort should switch
+// to heapsort. It returns 2*ceil(lg(n+1)).
+func maxDepth(n int) int {
+ var depth int
+ for i := n; i > 0; i >>= 1 {
+ depth++
+ }
+ return depth * 2
+}
+
+// siftDown implements the heap property on kvs[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDown(kvs []_MapPair, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && kvs[first+child].k < kvs[first+child+1].k {
+ child++
+ }
+ if kvs[first+root].k >= kvs[first+child].k {
+ return
+ }
+ swap(kvs, first+root, first+child)
+ root = child
+ }
+}
+
+func heapSort(kvs []_MapPair, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with the greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDown(kvs, i, hi, first)
+ }
+
+ // Pop elements, the largest first, into end of kvs.
+ for i := hi - 1; i >= 0; i-- {
+ swap(kvs, first, first+i)
+ siftDown(kvs, lo, i, first)
+ }
+}
+
+// Note: after a swap, _MapPair.k no longer points into _MapPair.m when the map key is an integer
+func swap(kvs []_MapPair, a, b int) {
+ kvs[a].k, kvs[b].k = kvs[b].k, kvs[a].k
+ kvs[a].v, kvs[b].v = kvs[b].v, kvs[a].v
+}
+
+// lessFrom compares two strings starting from byte position d.
+func lessFrom(a, b string, d int) bool {
+ l := len(a)
+ if l > len(b) {
+ l = len(b)
+ }
+ for i := d; i < l; i++ {
+ if a[i] == b[i] {
+ continue
+ }
+ return a[i] < b[i]
+ }
+ return len(a) < len(b)
+}
+
+func byteAt(b string, p int) int {
+ if p < len(b) {
+ return int(b[p])
+ }
+ return -1
+}
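+
+// exampleSortPairs is a tiny illustrative driver for the sorter above (the
+// function name and keys are made up): radixQsort orders _MapPair entries by
+// key, which is what the SortMapKeys option ultimately relies on.
+func exampleSortPairs() []string {
+    kvs := []_MapPair{{k: "b"}, {k: "aa"}, {k: "a"}}
+    radixQsort(kvs, 0, maxDepth(len(kvs)))
+    keys := make([]string, 0, len(kvs))
+    for _, kv := range kvs {
+        keys = append(keys, kv.k)
+    }
+    return keys // ["a", "aa", "b"]
+}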
diff --git a/vendor/github.com/bytedance/sonic/encoder/stream.go b/vendor/github.com/bytedance/sonic/encoder/stream.go
new file mode 100644
index 000000000..205232c71
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/stream.go
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `encoding/json`
+ `io`
+)
+
+// StreamEncoder writes JSON values to an underlying io.Writer.
+type StreamEncoder struct {
+ w io.Writer
+ Encoder
+}
+
+// NewStreamEncoder adapts to the encoding/json.NewEncoder API.
+//
+// NewStreamEncoder returns a new encoder that writes to w.
+func NewStreamEncoder(w io.Writer) *StreamEncoder {
+ return &StreamEncoder{w: w}
+}
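+
+// exampleStream is a brief sketch of the streaming API above (the function
+// name and value are illustrative): wrap any io.Writer and call Encode once
+// per value; each value is terminated with a newline, mirroring
+// encoding/json.Encoder.
+func exampleStream(w io.Writer) error {
+    enc := NewStreamEncoder(w)
+    enc.SetIndent("", "  ")
+    return enc.Encode(map[string]string{"hello": "world"})
+}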
+
+// Encode writes the JSON encoding of val to the underlying io.Writer.
+func (enc *StreamEncoder) Encode(val interface{}) (err error) {
+ out := newBytes()
+
+ /* encode into the buffer */
+ err = EncodeInto(&out, val, enc.Opts)
+ if err != nil {
+ goto free_bytes
+ }
+
+ if enc.indent != "" || enc.prefix != "" {
+ /* indent the JSON */
+ buf := newBuffer()
+ err = json.Indent(buf, out, enc.prefix, enc.indent)
+ if err != nil {
+ freeBuffer(buf)
+ goto free_bytes
+ }
+
+ // according to standard library, terminate each value with a newline...
+ buf.WriteByte('\n')
+
+ /* copy into io.Writer */
+ _, err = io.Copy(enc.w, buf)
+ if err != nil {
+ freeBuffer(buf)
+ goto free_bytes
+ }
+
+ } else {
+ /* copy into io.Writer */
+ var n int
+ for len(out) > 0 {
+ n, err = enc.w.Write(out)
+ out = out[n:]
+ if err != nil {
+ goto free_bytes
+ }
+ }
+
+ // according to standard library, terminate each value with a newline...
+ _, err = enc.w.Write([]byte{'\n'})
+ }
+
+free_bytes:
+ freeBytes(out)
+ return err
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/encoder/stubs_go116.go b/vendor/github.com/bytedance/sonic/encoder/stubs_go116.go
new file mode 100644
index 000000000..40d06f4af
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/stubs_go116.go
@@ -0,0 +1,65 @@
+// +build go1.15,!go1.17
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `unsafe`
+
+ _ `github.com/chenzhuoyu/base64x`
+
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+//go:linkname _subr__b64encode github.com/chenzhuoyu/base64x._subr__b64encode
+var _subr__b64encode uintptr
+
+//go:noescape
+//go:linkname memmove runtime.memmove
+//goland:noinspection GoUnusedParameter
+func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
+
+//go:linkname growslice runtime.growslice
+//goland:noinspection GoUnusedParameter
+func growslice(et *rt.GoType, old rt.GoSlice, cap int) rt.GoSlice
+
+//go:linkname assertI2I runtime.assertI2I
+//goland:noinspection GoUnusedParameter
+func assertI2I(inter *rt.GoType, i rt.GoIface) rt.GoIface
+
+//go:linkname mapiternext runtime.mapiternext
+//goland:noinspection GoUnusedParameter
+func mapiternext(it *rt.GoMapIterator)
+
+//go:linkname mapiterinit runtime.mapiterinit
+//goland:noinspection GoUnusedParameter
+func mapiterinit(t *rt.GoMapType, m *rt.GoMap, it *rt.GoMapIterator)
+
+//go:linkname isValidNumber encoding/json.isValidNumber
+//goland:noinspection GoUnusedParameter
+func isValidNumber(s string) bool
+
+//go:noescape
+//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers
+//goland:noinspection GoUnusedParameter
+func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
+
+var _runtime_writeBarrier uintptr = rt.GcwbAddr()
+
+//go:linkname gcWriteBarrierAX runtime.gcWriteBarrier
+func gcWriteBarrierAX() \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/encoder/stubs_go117.go b/vendor/github.com/bytedance/sonic/encoder/stubs_go117.go
new file mode 100644
index 000000000..6c8c6ec75
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/stubs_go117.go
@@ -0,0 +1,66 @@
+// +build go1.17,!go1.20
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `unsafe`
+
+ _ `github.com/chenzhuoyu/base64x`
+
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+//go:linkname _subr__b64encode github.com/chenzhuoyu/base64x._subr__b64encode
+var _subr__b64encode uintptr
+
+//go:noescape
+//go:linkname memmove runtime.memmove
+//goland:noinspection GoUnusedParameter
+func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
+
+//go:linkname growslice runtime.growslice
+//goland:noinspection GoUnusedParameter
+func growslice(et *rt.GoType, old rt.GoSlice, cap int) rt.GoSlice
+
+//go:linkname assertI2I runtime.assertI2I2
+//goland:noinspection GoUnusedParameter
+func assertI2I(inter *rt.GoType, i rt.GoIface) rt.GoIface
+
+//go:linkname mapiternext runtime.mapiternext
+//goland:noinspection GoUnusedParameter
+func mapiternext(it *rt.GoMapIterator)
+
+//go:linkname mapiterinit runtime.mapiterinit
+//goland:noinspection GoUnusedParameter
+func mapiterinit(t *rt.GoMapType, m *rt.GoMap, it *rt.GoMapIterator)
+
+//go:linkname isValidNumber encoding/json.isValidNumber
+//goland:noinspection GoUnusedParameter
+func isValidNumber(s string) bool
+
+//go:noescape
+//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers
+//goland:noinspection GoUnusedParameter
+func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
+
+//go:linkname _runtime_writeBarrier runtime.writeBarrier
+var _runtime_writeBarrier uintptr
+
+//go:linkname gcWriteBarrierAX runtime.gcWriteBarrier
+func gcWriteBarrierAX() \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/encoder/stubs_go120.go b/vendor/github.com/bytedance/sonic/encoder/stubs_go120.go
new file mode 100644
index 000000000..f1a7d10c7
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/stubs_go120.go
@@ -0,0 +1,66 @@
+// +build go1.20
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `unsafe`
+
+ _ `github.com/chenzhuoyu/base64x`
+
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+//go:linkname _subr__b64encode github.com/chenzhuoyu/base64x._subr__b64encode
+var _subr__b64encode uintptr
+
+//go:noescape
+//go:linkname memmove runtime.memmove
+//goland:noinspection GoUnusedParameter
+func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
+
+//go:linkname growslice reflect.growslice
+//goland:noinspection GoUnusedParameter
+func growslice(et *rt.GoType, old rt.GoSlice, cap int) rt.GoSlice
+
+//go:linkname assertI2I runtime.assertI2I2
+//goland:noinspection GoUnusedParameter
+func assertI2I(inter *rt.GoType, i rt.GoIface) rt.GoIface
+
+//go:linkname mapiternext runtime.mapiternext
+//goland:noinspection GoUnusedParameter
+func mapiternext(it *rt.GoMapIterator)
+
+//go:linkname mapiterinit runtime.mapiterinit
+//goland:noinspection GoUnusedParameter
+func mapiterinit(t *rt.GoMapType, m *rt.GoMap, it *rt.GoMapIterator)
+
+//go:linkname isValidNumber encoding/json.isValidNumber
+//goland:noinspection GoUnusedParameter
+func isValidNumber(s string) bool
+
+//go:noescape
+//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers
+//goland:noinspection GoUnusedParameter
+func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
+
+//go:linkname _runtime_writeBarrier runtime.writeBarrier
+var _runtime_writeBarrier uintptr
+
+//go:linkname gcWriteBarrierAX runtime.gcWriteBarrier
+func gcWriteBarrierAX() \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/encoder/types.go b/vendor/github.com/bytedance/sonic/encoder/types.go
new file mode 100644
index 000000000..3d4a00668
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/types.go
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `encoding`
+ `encoding/json`
+ `reflect`
+)
+
+var (
+ byteType = reflect.TypeOf(byte(0))
+ jsonNumberType = reflect.TypeOf(json.Number(""))
+ jsonUnsupportedValueType = reflect.TypeOf(new(json.UnsupportedValueError))
+)
+
+var (
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+ jsonMarshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
+ encodingTextMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+)
+
+func isSimpleByte(vt reflect.Type) bool {
+ if vt.Kind() != byteType.Kind() {
+ return false
+ } else {
+ return !isEitherMarshaler(vt) && !isEitherMarshaler(reflect.PtrTo(vt))
+ }
+}
+
+func isEitherMarshaler(vt reflect.Type) bool {
+ return vt.Implements(jsonMarshalerType) || vt.Implements(encodingTextMarshalerType)
+}
diff --git a/vendor/github.com/bytedance/sonic/encoder/utils.go b/vendor/github.com/bytedance/sonic/encoder/utils.go
new file mode 100644
index 000000000..510596fda
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/encoder/utils.go
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encoder
+
+import (
+ `encoding/json`
+ `unsafe`
+
+ `github.com/bytedance/sonic/loader`
+)
+
+//go:nosplit
+func padd(p unsafe.Pointer, v int) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(p) + uintptr(v))
+}
+
+//go:nosplit
+func ptoenc(p loader.Function) _Encoder {
+ return *(*_Encoder)(unsafe.Pointer(&p))
+}
+
+func compact(p *[]byte, v []byte) error {
+ buf := newBuffer()
+ err := json.Compact(buf, v)
+
+ /* check for errors */
+ if err != nil {
+ freeBuffer(buf)
+ return err
+ }
+
+ /* add to result */
+ v = buf.Bytes()
+ *p = append(*p, v...)
+
+ /* return the buffer into pool */
+ freeBuffer(buf)
+ return nil
+}