Diffstat (limited to 'vendor/github.com/bytedance/sonic/decoder')
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/asm.s                            0
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go116.go      1943
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go117.go      1922
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/compiler.go                   1136
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/debug.go                        70
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/decoder.go                     245
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/errors.go                      181
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/generic_amd64_go116.go         776
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117.go         772
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117_test.s      37
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/generic_amd64_test.s            37
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/pools.go                       143
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/primitives.go                   46
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/stream.go                      217
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/stubs_go115.go                 111
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/stubs_go120.go                 111
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/types.go                        58
-rw-r--r--  vendor/github.com/bytedance/sonic/decoder/utils.go                        39
18 files changed, 7844 insertions, 0 deletions
diff --git a/vendor/github.com/bytedance/sonic/decoder/asm.s b/vendor/github.com/bytedance/sonic/decoder/asm.s
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/asm.s
diff --git a/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go116.go b/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go116.go
new file mode 100644
index 000000000..9ff1ad248
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go116.go
@@ -0,0 +1,1943 @@
+// +build go1.15,!go1.17
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `encoding/json`
+ `fmt`
+ `math`
+ `reflect`
+ `strconv`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/caching`
+ `github.com/bytedance/sonic/internal/jit`
+ `github.com/bytedance/sonic/internal/native`
+ `github.com/bytedance/sonic/internal/native/types`
+ `github.com/bytedance/sonic/internal/rt`
+ `github.com/twitchyliquid64/golang-asm/obj`
+ `github.com/twitchyliquid64/golang-asm/obj/x86`
+)
+
+/** Register Allocations
+ *
+ * State Registers:
+ *
+ * %rbx : stack base
+ * %r12 : input pointer
+ * %r13 : input length
+ * %r14 : input cursor
+ * %r15 : value pointer
+ *
+ * Error Registers:
+ *
+ * %r10 : error type register
+ * %r11 : error pointer register
+ */
+
+/** Function Prototype & Stack Map
+ *
+ * func (s string, ic int, vp unsafe.Pointer, sb *_Stack, fv uint64, sv string) (rc int, err error)
+ *
+ * s.buf : (FP)
+ * s.len : 8(FP)
+ * ic : 16(FP)
+ * vp : 24(FP)
+ * sb : 32(FP)
+ * fv : 40(FP)
+ * sv : 56(FP)
+ * err.vt : 72(FP)
+ * err.vp : 80(FP)
+ */
+
+const (
+ _FP_args = 96 // 96 bytes to pass arguments and return values for this function
+ _FP_fargs = 80 // 80 bytes for passing arguments to other Go functions
+ _FP_saves = 40 // 40 bytes for saving the registers before CALL instructions
+ _FP_locals = 144 // 144 bytes for local variables
+)
+
+const (
+ _FP_offs = _FP_fargs + _FP_saves + _FP_locals
+ _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer
+ _FP_base = _FP_size + 8 // 8 bytes for the return address
+)
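+
+// Editor's note (not in the original source): the constants above imply the
+// following frame layout, measured upward from SP after the prologue's
+// SUBQ $_FP_size, SP:
+//
+//     [0, _FP_fargs)                       arguments for callee Go functions
+//     [_FP_fargs, _FP_fargs+_FP_saves)     spill area for the state registers
+//     [_FP_fargs+_FP_saves, _FP_offs)      local variables (_VAR_*)
+//     [_FP_offs, _FP_size)                 saved parent BP
+//     [_FP_size, _FP_base)                 return address
+//     [_FP_base, ...)                      stack-passed arguments (_ARG_* / _RET_*)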
+
+const (
+ _IM_null = 0x6c6c756e // 'null'
+ _IM_true = 0x65757274 // 'true'
+ _IM_alse = 0x65736c61 // 'alse' ('false' without the 'f')
+)
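+
+// Editor's note (illustrative, not in the original source): these are the
+// literals "null", "true" and "alse" laid out as little-endian uint32 values,
+// so a single 4-byte compare can match a whole keyword, e.g. in Go:
+//
+//     binary.LittleEndian.Uint32([]byte("null")) == 0x6c6c756e // true
+//
+// which is what sequences like CMPL (IP)(IC), $_IM_null below rely on.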
+
+const (
+ _BM_space = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n')
+)
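+
+// Editor's note (illustrative, not in the original source): _BM_space is a
+// bitmask over character codes; a byte c is JSON whitespace iff bit c is set,
+// i.e. roughly:
+//
+//     isSpace := c <= ' ' && (_BM_space>>c)&1 != 0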
+
+const (
+ _MODE_JSON = 1 << 3 // base64 mode
+)
+
+const (
+ _LB_error = "_error"
+ _LB_im_error = "_im_error"
+ _LB_eof_error = "_eof_error"
+ _LB_type_error = "_type_error"
+ _LB_field_error = "_field_error"
+ _LB_range_error = "_range_error"
+ _LB_stack_error = "_stack_error"
+ _LB_base64_error = "_base64_error"
+ _LB_unquote_error = "_unquote_error"
+ _LB_parsing_error = "_parsing_error"
+ _LB_parsing_error_v = "_parsing_error_v"
+ _LB_mismatch_error = "_mismatch_error"
+)
+
+const (
+ _LB_char_0_error = "_char_0_error"
+ _LB_char_1_error = "_char_1_error"
+ _LB_char_2_error = "_char_2_error"
+ _LB_char_3_error = "_char_3_error"
+ _LB_char_4_error = "_char_4_error"
+ _LB_char_m2_error = "_char_m2_error"
+ _LB_char_m3_error = "_char_m3_error"
+)
+
+const (
+ _LB_skip_one = "_skip_one"
+ _LB_skip_key_value = "_skip_key_value"
+)
+
+var (
+ _AX = jit.Reg("AX")
+ _CX = jit.Reg("CX")
+ _DX = jit.Reg("DX")
+ _DI = jit.Reg("DI")
+ _SI = jit.Reg("SI")
+ _BP = jit.Reg("BP")
+ _SP = jit.Reg("SP")
+ _R8 = jit.Reg("R8")
+ _R9 = jit.Reg("R9")
+ _X0 = jit.Reg("X0")
+ _X1 = jit.Reg("X1")
+)
+
+var (
+ _ST = jit.Reg("BX")
+ _IP = jit.Reg("R12")
+ _IL = jit.Reg("R13")
+ _IC = jit.Reg("R14")
+ _VP = jit.Reg("R15")
+)
+
+var (
+ _R10 = jit.Reg("R10") // used for gcWriteBarrier
+ _DF = jit.Reg("R10") // reuse R10 in generic decoder for flags
+ _ET = jit.Reg("R10")
+ _EP = jit.Reg("R11")
+)
+
+var (
+ _ARG_s = _ARG_sp
+ _ARG_sp = jit.Ptr(_SP, _FP_base)
+ _ARG_sl = jit.Ptr(_SP, _FP_base + 8)
+ _ARG_ic = jit.Ptr(_SP, _FP_base + 16)
+ _ARG_vp = jit.Ptr(_SP, _FP_base + 24)
+ _ARG_sb = jit.Ptr(_SP, _FP_base + 32)
+ _ARG_fv = jit.Ptr(_SP, _FP_base + 40)
+)
+
+var (
+ _VAR_sv = _VAR_sv_p
+ _VAR_sv_p = jit.Ptr(_SP, _FP_base + 48)
+ _VAR_sv_n = jit.Ptr(_SP, _FP_base + 56)
+ _VAR_vk = jit.Ptr(_SP, _FP_base + 64)
+)
+
+var (
+ _RET_rc = jit.Ptr(_SP, _FP_base + 72)
+ _RET_et = jit.Ptr(_SP, _FP_base + 80)
+ _RET_ep = jit.Ptr(_SP, _FP_base + 88)
+)
+
+var (
+ _VAR_st = _VAR_st_Vt
+ _VAR_sr = jit.Ptr(_SP, _FP_fargs + _FP_saves)
+)
+
+
+var (
+ _VAR_st_Vt = jit.Ptr(_SP, _FP_fargs + _FP_saves + 0)
+ _VAR_st_Dv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8)
+ _VAR_st_Iv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16)
+ _VAR_st_Ep = jit.Ptr(_SP, _FP_fargs + _FP_saves + 24)
+ _VAR_st_Db = jit.Ptr(_SP, _FP_fargs + _FP_saves + 32)
+ _VAR_st_Dc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 40)
+)
+
+var (
+ _VAR_ss_AX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 48)
+ _VAR_ss_CX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 56)
+ _VAR_ss_SI = jit.Ptr(_SP, _FP_fargs + _FP_saves + 64)
+ _VAR_ss_R8 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 72)
+ _VAR_ss_R9 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 80)
+)
+
+var (
+ _VAR_bs_p = jit.Ptr(_SP, _FP_fargs + _FP_saves + 88)
+ _VAR_bs_n = jit.Ptr(_SP, _FP_fargs + _FP_saves + 96)
+ _VAR_bs_LR = jit.Ptr(_SP, _FP_fargs + _FP_saves + 104)
+)
+
+var _VAR_fl = jit.Ptr(_SP, _FP_fargs + _FP_saves + 112)
+
+var (
+ _VAR_et = jit.Ptr(_SP, _FP_fargs + _FP_saves + 120) // save mismatched type
+ _VAR_ic = jit.Ptr(_SP, _FP_fargs + _FP_saves + 128) // save mismatched position
+ _VAR_pc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 136) // save skip return pc
+)
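+
+// Editor's note (summary, not in the original source): the three slots above
+// implement the "skip on type mismatch" protocol: _asm_OP_dismatch_err records
+// the offending position in _VAR_ic and the expected Go type in _VAR_et,
+// _asm_OP_go_skip stores the resume address in _VAR_pc and jumps to
+// _LB_skip_one, and the epilogue turns a non-nil _VAR_et into an error via
+// _LB_mismatch_error.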
+
+type _Assembler struct {
+ jit.BaseAssembler
+ p _Program
+ name string
+}
+
+func newAssembler(p _Program) *_Assembler {
+ return new(_Assembler).Init(p)
+}
+
+/** Assembler Interface **/
+
+func (self *_Assembler) Load() _Decoder {
+ return ptodec(self.BaseAssembler.Load("decode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs))
+}
+
+func (self *_Assembler) Init(p _Program) *_Assembler {
+ self.p = p
+ self.BaseAssembler.Init(self.compile)
+ return self
+}
+
+func (self *_Assembler) compile() {
+ self.prologue()
+ self.instrs()
+ self.epilogue()
+ self.copy_string()
+ self.escape_string()
+ self.escape_string_twice()
+ self.skip_one()
+ self.skip_key_value()
+ self.mismatch_error()
+ self.type_error()
+ self.field_error()
+ self.range_error()
+ self.stack_error()
+ self.base64_error()
+ self.parsing_error()
+}
+
+/** Assembler Stages **/
+
+var _OpFuncTab = [256]func(*_Assembler, *_Instr) {
+ _OP_any : (*_Assembler)._asm_OP_any,
+ _OP_dyn : (*_Assembler)._asm_OP_dyn,
+ _OP_str : (*_Assembler)._asm_OP_str,
+ _OP_bin : (*_Assembler)._asm_OP_bin,
+ _OP_bool : (*_Assembler)._asm_OP_bool,
+ _OP_num : (*_Assembler)._asm_OP_num,
+ _OP_i8 : (*_Assembler)._asm_OP_i8,
+ _OP_i16 : (*_Assembler)._asm_OP_i16,
+ _OP_i32 : (*_Assembler)._asm_OP_i32,
+ _OP_i64 : (*_Assembler)._asm_OP_i64,
+ _OP_u8 : (*_Assembler)._asm_OP_u8,
+ _OP_u16 : (*_Assembler)._asm_OP_u16,
+ _OP_u32 : (*_Assembler)._asm_OP_u32,
+ _OP_u64 : (*_Assembler)._asm_OP_u64,
+ _OP_f32 : (*_Assembler)._asm_OP_f32,
+ _OP_f64 : (*_Assembler)._asm_OP_f64,
+ _OP_unquote : (*_Assembler)._asm_OP_unquote,
+ _OP_nil_1 : (*_Assembler)._asm_OP_nil_1,
+ _OP_nil_2 : (*_Assembler)._asm_OP_nil_2,
+ _OP_nil_3 : (*_Assembler)._asm_OP_nil_3,
+ _OP_deref : (*_Assembler)._asm_OP_deref,
+ _OP_index : (*_Assembler)._asm_OP_index,
+ _OP_is_null : (*_Assembler)._asm_OP_is_null,
+ _OP_is_null_quote : (*_Assembler)._asm_OP_is_null_quote,
+ _OP_map_init : (*_Assembler)._asm_OP_map_init,
+ _OP_map_key_i8 : (*_Assembler)._asm_OP_map_key_i8,
+ _OP_map_key_i16 : (*_Assembler)._asm_OP_map_key_i16,
+ _OP_map_key_i32 : (*_Assembler)._asm_OP_map_key_i32,
+ _OP_map_key_i64 : (*_Assembler)._asm_OP_map_key_i64,
+ _OP_map_key_u8 : (*_Assembler)._asm_OP_map_key_u8,
+ _OP_map_key_u16 : (*_Assembler)._asm_OP_map_key_u16,
+ _OP_map_key_u32 : (*_Assembler)._asm_OP_map_key_u32,
+ _OP_map_key_u64 : (*_Assembler)._asm_OP_map_key_u64,
+ _OP_map_key_f32 : (*_Assembler)._asm_OP_map_key_f32,
+ _OP_map_key_f64 : (*_Assembler)._asm_OP_map_key_f64,
+ _OP_map_key_str : (*_Assembler)._asm_OP_map_key_str,
+ _OP_map_key_utext : (*_Assembler)._asm_OP_map_key_utext,
+ _OP_map_key_utext_p : (*_Assembler)._asm_OP_map_key_utext_p,
+ _OP_array_skip : (*_Assembler)._asm_OP_array_skip,
+ _OP_array_clear : (*_Assembler)._asm_OP_array_clear,
+ _OP_array_clear_p : (*_Assembler)._asm_OP_array_clear_p,
+ _OP_slice_init : (*_Assembler)._asm_OP_slice_init,
+ _OP_slice_append : (*_Assembler)._asm_OP_slice_append,
+ _OP_object_skip : (*_Assembler)._asm_OP_object_skip,
+ _OP_object_next : (*_Assembler)._asm_OP_object_next,
+ _OP_struct_field : (*_Assembler)._asm_OP_struct_field,
+ _OP_unmarshal : (*_Assembler)._asm_OP_unmarshal,
+ _OP_unmarshal_p : (*_Assembler)._asm_OP_unmarshal_p,
+ _OP_unmarshal_text : (*_Assembler)._asm_OP_unmarshal_text,
+ _OP_unmarshal_text_p : (*_Assembler)._asm_OP_unmarshal_text_p,
+ _OP_lspace : (*_Assembler)._asm_OP_lspace,
+ _OP_match_char : (*_Assembler)._asm_OP_match_char,
+ _OP_check_char : (*_Assembler)._asm_OP_check_char,
+ _OP_load : (*_Assembler)._asm_OP_load,
+ _OP_save : (*_Assembler)._asm_OP_save,
+ _OP_drop : (*_Assembler)._asm_OP_drop,
+ _OP_drop_2 : (*_Assembler)._asm_OP_drop_2,
+ _OP_recurse : (*_Assembler)._asm_OP_recurse,
+ _OP_goto : (*_Assembler)._asm_OP_goto,
+ _OP_switch : (*_Assembler)._asm_OP_switch,
+ _OP_check_char_0 : (*_Assembler)._asm_OP_check_char_0,
+ _OP_dismatch_err : (*_Assembler)._asm_OP_dismatch_err,
+ _OP_go_skip : (*_Assembler)._asm_OP_go_skip,
+ _OP_add : (*_Assembler)._asm_OP_add,
+}
+
+func (self *_Assembler) instr(v *_Instr) {
+ if fn := _OpFuncTab[v.op()]; fn != nil {
+ fn(self, v)
+ } else {
+ panic(fmt.Sprintf("invalid opcode: %d", v.op()))
+ }
+}
+
+func (self *_Assembler) instrs() {
+ for i, v := range self.p {
+ self.Mark(i)
+ self.instr(&v)
+ self.debug_instr(i, &v)
+ }
+}
+
+func (self *_Assembler) epilogue() {
+ self.Mark(len(self.p))
+ self.Emit("XORL", _EP, _EP) // XORL EP, EP
+ self.Emit("MOVQ", _VAR_et, _ET) // MOVQ VAR_et, ET
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ", _LB_mismatch_error) // JNZ _LB_mismatch_error
+ self.Link(_LB_error) // _error:
+ self.Emit("MOVQ", _IC, _RET_rc) // MOVQ IC, rc<>+40(FP)
+ self.Emit("MOVQ", _ET, _RET_et) // MOVQ ET, et<>+48(FP)
+ self.Emit("MOVQ", _EP, _RET_ep) // MOVQ EP, ep<>+56(FP)
+ self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP
+ self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP
+ self.Emit("RET") // RET
+}
+
+func (self *_Assembler) prologue() {
+ self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP
+ self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP)
+ self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP
+ self.Emit("MOVQ", _ARG_sp, _IP) // MOVQ s.p<>+0(FP), IP
+ self.Emit("MOVQ", _ARG_sl, _IL) // MOVQ s.l<>+8(FP), IL
+ self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC
+ self.Emit("MOVQ", _ARG_vp, _VP) // MOVQ vp<>+24(FP), VP
+ self.Emit("MOVQ", _ARG_sb, _ST) // MOVQ vp<>+32(FP), ST
+ // initialize digital buffer first
+ self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_st_Dc) // MOVQ $_MaxDigitNums, ss.Dcap
+ self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX
+ self.Emit("MOVQ", _AX, _VAR_st_Db) // MOVQ AX, ss.Dbuf
+ self.Emit("XORL", _AX, _AX) // XORL AX, AX
+ self.Emit("MOVQ", _AX, _VAR_et) // MOVQ AX, ss.Dp
+}
+
+/** Function Calling Helpers **/
+
+var _REG_go = []obj.Addr {
+ _ST,
+ _VP,
+ _IP,
+ _IL,
+ _IC,
+}
+
+func (self *_Assembler) save(r ...obj.Addr) {
+ for i, v := range r {
+ if i > _FP_saves / 8 - 1 {
+ panic("too many registers to save")
+ } else {
+ self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8))
+ }
+ }
+}
+
+func (self *_Assembler) load(r ...obj.Addr) {
+ for i, v := range r {
+ if i > _FP_saves / 8 - 1 {
+ panic("too many registers to load")
+ } else {
+ self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v)
+ }
+ }
+}
+
+func (self *_Assembler) call(fn obj.Addr) {
+ self.Emit("MOVQ", fn, _AX) // MOVQ ${fn}, AX
+ self.Rjmp("CALL", _AX) // CALL AX
+}
+
+func (self *_Assembler) call_go(fn obj.Addr) {
+ self.save(_REG_go...) // SAVE $REG_go
+ self.call(fn) // CALL ${fn}
+ self.load(_REG_go...) // LOAD $REG_go
+}
+
+func (self *_Assembler) call_sf(fn obj.Addr) {
+ self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI
+ self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP)
+ self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI
+ self.Emit("LEAQ", jit.Ptr(_ST, _FsmOffset), _DX) // LEAQ _FsmOffset(ST), DX
+ self.Emit("MOVQ", _ARG_fv, _CX)
+ self.call(fn) // CALL ${fn}
+ self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC
+}
+
+func (self *_Assembler) call_vf(fn obj.Addr) {
+ self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI
+ self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP)
+ self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI
+ self.Emit("LEAQ", _VAR_st, _DX) // LEAQ st, DX
+ self.call(fn) // CALL ${fn}
+ self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC
+}
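+
+// Editor's note (not in the original source): call_sf and call_vf invoke the
+// native scanning/parsing primitives with a C-like register convention:
+// DI = &s, SI = &ic, DX = FSM state (call_sf) or the parsed-value buffer
+// (call_vf), CX carrying the flag bits where required, and the result in AX.
+// The cursor is written back through *SI, which is why IC is spilled to
+// _ARG_ic before the call and reloaded afterwards.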
+
+/** Assembler Error Handlers **/
+
+var (
+ _F_convT64 = jit.Func(convT64)
+ _F_error_wrap = jit.Func(error_wrap)
+ _F_error_type = jit.Func(error_type)
+ _F_error_field = jit.Func(error_field)
+ _F_error_value = jit.Func(error_value)
+ _F_error_mismatch = jit.Func(error_mismatch)
+)
+
+var (
+ _I_int8 , _T_int8 = rtype(reflect.TypeOf(int8(0)))
+ _I_int16 , _T_int16 = rtype(reflect.TypeOf(int16(0)))
+ _I_int32 , _T_int32 = rtype(reflect.TypeOf(int32(0)))
+ _I_uint8 , _T_uint8 = rtype(reflect.TypeOf(uint8(0)))
+ _I_uint16 , _T_uint16 = rtype(reflect.TypeOf(uint16(0)))
+ _I_uint32 , _T_uint32 = rtype(reflect.TypeOf(uint32(0)))
+ _I_float32 , _T_float32 = rtype(reflect.TypeOf(float32(0)))
+)
+
+var (
+ _T_error = rt.UnpackType(errorType)
+ _I_base64_CorruptInputError = jit.Itab(_T_error, base64CorruptInputError)
+)
+
+var (
+ _V_stackOverflow = jit.Imm(int64(uintptr(unsafe.Pointer(&stackOverflow))))
+ _I_json_UnsupportedValueError = jit.Itab(_T_error, reflect.TypeOf(new(json.UnsupportedValueError)))
+)
+
+func (self *_Assembler) type_error() {
+ self.Link(_LB_type_error) // _type_error:
+ self.Emit("MOVQ", _ET, jit.Ptr(_SP, 0)) // MOVQ ET, (SP)
+ self.call_go(_F_error_type) // CALL_GO error_type
+ self.Emit("MOVQ", jit.Ptr(_SP, 8), _ET) // MOVQ 8(SP), ET
+ self.Emit("MOVQ", jit.Ptr(_SP, 16), _EP) // MOVQ 16(SP), EP
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+
+func (self *_Assembler) mismatch_error() {
+ self.Link(_LB_mismatch_error) // _mismatch_error:
+ self.Emit("MOVQ", _ARG_sp, _AX)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVQ", _ARG_sl, _CX)
+ self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+ self.Emit("MOVQ", _VAR_ic, _AX)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP)
+ self.Emit("MOVQ", _VAR_et, _CX)
+ self.Emit("MOVQ", _CX, jit.Ptr(_SP, 24)) // MOVQ CX, 24(SP)
+ self.call_go(_F_error_mismatch) // CALL_GO error_mismatch
+ self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+ self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) _asm_OP_dismatch_err(p *_Instr) {
+ self.Emit("MOVQ", _IC, _VAR_ic)
+ self.Emit("MOVQ", jit.Type(p.vt()), _ET)
+ self.Emit("MOVQ", _ET, _VAR_et)
+}
+
+func (self *_Assembler) _asm_OP_go_skip(p *_Instr) {
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Xref(p.vi(), 4)
+ self.Emit("MOVQ", _R9, _VAR_pc)
+ self.Sjmp("JMP" , _LB_skip_one) // JMP _skip_one
+}
+
+func (self *_Assembler) skip_one() {
+ self.Link(_LB_skip_one) // _skip_one:
+ self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC
+ self.call_sf(_F_skip_one) // CALL_SF skip_one
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+ self.Emit("MOVQ" , _VAR_pc, _R9) // MOVQ pc, R9
+ self.Rjmp("JMP" , _R9) // JMP (R9)
+}
+
+
+func (self *_Assembler) skip_key_value() {
+ self.Link(_LB_skip_key_value) // _skip_key_value:
+ // skip the key
+ self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC
+ self.call_sf(_F_skip_one) // CALL_SF skip_one
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+ // match char ':'
+ self.lspace("_global_1")
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(':'))
+ self.Sjmp("JNE" , _LB_parsing_error_v) // JNE _parse_error_v
+ self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
+ self.lspace("_global_2")
+ // skip the value
+ self.call_sf(_F_skip_one) // CALL_SF skip_one
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+ // jump back to specified address
+ self.Emit("MOVQ" , _VAR_pc, _R9) // MOVQ pc, R9
+ self.Rjmp("JMP" , _R9) // JMP (R9)
+}
+
+func (self *_Assembler) field_error() {
+ self.Link(_LB_field_error) // _field_error:
+ self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0)) // MOVOU X0, (SP)
+ self.call_go(_F_error_field) // CALL_GO error_field
+ self.Emit("MOVQ" , jit.Ptr(_SP, 16), _ET) // MOVQ 16(SP), ET
+ self.Emit("MOVQ" , jit.Ptr(_SP, 24), _EP) // MOVQ 24(SP), EP
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) range_error() {
+ self.Link(_LB_range_error) // _range_error:
+ self.slice_from(_VAR_st_Ep, 0) // SLICE st.Ep, $0
+ self.Emit("MOVQ", _DI, jit.Ptr(_SP, 0)) // MOVQ DI, (SP)
+ self.Emit("MOVQ", _SI, jit.Ptr(_SP, 8)) // MOVQ SI, 8(SP)
+ self.Emit("MOVQ", _ET, jit.Ptr(_SP, 16)) // MOVQ ET, 16(SP)
+ self.Emit("MOVQ", _EP, jit.Ptr(_SP, 24)) // MOVQ EP, 24(SP)
+ self.call_go(_F_error_value) // CALL_GO error_value
+ self.Emit("MOVQ", jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+ self.Emit("MOVQ", jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) stack_error() {
+ self.Link(_LB_stack_error) // _stack_error:
+ self.Emit("MOVQ", _V_stackOverflow, _EP) // MOVQ ${_V_stackOverflow}, EP
+ self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ ${_I_json_UnsupportedValueError}, ET
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) base64_error() {
+ self.Link(_LB_base64_error)
+ self.Emit("NEGQ", _AX) // NEGQ AX
+ self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.call_go(_F_convT64) // CALL_GO convT64
+ self.Emit("MOVQ", jit.Ptr(_SP, 8), _EP) // MOVQ 8(SP), EP
+ self.Emit("MOVQ", _I_base64_CorruptInputError, _ET) // MOVQ ${itab(base64.CorruptInputError)}, ET
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) parsing_error() {
+ self.Link(_LB_eof_error) // _eof_error:
+ self.Emit("MOVQ" , _IL, _IC) // MOVQ IL, IC
+ self.Emit("MOVL" , jit.Imm(int64(types.ERR_EOF)), _EP) // MOVL ${types.ERR_EOF}, EP
+ self.Sjmp("JMP" , _LB_parsing_error) // JMP _parsing_error
+ self.Link(_LB_unquote_error) // _unquote_error:
+ self.Emit("SUBQ" , _VAR_sr, _SI) // SUBQ sr, SI
+ self.Emit("SUBQ" , _SI, _IC) // SUBQ IL, IC
+ self.Link(_LB_parsing_error_v) // _parsing_error_v:
+ self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP
+ self.Emit("NEGQ" , _EP) // NEGQ EP
+ self.Sjmp("JMP" , _LB_parsing_error) // JMP _parsing_error
+ self.Link(_LB_char_m3_error) // _char_m3_error:
+ self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC
+ self.Link(_LB_char_m2_error) // _char_m2_error:
+ self.Emit("SUBQ" , jit.Imm(2), _IC) // SUBQ $2, IC
+ self.Sjmp("JMP" , _LB_char_0_error) // JMP _char_0_error
+ self.Link(_LB_im_error) // _im_error:
+ self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPB CX, (IP)(IC)
+ self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error
+ self.Emit("SHRL" , jit.Imm(8), _CX) // SHRL $8, CX
+ self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 1)) // CMPB CX, 1(IP)(IC)
+ self.Sjmp("JNE" , _LB_char_1_error) // JNE _char_1_error
+ self.Emit("SHRL" , jit.Imm(8), _CX) // SHRL $8, CX
+ self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 2)) // CMPB CX, 2(IP)(IC)
+ self.Sjmp("JNE" , _LB_char_2_error) // JNE _char_2_error
+ self.Sjmp("JMP" , _LB_char_3_error) // JNE _char_3_error
+ self.Link(_LB_char_4_error) // _char_4_error:
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Link(_LB_char_3_error) // _char_3_error:
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Link(_LB_char_2_error) // _char_2_error:
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Link(_LB_char_1_error) // _char_1_error:
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Link(_LB_char_0_error) // _char_0_error:
+ self.Emit("MOVL" , jit.Imm(int64(types.ERR_INVALID_CHAR)), _EP) // MOVL ${types.ERR_INVALID_CHAR}, EP
+ self.Link(_LB_parsing_error) // _parsing_error:
+ self.Emit("MOVOU", _ARG_s, _X0) // MOVOU s, X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0)) // MOVOU X0, (SP)
+ self.Emit("MOVQ" , _IC, jit.Ptr(_SP, 16)) // MOVQ IC, 16(SP)
+ self.Emit("MOVQ" , _EP, jit.Ptr(_SP, 24)) // MOVQ EP, 24(SP)
+ self.call_go(_F_error_wrap) // CALL_GO error_wrap
+ self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+/** Memory Management Routines **/
+
+var (
+ _T_byte = jit.Type(byteType)
+ _F_mallocgc = jit.Func(mallocgc)
+)
+
+func (self *_Assembler) malloc(nb obj.Addr, ret obj.Addr) {
+ self.Emit("XORL", _AX, _AX) // XORL AX, AX
+ self.Emit("MOVQ", _T_byte, _CX) // MOVQ ${type(byte)}, CX
+ self.Emit("MOVQ", nb, jit.Ptr(_SP, 0)) // MOVQ ${nb}, (SP)
+ self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP)
+ self.call_go(_F_mallocgc) // CALL_GO mallocgc
+ self.Emit("MOVQ", jit.Ptr(_SP, 24), ret) // MOVQ 24(SP), ${ret}
+}
+
+func (self *_Assembler) valloc(vt reflect.Type, ret obj.Addr) {
+ self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ ${vt.Size()}, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVQ", jit.Type(vt), _AX) // MOVQ ${vt}, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
+ self.Emit("MOVB", jit.Imm(1), jit.Ptr(_SP, 16)) // MOVB $1, 16(SP)
+ self.call_go(_F_mallocgc) // CALL_GO mallocgc
+ self.Emit("MOVQ", jit.Ptr(_SP, 24), ret) // MOVQ 24(SP), ${ret}
+}
+
+func (self *_Assembler) vfollow(vt reflect.Type) {
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n}
+ self.valloc(vt, _AX) // VALLOC ${vt}, AX
+ self.WritePtrAX(1, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
+ self.Link("_end_{n}") // _end_{n}:
+ self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP
+}
+
+/** Value Parsing Routines **/
+
+var (
+ _F_vstring = jit.Imm(int64(native.S_vstring))
+ _F_vnumber = jit.Imm(int64(native.S_vnumber))
+ _F_vsigned = jit.Imm(int64(native.S_vsigned))
+ _F_vunsigned = jit.Imm(int64(native.S_vunsigned))
+)
+
+func (self *_Assembler) check_err(vt reflect.Type, pin string, pin2 int) {
+ self.Emit("MOVQ" , _VAR_st_Vt, _AX) // MOVQ st.Vt, AX
+ self.Emit("TESTQ", _AX, _AX) // CMPQ AX, ${native.V_STRING}
+ // try to skip the value
+ if vt != nil {
+ self.Sjmp("JNS" , "_check_err_{n}") // JNE _parsing_error_v
+ self.Emit("MOVQ", jit.Type(vt), _ET)
+ self.Emit("MOVQ", _ET, _VAR_et)
+ if pin2 != -1 {
+ self.Emit("SUBQ", jit.Imm(1), _BP)
+ self.Emit("MOVQ", _BP, _VAR_ic)
+ self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Xref(pin2, 4)
+ self.Emit("MOVQ", _R9, _VAR_pc)
+ self.Sjmp("JMP" , _LB_skip_key_value)
+ } else {
+ self.Emit("MOVQ", _BP, _VAR_ic)
+ self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref(pin, 4)
+ self.Emit("MOVQ", _R9, _VAR_pc)
+ self.Sjmp("JMP" , _LB_skip_one)
+ }
+ self.Link("_check_err_{n}")
+ } else {
+ self.Sjmp("JS" , _LB_parsing_error_v) // JNE _parsing_error_v
+ }
+}
+
+func (self *_Assembler) check_eof(d int64) {
+ if d == 1 {
+ self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error
+ } else {
+ self.Emit("LEAQ", jit.Ptr(_IC, d), _AX) // LEAQ ${d}(IC), AX
+ self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JA" , _LB_eof_error) // JA _eof_error
+ }
+}
+
+func (self *_Assembler) parse_string() { // parse_string takes a validate flag as its last parameter
+ self.Emit("MOVQ", _ARG_fv, _CX)
+ self.call_vf(_F_vstring)
+ self.check_err(nil, "", -1)
+}
+
+func (self *_Assembler) parse_number(vt reflect.Type, pin string, pin2 int) {
+ self.Emit("MOVQ", _IC, _BP)
+ self.call_vf(_F_vnumber) // call vnumber
+ self.check_err(vt, pin, pin2)
+}
+
+func (self *_Assembler) parse_signed(vt reflect.Type, pin string, pin2 int) {
+ self.Emit("MOVQ", _IC, _BP)
+ self.call_vf(_F_vsigned)
+ self.check_err(vt, pin, pin2)
+}
+
+func (self *_Assembler) parse_unsigned(vt reflect.Type, pin string, pin2 int) {
+ self.Emit("MOVQ", _IC, _BP)
+ self.call_vf(_F_vunsigned)
+ self.check_err(vt, pin, pin2)
+}
+
+// Pointer: DI, Size: SI, Return: R9
+func (self *_Assembler) copy_string() {
+ self.Link("_copy_string")
+ self.Emit("MOVQ", _DI, _VAR_bs_p)
+ self.Emit("MOVQ", _SI, _VAR_bs_n)
+ self.Emit("MOVQ", _R9, _VAR_bs_LR)
+ self.malloc(_SI, _AX)
+ self.Emit("MOVQ", _AX, _VAR_sv_p)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0))
+ self.Emit("MOVQ", _VAR_bs_p, _DI)
+ self.Emit("MOVQ", _DI, jit.Ptr(_SP, 8))
+ self.Emit("MOVQ", _VAR_bs_n, _SI)
+ self.Emit("MOVQ", _SI, jit.Ptr(_SP, 16))
+ self.call_go(_F_memmove)
+ self.Emit("MOVQ", _VAR_sv_p, _DI)
+ self.Emit("MOVQ", _VAR_bs_n, _SI)
+ self.Emit("MOVQ", _VAR_bs_LR, _R9)
+ self.Rjmp("JMP", _R9)
+}
+
+// Pointer: DI, Size: SI, Return: R9
+func (self *_Assembler) escape_string() {
+ self.Link("_escape_string")
+ self.Emit("MOVQ" , _DI, _VAR_bs_p)
+ self.Emit("MOVQ" , _SI, _VAR_bs_n)
+ self.Emit("MOVQ" , _R9, _VAR_bs_LR)
+ self.malloc(_SI, _DX) // MALLOC SI, DX
+ self.Emit("MOVQ" , _DX, _VAR_sv_p)
+ self.Emit("MOVQ" , _VAR_bs_p, _DI)
+ self.Emit("MOVQ" , _VAR_bs_n, _SI)
+ self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX
+ self.Emit("XORL" , _R8, _R8) // XORL R8, R8
+ self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, fv
+ self.Emit("SETCC", _R8) // SETCC R8
+ self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8
+ self.call(_F_unquote) // CALL unquote
+ self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI
+ self.Emit("ADDQ" , jit.Imm(1), _SI) // ADDQ $1, SI
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error
+ self.Emit("MOVQ" , _AX, _SI)
+ self.Emit("MOVQ" , _VAR_sv_p, _DI)
+ self.Emit("MOVQ" , _VAR_bs_LR, _R9)
+ self.Rjmp("JMP", _R9)
+}
+
+func (self *_Assembler) escape_string_twice() {
+ self.Link("_escape_string_twice")
+ self.Emit("MOVQ" , _DI, _VAR_bs_p)
+ self.Emit("MOVQ" , _SI, _VAR_bs_n)
+ self.Emit("MOVQ" , _R9, _VAR_bs_LR)
+ self.malloc(_SI, _DX) // MALLOC SI, DX
+ self.Emit("MOVQ" , _DX, _VAR_sv_p)
+ self.Emit("MOVQ" , _VAR_bs_p, _DI)
+ self.Emit("MOVQ" , _VAR_bs_n, _SI)
+ self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX
+ self.Emit("MOVL" , jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8
+ self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, AX
+ self.Emit("XORL" , _AX, _AX) // XORL AX, AX
+ self.Emit("SETCC", _AX) // SETCC AX
+ self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _AX) // SHLQ ${types.B_UNICODE_REPLACE}, AX
+ self.Emit("ORQ" , _AX, _R8) // ORQ AX, R8
+ self.call(_F_unquote) // CALL unquote
+ self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI
+ self.Emit("ADDQ" , jit.Imm(3), _SI) // ADDQ $3, SI
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error
+ self.Emit("MOVQ" , _AX, _SI)
+ self.Emit("MOVQ" , _VAR_sv_p, _DI)
+ self.Emit("MOVQ" , _VAR_bs_LR, _R9)
+ self.Rjmp("JMP", _R9)
+}
+
+/** Range Checking Routines **/
+
+var (
+ _V_max_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_max_f32))))
+ _V_min_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_min_f32))))
+)
+
+var (
+ _Vp_max_f32 = new(float64)
+ _Vp_min_f32 = new(float64)
+)
+
+func init() {
+ *_Vp_max_f32 = math.MaxFloat32
+ *_Vp_min_f32 = -math.MaxFloat32
+}
+
+func (self *_Assembler) range_single() {
+ self.Emit("MOVSD" , _VAR_st_Dv, _X0) // MOVSD st.Dv, X0
+ self.Emit("MOVQ" , _V_max_f32, _AX) // MOVQ _max_f32, AX
+ self.Emit("MOVQ" , jit.Gitab(_I_float32), _ET) // MOVQ ${itab(float32)}, ET
+ self.Emit("MOVQ" , jit.Gtype(_T_float32), _EP) // MOVQ ${type(float32)}, EP
+ self.Emit("UCOMISD" , jit.Ptr(_AX, 0), _X0) // UCOMISD (AX), X0
+ self.Sjmp("JA" , _LB_range_error) // JA _range_error
+ self.Emit("MOVQ" , _V_min_f32, _AX) // MOVQ _min_f32, AX
+ self.Emit("MOVSD" , jit.Ptr(_AX, 0), _X1) // MOVSD (AX), X1
+ self.Emit("UCOMISD" , _X0, _X1) // UCOMISD X0, X1
+ self.Sjmp("JA" , _LB_range_error) // JA _range_error
+ self.Emit("CVTSD2SS", _X0, _X0) // CVTSD2SS X0, X0
+}
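+
+// Editor's note (not in the original source): range_single keeps the parsed
+// value as a float64 and rejects anything outside
+// [-math.MaxFloat32, math.MaxFloat32] via _LB_range_error before narrowing it
+// with CVTSD2SS, so out-of-range inputs report an error instead of silently
+// overflowing to +/-Inf.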
+
+func (self *_Assembler) range_signed(i *rt.GoItab, t *rt.GoType, a int64, b int64) {
+ self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+ self.Emit("MOVQ", jit.Gitab(i), _ET) // MOVQ ${i}, ET
+ self.Emit("MOVQ", jit.Gtype(t), _EP) // MOVQ ${t}, EP
+ self.Emit("CMPQ", _AX, jit.Imm(a)) // CMPQ AX, ${a}
+ self.Sjmp("JL" , _LB_range_error) // JL _range_error
+ self.Emit("CMPQ", _AX, jit.Imm(b)) // CMPQ AX, ${B}
+ self.Sjmp("JG" , _LB_range_error) // JG _range_error
+}
+
+func (self *_Assembler) range_unsigned(i *rt.GoItab, t *rt.GoType, v uint64) {
+ self.Emit("MOVQ" , _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+ self.Emit("MOVQ" , jit.Gitab(i), _ET) // MOVQ ${i}, ET
+ self.Emit("MOVQ" , jit.Gtype(t), _EP) // MOVQ ${t}, EP
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_range_error) // JS _range_error
+ self.Emit("CMPQ" , _AX, jit.Imm(int64(v))) // CMPQ AX, ${a}
+ self.Sjmp("JA" , _LB_range_error) // JA _range_error
+}
+
+/** String Manipulating Routines **/
+
+var (
+ _F_unquote = jit.Imm(int64(native.S_unquote))
+)
+
+func (self *_Assembler) slice_from(p obj.Addr, d int64) {
+ self.Emit("MOVQ", p, _SI) // MOVQ ${p}, SI
+ self.slice_from_r(_SI, d) // SLICE_R SI, ${d}
+}
+
+func (self *_Assembler) slice_from_r(p obj.Addr, d int64) {
+ self.Emit("LEAQ", jit.Sib(_IP, p, 1, 0), _DI) // LEAQ (IP)(${p}), DI
+ self.Emit("NEGQ", p) // NEGQ ${p}
+ self.Emit("LEAQ", jit.Sib(_IC, p, 1, d), _SI) // LEAQ d(IC)(${p}), SI
+}
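+
+// Editor's note (not in the original source): slice_from_r turns a saved start
+// offset p and the current cursor into a (pointer, length) pair:
+// DI = IP + p points at the first byte, and after NEGQ p,
+// SI = IC + (-p) + d = IC - p + d is the byte count (d adjusts for trailing
+// characters such as a closing quote).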
+
+func (self *_Assembler) unquote_once(p obj.Addr, n obj.Addr, stack bool, copy bool) {
+ self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1
+ self.Emit("CMPQ" , _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1
+ self.Sjmp("JE" , "_noescape_{n}") // JE _noescape_{n}
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_unquote_once_write_{n}", 4)
+ self.Sjmp("JMP" , "_escape_string")
+ self.Link("_noescape_{n}") // _noescape_{n}:
+ if copy {
+ self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv)
+ self.Sjmp("JNC", "_unquote_once_write_{n}")
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_unquote_once_write_{n}", 4)
+ self.Sjmp("JMP", "_copy_string")
+ }
+ self.Link("_unquote_once_write_{n}")
+ self.Emit("MOVQ" , _SI, n) // MOVQ SI, ${n}
+ if stack {
+ self.Emit("MOVQ", _DI, p)
+ } else {
+ self.WriteRecNotAX(10, _DI, p, false, false)
+ }
+}
+
+func (self *_Assembler) unquote_twice(p obj.Addr, n obj.Addr, stack bool) {
+ self.Emit("CMPQ" , _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1
+ self.Sjmp("JE" , _LB_eof_error) // JE _eof_error
+ self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -3), jit.Imm('\\')) // CMPB -3(IP)(IC), $'\\'
+ self.Sjmp("JNE" , _LB_char_m3_error) // JNE _char_m3_error
+ self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -2), jit.Imm('"')) // CMPB -2(IP)(IC), $'"'
+ self.Sjmp("JNE" , _LB_char_m2_error) // JNE _char_m2_error
+ self.slice_from(_VAR_st_Iv, -3) // SLICE st.Iv, $-3
+ self.Emit("MOVQ" , _SI, _AX) // MOVQ SI, AX
+ self.Emit("ADDQ" , _VAR_st_Iv, _AX) // ADDQ st.Iv, AX
+ self.Emit("CMPQ" , _VAR_st_Ep, _AX) // CMPQ st.Ep, AX
+ self.Sjmp("JE" , "_noescape_{n}") // JE _noescape_{n}
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_unquote_twice_write_{n}", 4)
+ self.Sjmp("JMP" , "_escape_string_twice")
+ self.Link("_noescape_{n}") // _noescape_{n}:
+ self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv)
+ self.Sjmp("JNC", "_unquote_twice_write_{n}")
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_unquote_twice_write_{n}", 4)
+ self.Sjmp("JMP", "_copy_string")
+ self.Link("_unquote_twice_write_{n}")
+ self.Emit("MOVQ" , _SI, n) // MOVQ SI, ${n}
+ if stack {
+ self.Emit("MOVQ", _DI, p)
+ } else {
+ self.WriteRecNotAX(12, _DI, p, false, false)
+ }
+}
+
+/** Memory Clearing Routines **/
+
+var (
+ _F_memclrHasPointers = jit.Func(memclrHasPointers)
+ _F_memclrNoHeapPointers = jit.Func(memclrNoHeapPointers)
+)
+
+func (self *_Assembler) mem_clear_fn(ptrfree bool) {
+ if !ptrfree {
+ self.call_go(_F_memclrHasPointers)
+ } else {
+ self.call_go(_F_memclrNoHeapPointers)
+ }
+}
+
+func (self *_Assembler) mem_clear_rem(size int64, ptrfree bool) {
+ self.Emit("MOVQ", jit.Imm(size), _CX) // MOVQ ${size}, CX
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _AX) // MOVQ (ST)(AX), AX
+ self.Emit("SUBQ", _VP, _AX) // SUBQ VP, AX
+ self.Emit("ADDQ", _AX, _CX) // ADDQ AX, CX
+ self.Emit("MOVQ", _VP, jit.Ptr(_SP, 0)) // MOVQ VP, (SP)
+ self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+ self.mem_clear_fn(ptrfree) // CALL_GO memclr{Has,NoHeap}Pointers
+}
+
+/** Map Assigning Routines **/
+
+var (
+ _F_mapassign = jit.Func(mapassign)
+ _F_mapassign_fast32 = jit.Func(mapassign_fast32)
+ _F_mapassign_faststr = jit.Func(mapassign_faststr)
+ _F_mapassign_fast64ptr = jit.Func(mapassign_fast64ptr)
+)
+
+var (
+ _F_decodeJsonUnmarshaler obj.Addr
+ _F_decodeTextUnmarshaler obj.Addr
+)
+
+func init() {
+ _F_decodeJsonUnmarshaler = jit.Func(decodeJsonUnmarshaler)
+ _F_decodeTextUnmarshaler = jit.Func(decodeTextUnmarshaler)
+}
+
+func (self *_Assembler) mapaccess_ptr(t reflect.Type) {
+ if rt.MapType(rt.UnpackType(t)).IndirectElem() {
+ self.vfollow(t.Elem())
+ }
+}
+
+func (self *_Assembler) mapassign_std(t reflect.Type, v obj.Addr) {
+ self.Emit("LEAQ", v, _AX) // LEAQ ${v}, AX
+ self.mapassign_call(t, _F_mapassign) // MAPASSIGN ${t}, mapassign
+}
+
+func (self *_Assembler) mapassign_str_fast(t reflect.Type, p obj.Addr, n obj.Addr) {
+ self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP)
+ self.Emit("MOVQ", p, jit.Ptr(_SP, 16)) // MOVQ ${p}, 16(SP)
+ self.Emit("MOVQ", n, jit.Ptr(_SP, 24)) // MOVQ ${n}, 24(SP)
+ self.call_go(_F_mapassign_faststr) // CALL_GO ${fn}
+ self.Emit("MOVQ", jit.Ptr(_SP, 32), _VP) // MOVQ 32(SP), VP
+ self.mapaccess_ptr(t)
+}
+
+func (self *_Assembler) mapassign_call(t reflect.Type, fn obj.Addr) {
+ self.Emit("MOVQ", jit.Type(t), _SI) // MOVQ ${t}, SI
+ self.Emit("MOVQ", _SI, jit.Ptr(_SP, 0)) // MOVQ SI, (SP)
+ self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16)) // MOVQ AX, 16(SP)
+ self.call_go(fn) // CALL_GO ${fn}
+ self.Emit("MOVQ", jit.Ptr(_SP, 24), _VP) // MOVQ 24(SP), VP
+}
+
+func (self *_Assembler) mapassign_fastx(t reflect.Type, fn obj.Addr) {
+ self.mapassign_call(t, fn)
+ self.mapaccess_ptr(t)
+}
+
+func (self *_Assembler) mapassign_utext(t reflect.Type, addressable bool) {
+ pv := false
+ vk := t.Key()
+ tk := t.Key()
+
+ /* deref pointer if needed */
+ if vk.Kind() == reflect.Ptr {
+ pv = true
+ vk = vk.Elem()
+ }
+
+ /* addressable value with pointer receiver */
+ if addressable {
+ pv = false
+ tk = reflect.PtrTo(tk)
+ }
+
+ /* allocate the key, and call the unmarshaler */
+ self.valloc(vk, _DI) // VALLOC ${vk}, DI
+ // must spill vk pointer since next call_go may invoke GC
+ self.Emit("MOVQ" , _DI, _VAR_vk)
+ self.Emit("MOVQ" , jit.Type(tk), _AX) // MOVQ ${tk}, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVQ" , _DI, jit.Ptr(_SP, 8)) // MOVQ DI, 8(SP)
+ self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_SP, 16)) // MOVOU X0, 16(SP)
+ self.call_go(_F_decodeTextUnmarshaler) // CALL_GO decodeTextUnmarshaler
+ self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+ self.Emit("MOVQ" , _VAR_vk, _AX)
+
+ /* select the correct assignment function */
+ if !pv {
+ self.mapassign_call(t, _F_mapassign)
+ } else {
+ self.mapassign_fastx(t, _F_mapassign_fast64ptr)
+ }
+}
+
+/** External Unmarshaler Routines **/
+
+var (
+ _F_skip_one = jit.Imm(int64(native.S_skip_one))
+ _F_skip_number = jit.Imm(int64(native.S_skip_number))
+)
+
+func (self *_Assembler) unmarshal_json(t reflect.Type, deref bool) {
+ self.call_sf(_F_skip_one) // CALL_SF skip_one
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+ self.slice_from_r(_AX, 0) // SLICE_R AX, $0
+ self.Emit("MOVQ" , _DI, _VAR_sv_p) // MOVQ DI, sv.p
+ self.Emit("MOVQ" , _SI, _VAR_sv_n) // MOVQ SI, sv.n
+ self.unmarshal_func(t, _F_decodeJsonUnmarshaler, deref) // UNMARSHAL json, ${t}, ${deref}
+}
+
+func (self *_Assembler) unmarshal_text(t reflect.Type, deref bool) {
+ self.parse_string() // PARSE STRING
+ self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n
+ self.unmarshal_func(t, _F_decodeTextUnmarshaler, deref) // UNMARSHAL text, ${t}, ${deref}
+}
+
+func (self *_Assembler) unmarshal_func(t reflect.Type, fn obj.Addr, deref bool) {
+ pt := t
+ vk := t.Kind()
+
+ /* allocate the field if needed */
+ if deref && vk == reflect.Ptr {
+ self.Emit("MOVQ" , _VP, _AX) // MOVQ VP, AX
+ self.Emit("MOVQ" , jit.Ptr(_AX, 0), _AX) // MOVQ (AX), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNZ" , "_deref_{n}") // JNZ _deref_{n}
+ self.valloc(t.Elem(), _AX) // VALLOC ${t.Elem()}, AX
+ self.WritePtrAX(3, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
+ self.Link("_deref_{n}") // _deref_{n}:
+ }
+
+ /* set value type */
+ self.Emit("MOVQ", jit.Type(pt), _CX) // MOVQ ${pt}, CX
+ self.Emit("MOVQ", _CX, jit.Ptr(_SP, 0)) // MOVQ CX, (SP)
+
+ /* set value pointer */
+ if deref && vk == reflect.Ptr {
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
+ } else {
+ self.Emit("MOVQ", _VP, jit.Ptr(_SP, 8)) // MOVQ VP, 8(SP)
+ }
+
+ /* set the source string and call the unmarshaler */
+ self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_SP, 16)) // MOVOU X0, 16(SP)
+ self.call_go(fn) // CALL_GO ${fn}
+ self.Emit("MOVQ" , jit.Ptr(_SP, 32), _ET) // MOVQ 32(SP), ET
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _EP) // MOVQ 40(SP), EP
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+}
+
+/** Dynamic Decoding Routine **/
+
+var (
+ _F_decodeTypedPointer obj.Addr
+)
+
+func init() {
+ _F_decodeTypedPointer = jit.Func(decodeTypedPointer)
+}
+
+func (self *_Assembler) decode_dynamic(vt obj.Addr, vp obj.Addr) {
+ self.Emit("MOVQ" , _ARG_fv, _CX) // MOVQ fv, CX
+ self.Emit("MOVOU", _ARG_sp, _X0) // MOVOU sp, X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_SP, 0)) // MOVOU X0, (SP)
+ self.Emit("MOVQ" , _IC, jit.Ptr(_SP, 16)) // MOVQ IC, 16(SP)
+ self.Emit("MOVQ" , vt, jit.Ptr(_SP, 24)) // MOVQ ${vt}, 24(SP)
+ self.Emit("MOVQ" , vp, jit.Ptr(_SP, 32)) // MOVQ ${vp}, 32(SP)
+ self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 40)) // MOVQ ST, 40(SP)
+ self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 48)) // MOVQ CX, 48(SP)
+ self.call_go(_F_decodeTypedPointer) // CALL_GO decodeTypedPointer
+ self.Emit("MOVQ" , jit.Ptr(_SP, 64), _ET) // MOVQ 64(SP), ET
+ self.Emit("MOVQ" , jit.Ptr(_SP, 72), _EP) // MOVQ 72(SP), EP
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+ self.Emit("MOVQ" , jit.Ptr(_SP, 56), _IC) // MOVQ 56(SP), IC
+}
+
+/** OpCode Assembler Functions **/
+
+var (
+ _F_memequal = jit.Func(memequal)
+ _F_memmove = jit.Func(memmove)
+ _F_growslice = jit.Func(growslice)
+ _F_makeslice = jit.Func(makeslice)
+ _F_makemap_small = jit.Func(makemap_small)
+ _F_mapassign_fast64 = jit.Func(mapassign_fast64)
+)
+
+var (
+ _F_lspace = jit.Imm(int64(native.S_lspace))
+ _F_strhash = jit.Imm(int64(caching.S_strhash))
+)
+
+var (
+ _F_b64decode = jit.Imm(int64(_subr__b64decode))
+ _F_decodeValue = jit.Imm(int64(_subr_decode_value))
+)
+
+var (
+ _F_skip_array = jit.Imm(int64(native.S_skip_array))
+ _F_skip_object = jit.Imm(int64(native.S_skip_object))
+)
+
+var (
+ _F_FieldMap_GetCaseInsensitive obj.Addr
+)
+
+const (
+ _MODE_AVX2 = 1 << 2
+)
+
+const (
+ _Fe_ID = int64(unsafe.Offsetof(caching.FieldEntry{}.ID))
+ _Fe_Name = int64(unsafe.Offsetof(caching.FieldEntry{}.Name))
+ _Fe_Hash = int64(unsafe.Offsetof(caching.FieldEntry{}.Hash))
+)
+
+const (
+ _Vk_Ptr = int64(reflect.Ptr)
+ _Gt_KindFlags = int64(unsafe.Offsetof(rt.GoType{}.KindFlags))
+)
+
+func init() {
+ _F_FieldMap_GetCaseInsensitive = jit.Func((*caching.FieldMap).GetCaseInsensitive)
+}
+
+func (self *_Assembler) _asm_OP_any(_ *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX
+ self.Emit("TESTQ" , _CX, _CX) // TESTQ CX, CX
+ self.Sjmp("JZ" , "_decode_{n}") // JZ _decode_{n}
+ self.Emit("CMPQ" , _CX, _VP) // CMPQ CX, VP
+ self.Sjmp("JE" , "_decode_{n}") // JE _decode_{n}
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
+ self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX
+ self.Emit("ANDL" , jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX
+ self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr}
+ self.Sjmp("JNE" , "_decode_{n}") // JNE _decode_{n}
+ self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI
+ self.decode_dynamic(_AX, _DI) // DECODE AX, DI
+ self.Sjmp("JMP" , "_decode_end_{n}") // JMP _decode_end_{n}
+ self.Link("_decode_{n}") // _decode_{n}:
+ self.Emit("MOVQ" , _ARG_fv, _DF) // MOVQ fv, DF
+ self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 0)) // MOVQ _ST, (SP)
+ self.call(_F_decodeValue) // CALL decodeValue
+ self.Emit("TESTQ" , _EP, _EP) // TESTQ EP, EP
+ self.Sjmp("JNZ" , _LB_parsing_error) // JNZ _parsing_error
+ self.Link("_decode_end_{n}") // _decode_end_{n}:
+}
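+
+// Editor's note (summary, not in the original source): _asm_OP_any decodes
+// into an interface{} slot. If the eface already holds a non-nil pointer-kind
+// value that does not point back into the slot itself, it decodes in place via
+// decode_dynamic; otherwise it falls back to the generic _subr_decode_value
+// routine, passing the flags in DF (R10) and the stack base on the stack.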
+
+func (self *_Assembler) _asm_OP_dyn(p *_Instr) {
+ self.Emit("MOVQ" , jit.Type(p.vt()), _ET) // MOVQ ${p.vt()}, ET
+ self.Emit("CMPQ" , jit.Ptr(_VP, 8), jit.Imm(0)) // CMPQ 8(VP), $0
+ self.Sjmp("JE" , _LB_type_error) // JE _type_error
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
+ self.Emit("MOVQ" , jit.Ptr(_AX, 8), _AX) // MOVQ 8(AX), AX
+ self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX
+ self.Emit("ANDL" , jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX
+ self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr}
+ self.Sjmp("JNE" , _LB_type_error) // JNE _type_error
+ self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI
+ self.decode_dynamic(_AX, _DI) // DECODE AX, DI
+ self.Link("_decode_end_{n}") // _decode_end_{n}:
+}
+
+func (self *_Assembler) _asm_OP_str(_ *_Instr) {
+ self.parse_string() // PARSE STRING
+ self.unquote_once(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false, true) // UNQUOTE once, (VP), 8(VP)
+}
+
+func (self *_Assembler) _asm_OP_bin(_ *_Instr) {
+ self.parse_string() // PARSE STRING
+ self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1
+ self.Emit("MOVQ" , _DI, jit.Ptr(_VP, 0)) // MOVQ DI, (VP)
+ self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP)
+ self.Emit("SHRQ" , jit.Imm(2), _SI) // SHRQ $2, SI
+ self.Emit("LEAQ" , jit.Sib(_SI, _SI, 2, 0), _SI) // LEAQ (SI)(SI*2), SI
+ self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP)
+ self.malloc(_SI, _SI) // MALLOC SI, SI
+
+ // TODO: due to base64x's bug, only use AVX mode now
+ self.Emit("MOVL", jit.Imm(_MODE_JSON), _CX) // MOVL $_MODE_JSON, CX
+
+ /* call the decoder */
+ self.Emit("XORL" , _DX, _DX) // XORL DX, DX
+ self.Emit("MOVQ" , _VP, _DI) // MOVQ VP, DI
+
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _R9) // MOVQ SI, (VP)
+ self.WriteRecNotAX(4, _SI, jit.Ptr(_VP, 0), true, false) // XCHGQ SI, (VP)
+ self.Emit("MOVQ" , _R9, _SI)
+
+ self.Emit("XCHGQ", _DX, jit.Ptr(_VP, 8)) // XCHGQ DX, 8(VP)
+ self.call(_F_b64decode) // CALL b64decode
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_base64_error) // JS _base64_error
+ self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
+}
+
+func (self *_Assembler) _asm_OP_bool(_ *_Instr) {
+ self.Emit("LEAQ", jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX
+ self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JA" , _LB_eof_error) // JA _eof_error
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('f')) // CMPB (IP)(IC), $'f'
+ self.Sjmp("JE" , "_false_{n}") // JE _false_{n}
+ self.Emit("MOVL", jit.Imm(_IM_true), _CX) // MOVL $"true", CX
+ self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC)
+ self.Sjmp("JE" , "_bool_true_{n}")
+
+ // try to skip the value
+ self.Emit("MOVQ", _IC, _VAR_ic)
+ self.Emit("MOVQ", _T_bool, _ET)
+ self.Emit("MOVQ", _ET, _VAR_et)
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_end_{n}", 4)
+ self.Emit("MOVQ", _R9, _VAR_pc)
+ self.Sjmp("JMP" , _LB_skip_one)
+
+ self.Link("_bool_true_{n}")
+ self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
+ self.Emit("MOVB", jit.Imm(1), jit.Ptr(_VP, 0)) // MOVB $1, (VP)
+ self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n}
+ self.Link("_false_{n}") // _false_{n}:
+ self.Emit("ADDQ", jit.Imm(1), _AX) // ADDQ $1, AX
+ self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JA" , _LB_eof_error) // JA _eof_error
+ self.Emit("MOVL", jit.Imm(_IM_alse), _CX) // MOVL $"alse", CX
+ self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC)
+ self.Sjmp("JNE" , _LB_im_error) // JNE _im_error
+ self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
+ self.Emit("XORL", _AX, _AX) // XORL AX, AX
+ self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP)
+ self.Link("_end_{n}") // _end_{n}:
+}
+
+func (self *_Assembler) _asm_OP_num(_ *_Instr) {
+ self.Emit("MOVQ", jit.Imm(0), _VAR_fl)
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"'))
+ self.Emit("MOVQ", _IC, _BP)
+ self.Sjmp("JNE", "_skip_number_{n}")
+ self.Emit("MOVQ", jit.Imm(1), _VAR_fl)
+ self.Emit("ADDQ", jit.Imm(1), _IC)
+ self.Link("_skip_number_{n}")
+
+ /* call skip_number */
+ self.call_sf(_F_skip_number) // CALL_SF skip_number
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNS" , "_num_next_{n}")
+
+ /* call skip one */
+ self.Emit("MOVQ", _BP, _VAR_ic)
+ self.Emit("MOVQ", _T_number, _ET)
+ self.Emit("MOVQ", _ET, _VAR_et)
+ self.Byte(0x4c, 0x8d, 0x0d)
+ self.Sref("_num_end_{n}", 4)
+ self.Emit("MOVQ", _R9, _VAR_pc)
+ self.Sjmp("JMP" , _LB_skip_one)
+
+ /* assign string */
+ self.Link("_num_next_{n}")
+ self.slice_from_r(_AX, 0)
+ self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv)
+ self.Sjmp("JNC", "_num_write_{n}")
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_num_write_{n}", 4)
+ self.Sjmp("JMP", "_copy_string")
+ self.Link("_num_write_{n}")
+ self.Emit("MOVQ", _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP)
+ self.WriteRecNotAX(13, _DI, jit.Ptr(_VP, 0), false, false)
+
+ /* check if quoted */
+ self.Emit("CMPQ", _VAR_fl, jit.Imm(1))
+ self.Sjmp("JNE", "_num_end_{n}")
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"'))
+ self.Sjmp("JNE", _LB_char_0_error)
+ self.Emit("ADDQ", jit.Imm(1), _IC)
+ self.Link("_num_end_{n}")
+}
+
+func (self *_Assembler) _asm_OP_i8(ins *_Instr) {
+ var pin = "_i8_end_{n}"
+ self.parse_signed(int8Type, pin, -1) // PARSE int8
+ self.range_signed(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8
+ self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_i16(ins *_Instr) {
+ var pin = "_i16_end_{n}"
+ self.parse_signed(int16Type, pin, -1) // PARSE int16
+ self.range_signed(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16
+ self.Emit("MOVW", _AX, jit.Ptr(_VP, 0)) // MOVW AX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_i32(ins *_Instr) {
+ var pin = "_i32_end_{n}"
+ self.parse_signed(int32Type, pin, -1) // PARSE int32
+ self.range_signed(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32
+ self.Emit("MOVL", _AX, jit.Ptr(_VP, 0)) // MOVL AX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_i64(ins *_Instr) {
+ var pin = "_i64_end_{n}"
+ self.parse_signed(int64Type, pin, -1) // PARSE int64
+ self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_u8(ins *_Instr) {
+ var pin = "_u8_end_{n}"
+ self.parse_unsigned(uint8Type, pin, -1) // PARSE uint8
+ self.range_unsigned(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8
+ self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_u16(ins *_Instr) {
+ var pin = "_u16_end_{n}"
+ self.parse_unsigned(uint16Type, pin, -1) // PARSE uint16
+ self.range_unsigned(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16
+ self.Emit("MOVW", _AX, jit.Ptr(_VP, 0)) // MOVW AX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_u32(ins *_Instr) {
+ var pin = "_u32_end_{n}"
+ self.parse_unsigned(uint32Type, pin, -1) // PARSE uint32
+ self.range_unsigned(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32
+ self.Emit("MOVL", _AX, jit.Ptr(_VP, 0)) // MOVL AX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_u64(ins *_Instr) {
+ var pin = "_u64_end_{n}"
+ self.parse_unsigned(uint64Type, pin, -1) // PARSE uint64
+ self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_f32(ins *_Instr) {
+ var pin = "_f32_end_{n}"
+ self.parse_number(float32Type, pin, -1) // PARSE NUMBER
+ self.range_single() // RANGE float32
+ self.Emit("MOVSS", _X0, jit.Ptr(_VP, 0)) // MOVSS X0, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_f64(ins *_Instr) {
+ var pin = "_f64_end_{n}"
+ self.parse_number(float64Type, pin, -1) // PARSE NUMBER
+ self.Emit("MOVSD", _VAR_st_Dv, _X0) // MOVSD st.Dv, X0
+ self.Emit("MOVSD", _X0, jit.Ptr(_VP, 0)) // MOVSD X0, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_unquote(ins *_Instr) {
+ self.check_eof(2)
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('\\')) // CMPB (IP)(IC), $'\\'
+ self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 1), jit.Imm('"')) // CMPB 1(IP)(IC), $'"'
+ self.Sjmp("JNE" , _LB_char_1_error) // JNE _char_1_error
+ self.Emit("ADDQ", jit.Imm(2), _IC) // ADDQ $2, IC
+ self.parse_string() // PARSE STRING
+ self.unquote_twice(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false) // UNQUOTE twice, (VP), 8(VP)
+}
+
+func (self *_Assembler) _asm_OP_nil_1(_ *_Instr) {
+ self.Emit("XORL", _AX, _AX) // XORL AX, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
+}
+
+func (self *_Assembler) _asm_OP_nil_2(_ *_Instr) {
+ self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP)
+}
+
+func (self *_Assembler) _asm_OP_nil_3(_ *_Instr) {
+ self.Emit("XORL" , _AX, _AX) // XORL AX, AX
+ self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP)
+ self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 16)) // MOVOU X0, 16(VP)
+}
+
+func (self *_Assembler) _asm_OP_deref(p *_Instr) {
+ self.vfollow(p.vt())
+}
+
+func (self *_Assembler) _asm_OP_index(p *_Instr) {
+ self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ ${p.vi()}, AX
+ self.Emit("ADDQ", _AX, _VP) // ADDQ _AX, _VP
+}
+
+func (self *_Assembler) _asm_OP_is_null(p *_Instr) {
+ self.Emit("LEAQ" , jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX
+ self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JA" , "_not_null_{n}") // JA _not_null_{n}
+ self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null"
+ self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
+ self.Xjmp("JE" , p.vi()) // JE {p.vi()}
+ self.Link("_not_null_{n}") // _not_null_{n}:
+}
+
+func (self *_Assembler) _asm_OP_is_null_quote(p *_Instr) {
+ self.Emit("LEAQ" , jit.Ptr(_IC, 5), _AX) // LEAQ 4(IC), AX
+ self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JA" , "_not_null_quote_{n}") // JA _not_null_quote_{n}
+ self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null"
+ self.Sjmp("JNE" , "_not_null_quote_{n}") // JNE _not_null_quote_{n}
+ self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 4), jit.Imm('"')) // CMPB 4(IP)(IC), $'"'
+ self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
+ self.Xjmp("JE" , p.vi()) // JE {p.vi()}
+ self.Link("_not_null_quote_{n}") // _not_null_quote_{n}:
+}
+
+func (self *_Assembler) _asm_OP_map_init(_ *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n}
+ self.call_go(_F_makemap_small) // CALL_GO makemap_small
+ self.Emit("MOVQ" , jit.Ptr(_SP, 0), _AX) // MOVQ (SP), AX
+ self.WritePtrAX(6, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
+ self.Link("_end_{n}") // _end_{n}:
+ self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP
+}
+
+func (self *_Assembler) _asm_OP_map_key_i8(p *_Instr) {
+ self.parse_signed(int8Type, "", p.vi()) // PARSE int8
+ self.range_signed(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8
+ self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int8, mapassign, st.Iv
+}
+
+func (self *_Assembler) _asm_OP_map_key_i16(p *_Instr) {
+ self.parse_signed(int16Type, "", p.vi()) // PARSE int16
+ self.range_signed(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16
+ self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int16, mapassign, st.Iv
+}
+
+func (self *_Assembler) _asm_OP_map_key_i32(p *_Instr) {
+ self.parse_signed(int32Type, "", p.vi()) // PARSE int32
+ self.range_signed(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32
+ if vt := p.vt(); !mapfast(vt) {
+ self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int32, mapassign, st.Iv
+ } else {
+ self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN int32, mapassign_fast32
+ }
+}
+
+func (self *_Assembler) _asm_OP_map_key_i64(p *_Instr) {
+ self.parse_signed(int64Type, "", p.vi()) // PARSE int64
+ if vt := p.vt(); !mapfast(vt) {
+ self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int64, mapassign, st.Iv
+ } else {
+ self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+ self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN int64, mapassign_fast64
+ }
+}
+
+func (self *_Assembler) _asm_OP_map_key_u8(p *_Instr) {
+ self.parse_unsigned(uint8Type, "", p.vi()) // PARSE uint8
+ self.range_unsigned(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8
+ self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint8, vt.Iv
+}
+
+func (self *_Assembler) _asm_OP_map_key_u16(p *_Instr) {
+ self.parse_unsigned(uint16Type, "", p.vi()) // PARSE uint16
+ self.range_unsigned(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16
+ self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint16, mapassign, st.Iv
+}
+
+func (self *_Assembler) _asm_OP_map_key_u32(p *_Instr) {
+ self.parse_unsigned(uint32Type, "", p.vi()) // PARSE uint32
+ self.range_unsigned(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32
+ if vt := p.vt(); !mapfast(vt) {
+ self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint32, mapassign, st.Iv
+ } else {
+ self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN uint32, mapassign_fast32
+ }
+}
+
+func (self *_Assembler) _asm_OP_map_key_u64(p *_Instr) {
+ self.parse_unsigned(uint64Type, "", p.vi()) // PARSE uint64
+ if vt := p.vt(); !mapfast(vt) {
+ self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint64, mapassign, st.Iv
+ } else {
+ self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+ self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN uint64, mapassign_fast64
+ }
+}
+
+func (self *_Assembler) _asm_OP_map_key_f32(p *_Instr) {
+ self.parse_number(float32Type, "", p.vi()) // PARSE NUMBER
+ self.range_single() // RANGE float32
+ self.Emit("MOVSS", _X0, _VAR_st_Dv) // MOVSS X0, st.Dv
+ self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv
+}
+
+func (self *_Assembler) _asm_OP_map_key_f64(p *_Instr) {
+ self.parse_number(float64Type, "", p.vi()) // PARSE NUMBER
+ self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv
+}
+
+func (self *_Assembler) _asm_OP_map_key_str(p *_Instr) {
+ self.parse_string() // PARSE STRING
+ self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n
+ if vt := p.vt(); !mapfast(vt) {
+ self.valloc(vt.Key(), _DI)
+ self.Emit("MOVOU", _VAR_sv, _X0)
+ self.Emit("MOVOU", _X0, jit.Ptr(_DI, 0))
+ self.mapassign_std(vt, jit.Ptr(_DI, 0))
+ } else {
+ self.Emit("MOVQ", _VAR_sv_p, _DI) // MOVQ sv.p, DI
+ self.Emit("MOVQ", _VAR_sv_n, _SI) // MOVQ sv.n, SI
+ self.mapassign_str_fast(vt, _DI, _SI) // MAPASSIGN string, DI, SI
+ }
+}
+
+func (self *_Assembler) _asm_OP_map_key_utext(p *_Instr) {
+ self.parse_string() // PARSE STRING
+ self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, true) // UNQUOTE once, sv.p, sv.n
+ self.mapassign_utext(p.vt(), false) // MAPASSIGN utext, ${p.vt()}, false
+}
+
+func (self *_Assembler) _asm_OP_map_key_utext_p(p *_Instr) {
+ self.parse_string() // PARSE STRING
+ self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, false) // UNQUOTE once, sv.p, sv.n
+ self.mapassign_utext(p.vt(), true) // MAPASSIGN utext, ${p.vt()}, true
+}
+
+func (self *_Assembler) _asm_OP_array_skip(_ *_Instr) {
+ self.call_sf(_F_skip_array) // CALL_SF skip_array
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+}
+
+func (self *_Assembler) _asm_OP_array_clear(p *_Instr) {
+ self.mem_clear_rem(p.i64(), true)
+}
+
+func (self *_Assembler) _asm_OP_array_clear_p(p *_Instr) {
+ self.mem_clear_rem(p.i64(), false)
+}
+
+func (self *_Assembler) _asm_OP_slice_init(p *_Instr) {
+ self.Emit("XORL" , _AX, _AX) // XORL AX, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
+ self.Emit("MOVQ" , jit.Ptr(_VP, 16), _AX) // MOVQ 16(VP), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNZ" , "_done_{n}") // JNZ _done_{n}
+ self.Emit("MOVQ" , jit.Imm(_MinSlice), _CX) // MOVQ ${_MinSlice}, CX
+ self.Emit("MOVQ" , _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP)
+ self.Emit("MOVQ" , jit.Type(p.vt()), _DX) // MOVQ ${p.vt()}, DX
+ self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP)
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
+ self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 16)) // MOVQ CX, 16(SP)
+ self.call_go(_F_makeslice) // CALL_GO makeslice
+ self.Emit("MOVQ" , jit.Ptr(_SP, 24), _AX) // MOVQ 24(SP), AX
+ self.WritePtrAX(7, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
+ self.Link("_done_{n}") // _done_{n}:
+ self.Emit("XORL" , _AX, _AX) // XORL AX, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
+}
+
+func (self *_Assembler) _asm_OP_slice_append(p *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_VP, 8), _AX) // MOVQ 8(VP), AX
+ self.Emit("CMPQ" , _AX, jit.Ptr(_VP, 16)) // CMPQ AX, 16(VP)
+ self.Sjmp("JB" , "_index_{n}") // JB _index_{n}
+ self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVOU", jit.Ptr(_VP, 0), _X0) // MOVOU (VP), X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP)
+ self.Emit("MOVQ" , jit.Ptr(_VP, 16), _AX) // MOVQ 16(VP), AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 24)) // MOVQ AX, 24(SP)
+ self.Emit("SHLQ" , jit.Imm(1), _AX) // SHLQ $1, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 32)) // MOVQ AX, 32(SP)
+ self.call_go(_F_growslice) // CALL_GO growslice
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _DI) // MOVQ 40(SP), DI
+ self.Emit("MOVQ" , jit.Ptr(_SP, 48), _AX) // MOVQ 48(SP), AX
+ self.Emit("MOVQ" , jit.Ptr(_SP, 56), _SI) // MOVQ 56(SP), SI
+ self.WriteRecNotAX(8, _DI, jit.Ptr(_VP, 0), true, true)// MOVQ DI, (VP)
+ self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
+ self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP)
+ self.Link("_index_{n}") // _index_{n}:
+ self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP)
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _VP) // MOVQ (VP), VP
+ self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX
+ self.From("MULQ" , _CX) // MULQ CX
+ self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP
+}
+
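+// The append above is the usual grow-then-index pattern; a rough Go equivalent
+// (elemType/elemSize stand for p.vt() and p.vlen(), and the write barrier on
+// the data pointer is elided):
+//
+//     if len(s) >= cap(s) {
+//         s = growslice(elemType, s, cap(s)*2)
+//     }
+//     s = s[:len(s)+1]
+//     vp = &s[len(s)-1]   // VP now addresses the freshly appended element
+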
+func (self *_Assembler) _asm_OP_object_skip(_ *_Instr) {
+ self.call_sf(_F_skip_object) // CALL_SF skip_object
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+}
+
+func (self *_Assembler) _asm_OP_object_next(_ *_Instr) {
+ self.call_sf(_F_skip_one) // CALL_SF skip_one
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+}
+
+func (self *_Assembler) _asm_OP_struct_field(p *_Instr) {
+ assert_eq(caching.FieldEntrySize, 32, "invalid field entry size")
+ self.Emit("MOVQ" , jit.Imm(-1), _AX) // MOVQ $-1, AX
+ self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, sr
+ self.parse_string() // PARSE STRING
+ self.unquote_once(_VAR_sv_p, _VAR_sv_n, true, false) // UNQUOTE once, sv.p, sv.n
+ self.Emit("LEAQ" , _VAR_sv, _AX) // LEAQ sv, AX
+ self.Emit("XORL" , _CX, _CX) // XORL CX, CX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+ self.call_go(_F_strhash) // CALL_GO strhash
+ self.Emit("MOVQ" , jit.Ptr(_SP, 16), _AX) // MOVQ 16(SP), AX
+ self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9
+ self.Emit("MOVQ" , jit.Imm(freezeFields(p.vf())), _CX) // MOVQ ${p.vf()}, CX
+ self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_b), _SI) // MOVQ FieldMap.b(CX), SI
+ self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_N), _CX) // MOVQ FieldMap.N(CX), CX
+ self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX
+ self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n}
+ self.Link("_loop_{n}") // _loop_{n}:
+ self.Emit("XORL" , _DX, _DX) // XORL DX, DX
+ self.From("DIVQ" , _CX) // DIVQ CX
+ self.Emit("LEAQ" , jit.Ptr(_DX, 1), _AX) // LEAQ 1(DX), AX
+ self.Emit("SHLQ" , jit.Imm(5), _DX) // SHLQ $5, DX
+ self.Emit("LEAQ" , jit.Sib(_SI, _DX, 1, 0), _DI) // LEAQ (SI)(DX), DI
+ self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Hash), _R8) // MOVQ FieldEntry.Hash(DI), R8
+ self.Emit("TESTQ", _R8, _R8) // TESTQ R8, R8
+ self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n}
+ self.Emit("CMPQ" , _R8, _R9) // CMPQ R8, R9
+ self.Sjmp("JNE" , "_loop_{n}") // JNE _loop_{n}
+ self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name + 8), _DX) // MOVQ FieldEntry.Name+8(DI), DX
+ self.Emit("CMPQ" , _DX, _VAR_sv_n) // CMPQ DX, sv.n
+ self.Sjmp("JNE" , "_loop_{n}") // JNE _loop_{n}
+ self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_ID), _R8) // MOVQ FieldEntry.ID(DI), R8
+ self.Emit("MOVQ" , _AX, _VAR_ss_AX) // MOVQ AX, ss.AX
+ self.Emit("MOVQ" , _CX, _VAR_ss_CX) // MOVQ CX, ss.CX
+ self.Emit("MOVQ" , _SI, _VAR_ss_SI) // MOVQ SI, ss.SI
+ self.Emit("MOVQ" , _R8, _VAR_ss_R8) // MOVQ R8, ss.R8
+ self.Emit("MOVQ" , _R9, _VAR_ss_R9) // MOVQ R9, ss.R9
+ self.Emit("MOVQ" , _VAR_sv_p, _AX) // MOVQ _VAR_sv_p, AX
+ self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name), _CX) // MOVQ FieldEntry.Name(DI), CX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVQ" , _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+ self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 16)) // MOVQ DX, 16(SP)
+ self.call_go(_F_memequal) // CALL_GO memequal
+ self.Emit("MOVQ" , _VAR_ss_AX, _AX) // MOVQ ss.AX, AX
+ self.Emit("MOVQ" , _VAR_ss_CX, _CX) // MOVQ ss.CX, CX
+ self.Emit("MOVQ" , _VAR_ss_SI, _SI) // MOVQ ss.SI, SI
+ self.Emit("MOVQ" , _VAR_ss_R9, _R9) // MOVQ ss.R9, R9
+ self.Emit("MOVB" , jit.Ptr(_SP, 24), _DX) // MOVB 24(SP), DX
+ self.Emit("TESTB", _DX, _DX) // TESTB DX, DX
+ self.Sjmp("JZ" , "_loop_{n}") // JZ _loop_{n}
+ self.Emit("MOVQ" , _VAR_ss_R8, _R8) // MOVQ ss.R8, R8
+ self.Emit("MOVQ" , _R8, _VAR_sr) // MOVQ R8, sr
+ self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n}
+ self.Link("_try_lowercase_{n}") // _try_lowercase_{n}:
+ self.Emit("MOVQ" , jit.Imm(referenceFields(p.vf())), _AX) // MOVQ ${p.vf()}, AX
+ self.Emit("MOVOU", _VAR_sv, _X0) // MOVOU sv, X0
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP)
+ self.call_go(_F_FieldMap_GetCaseInsensitive) // CALL_GO FieldMap::GetCaseInsensitive
+ self.Emit("MOVQ" , jit.Ptr(_SP, 24), _AX) // MOVQ 24(SP), AX
+ self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, _VAR_sr
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNS" , "_end_{n}") // JNS _end_{n}
+ self.Emit("BTQ" , jit.Imm(_F_disable_unknown), _ARG_fv) // BTQ ${_F_disable_unknown}, fv
+ self.Sjmp("JC" , _LB_field_error) // JC _field_error
+ self.Link("_end_{n}") // _end_{n}:
+}
+
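+// The lookup above is a linear probe over the pre-built field table, falling
+// back to a case-insensitive search; a rough sketch (b and N name the bucket
+// array and bucket count behind the FieldMap_b/FieldMap_N offsets):
+//
+//     h := strhash(name)
+//     sr := -1
+//     for i := h % N; ; i = (i + 1) % N {
+//         e := &b[i]
+//         if e.Hash == 0 {
+//             break                          // empty bucket: no exact match
+//         }
+//         if e.Hash == h && e.Name == name {
+//             sr = e.ID                      // exact match found
+//             break
+//         }
+//     }
+//     if sr < 0 {
+//         sr = GetCaseInsensitive(name)      // may stay -1: unknown field,
+//     }                                      // rejected if disable_unknown is set
+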
+func (self *_Assembler) _asm_OP_unmarshal(p *_Instr) {
+ self.unmarshal_json(p.vt(), true)
+}
+
+func (self *_Assembler) _asm_OP_unmarshal_p(p *_Instr) {
+ self.unmarshal_json(p.vt(), false)
+}
+
+func (self *_Assembler) _asm_OP_unmarshal_text(p *_Instr) {
+ self.unmarshal_text(p.vt(), true)
+}
+
+func (self *_Assembler) _asm_OP_unmarshal_text_p(p *_Instr) {
+ self.unmarshal_text(p.vt(), false)
+}
+
+func (self *_Assembler) _asm_OP_lspace(_ *_Instr) {
+ self.lspace("_{n}")
+}
+
+func (self *_Assembler) lspace(subfix string) {
+ var label = "_lspace" + subfix
+
+ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error
+ self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX
+ self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+ self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' '
+ self.Sjmp("JA" , label) // JA _nospace_{n}
+ self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
+ self.Sjmp("JNC" , label) // JNC _nospace_{n}
+
+ /* test up to 4 characters */
+ for i := 0; i < 3; i++ {
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error
+ self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+ self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' '
+ self.Sjmp("JA" , label) // JA _nospace_{n}
+ self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
+ self.Sjmp("JNC" , label) // JNC _nospace_{n}
+ }
+
+ /* hand over to the native function */
+ self.Emit("MOVQ" , _IP, _DI) // MOVQ IP, DI
+ self.Emit("MOVQ" , _IL, _SI) // MOVQ IL, SI
+ self.Emit("MOVQ" , _IC, _DX) // MOVQ IC, DX
+ self.call(_F_lspace) // CALL lspace
+ self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parsing_error_v
+ self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error
+ self.Emit("MOVQ" , _AX, _IC) // MOVQ AX, IC
+ self.Link(label) // _nospace_{n}:
+}
+
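+// A byte c counts as JSON whitespace iff c <= ' ' and bit c of _BM_space is
+// set (space, tab, CR, LF). Up to four bytes are tested inline with that mask
+// before handing the rest to the native lspace subroutine; roughly:
+//
+//     isSpace := func(c byte) bool { return c <= ' ' && (_BM_space>>c)&1 == 1 }
+//     for k := 0; k < 4 && isSpace(ip[ic]); k++ {
+//         ic++                               // EOF is checked before every load
+//     }
+//     // any remaining whitespace is consumed by the native lspace call
+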
+func (self *_Assembler) _asm_OP_match_char(p *_Instr) {
+ self.check_eof(1)
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
+ self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error
+ self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
+}
+
+func (self *_Assembler) _asm_OP_check_char(p *_Instr) {
+ self.check_eof(1)
+ self.Emit("LEAQ" , jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX
+ self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
+ self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
+ self.Xjmp("JE" , p.vi()) // JE {p.vi()}
+}
+
+func (self *_Assembler) _asm_OP_check_char_0(p *_Instr) {
+ self.check_eof(1)
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
+ self.Xjmp("JE" , p.vi()) // JE {p.vi()}
+}
+
+func (self *_Assembler) _asm_OP_add(p *_Instr) {
+ self.Emit("ADDQ", jit.Imm(int64(p.vi())), _IC) // ADDQ ${p.vi()}, IC
+}
+
+func (self *_Assembler) _asm_OP_load(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _VP) // MOVQ (ST)(AX), VP
+}
+
+func (self *_Assembler) _asm_OP_save(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX
+ self.Emit("CMPQ", _CX, jit.Imm(_MaxStackBytes)) // CMPQ CX, ${_MaxStackBytes}
+ self.Sjmp("JAE" , _LB_stack_error) // JA _stack_error
+ self.WriteRecNotAX(0 , _VP, jit.Sib(_ST, _CX, 1, 8), false, false) // MOVQ VP, 8(ST)(CX)
+ self.Emit("ADDQ", jit.Imm(8), _CX) // ADDQ $8, CX
+ self.Emit("MOVQ", _CX, jit.Ptr(_ST, 0)) // MOVQ CX, (ST)
+}
+
+func (self *_Assembler) _asm_OP_drop(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("SUBQ", jit.Imm(8), _AX) // SUBQ $8, AX
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP
+ self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST)
+ self.Emit("XORL", _ET, _ET) // XORL ET, ET
+ self.Emit("MOVQ", _ET, jit.Sib(_ST, _AX, 1, 8)) // MOVQ ET, 8(ST)(AX)
+}
+
+func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("SUBQ" , jit.Imm(16), _AX) // SUBQ $16, AX
+ self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP
+ self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST)
+ self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
+ self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX)
+}
+
+func (self *_Assembler) _asm_OP_recurse(p *_Instr) {
+ self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX
+ self.decode_dynamic(_AX, _VP) // DECODE AX, VP
+}
+
+func (self *_Assembler) _asm_OP_goto(p *_Instr) {
+ self.Xjmp("JMP", p.vi())
+}
+
+func (self *_Assembler) _asm_OP_switch(p *_Instr) {
+ self.Emit("MOVQ", _VAR_sr, _AX) // MOVQ sr, AX
+ self.Emit("CMPQ", _AX, jit.Imm(p.i64())) // CMPQ AX, ${len(p.vs())}
+ self.Sjmp("JAE" , "_default_{n}") // JAE _default_{n}
+
+ /* jump table selector */
+ self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI
+ self.Sref("_switch_table_{n}", 4) // .... &_switch_table_{n}
+ self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX
+ self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX
+ self.Rjmp("JMP" , _AX) // JMP AX
+ self.Link("_switch_table_{n}") // _switch_table_{n}:
+
+ /* generate the jump table */
+ for i, v := range p.vs() {
+ self.Xref(v, int64(-i) * 4)
+ }
+
+ /* default case */
+ self.Link("_default_{n}")
+ self.NOP()
+}
+
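+// The switch above compiles to a position-independent jump table: each slot
+// holds the 32-bit offset of its target relative to the table base, so the
+// dispatch is, in pseudocode,
+//
+//     if sr >= len(table) { goto _default }
+//     goto tableBase + table[sr]
+//
+// which is exactly what the MOVLQSX/ADDQ/JMP AX sequence performs.
+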
+func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) {
+ self.Emit("MOVQ", jit.Imm(int64(p2.op())), jit.Ptr(_SP, 16))// MOVQ $(p2.op()), 16(SP)
+ self.Emit("MOVQ", jit.Imm(int64(p1.op())), jit.Ptr(_SP, 8)) // MOVQ $(p1.op()), 8(SP)
+ self.Emit("MOVQ", jit.Imm(int64(i)), jit.Ptr(_SP, 0)) // MOVQ $(i), (SP)
+ self.call_go(_F_println)
+}
+
+var _runtime_writeBarrier uintptr = rt.GcwbAddr()
+
+//go:linkname gcWriteBarrierAX runtime.gcWriteBarrier
+func gcWriteBarrierAX()
+
+var (
+ _V_writeBarrier = jit.Imm(int64(_runtime_writeBarrier))
+
+ _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX)
+)
+
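+// The two store helpers below mirror what the Go compiler emits for a pointer
+// write: if the runtime write-barrier flag is clear a plain MOVQ suffices,
+// otherwise the store is routed through runtime.gcWriteBarrier with the value
+// in AX and the slot address in DI. In Go-like pseudocode:
+//
+//     if writeBarrier.enabled {
+//         gcWriteBarrier(slot /* DI */, ptr /* AX */)
+//     } else {
+//         *slot = ptr
+//     }
+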
+func (self *_Assembler) WritePtrAX(i int, rec obj.Addr, saveDI bool) {
+ self.Emit("MOVQ", _V_writeBarrier, _R10)
+ self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0))
+ self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ if saveDI {
+ self.save(_DI)
+ }
+ self.Emit("LEAQ", rec, _DI)
+ self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX
+ self.Rjmp("CALL", _R10)
+ if saveDI {
+ self.load(_DI)
+ }
+ self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", _AX, rec)
+ self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+}
+
+func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool, saveAX bool) {
+ if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX {
+ panic("rec contains AX!")
+ }
+ self.Emit("MOVQ", _V_writeBarrier, _R10)
+ self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0))
+ self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ if saveAX {
+ self.Emit("XCHGQ", ptr, _AX)
+ } else {
+ self.Emit("MOVQ", ptr, _AX)
+ }
+ if saveDI {
+ self.save(_DI)
+ }
+ self.Emit("LEAQ", rec, _DI)
+ self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX
+ self.Rjmp("CALL", _R10)
+ if saveDI {
+ self.load(_DI)
+ }
+ if saveAX {
+ self.Emit("XCHGQ", ptr, _AX)
+ }
+ self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", ptr, rec)
+ self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go117.go b/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go117.go
new file mode 100644
index 000000000..8a70cfff6
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/assembler_amd64_go117.go
@@ -0,0 +1,1922 @@
+//go:build go1.17 && !go1.21
+// +build go1.17,!go1.21
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `encoding/json`
+ `fmt`
+ `math`
+ `reflect`
+ `strconv`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/caching`
+ `github.com/bytedance/sonic/internal/jit`
+ `github.com/bytedance/sonic/internal/native`
+ `github.com/bytedance/sonic/internal/native/types`
+ `github.com/bytedance/sonic/internal/rt`
+ `github.com/twitchyliquid64/golang-asm/obj`
+ `github.com/twitchyliquid64/golang-asm/obj/x86`
+)
+
+/** Register Allocations
+ *
+ * State Registers:
+ *
+ * %r13 : stack base
+ * %r10 : input pointer
+ * %r12 : input length
+ * %r11 : input cursor
+ * %r15 : value pointer
+ *
+ * Error Registers:
+ *
+ * %rax : error type register
+ * %rbx : error pointer register
+ */
+
+/** Function Prototype & Stack Map
+ *
+ * func (s string, ic int, vp unsafe.Pointer, sb *_Stack, fv uint64, sv string) (rc int, err error)
+ *
+ * s.buf : (FP)
+ * s.len : 8(FP)
+ * ic : 16(FP)
+ * vp : 24(FP)
+ * sb : 32(FP)
+ * fv : 40(FP)
+ * sv : 56(FP)
+ * err.vt : 72(FP)
+ * err.vp : 80(FP)
+ */
+
+const (
+ _FP_args = 72 // 72 bytes to pass and spill register arguments
+ _FP_fargs = 80 // 80 bytes for passing arguments to other Go functions
+ _FP_saves = 48 // 48 bytes for saving the registers before CALL instructions
+ _FP_locals = 144 // 144 bytes for local variables
+)
+
+const (
+ _FP_offs = _FP_fargs + _FP_saves + _FP_locals
+ _FP_size = _FP_offs + 8 // 8 bytes for the parent frame pointer
+ _FP_base = _FP_size + 8 // 8 bytes for the return address
+)
+
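+// With the sizes above the frame works out to _FP_offs = 80 + 48 + 144 = 272,
+// _FP_size = 280 (adding the saved frame pointer) and _FP_base = 288 (adding
+// the return address), so the spilled register arguments start at 288(SP)
+// once the prologue has built the frame.
+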
+const (
+ _IM_null = 0x6c6c756e // 'null'
+ _IM_true = 0x65757274 // 'true'
+ _IM_alse = 0x65736c61 // 'alse' ('false' without the 'f')
+)
+
+const (
+ _BM_space = (1 << ' ') | (1 << '\t') | (1 << '\r') | (1 << '\n')
+)
+
+const (
+ _MODE_JSON = 1 << 3 // JSON mode flag for the native base64 decoder
+)
+
+const (
+ _LB_error = "_error"
+ _LB_im_error = "_im_error"
+ _LB_eof_error = "_eof_error"
+ _LB_type_error = "_type_error"
+ _LB_field_error = "_field_error"
+ _LB_range_error = "_range_error"
+ _LB_stack_error = "_stack_error"
+ _LB_base64_error = "_base64_error"
+ _LB_unquote_error = "_unquote_error"
+ _LB_parsing_error = "_parsing_error"
+ _LB_parsing_error_v = "_parsing_error_v"
+ _LB_mismatch_error = "_mismatch_error"
+)
+
+const (
+ _LB_char_0_error = "_char_0_error"
+ _LB_char_1_error = "_char_1_error"
+ _LB_char_2_error = "_char_2_error"
+ _LB_char_3_error = "_char_3_error"
+ _LB_char_4_error = "_char_4_error"
+ _LB_char_m2_error = "_char_m2_error"
+ _LB_char_m3_error = "_char_m3_error"
+)
+
+const (
+ _LB_skip_one = "_skip_one"
+ _LB_skip_key_value = "_skip_key_value"
+)
+
+var (
+ _AX = jit.Reg("AX")
+ _BX = jit.Reg("BX")
+ _CX = jit.Reg("CX")
+ _DX = jit.Reg("DX")
+ _DI = jit.Reg("DI")
+ _SI = jit.Reg("SI")
+ _BP = jit.Reg("BP")
+ _SP = jit.Reg("SP")
+ _R8 = jit.Reg("R8")
+ _R9 = jit.Reg("R9")
+ _X0 = jit.Reg("X0")
+ _X1 = jit.Reg("X1")
+)
+
+var (
+ _IP = jit.Reg("R10") // saved on BP when callc
+ _IC = jit.Reg("R11") // saved on BX when call_c
+ _IL = jit.Reg("R12")
+ _ST = jit.Reg("R13")
+ _VP = jit.Reg("R15")
+)
+
+var (
+ _DF = jit.Reg("AX") // reuse AX in generic decoder for flags
+ _ET = jit.Reg("AX")
+ _EP = jit.Reg("BX")
+)
+
+
+
+var (
+ _ARG_s = _ARG_sp
+ _ARG_sp = jit.Ptr(_SP, _FP_base + 0)
+ _ARG_sl = jit.Ptr(_SP, _FP_base + 8)
+ _ARG_ic = jit.Ptr(_SP, _FP_base + 16)
+ _ARG_vp = jit.Ptr(_SP, _FP_base + 24)
+ _ARG_sb = jit.Ptr(_SP, _FP_base + 32)
+ _ARG_fv = jit.Ptr(_SP, _FP_base + 40)
+)
+
+var (
+ _ARG_sv = _ARG_sv_p
+ _ARG_sv_p = jit.Ptr(_SP, _FP_base + 48)
+ _ARG_sv_n = jit.Ptr(_SP, _FP_base + 56)
+ _ARG_vk = jit.Ptr(_SP, _FP_base + 64)
+)
+
+var (
+ _VAR_st = _VAR_st_Vt
+ _VAR_sr = jit.Ptr(_SP, _FP_fargs + _FP_saves)
+)
+
+var (
+ _VAR_st_Vt = jit.Ptr(_SP, _FP_fargs + _FP_saves + 0)
+ _VAR_st_Dv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 8)
+ _VAR_st_Iv = jit.Ptr(_SP, _FP_fargs + _FP_saves + 16)
+ _VAR_st_Ep = jit.Ptr(_SP, _FP_fargs + _FP_saves + 24)
+ _VAR_st_Db = jit.Ptr(_SP, _FP_fargs + _FP_saves + 32)
+ _VAR_st_Dc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 40)
+)
+
+var (
+ _VAR_ss_AX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 48)
+ _VAR_ss_CX = jit.Ptr(_SP, _FP_fargs + _FP_saves + 56)
+ _VAR_ss_SI = jit.Ptr(_SP, _FP_fargs + _FP_saves + 64)
+ _VAR_ss_R8 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 72)
+ _VAR_ss_R9 = jit.Ptr(_SP, _FP_fargs + _FP_saves + 80)
+)
+
+var (
+ _VAR_bs_p = jit.Ptr(_SP, _FP_fargs + _FP_saves + 88)
+ _VAR_bs_n = jit.Ptr(_SP, _FP_fargs + _FP_saves + 96)
+ _VAR_bs_LR = jit.Ptr(_SP, _FP_fargs + _FP_saves + 104)
+)
+
+var _VAR_fl = jit.Ptr(_SP, _FP_fargs + _FP_saves + 112)
+
+var (
+ _VAR_et = jit.Ptr(_SP, _FP_fargs + _FP_saves + 120) // save mismatched type
+ _VAR_pc = jit.Ptr(_SP, _FP_fargs + _FP_saves + 128) // save skip return pc
+ _VAR_ic = jit.Ptr(_SP, _FP_fargs + _FP_saves + 136) // save mismatched position
+)
+
+type _Assembler struct {
+ jit.BaseAssembler
+ p _Program
+ name string
+}
+
+func newAssembler(p _Program) *_Assembler {
+ return new(_Assembler).Init(p)
+}
+
+/** Assembler Interface **/
+
+func (self *_Assembler) Load() _Decoder {
+ return ptodec(self.BaseAssembler.Load("decode_"+self.name, _FP_size, _FP_args, argPtrs, localPtrs))
+}
+
+func (self *_Assembler) Init(p _Program) *_Assembler {
+ self.p = p
+ self.BaseAssembler.Init(self.compile)
+ return self
+}
+
+func (self *_Assembler) compile() {
+ self.prologue()
+ self.instrs()
+ self.epilogue()
+ self.copy_string()
+ self.escape_string()
+ self.escape_string_twice()
+ self.skip_one()
+ self.skip_key_value()
+ self.type_error()
+ self.mismatch_error()
+ self.field_error()
+ self.range_error()
+ self.stack_error()
+ self.base64_error()
+ self.parsing_error()
+}
+
+/** Assembler Stages **/
+
+var _OpFuncTab = [256]func(*_Assembler, *_Instr) {
+ _OP_any : (*_Assembler)._asm_OP_any,
+ _OP_dyn : (*_Assembler)._asm_OP_dyn,
+ _OP_str : (*_Assembler)._asm_OP_str,
+ _OP_bin : (*_Assembler)._asm_OP_bin,
+ _OP_bool : (*_Assembler)._asm_OP_bool,
+ _OP_num : (*_Assembler)._asm_OP_num,
+ _OP_i8 : (*_Assembler)._asm_OP_i8,
+ _OP_i16 : (*_Assembler)._asm_OP_i16,
+ _OP_i32 : (*_Assembler)._asm_OP_i32,
+ _OP_i64 : (*_Assembler)._asm_OP_i64,
+ _OP_u8 : (*_Assembler)._asm_OP_u8,
+ _OP_u16 : (*_Assembler)._asm_OP_u16,
+ _OP_u32 : (*_Assembler)._asm_OP_u32,
+ _OP_u64 : (*_Assembler)._asm_OP_u64,
+ _OP_f32 : (*_Assembler)._asm_OP_f32,
+ _OP_f64 : (*_Assembler)._asm_OP_f64,
+ _OP_unquote : (*_Assembler)._asm_OP_unquote,
+ _OP_nil_1 : (*_Assembler)._asm_OP_nil_1,
+ _OP_nil_2 : (*_Assembler)._asm_OP_nil_2,
+ _OP_nil_3 : (*_Assembler)._asm_OP_nil_3,
+ _OP_deref : (*_Assembler)._asm_OP_deref,
+ _OP_index : (*_Assembler)._asm_OP_index,
+ _OP_is_null : (*_Assembler)._asm_OP_is_null,
+ _OP_is_null_quote : (*_Assembler)._asm_OP_is_null_quote,
+ _OP_map_init : (*_Assembler)._asm_OP_map_init,
+ _OP_map_key_i8 : (*_Assembler)._asm_OP_map_key_i8,
+ _OP_map_key_i16 : (*_Assembler)._asm_OP_map_key_i16,
+ _OP_map_key_i32 : (*_Assembler)._asm_OP_map_key_i32,
+ _OP_map_key_i64 : (*_Assembler)._asm_OP_map_key_i64,
+ _OP_map_key_u8 : (*_Assembler)._asm_OP_map_key_u8,
+ _OP_map_key_u16 : (*_Assembler)._asm_OP_map_key_u16,
+ _OP_map_key_u32 : (*_Assembler)._asm_OP_map_key_u32,
+ _OP_map_key_u64 : (*_Assembler)._asm_OP_map_key_u64,
+ _OP_map_key_f32 : (*_Assembler)._asm_OP_map_key_f32,
+ _OP_map_key_f64 : (*_Assembler)._asm_OP_map_key_f64,
+ _OP_map_key_str : (*_Assembler)._asm_OP_map_key_str,
+ _OP_map_key_utext : (*_Assembler)._asm_OP_map_key_utext,
+ _OP_map_key_utext_p : (*_Assembler)._asm_OP_map_key_utext_p,
+ _OP_array_skip : (*_Assembler)._asm_OP_array_skip,
+ _OP_array_clear : (*_Assembler)._asm_OP_array_clear,
+ _OP_array_clear_p : (*_Assembler)._asm_OP_array_clear_p,
+ _OP_slice_init : (*_Assembler)._asm_OP_slice_init,
+ _OP_slice_append : (*_Assembler)._asm_OP_slice_append,
+ _OP_object_skip : (*_Assembler)._asm_OP_object_skip,
+ _OP_object_next : (*_Assembler)._asm_OP_object_next,
+ _OP_struct_field : (*_Assembler)._asm_OP_struct_field,
+ _OP_unmarshal : (*_Assembler)._asm_OP_unmarshal,
+ _OP_unmarshal_p : (*_Assembler)._asm_OP_unmarshal_p,
+ _OP_unmarshal_text : (*_Assembler)._asm_OP_unmarshal_text,
+ _OP_unmarshal_text_p : (*_Assembler)._asm_OP_unmarshal_text_p,
+ _OP_lspace : (*_Assembler)._asm_OP_lspace,
+ _OP_match_char : (*_Assembler)._asm_OP_match_char,
+ _OP_check_char : (*_Assembler)._asm_OP_check_char,
+ _OP_load : (*_Assembler)._asm_OP_load,
+ _OP_save : (*_Assembler)._asm_OP_save,
+ _OP_drop : (*_Assembler)._asm_OP_drop,
+ _OP_drop_2 : (*_Assembler)._asm_OP_drop_2,
+ _OP_recurse : (*_Assembler)._asm_OP_recurse,
+ _OP_goto : (*_Assembler)._asm_OP_goto,
+ _OP_switch : (*_Assembler)._asm_OP_switch,
+ _OP_check_char_0 : (*_Assembler)._asm_OP_check_char_0,
+ _OP_dismatch_err : (*_Assembler)._asm_OP_dismatch_err,
+ _OP_go_skip : (*_Assembler)._asm_OP_go_skip,
+ _OP_add : (*_Assembler)._asm_OP_add,
+ _OP_debug : (*_Assembler)._asm_OP_debug,
+}
+
+func (self *_Assembler) _asm_OP_debug(_ *_Instr) {
+ self.Byte(0xcc)
+}
+
+func (self *_Assembler) instr(v *_Instr) {
+ if fn := _OpFuncTab[v.op()]; fn != nil {
+ fn(self, v)
+ } else {
+ panic(fmt.Sprintf("invalid opcode: %d", v.op()))
+ }
+}
+
+func (self *_Assembler) instrs() {
+ for i, v := range self.p {
+ self.Mark(i)
+ self.instr(&v)
+ self.debug_instr(i, &v)
+ }
+}
+
+func (self *_Assembler) epilogue() {
+ self.Mark(len(self.p))
+ self.Emit("XORL", _EP, _EP) // XORL EP, EP
+ self.Emit("MOVQ", _VAR_et, _ET) // MOVQ VAR_et, ET
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ", _LB_mismatch_error) // JNZ _LB_mismatch_error
+ self.Link(_LB_error) // _error:
+ self.Emit("MOVQ", _EP, _CX) // MOVQ BX, CX
+ self.Emit("MOVQ", _ET, _BX) // MOVQ AX, BX
+ self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX
+ self.Emit("MOVQ", jit.Imm(0), _ARG_sp) // MOVQ $0, sv.p<>+48(FP)
+ self.Emit("MOVQ", jit.Imm(0), _ARG_vp) // MOVQ $0, sv.p<>+48(FP)
+ self.Emit("MOVQ", jit.Imm(0), _ARG_sv_p) // MOVQ $0, sv.p<>+48(FP)
+ self.Emit("MOVQ", jit.Imm(0), _ARG_vk) // MOVQ $0, vk<>+64(FP)
+ self.Emit("MOVQ", jit.Ptr(_SP, _FP_offs), _BP) // MOVQ _FP_offs(SP), BP
+ self.Emit("ADDQ", jit.Imm(_FP_size), _SP) // ADDQ $_FP_size, SP
+ self.Emit("RET") // RET
+}
+
+func (self *_Assembler) prologue() {
+ self.Emit("SUBQ", jit.Imm(_FP_size), _SP) // SUBQ $_FP_size, SP
+ self.Emit("MOVQ", _BP, jit.Ptr(_SP, _FP_offs)) // MOVQ BP, _FP_offs(SP)
+ self.Emit("LEAQ", jit.Ptr(_SP, _FP_offs), _BP) // LEAQ _FP_offs(SP), BP
+ self.Emit("MOVQ", _AX, _ARG_sp) // MOVQ AX, s.p<>+0(FP)
+ self.Emit("MOVQ", _AX, _IP) // MOVQ AX, IP
+ self.Emit("MOVQ", _BX, _ARG_sl) // MOVQ BX, s.l<>+8(FP)
+ self.Emit("MOVQ", _BX, _IL) // MOVQ BX, IL
+ self.Emit("MOVQ", _CX, _ARG_ic) // MOVQ CX, ic<>+16(FP)
+ self.Emit("MOVQ", _CX, _IC) // MOVQ CX, IC
+ self.Emit("MOVQ", _DI, _ARG_vp) // MOVQ DI, vp<>+24(FP)
+ self.Emit("MOVQ", _DI, _VP) // MOVQ DI, VP
+ self.Emit("MOVQ", _SI, _ARG_sb) // MOVQ SI, sb<>+32(FP)
+ self.Emit("MOVQ", _SI, _ST) // MOVQ SI, ST
+ self.Emit("MOVQ", _R8, _ARG_fv) // MOVQ R8, fv<>+40(FP)
+ self.Emit("MOVQ", jit.Imm(0), _ARG_sv_p) // MOVQ $0, sv.p<>+48(FP)
+ self.Emit("MOVQ", jit.Imm(0), _ARG_sv_n) // MOVQ $0, sv.n<>+56(FP)
+ self.Emit("MOVQ", jit.Imm(0), _ARG_vk) // MOVQ $0, vk<>+64(FP)
+ self.Emit("MOVQ", jit.Imm(0), _VAR_et) // MOVQ $0, et<>+120(FP)
+ // initialize the digit buffer first
+ self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_st_Dc) // MOVQ $_MaxDigitNums, ss.Dcap
+ self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX
+ self.Emit("MOVQ", _AX, _VAR_st_Db) // MOVQ AX, ss.Dbuf
+}
+
+/** Function Calling Helpers **/
+
+var (
+ _REG_go = []obj.Addr { _ST, _VP, _IP, _IL, _IC }
+ _REG_rt = []obj.Addr { _ST, _VP, _IP, _IL, _IC, _IL }
+)
+
+func (self *_Assembler) save(r ...obj.Addr) {
+ for i, v := range r {
+ if i > _FP_saves / 8 - 1 {
+ panic("too many registers to save")
+ } else {
+ self.Emit("MOVQ", v, jit.Ptr(_SP, _FP_fargs + int64(i) * 8))
+ }
+ }
+}
+
+func (self *_Assembler) load(r ...obj.Addr) {
+ for i, v := range r {
+ if i > _FP_saves / 8 - 1 {
+ panic("too many registers to load")
+ } else {
+ self.Emit("MOVQ", jit.Ptr(_SP, _FP_fargs + int64(i) * 8), v)
+ }
+ }
+}
+
+func (self *_Assembler) call(fn obj.Addr) {
+ self.Emit("MOVQ", fn, _R9) // MOVQ ${fn}, R11
+ self.Rjmp("CALL", _R9) // CALL R11
+}
+
+func (self *_Assembler) call_go(fn obj.Addr) {
+ self.save(_REG_go...) // SAVE $REG_go
+ self.call(fn)
+ self.load(_REG_go...) // LOAD $REG_go
+}
+
+func (self *_Assembler) callc(fn obj.Addr) {
+ self.Emit("XCHGQ", _IP, _BP)
+ self.call(fn)
+ self.Emit("XCHGQ", _IP, _BP)
+}
+
+func (self *_Assembler) call_c(fn obj.Addr) {
+ self.Emit("XCHGQ", _IC, _BX)
+ self.callc(fn)
+ self.Emit("XCHGQ", _IC, _BX)
+}
+
+func (self *_Assembler) call_sf(fn obj.Addr) {
+ self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI
+ self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP)
+ self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI
+ self.Emit("LEAQ", jit.Ptr(_ST, _FsmOffset), _DX) // LEAQ _FsmOffset(ST), DX
+ self.Emit("MOVQ", _ARG_fv, _CX)
+ self.callc(fn)
+ self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC
+}
+
+func (self *_Assembler) call_vf(fn obj.Addr) {
+ self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI
+ self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP)
+ self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI
+ self.Emit("LEAQ", _VAR_st, _DX) // LEAQ st, DX
+ self.callc(fn)
+ self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC
+}
+
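+// call_sf and call_vf follow the argument convention of the native scanning
+// subroutines: DI = &s, SI = &ic, DX = the skip FSM inside the stack (call_sf)
+// or the parsed-value scratch area _VAR_st (call_vf), plus fv in CX where it
+// applies; the cursor is passed by reference so the callee advances IC in place.
+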
+/** Assembler Error Handlers **/
+
+var (
+ _F_convT64 = jit.Func(convT64)
+ _F_error_wrap = jit.Func(error_wrap)
+ _F_error_type = jit.Func(error_type)
+ _F_error_field = jit.Func(error_field)
+ _F_error_value = jit.Func(error_value)
+ _F_error_mismatch = jit.Func(error_mismatch)
+)
+
+var (
+ _I_int8 , _T_int8 = rtype(reflect.TypeOf(int8(0)))
+ _I_int16 , _T_int16 = rtype(reflect.TypeOf(int16(0)))
+ _I_int32 , _T_int32 = rtype(reflect.TypeOf(int32(0)))
+ _I_uint8 , _T_uint8 = rtype(reflect.TypeOf(uint8(0)))
+ _I_uint16 , _T_uint16 = rtype(reflect.TypeOf(uint16(0)))
+ _I_uint32 , _T_uint32 = rtype(reflect.TypeOf(uint32(0)))
+ _I_float32 , _T_float32 = rtype(reflect.TypeOf(float32(0)))
+)
+
+var (
+ _T_error = rt.UnpackType(errorType)
+ _I_base64_CorruptInputError = jit.Itab(_T_error, base64CorruptInputError)
+)
+
+var (
+ _V_stackOverflow = jit.Imm(int64(uintptr(unsafe.Pointer(&stackOverflow))))
+ _I_json_UnsupportedValueError = jit.Itab(_T_error, reflect.TypeOf(new(json.UnsupportedValueError)))
+)
+
+func (self *_Assembler) type_error() {
+ self.Link(_LB_type_error) // _type_error:
+ self.call_go(_F_error_type) // CALL_GO error_type
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) mismatch_error() {
+ self.Link(_LB_mismatch_error) // _mismatch_error:
+ self.Emit("MOVQ", _ARG_sp, _AX)
+ self.Emit("MOVQ", _ARG_sl, _BX)
+ self.Emit("MOVQ", _VAR_ic, _CX)
+ self.Emit("MOVQ", _VAR_et, _DI)
+ self.call_go(_F_error_mismatch) // CALL_GO error_mismatch
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) field_error() {
+ self.Link(_LB_field_error) // _field_error:
+ self.Emit("MOVQ", _ARG_sv_p, _AX) // MOVQ sv.p, AX
+ self.Emit("MOVQ", _ARG_sv_n, _BX) // MOVQ sv.n, BX
+ self.call_go(_F_error_field) // CALL_GO error_field
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) range_error() {
+ self.Link(_LB_range_error) // _range_error:
+ self.Emit("MOVQ", _ET, _CX) // MOVQ ET, CX
+ self.slice_from(_VAR_st_Ep, 0) // SLICE st.Ep, $0
+ self.Emit("MOVQ", _DI, _AX) // MOVQ DI, AX
+ self.Emit("MOVQ", _EP, _DI) // MOVQ EP, DI
+ self.Emit("MOVQ", _SI, _BX) // MOVQ SI, BX
+ self.call_go(_F_error_value) // CALL_GO error_value
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) stack_error() {
+ self.Link(_LB_stack_error) // _stack_error:
+ self.Emit("MOVQ", _V_stackOverflow, _EP) // MOVQ ${_V_stackOverflow}, EP
+ self.Emit("MOVQ", _I_json_UnsupportedValueError, _ET) // MOVQ ${_I_json_UnsupportedValueError}, ET
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) base64_error() {
+ self.Link(_LB_base64_error)
+ self.Emit("NEGQ", _AX) // NEGQ AX
+ self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX
+ self.call_go(_F_convT64) // CALL_GO convT64
+ self.Emit("MOVQ", _AX, _EP) // MOVQ AX, EP
+ self.Emit("MOVQ", _I_base64_CorruptInputError, _ET) // MOVQ ${itab(base64.CorruptInputError)}, ET
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) parsing_error() {
+ self.Link(_LB_eof_error) // _eof_error:
+ self.Emit("MOVQ" , _IL, _IC) // MOVQ IL, IC
+ self.Emit("MOVL" , jit.Imm(int64(types.ERR_EOF)), _EP) // MOVL ${types.ERR_EOF}, EP
+ self.Sjmp("JMP" , _LB_parsing_error) // JMP _parsing_error
+ self.Link(_LB_unquote_error) // _unquote_error:
+ self.Emit("SUBQ" , _VAR_sr, _SI) // SUBQ sr, SI
+ self.Emit("SUBQ" , _SI, _IC) // SUBQ IL, IC
+ self.Link(_LB_parsing_error_v) // _parsing_error_v:
+ self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP
+ self.Emit("NEGQ" , _EP) // NEGQ EP
+ self.Sjmp("JMP" , _LB_parsing_error) // JMP _parsing_error
+ self.Link(_LB_char_m3_error) // _char_m3_error:
+ self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC
+ self.Link(_LB_char_m2_error) // _char_m2_error:
+ self.Emit("SUBQ" , jit.Imm(2), _IC) // SUBQ $2, IC
+ self.Sjmp("JMP" , _LB_char_0_error) // JMP _char_0_error
+ self.Link(_LB_im_error) // _im_error:
+ self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPB CX, (IP)(IC)
+ self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error
+ self.Emit("SHRL" , jit.Imm(8), _CX) // SHRL $8, CX
+ self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 1)) // CMPB CX, 1(IP)(IC)
+ self.Sjmp("JNE" , _LB_char_1_error) // JNE _char_1_error
+ self.Emit("SHRL" , jit.Imm(8), _CX) // SHRL $8, CX
+ self.Emit("CMPB" , _CX, jit.Sib(_IP, _IC, 1, 2)) // CMPB CX, 2(IP)(IC)
+ self.Sjmp("JNE" , _LB_char_2_error) // JNE _char_2_error
+ self.Sjmp("JMP" , _LB_char_3_error) // JNE _char_3_error
+ self.Link(_LB_char_4_error) // _char_4_error:
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Link(_LB_char_3_error) // _char_3_error:
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Link(_LB_char_2_error) // _char_2_error:
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Link(_LB_char_1_error) // _char_1_error:
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Link(_LB_char_0_error) // _char_0_error:
+ self.Emit("MOVL" , jit.Imm(int64(types.ERR_INVALID_CHAR)), _EP) // MOVL ${types.ERR_INVALID_CHAR}, EP
+ self.Link(_LB_parsing_error) // _parsing_error:
+ self.Emit("MOVQ" , _EP, _DI) // MOVQ EP, DI
+ self.Emit("MOVQ", _ARG_sp, _AX) // MOVQ sp, AX
+ self.Emit("MOVQ", _ARG_sl, _BX) // MOVQ sl, BX
+ self.Emit("MOVQ" , _IC, _CX) // MOVQ IC, CX
+ self.call_go(_F_error_wrap) // CALL_GO error_wrap
+ self.Sjmp("JMP" , _LB_error) // JMP _error
+}
+
+func (self *_Assembler) _asm_OP_dismatch_err(p *_Instr) {
+ self.Emit("MOVQ", _IC, _VAR_ic)
+ self.Emit("MOVQ", jit.Type(p.vt()), _ET)
+ self.Emit("MOVQ", _ET, _VAR_et)
+}
+
+func (self *_Assembler) _asm_OP_go_skip(p *_Instr) {
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Xref(p.vi(), 4)
+ // self.Byte(0xcc)
+ self.Emit("MOVQ", _R9, _VAR_pc)
+ self.Sjmp("JMP" , _LB_skip_one) // JMP _skip_one
+}
+
+func (self *_Assembler) skip_one() {
+ self.Link(_LB_skip_one) // _skip:
+ self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC
+ self.call_sf(_F_skip_one) // CALL_SF skip_one
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+ self.Emit("MOVQ" , _VAR_pc, _R9) // MOVQ pc, R9
+ // self.Byte(0xcc)
+ self.Rjmp("JMP" , _R9) // JMP (R9)
+}
+
+func (self *_Assembler) skip_key_value() {
+ self.Link(_LB_skip_key_value) // _skip:
+ // skip the key
+ self.Emit("MOVQ", _VAR_ic, _IC) // MOVQ _VAR_ic, IC
+ self.call_sf(_F_skip_one) // CALL_SF skip_one
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+ // match char ':'
+ self.lspace("_global_1")
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(':'))
+ self.Sjmp("JNE" , _LB_parsing_error_v) // JNE _parse_error_v
+ self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
+ self.lspace("_global_2")
+ // skip the value
+ self.call_sf(_F_skip_one) // CALL_SF skip_one
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+ // jump back to specified address
+ self.Emit("MOVQ" , _VAR_pc, _R9) // MOVQ pc, R9
+ self.Rjmp("JMP" , _R9) // JMP (R9)
+}
+
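+// _skip_one and _skip_key_value implement mismatch-tolerant decoding: when a
+// value (or a whole "key": value pair) has the wrong shape for the target
+// type, it is skipped with the native skip_one, the first mismatch is recorded
+// in _VAR_et/_VAR_ic, and control resumes at the continuation saved in
+// _VAR_pc, so the error only surfaces once decoding reaches the epilogue.
+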
+
+/** Memory Management Routines **/
+
+var (
+ _T_byte = jit.Type(byteType)
+ _F_mallocgc = jit.Func(mallocgc)
+)
+
+func (self *_Assembler) malloc_AX(nb obj.Addr, ret obj.Addr) {
+ self.Emit("MOVQ", nb, _AX) // MOVQ ${nb}, AX
+ self.Emit("MOVQ", _T_byte, _BX) // MOVQ ${type(byte)}, BX
+ self.Emit("XORL", _CX, _CX) // XORL CX, CX
+ self.call_go(_F_mallocgc) // CALL_GO mallocgc
+ self.Emit("MOVQ", _AX, ret) // MOVQ AX, ${ret}
+}
+
+func (self *_Assembler) valloc(vt reflect.Type, ret obj.Addr) {
+ self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ ${vt.Size()}, AX
+ self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ ${vt}, BX
+ self.Emit("MOVB", jit.Imm(1), _CX) // MOVB $1, CX
+ self.call_go(_F_mallocgc) // CALL_GO mallocgc
+ self.Emit("MOVQ", _AX, ret) // MOVQ AX, ${ret}
+}
+
+func (self *_Assembler) valloc_AX(vt reflect.Type) {
+ self.Emit("MOVQ", jit.Imm(int64(vt.Size())), _AX) // MOVQ ${vt.Size()}, AX
+ self.Emit("MOVQ", jit.Type(vt), _BX) // MOVQ ${vt}, BX
+ self.Emit("MOVB", jit.Imm(1), _CX) // MOVB $1, CX
+ self.call_go(_F_mallocgc) // CALL_GO mallocgc
+}
+
+func (self *_Assembler) vfollow(vt reflect.Type) {
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n}
+ self.valloc_AX(vt) // VALLOC ${vt}, AX
+ self.WritePtrAX(1, jit.Ptr(_VP, 0), true) // MOVQ AX, (VP)
+ self.Link("_end_{n}") // _end_{n}:
+ self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP
+}
+
+/** Value Parsing Routines **/
+
+var (
+ _F_vstring = jit.Imm(int64(native.S_vstring))
+ _F_vnumber = jit.Imm(int64(native.S_vnumber))
+ _F_vsigned = jit.Imm(int64(native.S_vsigned))
+ _F_vunsigned = jit.Imm(int64(native.S_vunsigned))
+)
+
+func (self *_Assembler) check_err(vt reflect.Type, pin string, pin2 int) {
+ self.Emit("MOVQ" , _VAR_st_Vt, _AX) // MOVQ st.Vt, AX
+ self.Emit("TESTQ", _AX, _AX) // CMPQ AX, ${native.V_STRING}
+ // try to skip the value
+ if vt != nil {
+ self.Sjmp("JNS" , "_check_err_{n}") // JNE _parsing_error_v
+ self.Emit("MOVQ", jit.Type(vt), _ET)
+ self.Emit("MOVQ", _ET, _VAR_et)
+ if pin2 != -1 {
+ self.Emit("SUBQ", jit.Imm(1), _BX)
+ self.Emit("MOVQ", _BX, _VAR_ic)
+ self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Xref(pin2, 4)
+ self.Emit("MOVQ", _R9, _VAR_pc)
+ self.Sjmp("JMP" , _LB_skip_key_value)
+ } else {
+ self.Emit("MOVQ", _BX, _VAR_ic)
+ self.Byte(0x4c , 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref(pin, 4)
+ self.Emit("MOVQ", _R9, _VAR_pc)
+ self.Sjmp("JMP" , _LB_skip_one)
+ }
+ self.Link("_check_err_{n}")
+ } else {
+ self.Sjmp("JS" , _LB_parsing_error_v) // JNE _parsing_error_v
+ }
+}
+
+func (self *_Assembler) check_eof(d int64) {
+ if d == 1 {
+ self.Emit("CMPQ", _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error
+ } else {
+ self.Emit("LEAQ", jit.Ptr(_IC, d), _AX) // LEAQ ${d}(IC), AX
+ self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JA" , _LB_eof_error) // JA _eof_error
+ }
+}
+
+
+func (self *_Assembler) parse_string() {
+ self.Emit("MOVQ", _ARG_fv, _CX)
+ self.call_vf(_F_vstring)
+ self.check_err(nil, "", -1)
+}
+
+func (self *_Assembler) parse_number(vt reflect.Type, pin string, pin2 int) {
+ self.Emit("MOVQ", _IC, _BX) // save ic when call native func
+ self.call_vf(_F_vnumber)
+ self.check_err(vt, pin, pin2)
+}
+
+func (self *_Assembler) parse_signed(vt reflect.Type, pin string, pin2 int) {
+ self.Emit("MOVQ", _IC, _BX) // save ic when call native func
+ self.call_vf(_F_vsigned)
+ self.check_err(vt, pin, pin2)
+}
+
+func (self *_Assembler) parse_unsigned(vt reflect.Type, pin string, pin2 int) {
+ self.Emit("MOVQ", _IC, _BX) // save ic when call native func
+ self.call_vf(_F_vunsigned)
+ self.check_err(vt, pin, pin2)
+}
+
+// Pointer: DI, Size: SI, Return: R9
+func (self *_Assembler) copy_string() {
+ self.Link("_copy_string")
+ self.Emit("MOVQ", _DI, _VAR_bs_p)
+ self.Emit("MOVQ", _SI, _VAR_bs_n)
+ self.Emit("MOVQ", _R9, _VAR_bs_LR)
+ self.malloc_AX(_SI, _ARG_sv_p)
+ self.Emit("MOVQ", _VAR_bs_p, _BX)
+ self.Emit("MOVQ", _VAR_bs_n, _CX)
+ self.call_go(_F_memmove)
+ self.Emit("MOVQ", _ARG_sv_p, _DI)
+ self.Emit("MOVQ", _VAR_bs_n, _SI)
+ self.Emit("MOVQ", _VAR_bs_LR, _R9)
+ self.Rjmp("JMP", _R9)
+}
+
+// Pointer: DI, Size: SI, Return: R9
+func (self *_Assembler) escape_string() {
+ self.Link("_escape_string")
+ self.Emit("MOVQ" , _DI, _VAR_bs_p)
+ self.Emit("MOVQ" , _SI, _VAR_bs_n)
+ self.Emit("MOVQ" , _R9, _VAR_bs_LR)
+ self.malloc_AX(_SI, _DX) // MALLOC SI, DX
+ self.Emit("MOVQ" , _DX, _ARG_sv_p)
+ self.Emit("MOVQ" , _VAR_bs_p, _DI)
+ self.Emit("MOVQ" , _VAR_bs_n, _SI)
+ self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX
+ self.Emit("XORL" , _R8, _R8) // XORL R8, R8
+ self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, fv
+ self.Emit("SETCC", _R8) // SETCC R8
+ self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8
+ self.call_c(_F_unquote) // CALL unquote
+ self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI
+ self.Emit("ADDQ" , jit.Imm(1), _SI) // ADDQ $1, SI
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error
+ self.Emit("MOVQ" , _AX, _SI)
+ self.Emit("MOVQ" , _ARG_sv_p, _DI)
+ self.Emit("MOVQ" , _VAR_bs_LR, _R9)
+ self.Rjmp("JMP", _R9)
+}
+
+func (self *_Assembler) escape_string_twice() {
+ self.Link("_escape_string_twice")
+ self.Emit("MOVQ" , _DI, _VAR_bs_p)
+ self.Emit("MOVQ" , _SI, _VAR_bs_n)
+ self.Emit("MOVQ" , _R9, _VAR_bs_LR)
+ self.malloc_AX(_SI, _DX) // MALLOC SI, DX
+ self.Emit("MOVQ" , _DX, _ARG_sv_p)
+ self.Emit("MOVQ" , _VAR_bs_p, _DI)
+ self.Emit("MOVQ" , _VAR_bs_n, _SI)
+ self.Emit("LEAQ" , _VAR_sr, _CX) // LEAQ sr, CX
+ self.Emit("MOVL" , jit.Imm(types.F_DOUBLE_UNQUOTE), _R8) // MOVL ${types.F_DOUBLE_UNQUOTE}, R8
+ self.Emit("BTQ" , jit.Imm(_F_disable_urc), _ARG_fv) // BTQ ${_F_disable_urc}, AX
+ self.Emit("XORL" , _AX, _AX) // XORL AX, AX
+ self.Emit("SETCC", _AX) // SETCC AX
+ self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _AX) // SHLQ ${types.B_UNICODE_REPLACE}, AX
+ self.Emit("ORQ" , _AX, _R8) // ORQ AX, R8
+ self.call_c(_F_unquote) // CALL unquote
+ self.Emit("MOVQ" , _VAR_bs_n, _SI) // MOVQ ${n}, SI
+ self.Emit("ADDQ" , jit.Imm(3), _SI) // ADDQ $3, SI
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_unquote_error) // JS _unquote_error
+ self.Emit("MOVQ" , _AX, _SI)
+ self.Emit("MOVQ" , _ARG_sv_p, _DI)
+ self.Emit("MOVQ" , _VAR_bs_LR, _R9)
+ self.Rjmp("JMP", _R9)
+}
+
+/** Range Checking Routines **/
+
+var (
+ _V_max_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_max_f32))))
+ _V_min_f32 = jit.Imm(int64(uintptr(unsafe.Pointer(_Vp_min_f32))))
+)
+
+var (
+ _Vp_max_f32 = new(float64)
+ _Vp_min_f32 = new(float64)
+)
+
+func init() {
+ *_Vp_max_f32 = math.MaxFloat32
+ *_Vp_min_f32 = -math.MaxFloat32
+}
+
+func (self *_Assembler) range_single_X0() {
+ self.Emit("MOVSD" , _VAR_st_Dv, _X0) // MOVSD st.Dv, X0
+ self.Emit("MOVQ" , _V_max_f32, _CX) // MOVQ _max_f32, CX
+ self.Emit("MOVQ" , jit.Gitab(_I_float32), _ET) // MOVQ ${itab(float32)}, ET
+ self.Emit("MOVQ" , jit.Gtype(_T_float32), _EP) // MOVQ ${type(float32)}, EP
+ self.Emit("UCOMISD" , jit.Ptr(_CX, 0), _X0) // UCOMISD (CX), X0
+ self.Sjmp("JA" , _LB_range_error) // JA _range_error
+ self.Emit("MOVQ" , _V_min_f32, _CX) // MOVQ _min_f32, CX
+ self.Emit("MOVSD" , jit.Ptr(_CX, 0), _X1) // MOVSD (CX), X1
+ self.Emit("UCOMISD" , _X0, _X1) // UCOMISD X0, X1
+ self.Sjmp("JA" , _LB_range_error) // JA _range_error
+ self.Emit("CVTSD2SS", _X0, _X0) // CVTSD2SS X0, X0
+}
+
+func (self *_Assembler) range_signed_CX(i *rt.GoItab, t *rt.GoType, a int64, b int64) {
+ self.Emit("MOVQ", _VAR_st_Iv, _CX) // MOVQ st.Iv, CX
+ self.Emit("MOVQ", jit.Gitab(i), _ET) // MOVQ ${i}, ET
+ self.Emit("MOVQ", jit.Gtype(t), _EP) // MOVQ ${t}, EP
+ self.Emit("CMPQ", _CX, jit.Imm(a)) // CMPQ CX, ${a}
+ self.Sjmp("JL" , _LB_range_error) // JL _range_error
+ self.Emit("CMPQ", _CX, jit.Imm(b)) // CMPQ CX, ${B}
+ self.Sjmp("JG" , _LB_range_error) // JG _range_error
+}
+
+func (self *_Assembler) range_unsigned_CX(i *rt.GoItab, t *rt.GoType, v uint64) {
+ self.Emit("MOVQ" , _VAR_st_Iv, _CX) // MOVQ st.Iv, CX
+ self.Emit("MOVQ" , jit.Gitab(i), _ET) // MOVQ ${i}, ET
+ self.Emit("MOVQ" , jit.Gtype(t), _EP) // MOVQ ${t}, EP
+ self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX
+ self.Sjmp("JS" , _LB_range_error) // JS _range_error
+ self.Emit("CMPQ" , _CX, jit.Imm(int64(v))) // CMPQ CX, ${a}
+ self.Sjmp("JA" , _LB_range_error) // JA _range_error
+}
+
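+// Both range checkers reduce to ordinary bounds tests; ET/EP are pre-loaded so
+// that a failing comparison can jump straight into _range_error with the
+// offending type already in place:
+//
+//     if iv < minT || iv > maxT {          // range_signed_CX
+//         goto rangeError
+//     }
+//     if iv < 0 || uint64(iv) > maxU {     // range_unsigned_CX
+//         goto rangeError
+//     }
+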
+/** String Manipulating Routines **/
+
+var (
+ _F_unquote = jit.Imm(int64(native.S_unquote))
+)
+
+func (self *_Assembler) slice_from(p obj.Addr, d int64) {
+ self.Emit("MOVQ", p, _SI) // MOVQ ${p}, SI
+ self.slice_from_r(_SI, d) // SLICE_R SI, ${d}
+}
+
+func (self *_Assembler) slice_from_r(p obj.Addr, d int64) {
+ self.Emit("LEAQ", jit.Sib(_IP, p, 1, 0), _DI) // LEAQ (IP)(${p}), DI
+ self.Emit("NEGQ", p) // NEGQ ${p}
+ self.Emit("LEAQ", jit.Sib(_IC, p, 1, d), _SI) // LEAQ d(IC)(${p}), SI
+}
+
+func (self *_Assembler) unquote_once(p obj.Addr, n obj.Addr, stack bool, copy bool) {
+ self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1
+ self.Emit("CMPQ", _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1
+ self.Sjmp("JE" , "_noescape_{n}") // JE _escape_{n}
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_unquote_once_write_{n}", 4)
+ self.Sjmp("JMP" , "_escape_string")
+ self.Link("_noescape_{n}")
+ if copy {
+ self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv)
+ self.Sjmp("JNC", "_unquote_once_write_{n}")
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_unquote_once_write_{n}", 4)
+ self.Sjmp("JMP", "_copy_string")
+ }
+ self.Link("_unquote_once_write_{n}")
+ self.Emit("MOVQ", _SI, n) // MOVQ SI, ${n}
+ if stack {
+ self.Emit("MOVQ", _DI, p)
+ } else {
+ self.WriteRecNotAX(10, _DI, p, false, false)
+ }
+}
+
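+// unquote_once has three outcomes: with no escapes (st.Ep == -1) the result
+// may alias the input buffer directly, unless the copy_string flag forces an
+// owned copy through _copy_string; with escapes the bytes are rewritten by the
+// native unquote via _escape_string. Roughly (unquoteEscapes is a stand-in
+// name for that path):
+//
+//     raw := input[st.Iv : ic-1]               // between the two quotes
+//     if st.Ep == -1 {
+//         s = raw                              // zero-copy
+//         if fv&(1<<_F_copy_string) != 0 {
+//             s = append([]byte(nil), raw...)  // force an owned copy
+//         }
+//     } else {
+//         s = unquoteEscapes(raw)              // resolve \-escapes
+//     }
+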
+func (self *_Assembler) unquote_twice(p obj.Addr, n obj.Addr, stack bool) {
+ self.Emit("CMPQ" , _VAR_st_Ep, jit.Imm(-1)) // CMPQ st.Ep, $-1
+ self.Sjmp("JE" , _LB_eof_error) // JE _eof_error
+ self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -3), jit.Imm('\\')) // CMPB -3(IP)(IC), $'\\'
+ self.Sjmp("JNE" , _LB_char_m3_error) // JNE _char_m3_error
+ self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, -2), jit.Imm('"')) // CMPB -2(IP)(IC), $'"'
+ self.Sjmp("JNE" , _LB_char_m2_error) // JNE _char_m2_error
+ self.slice_from(_VAR_st_Iv, -3) // SLICE st.Iv, $-3
+ self.Emit("MOVQ" , _SI, _AX) // MOVQ SI, AX
+ self.Emit("ADDQ" , _VAR_st_Iv, _AX) // ADDQ st.Iv, AX
+ self.Emit("CMPQ" , _VAR_st_Ep, _AX) // CMPQ st.Ep, AX
+ self.Sjmp("JE" , "_noescape_{n}") // JE _noescape_{n}
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_unquote_twice_write_{n}", 4)
+ self.Sjmp("JMP" , "_escape_string_twice")
+ self.Link("_noescape_{n}") // _noescape_{n}:
+ self.Emit("BTQ" , jit.Imm(_F_copy_string), _ARG_fv)
+ self.Sjmp("JNC", "_unquote_twice_write_{n}")
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_unquote_twice_write_{n}", 4)
+ self.Sjmp("JMP", "_copy_string")
+ self.Link("_unquote_twice_write_{n}")
+ self.Emit("MOVQ" , _SI, n) // MOVQ SI, ${n}
+ if stack {
+ self.Emit("MOVQ", _DI, p)
+ } else {
+ self.WriteRecNotAX(12, _DI, p, false, false)
+ }
+ self.Link("_unquote_twice_end_{n}")
+}
+
+/** Memory Clearing Routines **/
+
+var (
+ _F_memclrHasPointers = jit.Func(memclrHasPointers)
+ _F_memclrNoHeapPointers = jit.Func(memclrNoHeapPointers)
+)
+
+func (self *_Assembler) mem_clear_fn(ptrfree bool) {
+ if !ptrfree {
+ self.call_go(_F_memclrHasPointers)
+ } else {
+ self.call_go(_F_memclrNoHeapPointers)
+ }
+}
+
+func (self *_Assembler) mem_clear_rem(size int64, ptrfree bool) {
+ self.Emit("MOVQ", jit.Imm(size), _BX) // MOVQ ${size}, BX
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _AX) // MOVQ (ST)(AX), AX
+ self.Emit("SUBQ", _VP, _AX) // SUBQ VP, AX
+ self.Emit("ADDQ", _AX, _BX) // ADDQ AX, BX
+ self.Emit("MOVQ", _VP, _AX) // MOVQ VP, (SP)
+ self.mem_clear_fn(ptrfree) // CALL_GO memclr{Has,NoHeap}Pointers
+}
+
+/** Map Assigning Routines **/
+
+var (
+ _F_mapassign = jit.Func(mapassign)
+ _F_mapassign_fast32 = jit.Func(mapassign_fast32)
+ _F_mapassign_faststr = jit.Func(mapassign_faststr)
+ _F_mapassign_fast64ptr = jit.Func(mapassign_fast64ptr)
+)
+
+var (
+ _F_decodeJsonUnmarshaler obj.Addr
+ _F_decodeTextUnmarshaler obj.Addr
+)
+
+func init() {
+ _F_decodeJsonUnmarshaler = jit.Func(decodeJsonUnmarshaler)
+ _F_decodeTextUnmarshaler = jit.Func(decodeTextUnmarshaler)
+}
+
+func (self *_Assembler) mapaccess_ptr(t reflect.Type) {
+ if rt.MapType(rt.UnpackType(t)).IndirectElem() {
+ self.vfollow(t.Elem())
+ }
+}
+
+func (self *_Assembler) mapassign_std(t reflect.Type, v obj.Addr) {
+ self.Emit("LEAQ", v, _AX) // LEAQ ${v}, AX
+ self.mapassign_call_from_AX(t, _F_mapassign) // MAPASSIGN ${t}, mapassign
+}
+
+func (self *_Assembler) mapassign_str_fast(t reflect.Type, p obj.Addr, n obj.Addr) {
+ self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX
+ self.Emit("MOVQ", _VP, _BX) // MOVQ VP, BX
+ self.Emit("MOVQ", p, _CX) // MOVQ ${p}, CX
+ self.Emit("MOVQ", n, _DI) // MOVQ ${n}, DI
+ self.call_go(_F_mapassign_faststr) // CALL_GO ${fn}
+ self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP
+ self.mapaccess_ptr(t)
+}
+
+func (self *_Assembler) mapassign_call_from_AX(t reflect.Type, fn obj.Addr) {
+ self.Emit("MOVQ", _AX, _CX)
+ self.Emit("MOVQ", jit.Type(t), _AX) // MOVQ ${t}, AX
+ self.Emit("MOVQ", _VP, _BX) // MOVQ VP, _BX
+ self.call_go(fn) // CALL_GO ${fn}
+ self.Emit("MOVQ", _AX, _VP) // MOVQ AX, VP
+}
+
+func (self *_Assembler) mapassign_fastx(t reflect.Type, fn obj.Addr) {
+ self.mapassign_call_from_AX(t, fn)
+ self.mapaccess_ptr(t)
+}
+
+func (self *_Assembler) mapassign_utext(t reflect.Type, addressable bool) {
+ pv := false
+ vk := t.Key()
+ tk := t.Key()
+
+ /* deref pointer if needed */
+ if vk.Kind() == reflect.Ptr {
+ pv = true
+ vk = vk.Elem()
+ }
+
+ /* addressable value with pointer receiver */
+ if addressable {
+ pv = false
+ tk = reflect.PtrTo(tk)
+ }
+
+ /* allocate the key, and call the unmarshaler */
+ self.valloc(vk, _BX) // VALLOC ${vk}, BX
+ // must spill vk pointer since next call_go may invoke GC
+ self.Emit("MOVQ" , _BX, _ARG_vk)
+ self.Emit("MOVQ" , jit.Type(tk), _AX) // MOVQ ${tk}, AX
+ self.Emit("MOVQ" , _ARG_sv_p, _CX) // MOVQ sv.p, CX
+ self.Emit("MOVQ" , _ARG_sv_n, _DI) // MOVQ sv.n, DI
+ self.call_go(_F_decodeTextUnmarshaler) // CALL_GO decodeTextUnmarshaler
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+ self.Emit("MOVQ" , _ARG_vk, _AX) // MOVQ VAR.vk, AX
+ self.Emit("MOVQ", jit.Imm(0), _ARG_vk)
+
+ /* select the correct assignment function */
+ if !pv {
+ self.mapassign_call_from_AX(t, _F_mapassign)
+ } else {
+ self.mapassign_fastx(t, _F_mapassign_fast64ptr)
+ }
+}
+
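+// For encoding.TextUnmarshaler map keys the key value is heap-allocated first,
+// filled in through decodeTextUnmarshaler, and only then inserted; roughly
+// (pointer-typed keys take the mapassign_fast64ptr path instead):
+//
+//     k := new(K)                                  // valloc ${vk}
+//     if err := k.UnmarshalText(sv); err != nil {  // via decodeTextUnmarshaler
+//         return err
+//     }
+//     vp = mapassign(mapType, m, unsafe.Pointer(k))
+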
+/** External Unmarshaler Routines **/
+
+var (
+ _F_skip_one = jit.Imm(int64(native.S_skip_one))
+ _F_skip_array = jit.Imm(int64(native.S_skip_array))
+ _F_skip_object = jit.Imm(int64(native.S_skip_object))
+ _F_skip_number = jit.Imm(int64(native.S_skip_number))
+)
+
+func (self *_Assembler) unmarshal_json(t reflect.Type, deref bool) {
+ self.call_sf(_F_skip_one) // CALL_SF skip_one
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+ self.slice_from_r(_AX, 0) // SLICE_R AX, $0
+ self.Emit("MOVQ" , _DI, _ARG_sv_p) // MOVQ DI, sv.p
+ self.Emit("MOVQ" , _SI, _ARG_sv_n) // MOVQ SI, sv.n
+ self.unmarshal_func(t, _F_decodeJsonUnmarshaler, deref) // UNMARSHAL json, ${t}, ${deref}
+}
+
+func (self *_Assembler) unmarshal_text(t reflect.Type, deref bool) {
+ self.parse_string() // PARSE STRING
+ self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n
+ self.unmarshal_func(t, _F_decodeTextUnmarshaler, deref) // UNMARSHAL text, ${t}, ${deref}
+}
+
+func (self *_Assembler) unmarshal_func(t reflect.Type, fn obj.Addr, deref bool) {
+ pt := t
+ vk := t.Kind()
+
+ /* allocate the field if needed */
+ if deref && vk == reflect.Ptr {
+ self.Emit("MOVQ" , _VP, _BX) // MOVQ VP, BX
+ self.Emit("MOVQ" , jit.Ptr(_BX, 0), _BX) // MOVQ (BX), BX
+ self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX
+ self.Sjmp("JNZ" , "_deref_{n}") // JNZ _deref_{n}
+ self.valloc(t.Elem(), _BX) // VALLOC ${t.Elem()}, BX
+ self.WriteRecNotAX(3, _BX, jit.Ptr(_VP, 0), false, false) // MOVQ BX, (VP)
+ self.Link("_deref_{n}") // _deref_{n}:
+ } else {
+ /* set value pointer */
+ self.Emit("MOVQ", _VP, _BX) // MOVQ (VP), BX
+ }
+
+ /* set value type */
+ self.Emit("MOVQ", jit.Type(pt), _AX) // MOVQ ${pt}, AX
+
+ /* set the source string and call the unmarshaler */
+ self.Emit("MOVQ" , _ARG_sv_p, _CX) // MOVQ sv.p, CX
+ self.Emit("MOVQ" , _ARG_sv_n, _DI) // MOVQ sv.n, DI
+ self.call_go(fn) // CALL_GO ${fn}
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+}
+
+/** Dynamic Decoding Routine **/
+
+var (
+ _F_decodeTypedPointer obj.Addr
+)
+
+func init() {
+ _F_decodeTypedPointer = jit.Func(decodeTypedPointer)
+}
+
+func (self *_Assembler) decode_dynamic(vt obj.Addr, vp obj.Addr) {
+ self.Emit("MOVQ" , vp, _SI) // MOVQ ${vp}, SI
+ self.Emit("MOVQ" , vt, _DI) // MOVQ ${vt}, DI
+ self.Emit("MOVQ", _ARG_sp, _AX) // MOVQ sp, AX
+ self.Emit("MOVQ", _ARG_sl, _BX) // MOVQ sp, BX
+ self.Emit("MOVQ" , _IC, _CX) // MOVQ IC, CX
+ self.Emit("MOVQ" , _ST, _R8) // MOVQ ST, R8
+ self.Emit("MOVQ" , _ARG_fv, _R9) // MOVQ fv, R9
+ self.save(_REG_rt...)
+ self.Emit("MOVQ", _F_decodeTypedPointer, _IL) // MOVQ ${fn}, R11
+ self.Rjmp("CALL", _IL) // CALL R11
+ self.load(_REG_rt...)
+ self.Emit("MOVQ" , _AX, _IC) // MOVQ AX, IC
+ self.Emit("MOVQ" , _BX, _ET) // MOVQ BX, ET
+ self.Emit("MOVQ" , _CX, _EP) // MOVQ CX, EP
+ self.Emit("TESTQ", _ET, _ET) // TESTQ ET, ET
+ self.Sjmp("JNZ" , _LB_error) // JNZ _error
+}
+
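+// decode_dynamic re-enters the decoder for a type only known at run time
+// (interface targets): it hands (s, ic, vt, vp, sb, fv) to decodeTypedPointer
+// in registers and takes back the new cursor plus any error, i.e. roughly
+//
+//     ic, err = decodeTypedPointer(s, ic, vt, vp, sb, fv)
+//     if err != nil {
+//         goto _error
+//     }
+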
+/** OpCode Assembler Functions **/
+
+var (
+ _F_memequal = jit.Func(memequal)
+ _F_memmove = jit.Func(memmove)
+ _F_growslice = jit.Func(growslice)
+ _F_makeslice = jit.Func(makeslice)
+ _F_makemap_small = jit.Func(makemap_small)
+ _F_mapassign_fast64 = jit.Func(mapassign_fast64)
+)
+
+var (
+ _F_lspace = jit.Imm(int64(native.S_lspace))
+ _F_strhash = jit.Imm(int64(caching.S_strhash))
+)
+
+var (
+ _F_b64decode = jit.Imm(int64(_subr__b64decode))
+ _F_decodeValue = jit.Imm(int64(_subr_decode_value))
+)
+
+var (
+ _F_FieldMap_GetCaseInsensitive obj.Addr
+)
+
+const (
+ _MODE_AVX2 = 1 << 2
+)
+
+const (
+ _Fe_ID = int64(unsafe.Offsetof(caching.FieldEntry{}.ID))
+ _Fe_Name = int64(unsafe.Offsetof(caching.FieldEntry{}.Name))
+ _Fe_Hash = int64(unsafe.Offsetof(caching.FieldEntry{}.Hash))
+)
+
+const (
+ _Vk_Ptr = int64(reflect.Ptr)
+ _Gt_KindFlags = int64(unsafe.Offsetof(rt.GoType{}.KindFlags))
+)
+
+func init() {
+ _F_FieldMap_GetCaseInsensitive = jit.Func((*caching.FieldMap).GetCaseInsensitive)
+}
+
+func (self *_Assembler) _asm_OP_any(_ *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX
+ self.Emit("TESTQ" , _CX, _CX) // TESTQ CX, CX
+ self.Sjmp("JZ" , "_decode_{n}") // JZ _decode_{n}
+ self.Emit("CMPQ" , _CX, _VP) // CMPQ CX, VP
+ self.Sjmp("JE" , "_decode_{n}") // JE _decode_{n}
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
+ self.Emit("MOVBLZX", jit.Ptr(_AX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(AX), DX
+ self.Emit("ANDL" , jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX
+ self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr}
+ self.Sjmp("JNE" , "_decode_{n}") // JNE _decode_{n}
+ self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI
+ self.decode_dynamic(_AX, _DI) // DECODE AX, DI
+ self.Sjmp("JMP" , "_decode_end_{n}") // JMP _decode_end_{n}
+ self.Link("_decode_{n}") // _decode_{n}:
+ self.Emit("MOVQ" , _ARG_fv, _DF) // MOVQ fv, DF
+ self.Emit("MOVQ" , _ST, jit.Ptr(_SP, 0)) // MOVQ _ST, (SP)
+ self.call(_F_decodeValue) // CALL decodeValue
+ self.Emit("MOVQ" , jit.Imm(0), jit.Ptr(_SP, 0)) // MOVQ _ST, (SP)
+ self.Emit("TESTQ" , _EP, _EP) // TESTQ EP, EP
+ self.Sjmp("JNZ" , _LB_parsing_error) // JNZ _parsing_error
+ self.Link("_decode_end_{n}") // _decode_end_{n}:
+}
+
+func (self *_Assembler) _asm_OP_dyn(p *_Instr) {
+ self.Emit("MOVQ" , jit.Type(p.vt()), _ET) // MOVQ ${p.vt()}, ET
+ self.Emit("CMPQ" , jit.Ptr(_VP, 8), jit.Imm(0)) // CMPQ 8(VP), $0
+ self.Sjmp("JE" , _LB_type_error) // JE _type_error
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _CX) // MOVQ (VP), CX
+ self.Emit("MOVQ" , jit.Ptr(_CX, 8), _CX) // MOVQ 8(CX), CX
+ self.Emit("MOVBLZX", jit.Ptr(_CX, _Gt_KindFlags), _DX) // MOVBLZX _Gt_KindFlags(CX), DX
+ self.Emit("ANDL" , jit.Imm(rt.F_kind_mask), _DX) // ANDL ${F_kind_mask}, DX
+ self.Emit("CMPL" , _DX, jit.Imm(_Vk_Ptr)) // CMPL DX, ${reflect.Ptr}
+ self.Sjmp("JNE" , _LB_type_error) // JNE _type_error
+ self.Emit("LEAQ" , jit.Ptr(_VP, 8), _DI) // LEAQ 8(VP), DI
+ self.decode_dynamic(_CX, _DI) // DECODE CX, DI
+ self.Link("_decode_end_{n}") // _decode_end_{n}:
+}
+
+func (self *_Assembler) _asm_OP_str(_ *_Instr) {
+ self.parse_string() // PARSE STRING
+ self.unquote_once(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false, true) // UNQUOTE once, (VP), 8(VP)
+}
+
+func (self *_Assembler) _asm_OP_bin(_ *_Instr) {
+ self.parse_string() // PARSE STRING
+ self.slice_from(_VAR_st_Iv, -1) // SLICE st.Iv, $-1
+ self.Emit("MOVQ" , _DI, jit.Ptr(_VP, 0)) // MOVQ DI, (VP)
+ self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP)
+ self.Emit("SHRQ" , jit.Imm(2), _SI) // SHRQ $2, SI
+ self.Emit("LEAQ" , jit.Sib(_SI, _SI, 2, 0), _SI) // LEAQ (SI)(SI*2), SI
+ self.Emit("MOVQ" , _SI, jit.Ptr(_VP, 16)) // MOVQ SI, 16(VP)
+ self.malloc_AX(_SI, _SI) // MALLOC SI, SI
+
+ // TODO: due to base64x's bug, only use AVX mode now
+ self.Emit("MOVL", jit.Imm(_MODE_JSON), _CX) // MOVL $_MODE_JSON, CX
+
+ /* call the decoder */
+ self.Emit("XORL" , _DX, _DX) // XORL DX, DX
+ self.Emit("MOVQ" , _VP, _DI) // MOVQ VP, DI
+
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _R8) // MOVQ SI, (VP)
+ self.WriteRecNotAX(4, _SI, jit.Ptr(_VP, 0), true, false) // XCHGQ SI, (VP)
+ self.Emit("MOVQ" , _R8, _SI)
+
+ self.Emit("XCHGQ", _DX, jit.Ptr(_VP, 8)) // XCHGQ DX, 8(VP)
+ self.call_c(_F_b64decode) // CALL b64decode
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_base64_error) // JS _base64_error
+ self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
+}
+
+func (self *_Assembler) _asm_OP_bool(_ *_Instr) {
+ self.Emit("LEAQ", jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX
+ self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JA" , _LB_eof_error) // JA _eof_error
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('f')) // CMPB (IP)(IC), $'f'
+ self.Sjmp("JE" , "_false_{n}") // JE _false_{n}
+ self.Emit("MOVL", jit.Imm(_IM_true), _CX) // MOVL $"true", CX
+ self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC)
+ self.Sjmp("JE" , "_bool_true_{n}")
+ // try to skip the value
+ self.Emit("MOVQ", _IC, _VAR_ic)
+ self.Emit("MOVQ", _T_bool, _ET)
+ self.Emit("MOVQ", _ET, _VAR_et)
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_end_{n}", 4)
+ self.Emit("MOVQ", _R9, _VAR_pc)
+ self.Sjmp("JMP" , _LB_skip_one)
+
+ self.Link("_bool_true_{n}")
+ self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
+ self.Emit("MOVB", jit.Imm(1), jit.Ptr(_VP, 0)) // MOVB $1, (VP)
+ self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n}
+ self.Link("_false_{n}") // _false_{n}:
+ self.Emit("ADDQ", jit.Imm(1), _AX) // ADDQ $1, AX
+ self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Emit("CMPQ", _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JA" , _LB_eof_error) // JA _eof_error
+ self.Emit("MOVL", jit.Imm(_IM_alse), _CX) // MOVL $"alse", CX
+ self.Emit("CMPL", _CX, jit.Sib(_IP, _IC, 1, 0)) // CMPL CX, (IP)(IC)
+ self.Sjmp("JNE" , _LB_im_error) // JNE _im_error
+ self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
+ self.Emit("XORL", _AX, _AX) // XORL AX, AX
+ self.Emit("MOVB", _AX, jit.Ptr(_VP, 0)) // MOVB AX, (VP)
+ self.Link("_end_{n}") // _end_{n}:
+}
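+
+/* A rough Go-level sketch of the literal matching above (illustrative only, not
+ * part of the generated decoder): after ruling out a leading 'f', four input
+ * bytes are compared at once against a 32-bit immediate, assuming _IM_true and
+ * _IM_alse hold the bytes of "true" and "alse" in little-endian order:
+ *
+ *     word := binary.LittleEndian.Uint32(buf[ic:])
+ *     isTrue := word == binary.LittleEndian.Uint32([]byte("true"))
+ */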
+
+func (self *_Assembler) _asm_OP_num(_ *_Instr) {
+ self.Emit("MOVQ", jit.Imm(0), _VAR_fl)
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"'))
+ self.Emit("MOVQ", _IC, _BX)
+ self.Sjmp("JNE", "_skip_number_{n}")
+ self.Emit("MOVQ", jit.Imm(1), _VAR_fl)
+ self.Emit("ADDQ", jit.Imm(1), _IC)
+ self.Link("_skip_number_{n}")
+
+ /* call skip_number */
+ self.Emit("LEAQ", _ARG_s, _DI) // LEAQ s<>+0(FP), DI
+ self.Emit("MOVQ", _IC, _ARG_ic) // MOVQ IC, ic<>+16(FP)
+ self.Emit("LEAQ", _ARG_ic, _SI) // LEAQ ic<>+16(FP), SI
+ self.callc(_F_skip_number) // CALL _F_skip_number
+ self.Emit("MOVQ", _ARG_ic, _IC) // MOVQ ic<>+16(FP), IC
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNS" , "_num_next_{n}")
+
+ /* call skip one */
+ self.Emit("MOVQ", _BX, _VAR_ic)
+ self.Emit("MOVQ", _T_number, _ET)
+ self.Emit("MOVQ", _ET, _VAR_et)
+ self.Byte(0x4c, 0x8d, 0x0d)
+ self.Sref("_num_end_{n}", 4)
+ self.Emit("MOVQ", _R9, _VAR_pc)
+ self.Sjmp("JMP" , _LB_skip_one)
+
+    /* assign string */
+ self.Link("_num_next_{n}")
+ self.slice_from_r(_AX, 0)
+ self.Emit("BTQ", jit.Imm(_F_copy_string), _ARG_fv)
+ self.Sjmp("JNC", "_num_write_{n}")
+ self.Byte(0x4c, 0x8d, 0x0d) // LEAQ (PC), R9
+ self.Sref("_num_write_{n}", 4)
+ self.Sjmp("JMP", "_copy_string")
+ self.Link("_num_write_{n}")
+ self.Emit("MOVQ", _SI, jit.Ptr(_VP, 8)) // MOVQ SI, 8(VP)
+ self.WriteRecNotAX(13, _DI, jit.Ptr(_VP, 0), false, false)
+ self.Emit("CMPQ", _VAR_fl, jit.Imm(1))
+ self.Sjmp("JNE", "_num_end_{n}")
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('"'))
+ self.Sjmp("JNE", _LB_char_0_error)
+ self.Emit("ADDQ", jit.Imm(1), _IC)
+ self.Link("_num_end_{n}")
+}
+
+func (self *_Assembler) _asm_OP_i8(_ *_Instr) {
+ var pin = "_i8_end_{n}"
+ self.parse_signed(int8Type, pin, -1) // PARSE int8
+ self.range_signed_CX(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8
+ self.Emit("MOVB", _CX, jit.Ptr(_VP, 0)) // MOVB CX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_i16(_ *_Instr) {
+ var pin = "_i16_end_{n}"
+ self.parse_signed(int16Type, pin, -1) // PARSE int16
+ self.range_signed_CX(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16
+ self.Emit("MOVW", _CX, jit.Ptr(_VP, 0)) // MOVW CX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_i32(_ *_Instr) {
+ var pin = "_i32_end_{n}"
+ self.parse_signed(int32Type, pin, -1) // PARSE int32
+ self.range_signed_CX(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32
+ self.Emit("MOVL", _CX, jit.Ptr(_VP, 0)) // MOVL CX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_i64(_ *_Instr) {
+ var pin = "_i64_end_{n}"
+ self.parse_signed(int64Type, pin, -1) // PARSE int64
+ self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_u8(_ *_Instr) {
+ var pin = "_u8_end_{n}"
+ self.parse_unsigned(uint8Type, pin, -1) // PARSE uint8
+ self.range_unsigned_CX(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8
+ self.Emit("MOVB", _CX, jit.Ptr(_VP, 0)) // MOVB CX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_u16(_ *_Instr) {
+ var pin = "_u16_end_{n}"
+ self.parse_unsigned(uint16Type, pin, -1) // PARSE uint16
+ self.range_unsigned_CX(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16
+ self.Emit("MOVW", _CX, jit.Ptr(_VP, 0)) // MOVW CX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_u32(_ *_Instr) {
+ var pin = "_u32_end_{n}"
+ self.parse_unsigned(uint32Type, pin, -1) // PARSE uint32
+ self.range_unsigned_CX(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32
+ self.Emit("MOVL", _CX, jit.Ptr(_VP, 0)) // MOVL CX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_u64(_ *_Instr) {
+ var pin = "_u64_end_{n}"
+ self.parse_unsigned(uint64Type, pin, -1) // PARSE uint64
+ self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_f32(_ *_Instr) {
+ var pin = "_f32_end_{n}"
+ self.parse_number(float32Type, pin, -1) // PARSE NUMBER
+ self.range_single_X0() // RANGE float32
+ self.Emit("MOVSS", _X0, jit.Ptr(_VP, 0)) // MOVSS X0, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_f64(_ *_Instr) {
+ var pin = "_f64_end_{n}"
+ self.parse_number(float64Type, pin, -1) // PARSE NUMBER
+ self.Emit("MOVSD", _VAR_st_Dv, _X0) // MOVSD st.Dv, X0
+ self.Emit("MOVSD", _X0, jit.Ptr(_VP, 0)) // MOVSD X0, (VP)
+ self.Link(pin)
+}
+
+func (self *_Assembler) _asm_OP_unquote(_ *_Instr) {
+ self.check_eof(2)
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm('\\')) // CMPB (IP)(IC), $'\\'
+ self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 1), jit.Imm('"')) // CMPB 1(IP)(IC), $'"'
+ self.Sjmp("JNE" , _LB_char_1_error) // JNE _char_1_error
+ self.Emit("ADDQ", jit.Imm(2), _IC) // ADDQ $2, IC
+ self.parse_string() // PARSE STRING
+ self.unquote_twice(jit.Ptr(_VP, 0), jit.Ptr(_VP, 8), false) // UNQUOTE twice, (VP), 8(VP)
+}
+
+func (self *_Assembler) _asm_OP_nil_1(_ *_Instr) {
+ self.Emit("XORL", _AX, _AX) // XORL AX, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_VP, 0)) // MOVQ AX, (VP)
+}
+
+func (self *_Assembler) _asm_OP_nil_2(_ *_Instr) {
+ self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP)
+}
+
+func (self *_Assembler) _asm_OP_nil_3(_ *_Instr) {
+ self.Emit("XORL" , _AX, _AX) // XORL AX, AX
+ self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP)
+ self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 16)) // MOVOU AX, 16(VP)
+}
+
+func (self *_Assembler) _asm_OP_deref(p *_Instr) {
+ self.vfollow(p.vt())
+}
+
+func (self *_Assembler) _asm_OP_index(p *_Instr) {
+ self.Emit("MOVQ", jit.Imm(p.i64()), _AX) // MOVQ ${p.vi()}, AX
+ self.Emit("ADDQ", _AX, _VP) // ADDQ _AX, _VP
+}
+
+func (self *_Assembler) _asm_OP_is_null(p *_Instr) {
+ self.Emit("LEAQ" , jit.Ptr(_IC, 4), _AX) // LEAQ 4(IC), AX
+ self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JA" , "_not_null_{n}") // JA _not_null_{n}
+ self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null"
+ self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
+ self.Xjmp("JE" , p.vi()) // JE {p.vi()}
+ self.Link("_not_null_{n}") // _not_null_{n}:
+}
+
+func (self *_Assembler) _asm_OP_is_null_quote(p *_Instr) {
+ self.Emit("LEAQ" , jit.Ptr(_IC, 5), _AX) // LEAQ 4(IC), AX
+ self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JA" , "_not_null_quote_{n}") // JA _not_null_quote_{n}
+ self.Emit("CMPL" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(_IM_null)) // CMPL (IP)(IC), $"null"
+ self.Sjmp("JNE" , "_not_null_quote_{n}") // JNE _not_null_quote_{n}
+ self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 4), jit.Imm('"')) // CMPB 4(IP)(IC), $'"'
+ self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
+ self.Xjmp("JE" , p.vi()) // JE {p.vi()}
+ self.Link("_not_null_quote_{n}") // _not_null_quote_{n}:
+}
+
+func (self *_Assembler) _asm_OP_map_init(_ *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _AX) // MOVQ (VP), AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNZ" , "_end_{n}") // JNZ _end_{n}
+ self.call_go(_F_makemap_small) // CALL_GO makemap_small
+ self.WritePtrAX(6, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
+ self.Link("_end_{n}") // _end_{n}:
+ self.Emit("MOVQ" , _AX, _VP) // MOVQ AX, VP
+}
+
+func (self *_Assembler) _asm_OP_map_key_i8(p *_Instr) {
+ self.parse_signed(int8Type, "", p.vi()) // PARSE int8
+ self.range_signed_CX(_I_int8, _T_int8, math.MinInt8, math.MaxInt8) // RANGE int8
+ self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int8, mapassign, st.Iv
+}
+
+func (self *_Assembler) _asm_OP_map_key_i16(p *_Instr) {
+ self.parse_signed(int16Type, "", p.vi()) // PARSE int16
+ self.range_signed_CX(_I_int16, _T_int16, math.MinInt16, math.MaxInt16) // RANGE int16
+ self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN int16, mapassign, st.Iv
+}
+
+func (self *_Assembler) _asm_OP_map_key_i32(p *_Instr) {
+ self.parse_signed(int32Type, "", p.vi()) // PARSE int32
+ self.range_signed_CX(_I_int32, _T_int32, math.MinInt32, math.MaxInt32) // RANGE int32
+ if vt := p.vt(); !mapfast(vt) {
+ self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int32, mapassign, st.Iv
+ } else {
+ self.Emit("MOVQ", _CX, _AX) // MOVQ CX, AX
+ self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN int32, mapassign_fast32
+ }
+}
+
+func (self *_Assembler) _asm_OP_map_key_i64(p *_Instr) {
+ self.parse_signed(int64Type, "", p.vi()) // PARSE int64
+ if vt := p.vt(); !mapfast(vt) {
+ self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN int64, mapassign, st.Iv
+ } else {
+ self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+ self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN int64, mapassign_fast64
+ }
+}
+
+func (self *_Assembler) _asm_OP_map_key_u8(p *_Instr) {
+ self.parse_unsigned(uint8Type, "", p.vi()) // PARSE uint8
+ self.range_unsigned_CX(_I_uint8, _T_uint8, math.MaxUint8) // RANGE uint8
+ self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint8, vt.Iv
+}
+
+func (self *_Assembler) _asm_OP_map_key_u16(p *_Instr) {
+ self.parse_unsigned(uint16Type, "", p.vi()) // PARSE uint16
+ self.range_unsigned_CX(_I_uint16, _T_uint16, math.MaxUint16) // RANGE uint16
+ self.mapassign_std(p.vt(), _VAR_st_Iv) // MAPASSIGN uint16, vt.Iv
+}
+
+func (self *_Assembler) _asm_OP_map_key_u32(p *_Instr) {
+ self.parse_unsigned(uint32Type, "", p.vi()) // PARSE uint32
+ self.range_unsigned_CX(_I_uint32, _T_uint32, math.MaxUint32) // RANGE uint32
+ if vt := p.vt(); !mapfast(vt) {
+ self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint32, vt.Iv
+ } else {
+ self.Emit("MOVQ", _CX, _AX) // MOVQ CX, AX
+ self.mapassign_fastx(vt, _F_mapassign_fast32) // MAPASSIGN uint32, mapassign_fast32
+ }
+}
+
+func (self *_Assembler) _asm_OP_map_key_u64(p *_Instr) {
+ self.parse_unsigned(uint64Type, "", p.vi()) // PARSE uint64
+ if vt := p.vt(); !mapfast(vt) {
+ self.mapassign_std(vt, _VAR_st_Iv) // MAPASSIGN uint64, vt.Iv
+ } else {
+ self.Emit("MOVQ", _VAR_st_Iv, _AX) // MOVQ st.Iv, AX
+ self.mapassign_fastx(vt, _F_mapassign_fast64) // MAPASSIGN uint64, mapassign_fast64
+ }
+}
+
+func (self *_Assembler) _asm_OP_map_key_f32(p *_Instr) {
+ self.parse_number(float32Type, "", p.vi()) // PARSE NUMBER
+ self.range_single_X0() // RANGE float32
+ self.Emit("MOVSS", _X0, _VAR_st_Dv) // MOVSS X0, st.Dv
+ self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv
+}
+
+func (self *_Assembler) _asm_OP_map_key_f64(p *_Instr) {
+ self.parse_number(float64Type, "", p.vi()) // PARSE NUMBER
+ self.mapassign_std(p.vt(), _VAR_st_Dv) // MAPASSIGN ${p.vt()}, mapassign, st.Dv
+}
+
+func (self *_Assembler) _asm_OP_map_key_str(p *_Instr) {
+ self.parse_string() // PARSE STRING
+ self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n
+ if vt := p.vt(); !mapfast(vt) {
+ self.valloc(vt.Key(), _DI)
+ self.Emit("MOVOU", _ARG_sv, _X0)
+ self.Emit("MOVOU", _X0, jit.Ptr(_DI, 0))
+ self.mapassign_std(vt, jit.Ptr(_DI, 0)) // MAPASSIGN string, DI, SI
+ } else {
+ self.mapassign_str_fast(vt, _ARG_sv_p, _ARG_sv_n) // MAPASSIGN string, DI, SI
+ }
+}
+
+func (self *_Assembler) _asm_OP_map_key_utext(p *_Instr) {
+ self.parse_string() // PARSE STRING
+ self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n
+ self.mapassign_utext(p.vt(), false) // MAPASSIGN utext, ${p.vt()}, false
+}
+
+func (self *_Assembler) _asm_OP_map_key_utext_p(p *_Instr) {
+ self.parse_string() // PARSE STRING
+ self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, true) // UNQUOTE once, sv.p, sv.n
+ self.mapassign_utext(p.vt(), true) // MAPASSIGN utext, ${p.vt()}, true
+}
+
+func (self *_Assembler) _asm_OP_array_skip(_ *_Instr) {
+ self.call_sf(_F_skip_array) // CALL_SF skip_array
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+}
+
+func (self *_Assembler) _asm_OP_array_clear(p *_Instr) {
+ self.mem_clear_rem(p.i64(), true)
+}
+
+func (self *_Assembler) _asm_OP_array_clear_p(p *_Instr) {
+ self.mem_clear_rem(p.i64(), false)
+}
+
+func (self *_Assembler) _asm_OP_slice_init(p *_Instr) {
+ self.Emit("XORL" , _AX, _AX) // XORL AX, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
+ self.Emit("MOVQ" , jit.Ptr(_VP, 16), _BX) // MOVQ 16(VP), BX
+ self.Emit("TESTQ", _BX, _BX) // TESTQ BX, BX
+ self.Sjmp("JNZ" , "_done_{n}") // JNZ _done_{n}
+ self.Emit("MOVQ" , jit.Imm(_MinSlice), _CX) // MOVQ ${_MinSlice}, CX
+ self.Emit("MOVQ" , _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP)
+ self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, DX
+ self.call_go(_F_makeslice) // CALL_GO makeslice
+ self.WritePtrAX(7, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
+ self.Emit("XORL" , _AX, _AX) // XORL AX, AX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_VP, 8)) // MOVQ AX, 8(VP)
+ self.Link("_done_{n}") // _done_{n}
+}
+
+func (self *_Assembler) _asm_OP_slice_append(p *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_VP, 8), _AX) // MOVQ 8(VP), AX
+ self.Emit("CMPQ" , _AX, jit.Ptr(_VP, 16)) // CMPQ AX, 16(VP)
+ self.Sjmp("JB" , "_index_{n}") // JB _index_{n}
+ self.Emit("MOVQ" , _AX, _SI) // MOVQ AX, SI
+ self.Emit("SHLQ" , jit.Imm(1), _SI) // SHLQ $1, SI
+ self.Emit("MOVQ" , jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _BX) // MOVQ (VP), BX
+ self.Emit("MOVQ" , jit.Ptr(_VP, 8), _CX) // MOVQ 8(VP), CX
+ self.Emit("MOVQ" , jit.Ptr(_VP, 16), _DI) // MOVQ 16(VP), DI
+ self.call_go(_F_growslice) // CALL_GO growslice
+ self.WritePtrAX(8, jit.Ptr(_VP, 0), false) // MOVQ AX, (VP)
+ self.Emit("MOVQ" , _BX, jit.Ptr(_VP, 8)) // MOVQ BX, 8(VP)
+ self.Emit("MOVQ" , _CX, jit.Ptr(_VP, 16)) // MOVQ CX, 16(VP)
+ self.Emit("MOVQ" , _BX, _AX) // MOVQ BX, AX
+ self.Link("_index_{n}") // _index_{n}:
+ self.Emit("ADDQ" , jit.Imm(1), jit.Ptr(_VP, 8)) // ADDQ $1, 8(VP)
+ self.Emit("MOVQ" , jit.Ptr(_VP, 0), _VP) // MOVQ (VP), VP
+ self.Emit("MOVQ" , jit.Imm(int64(p.vlen())), _CX) // MOVQ ${p.vlen()}, CX
+ self.From("MULQ" , _CX) // MULQ CX
+ self.Emit("ADDQ" , _AX, _VP) // ADDQ AX, VP
+}
+
+func (self *_Assembler) _asm_OP_object_skip(_ *_Instr) {
+ self.call_sf(_F_skip_object) // CALL_SF skip_object
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+}
+
+func (self *_Assembler) _asm_OP_object_next(_ *_Instr) {
+ self.call_sf(_F_skip_one) // CALL_SF skip_one
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parse_error_v
+}
+
+func (self *_Assembler) _asm_OP_struct_field(p *_Instr) {
+ assert_eq(caching.FieldEntrySize, 32, "invalid field entry size")
+ self.Emit("MOVQ" , jit.Imm(-1), _AX) // MOVQ $-1, AX
+ self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, sr
+ self.parse_string() // PARSE STRING
+ self.unquote_once(_ARG_sv_p, _ARG_sv_n, true, false) // UNQUOTE once, sv.p, sv.n
+ self.Emit("LEAQ" , _ARG_sv, _AX) // LEAQ sv, AX
+ self.Emit("XORL" , _BX, _BX) // XORL BX, BX
+ self.call_go(_F_strhash) // CALL_GO strhash
+ self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9
+ self.Emit("MOVQ" , jit.Imm(freezeFields(p.vf())), _CX) // MOVQ ${p.vf()}, CX
+ self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_b), _SI) // MOVQ FieldMap.b(CX), SI
+ self.Emit("MOVQ" , jit.Ptr(_CX, caching.FieldMap_N), _CX) // MOVQ FieldMap.N(CX), CX
+ self.Emit("TESTQ", _CX, _CX) // TESTQ CX, CX
+ self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n}
+ self.Link("_loop_{n}") // _loop_{n}:
+ self.Emit("XORL" , _DX, _DX) // XORL DX, DX
+ self.From("DIVQ" , _CX) // DIVQ CX
+ self.Emit("LEAQ" , jit.Ptr(_DX, 1), _AX) // LEAQ 1(DX), AX
+ self.Emit("SHLQ" , jit.Imm(5), _DX) // SHLQ $5, DX
+ self.Emit("LEAQ" , jit.Sib(_SI, _DX, 1, 0), _DI) // LEAQ (SI)(DX), DI
+ self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Hash), _R8) // MOVQ FieldEntry.Hash(DI), R8
+ self.Emit("TESTQ", _R8, _R8) // TESTQ R8, R8
+ self.Sjmp("JZ" , "_try_lowercase_{n}") // JZ _try_lowercase_{n}
+ self.Emit("CMPQ" , _R8, _R9) // CMPQ R8, R9
+ self.Sjmp("JNE" , "_loop_{n}") // JNE _loop_{n}
+ self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name + 8), _DX) // MOVQ FieldEntry.Name+8(DI), DX
+ self.Emit("CMPQ" , _DX, _ARG_sv_n) // CMPQ DX, sv.n
+ self.Sjmp("JNE" , "_loop_{n}") // JNE _loop_{n}
+ self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_ID), _R8) // MOVQ FieldEntry.ID(DI), R8
+ self.Emit("MOVQ" , _AX, _VAR_ss_AX) // MOVQ AX, ss.AX
+ self.Emit("MOVQ" , _CX, _VAR_ss_CX) // MOVQ CX, ss.CX
+ self.Emit("MOVQ" , _SI, _VAR_ss_SI) // MOVQ SI, ss.SI
+ self.Emit("MOVQ" , _R8, _VAR_ss_R8) // MOVQ R8, ss.R8
+ self.Emit("MOVQ" , _R9, _VAR_ss_R9) // MOVQ R9, ss.R9
+ self.Emit("MOVQ" , _ARG_sv_p, _AX) // MOVQ _VAR_sv_p, AX
+ self.Emit("MOVQ" , jit.Ptr(_DI, _Fe_Name), _CX) // MOVQ FieldEntry.Name(DI), CX
+ self.Emit("MOVQ" , _CX, _BX) // MOVQ CX, 8(SP)
+ self.Emit("MOVQ" , _DX, _CX) // MOVQ DX, 16(SP)
+ self.call_go(_F_memequal) // CALL_GO memequal
+ self.Emit("MOVB" , _AX, _DX) // MOVB 24(SP), DX
+ self.Emit("MOVQ" , _VAR_ss_AX, _AX) // MOVQ ss.AX, AX
+ self.Emit("MOVQ" , _VAR_ss_CX, _CX) // MOVQ ss.CX, CX
+ self.Emit("MOVQ" , _VAR_ss_SI, _SI) // MOVQ ss.SI, SI
+ self.Emit("MOVQ" , _VAR_ss_R9, _R9) // MOVQ ss.R9, R9
+ self.Emit("TESTB", _DX, _DX) // TESTB DX, DX
+ self.Sjmp("JZ" , "_loop_{n}") // JZ _loop_{n}
+ self.Emit("MOVQ" , _VAR_ss_R8, _R8) // MOVQ ss.R8, R8
+ self.Emit("MOVQ" , _R8, _VAR_sr) // MOVQ R8, sr
+ self.Sjmp("JMP" , "_end_{n}") // JMP _end_{n}
+ self.Link("_try_lowercase_{n}") // _try_lowercase_{n}:
+ self.Emit("MOVQ" , jit.Imm(referenceFields(p.vf())), _AX) // MOVQ ${p.vf()}, AX
+ self.Emit("MOVQ", _ARG_sv_p, _BX) // MOVQ sv, BX
+ self.Emit("MOVQ", _ARG_sv_n, _CX) // MOVQ sv, CX
+ self.call_go(_F_FieldMap_GetCaseInsensitive) // CALL_GO FieldMap::GetCaseInsensitive
+ self.Emit("MOVQ" , _AX, _VAR_sr) // MOVQ AX, _VAR_sr
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JNS" , "_end_{n}") // JNS _end_{n}
+ self.Emit("BTQ" , jit.Imm(_F_disable_unknown), _ARG_fv) // BTQ ${_F_disable_unknown}, fv
+ self.Sjmp("JC" , _LB_field_error) // JC _field_error
+ self.Link("_end_{n}") // _end_{n}:
+}
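+
+/* An illustrative sketch (not the actual implementation) of the probe loop
+ * emitted above: hash the field name, linearly probe the open-addressed
+ * FieldEntry table comparing hash, length and bytes, and fall back to the
+ * case-insensitive lookup when no slot matches:
+ *
+ *     h := strhash(name)
+ *     for i := h % N; table[i].Hash != 0; i = (i + 1) % N {
+ *         if table[i].Hash == h && table[i].Name == name {
+ *             return table[i].ID
+ *         }
+ *     }
+ *     return fieldMap.GetCaseInsensitive(name)
+ */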
+
+func (self *_Assembler) _asm_OP_unmarshal(p *_Instr) {
+ self.unmarshal_json(p.vt(), true)
+}
+
+func (self *_Assembler) _asm_OP_unmarshal_p(p *_Instr) {
+ self.unmarshal_json(p.vt(), false)
+}
+
+func (self *_Assembler) _asm_OP_unmarshal_text(p *_Instr) {
+ self.unmarshal_text(p.vt(), true)
+}
+
+func (self *_Assembler) _asm_OP_unmarshal_text_p(p *_Instr) {
+ self.unmarshal_text(p.vt(), false)
+}
+
+func (self *_Assembler) _asm_OP_lspace(_ *_Instr) {
+ self.lspace("_{n}")
+}
+
+func (self *_Assembler) lspace(subfix string) {
+ var label = "_lspace" + subfix
+ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error
+ self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX
+ self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+ self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' '
+ self.Sjmp("JA" , label) // JA _nospace_{n}
+ self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
+ self.Sjmp("JNC" , label) // JNC _nospace_{n}
+
+ /* test up to 4 characters */
+ for i := 0; i < 3; i++ {
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error
+ self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+ self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' '
+ self.Sjmp("JA" , label) // JA _nospace_{n}
+ self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
+ self.Sjmp("JNC" , label) // JNC _nospace_{n}
+ }
+
+    /* hand over to the native function */
+ self.Emit("MOVQ" , _IP, _DI) // MOVQ IP, DI
+ self.Emit("MOVQ" , _IL, _SI) // MOVQ IL, SI
+ self.Emit("MOVQ" , _IC, _DX) // MOVQ IC, DX
+ self.callc(_F_lspace) // CALL lspace
+ self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , _LB_parsing_error_v) // JS _parsing_error_v
+ self.Emit("CMPQ" , _AX, _IL) // CMPQ AX, IL
+ self.Sjmp("JAE" , _LB_eof_error) // JAE _eof_error
+ self.Emit("MOVQ" , _AX, _IC) // MOVQ AX, IC
+ self.Link(label) // _nospace_{n}:
+}
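+
+/* Illustrative note: _BM_space is assumed to be a 64-bit mask with one bit set
+ * per JSON whitespace byte, so a single BTQ answers "is this byte whitespace?"
+ * in the inlined fast path before handing off to the native lspace; roughly:
+ *
+ *     const bmSpace = 1<<' ' | 1<<'\t' | 1<<'\n' | 1<<'\r'   // assumed layout
+ *     isSpace := c <= ' ' && bmSpace>>c&1 == 1
+ */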
+
+func (self *_Assembler) _asm_OP_match_char(p *_Instr) {
+ self.check_eof(1)
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
+ self.Sjmp("JNE" , _LB_char_0_error) // JNE _char_0_error
+ self.Emit("ADDQ", jit.Imm(1), _IC) // ADDQ $1, IC
+}
+
+func (self *_Assembler) _asm_OP_check_char(p *_Instr) {
+ self.check_eof(1)
+ self.Emit("LEAQ" , jit.Ptr(_IC, 1), _AX) // LEAQ 1(IC), AX
+ self.Emit("CMPB" , jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
+ self.Emit("CMOVQEQ", _AX, _IC) // CMOVQEQ AX, IC
+ self.Xjmp("JE" , p.vi()) // JE {p.vi()}
+}
+
+func (self *_Assembler) _asm_OP_check_char_0(p *_Instr) {
+ self.check_eof(1)
+ self.Emit("CMPB", jit.Sib(_IP, _IC, 1, 0), jit.Imm(int64(p.vb()))) // CMPB (IP)(IC), ${p.vb()}
+ self.Xjmp("JE" , p.vi()) // JE {p.vi()}
+}
+
+func (self *_Assembler) _asm_OP_add(p *_Instr) {
+ self.Emit("ADDQ", jit.Imm(int64(p.vi())), _IC) // ADDQ ${p.vi()}, IC
+}
+
+func (self *_Assembler) _asm_OP_load(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 0), _VP) // MOVQ (ST)(AX), VP
+}
+
+func (self *_Assembler) _asm_OP_save(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _CX) // MOVQ (ST), CX
+ self.Emit("CMPQ", _CX, jit.Imm(_MaxStackBytes)) // CMPQ CX, ${_MaxStackBytes}
+ self.Sjmp("JAE" , _LB_stack_error) // JA _stack_error
+ self.WriteRecNotAX(0 , _VP, jit.Sib(_ST, _CX, 1, 8), false, false) // MOVQ VP, 8(ST)(CX)
+ self.Emit("ADDQ", jit.Imm(8), _CX) // ADDQ $8, CX
+ self.Emit("MOVQ", _CX, jit.Ptr(_ST, 0)) // MOVQ CX, (ST)
+}
+
+func (self *_Assembler) _asm_OP_drop(_ *_Instr) {
+ self.Emit("MOVQ", jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("SUBQ", jit.Imm(8), _AX) // SUBQ $8, AX
+ self.Emit("MOVQ", jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP
+ self.Emit("MOVQ", _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST)
+ self.Emit("XORL", _BX, _BX) // XORL BX, BX
+ self.Emit("MOVQ", _BX, jit.Sib(_ST, _AX, 1, 8)) // MOVQ BX, 8(ST)(AX)
+}
+
+func (self *_Assembler) _asm_OP_drop_2(_ *_Instr) {
+ self.Emit("MOVQ" , jit.Ptr(_ST, 0), _AX) // MOVQ (ST), AX
+ self.Emit("SUBQ" , jit.Imm(16), _AX) // SUBQ $16, AX
+ self.Emit("MOVQ" , jit.Sib(_ST, _AX, 1, 8), _VP) // MOVQ 8(ST)(AX), VP
+ self.Emit("MOVQ" , _AX, jit.Ptr(_ST, 0)) // MOVQ AX, (ST)
+ self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
+ self.Emit("MOVOU", _X0, jit.Sib(_ST, _AX, 1, 8)) // MOVOU X0, 8(ST)(AX)
+}
+
+func (self *_Assembler) _asm_OP_recurse(p *_Instr) {
+ self.Emit("MOVQ", jit.Type(p.vt()), _AX) // MOVQ ${p.vt()}, AX
+ self.decode_dynamic(_AX, _VP) // DECODE AX, VP
+}
+
+func (self *_Assembler) _asm_OP_goto(p *_Instr) {
+ self.Xjmp("JMP", p.vi())
+}
+
+func (self *_Assembler) _asm_OP_switch(p *_Instr) {
+ self.Emit("MOVQ", _VAR_sr, _AX) // MOVQ sr, AX
+ self.Emit("CMPQ", _AX, jit.Imm(p.i64())) // CMPQ AX, ${len(p.vs())}
+ self.Sjmp("JAE" , "_default_{n}") // JAE _default_{n}
+
+ /* jump table selector */
+ self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI
+ self.Sref("_switch_table_{n}", 4) // .... &_switch_table_{n}
+ self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX
+ self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX
+ self.Rjmp("JMP" , _AX) // JMP AX
+ self.Link("_switch_table_{n}") // _switch_table_{n}:
+
+ /* generate the jump table */
+ for i, v := range p.vs() {
+ self.Xref(v, int64(-i) * 4)
+ }
+
+ /* default case */
+ self.Link("_default_{n}")
+ self.NOP()
+}
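+
+/* Sketch of the dispatch above (illustrative only): each jump-table entry is a
+ * 32-bit offset relative to the table base, so selecting a case is just
+ *
+ *     target := tableBase + uintptr(int32(table[sr]))   // then JMP target
+ *
+ * with the bounds check falling through to the default case.
+ */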
+
+func (self *_Assembler) print_gc(i int, p1 *_Instr, p2 *_Instr) {
+ self.Emit("MOVQ", jit.Imm(int64(p2.op())), _CX)// MOVQ $(p2.op()), 16(SP)
+ self.Emit("MOVQ", jit.Imm(int64(p1.op())), _BX) // MOVQ $(p1.op()), 8(SP)
+ self.Emit("MOVQ", jit.Imm(int64(i)), _AX) // MOVQ $(i), (SP)
+ self.call_go(_F_println)
+}
+
+//go:linkname _runtime_writeBarrier runtime.writeBarrier
+var _runtime_writeBarrier uintptr
+
+//go:linkname gcWriteBarrierAX runtime.gcWriteBarrier
+func gcWriteBarrierAX()
+
+var (
+ _V_writeBarrier = jit.Imm(int64(uintptr(unsafe.Pointer(&_runtime_writeBarrier))))
+
+ _F_gcWriteBarrierAX = jit.Func(gcWriteBarrierAX)
+)
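+
+/* Both helpers below emit the usual Go write-barrier pattern (a sketch of the
+ * intent, not additional behaviour): when runtime.writeBarrier is enabled the
+ * pointer is stored through runtime.gcWriteBarrier (value in AX, slot address
+ * in DI), otherwise a plain MOVQ into the slot suffices; in pseudo-Go:
+ *
+ *     if writeBarrier.enabled { gcWriteBarrier(&slot, ptr) } else { slot = ptr }
+ */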
+
+func (self *_Assembler) WritePtrAX(i int, rec obj.Addr, saveDI bool) {
+ self.Emit("MOVQ", _V_writeBarrier, _R9)
+ self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0))
+ self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ if saveDI {
+ self.save(_DI)
+ }
+ self.Emit("LEAQ", rec, _DI)
+ self.call(_F_gcWriteBarrierAX)
+ if saveDI {
+ self.load(_DI)
+ }
+ self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", _AX, rec)
+ self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+}
+
+func (self *_Assembler) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool, saveAX bool) {
+ if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX {
+ panic("rec contains AX!")
+ }
+ self.Emit("MOVQ", _V_writeBarrier, _R9)
+ self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0))
+ self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ if saveAX {
+ self.Emit("XCHGQ", ptr, _AX)
+ } else {
+ self.Emit("MOVQ", ptr, _AX)
+ }
+ if saveDI {
+ self.save(_DI)
+ }
+ self.Emit("LEAQ", rec, _DI)
+ self.call(_F_gcWriteBarrierAX)
+ if saveDI {
+ self.load(_DI)
+ }
+ if saveAX {
+ self.Emit("XCHGQ", ptr, _AX)
+ }
+ self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", ptr, rec)
+ self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/decoder/compiler.go b/vendor/github.com/bytedance/sonic/decoder/compiler.go
new file mode 100644
index 000000000..b4fc2fed2
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/compiler.go
@@ -0,0 +1,1136 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `encoding/json`
+ `fmt`
+ `reflect`
+ `sort`
+ `strconv`
+ `strings`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/caching`
+ `github.com/bytedance/sonic/internal/resolver`
+ `github.com/bytedance/sonic/internal/rt`
+ `github.com/bytedance/sonic/option`
+)
+
+type _Op uint8
+
+const (
+ _OP_any _Op = iota + 1
+ _OP_dyn
+ _OP_str
+ _OP_bin
+ _OP_bool
+ _OP_num
+ _OP_i8
+ _OP_i16
+ _OP_i32
+ _OP_i64
+ _OP_u8
+ _OP_u16
+ _OP_u32
+ _OP_u64
+ _OP_f32
+ _OP_f64
+ _OP_unquote
+ _OP_nil_1
+ _OP_nil_2
+ _OP_nil_3
+ _OP_deref
+ _OP_index
+ _OP_is_null
+ _OP_is_null_quote
+ _OP_map_init
+ _OP_map_key_i8
+ _OP_map_key_i16
+ _OP_map_key_i32
+ _OP_map_key_i64
+ _OP_map_key_u8
+ _OP_map_key_u16
+ _OP_map_key_u32
+ _OP_map_key_u64
+ _OP_map_key_f32
+ _OP_map_key_f64
+ _OP_map_key_str
+ _OP_map_key_utext
+ _OP_map_key_utext_p
+ _OP_array_skip
+ _OP_array_clear
+ _OP_array_clear_p
+ _OP_slice_init
+ _OP_slice_append
+ _OP_object_skip
+ _OP_object_next
+ _OP_struct_field
+ _OP_unmarshal
+ _OP_unmarshal_p
+ _OP_unmarshal_text
+ _OP_unmarshal_text_p
+ _OP_lspace
+ _OP_match_char
+ _OP_check_char
+ _OP_load
+ _OP_save
+ _OP_drop
+ _OP_drop_2
+ _OP_recurse
+ _OP_goto
+ _OP_switch
+ _OP_check_char_0
+ _OP_dismatch_err
+ _OP_go_skip
+ _OP_add
+ _OP_debug
+)
+
+const (
+ _INT_SIZE = 32 << (^uint(0) >> 63)
+ _PTR_SIZE = 32 << (^uintptr(0) >> 63)
+ _PTR_BYTE = unsafe.Sizeof(uintptr(0))
+)
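+
+// Note: `32 << (^uint(0) >> 63)` evaluates to 64 on 64-bit platforms (the shifted
+// bit is 1) and to 32 on 32-bit platforms (it is 0), so _INT_SIZE and _PTR_SIZE
+// track the widths of `int` and `uintptr` respectively.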
+
+const (
+ _MAX_ILBUF = 100000 // cutoff at 100k of IL instructions
+ _MAX_FIELDS = 50 // cutoff at 50 fields struct
+)
+
+var _OpNames = [256]string {
+ _OP_any : "any",
+ _OP_dyn : "dyn",
+ _OP_str : "str",
+ _OP_bin : "bin",
+ _OP_bool : "bool",
+ _OP_num : "num",
+ _OP_i8 : "i8",
+ _OP_i16 : "i16",
+ _OP_i32 : "i32",
+ _OP_i64 : "i64",
+ _OP_u8 : "u8",
+ _OP_u16 : "u16",
+ _OP_u32 : "u32",
+ _OP_u64 : "u64",
+ _OP_f32 : "f32",
+ _OP_f64 : "f64",
+ _OP_unquote : "unquote",
+ _OP_nil_1 : "nil_1",
+ _OP_nil_2 : "nil_2",
+ _OP_nil_3 : "nil_3",
+ _OP_deref : "deref",
+ _OP_index : "index",
+ _OP_is_null : "is_null",
+ _OP_is_null_quote : "is_null_quote",
+ _OP_map_init : "map_init",
+ _OP_map_key_i8 : "map_key_i8",
+ _OP_map_key_i16 : "map_key_i16",
+ _OP_map_key_i32 : "map_key_i32",
+ _OP_map_key_i64 : "map_key_i64",
+ _OP_map_key_u8 : "map_key_u8",
+ _OP_map_key_u16 : "map_key_u16",
+ _OP_map_key_u32 : "map_key_u32",
+ _OP_map_key_u64 : "map_key_u64",
+ _OP_map_key_f32 : "map_key_f32",
+ _OP_map_key_f64 : "map_key_f64",
+ _OP_map_key_str : "map_key_str",
+ _OP_map_key_utext : "map_key_utext",
+ _OP_map_key_utext_p : "map_key_utext_p",
+ _OP_array_skip : "array_skip",
+ _OP_slice_init : "slice_init",
+ _OP_slice_append : "slice_append",
+ _OP_object_skip : "object_skip",
+ _OP_object_next : "object_next",
+ _OP_struct_field : "struct_field",
+ _OP_unmarshal : "unmarshal",
+ _OP_unmarshal_p : "unmarshal_p",
+ _OP_unmarshal_text : "unmarshal_text",
+ _OP_unmarshal_text_p : "unmarshal_text_p",
+ _OP_lspace : "lspace",
+ _OP_match_char : "match_char",
+ _OP_check_char : "check_char",
+ _OP_load : "load",
+ _OP_save : "save",
+ _OP_drop : "drop",
+ _OP_drop_2 : "drop_2",
+ _OP_recurse : "recurse",
+ _OP_goto : "goto",
+ _OP_switch : "switch",
+ _OP_check_char_0 : "check_char_0",
+ _OP_dismatch_err : "dismatch_err",
+ _OP_add : "add",
+}
+
+func (self _Op) String() string {
+ if ret := _OpNames[self]; ret != "" {
+ return ret
+ } else {
+ return "<invalid>"
+ }
+}
+
+func _OP_int() _Op {
+ switch _INT_SIZE {
+ case 32: return _OP_i32
+ case 64: return _OP_i64
+ default: panic("unsupported int size")
+ }
+}
+
+func _OP_uint() _Op {
+ switch _INT_SIZE {
+ case 32: return _OP_u32
+ case 64: return _OP_u64
+ default: panic("unsupported uint size")
+ }
+}
+
+func _OP_uintptr() _Op {
+ switch _PTR_SIZE {
+ case 32: return _OP_u32
+ case 64: return _OP_u64
+ default: panic("unsupported pointer size")
+ }
+}
+
+func _OP_map_key_int() _Op {
+ switch _INT_SIZE {
+ case 32: return _OP_map_key_i32
+ case 64: return _OP_map_key_i64
+ default: panic("unsupported int size")
+ }
+}
+
+func _OP_map_key_uint() _Op {
+ switch _INT_SIZE {
+ case 32: return _OP_map_key_u32
+ case 64: return _OP_map_key_u64
+ default: panic("unsupported uint size")
+ }
+}
+
+func _OP_map_key_uintptr() _Op {
+ switch _PTR_SIZE {
+ case 32: return _OP_map_key_u32
+ case 64: return _OP_map_key_u64
+ default: panic("unsupported pointer size")
+ }
+}
+
+type _Instr struct {
+    u uint64 // union {op: 8, vb: 8, vi: 48}; vi may hold an int value or len([]int)
+ p unsafe.Pointer // maybe GoSlice.Data, *GoType or *caching.FieldMap
+}
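+
+// For illustration (assuming rt.PackInt stores its argument in the low 48 bits):
+// an instruction such as `goto L_42` packs as
+//
+//     u = uint64(_OP_goto)<<56 | 42
+//
+// and the op()/vb()/vi() accessors below recover the fields by shifting and
+// masking u.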
+
+func packOp(op _Op) uint64 {
+ return uint64(op) << 56
+}
+
+func newInsOp(op _Op) _Instr {
+ return _Instr{u: packOp(op)}
+}
+
+func newInsVi(op _Op, vi int) _Instr {
+ return _Instr{u: packOp(op) | rt.PackInt(vi)}
+}
+
+func newInsVb(op _Op, vb byte) _Instr {
+ return _Instr{u: packOp(op) | (uint64(vb) << 48)}
+}
+
+func newInsVs(op _Op, vs []int) _Instr {
+ return _Instr {
+ u: packOp(op) | rt.PackInt(len(vs)),
+ p: (*rt.GoSlice)(unsafe.Pointer(&vs)).Ptr,
+ }
+}
+
+func newInsVt(op _Op, vt reflect.Type) _Instr {
+ return _Instr {
+ u: packOp(op),
+ p: unsafe.Pointer(rt.UnpackType(vt)),
+ }
+}
+
+func newInsVf(op _Op, vf *caching.FieldMap) _Instr {
+ return _Instr {
+ u: packOp(op),
+ p: unsafe.Pointer(vf),
+ }
+}
+
+func (self _Instr) op() _Op {
+ return _Op(self.u >> 56)
+}
+
+func (self _Instr) vi() int {
+ return rt.UnpackInt(self.u)
+}
+
+func (self _Instr) vb() byte {
+ return byte(self.u >> 48)
+}
+
+func (self _Instr) vs() (v []int) {
+ (*rt.GoSlice)(unsafe.Pointer(&v)).Ptr = self.p
+ (*rt.GoSlice)(unsafe.Pointer(&v)).Cap = self.vi()
+ (*rt.GoSlice)(unsafe.Pointer(&v)).Len = self.vi()
+ return
+}
+
+func (self _Instr) vf() *caching.FieldMap {
+ return (*caching.FieldMap)(self.p)
+}
+
+func (self _Instr) vk() reflect.Kind {
+ return (*rt.GoType)(self.p).Kind()
+}
+
+func (self _Instr) vt() reflect.Type {
+ return (*rt.GoType)(self.p).Pack()
+}
+
+func (self _Instr) i64() int64 {
+ return int64(self.vi())
+}
+
+func (self _Instr) vlen() int {
+ return int((*rt.GoType)(self.p).Size)
+}
+
+func (self _Instr) isBranch() bool {
+ switch self.op() {
+ case _OP_goto : fallthrough
+ case _OP_switch : fallthrough
+ case _OP_is_null : fallthrough
+ case _OP_is_null_quote : fallthrough
+ case _OP_check_char : return true
+ default : return false
+ }
+}
+
+func (self _Instr) disassemble() string {
+ switch self.op() {
+ case _OP_dyn : fallthrough
+ case _OP_deref : fallthrough
+ case _OP_map_key_i8 : fallthrough
+ case _OP_map_key_i16 : fallthrough
+ case _OP_map_key_i32 : fallthrough
+ case _OP_map_key_i64 : fallthrough
+ case _OP_map_key_u8 : fallthrough
+ case _OP_map_key_u16 : fallthrough
+ case _OP_map_key_u32 : fallthrough
+ case _OP_map_key_u64 : fallthrough
+ case _OP_map_key_f32 : fallthrough
+ case _OP_map_key_f64 : fallthrough
+ case _OP_map_key_str : fallthrough
+ case _OP_map_key_utext : fallthrough
+ case _OP_map_key_utext_p : fallthrough
+ case _OP_slice_init : fallthrough
+ case _OP_slice_append : fallthrough
+ case _OP_unmarshal : fallthrough
+ case _OP_unmarshal_p : fallthrough
+ case _OP_unmarshal_text : fallthrough
+ case _OP_unmarshal_text_p : fallthrough
+ case _OP_recurse : return fmt.Sprintf("%-18s%s", self.op(), self.vt())
+ case _OP_goto : fallthrough
+ case _OP_is_null_quote : fallthrough
+ case _OP_is_null : return fmt.Sprintf("%-18sL_%d", self.op(), self.vi())
+ case _OP_index : fallthrough
+ case _OP_array_clear : fallthrough
+ case _OP_array_clear_p : return fmt.Sprintf("%-18s%d", self.op(), self.vi())
+ case _OP_switch : return fmt.Sprintf("%-18s%s", self.op(), self.formatSwitchLabels())
+ case _OP_struct_field : return fmt.Sprintf("%-18s%s", self.op(), self.formatStructFields())
+ case _OP_match_char : return fmt.Sprintf("%-18s%s", self.op(), strconv.QuoteRune(rune(self.vb())))
+ case _OP_check_char : return fmt.Sprintf("%-18sL_%d, %s", self.op(), self.vi(), strconv.QuoteRune(rune(self.vb())))
+ default : return self.op().String()
+ }
+}
+
+func (self _Instr) formatSwitchLabels() string {
+ var i int
+ var v int
+ var m []string
+
+ /* format each label */
+ for i, v = range self.vs() {
+ m = append(m, fmt.Sprintf("%d=L_%d", i, v))
+ }
+
+ /* join them with "," */
+ return strings.Join(m, ", ")
+}
+
+func (self _Instr) formatStructFields() string {
+ var i uint64
+ var r []string
+ var m []struct{i int; n string}
+
+ /* extract all the fields */
+ for i = 0; i < self.vf().N; i++ {
+ if v := self.vf().At(i); v.Hash != 0 {
+ m = append(m, struct{i int; n string}{i: v.ID, n: v.Name})
+ }
+ }
+
+ /* sort by field name */
+ sort.Slice(m, func(i, j int) bool {
+ return m[i].n < m[j].n
+ })
+
+ /* format each field */
+ for _, v := range m {
+ r = append(r, fmt.Sprintf("%s=%d", v.n, v.i))
+ }
+
+ /* join them with "," */
+ return strings.Join(r, ", ")
+}
+
+type (
+ _Program []_Instr
+)
+
+func (self _Program) pc() int {
+ return len(self)
+}
+
+func (self _Program) tag(n int) {
+ if n >= _MaxStack {
+ panic("type nesting too deep")
+ }
+}
+
+func (self _Program) pin(i int) {
+ v := &self[i]
+ v.u &= 0xffff000000000000
+ v.u |= rt.PackInt(self.pc())
+}
+
+func (self _Program) rel(v []int) {
+ for _, i := range v {
+ self.pin(i)
+ }
+}
+
+func (self *_Program) add(op _Op) {
+ *self = append(*self, newInsOp(op))
+}
+
+func (self *_Program) int(op _Op, vi int) {
+ *self = append(*self, newInsVi(op, vi))
+}
+
+func (self *_Program) chr(op _Op, vb byte) {
+ *self = append(*self, newInsVb(op, vb))
+}
+
+func (self *_Program) tab(op _Op, vs []int) {
+ *self = append(*self, newInsVs(op, vs))
+}
+
+func (self *_Program) rtt(op _Op, vt reflect.Type) {
+ *self = append(*self, newInsVt(op, vt))
+}
+
+func (self *_Program) fmv(op _Op, vf *caching.FieldMap) {
+ *self = append(*self, newInsVf(op, vf))
+}
+
+func (self _Program) disassemble() string {
+ nb := len(self)
+ tab := make([]bool, nb + 1)
+ ret := make([]string, 0, nb + 1)
+
+ /* prescan to get all the labels */
+ for _, ins := range self {
+ if ins.isBranch() {
+ if ins.op() != _OP_switch {
+ tab[ins.vi()] = true
+ } else {
+ for _, v := range ins.vs() {
+ tab[v] = true
+ }
+ }
+ }
+ }
+
+ /* disassemble each instruction */
+ for i, ins := range self {
+ if !tab[i] {
+ ret = append(ret, "\t" + ins.disassemble())
+ } else {
+ ret = append(ret, fmt.Sprintf("L_%d:\n\t%s", i, ins.disassemble()))
+ }
+ }
+
+ /* add the last label, if needed */
+ if tab[nb] {
+ ret = append(ret, fmt.Sprintf("L_%d:", nb))
+ }
+
+ /* add an "end" indicator, and join all the strings */
+ return strings.Join(append(ret, "\tend"), "\n")
+}
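+
+// A minimal debugging sketch (hypothetical helper, not part of the package API):
+// compile a type and print its opcode listing via disassemble.
+//
+//     func dumpProgram(vt reflect.Type) {
+//         if p, err := newCompiler().compile(vt); err == nil {
+//             println(p.disassemble())
+//         }
+//     }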
+
+type _Compiler struct {
+ opts option.CompileOptions
+ tab map[reflect.Type]bool
+ rec map[reflect.Type]bool
+}
+
+func newCompiler() *_Compiler {
+ return &_Compiler {
+ opts: option.DefaultCompileOptions(),
+ tab: map[reflect.Type]bool{},
+ rec: map[reflect.Type]bool{},
+ }
+}
+
+func (self *_Compiler) apply(opts option.CompileOptions) *_Compiler {
+ self.opts = opts
+ return self
+}
+
+func (self *_Compiler) rescue(ep *error) {
+ if val := recover(); val != nil {
+ if err, ok := val.(error); ok {
+ *ep = err
+ } else {
+ panic(val)
+ }
+ }
+}
+
+func (self *_Compiler) compile(vt reflect.Type) (ret _Program, err error) {
+ defer self.rescue(&err)
+ self.compileOne(&ret, 0, vt)
+ return
+}
+
+func (self *_Compiler) compileOne(p *_Program, sp int, vt reflect.Type) {
+ /* check for recursive nesting */
+ ok := self.tab[vt]
+ if ok {
+ p.rtt(_OP_recurse, vt)
+ return
+ }
+
+ pt := reflect.PtrTo(vt)
+
+ /* check for `json.Unmarshaler` with pointer receiver */
+ if pt.Implements(jsonUnmarshalerType) {
+ p.rtt(_OP_unmarshal_p, pt)
+ return
+ }
+
+ /* check for `json.Unmarshaler` */
+ if vt.Implements(jsonUnmarshalerType) {
+ p.add(_OP_lspace)
+ self.compileUnmarshalJson(p, vt)
+ return
+ }
+
+    /* check for `encoding.TextUnmarshaler` with pointer receiver */
+ if pt.Implements(encodingTextUnmarshalerType) {
+ p.add(_OP_lspace)
+ self.compileUnmarshalTextPtr(p, pt)
+ return
+ }
+
+ /* check for `encoding.TextUnmarshaler` */
+ if vt.Implements(encodingTextUnmarshalerType) {
+ p.add(_OP_lspace)
+ self.compileUnmarshalText(p, vt)
+ return
+ }
+
+ /* enter the recursion */
+ p.add(_OP_lspace)
+ self.tab[vt] = true
+ self.compileOps(p, sp, vt)
+ delete(self.tab, vt)
+}
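+
+// For reference, the dispatch above means that a type like the following
+// (illustrative example) is routed to _OP_unmarshal_p through its pointer
+// receiver before any of the kind-based paths in compileOps are considered:
+//
+//     type Foo struct{ v int }
+//     func (f *Foo) UnmarshalJSON(b []byte) error { ... }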
+
+func (self *_Compiler) compileOps(p *_Program, sp int, vt reflect.Type) {
+ switch vt.Kind() {
+ case reflect.Bool : self.compilePrimitive (vt, p, _OP_bool)
+ case reflect.Int : self.compilePrimitive (vt, p, _OP_int())
+ case reflect.Int8 : self.compilePrimitive (vt, p, _OP_i8)
+ case reflect.Int16 : self.compilePrimitive (vt, p, _OP_i16)
+ case reflect.Int32 : self.compilePrimitive (vt, p, _OP_i32)
+ case reflect.Int64 : self.compilePrimitive (vt, p, _OP_i64)
+ case reflect.Uint : self.compilePrimitive (vt, p, _OP_uint())
+ case reflect.Uint8 : self.compilePrimitive (vt, p, _OP_u8)
+ case reflect.Uint16 : self.compilePrimitive (vt, p, _OP_u16)
+ case reflect.Uint32 : self.compilePrimitive (vt, p, _OP_u32)
+ case reflect.Uint64 : self.compilePrimitive (vt, p, _OP_u64)
+ case reflect.Uintptr : self.compilePrimitive (vt, p, _OP_uintptr())
+ case reflect.Float32 : self.compilePrimitive (vt, p, _OP_f32)
+ case reflect.Float64 : self.compilePrimitive (vt, p, _OP_f64)
+ case reflect.String : self.compileString (p, vt)
+ case reflect.Array : self.compileArray (p, sp, vt)
+ case reflect.Interface : self.compileInterface (p, vt)
+ case reflect.Map : self.compileMap (p, sp, vt)
+ case reflect.Ptr : self.compilePtr (p, sp, vt)
+ case reflect.Slice : self.compileSlice (p, sp, vt)
+ case reflect.Struct : self.compileStruct (p, sp, vt)
+ default : panic (&json.UnmarshalTypeError{Type: vt})
+ }
+}
+
+func (self *_Compiler) compileMap(p *_Program, sp int, vt reflect.Type) {
+ if reflect.PtrTo(vt.Key()).Implements(encodingTextUnmarshalerType) {
+ self.compileMapOp(p, sp, vt, _OP_map_key_utext_p)
+ } else if vt.Key().Implements(encodingTextUnmarshalerType) {
+ self.compileMapOp(p, sp, vt, _OP_map_key_utext)
+ } else {
+ self.compileMapUt(p, sp, vt)
+ }
+}
+
+func (self *_Compiler) compileMapUt(p *_Program, sp int, vt reflect.Type) {
+ switch vt.Key().Kind() {
+ case reflect.Int : self.compileMapOp(p, sp, vt, _OP_map_key_int())
+ case reflect.Int8 : self.compileMapOp(p, sp, vt, _OP_map_key_i8)
+ case reflect.Int16 : self.compileMapOp(p, sp, vt, _OP_map_key_i16)
+ case reflect.Int32 : self.compileMapOp(p, sp, vt, _OP_map_key_i32)
+ case reflect.Int64 : self.compileMapOp(p, sp, vt, _OP_map_key_i64)
+ case reflect.Uint : self.compileMapOp(p, sp, vt, _OP_map_key_uint())
+ case reflect.Uint8 : self.compileMapOp(p, sp, vt, _OP_map_key_u8)
+ case reflect.Uint16 : self.compileMapOp(p, sp, vt, _OP_map_key_u16)
+ case reflect.Uint32 : self.compileMapOp(p, sp, vt, _OP_map_key_u32)
+ case reflect.Uint64 : self.compileMapOp(p, sp, vt, _OP_map_key_u64)
+ case reflect.Uintptr : self.compileMapOp(p, sp, vt, _OP_map_key_uintptr())
+ case reflect.Float32 : self.compileMapOp(p, sp, vt, _OP_map_key_f32)
+ case reflect.Float64 : self.compileMapOp(p, sp, vt, _OP_map_key_f64)
+ case reflect.String : self.compileMapOp(p, sp, vt, _OP_map_key_str)
+ default : panic(&json.UnmarshalTypeError{Type: vt})
+ }
+}
+
+func (self *_Compiler) compileMapOp(p *_Program, sp int, vt reflect.Type, op _Op) {
+ i := p.pc()
+ p.add(_OP_is_null)
+ p.tag(sp + 1)
+ skip := self.checkIfSkip(p, vt, '{')
+ p.add(_OP_save)
+ p.add(_OP_map_init)
+ p.add(_OP_save)
+ p.add(_OP_lspace)
+ j := p.pc()
+ p.chr(_OP_check_char, '}')
+ p.chr(_OP_match_char, '"')
+ skip2 := p.pc()
+ p.rtt(op, vt)
+
+ /* match the closing quote if needed */
+ if op != _OP_map_key_str && op != _OP_map_key_utext && op != _OP_map_key_utext_p {
+ p.chr(_OP_match_char, '"')
+ }
+
+ /* match the value separator */
+ p.add(_OP_lspace)
+ p.chr(_OP_match_char, ':')
+ self.compileOne(p, sp + 2, vt.Elem())
+ p.pin(skip2)
+ p.add(_OP_load)
+ k0 := p.pc()
+ p.add(_OP_lspace)
+ k1 := p.pc()
+ p.chr(_OP_check_char, '}')
+ p.chr(_OP_match_char, ',')
+ p.add(_OP_lspace)
+ p.chr(_OP_match_char, '"')
+ skip3 := p.pc()
+ p.rtt(op, vt)
+
+ /* match the closing quote if needed */
+ if op != _OP_map_key_str && op != _OP_map_key_utext && op != _OP_map_key_utext_p {
+ p.chr(_OP_match_char, '"')
+ }
+
+ /* match the value separator */
+ p.add(_OP_lspace)
+ p.chr(_OP_match_char, ':')
+ self.compileOne(p, sp + 2, vt.Elem())
+ p.pin(skip3)
+ p.add(_OP_load)
+ p.int(_OP_goto, k0)
+ p.pin(j)
+ p.pin(k1)
+ p.add(_OP_drop_2)
+ x := p.pc()
+ p.add(_OP_goto)
+ p.pin(i)
+ p.add(_OP_nil_1)
+ p.pin(skip)
+ p.pin(x)
+}
+
+func (self *_Compiler) compilePtr(p *_Program, sp int, et reflect.Type) {
+ i := p.pc()
+ p.add(_OP_is_null)
+
+ /* dereference all the way down */
+ for et.Kind() == reflect.Ptr {
+ et = et.Elem()
+ p.rtt(_OP_deref, et)
+ }
+
+ /* compile the element type */
+ self.compileOne(p, sp + 1, et)
+ j := p.pc()
+ p.add(_OP_goto)
+ p.pin(i)
+ p.add(_OP_nil_1)
+ p.pin(j)
+}
+
+func (self *_Compiler) compileArray(p *_Program, sp int, vt reflect.Type) {
+ x := p.pc()
+ p.add(_OP_is_null)
+ p.tag(sp)
+ skip := self.checkIfSkip(p, vt, '[')
+
+ p.add(_OP_save)
+ p.add(_OP_lspace)
+ v := []int{p.pc()}
+ p.chr(_OP_check_char, ']')
+
+ /* decode every item */
+ for i := 1; i <= vt.Len(); i++ {
+ self.compileOne(p, sp + 1, vt.Elem())
+ p.add(_OP_load)
+ p.int(_OP_index, i * int(vt.Elem().Size()))
+ p.add(_OP_lspace)
+ v = append(v, p.pc())
+ p.chr(_OP_check_char, ']')
+ p.chr(_OP_match_char, ',')
+ }
+
+ /* drop rest of the array */
+ p.add(_OP_array_skip)
+ w := p.pc()
+ p.add(_OP_goto)
+ p.rel(v)
+
+ /* check for pointer data */
+ if rt.UnpackType(vt.Elem()).PtrData == 0 {
+ p.int(_OP_array_clear, int(vt.Size()))
+ } else {
+ p.int(_OP_array_clear_p, int(vt.Size()))
+ }
+
+ /* restore the stack */
+ p.pin(w)
+ p.add(_OP_drop)
+
+ p.pin(skip)
+ p.pin(x)
+}
+
+func (self *_Compiler) compileSlice(p *_Program, sp int, vt reflect.Type) {
+ if vt.Elem().Kind() == byteType.Kind() {
+ self.compileSliceBin(p, sp, vt)
+ } else {
+ self.compileSliceList(p, sp, vt)
+ }
+}
+
+func (self *_Compiler) compileSliceBin(p *_Program, sp int, vt reflect.Type) {
+ i := p.pc()
+ p.add(_OP_is_null)
+ j := p.pc()
+ p.chr(_OP_check_char, '[')
+ skip := self.checkIfSkip(p, vt, '"')
+ k := p.pc()
+ p.chr(_OP_check_char, '"')
+ p.add(_OP_bin)
+ x := p.pc()
+ p.add(_OP_goto)
+ p.pin(j)
+ self.compileSliceBody(p, sp, vt.Elem())
+ y := p.pc()
+ p.add(_OP_goto)
+ p.pin(i)
+ p.pin(k)
+ p.add(_OP_nil_3)
+ p.pin(x)
+ p.pin(skip)
+ p.pin(y)
+}
+
+func (self *_Compiler) compileSliceList(p *_Program, sp int, vt reflect.Type) {
+ i := p.pc()
+ p.add(_OP_is_null)
+ p.tag(sp)
+ skip := self.checkIfSkip(p, vt, '[')
+ self.compileSliceBody(p, sp, vt.Elem())
+ x := p.pc()
+ p.add(_OP_goto)
+ p.pin(i)
+ p.add(_OP_nil_3)
+ p.pin(x)
+ p.pin(skip)
+}
+
+func (self *_Compiler) compileSliceBody(p *_Program, sp int, et reflect.Type) {
+ p.rtt(_OP_slice_init, et)
+ p.add(_OP_save)
+ p.add(_OP_lspace)
+ j := p.pc()
+ p.chr(_OP_check_char, ']')
+ p.rtt(_OP_slice_append, et)
+ self.compileOne(p, sp + 1, et)
+ p.add(_OP_load)
+ k0 := p.pc()
+ p.add(_OP_lspace)
+ k1 := p.pc()
+ p.chr(_OP_check_char, ']')
+ p.chr(_OP_match_char, ',')
+ p.rtt(_OP_slice_append, et)
+ self.compileOne(p, sp + 1, et)
+ p.add(_OP_load)
+ p.int(_OP_goto, k0)
+ p.pin(j)
+ p.pin(k1)
+ p.add(_OP_drop)
+}
+
+func (self *_Compiler) compileString(p *_Program, vt reflect.Type) {
+ if vt == jsonNumberType {
+ self.compilePrimitive(vt, p, _OP_num)
+ } else {
+ self.compileStringBody(vt, p)
+ }
+}
+
+func (self *_Compiler) compileStringBody(vt reflect.Type, p *_Program) {
+ i := p.pc()
+ p.add(_OP_is_null)
+ skip := self.checkIfSkip(p, vt, '"')
+ p.add(_OP_str)
+ p.pin(i)
+ p.pin(skip)
+}
+
+func (self *_Compiler) compileStruct(p *_Program, sp int, vt reflect.Type) {
+ if sp >= self.opts.MaxInlineDepth || p.pc() >= _MAX_ILBUF || (sp > 0 && vt.NumField() >= _MAX_FIELDS) {
+ p.rtt(_OP_recurse, vt)
+ if self.opts.RecursiveDepth > 0 {
+ self.rec[vt] = true
+ }
+ } else {
+ self.compileStructBody(p, sp, vt)
+ }
+}
+
+func (self *_Compiler) compileStructBody(p *_Program, sp int, vt reflect.Type) {
+ fv := resolver.ResolveStruct(vt)
+ fm, sw := caching.CreateFieldMap(len(fv)), make([]int, len(fv))
+
+ /* start of object */
+ p.tag(sp)
+ n := p.pc()
+ p.add(_OP_is_null)
+
+ skip := self.checkIfSkip(p, vt, '{')
+
+ p.add(_OP_save)
+ p.add(_OP_lspace)
+ x := p.pc()
+ p.chr(_OP_check_char, '}')
+ p.chr(_OP_match_char, '"')
+ p.fmv(_OP_struct_field, fm)
+ p.add(_OP_lspace)
+ p.chr(_OP_match_char, ':')
+ p.tab(_OP_switch, sw)
+ p.add(_OP_object_next)
+ y0 := p.pc()
+ p.add(_OP_lspace)
+ y1 := p.pc()
+ p.chr(_OP_check_char, '}')
+ p.chr(_OP_match_char, ',')
+
+ /* special case of an empty struct */
+ if len(fv) == 0 {
+ p.add(_OP_object_skip)
+ goto end_of_object
+ }
+
+ /* match the remaining fields */
+ p.add(_OP_lspace)
+ p.chr(_OP_match_char, '"')
+ p.fmv(_OP_struct_field, fm)
+ p.add(_OP_lspace)
+ p.chr(_OP_match_char, ':')
+ p.tab(_OP_switch, sw)
+ p.add(_OP_object_next)
+ p.int(_OP_goto, y0)
+
+ /* process each field */
+ for i, f := range fv {
+ sw[i] = p.pc()
+ fm.Set(f.Name, i)
+
+ /* index to the field */
+ for _, o := range f.Path {
+ if p.int(_OP_index, int(o.Size)); o.Kind == resolver.F_deref {
+ p.rtt(_OP_deref, o.Type)
+ }
+ }
+
+ /* check for "stringnize" option */
+ if (f.Opts & resolver.F_stringize) == 0 {
+ self.compileOne(p, sp + 1, f.Type)
+ } else {
+ self.compileStructFieldStr(p, sp + 1, f.Type)
+ }
+
+ /* load the state, and try next field */
+ p.add(_OP_load)
+ p.int(_OP_goto, y0)
+ }
+
+end_of_object:
+ p.pin(x)
+ p.pin(y1)
+ p.add(_OP_drop)
+ p.pin(n)
+ p.pin(skip)
+}
+
+func (self *_Compiler) compileStructFieldStr(p *_Program, sp int, vt reflect.Type) {
+ n1 := -1
+ ft := vt
+ sv := false
+
+ /* dereference the pointer if needed */
+ if ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+
+ /* check if it can be stringized */
+ switch ft.Kind() {
+ case reflect.Bool : sv = true
+ case reflect.Int : sv = true
+ case reflect.Int8 : sv = true
+ case reflect.Int16 : sv = true
+ case reflect.Int32 : sv = true
+ case reflect.Int64 : sv = true
+ case reflect.Uint : sv = true
+ case reflect.Uint8 : sv = true
+ case reflect.Uint16 : sv = true
+ case reflect.Uint32 : sv = true
+ case reflect.Uint64 : sv = true
+ case reflect.Uintptr : sv = true
+ case reflect.Float32 : sv = true
+ case reflect.Float64 : sv = true
+ case reflect.String : sv = true
+ }
+
+ /* if it's not, ignore the "string" and follow the regular path */
+ if !sv {
+ self.compileOne(p, sp, vt)
+ return
+ }
+
+ /* remove the leading space, and match the leading quote */
+ vk := vt.Kind()
+ p.add(_OP_lspace)
+ n0 := p.pc()
+ p.add(_OP_is_null)
+
+ skip := self.checkIfSkip(p, stringType, '"')
+
+ /* also check for inner "null" */
+ n1 = p.pc()
+ p.add(_OP_is_null_quote)
+
+ /* dereference the pointer only when it is not null */
+ if vk == reflect.Ptr {
+ vt = vt.Elem()
+ p.rtt(_OP_deref, vt)
+ }
+
+ n2 := p.pc()
+ p.chr(_OP_check_char_0, '"')
+
+ /* string opcode selector */
+ _OP_string := func() _Op {
+ if ft == jsonNumberType {
+ return _OP_num
+ } else {
+ return _OP_unquote
+ }
+ }
+
+ /* compile for each type */
+ switch vt.Kind() {
+ case reflect.Bool : p.add(_OP_bool)
+ case reflect.Int : p.add(_OP_int())
+ case reflect.Int8 : p.add(_OP_i8)
+ case reflect.Int16 : p.add(_OP_i16)
+ case reflect.Int32 : p.add(_OP_i32)
+ case reflect.Int64 : p.add(_OP_i64)
+ case reflect.Uint : p.add(_OP_uint())
+ case reflect.Uint8 : p.add(_OP_u8)
+ case reflect.Uint16 : p.add(_OP_u16)
+ case reflect.Uint32 : p.add(_OP_u32)
+ case reflect.Uint64 : p.add(_OP_u64)
+ case reflect.Uintptr : p.add(_OP_uintptr())
+ case reflect.Float32 : p.add(_OP_f32)
+ case reflect.Float64 : p.add(_OP_f64)
+ case reflect.String : p.add(_OP_string())
+ default : panic("not reachable")
+ }
+
+ /* the closing quote is not needed when parsing a pure string */
+ if vt == jsonNumberType || vt.Kind() != reflect.String {
+ p.chr(_OP_match_char, '"')
+ }
+
+ /* pin the `is_null_quote` jump location */
+ if n1 != -1 && vk != reflect.Ptr {
+ p.pin(n1)
+ }
+
+ /* "null" but not a pointer, act as if the field is not present */
+ if vk != reflect.Ptr {
+ pc2 := p.pc()
+ p.add(_OP_goto)
+ p.pin(n2)
+ p.rtt(_OP_dismatch_err, vt)
+ p.int(_OP_add, 1)
+ p.pin(pc2)
+ p.pin(n0)
+ return
+ }
+
+ /* the "null" case of the pointer */
+ pc := p.pc()
+ p.add(_OP_goto)
+ p.pin(n0) // `is_null` jump location
+ p.pin(n1) // `is_null_quote` jump location
+ p.add(_OP_nil_1)
+ pc2 := p.pc()
+ p.add(_OP_goto)
+ p.pin(n2)
+ p.rtt(_OP_dismatch_err, vt)
+ p.int(_OP_add, 1)
+ p.pin(pc)
+ p.pin(pc2)
+ p.pin(skip)
+}
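For context, the stringize path above implements the standard `,string` struct tag option: a field tagged this way carries its value inside a JSON string, so the compiler emits the quote checks and the numeric/bool opcodes seen in compileStructFieldStr. A minimal illustrative fragment (the type and field names are invented for the example):

    // Illustrative only: the "id" field is quoted in JSON, e.g. {"id":"42"},
    // and decoding it exercises the stringize path compiled above.
    type record struct {
        ID int64 `json:"id,string"`
    }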
+
+func (self *_Compiler) compileInterface(p *_Program, vt reflect.Type) {
+ i := p.pc()
+ p.add(_OP_is_null)
+
+ /* check for empty interface */
+ if vt.NumMethod() == 0 {
+ p.add(_OP_any)
+ } else {
+ p.rtt(_OP_dyn, vt)
+ }
+
+ /* finish the OpCode */
+ j := p.pc()
+ p.add(_OP_goto)
+ p.pin(i)
+ p.add(_OP_nil_2)
+ p.pin(j)
+}
+
+func (self *_Compiler) compilePrimitive(vt reflect.Type, p *_Program, op _Op) {
+ i := p.pc()
+ p.add(_OP_is_null)
+ // skip := self.checkPrimitive(p, vt)
+ p.add(op)
+ p.pin(i)
+ // p.pin(skip)
+}
+
+func (self *_Compiler) compileUnmarshalEnd(p *_Program, vt reflect.Type, i int) {
+ j := p.pc()
+ k := vt.Kind()
+
+ /* not a pointer */
+ if k != reflect.Ptr {
+ p.pin(i)
+ return
+ }
+
+ /* it seems that in Go JSON library, "null" takes priority over any kind of unmarshaler */
+ p.add(_OP_goto)
+ p.pin(i)
+ p.add(_OP_nil_1)
+ p.pin(j)
+}
+
+func (self *_Compiler) compileUnmarshalJson(p *_Program, vt reflect.Type) {
+ i := p.pc()
+ v := _OP_unmarshal
+ p.add(_OP_is_null)
+
+ /* check for dynamic interface */
+ if vt.Kind() == reflect.Interface {
+ v = _OP_dyn
+ }
+
+ /* call the unmarshaler */
+ p.rtt(v, vt)
+ self.compileUnmarshalEnd(p, vt, i)
+}
+
+func (self *_Compiler) compileUnmarshalText(p *_Program, vt reflect.Type) {
+ i := p.pc()
+ v := _OP_unmarshal_text
+ p.add(_OP_is_null)
+
+ /* check for dynamic interface */
+ if vt.Kind() == reflect.Interface {
+ v = _OP_dyn
+ } else {
+ p.chr(_OP_match_char, '"')
+ }
+
+ /* call the unmarshaler */
+ p.rtt(v, vt)
+ self.compileUnmarshalEnd(p, vt, i)
+}
+
+func (self *_Compiler) compileUnmarshalTextPtr(p *_Program, vt reflect.Type) {
+ i := p.pc()
+ p.add(_OP_is_null)
+ p.chr(_OP_match_char, '"')
+ p.rtt(_OP_unmarshal_text_p, vt)
+ p.pin(i)
+}
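The compileUnmarshal* helpers above cover types that implement json.Unmarshaler or encoding.TextUnmarshaler (the text variants additionally match the surrounding quotes). A minimal sketch of a type that would be dispatched through the text path; the type and its parsing rules are invented for illustration:

    package example

    import (
        "fmt"
        "strings"
    )

    // level implements encoding.TextUnmarshaler, so a field of this type is
    // decoded via the unmarshal_text opcode emitted above.
    type level int

    func (l *level) UnmarshalText(b []byte) error {
        switch strings.ToLower(string(b)) {
        case "debug":
            *l = 0
        case "info":
            *l = 1
        default:
            return fmt.Errorf("unknown level %q", b)
        }
        return nil
    }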
+
+func (self *_Compiler) checkIfSkip(p *_Program, vt reflect.Type, c byte) int {
+ j := p.pc()
+ p.chr(_OP_check_char_0, c)
+ p.rtt(_OP_dismatch_err, vt)
+ s := p.pc()
+ p.add(_OP_go_skip)
+ p.pin(j)
+ p.int(_OP_add, 1)
+ return s
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/decoder/debug.go b/vendor/github.com/bytedance/sonic/decoder/debug.go
new file mode 100644
index 000000000..9cf3a6a00
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/debug.go
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `os`
+ `runtime`
+ `runtime/debug`
+ `strings`
+
+ `github.com/bytedance/sonic/internal/jit`
+)
+
+
+var (
+ debugSyncGC = os.Getenv("SONIC_SYNC_GC") != ""
+ debugAsyncGC = os.Getenv("SONIC_NO_ASYNC_GC") == ""
+)
+
+var (
+ _Instr_End _Instr = newInsOp(_OP_nil_1)
+
+ _F_gc = jit.Func(runtime.GC)
+ _F_force_gc = jit.Func(debug.FreeOSMemory)
+ _F_println = jit.Func(println_wrapper)
+ _F_print = jit.Func(print)
+)
+
+func println_wrapper(i int, op1 int, op2 int){
+ println(i, " Intrs ", op1, _OpNames[op1], "next: ", op2, _OpNames[op2])
+}
+
+func print(i int){
+ println(i)
+}
+
+func (self *_Assembler) force_gc() {
+ self.call_go(_F_gc)
+ self.call_go(_F_force_gc)
+}
+
+func (self *_Assembler) debug_instr(i int, v *_Instr) {
+ if debugSyncGC {
+ if (i+1 == len(self.p)) {
+ self.print_gc(i, v, &_Instr_End)
+ } else {
+ next := &(self.p[i+1])
+ self.print_gc(i, v, next)
+ name := _OpNames[next.op()]
+ if strings.Contains(name, "save") {
+ return
+ }
+ }
+ self.force_gc()
+ }
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/decoder/decoder.go b/vendor/github.com/bytedance/sonic/decoder/decoder.go
new file mode 100644
index 000000000..5326f9728
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/decoder.go
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `encoding/json`
+ `reflect`
+ `runtime`
+
+ `github.com/bytedance/sonic/internal/native`
+ `github.com/bytedance/sonic/internal/native/types`
+ `github.com/bytedance/sonic/internal/rt`
+ `github.com/bytedance/sonic/option`
+ `github.com/bytedance/sonic/utf8`
+)
+
+const (
+ _F_use_int64 = iota
+ _F_use_number
+ _F_disable_urc
+ _F_disable_unknown
+ _F_copy_string
+ _F_validate_string
+
+ _F_allow_control = 31
+)
+
+type Options uint64
+
+const (
+ OptionUseInt64 Options = 1 << _F_use_int64
+ OptionUseNumber Options = 1 << _F_use_number
+ OptionUseUnicodeErrors Options = 1 << _F_disable_urc
+ OptionDisableUnknown Options = 1 << _F_disable_unknown
+ OptionCopyString Options = 1 << _F_copy_string
+ OptionValidateString Options = 1 << _F_validate_string
+)
+
+func (self *Decoder) SetOptions(opts Options) {
+ if (opts & OptionUseNumber != 0) && (opts & OptionUseInt64 != 0) {
+ panic("can't set OptionUseInt64 and OptionUseNumber both!")
+ }
+ self.f = uint64(opts)
+}
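The Options flags mirror the setter methods defined later in this file (OptionUseNumber corresponds to UseNumber, OptionCopyString to CopyString, and so on), since both set the same bit positions in the Decoder's flag word. A minimal sketch, assuming the vendored import path github.com/bytedance/sonic/decoder:

    package main

    import "github.com/bytedance/sonic/decoder"

    func main() {
        d1 := decoder.NewDecoder(`123`)
        d1.SetOptions(decoder.OptionUseNumber | decoder.OptionCopyString) // flag form

        d2 := decoder.NewDecoder(`123`)
        d2.UseNumber()   // method form, sets the same bit as OptionUseNumber
        d2.CopyString()

        // Note: combining OptionUseInt64 with OptionUseNumber panics in SetOptions.
    }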
+
+
+// Decoder is the decoder context object
+type Decoder struct {
+ i int
+ f uint64
+ s string
+}
+
+// NewDecoder creates a new decoder instance.
+func NewDecoder(s string) *Decoder {
+ return &Decoder{s: s}
+}
+
+// Pos returns the current decoding position.
+func (self *Decoder) Pos() int {
+ return self.i
+}
+
+func (self *Decoder) Reset(s string) {
+ self.s = s
+ self.i = 0
+ // self.f = 0
+}
+
+func (self *Decoder) CheckTrailings() error {
+ pos := self.i
+ buf := self.s
+ /* skip all the trailing spaces */
+ if pos != len(buf) {
+ for pos < len(buf) && (types.SPACE_MASK & (1 << buf[pos])) != 0 {
+ pos++
+ }
+ }
+
+ /* then it must be at EOF */
+ if pos == len(buf) {
+ return nil
+ }
+
+ /* junk after JSON value */
+ return SyntaxError {
+ Src : buf,
+ Pos : pos,
+ Code : types.ERR_INVALID_CHAR,
+ }
+}
+
+
+// Decode parses the JSON-encoded data from the current position and stores the result
+// in the value pointed to by val.
+func (self *Decoder) Decode(val interface{}) error {
+ /* validate json if needed */
+ if (self.f & (1 << _F_validate_string)) != 0 && !utf8.ValidateString(self.s){
+ dbuf := utf8.CorrectWith(nil, rt.Str2Mem(self.s), "\ufffd")
+ self.s = rt.Mem2Str(dbuf)
+ }
+
+ vv := rt.UnpackEface(val)
+ vp := vv.Value
+
+ /* check for nil type */
+ if vv.Type == nil {
+ return &json.InvalidUnmarshalError{}
+ }
+
+ /* must be a non-nil pointer */
+ if vp == nil || vv.Type.Kind() != reflect.Ptr {
+ return &json.InvalidUnmarshalError{Type: vv.Type.Pack()}
+ }
+
+ /* create a new stack, and call the decoder */
+ sb, etp := newStack(), rt.PtrElem(vv.Type)
+ nb, err := decodeTypedPointer(self.s, self.i, etp, vp, sb, self.f)
+ /* return the stack back */
+ self.i = nb
+ freeStack(sb)
+
+ /* avoid GC ahead */
+ runtime.KeepAlive(vv)
+ return err
+}
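Taken together, NewDecoder, Decode, Pos and CheckTrailings give the usual decode-and-verify flow. A minimal sketch, assuming the vendored import path github.com/bytedance/sonic/decoder:

    package main

    import (
        "fmt"

        "github.com/bytedance/sonic/decoder"
    )

    func main() {
        type user struct {
            Name string `json:"name"`
        }

        var u user
        d := decoder.NewDecoder(`{"name":"alice"}   `)
        if err := d.Decode(&u); err != nil {
            panic(err)
        }
        // CheckTrailings fails if anything other than whitespace follows the value.
        if err := d.CheckTrailings(); err != nil {
            panic(err)
        }
        fmt.Println(u.Name, d.Pos())
    }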
+
+// UseInt64 causes the Decoder to unmarshal an integer into an interface{} as an
+// int64 instead of as a float64.
+func (self *Decoder) UseInt64() {
+ self.f |= 1 << _F_use_int64
+ self.f &^= 1 << _F_use_number
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// json.Number instead of as a float64.
+func (self *Decoder) UseNumber() {
+ self.f &^= 1 << _F_use_int64
+ self.f |= 1 << _F_use_number
+}
+
+// UseUnicodeErrors causes the Decoder to return an error when it encounters invalid
+// UTF-8 escape sequences.
+func (self *Decoder) UseUnicodeErrors() {
+ self.f |= 1 << _F_disable_urc
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (self *Decoder) DisallowUnknownFields() {
+ self.f |= 1 << _F_disable_unknown
+}
+
+// CopyString causes the Decoder to decode string values by copying them instead of
+// referencing the original input buffer.
+func (self *Decoder) CopyString() {
+ self.f |= 1 << _F_copy_string
+}
+
+// ValidateString causes the Decoder to validate string values while decoding JSON,
+// returning an error when a string value contains unescaped control characters
+// (0x00-0x1f) or invalid UTF-8.
+func (self *Decoder) ValidateString() {
+ self.f |= 1 << _F_validate_string
+}
+
+// Pretouch compiles vt ahead-of-time to avoid JIT compilation on-the-fly, in
+// order to reduce the first-hit latency.
+//
+// Opts are the compile options; for example, "option.WithCompileRecursiveDepth" is a
+// compile option that sets how deeply nested struct types are compiled recursively.
+func Pretouch(vt reflect.Type, opts ...option.CompileOption) error {
+ cfg := option.DefaultCompileOptions()
+ for _, opt := range opts {
+ opt(&cfg)
+ }
+ return pretouchRec(map[reflect.Type]bool{vt:true}, cfg)
+}
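A minimal sketch of warming up the decoder for a nested struct type; option.WithCompileRecursiveDepth is the option named in the comment above, and taking the depth as an int is an assumption about its signature:

    package main

    import (
        "reflect"

        "github.com/bytedance/sonic/decoder"
        "github.com/bytedance/sonic/option"
    )

    type inner struct{ A int }
    type outer struct{ B []inner }

    func main() {
        // Compile the decoder for outer (and nested types up to the given
        // depth) ahead of the first Decode call.
        err := decoder.Pretouch(
            reflect.TypeOf(outer{}),
            option.WithCompileRecursiveDepth(2), // assumed signature: depth as int
        )
        if err != nil {
            panic(err)
        }
    }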
+
+func pretouchType(_vt reflect.Type, opts option.CompileOptions) (map[reflect.Type]bool, error) {
+ /* compile function */
+ compiler := newCompiler().apply(opts)
+ decoder := func(vt *rt.GoType, _ ...interface{}) (interface{}, error) {
+ if pp, err := compiler.compile(_vt); err != nil {
+ return nil, err
+ } else {
+ as := newAssembler(pp)
+ as.name = _vt.String()
+ return as.Load(), nil
+ }
+ }
+
+ /* find or compile */
+ vt := rt.UnpackType(_vt)
+ if val := programCache.Get(vt); val != nil {
+ return nil, nil
+ } else if _, err := programCache.Compute(vt, decoder); err == nil {
+ return compiler.rec, nil
+ } else {
+ return nil, err
+ }
+}
+
+func pretouchRec(vtm map[reflect.Type]bool, opts option.CompileOptions) error {
+ if opts.RecursiveDepth < 0 || len(vtm) == 0 {
+ return nil
+ }
+ next := make(map[reflect.Type]bool)
+ for vt := range(vtm) {
+ sub, err := pretouchType(vt, opts)
+ if err != nil {
+ return err
+ }
+ for svt := range(sub) {
+ next[svt] = true
+ }
+ }
+ opts.RecursiveDepth -= 1
+ return pretouchRec(next, opts)
+}
+
+// Skip skips exactly one JSON value and, if the value is valid, returns the position of
+// its first non-blank character as start and its ending position as end. Otherwise it
+// returns a negative error code as start and the position of the invalid character as end.
+func Skip(data []byte) (start int, end int) {
+ s := rt.Mem2Str(data)
+ p := 0
+ m := types.NewStateMachine()
+ ret := native.SkipOne(&s, &p, m, uint64(0))
+ types.FreeStateMachine(m)
+ return ret, p
+} \ No newline at end of file
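A minimal sketch of Skip: on success start and end delimit the skipped value, on failure start is a negative error code and end points at the offending byte (import path assumed as above):

    package main

    import (
        "fmt"

        "github.com/bytedance/sonic/decoder"
    )

    func main() {
        data := []byte(`  {"a":[1,2,3]} trailing`)
        start, end := decoder.Skip(data)
        if start < 0 {
            panic(fmt.Sprintf("invalid JSON near offset %d (code %d)", end, start))
        }
        fmt.Println(string(data[start:end])) // {"a":[1,2,3]}
    }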
diff --git a/vendor/github.com/bytedance/sonic/decoder/errors.go b/vendor/github.com/bytedance/sonic/decoder/errors.go
new file mode 100644
index 000000000..c905fdfb0
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/errors.go
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `encoding/json`
+ `errors`
+ `fmt`
+ `reflect`
+ `strconv`
+ `strings`
+
+ `github.com/bytedance/sonic/internal/native/types`
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+type SyntaxError struct {
+ Pos int
+ Src string
+ Code types.ParsingError
+ Msg string
+}
+
+func (self SyntaxError) Error() string {
+ return fmt.Sprintf("%q", self.Description())
+}
+
+func (self SyntaxError) Description() string {
+ return "Syntax error " + self.description()
+}
+
+func (self SyntaxError) description() string {
+ i := 16
+ p := self.Pos - i
+ q := self.Pos + i
+
+ /* check for empty source */
+ if self.Src == "" {
+ return fmt.Sprintf("no sources available: %#v", self)
+ }
+
+ /* prevent slicing before the beginning */
+ if p < 0 {
+ p, q, i = 0, q - p, i + p
+ }
+
+ /* prevent slicing beyond the end */
+ if n := len(self.Src); q > n {
+ n = q - n
+ q = len(self.Src)
+
+ /* move the left bound if possible */
+ if p > n {
+ i += n
+ p -= n
+ }
+ }
+
+ /* left and right length */
+ x := clamp_zero(i)
+ y := clamp_zero(q - p - i - 1)
+
+ /* compose the error description */
+ return fmt.Sprintf(
+ "at index %d: %s\n\n\t%s\n\t%s^%s\n",
+ self.Pos,
+ self.Message(),
+ self.Src[p:q],
+ strings.Repeat(".", x),
+ strings.Repeat(".", y),
+ )
+}
+
+func (self SyntaxError) Message() string {
+ if self.Msg == "" {
+ return self.Code.Message()
+ }
+ return self.Msg
+}
+
+func clamp_zero(v int) int {
+ if v < 0 {
+ return 0
+ } else {
+ return v
+ }
+}
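description() prints roughly 16 bytes of context on either side of Pos and a caret line underneath, clamping both bounds at the ends of Src (clamp_zero keeps the padding lengths non-negative). A minimal sketch of constructing and printing such an error; the Msg text is invented for the example:

    package main

    import (
        "fmt"

        "github.com/bytedance/sonic/decoder"
    )

    func main() {
        err := decoder.SyntaxError{
            Pos: 6,
            Src: `{"a": x}`,
            Msg: "invalid char 'x' in value", // a non-empty Msg overrides Code.Message()
        }
        // Prints the position, the message, the surrounding source window
        // and a caret line pointing at Pos.
        fmt.Println(err.Description())
    }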
+
+/** JIT Error Helpers **/
+
+var stackOverflow = &json.UnsupportedValueError {
+ Str : "Value nesting too deep",
+ Value : reflect.ValueOf("..."),
+}
+
+//go:nosplit
+func error_wrap(src string, pos int, code types.ParsingError) error {
+ return SyntaxError {
+ Pos : pos,
+ Src : src,
+ Code : code,
+ }
+}
+
+//go:nosplit
+func error_type(vt *rt.GoType) error {
+ return &json.UnmarshalTypeError{Type: vt.Pack()}
+}
+
+type MismatchTypeError struct {
+ Pos int
+ Src string
+ Type reflect.Type
+}
+
+func swithchJSONType(src string, pos int) string {
+ var val string
+ switch src[pos] {
+ case 'f': fallthrough
+ case 't': val = "bool"
+ case '"': val = "string"
+ case '{': val = "object"
+ case '[': val = "array"
+ case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': val = "number"
+ }
+ return val
+}
+
+func (self MismatchTypeError) Error() string {
+ se := SyntaxError {
+ Pos : self.Pos,
+ Src : self.Src,
+ Code : types.ERR_MISMATCH,
+ }
+ return fmt.Sprintf("Mismatch type %s with value %s %q", self.Type.String(), swithchJSONType(self.Src, self.Pos), se.description())
+}
+
+func (self MismatchTypeError) Description() string {
+ se := SyntaxError {
+ Pos : self.Pos,
+ Src : self.Src,
+ Code : types.ERR_MISMATCH,
+ }
+ return fmt.Sprintf("Mismatch type %s with value %s %s", self.Type.String(), swithchJSONType(self.Src, self.Pos), se.description())
+}
+
+//go:nosplit
+func error_mismatch(src string, pos int, vt *rt.GoType) error {
+ return &MismatchTypeError {
+ Pos : pos,
+ Src : src,
+ Type : vt.Pack(),
+ }
+}
+
+//go:nosplit
+func error_field(name string) error {
+ return errors.New("json: unknown field " + strconv.Quote(name))
+}
+
+//go:nosplit
+func error_value(value string, vtype reflect.Type) error {
+ return &json.UnmarshalTypeError {
+ Type : vtype,
+ Value : value,
+ }
+}
diff --git a/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go116.go b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go116.go
new file mode 100644
index 000000000..b597043f9
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go116.go
@@ -0,0 +1,776 @@
+// +build go1.15,!go1.17
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `encoding/json`
+ `fmt`
+ `reflect`
+ `strconv`
+
+ `github.com/bytedance/sonic/internal/jit`
+ `github.com/bytedance/sonic/internal/native`
+ `github.com/bytedance/sonic/internal/native/types`
+ `github.com/twitchyliquid64/golang-asm/obj`
+ `github.com/twitchyliquid64/golang-asm/obj/x86`
+)
+
+/** Crucial Registers:
+ *
+ * ST(BX) : ro, decoder stack
+ * DF(R10) : ro, decoder flags
+ * EP(R11) : wo, error pointer
+ * IP(R12) : ro, input pointer
+ * IL(R13) : ro, input length
+ * IC(R14) : rw, input cursor
+ * VP(R15) : ro, value pointer (to an interface{})
+ */
+
+const (
+    _VD_args   = 8      // 8 bytes for passing arguments to this function
+ _VD_fargs = 64 // 64 bytes for passing arguments to other Go functions
+ _VD_saves = 40 // 40 bytes for saving the registers before CALL instructions
+ _VD_locals = 88 // 88 bytes for local variables
+)
+
+const (
+ _VD_offs = _VD_fargs + _VD_saves + _VD_locals
+ _VD_size = _VD_offs + 8 // 8 bytes for the parent frame pointer
+)
+
+var (
+ _VAR_ss = _VAR_ss_Vt
+ _VAR_df = jit.Ptr(_SP, _VD_fargs + _VD_saves)
+)
+
+var (
+ _VAR_ss_Vt = jit.Ptr(_SP, _VD_fargs + _VD_saves + 8)
+ _VAR_ss_Dv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 16)
+ _VAR_ss_Iv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 24)
+ _VAR_ss_Ep = jit.Ptr(_SP, _VD_fargs + _VD_saves + 32)
+ _VAR_ss_Db = jit.Ptr(_SP, _VD_fargs + _VD_saves + 40)
+ _VAR_ss_Dc = jit.Ptr(_SP, _VD_fargs + _VD_saves + 48)
+)
+
+var (
+ _VAR_cs_LR = jit.Ptr(_SP, _VD_fargs + _VD_saves + 56)
+ _VAR_cs_p = jit.Ptr(_SP, _VD_fargs + _VD_saves + 64)
+ _VAR_cs_n = jit.Ptr(_SP, _VD_fargs + _VD_saves + 72)
+ _VAR_cs_d = jit.Ptr(_SP, _VD_fargs + _VD_saves + 80)
+)
+
+type _ValueDecoder struct {
+ jit.BaseAssembler
+}
+
+func (self *_ValueDecoder) build() uintptr {
+ self.Init(self.compile)
+ return *(*uintptr)(self.Load("decode_value", _VD_size, _VD_args, argPtrs_generic, localPtrs_generic))
+}
+
+/** Function Calling Helpers **/
+
+func (self *_ValueDecoder) save(r ...obj.Addr) {
+ for i, v := range r {
+ if i > _VD_saves / 8 - 1 {
+ panic("too many registers to save")
+ } else {
+ self.Emit("MOVQ", v, jit.Ptr(_SP, _VD_fargs + int64(i) * 8))
+ }
+ }
+}
+
+func (self *_ValueDecoder) load(r ...obj.Addr) {
+ for i, v := range r {
+ if i > _VD_saves / 8 - 1 {
+ panic("too many registers to load")
+ } else {
+ self.Emit("MOVQ", jit.Ptr(_SP, _VD_fargs + int64(i) * 8), v)
+ }
+ }
+}
+
+func (self *_ValueDecoder) call(fn obj.Addr) {
+ self.Emit("MOVQ", fn, _AX) // MOVQ ${fn}, AX
+ self.Rjmp("CALL", _AX) // CALL AX
+}
+
+func (self *_ValueDecoder) call_go(fn obj.Addr) {
+ self.save(_REG_go...) // SAVE $REG_go
+ self.call(fn) // CALL ${fn}
+ self.load(_REG_go...) // LOAD $REG_go
+}
+
+/** Decoder Assembler **/
+
+const (
+ _S_val = iota + 1
+ _S_arr
+ _S_arr_0
+ _S_obj
+ _S_obj_0
+ _S_obj_delim
+ _S_obj_sep
+)
+
+const (
+ _S_omask_key = (1 << _S_obj_0) | (1 << _S_obj_sep)
+ _S_omask_end = (1 << _S_obj_0) | (1 << _S_obj)
+ _S_vmask = (1 << _S_val) | (1 << _S_arr_0)
+)
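The _S_* constants are small state indices and the masks above are bit sets over them; the BTQ instructions emitted below test whether the bit of the current state is set in a mask. The same check, written as ordinary Go for illustration (this helper is not part of the file):

    // A value may start only when the current state is _S_val or _S_arr_0,
    // i.e. when its bit is set in _S_vmask. This mirrors "BTQ state, mask".
    func stateAcceptsValue(state int) bool {
        return _S_vmask&(1<<uint(state)) != 0
    }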
+
+const (
+ _A_init_len = 1
+ _A_init_cap = 16
+)
+
+const (
+ _ST_Sp = 0
+ _ST_Vt = _PtrBytes
+ _ST_Vp = _PtrBytes * (types.MAX_RECURSE + 1)
+)
+
+var (
+ _V_true = jit.Imm(int64(pbool(true)))
+ _V_false = jit.Imm(int64(pbool(false)))
+ _F_value = jit.Imm(int64(native.S_value))
+)
+
+var (
+ _V_max = jit.Imm(int64(types.V_MAX))
+ _E_eof = jit.Imm(int64(types.ERR_EOF))
+ _E_invalid = jit.Imm(int64(types.ERR_INVALID_CHAR))
+ _E_recurse = jit.Imm(int64(types.ERR_RECURSE_EXCEED_MAX))
+)
+
+var (
+ _F_convTslice = jit.Func(convTslice)
+ _F_convTstring = jit.Func(convTstring)
+ _F_invalid_vtype = jit.Func(invalid_vtype)
+)
+
+var (
+ _T_map = jit.Type(reflect.TypeOf((map[string]interface{})(nil)))
+ _T_bool = jit.Type(reflect.TypeOf(false))
+ _T_int64 = jit.Type(reflect.TypeOf(int64(0)))
+ _T_eface = jit.Type(reflect.TypeOf((*interface{})(nil)).Elem())
+ _T_slice = jit.Type(reflect.TypeOf(([]interface{})(nil)))
+ _T_string = jit.Type(reflect.TypeOf(""))
+ _T_number = jit.Type(reflect.TypeOf(json.Number("")))
+ _T_float64 = jit.Type(reflect.TypeOf(float64(0)))
+)
+
+var _R_tab = map[int]string {
+ '[': "_decode_V_ARRAY",
+ '{': "_decode_V_OBJECT",
+ ':': "_decode_V_KEY_SEP",
+ ',': "_decode_V_ELEM_SEP",
+ ']': "_decode_V_ARRAY_END",
+ '}': "_decode_V_OBJECT_END",
+}
+
+func (self *_ValueDecoder) compile() {
+ self.Emit("SUBQ", jit.Imm(_VD_size), _SP) // SUBQ $_VD_size, SP
+ self.Emit("MOVQ", _BP, jit.Ptr(_SP, _VD_offs)) // MOVQ BP, _VD_offs(SP)
+ self.Emit("LEAQ", jit.Ptr(_SP, _VD_offs), _BP) // LEAQ _VD_offs(SP), BP
+
+ /* initialize the state machine */
+ self.Emit("XORL", _CX, _CX) // XORL CX, CX
+ self.Emit("MOVQ", _DF, _VAR_df) // MOVQ DF, df
+ /* initialize digital buffer first */
+ self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_ss_Dc) // MOVQ $_MaxDigitNums, ss.Dcap
+ self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX
+ self.Emit("MOVQ", _AX, _VAR_ss_Db) // MOVQ AX, ss.Dbuf
+ /* add ST offset */
+ self.Emit("ADDQ", jit.Imm(_FsmOffset), _ST) // ADDQ _FsmOffset, _ST
+ self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp
+ self.WriteRecNotAX(0, _VP, jit.Ptr(_ST, _ST_Vp), false) // MOVQ VP, ST.Vp[0]
+ self.Emit("MOVQ", jit.Imm(_S_val), jit.Ptr(_ST, _ST_Vt)) // MOVQ _S_val, ST.Vt[0]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /* set the value from previous round */
+ self.Link("_set_value") // _set_value:
+ self.Emit("MOVL" , jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX
+ self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
+ self.Sjmp("JNC" , "_vtype_error") // JNC _vtype_error
+ self.Emit("XORL" , _SI, _SI) // XORL SI, SI
+ self.Emit("SUBQ" , jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp
+ self.Emit("XCHGQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // XCHGQ ST.Vp[CX], SI
+ self.Emit("MOVQ" , _R8, jit.Ptr(_SI, 0)) // MOVQ R8, (SI)
+ self.WriteRecNotAX(1, _R9, jit.Ptr(_SI, 8), false) // MOVQ R9, 8(SI)
+
+ /* check for value stack */
+ self.Link("_next") // _next:
+ self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _AX) // MOVQ ST.Sp, AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , "_return") // JS _return
+
+ /* fast path: test up to 4 characters manually */
+ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF
+ self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+ self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX
+ self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' '
+ self.Sjmp("JA" , "_decode_fast") // JA _decode_fast
+ self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX
+ self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+
+ /* at least 1 to 3 spaces */
+ for i := 0; i < 3; i++ {
+ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF
+ self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+ self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' '
+ self.Sjmp("JA" , "_decode_fast") // JA _decode_fast
+ self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX
+ self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ }
+
+ /* at least 4 spaces */
+ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF
+ self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+
+ /* fast path: use lookup table to select decoder */
+ self.Link("_decode_fast") // _decode_fast:
+ self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI
+ self.Sref("_decode_tab", 4) // .... &_decode_tab
+ self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX
+ self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JZ" , "_decode_native") // JZ _decode_native
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX
+ self.Rjmp("JMP" , _AX) // JMP AX
+
+ /* decode with native decoder */
+ self.Link("_decode_native") // _decode_native:
+ self.Emit("MOVQ", _IP, _DI) // MOVQ IP, DI
+ self.Emit("MOVQ", _IL, _SI) // MOVQ IL, SI
+ self.Emit("MOVQ", _IC, _DX) // MOVQ IC, DX
+ self.Emit("LEAQ", _VAR_ss, _CX) // LEAQ ss, CX
+ self.Emit("MOVQ", _VAR_df, _R8) // MOVQ $df, R8
+ self.Emit("BTSQ", jit.Imm(_F_allow_control), _R8) // ANDQ $1<<_F_allow_control, R8
+ self.call(_F_value) // CALL value
+ self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
+
+ /* check for errors */
+ self.Emit("MOVQ" , _VAR_ss_Vt, _AX) // MOVQ ss.Vt, AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , "_parsing_error")
+ self.Sjmp("JZ" , "_invalid_vtype") // JZ _invalid_vtype
+ self.Emit("CMPQ" , _AX, _V_max) // CMPQ AX, _V_max
+ self.Sjmp("JA" , "_invalid_vtype") // JA _invalid_vtype
+
+ /* jump table selector */
+ self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI
+ self.Sref("_switch_table", 4) // .... &_switch_table
+ self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, -4), _AX) // MOVLQSX -4(DI)(AX*4), AX
+ self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX
+ self.Rjmp("JMP" , _AX) // JMP AX
+
+ /** V_EOF **/
+ self.Link("_decode_V_EOF") // _decode_V_EOF:
+ self.Emit("MOVL", _E_eof, _EP) // MOVL _E_eof, EP
+ self.Sjmp("JMP" , "_error") // JMP _error
+
+ /** V_NULL **/
+ self.Link("_decode_V_NULL") // _decode_V_NULL:
+ self.Emit("XORL", _R8, _R8) // XORL R8, R8
+ self.Emit("XORL", _R9, _R9) // XORL R9, R9
+ self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /** V_TRUE **/
+ self.Link("_decode_V_TRUE") // _decode_V_TRUE:
+ self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8
+ // TODO: maybe modified by users?
+ self.Emit("MOVQ", _V_true, _R9) // MOVQ _V_true, R9
+ self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /** V_FALSE **/
+ self.Link("_decode_V_FALSE") // _decode_V_FALSE:
+ self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8
+ self.Emit("MOVQ", _V_false, _R9) // MOVQ _V_false, R9
+ self.Emit("LEAQ", jit.Ptr(_IC, -5), _DI) // LEAQ -5(IC), DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /** V_ARRAY **/
+ self.Link("_decode_V_ARRAY") // _decode_V_ARRAY
+ self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
+ self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char
+
+ /* create a new array */
+ self.Emit("MOVQ", _T_eface, _AX) // MOVQ _T_eface, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVQ", jit.Imm(_A_init_len), jit.Ptr(_SP, 8)) // MOVQ _A_init_len, 8(SP)
+ self.Emit("MOVQ", jit.Imm(_A_init_cap), jit.Ptr(_SP, 16)) // MOVQ _A_init_cap, 16(SP)
+ self.call_go(_F_makeslice) // CALL_GO runtime.makeslice
+ self.Emit("MOVQ", jit.Ptr(_SP, 24), _DX) // MOVQ 24(SP), DX
+
+ /* pack into an interface */
+ self.Emit("MOVQ", _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP)
+ self.Emit("MOVQ", jit.Imm(_A_init_len), jit.Ptr(_SP, 8)) // MOVQ _A_init_len, 8(SP)
+ self.Emit("MOVQ", jit.Imm(_A_init_cap), jit.Ptr(_SP, 16)) // MOVQ _A_init_cap, 16(SP)
+ self.call_go(_F_convTslice) // CALL_GO runtime.convTslice
+ self.Emit("MOVQ", jit.Ptr(_SP, 24), _R8) // MOVQ 24(SP), R8
+
+ /* replace current state with an array */
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI
+ self.Emit("MOVQ", jit.Imm(_S_arr), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr, ST.Vt[CX]
+ self.Emit("MOVQ", _T_slice, _AX) // MOVQ _T_slice, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SI, 0)) // MOVQ AX, (SI)
+ self.WriteRecNotAX(2, _R8, jit.Ptr(_SI, 8), false) // MOVQ R8, 8(SI)
+
+ /* add a new slot for the first element */
+ self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX
+ self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE}
+ self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow
+ self.Emit("MOVQ", jit.Ptr(_R8, 0), _AX) // MOVQ (R8), AX
+ self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp
+ self.WritePtrAX(3, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX]
+ self.Emit("MOVQ", jit.Imm(_S_arr_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr_0, ST.Vt[CX]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /** V_OBJECT **/
+ self.Link("_decode_V_OBJECT") // _decode_V_OBJECT:
+ self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
+ self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char
+ self.call_go(_F_makemap_small) // CALL_GO runtime.makemap_small
+ self.Emit("MOVQ", jit.Ptr(_SP, 0), _AX) // MOVQ (SP), AX
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Imm(_S_obj_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj, ST.Vt[CX]
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI
+ self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX
+ self.Emit("MOVQ", _DX, jit.Ptr(_SI, 0)) // MOVQ DX, (SI)
+ self.WritePtrAX(4, jit.Ptr(_SI, 8), false) // MOVQ AX, 8(SI)
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /** V_STRING **/
+ self.Link("_decode_V_STRING") // _decode_V_STRING:
+ self.Emit("MOVQ", _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX
+ self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX
+ self.Emit("SUBQ", _CX, _AX) // SUBQ CX, AX
+
+ /* check for escapes */
+ self.Emit("CMPQ", _VAR_ss_Ep, jit.Imm(-1)) // CMPQ ss.Ep, $-1
+ self.Sjmp("JNE" , "_unquote") // JNE _unquote
+ self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX
+ self.Emit("LEAQ", jit.Sib(_IP, _CX, 1, 0), _R8) // LEAQ (IP)(CX), R8
+ self.Byte(0x48, 0x8d, 0x3d) // LEAQ (PC), DI
+ self.Sref("_copy_string_end", 4)
+ self.Emit("BTQ", jit.Imm(_F_copy_string), _VAR_df)
+ self.Sjmp("JC", "copy_string")
+ self.Link("_copy_string_end")
+ self.Emit("XORL", _DX, _DX) // XORL DX, DX
+ /* strings with no escape sequences */
+ self.Link("_noescape") // _noescape:
+ self.Emit("MOVL", jit.Imm(_S_omask_key), _DI) // MOVL _S_omask, DI
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _SI) // MOVQ ST.Vt[CX], SI
+ self.Emit("BTQ" , _SI, _DI) // BTQ SI, DI
+ self.Sjmp("JC" , "_object_key") // JC _object_key
+
+ /* check for pre-packed strings, avoid 1 allocation */
+ self.Emit("TESTQ", _DX, _DX) // TESTQ DX, DX
+ self.Sjmp("JNZ" , "_packed_str") // JNZ _packed_str
+ self.Emit("MOVQ" , _R8, jit.Ptr(_SP, 0)) // MOVQ R8, (SP)
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 8)) // MOVQ AX, 8(SP)
+ self.call_go(_F_convTstring) // CALL_GO runtime.convTstring
+ self.Emit("MOVQ" , jit.Ptr(_SP, 16), _R9) // MOVQ 16(SP), R9
+
+ /* packed string already in R9 */
+ self.Link("_packed_str") // _packed_str:
+ self.Emit("MOVQ", _T_string, _R8) // MOVQ _T_string, R8
+ self.Emit("MOVQ", _VAR_ss_Iv, _DI) // MOVQ ss.Iv, DI
+ self.Emit("SUBQ", jit.Imm(1), _DI) // SUBQ $1, DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /* the string is an object key, get the map */
+ self.Link("_object_key")
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI
+
+ /* add a new delimiter */
+ self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX
+ self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE}
+ self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow
+ self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp
+ self.Emit("MOVQ", jit.Imm(_S_obj_delim), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_delim, ST.Vt[CX]
+
+    /* add a new slot in the map */
+ self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX
+ self.Emit("MOVQ", _DX, jit.Ptr(_SP, 0)) // MOVQ DX, (SP)
+ self.Emit("MOVQ", _SI, jit.Ptr(_SP, 8)) // MOVQ SI, 8(SP)
+ self.Emit("MOVQ", _R8, jit.Ptr(_SP, 16)) // MOVQ R9, 16(SP)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 24)) // MOVQ AX, 24(SP)
+ self.call_go(_F_mapassign_faststr) // CALL_GO runtime.mapassign_faststr
+ self.Emit("MOVQ", jit.Ptr(_SP, 32), _AX) // MOVQ 32(SP), AX
+
+ /* add to the pointer stack */
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.WritePtrAX(6, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /* allocate memory to store the string header and unquoted result */
+ self.Link("_unquote") // _unquote:
+ self.Emit("ADDQ", jit.Imm(15), _AX) // ADDQ $15, AX
+ self.Emit("MOVQ", _T_byte, _CX) // MOVQ _T_byte, CX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+ self.Emit("MOVB", jit.Imm(0), jit.Ptr(_SP, 16)) // MOVB $0, 16(SP)
+ self.call_go(_F_mallocgc) // CALL_GO runtime.mallocgc
+ self.Emit("MOVQ", jit.Ptr(_SP, 24), _R9) // MOVQ 24(SP), R9
+
+ /* prepare the unquoting parameters */
+ self.Emit("MOVQ" , _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX
+ self.Emit("LEAQ" , jit.Sib(_IP, _CX, 1, 0), _DI) // LEAQ (IP)(CX), DI
+ self.Emit("NEGQ" , _CX) // NEGQ CX
+ self.Emit("LEAQ" , jit.Sib(_IC, _CX, 1, -1), _SI) // LEAQ -1(IC)(CX), SI
+ self.Emit("LEAQ" , jit.Ptr(_R9, 16), _DX) // LEAQ 16(R8), DX
+ self.Emit("LEAQ" , _VAR_ss_Ep, _CX) // LEAQ ss.Ep, CX
+ self.Emit("XORL" , _R8, _R8) // XORL R8, R8
+ self.Emit("BTQ" , jit.Imm(_F_disable_urc), _VAR_df) // BTQ ${_F_disable_urc}, fv
+ self.Emit("SETCC", _R8) // SETCC R8
+ self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8
+
+ /* unquote the string, with R9 been preserved */
+ self.save(_R9) // SAVE R9
+ self.call(_F_unquote) // CALL unquote
+ self.load(_R9) // LOAD R9
+
+ /* check for errors */
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , "_unquote_error") // JS _unquote_error
+ self.Emit("MOVL" , jit.Imm(1), _DX) // MOVL $1, DX
+ self.Emit("LEAQ" , jit.Ptr(_R9, 16), _R8) // ADDQ $16, R8
+ self.Emit("MOVQ" , _R8, jit.Ptr(_R9, 0)) // MOVQ R8, (R9)
+ self.Emit("MOVQ" , _AX, jit.Ptr(_R9, 8)) // MOVQ AX, 8(R9)
+ self.Sjmp("JMP" , "_noescape") // JMP _noescape
+
+ /** V_DOUBLE **/
+ self.Link("_decode_V_DOUBLE") // _decode_V_DOUBLE:
+ self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df
+ self.Sjmp("JC" , "_use_number") // JC _use_number
+ self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0
+ self.Sjmp("JMP" , "_use_float64") // JMP _use_float64
+
+ /** V_INTEGER **/
+ self.Link("_decode_V_INTEGER") // _decode_V_INTEGER:
+ self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df
+ self.Sjmp("JC" , "_use_number") // JC _use_number
+ self.Emit("BTQ" , jit.Imm(_F_use_int64), _VAR_df) // BTQ _F_use_int64, df
+ self.Sjmp("JC" , "_use_int64") // JC _use_int64
+ self.Emit("MOVQ" , _VAR_ss_Iv, _AX) // MOVQ ss.Iv, AX
+ self.Emit("CVTSQ2SD", _AX, _X0) // CVTSQ2SD AX, X0
+
+ /* represent numbers as `float64` */
+ self.Link("_use_float64") // _use_float64:
+ self.Emit("MOVSD", _X0, jit.Ptr(_SP, 0)) // MOVSD X0, (SP)
+ self.call_go(_F_convT64) // CALL_GO runtime.convT64
+ self.Emit("MOVQ" , _T_float64, _R8) // MOVQ _T_float64, R8
+ self.Emit("MOVQ" , jit.Ptr(_SP, 8), _R9) // MOVQ 8(SP), R9
+ self.Emit("MOVQ" , _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /* represent numbers as `json.Number` */
+ self.Link("_use_number") // _use_number
+ self.Emit("MOVQ", _VAR_ss_Ep, _AX) // MOVQ ss.Ep, AX
+ self.Emit("LEAQ", jit.Sib(_IP, _AX, 1, 0), _SI) // LEAQ (IP)(AX), SI
+ self.Emit("MOVQ", _IC, _CX) // MOVQ IC, CX
+ self.Emit("SUBQ", _AX, _CX) // SUBQ AX, CX
+ self.Emit("MOVQ", _SI, jit.Ptr(_SP, 0)) // MOVQ SI, (SP)
+ self.Emit("MOVQ", _CX, jit.Ptr(_SP, 8)) // MOVQ CX, 8(SP)
+ self.call_go(_F_convTstring) // CALL_GO runtime.convTstring
+ self.Emit("MOVQ", _T_number, _R8) // MOVQ _T_number, R8
+ self.Emit("MOVQ", jit.Ptr(_SP, 16), _R9) // MOVQ 16(SP), R9
+ self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /* represent numbers as `int64` */
+ self.Link("_use_int64") // _use_int64:
+ self.Emit("MOVQ", _VAR_ss_Iv, _AX) // MOVQ ss.Iv, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.call_go(_F_convT64) // CALL_GO runtime.convT64
+ self.Emit("MOVQ", _T_int64, _R8) // MOVQ _T_int64, R8
+ self.Emit("MOVQ", jit.Ptr(_SP, 8), _R9) // MOVQ 8(SP), R9
+ self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /** V_KEY_SEP **/
+ self.Link("_decode_V_KEY_SEP") // _decode_V_KEY_SEP:
+ // self.Byte(0xcc)
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("CMPQ", _AX, jit.Imm(_S_obj_delim)) // CMPQ AX, _S_obj_delim
+ self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char
+ self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX]
+ self.Emit("MOVQ", jit.Imm(_S_obj), jit.Sib(_ST, _CX, 8, _ST_Vt - 8)) // MOVQ _S_obj, ST.Vt[CX - 1]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /** V_ELEM_SEP **/
+ self.Link("_decode_V_ELEM_SEP") // _decode_V_ELEM_SEP:
+ self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("CMPQ" , _AX, jit.Imm(_S_arr)) // CMPQ _AX, _S_arr
+ self.Sjmp("JE" , "_array_sep") // JZ _next
+ self.Emit("CMPQ" , _AX, jit.Imm(_S_obj)) // CMPQ _AX, _S_arr
+ self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char
+ self.Emit("MOVQ" , jit.Imm(_S_obj_sep), jit.Sib(_ST, _CX, 8, _ST_Vt))
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /* arrays */
+ self.Link("_array_sep")
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _DX) // MOVQ 8(SI), DX
+ self.Emit("CMPQ", _DX, jit.Ptr(_SI, 16)) // CMPQ DX, 16(SI)
+ self.Sjmp("JAE" , "_array_more") // JAE _array_more
+
+ /* add a slot for the new element */
+ self.Link("_array_append") // _array_append:
+ self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_SI, 8)) // ADDQ $1, 8(SI)
+ self.Emit("MOVQ", jit.Ptr(_SI, 0), _SI) // MOVQ (SI), SI
+ self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX
+ self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE}
+ self.Sjmp("JAE" , "_stack_overflow")
+ self.Emit("SHLQ", jit.Imm(1), _DX) // SHLQ $1, DX
+ self.Emit("LEAQ", jit.Sib(_SI, _DX, 8, 0), _SI) // LEAQ (SI)(DX*8), SI
+ self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp
+ self.WriteRecNotAX(7 , _SI, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ SI, ST.Vp[CX]
+ self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX}
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /** V_ARRAY_END **/
+ self.Link("_decode_V_ARRAY_END") // _decode_V_ARRAY_END:
+ self.Emit("XORL", _DX, _DX) // XORL DX, DX
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("CMPQ", _AX, jit.Imm(_S_arr_0)) // CMPQ AX, _S_arr_0
+ self.Sjmp("JE" , "_first_item") // JE _first_item
+ self.Emit("CMPQ", _AX, jit.Imm(_S_arr)) // CMPQ AX, _S_arr
+ self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char
+ self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp
+ self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /* first element of an array */
+ self.Link("_first_item") // _first_item:
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("SUBQ", jit.Imm(2), jit.Ptr(_ST, _ST_Sp)) // SUBQ $2, ST.Sp
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp - 8), _SI) // MOVQ ST.Vp[CX - 1], SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI
+ self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp - 8)) // MOVQ DX, ST.Vp[CX - 1]
+ self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX]
+ self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI)
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /** V_OBJECT_END **/
+ self.Link("_decode_V_OBJECT_END") // _decode_V_OBJECT_END:
+ self.Emit("MOVL", jit.Imm(_S_omask_end), _DX) // MOVL _S_omask, DI
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("BTQ" , _AX, _DX)
+ self.Sjmp("JNC" , "_invalid_char") // JNE _invalid_char
+ self.Emit("XORL", _AX, _AX) // XORL AX, AX
+ self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp
+ self.Emit("MOVQ", _AX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ AX, ST.Vp[CX]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /* return from decoder */
+ self.Link("_return") // _return:
+ self.Emit("XORL", _EP, _EP) // XORL EP, EP
+ self.Emit("MOVQ", _EP, jit.Ptr(_ST, _ST_Vp)) // MOVQ EP, ST.Vp[0]
+ self.Link("_epilogue") // _epilogue:
+ self.Emit("SUBQ", jit.Imm(_FsmOffset), _ST) // SUBQ _FsmOffset, _ST
+ self.Emit("MOVQ", jit.Ptr(_SP, _VD_offs), _BP) // MOVQ _VD_offs(SP), BP
+ self.Emit("ADDQ", jit.Imm(_VD_size), _SP) // ADDQ $_VD_size, SP
+ self.Emit("RET") // RET
+
+ /* array expand */
+ self.Link("_array_more") // _array_more:
+ self.Emit("MOVQ" , _T_eface, _AX) // MOVQ _T_eface, AX
+ self.Emit("MOVOU", jit.Ptr(_SI, 0), _X0) // MOVOU (SI), X0
+ self.Emit("MOVQ" , jit.Ptr(_SI, 16), _DX) // MOVQ 16(SI), DX
+ self.Emit("MOVQ" , _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.Emit("MOVOU", _X0, jit.Ptr(_SP, 8)) // MOVOU X0, 8(SP)
+ self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 24)) // MOVQ DX, 24(SP)
+ self.Emit("SHLQ" , jit.Imm(1), _DX) // SHLQ $1, DX
+ self.Emit("MOVQ" , _DX, jit.Ptr(_SP, 32)) // MOVQ DX, 32(SP)
+ self.call_go(_F_growslice) // CALL_GO runtime.growslice
+ self.Emit("MOVQ" , jit.Ptr(_SP, 40), _DI) // MOVOU 40(SP), DI
+ self.Emit("MOVQ" , jit.Ptr(_SP, 48), _DX) // MOVOU 48(SP), DX
+ self.Emit("MOVQ" , jit.Ptr(_SP, 56), _AX) // MOVQ 56(SP), AX
+
+ /* update the slice */
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI
+ self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SI, 16)) // MOVQ AX, 16(AX)
+ self.WriteRecNotAX(8 , _DI, jit.Ptr(_SI, 0), false) // MOVQ R10, (SI)
+ self.Sjmp("JMP" , "_array_append") // JMP _array_append
+
+ /* copy string */
+ self.Link("copy_string") // pointer: R8, length: AX, return addr: DI
+ // self.Byte(0xcc)
+ self.Emit("MOVQ", _R8, _VAR_cs_p)
+ self.Emit("MOVQ", _AX, _VAR_cs_n)
+ self.Emit("MOVQ", _DI, _VAR_cs_LR)
+ self.Emit("MOVQ", _T_byte, jit.Ptr(_SP, 0))
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 8))
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16))
+ self.call_go(_F_makeslice)
+ self.Emit("MOVQ", jit.Ptr(_SP, 24), _R8)
+ self.Emit("MOVQ", _R8, _VAR_cs_d)
+ self.Emit("MOVQ", _R8, jit.Ptr(_SP, 0))
+ self.Emit("MOVQ", _VAR_cs_p, _R8)
+ self.Emit("MOVQ", _R8, jit.Ptr(_SP, 8))
+ self.Emit("MOVQ", _VAR_cs_n, _AX)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 16))
+ self.call_go(_F_memmove)
+ self.Emit("MOVQ", _VAR_cs_d, _R8)
+ self.Emit("MOVQ", _VAR_cs_n, _AX)
+ self.Emit("MOVQ", _VAR_cs_LR, _DI)
+ // self.Byte(0xcc)
+ self.Rjmp("JMP", _DI)
+
+ /* error handlers */
+ self.Link("_stack_overflow")
+ self.Emit("MOVL" , _E_recurse, _EP) // MOVQ _E_recurse, EP
+ self.Sjmp("JMP" , "_error") // JMP _error
+ self.Link("_vtype_error") // _vtype_error:
+ self.Emit("MOVQ" , _DI, _IC) // MOVQ DI, IC
+ self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP
+ self.Sjmp("JMP" , "_error") // JMP _error
+ self.Link("_invalid_char") // _invalid_char:
+ self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC
+ self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP
+ self.Sjmp("JMP" , "_error") // JMP _error
+ self.Link("_unquote_error") // _unquote_error:
+ self.Emit("MOVQ" , _VAR_ss_Iv, _IC) // MOVQ ss.Iv, IC
+ self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC
+ self.Link("_parsing_error") // _parsing_error:
+ self.Emit("NEGQ" , _AX) // NEGQ AX
+ self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP
+ self.Link("_error") // _error:
+ self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP)
+ self.Sjmp("JMP" , "_epilogue") // JMP _epilogue
+
+ /* invalid value type, never returns */
+ self.Link("_invalid_vtype")
+ self.Emit("MOVQ", _AX, jit.Ptr(_SP, 0)) // MOVQ AX, (SP)
+ self.call(_F_invalid_vtype) // CALL invalid_type
+ self.Emit("UD2") // UD2
+
+ /* switch jump table */
+ self.Link("_switch_table") // _switch_table:
+ self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0
+ self.Sref("_decode_V_NULL", -4) // SREF &_decode_V_NULL, $-4
+ self.Sref("_decode_V_TRUE", -8) // SREF &_decode_V_TRUE, $-8
+ self.Sref("_decode_V_FALSE", -12) // SREF &_decode_V_FALSE, $-12
+ self.Sref("_decode_V_ARRAY", -16) // SREF &_decode_V_ARRAY, $-16
+ self.Sref("_decode_V_OBJECT", -20) // SREF &_decode_V_OBJECT, $-20
+ self.Sref("_decode_V_STRING", -24) // SREF &_decode_V_STRING, $-24
+ self.Sref("_decode_V_DOUBLE", -28) // SREF &_decode_V_DOUBLE, $-28
+ self.Sref("_decode_V_INTEGER", -32) // SREF &_decode_V_INTEGER, $-32
+ self.Sref("_decode_V_KEY_SEP", -36) // SREF &_decode_V_KEY_SEP, $-36
+ self.Sref("_decode_V_ELEM_SEP", -40) // SREF &_decode_V_ELEM_SEP, $-40
+ self.Sref("_decode_V_ARRAY_END", -44) // SREF &_decode_V_ARRAY_END, $-44
+ self.Sref("_decode_V_OBJECT_END", -48) // SREF &_decode_V_OBJECT_END, $-48
+
+ /* fast character lookup table */
+ self.Link("_decode_tab") // _decode_tab:
+ self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0
+
+ /* generate rest of the tabs */
+ for i := 1; i < 256; i++ {
+ if to, ok := _R_tab[i]; ok {
+ self.Sref(to, -int64(i) * 4)
+ } else {
+ self.Byte(0x00, 0x00, 0x00, 0x00)
+ }
+ }
+}
+
+func (self *_ValueDecoder) WritePtrAX(i int, rec obj.Addr, saveDI bool) {
+ self.Emit("MOVQ", _V_writeBarrier, _R10)
+ self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0))
+ self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ if saveDI {
+ self.save(_DI)
+ }
+ self.Emit("LEAQ", rec, _DI)
+ self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX
+ self.Rjmp("CALL", _R10)
+ if saveDI {
+ self.load(_DI)
+ }
+ self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", _AX, rec)
+ self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+}
+
+func (self *_ValueDecoder) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool) {
+ if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX {
+ panic("rec contains AX!")
+ }
+ self.Emit("MOVQ", _V_writeBarrier, _R10)
+ self.Emit("CMPL", jit.Ptr(_R10, 0), jit.Imm(0))
+ self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", ptr, _AX)
+ if saveDI {
+ self.save(_DI)
+ }
+ self.Emit("LEAQ", rec, _DI)
+ self.Emit("MOVQ", _F_gcWriteBarrierAX, _R10) // MOVQ ${fn}, AX
+ self.Rjmp("CALL", _R10)
+ if saveDI {
+ self.load(_DI)
+ }
+ self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", ptr, rec)
+ self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+}
+
+/** Generic Decoder **/
+
+var (
+ _subr_decode_value = new(_ValueDecoder).build()
+)
+
+//go:nosplit
+func invalid_vtype(vt types.ValueType) {
+ throw(fmt.Sprintf("invalid value type: %d", vt))
+}
diff --git a/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117.go b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117.go
new file mode 100644
index 000000000..df1cd9f5b
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117.go
@@ -0,0 +1,772 @@
+//go:build go1.17 && !go1.21
+// +build go1.17,!go1.21
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `encoding/json`
+ `fmt`
+ `reflect`
+ `strconv`
+
+ `github.com/bytedance/sonic/internal/jit`
+ `github.com/bytedance/sonic/internal/native`
+ `github.com/bytedance/sonic/internal/native/types`
+ `github.com/twitchyliquid64/golang-asm/obj`
+ `github.com/twitchyliquid64/golang-asm/obj/x86`
+)
+
+/** Crucial Registers:
+ *
+ * ST(R13) && 0(SP) : ro, decoder stack
+ * DF(AX) : ro, decoder flags
+ * EP(BX) : wo, error pointer
+ * IP(R10) : ro, input pointer
+ * IL(R12) : ro, input length
+ * IC(R11) : rw, input cursor
+ * VP(R15) : ro, value pointer (to an interface{})
+ */
+
+const (
+    _VD_args   = 8      // 8 bytes for passing arguments to this function
+ _VD_fargs = 64 // 64 bytes for passing arguments to other Go functions
+ _VD_saves = 48 // 48 bytes for saving the registers before CALL instructions
+ _VD_locals = 96 // 96 bytes for local variables
+)
+
+const (
+ _VD_offs = _VD_fargs + _VD_saves + _VD_locals
+ _VD_size = _VD_offs + 8 // 8 bytes for the parent frame pointer
+)
+
+var (
+ _VAR_ss = _VAR_ss_Vt
+ _VAR_df = jit.Ptr(_SP, _VD_fargs + _VD_saves)
+)
+
+var (
+ _VAR_ss_Vt = jit.Ptr(_SP, _VD_fargs + _VD_saves + 8)
+ _VAR_ss_Dv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 16)
+ _VAR_ss_Iv = jit.Ptr(_SP, _VD_fargs + _VD_saves + 24)
+ _VAR_ss_Ep = jit.Ptr(_SP, _VD_fargs + _VD_saves + 32)
+ _VAR_ss_Db = jit.Ptr(_SP, _VD_fargs + _VD_saves + 40)
+ _VAR_ss_Dc = jit.Ptr(_SP, _VD_fargs + _VD_saves + 48)
+)
+
+var (
+ _VAR_R9 = jit.Ptr(_SP, _VD_fargs + _VD_saves + 56)
+)
+type _ValueDecoder struct {
+ jit.BaseAssembler
+}
+
+var (
+ _VAR_cs_LR = jit.Ptr(_SP, _VD_fargs + _VD_saves + 64)
+ _VAR_cs_p = jit.Ptr(_SP, _VD_fargs + _VD_saves + 72)
+ _VAR_cs_n = jit.Ptr(_SP, _VD_fargs + _VD_saves + 80)
+ _VAR_cs_d = jit.Ptr(_SP, _VD_fargs + _VD_saves + 88)
+)
+
+func (self *_ValueDecoder) build() uintptr {
+ self.Init(self.compile)
+ return *(*uintptr)(self.Load("decode_value", _VD_size, _VD_args, argPtrs_generic, localPtrs_generic))
+}
+
+/** Function Calling Helpers **/
+
+func (self *_ValueDecoder) save(r ...obj.Addr) {
+ for i, v := range r {
+ if i > _VD_saves / 8 - 1 {
+ panic("too many registers to save")
+ } else {
+ self.Emit("MOVQ", v, jit.Ptr(_SP, _VD_fargs + int64(i) * 8))
+ }
+ }
+}
+
+func (self *_ValueDecoder) load(r ...obj.Addr) {
+ for i, v := range r {
+ if i > _VD_saves / 8 - 1 {
+ panic("too many registers to load")
+ } else {
+ self.Emit("MOVQ", jit.Ptr(_SP, _VD_fargs + int64(i) * 8), v)
+ }
+ }
+}
+
+func (self *_ValueDecoder) call(fn obj.Addr) {
+ self.Emit("MOVQ", fn, _R9) // MOVQ ${fn}, AX
+ self.Rjmp("CALL", _R9) // CALL AX
+}
+
+func (self *_ValueDecoder) call_go(fn obj.Addr) {
+ self.save(_REG_go...) // SAVE $REG_go
+ self.call(fn) // CALL ${fn}
+ self.load(_REG_go...) // LOAD $REG_go
+}
+
+func (self *_ValueDecoder) callc(fn obj.Addr) {
+ self.Emit("XCHGQ", _IP, _BP)
+ self.call(fn)
+ self.Emit("XCHGQ", _IP, _BP)
+}
+
+func (self *_ValueDecoder) call_c(fn obj.Addr) {
+ self.Emit("XCHGQ", _IC, _BX)
+ self.callc(fn)
+ self.Emit("XCHGQ", _IC, _BX)
+}
+
+/** Decoder Assembler **/
+
+const (
+ _S_val = iota + 1
+ _S_arr
+ _S_arr_0
+ _S_obj
+ _S_obj_0
+ _S_obj_delim
+ _S_obj_sep
+)
+
+const (
+ _S_omask_key = (1 << _S_obj_0) | (1 << _S_obj_sep)
+ _S_omask_end = (1 << _S_obj_0) | (1 << _S_obj)
+ _S_vmask = (1 << _S_val) | (1 << _S_arr_0)
+)
+
+const (
+ _A_init_len = 1
+ _A_init_cap = 16
+)
+
+const (
+ _ST_Sp = 0
+ _ST_Vt = _PtrBytes
+ _ST_Vp = _PtrBytes * (types.MAX_RECURSE + 1)
+)
+
+var (
+ _V_true = jit.Imm(int64(pbool(true)))
+ _V_false = jit.Imm(int64(pbool(false)))
+ _F_value = jit.Imm(int64(native.S_value))
+)
+
+var (
+ _V_max = jit.Imm(int64(types.V_MAX))
+ _E_eof = jit.Imm(int64(types.ERR_EOF))
+ _E_invalid = jit.Imm(int64(types.ERR_INVALID_CHAR))
+ _E_recurse = jit.Imm(int64(types.ERR_RECURSE_EXCEED_MAX))
+)
+
+var (
+ _F_convTslice = jit.Func(convTslice)
+ _F_convTstring = jit.Func(convTstring)
+ _F_invalid_vtype = jit.Func(invalid_vtype)
+)
+
+var (
+ _T_map = jit.Type(reflect.TypeOf((map[string]interface{})(nil)))
+ _T_bool = jit.Type(reflect.TypeOf(false))
+ _T_int64 = jit.Type(reflect.TypeOf(int64(0)))
+ _T_eface = jit.Type(reflect.TypeOf((*interface{})(nil)).Elem())
+ _T_slice = jit.Type(reflect.TypeOf(([]interface{})(nil)))
+ _T_string = jit.Type(reflect.TypeOf(""))
+ _T_number = jit.Type(reflect.TypeOf(json.Number("")))
+ _T_float64 = jit.Type(reflect.TypeOf(float64(0)))
+)
+
+var _R_tab = map[int]string {
+ '[': "_decode_V_ARRAY",
+ '{': "_decode_V_OBJECT",
+ ':': "_decode_V_KEY_SEP",
+ ',': "_decode_V_ELEM_SEP",
+ ']': "_decode_V_ARRAY_END",
+ '}': "_decode_V_OBJECT_END",
+}
+
+func (self *_ValueDecoder) compile() {
+ self.Emit("SUBQ", jit.Imm(_VD_size), _SP) // SUBQ $_VD_size, SP
+ self.Emit("MOVQ", _BP, jit.Ptr(_SP, _VD_offs)) // MOVQ BP, _VD_offs(SP)
+ self.Emit("LEAQ", jit.Ptr(_SP, _VD_offs), _BP) // LEAQ _VD_offs(SP), BP
+
+ /* initialize the state machine */
+ self.Emit("XORL", _CX, _CX) // XORL CX, CX
+ self.Emit("MOVQ", _DF, _VAR_df) // MOVQ DF, df
+ /* initialize digital buffer first */
+ self.Emit("MOVQ", jit.Imm(_MaxDigitNums), _VAR_ss_Dc) // MOVQ $_MaxDigitNums, ss.Dcap
+ self.Emit("LEAQ", jit.Ptr(_ST, _DbufOffset), _AX) // LEAQ _DbufOffset(ST), AX
+ self.Emit("MOVQ", _AX, _VAR_ss_Db) // MOVQ AX, ss.Dbuf
+ /* add ST offset */
+ self.Emit("ADDQ", jit.Imm(_FsmOffset), _ST) // ADDQ _FsmOffset, _ST
+ self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp
+ self.WriteRecNotAX(0, _VP, jit.Ptr(_ST, _ST_Vp), false) // MOVQ VP, ST.Vp[0]
+ self.Emit("MOVQ", jit.Imm(_S_val), jit.Ptr(_ST, _ST_Vt)) // MOVQ _S_val, ST.Vt[0]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /* set the value from previous round */
+ self.Link("_set_value") // _set_value:
+ self.Emit("MOVL" , jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX
+ self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
+ self.Sjmp("JNC" , "_vtype_error") // JNC _vtype_error
+ self.Emit("XORL" , _SI, _SI) // XORL SI, SI
+ self.Emit("SUBQ" , jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp
+ self.Emit("XCHGQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // XCHGQ ST.Vp[CX], SI
+ self.Emit("MOVQ" , _R8, jit.Ptr(_SI, 0)) // MOVQ R8, (SI)
+ self.WriteRecNotAX(1, _R9, jit.Ptr(_SI, 8), false) // MOVQ R9, 8(SI)
+
+ /* check for value stack */
+ self.Link("_next") // _next:
+ self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _AX) // MOVQ ST.Sp, AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , "_return") // JS _return
+
+ /* fast path: test up to 4 characters manually */
+ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF
+ self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+ self.Emit("MOVQ" , jit.Imm(_BM_space), _DX) // MOVQ _BM_space, DX
+ self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' '
+ self.Sjmp("JA" , "_decode_fast") // JA _decode_fast
+ self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX
+ self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+
+ /* at least 1 to 3 spaces */
+ for i := 0; i < 3; i++ {
+ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF
+ self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+ self.Emit("CMPQ" , _AX, jit.Imm(' ')) // CMPQ AX, $' '
+ self.Sjmp("JA" , "_decode_fast") // JA _decode_fast
+ self.Emit("BTQ" , _AX, _DX) // BTQ _AX, _DX
+ self.Sjmp("JNC" , "_decode_fast") // JNC _decode_fast
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ }
+
+ /* at least 4 spaces */
+ self.Emit("CMPQ" , _IC, _IL) // CMPQ IC, IL
+ self.Sjmp("JAE" , "_decode_V_EOF") // JAE _decode_V_EOF
+ self.Emit("MOVBQZX", jit.Sib(_IP, _IC, 1, 0), _AX) // MOVBQZX (IP)(IC), AX
+
+ /* fast path: use lookup table to select decoder */
+ self.Link("_decode_fast") // _decode_fast:
+ self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI
+ self.Sref("_decode_tab", 4) // .... &_decode_tab
+ self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, 0), _AX) // MOVLQSX (DI)(AX*4), AX
+ self.Emit("TESTQ" , _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JZ" , "_decode_native") // JZ _decode_native
+ self.Emit("ADDQ" , jit.Imm(1), _IC) // ADDQ $1, IC
+ self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX
+ self.Rjmp("JMP" , _AX) // JMP AX
+
+ /* decode with native decoder */
+ self.Link("_decode_native") // _decode_native:
+ self.Emit("MOVQ", _IP, _DI) // MOVQ IP, DI
+ self.Emit("MOVQ", _IL, _SI) // MOVQ IL, SI
+ self.Emit("MOVQ", _IC, _DX) // MOVQ IC, DX
+ self.Emit("LEAQ", _VAR_ss, _CX) // LEAQ ss, CX
+ self.Emit("MOVQ", _VAR_df, _R8) // MOVQ $df, R8
+ self.Emit("BTSQ", jit.Imm(_F_allow_control), _R8) // ANDQ $1<<_F_allow_control, R8
+ self.callc(_F_value) // CALL value
+ self.Emit("MOVQ", _AX, _IC) // MOVQ AX, IC
+
+ /* check for errors */
+ self.Emit("MOVQ" , _VAR_ss_Vt, _AX) // MOVQ ss.Vt, AX
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , "_parsing_error")
+ self.Sjmp("JZ" , "_invalid_vtype") // JZ _invalid_vtype
+ self.Emit("CMPQ" , _AX, _V_max) // CMPQ AX, _V_max
+ self.Sjmp("JA" , "_invalid_vtype") // JA _invalid_vtype
+
+ /* jump table selector */
+ self.Byte(0x48, 0x8d, 0x3d) // LEAQ ?(PC), DI
+ self.Sref("_switch_table", 4) // .... &_switch_table
+ self.Emit("MOVLQSX", jit.Sib(_DI, _AX, 4, -4), _AX) // MOVLQSX -4(DI)(AX*4), AX
+ self.Emit("ADDQ" , _DI, _AX) // ADDQ DI, AX
+ self.Rjmp("JMP" , _AX) // JMP AX
+
+ /** V_EOF **/
+ self.Link("_decode_V_EOF") // _decode_V_EOF:
+ self.Emit("MOVL", _E_eof, _EP) // MOVL _E_eof, EP
+ self.Sjmp("JMP" , "_error") // JMP _error
+
+ /** V_NULL **/
+ self.Link("_decode_V_NULL") // _decode_V_NULL:
+ self.Emit("XORL", _R8, _R8) // XORL R8, R8
+ self.Emit("XORL", _R9, _R9) // XORL R9, R9
+ self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /** V_TRUE **/
+ self.Link("_decode_V_TRUE") // _decode_V_TRUE:
+ self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8
+ // TODO: may be modified by users?
+ self.Emit("MOVQ", _V_true, _R9) // MOVQ _V_true, R9
+ self.Emit("LEAQ", jit.Ptr(_IC, -4), _DI) // LEAQ -4(IC), DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /** V_FALSE **/
+ self.Link("_decode_V_FALSE") // _decode_V_FALSE:
+ self.Emit("MOVQ", _T_bool, _R8) // MOVQ _T_bool, R8
+ self.Emit("MOVQ", _V_false, _R9) // MOVQ _V_false, R9
+ self.Emit("LEAQ", jit.Ptr(_IC, -5), _DI) // LEAQ -5(IC), DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /** V_ARRAY **/
+ self.Link("_decode_V_ARRAY") // _decode_V_ARRAY
+ self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
+ self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char
+
+ /* create a new array */
+ self.Emit("MOVQ", _T_eface, _AX) // MOVQ _T_eface, AX
+ self.Emit("MOVQ", jit.Imm(_A_init_len), _BX) // MOVQ _A_init_len, BX
+ self.Emit("MOVQ", jit.Imm(_A_init_cap), _CX) // MOVQ _A_init_cap, CX
+ self.call_go(_F_makeslice) // CALL_GO runtime.makeslice
+
+ /* pack into an interface */
+ self.Emit("MOVQ", jit.Imm(_A_init_len), _BX) // MOVQ _A_init_len, BX
+ self.Emit("MOVQ", jit.Imm(_A_init_cap), _CX) // MOVQ _A_init_cap, CX
+ self.call_go(_F_convTslice) // CALL_GO runtime.convTslice
+ self.Emit("MOVQ", _AX, _R8) // MOVQ AX, R8
+
+ /* replace current state with an array */
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI
+ self.Emit("MOVQ", jit.Imm(_S_arr), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr, ST.Vt[CX]
+ self.Emit("MOVQ", _T_slice, _AX) // MOVQ _T_slice, AX
+ self.Emit("MOVQ", _AX, jit.Ptr(_SI, 0)) // MOVQ AX, (SI)
+ self.WriteRecNotAX(2, _R8, jit.Ptr(_SI, 8), false) // MOVQ R8, 8(SI)
+
+ /* add a new slot for the first element */
+ self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX
+ self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE}
+ self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow
+ self.Emit("MOVQ", jit.Ptr(_R8, 0), _AX) // MOVQ (R8), AX
+ self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp
+ self.WritePtrAX(3, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX]
+ self.Emit("MOVQ", jit.Imm(_S_arr_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_arr_0, ST.Vt[CX]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /** V_OBJECT **/
+ self.Link("_decode_V_OBJECT") // _decode_V_OBJECT:
+ self.Emit("MOVL", jit.Imm(_S_vmask), _DX) // MOVL _S_vmask, DX
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("BTQ" , _AX, _DX) // BTQ AX, DX
+ self.Sjmp("JNC" , "_invalid_char") // JNC _invalid_char
+ self.call_go(_F_makemap_small) // CALL_GO runtime.makemap_small
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Imm(_S_obj_0), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_0, ST.Vt[CX]
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI
+ self.Emit("MOVQ", _T_map, _DX) // MOVQ _T_map, DX
+ self.Emit("MOVQ", _DX, jit.Ptr(_SI, 0)) // MOVQ DX, (SI)
+ self.WritePtrAX(4, jit.Ptr(_SI, 8), false) // MOVQ AX, 8(SI)
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /** V_STRING **/
+ self.Link("_decode_V_STRING") // _decode_V_STRING:
+ self.Emit("MOVQ", _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX
+ self.Emit("MOVQ", _IC, _AX) // MOVQ IC, AX
+ self.Emit("SUBQ", _CX, _AX) // SUBQ CX, AX
+
+ /* check for escapes */
+ self.Emit("CMPQ", _VAR_ss_Ep, jit.Imm(-1)) // CMPQ ss.Ep, $-1
+ self.Sjmp("JNE" , "_unquote") // JNE _unquote
+ self.Emit("SUBQ", jit.Imm(1), _AX) // SUBQ $1, AX
+ self.Emit("LEAQ", jit.Sib(_IP, _CX, 1, 0), _R8) // LEAQ (IP)(CX), R8
+ self.Byte(0x48, 0x8d, 0x3d) // LEAQ (PC), DI
+ self.Sref("_copy_string_end", 4)
+ self.Emit("BTQ", jit.Imm(_F_copy_string), _VAR_df)
+ self.Sjmp("JC", "copy_string")
+ self.Link("_copy_string_end")
+ self.Emit("XORL", _DX, _DX)
+
+ /* strings with no escape sequences */
+ self.Link("_noescape") // _noescape:
+ self.Emit("MOVL", jit.Imm(_S_omask_key), _DI) // MOVL _S_omask, DI
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _SI) // MOVQ ST.Vt[CX], SI
+ self.Emit("BTQ" , _SI, _DI) // BTQ SI, DI
+ self.Sjmp("JC" , "_object_key") // JC _object_key
+
+ /* check for pre-packed strings, avoid 1 allocation */
+ self.Emit("TESTQ", _DX, _DX) // TESTQ DX, DX
+ self.Sjmp("JNZ" , "_packed_str") // JNZ _packed_str
+ self.Emit("MOVQ" , _AX, _BX) // MOVQ AX, BX
+ self.Emit("MOVQ" , _R8, _AX) // MOVQ R8, AX
+ self.call_go(_F_convTstring) // CALL_GO runtime.convTstring
+ self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9
+
+ /* packed string already in R9 */
+ self.Link("_packed_str") // _packed_str:
+ self.Emit("MOVQ", _T_string, _R8) // MOVQ _T_string, R8
+ self.Emit("MOVQ", _VAR_ss_Iv, _DI) // MOVQ ss.Iv, DI
+ self.Emit("SUBQ", jit.Imm(1), _DI) // SUBQ $1, DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /* the string is an object key, get the map */
+ self.Link("_object_key")
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI
+
+ /* add a new delimiter */
+ self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX
+ self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE}
+ self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow
+ self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp
+ self.Emit("MOVQ", jit.Imm(_S_obj_delim), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_obj_delim, ST.Vt[CX]
+
+ /* add a new slot in the map */
+ self.Emit("MOVQ", _AX, _DI) // MOVQ AX, DI
+ self.Emit("MOVQ", _T_map, _AX) // MOVQ _T_map, AX
+ self.Emit("MOVQ", _SI, _BX) // MOVQ SI, BX
+ self.Emit("MOVQ", _R8, _CX) // MOVQ R9, CX
+ self.call_go(_F_mapassign_faststr) // CALL_GO runtime.mapassign_faststr
+
+ /* add to the pointer stack */
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.WritePtrAX(6, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ AX, ST.Vp[CX]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /* allocate memory to store the string header and unquoted result */
+ self.Link("_unquote") // _unquote:
+ self.Emit("ADDQ", jit.Imm(15), _AX) // ADDQ $15, AX
+ self.Emit("MOVQ", _T_byte, _BX) // MOVQ _T_byte, BX
+ self.Emit("MOVB", jit.Imm(0), _CX) // MOVB $0, CX
+ self.call_go(_F_mallocgc) // CALL_GO runtime.mallocgc
+ self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9
+
+ /* prepare the unquoting parameters */
+ self.Emit("MOVQ" , _VAR_ss_Iv, _CX) // MOVQ ss.Iv, CX
+ self.Emit("LEAQ" , jit.Sib(_IP, _CX, 1, 0), _DI) // LEAQ (IP)(CX), DI
+ self.Emit("NEGQ" , _CX) // NEGQ CX
+ self.Emit("LEAQ" , jit.Sib(_IC, _CX, 1, -1), _SI) // LEAQ -1(IC)(CX), SI
+ self.Emit("LEAQ" , jit.Ptr(_R9, 16), _DX) // LEAQ 16(R8), DX
+ self.Emit("LEAQ" , _VAR_ss_Ep, _CX) // LEAQ ss.Ep, CX
+ self.Emit("XORL" , _R8, _R8) // XORL R8, R8
+ self.Emit("BTQ" , jit.Imm(_F_disable_urc), _VAR_df) // BTQ ${_F_disable_urc}, fv
+ self.Emit("SETCC", _R8) // SETCC R8
+ self.Emit("SHLQ" , jit.Imm(types.B_UNICODE_REPLACE), _R8) // SHLQ ${types.B_UNICODE_REPLACE}, R8
+
+ /* unquote the string, with R9 preserved */
+ self.Emit("MOVQ", _R9, _VAR_R9) // SAVE R9
+ self.call_c(_F_unquote) // CALL unquote
+ self.Emit("MOVQ", _VAR_R9, _R9) // LOAD R9
+
+ /* check for errors */
+ self.Emit("TESTQ", _AX, _AX) // TESTQ AX, AX
+ self.Sjmp("JS" , "_unquote_error") // JS _unquote_error
+ self.Emit("MOVL" , jit.Imm(1), _DX) // MOVL $1, DX
+ self.Emit("LEAQ" , jit.Ptr(_R9, 16), _R8) // ADDQ $16, R8
+ self.Emit("MOVQ" , _R8, jit.Ptr(_R9, 0)) // MOVQ R8, (R9)
+ self.Emit("MOVQ" , _AX, jit.Ptr(_R9, 8)) // MOVQ AX, 8(R9)
+ self.Sjmp("JMP" , "_noescape") // JMP _noescape
+
+ /** V_DOUBLE **/
+ self.Link("_decode_V_DOUBLE") // _decode_V_DOUBLE:
+ self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df
+ self.Sjmp("JC" , "_use_number") // JC _use_number
+ self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0
+ self.Sjmp("JMP" , "_use_float64") // JMP _use_float64
+
+ /** V_INTEGER **/
+ self.Link("_decode_V_INTEGER") // _decode_V_INTEGER:
+ self.Emit("BTQ" , jit.Imm(_F_use_number), _VAR_df) // BTQ _F_use_number, df
+ self.Sjmp("JC" , "_use_number") // JC _use_number
+ self.Emit("BTQ" , jit.Imm(_F_use_int64), _VAR_df) // BTQ _F_use_int64, df
+ self.Sjmp("JC" , "_use_int64") // JC _use_int64
+ //TODO: use ss.Dv directly
+ self.Emit("MOVSD", _VAR_ss_Dv, _X0) // MOVSD ss.Dv, X0
+
+ /* represent numbers as `float64` */
+ self.Link("_use_float64") // _use_float64:
+ self.Emit("MOVQ" , _X0, _AX) // MOVQ X0, AX
+ self.call_go(_F_convT64) // CALL_GO runtime.convT64
+ self.Emit("MOVQ" , _T_float64, _R8) // MOVQ _T_float64, R8
+ self.Emit("MOVQ" , _AX, _R9) // MOVQ AX, R9
+ self.Emit("MOVQ" , _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /* represent numbers as `json.Number` */
+ self.Link("_use_number") // _use_number
+ self.Emit("MOVQ", _VAR_ss_Ep, _AX) // MOVQ ss.Ep, AX
+ self.Emit("LEAQ", jit.Sib(_IP, _AX, 1, 0), _SI) // LEAQ (IP)(AX), SI
+ self.Emit("MOVQ", _IC, _CX) // MOVQ IC, CX
+ self.Emit("SUBQ", _AX, _CX) // SUBQ AX, CX
+ self.Emit("MOVQ", _SI, _AX) // MOVQ SI, AX
+ self.Emit("MOVQ", _CX, _BX) // MOVQ CX, BX
+ self.call_go(_F_convTstring) // CALL_GO runtime.convTstring
+ self.Emit("MOVQ", _T_number, _R8) // MOVQ _T_number, R8
+ self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9
+ self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /* represent numbers as `int64` */
+ self.Link("_use_int64") // _use_int64:
+ self.Emit("MOVQ", _VAR_ss_Iv, _AX) // MOVQ ss.Iv, AX
+ self.call_go(_F_convT64) // CALL_GO runtime.convT64
+ self.Emit("MOVQ", _T_int64, _R8) // MOVQ _T_int64, R8
+ self.Emit("MOVQ", _AX, _R9) // MOVQ AX, R9
+ self.Emit("MOVQ", _VAR_ss_Ep, _DI) // MOVQ ss.Ep, DI
+ self.Sjmp("JMP" , "_set_value") // JMP _set_value
+
+ /** V_KEY_SEP **/
+ self.Link("_decode_V_KEY_SEP") // _decode_V_KEY_SEP:
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("CMPQ", _AX, jit.Imm(_S_obj_delim)) // CMPQ AX, _S_obj_delim
+ self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char
+ self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX]
+ self.Emit("MOVQ", jit.Imm(_S_obj), jit.Sib(_ST, _CX, 8, _ST_Vt - 8)) // MOVQ _S_obj, ST.Vt[CX - 1]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /** V_ELEM_SEP **/
+ self.Link("_decode_V_ELEM_SEP") // _decode_V_ELEM_SEP:
+ self.Emit("MOVQ" , jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ" , jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("CMPQ" , _AX, jit.Imm(_S_arr))
+ self.Sjmp("JE" , "_array_sep") // JZ _next
+ self.Emit("CMPQ" , _AX, jit.Imm(_S_obj)) // CMPQ _AX, _S_arr
+ self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char
+ self.Emit("MOVQ" , jit.Imm(_S_obj_sep), jit.Sib(_ST, _CX, 8, _ST_Vt))
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /* arrays */
+ self.Link("_array_sep")
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _DX) // MOVQ 8(SI), DX
+ self.Emit("CMPQ", _DX, jit.Ptr(_SI, 16)) // CMPQ DX, 16(SI)
+ self.Sjmp("JAE" , "_array_more") // JAE _array_more
+
+ /* add a slot for the new element */
+ self.Link("_array_append") // _array_append:
+ self.Emit("ADDQ", jit.Imm(1), jit.Ptr(_SI, 8)) // ADDQ $1, 8(SI)
+ self.Emit("MOVQ", jit.Ptr(_SI, 0), _SI) // MOVQ (SI), SI
+ self.Emit("ADDQ", jit.Imm(1), _CX) // ADDQ $1, CX
+ self.Emit("CMPQ", _CX, jit.Imm(types.MAX_RECURSE)) // CMPQ CX, ${types.MAX_RECURSE}
+ self.Sjmp("JAE" , "_stack_overflow") // JA _stack_overflow
+ self.Emit("SHLQ", jit.Imm(1), _DX) // SHLQ $1, DX
+ self.Emit("LEAQ", jit.Sib(_SI, _DX, 8, 0), _SI) // LEAQ (SI)(DX*8), SI
+ self.Emit("MOVQ", _CX, jit.Ptr(_ST, _ST_Sp)) // MOVQ CX, ST.Sp
+ self.WriteRecNotAX(7 , _SI, jit.Sib(_ST, _CX, 8, _ST_Vp), false) // MOVQ SI, ST.Vp[CX]
+ self.Emit("MOVQ", jit.Imm(_S_val), jit.Sib(_ST, _CX, 8, _ST_Vt)) // MOVQ _S_val, ST.Vt[CX}
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /** V_ARRAY_END **/
+ self.Link("_decode_V_ARRAY_END") // _decode_V_ARRAY_END:
+ self.Emit("XORL", _DX, _DX) // XORL DX, DX
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("CMPQ", _AX, jit.Imm(_S_arr_0)) // CMPQ AX, _S_arr_0
+ self.Sjmp("JE" , "_first_item") // JE _first_item
+ self.Emit("CMPQ", _AX, jit.Imm(_S_arr)) // CMPQ AX, _S_arr
+ self.Sjmp("JNE" , "_invalid_char") // JNE _invalid_char
+ self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp
+ self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /* first element of an array */
+ self.Link("_first_item") // _first_item:
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("SUBQ", jit.Imm(2), jit.Ptr(_ST, _ST_Sp)) // SUBQ $2, ST.Sp
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp - 8), _SI) // MOVQ ST.Vp[CX - 1], SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI
+ self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp - 8)) // MOVQ DX, ST.Vp[CX - 1]
+ self.Emit("MOVQ", _DX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ DX, ST.Vp[CX]
+ self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI)
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /** V_OBJECT_END **/
+ self.Link("_decode_V_OBJECT_END") // _decode_V_OBJECT_END:
+ self.Emit("MOVL", jit.Imm(_S_omask_end), _DI) // MOVL _S_omask, DI
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vt), _AX) // MOVQ ST.Vt[CX], AX
+ self.Emit("BTQ" , _AX, _DI)
+ self.Sjmp("JNC" , "_invalid_char") // JNE _invalid_char
+ self.Emit("XORL", _AX, _AX) // XORL AX, AX
+ self.Emit("SUBQ", jit.Imm(1), jit.Ptr(_ST, _ST_Sp)) // SUBQ $1, ST.Sp
+ self.Emit("MOVQ", _AX, jit.Sib(_ST, _CX, 8, _ST_Vp)) // MOVQ AX, ST.Vp[CX]
+ self.Sjmp("JMP" , "_next") // JMP _next
+
+ /* return from decoder */
+ self.Link("_return") // _return:
+ self.Emit("XORL", _EP, _EP) // XORL EP, EP
+ self.Emit("MOVQ", _EP, jit.Ptr(_ST, _ST_Vp)) // MOVQ EP, ST.Vp[0]
+ self.Link("_epilogue") // _epilogue:
+ self.Emit("SUBQ", jit.Imm(_FsmOffset), _ST) // SUBQ _FsmOffset, _ST
+ self.Emit("MOVQ", jit.Ptr(_SP, _VD_offs), _BP) // MOVQ _VD_offs(SP), BP
+ self.Emit("ADDQ", jit.Imm(_VD_size), _SP) // ADDQ $_VD_size, SP
+ self.Emit("RET") // RET
+
+ /* array expand */
+ self.Link("_array_more") // _array_more:
+ self.Emit("MOVQ" , _T_eface, _AX) // MOVQ _T_eface, AX
+ self.Emit("MOVQ" , jit.Ptr(_SI, 0), _BX) // MOVQ (SI), BX
+ self.Emit("MOVQ" , jit.Ptr(_SI, 8), _CX) // MOVQ 8(SI), CX
+ self.Emit("MOVQ" , jit.Ptr(_SI, 16), _DI) // MOVQ 16(SI), DI
+ self.Emit("MOVQ" , _DI, _SI) // MOVQ DI, 24(SP)
+ self.Emit("SHLQ" , jit.Imm(1), _SI) // SHLQ $1, SI
+ self.call_go(_F_growslice) // CALL_GO runtime.growslice
+ self.Emit("MOVQ" , _AX, _DI) // MOVQ AX, DI
+ self.Emit("MOVQ" , _BX, _DX) // MOVQ BX, DX
+ self.Emit("MOVQ" , _CX, _AX) // MOVQ CX, AX
+
+ /* update the slice */
+ self.Emit("MOVQ", jit.Ptr(_ST, _ST_Sp), _CX) // MOVQ ST.Sp, CX
+ self.Emit("MOVQ", jit.Sib(_ST, _CX, 8, _ST_Vp), _SI) // MOVQ ST.Vp[CX], SI
+ self.Emit("MOVQ", jit.Ptr(_SI, 8), _SI) // MOVQ 8(SI), SI
+ self.Emit("MOVQ", _DX, jit.Ptr(_SI, 8)) // MOVQ DX, 8(SI)
+ self.Emit("MOVQ", _AX, jit.Ptr(_SI, 16)) // MOVQ AX, 16(AX)
+ self.WriteRecNotAX(8 , _DI, jit.Ptr(_SI, 0), false) // MOVQ R10, (SI)
+ self.Sjmp("JMP" , "_array_append") // JMP _array_append
+
+ /* copy string */
+ self.Link("copy_string") // pointer: R8, length: AX, return addr: DI
+ self.Emit("MOVQ", _R8, _VAR_cs_p)
+ self.Emit("MOVQ", _AX, _VAR_cs_n)
+ self.Emit("MOVQ", _DI, _VAR_cs_LR)
+ self.Emit("MOVQ", _AX, _BX)
+ self.Emit("MOVQ", _AX, _CX)
+ self.Emit("MOVQ", _T_byte, _AX)
+ self.call_go(_F_makeslice)
+ self.Emit("MOVQ", _AX, _VAR_cs_d)
+ self.Emit("MOVQ", _VAR_cs_p, _BX)
+ self.Emit("MOVQ", _VAR_cs_n, _CX)
+ self.call_go(_F_memmove)
+ self.Emit("MOVQ", _VAR_cs_d, _R8)
+ self.Emit("MOVQ", _VAR_cs_n, _AX)
+ self.Emit("MOVQ", _VAR_cs_LR, _DI)
+ self.Rjmp("JMP", _DI)
+
+ /* error handlers */
+ self.Link("_stack_overflow")
+ self.Emit("MOVL" , _E_recurse, _EP) // MOVQ _E_recurse, EP
+ self.Sjmp("JMP" , "_error") // JMP _error
+ self.Link("_vtype_error") // _vtype_error:
+ self.Emit("MOVQ" , _DI, _IC) // MOVQ DI, IC
+ self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP
+ self.Sjmp("JMP" , "_error") // JMP _error
+ self.Link("_invalid_char") // _invalid_char:
+ self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC
+ self.Emit("MOVL" , _E_invalid, _EP) // MOVL _E_invalid, EP
+ self.Sjmp("JMP" , "_error") // JMP _error
+ self.Link("_unquote_error") // _unquote_error:
+ self.Emit("MOVQ" , _VAR_ss_Iv, _IC) // MOVQ ss.Iv, IC
+ self.Emit("SUBQ" , jit.Imm(1), _IC) // SUBQ $1, IC
+ self.Link("_parsing_error") // _parsing_error:
+ self.Emit("NEGQ" , _AX) // NEGQ AX
+ self.Emit("MOVQ" , _AX, _EP) // MOVQ AX, EP
+ self.Link("_error") // _error:
+ self.Emit("PXOR" , _X0, _X0) // PXOR X0, X0
+ self.Emit("MOVOU", _X0, jit.Ptr(_VP, 0)) // MOVOU X0, (VP)
+ self.Sjmp("JMP" , "_epilogue") // JMP _epilogue
+
+ /* invalid value type, never returns */
+ self.Link("_invalid_vtype")
+ self.call_go(_F_invalid_vtype)              // CALL invalid_vtype
+ self.Emit("UD2") // UD2
+
+ /* switch jump table */
+ self.Link("_switch_table") // _switch_table:
+ self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0
+ self.Sref("_decode_V_NULL", -4) // SREF &_decode_V_NULL, $-4
+ self.Sref("_decode_V_TRUE", -8) // SREF &_decode_V_TRUE, $-8
+ self.Sref("_decode_V_FALSE", -12) // SREF &_decode_V_FALSE, $-12
+ self.Sref("_decode_V_ARRAY", -16) // SREF &_decode_V_ARRAY, $-16
+ self.Sref("_decode_V_OBJECT", -20) // SREF &_decode_V_OBJECT, $-20
+ self.Sref("_decode_V_STRING", -24) // SREF &_decode_V_STRING, $-24
+ self.Sref("_decode_V_DOUBLE", -28) // SREF &_decode_V_DOUBLE, $-28
+ self.Sref("_decode_V_INTEGER", -32) // SREF &_decode_V_INTEGER, $-32
+ self.Sref("_decode_V_KEY_SEP", -36) // SREF &_decode_V_KEY_SEP, $-36
+ self.Sref("_decode_V_ELEM_SEP", -40) // SREF &_decode_V_ELEM_SEP, $-40
+ self.Sref("_decode_V_ARRAY_END", -44) // SREF &_decode_V_ARRAY_END, $-44
+ self.Sref("_decode_V_OBJECT_END", -48) // SREF &_decode_V_OBJECT_END, $-48
+
+ /* fast character lookup table */
+ self.Link("_decode_tab") // _decode_tab:
+ self.Sref("_decode_V_EOF", 0) // SREF &_decode_V_EOF, $0
+
+ /* generate rest of the tabs */
+ for i := 1; i < 256; i++ {
+ if to, ok := _R_tab[i]; ok {
+ self.Sref(to, -int64(i) * 4)
+ } else {
+ self.Byte(0x00, 0x00, 0x00, 0x00)
+ }
+ }
+}
+
+func (self *_ValueDecoder) WritePtrAX(i int, rec obj.Addr, saveDI bool) {
+ self.Emit("MOVQ", _V_writeBarrier, _R9)
+ self.Emit("CMPL", jit.Ptr(_R9, 0), jit.Imm(0))
+ self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ if saveDI {
+ self.save(_DI)
+ }
+ self.Emit("LEAQ", rec, _DI)
+ self.call(_F_gcWriteBarrierAX)
+ if saveDI {
+ self.load(_DI)
+ }
+ self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", _AX, rec)
+ self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+}
+
+func (self *_ValueDecoder) WriteRecNotAX(i int, ptr obj.Addr, rec obj.Addr, saveDI bool) {
+ if rec.Reg == x86.REG_AX || rec.Index == x86.REG_AX {
+ panic("rec contains AX!")
+ }
+ self.Emit("MOVQ", _V_writeBarrier, _AX)
+ self.Emit("CMPL", jit.Ptr(_AX, 0), jit.Imm(0))
+ self.Sjmp("JE", "_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", ptr, _AX)
+ if saveDI {
+ self.save(_DI)
+ }
+ self.Emit("LEAQ", rec, _DI)
+ self.call(_F_gcWriteBarrierAX)
+ if saveDI {
+ self.load(_DI)
+ }
+ self.Sjmp("JMP", "_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Link("_no_writeBarrier" + strconv.Itoa(i) + "_{n}")
+ self.Emit("MOVQ", ptr, rec)
+ self.Link("_end_writeBarrier" + strconv.Itoa(i) + "_{n}")
+}
+
+/** Generic Decoder **/
+
+var (
+ _subr_decode_value = new(_ValueDecoder).build()
+)
+
+//go:nosplit
+func invalid_vtype(vt types.ValueType) {
+ throw(fmt.Sprintf("invalid value type: %d", vt))
+}
diff --git a/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117_test.s b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117_test.s
new file mode 100644
index 000000000..6c2686de9
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_go117_test.s
@@ -0,0 +1,37 @@
+// +build go1.17,!go1.21
+
+//
+// Copyright 2021 ByteDance Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "go_asm.h"
+#include "funcdata.h"
+#include "textflag.h"
+
+TEXT ·decodeValueStub(SB), NOSPLIT, $0 - 72
+ NO_LOCAL_POINTERS
+ PXOR X0, X0
+ MOVOU X0, rv+48(FP)
+ MOVQ st+0(FP) , R13
+ MOVQ sp+8(FP) , R10
+ MOVQ sn+16(FP), R12
+ MOVQ ic+24(FP), R11
+ MOVQ vp+32(FP), R15
+ MOVQ df+40(FP), AX
+ MOVQ ·_subr_decode_value(SB), BX
+ CALL BX
+ MOVQ R11, rp+48(FP)
+ MOVQ BX, ex+56(FP)
+ RET
diff --git a/vendor/github.com/bytedance/sonic/decoder/generic_amd64_test.s b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_test.s
new file mode 100644
index 000000000..36cb1f5f3
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/generic_amd64_test.s
@@ -0,0 +1,37 @@
+// +build go1.15,!go1.17
+
+//
+// Copyright 2021 ByteDance Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include "go_asm.h"
+#include "funcdata.h"
+#include "textflag.h"
+
+TEXT ·decodeValueStub(SB), NOSPLIT, $0 - 72
+ NO_LOCAL_POINTERS
+ PXOR X0, X0
+ MOVOU X0, rv+48(FP)
+ MOVQ st+0(FP), BX
+ MOVQ sp+8(FP), R12
+ MOVQ sn+16(FP), R13
+ MOVQ ic+24(FP), R14
+ MOVQ vp+32(FP), R15
+ MOVQ df+40(FP), R10
+ MOVQ ·_subr_decode_value(SB), AX
+ CALL AX
+ MOVQ R14, rp+48(FP)
+ MOVQ R11, ex+56(FP)
+ RET
diff --git a/vendor/github.com/bytedance/sonic/decoder/pools.go b/vendor/github.com/bytedance/sonic/decoder/pools.go
new file mode 100644
index 000000000..ab1e5f23c
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/pools.go
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `sync`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/caching`
+ `github.com/bytedance/sonic/internal/native/types`
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+const (
+ _MinSlice = 16
+ _MaxStack = 4096 // 4k slots
+ _MaxStackBytes = _MaxStack * _PtrBytes
+ _MaxDigitNums = 800 // used in atof fallback algorithm
+)
+
+const (
+ _PtrBytes = _PTR_SIZE / 8
+ _FsmOffset = (_MaxStack + 1) * _PtrBytes
+ _DbufOffset = _FsmOffset + int64(unsafe.Sizeof(types.StateMachine{})) + types.MAX_RECURSE * _PtrBytes
+ _StackSize = unsafe.Sizeof(_Stack{})
+)
+
+var (
+ stackPool = sync.Pool{}
+ valueCache = []unsafe.Pointer(nil)
+ fieldCache = []*caching.FieldMap(nil)
+ fieldCacheMux = sync.Mutex{}
+ programCache = caching.CreateProgramCache()
+)
+
+type _Stack struct {
+ sp uintptr
+ sb [_MaxStack]unsafe.Pointer
+ mm types.StateMachine
+ vp [types.MAX_RECURSE]unsafe.Pointer
+ dp [_MaxDigitNums]byte
+}
+
+type _Decoder func(
+ s string,
+ i int,
+ vp unsafe.Pointer,
+ sb *_Stack,
+ fv uint64,
+ sv string, // DO NOT pass a value for this argument, since it is only used for the local _VAR_sv
+ vk unsafe.Pointer, // DO NOT pass a value for this argument, since it is only used for the local _VAR_vk
+) (int, error)
+
+var _KeepAlive struct {
+ s string
+ i int
+ vp unsafe.Pointer
+ sb *_Stack
+ fv uint64
+ sv string
+ vk unsafe.Pointer
+
+ ret int
+ err error
+
+ frame_decoder [_FP_offs]byte
+ frame_generic [_VD_offs]byte
+}
+
+var (
+ argPtrs = []bool{true, false, false, true, true, false, true, false, true}
+ localPtrs = []bool{}
+)
+
+var (
+ argPtrs_generic = []bool{true}
+ localPtrs_generic = []bool{}
+)
+
+func newStack() *_Stack {
+ if ret := stackPool.Get(); ret == nil {
+ return new(_Stack)
+ } else {
+ return ret.(*_Stack)
+ }
+}
+
+func resetStack(p *_Stack) {
+ memclrNoHeapPointers(unsafe.Pointer(p), _StackSize)
+}
+
+func freeStack(p *_Stack) {
+ p.sp = 0
+ stackPool.Put(p)
+}
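+
+// A typical lifecycle for the pooled stack (a hedged sketch of the intended
+// pattern; the actual call sites live elsewhere in this package) is to obtain
+// a stack, run a single decode pass with it, then return it to the pool:
+//
+//     sb := newStack()
+//     ret, err := decodeTypedPointer(s, i, vt, vp, sb, fv)
+//     freeStack(sb)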
+
+func freezeValue(v unsafe.Pointer) uintptr {
+ valueCache = append(valueCache, v)
+ return uintptr(v)
+}
+
+func freezeFields(v *caching.FieldMap) int64 {
+ fieldCacheMux.Lock()
+ fieldCache = append(fieldCache, v)
+ fieldCacheMux.Unlock()
+ return referenceFields(v)
+}
+
+func referenceFields(v *caching.FieldMap) int64 {
+ return int64(uintptr(unsafe.Pointer(v)))
+}
+
+func makeDecoder(vt *rt.GoType, _ ...interface{}) (interface{}, error) {
+ if pp, err := newCompiler().compile(vt.Pack()); err != nil {
+ return nil, err
+ } else {
+ return newAssembler(pp).Load(), nil
+ }
+}
+
+func findOrCompile(vt *rt.GoType) (_Decoder, error) {
+ if val := programCache.Get(vt); val != nil {
+ return val.(_Decoder), nil
+ } else if ret, err := programCache.Compute(vt, makeDecoder); err == nil {
+ return ret.(_Decoder), nil
+ } else {
+ return nil, err
+ }
+} \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/decoder/primitives.go b/vendor/github.com/bytedance/sonic/decoder/primitives.go
new file mode 100644
index 000000000..d6053e2cb
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/primitives.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `encoding`
+ `encoding/json`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/native`
+ `github.com/bytedance/sonic/internal/rt`
+)
+
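+// decodeTypedPointer looks up (compiling and caching on first use) the JIT'd
+// decoder for type vt, then invokes it on s starting at offset i, writing the
+// decoded value through vp and using sb as scratch stack space.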
+func decodeTypedPointer(s string, i int, vt *rt.GoType, vp unsafe.Pointer, sb *_Stack, fv uint64) (int, error) {
+ if fn, err := findOrCompile(vt); err != nil {
+ return 0, err
+ } else {
+ rt.MoreStack(_FP_size + _VD_size + native.MaxFrameSize)
+ rt.StopProf()
+ ret, err := fn(s, i, vp, sb, fv, "", nil)
+ rt.StartProf()
+ return ret, err
+ }
+}
+
+func decodeJsonUnmarshaler(vv interface{}, s string) error {
+ return vv.(json.Unmarshaler).UnmarshalJSON(rt.Str2Mem(s))
+}
+
+func decodeTextUnmarshaler(vv interface{}, s string) error {
+ return vv.(encoding.TextUnmarshaler).UnmarshalText(rt.Str2Mem(s))
+}
diff --git a/vendor/github.com/bytedance/sonic/decoder/stream.go b/vendor/github.com/bytedance/sonic/decoder/stream.go
new file mode 100644
index 000000000..06dc8185b
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/stream.go
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `bytes`
+ `io`
+ `sync`
+
+ `github.com/bytedance/sonic/internal/native/types`
+)
+
+var (
+ defaultBufferSize uint = 4096
+ growSliceFactorShift uint = 1
+ minLeftBufferShift uint = 2
+)
+
+type StreamDecoder struct {
+ r io.Reader
+ buf []byte
+ scanp int
+ scanned int64
+ err error
+ Decoder
+}
+
+var bufPool = sync.Pool{
+ New: func () interface{} {
+ return make([]byte, 0, defaultBufferSize)
+ },
+}
+
+// NewStreamDecoder adapts to encoding/json.NewDecoder API.
+//
+// NewStreamDecoder returns a new decoder that reads from r.
+func NewStreamDecoder(r io.Reader) *StreamDecoder {
+ return &StreamDecoder{r : r}
+}
+
+// Decode decodes the next value from the input stream into val.
+// Redundant bytes may be read and left in the buffer; they will be used by the next call.
+// Any I/O error from the underlying io.Reader (except io.EOF),
+// or any syntax error in the data, is recorded and stops subsequent decoding.
+func (self *StreamDecoder) Decode(val interface{}) (err error) {
+ if self.err != nil {
+ return self.err
+ }
+
+ var buf = self.buf[self.scanp:]
+ var p = 0
+ var recycle bool
+ if cap(buf) == 0 {
+ buf = bufPool.Get().([]byte)
+ recycle = true
+ }
+
+ var first = true
+ var repeat = true
+read_more:
+ for {
+ l := len(buf)
+ realloc(&buf)
+ n, err := self.r.Read(buf[l:cap(buf)])
+ buf = buf[:l+n]
+ if err != nil {
+ repeat = false
+ if err == io.EOF {
+ if len(buf) == 0 {
+ return err
+ }
+ break
+ }
+ self.err = err
+ return err
+ }
+ if n > 0 || first {
+ break
+ }
+ }
+ first = false
+
+ l := len(buf)
+ if l > 0 {
+ self.Decoder.Reset(string(buf))
+ err = self.Decoder.Decode(val)
+ if err != nil {
+ if repeat && self.repeatable(err) {
+ goto read_more
+ }
+ self.err = err
+ }
+
+ p = self.Decoder.Pos()
+ self.scanned += int64(p)
+ self.scanp = 0
+ }
+
+ if l > p {
+ // undecoded bytes remain, so copy them into self.buf
+ self.buf = append(self.buf[:0], buf[p:]...)
+ } else {
+ self.buf = nil
+ recycle = true
+ }
+
+ if recycle {
+ buf = buf[:0]
+ bufPool.Put(buf)
+ }
+ return err
+}
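+
+// exampleDrainStream is a minimal usage sketch (illustrative only, not part of
+// the upstream sonic API): it decodes a whole stream of consecutive JSON
+// values through the StreamDecoder defined above.
+func exampleDrainStream(r io.Reader) ([]interface{}, error) {
+    var out []interface{}
+    dec := NewStreamDecoder(r)
+    for dec.More() {
+        var v interface{}
+        if err := dec.Decode(&v); err != nil {
+            return out, err
+        }
+        out = append(out, v)
+    }
+    return out, nil
+}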
+
+func (self StreamDecoder) repeatable(err error) bool {
+ if ee, ok := err.(SyntaxError); ok &&
+ (ee.Code == types.ERR_EOF || (ee.Code == types.ERR_INVALID_CHAR && self.i >= len(self.s)-1)) {
+ return true
+ }
+ return false
+}
+
+// InputOffset returns the input stream byte offset of the current decoder position.
+// The offset gives the location of the end of the most recently returned token and the beginning of the next token.
+func (self *StreamDecoder) InputOffset() int64 {
+ return self.scanned + int64(self.scanp)
+}
+
+// Buffered returns a reader of the data remaining in the Decoder's buffer.
+// The reader is valid until the next call to Decode.
+func (self *StreamDecoder) Buffered() io.Reader {
+ return bytes.NewReader(self.buf[self.scanp:])
+}
+
+// More reports whether there is another element in the
+// current array or object being parsed.
+func (self *StreamDecoder) More() bool {
+ if self.err != nil {
+ return false
+ }
+ c, err := self.peek()
+ return err == nil && c != ']' && c != '}'
+}
+
+func (self *StreamDecoder) peek() (byte, error) {
+ var err error
+ for {
+ for i := self.scanp; i < len(self.buf); i++ {
+ c := self.buf[i]
+ if isSpace(c) {
+ continue
+ }
+ self.scanp = i
+ return c, nil
+ }
+ // buffer has been scanned, now report any error
+ if err != nil {
+ if err != io.EOF {
+ self.err = err
+ }
+ return 0, err
+ }
+ err = self.refill()
+ }
+}
+
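+// isSpace reports whether c is a JSON whitespace byte, by testing it against
+// the precomputed types.SPACE_MASK bitmask.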
+func isSpace(c byte) bool {
+ return types.SPACE_MASK & (1 << c) != 0
+}
+
+func (self *StreamDecoder) refill() error {
+ // Make room to read more into the buffer.
+ // First slide down data already consumed.
+ if self.scanp > 0 {
+ self.scanned += int64(self.scanp)
+ n := copy(self.buf, self.buf[self.scanp:])
+ self.buf = self.buf[:n]
+ self.scanp = 0
+ }
+
+ // Grow buffer if not large enough.
+ realloc(&self.buf)
+
+ // Read. Delay error for next iteration (after scan).
+ n, err := self.r.Read(self.buf[len(self.buf):cap(self.buf)])
+ self.buf = self.buf[0 : len(self.buf)+n]
+
+ return err
+}
+
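+// realloc grows buf once the spare capacity has dropped to cap/4 or less,
+// reserving roughly 1.25x the current length (at least defaultBufferSize) and
+// copying the existing contents into the new backing array.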
+func realloc(buf *[]byte) {
+ l := uint(len(*buf))
+ c := uint(cap(*buf))
+ if c - l <= c >> minLeftBufferShift {
+ e := l+(l>>minLeftBufferShift)
+ if e < defaultBufferSize {
+ e = defaultBufferSize
+ }
+ tmp := make([]byte, l, e)
+ copy(tmp, *buf)
+ *buf = tmp
+ }
+}
+
diff --git a/vendor/github.com/bytedance/sonic/decoder/stubs_go115.go b/vendor/github.com/bytedance/sonic/decoder/stubs_go115.go
new file mode 100644
index 000000000..1a0917c3c
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/stubs_go115.go
@@ -0,0 +1,111 @@
+// +build go1.15,!go1.20
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `unsafe`
+ `reflect`
+
+ _ `github.com/chenzhuoyu/base64x`
+
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+//go:linkname _subr__b64decode github.com/chenzhuoyu/base64x._subr__b64decode
+var _subr__b64decode uintptr
+
+// runtime.maxElementSize
+const _max_map_element_size uintptr = 128
+
+func mapfast(vt reflect.Type) bool {
+ return vt.Elem().Size() <= _max_map_element_size
+}
+
+//go:nosplit
+//go:linkname throw runtime.throw
+//goland:noinspection GoUnusedParameter
+func throw(s string)
+
+//go:linkname convT64 runtime.convT64
+//goland:noinspection GoUnusedParameter
+func convT64(v uint64) unsafe.Pointer
+
+//go:linkname convTslice runtime.convTslice
+//goland:noinspection GoUnusedParameter
+func convTslice(v []byte) unsafe.Pointer
+
+//go:linkname convTstring runtime.convTstring
+//goland:noinspection GoUnusedParameter
+func convTstring(v string) unsafe.Pointer
+
+//go:noescape
+//go:linkname memequal runtime.memequal
+//goland:noinspection GoUnusedParameter
+func memequal(a unsafe.Pointer, b unsafe.Pointer, size uintptr) bool
+
+//go:noescape
+//go:linkname memmove runtime.memmove
+//goland:noinspection GoUnusedParameter
+func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
+
+//go:linkname mallocgc runtime.mallocgc
+//goland:noinspection GoUnusedParameter
+func mallocgc(size uintptr, typ *rt.GoType, needzero bool) unsafe.Pointer
+
+//go:linkname makeslice runtime.makeslice
+//goland:noinspection GoUnusedParameter
+func makeslice(et *rt.GoType, len int, cap int) unsafe.Pointer
+
+//go:noescape
+//go:linkname growslice runtime.growslice
+//goland:noinspection GoUnusedParameter
+func growslice(et *rt.GoType, old rt.GoSlice, cap int) rt.GoSlice
+
+//go:linkname makemap_small runtime.makemap_small
+func makemap_small() unsafe.Pointer
+
+//go:linkname mapassign runtime.mapassign
+//goland:noinspection GoUnusedParameter
+func mapassign(t *rt.GoType, h unsafe.Pointer, k unsafe.Pointer) unsafe.Pointer
+
+//go:linkname mapassign_fast32 runtime.mapassign_fast32
+//goland:noinspection GoUnusedParameter
+func mapassign_fast32(t *rt.GoType, h unsafe.Pointer, k uint32) unsafe.Pointer
+
+//go:linkname mapassign_fast64 runtime.mapassign_fast64
+//goland:noinspection GoUnusedParameter
+func mapassign_fast64(t *rt.GoType, h unsafe.Pointer, k uint64) unsafe.Pointer
+
+//go:linkname mapassign_fast64ptr runtime.mapassign_fast64ptr
+//goland:noinspection GoUnusedParameter
+func mapassign_fast64ptr(t *rt.GoType, h unsafe.Pointer, k unsafe.Pointer) unsafe.Pointer
+
+//go:linkname mapassign_faststr runtime.mapassign_faststr
+//goland:noinspection GoUnusedParameter
+func mapassign_faststr(t *rt.GoType, h unsafe.Pointer, s string) unsafe.Pointer
+
+//go:nosplit
+//go:linkname memclrHasPointers runtime.memclrHasPointers
+//goland:noinspection GoUnusedParameter
+func memclrHasPointers(ptr unsafe.Pointer, n uintptr)
+
+//go:noescape
+//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers
+//goland:noinspection GoUnusedParameter
+func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/decoder/stubs_go120.go b/vendor/github.com/bytedance/sonic/decoder/stubs_go120.go
new file mode 100644
index 000000000..cde6a1972
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/stubs_go120.go
@@ -0,0 +1,111 @@
+// +build go1.20
+
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `unsafe`
+ `reflect`
+
+ _ `github.com/chenzhuoyu/base64x`
+
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+//go:linkname _subr__b64decode github.com/chenzhuoyu/base64x._subr__b64decode
+var _subr__b64decode uintptr
+
+// runtime.maxElementSize
+const _max_map_element_size uintptr = 128
+
+func mapfast(vt reflect.Type) bool {
+ return vt.Elem().Size() <= _max_map_element_size
+}
+
+//go:nosplit
+//go:linkname throw runtime.throw
+//goland:noinspection GoUnusedParameter
+func throw(s string)
+
+//go:linkname convT64 runtime.convT64
+//goland:noinspection GoUnusedParameter
+func convT64(v uint64) unsafe.Pointer
+
+//go:linkname convTslice runtime.convTslice
+//goland:noinspection GoUnusedParameter
+func convTslice(v []byte) unsafe.Pointer
+
+//go:linkname convTstring runtime.convTstring
+//goland:noinspection GoUnusedParameter
+func convTstring(v string) unsafe.Pointer
+
+//go:noescape
+//go:linkname memequal runtime.memequal
+//goland:noinspection GoUnusedParameter
+func memequal(a unsafe.Pointer, b unsafe.Pointer, size uintptr) bool
+
+//go:noescape
+//go:linkname memmove runtime.memmove
+//goland:noinspection GoUnusedParameter
+func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
+
+//go:linkname mallocgc runtime.mallocgc
+//goland:noinspection GoUnusedParameter
+func mallocgc(size uintptr, typ *rt.GoType, needzero bool) unsafe.Pointer
+
+//go:linkname makeslice runtime.makeslice
+//goland:noinspection GoUnusedParameter
+func makeslice(et *rt.GoType, len int, cap int) unsafe.Pointer
+
+//go:noescape
+//go:linkname growslice reflect.growslice
+//goland:noinspection GoUnusedParameter
+func growslice(et *rt.GoType, old rt.GoSlice, cap int) rt.GoSlice
+
+//go:linkname makemap_small runtime.makemap_small
+func makemap_small() unsafe.Pointer
+
+//go:linkname mapassign runtime.mapassign
+//goland:noinspection GoUnusedParameter
+func mapassign(t *rt.GoType, h unsafe.Pointer, k unsafe.Pointer) unsafe.Pointer
+
+//go:linkname mapassign_fast32 runtime.mapassign_fast32
+//goland:noinspection GoUnusedParameter
+func mapassign_fast32(t *rt.GoType, h unsafe.Pointer, k uint32) unsafe.Pointer
+
+//go:linkname mapassign_fast64 runtime.mapassign_fast64
+//goland:noinspection GoUnusedParameter
+func mapassign_fast64(t *rt.GoType, h unsafe.Pointer, k uint64) unsafe.Pointer
+
+//go:linkname mapassign_fast64ptr runtime.mapassign_fast64ptr
+//goland:noinspection GoUnusedParameter
+func mapassign_fast64ptr(t *rt.GoType, h unsafe.Pointer, k unsafe.Pointer) unsafe.Pointer
+
+//go:linkname mapassign_faststr runtime.mapassign_faststr
+//goland:noinspection GoUnusedParameter
+func mapassign_faststr(t *rt.GoType, h unsafe.Pointer, s string) unsafe.Pointer
+
+//go:nosplit
+//go:linkname memclrHasPointers runtime.memclrHasPointers
+//goland:noinspection GoUnusedParameter
+func memclrHasPointers(ptr unsafe.Pointer, n uintptr)
+
+//go:noescape
+//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers
+//goland:noinspection GoUnusedParameter
+func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) \ No newline at end of file
diff --git a/vendor/github.com/bytedance/sonic/decoder/types.go b/vendor/github.com/bytedance/sonic/decoder/types.go
new file mode 100644
index 000000000..6fc0e706c
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/types.go
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `encoding`
+ `encoding/base64`
+ `encoding/json`
+ `reflect`
+ `unsafe`
+
+ `github.com/bytedance/sonic/internal/rt`
+)
+
+var (
+ byteType = reflect.TypeOf(byte(0))
+ intType = reflect.TypeOf(int(0))
+ int8Type = reflect.TypeOf(int8(0))
+ int16Type = reflect.TypeOf(int16(0))
+ int32Type = reflect.TypeOf(int32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uintType = reflect.TypeOf(uint(0))
+ uint8Type = reflect.TypeOf(uint8(0))
+ uint16Type = reflect.TypeOf(uint16(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float32Type = reflect.TypeOf(float32(0))
+ float64Type = reflect.TypeOf(float64(0))
+ stringType = reflect.TypeOf("")
+ bytesType = reflect.TypeOf([]byte(nil))
+ jsonNumberType = reflect.TypeOf(json.Number(""))
+ base64CorruptInputError = reflect.TypeOf(base64.CorruptInputError(0))
+)
+
+var (
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+ jsonUnmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
+ encodingTextUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+func rtype(t reflect.Type) (*rt.GoItab, *rt.GoType) {
+ p := (*rt.GoIface)(unsafe.Pointer(&t))
+ return p.Itab, (*rt.GoType)(p.Value)
+}
diff --git a/vendor/github.com/bytedance/sonic/decoder/utils.go b/vendor/github.com/bytedance/sonic/decoder/utils.go
new file mode 100644
index 000000000..23ee5d501
--- /dev/null
+++ b/vendor/github.com/bytedance/sonic/decoder/utils.go
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2021 ByteDance Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package decoder
+
+import (
+ `unsafe`
+
+ `github.com/bytedance/sonic/loader`
+)
+
+//go:nosplit
+func pbool(v bool) uintptr {
+ return freezeValue(unsafe.Pointer(&v))
+}
+
+//go:nosplit
+func ptodec(p loader.Function) _Decoder {
+ return *(*_Decoder)(unsafe.Pointer(&p))
+}
+
+func assert_eq(v int64, exp int64, msg string) {
+ if v != exp {
+ panic(msg)
+ }
+}