Diffstat (limited to 'vendor/github.com/tetratelabs/wazero/internal/engine/interpreter')
-rw-r--r--  vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/compiler.go     3634
-rw-r--r--  vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/format.go         22
-rw-r--r--  vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/interpreter.go   4583
-rw-r--r--  vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/operations.go    2812
-rw-r--r--  vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/signature.go      767
5 files changed, 11818 insertions, 0 deletions
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/compiler.go b/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/compiler.go
new file mode 100644
index 000000000..56dfac620
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/compiler.go
@@ -0,0 +1,3634 @@
+package interpreter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strings"
+
+ "github.com/tetratelabs/wazero/api"
+ "github.com/tetratelabs/wazero/internal/leb128"
+ "github.com/tetratelabs/wazero/internal/wasm"
+)
+
+type controlFrameKind byte
+
+const (
+ controlFrameKindBlockWithContinuationLabel controlFrameKind = iota
+ controlFrameKindBlockWithoutContinuationLabel
+ controlFrameKindFunction
+ controlFrameKindLoop
+ controlFrameKindIfWithElse
+ controlFrameKindIfWithoutElse
+)
+
+type (
+ controlFrame struct {
+ frameID uint32
+ // originalStackLenWithoutParam holds the number of values on the stack
+ // when we start executing this control frame, minus the params for the block.
+ originalStackLenWithoutParam int
+ blockType *wasm.FunctionType
+ kind controlFrameKind
+ }
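+ // controlFrames is a stack of controlFrame values; the innermost active frame
+ // sits at the end of the slice.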
+ controlFrames struct{ frames []controlFrame }
+)
+
+func (c *controlFrame) ensureContinuation() {
+ // If the frame is a block without a continuation label,
+ // change the kind so we can emit the continuation block
+ // later when we reach the End instruction of this frame.
+ if c.kind == controlFrameKindBlockWithoutContinuationLabel {
+ c.kind = controlFrameKindBlockWithContinuationLabel
+ }
+}
+
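+// asLabel returns the label a branch targeting this frame should jump to:
+// the header label for loops, the continuation label for blocks and ifs,
+// and the return label for the function frame itself.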
+func (c *controlFrame) asLabel() label {
+ switch c.kind {
+ case controlFrameKindBlockWithContinuationLabel,
+ controlFrameKindBlockWithoutContinuationLabel:
+ return newLabel(labelKindContinuation, c.frameID)
+ case controlFrameKindLoop:
+ return newLabel(labelKindHeader, c.frameID)
+ case controlFrameKindFunction:
+ return newLabel(labelKindReturn, 0)
+ case controlFrameKindIfWithElse,
+ controlFrameKindIfWithoutElse:
+ return newLabel(labelKindContinuation, c.frameID)
+ }
+ panic(fmt.Sprintf("unreachable: a bug in interpreterir implementation: %v", c.kind))
+}
+
+func (c *controlFrames) functionFrame() *controlFrame {
+ // No need to check the stack bound, as we can assume that
+ // all the operations are valid thanks to validateFunction
+ // at the module validation phase.
+ return &c.frames[0]
+}
+
+func (c *controlFrames) get(n int) *controlFrame {
+ // No need to check the stack bound, as we can assume that
+ // all the operations are valid thanks to validateFunction
+ // at the module validation phase.
+ return &c.frames[len(c.frames)-n-1]
+}
+
+func (c *controlFrames) top() *controlFrame {
+ // No need to check the stack bound, as we can assume that
+ // all the operations are valid thanks to validateFunction
+ // at the module validation phase.
+ return &c.frames[len(c.frames)-1]
+}
+
+func (c *controlFrames) empty() bool {
+ return len(c.frames) == 0
+}
+
+func (c *controlFrames) pop() (frame *controlFrame) {
+ // No need to check the stack bound, as we can assume that
+ // all the operations are valid thanks to validateFunction
+ // at the module validation phase.
+ frame = c.top()
+ c.frames = c.frames[:len(c.frames)-1]
+ return
+}
+
+func (c *controlFrames) push(frame controlFrame) {
+ c.frames = append(c.frames, frame)
+}
+
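+// initializeStack lays out the value stack for the target function in uint64 cells:
+// function params first, then (when a call frame is in use) slots reserved for results
+// that do not fit within the param area, the call frame itself, and finally the declared
+// locals. localIndexToStackHeightInUint64 records where each local (params included)
+// lives, with v128 values occupying two cells.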
+func (c *compiler) initializeStack() {
+ // Reuse the existing slice.
+ c.localIndexToStackHeightInUint64 = c.localIndexToStackHeightInUint64[:0]
+ var current int
+ for _, lt := range c.sig.Params {
+ c.localIndexToStackHeightInUint64 = append(c.localIndexToStackHeightInUint64, current)
+ if lt == wasm.ValueTypeV128 {
+ current++
+ }
+ current++
+ }
+
+ if c.callFrameStackSizeInUint64 > 0 {
+ // We reserve the stack slots for result values below the return call frame slots.
+ if diff := c.sig.ResultNumInUint64 - c.sig.ParamNumInUint64; diff > 0 {
+ current += diff
+ }
+ }
+
+ // Locals that are not function params start after the return call frame.
+ current += c.callFrameStackSizeInUint64
+
+ for _, lt := range c.localTypes {
+ c.localIndexToStackHeightInUint64 = append(c.localIndexToStackHeightInUint64, current)
+ if lt == wasm.ValueTypeV128 {
+ current++
+ }
+ current++
+ }
+
+ // Push function arguments.
+ for _, t := range c.sig.Params {
+ c.stackPush(wasmValueTypeTounsignedType(t))
+ }
+
+ if c.callFrameStackSizeInUint64 > 0 {
+ // Reserve the stack slots for results.
+ for i := 0; i < c.sig.ResultNumInUint64-c.sig.ParamNumInUint64; i++ {
+ c.stackPush(unsignedTypeI64)
+ }
+
+ // Reserve the stack slots for call frame.
+ for i := 0; i < c.callFrameStackSizeInUint64; i++ {
+ c.stackPush(unsignedTypeI64)
+ }
+ }
+}
+
+// compiler is in charge of lowering a raw Wasm function body to a compilationResult.
+// This is created per *wasm.Module and reused for all functions in it to reduce memory allocations.
+type compiler struct {
+ module *wasm.Module
+ enabledFeatures api.CoreFeatures
+ callFrameStackSizeInUint64 int
+ stack []unsignedType
+ currentFrameID uint32
+ controlFrames controlFrames
+ unreachableState struct {
+ on bool
+ depth int
+ }
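+ // pc is the current read offset within body; currentOpPC records the offset
+ // of the opcode currently being handled, before its immediates are decoded.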
+ pc, currentOpPC uint64
+ result compilationResult
+
+ // body holds the code for the function's body where Wasm instructions are stored.
+ body []byte
+ // sig is the function type of the target function.
+ sig *wasm.FunctionType
+ // localTypes holds the target function locals' value types except function params.
+ localTypes []wasm.ValueType
+ // localIndexToStackHeightInUint64 maps the local index (starting with function params) to the stack height
+ // where the local is placed. This mapping is necessary for functions that contain vector-type locals.
+ localIndexToStackHeightInUint64 []int
+
+ // types holds all the function types in the module where the target function exists.
+ types []wasm.FunctionType
+ // funcs holds the type indexes for all declared functions in the module where the target function exists.
+ funcs []uint32
+ // globals holds the global types for all declared globals in the module where the target function exists.
+ globals []wasm.GlobalType
+
+ // needSourceOffset is true if this module requires DWARF-based stack traces.
+ needSourceOffset bool
+ // bodyOffsetInCodeSection is the offset of the body of this function in the original Wasm binary's code section.
+ bodyOffsetInCodeSection uint64
+
+ ensureTermination bool
+ // Pre-allocated bytes.Reader to be used in various places.
+ br *bytes.Reader
+ funcTypeToSigs funcTypeToIRSignatures
+
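+ // next is the index of the next function in the code section to be compiled by Next.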
+ next int
+}
+
+//lint:ignore U1000 for debugging only.
+func (c *compiler) stackDump() string {
+ strs := make([]string, 0, len(c.stack))
+ for _, s := range c.stack {
+ strs = append(strs, s.String())
+ }
+ return "[" + strings.Join(strs, ", ") + "]"
+}
+
+func (c *compiler) markUnreachable() {
+ c.unreachableState.on = true
+}
+
+func (c *compiler) resetUnreachable() {
+ c.unreachableState.on = false
+}
+
+// memoryType is the type of memory in a compiled module.
+type memoryType byte
+
+const (
+ // memoryTypeNone indicates there is no memory.
+ memoryTypeNone memoryType = iota
+ // memoryTypeStandard indicates there is a non-shared memory.
+ memoryTypeStandard
+ // memoryTypeShared indicates there is a shared memory.
+ memoryTypeShared
+)
+
+type compilationResult struct {
+ // Operations holds interpreterir operations compiled from Wasm instructions in a Wasm function.
+ Operations []unionOperation
+
+ // IROperationSourceOffsetsInWasmBinary is index-correlated with Operations and maps each operation to the corresponding source instruction's
+ // offset in the original WebAssembly binary.
+ // Non-nil only when the given Wasm module has the DWARF section.
+ IROperationSourceOffsetsInWasmBinary []uint64
+
+ // LabelCallers maps a label to the number of callers of that label.
+ // Here "callers" means the call-sites which jump to the label with br, br_if or br_table
+ // instructions.
+ //
+ // Note: zero is possible and allowed in Wasm. e.g.
+ //
+ // (block
+ // (br 0)
+ // (block i32.const 1111)
+ // )
+ //
+ // In this example, the label corresponding to `(block i32.const 1111)` is never reached at runtime, because `br 0` branches out of the enclosing block before it is reached.
+ LabelCallers map[label]uint32
+ // UsesMemory is true if this function might use memory.
+ UsesMemory bool
+
+ // The following fields are per-module values, not per-function.
+
+ // Globals holds all the declarations of globals in the module from which this function is compiled.
+ Globals []wasm.GlobalType
+ // Functions holds all the declarations of functions in the module from which this function is compiled, including itself.
+ Functions []wasm.Index
+ // Types holds all the types in the module from which this function is compiled.
+ Types []wasm.FunctionType
+ // Memory indicates the type of memory of the module.
+ Memory memoryType
+ // HasTable is true if the module from which this function is compiled has a table declaration.
+ HasTable bool
+ // HasDataInstances is true if the module has data instances which might be used by memory.init or data.drop instructions.
+ HasDataInstances bool
+ // HasElementInstances is true if the module has element instances which might be used by table.init or elem.drop instructions.
+ HasElementInstances bool
+}
+
+// newCompiler returns the new *compiler for the given parameters.
+// Use the compiler.Next function to get the compilation result for each function.
+func newCompiler(enabledFeatures api.CoreFeatures, callFrameStackSizeInUint64 int, module *wasm.Module, ensureTermination bool) (*compiler, error) {
+ functions, globals, mem, tables, err := module.AllDeclarations()
+ if err != nil {
+ return nil, err
+ }
+
+ hasTable, hasDataInstances, hasElementInstances := len(tables) > 0,
+ len(module.DataSection) > 0, len(module.ElementSection) > 0
+
+ var mt memoryType
+ switch {
+ case mem == nil:
+ mt = memoryTypeNone
+ case mem.IsShared:
+ mt = memoryTypeShared
+ default:
+ mt = memoryTypeStandard
+ }
+
+ types := module.TypeSection
+
+ c := &compiler{
+ module: module,
+ enabledFeatures: enabledFeatures,
+ controlFrames: controlFrames{},
+ callFrameStackSizeInUint64: callFrameStackSizeInUint64,
+ result: compilationResult{
+ Globals: globals,
+ Functions: functions,
+ Types: types,
+ Memory: mt,
+ HasTable: hasTable,
+ HasDataInstances: hasDataInstances,
+ HasElementInstances: hasElementInstances,
+ LabelCallers: map[label]uint32{},
+ },
+ globals: globals,
+ funcs: functions,
+ types: types,
+ ensureTermination: ensureTermination,
+ br: bytes.NewReader(nil),
+ funcTypeToSigs: funcTypeToIRSignatures{
+ indirectCalls: make([]*signature, len(types)),
+ directCalls: make([]*signature, len(types)),
+ wasmTypes: types,
+ },
+ needSourceOffset: module.DWARFLines != nil,
+ }
+ return c, nil
+}
+
+// Next returns the next compilationResult for this compiler.
+func (c *compiler) Next() (*compilationResult, error) {
+ funcIndex := c.next
+ code := &c.module.CodeSection[funcIndex]
+ sig := &c.types[c.module.FunctionSection[funcIndex]]
+
+ // Reset the previous result.
+ c.result.Operations = c.result.Operations[:0]
+ c.result.IROperationSourceOffsetsInWasmBinary = c.result.IROperationSourceOffsetsInWasmBinary[:0]
+ c.result.UsesMemory = false
+ // Clears the existing entries in LabelCallers.
+ for frameID := uint32(0); frameID <= c.currentFrameID; frameID++ {
+ for k := labelKind(0); k < labelKindNum; k++ {
+ delete(c.result.LabelCallers, newLabel(k, frameID))
+ }
+ }
+ // Reset the previous states.
+ c.pc = 0
+ c.currentOpPC = 0
+ c.currentFrameID = 0
+ c.unreachableState.on, c.unreachableState.depth = false, 0
+
+ if err := c.compile(sig, code.Body, code.LocalTypes, code.BodyOffsetInCodeSection); err != nil {
+ return nil, err
+ }
+ c.next++
+ return &c.result, nil
+}
+
+// compile lowers the given function body into interpreterir operations
+// so that the resulting operations can be consumed by the interpreter
+// or the compiler engine.
+func (c *compiler) compile(sig *wasm.FunctionType, body []byte, localTypes []wasm.ValueType, bodyOffsetInCodeSection uint64) error {
+ // Set function specific fields.
+ c.body = body
+ c.localTypes = localTypes
+ c.sig = sig
+ c.bodyOffsetInCodeSection = bodyOffsetInCodeSection
+
+ // Reuses the underlying slices.
+ c.stack = c.stack[:0]
+ c.controlFrames.frames = c.controlFrames.frames[:0]
+
+ c.initializeStack()
+
+ // Emit const expressions for locals.
+ // Note that here we don't take function arguments
+ // into account, meaning that callers must push
+ // arguments before entering into the function body.
+ for _, t := range c.localTypes {
+ c.emitDefaultValue(t)
+ }
+
+ // Insert the function control frame.
+ c.controlFrames.push(controlFrame{
+ frameID: c.nextFrameID(),
+ blockType: c.sig,
+ kind: controlFrameKindFunction,
+ })
+
+ // Now, enter the function body.
+ for !c.controlFrames.empty() && c.pc < uint64(len(c.body)) {
+ if err := c.handleInstruction(); err != nil {
+ return fmt.Errorf("handling instruction: %w", err)
+ }
+ }
+ return nil
+}
+
+// handleInstruction translates the current Wasm instruction into interpreterir
+// operations and emits them into c.result.
+func (c *compiler) handleInstruction() error {
+ op := c.body[c.pc]
+ c.currentOpPC = c.pc
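+ // Debugging aid: flip this to true to print each instruction together with
+ // the unreachable state and the tracked type stack while lowering.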
+ if false {
+ var instName string
+ if op == wasm.OpcodeVecPrefix {
+ instName = wasm.VectorInstructionName(c.body[c.pc+1])
+ } else if op == wasm.OpcodeAtomicPrefix {
+ instName = wasm.AtomicInstructionName(c.body[c.pc+1])
+ } else if op == wasm.OpcodeMiscPrefix {
+ instName = wasm.MiscInstructionName(c.body[c.pc+1])
+ } else {
+ instName = wasm.InstructionName(op)
+ }
+ fmt.Printf("handling %s, unreachable_state(on=%v,depth=%d), stack=%v\n",
+ instName, c.unreachableState.on, c.unreachableState.depth, c.stack,
+ )
+ }
+
+ var peekValueType unsignedType
+ if len(c.stack) > 0 {
+ peekValueType = c.stackPeek()
+ }
+
+ // Modify the stack according to the current instruction.
+ // Note that some instructions will read "index" in
+ // applyToStack and advance c.pc inside the function.
+ index, err := c.applyToStack(op)
+ if err != nil {
+ return fmt.Errorf("apply stack failed for %s: %w", wasm.InstructionName(op), err)
+ }
+ // Now we handle each instruction, and
+ // emit the corresponding interpreterir operations to the results.
+operatorSwitch:
+ switch op {
+ case wasm.OpcodeUnreachable:
+ c.emit(newOperationUnreachable())
+ c.markUnreachable()
+ case wasm.OpcodeNop:
+ // Nop is noop!
+ case wasm.OpcodeBlock:
+ c.br.Reset(c.body[c.pc+1:])
+ bt, num, err := wasm.DecodeBlockType(c.types, c.br, c.enabledFeatures)
+ if err != nil {
+ return fmt.Errorf("reading block type for block instruction: %w", err)
+ }
+ c.pc += num
+
+ if c.unreachableState.on {
+ // If we are currently in the unreachable state,
+ // just skip the entire block.
+ c.unreachableState.depth++
+ break operatorSwitch
+ }
+
+ // Create a new frame -- entering this block.
+ frame := controlFrame{
+ frameID: c.nextFrameID(),
+ originalStackLenWithoutParam: len(c.stack) - len(bt.Params),
+ kind: controlFrameKindBlockWithoutContinuationLabel,
+ blockType: bt,
+ }
+ c.controlFrames.push(frame)
+
+ case wasm.OpcodeLoop:
+ c.br.Reset(c.body[c.pc+1:])
+ bt, num, err := wasm.DecodeBlockType(c.types, c.br, c.enabledFeatures)
+ if err != nil {
+ return fmt.Errorf("reading block type for loop instruction: %w", err)
+ }
+ c.pc += num
+
+ if c.unreachableState.on {
+ // If we are currently in the unreachable state,
+ // just skip the entire block.
+ c.unreachableState.depth++
+ break operatorSwitch
+ }
+
+ // Create a new frame -- entering loop.
+ frame := controlFrame{
+ frameID: c.nextFrameID(),
+ originalStackLenWithoutParam: len(c.stack) - len(bt.Params),
+ kind: controlFrameKindLoop,
+ blockType: bt,
+ }
+ c.controlFrames.push(frame)
+
+ // Prepare the label for the header of this loop.
+ loopLabel := newLabel(labelKindHeader, frame.frameID)
+ c.result.LabelCallers[loopLabel]++
+
+ // Emit the branch operation to enter inside the loop.
+ c.emit(newOperationBr(loopLabel))
+ c.emit(newOperationLabel(loopLabel))
+
+ // Insert the exit code check at the loop header, which is the only point in the function body
+ // where such a check is necessary to prevent infinite loops.
+ //
+ // Note that this is a little aggressive: the exit code is checked regardless of whether the loop
+ // header is actually branched to. In other words, the check happens even when no br/br_if/br_table
+ // instruction jumping to this loop exists. In practice, that shouldn't be an issue, since such a
+ // "noop" loop header will most likely be optimized out by guest-language compilers with
+ // control-flow optimization passes.
+ if c.ensureTermination {
+ c.emit(newOperationBuiltinFunctionCheckExitCode())
+ }
+ case wasm.OpcodeIf:
+ c.br.Reset(c.body[c.pc+1:])
+ bt, num, err := wasm.DecodeBlockType(c.types, c.br, c.enabledFeatures)
+ if err != nil {
+ return fmt.Errorf("reading block type for if instruction: %w", err)
+ }
+ c.pc += num
+
+ if c.unreachableState.on {
+ // If we are currently in the unreachable state,
+ // just skip the entire block.
+ c.unreachableState.depth++
+ break operatorSwitch
+ }
+
+ // Create a new frame -- entering if.
+ frame := controlFrame{
+ frameID: c.nextFrameID(),
+ originalStackLenWithoutParam: len(c.stack) - len(bt.Params),
+ // Note: this will be set to controlFrameKindIfWithElse
+ // when an else opcode is found later.
+ kind: controlFrameKindIfWithoutElse,
+ blockType: bt,
+ }
+ c.controlFrames.push(frame)
+
+ // Prepare the labels for the then and else blocks of this if.
+ thenLabel := newLabel(labelKindHeader, frame.frameID)
+ elseLabel := newLabel(labelKindElse, frame.frameID)
+ c.result.LabelCallers[thenLabel]++
+ c.result.LabelCallers[elseLabel]++
+
+ // Emit the branch operation to enter the then block.
+ c.emit(newOperationBrIf(thenLabel, elseLabel, nopinclusiveRange))
+ c.emit(newOperationLabel(thenLabel))
+ case wasm.OpcodeElse:
+ frame := c.controlFrames.top()
+ if c.unreachableState.on && c.unreachableState.depth > 0 {
+ // If we are currently in the unreachable state inside a nested if,
+ // just skip the entire else block.
+ break operatorSwitch
+ } else if c.unreachableState.on {
+ // If we are currently in the unreachable state at the non-nested if,
+ // reset the stack so we can correctly handle the else block.
+ top := c.controlFrames.top()
+ c.stack = c.stack[:top.originalStackLenWithoutParam]
+ top.kind = controlFrameKindIfWithElse
+
+ // Re-push the parameters to the if block so that else block can use them.
+ for _, t := range frame.blockType.Params {
+ c.stackPush(wasmValueTypeTounsignedType(t))
+ }
+
+ // We are no longer unreachable in else frame,
+ // so emit the correct label, and reset the unreachable state.
+ elseLabel := newLabel(labelKindElse, frame.frameID)
+ c.resetUnreachable()
+ c.emit(
+ newOperationLabel(elseLabel),
+ )
+ break operatorSwitch
+ }
+
+ // Change the kind of this if block, indicating that
+ // the if has an else block.
+ frame.kind = controlFrameKindIfWithElse
+
+ // We need to reset the stack so that
+ // the values pushed inside the then block
+ // do not affect the else block.
+ dropOp := newOperationDrop(c.getFrameDropRange(frame, false))
+
+ // Reset the stack manipulated by the then block, and re-push the block param types to the stack.
+
+ c.stack = c.stack[:frame.originalStackLenWithoutParam]
+ for _, t := range frame.blockType.Params {
+ c.stackPush(wasmValueTypeTounsignedType(t))
+ }
+
+ // Prep labels for else and the continuation of this if block.
+ elseLabel := newLabel(labelKindElse, frame.frameID)
+ continuationLabel := newLabel(labelKindContinuation, frame.frameID)
+ c.result.LabelCallers[continuationLabel]++
+
+ // Emit the instructions for exiting the then block,
+ // and then the initiation of the else block.
+ c.emit(dropOp)
+ // Jump to the continuation of this block.
+ c.emit(newOperationBr(continuationLabel))
+ // Initiate the else block.
+ c.emit(newOperationLabel(elseLabel))
+ case wasm.OpcodeEnd:
+ if c.unreachableState.on && c.unreachableState.depth > 0 {
+ c.unreachableState.depth--
+ break operatorSwitch
+ } else if c.unreachableState.on {
+ c.resetUnreachable()
+
+ frame := c.controlFrames.pop()
+ if c.controlFrames.empty() {
+ return nil
+ }
+
+ c.stack = c.stack[:frame.originalStackLenWithoutParam]
+ for _, t := range frame.blockType.Results {
+ c.stackPush(wasmValueTypeTounsignedType(t))
+ }
+
+ continuationLabel := newLabel(labelKindContinuation, frame.frameID)
+ if frame.kind == controlFrameKindIfWithoutElse {
+ // Emit the else label.
+ elseLabel := newLabel(labelKindElse, frame.frameID)
+ c.result.LabelCallers[continuationLabel]++
+ c.emit(newOperationLabel(elseLabel))
+ c.emit(newOperationBr(continuationLabel))
+ c.emit(newOperationLabel(continuationLabel))
+ } else {
+ c.emit(
+ newOperationLabel(continuationLabel),
+ )
+ }
+
+ break operatorSwitch
+ }
+
+ frame := c.controlFrames.pop()
+
+ // We need to reset the stack so that
+ // the values pushed inside the block are dropped.
+ dropOp := newOperationDrop(c.getFrameDropRange(frame, true))
+ c.stack = c.stack[:frame.originalStackLenWithoutParam]
+
+ // Push the result types onto the stack.
+ for _, t := range frame.blockType.Results {
+ c.stackPush(wasmValueTypeTounsignedType(t))
+ }
+
+ // Emit the instructions according to the Kind of the current control frame.
+ switch frame.kind {
+ case controlFrameKindFunction:
+ if !c.controlFrames.empty() {
+ // Should never happen. If so, there's a bug in the translation.
+ panic("bug: found more function control frames")
+ }
+ // Return from function.
+ c.emit(dropOp)
+ c.emit(newOperationBr(newLabel(labelKindReturn, 0)))
+ case controlFrameKindIfWithoutElse:
+ // In this case we have to emit an "empty" else label.
+ elseLabel := newLabel(labelKindElse, frame.frameID)
+ continuationLabel := newLabel(labelKindContinuation, frame.frameID)
+ c.result.LabelCallers[continuationLabel] += 2
+ c.emit(dropOp)
+ c.emit(newOperationBr(continuationLabel))
+ // Emit the else which soon branches into the continuation.
+ c.emit(newOperationLabel(elseLabel))
+ c.emit(newOperationBr(continuationLabel))
+ // Initiate the continuation.
+ c.emit(newOperationLabel(continuationLabel))
+ case controlFrameKindBlockWithContinuationLabel,
+ controlFrameKindIfWithElse:
+ continuationLabel := newLabel(labelKindContinuation, frame.frameID)
+ c.result.LabelCallers[continuationLabel]++
+ c.emit(dropOp)
+ c.emit(newOperationBr(continuationLabel))
+ c.emit(newOperationLabel(continuationLabel))
+ case controlFrameKindLoop, controlFrameKindBlockWithoutContinuationLabel:
+ c.emit(
+ dropOp,
+ )
+ default:
+ // Should never happen. If so, there's a bug in the translation.
+ panic(fmt.Errorf("bug: invalid control frame Kind: 0x%x", frame.kind))
+ }
+
+ case wasm.OpcodeBr:
+ targetIndex, n, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("read the target for br_if: %w", err)
+ }
+ c.pc += n
+
+ if c.unreachableState.on {
+ // If it is currently in unreachable, br is no-op.
+ break operatorSwitch
+ }
+
+ targetFrame := c.controlFrames.get(int(targetIndex))
+ targetFrame.ensureContinuation()
+ dropOp := newOperationDrop(c.getFrameDropRange(targetFrame, false))
+ targetID := targetFrame.asLabel()
+ c.result.LabelCallers[targetID]++
+ c.emit(dropOp)
+ c.emit(newOperationBr(targetID))
+ // The br operation is stack-polymorphic, so mark the state as unreachable.
+ // That means subsequent instructions in the current control frame are "unreachable"
+ // and can be safely removed.
+ c.markUnreachable()
+ case wasm.OpcodeBrIf:
+ targetIndex, n, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("read the target for br_if: %w", err)
+ }
+ c.pc += n
+
+ if c.unreachableState.on {
+ // If it is currently in unreachable, br-if is no-op.
+ break operatorSwitch
+ }
+
+ targetFrame := c.controlFrames.get(int(targetIndex))
+ targetFrame.ensureContinuation()
+ drop := c.getFrameDropRange(targetFrame, false)
+ target := targetFrame.asLabel()
+ c.result.LabelCallers[target]++
+
+ continuationLabel := newLabel(labelKindHeader, c.nextFrameID())
+ c.result.LabelCallers[continuationLabel]++
+ c.emit(newOperationBrIf(target, continuationLabel, drop))
+ // Start emitting the fallthrough block operations.
+ c.emit(newOperationLabel(continuationLabel))
+ case wasm.OpcodeBrTable:
+ c.br.Reset(c.body[c.pc+1:])
+ r := c.br
+ numTargets, n, err := leb128.DecodeUint32(r)
+ if err != nil {
+ return fmt.Errorf("error reading number of targets in br_table: %w", err)
+ }
+ c.pc += n
+
+ if c.unreachableState.on {
+ // If it is currently in unreachable, br_table is no-op.
+ // But before proceeding to the next instruction, we must advance the pc
+ // according to the number of br_table targets.
+ for i := uint32(0); i <= numTargets; i++ { // inclusive as we also need to read the index of default target.
+ _, n, err := leb128.DecodeUint32(r)
+ if err != nil {
+ return fmt.Errorf("error reading target %d in br_table: %w", i, err)
+ }
+ c.pc += n
+ }
+ break operatorSwitch
+ }
+
+ // Read the branch targets.
+ s := numTargets * 2
+ targetLabels := make([]uint64, 2+s) // (label, inclusiveRange) * (default+numTargets)
+ for i := uint32(0); i < s; i += 2 {
+ l, n, err := leb128.DecodeUint32(r)
+ if err != nil {
+ return fmt.Errorf("error reading target %d in br_table: %w", i, err)
+ }
+ c.pc += n
+ targetFrame := c.controlFrames.get(int(l))
+ targetFrame.ensureContinuation()
+ drop := c.getFrameDropRange(targetFrame, false)
+ targetLabel := targetFrame.asLabel()
+ targetLabels[i] = uint64(targetLabel)
+ targetLabels[i+1] = drop.AsU64()
+ c.result.LabelCallers[targetLabel]++
+ }
+
+ // Prep default target control frame.
+ l, n, err := leb128.DecodeUint32(r)
+ if err != nil {
+ return fmt.Errorf("error reading default target of br_table: %w", err)
+ }
+ c.pc += n
+ defaultTargetFrame := c.controlFrames.get(int(l))
+ defaultTargetFrame.ensureContinuation()
+ defaultTargetDrop := c.getFrameDropRange(defaultTargetFrame, false)
+ defaultLabel := defaultTargetFrame.asLabel()
+ c.result.LabelCallers[defaultLabel]++
+ targetLabels[s] = uint64(defaultLabel)
+ targetLabels[s+1] = defaultTargetDrop.AsU64()
+ c.emit(newOperationBrTable(targetLabels))
+
+ // The br_table operation is stack-polymorphic, so mark the state as unreachable.
+ // That means subsequent instructions in the current control frame are "unreachable"
+ // and can be safely removed.
+ c.markUnreachable()
+ case wasm.OpcodeReturn:
+ functionFrame := c.controlFrames.functionFrame()
+ dropOp := newOperationDrop(c.getFrameDropRange(functionFrame, false))
+
+ // Clean up the stack and then jump to the function frame's continuation (i.e. return).
+ c.emit(dropOp)
+ c.emit(newOperationBr(functionFrame.asLabel()))
+
+ // The return operation is stack-polymorphic, so mark the state as unreachable.
+ // That means subsequent instructions in the current control frame are "unreachable"
+ // and can be safely removed.
+ c.markUnreachable()
+ case wasm.OpcodeCall:
+ c.emit(
+ newOperationCall(index),
+ )
+ case wasm.OpcodeCallIndirect:
+ typeIndex := index
+ tableIndex, n, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("read target for br_table: %w", err)
+ }
+ c.pc += n
+ c.emit(
+ newOperationCallIndirect(typeIndex, tableIndex),
+ )
+ case wasm.OpcodeDrop:
+ r := inclusiveRange{Start: 0, End: 0}
+ if peekValueType == unsignedTypeV128 {
+ // inclusiveRange is the range in uint64 representation, so dropping a vector value on top
+ // should be translated as drop [0..1] inclusively.
+ r.End++
+ }
+ c.emit(newOperationDrop(r))
+ case wasm.OpcodeSelect:
+ // If it is on the unreachable state, ignore the instruction.
+ if c.unreachableState.on {
+ break operatorSwitch
+ }
+ isTargetVector := c.stackPeek() == unsignedTypeV128
+ c.emit(
+ newOperationSelect(isTargetVector),
+ )
+ case wasm.OpcodeTypedSelect:
+ // Skip two bytes: the vector length, fixed to 1, and the value type for select.
+ c.pc += 2
+ // If it is on the unreachable state, ignore the instruction.
+ if c.unreachableState.on {
+ break operatorSwitch
+ }
+ // Typed select is semantically equivalent to select at runtime.
+ isTargetVector := c.stackPeek() == unsignedTypeV128
+ c.emit(
+ newOperationSelect(isTargetVector),
+ )
+ case wasm.OpcodeLocalGet:
+ depth := c.localDepth(index)
+ if isVector := c.localType(index) == wasm.ValueTypeV128; !isVector {
+ c.emit(
+ // -1 because we already manipulated the stack before
+ // calling localDepth above.
+ newOperationPick(depth-1, isVector),
+ )
+ } else {
+ c.emit(
+ // -2 because we already manipulated the stack before
+ // calling localDepth above.
+ newOperationPick(depth-2, isVector),
+ )
+ }
+ case wasm.OpcodeLocalSet:
+ depth := c.localDepth(index)
+
+ isVector := c.localType(index) == wasm.ValueTypeV128
+ if isVector {
+ c.emit(
+ // +2 because we already popped the operands for this operation from c.stack before
+ // calling localDepth above.
+ newOperationSet(depth+2, isVector),
+ )
+ } else {
+ c.emit(
+ // +1 because we already popped the operands for this operation from c.stack before
+ // calling localDepth above.
+ newOperationSet(depth+1, isVector),
+ )
+ }
+ case wasm.OpcodeLocalTee:
+ depth := c.localDepth(index)
+ isVector := c.localType(index) == wasm.ValueTypeV128
+ if isVector {
+ c.emit(newOperationPick(1, isVector))
+ c.emit(newOperationSet(depth+2, isVector))
+ } else {
+ c.emit(
+ newOperationPick(0, isVector))
+ c.emit(newOperationSet(depth+1, isVector))
+ }
+ case wasm.OpcodeGlobalGet:
+ c.emit(
+ newOperationGlobalGet(index),
+ )
+ case wasm.OpcodeGlobalSet:
+ c.emit(
+ newOperationGlobalSet(index),
+ )
+ case wasm.OpcodeI32Load:
+ imm, err := c.readMemoryArg(wasm.OpcodeI32LoadName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad(unsignedTypeI32, imm))
+ case wasm.OpcodeI64Load:
+ imm, err := c.readMemoryArg(wasm.OpcodeI64LoadName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad(unsignedTypeI64, imm))
+ case wasm.OpcodeF32Load:
+ imm, err := c.readMemoryArg(wasm.OpcodeF32LoadName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad(unsignedTypeF32, imm))
+ case wasm.OpcodeF64Load:
+ imm, err := c.readMemoryArg(wasm.OpcodeF64LoadName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad(unsignedTypeF64, imm))
+ case wasm.OpcodeI32Load8S:
+ imm, err := c.readMemoryArg(wasm.OpcodeI32Load8SName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad8(signedInt32, imm))
+ case wasm.OpcodeI32Load8U:
+ imm, err := c.readMemoryArg(wasm.OpcodeI32Load8UName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad8(signedUint32, imm))
+ case wasm.OpcodeI32Load16S:
+ imm, err := c.readMemoryArg(wasm.OpcodeI32Load16SName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad16(signedInt32, imm))
+ case wasm.OpcodeI32Load16U:
+ imm, err := c.readMemoryArg(wasm.OpcodeI32Load16UName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad16(signedUint32, imm))
+ case wasm.OpcodeI64Load8S:
+ imm, err := c.readMemoryArg(wasm.OpcodeI64Load8SName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad8(signedInt64, imm))
+ case wasm.OpcodeI64Load8U:
+ imm, err := c.readMemoryArg(wasm.OpcodeI64Load8UName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad8(signedUint64, imm))
+ case wasm.OpcodeI64Load16S:
+ imm, err := c.readMemoryArg(wasm.OpcodeI64Load16SName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad16(signedInt64, imm))
+ case wasm.OpcodeI64Load16U:
+ imm, err := c.readMemoryArg(wasm.OpcodeI64Load16UName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad16(signedUint64, imm))
+ case wasm.OpcodeI64Load32S:
+ imm, err := c.readMemoryArg(wasm.OpcodeI64Load32SName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad32(true, imm))
+ case wasm.OpcodeI64Load32U:
+ imm, err := c.readMemoryArg(wasm.OpcodeI64Load32UName)
+ if err != nil {
+ return err
+ }
+ c.emit(newOperationLoad32(false, imm))
+ case wasm.OpcodeI32Store:
+ imm, err := c.readMemoryArg(wasm.OpcodeI32StoreName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationStore(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeI64Store:
+ imm, err := c.readMemoryArg(wasm.OpcodeI64StoreName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationStore(unsignedTypeI64, imm),
+ )
+ case wasm.OpcodeF32Store:
+ imm, err := c.readMemoryArg(wasm.OpcodeF32StoreName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationStore(unsignedTypeF32, imm),
+ )
+ case wasm.OpcodeF64Store:
+ imm, err := c.readMemoryArg(wasm.OpcodeF64StoreName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationStore(unsignedTypeF64, imm),
+ )
+ case wasm.OpcodeI32Store8:
+ imm, err := c.readMemoryArg(wasm.OpcodeI32Store8Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationStore8(imm),
+ )
+ case wasm.OpcodeI32Store16:
+ imm, err := c.readMemoryArg(wasm.OpcodeI32Store16Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationStore16(imm),
+ )
+ case wasm.OpcodeI64Store8:
+ imm, err := c.readMemoryArg(wasm.OpcodeI64Store8Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationStore8(imm),
+ )
+ case wasm.OpcodeI64Store16:
+ imm, err := c.readMemoryArg(wasm.OpcodeI64Store16Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationStore16(imm),
+ )
+ case wasm.OpcodeI64Store32:
+ imm, err := c.readMemoryArg(wasm.OpcodeI64Store32Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationStore32(imm),
+ )
+ case wasm.OpcodeMemorySize:
+ c.result.UsesMemory = true
+ c.pc++ // Skip the reserved one byte.
+ c.emit(
+ newOperationMemorySize(),
+ )
+ case wasm.OpcodeMemoryGrow:
+ c.result.UsesMemory = true
+ c.pc++ // Skip the reserved one byte.
+ c.emit(
+ newOperationMemoryGrow(),
+ )
+ case wasm.OpcodeI32Const:
+ val, num, err := leb128.LoadInt32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i32.const value: %v", err)
+ }
+ c.pc += num
+ c.emit(
+ newOperationConstI32(uint32(val)),
+ )
+ case wasm.OpcodeI64Const:
+ val, num, err := leb128.LoadInt64(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i64.const value: %v", err)
+ }
+ c.pc += num
+ c.emit(
+ newOperationConstI64(uint64(val)),
+ )
+ case wasm.OpcodeF32Const:
+ v := math.Float32frombits(binary.LittleEndian.Uint32(c.body[c.pc+1:]))
+ c.pc += 4
+ c.emit(
+ newOperationConstF32(v),
+ )
+ case wasm.OpcodeF64Const:
+ v := math.Float64frombits(binary.LittleEndian.Uint64(c.body[c.pc+1:]))
+ c.pc += 8
+ c.emit(
+ newOperationConstF64(v),
+ )
+ case wasm.OpcodeI32Eqz:
+ c.emit(
+ newOperationEqz(unsignedInt32),
+ )
+ case wasm.OpcodeI32Eq:
+ c.emit(
+ newOperationEq(unsignedTypeI32),
+ )
+ case wasm.OpcodeI32Ne:
+ c.emit(
+ newOperationNe(unsignedTypeI32),
+ )
+ case wasm.OpcodeI32LtS:
+ c.emit(
+ newOperationLt(signedTypeInt32),
+ )
+ case wasm.OpcodeI32LtU:
+ c.emit(
+ newOperationLt(signedTypeUint32),
+ )
+ case wasm.OpcodeI32GtS:
+ c.emit(
+ newOperationGt(signedTypeInt32),
+ )
+ case wasm.OpcodeI32GtU:
+ c.emit(
+ newOperationGt(signedTypeUint32),
+ )
+ case wasm.OpcodeI32LeS:
+ c.emit(
+ newOperationLe(signedTypeInt32),
+ )
+ case wasm.OpcodeI32LeU:
+ c.emit(
+ newOperationLe(signedTypeUint32),
+ )
+ case wasm.OpcodeI32GeS:
+ c.emit(
+ newOperationGe(signedTypeInt32),
+ )
+ case wasm.OpcodeI32GeU:
+ c.emit(
+ newOperationGe(signedTypeUint32),
+ )
+ case wasm.OpcodeI64Eqz:
+ c.emit(
+ newOperationEqz(unsignedInt64),
+ )
+ case wasm.OpcodeI64Eq:
+ c.emit(
+ newOperationEq(unsignedTypeI64),
+ )
+ case wasm.OpcodeI64Ne:
+ c.emit(
+ newOperationNe(unsignedTypeI64),
+ )
+ case wasm.OpcodeI64LtS:
+ c.emit(
+ newOperationLt(signedTypeInt64),
+ )
+ case wasm.OpcodeI64LtU:
+ c.emit(
+ newOperationLt(signedTypeUint64),
+ )
+ case wasm.OpcodeI64GtS:
+ c.emit(
+ newOperationGt(signedTypeInt64),
+ )
+ case wasm.OpcodeI64GtU:
+ c.emit(
+ newOperationGt(signedTypeUint64),
+ )
+ case wasm.OpcodeI64LeS:
+ c.emit(
+ newOperationLe(signedTypeInt64),
+ )
+ case wasm.OpcodeI64LeU:
+ c.emit(
+ newOperationLe(signedTypeUint64),
+ )
+ case wasm.OpcodeI64GeS:
+ c.emit(
+ newOperationGe(signedTypeInt64),
+ )
+ case wasm.OpcodeI64GeU:
+ c.emit(
+ newOperationGe(signedTypeUint64),
+ )
+ case wasm.OpcodeF32Eq:
+ c.emit(
+ newOperationEq(unsignedTypeF32),
+ )
+ case wasm.OpcodeF32Ne:
+ c.emit(
+ newOperationNe(unsignedTypeF32),
+ )
+ case wasm.OpcodeF32Lt:
+ c.emit(
+ newOperationLt(signedTypeFloat32),
+ )
+ case wasm.OpcodeF32Gt:
+ c.emit(
+ newOperationGt(signedTypeFloat32),
+ )
+ case wasm.OpcodeF32Le:
+ c.emit(
+ newOperationLe(signedTypeFloat32),
+ )
+ case wasm.OpcodeF32Ge:
+ c.emit(
+ newOperationGe(signedTypeFloat32),
+ )
+ case wasm.OpcodeF64Eq:
+ c.emit(
+ newOperationEq(unsignedTypeF64),
+ )
+ case wasm.OpcodeF64Ne:
+ c.emit(
+ newOperationNe(unsignedTypeF64),
+ )
+ case wasm.OpcodeF64Lt:
+ c.emit(
+ newOperationLt(signedTypeFloat64),
+ )
+ case wasm.OpcodeF64Gt:
+ c.emit(
+ newOperationGt(signedTypeFloat64),
+ )
+ case wasm.OpcodeF64Le:
+ c.emit(
+ newOperationLe(signedTypeFloat64),
+ )
+ case wasm.OpcodeF64Ge:
+ c.emit(
+ newOperationGe(signedTypeFloat64),
+ )
+ case wasm.OpcodeI32Clz:
+ c.emit(
+ newOperationClz(unsignedInt32),
+ )
+ case wasm.OpcodeI32Ctz:
+ c.emit(
+ newOperationCtz(unsignedInt32),
+ )
+ case wasm.OpcodeI32Popcnt:
+ c.emit(
+ newOperationPopcnt(unsignedInt32),
+ )
+ case wasm.OpcodeI32Add:
+ c.emit(
+ newOperationAdd(unsignedTypeI32),
+ )
+ case wasm.OpcodeI32Sub:
+ c.emit(
+ newOperationSub(unsignedTypeI32),
+ )
+ case wasm.OpcodeI32Mul:
+ c.emit(
+ newOperationMul(unsignedTypeI32),
+ )
+ case wasm.OpcodeI32DivS:
+ c.emit(
+ newOperationDiv(signedTypeInt32),
+ )
+ case wasm.OpcodeI32DivU:
+ c.emit(
+ newOperationDiv(signedTypeUint32),
+ )
+ case wasm.OpcodeI32RemS:
+ c.emit(
+ newOperationRem(signedInt32),
+ )
+ case wasm.OpcodeI32RemU:
+ c.emit(
+ newOperationRem(signedUint32),
+ )
+ case wasm.OpcodeI32And:
+ c.emit(
+ newOperationAnd(unsignedInt32),
+ )
+ case wasm.OpcodeI32Or:
+ c.emit(
+ newOperationOr(unsignedInt32),
+ )
+ case wasm.OpcodeI32Xor:
+ c.emit(
+ newOperationXor(unsignedInt32),
+ )
+ case wasm.OpcodeI32Shl:
+ c.emit(
+ newOperationShl(unsignedInt32),
+ )
+ case wasm.OpcodeI32ShrS:
+ c.emit(
+ newOperationShr(signedInt32),
+ )
+ case wasm.OpcodeI32ShrU:
+ c.emit(
+ newOperationShr(signedUint32),
+ )
+ case wasm.OpcodeI32Rotl:
+ c.emit(
+ newOperationRotl(unsignedInt32),
+ )
+ case wasm.OpcodeI32Rotr:
+ c.emit(
+ newOperationRotr(unsignedInt32),
+ )
+ case wasm.OpcodeI64Clz:
+ c.emit(
+ newOperationClz(unsignedInt64),
+ )
+ case wasm.OpcodeI64Ctz:
+ c.emit(
+ newOperationCtz(unsignedInt64),
+ )
+ case wasm.OpcodeI64Popcnt:
+ c.emit(
+ newOperationPopcnt(unsignedInt64),
+ )
+ case wasm.OpcodeI64Add:
+ c.emit(
+ newOperationAdd(unsignedTypeI64),
+ )
+ case wasm.OpcodeI64Sub:
+ c.emit(
+ newOperationSub(unsignedTypeI64),
+ )
+ case wasm.OpcodeI64Mul:
+ c.emit(
+ newOperationMul(unsignedTypeI64),
+ )
+ case wasm.OpcodeI64DivS:
+ c.emit(
+ newOperationDiv(signedTypeInt64),
+ )
+ case wasm.OpcodeI64DivU:
+ c.emit(
+ newOperationDiv(signedTypeUint64),
+ )
+ case wasm.OpcodeI64RemS:
+ c.emit(
+ newOperationRem(signedInt64),
+ )
+ case wasm.OpcodeI64RemU:
+ c.emit(
+ newOperationRem(signedUint64),
+ )
+ case wasm.OpcodeI64And:
+ c.emit(
+ newOperationAnd(unsignedInt64),
+ )
+ case wasm.OpcodeI64Or:
+ c.emit(
+ newOperationOr(unsignedInt64),
+ )
+ case wasm.OpcodeI64Xor:
+ c.emit(
+ newOperationXor(unsignedInt64),
+ )
+ case wasm.OpcodeI64Shl:
+ c.emit(
+ newOperationShl(unsignedInt64),
+ )
+ case wasm.OpcodeI64ShrS:
+ c.emit(
+ newOperationShr(signedInt64),
+ )
+ case wasm.OpcodeI64ShrU:
+ c.emit(
+ newOperationShr(signedUint64),
+ )
+ case wasm.OpcodeI64Rotl:
+ c.emit(
+ newOperationRotl(unsignedInt64),
+ )
+ case wasm.OpcodeI64Rotr:
+ c.emit(
+ newOperationRotr(unsignedInt64),
+ )
+ case wasm.OpcodeF32Abs:
+ c.emit(
+ newOperationAbs(f32),
+ )
+ case wasm.OpcodeF32Neg:
+ c.emit(
+ newOperationNeg(f32),
+ )
+ case wasm.OpcodeF32Ceil:
+ c.emit(
+ newOperationCeil(f32),
+ )
+ case wasm.OpcodeF32Floor:
+ c.emit(
+ newOperationFloor(f32),
+ )
+ case wasm.OpcodeF32Trunc:
+ c.emit(
+ newOperationTrunc(f32),
+ )
+ case wasm.OpcodeF32Nearest:
+ c.emit(
+ newOperationNearest(f32),
+ )
+ case wasm.OpcodeF32Sqrt:
+ c.emit(
+ newOperationSqrt(f32),
+ )
+ case wasm.OpcodeF32Add:
+ c.emit(
+ newOperationAdd(unsignedTypeF32),
+ )
+ case wasm.OpcodeF32Sub:
+ c.emit(
+ newOperationSub(unsignedTypeF32),
+ )
+ case wasm.OpcodeF32Mul:
+ c.emit(
+ newOperationMul(unsignedTypeF32),
+ )
+ case wasm.OpcodeF32Div:
+ c.emit(
+ newOperationDiv(signedTypeFloat32),
+ )
+ case wasm.OpcodeF32Min:
+ c.emit(
+ newOperationMin(f32),
+ )
+ case wasm.OpcodeF32Max:
+ c.emit(
+ newOperationMax(f32),
+ )
+ case wasm.OpcodeF32Copysign:
+ c.emit(
+ newOperationCopysign(f32),
+ )
+ case wasm.OpcodeF64Abs:
+ c.emit(
+ newOperationAbs(f64),
+ )
+ case wasm.OpcodeF64Neg:
+ c.emit(
+ newOperationNeg(f64),
+ )
+ case wasm.OpcodeF64Ceil:
+ c.emit(
+ newOperationCeil(f64),
+ )
+ case wasm.OpcodeF64Floor:
+ c.emit(
+ newOperationFloor(f64),
+ )
+ case wasm.OpcodeF64Trunc:
+ c.emit(
+ newOperationTrunc(f64),
+ )
+ case wasm.OpcodeF64Nearest:
+ c.emit(
+ newOperationNearest(f64),
+ )
+ case wasm.OpcodeF64Sqrt:
+ c.emit(
+ newOperationSqrt(f64),
+ )
+ case wasm.OpcodeF64Add:
+ c.emit(
+ newOperationAdd(unsignedTypeF64),
+ )
+ case wasm.OpcodeF64Sub:
+ c.emit(
+ newOperationSub(unsignedTypeF64),
+ )
+ case wasm.OpcodeF64Mul:
+ c.emit(
+ newOperationMul(unsignedTypeF64),
+ )
+ case wasm.OpcodeF64Div:
+ c.emit(
+ newOperationDiv(signedTypeFloat64),
+ )
+ case wasm.OpcodeF64Min:
+ c.emit(
+ newOperationMin(f64),
+ )
+ case wasm.OpcodeF64Max:
+ c.emit(
+ newOperationMax(f64),
+ )
+ case wasm.OpcodeF64Copysign:
+ c.emit(
+ newOperationCopysign(f64),
+ )
+ case wasm.OpcodeI32WrapI64:
+ c.emit(
+ newOperationI32WrapFromI64(),
+ )
+ case wasm.OpcodeI32TruncF32S:
+ c.emit(
+ newOperationITruncFromF(f32, signedInt32, false),
+ )
+ case wasm.OpcodeI32TruncF32U:
+ c.emit(
+ newOperationITruncFromF(f32, signedUint32, false),
+ )
+ case wasm.OpcodeI32TruncF64S:
+ c.emit(
+ newOperationITruncFromF(f64, signedInt32, false),
+ )
+ case wasm.OpcodeI32TruncF64U:
+ c.emit(
+ newOperationITruncFromF(f64, signedUint32, false),
+ )
+ case wasm.OpcodeI64ExtendI32S:
+ c.emit(
+ newOperationExtend(true),
+ )
+ case wasm.OpcodeI64ExtendI32U:
+ c.emit(
+ newOperationExtend(false),
+ )
+ case wasm.OpcodeI64TruncF32S:
+ c.emit(
+ newOperationITruncFromF(f32, signedInt64, false),
+ )
+ case wasm.OpcodeI64TruncF32U:
+ c.emit(
+ newOperationITruncFromF(f32, signedUint64, false),
+ )
+ case wasm.OpcodeI64TruncF64S:
+ c.emit(
+ newOperationITruncFromF(f64, signedInt64, false),
+ )
+ case wasm.OpcodeI64TruncF64U:
+ c.emit(
+ newOperationITruncFromF(f64, signedUint64, false),
+ )
+ case wasm.OpcodeF32ConvertI32S:
+ c.emit(
+ newOperationFConvertFromI(signedInt32, f32),
+ )
+ case wasm.OpcodeF32ConvertI32U:
+ c.emit(
+ newOperationFConvertFromI(signedUint32, f32),
+ )
+ case wasm.OpcodeF32ConvertI64S:
+ c.emit(
+ newOperationFConvertFromI(signedInt64, f32),
+ )
+ case wasm.OpcodeF32ConvertI64U:
+ c.emit(
+ newOperationFConvertFromI(signedUint64, f32),
+ )
+ case wasm.OpcodeF32DemoteF64:
+ c.emit(
+ newOperationF32DemoteFromF64(),
+ )
+ case wasm.OpcodeF64ConvertI32S:
+ c.emit(
+ newOperationFConvertFromI(signedInt32, f64),
+ )
+ case wasm.OpcodeF64ConvertI32U:
+ c.emit(
+ newOperationFConvertFromI(signedUint32, f64),
+ )
+ case wasm.OpcodeF64ConvertI64S:
+ c.emit(
+ newOperationFConvertFromI(signedInt64, f64),
+ )
+ case wasm.OpcodeF64ConvertI64U:
+ c.emit(
+ newOperationFConvertFromI(signedUint64, f64),
+ )
+ case wasm.OpcodeF64PromoteF32:
+ c.emit(
+ newOperationF64PromoteFromF32(),
+ )
+ case wasm.OpcodeI32ReinterpretF32:
+ c.emit(
+ newOperationI32ReinterpretFromF32(),
+ )
+ case wasm.OpcodeI64ReinterpretF64:
+ c.emit(
+ newOperationI64ReinterpretFromF64(),
+ )
+ case wasm.OpcodeF32ReinterpretI32:
+ c.emit(
+ newOperationF32ReinterpretFromI32(),
+ )
+ case wasm.OpcodeF64ReinterpretI64:
+ c.emit(
+ newOperationF64ReinterpretFromI64(),
+ )
+ case wasm.OpcodeI32Extend8S:
+ c.emit(
+ newOperationSignExtend32From8(),
+ )
+ case wasm.OpcodeI32Extend16S:
+ c.emit(
+ newOperationSignExtend32From16(),
+ )
+ case wasm.OpcodeI64Extend8S:
+ c.emit(
+ newOperationSignExtend64From8(),
+ )
+ case wasm.OpcodeI64Extend16S:
+ c.emit(
+ newOperationSignExtend64From16(),
+ )
+ case wasm.OpcodeI64Extend32S:
+ c.emit(
+ newOperationSignExtend64From32(),
+ )
+ case wasm.OpcodeRefFunc:
+ c.pc++
+ index, num, err := leb128.LoadUint32(c.body[c.pc:])
+ if err != nil {
+ return fmt.Errorf("failed to read function index for ref.func: %v", err)
+ }
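+ // num - 1 because the main instruction loop advances pc by one more after each opcode is handled.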
+ c.pc += num - 1
+ c.emit(
+ newOperationRefFunc(index),
+ )
+ case wasm.OpcodeRefNull:
+ c.pc++ // Skip the reftype byte, as every ref value is an opaque pointer regardless of its type.
+ c.emit(
+ newOperationConstI64(0),
+ )
+ case wasm.OpcodeRefIsNull:
+ // Simply compare the opaque pointer (i64) with zero.
+ c.emit(
+ newOperationEqz(unsignedInt64),
+ )
+ case wasm.OpcodeTableGet:
+ c.pc++
+ tableIndex, num, err := leb128.LoadUint32(c.body[c.pc:])
+ if err != nil {
+ return fmt.Errorf("failed to read function index for table.get: %v", err)
+ }
+ c.pc += num - 1
+ c.emit(
+ newOperationTableGet(tableIndex),
+ )
+ case wasm.OpcodeTableSet:
+ c.pc++
+ tableIndex, num, err := leb128.LoadUint32(c.body[c.pc:])
+ if err != nil {
+ return fmt.Errorf("failed to read function index for table.set: %v", err)
+ }
+ c.pc += num - 1
+ c.emit(
+ newOperationTableSet(tableIndex),
+ )
+ case wasm.OpcodeMiscPrefix:
+ c.pc++
+ // A misc opcode is encoded as a variable-length unsigned 32-bit integer (LEB128).
+ miscOp, num, err := leb128.LoadUint32(c.body[c.pc:])
+ if err != nil {
+ return fmt.Errorf("failed to read misc opcode: %v", err)
+ }
+ c.pc += num - 1
+ switch byte(miscOp) {
+ case wasm.OpcodeMiscI32TruncSatF32S:
+ c.emit(
+ newOperationITruncFromF(f32, signedInt32, true),
+ )
+ case wasm.OpcodeMiscI32TruncSatF32U:
+ c.emit(
+ newOperationITruncFromF(f32, signedUint32, true),
+ )
+ case wasm.OpcodeMiscI32TruncSatF64S:
+ c.emit(
+ newOperationITruncFromF(f64, signedInt32, true),
+ )
+ case wasm.OpcodeMiscI32TruncSatF64U:
+ c.emit(
+ newOperationITruncFromF(f64, signedUint32, true),
+ )
+ case wasm.OpcodeMiscI64TruncSatF32S:
+ c.emit(
+ newOperationITruncFromF(f32, signedInt64, true),
+ )
+ case wasm.OpcodeMiscI64TruncSatF32U:
+ c.emit(
+ newOperationITruncFromF(f32, signedUint64, true),
+ )
+ case wasm.OpcodeMiscI64TruncSatF64S:
+ c.emit(
+ newOperationITruncFromF(f64, signedInt64, true),
+ )
+ case wasm.OpcodeMiscI64TruncSatF64U:
+ c.emit(
+ newOperationITruncFromF(f64, signedUint64, true),
+ )
+ case wasm.OpcodeMiscMemoryInit:
+ c.result.UsesMemory = true
+ dataIndex, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i32.const value: %v", err)
+ }
+ c.pc += num + 1 // +1 to skip the memory index which is fixed to zero.
+ c.emit(
+ newOperationMemoryInit(dataIndex),
+ )
+ case wasm.OpcodeMiscDataDrop:
+ dataIndex, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i32.const value: %v", err)
+ }
+ c.pc += num
+ c.emit(
+ newOperationDataDrop(dataIndex),
+ )
+ case wasm.OpcodeMiscMemoryCopy:
+ c.result.UsesMemory = true
+ c.pc += 2 // +2 to skip two memory indexes which are fixed to zero.
+ c.emit(
+ newOperationMemoryCopy(),
+ )
+ case wasm.OpcodeMiscMemoryFill:
+ c.result.UsesMemory = true
+ c.pc += 1 // +1 to skip the memory index which is fixed to zero.
+ c.emit(
+ newOperationMemoryFill(),
+ )
+ case wasm.OpcodeMiscTableInit:
+ elemIndex, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i32.const value: %v", err)
+ }
+ c.pc += num
+ // Read the table index, which is currently fixed to zero.
+ tableIndex, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i32.const value: %v", err)
+ }
+ c.pc += num
+ c.emit(
+ newOperationTableInit(elemIndex, tableIndex),
+ )
+ case wasm.OpcodeMiscElemDrop:
+ elemIndex, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i32.const value: %v", err)
+ }
+ c.pc += num
+ c.emit(
+ newOperationElemDrop(elemIndex),
+ )
+ case wasm.OpcodeMiscTableCopy:
+ // Read the destination table index.
+ dst, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i32.const value: %v", err)
+ }
+ c.pc += num
+ // Read the source table index.
+ src, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i32.const value: %v", err)
+ }
+ c.pc += num
+ c.emit(
+ newOperationTableCopy(src, dst),
+ )
+ case wasm.OpcodeMiscTableGrow:
+ // Read the table index.
+ tableIndex, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i32.const value: %v", err)
+ }
+ c.pc += num
+ c.emit(
+ newOperationTableGrow(tableIndex),
+ )
+ case wasm.OpcodeMiscTableSize:
+ // Read the table index.
+ tableIndex, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i32.const value: %v", err)
+ }
+ c.pc += num
+ c.emit(
+ newOperationTableSize(tableIndex),
+ )
+ case wasm.OpcodeMiscTableFill:
+ // Read the table index.
+ tableIndex, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return fmt.Errorf("reading i32.const value: %v", err)
+ }
+ c.pc += num
+ c.emit(
+ newOperationTableFill(tableIndex),
+ )
+ default:
+ return fmt.Errorf("unsupported misc instruction in interpreterir: 0x%x", op)
+ }
+ case wasm.OpcodeVecPrefix:
+ c.pc++
+ switch vecOp := c.body[c.pc]; vecOp {
+ case wasm.OpcodeVecV128Const:
+ c.pc++
+ lo := binary.LittleEndian.Uint64(c.body[c.pc : c.pc+8])
+ c.pc += 8
+ hi := binary.LittleEndian.Uint64(c.body[c.pc : c.pc+8])
+ c.emit(
+ newOperationV128Const(lo, hi),
+ )
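+ // 7 rather than 8: the main instruction loop advances pc by one more after this instruction.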
+ c.pc += 7
+ case wasm.OpcodeVecV128Load:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128LoadName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType128, arg),
+ )
+ case wasm.OpcodeVecV128Load8x8s:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load8x8SName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType8x8s, arg),
+ )
+ case wasm.OpcodeVecV128Load8x8u:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load8x8UName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType8x8u, arg),
+ )
+ case wasm.OpcodeVecV128Load16x4s:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load16x4SName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType16x4s, arg),
+ )
+ case wasm.OpcodeVecV128Load16x4u:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load16x4UName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType16x4u, arg),
+ )
+ case wasm.OpcodeVecV128Load32x2s:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load32x2SName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType32x2s, arg),
+ )
+ case wasm.OpcodeVecV128Load32x2u:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load32x2UName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType32x2u, arg),
+ )
+ case wasm.OpcodeVecV128Load8Splat:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load8SplatName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType8Splat, arg),
+ )
+ case wasm.OpcodeVecV128Load16Splat:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load16SplatName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType16Splat, arg),
+ )
+ case wasm.OpcodeVecV128Load32Splat:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load32SplatName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType32Splat, arg),
+ )
+ case wasm.OpcodeVecV128Load64Splat:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load64SplatName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType64Splat, arg),
+ )
+ case wasm.OpcodeVecV128Load32zero:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load32zeroName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType32zero, arg),
+ )
+ case wasm.OpcodeVecV128Load64zero:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load64zeroName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Load(v128LoadType64zero, arg),
+ )
+ case wasm.OpcodeVecV128Load8Lane:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load8LaneName)
+ if err != nil {
+ return err
+ }
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128LoadLane(laneIndex, 8, arg),
+ )
+ case wasm.OpcodeVecV128Load16Lane:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load16LaneName)
+ if err != nil {
+ return err
+ }
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128LoadLane(laneIndex, 16, arg),
+ )
+ case wasm.OpcodeVecV128Load32Lane:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load32LaneName)
+ if err != nil {
+ return err
+ }
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128LoadLane(laneIndex, 32, arg),
+ )
+ case wasm.OpcodeVecV128Load64Lane:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Load64LaneName)
+ if err != nil {
+ return err
+ }
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128LoadLane(laneIndex, 64, arg),
+ )
+ case wasm.OpcodeVecV128Store:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128StoreName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationV128Store(arg),
+ )
+ case wasm.OpcodeVecV128Store8Lane:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Store8LaneName)
+ if err != nil {
+ return err
+ }
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128StoreLane(laneIndex, 8, arg),
+ )
+ case wasm.OpcodeVecV128Store16Lane:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Store16LaneName)
+ if err != nil {
+ return err
+ }
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128StoreLane(laneIndex, 16, arg),
+ )
+ case wasm.OpcodeVecV128Store32Lane:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Store32LaneName)
+ if err != nil {
+ return err
+ }
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128StoreLane(laneIndex, 32, arg),
+ )
+ case wasm.OpcodeVecV128Store64Lane:
+ arg, err := c.readMemoryArg(wasm.OpcodeVecV128Store64LaneName)
+ if err != nil {
+ return err
+ }
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128StoreLane(laneIndex, 64, arg),
+ )
+ case wasm.OpcodeVecI8x16ExtractLaneS:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ExtractLane(laneIndex, true, shapeI8x16),
+ )
+ case wasm.OpcodeVecI8x16ExtractLaneU:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ExtractLane(laneIndex, false, shapeI8x16),
+ )
+ case wasm.OpcodeVecI16x8ExtractLaneS:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ExtractLane(laneIndex, true, shapeI16x8),
+ )
+ case wasm.OpcodeVecI16x8ExtractLaneU:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ExtractLane(laneIndex, false, shapeI16x8),
+ )
+ case wasm.OpcodeVecI32x4ExtractLane:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ExtractLane(laneIndex, false, shapeI32x4),
+ )
+ case wasm.OpcodeVecI64x2ExtractLane:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ExtractLane(laneIndex, false, shapeI64x2),
+ )
+ case wasm.OpcodeVecF32x4ExtractLane:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ExtractLane(laneIndex, false, shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2ExtractLane:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ExtractLane(laneIndex, false, shapeF64x2),
+ )
+ case wasm.OpcodeVecI8x16ReplaceLane:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ReplaceLane(laneIndex, shapeI8x16),
+ )
+ case wasm.OpcodeVecI16x8ReplaceLane:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ReplaceLane(laneIndex, shapeI16x8),
+ )
+ case wasm.OpcodeVecI32x4ReplaceLane:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ReplaceLane(laneIndex, shapeI32x4),
+ )
+ case wasm.OpcodeVecI64x2ReplaceLane:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ReplaceLane(laneIndex, shapeI64x2),
+ )
+ case wasm.OpcodeVecF32x4ReplaceLane:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ReplaceLane(laneIndex, shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2ReplaceLane:
+ c.pc++
+ laneIndex := c.body[c.pc]
+ c.emit(
+ newOperationV128ReplaceLane(laneIndex, shapeF64x2),
+ )
+ case wasm.OpcodeVecI8x16Splat:
+ c.emit(
+ newOperationV128Splat(shapeI8x16),
+ )
+ case wasm.OpcodeVecI16x8Splat:
+ c.emit(
+ newOperationV128Splat(shapeI16x8),
+ )
+ case wasm.OpcodeVecI32x4Splat:
+ c.emit(
+ newOperationV128Splat(shapeI32x4),
+ )
+ case wasm.OpcodeVecI64x2Splat:
+ c.emit(
+ newOperationV128Splat(shapeI64x2),
+ )
+ case wasm.OpcodeVecF32x4Splat:
+ c.emit(
+ newOperationV128Splat(shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2Splat:
+ c.emit(
+ newOperationV128Splat(shapeF64x2),
+ )
+ case wasm.OpcodeVecI8x16Swizzle:
+ c.emit(
+ newOperationV128Swizzle(),
+ )
+ case wasm.OpcodeVecV128i8x16Shuffle:
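+ // i8x16.shuffle carries a 16-byte immediate, one lane selector per result lane; advance the pc across all of them.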
+ c.pc++
+ lanes := make([]uint64, 16)
+ for i := uint64(0); i < 16; i++ {
+ lanes[i] = uint64(c.body[c.pc+i])
+ }
+ op := newOperationV128Shuffle(lanes)
+ c.emit(op)
+ c.pc += 15
+ case wasm.OpcodeVecV128AnyTrue:
+ c.emit(
+ newOperationV128AnyTrue(),
+ )
+ case wasm.OpcodeVecI8x16AllTrue:
+ c.emit(
+ newOperationV128AllTrue(shapeI8x16),
+ )
+ case wasm.OpcodeVecI16x8AllTrue:
+ c.emit(
+ newOperationV128AllTrue(shapeI16x8),
+ )
+ case wasm.OpcodeVecI32x4AllTrue:
+ c.emit(
+ newOperationV128AllTrue(shapeI32x4),
+ )
+ case wasm.OpcodeVecI64x2AllTrue:
+ c.emit(
+ newOperationV128AllTrue(shapeI64x2),
+ )
+ case wasm.OpcodeVecI8x16BitMask:
+ c.emit(
+ newOperationV128BitMask(shapeI8x16),
+ )
+ case wasm.OpcodeVecI16x8BitMask:
+ c.emit(
+ newOperationV128BitMask(shapeI16x8),
+ )
+ case wasm.OpcodeVecI32x4BitMask:
+ c.emit(
+ newOperationV128BitMask(shapeI32x4),
+ )
+ case wasm.OpcodeVecI64x2BitMask:
+ c.emit(
+ newOperationV128BitMask(shapeI64x2),
+ )
+ case wasm.OpcodeVecV128And:
+ c.emit(
+ newOperationV128And(),
+ )
+ case wasm.OpcodeVecV128Not:
+ c.emit(
+ newOperationV128Not(),
+ )
+ case wasm.OpcodeVecV128Or:
+ c.emit(
+ newOperationV128Or(),
+ )
+ case wasm.OpcodeVecV128Xor:
+ c.emit(
+ newOperationV128Xor(),
+ )
+ case wasm.OpcodeVecV128Bitselect:
+ c.emit(
+ newOperationV128Bitselect(),
+ )
+ case wasm.OpcodeVecV128AndNot:
+ c.emit(
+ newOperationV128AndNot(),
+ )
+ case wasm.OpcodeVecI8x16Shl:
+ c.emit(
+ newOperationV128Shl(shapeI8x16),
+ )
+ case wasm.OpcodeVecI8x16ShrS:
+ c.emit(
+ newOperationV128Shr(shapeI8x16, true),
+ )
+ case wasm.OpcodeVecI8x16ShrU:
+ c.emit(
+ newOperationV128Shr(shapeI8x16, false),
+ )
+ case wasm.OpcodeVecI16x8Shl:
+ c.emit(
+ newOperationV128Shl(shapeI16x8),
+ )
+ case wasm.OpcodeVecI16x8ShrS:
+ c.emit(
+ newOperationV128Shr(shapeI16x8, true),
+ )
+ case wasm.OpcodeVecI16x8ShrU:
+ c.emit(
+ newOperationV128Shr(shapeI16x8, false),
+ )
+ case wasm.OpcodeVecI32x4Shl:
+ c.emit(
+ newOperationV128Shl(shapeI32x4),
+ )
+ case wasm.OpcodeVecI32x4ShrS:
+ c.emit(
+ newOperationV128Shr(shapeI32x4, true),
+ )
+ case wasm.OpcodeVecI32x4ShrU:
+ c.emit(
+ newOperationV128Shr(shapeI32x4, false),
+ )
+ case wasm.OpcodeVecI64x2Shl:
+ c.emit(
+ newOperationV128Shl(shapeI64x2),
+ )
+ case wasm.OpcodeVecI64x2ShrS:
+ c.emit(
+ newOperationV128Shr(shapeI64x2, true),
+ )
+ case wasm.OpcodeVecI64x2ShrU:
+ c.emit(
+ newOperationV128Shr(shapeI64x2, false),
+ )
+ case wasm.OpcodeVecI8x16Eq:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI8x16Eq),
+ )
+ case wasm.OpcodeVecI8x16Ne:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI8x16Ne),
+ )
+ case wasm.OpcodeVecI8x16LtS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI8x16LtS),
+ )
+ case wasm.OpcodeVecI8x16LtU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI8x16LtU),
+ )
+ case wasm.OpcodeVecI8x16GtS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI8x16GtS),
+ )
+ case wasm.OpcodeVecI8x16GtU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI8x16GtU),
+ )
+ case wasm.OpcodeVecI8x16LeS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI8x16LeS),
+ )
+ case wasm.OpcodeVecI8x16LeU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI8x16LeU),
+ )
+ case wasm.OpcodeVecI8x16GeS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI8x16GeS),
+ )
+ case wasm.OpcodeVecI8x16GeU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI8x16GeU),
+ )
+ case wasm.OpcodeVecI16x8Eq:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI16x8Eq),
+ )
+ case wasm.OpcodeVecI16x8Ne:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI16x8Ne),
+ )
+ case wasm.OpcodeVecI16x8LtS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI16x8LtS),
+ )
+ case wasm.OpcodeVecI16x8LtU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI16x8LtU),
+ )
+ case wasm.OpcodeVecI16x8GtS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI16x8GtS),
+ )
+ case wasm.OpcodeVecI16x8GtU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI16x8GtU),
+ )
+ case wasm.OpcodeVecI16x8LeS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI16x8LeS),
+ )
+ case wasm.OpcodeVecI16x8LeU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI16x8LeU),
+ )
+ case wasm.OpcodeVecI16x8GeS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI16x8GeS),
+ )
+ case wasm.OpcodeVecI16x8GeU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI16x8GeU),
+ )
+ case wasm.OpcodeVecI32x4Eq:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI32x4Eq),
+ )
+ case wasm.OpcodeVecI32x4Ne:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI32x4Ne),
+ )
+ case wasm.OpcodeVecI32x4LtS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI32x4LtS),
+ )
+ case wasm.OpcodeVecI32x4LtU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI32x4LtU),
+ )
+ case wasm.OpcodeVecI32x4GtS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI32x4GtS),
+ )
+ case wasm.OpcodeVecI32x4GtU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI32x4GtU),
+ )
+ case wasm.OpcodeVecI32x4LeS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI32x4LeS),
+ )
+ case wasm.OpcodeVecI32x4LeU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI32x4LeU),
+ )
+ case wasm.OpcodeVecI32x4GeS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI32x4GeS),
+ )
+ case wasm.OpcodeVecI32x4GeU:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI32x4GeU),
+ )
+ case wasm.OpcodeVecI64x2Eq:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI64x2Eq),
+ )
+ case wasm.OpcodeVecI64x2Ne:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI64x2Ne),
+ )
+ case wasm.OpcodeVecI64x2LtS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI64x2LtS),
+ )
+ case wasm.OpcodeVecI64x2GtS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI64x2GtS),
+ )
+ case wasm.OpcodeVecI64x2LeS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI64x2LeS),
+ )
+ case wasm.OpcodeVecI64x2GeS:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeI64x2GeS),
+ )
+ case wasm.OpcodeVecF32x4Eq:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF32x4Eq),
+ )
+ case wasm.OpcodeVecF32x4Ne:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF32x4Ne),
+ )
+ case wasm.OpcodeVecF32x4Lt:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF32x4Lt),
+ )
+ case wasm.OpcodeVecF32x4Gt:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF32x4Gt),
+ )
+ case wasm.OpcodeVecF32x4Le:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF32x4Le),
+ )
+ case wasm.OpcodeVecF32x4Ge:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF32x4Ge),
+ )
+ case wasm.OpcodeVecF64x2Eq:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF64x2Eq),
+ )
+ case wasm.OpcodeVecF64x2Ne:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF64x2Ne),
+ )
+ case wasm.OpcodeVecF64x2Lt:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF64x2Lt),
+ )
+ case wasm.OpcodeVecF64x2Gt:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF64x2Gt),
+ )
+ case wasm.OpcodeVecF64x2Le:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF64x2Le),
+ )
+ case wasm.OpcodeVecF64x2Ge:
+ c.emit(
+ newOperationV128Cmp(v128CmpTypeF64x2Ge),
+ )
+ case wasm.OpcodeVecI8x16Neg:
+ c.emit(
+ newOperationV128Neg(shapeI8x16),
+ )
+ case wasm.OpcodeVecI16x8Neg:
+ c.emit(
+ newOperationV128Neg(shapeI16x8),
+ )
+ case wasm.OpcodeVecI32x4Neg:
+ c.emit(
+ newOperationV128Neg(shapeI32x4),
+ )
+ case wasm.OpcodeVecI64x2Neg:
+ c.emit(
+ newOperationV128Neg(shapeI64x2),
+ )
+ case wasm.OpcodeVecF32x4Neg:
+ c.emit(
+ newOperationV128Neg(shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2Neg:
+ c.emit(
+ newOperationV128Neg(shapeF64x2),
+ )
+ case wasm.OpcodeVecI8x16Add:
+ c.emit(
+ newOperationV128Add(shapeI8x16),
+ )
+ case wasm.OpcodeVecI16x8Add:
+ c.emit(
+ newOperationV128Add(shapeI16x8),
+ )
+ case wasm.OpcodeVecI32x4Add:
+ c.emit(
+ newOperationV128Add(shapeI32x4),
+ )
+ case wasm.OpcodeVecI64x2Add:
+ c.emit(
+ newOperationV128Add(shapeI64x2),
+ )
+ case wasm.OpcodeVecF32x4Add:
+ c.emit(
+ newOperationV128Add(shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2Add:
+ c.emit(
+ newOperationV128Add(shapeF64x2),
+ )
+ case wasm.OpcodeVecI8x16Sub:
+ c.emit(
+ newOperationV128Sub(shapeI8x16),
+ )
+ case wasm.OpcodeVecI16x8Sub:
+ c.emit(
+ newOperationV128Sub(shapeI16x8),
+ )
+ case wasm.OpcodeVecI32x4Sub:
+ c.emit(
+ newOperationV128Sub(shapeI32x4),
+ )
+ case wasm.OpcodeVecI64x2Sub:
+ c.emit(
+ newOperationV128Sub(shapeI64x2),
+ )
+ case wasm.OpcodeVecF32x4Sub:
+ c.emit(
+ newOperationV128Sub(shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2Sub:
+ c.emit(
+ newOperationV128Sub(shapeF64x2),
+ )
+ case wasm.OpcodeVecI8x16AddSatS:
+ c.emit(
+ newOperationV128AddSat(shapeI8x16, true),
+ )
+ case wasm.OpcodeVecI8x16AddSatU:
+ c.emit(
+ newOperationV128AddSat(shapeI8x16, false),
+ )
+ case wasm.OpcodeVecI16x8AddSatS:
+ c.emit(
+ newOperationV128AddSat(shapeI16x8, true),
+ )
+ case wasm.OpcodeVecI16x8AddSatU:
+ c.emit(
+ newOperationV128AddSat(shapeI16x8, false),
+ )
+ case wasm.OpcodeVecI8x16SubSatS:
+ c.emit(
+ newOperationV128SubSat(shapeI8x16, true),
+ )
+ case wasm.OpcodeVecI8x16SubSatU:
+ c.emit(
+ newOperationV128SubSat(shapeI8x16, false),
+ )
+ case wasm.OpcodeVecI16x8SubSatS:
+ c.emit(
+ newOperationV128SubSat(shapeI16x8, true),
+ )
+ case wasm.OpcodeVecI16x8SubSatU:
+ c.emit(
+ newOperationV128SubSat(shapeI16x8, false),
+ )
+ case wasm.OpcodeVecI16x8Mul:
+ c.emit(
+ newOperationV128Mul(shapeI16x8),
+ )
+ case wasm.OpcodeVecI32x4Mul:
+ c.emit(
+ newOperationV128Mul(shapeI32x4),
+ )
+ case wasm.OpcodeVecI64x2Mul:
+ c.emit(
+ newOperationV128Mul(shapeI64x2),
+ )
+ case wasm.OpcodeVecF32x4Mul:
+ c.emit(
+ newOperationV128Mul(shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2Mul:
+ c.emit(
+ newOperationV128Mul(shapeF64x2),
+ )
+ case wasm.OpcodeVecF32x4Sqrt:
+ c.emit(
+ newOperationV128Sqrt(shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2Sqrt:
+ c.emit(
+ newOperationV128Sqrt(shapeF64x2),
+ )
+ case wasm.OpcodeVecF32x4Div:
+ c.emit(
+ newOperationV128Div(shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2Div:
+ c.emit(
+ newOperationV128Div(shapeF64x2),
+ )
+ case wasm.OpcodeVecI8x16Abs:
+ c.emit(
+ newOperationV128Abs(shapeI8x16),
+ )
+ case wasm.OpcodeVecI8x16Popcnt:
+ c.emit(
+ newOperationV128Popcnt(shapeI8x16),
+ )
+ case wasm.OpcodeVecI16x8Abs:
+ c.emit(
+ newOperationV128Abs(shapeI16x8),
+ )
+ case wasm.OpcodeVecI32x4Abs:
+ c.emit(
+ newOperationV128Abs(shapeI32x4),
+ )
+ case wasm.OpcodeVecI64x2Abs:
+ c.emit(
+ newOperationV128Abs(shapeI64x2),
+ )
+ case wasm.OpcodeVecF32x4Abs:
+ c.emit(
+ newOperationV128Abs(shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2Abs:
+ c.emit(
+ newOperationV128Abs(shapeF64x2),
+ )
+ case wasm.OpcodeVecI8x16MinS:
+ c.emit(
+ newOperationV128Min(shapeI8x16, true),
+ )
+ case wasm.OpcodeVecI8x16MinU:
+ c.emit(
+ newOperationV128Min(shapeI8x16, false),
+ )
+ case wasm.OpcodeVecI8x16MaxS:
+ c.emit(
+ newOperationV128Max(shapeI8x16, true),
+ )
+ case wasm.OpcodeVecI8x16MaxU:
+ c.emit(
+ newOperationV128Max(shapeI8x16, false),
+ )
+ case wasm.OpcodeVecI8x16AvgrU:
+ c.emit(
+ newOperationV128AvgrU(shapeI8x16),
+ )
+ case wasm.OpcodeVecI16x8MinS:
+ c.emit(
+ newOperationV128Min(shapeI16x8, true),
+ )
+ case wasm.OpcodeVecI16x8MinU:
+ c.emit(
+ newOperationV128Min(shapeI16x8, false),
+ )
+ case wasm.OpcodeVecI16x8MaxS:
+ c.emit(
+ newOperationV128Max(shapeI16x8, true),
+ )
+ case wasm.OpcodeVecI16x8MaxU:
+ c.emit(
+ newOperationV128Max(shapeI16x8, false),
+ )
+ case wasm.OpcodeVecI16x8AvgrU:
+ c.emit(
+ newOperationV128AvgrU(shapeI16x8),
+ )
+ case wasm.OpcodeVecI32x4MinS:
+ c.emit(
+ newOperationV128Min(shapeI32x4, true),
+ )
+ case wasm.OpcodeVecI32x4MinU:
+ c.emit(
+ newOperationV128Min(shapeI32x4, false),
+ )
+ case wasm.OpcodeVecI32x4MaxS:
+ c.emit(
+ newOperationV128Max(shapeI32x4, true),
+ )
+ case wasm.OpcodeVecI32x4MaxU:
+ c.emit(
+ newOperationV128Max(shapeI32x4, false),
+ )
+ case wasm.OpcodeVecF32x4Min:
+ c.emit(
+ newOperationV128Min(shapeF32x4, false),
+ )
+ case wasm.OpcodeVecF32x4Max:
+ c.emit(
+ newOperationV128Max(shapeF32x4, false),
+ )
+ case wasm.OpcodeVecF64x2Min:
+ c.emit(
+ newOperationV128Min(shapeF64x2, false),
+ )
+ case wasm.OpcodeVecF64x2Max:
+ c.emit(
+ newOperationV128Max(shapeF64x2, false),
+ )
+ case wasm.OpcodeVecF32x4Pmin:
+ c.emit(
+ newOperationV128Pmin(shapeF32x4),
+ )
+ case wasm.OpcodeVecF32x4Pmax:
+ c.emit(
+ newOperationV128Pmax(shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2Pmin:
+ c.emit(
+ newOperationV128Pmin(shapeF64x2),
+ )
+ case wasm.OpcodeVecF64x2Pmax:
+ c.emit(
+ newOperationV128Pmax(shapeF64x2),
+ )
+ case wasm.OpcodeVecF32x4Ceil:
+ c.emit(
+ newOperationV128Ceil(shapeF32x4),
+ )
+ case wasm.OpcodeVecF32x4Floor:
+ c.emit(
+ newOperationV128Floor(shapeF32x4),
+ )
+ case wasm.OpcodeVecF32x4Trunc:
+ c.emit(
+ newOperationV128Trunc(shapeF32x4),
+ )
+ case wasm.OpcodeVecF32x4Nearest:
+ c.emit(
+ newOperationV128Nearest(shapeF32x4),
+ )
+ case wasm.OpcodeVecF64x2Ceil:
+ c.emit(
+ newOperationV128Ceil(shapeF64x2),
+ )
+ case wasm.OpcodeVecF64x2Floor:
+ c.emit(
+ newOperationV128Floor(shapeF64x2),
+ )
+ case wasm.OpcodeVecF64x2Trunc:
+ c.emit(
+ newOperationV128Trunc(shapeF64x2),
+ )
+ case wasm.OpcodeVecF64x2Nearest:
+ c.emit(
+ newOperationV128Nearest(shapeF64x2),
+ )
+ case wasm.OpcodeVecI16x8ExtendLowI8x16S:
+ c.emit(
+ newOperationV128Extend(shapeI8x16, true, true),
+ )
+ case wasm.OpcodeVecI16x8ExtendHighI8x16S:
+ c.emit(
+ newOperationV128Extend(shapeI8x16, true, false),
+ )
+ case wasm.OpcodeVecI16x8ExtendLowI8x16U:
+ c.emit(
+ newOperationV128Extend(shapeI8x16, false, true),
+ )
+ case wasm.OpcodeVecI16x8ExtendHighI8x16U:
+ c.emit(
+ newOperationV128Extend(shapeI8x16, false, false),
+ )
+ case wasm.OpcodeVecI32x4ExtendLowI16x8S:
+ c.emit(
+ newOperationV128Extend(shapeI16x8, true, true),
+ )
+ case wasm.OpcodeVecI32x4ExtendHighI16x8S:
+ c.emit(
+ newOperationV128Extend(shapeI16x8, true, false),
+ )
+ case wasm.OpcodeVecI32x4ExtendLowI16x8U:
+ c.emit(
+ newOperationV128Extend(shapeI16x8, false, true),
+ )
+ case wasm.OpcodeVecI32x4ExtendHighI16x8U:
+ c.emit(
+ newOperationV128Extend(shapeI16x8, false, false),
+ )
+ case wasm.OpcodeVecI64x2ExtendLowI32x4S:
+ c.emit(
+ newOperationV128Extend(shapeI32x4, true, true),
+ )
+ case wasm.OpcodeVecI64x2ExtendHighI32x4S:
+ c.emit(
+ newOperationV128Extend(shapeI32x4, true, false),
+ )
+ case wasm.OpcodeVecI64x2ExtendLowI32x4U:
+ c.emit(
+ newOperationV128Extend(shapeI32x4, false, true),
+ )
+ case wasm.OpcodeVecI64x2ExtendHighI32x4U:
+ c.emit(
+ newOperationV128Extend(shapeI32x4, false, false),
+ )
+ case wasm.OpcodeVecI16x8Q15mulrSatS:
+ c.emit(
+ newOperationV128Q15mulrSatS(),
+ )
+ case wasm.OpcodeVecI16x8ExtMulLowI8x16S:
+ c.emit(
+ newOperationV128ExtMul(shapeI8x16, true, true),
+ )
+ case wasm.OpcodeVecI16x8ExtMulHighI8x16S:
+ c.emit(
+ newOperationV128ExtMul(shapeI8x16, true, false),
+ )
+ case wasm.OpcodeVecI16x8ExtMulLowI8x16U:
+ c.emit(
+ newOperationV128ExtMul(shapeI8x16, false, true),
+ )
+ case wasm.OpcodeVecI16x8ExtMulHighI8x16U:
+ c.emit(
+ newOperationV128ExtMul(shapeI8x16, false, false),
+ )
+ case wasm.OpcodeVecI32x4ExtMulLowI16x8S:
+ c.emit(
+ newOperationV128ExtMul(shapeI16x8, true, true),
+ )
+ case wasm.OpcodeVecI32x4ExtMulHighI16x8S:
+ c.emit(
+ newOperationV128ExtMul(shapeI16x8, true, false),
+ )
+ case wasm.OpcodeVecI32x4ExtMulLowI16x8U:
+ c.emit(
+ newOperationV128ExtMul(shapeI16x8, false, true),
+ )
+ case wasm.OpcodeVecI32x4ExtMulHighI16x8U:
+ c.emit(
+ newOperationV128ExtMul(shapeI16x8, false, false),
+ )
+ case wasm.OpcodeVecI64x2ExtMulLowI32x4S:
+ c.emit(
+ newOperationV128ExtMul(shapeI32x4, true, true),
+ )
+ case wasm.OpcodeVecI64x2ExtMulHighI32x4S:
+ c.emit(
+ newOperationV128ExtMul(shapeI32x4, true, false),
+ )
+ case wasm.OpcodeVecI64x2ExtMulLowI32x4U:
+ c.emit(
+ newOperationV128ExtMul(shapeI32x4, false, true),
+ )
+ case wasm.OpcodeVecI64x2ExtMulHighI32x4U:
+ c.emit(
+ newOperationV128ExtMul(shapeI32x4, false, false),
+ )
+ case wasm.OpcodeVecI16x8ExtaddPairwiseI8x16S:
+ c.emit(
+ newOperationV128ExtAddPairwise(shapeI8x16, true),
+ )
+ case wasm.OpcodeVecI16x8ExtaddPairwiseI8x16U:
+ c.emit(
+ newOperationV128ExtAddPairwise(shapeI8x16, false),
+ )
+ case wasm.OpcodeVecI32x4ExtaddPairwiseI16x8S:
+ c.emit(
+ newOperationV128ExtAddPairwise(shapeI16x8, true),
+ )
+ case wasm.OpcodeVecI32x4ExtaddPairwiseI16x8U:
+ c.emit(
+ newOperationV128ExtAddPairwise(shapeI16x8, false),
+ )
+ case wasm.OpcodeVecF64x2PromoteLowF32x4Zero:
+ c.emit(
+ newOperationV128FloatPromote(),
+ )
+ case wasm.OpcodeVecF32x4DemoteF64x2Zero:
+ c.emit(
+ newOperationV128FloatDemote(),
+ )
+ case wasm.OpcodeVecF32x4ConvertI32x4S:
+ c.emit(
+ newOperationV128FConvertFromI(shapeF32x4, true),
+ )
+ case wasm.OpcodeVecF32x4ConvertI32x4U:
+ c.emit(
+ newOperationV128FConvertFromI(shapeF32x4, false),
+ )
+ case wasm.OpcodeVecF64x2ConvertLowI32x4S:
+ c.emit(
+ newOperationV128FConvertFromI(shapeF64x2, true),
+ )
+ case wasm.OpcodeVecF64x2ConvertLowI32x4U:
+ c.emit(
+ newOperationV128FConvertFromI(shapeF64x2, false),
+ )
+ case wasm.OpcodeVecI32x4DotI16x8S:
+ c.emit(
+ newOperationV128Dot(),
+ )
+ case wasm.OpcodeVecI8x16NarrowI16x8S:
+ c.emit(
+ newOperationV128Narrow(shapeI16x8, true),
+ )
+ case wasm.OpcodeVecI8x16NarrowI16x8U:
+ c.emit(
+ newOperationV128Narrow(shapeI16x8, false),
+ )
+ case wasm.OpcodeVecI16x8NarrowI32x4S:
+ c.emit(
+ newOperationV128Narrow(shapeI32x4, true),
+ )
+ case wasm.OpcodeVecI16x8NarrowI32x4U:
+ c.emit(
+ newOperationV128Narrow(shapeI32x4, false),
+ )
+ case wasm.OpcodeVecI32x4TruncSatF32x4S:
+ c.emit(
+ newOperationV128ITruncSatFromF(shapeF32x4, true),
+ )
+ case wasm.OpcodeVecI32x4TruncSatF32x4U:
+ c.emit(
+ newOperationV128ITruncSatFromF(shapeF32x4, false),
+ )
+ case wasm.OpcodeVecI32x4TruncSatF64x2SZero:
+ c.emit(
+ newOperationV128ITruncSatFromF(shapeF64x2, true),
+ )
+ case wasm.OpcodeVecI32x4TruncSatF64x2UZero:
+ c.emit(
+ newOperationV128ITruncSatFromF(shapeF64x2, false),
+ )
+ default:
+ return fmt.Errorf("unsupported vector instruction in interpreterir: %s", wasm.VectorInstructionName(vecOp))
+ }
+ case wasm.OpcodeAtomicPrefix:
+ c.pc++
+ atomicOp := c.body[c.pc]
+ switch atomicOp {
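+ // Each atomic opcode below decodes a memarg immediate (except fence) and lowers to a typed
+ // atomic operation; the 8/16/32-bit "U" variants zero-extend into the target integer type.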
+ case wasm.OpcodeAtomicMemoryWait32:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicMemoryWait32Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicMemoryWait(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicMemoryWait64:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicMemoryWait64Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicMemoryWait(unsignedTypeI64, imm),
+ )
+ case wasm.OpcodeAtomicMemoryNotify:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicMemoryNotifyName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicMemoryNotify(imm),
+ )
+ case wasm.OpcodeAtomicFence:
+ // Skip the immediate (a single reserved byte, required to be zero by the threads proposal).
+ c.pc++
+ _ = c.body[c.pc]
+ c.emit(
+ newOperationAtomicFence(),
+ )
+ case wasm.OpcodeAtomicI32Load:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32LoadName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicLoad(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicI64Load:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64LoadName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicLoad(unsignedTypeI64, imm),
+ )
+ case wasm.OpcodeAtomicI32Load8U:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Load8UName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicLoad8(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicI32Load16U:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Load16UName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicLoad16(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicI64Load8U:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Load8UName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicLoad8(unsignedTypeI64, imm),
+ )
+ case wasm.OpcodeAtomicI64Load16U:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Load16UName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicLoad16(unsignedTypeI64, imm),
+ )
+ case wasm.OpcodeAtomicI64Load32U:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Load32UName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicLoad(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicI32Store:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32StoreName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicStore(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicI32Store8:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Store8Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicStore8(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicI32Store16:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Store16Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicStore16(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicI64Store:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64StoreName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicStore(unsignedTypeI64, imm),
+ )
+ case wasm.OpcodeAtomicI64Store8:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Store8Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicStore8(unsignedTypeI64, imm),
+ )
+ case wasm.OpcodeAtomicI64Store16:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Store16Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicStore16(unsignedTypeI64, imm),
+ )
+ case wasm.OpcodeAtomicI64Store32:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Store32Name)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicStore(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicI32RmwAdd:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwAddName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpAdd),
+ )
+ case wasm.OpcodeAtomicI64RmwAdd:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwAddName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI64, imm, atomicArithmeticOpAdd),
+ )
+ case wasm.OpcodeAtomicI32Rmw8AddU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8AddUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI32, imm, atomicArithmeticOpAdd),
+ )
+ case wasm.OpcodeAtomicI64Rmw8AddU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8AddUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI64, imm, atomicArithmeticOpAdd),
+ )
+ case wasm.OpcodeAtomicI32Rmw16AddU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16AddUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI32, imm, atomicArithmeticOpAdd),
+ )
+ case wasm.OpcodeAtomicI64Rmw16AddU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16AddUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI64, imm, atomicArithmeticOpAdd),
+ )
+ case wasm.OpcodeAtomicI64Rmw32AddU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32AddUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpAdd),
+ )
+ case wasm.OpcodeAtomicI32RmwSub:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwSubName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpSub),
+ )
+ case wasm.OpcodeAtomicI64RmwSub:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwSubName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI64, imm, atomicArithmeticOpSub),
+ )
+ case wasm.OpcodeAtomicI32Rmw8SubU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8SubUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI32, imm, atomicArithmeticOpSub),
+ )
+ case wasm.OpcodeAtomicI64Rmw8SubU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8SubUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI64, imm, atomicArithmeticOpSub),
+ )
+ case wasm.OpcodeAtomicI32Rmw16SubU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16SubUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI32, imm, atomicArithmeticOpSub),
+ )
+ case wasm.OpcodeAtomicI64Rmw16SubU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16SubUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI64, imm, atomicArithmeticOpSub),
+ )
+ case wasm.OpcodeAtomicI64Rmw32SubU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32SubUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpSub),
+ )
+ case wasm.OpcodeAtomicI32RmwAnd:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwAndName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpAnd),
+ )
+ case wasm.OpcodeAtomicI64RmwAnd:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwAndName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI64, imm, atomicArithmeticOpAnd),
+ )
+ case wasm.OpcodeAtomicI32Rmw8AndU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8AndUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI32, imm, atomicArithmeticOpAnd),
+ )
+ case wasm.OpcodeAtomicI64Rmw8AndU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8AndUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI64, imm, atomicArithmeticOpAnd),
+ )
+ case wasm.OpcodeAtomicI32Rmw16AndU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16AndUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI32, imm, atomicArithmeticOpAnd),
+ )
+ case wasm.OpcodeAtomicI64Rmw16AndU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16AndUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI64, imm, atomicArithmeticOpAnd),
+ )
+ case wasm.OpcodeAtomicI64Rmw32AndU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32AndUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpAnd),
+ )
+ case wasm.OpcodeAtomicI32RmwOr:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwOrName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpOr),
+ )
+ case wasm.OpcodeAtomicI64RmwOr:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwOrName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI64, imm, atomicArithmeticOpOr),
+ )
+ case wasm.OpcodeAtomicI32Rmw8OrU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8OrUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI32, imm, atomicArithmeticOpOr),
+ )
+ case wasm.OpcodeAtomicI64Rmw8OrU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8OrUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI64, imm, atomicArithmeticOpOr),
+ )
+ case wasm.OpcodeAtomicI32Rmw16OrU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16OrUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI32, imm, atomicArithmeticOpOr),
+ )
+ case wasm.OpcodeAtomicI64Rmw16OrU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16OrUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI64, imm, atomicArithmeticOpOr),
+ )
+ case wasm.OpcodeAtomicI64Rmw32OrU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32OrUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpOr),
+ )
+ case wasm.OpcodeAtomicI32RmwXor:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwXorName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpXor),
+ )
+ case wasm.OpcodeAtomicI64RmwXor:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwXorName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI64, imm, atomicArithmeticOpXor),
+ )
+ case wasm.OpcodeAtomicI32Rmw8XorU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8XorUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI32, imm, atomicArithmeticOpXor),
+ )
+ case wasm.OpcodeAtomicI64Rmw8XorU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8XorUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI64, imm, atomicArithmeticOpXor),
+ )
+ case wasm.OpcodeAtomicI32Rmw16XorU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16XorUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI32, imm, atomicArithmeticOpXor),
+ )
+ case wasm.OpcodeAtomicI64Rmw16XorU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16XorUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI64, imm, atomicArithmeticOpXor),
+ )
+ case wasm.OpcodeAtomicI64Rmw32XorU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32XorUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpXor),
+ )
+ case wasm.OpcodeAtomicI32RmwXchg:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwXchgName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpNop),
+ )
+ case wasm.OpcodeAtomicI64RmwXchg:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwXchgName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI64, imm, atomicArithmeticOpNop),
+ )
+ case wasm.OpcodeAtomicI32Rmw8XchgU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8XchgUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI32, imm, atomicArithmeticOpNop),
+ )
+ case wasm.OpcodeAtomicI64Rmw8XchgU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8XchgUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8(unsignedTypeI64, imm, atomicArithmeticOpNop),
+ )
+ case wasm.OpcodeAtomicI32Rmw16XchgU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16XchgUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI32, imm, atomicArithmeticOpNop),
+ )
+ case wasm.OpcodeAtomicI64Rmw16XchgU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16XchgUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16(unsignedTypeI64, imm, atomicArithmeticOpNop),
+ )
+ case wasm.OpcodeAtomicI64Rmw32XchgU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32XchgUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW(unsignedTypeI32, imm, atomicArithmeticOpNop),
+ )
+ case wasm.OpcodeAtomicI32RmwCmpxchg:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32RmwCmpxchgName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMWCmpxchg(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicI64RmwCmpxchg:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64RmwCmpxchgName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMWCmpxchg(unsignedTypeI64, imm),
+ )
+ case wasm.OpcodeAtomicI32Rmw8CmpxchgU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw8CmpxchgUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8Cmpxchg(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicI64Rmw8CmpxchgU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw8CmpxchgUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW8Cmpxchg(unsignedTypeI64, imm),
+ )
+ case wasm.OpcodeAtomicI32Rmw16CmpxchgU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI32Rmw16CmpxchgUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16Cmpxchg(unsignedTypeI32, imm),
+ )
+ case wasm.OpcodeAtomicI64Rmw16CmpxchgU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw16CmpxchgUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMW16Cmpxchg(unsignedTypeI64, imm),
+ )
+ case wasm.OpcodeAtomicI64Rmw32CmpxchgU:
+ imm, err := c.readMemoryArg(wasm.OpcodeAtomicI64Rmw32CmpxchgUName)
+ if err != nil {
+ return err
+ }
+ c.emit(
+ newOperationAtomicRMWCmpxchg(unsignedTypeI32, imm),
+ )
+ default:
+ return fmt.Errorf("unsupported atomic instruction in interpreterir: %s", wasm.AtomicInstructionName(atomicOp))
+ }
+ default:
+ return fmt.Errorf("unsupported instruction in interpreterir: 0x%x", op)
+ }
+
+ // Move the program counter to point to the next instruction.
+ c.pc++
+ return nil
+}
+
+func (c *compiler) nextFrameID() (id uint32) {
+ id = c.currentFrameID + 1
+ c.currentFrameID++
+ return
+}
+
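+// applyToStack reads the index immediate (if any) for the given opcode and
+// simulates the opcode's effect on the type stack, returning the index read.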
+func (c *compiler) applyToStack(opcode wasm.Opcode) (index uint32, err error) {
+ switch opcode {
+ case
+ // These are the opcodes that are coupled with an "index" immediate,
+ // and the immediate DOES affect the signature of the opcode.
+ wasm.OpcodeCall,
+ wasm.OpcodeCallIndirect,
+ wasm.OpcodeLocalGet,
+ wasm.OpcodeLocalSet,
+ wasm.OpcodeLocalTee,
+ wasm.OpcodeGlobalGet,
+ wasm.OpcodeGlobalSet:
+ // Assumes that c.pc points at the opcode now, so skip it before reading the immediate.
+ v, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return 0, fmt.Errorf("reading immediates: %w", err)
+ }
+ c.pc += num
+ index = v
+ default:
+ // Other opcodes either carry no index immediate, or the index
+ // doesn't affect the opcode's signature. In other words, the
+ // "index" argument of wasmOpcodeSignature is ignored for them.
+ }
+
+ if c.unreachableState.on {
+ return 0, nil
+ }
+
+ // Retrieve the signature of the opcode.
+ s, err := c.wasmOpcodeSignature(opcode, index)
+ if err != nil {
+ return 0, err
+ }
+
+ // Manipulate the stack according to the signature.
+ // Note that the following algorithm assumes that
+ // the unknown type is unique in the signature,
+ // and is determined by the actual type on the stack.
+ // The determined type is stored in this typeParam.
+ var typeParam unsignedType
+ var typeParamFound bool
+ for i := range s.in {
+ want := s.in[len(s.in)-1-i]
+ actual := c.stackPop()
+ if want == unsignedTypeUnknown && typeParamFound {
+ want = typeParam
+ } else if want == unsignedTypeUnknown {
+ want = actual
+ typeParam = want
+ typeParamFound = true
+ }
+ if want != actual {
+ return 0, fmt.Errorf("input signature mismatch: want %s but have %s", want, actual)
+ }
+ }
+
+ for _, target := range s.out {
+ if target == unsignedTypeUnknown && !typeParamFound {
+ return 0, fmt.Errorf("cannot determine type of unknown result")
+ } else if target == unsignedTypeUnknown {
+ c.stackPush(typeParam)
+ } else {
+ c.stackPush(target)
+ }
+ }
+
+ return index, nil
+}
+
+func (c *compiler) stackPeek() (ret unsignedType) {
+ ret = c.stack[len(c.stack)-1]
+ return
+}
+
+func (c *compiler) stackPop() (ret unsignedType) {
+ // No need to check stack bound
+ // as we can assume that all the operations
+ // are valid thanks to validateFunction
+ // at module validation phase.
+ ret = c.stack[len(c.stack)-1]
+ c.stack = c.stack[:len(c.stack)-1]
+ return
+}
+
+func (c *compiler) stackPush(ts unsignedType) {
+ c.stack = append(c.stack, ts)
+}
+
+// emit adds the operations into the result.
+func (c *compiler) emit(op unionOperation) {
+ if !c.unreachableState.on {
+ switch op.Kind {
+ case operationKindDrop:
+ // If the drop range is nil (encoded as -1), the operation can be removed entirely.
+ // That happens when the drop is unnecessary,
+ // i.e. when there's no need to adjust the stack before a jump.
+ if int64(op.U1) == -1 {
+ return
+ }
+ }
+ c.result.Operations = append(c.result.Operations, op)
+ if c.needSourceOffset {
+ c.result.IROperationSourceOffsetsInWasmBinary = append(c.result.IROperationSourceOffsetsInWasmBinary,
+ c.currentOpPC+c.bodyOffsetInCodeSection)
+ }
+ }
+}
+
+// emitDefaultValue emits a constant operation holding the zero value of the given type, and pushes the type onto the type stack.
+func (c *compiler) emitDefaultValue(t wasm.ValueType) {
+ switch t {
+ case wasm.ValueTypeI32:
+ c.stackPush(unsignedTypeI32)
+ c.emit(newOperationConstI32(0))
+ case wasm.ValueTypeI64, wasm.ValueTypeExternref, wasm.ValueTypeFuncref:
+ c.stackPush(unsignedTypeI64)
+ c.emit(newOperationConstI64(0))
+ case wasm.ValueTypeF32:
+ c.stackPush(unsignedTypeF32)
+ c.emit(newOperationConstF32(0))
+ case wasm.ValueTypeF64:
+ c.stackPush(unsignedTypeF64)
+ c.emit(newOperationConstF64(0))
+ case wasm.ValueTypeV128:
+ c.stackPush(unsignedTypeV128)
+ c.emit(newOperationV128Const(0, 0))
+ }
+}
+
+// Returns the "depth" (starting from top of the stack)
+// of the n-th local.
+func (c *compiler) localDepth(index wasm.Index) int {
+ height := c.localIndexToStackHeightInUint64[index]
+ return c.stackLenInUint64(len(c.stack)) - 1 - int(height)
+}
+
+func (c *compiler) localType(index wasm.Index) (t wasm.ValueType) {
+ if params := uint32(len(c.sig.Params)); index < params {
+ t = c.sig.Params[index]
+ } else {
+ t = c.localTypes[index-params]
+ }
+ return
+}
+
+// getFrameDropRange returns the range (starting from the top of the stack) that spans across the (uint64) stack. The range is
+// supposed to be dropped from the stack when exiting the given frame or branching into it.
+//
+// * frame is the control frame which the call-site is trying to branch into or exit.
+// * isEnd is true if the call-site is handling wasm.OpcodeEnd.
+func (c *compiler) getFrameDropRange(frame *controlFrame, isEnd bool) inclusiveRange {
+ var start int
+ if !isEnd && frame.kind == controlFrameKindLoop {
+ // If this is not End and the call-site is trying to branch into the Loop control frame,
+ // we have to Start executing from the beginning of the loop block.
+ // Therefore, we have to pass the inputs to the frame.
+ start = frame.blockType.ParamNumInUint64
+ } else {
+ start = frame.blockType.ResultNumInUint64
+ }
+ var end int
+ if frame.kind == controlFrameKindFunction {
+ // On function return, we eliminate all the contents on the stack,
+ // including locals (which live below frame.originalStackLen).
+ end = c.stackLenInUint64(len(c.stack)) - 1
+ } else {
+ end = c.stackLenInUint64(len(c.stack)) - 1 - c.stackLenInUint64(frame.originalStackLenWithoutParam)
+ }
+ if start <= end {
+ return inclusiveRange{Start: int32(start), End: int32(end)}
+ } else {
+ return nopinclusiveRange
+ }
+}
+
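+// stackLenInUint64 returns the number of uint64 slots occupied by the first `ceil`
+// entries of the type stack; a v128 value occupies two slots.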
+func (c *compiler) stackLenInUint64(ceil int) (ret int) {
+ for i := 0; i < ceil; i++ {
+ if c.stack[i] == unsignedTypeV128 {
+ ret += 2
+ } else {
+ ret++
+ }
+ }
+ return
+}
+
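+// readMemoryArg decodes the memarg immediate that follows the current opcode:
+// a LEB128-encoded alignment followed by a LEB128-encoded offset.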
+func (c *compiler) readMemoryArg(tag string) (memoryArg, error) {
+ c.result.UsesMemory = true
+ alignment, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return memoryArg{}, fmt.Errorf("reading alignment for %s: %w", tag, err)
+ }
+ c.pc += num
+ offset, num, err := leb128.LoadUint32(c.body[c.pc+1:])
+ if err != nil {
+ return memoryArg{}, fmt.Errorf("reading offset for %s: %w", tag, err)
+ }
+ c.pc += num
+ return memoryArg{Offset: offset, Alignment: alignment}, nil
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/format.go b/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/format.go
new file mode 100644
index 000000000..8af1d94b0
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/format.go
@@ -0,0 +1,22 @@
+package interpreter
+
+import (
+ "bytes"
+)
+
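+// format renders the operations in a human-readable, assembly-like form,
+// indenting every operation except labels.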
+func format(ops []unionOperation) string {
+ buf := bytes.NewBuffer(nil)
+
+ _, _ = buf.WriteString(".entrypoint\n")
+ for i := range ops {
+ op := &ops[i]
+ str := op.String()
+ isLabel := op.Kind == operationKindLabel
+ if !isLabel {
+ const indent = "\t"
+ str = indent + str
+ }
+ _, _ = buf.WriteString(str + "\n")
+ }
+ return buf.String()
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/interpreter.go b/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/interpreter.go
new file mode 100644
index 000000000..a89ddc457
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/interpreter.go
@@ -0,0 +1,4583 @@
+package interpreter
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "math/bits"
+ "sync"
+ "unsafe"
+
+ "github.com/tetratelabs/wazero/api"
+ "github.com/tetratelabs/wazero/experimental"
+ "github.com/tetratelabs/wazero/internal/expctxkeys"
+ "github.com/tetratelabs/wazero/internal/filecache"
+ "github.com/tetratelabs/wazero/internal/internalapi"
+ "github.com/tetratelabs/wazero/internal/moremath"
+ "github.com/tetratelabs/wazero/internal/wasm"
+ "github.com/tetratelabs/wazero/internal/wasmdebug"
+ "github.com/tetratelabs/wazero/internal/wasmruntime"
+)
+
+// callStackCeiling is the maximum WebAssembly call frame stack height. This allows wazero to raise
+// wasm.ErrCallStackOverflow instead of overflowing the Go runtime.
+//
+// The default value should suffice for most use cases. Those wishing to change this can do so via `go build -ldflags`.
+var callStackCeiling = 2000
+
+// engine is an interpreter implementation of wasm.Engine
+type engine struct {
+ enabledFeatures api.CoreFeatures
+ compiledFunctions map[wasm.ModuleID][]compiledFunction // guarded by mutex.
+ mux sync.RWMutex
+}
+
+func NewEngine(_ context.Context, enabledFeatures api.CoreFeatures, _ filecache.Cache) wasm.Engine {
+ return &engine{
+ enabledFeatures: enabledFeatures,
+ compiledFunctions: map[wasm.ModuleID][]compiledFunction{},
+ }
+}
+
+// Close implements the same method as documented on wasm.Engine.
+func (e *engine) Close() (err error) {
+ return
+}
+
+// CompiledModuleCount implements the same method as documented on wasm.Engine.
+func (e *engine) CompiledModuleCount() uint32 {
+ return uint32(len(e.compiledFunctions))
+}
+
+// DeleteCompiledModule implements the same method as documented on wasm.Engine.
+func (e *engine) DeleteCompiledModule(m *wasm.Module) {
+ e.deleteCompiledFunctions(m)
+}
+
+func (e *engine) deleteCompiledFunctions(module *wasm.Module) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+ delete(e.compiledFunctions, module.ID)
+}
+
+func (e *engine) addCompiledFunctions(module *wasm.Module, fs []compiledFunction) {
+ e.mux.Lock()
+ defer e.mux.Unlock()
+ e.compiledFunctions[module.ID] = fs
+}
+
+func (e *engine) getCompiledFunctions(module *wasm.Module) (fs []compiledFunction, ok bool) {
+ e.mux.RLock()
+ defer e.mux.RUnlock()
+ fs, ok = e.compiledFunctions[module.ID]
+ return
+}
+
+// moduleEngine implements wasm.ModuleEngine
+type moduleEngine struct {
+ // functions are the compiled functions of a module instance.
+ // The index is module instance-scoped.
+ functions []function
+
+ // parentEngine holds the *engine from which this module engine was created.
+ parentEngine *engine
+}
+
+// GetGlobalValue implements the same method as documented on wasm.ModuleEngine.
+func (e *moduleEngine) GetGlobalValue(wasm.Index) (lo, hi uint64) {
+ panic("BUG: GetGlobalValue should never be called on interpreter mode")
+}
+
+// SetGlobalValue implements the same method as documented on wasm.ModuleEngine.
+func (e *moduleEngine) SetGlobalValue(idx wasm.Index, lo, hi uint64) {
+ panic("BUG: SetGlobalValue should never be called on interpreter mode")
+}
+
+// OwnsGlobals implements the same method as documented on wasm.ModuleEngine.
+func (e *moduleEngine) OwnsGlobals() bool { return false }
+
+// callEngine holds the context for a single moduleEngine.Call, shared across all the
+// function calls originating from the same moduleEngine.Call execution.
+//
+// This implements api.Function.
+type callEngine struct {
+ internalapi.WazeroOnlyType
+
+ // stack contains the operands.
+ // Note that all the values are represented as uint64.
+ stack []uint64
+
+ // frames are the function call stack.
+ frames []*callFrame
+
+ // f is the initial function for this call engine.
+ f *function
+
+ // stackIterator is used by function listeners to walk the frames and the stack.
+ stackIterator stackIterator
+}
+
+func (e *moduleEngine) newCallEngine(compiled *function) *callEngine {
+ return &callEngine{f: compiled}
+}
+
+func (ce *callEngine) pushValue(v uint64) {
+ ce.stack = append(ce.stack, v)
+}
+
+func (ce *callEngine) pushValues(v []uint64) {
+ ce.stack = append(ce.stack, v...)
+}
+
+func (ce *callEngine) popValue() (v uint64) {
+ // No need to check stack bound
+ // as we can assume that all the operations
+ // are valid thanks to validateFunction
+ // at module validation phase
+ // and interpreterir translation
+ // before compilation.
+ stackTopIndex := len(ce.stack) - 1
+ v = ce.stack[stackTopIndex]
+ ce.stack = ce.stack[:stackTopIndex]
+ return
+}
+
+func (ce *callEngine) popValues(v []uint64) {
+ stackTopIndex := len(ce.stack) - len(v)
+ copy(v, ce.stack[stackTopIndex:])
+ ce.stack = ce.stack[:stackTopIndex]
+}
+
+// peekValues returns a view of the top `count` values on the stack without popping them.
+func (ce *callEngine) peekValues(count int) []uint64 {
+ if count == 0 {
+ return nil
+ }
+ stackLen := len(ce.stack)
+ return ce.stack[stackLen-count : stackLen]
+}
+
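+// drop removes the inclusive range decoded from raw (measured from the top of the
+// stack) from the value stack; a Start of -1 means there is nothing to drop.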
+func (ce *callEngine) drop(raw uint64) {
+ r := inclusiveRangeFromU64(raw)
+ if r.Start == -1 {
+ return
+ } else if r.Start == 0 {
+ ce.stack = ce.stack[:int32(len(ce.stack))-1-r.End]
+ } else {
+ newStack := ce.stack[:int32(len(ce.stack))-1-r.End]
+ newStack = append(newStack, ce.stack[int32(len(ce.stack))-r.Start:]...)
+ ce.stack = newStack
+ }
+}
+
+func (ce *callEngine) pushFrame(frame *callFrame) {
+ if callStackCeiling <= len(ce.frames) {
+ panic(wasmruntime.ErrRuntimeStackOverflow)
+ }
+ ce.frames = append(ce.frames, frame)
+}
+
+func (ce *callEngine) popFrame() (frame *callFrame) {
+ // No need to check stack bound as we can assume that all the operations are valid thanks to validateFunction at
+ // module validation phase and interpreterir translation before compilation.
+ oneLess := len(ce.frames) - 1
+ frame = ce.frames[oneLess]
+ ce.frames = ce.frames[:oneLess]
+ return
+}
+
+type callFrame struct {
+ // pc is the program counter representing the current position in code.body.
+ pc uint64
+ // f is the compiled function used in this function frame.
+ f *function
+ // base is the index in the value stack at which this function's frame begins,
+ // used to determine how many values on the stack belong to this frame.
+ base int
+}
+
+type compiledFunction struct {
+ source *wasm.Module
+ body []unionOperation
+ listener experimental.FunctionListener
+ offsetsInWasmBinary []uint64
+ hostFn interface{}
+ ensureTermination bool
+ index wasm.Index
+}
+
+type function struct {
+ funcType *wasm.FunctionType
+ moduleInstance *wasm.ModuleInstance
+ typeID wasm.FunctionTypeID
+ parent *compiledFunction
+}
+
+// functionFromUintptr resurrects the original *function from the given uintptr,
+// which comes from either a funcref table or an OpcodeRefFunc instruction.
+func functionFromUintptr(ptr uintptr) *function {
+ // Wrap the pointer as a double pointer in order to avoid the unsafe access being flagged by the race detector.
+ //
+ // For example, if we had (*function)(unsafe.Pointer(ptr)) instead, the race detector's "checkptr"
+ // subroutine warns with "checkptr: pointer arithmetic result points to invalid allocation"
+ // https://github.com/golang/go/blob/1ce7fcf139417d618c2730010ede2afb41664211/src/runtime/checkptr.go#L69
+ var wrapped *uintptr = &ptr
+ return *(**function)(unsafe.Pointer(wrapped))
+}
+
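+// snapshot holds a copy of the callEngine state (value stack, call frames and pc)
+// captured by Snapshot, so that execution can later be rewound via Restore.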
+type snapshot struct {
+ stack []uint64
+ frames []*callFrame
+ pc uint64
+
+ ret []uint64
+
+ ce *callEngine
+}
+
+// Snapshot implements the same method as documented on experimental.Snapshotter.
+func (ce *callEngine) Snapshot() experimental.Snapshot {
+ stack := make([]uint64, len(ce.stack))
+ copy(stack, ce.stack)
+
+ frames := make([]*callFrame, len(ce.frames))
+ copy(frames, ce.frames)
+
+ return &snapshot{
+ stack: stack,
+ frames: frames,
+ ce: ce,
+ }
+}
+
+// Restore implements the same method as documented on experimental.Snapshot.
+func (s *snapshot) Restore(ret []uint64) {
+ s.ret = ret
+ panic(s)
+}
+
+func (s *snapshot) doRestore() {
+ ce := s.ce
+
+ ce.stack = s.stack
+ ce.frames = s.frames
+ ce.frames[len(ce.frames)-1].pc = s.pc
+
+ copy(ce.stack[len(ce.stack)-len(s.ret):], s.ret)
+}
+
+// Error implements the same method on error.
+func (s *snapshot) Error() string {
+ return "unhandled snapshot restore, this generally indicates restore was called from a different " +
+ "exported function invocation than snapshot"
+}
+
+// stackIterator implements experimental.StackIterator.
+type stackIterator struct {
+ stack []uint64
+ frames []*callFrame
+ started bool
+ fn *function
+ pc uint64
+}
+
+func (si *stackIterator) reset(stack []uint64, frames []*callFrame, f *function) {
+ si.fn = f
+ si.pc = 0
+ si.stack = stack
+ si.frames = frames
+ si.started = false
+}
+
+func (si *stackIterator) clear() {
+ si.stack = nil
+ si.frames = nil
+ si.started = false
+ si.fn = nil
+}
+
+// Next implements the same method as documented on experimental.StackIterator.
+func (si *stackIterator) Next() bool {
+ if !si.started {
+ si.started = true
+ return true
+ }
+
+ if len(si.frames) == 0 {
+ return false
+ }
+
+ frame := si.frames[len(si.frames)-1]
+ si.stack = si.stack[:frame.base]
+ si.fn = frame.f
+ si.pc = frame.pc
+ si.frames = si.frames[:len(si.frames)-1]
+ return true
+}
+
+// Function implements the same method as documented on
+// experimental.StackIterator.
+func (si *stackIterator) Function() experimental.InternalFunction {
+ return internalFunction{si.fn}
+}
+
+// ProgramCounter implements the same method as documented on
+// experimental.StackIterator.
+func (si *stackIterator) ProgramCounter() experimental.ProgramCounter {
+ return experimental.ProgramCounter(si.pc)
+}
+
+// internalFunction implements experimental.InternalFunction.
+type internalFunction struct{ *function }
+
+// Definition implements the same method as documented on
+// experimental.InternalFunction.
+func (f internalFunction) Definition() api.FunctionDefinition {
+ return f.definition()
+}
+
+// SourceOffsetForPC implements the same method as documented on
+// experimental.InternalFunction.
+func (f internalFunction) SourceOffsetForPC(pc experimental.ProgramCounter) uint64 {
+ offsetsMap := f.parent.offsetsInWasmBinary
+ if uint64(pc) < uint64(len(offsetsMap)) {
+ return offsetsMap[pc]
+ }
+ return 0
+}
+
+// interpreter mode doesn't maintain call frames in the stack, so pass the zero size to the IR.
+const callFrameStackSize = 0
+
+// CompileModule implements the same method as documented on wasm.Engine.
+func (e *engine) CompileModule(_ context.Context, module *wasm.Module, listeners []experimental.FunctionListener, ensureTermination bool) error {
+ if _, ok := e.getCompiledFunctions(module); ok { // cache hit!
+ return nil
+ }
+
+ funcs := make([]compiledFunction, len(module.FunctionSection))
+ irCompiler, err := newCompiler(e.enabledFeatures, callFrameStackSize, module, ensureTermination)
+ if err != nil {
+ return err
+ }
+ imported := module.ImportFunctionCount
+ for i := range module.CodeSection {
+ var lsn experimental.FunctionListener
+ if i < len(listeners) {
+ lsn = listeners[i]
+ }
+
+ compiled := &funcs[i]
+ // If this is a host function, there's nothing to do, as the runtime representation of
+ // a host function in the interpreter is its Go function itself, as opposed to Wasm functions,
+ // which need to be compiled down to interpreterir operations.
+ if codeSeg := &module.CodeSection[i]; codeSeg.GoFunc != nil {
+ compiled.hostFn = codeSeg.GoFunc
+ } else {
+ ir, err := irCompiler.Next()
+ if err != nil {
+ return err
+ }
+ err = e.lowerIR(ir, compiled)
+ if err != nil {
+ def := module.FunctionDefinition(uint32(i) + module.ImportFunctionCount)
+ return fmt.Errorf("failed to lower func[%s] to interpreterir: %w", def.DebugName(), err)
+ }
+ }
+ compiled.source = module
+ compiled.ensureTermination = ensureTermination
+ compiled.listener = lsn
+ compiled.index = imported + uint32(i)
+ }
+ e.addCompiledFunctions(module, funcs)
+ return nil
+}
+
+// NewModuleEngine implements the same method as documented on wasm.Engine.
+func (e *engine) NewModuleEngine(module *wasm.Module, instance *wasm.ModuleInstance) (wasm.ModuleEngine, error) {
+ me := &moduleEngine{
+ parentEngine: e,
+ functions: make([]function, len(module.FunctionSection)+int(module.ImportFunctionCount)),
+ }
+
+ codes, ok := e.getCompiledFunctions(module)
+ if !ok {
+ return nil, errors.New("source module must be compiled before instantiation")
+ }
+
+ for i := range codes {
+ c := &codes[i]
+ offset := i + int(module.ImportFunctionCount)
+ typeIndex := module.FunctionSection[i]
+ me.functions[offset] = function{
+ moduleInstance: instance,
+ typeID: instance.TypeIDs[typeIndex],
+ funcType: &module.TypeSection[typeIndex],
+ parent: c,
+ }
+ }
+ return me, nil
+}
+
+// lowerIR lowers the interpreterir operations into an engine-friendly struct.
+func (e *engine) lowerIR(ir *compilationResult, ret *compiledFunction) error {
+ // Copy the body from the result.
+ ret.body = make([]unionOperation, len(ir.Operations))
+ copy(ret.body, ir.Operations)
+ // Also copy the offsets if necessary.
+ if offsets := ir.IROperationSourceOffsetsInWasmBinary; len(offsets) > 0 {
+ ret.offsetsInWasmBinary = make([]uint64, len(offsets))
+ copy(ret.offsetsInWasmBinary, offsets)
+ }
+
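+ // labelAddressResolutions maps a label kind and frame ID to the index of the
+ // corresponding label operation within the body.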
+ labelAddressResolutions := [labelKindNum][]uint64{}
+
+ // First, we iterate over all labels and resolve their addresses.
+ for i := range ret.body {
+ op := &ret.body[i]
+ switch op.Kind {
+ case operationKindLabel:
+ label := label(op.U1)
+ address := uint64(i)
+
+ kind, fid := label.Kind(), label.FrameID()
+ frameToAddresses := labelAddressResolutions[label.Kind()]
+ // Expand the slice if necessary.
+ if diff := fid - len(frameToAddresses) + 1; diff > 0 {
+ for j := 0; j < diff; j++ {
+ frameToAddresses = append(frameToAddresses, 0)
+ }
+ }
+ frameToAddresses[fid] = address
+ labelAddressResolutions[kind] = frameToAddresses
+ }
+ }
+
+ // Then rewrite each branch-target label as an index into the body.
+ for i := range ret.body {
+ op := &ret.body[i]
+ switch op.Kind {
+ case operationKindBr:
+ e.setLabelAddress(&op.U1, label(op.U1), labelAddressResolutions)
+ case operationKindBrIf:
+ e.setLabelAddress(&op.U1, label(op.U1), labelAddressResolutions)
+ e.setLabelAddress(&op.U2, label(op.U2), labelAddressResolutions)
+ case operationKindBrTable:
+ for j := 0; j < len(op.Us); j += 2 {
+ target := op.Us[j]
+ e.setLabelAddress(&op.Us[j], label(target), labelAddressResolutions)
+ }
+ }
+ }
+ return nil
+}
+
+func (e *engine) setLabelAddress(op *uint64, label label, labelAddressResolutions [labelKindNum][]uint64) {
+ if label.IsReturnTarget() {
+ // Jmp to the end of the possible binary.
+ *op = math.MaxUint64
+ } else {
+ *op = labelAddressResolutions[label.Kind()][label.FrameID()]
+ }
+}
+
+// ResolveImportedFunction implements wasm.ModuleEngine.
+func (e *moduleEngine) ResolveImportedFunction(index, indexInImportedModule wasm.Index, importedModuleEngine wasm.ModuleEngine) {
+ imported := importedModuleEngine.(*moduleEngine)
+ e.functions[index] = imported.functions[indexInImportedModule]
+}
+
+// ResolveImportedMemory implements wasm.ModuleEngine.
+func (e *moduleEngine) ResolveImportedMemory(wasm.ModuleEngine) {}
+
+// DoneInstantiation implements wasm.ModuleEngine.
+func (e *moduleEngine) DoneInstantiation() {}
+
+// FunctionInstanceReference implements the same method as documented on wasm.ModuleEngine.
+func (e *moduleEngine) FunctionInstanceReference(funcIndex wasm.Index) wasm.Reference {
+ return uintptr(unsafe.Pointer(&e.functions[funcIndex]))
+}
+
+// NewFunction implements the same method as documented on wasm.ModuleEngine.
+func (e *moduleEngine) NewFunction(index wasm.Index) (ce api.Function) {
+ // Note: The input parameters are pre-validated, so a compiled function is only absent on close. Updates to
+ // code on close aren't locked, and neither is this read.
+ compiled := &e.functions[index]
+ return e.newCallEngine(compiled)
+}
+
+// LookupFunction implements the same method as documented on wasm.ModuleEngine.
+func (e *moduleEngine) LookupFunction(t *wasm.TableInstance, typeId wasm.FunctionTypeID, tableOffset wasm.Index) (*wasm.ModuleInstance, wasm.Index) {
+ if tableOffset >= uint32(len(t.References)) {
+ panic(wasmruntime.ErrRuntimeInvalidTableAccess)
+ }
+ rawPtr := t.References[tableOffset]
+ if rawPtr == 0 {
+ panic(wasmruntime.ErrRuntimeInvalidTableAccess)
+ }
+
+ tf := functionFromUintptr(rawPtr)
+ if tf.typeID != typeId {
+ panic(wasmruntime.ErrRuntimeIndirectCallTypeMismatch)
+ }
+ return tf.moduleInstance, tf.parent.index
+}
+
+// Definition implements the same method as documented on api.Function.
+func (ce *callEngine) Definition() api.FunctionDefinition {
+ return ce.f.definition()
+}
+
+func (f *function) definition() api.FunctionDefinition {
+ compiled := f.parent
+ return compiled.source.FunctionDefinition(compiled.index)
+}
+
+// Call implements the same method as documented on api.Function.
+func (ce *callEngine) Call(ctx context.Context, params ...uint64) (results []uint64, err error) {
+ ft := ce.f.funcType
+ if n := ft.ParamNumInUint64; n != len(params) {
+ return nil, fmt.Errorf("expected %d params, but passed %d", n, len(params))
+ }
+ return ce.call(ctx, params, nil)
+}
+
+// CallWithStack implements the same method as documented on api.Function.
+func (ce *callEngine) CallWithStack(ctx context.Context, stack []uint64) error {
+ params, results, err := wasm.SplitCallStack(ce.f.funcType, stack)
+ if err != nil {
+ return err
+ }
+ _, err = ce.call(ctx, params, results)
+ return err
+}
+
+func (ce *callEngine) call(ctx context.Context, params, results []uint64) (_ []uint64, err error) {
+ m := ce.f.moduleInstance
+ if ce.f.parent.ensureTermination {
+ select {
+ case <-ctx.Done():
+ // If the provided context is already done, close the call context
+ // and return the error.
+ m.CloseWithCtxErr(ctx)
+ return nil, m.FailIfClosed()
+ default:
+ }
+ }
+
+ if ctx.Value(expctxkeys.EnableSnapshotterKey{}) != nil {
+ ctx = context.WithValue(ctx, expctxkeys.SnapshotterKey{}, ce)
+ }
+
+ defer func() {
+ // If the module closed during the call, and the call didn't err for another reason, set an ExitError.
+ if err == nil {
+ err = m.FailIfClosed()
+ }
+ // TODO: ^^ Will not fail if the function was imported from a closed module.
+
+ if v := recover(); v != nil {
+ err = ce.recoverOnCall(ctx, m, v)
+ }
+ }()
+
+ ce.pushValues(params)
+
+ if ce.f.parent.ensureTermination {
+ done := m.CloseModuleOnCanceledOrTimeout(ctx)
+ defer done()
+ }
+
+ ce.callFunction(ctx, m, ce.f)
+
+ // This returns a safe copy of the results, instead of a slice view. If we
+ // returned a re-slice, the caller could accidentally or purposefully
+ // corrupt the stack of subsequent calls.
+ ft := ce.f.funcType
+ if results == nil && ft.ResultNumInUint64 > 0 {
+ results = make([]uint64, ft.ResultNumInUint64)
+ }
+ ce.popValues(results)
+ return results, nil
+}
+
+// functionListenerInvocation captures arguments needed to perform function
+// listener invocations when unwinding the call stack.
+type functionListenerInvocation struct {
+ experimental.FunctionListener
+ def api.FunctionDefinition
+}
+
+ // recoverOnCall takes the recovered value `v` and wraps it with the call
+ // frame stack traces. It also resets the state of callEngine so that it
+ // can be reused for subsequent calls.
+func (ce *callEngine) recoverOnCall(ctx context.Context, m *wasm.ModuleInstance, v interface{}) (err error) {
+ if s, ok := v.(*snapshot); ok {
+ // A snapshot that wasn't handled was created by a different call engine, possibly from a nested wasm invocation;
+ // let it propagate up to be handled by the caller.
+ panic(s)
+ }
+
+ builder := wasmdebug.NewErrorBuilder()
+ frameCount := len(ce.frames)
+ functionListeners := make([]functionListenerInvocation, 0, 16)
+
+ if frameCount > wasmdebug.MaxFrames {
+ frameCount = wasmdebug.MaxFrames
+ }
+ for i := 0; i < frameCount; i++ {
+ frame := ce.popFrame()
+ f := frame.f
+ def := f.definition()
+ var sources []string
+ if parent := frame.f.parent; parent.body != nil && len(parent.offsetsInWasmBinary) > 0 {
+ sources = parent.source.DWARFLines.Line(parent.offsetsInWasmBinary[frame.pc])
+ }
+ builder.AddFrame(def.DebugName(), def.ParamTypes(), def.ResultTypes(), sources)
+ if f.parent.listener != nil {
+ functionListeners = append(functionListeners, functionListenerInvocation{
+ FunctionListener: f.parent.listener,
+ def: f.definition(),
+ })
+ }
+ }
+
+ err = builder.FromRecovered(v)
+ for i := range functionListeners {
+ functionListeners[i].Abort(ctx, m, functionListeners[i].def, err)
+ }
+
+ // Reset the stack and frames so this callEngine can be reused.
+ ce.stack, ce.frames = ce.stack[:0], ce.frames[:0]
+ return
+}
+
+func (ce *callEngine) callFunction(ctx context.Context, m *wasm.ModuleInstance, f *function) {
+ if f.parent.hostFn != nil {
+ ce.callGoFuncWithStack(ctx, m, f)
+ } else if lsn := f.parent.listener; lsn != nil {
+ ce.callNativeFuncWithListener(ctx, m, f, lsn)
+ } else {
+ ce.callNativeFunc(ctx, m, f)
+ }
+}
+
+func (ce *callEngine) callGoFunc(ctx context.Context, m *wasm.ModuleInstance, f *function, stack []uint64) {
+ typ := f.funcType
+ lsn := f.parent.listener
+ if lsn != nil {
+ params := stack[:typ.ParamNumInUint64]
+ ce.stackIterator.reset(ce.stack, ce.frames, f)
+ lsn.Before(ctx, m, f.definition(), params, &ce.stackIterator)
+ ce.stackIterator.clear()
+ }
+ frame := &callFrame{f: f, base: len(ce.stack)}
+ ce.pushFrame(frame)
+
+ fn := f.parent.hostFn
+ switch fn := fn.(type) {
+ case api.GoModuleFunction:
+ fn.Call(ctx, m, stack)
+ case api.GoFunction:
+ fn.Call(ctx, stack)
+ }
+
+ ce.popFrame()
+ if lsn != nil {
+ // TODO: This doesn't receive the error because panics are used to propagate errors.
+ results := stack[:typ.ResultNumInUint64]
+ lsn.After(ctx, m, f.definition(), results)
+ }
+}
+
+func (ce *callEngine) callNativeFunc(ctx context.Context, m *wasm.ModuleInstance, f *function) {
+ frame := &callFrame{f: f, base: len(ce.stack)}
+ moduleInst := f.moduleInstance
+ functions := moduleInst.Engine.(*moduleEngine).functions
+ memoryInst := moduleInst.MemoryInstance
+ globals := moduleInst.Globals
+ tables := moduleInst.Tables
+ typeIDs := moduleInst.TypeIDs
+ dataInstances := moduleInst.DataInstances
+ elementInstances := moduleInst.ElementInstances
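+ // The module state above is hoisted into locals so the hot loop below can
+ // avoid repeated field loads through moduleInst on every instruction.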
+ ce.pushFrame(frame)
+ body := frame.f.parent.body
+ bodyLen := uint64(len(body))
+ for frame.pc < bodyLen {
+ op := &body[frame.pc]
+ // TODO: add description of each operation/case
+ // on, for example, how many args are used,
+ // how the stack is modified, etc.
+ switch op.Kind {
+ case operationKindBuiltinFunctionCheckExitCode:
+ if err := m.FailIfClosed(); err != nil {
+ panic(err)
+ }
+ frame.pc++
+ case operationKindUnreachable:
+ panic(wasmruntime.ErrRuntimeUnreachable)
+ case operationKindBr:
+ frame.pc = op.U1
+ case operationKindBrIf:
+ if ce.popValue() > 0 {
+ ce.drop(op.U3)
+ frame.pc = op.U1
+ } else {
+ frame.pc = op.U2
+ }
+ case operationKindBrTable:
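+ // op.Us holds (target pc, drop range) pairs: entries 2i and 2i+1 describe
+ // branch i, and the final pair acts as the default target.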
+ v := ce.popValue()
+ defaultAt := uint64(len(op.Us))/2 - 1
+ if v > defaultAt {
+ v = defaultAt
+ }
+ v *= 2
+ ce.drop(op.Us[v+1])
+ frame.pc = op.Us[v]
+ case operationKindCall:
+ func() {
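+ // The call is wrapped in a closure so that, when snapshots are enabled,
+ // a snapshot restore panicking out of the callee can be caught here and
+ // the interpreter state (frame, body) reloaded before resuming.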
+ if ctx.Value(expctxkeys.EnableSnapshotterKey{}) != nil {
+ defer func() {
+ if r := recover(); r != nil {
+ if s, ok := r.(*snapshot); ok && s.ce == ce {
+ s.doRestore()
+ frame = ce.frames[len(ce.frames)-1]
+ body = frame.f.parent.body
+ bodyLen = uint64(len(body))
+ } else {
+ panic(r)
+ }
+ }
+ }()
+ }
+ ce.callFunction(ctx, f.moduleInstance, &functions[op.U1])
+ }()
+ frame.pc++
+ case operationKindCallIndirect:
+ offset := ce.popValue()
+ table := tables[op.U2]
+ if offset >= uint64(len(table.References)) {
+ panic(wasmruntime.ErrRuntimeInvalidTableAccess)
+ }
+ rawPtr := table.References[offset]
+ if rawPtr == 0 {
+ panic(wasmruntime.ErrRuntimeInvalidTableAccess)
+ }
+
+ tf := functionFromUintptr(rawPtr)
+ if tf.typeID != typeIDs[op.U1] {
+ panic(wasmruntime.ErrRuntimeIndirectCallTypeMismatch)
+ }
+
+ ce.callFunction(ctx, f.moduleInstance, tf)
+ frame.pc++
+ case operationKindDrop:
+ ce.drop(op.U1)
+ frame.pc++
+ case operationKindSelect:
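+ // Stack (top to bottom): condition c, then x2, then x1; B3 indicates the
+ // operands are v128 values occupying two slots each. x1 stays when c != 0.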
+ c := ce.popValue()
+ if op.B3 { // Target is vector.
+ x2Hi, x2Lo := ce.popValue(), ce.popValue()
+ if c == 0 {
+ _, _ = ce.popValue(), ce.popValue() // discard the x1's lo and hi bits.
+ ce.pushValue(x2Lo)
+ ce.pushValue(x2Hi)
+ }
+ } else {
+ v2 := ce.popValue()
+ if c == 0 {
+ _ = ce.popValue()
+ ce.pushValue(v2)
+ }
+ }
+ frame.pc++
+ case operationKindPick:
+ index := len(ce.stack) - 1 - int(op.U1)
+ ce.pushValue(ce.stack[index])
+ if op.B3 { // V128 value target.
+ ce.pushValue(ce.stack[index+1])
+ }
+ frame.pc++
+ case operationKindSet:
+ if op.B3 { // V128 value target.
+ lowIndex := len(ce.stack) - 1 - int(op.U1)
+ highIndex := lowIndex + 1
+ hi, lo := ce.popValue(), ce.popValue()
+ ce.stack[lowIndex], ce.stack[highIndex] = lo, hi
+ } else {
+ index := len(ce.stack) - 1 - int(op.U1)
+ ce.stack[index] = ce.popValue()
+ }
+ frame.pc++
+ case operationKindGlobalGet:
+ g := globals[op.U1]
+ ce.pushValue(g.Val)
+ if g.Type.ValType == wasm.ValueTypeV128 {
+ ce.pushValue(g.ValHi)
+ }
+ frame.pc++
+ case operationKindGlobalSet:
+ g := globals[op.U1]
+ if g.Type.ValType == wasm.ValueTypeV128 {
+ g.ValHi = ce.popValue()
+ }
+ g.Val = ce.popValue()
+ frame.pc++
+ case operationKindLoad:
+ offset := ce.popMemoryOffset(op)
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32, unsignedTypeF32:
+ if val, ok := memoryInst.ReadUint32Le(offset); !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ } else {
+ ce.pushValue(uint64(val))
+ }
+ case unsignedTypeI64, unsignedTypeF64:
+ if val, ok := memoryInst.ReadUint64Le(offset); !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ } else {
+ ce.pushValue(val)
+ }
+ }
+ frame.pc++
+ case operationKindLoad8:
+ val, ok := memoryInst.ReadByte(ce.popMemoryOffset(op))
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+
+ switch signedInt(op.B1) {
+ case signedInt32:
+ ce.pushValue(uint64(uint32(int8(val))))
+ case signedInt64:
+ ce.pushValue(uint64(int8(val)))
+ case signedUint32, signedUint64:
+ ce.pushValue(uint64(val))
+ }
+ frame.pc++
+ case operationKindLoad16:
+ val, ok := memoryInst.ReadUint16Le(ce.popMemoryOffset(op))
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+
+ switch signedInt(op.B1) {
+ case signedInt32:
+ ce.pushValue(uint64(uint32(int16(val))))
+ case signedInt64:
+ ce.pushValue(uint64(int16(val)))
+ case signedUint32, signedUint64:
+ ce.pushValue(uint64(val))
+ }
+ frame.pc++
+ case operationKindLoad32:
+ val, ok := memoryInst.ReadUint32Le(ce.popMemoryOffset(op))
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+
+ if op.B1 == 1 { // Signed
+ ce.pushValue(uint64(int32(val)))
+ } else {
+ ce.pushValue(uint64(val))
+ }
+ frame.pc++
+ case operationKindStore:
+ val := ce.popValue()
+ offset := ce.popMemoryOffset(op)
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32, unsignedTypeF32:
+ if !memoryInst.WriteUint32Le(offset, uint32(val)) {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ case unsignedTypeI64, unsignedTypeF64:
+ if !memoryInst.WriteUint64Le(offset, val) {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ }
+ frame.pc++
+ case operationKindStore8:
+ val := byte(ce.popValue())
+ offset := ce.popMemoryOffset(op)
+ if !memoryInst.WriteByte(offset, val) {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ frame.pc++
+ case operationKindStore16:
+ val := uint16(ce.popValue())
+ offset := ce.popMemoryOffset(op)
+ if !memoryInst.WriteUint16Le(offset, val) {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ frame.pc++
+ case operationKindStore32:
+ val := uint32(ce.popValue())
+ offset := ce.popMemoryOffset(op)
+ if !memoryInst.WriteUint32Le(offset, val) {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ frame.pc++
+ case operationKindMemorySize:
+ ce.pushValue(uint64(memoryInst.Pages()))
+ frame.pc++
+ case operationKindMemoryGrow:
+ n := ce.popValue()
+ if res, ok := memoryInst.Grow(uint32(n)); !ok {
+ ce.pushValue(uint64(0xffffffff)) // = -1 in signed 32-bit integer.
+ } else {
+ ce.pushValue(uint64(res))
+ }
+ frame.pc++
+ case operationKindConstI32, operationKindConstI64,
+ operationKindConstF32, operationKindConstF64:
+ ce.pushValue(op.U1)
+ frame.pc++
+ case operationKindEq:
+ var b bool
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32:
+ v2, v1 := ce.popValue(), ce.popValue()
+ b = uint32(v1) == uint32(v2)
+ case unsignedTypeI64:
+ v2, v1 := ce.popValue(), ce.popValue()
+ b = v1 == v2
+ case unsignedTypeF32:
+ v2, v1 := ce.popValue(), ce.popValue()
+ b = math.Float32frombits(uint32(v2)) == math.Float32frombits(uint32(v1))
+ case unsignedTypeF64:
+ v2, v1 := ce.popValue(), ce.popValue()
+ b = math.Float64frombits(v2) == math.Float64frombits(v1)
+ }
+ if b {
+ ce.pushValue(1)
+ } else {
+ ce.pushValue(0)
+ }
+ frame.pc++
+ case operationKindNe:
+ var b bool
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32, unsignedTypeI64:
+ v2, v1 := ce.popValue(), ce.popValue()
+ b = v1 != v2
+ case unsignedTypeF32:
+ v2, v1 := ce.popValue(), ce.popValue()
+ b = math.Float32frombits(uint32(v2)) != math.Float32frombits(uint32(v1))
+ case unsignedTypeF64:
+ v2, v1 := ce.popValue(), ce.popValue()
+ b = math.Float64frombits(v2) != math.Float64frombits(v1)
+ }
+ if b {
+ ce.pushValue(1)
+ } else {
+ ce.pushValue(0)
+ }
+ frame.pc++
+ case operationKindEqz:
+ if ce.popValue() == 0 {
+ ce.pushValue(1)
+ } else {
+ ce.pushValue(0)
+ }
+ frame.pc++
+ case operationKindLt:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ var b bool
+ switch signedType(op.B1) {
+ case signedTypeInt32:
+ b = int32(v1) < int32(v2)
+ case signedTypeInt64:
+ b = int64(v1) < int64(v2)
+ case signedTypeUint32, signedTypeUint64:
+ b = v1 < v2
+ case signedTypeFloat32:
+ b = math.Float32frombits(uint32(v1)) < math.Float32frombits(uint32(v2))
+ case signedTypeFloat64:
+ b = math.Float64frombits(v1) < math.Float64frombits(v2)
+ }
+ if b {
+ ce.pushValue(1)
+ } else {
+ ce.pushValue(0)
+ }
+ frame.pc++
+ case operationKindGt:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ var b bool
+ switch signedType(op.B1) {
+ case signedTypeInt32:
+ b = int32(v1) > int32(v2)
+ case signedTypeInt64:
+ b = int64(v1) > int64(v2)
+ case signedTypeUint32, signedTypeUint64:
+ b = v1 > v2
+ case signedTypeFloat32:
+ b = math.Float32frombits(uint32(v1)) > math.Float32frombits(uint32(v2))
+ case signedTypeFloat64:
+ b = math.Float64frombits(v1) > math.Float64frombits(v2)
+ }
+ if b {
+ ce.pushValue(1)
+ } else {
+ ce.pushValue(0)
+ }
+ frame.pc++
+ case operationKindLe:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ var b bool
+ switch signedType(op.B1) {
+ case signedTypeInt32:
+ b = int32(v1) <= int32(v2)
+ case signedTypeInt64:
+ b = int64(v1) <= int64(v2)
+ case signedTypeUint32, signedTypeUint64:
+ b = v1 <= v2
+ case signedTypeFloat32:
+ b = math.Float32frombits(uint32(v1)) <= math.Float32frombits(uint32(v2))
+ case signedTypeFloat64:
+ b = math.Float64frombits(v1) <= math.Float64frombits(v2)
+ }
+ if b {
+ ce.pushValue(1)
+ } else {
+ ce.pushValue(0)
+ }
+ frame.pc++
+ case operationKindGe:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ var b bool
+ switch signedType(op.B1) {
+ case signedTypeInt32:
+ b = int32(v1) >= int32(v2)
+ case signedTypeInt64:
+ b = int64(v1) >= int64(v2)
+ case signedTypeUint32, signedTypeUint64:
+ b = v1 >= v2
+ case signedTypeFloat32:
+ b = math.Float32frombits(uint32(v1)) >= math.Float32frombits(uint32(v2))
+ case signedTypeFloat64:
+ b = math.Float64frombits(v1) >= math.Float64frombits(v2)
+ }
+ if b {
+ ce.pushValue(1)
+ } else {
+ ce.pushValue(0)
+ }
+ frame.pc++
+ case operationKindAdd:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32:
+ v := uint32(v1) + uint32(v2)
+ ce.pushValue(uint64(v))
+ case unsignedTypeI64:
+ ce.pushValue(v1 + v2)
+ case unsignedTypeF32:
+ ce.pushValue(addFloat32bits(uint32(v1), uint32(v2)))
+ case unsignedTypeF64:
+ v := math.Float64frombits(v1) + math.Float64frombits(v2)
+ ce.pushValue(math.Float64bits(v))
+ }
+ frame.pc++
+ case operationKindSub:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32:
+ ce.pushValue(uint64(uint32(v1) - uint32(v2)))
+ case unsignedTypeI64:
+ ce.pushValue(v1 - v2)
+ case unsignedTypeF32:
+ ce.pushValue(subFloat32bits(uint32(v1), uint32(v2)))
+ case unsignedTypeF64:
+ v := math.Float64frombits(v1) - math.Float64frombits(v2)
+ ce.pushValue(math.Float64bits(v))
+ }
+ frame.pc++
+ case operationKindMul:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32:
+ ce.pushValue(uint64(uint32(v1) * uint32(v2)))
+ case unsignedTypeI64:
+ ce.pushValue(v1 * v2)
+ case unsignedTypeF32:
+ ce.pushValue(mulFloat32bits(uint32(v1), uint32(v2)))
+ case unsignedTypeF64:
+ v := math.Float64frombits(v1) * math.Float64frombits(v2)
+ ce.pushValue(math.Float64bits(v))
+ }
+ frame.pc++
+ case operationKindClz:
+ v := ce.popValue()
+ if op.B1 == 0 {
+ // unsignedInt32
+ ce.pushValue(uint64(bits.LeadingZeros32(uint32(v))))
+ } else {
+ // unsignedInt64
+ ce.pushValue(uint64(bits.LeadingZeros64(v)))
+ }
+ frame.pc++
+ case operationKindCtz:
+ v := ce.popValue()
+ if op.B1 == 0 {
+ // unsignedInt32
+ ce.pushValue(uint64(bits.TrailingZeros32(uint32(v))))
+ } else {
+ // unsignedInt64
+ ce.pushValue(uint64(bits.TrailingZeros64(v)))
+ }
+ frame.pc++
+ case operationKindPopcnt:
+ v := ce.popValue()
+ if op.B1 == 0 {
+ // unsignedInt32
+ ce.pushValue(uint64(bits.OnesCount32(uint32(v))))
+ } else {
+ // unsignedInt64
+ ce.pushValue(uint64(bits.OnesCount64(v)))
+ }
+ frame.pc++
+ case operationKindDiv:
+ // If an integer, check we won't divide by zero.
+ t := signedType(op.B1)
+ v2, v1 := ce.popValue(), ce.popValue()
+ switch t {
+ case signedTypeFloat32, signedTypeFloat64: // not integers
+ default:
+ if v2 == 0 {
+ panic(wasmruntime.ErrRuntimeIntegerDivideByZero)
+ }
+ }
+
+ switch t {
+ case signedTypeInt32:
+ d := int32(v2)
+ n := int32(v1)
+ if n == math.MinInt32 && d == -1 {
+ panic(wasmruntime.ErrRuntimeIntegerOverflow)
+ }
+ ce.pushValue(uint64(uint32(n / d)))
+ case signedTypeInt64:
+ d := int64(v2)
+ n := int64(v1)
+ if n == math.MinInt64 && d == -1 {
+ panic(wasmruntime.ErrRuntimeIntegerOverflow)
+ }
+ ce.pushValue(uint64(n / d))
+ case signedTypeUint32:
+ d := uint32(v2)
+ n := uint32(v1)
+ ce.pushValue(uint64(n / d))
+ case signedTypeUint64:
+ d := v2
+ n := v1
+ ce.pushValue(n / d)
+ case signedTypeFloat32:
+ ce.pushValue(divFloat32bits(uint32(v1), uint32(v2)))
+ case signedTypeFloat64:
+ ce.pushValue(math.Float64bits(math.Float64frombits(v1) / math.Float64frombits(v2)))
+ }
+ frame.pc++
+ case operationKindRem:
+ v2, v1 := ce.popValue(), ce.popValue()
+ if v2 == 0 {
+ panic(wasmruntime.ErrRuntimeIntegerDivideByZero)
+ }
+ switch signedInt(op.B1) {
+ case signedInt32:
+ d := int32(v2)
+ n := int32(v1)
+ ce.pushValue(uint64(uint32(n % d)))
+ case signedInt64:
+ d := int64(v2)
+ n := int64(v1)
+ ce.pushValue(uint64(n % d))
+ case signedUint32:
+ d := uint32(v2)
+ n := uint32(v1)
+ ce.pushValue(uint64(n % d))
+ case signedUint64:
+ d := v2
+ n := v1
+ ce.pushValue(n % d)
+ }
+ frame.pc++
+ case operationKindAnd:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ if op.B1 == 0 {
+ // unsignedInt32
+ ce.pushValue(uint64(uint32(v2) & uint32(v1)))
+ } else {
+ // unsignedInt64
+ ce.pushValue(uint64(v2 & v1))
+ }
+ frame.pc++
+ case operationKindOr:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ if op.B1 == 0 {
+ // unsignedInt32
+ ce.pushValue(uint64(uint32(v2) | uint32(v1)))
+ } else {
+ // unsignedInt64
+ ce.pushValue(uint64(v2 | v1))
+ }
+ frame.pc++
+ case operationKindXor:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ if op.B1 == 0 {
+ // unsignedInt32
+ ce.pushValue(uint64(uint32(v2) ^ uint32(v1)))
+ } else {
+ // unsignedInt64
+ ce.pushValue(uint64(v2 ^ v1))
+ }
+ frame.pc++
+ case operationKindShl:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ if op.B1 == 0 {
+ // unsignedInt32
+ ce.pushValue(uint64(uint32(v1) << (uint32(v2) % 32)))
+ } else {
+ // unsignedInt64
+ ce.pushValue(v1 << (v2 % 64))
+ }
+ frame.pc++
+ case operationKindShr:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ switch signedInt(op.B1) {
+ case signedInt32:
+ ce.pushValue(uint64(uint32(int32(v1) >> (uint32(v2) % 32))))
+ case signedInt64:
+ ce.pushValue(uint64(int64(v1) >> (v2 % 64)))
+ case signedUint32:
+ ce.pushValue(uint64(uint32(v1) >> (uint32(v2) % 32)))
+ case signedUint64:
+ ce.pushValue(v1 >> (v2 % 64))
+ }
+ frame.pc++
+ case operationKindRotl:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ if op.B1 == 0 {
+ // unsignedInt32
+ ce.pushValue(uint64(bits.RotateLeft32(uint32(v1), int(v2))))
+ } else {
+ // unsignedInt64
+ ce.pushValue(uint64(bits.RotateLeft64(v1, int(v2))))
+ }
+ frame.pc++
+ case operationKindRotr:
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ if op.B1 == 0 {
+ // unsignedInt32
+ ce.pushValue(uint64(bits.RotateLeft32(uint32(v1), -int(v2))))
+ } else {
+ // unsignedInt64
+ ce.pushValue(uint64(bits.RotateLeft64(v1, -int(v2))))
+ }
+ frame.pc++
+ case operationKindAbs:
+ if op.B1 == 0 {
+ // float32
+ const mask uint32 = 1 << 31
+ ce.pushValue(uint64(uint32(ce.popValue()) &^ mask))
+ } else {
+ // float64
+ const mask uint64 = 1 << 63
+ ce.pushValue(ce.popValue() &^ mask)
+ }
+ frame.pc++
+ case operationKindNeg:
+ if op.B1 == 0 {
+ // float32
+ v := -math.Float32frombits(uint32(ce.popValue()))
+ ce.pushValue(uint64(math.Float32bits(v)))
+ } else {
+ // float64
+ v := -math.Float64frombits(ce.popValue())
+ ce.pushValue(math.Float64bits(v))
+ }
+ frame.pc++
+ case operationKindCeil:
+ if op.B1 == 0 {
+ // float32
+ v := moremath.WasmCompatCeilF32(math.Float32frombits(uint32(ce.popValue())))
+ ce.pushValue(uint64(math.Float32bits(v)))
+ } else {
+ // float64
+ v := moremath.WasmCompatCeilF64(math.Float64frombits(ce.popValue()))
+ ce.pushValue(math.Float64bits(v))
+ }
+ frame.pc++
+ case operationKindFloor:
+ if op.B1 == 0 {
+ // float32
+ v := moremath.WasmCompatFloorF32(math.Float32frombits(uint32(ce.popValue())))
+ ce.pushValue(uint64(math.Float32bits(v)))
+ } else {
+ // float64
+ v := moremath.WasmCompatFloorF64(math.Float64frombits(ce.popValue()))
+ ce.pushValue(math.Float64bits(v))
+ }
+ frame.pc++
+ case operationKindTrunc:
+ if op.B1 == 0 {
+ // float32
+ v := moremath.WasmCompatTruncF32(math.Float32frombits(uint32(ce.popValue())))
+ ce.pushValue(uint64(math.Float32bits(v)))
+ } else {
+ // float64
+ v := moremath.WasmCompatTruncF64(math.Float64frombits(ce.popValue()))
+ ce.pushValue(math.Float64bits(v))
+ }
+ frame.pc++
+ case operationKindNearest:
+ if op.B1 == 0 {
+ // float32
+ f := math.Float32frombits(uint32(ce.popValue()))
+ ce.pushValue(uint64(math.Float32bits(moremath.WasmCompatNearestF32(f))))
+ } else {
+ // float64
+ f := math.Float64frombits(ce.popValue())
+ ce.pushValue(math.Float64bits(moremath.WasmCompatNearestF64(f)))
+ }
+ frame.pc++
+ case operationKindSqrt:
+ if op.B1 == 0 {
+ // float32
+ v := math.Sqrt(float64(math.Float32frombits(uint32(ce.popValue()))))
+ ce.pushValue(uint64(math.Float32bits(float32(v))))
+ } else {
+ // float64
+ v := math.Sqrt(math.Float64frombits(ce.popValue()))
+ ce.pushValue(math.Float64bits(v))
+ }
+ frame.pc++
+ case operationKindMin:
+ if op.B1 == 0 {
+ // float32
+ ce.pushValue(wasmCompatMin32bits(uint32(ce.popValue()), uint32(ce.popValue())))
+ } else {
+ v2 := math.Float64frombits(ce.popValue())
+ v1 := math.Float64frombits(ce.popValue())
+ ce.pushValue(math.Float64bits(moremath.WasmCompatMin64(v1, v2)))
+ }
+ frame.pc++
+ case operationKindMax:
+ if op.B1 == 0 {
+ ce.pushValue(wasmCompatMax32bits(uint32(ce.popValue()), uint32(ce.popValue())))
+ } else {
+ // float64
+ v2 := math.Float64frombits(ce.popValue())
+ v1 := math.Float64frombits(ce.popValue())
+ ce.pushValue(math.Float64bits(moremath.WasmCompatMax64(v1, v2)))
+ }
+ frame.pc++
+ case operationKindCopysign:
+ if op.B1 == 0 {
+ // float32
+ v2 := uint32(ce.popValue())
+ v1 := uint32(ce.popValue())
+ const signbit = 1 << 31
+ ce.pushValue(uint64(v1&^signbit | v2&signbit))
+ } else {
+ // float64
+ v2 := ce.popValue()
+ v1 := ce.popValue()
+ const signbit = 1 << 63
+ ce.pushValue(v1&^signbit | v2&signbit)
+ }
+ frame.pc++
+ case operationKindI32WrapFromI64:
+ ce.pushValue(uint64(uint32(ce.popValue())))
+ frame.pc++
+ case operationKindITruncFromF:
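+ // B1 selects the source float width (0 = f32, else f64), B2 the signed
+ // integer destination, and B3 marks the non-trapping (saturating) variant.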
+ if op.B1 == 0 {
+ // float32
+ switch signedInt(op.B2) {
+ case signedInt32:
+ v := math.Trunc(float64(math.Float32frombits(uint32(ce.popValue()))))
+ if math.IsNaN(v) { // NaN cannot be compared with itself, so we have to use IsNaN
+ if op.B3 {
+ // non-trapping conversion must cast nan to zero.
+ v = 0
+ } else {
+ panic(wasmruntime.ErrRuntimeInvalidConversionToInteger)
+ }
+ } else if v < math.MinInt32 || v > math.MaxInt32 {
+ if op.B3 {
+ // non-trapping conversion must "saturate" the value for overflowing sources.
+ if v < 0 {
+ v = math.MinInt32
+ } else {
+ v = math.MaxInt32
+ }
+ } else {
+ panic(wasmruntime.ErrRuntimeIntegerOverflow)
+ }
+ }
+ ce.pushValue(uint64(uint32(int32(v))))
+ case signedInt64:
+ v := math.Trunc(float64(math.Float32frombits(uint32(ce.popValue()))))
+ res := int64(v)
+ if math.IsNaN(v) { // NaN cannot be compared with itself, so we have to use IsNaN
+ if op.B3 {
+ // non-trapping conversion must cast nan to zero.
+ res = 0
+ } else {
+ panic(wasmruntime.ErrRuntimeInvalidConversionToInteger)
+ }
+ } else if v < math.MinInt64 || v >= math.MaxInt64 {
+ // Note: math.MaxInt64 is rounded up to math.MaxInt64+1 in 64-bit float representation,
+ // and that's why we use '>=' not '>' to check overflow.
+ if op.B3 {
+ // non-trapping conversion must "saturate" the value for overflowing sources.
+ if v < 0 {
+ res = math.MinInt64
+ } else {
+ res = math.MaxInt64
+ }
+ } else {
+ panic(wasmruntime.ErrRuntimeIntegerOverflow)
+ }
+ }
+ ce.pushValue(uint64(res))
+ case signedUint32:
+ v := math.Trunc(float64(math.Float32frombits(uint32(ce.popValue()))))
+ if math.IsNaN(v) { // NaN cannot be compared with itself, so we have to use IsNaN
+ if op.B3 {
+ // non-trapping conversion must cast nan to zero.
+ v = 0
+ } else {
+ panic(wasmruntime.ErrRuntimeInvalidConversionToInteger)
+ }
+ } else if v < 0 || v > math.MaxUint32 {
+ if op.B3 {
+ // non-trapping conversion must "saturate" the value for overflowing source.
+ if v < 0 {
+ v = 0
+ } else {
+ v = math.MaxUint32
+ }
+ } else {
+ panic(wasmruntime.ErrRuntimeIntegerOverflow)
+ }
+ }
+ ce.pushValue(uint64(uint32(v)))
+ case signedUint64:
+ v := math.Trunc(float64(math.Float32frombits(uint32(ce.popValue()))))
+ res := uint64(v)
+ if math.IsNaN(v) { // NaN cannot be compared with itself, so we have to use IsNaN
+ if op.B3 {
+ // non-trapping conversion must cast nan to zero.
+ res = 0
+ } else {
+ panic(wasmruntime.ErrRuntimeInvalidConversionToInteger)
+ }
+ } else if v < 0 || v >= math.MaxUint64 {
+ // Note: math.MaxUint64 is rounded up to math.MaxUint64+1 in 64-bit float representation,
+ // and that's why we use '>=' not '>' to check overflow.
+ if op.B3 {
+ // non-trapping conversion must "saturate" the value for overflowing source.
+ if v < 0 {
+ res = 0
+ } else {
+ res = math.MaxUint64
+ }
+ } else {
+ panic(wasmruntime.ErrRuntimeIntegerOverflow)
+ }
+ }
+ ce.pushValue(res)
+ }
+ } else {
+ // float64
+ switch signedInt(op.B2) {
+ case signedInt32:
+ v := math.Trunc(math.Float64frombits(ce.popValue()))
+ if math.IsNaN(v) { // NaN cannot be compared with itself, so we have to use IsNaN
+ if op.B3 {
+ // non-trapping conversion must cast nan to zero.
+ v = 0
+ } else {
+ panic(wasmruntime.ErrRuntimeInvalidConversionToInteger)
+ }
+ } else if v < math.MinInt32 || v > math.MaxInt32 {
+ if op.B3 {
+ // non-trapping conversion must "saturate" the value for overflowing source.
+ if v < 0 {
+ v = math.MinInt32
+ } else {
+ v = math.MaxInt32
+ }
+ } else {
+ panic(wasmruntime.ErrRuntimeIntegerOverflow)
+ }
+ }
+ ce.pushValue(uint64(uint32(int32(v))))
+ case signedInt64:
+ v := math.Trunc(math.Float64frombits(ce.popValue()))
+ res := int64(v)
+ if math.IsNaN(v) { // NaN cannot be compared with itself, so we have to use IsNaN
+ if op.B3 {
+ // non-trapping conversion must cast nan to zero.
+ res = 0
+ } else {
+ panic(wasmruntime.ErrRuntimeInvalidConversionToInteger)
+ }
+ } else if v < math.MinInt64 || v >= math.MaxInt64 {
+ // Note: math.MaxInt64 is rounded up to math.MaxInt64+1 in 64-bit float representation,
+ // and that's why we use '>=' not '>' to check overflow.
+ if op.B3 {
+ // non-trapping conversion must "saturate" the value for overflowing source.
+ if v < 0 {
+ res = math.MinInt64
+ } else {
+ res = math.MaxInt64
+ }
+ } else {
+ panic(wasmruntime.ErrRuntimeIntegerOverflow)
+ }
+ }
+ ce.pushValue(uint64(res))
+ case signedUint32:
+ v := math.Trunc(math.Float64frombits(ce.popValue()))
+ if math.IsNaN(v) { // NaN cannot be compared with itself, so we have to use IsNaN
+ if op.B3 {
+ // non-trapping conversion must cast nan to zero.
+ v = 0
+ } else {
+ panic(wasmruntime.ErrRuntimeInvalidConversionToInteger)
+ }
+ } else if v < 0 || v > math.MaxUint32 {
+ if op.B3 {
+ // non-trapping conversion must "saturate" the value for overflowing source.
+ if v < 0 {
+ v = 0
+ } else {
+ v = math.MaxUint32
+ }
+ } else {
+ panic(wasmruntime.ErrRuntimeIntegerOverflow)
+ }
+ }
+ ce.pushValue(uint64(uint32(v)))
+ case signedUint64:
+ v := math.Trunc(math.Float64frombits(ce.popValue()))
+ res := uint64(v)
+ if math.IsNaN(v) { // NaN cannot be compared with itself, so we have to use IsNaN
+ if op.B3 {
+ // non-trapping conversion must cast nan to zero.
+ res = 0
+ } else {
+ panic(wasmruntime.ErrRuntimeInvalidConversionToInteger)
+ }
+ } else if v < 0 || v >= math.MaxUint64 {
+ // Note: math.MaxUint64 is rounded up to math.MaxUint64+1 in 64-bit float representation,
+ // and that's why we use '>=' not '>' to check overflow.
+ if op.B3 {
+ // non-trapping conversion must "saturate" the value for overflowing source.
+ if v < 0 {
+ res = 0
+ } else {
+ res = math.MaxUint64
+ }
+ } else {
+ panic(wasmruntime.ErrRuntimeIntegerOverflow)
+ }
+ }
+ ce.pushValue(res)
+ }
+ }
+ frame.pc++
+ case operationKindFConvertFromI:
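+ // B1 selects the source integer type and B2 the float destination
+ // (0 = f32, else f64).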
+ switch signedInt(op.B1) {
+ case signedInt32:
+ if op.B2 == 0 {
+ // float32
+ v := float32(int32(ce.popValue()))
+ ce.pushValue(uint64(math.Float32bits(v)))
+ } else {
+ // float64
+ v := float64(int32(ce.popValue()))
+ ce.pushValue(math.Float64bits(v))
+ }
+ case signedInt64:
+ if op.B2 == 0 {
+ // float32
+ v := float32(int64(ce.popValue()))
+ ce.pushValue(uint64(math.Float32bits(v)))
+ } else {
+ // float64
+ v := float64(int64(ce.popValue()))
+ ce.pushValue(math.Float64bits(v))
+ }
+ case signedUint32:
+ if op.B2 == 0 {
+ // float32
+ v := float32(uint32(ce.popValue()))
+ ce.pushValue(uint64(math.Float32bits(v)))
+ } else {
+ // float64
+ v := float64(uint32(ce.popValue()))
+ ce.pushValue(math.Float64bits(v))
+ }
+ case signedUint64:
+ if op.B2 == 0 {
+ // float32
+ v := float32(ce.popValue())
+ ce.pushValue(uint64(math.Float32bits(v)))
+ } else {
+ // float64
+ v := float64(ce.popValue())
+ ce.pushValue(math.Float64bits(v))
+ }
+ }
+ frame.pc++
+ case operationKindF32DemoteFromF64:
+ v := float32(math.Float64frombits(ce.popValue()))
+ ce.pushValue(uint64(math.Float32bits(v)))
+ frame.pc++
+ case operationKindF64PromoteFromF32:
+ v := float64(math.Float32frombits(uint32(ce.popValue())))
+ ce.pushValue(math.Float64bits(v))
+ frame.pc++
+ case operationKindExtend:
+ if op.B1 == 1 {
+ // Signed.
+ v := int64(int32(ce.popValue()))
+ ce.pushValue(uint64(v))
+ } else {
+ v := uint64(uint32(ce.popValue()))
+ ce.pushValue(v)
+ }
+ frame.pc++
+ case operationKindSignExtend32From8:
+ v := uint32(int8(ce.popValue()))
+ ce.pushValue(uint64(v))
+ frame.pc++
+ case operationKindSignExtend32From16:
+ v := uint32(int16(ce.popValue()))
+ ce.pushValue(uint64(v))
+ frame.pc++
+ case operationKindSignExtend64From8:
+ v := int64(int8(ce.popValue()))
+ ce.pushValue(uint64(v))
+ frame.pc++
+ case operationKindSignExtend64From16:
+ v := int64(int16(ce.popValue()))
+ ce.pushValue(uint64(v))
+ frame.pc++
+ case operationKindSignExtend64From32:
+ v := int64(int32(ce.popValue()))
+ ce.pushValue(uint64(v))
+ frame.pc++
+ case operationKindMemoryInit:
+ dataInstance := dataInstances[op.U1]
+ copySize := ce.popValue()
+ inDataOffset := ce.popValue()
+ inMemoryOffset := ce.popValue()
+ if inDataOffset+copySize > uint64(len(dataInstance)) ||
+ inMemoryOffset+copySize > uint64(len(memoryInst.Buffer)) {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ } else if copySize != 0 {
+ copy(memoryInst.Buffer[inMemoryOffset:inMemoryOffset+copySize], dataInstance[inDataOffset:])
+ }
+ frame.pc++
+ case operationKindDataDrop:
+ dataInstances[op.U1] = nil
+ frame.pc++
+ case operationKindMemoryCopy:
+ memLen := uint64(len(memoryInst.Buffer))
+ copySize := ce.popValue()
+ sourceOffset := ce.popValue()
+ destinationOffset := ce.popValue()
+ if sourceOffset+copySize > memLen || destinationOffset+copySize > memLen {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ } else if copySize != 0 {
+ copy(memoryInst.Buffer[destinationOffset:],
+ memoryInst.Buffer[sourceOffset:sourceOffset+copySize])
+ }
+ frame.pc++
+ case operationKindMemoryFill:
+ fillSize := ce.popValue()
+ value := byte(ce.popValue())
+ offset := ce.popValue()
+ if fillSize+offset > uint64(len(memoryInst.Buffer)) {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ } else if fillSize != 0 {
+ // Uses the copy trick to fill the buffer faster.
+ // https://gist.github.com/taylorza/df2f89d5f9ab3ffd06865062a4cf015d
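+ // buf[0] seeds the value, then each pass copies the already-filled prefix
+ // over the next chunk, doubling the filled length, so only O(log n)
+ // memmove-backed copies are needed.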
+ buf := memoryInst.Buffer[offset : offset+fillSize]
+ buf[0] = value
+ for i := 1; i < len(buf); i *= 2 {
+ copy(buf[i:], buf[:i])
+ }
+ }
+ frame.pc++
+ case operationKindTableInit:
+ elementInstance := elementInstances[op.U1]
+ copySize := ce.popValue()
+ inElementOffset := ce.popValue()
+ inTableOffset := ce.popValue()
+ table := tables[op.U2]
+ if inElementOffset+copySize > uint64(len(elementInstance)) ||
+ inTableOffset+copySize > uint64(len(table.References)) {
+ panic(wasmruntime.ErrRuntimeInvalidTableAccess)
+ } else if copySize != 0 {
+ copy(table.References[inTableOffset:inTableOffset+copySize], elementInstance[inElementOffset:])
+ }
+ frame.pc++
+ case operationKindElemDrop:
+ elementInstances[op.U1] = nil
+ frame.pc++
+ case operationKindTableCopy:
+ srcTable, dstTable := tables[op.U1].References, tables[op.U2].References
+ copySize := ce.popValue()
+ sourceOffset := ce.popValue()
+ destinationOffset := ce.popValue()
+ if sourceOffset+copySize > uint64(len(srcTable)) || destinationOffset+copySize > uint64(len(dstTable)) {
+ panic(wasmruntime.ErrRuntimeInvalidTableAccess)
+ } else if copySize != 0 {
+ copy(dstTable[destinationOffset:], srcTable[sourceOffset:sourceOffset+copySize])
+ }
+ frame.pc++
+ case operationKindRefFunc:
+ ce.pushValue(uint64(uintptr(unsafe.Pointer(&functions[op.U1]))))
+ frame.pc++
+ case operationKindTableGet:
+ table := tables[op.U1]
+
+ offset := ce.popValue()
+ if offset >= uint64(len(table.References)) {
+ panic(wasmruntime.ErrRuntimeInvalidTableAccess)
+ }
+
+ ce.pushValue(uint64(table.References[offset]))
+ frame.pc++
+ case operationKindTableSet:
+ table := tables[op.U1]
+ ref := ce.popValue()
+
+ offset := ce.popValue()
+ if offset >= uint64(len(table.References)) {
+ panic(wasmruntime.ErrRuntimeInvalidTableAccess)
+ }
+
+ table.References[offset] = uintptr(ref) // externrefs are opaque uint64.
+ frame.pc++
+ case operationKindTableSize:
+ table := tables[op.U1]
+ ce.pushValue(uint64(len(table.References)))
+ frame.pc++
+ case operationKindTableGrow:
+ table := tables[op.U1]
+ num, ref := ce.popValue(), ce.popValue()
+ ret := table.Grow(uint32(num), uintptr(ref))
+ ce.pushValue(uint64(ret))
+ frame.pc++
+ case operationKindTableFill:
+ table := tables[op.U1]
+ num := ce.popValue()
+ ref := uintptr(ce.popValue())
+ offset := ce.popValue()
+ if num+offset > uint64(len(table.References)) {
+ panic(wasmruntime.ErrRuntimeInvalidTableAccess)
+ } else if num > 0 {
+ // Uses the same copy trick to fill the region with the value faster.
+ // https://gist.github.com/taylorza/df2f89d5f9ab3ffd06865062a4cf015d
+ targetRegion := table.References[offset : offset+num]
+ targetRegion[0] = ref
+ for i := 1; i < len(targetRegion); i *= 2 {
+ copy(targetRegion[i:], targetRegion[:i])
+ }
+ }
+ frame.pc++
+ case operationKindV128Const:
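+ // A v128 value occupies two adjacent stack slots: the low 64 bits are
+ // pushed first, then the high 64 bits.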
+ lo, hi := op.U1, op.U2
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Add:
+ yHigh, yLow := ce.popValue(), ce.popValue()
+ xHigh, xLow := ce.popValue(), ce.popValue()
+ switch op.B1 {
+ case shapeI8x16:
+ ce.pushValue(
+ uint64(uint8(xLow>>8)+uint8(yLow>>8))<<8 | uint64(uint8(xLow)+uint8(yLow)) |
+ uint64(uint8(xLow>>24)+uint8(yLow>>24))<<24 | uint64(uint8(xLow>>16)+uint8(yLow>>16))<<16 |
+ uint64(uint8(xLow>>40)+uint8(yLow>>40))<<40 | uint64(uint8(xLow>>32)+uint8(yLow>>32))<<32 |
+ uint64(uint8(xLow>>56)+uint8(yLow>>56))<<56 | uint64(uint8(xLow>>48)+uint8(yLow>>48))<<48,
+ )
+ ce.pushValue(
+ uint64(uint8(xHigh>>8)+uint8(yHigh>>8))<<8 | uint64(uint8(xHigh)+uint8(yHigh)) |
+ uint64(uint8(xHigh>>24)+uint8(yHigh>>24))<<24 | uint64(uint8(xHigh>>16)+uint8(yHigh>>16))<<16 |
+ uint64(uint8(xHigh>>40)+uint8(yHigh>>40))<<40 | uint64(uint8(xHigh>>32)+uint8(yHigh>>32))<<32 |
+ uint64(uint8(xHigh>>56)+uint8(yHigh>>56))<<56 | uint64(uint8(xHigh>>48)+uint8(yHigh>>48))<<48,
+ )
+ case shapeI16x8:
+ ce.pushValue(
+ uint64(uint16(xLow>>16)+uint16(yLow>>16))<<16 | uint64(uint16(xLow)+uint16(yLow)) |
+ uint64(uint16(xLow>>48)+uint16(yLow>>48))<<48 | uint64(uint16(xLow>>32)+uint16(yLow>>32))<<32,
+ )
+ ce.pushValue(
+ uint64(uint16(xHigh>>16)+uint16(yHigh>>16))<<16 | uint64(uint16(xHigh)+uint16(yHigh)) |
+ uint64(uint16(xHigh>>48)+uint16(yHigh>>48))<<48 | uint64(uint16(xHigh>>32)+uint16(yHigh>>32))<<32,
+ )
+ case shapeI32x4:
+ ce.pushValue(uint64(uint32(xLow>>32)+uint32(yLow>>32))<<32 | uint64(uint32(xLow)+uint32(yLow)))
+ ce.pushValue(uint64(uint32(xHigh>>32)+uint32(yHigh>>32))<<32 | uint64(uint32(xHigh)+uint32(yHigh)))
+ case shapeI64x2:
+ ce.pushValue(xLow + yLow)
+ ce.pushValue(xHigh + yHigh)
+ case shapeF32x4:
+ ce.pushValue(
+ addFloat32bits(uint32(xLow), uint32(yLow)) | addFloat32bits(uint32(xLow>>32), uint32(yLow>>32))<<32,
+ )
+ ce.pushValue(
+ addFloat32bits(uint32(xHigh), uint32(yHigh)) | addFloat32bits(uint32(xHigh>>32), uint32(yHigh>>32))<<32,
+ )
+ case shapeF64x2:
+ ce.pushValue(math.Float64bits(math.Float64frombits(xLow) + math.Float64frombits(yLow)))
+ ce.pushValue(math.Float64bits(math.Float64frombits(xHigh) + math.Float64frombits(yHigh)))
+ }
+ frame.pc++
+ case operationKindV128Sub:
+ yHigh, yLow := ce.popValue(), ce.popValue()
+ xHigh, xLow := ce.popValue(), ce.popValue()
+ switch op.B1 {
+ case shapeI8x16:
+ ce.pushValue(
+ uint64(uint8(xLow>>8)-uint8(yLow>>8))<<8 | uint64(uint8(xLow)-uint8(yLow)) |
+ uint64(uint8(xLow>>24)-uint8(yLow>>24))<<24 | uint64(uint8(xLow>>16)-uint8(yLow>>16))<<16 |
+ uint64(uint8(xLow>>40)-uint8(yLow>>40))<<40 | uint64(uint8(xLow>>32)-uint8(yLow>>32))<<32 |
+ uint64(uint8(xLow>>56)-uint8(yLow>>56))<<56 | uint64(uint8(xLow>>48)-uint8(yLow>>48))<<48,
+ )
+ ce.pushValue(
+ uint64(uint8(xHigh>>8)-uint8(yHigh>>8))<<8 | uint64(uint8(xHigh)-uint8(yHigh)) |
+ uint64(uint8(xHigh>>24)-uint8(yHigh>>24))<<24 | uint64(uint8(xHigh>>16)-uint8(yHigh>>16))<<16 |
+ uint64(uint8(xHigh>>40)-uint8(yHigh>>40))<<40 | uint64(uint8(xHigh>>32)-uint8(yHigh>>32))<<32 |
+ uint64(uint8(xHigh>>56)-uint8(yHigh>>56))<<56 | uint64(uint8(xHigh>>48)-uint8(yHigh>>48))<<48,
+ )
+ case shapeI16x8:
+ ce.pushValue(
+ uint64(uint16(xLow>>16)-uint16(yLow>>16))<<16 | uint64(uint16(xLow)-uint16(yLow)) |
+ uint64(uint16(xLow>>48)-uint16(yLow>>48))<<48 | uint64(uint16(xLow>>32)-uint16(yLow>>32))<<32,
+ )
+ ce.pushValue(
+ uint64(uint16(xHigh>>16)-uint16(yHigh>>16))<<16 | uint64(uint16(xHigh)-uint16(yHigh)) |
+ uint64(uint16(xHigh>>48)-uint16(yHigh>>48))<<48 | uint64(uint16(xHigh>>32)-uint16(yHigh>>32))<<32,
+ )
+ case shapeI32x4:
+ ce.pushValue(uint64(uint32(xLow>>32)-uint32(yLow>>32))<<32 | uint64(uint32(xLow)-uint32(yLow)))
+ ce.pushValue(uint64(uint32(xHigh>>32)-uint32(yHigh>>32))<<32 | uint64(uint32(xHigh)-uint32(yHigh)))
+ case shapeI64x2:
+ ce.pushValue(xLow - yLow)
+ ce.pushValue(xHigh - yHigh)
+ case shapeF32x4:
+ ce.pushValue(
+ subFloat32bits(uint32(xLow), uint32(yLow)) | subFloat32bits(uint32(xLow>>32), uint32(yLow>>32))<<32,
+ )
+ ce.pushValue(
+ subFloat32bits(uint32(xHigh), uint32(yHigh)) | subFloat32bits(uint32(xHigh>>32), uint32(yHigh>>32))<<32,
+ )
+ case shapeF64x2:
+ ce.pushValue(math.Float64bits(math.Float64frombits(xLow) - math.Float64frombits(yLow)))
+ ce.pushValue(math.Float64bits(math.Float64frombits(xHigh) - math.Float64frombits(yHigh)))
+ }
+ frame.pc++
+ case operationKindV128Load:
+ offset := ce.popMemoryOffset(op)
+ switch op.B1 {
+ case v128LoadType128:
+ lo, ok := memoryInst.ReadUint64Le(offset)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(lo)
+ hi, ok := memoryInst.ReadUint64Le(offset + 8)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(hi)
+ case v128LoadType8x8s:
+ data, ok := memoryInst.Read(offset, 8)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(
+ uint64(uint16(int8(data[3])))<<48 | uint64(uint16(int8(data[2])))<<32 | uint64(uint16(int8(data[1])))<<16 | uint64(uint16(int8(data[0]))),
+ )
+ ce.pushValue(
+ uint64(uint16(int8(data[7])))<<48 | uint64(uint16(int8(data[6])))<<32 | uint64(uint16(int8(data[5])))<<16 | uint64(uint16(int8(data[4]))),
+ )
+ case v128LoadType8x8u:
+ data, ok := memoryInst.Read(offset, 8)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(
+ uint64(data[3])<<48 | uint64(data[2])<<32 | uint64(data[1])<<16 | uint64(data[0]),
+ )
+ ce.pushValue(
+ uint64(data[7])<<48 | uint64(data[6])<<32 | uint64(data[5])<<16 | uint64(data[4]),
+ )
+ case v128LoadType16x4s:
+ data, ok := memoryInst.Read(offset, 8)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(
+ uint64(uint32(int16(binary.LittleEndian.Uint16(data[2:]))))<<32 |
+ uint64(uint32(int16(binary.LittleEndian.Uint16(data)))),
+ )
+ ce.pushValue(
+ uint64(uint32(int16(binary.LittleEndian.Uint16(data[6:]))))<<32 |
+ uint64(uint32(int16(binary.LittleEndian.Uint16(data[4:])))),
+ )
+ case v128LoadType16x4u:
+ data, ok := memoryInst.Read(offset, 8)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(
+ uint64(binary.LittleEndian.Uint16(data[2:]))<<32 | uint64(binary.LittleEndian.Uint16(data)),
+ )
+ ce.pushValue(
+ uint64(binary.LittleEndian.Uint16(data[6:]))<<32 | uint64(binary.LittleEndian.Uint16(data[4:])),
+ )
+ case v128LoadType32x2s:
+ data, ok := memoryInst.Read(offset, 8)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(uint64(int32(binary.LittleEndian.Uint32(data))))
+ ce.pushValue(uint64(int32(binary.LittleEndian.Uint32(data[4:]))))
+ case v128LoadType32x2u:
+ data, ok := memoryInst.Read(offset, 8)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(uint64(binary.LittleEndian.Uint32(data)))
+ ce.pushValue(uint64(binary.LittleEndian.Uint32(data[4:])))
+ case v128LoadType8Splat:
+ v, ok := memoryInst.ReadByte(offset)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ v8 := uint64(v)<<56 | uint64(v)<<48 | uint64(v)<<40 | uint64(v)<<32 |
+ uint64(v)<<24 | uint64(v)<<16 | uint64(v)<<8 | uint64(v)
+ ce.pushValue(v8)
+ ce.pushValue(v8)
+ case v128LoadType16Splat:
+ v, ok := memoryInst.ReadUint16Le(offset)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ v4 := uint64(v)<<48 | uint64(v)<<32 | uint64(v)<<16 | uint64(v)
+ ce.pushValue(v4)
+ ce.pushValue(v4)
+ case v128LoadType32Splat:
+ v, ok := memoryInst.ReadUint32Le(offset)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ vv := uint64(v)<<32 | uint64(v)
+ ce.pushValue(vv)
+ ce.pushValue(vv)
+ case v128LoadType64Splat:
+ lo, ok := memoryInst.ReadUint64Le(offset)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(lo)
+ ce.pushValue(lo)
+ case v128LoadType32zero:
+ lo, ok := memoryInst.ReadUint32Le(offset)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(uint64(lo))
+ ce.pushValue(0)
+ case v128LoadType64zero:
+ lo, ok := memoryInst.ReadUint64Le(offset)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(lo)
+ ce.pushValue(0)
+ }
+ frame.pc++
+ case operationKindV128LoadLane:
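+ // B1 is the lane width in bits and B2 the lane index; lanes in the lower
+ // half of the vector live in lo, the rest in hi.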
+ hi, lo := ce.popValue(), ce.popValue()
+ offset := ce.popMemoryOffset(op)
+ switch op.B1 {
+ case 8:
+ b, ok := memoryInst.ReadByte(offset)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ if op.B2 < 8 {
+ s := op.B2 << 3
+ lo = (lo & ^(0xff << s)) | uint64(b)<<s
+ } else {
+ s := (op.B2 - 8) << 3
+ hi = (hi & ^(0xff << s)) | uint64(b)<<s
+ }
+ case 16:
+ b, ok := memoryInst.ReadUint16Le(offset)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ if op.B2 < 4 {
+ s := op.B2 << 4
+ lo = (lo & ^(0xff_ff << s)) | uint64(b)<<s
+ } else {
+ s := (op.B2 - 4) << 4
+ hi = (hi & ^(0xff_ff << s)) | uint64(b)<<s
+ }
+ case 32:
+ b, ok := memoryInst.ReadUint32Le(offset)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ if op.B2 < 2 {
+ s := op.B2 << 5
+ lo = (lo & ^(0xff_ff_ff_ff << s)) | uint64(b)<<s
+ } else {
+ s := (op.B2 - 2) << 5
+ hi = (hi & ^(0xff_ff_ff_ff << s)) | uint64(b)<<s
+ }
+ case 64:
+ b, ok := memoryInst.ReadUint64Le(offset)
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ if op.B2 == 0 {
+ lo = b
+ } else {
+ hi = b
+ }
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Store:
+ hi, lo := ce.popValue(), ce.popValue()
+ offset := ce.popMemoryOffset(op)
+ // Write the upper bytes first to trigger an early error if the memory access is out of bounds.
+ // Otherwise, the lower bytes might be written to memory, but the upper bytes might not.
+ if uint64(offset)+8 > math.MaxUint32 {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ if ok := memoryInst.WriteUint64Le(offset+8, hi); !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ if ok := memoryInst.WriteUint64Le(offset, lo); !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ frame.pc++
+ case operationKindV128StoreLane:
+ hi, lo := ce.popValue(), ce.popValue()
+ offset := ce.popMemoryOffset(op)
+ var ok bool
+ switch op.B1 {
+ case 8:
+ if op.B2 < 8 {
+ ok = memoryInst.WriteByte(offset, byte(lo>>(op.B2*8)))
+ } else {
+ ok = memoryInst.WriteByte(offset, byte(hi>>((op.B2-8)*8)))
+ }
+ case 16:
+ if op.B2 < 4 {
+ ok = memoryInst.WriteUint16Le(offset, uint16(lo>>(op.B2*16)))
+ } else {
+ ok = memoryInst.WriteUint16Le(offset, uint16(hi>>((op.B2-4)*16)))
+ }
+ case 32:
+ if op.B2 < 2 {
+ ok = memoryInst.WriteUint32Le(offset, uint32(lo>>(op.B2*32)))
+ } else {
+ ok = memoryInst.WriteUint32Le(offset, uint32(hi>>((op.B2-2)*32)))
+ }
+ case 64:
+ if op.B2 == 0 {
+ ok = memoryInst.WriteUint64Le(offset, lo)
+ } else {
+ ok = memoryInst.WriteUint64Le(offset, hi)
+ }
+ }
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ frame.pc++
+ case operationKindV128ReplaceLane:
+ v := ce.popValue()
+ hi, lo := ce.popValue(), ce.popValue()
+ switch op.B1 {
+ case shapeI8x16:
+ if op.B2 < 8 {
+ s := op.B2 << 3
+ lo = (lo & ^(0xff << s)) | uint64(byte(v))<<s
+ } else {
+ s := (op.B2 - 8) << 3
+ hi = (hi & ^(0xff << s)) | uint64(byte(v))<<s
+ }
+ case shapeI16x8:
+ if op.B2 < 4 {
+ s := op.B2 << 4
+ lo = (lo & ^(0xff_ff << s)) | uint64(uint16(v))<<s
+ } else {
+ s := (op.B2 - 4) << 4
+ hi = (hi & ^(0xff_ff << s)) | uint64(uint16(v))<<s
+ }
+ case shapeI32x4, shapeF32x4:
+ if op.B2 < 2 {
+ s := op.B2 << 5
+ lo = (lo & ^(0xff_ff_ff_ff << s)) | uint64(uint32(v))<<s
+ } else {
+ s := (op.B2 - 2) << 5
+ hi = (hi & ^(0xff_ff_ff_ff << s)) | uint64(uint32(v))<<s
+ }
+ case shapeI64x2, shapeF64x2:
+ if op.B2 == 0 {
+ lo = v
+ } else {
+ hi = v
+ }
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128ExtractLane:
+ hi, lo := ce.popValue(), ce.popValue()
+ var v uint64
+ switch op.B1 {
+ case shapeI8x16:
+ var u8 byte
+ if op.B2 < 8 {
+ u8 = byte(lo >> (op.B2 * 8))
+ } else {
+ u8 = byte(hi >> ((op.B2 - 8) * 8))
+ }
+ if op.B3 {
+ // sign-extend.
+ v = uint64(uint32(int8(u8)))
+ } else {
+ v = uint64(u8)
+ }
+ case shapeI16x8:
+ var u16 uint16
+ if op.B2 < 4 {
+ u16 = uint16(lo >> (op.B2 * 16))
+ } else {
+ u16 = uint16(hi >> ((op.B2 - 4) * 16))
+ }
+ if op.B3 {
+ // sign-extend.
+ v = uint64(uint32(int16(u16)))
+ } else {
+ v = uint64(u16)
+ }
+ case shapeI32x4, shapeF32x4:
+ if op.B2 < 2 {
+ v = uint64(uint32(lo >> (op.B2 * 32)))
+ } else {
+ v = uint64(uint32(hi >> ((op.B2 - 2) * 32)))
+ }
+ case shapeI64x2, shapeF64x2:
+ if op.B2 == 0 {
+ v = lo
+ } else {
+ v = hi
+ }
+ }
+ ce.pushValue(v)
+ frame.pc++
+ case operationKindV128Splat:
+ v := ce.popValue()
+ var hi, lo uint64
+ switch op.B1 {
+ case shapeI8x16:
+ v8 := uint64(byte(v))<<56 | uint64(byte(v))<<48 | uint64(byte(v))<<40 | uint64(byte(v))<<32 |
+ uint64(byte(v))<<24 | uint64(byte(v))<<16 | uint64(byte(v))<<8 | uint64(byte(v))
+ hi, lo = v8, v8
+ case shapeI16x8:
+ v4 := uint64(uint16(v))<<48 | uint64(uint16(v))<<32 | uint64(uint16(v))<<16 | uint64(uint16(v))
+ hi, lo = v4, v4
+ case shapeI32x4, shapeF32x4:
+ v2 := uint64(uint32(v))<<32 | uint64(uint32(v))
+ lo, hi = v2, v2
+ case shapeI64x2, shapeF64x2:
+ lo, hi = v, v
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Swizzle:
+ idxHi, idxLo := ce.popValue(), ce.popValue()
+ baseHi, baseLo := ce.popValue(), ce.popValue()
+ var newVal [16]byte
+ for i := 0; i < 16; i++ {
+ var id byte
+ if i < 8 {
+ id = byte(idxLo >> (i * 8))
+ } else {
+ id = byte(idxHi >> ((i - 8) * 8))
+ }
+ if id < 8 {
+ newVal[i] = byte(baseLo >> (id * 8))
+ } else if id < 16 {
+ newVal[i] = byte(baseHi >> ((id - 8) * 8))
+ }
+ }
+ ce.pushValue(binary.LittleEndian.Uint64(newVal[:8]))
+ ce.pushValue(binary.LittleEndian.Uint64(newVal[8:]))
+ frame.pc++
+ case operationKindV128Shuffle:
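+ // op.Us holds the 16 lane selectors: 0..15 pick a byte from the first
+ // operand (y, pushed earlier), 16..31 from the second operand (x).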
+ xHi, xLo, yHi, yLo := ce.popValue(), ce.popValue(), ce.popValue(), ce.popValue()
+ var newVal [16]byte
+ for i, l := range op.Us {
+ if l < 8 {
+ newVal[i] = byte(yLo >> (l * 8))
+ } else if l < 16 {
+ newVal[i] = byte(yHi >> ((l - 8) * 8))
+ } else if l < 24 {
+ newVal[i] = byte(xLo >> ((l - 16) * 8))
+ } else if l < 32 {
+ newVal[i] = byte(xHi >> ((l - 24) * 8))
+ }
+ }
+ ce.pushValue(binary.LittleEndian.Uint64(newVal[:8]))
+ ce.pushValue(binary.LittleEndian.Uint64(newVal[8:]))
+ frame.pc++
+ case operationKindV128AnyTrue:
+ hi, lo := ce.popValue(), ce.popValue()
+ if hi != 0 || lo != 0 {
+ ce.pushValue(1)
+ } else {
+ ce.pushValue(0)
+ }
+ frame.pc++
+ case operationKindV128AllTrue:
+ hi, lo := ce.popValue(), ce.popValue()
+ var ret bool
+ switch op.B1 {
+ case shapeI8x16:
+ ret = (uint8(lo) != 0) && (uint8(lo>>8) != 0) && (uint8(lo>>16) != 0) && (uint8(lo>>24) != 0) &&
+ (uint8(lo>>32) != 0) && (uint8(lo>>40) != 0) && (uint8(lo>>48) != 0) && (uint8(lo>>56) != 0) &&
+ (uint8(hi) != 0) && (uint8(hi>>8) != 0) && (uint8(hi>>16) != 0) && (uint8(hi>>24) != 0) &&
+ (uint8(hi>>32) != 0) && (uint8(hi>>40) != 0) && (uint8(hi>>48) != 0) && (uint8(hi>>56) != 0)
+ case shapeI16x8:
+ ret = (uint16(lo) != 0) && (uint16(lo>>16) != 0) && (uint16(lo>>32) != 0) && (uint16(lo>>48) != 0) &&
+ (uint16(hi) != 0) && (uint16(hi>>16) != 0) && (uint16(hi>>32) != 0) && (uint16(hi>>48) != 0)
+ case shapeI32x4:
+ ret = (uint32(lo) != 0) && (uint32(lo>>32) != 0) &&
+ (uint32(hi) != 0) && (uint32(hi>>32) != 0)
+ case shapeI64x2:
+ ret = (lo != 0) &&
+ (hi != 0)
+ }
+ if ret {
+ ce.pushValue(1)
+ } else {
+ ce.pushValue(0)
+ }
+ frame.pc++
+ case operationKindV128BitMask:
+ // https://github.com/WebAssembly/spec/blob/wg-2.0.draft1/proposals/simd/SIMD.md#bitmask-extraction
+ hi, lo := ce.popValue(), ce.popValue()
+ var res uint64
+ switch op.B1 {
+ case shapeI8x16:
+ for i := 0; i < 8; i++ {
+ if int8(lo>>(i*8)) < 0 {
+ res |= 1 << i
+ }
+ }
+ for i := 0; i < 8; i++ {
+ if int8(hi>>(i*8)) < 0 {
+ res |= 1 << (i + 8)
+ }
+ }
+ case shapeI16x8:
+ for i := 0; i < 4; i++ {
+ if int16(lo>>(i*16)) < 0 {
+ res |= 1 << i
+ }
+ }
+ for i := 0; i < 4; i++ {
+ if int16(hi>>(i*16)) < 0 {
+ res |= 1 << (i + 4)
+ }
+ }
+ case shapeI32x4:
+ for i := 0; i < 2; i++ {
+ if int32(lo>>(i*32)) < 0 {
+ res |= 1 << i
+ }
+ }
+ for i := 0; i < 2; i++ {
+ if int32(hi>>(i*32)) < 0 {
+ res |= 1 << (i + 2)
+ }
+ }
+ case shapeI64x2:
+ if int64(lo) < 0 {
+ res |= 0b01
+ }
+ if int64(hi) < 0 {
+ res |= 0b10
+ }
+ }
+ ce.pushValue(res)
+ frame.pc++
+ case operationKindV128And:
+ x2Hi, x2Lo := ce.popValue(), ce.popValue()
+ x1Hi, x1Lo := ce.popValue(), ce.popValue()
+ ce.pushValue(x1Lo & x2Lo)
+ ce.pushValue(x1Hi & x2Hi)
+ frame.pc++
+ case operationKindV128Not:
+ hi, lo := ce.popValue(), ce.popValue()
+ ce.pushValue(^lo)
+ ce.pushValue(^hi)
+ frame.pc++
+ case operationKindV128Or:
+ x2Hi, x2Lo := ce.popValue(), ce.popValue()
+ x1Hi, x1Lo := ce.popValue(), ce.popValue()
+ ce.pushValue(x1Lo | x2Lo)
+ ce.pushValue(x1Hi | x2Hi)
+ frame.pc++
+ case operationKindV128Xor:
+ x2Hi, x2Lo := ce.popValue(), ce.popValue()
+ x1Hi, x1Lo := ce.popValue(), ce.popValue()
+ ce.pushValue(x1Lo ^ x2Lo)
+ ce.pushValue(x1Hi ^ x2Hi)
+ frame.pc++
+ case operationKindV128Bitselect:
+ // https://github.com/WebAssembly/spec/blob/wg-2.0.draft1/proposals/simd/SIMD.md#bitwise-select
+ cHi, cLo := ce.popValue(), ce.popValue()
+ x2Hi, x2Lo := ce.popValue(), ce.popValue()
+ x1Hi, x1Lo := ce.popValue(), ce.popValue()
+ // v128.or(v128.and(v1, c), v128.and(v2, v128.not(c)))
+ ce.pushValue((x1Lo & cLo) | (x2Lo & (^cLo)))
+ ce.pushValue((x1Hi & cHi) | (x2Hi & (^cHi)))
+ frame.pc++
+ case operationKindV128AndNot:
+ x2Hi, x2Lo := ce.popValue(), ce.popValue()
+ x1Hi, x1Lo := ce.popValue(), ce.popValue()
+ ce.pushValue(x1Lo & (^x2Lo))
+ ce.pushValue(x1Hi & (^x2Hi))
+ frame.pc++
+ case operationKindV128Shl:
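+ // As with the scalar shifts, the shift count is taken modulo the lane width.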
+ s := ce.popValue()
+ hi, lo := ce.popValue(), ce.popValue()
+ switch op.B1 {
+ case shapeI8x16:
+ s = s % 8
+ lo = uint64(uint8(lo<<s)) |
+ uint64(uint8((lo>>8)<<s))<<8 |
+ uint64(uint8((lo>>16)<<s))<<16 |
+ uint64(uint8((lo>>24)<<s))<<24 |
+ uint64(uint8((lo>>32)<<s))<<32 |
+ uint64(uint8((lo>>40)<<s))<<40 |
+ uint64(uint8((lo>>48)<<s))<<48 |
+ uint64(uint8((lo>>56)<<s))<<56
+ hi = uint64(uint8(hi<<s)) |
+ uint64(uint8((hi>>8)<<s))<<8 |
+ uint64(uint8((hi>>16)<<s))<<16 |
+ uint64(uint8((hi>>24)<<s))<<24 |
+ uint64(uint8((hi>>32)<<s))<<32 |
+ uint64(uint8((hi>>40)<<s))<<40 |
+ uint64(uint8((hi>>48)<<s))<<48 |
+ uint64(uint8((hi>>56)<<s))<<56
+ case shapeI16x8:
+ s = s % 16
+ lo = uint64(uint16(lo<<s)) |
+ uint64(uint16((lo>>16)<<s))<<16 |
+ uint64(uint16((lo>>32)<<s))<<32 |
+ uint64(uint16((lo>>48)<<s))<<48
+ hi = uint64(uint16(hi<<s)) |
+ uint64(uint16((hi>>16)<<s))<<16 |
+ uint64(uint16((hi>>32)<<s))<<32 |
+ uint64(uint16((hi>>48)<<s))<<48
+ case shapeI32x4:
+ s = s % 32
+ lo = uint64(uint32(lo<<s)) | uint64(uint32((lo>>32)<<s))<<32
+ hi = uint64(uint32(hi<<s)) | uint64(uint32((hi>>32)<<s))<<32
+ case shapeI64x2:
+ s = s % 64
+ lo = lo << s
+ hi = hi << s
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Shr:
+ s := ce.popValue()
+ hi, lo := ce.popValue(), ce.popValue()
+ switch op.B1 {
+ case shapeI8x16:
+ s = s % 8
+ if op.B3 { // signed
+ lo = uint64(uint8(int8(lo)>>s)) |
+ uint64(uint8(int8(lo>>8)>>s))<<8 |
+ uint64(uint8(int8(lo>>16)>>s))<<16 |
+ uint64(uint8(int8(lo>>24)>>s))<<24 |
+ uint64(uint8(int8(lo>>32)>>s))<<32 |
+ uint64(uint8(int8(lo>>40)>>s))<<40 |
+ uint64(uint8(int8(lo>>48)>>s))<<48 |
+ uint64(uint8(int8(lo>>56)>>s))<<56
+ hi = uint64(uint8(int8(hi)>>s)) |
+ uint64(uint8(int8(hi>>8)>>s))<<8 |
+ uint64(uint8(int8(hi>>16)>>s))<<16 |
+ uint64(uint8(int8(hi>>24)>>s))<<24 |
+ uint64(uint8(int8(hi>>32)>>s))<<32 |
+ uint64(uint8(int8(hi>>40)>>s))<<40 |
+ uint64(uint8(int8(hi>>48)>>s))<<48 |
+ uint64(uint8(int8(hi>>56)>>s))<<56
+ } else {
+ lo = uint64(uint8(lo)>>s) |
+ uint64(uint8(lo>>8)>>s)<<8 |
+ uint64(uint8(lo>>16)>>s)<<16 |
+ uint64(uint8(lo>>24)>>s)<<24 |
+ uint64(uint8(lo>>32)>>s)<<32 |
+ uint64(uint8(lo>>40)>>s)<<40 |
+ uint64(uint8(lo>>48)>>s)<<48 |
+ uint64(uint8(lo>>56)>>s)<<56
+ hi = uint64(uint8(hi)>>s) |
+ uint64(uint8(hi>>8)>>s)<<8 |
+ uint64(uint8(hi>>16)>>s)<<16 |
+ uint64(uint8(hi>>24)>>s)<<24 |
+ uint64(uint8(hi>>32)>>s)<<32 |
+ uint64(uint8(hi>>40)>>s)<<40 |
+ uint64(uint8(hi>>48)>>s)<<48 |
+ uint64(uint8(hi>>56)>>s)<<56
+ }
+ case shapeI16x8:
+ s = s % 16
+ if op.B3 { // signed
+ lo = uint64(uint16(int16(lo)>>s)) |
+ uint64(uint16(int16(lo>>16)>>s))<<16 |
+ uint64(uint16(int16(lo>>32)>>s))<<32 |
+ uint64(uint16(int16(lo>>48)>>s))<<48
+ hi = uint64(uint16(int16(hi)>>s)) |
+ uint64(uint16(int16(hi>>16)>>s))<<16 |
+ uint64(uint16(int16(hi>>32)>>s))<<32 |
+ uint64(uint16(int16(hi>>48)>>s))<<48
+ } else {
+ lo = uint64(uint16(lo)>>s) |
+ uint64(uint16(lo>>16)>>s)<<16 |
+ uint64(uint16(lo>>32)>>s)<<32 |
+ uint64(uint16(lo>>48)>>s)<<48
+ hi = uint64(uint16(hi)>>s) |
+ uint64(uint16(hi>>16)>>s)<<16 |
+ uint64(uint16(hi>>32)>>s)<<32 |
+ uint64(uint16(hi>>48)>>s)<<48
+ }
+ case shapeI32x4:
+ s = s % 32
+ if op.B3 {
+ lo = uint64(uint32(int32(lo)>>s)) | uint64(uint32(int32(lo>>32)>>s))<<32
+ hi = uint64(uint32(int32(hi)>>s)) | uint64(uint32(int32(hi>>32)>>s))<<32
+ } else {
+ lo = uint64(uint32(lo)>>s) | uint64(uint32(lo>>32)>>s)<<32
+ hi = uint64(uint32(hi)>>s) | uint64(uint32(hi>>32)>>s)<<32
+ }
+ case shapeI64x2:
+ s = s % 64
+ if op.B3 { // signed
+ lo = uint64(int64(lo) >> s)
+ hi = uint64(int64(hi) >> s)
+ } else {
+ lo = lo >> s
+ hi = hi >> s
+ }
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Cmp:
+ x2Hi, x2Lo := ce.popValue(), ce.popValue()
+ x1Hi, x1Lo := ce.popValue(), ce.popValue()
+ var result []bool
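+ // One entry per lane; the booleans are expanded into all-ones/all-zeros
+ // lane masks after this switch.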
+ switch op.B1 {
+ case v128CmpTypeI8x16Eq:
+ result = []bool{
+ byte(x1Lo>>0) == byte(x2Lo>>0), byte(x1Lo>>8) == byte(x2Lo>>8),
+ byte(x1Lo>>16) == byte(x2Lo>>16), byte(x1Lo>>24) == byte(x2Lo>>24),
+ byte(x1Lo>>32) == byte(x2Lo>>32), byte(x1Lo>>40) == byte(x2Lo>>40),
+ byte(x1Lo>>48) == byte(x2Lo>>48), byte(x1Lo>>56) == byte(x2Lo>>56),
+ byte(x1Hi>>0) == byte(x2Hi>>0), byte(x1Hi>>8) == byte(x2Hi>>8),
+ byte(x1Hi>>16) == byte(x2Hi>>16), byte(x1Hi>>24) == byte(x2Hi>>24),
+ byte(x1Hi>>32) == byte(x2Hi>>32), byte(x1Hi>>40) == byte(x2Hi>>40),
+ byte(x1Hi>>48) == byte(x2Hi>>48), byte(x1Hi>>56) == byte(x2Hi>>56),
+ }
+ case v128CmpTypeI8x16Ne:
+ result = []bool{
+ byte(x1Lo>>0) != byte(x2Lo>>0), byte(x1Lo>>8) != byte(x2Lo>>8),
+ byte(x1Lo>>16) != byte(x2Lo>>16), byte(x1Lo>>24) != byte(x2Lo>>24),
+ byte(x1Lo>>32) != byte(x2Lo>>32), byte(x1Lo>>40) != byte(x2Lo>>40),
+ byte(x1Lo>>48) != byte(x2Lo>>48), byte(x1Lo>>56) != byte(x2Lo>>56),
+ byte(x1Hi>>0) != byte(x2Hi>>0), byte(x1Hi>>8) != byte(x2Hi>>8),
+ byte(x1Hi>>16) != byte(x2Hi>>16), byte(x1Hi>>24) != byte(x2Hi>>24),
+ byte(x1Hi>>32) != byte(x2Hi>>32), byte(x1Hi>>40) != byte(x2Hi>>40),
+ byte(x1Hi>>48) != byte(x2Hi>>48), byte(x1Hi>>56) != byte(x2Hi>>56),
+ }
+ case v128CmpTypeI8x16LtS:
+ result = []bool{
+ int8(x1Lo>>0) < int8(x2Lo>>0), int8(x1Lo>>8) < int8(x2Lo>>8),
+ int8(x1Lo>>16) < int8(x2Lo>>16), int8(x1Lo>>24) < int8(x2Lo>>24),
+ int8(x1Lo>>32) < int8(x2Lo>>32), int8(x1Lo>>40) < int8(x2Lo>>40),
+ int8(x1Lo>>48) < int8(x2Lo>>48), int8(x1Lo>>56) < int8(x2Lo>>56),
+ int8(x1Hi>>0) < int8(x2Hi>>0), int8(x1Hi>>8) < int8(x2Hi>>8),
+ int8(x1Hi>>16) < int8(x2Hi>>16), int8(x1Hi>>24) < int8(x2Hi>>24),
+ int8(x1Hi>>32) < int8(x2Hi>>32), int8(x1Hi>>40) < int8(x2Hi>>40),
+ int8(x1Hi>>48) < int8(x2Hi>>48), int8(x1Hi>>56) < int8(x2Hi>>56),
+ }
+ case v128CmpTypeI8x16LtU:
+ result = []bool{
+ byte(x1Lo>>0) < byte(x2Lo>>0), byte(x1Lo>>8) < byte(x2Lo>>8),
+ byte(x1Lo>>16) < byte(x2Lo>>16), byte(x1Lo>>24) < byte(x2Lo>>24),
+ byte(x1Lo>>32) < byte(x2Lo>>32), byte(x1Lo>>40) < byte(x2Lo>>40),
+ byte(x1Lo>>48) < byte(x2Lo>>48), byte(x1Lo>>56) < byte(x2Lo>>56),
+ byte(x1Hi>>0) < byte(x2Hi>>0), byte(x1Hi>>8) < byte(x2Hi>>8),
+ byte(x1Hi>>16) < byte(x2Hi>>16), byte(x1Hi>>24) < byte(x2Hi>>24),
+ byte(x1Hi>>32) < byte(x2Hi>>32), byte(x1Hi>>40) < byte(x2Hi>>40),
+ byte(x1Hi>>48) < byte(x2Hi>>48), byte(x1Hi>>56) < byte(x2Hi>>56),
+ }
+ case v128CmpTypeI8x16GtS:
+ result = []bool{
+ int8(x1Lo>>0) > int8(x2Lo>>0), int8(x1Lo>>8) > int8(x2Lo>>8),
+ int8(x1Lo>>16) > int8(x2Lo>>16), int8(x1Lo>>24) > int8(x2Lo>>24),
+ int8(x1Lo>>32) > int8(x2Lo>>32), int8(x1Lo>>40) > int8(x2Lo>>40),
+ int8(x1Lo>>48) > int8(x2Lo>>48), int8(x1Lo>>56) > int8(x2Lo>>56),
+ int8(x1Hi>>0) > int8(x2Hi>>0), int8(x1Hi>>8) > int8(x2Hi>>8),
+ int8(x1Hi>>16) > int8(x2Hi>>16), int8(x1Hi>>24) > int8(x2Hi>>24),
+ int8(x1Hi>>32) > int8(x2Hi>>32), int8(x1Hi>>40) > int8(x2Hi>>40),
+ int8(x1Hi>>48) > int8(x2Hi>>48), int8(x1Hi>>56) > int8(x2Hi>>56),
+ }
+ case v128CmpTypeI8x16GtU:
+ result = []bool{
+ byte(x1Lo>>0) > byte(x2Lo>>0), byte(x1Lo>>8) > byte(x2Lo>>8),
+ byte(x1Lo>>16) > byte(x2Lo>>16), byte(x1Lo>>24) > byte(x2Lo>>24),
+ byte(x1Lo>>32) > byte(x2Lo>>32), byte(x1Lo>>40) > byte(x2Lo>>40),
+ byte(x1Lo>>48) > byte(x2Lo>>48), byte(x1Lo>>56) > byte(x2Lo>>56),
+ byte(x1Hi>>0) > byte(x2Hi>>0), byte(x1Hi>>8) > byte(x2Hi>>8),
+ byte(x1Hi>>16) > byte(x2Hi>>16), byte(x1Hi>>24) > byte(x2Hi>>24),
+ byte(x1Hi>>32) > byte(x2Hi>>32), byte(x1Hi>>40) > byte(x2Hi>>40),
+ byte(x1Hi>>48) > byte(x2Hi>>48), byte(x1Hi>>56) > byte(x2Hi>>56),
+ }
+ case v128CmpTypeI8x16LeS:
+ result = []bool{
+ int8(x1Lo>>0) <= int8(x2Lo>>0), int8(x1Lo>>8) <= int8(x2Lo>>8),
+ int8(x1Lo>>16) <= int8(x2Lo>>16), int8(x1Lo>>24) <= int8(x2Lo>>24),
+ int8(x1Lo>>32) <= int8(x2Lo>>32), int8(x1Lo>>40) <= int8(x2Lo>>40),
+ int8(x1Lo>>48) <= int8(x2Lo>>48), int8(x1Lo>>56) <= int8(x2Lo>>56),
+ int8(x1Hi>>0) <= int8(x2Hi>>0), int8(x1Hi>>8) <= int8(x2Hi>>8),
+ int8(x1Hi>>16) <= int8(x2Hi>>16), int8(x1Hi>>24) <= int8(x2Hi>>24),
+ int8(x1Hi>>32) <= int8(x2Hi>>32), int8(x1Hi>>40) <= int8(x2Hi>>40),
+ int8(x1Hi>>48) <= int8(x2Hi>>48), int8(x1Hi>>56) <= int8(x2Hi>>56),
+ }
+ case v128CmpTypeI8x16LeU:
+ result = []bool{
+ byte(x1Lo>>0) <= byte(x2Lo>>0), byte(x1Lo>>8) <= byte(x2Lo>>8),
+ byte(x1Lo>>16) <= byte(x2Lo>>16), byte(x1Lo>>24) <= byte(x2Lo>>24),
+ byte(x1Lo>>32) <= byte(x2Lo>>32), byte(x1Lo>>40) <= byte(x2Lo>>40),
+ byte(x1Lo>>48) <= byte(x2Lo>>48), byte(x1Lo>>56) <= byte(x2Lo>>56),
+ byte(x1Hi>>0) <= byte(x2Hi>>0), byte(x1Hi>>8) <= byte(x2Hi>>8),
+ byte(x1Hi>>16) <= byte(x2Hi>>16), byte(x1Hi>>24) <= byte(x2Hi>>24),
+ byte(x1Hi>>32) <= byte(x2Hi>>32), byte(x1Hi>>40) <= byte(x2Hi>>40),
+ byte(x1Hi>>48) <= byte(x2Hi>>48), byte(x1Hi>>56) <= byte(x2Hi>>56),
+ }
+ case v128CmpTypeI8x16GeS:
+ result = []bool{
+ int8(x1Lo>>0) >= int8(x2Lo>>0), int8(x1Lo>>8) >= int8(x2Lo>>8),
+ int8(x1Lo>>16) >= int8(x2Lo>>16), int8(x1Lo>>24) >= int8(x2Lo>>24),
+ int8(x1Lo>>32) >= int8(x2Lo>>32), int8(x1Lo>>40) >= int8(x2Lo>>40),
+ int8(x1Lo>>48) >= int8(x2Lo>>48), int8(x1Lo>>56) >= int8(x2Lo>>56),
+ int8(x1Hi>>0) >= int8(x2Hi>>0), int8(x1Hi>>8) >= int8(x2Hi>>8),
+ int8(x1Hi>>16) >= int8(x2Hi>>16), int8(x1Hi>>24) >= int8(x2Hi>>24),
+ int8(x1Hi>>32) >= int8(x2Hi>>32), int8(x1Hi>>40) >= int8(x2Hi>>40),
+ int8(x1Hi>>48) >= int8(x2Hi>>48), int8(x1Hi>>56) >= int8(x2Hi>>56),
+ }
+ case v128CmpTypeI8x16GeU:
+ result = []bool{
+ byte(x1Lo>>0) >= byte(x2Lo>>0), byte(x1Lo>>8) >= byte(x2Lo>>8),
+ byte(x1Lo>>16) >= byte(x2Lo>>16), byte(x1Lo>>24) >= byte(x2Lo>>24),
+ byte(x1Lo>>32) >= byte(x2Lo>>32), byte(x1Lo>>40) >= byte(x2Lo>>40),
+ byte(x1Lo>>48) >= byte(x2Lo>>48), byte(x1Lo>>56) >= byte(x2Lo>>56),
+ byte(x1Hi>>0) >= byte(x2Hi>>0), byte(x1Hi>>8) >= byte(x2Hi>>8),
+ byte(x1Hi>>16) >= byte(x2Hi>>16), byte(x1Hi>>24) >= byte(x2Hi>>24),
+ byte(x1Hi>>32) >= byte(x2Hi>>32), byte(x1Hi>>40) >= byte(x2Hi>>40),
+ byte(x1Hi>>48) >= byte(x2Hi>>48), byte(x1Hi>>56) >= byte(x2Hi>>56),
+ }
+ case v128CmpTypeI16x8Eq:
+ result = []bool{
+ uint16(x1Lo>>0) == uint16(x2Lo>>0), uint16(x1Lo>>16) == uint16(x2Lo>>16),
+ uint16(x1Lo>>32) == uint16(x2Lo>>32), uint16(x1Lo>>48) == uint16(x2Lo>>48),
+ uint16(x1Hi>>0) == uint16(x2Hi>>0), uint16(x1Hi>>16) == uint16(x2Hi>>16),
+ uint16(x1Hi>>32) == uint16(x2Hi>>32), uint16(x1Hi>>48) == uint16(x2Hi>>48),
+ }
+ case v128CmpTypeI16x8Ne:
+ result = []bool{
+ uint16(x1Lo>>0) != uint16(x2Lo>>0), uint16(x1Lo>>16) != uint16(x2Lo>>16),
+ uint16(x1Lo>>32) != uint16(x2Lo>>32), uint16(x1Lo>>48) != uint16(x2Lo>>48),
+ uint16(x1Hi>>0) != uint16(x2Hi>>0), uint16(x1Hi>>16) != uint16(x2Hi>>16),
+ uint16(x1Hi>>32) != uint16(x2Hi>>32), uint16(x1Hi>>48) != uint16(x2Hi>>48),
+ }
+ case v128CmpTypeI16x8LtS:
+ result = []bool{
+ int16(x1Lo>>0) < int16(x2Lo>>0), int16(x1Lo>>16) < int16(x2Lo>>16),
+ int16(x1Lo>>32) < int16(x2Lo>>32), int16(x1Lo>>48) < int16(x2Lo>>48),
+ int16(x1Hi>>0) < int16(x2Hi>>0), int16(x1Hi>>16) < int16(x2Hi>>16),
+ int16(x1Hi>>32) < int16(x2Hi>>32), int16(x1Hi>>48) < int16(x2Hi>>48),
+ }
+ case v128CmpTypeI16x8LtU:
+ result = []bool{
+ uint16(x1Lo>>0) < uint16(x2Lo>>0), uint16(x1Lo>>16) < uint16(x2Lo>>16),
+ uint16(x1Lo>>32) < uint16(x2Lo>>32), uint16(x1Lo>>48) < uint16(x2Lo>>48),
+ uint16(x1Hi>>0) < uint16(x2Hi>>0), uint16(x1Hi>>16) < uint16(x2Hi>>16),
+ uint16(x1Hi>>32) < uint16(x2Hi>>32), uint16(x1Hi>>48) < uint16(x2Hi>>48),
+ }
+ case v128CmpTypeI16x8GtS:
+ result = []bool{
+ int16(x1Lo>>0) > int16(x2Lo>>0), int16(x1Lo>>16) > int16(x2Lo>>16),
+ int16(x1Lo>>32) > int16(x2Lo>>32), int16(x1Lo>>48) > int16(x2Lo>>48),
+ int16(x1Hi>>0) > int16(x2Hi>>0), int16(x1Hi>>16) > int16(x2Hi>>16),
+ int16(x1Hi>>32) > int16(x2Hi>>32), int16(x1Hi>>48) > int16(x2Hi>>48),
+ }
+ case v128CmpTypeI16x8GtU:
+ result = []bool{
+ uint16(x1Lo>>0) > uint16(x2Lo>>0), uint16(x1Lo>>16) > uint16(x2Lo>>16),
+ uint16(x1Lo>>32) > uint16(x2Lo>>32), uint16(x1Lo>>48) > uint16(x2Lo>>48),
+ uint16(x1Hi>>0) > uint16(x2Hi>>0), uint16(x1Hi>>16) > uint16(x2Hi>>16),
+ uint16(x1Hi>>32) > uint16(x2Hi>>32), uint16(x1Hi>>48) > uint16(x2Hi>>48),
+ }
+ case v128CmpTypeI16x8LeS:
+ result = []bool{
+ int16(x1Lo>>0) <= int16(x2Lo>>0), int16(x1Lo>>16) <= int16(x2Lo>>16),
+ int16(x1Lo>>32) <= int16(x2Lo>>32), int16(x1Lo>>48) <= int16(x2Lo>>48),
+ int16(x1Hi>>0) <= int16(x2Hi>>0), int16(x1Hi>>16) <= int16(x2Hi>>16),
+ int16(x1Hi>>32) <= int16(x2Hi>>32), int16(x1Hi>>48) <= int16(x2Hi>>48),
+ }
+ case v128CmpTypeI16x8LeU:
+ result = []bool{
+ uint16(x1Lo>>0) <= uint16(x2Lo>>0), uint16(x1Lo>>16) <= uint16(x2Lo>>16),
+ uint16(x1Lo>>32) <= uint16(x2Lo>>32), uint16(x1Lo>>48) <= uint16(x2Lo>>48),
+ uint16(x1Hi>>0) <= uint16(x2Hi>>0), uint16(x1Hi>>16) <= uint16(x2Hi>>16),
+ uint16(x1Hi>>32) <= uint16(x2Hi>>32), uint16(x1Hi>>48) <= uint16(x2Hi>>48),
+ }
+ case v128CmpTypeI16x8GeS:
+ result = []bool{
+ int16(x1Lo>>0) >= int16(x2Lo>>0), int16(x1Lo>>16) >= int16(x2Lo>>16),
+ int16(x1Lo>>32) >= int16(x2Lo>>32), int16(x1Lo>>48) >= int16(x2Lo>>48),
+ int16(x1Hi>>0) >= int16(x2Hi>>0), int16(x1Hi>>16) >= int16(x2Hi>>16),
+ int16(x1Hi>>32) >= int16(x2Hi>>32), int16(x1Hi>>48) >= int16(x2Hi>>48),
+ }
+ case v128CmpTypeI16x8GeU:
+ result = []bool{
+ uint16(x1Lo>>0) >= uint16(x2Lo>>0), uint16(x1Lo>>16) >= uint16(x2Lo>>16),
+ uint16(x1Lo>>32) >= uint16(x2Lo>>32), uint16(x1Lo>>48) >= uint16(x2Lo>>48),
+ uint16(x1Hi>>0) >= uint16(x2Hi>>0), uint16(x1Hi>>16) >= uint16(x2Hi>>16),
+ uint16(x1Hi>>32) >= uint16(x2Hi>>32), uint16(x1Hi>>48) >= uint16(x2Hi>>48),
+ }
+ case v128CmpTypeI32x4Eq:
+ result = []bool{
+ uint32(x1Lo>>0) == uint32(x2Lo>>0), uint32(x1Lo>>32) == uint32(x2Lo>>32),
+ uint32(x1Hi>>0) == uint32(x2Hi>>0), uint32(x1Hi>>32) == uint32(x2Hi>>32),
+ }
+ case v128CmpTypeI32x4Ne:
+ result = []bool{
+ uint32(x1Lo>>0) != uint32(x2Lo>>0), uint32(x1Lo>>32) != uint32(x2Lo>>32),
+ uint32(x1Hi>>0) != uint32(x2Hi>>0), uint32(x1Hi>>32) != uint32(x2Hi>>32),
+ }
+ case v128CmpTypeI32x4LtS:
+ result = []bool{
+ int32(x1Lo>>0) < int32(x2Lo>>0), int32(x1Lo>>32) < int32(x2Lo>>32),
+ int32(x1Hi>>0) < int32(x2Hi>>0), int32(x1Hi>>32) < int32(x2Hi>>32),
+ }
+ case v128CmpTypeI32x4LtU:
+ result = []bool{
+ uint32(x1Lo>>0) < uint32(x2Lo>>0), uint32(x1Lo>>32) < uint32(x2Lo>>32),
+ uint32(x1Hi>>0) < uint32(x2Hi>>0), uint32(x1Hi>>32) < uint32(x2Hi>>32),
+ }
+ case v128CmpTypeI32x4GtS:
+ result = []bool{
+ int32(x1Lo>>0) > int32(x2Lo>>0), int32(x1Lo>>32) > int32(x2Lo>>32),
+ int32(x1Hi>>0) > int32(x2Hi>>0), int32(x1Hi>>32) > int32(x2Hi>>32),
+ }
+ case v128CmpTypeI32x4GtU:
+ result = []bool{
+ uint32(x1Lo>>0) > uint32(x2Lo>>0), uint32(x1Lo>>32) > uint32(x2Lo>>32),
+ uint32(x1Hi>>0) > uint32(x2Hi>>0), uint32(x1Hi>>32) > uint32(x2Hi>>32),
+ }
+ case v128CmpTypeI32x4LeS:
+ result = []bool{
+ int32(x1Lo>>0) <= int32(x2Lo>>0), int32(x1Lo>>32) <= int32(x2Lo>>32),
+ int32(x1Hi>>0) <= int32(x2Hi>>0), int32(x1Hi>>32) <= int32(x2Hi>>32),
+ }
+ case v128CmpTypeI32x4LeU:
+ result = []bool{
+ uint32(x1Lo>>0) <= uint32(x2Lo>>0), uint32(x1Lo>>32) <= uint32(x2Lo>>32),
+ uint32(x1Hi>>0) <= uint32(x2Hi>>0), uint32(x1Hi>>32) <= uint32(x2Hi>>32),
+ }
+ case v128CmpTypeI32x4GeS:
+ result = []bool{
+ int32(x1Lo>>0) >= int32(x2Lo>>0), int32(x1Lo>>32) >= int32(x2Lo>>32),
+ int32(x1Hi>>0) >= int32(x2Hi>>0), int32(x1Hi>>32) >= int32(x2Hi>>32),
+ }
+ case v128CmpTypeI32x4GeU:
+ result = []bool{
+ uint32(x1Lo>>0) >= uint32(x2Lo>>0), uint32(x1Lo>>32) >= uint32(x2Lo>>32),
+ uint32(x1Hi>>0) >= uint32(x2Hi>>0), uint32(x1Hi>>32) >= uint32(x2Hi>>32),
+ }
+ case v128CmpTypeI64x2Eq:
+ result = []bool{x1Lo == x2Lo, x1Hi == x2Hi}
+ case v128CmpTypeI64x2Ne:
+ result = []bool{x1Lo != x2Lo, x1Hi != x2Hi}
+ case v128CmpTypeI64x2LtS:
+ result = []bool{int64(x1Lo) < int64(x2Lo), int64(x1Hi) < int64(x2Hi)}
+ case v128CmpTypeI64x2GtS:
+ result = []bool{int64(x1Lo) > int64(x2Lo), int64(x1Hi) > int64(x2Hi)}
+ case v128CmpTypeI64x2LeS:
+ result = []bool{int64(x1Lo) <= int64(x2Lo), int64(x1Hi) <= int64(x2Hi)}
+ case v128CmpTypeI64x2GeS:
+ result = []bool{int64(x1Lo) >= int64(x2Lo), int64(x1Hi) >= int64(x2Hi)}
+ case v128CmpTypeF32x4Eq:
+ result = []bool{
+ math.Float32frombits(uint32(x1Lo>>0)) == math.Float32frombits(uint32(x2Lo>>0)),
+ math.Float32frombits(uint32(x1Lo>>32)) == math.Float32frombits(uint32(x2Lo>>32)),
+ math.Float32frombits(uint32(x1Hi>>0)) == math.Float32frombits(uint32(x2Hi>>0)),
+ math.Float32frombits(uint32(x1Hi>>32)) == math.Float32frombits(uint32(x2Hi>>32)),
+ }
+ case v128CmpTypeF32x4Ne:
+ result = []bool{
+ math.Float32frombits(uint32(x1Lo>>0)) != math.Float32frombits(uint32(x2Lo>>0)),
+ math.Float32frombits(uint32(x1Lo>>32)) != math.Float32frombits(uint32(x2Lo>>32)),
+ math.Float32frombits(uint32(x1Hi>>0)) != math.Float32frombits(uint32(x2Hi>>0)),
+ math.Float32frombits(uint32(x1Hi>>32)) != math.Float32frombits(uint32(x2Hi>>32)),
+ }
+ case v128CmpTypeF32x4Lt:
+ result = []bool{
+ math.Float32frombits(uint32(x1Lo>>0)) < math.Float32frombits(uint32(x2Lo>>0)),
+ math.Float32frombits(uint32(x1Lo>>32)) < math.Float32frombits(uint32(x2Lo>>32)),
+ math.Float32frombits(uint32(x1Hi>>0)) < math.Float32frombits(uint32(x2Hi>>0)),
+ math.Float32frombits(uint32(x1Hi>>32)) < math.Float32frombits(uint32(x2Hi>>32)),
+ }
+ case v128CmpTypeF32x4Gt:
+ result = []bool{
+ math.Float32frombits(uint32(x1Lo>>0)) > math.Float32frombits(uint32(x2Lo>>0)),
+ math.Float32frombits(uint32(x1Lo>>32)) > math.Float32frombits(uint32(x2Lo>>32)),
+ math.Float32frombits(uint32(x1Hi>>0)) > math.Float32frombits(uint32(x2Hi>>0)),
+ math.Float32frombits(uint32(x1Hi>>32)) > math.Float32frombits(uint32(x2Hi>>32)),
+ }
+ case v128CmpTypeF32x4Le:
+ result = []bool{
+ math.Float32frombits(uint32(x1Lo>>0)) <= math.Float32frombits(uint32(x2Lo>>0)),
+ math.Float32frombits(uint32(x1Lo>>32)) <= math.Float32frombits(uint32(x2Lo>>32)),
+ math.Float32frombits(uint32(x1Hi>>0)) <= math.Float32frombits(uint32(x2Hi>>0)),
+ math.Float32frombits(uint32(x1Hi>>32)) <= math.Float32frombits(uint32(x2Hi>>32)),
+ }
+ case v128CmpTypeF32x4Ge:
+ result = []bool{
+ math.Float32frombits(uint32(x1Lo>>0)) >= math.Float32frombits(uint32(x2Lo>>0)),
+ math.Float32frombits(uint32(x1Lo>>32)) >= math.Float32frombits(uint32(x2Lo>>32)),
+ math.Float32frombits(uint32(x1Hi>>0)) >= math.Float32frombits(uint32(x2Hi>>0)),
+ math.Float32frombits(uint32(x1Hi>>32)) >= math.Float32frombits(uint32(x2Hi>>32)),
+ }
+ case v128CmpTypeF64x2Eq:
+ result = []bool{
+ math.Float64frombits(x1Lo) == math.Float64frombits(x2Lo),
+ math.Float64frombits(x1Hi) == math.Float64frombits(x2Hi),
+ }
+ case v128CmpTypeF64x2Ne:
+ result = []bool{
+ math.Float64frombits(x1Lo) != math.Float64frombits(x2Lo),
+ math.Float64frombits(x1Hi) != math.Float64frombits(x2Hi),
+ }
+ case v128CmpTypeF64x2Lt:
+ result = []bool{
+ math.Float64frombits(x1Lo) < math.Float64frombits(x2Lo),
+ math.Float64frombits(x1Hi) < math.Float64frombits(x2Hi),
+ }
+ case v128CmpTypeF64x2Gt:
+ result = []bool{
+ math.Float64frombits(x1Lo) > math.Float64frombits(x2Lo),
+ math.Float64frombits(x1Hi) > math.Float64frombits(x2Hi),
+ }
+ case v128CmpTypeF64x2Le:
+ result = []bool{
+ math.Float64frombits(x1Lo) <= math.Float64frombits(x2Lo),
+ math.Float64frombits(x1Hi) <= math.Float64frombits(x2Hi),
+ }
+ case v128CmpTypeF64x2Ge:
+ result = []bool{
+ math.Float64frombits(x1Lo) >= math.Float64frombits(x2Lo),
+ math.Float64frombits(x1Hi) >= math.Float64frombits(x2Hi),
+ }
+ }
+
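+ // Materialize the per-lane booleans: a true lane becomes all ones and a
+ // false lane all zeros, at the lane width implied by the lane count.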
+ var retLo, retHi uint64
+ laneNum := len(result)
+ switch laneNum {
+ case 16:
+ for i, b := range result {
+ if b {
+ if i < 8 {
+ retLo |= 0xff << (i * 8)
+ } else {
+ retHi |= 0xff << ((i - 8) * 8)
+ }
+ }
+ }
+ case 8:
+ for i, b := range result {
+ if b {
+ if i < 4 {
+ retLo |= 0xffff << (i * 16)
+ } else {
+ retHi |= 0xffff << ((i - 4) * 16)
+ }
+ }
+ }
+ case 4:
+ for i, b := range result {
+ if b {
+ if i < 2 {
+ retLo |= 0xffff_ffff << (i * 32)
+ } else {
+ retHi |= 0xffff_ffff << ((i - 2) * 32)
+ }
+ }
+ }
+ case 2:
+ if result[0] {
+ retLo = ^uint64(0)
+ }
+ if result[1] {
+ retHi = ^uint64(0)
+ }
+ }
+
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128AddSat:
+ x2hi, x2Lo := ce.popValue(), ce.popValue()
+ x1hi, x1Lo := ce.popValue(), ce.popValue()
+
+ var retLo, retHi uint64
+
+ // Lane-wise addition while saturating the overflowing values.
+ // https://github.com/WebAssembly/spec/blob/wg-2.0.draft1/proposals/simd/SIMD.md#saturating-integer-addition
+ switch op.B1 {
+ case shapeI8x16:
+ for i := 0; i < 16; i++ {
+ var v, w byte
+ if i < 8 {
+ v, w = byte(x1Lo>>(i*8)), byte(x2Lo>>(i*8))
+ } else {
+ v, w = byte(x1hi>>((i-8)*8)), byte(x2hi>>((i-8)*8))
+ }
+
+ var uv uint64
+ if op.B3 { // signed
+ if added := int64(int8(v)) + int64(int8(w)); added < math.MinInt8 {
+ uv = uint64(byte(0x80))
+ } else if added > math.MaxInt8 {
+ uv = uint64(byte(0x7f))
+ } else {
+ uv = uint64(byte(int8(added)))
+ }
+ } else {
+ if added := int64(v) + int64(w); added < 0 {
+ uv = uint64(byte(0))
+ } else if added > math.MaxUint8 {
+ uv = uint64(byte(0xff))
+ } else {
+ uv = uint64(byte(added))
+ }
+ }
+
+ if i < 8 { // first 8 lanes are on the lower 64 bits.
+ retLo |= uv << (i * 8)
+ } else {
+ retHi |= uv << ((i - 8) * 8)
+ }
+ }
+ case shapeI16x8:
+ for i := 0; i < 8; i++ {
+ var v, w uint16
+ if i < 4 {
+ v, w = uint16(x1Lo>>(i*16)), uint16(x2Lo>>(i*16))
+ } else {
+ v, w = uint16(x1hi>>((i-4)*16)), uint16(x2hi>>((i-4)*16))
+ }
+
+ var uv uint64
+ if op.B3 { // signed
+ if added := int64(int16(v)) + int64(int16(w)); added < math.MinInt16 {
+ uv = uint64(uint16(0x8000))
+ } else if added > math.MaxInt16 {
+ uv = uint64(uint16(0x7fff))
+ } else {
+ uv = uint64(uint16(int16(added)))
+ }
+ } else {
+ if added := int64(v) + int64(w); added < 0 {
+ uv = uint64(uint16(0))
+ } else if added > math.MaxUint16 {
+ uv = uint64(uint16(0xffff))
+ } else {
+ uv = uint64(uint16(added))
+ }
+ }
+
+ if i < 4 { // first 4 lanes are on the lower 64 bits.
+ retLo |= uv << (i * 16)
+ } else {
+ retHi |= uv << ((i - 4) * 16)
+ }
+ }
+ }
+
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128SubSat:
+ x2hi, x2Lo := ce.popValue(), ce.popValue()
+ x1hi, x1Lo := ce.popValue(), ce.popValue()
+
+ var retLo, retHi uint64
+
+ // Lane-wise subtraction while saturating the overflowing values.
+ // https://github.com/WebAssembly/spec/blob/wg-2.0.draft1/proposals/simd/SIMD.md#saturating-integer-subtraction
+ switch op.B1 {
+ case shapeI8x16:
+ for i := 0; i < 16; i++ {
+ var v, w byte
+ if i < 8 {
+ v, w = byte(x1Lo>>(i*8)), byte(x2Lo>>(i*8))
+ } else {
+ v, w = byte(x1hi>>((i-8)*8)), byte(x2hi>>((i-8)*8))
+ }
+
+ var uv uint64
+ if op.B3 { // signed
+ if subbed := int64(int8(v)) - int64(int8(w)); subbed < math.MinInt8 {
+ uv = uint64(byte(0x80))
+ } else if subbed > math.MaxInt8 {
+ uv = uint64(byte(0x7f))
+ } else {
+ uv = uint64(byte(int8(subbed)))
+ }
+ } else {
+ if subbed := int64(v) - int64(w); subbed < 0 {
+ uv = uint64(byte(0))
+ } else if subbed > math.MaxUint8 {
+ uv = uint64(byte(0xff))
+ } else {
+ uv = uint64(byte(subbed))
+ }
+ }
+
+ if i < 8 {
+ retLo |= uv << (i * 8)
+ } else {
+ retHi |= uv << ((i - 8) * 8)
+ }
+ }
+ case shapeI16x8:
+ for i := 0; i < 8; i++ {
+ var v, w uint16
+ if i < 4 {
+ v, w = uint16(x1Lo>>(i*16)), uint16(x2Lo>>(i*16))
+ } else {
+ v, w = uint16(x1hi>>((i-4)*16)), uint16(x2hi>>((i-4)*16))
+ }
+
+ var uv uint64
+ if op.B3 { // signed
+ if subbed := int64(int16(v)) - int64(int16(w)); subbed < math.MinInt16 {
+ uv = uint64(uint16(0x8000))
+ } else if subbed > math.MaxInt16 {
+ uv = uint64(uint16(0x7fff))
+ } else {
+ uv = uint64(uint16(int16(subbed)))
+ }
+ } else {
+ if subbed := int64(v) - int64(w); subbed < 0 {
+ uv = uint64(uint16(0))
+ } else if subbed > math.MaxUint16 {
+ uv = uint64(uint16(0xffff))
+ } else {
+ uv = uint64(uint16(subbed))
+ }
+ }
+
+ if i < 4 {
+ retLo |= uv << (i * 16)
+ } else {
+ retHi |= uv << ((i - 4) * 16)
+ }
+ }
+ }
+
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128Mul:
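+ // Integer lanes multiply with wrap-around (modulo 2^n); f32 lanes go through
+ // the mulFloat32bits helper defined elsewhere in this file.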
+ x2hi, x2lo := ce.popValue(), ce.popValue()
+ x1hi, x1lo := ce.popValue(), ce.popValue()
+ var retLo, retHi uint64
+ switch op.B1 {
+ case shapeI16x8:
+ retHi = uint64(uint16(x1hi)*uint16(x2hi)) | (uint64(uint16(x1hi>>16)*uint16(x2hi>>16)) << 16) |
+ (uint64(uint16(x1hi>>32)*uint16(x2hi>>32)) << 32) | (uint64(uint16(x1hi>>48)*uint16(x2hi>>48)) << 48)
+ retLo = uint64(uint16(x1lo)*uint16(x2lo)) | (uint64(uint16(x1lo>>16)*uint16(x2lo>>16)) << 16) |
+ (uint64(uint16(x1lo>>32)*uint16(x2lo>>32)) << 32) | (uint64(uint16(x1lo>>48)*uint16(x2lo>>48)) << 48)
+ case shapeI32x4:
+ retHi = uint64(uint32(x1hi)*uint32(x2hi)) | (uint64(uint32(x1hi>>32)*uint32(x2hi>>32)) << 32)
+ retLo = uint64(uint32(x1lo)*uint32(x2lo)) | (uint64(uint32(x1lo>>32)*uint32(x2lo>>32)) << 32)
+ case shapeI64x2:
+ retHi = x1hi * x2hi
+ retLo = x1lo * x2lo
+ case shapeF32x4:
+ retHi = mulFloat32bits(uint32(x1hi), uint32(x2hi)) | mulFloat32bits(uint32(x1hi>>32), uint32(x2hi>>32))<<32
+ retLo = mulFloat32bits(uint32(x1lo), uint32(x2lo)) | mulFloat32bits(uint32(x1lo>>32), uint32(x2lo>>32))<<32
+ case shapeF64x2:
+ retHi = math.Float64bits(math.Float64frombits(x1hi) * math.Float64frombits(x2hi))
+ retLo = math.Float64bits(math.Float64frombits(x1lo) * math.Float64frombits(x2lo))
+ }
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128Div:
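+ // Lane-wise float division; v128 has no integer division, so only the
+ // f32x4 and f64x2 shapes are possible here.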
+ x2hi, x2lo := ce.popValue(), ce.popValue()
+ x1hi, x1lo := ce.popValue(), ce.popValue()
+ var retLo, retHi uint64
+ if op.B1 == shapeF64x2 {
+ retHi = math.Float64bits(math.Float64frombits(x1hi) / math.Float64frombits(x2hi))
+ retLo = math.Float64bits(math.Float64frombits(x1lo) / math.Float64frombits(x2lo))
+ } else {
+ retHi = divFloat32bits(uint32(x1hi), uint32(x2hi)) | divFloat32bits(uint32(x1hi>>32), uint32(x2hi>>32))<<32
+ retLo = divFloat32bits(uint32(x1lo), uint32(x2lo)) | divFloat32bits(uint32(x1lo>>32), uint32(x2lo>>32))<<32
+ }
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128Neg:
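+ // Integer lanes are negated in two's complement (modulo 2^n); float lanes
+ // simply have their sign flipped.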
+ hi, lo := ce.popValue(), ce.popValue()
+ switch op.B1 {
+ case shapeI8x16:
+ lo = uint64(-byte(lo)) | (uint64(-byte(lo>>8)) << 8) |
+ (uint64(-byte(lo>>16)) << 16) | (uint64(-byte(lo>>24)) << 24) |
+ (uint64(-byte(lo>>32)) << 32) | (uint64(-byte(lo>>40)) << 40) |
+ (uint64(-byte(lo>>48)) << 48) | (uint64(-byte(lo>>56)) << 56)
+ hi = uint64(-byte(hi)) | (uint64(-byte(hi>>8)) << 8) |
+ (uint64(-byte(hi>>16)) << 16) | (uint64(-byte(hi>>24)) << 24) |
+ (uint64(-byte(hi>>32)) << 32) | (uint64(-byte(hi>>40)) << 40) |
+ (uint64(-byte(hi>>48)) << 48) | (uint64(-byte(hi>>56)) << 56)
+ case shapeI16x8:
+ hi = uint64(-uint16(hi)) | (uint64(-uint16(hi>>16)) << 16) |
+ (uint64(-uint16(hi>>32)) << 32) | (uint64(-uint16(hi>>48)) << 48)
+ lo = uint64(-uint16(lo)) | (uint64(-uint16(lo>>16)) << 16) |
+ (uint64(-uint16(lo>>32)) << 32) | (uint64(-uint16(lo>>48)) << 48)
+ case shapeI32x4:
+ hi = uint64(-uint32(hi)) | (uint64(-uint32(hi>>32)) << 32)
+ lo = uint64(-uint32(lo)) | (uint64(-uint32(lo>>32)) << 32)
+ case shapeI64x2:
+ hi = -hi
+ lo = -lo
+ case shapeF32x4:
+ hi = uint64(math.Float32bits(-math.Float32frombits(uint32(hi)))) |
+ (uint64(math.Float32bits(-math.Float32frombits(uint32(hi>>32)))) << 32)
+ lo = uint64(math.Float32bits(-math.Float32frombits(uint32(lo)))) |
+ (uint64(math.Float32bits(-math.Float32frombits(uint32(lo>>32)))) << 32)
+ case shapeF64x2:
+ hi = math.Float64bits(-math.Float64frombits(hi))
+ lo = math.Float64bits(-math.Float64frombits(lo))
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Sqrt:
+ hi, lo := ce.popValue(), ce.popValue()
+ if op.B1 == shapeF64x2 {
+ hi = math.Float64bits(math.Sqrt(math.Float64frombits(hi)))
+ lo = math.Float64bits(math.Sqrt(math.Float64frombits(lo)))
+ } else {
+ hi = uint64(math.Float32bits(float32(math.Sqrt(float64(math.Float32frombits(uint32(hi))))))) |
+ (uint64(math.Float32bits(float32(math.Sqrt(float64(math.Float32frombits(uint32(hi>>32))))))) << 32)
+ lo = uint64(math.Float32bits(float32(math.Sqrt(float64(math.Float32frombits(uint32(lo))))))) |
+ (uint64(math.Float32bits(float32(math.Sqrt(float64(math.Float32frombits(uint32(lo>>32))))))) << 32)
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Abs:
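+ // Integer abs is computed per lane; float abs only clears sign bits, e.g.
+ // for f32x4 bits 31 and 63 clear both packed lanes' signs at once.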
+ hi, lo := ce.popValue(), ce.popValue()
+ switch op.B1 {
+ case shapeI8x16:
+ lo = uint64(i8Abs(byte(lo))) | (uint64(i8Abs(byte(lo>>8))) << 8) |
+ (uint64(i8Abs(byte(lo>>16))) << 16) | (uint64(i8Abs(byte(lo>>24))) << 24) |
+ (uint64(i8Abs(byte(lo>>32))) << 32) | (uint64(i8Abs(byte(lo>>40))) << 40) |
+ (uint64(i8Abs(byte(lo>>48))) << 48) | (uint64(i8Abs(byte(lo>>56))) << 56)
+ hi = uint64(i8Abs(byte(hi))) | (uint64(i8Abs(byte(hi>>8))) << 8) |
+ (uint64(i8Abs(byte(hi>>16))) << 16) | (uint64(i8Abs(byte(hi>>24))) << 24) |
+ (uint64(i8Abs(byte(hi>>32))) << 32) | (uint64(i8Abs(byte(hi>>40))) << 40) |
+ (uint64(i8Abs(byte(hi>>48))) << 48) | (uint64(i8Abs(byte(hi>>56))) << 56)
+ case shapeI16x8:
+ hi = uint64(i16Abs(uint16(hi))) | (uint64(i16Abs(uint16(hi>>16))) << 16) |
+ (uint64(i16Abs(uint16(hi>>32))) << 32) | (uint64(i16Abs(uint16(hi>>48))) << 48)
+ lo = uint64(i16Abs(uint16(lo))) | (uint64(i16Abs(uint16(lo>>16))) << 16) |
+ (uint64(i16Abs(uint16(lo>>32))) << 32) | (uint64(i16Abs(uint16(lo>>48))) << 48)
+ case shapeI32x4:
+ hi = uint64(i32Abs(uint32(hi))) | (uint64(i32Abs(uint32(hi>>32))) << 32)
+ lo = uint64(i32Abs(uint32(lo))) | (uint64(i32Abs(uint32(lo>>32))) << 32)
+ case shapeI64x2:
+ if int64(hi) < 0 {
+ hi = -hi
+ }
+ if int64(lo) < 0 {
+ lo = -lo
+ }
+ case shapeF32x4:
+ hi = hi &^ (1<<31 | 1<<63)
+ lo = lo &^ (1<<31 | 1<<63)
+ case shapeF64x2:
+ hi = hi &^ (1 << 63)
+ lo = lo &^ (1 << 63)
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Popcnt:
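+ // i8x16.popcnt: count the set bits within each of the 16 byte lanes.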
+ hi, lo := ce.popValue(), ce.popValue()
+ var retLo, retHi uint64
+ for i := 0; i < 16; i++ {
+ var v byte
+ if i < 8 {
+ v = byte(lo >> (i * 8))
+ } else {
+ v = byte(hi >> ((i - 8) * 8))
+ }
+
+ var cnt uint64
+ for j := 0; j < 8; j++ { // j: bit index; avoids shadowing the lane index i.
+ if (v>>j)&0b1 != 0 {
+ cnt++
+ }
+ }
+
+ if i < 8 {
+ retLo |= cnt << (i * 8)
+ } else {
+ retHi |= cnt << ((i - 8) * 8)
+ }
+ }
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128Min:
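+ // Integer min honors op.B3 signedness; float min follows wasm semantics
+ // (NaN-propagating, -0 treated as smaller than +0) via the moremath helpers.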
+ x2hi, x2lo := ce.popValue(), ce.popValue()
+ x1hi, x1lo := ce.popValue(), ce.popValue()
+ var retLo, retHi uint64
+ switch op.B1 {
+ case shapeI8x16:
+ if op.B3 { // signed
+ retLo = uint64(i8MinS(uint8(x1lo>>8), uint8(x2lo>>8)))<<8 | uint64(i8MinS(uint8(x1lo), uint8(x2lo))) |
+ uint64(i8MinS(uint8(x1lo>>24), uint8(x2lo>>24)))<<24 | uint64(i8MinS(uint8(x1lo>>16), uint8(x2lo>>16)))<<16 |
+ uint64(i8MinS(uint8(x1lo>>40), uint8(x2lo>>40)))<<40 | uint64(i8MinS(uint8(x1lo>>32), uint8(x2lo>>32)))<<32 |
+ uint64(i8MinS(uint8(x1lo>>56), uint8(x2lo>>56)))<<56 | uint64(i8MinS(uint8(x1lo>>48), uint8(x2lo>>48)))<<48
+ retHi = uint64(i8MinS(uint8(x1hi>>8), uint8(x2hi>>8)))<<8 | uint64(i8MinS(uint8(x1hi), uint8(x2hi))) |
+ uint64(i8MinS(uint8(x1hi>>24), uint8(x2hi>>24)))<<24 | uint64(i8MinS(uint8(x1hi>>16), uint8(x2hi>>16)))<<16 |
+ uint64(i8MinS(uint8(x1hi>>40), uint8(x2hi>>40)))<<40 | uint64(i8MinS(uint8(x1hi>>32), uint8(x2hi>>32)))<<32 |
+ uint64(i8MinS(uint8(x1hi>>56), uint8(x2hi>>56)))<<56 | uint64(i8MinS(uint8(x1hi>>48), uint8(x2hi>>48)))<<48
+ } else {
+ retLo = uint64(i8MinU(uint8(x1lo>>8), uint8(x2lo>>8)))<<8 | uint64(i8MinU(uint8(x1lo), uint8(x2lo))) |
+ uint64(i8MinU(uint8(x1lo>>24), uint8(x2lo>>24)))<<24 | uint64(i8MinU(uint8(x1lo>>16), uint8(x2lo>>16)))<<16 |
+ uint64(i8MinU(uint8(x1lo>>40), uint8(x2lo>>40)))<<40 | uint64(i8MinU(uint8(x1lo>>32), uint8(x2lo>>32)))<<32 |
+ uint64(i8MinU(uint8(x1lo>>56), uint8(x2lo>>56)))<<56 | uint64(i8MinU(uint8(x1lo>>48), uint8(x2lo>>48)))<<48
+ retHi = uint64(i8MinU(uint8(x1hi>>8), uint8(x2hi>>8)))<<8 | uint64(i8MinU(uint8(x1hi), uint8(x2hi))) |
+ uint64(i8MinU(uint8(x1hi>>24), uint8(x2hi>>24)))<<24 | uint64(i8MinU(uint8(x1hi>>16), uint8(x2hi>>16)))<<16 |
+ uint64(i8MinU(uint8(x1hi>>40), uint8(x2hi>>40)))<<40 | uint64(i8MinU(uint8(x1hi>>32), uint8(x2hi>>32)))<<32 |
+ uint64(i8MinU(uint8(x1hi>>56), uint8(x2hi>>56)))<<56 | uint64(i8MinU(uint8(x1hi>>48), uint8(x2hi>>48)))<<48
+ }
+ case shapeI16x8:
+ if op.B3 { // signed
+ retLo = uint64(i16MinS(uint16(x1lo), uint16(x2lo))) |
+ uint64(i16MinS(uint16(x1lo>>16), uint16(x2lo>>16)))<<16 |
+ uint64(i16MinS(uint16(x1lo>>32), uint16(x2lo>>32)))<<32 |
+ uint64(i16MinS(uint16(x1lo>>48), uint16(x2lo>>48)))<<48
+ retHi = uint64(i16MinS(uint16(x1hi), uint16(x2hi))) |
+ uint64(i16MinS(uint16(x1hi>>16), uint16(x2hi>>16)))<<16 |
+ uint64(i16MinS(uint16(x1hi>>32), uint16(x2hi>>32)))<<32 |
+ uint64(i16MinS(uint16(x1hi>>48), uint16(x2hi>>48)))<<48
+ } else {
+ retLo = uint64(i16MinU(uint16(x1lo), uint16(x2lo))) |
+ uint64(i16MinU(uint16(x1lo>>16), uint16(x2lo>>16)))<<16 |
+ uint64(i16MinU(uint16(x1lo>>32), uint16(x2lo>>32)))<<32 |
+ uint64(i16MinU(uint16(x1lo>>48), uint16(x2lo>>48)))<<48
+ retHi = uint64(i16MinU(uint16(x1hi), uint16(x2hi))) |
+ uint64(i16MinU(uint16(x1hi>>16), uint16(x2hi>>16)))<<16 |
+ uint64(i16MinU(uint16(x1hi>>32), uint16(x2hi>>32)))<<32 |
+ uint64(i16MinU(uint16(x1hi>>48), uint16(x2hi>>48)))<<48
+ }
+ case shapeI32x4:
+ if op.B3 { // signed
+ retLo = uint64(i32MinS(uint32(x1lo), uint32(x2lo))) |
+ uint64(i32MinS(uint32(x1lo>>32), uint32(x2lo>>32)))<<32
+ retHi = uint64(i32MinS(uint32(x1hi), uint32(x2hi))) |
+ uint64(i32MinS(uint32(x1hi>>32), uint32(x2hi>>32)))<<32
+ } else {
+ retLo = uint64(i32MinU(uint32(x1lo), uint32(x2lo))) |
+ uint64(i32MinU(uint32(x1lo>>32), uint32(x2lo>>32)))<<32
+ retHi = uint64(i32MinU(uint32(x1hi), uint32(x2hi))) |
+ uint64(i32MinU(uint32(x1hi>>32), uint32(x2hi>>32)))<<32
+ }
+ case shapeF32x4:
+ retHi = wasmCompatMin32bits(uint32(x1hi), uint32(x2hi)) |
+ wasmCompatMin32bits(uint32(x1hi>>32), uint32(x2hi>>32))<<32
+ retLo = wasmCompatMin32bits(uint32(x1lo), uint32(x2lo)) |
+ wasmCompatMin32bits(uint32(x1lo>>32), uint32(x2lo>>32))<<32
+ case shapeF64x2:
+ retHi = math.Float64bits(moremath.WasmCompatMin64(
+ math.Float64frombits(x1hi),
+ math.Float64frombits(x2hi),
+ ))
+ retLo = math.Float64bits(moremath.WasmCompatMin64(
+ math.Float64frombits(x1lo),
+ math.Float64frombits(x2lo),
+ ))
+ }
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128Max:
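+ // Same structure as min above: signed/unsigned integer max, and
+ // NaN-propagating wasm float max for the float shapes.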
+ x2hi, x2lo := ce.popValue(), ce.popValue()
+ x1hi, x1lo := ce.popValue(), ce.popValue()
+ var retLo, retHi uint64
+ switch op.B1 {
+ case shapeI8x16:
+ if op.B3 { // signed
+ retLo = uint64(i8MaxS(uint8(x1lo>>8), uint8(x2lo>>8)))<<8 | uint64(i8MaxS(uint8(x1lo), uint8(x2lo))) |
+ uint64(i8MaxS(uint8(x1lo>>24), uint8(x2lo>>24)))<<24 | uint64(i8MaxS(uint8(x1lo>>16), uint8(x2lo>>16)))<<16 |
+ uint64(i8MaxS(uint8(x1lo>>40), uint8(x2lo>>40)))<<40 | uint64(i8MaxS(uint8(x1lo>>32), uint8(x2lo>>32)))<<32 |
+ uint64(i8MaxS(uint8(x1lo>>56), uint8(x2lo>>56)))<<56 | uint64(i8MaxS(uint8(x1lo>>48), uint8(x2lo>>48)))<<48
+ retHi = uint64(i8MaxS(uint8(x1hi>>8), uint8(x2hi>>8)))<<8 | uint64(i8MaxS(uint8(x1hi), uint8(x2hi))) |
+ uint64(i8MaxS(uint8(x1hi>>24), uint8(x2hi>>24)))<<24 | uint64(i8MaxS(uint8(x1hi>>16), uint8(x2hi>>16)))<<16 |
+ uint64(i8MaxS(uint8(x1hi>>40), uint8(x2hi>>40)))<<40 | uint64(i8MaxS(uint8(x1hi>>32), uint8(x2hi>>32)))<<32 |
+ uint64(i8MaxS(uint8(x1hi>>56), uint8(x2hi>>56)))<<56 | uint64(i8MaxS(uint8(x1hi>>48), uint8(x2hi>>48)))<<48
+ } else {
+ retLo = uint64(i8MaxU(uint8(x1lo>>8), uint8(x2lo>>8)))<<8 | uint64(i8MaxU(uint8(x1lo), uint8(x2lo))) |
+ uint64(i8MaxU(uint8(x1lo>>24), uint8(x2lo>>24)))<<24 | uint64(i8MaxU(uint8(x1lo>>16), uint8(x2lo>>16)))<<16 |
+ uint64(i8MaxU(uint8(x1lo>>40), uint8(x2lo>>40)))<<40 | uint64(i8MaxU(uint8(x1lo>>32), uint8(x2lo>>32)))<<32 |
+ uint64(i8MaxU(uint8(x1lo>>56), uint8(x2lo>>56)))<<56 | uint64(i8MaxU(uint8(x1lo>>48), uint8(x2lo>>48)))<<48
+ retHi = uint64(i8MaxU(uint8(x1hi>>8), uint8(x2hi>>8)))<<8 | uint64(i8MaxU(uint8(x1hi), uint8(x2hi))) |
+ uint64(i8MaxU(uint8(x1hi>>24), uint8(x2hi>>24)))<<24 | uint64(i8MaxU(uint8(x1hi>>16), uint8(x2hi>>16)))<<16 |
+ uint64(i8MaxU(uint8(x1hi>>40), uint8(x2hi>>40)))<<40 | uint64(i8MaxU(uint8(x1hi>>32), uint8(x2hi>>32)))<<32 |
+ uint64(i8MaxU(uint8(x1hi>>56), uint8(x2hi>>56)))<<56 | uint64(i8MaxU(uint8(x1hi>>48), uint8(x2hi>>48)))<<48
+ }
+ case shapeI16x8:
+ if op.B3 { // signed
+ retLo = uint64(i16MaxS(uint16(x1lo), uint16(x2lo))) |
+ uint64(i16MaxS(uint16(x1lo>>16), uint16(x2lo>>16)))<<16 |
+ uint64(i16MaxS(uint16(x1lo>>32), uint16(x2lo>>32)))<<32 |
+ uint64(i16MaxS(uint16(x1lo>>48), uint16(x2lo>>48)))<<48
+ retHi = uint64(i16MaxS(uint16(x1hi), uint16(x2hi))) |
+ uint64(i16MaxS(uint16(x1hi>>16), uint16(x2hi>>16)))<<16 |
+ uint64(i16MaxS(uint16(x1hi>>32), uint16(x2hi>>32)))<<32 |
+ uint64(i16MaxS(uint16(x1hi>>48), uint16(x2hi>>48)))<<48
+ } else {
+ retLo = uint64(i16MaxU(uint16(x1lo), uint16(x2lo))) |
+ uint64(i16MaxU(uint16(x1lo>>16), uint16(x2lo>>16)))<<16 |
+ uint64(i16MaxU(uint16(x1lo>>32), uint16(x2lo>>32)))<<32 |
+ uint64(i16MaxU(uint16(x1lo>>48), uint16(x2lo>>48)))<<48
+ retHi = uint64(i16MaxU(uint16(x1hi), uint16(x2hi))) |
+ uint64(i16MaxU(uint16(x1hi>>16), uint16(x2hi>>16)))<<16 |
+ uint64(i16MaxU(uint16(x1hi>>32), uint16(x2hi>>32)))<<32 |
+ uint64(i16MaxU(uint16(x1hi>>48), uint16(x2hi>>48)))<<48
+ }
+ case shapeI32x4:
+ if op.B3 { // signed
+ retLo = uint64(i32MaxS(uint32(x1lo), uint32(x2lo))) |
+ uint64(i32MaxS(uint32(x1lo>>32), uint32(x2lo>>32)))<<32
+ retHi = uint64(i32MaxS(uint32(x1hi), uint32(x2hi))) |
+ uint64(i32MaxS(uint32(x1hi>>32), uint32(x2hi>>32)))<<32
+ } else {
+ retLo = uint64(i32MaxU(uint32(x1lo), uint32(x2lo))) |
+ uint64(i32MaxU(uint32(x1lo>>32), uint32(x2lo>>32)))<<32
+ retHi = uint64(i32MaxU(uint32(x1hi), uint32(x2hi))) |
+ uint64(i32MaxU(uint32(x1hi>>32), uint32(x2hi>>32)))<<32
+ }
+ case shapeF32x4:
+ retHi = wasmCompatMax32bits(uint32(x1hi), uint32(x2hi)) |
+ wasmCompatMax32bits(uint32(x1hi>>32), uint32(x2hi>>32))<<32
+ retLo = wasmCompatMax32bits(uint32(x1lo), uint32(x2lo)) |
+ wasmCompatMax32bits(uint32(x1lo>>32), uint32(x2lo>>32))<<32
+ case shapeF64x2:
+ retHi = math.Float64bits(moremath.WasmCompatMax64(
+ math.Float64frombits(x1hi),
+ math.Float64frombits(x2hi),
+ ))
+ retLo = math.Float64bits(moremath.WasmCompatMax64(
+ math.Float64frombits(x1lo),
+ math.Float64frombits(x2lo),
+ ))
+ }
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128AvgrU:
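+ // Unsigned rounding average per lane: (v + w + 1) / 2, computed by the
+ // i8/i16RoundingAverage helpers.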
+ x2hi, x2lo := ce.popValue(), ce.popValue()
+ x1hi, x1lo := ce.popValue(), ce.popValue()
+ var retLo, retHi uint64
+ switch op.B1 {
+ case shapeI8x16:
+ retLo = uint64(i8RoundingAverage(uint8(x1lo>>8), uint8(x2lo>>8)))<<8 | uint64(i8RoundingAverage(uint8(x1lo), uint8(x2lo))) |
+ uint64(i8RoundingAverage(uint8(x1lo>>24), uint8(x2lo>>24)))<<24 | uint64(i8RoundingAverage(uint8(x1lo>>16), uint8(x2lo>>16)))<<16 |
+ uint64(i8RoundingAverage(uint8(x1lo>>40), uint8(x2lo>>40)))<<40 | uint64(i8RoundingAverage(uint8(x1lo>>32), uint8(x2lo>>32)))<<32 |
+ uint64(i8RoundingAverage(uint8(x1lo>>56), uint8(x2lo>>56)))<<56 | uint64(i8RoundingAverage(uint8(x1lo>>48), uint8(x2lo>>48)))<<48
+ retHi = uint64(i8RoundingAverage(uint8(x1hi>>8), uint8(x2hi>>8)))<<8 | uint64(i8RoundingAverage(uint8(x1hi), uint8(x2hi))) |
+ uint64(i8RoundingAverage(uint8(x1hi>>24), uint8(x2hi>>24)))<<24 | uint64(i8RoundingAverage(uint8(x1hi>>16), uint8(x2hi>>16)))<<16 |
+ uint64(i8RoundingAverage(uint8(x1hi>>40), uint8(x2hi>>40)))<<40 | uint64(i8RoundingAverage(uint8(x1hi>>32), uint8(x2hi>>32)))<<32 |
+ uint64(i8RoundingAverage(uint8(x1hi>>56), uint8(x2hi>>56)))<<56 | uint64(i8RoundingAverage(uint8(x1hi>>48), uint8(x2hi>>48)))<<48
+ case shapeI16x8:
+ retLo = uint64(i16RoundingAverage(uint16(x1lo), uint16(x2lo))) |
+ uint64(i16RoundingAverage(uint16(x1lo>>16), uint16(x2lo>>16)))<<16 |
+ uint64(i16RoundingAverage(uint16(x1lo>>32), uint16(x2lo>>32)))<<32 |
+ uint64(i16RoundingAverage(uint16(x1lo>>48), uint16(x2lo>>48)))<<48
+ retHi = uint64(i16RoundingAverage(uint16(x1hi), uint16(x2hi))) |
+ uint64(i16RoundingAverage(uint16(x1hi>>16), uint16(x2hi>>16)))<<16 |
+ uint64(i16RoundingAverage(uint16(x1hi>>32), uint16(x2hi>>32)))<<32 |
+ uint64(i16RoundingAverage(uint16(x1hi>>48), uint16(x2hi>>48)))<<48
+ }
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128Pmin:
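+ // Pseudo-minimum: pmin(a, b) = (b < a) ? b : a. Unlike float min this does
+ // not canonicalize NaNs or treat -0 as smaller than +0.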
+ x2hi, x2lo := ce.popValue(), ce.popValue()
+ x1hi, x1lo := ce.popValue(), ce.popValue()
+ var retLo, retHi uint64
+ if op.B1 == shapeF32x4 {
+ if flt32(math.Float32frombits(uint32(x2lo)), math.Float32frombits(uint32(x1lo))) {
+ retLo = x2lo & 0x00000000_ffffffff
+ } else {
+ retLo = x1lo & 0x00000000_ffffffff
+ }
+ if flt32(math.Float32frombits(uint32(x2lo>>32)), math.Float32frombits(uint32(x1lo>>32))) {
+ retLo |= x2lo & 0xffffffff_00000000
+ } else {
+ retLo |= x1lo & 0xffffffff_00000000
+ }
+ if flt32(math.Float32frombits(uint32(x2hi)), math.Float32frombits(uint32(x1hi))) {
+ retHi = x2hi & 0x00000000_ffffffff
+ } else {
+ retHi = x1hi & 0x00000000_ffffffff
+ }
+ if flt32(math.Float32frombits(uint32(x2hi>>32)), math.Float32frombits(uint32(x1hi>>32))) {
+ retHi |= x2hi & 0xffffffff_00000000
+ } else {
+ retHi |= x1hi & 0xffffffff_00000000
+ }
+ } else {
+ if flt64(math.Float64frombits(x2lo), math.Float64frombits(x1lo)) {
+ retLo = x2lo
+ } else {
+ retLo = x1lo
+ }
+ if flt64(math.Float64frombits(x2hi), math.Float64frombits(x1hi)) {
+ retHi = x2hi
+ } else {
+ retHi = x1hi
+ }
+ }
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128Pmax:
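+ // Pseudo-maximum: pmax(a, b) = (a < b) ? b : a, mirroring pmin above.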
+ x2hi, x2lo := ce.popValue(), ce.popValue()
+ x1hi, x1lo := ce.popValue(), ce.popValue()
+ var retLo, retHi uint64
+ if op.B1 == shapeF32x4 {
+ if flt32(math.Float32frombits(uint32(x1lo)), math.Float32frombits(uint32(x2lo))) {
+ retLo = x2lo & 0x00000000_ffffffff
+ } else {
+ retLo = x1lo & 0x00000000_ffffffff
+ }
+ if flt32(math.Float32frombits(uint32(x1lo>>32)), math.Float32frombits(uint32(x2lo>>32))) {
+ retLo |= x2lo & 0xffffffff_00000000
+ } else {
+ retLo |= x1lo & 0xffffffff_00000000
+ }
+ if flt32(math.Float32frombits(uint32(x1hi)), math.Float32frombits(uint32(x2hi))) {
+ retHi = x2hi & 0x00000000_ffffffff
+ } else {
+ retHi = x1hi & 0x00000000_ffffffff
+ }
+ if flt32(math.Float32frombits(uint32(x1hi>>32)), math.Float32frombits(uint32(x2hi>>32))) {
+ retHi |= x2hi & 0xffffffff_00000000
+ } else {
+ retHi |= x1hi & 0xffffffff_00000000
+ }
+ } else {
+ if flt64(math.Float64frombits(x1lo), math.Float64frombits(x2lo)) {
+ retLo = x2lo
+ } else {
+ retLo = x1lo
+ }
+ if flt64(math.Float64frombits(x1hi), math.Float64frombits(x2hi)) {
+ retHi = x2hi
+ } else {
+ retHi = x1hi
+ }
+ }
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128Ceil:
+ hi, lo := ce.popValue(), ce.popValue()
+ if op.B1 == shapeF32x4 {
+ lo = uint64(math.Float32bits(moremath.WasmCompatCeilF32(math.Float32frombits(uint32(lo))))) |
+ (uint64(math.Float32bits(moremath.WasmCompatCeilF32(math.Float32frombits(uint32(lo>>32))))) << 32)
+ hi = uint64(math.Float32bits(moremath.WasmCompatCeilF32(math.Float32frombits(uint32(hi))))) |
+ (uint64(math.Float32bits(moremath.WasmCompatCeilF32(math.Float32frombits(uint32(hi>>32))))) << 32)
+ } else {
+ lo = math.Float64bits(moremath.WasmCompatCeilF64(math.Float64frombits(lo)))
+ hi = math.Float64bits(moremath.WasmCompatCeilF64(math.Float64frombits(hi)))
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Floor:
+ hi, lo := ce.popValue(), ce.popValue()
+ if op.B1 == shapeF32x4 {
+ lo = uint64(math.Float32bits(moremath.WasmCompatFloorF32(math.Float32frombits(uint32(lo))))) |
+ (uint64(math.Float32bits(moremath.WasmCompatFloorF32(math.Float32frombits(uint32(lo>>32))))) << 32)
+ hi = uint64(math.Float32bits(moremath.WasmCompatFloorF32(math.Float32frombits(uint32(hi))))) |
+ (uint64(math.Float32bits(moremath.WasmCompatFloorF32(math.Float32frombits(uint32(hi>>32))))) << 32)
+ } else {
+ lo = math.Float64bits(moremath.WasmCompatFloorF64(math.Float64frombits(lo)))
+ hi = math.Float64bits(moremath.WasmCompatFloorF64(math.Float64frombits(hi)))
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Trunc:
+ hi, lo := ce.popValue(), ce.popValue()
+ if op.B1 == shapeF32x4 {
+ lo = uint64(math.Float32bits(moremath.WasmCompatTruncF32(math.Float32frombits(uint32(lo))))) |
+ (uint64(math.Float32bits(moremath.WasmCompatTruncF32(math.Float32frombits(uint32(lo>>32))))) << 32)
+ hi = uint64(math.Float32bits(moremath.WasmCompatTruncF32(math.Float32frombits(uint32(hi))))) |
+ (uint64(math.Float32bits(moremath.WasmCompatTruncF32(math.Float32frombits(uint32(hi>>32))))) << 32)
+ } else {
+ lo = math.Float64bits(moremath.WasmCompatTruncF64(math.Float64frombits(lo)))
+ hi = math.Float64bits(moremath.WasmCompatTruncF64(math.Float64frombits(hi)))
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Nearest:
+ hi, lo := ce.popValue(), ce.popValue()
+ if op.B1 == shapeF32x4 {
+ lo = uint64(math.Float32bits(moremath.WasmCompatNearestF32(math.Float32frombits(uint32(lo))))) |
+ (uint64(math.Float32bits(moremath.WasmCompatNearestF32(math.Float32frombits(uint32(lo>>32))))) << 32)
+ hi = uint64(math.Float32bits(moremath.WasmCompatNearestF32(math.Float32frombits(uint32(hi))))) |
+ (uint64(math.Float32bits(moremath.WasmCompatNearestF32(math.Float32frombits(uint32(hi>>32))))) << 32)
+ } else {
+ lo = math.Float64bits(moremath.WasmCompatNearestF64(math.Float64frombits(lo)))
+ hi = math.Float64bits(moremath.WasmCompatNearestF64(math.Float64frombits(hi)))
+ }
+ ce.pushValue(lo)
+ ce.pushValue(hi)
+ frame.pc++
+ case operationKindV128Extend:
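+ // Widen half of the input lanes (the low half when op.B3 is set, else the
+ // high half) to twice the lane width, sign- or zero-extending per op.B2.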
+ hi, lo := ce.popValue(), ce.popValue()
+ var origin uint64
+ if op.B3 { // use lower 64 bits
+ origin = lo
+ } else {
+ origin = hi
+ }
+
+ signed := op.B2 == 1
+
+ var retHi, retLo uint64
+ switch op.B1 {
+ case shapeI8x16:
+ for i := 0; i < 8; i++ {
+ v8 := byte(origin >> (i * 8))
+
+ var v16 uint16
+ if signed {
+ v16 = uint16(int8(v8))
+ } else {
+ v16 = uint16(v8)
+ }
+
+ if i < 4 {
+ retLo |= uint64(v16) << (i * 16)
+ } else {
+ retHi |= uint64(v16) << ((i - 4) * 16)
+ }
+ }
+ case shapeI16x8:
+ for i := 0; i < 4; i++ {
+ v16 := uint16(origin >> (i * 16))
+
+ var v32 uint32
+ if signed {
+ v32 = uint32(int16(v16))
+ } else {
+ v32 = uint32(v16)
+ }
+
+ if i < 2 {
+ retLo |= uint64(v32) << (i * 32)
+ } else {
+ retHi |= uint64(v32) << ((i - 2) * 32)
+ }
+ }
+ case shapeI32x4:
+ v32Lo := uint32(origin)
+ v32Hi := uint32(origin >> 32)
+ if signed {
+ retLo = uint64(int32(v32Lo))
+ retHi = uint64(int32(v32Hi))
+ } else {
+ retLo = uint64(v32Lo)
+ retHi = uint64(v32Hi)
+ }
+ }
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128ExtMul:
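+ // Extended multiply: widen the selected half's lanes first, then multiply,
+ // so the double-width products cannot overflow.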
+ x2Hi, x2Lo := ce.popValue(), ce.popValue()
+ x1Hi, x1Lo := ce.popValue(), ce.popValue()
+ var x1, x2 uint64
+ if op.B3 { // use lower 64 bits
+ x1, x2 = x1Lo, x2Lo
+ } else {
+ x1, x2 = x1Hi, x2Hi
+ }
+
+ signed := op.B2 == 1
+
+ var retLo, retHi uint64
+ switch op.B1 {
+ case shapeI8x16:
+ for i := 0; i < 8; i++ {
+ v1, v2 := byte(x1>>(i*8)), byte(x2>>(i*8))
+
+ var v16 uint16
+ if signed {
+ v16 = uint16(int16(int8(v1)) * int16(int8(v2)))
+ } else {
+ v16 = uint16(v1) * uint16(v2)
+ }
+
+ if i < 4 {
+ retLo |= uint64(v16) << (i * 16)
+ } else {
+ retHi |= uint64(v16) << ((i - 4) * 16)
+ }
+ }
+ case shapeI16x8:
+ for i := 0; i < 4; i++ {
+ v1, v2 := uint16(x1>>(i*16)), uint16(x2>>(i*16))
+
+ var v32 uint32
+ if signed {
+ v32 = uint32(int32(int16(v1)) * int32(int16(v2)))
+ } else {
+ v32 = uint32(v1) * uint32(v2)
+ }
+
+ if i < 2 {
+ retLo |= uint64(v32) << (i * 32)
+ } else {
+ retHi |= uint64(v32) << ((i - 2) * 32)
+ }
+ }
+ case shapeI32x4:
+ v1Lo, v2Lo := uint32(x1), uint32(x2)
+ v1Hi, v2Hi := uint32(x1>>32), uint32(x2>>32)
+ if signed {
+ retLo = uint64(int64(int32(v1Lo)) * int64(int32(v2Lo)))
+ retHi = uint64(int64(int32(v1Hi)) * int64(int32(v2Hi)))
+ } else {
+ retLo = uint64(v1Lo) * uint64(v2Lo)
+ retHi = uint64(v1Hi) * uint64(v2Hi)
+ }
+ }
+
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128Q15mulrSatS:
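+ // Lanes are Q15 fixed-point values: multiply, add the rounding constant
+ // 0x4000, shift right by 15, then saturate to the int16 range.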
+ x2hi, x2Lo := ce.popValue(), ce.popValue()
+ x1hi, x1Lo := ce.popValue(), ce.popValue()
+ var retLo, retHi uint64
+ for i := 0; i < 8; i++ {
+ var v, w int16
+ if i < 4 {
+ v, w = int16(uint16(x1Lo>>(i*16))), int16(uint16(x2Lo>>(i*16)))
+ } else {
+ v, w = int16(uint16(x1hi>>((i-4)*16))), int16(uint16(x2hi>>((i-4)*16)))
+ }
+
+ var uv uint64
+ // https://github.com/WebAssembly/spec/blob/wg-2.0.draft1/proposals/simd/SIMD.md#saturating-integer-q-format-rounding-multiplication
+ if calc := ((int32(v) * int32(w)) + 0x4000) >> 15; calc < math.MinInt16 {
+ uv = uint64(uint16(0x8000))
+ } else if calc > math.MaxInt16 {
+ uv = uint64(uint16(0x7fff))
+ } else {
+ uv = uint64(uint16(int16(calc)))
+ }
+
+ if i < 4 {
+ retLo |= uv << (i * 16)
+ } else {
+ retHi |= uv << ((i - 4) * 16)
+ }
+ }
+
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128ExtAddPairwise:
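+ // Add each pair of adjacent lanes, widening the sums to double-width lanes.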
+ hi, lo := ce.popValue(), ce.popValue()
+
+ signed := op.B3
+
+ var retLo, retHi uint64
+ switch op.B1 {
+ case shapeI8x16:
+ for i := 0; i < 8; i++ {
+ var v1, v2 byte
+ if i < 4 {
+ v1, v2 = byte(lo>>((i*2)*8)), byte(lo>>((i*2+1)*8))
+ } else {
+ v1, v2 = byte(hi>>(((i-4)*2)*8)), byte(hi>>(((i-4)*2+1)*8))
+ }
+
+ var v16 uint16
+ if signed {
+ v16 = uint16(int16(int8(v1)) + int16(int8(v2)))
+ } else {
+ v16 = uint16(v1) + uint16(v2)
+ }
+
+ if i < 4 {
+ retLo |= uint64(v16) << (i * 16)
+ } else {
+ retHi |= uint64(v16) << ((i - 4) * 16)
+ }
+ }
+ case shapeI16x8:
+ for i := 0; i < 4; i++ {
+ var v1, v2 uint16
+ if i < 2 {
+ v1, v2 = uint16(lo>>((i*2)*16)), uint16(lo>>((i*2+1)*16))
+ } else {
+ v1, v2 = uint16(hi>>(((i-2)*2)*16)), uint16(hi>>(((i-2)*2+1)*16))
+ }
+
+ var v32 uint32
+ if signed {
+ v32 = uint32(int32(int16(v1)) + int32(int16(v2)))
+ } else {
+ v32 = uint32(v1) + uint32(v2)
+ }
+
+ if i < 2 {
+ retLo |= uint64(v32) << (i * 32)
+ } else {
+ retHi |= uint64(v32) << ((i - 2) * 32)
+ }
+ }
+ }
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128FloatPromote:
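+ // Promote the two f32 lanes in the low half to f64x2; the high input half
+ // is discarded.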
+ _, toPromote := ce.popValue(), ce.popValue()
+ ce.pushValue(math.Float64bits(float64(math.Float32frombits(uint32(toPromote)))))
+ ce.pushValue(math.Float64bits(float64(math.Float32frombits(uint32(toPromote >> 32)))))
+ frame.pc++
+ case operationKindV128FloatDemote:
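+ // Demote the two f64 lanes to f32, packing them into the low 64 bits and
+ // zeroing the high half.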
+ hi, lo := ce.popValue(), ce.popValue()
+ ce.pushValue(
+ uint64(math.Float32bits(float32(math.Float64frombits(lo)))) |
+ (uint64(math.Float32bits(float32(math.Float64frombits(hi)))) << 32),
+ )
+ ce.pushValue(0)
+ frame.pc++
+ case operationKindV128FConvertFromI:
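+ // Convert integer lanes to float. For the f64x2 destination only the two
+ // low i32 lanes are used.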
+ hi, lo := ce.popValue(), ce.popValue()
+ v1, v2, v3, v4 := uint32(lo), uint32(lo>>32), uint32(hi), uint32(hi>>32)
+ signed := op.B3
+
+ var retLo, retHi uint64
+ switch op.B1 { // Destination shape.
+ case shapeF32x4: // f32x4 from signed/unsigned i32x4
+ if signed {
+ retLo = uint64(math.Float32bits(float32(int32(v1)))) |
+ (uint64(math.Float32bits(float32(int32(v2)))) << 32)
+ retHi = uint64(math.Float32bits(float32(int32(v3)))) |
+ (uint64(math.Float32bits(float32(int32(v4)))) << 32)
+ } else {
+ retLo = uint64(math.Float32bits(float32(v1))) |
+ (uint64(math.Float32bits(float32(v2))) << 32)
+ retHi = uint64(math.Float32bits(float32(v3))) |
+ (uint64(math.Float32bits(float32(v4))) << 32)
+ }
+ case shapeF64x2: // f64x2 from signed/unsigned i32x4
+ if signed {
+ retLo, retHi = math.Float64bits(float64(int32(v1))), math.Float64bits(float64(int32(v2)))
+ } else {
+ retLo, retHi = math.Float64bits(float64(v1)), math.Float64bits(float64(v2))
+ }
+ }
+
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128Narrow:
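+ // Narrow the lanes of both inputs to half width with saturation: x1's lanes
+ // fill the low half of the result and x2's the high half.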
+ x2Hi, x2Lo := ce.popValue(), ce.popValue()
+ x1Hi, x1Lo := ce.popValue(), ce.popValue()
+ signed := op.B3
+
+ var retLo, retHi uint64
+ switch op.B1 {
+ case shapeI16x8: // signed/unsigned i16x8 to i8x16
+ for i := 0; i < 8; i++ {
+ var v16 uint16
+ if i < 4 {
+ v16 = uint16(x1Lo >> (i * 16))
+ } else {
+ v16 = uint16(x1Hi >> ((i - 4) * 16))
+ }
+
+ var v byte
+ if signed {
+ if s := int16(v16); s > math.MaxInt8 {
+ v = math.MaxInt8
+ } else if s < math.MinInt8 {
+ s = math.MinInt8
+ v = byte(s)
+ } else {
+ v = byte(v16)
+ }
+ } else {
+ if s := int16(v16); s > math.MaxUint8 {
+ v = math.MaxUint8
+ } else if s < 0 {
+ v = 0
+ } else {
+ v = byte(v16)
+ }
+ }
+ retLo |= uint64(v) << (i * 8)
+ }
+ for i := 0; i < 8; i++ {
+ var v16 uint16
+ if i < 4 {
+ v16 = uint16(x2Lo >> (i * 16))
+ } else {
+ v16 = uint16(x2Hi >> ((i - 4) * 16))
+ }
+
+ var v byte
+ if signed {
+ if s := int16(v16); s > math.MaxInt8 {
+ v = math.MaxInt8
+ } else if s < math.MinInt8 {
+ s = math.MinInt8
+ v = byte(s)
+ } else {
+ v = byte(v16)
+ }
+ } else {
+ if s := int16(v16); s > math.MaxUint8 {
+ v = math.MaxUint8
+ } else if s < 0 {
+ v = 0
+ } else {
+ v = byte(v16)
+ }
+ }
+ retHi |= uint64(v) << (i * 8)
+ }
+ case shapeI32x4: // signed/unsigned i32x4 to i16x8
+ for i := 0; i < 4; i++ {
+ var v32 uint32
+ if i < 2 {
+ v32 = uint32(x1Lo >> (i * 32))
+ } else {
+ v32 = uint32(x1Hi >> ((i - 2) * 32))
+ }
+
+ var v uint16
+ if signed {
+ if s := int32(v32); s > math.MaxInt16 {
+ v = math.MaxInt16
+ } else if s < math.MinInt16 {
+ s = math.MinInt16
+ v = uint16(s)
+ } else {
+ v = uint16(v32)
+ }
+ } else {
+ if s := int32(v32); s > math.MaxUint16 {
+ v = math.MaxUint16
+ } else if s < 0 {
+ v = 0
+ } else {
+ v = uint16(v32)
+ }
+ }
+ retLo |= uint64(v) << (i * 16)
+ }
+
+ for i := 0; i < 4; i++ {
+ var v32 uint32
+ if i < 2 {
+ v32 = uint32(x2Lo >> (i * 32))
+ } else {
+ v32 = uint32(x2Hi >> ((i - 2) * 32))
+ }
+
+ var v uint16
+ if signed {
+ if s := int32(v32); s > math.MaxInt16 {
+ v = math.MaxInt16
+ } else if s < math.MinInt16 {
+ s = math.MinInt16
+ v = uint16(s)
+ } else {
+ v = uint16(v32)
+ }
+ } else {
+ if s := int32(v32); s > math.MaxUint16 {
+ v = math.MaxUint16
+ } else if s < 0 {
+ v = 0
+ } else {
+ v = uint16(v32)
+ }
+ }
+ retHi |= uint64(v) << (i * 16)
+ }
+ }
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindV128Dot:
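+ // i32x4.dot_i16x8_s: multiply adjacent pairs of signed 16-bit lanes and add
+ // each pair, yielding four 32-bit lanes.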
+ x2Hi, x2Lo := ce.popValue(), ce.popValue()
+ x1Hi, x1Lo := ce.popValue(), ce.popValue()
+ ce.pushValue(
+ uint64(uint32(int32(int16(x1Lo>>0))*int32(int16(x2Lo>>0))+int32(int16(x1Lo>>16))*int32(int16(x2Lo>>16)))) |
+ (uint64(uint32(int32(int16(x1Lo>>32))*int32(int16(x2Lo>>32))+int32(int16(x1Lo>>48))*int32(int16(x2Lo>>48)))) << 32),
+ )
+ ce.pushValue(
+ uint64(uint32(int32(int16(x1Hi>>0))*int32(int16(x2Hi>>0))+int32(int16(x1Hi>>16))*int32(int16(x2Hi>>16)))) |
+ (uint64(uint32(int32(int16(x1Hi>>32))*int32(int16(x2Hi>>32))+int32(int16(x1Hi>>48))*int32(int16(x2Hi>>48)))) << 32),
+ )
+ frame.pc++
+ case operationKindV128ITruncSatFromF:
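+ // Truncate float lanes toward zero and saturate to the integer range; NaN
+ // lanes become 0. For the f64x2 source the two results land in the low half
+ // and the high half stays zero.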
+ hi, lo := ce.popValue(), ce.popValue()
+ signed := op.B3
+ var retLo, retHi uint64
+
+ switch op.B1 {
+ case shapeF32x4: // f32x4 to i32x4
+ for i, f64 := range [4]float64{
+ math.Trunc(float64(math.Float32frombits(uint32(lo)))),
+ math.Trunc(float64(math.Float32frombits(uint32(lo >> 32)))),
+ math.Trunc(float64(math.Float32frombits(uint32(hi)))),
+ math.Trunc(float64(math.Float32frombits(uint32(hi >> 32)))),
+ } {
+
+ var v uint32
+ if math.IsNaN(f64) {
+ v = 0
+ } else if signed {
+ if f64 < math.MinInt32 {
+ f64 = math.MinInt32
+ } else if f64 > math.MaxInt32 {
+ f64 = math.MaxInt32
+ }
+ v = uint32(int32(f64))
+ } else {
+ if f64 < 0 {
+ f64 = 0
+ } else if f64 > math.MaxUint32 {
+ f64 = math.MaxUint32
+ }
+ v = uint32(f64)
+ }
+
+ if i < 2 {
+ retLo |= uint64(v) << (i * 32)
+ } else {
+ retHi |= uint64(v) << ((i - 2) * 32)
+ }
+ }
+
+ case shapeF64x2: // f64x2 to i32x4
+ for i, f := range [2]float64{
+ math.Trunc(math.Float64frombits(lo)),
+ math.Trunc(math.Float64frombits(hi)),
+ } {
+ var v uint32
+ if math.IsNaN(f) {
+ v = 0
+ } else if signed {
+ if f < math.MinInt32 {
+ f = math.MinInt32
+ } else if f > math.MaxInt32 {
+ f = math.MaxInt32
+ }
+ v = uint32(int32(f))
+ } else {
+ if f < 0 {
+ f = 0
+ } else if f > math.MaxUint32 {
+ f = math.MaxUint32
+ }
+ v = uint32(f)
+ }
+
+ retLo |= uint64(v) << (i * 32)
+ }
+ }
+
+ ce.pushValue(retLo)
+ ce.pushValue(retHi)
+ frame.pc++
+ case operationKindAtomicMemoryWait:
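+ // memory.atomic.wait32/wait64: block until notified or the timeout expires,
+ // returning immediately with "not-equal" if the value at the address already
+ // differs from the expected one. Per the threads proposal the pushed result
+ // is 0 ("ok"), 1 ("not-equal"), or 2 ("timed-out").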
+ timeout := int64(ce.popValue())
+ exp := ce.popValue()
+ offset := ce.popMemoryOffset(op)
+ // Runtime instead of validation error because the spec intends to allow binaries to include
+ // such instructions as long as they are not executed.
+ if !memoryInst.Shared {
+ panic(wasmruntime.ErrRuntimeExpectedSharedMemory)
+ }
+
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32:
+ if offset%4 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ if int(offset) > len(memoryInst.Buffer)-4 {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(memoryInst.Wait32(offset, uint32(exp), timeout, func(mem *wasm.MemoryInstance, offset uint32) uint32 {
+ mem.Mux.Lock()
+ defer mem.Mux.Unlock()
+ value, _ := mem.ReadUint32Le(offset)
+ return value
+ }))
+ case unsignedTypeI64:
+ if offset%8 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ if int(offset) > len(memoryInst.Buffer)-8 {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(memoryInst.Wait64(offset, exp, timeout, func(mem *wasm.MemoryInstance, offset uint32) uint64 {
+ mem.Mux.Lock()
+ defer mem.Mux.Unlock()
+ value, _ := mem.ReadUint64Le(offset)
+ return value
+ }))
+ }
+ frame.pc++
+ case operationKindAtomicMemoryNotify:
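+ // memory.atomic.notify: wake up to `count` waiters at the address and push
+ // the number actually woken.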
+ count := ce.popValue()
+ offset := ce.popMemoryOffset(op)
+ if offset%4 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ // Only a bounds check is needed; the value at the address is not read.
+ if offset >= memoryInst.Size() {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ res := memoryInst.Notify(offset, uint32(count))
+ ce.pushValue(uint64(res))
+ frame.pc++
+ case operationKindAtomicFence:
+ // Memory not required for fence only
+ if memoryInst != nil {
+ // An empty critical section acts as a synchronization primitive, which is
+ // what a fence is. There are likely no spectests or defined behavior
+ // confirming this yet.
+ memoryInst.Mux.Lock()
+ memoryInst.Mux.Unlock() //nolint:staticcheck
+ }
+ frame.pc++
+ case operationKindAtomicLoad:
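+ // This and the following atomic accesses take the memory mutex and trap
+ // with ErrRuntimeUnalignedAtomic when the offset is not naturally aligned
+ // (byte-wide accesses need no alignment check).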
+ offset := ce.popMemoryOffset(op)
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32:
+ if offset%4 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ val, ok := memoryInst.ReadUint32Le(offset)
+ memoryInst.Mux.Unlock()
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(uint64(val))
+ case unsignedTypeI64:
+ if offset%8 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ val, ok := memoryInst.ReadUint64Le(offset)
+ memoryInst.Mux.Unlock()
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(val)
+ }
+ frame.pc++
+ case operationKindAtomicLoad8:
+ offset := ce.popMemoryOffset(op)
+ memoryInst.Mux.Lock()
+ val, ok := memoryInst.ReadByte(offset)
+ memoryInst.Mux.Unlock()
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(uint64(val))
+ frame.pc++
+ case operationKindAtomicLoad16:
+ offset := ce.popMemoryOffset(op)
+ if offset%2 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ val, ok := memoryInst.ReadUint16Le(offset)
+ memoryInst.Mux.Unlock()
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ ce.pushValue(uint64(val))
+ frame.pc++
+ case operationKindAtomicStore:
+ val := ce.popValue()
+ offset := ce.popMemoryOffset(op)
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32:
+ if offset%4 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ ok := memoryInst.WriteUint32Le(offset, uint32(val))
+ memoryInst.Mux.Unlock()
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ case unsignedTypeI64:
+ if offset%8 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ ok := memoryInst.WriteUint64Le(offset, val)
+ memoryInst.Mux.Unlock()
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ }
+ frame.pc++
+ case operationKindAtomicStore8:
+ val := byte(ce.popValue())
+ offset := ce.popMemoryOffset(op)
+ memoryInst.Mux.Lock()
+ ok := memoryInst.WriteByte(offset, val)
+ memoryInst.Mux.Unlock()
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ frame.pc++
+ case operationKindAtomicStore16:
+ val := uint16(ce.popValue())
+ offset := ce.popMemoryOffset(op)
+ if offset%2 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ ok := memoryInst.WriteUint16Le(offset, val)
+ memoryInst.Mux.Unlock()
+ if !ok {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ frame.pc++
+ case operationKindAtomicRMW:
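+ // Atomic read-modify-write: apply the arithmetic op under the memory mutex
+ // and push the old value; atomicArithmeticOpNop implements plain exchange.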
+ val := ce.popValue()
+ offset := ce.popMemoryOffset(op)
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32:
+ if offset%4 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ old, ok := memoryInst.ReadUint32Le(offset)
+ if !ok {
+ memoryInst.Mux.Unlock()
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ var newVal uint32
+ switch atomicArithmeticOp(op.B2) {
+ case atomicArithmeticOpAdd:
+ newVal = old + uint32(val)
+ case atomicArithmeticOpSub:
+ newVal = old - uint32(val)
+ case atomicArithmeticOpAnd:
+ newVal = old & uint32(val)
+ case atomicArithmeticOpOr:
+ newVal = old | uint32(val)
+ case atomicArithmeticOpXor:
+ newVal = old ^ uint32(val)
+ case atomicArithmeticOpNop:
+ newVal = uint32(val)
+ }
+ memoryInst.WriteUint32Le(offset, newVal)
+ memoryInst.Mux.Unlock()
+ ce.pushValue(uint64(old))
+ case unsignedTypeI64:
+ if offset%8 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ old, ok := memoryInst.ReadUint64Le(offset)
+ if !ok {
+ memoryInst.Mux.Unlock()
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ var newVal uint64
+ switch atomicArithmeticOp(op.B2) {
+ case atomicArithmeticOpAdd:
+ newVal = old + val
+ case atomicArithmeticOpSub:
+ newVal = old - val
+ case atomicArithmeticOpAnd:
+ newVal = old & val
+ case atomicArithmeticOpOr:
+ newVal = old | val
+ case atomicArithmeticOpXor:
+ newVal = old ^ val
+ case atomicArithmeticOpNop:
+ newVal = val
+ }
+ memoryInst.WriteUint64Le(offset, newVal)
+ memoryInst.Mux.Unlock()
+ ce.pushValue(old)
+ }
+ frame.pc++
+ case operationKindAtomicRMW8:
+ val := ce.popValue()
+ offset := ce.popMemoryOffset(op)
+ memoryInst.Mux.Lock()
+ old, ok := memoryInst.ReadByte(offset)
+ if !ok {
+ memoryInst.Mux.Unlock()
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ arg := byte(val)
+ var newVal byte
+ switch atomicArithmeticOp(op.B2) {
+ case atomicArithmeticOpAdd:
+ newVal = old + arg
+ case atomicArithmeticOpSub:
+ newVal = old - arg
+ case atomicArithmeticOpAnd:
+ newVal = old & arg
+ case atomicArithmeticOpOr:
+ newVal = old | arg
+ case atomicArithmeticOpXor:
+ newVal = old ^ arg
+ case atomicArithmeticOpNop:
+ newVal = arg
+ }
+ memoryInst.WriteByte(offset, newVal)
+ memoryInst.Mux.Unlock()
+ ce.pushValue(uint64(old))
+ frame.pc++
+ case operationKindAtomicRMW16:
+ val := ce.popValue()
+ offset := ce.popMemoryOffset(op)
+ if offset%2 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ old, ok := memoryInst.ReadUint16Le(offset)
+ if !ok {
+ memoryInst.Mux.Unlock()
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ arg := uint16(val)
+ var newVal uint16
+ switch atomicArithmeticOp(op.B2) {
+ case atomicArithmeticOpAdd:
+ newVal = old + arg
+ case atomicArithmeticOpSub:
+ newVal = old - arg
+ case atomicArithmeticOpAnd:
+ newVal = old & arg
+ case atomicArithmeticOpOr:
+ newVal = old | arg
+ case atomicArithmeticOpXor:
+ newVal = old ^ arg
+ case atomicArithmeticOpNop:
+ newVal = arg
+ }
+ memoryInst.WriteUint16Le(offset, newVal)
+ memoryInst.Mux.Unlock()
+ ce.pushValue(uint64(old))
+ frame.pc++
+ case operationKindAtomicRMWCmpxchg:
+ rep := ce.popValue()
+ exp := ce.popValue()
+ offset := ce.popMemoryOffset(op)
+ switch unsignedType(op.B1) {
+ case unsignedTypeI32:
+ if offset%4 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ old, ok := memoryInst.ReadUint32Le(offset)
+ if !ok {
+ memoryInst.Mux.Unlock()
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ if old == uint32(exp) {
+ memoryInst.WriteUint32Le(offset, uint32(rep))
+ }
+ memoryInst.Mux.Unlock()
+ ce.pushValue(uint64(old))
+ case unsignedTypeI64:
+ if offset%8 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ old, ok := memoryInst.ReadUint64Le(offset)
+ if !ok {
+ memoryInst.Mux.Unlock()
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ if old == exp {
+ memoryInst.WriteUint64Le(offset, rep)
+ }
+ memoryInst.Mux.Unlock()
+ ce.pushValue(old)
+ }
+ frame.pc++
+ case operationKindAtomicRMW8Cmpxchg:
+ rep := byte(ce.popValue())
+ exp := byte(ce.popValue())
+ offset := ce.popMemoryOffset(op)
+ memoryInst.Mux.Lock()
+ old, ok := memoryInst.ReadByte(offset)
+ if !ok {
+ memoryInst.Mux.Unlock()
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ if old == exp {
+ memoryInst.WriteByte(offset, rep)
+ }
+ memoryInst.Mux.Unlock()
+ ce.pushValue(uint64(old))
+ frame.pc++
+ case operationKindAtomicRMW16Cmpxchg:
+ rep := uint16(ce.popValue())
+ exp := uint16(ce.popValue())
+ offset := ce.popMemoryOffset(op)
+ if offset%2 != 0 {
+ panic(wasmruntime.ErrRuntimeUnalignedAtomic)
+ }
+ memoryInst.Mux.Lock()
+ old, ok := memoryInst.ReadUint16Le(offset)
+ if !ok {
+ memoryInst.Mux.Unlock()
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ if old == exp {
+ memoryInst.WriteUint16Le(offset, rep)
+ }
+ memoryInst.Mux.Unlock()
+ ce.pushValue(uint64(old))
+ frame.pc++
+ default:
+ frame.pc++
+ }
+ }
+ ce.popFrame()
+}
+
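+// The interpreter models every Wasm value as a uint64 bit pattern. The helpers
+// below reinterpret the low 32 bits as a float32, apply the operation, and
+// return the result's bits zero-extended back to uint64.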
+func wasmCompatMax32bits(v1, v2 uint32) uint64 {
+ return uint64(math.Float32bits(moremath.WasmCompatMax32(
+ math.Float32frombits(v1),
+ math.Float32frombits(v2),
+ )))
+}
+
+func wasmCompatMin32bits(v1, v2 uint32) uint64 {
+ return uint64(math.Float32bits(moremath.WasmCompatMin32(
+ math.Float32frombits(v1),
+ math.Float32frombits(v2),
+ )))
+}
+
+func addFloat32bits(v1, v2 uint32) uint64 {
+ return uint64(math.Float32bits(math.Float32frombits(v1) + math.Float32frombits(v2)))
+}
+
+func subFloat32bits(v1, v2 uint32) uint64 {
+ return uint64(math.Float32bits(math.Float32frombits(v1) - math.Float32frombits(v2)))
+}
+
+func mulFloat32bits(v1, v2 uint32) uint64 {
+ return uint64(math.Float32bits(math.Float32frombits(v1) * math.Float32frombits(v2)))
+}
+
+func divFloat32bits(v1, v2 uint32) uint64 {
+ return uint64(math.Float32bits(math.Float32frombits(v1) / math.Float32frombits(v2)))
+}
+
+// https://www.w3.org/TR/2022/WD-wasm-core-2-20220419/exec/numerics.html#xref-exec-numerics-op-flt-mathrm-flt-n-z-1-z-2
+func flt32(z1, z2 float32) bool {
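+ // A float compares unequal to itself only when it is NaN, so this rejects
+ // NaN operands first, per the Wasm "flt" numerics rule (likewise in flt64 below).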
+ if z1 != z1 || z2 != z2 {
+ return false
+ } else if z1 == z2 {
+ return false
+ } else if math.IsInf(float64(z1), 1) {
+ return false
+ } else if math.IsInf(float64(z1), -1) {
+ return true
+ } else if math.IsInf(float64(z2), 1) {
+ return true
+ } else if math.IsInf(float64(z2), -1) {
+ return false
+ }
+ return z1 < z2
+}
+
+// https://www.w3.org/TR/2022/WD-wasm-core-2-20220419/exec/numerics.html#xref-exec-numerics-op-flt-mathrm-flt-n-z-1-z-2
+func flt64(z1, z2 float64) bool {
+ if z1 != z1 || z2 != z2 {
+ return false
+ } else if z1 == z2 {
+ return false
+ } else if math.IsInf(z1, 1) {
+ return false
+ } else if math.IsInf(z1, -1) {
+ return true
+ } else if math.IsInf(z2, 1) {
+ return true
+ } else if math.IsInf(z2, -1) {
+ return false
+ }
+ return z1 < z2
+}
+
+func i8RoundingAverage(v1, v2 byte) byte {
+ // https://github.com/WebAssembly/spec/blob/wg-2.0.draft1/proposals/simd/SIMD.md#lane-wise-integer-rounding-average
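+ // e.g. (1+2+1)/2 == 2; the uint16 widening keeps inputs like (255, 255) from overflowing.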
+ return byte((uint16(v1) + uint16(v2) + uint16(1)) / 2)
+}
+
+func i16RoundingAverage(v1, v2 uint16) uint16 {
+ // https://github.com/WebAssembly/spec/blob/wg-2.0.draft1/proposals/simd/SIMD.md#lane-wise-integer-rounding-average
+ return uint16((uint32(v1) + uint32(v2) + 1) / 2)
+}
+
+func i8Abs(v byte) byte {
+ if i := int8(v); i < 0 {
+ return byte(-i)
+ } else {
+ return byte(i)
+ }
+}
+
+func i8MaxU(v1, v2 byte) byte {
+ if v1 < v2 {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i8MinU(v1, v2 byte) byte {
+ if v1 > v2 {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i8MaxS(v1, v2 byte) byte {
+ if int8(v1) < int8(v2) {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i8MinS(v1, v2 byte) byte {
+ if int8(v1) > int8(v2) {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i16MaxU(v1, v2 uint16) uint16 {
+ if v1 < v2 {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i16MinU(v1, v2 uint16) uint16 {
+ if v1 > v2 {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i16MaxS(v1, v2 uint16) uint16 {
+ if int16(v1) < int16(v2) {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i16MinS(v1, v2 uint16) uint16 {
+ if int16(v1) > int16(v2) {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i32MaxU(v1, v2 uint32) uint32 {
+ if v1 < v2 {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i32MinU(v1, v2 uint32) uint32 {
+ if v1 > v2 {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i32MaxS(v1, v2 uint32) uint32 {
+ if int32(v1) < int32(v2) {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i32MinS(v1, v2 uint32) uint32 {
+ if int32(v1) > int32(v2) {
+ return v2
+ } else {
+ return v1
+ }
+}
+
+func i16Abs(v uint16) uint16 {
+ if i := int16(v); i < 0 {
+ return uint16(-i)
+ } else {
+ return uint16(i)
+ }
+}
+
+func i32Abs(v uint32) uint32 {
+ if i := int32(v); i < 0 {
+ return uint32(-i)
+ } else {
+ return uint32(i)
+ }
+}
+
+func (ce *callEngine) callNativeFuncWithListener(ctx context.Context, m *wasm.ModuleInstance, f *function, fnl experimental.FunctionListener) context.Context {
+ def, typ := f.definition(), f.funcType
+
+ ce.stackIterator.reset(ce.stack, ce.frames, f)
+ fnl.Before(ctx, m, def, ce.peekValues(typ.ParamNumInUint64), &ce.stackIterator)
+ ce.stackIterator.clear()
+ ce.callNativeFunc(ctx, m, f)
+ fnl.After(ctx, m, def, ce.peekValues(typ.ResultNumInUint64))
+ return ctx
+}
+
+// popMemoryOffset takes a memory offset off the stack for use in load and store instructions.
+// As the top-of-stack value is 64-bit, this ensures it fits in 32 bits before returning it.
+func (ce *callEngine) popMemoryOffset(op *unionOperation) uint32 {
+ offset := op.U2 + ce.popValue()
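+ // e.g. a static offset (op.U2) of 8 plus a dynamic address of 0xFFFF_FFF8 yields the
+ // 33-bit effective address 0x1_0000_0000, which the check below rejects.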
+ if offset > math.MaxUint32 {
+ panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
+ }
+ return uint32(offset)
+}
+
+func (ce *callEngine) callGoFuncWithStack(ctx context.Context, m *wasm.ModuleInstance, f *function) {
+ typ := f.funcType
+ paramLen := typ.ParamNumInUint64
+ resultLen := typ.ResultNumInUint64
+ stackLen := paramLen
+
+ // In the interpreter engine, ce.stack may only have capacity to store
+ // parameters. Grow when there are more results than parameters.
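+ // For example, a host function with no parameters and one result grows the stack by
+ // one slot; one with two parameters and one result reuses the parameter slots.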
+ if growLen := resultLen - paramLen; growLen > 0 {
+ for i := 0; i < growLen; i++ {
+ ce.stack = append(ce.stack, 0)
+ }
+ stackLen += growLen
+ }
+
+ // Pass the stack elements to the go function.
+ stack := ce.stack[len(ce.stack)-stackLen:]
+ ce.callGoFunc(ctx, m, f, stack)
+
+ // Shrink the stack when there were more parameters than results.
+ if shrinkLen := paramLen - resultLen; shrinkLen > 0 {
+ ce.stack = ce.stack[0 : len(ce.stack)-shrinkLen]
+ }
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/operations.go b/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/operations.go
new file mode 100644
index 000000000..3087a718f
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/operations.go
@@ -0,0 +1,2812 @@
+package interpreter
+
+import (
+ "fmt"
+ "math"
+ "strings"
+)
+
+// unsignedInt represents unsigned 32-bit or 64-bit integers.
+type unsignedInt byte
+
+const (
+ unsignedInt32 unsignedInt = iota
+ unsignedInt64
+)
+
+// String implements fmt.Stringer.
+func (s unsignedInt) String() (ret string) {
+ switch s {
+ case unsignedInt32:
+ ret = "i32"
+ case unsignedInt64:
+ ret = "i64"
+ }
+ return
+}
+
+// signedInt represents signed or unsigned 32-bit and 64-bit integers.
+type signedInt byte
+
+const (
+ signedInt32 signedInt = iota
+ signedInt64
+ signedUint32
+ signedUint64
+)
+
+// String implements fmt.Stringer.
+func (s signedInt) String() (ret string) {
+ switch s {
+ case signedUint32:
+ ret = "u32"
+ case signedUint64:
+ ret = "u64"
+ case signedInt32:
+ ret = "s32"
+ case signedInt64:
+ ret = "s64"
+ }
+ return
+}
+
+// float represents scalar single- or double-precision floating-point types.
+type float byte
+
+const (
+ f32 float = iota
+ f64
+)
+
+// String implements fmt.Stringer.
+func (s float) String() (ret string) {
+ switch s {
+ case f32:
+ ret = "f32"
+ case f64:
+ ret = "f64"
+ }
+ return
+}
+
+// unsignedType is the union of unsignedInt, float, and the V128 vector type.
+type unsignedType byte
+
+const (
+ unsignedTypeI32 unsignedType = iota
+ unsignedTypeI64
+ unsignedTypeF32
+ unsignedTypeF64
+ unsignedTypeV128
+ unsignedTypeUnknown
+)
+
+// String implements fmt.Stringer.
+func (s unsignedType) String() (ret string) {
+ switch s {
+ case unsignedTypeI32:
+ ret = "i32"
+ case unsignedTypeI64:
+ ret = "i64"
+ case unsignedTypeF32:
+ ret = "f32"
+ case unsignedTypeF64:
+ ret = "f64"
+ case unsignedTypeV128:
+ ret = "v128"
+ case unsignedTypeUnknown:
+ ret = "unknown"
+ }
+ return
+}
+
+// signedType is the union of signedInt and float types.
+type signedType byte
+
+const (
+ signedTypeInt32 signedType = iota
+ signedTypeUint32
+ signedTypeInt64
+ signedTypeUint64
+ signedTypeFloat32
+ signedTypeFloat64
+)
+
+// String implements fmt.Stringer.
+func (s signedType) String() (ret string) {
+ switch s {
+ case signedTypeInt32:
+ ret = "s32"
+ case signedTypeUint32:
+ ret = "u32"
+ case signedTypeInt64:
+ ret = "s64"
+ case signedTypeUint64:
+ ret = "u64"
+ case signedTypeFloat32:
+ ret = "f32"
+ case signedTypeFloat64:
+ ret = "f64"
+ }
+ return
+}
+
+// operationKind is the Kind of each implementation of the Operation interface.
+type operationKind uint16
+
+// String implements fmt.Stringer.
+func (o operationKind) String() (ret string) {
+ switch o {
+ case operationKindUnreachable:
+ ret = "Unreachable"
+ case operationKindLabel:
+ ret = "label"
+ case operationKindBr:
+ ret = "Br"
+ case operationKindBrIf:
+ ret = "BrIf"
+ case operationKindBrTable:
+ ret = "BrTable"
+ case operationKindCall:
+ ret = "Call"
+ case operationKindCallIndirect:
+ ret = "CallIndirect"
+ case operationKindDrop:
+ ret = "Drop"
+ case operationKindSelect:
+ ret = "Select"
+ case operationKindPick:
+ ret = "Pick"
+ case operationKindSet:
+ ret = "Swap"
+ case operationKindGlobalGet:
+ ret = "GlobalGet"
+ case operationKindGlobalSet:
+ ret = "GlobalSet"
+ case operationKindLoad:
+ ret = "Load"
+ case operationKindLoad8:
+ ret = "Load8"
+ case operationKindLoad16:
+ ret = "Load16"
+ case operationKindLoad32:
+ ret = "Load32"
+ case operationKindStore:
+ ret = "Store"
+ case operationKindStore8:
+ ret = "Store8"
+ case operationKindStore16:
+ ret = "Store16"
+ case operationKindStore32:
+ ret = "Store32"
+ case operationKindMemorySize:
+ ret = "MemorySize"
+ case operationKindMemoryGrow:
+ ret = "MemoryGrow"
+ case operationKindConstI32:
+ ret = "ConstI32"
+ case operationKindConstI64:
+ ret = "ConstI64"
+ case operationKindConstF32:
+ ret = "ConstF32"
+ case operationKindConstF64:
+ ret = "ConstF64"
+ case operationKindEq:
+ ret = "Eq"
+ case operationKindNe:
+ ret = "Ne"
+ case operationKindEqz:
+ ret = "Eqz"
+ case operationKindLt:
+ ret = "Lt"
+ case operationKindGt:
+ ret = "Gt"
+ case operationKindLe:
+ ret = "Le"
+ case operationKindGe:
+ ret = "Ge"
+ case operationKindAdd:
+ ret = "Add"
+ case operationKindSub:
+ ret = "Sub"
+ case operationKindMul:
+ ret = "Mul"
+ case operationKindClz:
+ ret = "Clz"
+ case operationKindCtz:
+ ret = "Ctz"
+ case operationKindPopcnt:
+ ret = "Popcnt"
+ case operationKindDiv:
+ ret = "Div"
+ case operationKindRem:
+ ret = "Rem"
+ case operationKindAnd:
+ ret = "And"
+ case operationKindOr:
+ ret = "Or"
+ case operationKindXor:
+ ret = "Xor"
+ case operationKindShl:
+ ret = "Shl"
+ case operationKindShr:
+ ret = "Shr"
+ case operationKindRotl:
+ ret = "Rotl"
+ case operationKindRotr:
+ ret = "Rotr"
+ case operationKindAbs:
+ ret = "Abs"
+ case operationKindNeg:
+ ret = "Neg"
+ case operationKindCeil:
+ ret = "Ceil"
+ case operationKindFloor:
+ ret = "Floor"
+ case operationKindTrunc:
+ ret = "Trunc"
+ case operationKindNearest:
+ ret = "Nearest"
+ case operationKindSqrt:
+ ret = "Sqrt"
+ case operationKindMin:
+ ret = "Min"
+ case operationKindMax:
+ ret = "Max"
+ case operationKindCopysign:
+ ret = "Copysign"
+ case operationKindI32WrapFromI64:
+ ret = "I32WrapFromI64"
+ case operationKindITruncFromF:
+ ret = "ITruncFromF"
+ case operationKindFConvertFromI:
+ ret = "FConvertFromI"
+ case operationKindF32DemoteFromF64:
+ ret = "F32DemoteFromF64"
+ case operationKindF64PromoteFromF32:
+ ret = "F64PromoteFromF32"
+ case operationKindI32ReinterpretFromF32:
+ ret = "I32ReinterpretFromF32"
+ case operationKindI64ReinterpretFromF64:
+ ret = "I64ReinterpretFromF64"
+ case operationKindF32ReinterpretFromI32:
+ ret = "F32ReinterpretFromI32"
+ case operationKindF64ReinterpretFromI64:
+ ret = "F64ReinterpretFromI64"
+ case operationKindExtend:
+ ret = "Extend"
+ case operationKindMemoryInit:
+ ret = "MemoryInit"
+ case operationKindDataDrop:
+ ret = "DataDrop"
+ case operationKindMemoryCopy:
+ ret = "MemoryCopy"
+ case operationKindMemoryFill:
+ ret = "MemoryFill"
+ case operationKindTableInit:
+ ret = "TableInit"
+ case operationKindElemDrop:
+ ret = "ElemDrop"
+ case operationKindTableCopy:
+ ret = "TableCopy"
+ case operationKindRefFunc:
+ ret = "RefFunc"
+ case operationKindTableGet:
+ ret = "TableGet"
+ case operationKindTableSet:
+ ret = "TableSet"
+ case operationKindTableSize:
+ ret = "TableSize"
+ case operationKindTableGrow:
+ ret = "TableGrow"
+ case operationKindTableFill:
+ ret = "TableFill"
+ case operationKindV128Const:
+ ret = "ConstV128"
+ case operationKindV128Add:
+ ret = "V128Add"
+ case operationKindV128Sub:
+ ret = "V128Sub"
+ case operationKindV128Load:
+ ret = "V128Load"
+ case operationKindV128LoadLane:
+ ret = "V128LoadLane"
+ case operationKindV128Store:
+ ret = "V128Store"
+ case operationKindV128StoreLane:
+ ret = "V128StoreLane"
+ case operationKindV128ExtractLane:
+ ret = "V128ExtractLane"
+ case operationKindV128ReplaceLane:
+ ret = "V128ReplaceLane"
+ case operationKindV128Splat:
+ ret = "V128Splat"
+ case operationKindV128Shuffle:
+ ret = "V128Shuffle"
+ case operationKindV128Swizzle:
+ ret = "V128Swizzle"
+ case operationKindV128AnyTrue:
+ ret = "V128AnyTrue"
+ case operationKindV128AllTrue:
+ ret = "V128AllTrue"
+ case operationKindV128And:
+ ret = "V128And"
+ case operationKindV128Not:
+ ret = "V128Not"
+ case operationKindV128Or:
+ ret = "V128Or"
+ case operationKindV128Xor:
+ ret = "V128Xor"
+ case operationKindV128Bitselect:
+ ret = "V128Bitselect"
+ case operationKindV128AndNot:
+ ret = "V128AndNot"
+ case operationKindV128BitMask:
+ ret = "V128BitMask"
+ case operationKindV128Shl:
+ ret = "V128Shl"
+ case operationKindV128Shr:
+ ret = "V128Shr"
+ case operationKindV128Cmp:
+ ret = "V128Cmp"
+ case operationKindSignExtend32From8:
+ ret = "SignExtend32From8"
+ case operationKindSignExtend32From16:
+ ret = "SignExtend32From16"
+ case operationKindSignExtend64From8:
+ ret = "SignExtend64From8"
+ case operationKindSignExtend64From16:
+ ret = "SignExtend64From16"
+ case operationKindSignExtend64From32:
+ ret = "SignExtend64From32"
+ case operationKindV128AddSat:
+ ret = "V128AddSat"
+ case operationKindV128SubSat:
+ ret = "V128SubSat"
+ case operationKindV128Mul:
+ ret = "V128Mul"
+ case operationKindV128Div:
+ ret = "V128Div"
+ case operationKindV128Neg:
+ ret = "V128Neg"
+ case operationKindV128Sqrt:
+ ret = "V128Sqrt"
+ case operationKindV128Abs:
+ ret = "V128Abs"
+ case operationKindV128Popcnt:
+ ret = "V128Popcnt"
+ case operationKindV128Min:
+ ret = "V128Min"
+ case operationKindV128Max:
+ ret = "V128Max"
+ case operationKindV128AvgrU:
+ ret = "V128AvgrU"
+ case operationKindV128Ceil:
+ ret = "V128Ceil"
+ case operationKindV128Floor:
+ ret = "V128Floor"
+ case operationKindV128Trunc:
+ ret = "V128Trunc"
+ case operationKindV128Nearest:
+ ret = "V128Nearest"
+ case operationKindV128Pmin:
+ ret = "V128Pmin"
+ case operationKindV128Pmax:
+ ret = "V128Pmax"
+ case operationKindV128Extend:
+ ret = "V128Extend"
+ case operationKindV128ExtMul:
+ ret = "V128ExtMul"
+ case operationKindV128Q15mulrSatS:
+ ret = "V128Q15mulrSatS"
+ case operationKindV128ExtAddPairwise:
+ ret = "V128ExtAddPairwise"
+ case operationKindV128FloatPromote:
+ ret = "V128FloatPromote"
+ case operationKindV128FloatDemote:
+ ret = "V128FloatDemote"
+ case operationKindV128FConvertFromI:
+ ret = "V128FConvertFromI"
+ case operationKindV128Dot:
+ ret = "V128Dot"
+ case operationKindV128Narrow:
+ ret = "V128Narrow"
+ case operationKindV128ITruncSatFromF:
+ ret = "V128ITruncSatFromF"
+ case operationKindBuiltinFunctionCheckExitCode:
+ ret = "BuiltinFunctionCheckExitCode"
+ case operationKindAtomicMemoryWait:
+ ret = "AtomicMemoryWait"
+ case operationKindAtomicMemoryNotify:
+ ret = "AtomicMemoryNotify"
+ case operationKindAtomicFence:
+ ret = "AtomicFence"
+ case operationKindAtomicLoad:
+ ret = "AtomicLoad"
+ case operationKindAtomicLoad8:
+ ret = "AtomicLoad8"
+ case operationKindAtomicLoad16:
+ ret = "AtomicLoad16"
+ case operationKindAtomicStore:
+ ret = "AtomicStore"
+ case operationKindAtomicStore8:
+ ret = "AtomicStore8"
+ case operationKindAtomicStore16:
+ ret = "AtomicStore16"
+ case operationKindAtomicRMW:
+ ret = "AtomicRMW"
+ case operationKindAtomicRMW8:
+ ret = "AtomicRMW8"
+ case operationKindAtomicRMW16:
+ ret = "AtomicRMW16"
+ case operationKindAtomicRMWCmpxchg:
+ ret = "AtomicRMWCmpxchg"
+ case operationKindAtomicRMW8Cmpxchg:
+ ret = "AtomicRMW8Cmpxchg"
+ case operationKindAtomicRMW16Cmpxchg:
+ ret = "AtomicRMW16Cmpxchg"
+ default:
+ panic(fmt.Errorf("unknown operation %d", o))
+ }
+ return
+}
+
+const (
+ // operationKindUnreachable is the Kind for NewOperationUnreachable.
+ operationKindUnreachable operationKind = iota
+ // operationKindLabel is the Kind for NewOperationLabel.
+ operationKindLabel
+ // operationKindBr is the Kind for NewOperationBr.
+ operationKindBr
+ // operationKindBrIf is the Kind for NewOperationBrIf.
+ operationKindBrIf
+ // operationKindBrTable is the Kind for NewOperationBrTable.
+ operationKindBrTable
+ // operationKindCall is the Kind for NewOperationCall.
+ operationKindCall
+ // operationKindCallIndirect is the Kind for NewOperationCallIndirect.
+ operationKindCallIndirect
+ // operationKindDrop is the Kind for NewOperationDrop.
+ operationKindDrop
+ // operationKindSelect is the Kind for NewOperationSelect.
+ operationKindSelect
+ // operationKindPick is the Kind for NewOperationPick.
+ operationKindPick
+ // operationKindSet is the Kind for NewOperationSet.
+ operationKindSet
+ // operationKindGlobalGet is the Kind for NewOperationGlobalGet.
+ operationKindGlobalGet
+ // operationKindGlobalSet is the Kind for NewOperationGlobalSet.
+ operationKindGlobalSet
+ // operationKindLoad is the Kind for NewOperationLoad.
+ operationKindLoad
+ // operationKindLoad8 is the Kind for NewOperationLoad8.
+ operationKindLoad8
+ // operationKindLoad16 is the Kind for NewOperationLoad16.
+ operationKindLoad16
+ // operationKindLoad32 is the Kind for NewOperationLoad32.
+ operationKindLoad32
+ // operationKindStore is the Kind for NewOperationStore.
+ operationKindStore
+ // operationKindStore8 is the Kind for NewOperationStore8.
+ operationKindStore8
+ // operationKindStore16 is the Kind for NewOperationStore16.
+ operationKindStore16
+ // operationKindStore32 is the Kind for NewOperationStore32.
+ operationKindStore32
+ // operationKindMemorySize is the Kind for NewOperationMemorySize.
+ operationKindMemorySize
+ // operationKindMemoryGrow is the Kind for NewOperationMemoryGrow.
+ operationKindMemoryGrow
+ // operationKindConstI32 is the Kind for NewOperationConstI32.
+ operationKindConstI32
+ // operationKindConstI64 is the Kind for NewOperationConstI64.
+ operationKindConstI64
+ // operationKindConstF32 is the Kind for NewOperationConstF32.
+ operationKindConstF32
+ // operationKindConstF64 is the Kind for NewOperationConstF64.
+ operationKindConstF64
+ // operationKindEq is the Kind for NewOperationEq.
+ operationKindEq
+ // operationKindNe is the Kind for NewOperationNe.
+ operationKindNe
+ // operationKindEqz is the Kind for NewOperationEqz.
+ operationKindEqz
+ // operationKindLt is the Kind for NewOperationLt.
+ operationKindLt
+ // operationKindGt is the Kind for NewOperationGt.
+ operationKindGt
+ // operationKindLe is the Kind for NewOperationLe.
+ operationKindLe
+ // operationKindGe is the Kind for NewOperationGe.
+ operationKindGe
+ // operationKindAdd is the Kind for NewOperationAdd.
+ operationKindAdd
+ // operationKindSub is the Kind for NewOperationSub.
+ operationKindSub
+ // operationKindMul is the Kind for NewOperationMul.
+ operationKindMul
+ // operationKindClz is the Kind for NewOperationClz.
+ operationKindClz
+ // operationKindCtz is the Kind for NewOperationCtz.
+ operationKindCtz
+ // operationKindPopcnt is the Kind for NewOperationPopcnt.
+ operationKindPopcnt
+ // operationKindDiv is the Kind for NewOperationDiv.
+ operationKindDiv
+ // operationKindRem is the Kind for NewOperationRem.
+ operationKindRem
+ // operationKindAnd is the Kind for NewOperationAnd.
+ operationKindAnd
+ // operationKindOr is the Kind for NewOperationOr.
+ operationKindOr
+ // operationKindXor is the Kind for NewOperationXor.
+ operationKindXor
+ // operationKindShl is the Kind for NewOperationShl.
+ operationKindShl
+ // operationKindShr is the Kind for NewOperationShr.
+ operationKindShr
+ // operationKindRotl is the Kind for NewOperationRotl.
+ operationKindRotl
+ // operationKindRotr is the Kind for NewOperationRotr.
+ operationKindRotr
+ // operationKindAbs is the Kind for NewOperationAbs.
+ operationKindAbs
+ // operationKindNeg is the Kind for NewOperationNeg.
+ operationKindNeg
+ // operationKindCeil is the Kind for NewOperationCeil.
+ operationKindCeil
+ // operationKindFloor is the Kind for NewOperationFloor.
+ operationKindFloor
+ // operationKindTrunc is the Kind for NewOperationTrunc.
+ operationKindTrunc
+ // operationKindNearest is the Kind for NewOperationNearest.
+ operationKindNearest
+ // operationKindSqrt is the Kind for NewOperationSqrt.
+ operationKindSqrt
+ // operationKindMin is the Kind for NewOperationMin.
+ operationKindMin
+ // operationKindMax is the Kind for NewOperationMax.
+ operationKindMax
+ // operationKindCopysign is the Kind for NewOperationCopysign.
+ operationKindCopysign
+ // operationKindI32WrapFromI64 is the Kind for NewOperationI32WrapFromI64.
+ operationKindI32WrapFromI64
+ // operationKindITruncFromF is the Kind for NewOperationITruncFromF.
+ operationKindITruncFromF
+ // operationKindFConvertFromI is the Kind for NewOperationFConvertFromI.
+ operationKindFConvertFromI
+ // operationKindF32DemoteFromF64 is the Kind for NewOperationF32DemoteFromF64.
+ operationKindF32DemoteFromF64
+ // operationKindF64PromoteFromF32 is the Kind for NewOperationF64PromoteFromF32.
+ operationKindF64PromoteFromF32
+ // operationKindI32ReinterpretFromF32 is the Kind for NewOperationI32ReinterpretFromF32.
+ operationKindI32ReinterpretFromF32
+ // operationKindI64ReinterpretFromF64 is the Kind for NewOperationI64ReinterpretFromF64.
+ operationKindI64ReinterpretFromF64
+ // operationKindF32ReinterpretFromI32 is the Kind for NewOperationF32ReinterpretFromI32.
+ operationKindF32ReinterpretFromI32
+ // operationKindF64ReinterpretFromI64 is the Kind for NewOperationF64ReinterpretFromI64.
+ operationKindF64ReinterpretFromI64
+ // operationKindExtend is the Kind for NewOperationExtend.
+ operationKindExtend
+ // operationKindSignExtend32From8 is the Kind for NewOperationSignExtend32From8.
+ operationKindSignExtend32From8
+ // operationKindSignExtend32From16 is the Kind for NewOperationSignExtend32From16.
+ operationKindSignExtend32From16
+ // operationKindSignExtend64From8 is the Kind for NewOperationSignExtend64From8.
+ operationKindSignExtend64From8
+ // operationKindSignExtend64From16 is the Kind for NewOperationSignExtend64From16.
+ operationKindSignExtend64From16
+ // operationKindSignExtend64From32 is the Kind for NewOperationSignExtend64From32.
+ operationKindSignExtend64From32
+ // operationKindMemoryInit is the Kind for NewOperationMemoryInit.
+ operationKindMemoryInit
+ // operationKindDataDrop is the Kind for NewOperationDataDrop.
+ operationKindDataDrop
+ // operationKindMemoryCopy is the Kind for NewOperationMemoryCopy.
+ operationKindMemoryCopy
+ // operationKindMemoryFill is the Kind for NewOperationMemoryFill.
+ operationKindMemoryFill
+ // operationKindTableInit is the Kind for NewOperationTableInit.
+ operationKindTableInit
+ // operationKindElemDrop is the Kind for NewOperationElemDrop.
+ operationKindElemDrop
+ // operationKindTableCopy is the Kind for NewOperationTableCopy.
+ operationKindTableCopy
+ // operationKindRefFunc is the Kind for NewOperationRefFunc.
+ operationKindRefFunc
+ // operationKindTableGet is the Kind for NewOperationTableGet.
+ operationKindTableGet
+ // operationKindTableSet is the Kind for NewOperationTableSet.
+ operationKindTableSet
+ // operationKindTableSize is the Kind for NewOperationTableSize.
+ operationKindTableSize
+ // operationKindTableGrow is the Kind for NewOperationTableGrow.
+ operationKindTableGrow
+ // operationKindTableFill is the Kind for NewOperationTableFill.
+ operationKindTableFill
+
+ // Vector value related instructions are prefixed by V128.
+
+ // operationKindV128Const is the Kind for NewOperationV128Const.
+ operationKindV128Const
+ // operationKindV128Add is the Kind for NewOperationV128Add.
+ operationKindV128Add
+ // operationKindV128Sub is the Kind for NewOperationV128Sub.
+ operationKindV128Sub
+ // operationKindV128Load is the Kind for NewOperationV128Load.
+ operationKindV128Load
+ // operationKindV128LoadLane is the Kind for NewOperationV128LoadLane.
+ operationKindV128LoadLane
+ // operationKindV128Store is the Kind for NewOperationV128Store.
+ operationKindV128Store
+ // operationKindV128StoreLane is the Kind for NewOperationV128StoreLane.
+ operationKindV128StoreLane
+ // operationKindV128ExtractLane is the Kind for NewOperationV128ExtractLane.
+ operationKindV128ExtractLane
+ // operationKindV128ReplaceLane is the Kind for NewOperationV128ReplaceLane.
+ operationKindV128ReplaceLane
+ // operationKindV128Splat is the Kind for NewOperationV128Splat.
+ operationKindV128Splat
+ // operationKindV128Shuffle is the Kind for NewOperationV128Shuffle.
+ operationKindV128Shuffle
+ // operationKindV128Swizzle is the Kind for NewOperationV128Swizzle.
+ operationKindV128Swizzle
+ // operationKindV128AnyTrue is the Kind for NewOperationV128AnyTrue.
+ operationKindV128AnyTrue
+ // operationKindV128AllTrue is the Kind for NewOperationV128AllTrue.
+ operationKindV128AllTrue
+ // operationKindV128BitMask is the Kind for NewOperationV128BitMask.
+ operationKindV128BitMask
+ // operationKindV128And is the Kind for NewOperationV128And.
+ operationKindV128And
+ // operationKindV128Not is the Kind for NewOperationV128Not.
+ operationKindV128Not
+ // operationKindV128Or is the Kind for NewOperationV128Or.
+ operationKindV128Or
+ // operationKindV128Xor is the Kind for NewOperationV128Xor.
+ operationKindV128Xor
+ // operationKindV128Bitselect is the Kind for NewOperationV128Bitselect.
+ operationKindV128Bitselect
+ // operationKindV128AndNot is the Kind for NewOperationV128AndNot.
+ operationKindV128AndNot
+ // operationKindV128Shl is the Kind for NewOperationV128Shl.
+ operationKindV128Shl
+ // operationKindV128Shr is the Kind for NewOperationV128Shr.
+ operationKindV128Shr
+ // operationKindV128Cmp is the Kind for NewOperationV128Cmp.
+ operationKindV128Cmp
+ // operationKindV128AddSat is the Kind for NewOperationV128AddSat.
+ operationKindV128AddSat
+ // operationKindV128SubSat is the Kind for NewOperationV128SubSat.
+ operationKindV128SubSat
+ // operationKindV128Mul is the Kind for NewOperationV128Mul.
+ operationKindV128Mul
+ // operationKindV128Div is the Kind for NewOperationV128Div.
+ operationKindV128Div
+ // operationKindV128Neg is the Kind for NewOperationV128Neg.
+ operationKindV128Neg
+ // operationKindV128Sqrt is the Kind for NewOperationV128Sqrt.
+ operationKindV128Sqrt
+ // operationKindV128Abs is the Kind for NewOperationV128Abs.
+ operationKindV128Abs
+ // operationKindV128Popcnt is the Kind for NewOperationV128Popcnt.
+ operationKindV128Popcnt
+ // operationKindV128Min is the Kind for NewOperationV128Min.
+ operationKindV128Min
+ // operationKindV128Max is the Kind for NewOperationV128Max.
+ operationKindV128Max
+ // operationKindV128AvgrU is the Kind for NewOperationV128AvgrU.
+ operationKindV128AvgrU
+ // operationKindV128Pmin is the Kind for NewOperationV128Pmin.
+ operationKindV128Pmin
+ // operationKindV128Pmax is the Kind for NewOperationV128Pmax.
+ operationKindV128Pmax
+ // operationKindV128Ceil is the Kind for NewOperationV128Ceil.
+ operationKindV128Ceil
+ // operationKindV128Floor is the Kind for NewOperationV128Floor.
+ operationKindV128Floor
+ // operationKindV128Trunc is the Kind for NewOperationV128Trunc.
+ operationKindV128Trunc
+ // operationKindV128Nearest is the Kind for NewOperationV128Nearest.
+ operationKindV128Nearest
+ // operationKindV128Extend is the Kind for NewOperationV128Extend.
+ operationKindV128Extend
+ // operationKindV128ExtMul is the Kind for NewOperationV128ExtMul.
+ operationKindV128ExtMul
+ // operationKindV128Q15mulrSatS is the Kind for NewOperationV128Q15mulrSatS.
+ operationKindV128Q15mulrSatS
+ // operationKindV128ExtAddPairwise is the Kind for NewOperationV128ExtAddPairwise.
+ operationKindV128ExtAddPairwise
+ // operationKindV128FloatPromote is the Kind for NewOperationV128FloatPromote.
+ operationKindV128FloatPromote
+ // operationKindV128FloatDemote is the Kind for NewOperationV128FloatDemote.
+ operationKindV128FloatDemote
+ // operationKindV128FConvertFromI is the Kind for NewOperationV128FConvertFromI.
+ operationKindV128FConvertFromI
+ // operationKindV128Dot is the Kind for NewOperationV128Dot.
+ operationKindV128Dot
+ // operationKindV128Narrow is the Kind for NewOperationV128Narrow.
+ operationKindV128Narrow
+ // operationKindV128ITruncSatFromF is the Kind for NewOperationV128ITruncSatFromF.
+ operationKindV128ITruncSatFromF
+
+ // operationKindBuiltinFunctionCheckExitCode is the Kind for NewOperationBuiltinFunctionCheckExitCode.
+ operationKindBuiltinFunctionCheckExitCode
+
+ // operationKindAtomicMemoryWait is the kind for NewOperationAtomicMemoryWait.
+ operationKindAtomicMemoryWait
+ // operationKindAtomicMemoryNotify is the kind for NewOperationAtomicMemoryNotify.
+ operationKindAtomicMemoryNotify
+ // operationKindAtomicFence is the kind for NewOperationAtomicFence.
+ operationKindAtomicFence
+ // operationKindAtomicLoad is the kind for NewOperationAtomicLoad.
+ operationKindAtomicLoad
+ // operationKindAtomicLoad8 is the kind for NewOperationAtomicLoad8.
+ operationKindAtomicLoad8
+ // operationKindAtomicLoad16 is the kind for NewOperationAtomicLoad16.
+ operationKindAtomicLoad16
+ // operationKindAtomicStore is the kind for NewOperationAtomicStore.
+ operationKindAtomicStore
+ // operationKindAtomicStore8 is the kind for NewOperationAtomicStore8.
+ operationKindAtomicStore8
+ // operationKindAtomicStore16 is the kind for NewOperationAtomicStore16.
+ operationKindAtomicStore16
+
+ // operationKindAtomicRMW is the kind for NewOperationAtomicRMW.
+ operationKindAtomicRMW
+ // operationKindAtomicRMW8 is the kind for NewOperationAtomicRMW8.
+ operationKindAtomicRMW8
+ // operationKindAtomicRMW16 is the kind for NewOperationAtomicRMW16.
+ operationKindAtomicRMW16
+
+ // operationKindAtomicRMWCmpxchg is the kind for NewOperationAtomicRMWCmpxchg.
+ operationKindAtomicRMWCmpxchg
+ // operationKindAtomicRMW8Cmpxchg is the kind for NewOperationAtomicRMW8Cmpxchg.
+ operationKindAtomicRMW8Cmpxchg
+ // operationKindAtomicRMW16Cmpxchg is the kind for NewOperationAtomicRMW16Cmpxchg.
+ operationKindAtomicRMW16Cmpxchg
+
+ // operationKindEnd is always placed at the bottom of this iota definition to be used in tests.
+ operationKindEnd
+)
+
+// NewOperationBuiltinFunctionCheckExitCode is a constructor for unionOperation with Kind operationKindBuiltinFunctionCheckExitCode.
+//
+// OperationBuiltinFunctionCheckExitCode corresponds to the instruction that checks whether the api.Module has
+// already been closed due to context.DeadlineExceeded, context.Canceled, or an explicit call to CloseWithExitCode on the api.Module.
+func newOperationBuiltinFunctionCheckExitCode() unionOperation {
+ return unionOperation{Kind: operationKindBuiltinFunctionCheckExitCode}
+}
+
+// label is the unique identifier for each block in a single function in interpreterir,
+// where a "block" consists of multiple operations and must end with a branching operation
+// (e.g. operationKindBr or operationKindBrIf).
+type label uint64
+
+// Kind returns the labelKind encoded in this label.
+func (l label) Kind() labelKind {
+ return labelKind(uint32(l))
+}
+
+// FrameID returns the frame id encoded in this label.
+func (l label) FrameID() int {
+ return int(uint32(l >> 32))
+}
+
+// NewLabel is a constructor for a label.
+func newLabel(kind labelKind, frameID uint32) label {
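+ // e.g. newLabel(labelKindContinuation, 3) packs the kind into the low 32 bits
+ // and the frame ID into the high 32 bits, so Kind() and FrameID() recover them.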
+ return label(kind) | label(frameID)<<32
+}
+
+// String implements fmt.Stringer.
+func (l label) String() (ret string) {
+ frameID := l.FrameID()
+ switch l.Kind() {
+ case labelKindHeader:
+ ret = fmt.Sprintf(".L%d", frameID)
+ case labelKindElse:
+ ret = fmt.Sprintf(".L%d_else", frameID)
+ case labelKindContinuation:
+ ret = fmt.Sprintf(".L%d_cont", frameID)
+ case labelKindReturn:
+ return ".return"
+ }
+ return
+}
+
+func (l label) IsReturnTarget() bool {
+ return l.Kind() == labelKindReturn
+}
+
+// labelKind is the Kind of the label.
+type labelKind = byte
+
+const (
+ // labelKindHeader is the header for various blocks. For example, the "then" block of
+ // wasm.OpcodeIfName in Wasm has the label of this Kind.
+ labelKindHeader labelKind = iota
+ // labelKindElse is the Kind of label for "else" block of wasm.OpcodeIfName in Wasm.
+ labelKindElse
+ // labelKindContinuation is the Kind of label which is the continuation of blocks.
+ // For example, for wasm text like
+ // (func
+ // ....
+ // (if (local.get 0) (then (nop)) (else (nop)))
+ // return
+ // )
+ // we have the continuation block (of the if-block) corresponding to the "return" opcode.
+ labelKindContinuation
+ labelKindReturn
+ labelKindNum
+)
+
+// unionOperation implements Operation and is the compilation (engine.lowerIR) result of an interpreterir.Operation.
+//
+// Not all operations result in a unionOperation, e.g. interpreterir.OperationI32ReinterpretFromF32, and some operations are
+// more complex than others, e.g. interpreterir.NewOperationBrTable.
+//
+// Note: This is a form of union type as it can store fields needed for any operation. Hence, most fields are opaque and
+// only relevant in the context of its Kind.
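+//
+// For example, newOperationBrIf below stores its "then"/"else" labels in U1 and U2,
+// and the values-to-drop range in U3, while vector operations such as V128Add use B1
+// for the lane shape.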
+type unionOperation struct {
+ // Kind determines how to interpret the other fields in this struct.
+ Kind operationKind
+ B1, B2 byte
+ B3 bool
+ U1, U2 uint64
+ U3 uint64
+ Us []uint64
+}
+
+// String implements fmt.Stringer.
+func (o unionOperation) String() string {
+ switch o.Kind {
+ case operationKindUnreachable,
+ operationKindSelect,
+ operationKindMemorySize,
+ operationKindMemoryGrow,
+ operationKindI32WrapFromI64,
+ operationKindF32DemoteFromF64,
+ operationKindF64PromoteFromF32,
+ operationKindI32ReinterpretFromF32,
+ operationKindI64ReinterpretFromF64,
+ operationKindF32ReinterpretFromI32,
+ operationKindF64ReinterpretFromI64,
+ operationKindSignExtend32From8,
+ operationKindSignExtend32From16,
+ operationKindSignExtend64From8,
+ operationKindSignExtend64From16,
+ operationKindSignExtend64From32,
+ operationKindMemoryInit,
+ operationKindDataDrop,
+ operationKindMemoryCopy,
+ operationKindMemoryFill,
+ operationKindTableInit,
+ operationKindElemDrop,
+ operationKindTableCopy,
+ operationKindRefFunc,
+ operationKindTableGet,
+ operationKindTableSet,
+ operationKindTableSize,
+ operationKindTableGrow,
+ operationKindTableFill,
+ operationKindBuiltinFunctionCheckExitCode:
+ return o.Kind.String()
+
+ case operationKindCall,
+ operationKindGlobalGet,
+ operationKindGlobalSet:
+ return fmt.Sprintf("%s %d", o.Kind, o.B1)
+
+ case operationKindLabel:
+ return label(o.U1).String()
+
+ case operationKindBr:
+ return fmt.Sprintf("%s %s", o.Kind, label(o.U1).String())
+
+ case operationKindBrIf:
+ thenTarget := label(o.U1)
+ elseTarget := label(o.U2)
+ return fmt.Sprintf("%s %s, %s", o.Kind, thenTarget, elseTarget)
+
+ case operationKindBrTable:
+ var targets []string
+ var defaultLabel label
+ if len(o.Us) > 0 {
+ targets = make([]string, len(o.Us)-1)
+ for i, t := range o.Us[1:] {
+ targets[i] = label(t).String()
+ }
+ defaultLabel = label(o.Us[0])
+ }
+ return fmt.Sprintf("%s [%s] %s", o.Kind, strings.Join(targets, ","), defaultLabel)
+
+ case operationKindCallIndirect:
+ return fmt.Sprintf("%s: type=%d, table=%d", o.Kind, o.U1, o.U2)
+
+ case operationKindDrop:
+ start := int64(o.U1)
+ end := int64(o.U2)
+ return fmt.Sprintf("%s %d..%d", o.Kind, start, end)
+
+ case operationKindPick, operationKindSet:
+ return fmt.Sprintf("%s %d (is_vector=%v)", o.Kind, o.U1, o.B3)
+
+ case operationKindLoad, operationKindStore:
+ return fmt.Sprintf("%s.%s (align=%d, offset=%d)", unsignedType(o.B1), o.Kind, o.U1, o.U2)
+
+ case operationKindLoad8,
+ operationKindLoad16:
+ return fmt.Sprintf("%s.%s (align=%d, offset=%d)", signedType(o.B1), o.Kind, o.U1, o.U2)
+
+ case operationKindStore8,
+ operationKindStore16,
+ operationKindStore32:
+ return fmt.Sprintf("%s (align=%d, offset=%d)", o.Kind, o.U1, o.U2)
+
+ case operationKindLoad32:
+ var t string
+ if o.B1 == 1 {
+ t = "i64"
+ } else {
+ t = "u64"
+ }
+ return fmt.Sprintf("%s.%s (align=%d, offset=%d)", t, o.Kind, o.U1, o.U2)
+
+ case operationKindEq,
+ operationKindNe,
+ operationKindAdd,
+ operationKindSub,
+ operationKindMul:
+ return fmt.Sprintf("%s.%s", unsignedType(o.B1), o.Kind)
+
+ case operationKindEqz,
+ operationKindClz,
+ operationKindCtz,
+ operationKindPopcnt,
+ operationKindAnd,
+ operationKindOr,
+ operationKindXor,
+ operationKindShl,
+ operationKindRotl,
+ operationKindRotr:
+ return fmt.Sprintf("%s.%s", unsignedInt(o.B1), o.Kind)
+
+ case operationKindRem, operationKindShr:
+ return fmt.Sprintf("%s.%s", signedInt(o.B1), o.Kind)
+
+ case operationKindLt,
+ operationKindGt,
+ operationKindLe,
+ operationKindGe,
+ operationKindDiv:
+ return fmt.Sprintf("%s.%s", signedType(o.B1), o.Kind)
+
+ case operationKindAbs,
+ operationKindNeg,
+ operationKindCeil,
+ operationKindFloor,
+ operationKindTrunc,
+ operationKindNearest,
+ operationKindSqrt,
+ operationKindMin,
+ operationKindMax,
+ operationKindCopysign:
+ return fmt.Sprintf("%s.%s", float(o.B1), o.Kind)
+
+ case operationKindConstI32,
+ operationKindConstI64:
+ return fmt.Sprintf("%s %#x", o.Kind, o.U1)
+
+ case operationKindConstF32:
+ return fmt.Sprintf("%s %f", o.Kind, math.Float32frombits(uint32(o.U1)))
+ case operationKindConstF64:
+ return fmt.Sprintf("%s %f", o.Kind, math.Float64frombits(o.U1))
+
+ case operationKindITruncFromF:
+ return fmt.Sprintf("%s.%s.%s (non_trapping=%v)", signedInt(o.B2), o.Kind, float(o.B1), o.B3)
+ case operationKindFConvertFromI:
+ return fmt.Sprintf("%s.%s.%s", float(o.B2), o.Kind, signedInt(o.B1))
+ case operationKindExtend:
+ var in, out string
+ if o.B3 {
+ in = "i32"
+ out = "i64"
+ } else {
+ in = "u32"
+ out = "u64"
+ }
+ return fmt.Sprintf("%s.%s.%s", out, o.Kind, in)
+
+ case operationKindV128Const:
+ return fmt.Sprintf("%s [%#x, %#x]", o.Kind, o.U1, o.U2)
+ case operationKindV128Add,
+ operationKindV128Sub:
+ return fmt.Sprintf("%s (shape=%s)", o.Kind, shapeName(o.B1))
+ case operationKindV128Load,
+ operationKindV128LoadLane,
+ operationKindV128Store,
+ operationKindV128StoreLane,
+ operationKindV128ExtractLane,
+ operationKindV128ReplaceLane,
+ operationKindV128Splat,
+ operationKindV128Shuffle,
+ operationKindV128Swizzle,
+ operationKindV128AnyTrue,
+ operationKindV128AllTrue,
+ operationKindV128BitMask,
+ operationKindV128And,
+ operationKindV128Not,
+ operationKindV128Or,
+ operationKindV128Xor,
+ operationKindV128Bitselect,
+ operationKindV128AndNot,
+ operationKindV128Shl,
+ operationKindV128Shr,
+ operationKindV128Cmp,
+ operationKindV128AddSat,
+ operationKindV128SubSat,
+ operationKindV128Mul,
+ operationKindV128Div,
+ operationKindV128Neg,
+ operationKindV128Sqrt,
+ operationKindV128Abs,
+ operationKindV128Popcnt,
+ operationKindV128Min,
+ operationKindV128Max,
+ operationKindV128AvgrU,
+ operationKindV128Pmin,
+ operationKindV128Pmax,
+ operationKindV128Ceil,
+ operationKindV128Floor,
+ operationKindV128Trunc,
+ operationKindV128Nearest,
+ operationKindV128Extend,
+ operationKindV128ExtMul,
+ operationKindV128Q15mulrSatS,
+ operationKindV128ExtAddPairwise,
+ operationKindV128FloatPromote,
+ operationKindV128FloatDemote,
+ operationKindV128FConvertFromI,
+ operationKindV128Dot,
+ operationKindV128Narrow:
+ return o.Kind.String()
+
+ case operationKindV128ITruncSatFromF:
+ if o.B3 {
+ return fmt.Sprintf("%s.%sS", o.Kind, shapeName(o.B1))
+ } else {
+ return fmt.Sprintf("%s.%sU", o.Kind, shapeName(o.B1))
+ }
+
+ case operationKindAtomicMemoryWait,
+ operationKindAtomicMemoryNotify,
+ operationKindAtomicFence,
+ operationKindAtomicLoad,
+ operationKindAtomicLoad8,
+ operationKindAtomicLoad16,
+ operationKindAtomicStore,
+ operationKindAtomicStore8,
+ operationKindAtomicStore16,
+ operationKindAtomicRMW,
+ operationKindAtomicRMW8,
+ operationKindAtomicRMW16,
+ operationKindAtomicRMWCmpxchg,
+ operationKindAtomicRMW8Cmpxchg,
+ operationKindAtomicRMW16Cmpxchg:
+ return o.Kind.String()
+
+ default:
+ panic(fmt.Sprintf("TODO: %v", o.Kind))
+ }
+}
+
+// NewOperationUnreachable is a constructor for unionOperation with operationKindUnreachable
+//
+// This corresponds to wasm.OpcodeUnreachable.
+//
+// The engines are expected to exit the execution with wasmruntime.ErrRuntimeUnreachable error.
+func newOperationUnreachable() unionOperation {
+ return unionOperation{Kind: operationKindUnreachable}
+}
+
+// NewOperationLabel is a constructor for unionOperation with operationKindLabel.
+//
+// This is used to inform the engines of the beginning of a label.
+func newOperationLabel(label label) unionOperation {
+ return unionOperation{Kind: operationKindLabel, U1: uint64(label)}
+}
+
+// NewOperationBr is a constructor for unionOperation with operationKindBr.
+//
+// The engines are expected to branch into U1 label.
+func newOperationBr(target label) unionOperation {
+ return unionOperation{Kind: operationKindBr, U1: uint64(target)}
+}
+
+// NewOperationBrIf is a constructor for unionOperation with operationKindBrIf.
+//
+// The engines are expected to pop a value and branch into the U1 label if the value is non-zero.
+// Otherwise, the code branches into the U2 label.
+func newOperationBrIf(thenTarget, elseTarget label, thenDrop inclusiveRange) unionOperation {
+ return unionOperation{
+ Kind: operationKindBrIf,
+ U1: uint64(thenTarget),
+ U2: uint64(elseTarget),
+ U3: thenDrop.AsU64(),
+ }
+}
+
+// NewOperationBrTable is a constructor for unionOperation with operationKindBrTable.
+//
+// This corresponds to wasm.OpcodeBrTableName except that the labels
+// here are interpreterir-level labels, not Wasm ones.
+//
+// The engines are expected to do the br_table operation based on the default target and the
+// branch targets encoded in Us. More precisely, this pops a value from the stack (called "index")
+// and decides which branch we go into next based on the value.
+//
+// For example, assume we have operations like {default: L_DEFAULT, targets: [L0, L1, L2]}.
+// If "index" >= len(targets), then branch into the L_DEFAULT label.
+// Otherwise, we enter the label of targets[index].
+func newOperationBrTable(targetLabelsAndRanges []uint64) unionOperation {
+ return unionOperation{
+ Kind: operationKindBrTable,
+ Us: targetLabelsAndRanges,
+ }
+}
+
+// NewOperationCall is a constructor for unionOperation with operationKindCall.
+//
+// This corresponds to wasm.OpcodeCallName, and engines are expected to
+// enter into a function whose index equals OperationCall.FunctionIndex.
+func newOperationCall(functionIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindCall, U1: uint64(functionIndex)}
+}
+
+// NewOperationCallIndirect is a constructor for unionOperation with operationKindCallIndirect.
+//
+// This corresponds to wasm.OpcodeCallIndirectName, and engines are expected to
+// consume one value from the top of the stack (called "offset"),
+// and make a function call against the function whose function address equals
+// Tables[OperationCallIndirect.TableIndex][offset].
+//
+// Note: This is called indirect function call in the sense that the target function is indirectly
+// determined by the current state (top value) of the stack.
+// Therefore, two checks are performed at runtime before entering the target function:
+// 1) whether "offset" exceeds the length of table Tables[OperationCallIndirect.TableIndex].
+// 2) whether the type of the function table[offset] matches the function type specified by OperationCallIndirect.TypeIndex.
+func newOperationCallIndirect(typeIndex, tableIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindCallIndirect, U1: uint64(typeIndex), U2: uint64(tableIndex)}
+}
+
+// inclusiveRange is the range which spans across the value stack starting from the top to the bottom, and
+// both boundaries are included in the range.
+type inclusiveRange struct {
+ Start, End int32
+}
+
+// AsU64 is used to convert inclusiveRange to uint64 so that it can be stored in unionOperation.
+func (i inclusiveRange) AsU64() uint64 {
+ return uint64(uint32(i.Start))<<32 | uint64(uint32(i.End))
+}
+
+// inclusiveRangeFromU64 retrieves inclusiveRange from the given uint64 which is stored in unionOperation.
+func inclusiveRangeFromU64(v uint64) inclusiveRange {
+ return inclusiveRange{
+ Start: int32(uint32(v >> 32)),
+ End: int32(uint32(v)),
+ }
+}
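+
+// For example, inclusiveRange{Start: 1, End: 3}.AsU64() stores Start in the high
+// 32 bits and End in the low 32 bits; inclusiveRangeFromU64 reverses the packing,
+// so negative fields such as nopinclusiveRange's -1 round-trip intact.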
+
+// nopinclusiveRange is the inclusiveRange that corresponds to a no-operation.
+var nopinclusiveRange = inclusiveRange{Start: -1, End: -1}
+
+// NewOperationDrop is a constructor for unionOperation with operationKindDrop.
+//
+// The engines are expected to discard the values in the range selected by depth, counted
+// from the top of the stack toward the bottom.
+//
+// depth is the inclusiveRange of the uint64 value stack to be dropped by this operation at runtime.
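+//
+// For example, newOperationDrop(inclusiveRange{Start: 0, End: 1}) drops the two
+// topmost values, while newOperationDrop(nopinclusiveRange) drops nothing.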
+func newOperationDrop(depth inclusiveRange) unionOperation {
+ return unionOperation{Kind: operationKindDrop, U1: depth.AsU64()}
+}
+
+// NewOperationSelect is a constructor for unionOperation with operationKindSelect.
+//
+// This corresponds to wasm.OpcodeSelect.
+//
+// The engines are expected to pop three values, say [..., x2, x1, c]; if the value "c" equals zero,
+// "x1" is pushed back onto the stack, and otherwise "x2" is pushed back.
+//
+// isTargetVector is true if the selection target value's type is wasm.ValueTypeV128.
+func newOperationSelect(isTargetVector bool) unionOperation {
+ return unionOperation{Kind: operationKindSelect, B3: isTargetVector}
+}
+
+// NewOperationPick is a constructor for unionOperation with operationKindPick.
+//
+// The engines are expected to copy a value pointed by depth, and push the
+// copied value onto the top of the stack.
+//
+// depth is the location of the pick target in the uint64 value stack at runtime.
+// If isTargetVector=true, this points to the location of the lower 64-bits of the vector.
+func newOperationPick(depth int, isTargetVector bool) unionOperation {
+ return unionOperation{Kind: operationKindPick, U1: uint64(depth), B3: isTargetVector}
+}
+
+// NewOperationSet is a constructor for unionOperation with operationKindSet.
+//
+// The engines are expected to set the top value of the stack to the location specified by
+// depth.
+//
+// depth is the location of the set target in the uint64 value stack at runtime.
+// If isTargetVector=true, this points to the location of the lower 64-bits of the vector.
+func newOperationSet(depth int, isTargetVector bool) unionOperation {
+ return unionOperation{Kind: operationKindSet, U1: uint64(depth), B3: isTargetVector}
+}
+
+// NewOperationGlobalGet is a constructor for unionOperation with operationKindGlobalGet.
+//
+// The engines are expected to read the global value specified by OperationGlobalGet.Index,
+// and push the copy of the value onto the stack.
+//
+// See wasm.OpcodeGlobalGet.
+func newOperationGlobalGet(index uint32) unionOperation {
+ return unionOperation{Kind: operationKindGlobalGet, U1: uint64(index)}
+}
+
+// NewOperationGlobalSet is a constructor for unionOperation with operationKindGlobalSet.
+//
+// The engines are expected to consume the value from the top of the stack,
+// and write the value into the global specified by OperationGlobalSet.Index.
+//
+// See wasm.OpcodeGlobalSet.
+func newOperationGlobalSet(index uint32) unionOperation {
+ return unionOperation{Kind: operationKindGlobalSet, U1: uint64(index)}
+}
+
+// memoryArg is the "memarg" to all memory instructions.
+//
+// See https://www.w3.org/TR/2019/REC-wasm-core-1-20191205/#memory-instructions%E2%91%A0
+type memoryArg struct {
+ // Alignment is the expected alignment (expressed as the exponent of a power of 2). Defaults to the natural alignment.
+ //
+ // "Natural alignment" is defined here as the smallest power of two that can hold the size of the value type. For
+ // example, wasm.ValueTypeI64 is encoded in 8 little-endian bytes; 2^3 = 8, so the natural alignment exponent is three.
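+ // Similarly, an i32 access with the alignment omitted in the source has
+ // Alignment == 2, since 2^2 == 4 bytes is the natural alignment of a 32-bit value.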
+ Alignment uint32
+
+ // Offset is the address offset added to the instruction's dynamic address operand, yielding a 33-bit effective
+ // address that is the zero-based index at which the memory is accessed. Defaults to zero.
+ Offset uint32
+}
+
+// NewOperationLoad is a constructor for unionOperation with operationKindLoad.
+//
+// This corresponds to wasm.OpcodeI32LoadName wasm.OpcodeI64LoadName wasm.OpcodeF32LoadName and wasm.OpcodeF64LoadName.
+//
+// The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary,
+// otherwise load the corresponding value following the semantics of the corresponding WebAssembly instruction.
+func newOperationLoad(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindLoad, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// NewOperationLoad8 is a constructor for unionOperation with operationKindLoad8.
+//
+// This corresponds to wasm.OpcodeI32Load8SName wasm.OpcodeI32Load8UName wasm.OpcodeI64Load8SName wasm.OpcodeI64Load8UName.
+//
+// The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary,
+// otherwise load the corresponding value following the semantics of the corresponding WebAssembly instruction.
+func newOperationLoad8(signedInt signedInt, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindLoad8, B1: byte(signedInt), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// NewOperationLoad16 is a constructor for unionOperation with operationKindLoad16.
+//
+// This corresponds to wasm.OpcodeI32Load16SName wasm.OpcodeI32Load16UName wasm.OpcodeI64Load16SName wasm.OpcodeI64Load16UName.
+//
+// The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary,
+// otherwise load the corresponding value following the semantics of the corresponding WebAssembly instruction.
+func newOperationLoad16(signedInt signedInt, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindLoad16, B1: byte(signedInt), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// NewOperationLoad32 is a constructor for unionOperation with operationKindLoad32.
+//
+// This corresponds to wasm.OpcodeI64Load32SName wasm.OpcodeI64Load32UName.
+//
+// The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary,
+// otherwise load the corresponding value following the semantics of the corresponding WebAssembly instruction.
+func newOperationLoad32(signed bool, arg memoryArg) unionOperation {
+ sigB := byte(0)
+ if signed {
+ sigB = 1
+ }
+ return unionOperation{Kind: operationKindLoad32, B1: sigB, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// NewOperationStore is a constructor for unionOperation with operationKindStore.
+//
+// This corresponds to wasm.OpcodeI32StoreName wasm.OpcodeI64StoreName wasm.OpcodeF32StoreName wasm.OpcodeF64StoreName
+//
+// The engines are expected to check the boundary of memory length, and exit the execution if this exceeds the boundary,
+// otherwise store the corresponding value following the semantics of the corresponding WebAssembly instruction.
+func newOperationStore(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindStore, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// NewOperationStore8 is a constructor for unionOperation with operationKindStore8.
+//
+// This corresponds to wasm.OpcodeI32Store8Name wasm.OpcodeI64Store8Name.
+//
+// The engines are expected to check the memory bounds, and exit the execution if the access exceeds them,
+// otherwise store the value following the semantics of the corresponding WebAssembly instruction.
+func newOperationStore8(arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindStore8, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// NewOperationStore16 is a constructor for unionOperation with operationKindStore16.
+//
+// This corresponds to wasm.OpcodeI32Store16Name wasm.OpcodeI64Store16Name.
+//
+// The engines are expected to check the memory bounds, and exit the execution if the access exceeds them,
+// otherwise store the value following the semantics of the corresponding WebAssembly instruction.
+func newOperationStore16(arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindStore16, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// NewOperationStore32 is a constructor for unionOperation with operationKindStore32.
+//
+// This corresponds to wasm.OpcodeI64Store32Name.
+//
+// The engines are expected to check the memory bounds, and exit the execution if the access exceeds them,
+// otherwise store the value following the semantics of the corresponding WebAssembly instruction.
+func newOperationStore32(arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindStore32, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// NewOperationMemorySize is a constructor for unionOperation with operationKindMemorySize.
+//
+// This corresponds to wasm.OpcodeMemorySize.
+//
+// The engines are expected to push the current size of the memory, in pages, onto the stack.
+func newOperationMemorySize() unionOperation {
+ return unionOperation{Kind: operationKindMemorySize}
+}
+
+// NewOperationMemoryGrow is a constructor for unionOperation with operationKindMemoryGrow.
+//
+// This corresponds to wasm.OpcodeMemoryGrow.
+//
+// The engines are expected to pop one value from the top of the stack, then
+// execute wasm.MemoryInstance Grow with the value, and push the previous
+// size of the memory, in pages, onto the stack.
+func newOperationMemoryGrow() unionOperation {
+ return unionOperation{Kind: operationKindMemoryGrow}
+}
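+
+// For illustration only, not wazero's actual implementation: a sketch of the
+// expected stack effect, assuming hypothetical pop/push helpers over a uint64
+// value stack and that Grow returns the previous page count plus an ok flag:
+//
+//	delta := uint32(pop()) // requested number of pages
+//	if prev, ok := mem.Grow(delta); ok {
+//		push(uint64(prev)) // previous size in pages
+//	} else {
+//		push(uint64(0xffffffff)) // i32 -1 signals failure
+//	}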
+
+// NewOperationConstI32 is a constructor for unionOperation with OperationConstI32.
+//
+// This corresponds to wasm.OpcodeI32Const.
+func newOperationConstI32(value uint32) unionOperation {
+ return unionOperation{Kind: operationKindConstI32, U1: uint64(value)}
+}
+
+// NewOperationConstI64 is a constructor for unionOperation with OperationConstI64.
+//
+// This corresponds to wasm.OpcodeI64Const.
+func newOperationConstI64(value uint64) unionOperation {
+ return unionOperation{Kind: operationKindConstI64, U1: value}
+}
+
+// NewOperationConstF32 is a constructor for unionOperation with OperationConstF32.
+//
+// This corresponds to wasm.OpcodeF32Const.
+func newOperationConstF32(value float32) unionOperation {
+ return unionOperation{Kind: operationKindConstF32, U1: uint64(math.Float32bits(value))}
+}
+
+// NewOperationConstF64 is a constructor for unionOperation with OperationConstF64.
+//
+// This corresponds to wasm.OpcodeF64Const.
+func newOperationConstF64(value float64) unionOperation {
+ return unionOperation{Kind: operationKindConstF64, U1: math.Float64bits(value)}
+}
+
+// NewOperationEq is a constructor for unionOperation with operationKindEq.
+//
+// This corresponds to wasm.OpcodeI32EqName wasm.OpcodeI64EqName wasm.OpcodeF32EqName wasm.OpcodeF64EqName
+func newOperationEq(b unsignedType) unionOperation {
+ return unionOperation{Kind: operationKindEq, B1: byte(b)}
+}
+
+// NewOperationNe is a constructor for unionOperation with operationKindNe.
+//
+// This corresponds to wasm.OpcodeI32NeName wasm.OpcodeI64NeName wasm.OpcodeF32NeName wasm.OpcodeF64NeName
+func newOperationNe(b unsignedType) unionOperation {
+ return unionOperation{Kind: operationKindNe, B1: byte(b)}
+}
+
+// NewOperationEqz is a constructor for unionOperation with operationKindEqz.
+//
+// This corresponds to wasm.OpcodeI32EqzName wasm.OpcodeI64EqzName
+func newOperationEqz(b unsignedInt) unionOperation {
+ return unionOperation{Kind: operationKindEqz, B1: byte(b)}
+}
+
+// NewOperationLt is a constructor for unionOperation with operationKindLt.
+//
+// This corresponds to wasm.OpcodeI32LtS wasm.OpcodeI32LtU wasm.OpcodeI64LtS wasm.OpcodeI64LtU wasm.OpcodeF32Lt wasm.OpcodeF64Lt
+func newOperationLt(b signedType) unionOperation {
+ return unionOperation{Kind: operationKindLt, B1: byte(b)}
+}
+
+// NewOperationGt is a constructor for unionOperation with operationKindGt.
+//
+// This corresponds to wasm.OpcodeI32GtS wasm.OpcodeI32GtU wasm.OpcodeI64GtS wasm.OpcodeI64GtU wasm.OpcodeF32Gt wasm.OpcodeF64Gt
+func newOperationGt(b signedType) unionOperation {
+ return unionOperation{Kind: operationKindGt, B1: byte(b)}
+}
+
+// NewOperationLe is a constructor for unionOperation with operationKindLe.
+//
+// This corresponds to wasm.OpcodeI32LeS wasm.OpcodeI32LeU wasm.OpcodeI64LeS wasm.OpcodeI64LeU wasm.OpcodeF32Le wasm.OpcodeF64Le
+func newOperationLe(b signedType) unionOperation {
+ return unionOperation{Kind: operationKindLe, B1: byte(b)}
+}
+
+// NewOperationGe is a constructor for unionOperation with operationKindGe.
+//
+// This corresponds to wasm.OpcodeI32GeS wasm.OpcodeI32GeU wasm.OpcodeI64GeS wasm.OpcodeI64GeU wasm.OpcodeF32Ge wasm.OpcodeF64Ge.
+func newOperationGe(b signedType) unionOperation {
+ return unionOperation{Kind: operationKindGe, B1: byte(b)}
+}
+
+// NewOperationAdd is a constructor for unionOperation with operationKindAdd.
+//
+// This corresponds to wasm.OpcodeI32AddName wasm.OpcodeI64AddName wasm.OpcodeF32AddName wasm.OpcodeF64AddName.
+func newOperationAdd(b unsignedType) unionOperation {
+ return unionOperation{Kind: operationKindAdd, B1: byte(b)}
+}
+
+// NewOperationSub is a constructor for unionOperation with operationKindSub.
+//
+// This corresponds to wasm.OpcodeI32SubName wasm.OpcodeI64SubName wasm.OpcodeF32SubName wasm.OpcodeF64SubName.
+func newOperationSub(b unsignedType) unionOperation {
+ return unionOperation{Kind: operationKindSub, B1: byte(b)}
+}
+
+// NewOperationMul is a constructor for unionOperation with operationKindMul.
+//
+// This corresponds to wasm.OpcodeI32MulName wasm.OpcodeI64MulName wasm.OpcodeF32MulName wasm.OpcodeF64MulName.
+func newOperationMul(b unsignedType) unionOperation {
+ return unionOperation{Kind: operationKindMul, B1: byte(b)}
+}
+
+// NewOperationClz is a constructor for unionOperation with operationKindClz.
+//
+// This corresponds to wasm.OpcodeI32ClzName wasm.OpcodeI64ClzName.
+//
+// The engines are expected to count the leading zeros in the
+// current top of the stack, and push the count result.
+// For example, a stack of [..., 0x00_ff_ff_ff] results in [..., 8].
+func newOperationClz(b unsignedInt) unionOperation {
+ return unionOperation{Kind: operationKindClz, B1: byte(b)}
+}
+
+// NewOperationCtz is a constructor for unionOperation with operationKindCtz.
+//
+// This corresponds to wasm.OpcodeI32CtzName wasm.OpcodeI64CtzName.
+//
+// The engines are expected to count the trailing zeros in the
+// current top of the stack, and push the count result.
+// For example, a stack of [..., 0xff_ff_ff_00] results in [..., 8].
+func newOperationCtz(b unsignedInt) unionOperation {
+ return unionOperation{Kind: operationKindCtz, B1: byte(b)}
+}
+
+// NewOperationPopcnt is a constructor for unionOperation with operationKindPopcnt.
+//
+// This corresponds to wasm.OpcodeI32PopcntName wasm.OpcodeI64PopcntName.
+//
+// The engines are expected to count the number of set bits in the
+// current top of the stack, and push the count result.
+// For example, a stack of [..., 0b00_00_00_11] results in [..., 2].
+func newOperationPopcnt(b unsignedInt) unionOperation {
+ return unionOperation{Kind: operationKindPopcnt, B1: byte(b)}
+}
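+
+// For illustration only, not wazero's actual implementation: the three
+// bit-counting operations above map directly onto Go's math/bits package
+// (assuming it is imported); for the 32-bit variants:
+//
+//	clz := bits.LeadingZeros32(uint32(v))  // 0x00_ff_ff_ff -> 8
+//	ctz := bits.TrailingZeros32(uint32(v)) // 0xff_ff_ff_00 -> 8
+//	cnt := bits.OnesCount32(uint32(v))     // 0b00_00_00_11 -> 2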
+
+// NewOperationDiv is a constructor for unionOperation with operationKindDiv.
+//
+// This corresponds to wasm.OpcodeI32DivS wasm.OpcodeI32DivU wasm.OpcodeI64DivS
+// wasm.OpcodeI64DivU wasm.OpcodeF32Div wasm.OpcodeF64Div.
+func newOperationDiv(b signedType) unionOperation {
+ return unionOperation{Kind: operationKindDiv, B1: byte(b)}
+}
+
+// NewOperationRem is a constructor for unionOperation with operationKindRem.
+//
+// This corresponds to wasm.OpcodeI32RemS wasm.OpcodeI32RemU wasm.OpcodeI64RemS wasm.OpcodeI64RemU.
+//
+// The engines are expected to perform division on the top
+// two values of integer type on the stack, and push the remainder
+// onto the stack. For example, a stack [..., 10, 3] results in [..., 1] where
+// the quotient is discarded.
+func newOperationRem(b signedInt) unionOperation {
+ return unionOperation{Kind: operationKindRem, B1: byte(b)}
+}
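+
+// For illustration only, not wazero's actual implementation: a sketch of the
+// 32-bit remainder in both signednesses, where x64 and y64 are the two popped
+// stack slots (x64 is the dividend):
+//
+//	if uint32(y64) == 0 {
+//		// trap, e.g. with wasmruntime.ErrRuntimeIntegerDivideByZero
+//	}
+//	remS := uint32(int32(x64) % int32(y64)) // i32.rem_s
+//	remU := uint32(x64) % uint32(y64)       // i32.rem_u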
+
+// NewOperationAnd is a constructor for unionOperation with operationKindAnd.
+//
+// This corresponds to wasm.OpcodeI32AndName wasm.OpcodeI64AndName.
+//
+// The engines are expected to perform the "And" operation on the
+// top two values on the stack, and push the result.
+func newOperationAnd(b unsignedInt) unionOperation {
+ return unionOperation{Kind: operationKindAnd, B1: byte(b)}
+}
+
+// NewOperationOr is a constructor for unionOperation with operationKindOr.
+//
+// This corresponds to wasm.OpcodeI32OrName wasm.OpcodeI64OrName.
+//
+// The engines are expected to perform the "Or" operation on the
+// top two values on the stack, and push the result.
+func newOperationOr(b unsignedInt) unionOperation {
+ return unionOperation{Kind: operationKindOr, B1: byte(b)}
+}
+
+// NewOperationXor is a constructor for unionOperation with operationKindXor.
+//
+// This corresponds to wasm.OpcodeI32XorName wasm.OpcodeI64XorName.
+//
+// The engines are expected to perform the "Xor" operation on the
+// top two values on the stack, and push the result.
+func newOperationXor(b unsignedInt) unionOperation {
+ return unionOperation{Kind: operationKindXor, B1: byte(b)}
+}
+
+// NewOperationShl is a constructor for unionOperation with operationKindShl.
+//
+// This corresponds to wasm.OpcodeI32ShlName wasm.OpcodeI64ShlName.
+//
+// The engines are expected to perform the "Shl" (left-shift) operation on the
+// top two values on the stack, and push the result.
+func newOperationShl(b unsignedInt) unionOperation {
+ return unionOperation{Kind: operationKindShl, B1: byte(b)}
+}
+
+// NewOperationShr is a constructor for unionOperation with operationKindShr.
+//
+// This corresponds to wasm.OpcodeI32ShrSName wasm.OpcodeI32ShrUName wasm.OpcodeI64ShrSName wasm.OpcodeI64ShrUName.
+//
+// If b is a signed integer type, the engines are expected to perform an arithmetic right shift on the
+// top two values on the stack; otherwise, a logical right shift.
+func newOperationShr(b signedInt) unionOperation {
+ return unionOperation{Kind: operationKindShr, B1: byte(b)}
+}
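+
+// For illustration only, not wazero's actual implementation: the 32-bit case,
+// with the shift amount masked to the bit width as the spec requires:
+//
+//	s := uint32(amount) % 32
+//	shrS := uint32(int32(v) >> s) // arithmetic: replicates the sign bit
+//	shrU := uint32(v) >> s        // logical: shifts in zeros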
+
+// NewOperationRotl is a constructor for unionOperation with operationKindRotl.
+//
+// This corresponds to wasm.OpcodeI32RotlName wasm.OpcodeI64RotlName.
+//
+// The engines are expected to perform the "Rotl" (left-rotate) operation on the
+// top two values on the stack, and push the result.
+func newOperationRotl(b unsignedInt) unionOperation {
+ return unionOperation{Kind: operationKindRotl, B1: byte(b)}
+}
+
+// NewOperationRotr is a constructor for unionOperation with operationKindRotr.
+//
+// This corresponds to wasm.OpcodeI32RotrName wasm.OpcodeI64RotrName.
+//
+// The engines are expected to perform the "Rotr" (right-rotate) operation on the
+// top two values on the stack, and push the result.
+func newOperationRotr(b unsignedInt) unionOperation {
+ return unionOperation{Kind: operationKindRotr, B1: byte(b)}
+}
+
+// NewOperationAbs is a constructor for unionOperation with operationKindAbs.
+//
+// This corresponds to wasm.OpcodeF32Abs wasm.OpcodeF64Abs
+func newOperationAbs(b float) unionOperation {
+ return unionOperation{Kind: operationKindAbs, B1: byte(b)}
+}
+
+// NewOperationNeg is a constructor for unionOperation with operationKindNeg.
+//
+// This corresponds to wasm.OpcodeF32Neg wasm.OpcodeF64Neg
+func newOperationNeg(b float) unionOperation {
+ return unionOperation{Kind: operationKindNeg, B1: byte(b)}
+}
+
+// NewOperationCeil is a constructor for unionOperation with operationKindCeil.
+//
+// This corresponds to wasm.OpcodeF32CeilName wasm.OpcodeF64CeilName
+func newOperationCeil(b float) unionOperation {
+ return unionOperation{Kind: operationKindCeil, B1: byte(b)}
+}
+
+// NewOperationFloor is a constructor for unionOperation with operationKindFloor.
+//
+// This corresponds to wasm.OpcodeF32FloorName wasm.OpcodeF64FloorName
+func newOperationFloor(b float) unionOperation {
+ return unionOperation{Kind: operationKindFloor, B1: byte(b)}
+}
+
+// NewOperationTrunc is a constructor for unionOperation with operationKindTrunc.
+//
+// This corresponds to wasm.OpcodeF32TruncName wasm.OpcodeF64TruncName
+func newOperationTrunc(b float) unionOperation {
+ return unionOperation{Kind: operationKindTrunc, B1: byte(b)}
+}
+
+// NewOperationNearest is a constructor for unionOperation with operationKindNearest.
+//
+// This corresponds to wasm.OpcodeF32NearestName wasm.OpcodeF64NearestName.
+//
+// Note: this is *not* equivalent to math.Round, and instead has the same
+// semantics as LLVM's rint intrinsic. See https://llvm.org/docs/LangRef.html#llvm-rint-intrinsic.
+// For example, math.Round(-4.5) produces -5 while we want to produce -4.
+func newOperationNearest(b float) unionOperation {
+ return unionOperation{Kind: operationKindNearest, B1: byte(b)}
+}
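+
+// For illustration only, not wazero's actual implementation: for finite inputs
+// this is round-to-nearest with ties-to-even, which Go exposes as
+// math.RoundToEven:
+//
+//	math.RoundToEven(-4.5) // -4, whereas math.Round(-4.5) == -5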
+
+// NewOperationSqrt is a constructor for unionOperation with operationKindSqrt.
+//
+// This corresponds to wasm.OpcodeF32SqrtName wasm.OpcodeF64SqrtName
+func newOperationSqrt(b float) unionOperation {
+ return unionOperation{Kind: operationKindSqrt, B1: byte(b)}
+}
+
+// NewOperationMin is a constructor for unionOperation with operationKindMin.
+//
+// This corresponds to wasm.OpcodeF32MinName wasm.OpcodeF64MinName.
+//
+// The engines are expected to pop two values from the stack, and push back the minimum of
+// these two values onto the stack. For example, a stack [..., 100.1, 1.9] results in [..., 1.9].
+//
+// Note: WebAssembly specifies that min/max must always return NaN if one of the values is NaN,
+// which is a behavior different from math.Min.
+func newOperationMin(b float) unionOperation {
+ return unionOperation{Kind: operationKindMin, B1: byte(b)}
+}
+
+// NewOperationMax is a constructor for unionOperation with operationKindMax.
+//
+// This corresponds to wasm.OpcodeF32MaxName wasm.OpcodeF64MaxName.
+//
+// The engines are expected to pop two values from the stack, and push back the maximum of
+// these two values onto the stack. For example, a stack [..., 100.1, 1.9] results in [..., 100.1].
+//
+// Note: WebAssembly specifies that min/max must always return NaN if one of the values is NaN,
+// which is a behavior different from math.Max.
+func newOperationMax(b float) unionOperation {
+ return unionOperation{Kind: operationKindMax, B1: byte(b)}
+}
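+
+// For illustration only, not wazero's actual implementation: a sketch of a min
+// that makes the NaN rule explicit by returning a canonical NaN before
+// delegating to math.Min:
+//
+//	func wasmMin(a, b float64) float64 {
+//		if math.IsNaN(a) || math.IsNaN(b) {
+//			return math.NaN()
+//		}
+//		return math.Min(a, b)
+//	}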
+
+// NewOperationCopysign is a constructor for unionOperation with operationKindCopysign.
+//
+// This corresponds to wasm.OpcodeF32CopysignName wasm.OpcodeF64CopysignName.
+//
+// The engines are expected to pop two float values from the stack, copy the sign bit of
+// the first-popped value onto the second-popped one, and push the result.
+// For example, a stack [..., 1.213, -5.0] results in [..., -1.213].
+func newOperationCopysign(b float) unionOperation {
+ return unionOperation{Kind: operationKindCopysign, B1: byte(b)}
+}
+
+// NewOperationI32WrapFromI64 is a constructor for unionOperation with operationKindI32WrapFromI64.
+//
+// This corresponds to wasm.OpcodeI32WrapI64 and is equivalent to uint64(uint32(v)) in Go.
+//
+// The engines are expected to replace the 64-bit int on top of the stack
+// with the corresponding 32-bit integer.
+func newOperationI32WrapFromI64() unionOperation {
+ return unionOperation{Kind: operationKindI32WrapFromI64}
+}
+
+// NewOperationITruncFromF is a constructor for unionOperation with operationKindITruncFromF.
+//
+// This corresponds to
+//
+// wasm.OpcodeI32TruncF32SName wasm.OpcodeI32TruncF32UName wasm.OpcodeI32TruncF64SName
+// wasm.OpcodeI32TruncF64UName wasm.OpcodeI64TruncF32SName wasm.OpcodeI64TruncF32UName wasm.OpcodeI64TruncF64SName
+//	wasm.OpcodeI64TruncF64UName wasm.OpcodeI32TruncSatF32SName wasm.OpcodeI32TruncSatF32UName
+// wasm.OpcodeI32TruncSatF64SName wasm.OpcodeI32TruncSatF64UName wasm.OpcodeI64TruncSatF32SName
+// wasm.OpcodeI64TruncSatF32UName wasm.OpcodeI64TruncSatF64SName wasm.OpcodeI64TruncSatF64UName
+//
+// See [1] and [2] for when we encounter undefined behavior in the WebAssembly specification if nonTrapping == false.
+// To summarize, if the source float value is NaN or doesn't fit in the destination range of integers (including ±Inf),
+// then the runtime behavior is undefined. In wazero, the engines are expected to exit the execution in these undefined cases with
+// wasmruntime.ErrRuntimeInvalidConversionToInteger error.
+//
+// [1] https://www.w3.org/TR/2019/REC-wasm-core-1-20191205/#-hrefop-trunc-umathrmtruncmathsfu_m-n-z for unsigned integers.
+// [2] https://www.w3.org/TR/2019/REC-wasm-core-1-20191205/#-hrefop-trunc-smathrmtruncmathsfs_m-n-z for signed integers.
+//
+// nonTrapping is true if this conversion is "non-trapping" in the sense of the
+// https://github.com/WebAssembly/spec/blob/ce4b6c4d47eb06098cc7ab2e81f24748da822f20/proposals/nontrapping-float-to-int-conversion/Overview.md
+func newOperationITruncFromF(inputType float, outputType signedInt, nonTrapping bool) unionOperation {
+ return unionOperation{
+ Kind: operationKindITruncFromF,
+ B1: byte(inputType),
+ B2: byte(outputType),
+ B3: nonTrapping,
+ }
+}
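+
+// For illustration only, not wazero's actual implementation: a sketch of
+// f64 -> signed i32 truncation in both modes. The function name is hypothetical,
+// and the integer-overflow sentinel used below is an assumption; only
+// ErrRuntimeInvalidConversionToInteger is documented above:
+//
+//	func truncF64ToI32(f float64, nonTrapping bool) (int32, error) {
+//		if math.IsNaN(f) {
+//			if !nonTrapping {
+//				return 0, wasmruntime.ErrRuntimeInvalidConversionToInteger
+//			}
+//			return 0, nil // saturating mode maps NaN to zero
+//		}
+//		switch t := math.Trunc(f); {
+//		case t < math.MinInt32:
+//			if !nonTrapping {
+//				return 0, wasmruntime.ErrRuntimeIntegerOverflow // assumed sentinel
+//			}
+//			return math.MinInt32, nil // clamp
+//		case t > math.MaxInt32:
+//			if !nonTrapping {
+//				return 0, wasmruntime.ErrRuntimeIntegerOverflow // assumed sentinel
+//			}
+//			return math.MaxInt32, nil // clamp
+//		default:
+//			return int32(t), nil
+//		}
+//	}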
+
+// NewOperationFConvertFromI is a constructor for unionOperation with operationKindFConvertFromI.
+//
+// This corresponds to
+//
+// wasm.OpcodeF32ConvertI32SName wasm.OpcodeF32ConvertI32UName wasm.OpcodeF32ConvertI64SName wasm.OpcodeF32ConvertI64UName
+// wasm.OpcodeF64ConvertI32SName wasm.OpcodeF64ConvertI32UName wasm.OpcodeF64ConvertI64SName wasm.OpcodeF64ConvertI64UName
+//
+// and equivalent to float32(uint32(x)), float32(int32(x)), etc in Go.
+func newOperationFConvertFromI(inputType signedInt, outputType float) unionOperation {
+ return unionOperation{
+ Kind: operationKindFConvertFromI,
+ B1: byte(inputType),
+ B2: byte(outputType),
+ }
+}
+
+// NewOperationF32DemoteFromF64 is a constructor for unionOperation with operationKindF32DemoteFromF64.
+//
+// This corresponds to wasm.OpcodeF32DemoteF64 and is equivalent to float32(float64(v)).
+func newOperationF32DemoteFromF64() unionOperation {
+ return unionOperation{Kind: operationKindF32DemoteFromF64}
+}
+
+// NewOperationF64PromoteFromF32 is a constructor for unionOperation with operationKindF64PromoteFromF32.
+//
+// This corresponds to wasm.OpcodeF64PromoteF32 and is equivalent to float64(float32(v)).
+func newOperationF64PromoteFromF32() unionOperation {
+ return unionOperation{Kind: operationKindF64PromoteFromF32}
+}
+
+// NewOperationI32ReinterpretFromF32 is a constructor for unionOperation with operationKindI32ReinterpretFromF32.
+//
+// This corresponds to wasm.OpcodeI32ReinterpretF32Name.
+func newOperationI32ReinterpretFromF32() unionOperation {
+ return unionOperation{Kind: operationKindI32ReinterpretFromF32}
+}
+
+// NewOperationI64ReinterpretFromF64 is a constructor for unionOperation with operationKindI64ReinterpretFromF64.
+//
+// This corresponds to wasm.OpcodeI64ReinterpretF64Name.
+func newOperationI64ReinterpretFromF64() unionOperation {
+ return unionOperation{Kind: operationKindI64ReinterpretFromF64}
+}
+
+// NewOperationF32ReinterpretFromI32 is a constructor for unionOperation with operationKindF32ReinterpretFromI32.
+//
+// This corresponds to wasm.OpcodeF32ReinterpretI32Name.
+func newOperationF32ReinterpretFromI32() unionOperation {
+ return unionOperation{Kind: operationKindF32ReinterpretFromI32}
+}
+
+// NewOperationF64ReinterpretFromI64 is a constructor for unionOperation with operationKindF64ReinterpretFromI64.
+//
+// This corresponds to wasm.OpcodeF64ReinterpretI64Name.
+func newOperationF64ReinterpretFromI64() unionOperation {
+ return unionOperation{Kind: operationKindF64ReinterpretFromI64}
+}
+
+// NewOperationExtend is a constructor for unionOperation with operationKindExtend.
+//
+// This corresponds to wasm.OpcodeI64ExtendI32SName wasm.OpcodeI64ExtendI32UName.
+//
+// The engines are expected to extend the 32-bit signed or unsigned int on top of the stack
+// as a 64-bit integer of corresponding signedness. For the unsigned case, this is just reinterpreting the
+// underlying bit pattern as a 64-bit integer. For the signed case, this is sign-extension, which preserves the
+// original integer's sign.
+func newOperationExtend(signed bool) unionOperation {
+ op := unionOperation{Kind: operationKindExtend}
+ if signed {
+ op.B1 = 1
+ }
+ return op
+}
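+
+// For illustration only, not wazero's actual implementation: both cases in Go,
+// where v is the 32-bit value held in a uint64 stack slot:
+//
+//	extU := uint64(uint32(v))               // i64.extend_i32_u: reinterpret the bits
+//	extS := uint64(int64(int32(uint32(v)))) // i64.extend_i32_s: replicate the sign bit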
+
+// NewOperationSignExtend32From8 is a constructor for unionOperation with operationKindSignExtend32From8.
+//
+// This corresponds to wasm.OpcodeI32Extend8SName.
+//
+// The engines are expected to sign-extend the lower 8 bits of the 32-bit int on top of the stack as a signed 32-bit int.
+func newOperationSignExtend32From8() unionOperation {
+ return unionOperation{Kind: operationKindSignExtend32From8}
+}
+
+// NewOperationSignExtend32From16 is a constructor for unionOperation with operationKindSignExtend32From16.
+//
+// This corresponds to wasm.OpcodeI32Extend16SName.
+//
+// The engines are expected to sign-extend the lower 16 bits of the 32-bit int on top of the stack as a signed 32-bit int.
+func newOperationSignExtend32From16() unionOperation {
+ return unionOperation{Kind: operationKindSignExtend32From16}
+}
+
+// NewOperationSignExtend64From8 is a constructor for unionOperation with operationKindSignExtend64From8.
+//
+// This corresponds to wasm.OpcodeI64Extend8SName.
+//
+// The engines are expected to sign-extend the lower 8 bits of the 64-bit int on top of the stack as a signed 64-bit int.
+func newOperationSignExtend64From8() unionOperation {
+ return unionOperation{Kind: operationKindSignExtend64From8}
+}
+
+// NewOperationSignExtend64From16 is a constructor for unionOperation with operationKindSignExtend64From16.
+//
+// This corresponds to wasm.OpcodeI64Extend16SName.
+//
+// The engines are expected to sign-extend the lower 16 bits of the 64-bit int on top of the stack as a signed 64-bit int.
+func newOperationSignExtend64From16() unionOperation {
+ return unionOperation{Kind: operationKindSignExtend64From16}
+}
+
+// NewOperationSignExtend64From32 is a constructor for unionOperation with operationKindSignExtend64From32.
+//
+// This corresponds to wasm.OpcodeI64Extend32SName.
+//
+// The engines are expected to sign-extend the lower 32 bits of the 64-bit int on top of the stack as a signed 64-bit int.
+func newOperationSignExtend64From32() unionOperation {
+ return unionOperation{Kind: operationKindSignExtend64From32}
+}
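+
+// For illustration only, not wazero's actual implementation: the whole family
+// follows one pattern, narrowing to the source width and widening back through
+// a signed type; e.g. for i32.extend8_s and i64.extend16_s on a stack slot v:
+//
+//	r32 := uint64(uint32(int32(int8(uint8(v))))) // i32.extend8_s
+//	r64 := uint64(int64(int16(uint16(v))))       // i64.extend16_s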
+
+// NewOperationMemoryInit is a constructor for unionOperation with operationKindMemoryInit.
+//
+// This corresponds to wasm.OpcodeMemoryInitName.
+//
+// dataIndex is the index of the data instance in ModuleInstance.DataInstances
+// with which this operation initializes a part of the memory.
+func newOperationMemoryInit(dataIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindMemoryInit, U1: uint64(dataIndex)}
+}
+
+// NewOperationDataDrop is a constructor for unionOperation with operationKindDataDrop.
+//
+// This corresponds to wasm.OpcodeDataDropName.
+//
+// dataIndex is the index of the data instance in ModuleInstance.DataInstances
+// which this operation drops.
+func newOperationDataDrop(dataIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindDataDrop, U1: uint64(dataIndex)}
+}
+
+// NewOperationMemoryCopy is a constructor for unionOperation with operationKindMemoryCopy.
+//
+// This corresponds to wasm.OpcodeMemoryCopyName.
+func newOperationMemoryCopy() unionOperation {
+ return unionOperation{Kind: operationKindMemoryCopy}
+}
+
+// NewOperationMemoryFill is a constructor for unionOperation with operationKindMemoryFill.
+//
+// This corresponds to wasm.OpcodeMemoryFillName.
+func newOperationMemoryFill() unionOperation {
+ return unionOperation{Kind: operationKindMemoryFill}
+}
+
+// NewOperationTableInit is a constructor for unionOperation with operationKindTableInit.
+//
+// This corresponds to wasm.OpcodeTableInitName.
+//
+// elemIndex is the index of the element by which this operation initializes a part of the table.
+// tableIndex is the index of the table that this operation initializes with the target element.
+func newOperationTableInit(elemIndex, tableIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindTableInit, U1: uint64(elemIndex), U2: uint64(tableIndex)}
+}
+
+// NewOperationElemDrop is a constructor for unionOperation with operationKindElemDrop.
+//
+// This corresponds to wasm.OpcodeElemDropName.
+//
+// elemIndex is the index of the element which this operation drops.
+func newOperationElemDrop(elemIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindElemDrop, U1: uint64(elemIndex)}
+}
+
+// NewOperationTableCopy is a constructor for unionOperation with operationKindTableCopy.
+//
+// This corresponds to wasm.OpcodeTableCopyName.
+func newOperationTableCopy(srcTableIndex, dstTableIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindTableCopy, U1: uint64(srcTableIndex), U2: uint64(dstTableIndex)}
+}
+
+// NewOperationRefFunc is a constructor for unionOperation with operationKindRefFunc.
+//
+// This corresponds to wasm.OpcodeRefFuncName, and engines are expected to
+// push the opaque pointer value of engine specific func for the given FunctionIndex.
+//
+// Note: in wazero, we express any reference types (funcref or externref) as opaque pointers, which are uint64s.
+// Therefore, the engine implementations emit instructions to push the address of the *function onto the stack.
+func newOperationRefFunc(functionIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindRefFunc, U1: uint64(functionIndex)}
+}
+
+// NewOperationTableGet is a constructor for unionOperation with operationKindTableGet.
+//
+// This corresponds to wasm.OpcodeTableGetName.
+func newOperationTableGet(tableIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindTableGet, U1: uint64(tableIndex)}
+}
+
+// NewOperationTableSet is a constructor for unionOperation with operationKindTableSet.
+//
+// This corresponds to wasm.OpcodeTableSetName.
+func newOperationTableSet(tableIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindTableSet, U1: uint64(tableIndex)}
+}
+
+// NewOperationTableSize is a constructor for unionOperation with operationKindTableSize.
+//
+// This corresponds to wasm.OpcodeTableSizeName.
+func newOperationTableSize(tableIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindTableSize, U1: uint64(tableIndex)}
+}
+
+// NewOperationTableGrow is a constructor for unionOperation with operationKindTableGrow.
+//
+// This corresponds to wasm.OpcodeTableGrowName.
+func newOperationTableGrow(tableIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindTableGrow, U1: uint64(tableIndex)}
+}
+
+// NewOperationTableFill is a constructor for unionOperation with operationKindTableFill.
+//
+// This corresponds to wasm.OpcodeTableFillName.
+func newOperationTableFill(tableIndex uint32) unionOperation {
+ return unionOperation{Kind: operationKindTableFill, U1: uint64(tableIndex)}
+}
+
+// NewOperationV128Const is a constructor for unionOperation with operationKindV128Const.
+func newOperationV128Const(lo, hi uint64) unionOperation {
+ return unionOperation{Kind: operationKindV128Const, U1: lo, U2: hi}
+}
+
+// shape corresponds to a shape of v128 values.
+// https://webassembly.github.io/spec/core/syntax/instructions.html#syntax-shape
+type shape = byte
+
+const (
+ shapeI8x16 shape = iota
+ shapeI16x8
+ shapeI32x4
+ shapeI64x2
+ shapeF32x4
+ shapeF64x2
+)
+
+func shapeName(s shape) (ret string) {
+ switch s {
+ case shapeI8x16:
+ ret = "I8x16"
+ case shapeI16x8:
+ ret = "I16x8"
+ case shapeI32x4:
+ ret = "I32x4"
+ case shapeI64x2:
+ ret = "I64x2"
+ case shapeF32x4:
+ ret = "F32x4"
+ case shapeF64x2:
+ ret = "F64x2"
+ }
+ return
+}
+
+// NewOperationV128Add constructor for unionOperation with operationKindV128Add.
+//
+// This corresponds to wasm.OpcodeVecI8x16AddName wasm.OpcodeVecI16x8AddName wasm.OpcodeVecI32x4AddName
+// wasm.OpcodeVecI64x2AddName wasm.OpcodeVecF32x4AddName wasm.OpcodeVecF64x2AddName.
+func newOperationV128Add(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Add, B1: shape}
+}
+
+// NewOperationV128Sub constructor for unionOperation with operationKindV128Sub.
+//
+// This corresponds to wasm.OpcodeVecI8x16SubName wasm.OpcodeVecI16x8SubName wasm.OpcodeVecI32x4SubName
+// wasm.OpcodeVecI64x2SubName wasm.OpcodeVecF32x4SubName wasm.OpcodeVecF64x2SubName.
+func newOperationV128Sub(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Sub, B1: shape}
+}
+
+// v128LoadType represents a type of wasm.OpcodeVecV128Load* instructions.
+type v128LoadType = byte
+
+const (
+ // v128LoadType128 corresponds to wasm.OpcodeVecV128LoadName.
+ v128LoadType128 v128LoadType = iota
+ // v128LoadType8x8s corresponds to wasm.OpcodeVecV128Load8x8SName.
+ v128LoadType8x8s
+ // v128LoadType8x8u corresponds to wasm.OpcodeVecV128Load8x8UName.
+ v128LoadType8x8u
+ // v128LoadType16x4s corresponds to wasm.OpcodeVecV128Load16x4SName
+ v128LoadType16x4s
+ // v128LoadType16x4u corresponds to wasm.OpcodeVecV128Load16x4UName
+ v128LoadType16x4u
+ // v128LoadType32x2s corresponds to wasm.OpcodeVecV128Load32x2SName
+ v128LoadType32x2s
+ // v128LoadType32x2u corresponds to wasm.OpcodeVecV128Load32x2UName
+ v128LoadType32x2u
+ // v128LoadType8Splat corresponds to wasm.OpcodeVecV128Load8SplatName
+ v128LoadType8Splat
+ // v128LoadType16Splat corresponds to wasm.OpcodeVecV128Load16SplatName
+ v128LoadType16Splat
+ // v128LoadType32Splat corresponds to wasm.OpcodeVecV128Load32SplatName
+ v128LoadType32Splat
+ // v128LoadType64Splat corresponds to wasm.OpcodeVecV128Load64SplatName
+ v128LoadType64Splat
+ // v128LoadType32zero corresponds to wasm.OpcodeVecV128Load32zeroName
+ v128LoadType32zero
+ // v128LoadType64zero corresponds to wasm.OpcodeVecV128Load64zeroName
+ v128LoadType64zero
+)
+
+// NewOperationV128Load is a constructor for unionOperation with operationKindV128Load.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecV128LoadName wasm.OpcodeVecV128Load8x8SName wasm.OpcodeVecV128Load8x8UName
+// wasm.OpcodeVecV128Load16x4SName wasm.OpcodeVecV128Load16x4UName wasm.OpcodeVecV128Load32x2SName
+// wasm.OpcodeVecV128Load32x2UName wasm.OpcodeVecV128Load8SplatName wasm.OpcodeVecV128Load16SplatName
+// wasm.OpcodeVecV128Load32SplatName wasm.OpcodeVecV128Load64SplatName wasm.OpcodeVecV128Load32zeroName
+// wasm.OpcodeVecV128Load64zeroName
+func newOperationV128Load(loadType v128LoadType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindV128Load, B1: loadType, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// NewOperationV128LoadLane is a constructor for unionOperation with operationKindV128LoadLane.
+//
+// This corresponds to wasm.OpcodeVecV128Load8LaneName wasm.OpcodeVecV128Load16LaneName
+// wasm.OpcodeVecV128Load32LaneName wasm.OpcodeVecV128Load64LaneName.
+//
+// laneIndex is >=0 && <(128/laneSize).
+// laneSize is either 8, 16, 32, or 64.
+func newOperationV128LoadLane(laneIndex, laneSize byte, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindV128LoadLane, B1: laneSize, B2: laneIndex, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// NewOperationV128Store is a constructor for unionOperation with operationKindV128Store.
+//
+// This corresponds to wasm.OpcodeVecV128StoreName.
+func newOperationV128Store(arg memoryArg) unionOperation {
+ return unionOperation{
+ Kind: operationKindV128Store,
+ U1: uint64(arg.Alignment),
+ U2: uint64(arg.Offset),
+ }
+}
+
+// NewOperationV128StoreLane is a constructor for unionOperation with operationKindV128StoreLane.
+//
+// This corresponds to wasm.OpcodeVecV128Store8LaneName wasm.OpcodeVecV128Store16LaneName
+// wasm.OpcodeVecV128Store32LaneName wasm.OpcodeVecV128Store64LaneName.
+//
+// laneIndex is >=0 && <(128/laneSize).
+// laneSize is either 8, 16, 32, or 64.
+func newOperationV128StoreLane(laneIndex byte, laneSize byte, arg memoryArg) unionOperation {
+ return unionOperation{
+ Kind: operationKindV128StoreLane,
+ B1: laneSize,
+ B2: laneIndex,
+ U1: uint64(arg.Alignment),
+ U2: uint64(arg.Offset),
+ }
+}
+
+// NewOperationV128ExtractLane is a constructor for unionOperation with operationKindV128ExtractLane.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI8x16ExtractLaneSName wasm.OpcodeVecI8x16ExtractLaneUName
+// wasm.OpcodeVecI16x8ExtractLaneSName wasm.OpcodeVecI16x8ExtractLaneUName
+// wasm.OpcodeVecI32x4ExtractLaneName wasm.OpcodeVecI64x2ExtractLaneName
+// wasm.OpcodeVecF32x4ExtractLaneName wasm.OpcodeVecF64x2ExtractLaneName.
+//
+// laneIndex is >=0 && <M where shape = NxM.
+// signed is used when shape is either i8x16 or i16x8 to specify whether to sign-extend or not.
+func newOperationV128ExtractLane(laneIndex byte, signed bool, shape shape) unionOperation {
+ return unionOperation{
+ Kind: operationKindV128ExtractLane,
+ B1: shape,
+ B2: laneIndex,
+ B3: signed,
+ }
+}
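+
+// For illustration only, not wazero's actual implementation: a sketch of
+// extracting lane 3 of an i8x16 vector stored as two uint64s, where lo holds
+// lanes 0-7 in little-endian lane order:
+//
+//	b := byte(lo >> (8 * 3))
+//	var result uint64
+//	if signed {
+//		result = uint64(uint32(int32(int8(b)))) // sign-extend to i32
+//	} else {
+//		result = uint64(b) // zero-extend
+//	}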
+
+// NewOperationV128ReplaceLane is a constructor for unionOperation with operationKindV128ReplaceLane.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI8x16ReplaceLaneName wasm.OpcodeVecI16x8ReplaceLaneName
+// wasm.OpcodeVecI32x4ReplaceLaneName wasm.OpcodeVecI64x2ReplaceLaneName
+// wasm.OpcodeVecF32x4ReplaceLaneName wasm.OpcodeVecF64x2ReplaceLaneName.
+//
+// laneIndex is >=0 && <M where shape = NxM.
+func newOperationV128ReplaceLane(laneIndex byte, shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128ReplaceLane, B1: shape, B2: laneIndex}
+}
+
+// NewOperationV128Splat is a constructor for unionOperation with operationKindV128Splat.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI8x16SplatName wasm.OpcodeVecI16x8SplatName
+// wasm.OpcodeVecI32x4SplatName wasm.OpcodeVecI64x2SplatName
+// wasm.OpcodeVecF32x4SplatName wasm.OpcodeVecF64x2SplatName.
+func newOperationV128Splat(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Splat, B1: shape}
+}
+
+// NewOperationV128Shuffle is a constructor for unionOperation with operationKindV128Shuffle.
+func newOperationV128Shuffle(lanes []uint64) unionOperation {
+ return unionOperation{Kind: operationKindV128Shuffle, Us: lanes}
+}
+
+// NewOperationV128Swizzle is a constructor for unionOperation with operationKindV128Swizzle.
+//
+// This corresponds to wasm.OpcodeVecI8x16SwizzleName.
+func newOperationV128Swizzle() unionOperation {
+ return unionOperation{Kind: operationKindV128Swizzle}
+}
+
+// NewOperationV128AnyTrue is a constructor for unionOperation with operationKindV128AnyTrue.
+//
+// This corresponds to wasm.OpcodeVecV128AnyTrueName.
+func newOperationV128AnyTrue() unionOperation {
+ return unionOperation{Kind: operationKindV128AnyTrue}
+}
+
+// NewOperationV128AllTrue is a constructor for unionOperation with operationKindV128AllTrue.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI8x16AllTrueName wasm.OpcodeVecI16x8AllTrueName
+// wasm.OpcodeVecI32x4AllTrueName wasm.OpcodeVecI64x2AllTrueName.
+func newOperationV128AllTrue(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128AllTrue, B1: shape}
+}
+
+// NewOperationV128BitMask is a constructor for unionOperation with operationKindV128BitMask.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI8x16BitMaskName wasm.OpcodeVecI16x8BitMaskName
+// wasm.OpcodeVecI32x4BitMaskName wasm.OpcodeVecI64x2BitMaskName.
+func newOperationV128BitMask(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128BitMask, B1: shape}
+}
+
+// NewOperationV128And is a constructor for unionOperation with operationKindV128And.
+//
+// This corresponds to wasm.OpcodeVecV128And.
+func newOperationV128And() unionOperation {
+ return unionOperation{Kind: operationKindV128And}
+}
+
+// NewOperationV128Not is a constructor for unionOperation with operationKindV128Not.
+//
+// This corresponds to wasm.OpcodeVecV128Not.
+func newOperationV128Not() unionOperation {
+ return unionOperation{Kind: operationKindV128Not}
+}
+
+// NewOperationV128Or is a constructor for unionOperation with operationKindV128Or.
+//
+// This corresponds to wasm.OpcodeVecV128Or.
+func newOperationV128Or() unionOperation {
+ return unionOperation{Kind: operationKindV128Or}
+}
+
+// NewOperationV128Xor is a constructor for unionOperation with operationKindV128Xor.
+//
+// This corresponds to wasm.OpcodeVecV128Xor.
+func newOperationV128Xor() unionOperation {
+ return unionOperation{Kind: operationKindV128Xor}
+}
+
+// NewOperationV128Bitselect is a constructor for unionOperation with operationKindV128Bitselect.
+//
+// This corresponds to wasm.OpcodeVecV128Bitselect.
+func newOperationV128Bitselect() unionOperation {
+ return unionOperation{Kind: operationKindV128Bitselect}
+}
+
+// NewOperationV128AndNot is a constructor for unionOperation with operationKindV128AndNot.
+//
+// This corresponds to wasm.OpcodeVecV128AndNot.
+func newOperationV128AndNot() unionOperation {
+ return unionOperation{Kind: operationKindV128AndNot}
+}
+
+// NewOperationV128Shl is a constructor for unionOperation with operationKindV128Shl.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI8x16ShlName wasm.OpcodeVecI16x8ShlName
+// wasm.OpcodeVecI32x4ShlName wasm.OpcodeVecI64x2ShlName
+func newOperationV128Shl(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Shl, B1: shape}
+}
+
+// NewOperationV128Shr is a constructor for unionOperation with operationKindV128Shr.
+//
+// This corresponds to
+//
+//	wasm.OpcodeVecI8x16ShrSName wasm.OpcodeVecI8x16ShrUName wasm.OpcodeVecI16x8ShrSName
+//	wasm.OpcodeVecI16x8ShrUName wasm.OpcodeVecI32x4ShrSName wasm.OpcodeVecI32x4ShrUName
+//	wasm.OpcodeVecI64x2ShrSName wasm.OpcodeVecI64x2ShrUName.
+func newOperationV128Shr(shape shape, signed bool) unionOperation {
+ return unionOperation{Kind: operationKindV128Shr, B1: shape, B3: signed}
+}
+
+// NewOperationV128Cmp is a constructor for unionOperation with operationKindV128Cmp.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI8x16EqName, wasm.OpcodeVecI8x16NeName, wasm.OpcodeVecI8x16LtSName, wasm.OpcodeVecI8x16LtUName, wasm.OpcodeVecI8x16GtSName,
+// wasm.OpcodeVecI8x16GtUName, wasm.OpcodeVecI8x16LeSName, wasm.OpcodeVecI8x16LeUName, wasm.OpcodeVecI8x16GeSName, wasm.OpcodeVecI8x16GeUName,
+// wasm.OpcodeVecI16x8EqName, wasm.OpcodeVecI16x8NeName, wasm.OpcodeVecI16x8LtSName, wasm.OpcodeVecI16x8LtUName, wasm.OpcodeVecI16x8GtSName,
+// wasm.OpcodeVecI16x8GtUName, wasm.OpcodeVecI16x8LeSName, wasm.OpcodeVecI16x8LeUName, wasm.OpcodeVecI16x8GeSName, wasm.OpcodeVecI16x8GeUName,
+// wasm.OpcodeVecI32x4EqName, wasm.OpcodeVecI32x4NeName, wasm.OpcodeVecI32x4LtSName, wasm.OpcodeVecI32x4LtUName, wasm.OpcodeVecI32x4GtSName,
+// wasm.OpcodeVecI32x4GtUName, wasm.OpcodeVecI32x4LeSName, wasm.OpcodeVecI32x4LeUName, wasm.OpcodeVecI32x4GeSName, wasm.OpcodeVecI32x4GeUName,
+// wasm.OpcodeVecI64x2EqName, wasm.OpcodeVecI64x2NeName, wasm.OpcodeVecI64x2LtSName, wasm.OpcodeVecI64x2GtSName, wasm.OpcodeVecI64x2LeSName,
+// wasm.OpcodeVecI64x2GeSName, wasm.OpcodeVecF32x4EqName, wasm.OpcodeVecF32x4NeName, wasm.OpcodeVecF32x4LtName, wasm.OpcodeVecF32x4GtName,
+// wasm.OpcodeVecF32x4LeName, wasm.OpcodeVecF32x4GeName, wasm.OpcodeVecF64x2EqName, wasm.OpcodeVecF64x2NeName, wasm.OpcodeVecF64x2LtName,
+// wasm.OpcodeVecF64x2GtName, wasm.OpcodeVecF64x2LeName, wasm.OpcodeVecF64x2GeName
+func newOperationV128Cmp(cmpType v128CmpType) unionOperation {
+ return unionOperation{Kind: operationKindV128Cmp, B1: cmpType}
+}
+
+// v128CmpType represents a type of vector comparison operation.
+type v128CmpType = byte
+
+const (
+ // v128CmpTypeI8x16Eq corresponds to wasm.OpcodeVecI8x16EqName.
+ v128CmpTypeI8x16Eq v128CmpType = iota
+ // v128CmpTypeI8x16Ne corresponds to wasm.OpcodeVecI8x16NeName.
+ v128CmpTypeI8x16Ne
+ // v128CmpTypeI8x16LtS corresponds to wasm.OpcodeVecI8x16LtSName.
+ v128CmpTypeI8x16LtS
+ // v128CmpTypeI8x16LtU corresponds to wasm.OpcodeVecI8x16LtUName.
+ v128CmpTypeI8x16LtU
+ // v128CmpTypeI8x16GtS corresponds to wasm.OpcodeVecI8x16GtSName.
+ v128CmpTypeI8x16GtS
+ // v128CmpTypeI8x16GtU corresponds to wasm.OpcodeVecI8x16GtUName.
+ v128CmpTypeI8x16GtU
+ // v128CmpTypeI8x16LeS corresponds to wasm.OpcodeVecI8x16LeSName.
+ v128CmpTypeI8x16LeS
+ // v128CmpTypeI8x16LeU corresponds to wasm.OpcodeVecI8x16LeUName.
+ v128CmpTypeI8x16LeU
+ // v128CmpTypeI8x16GeS corresponds to wasm.OpcodeVecI8x16GeSName.
+ v128CmpTypeI8x16GeS
+ // v128CmpTypeI8x16GeU corresponds to wasm.OpcodeVecI8x16GeUName.
+ v128CmpTypeI8x16GeU
+ // v128CmpTypeI16x8Eq corresponds to wasm.OpcodeVecI16x8EqName.
+ v128CmpTypeI16x8Eq
+ // v128CmpTypeI16x8Ne corresponds to wasm.OpcodeVecI16x8NeName.
+ v128CmpTypeI16x8Ne
+ // v128CmpTypeI16x8LtS corresponds to wasm.OpcodeVecI16x8LtSName.
+ v128CmpTypeI16x8LtS
+ // v128CmpTypeI16x8LtU corresponds to wasm.OpcodeVecI16x8LtUName.
+ v128CmpTypeI16x8LtU
+ // v128CmpTypeI16x8GtS corresponds to wasm.OpcodeVecI16x8GtSName.
+ v128CmpTypeI16x8GtS
+ // v128CmpTypeI16x8GtU corresponds to wasm.OpcodeVecI16x8GtUName.
+ v128CmpTypeI16x8GtU
+ // v128CmpTypeI16x8LeS corresponds to wasm.OpcodeVecI16x8LeSName.
+ v128CmpTypeI16x8LeS
+ // v128CmpTypeI16x8LeU corresponds to wasm.OpcodeVecI16x8LeUName.
+ v128CmpTypeI16x8LeU
+ // v128CmpTypeI16x8GeS corresponds to wasm.OpcodeVecI16x8GeSName.
+ v128CmpTypeI16x8GeS
+ // v128CmpTypeI16x8GeU corresponds to wasm.OpcodeVecI16x8GeUName.
+ v128CmpTypeI16x8GeU
+ // v128CmpTypeI32x4Eq corresponds to wasm.OpcodeVecI32x4EqName.
+ v128CmpTypeI32x4Eq
+ // v128CmpTypeI32x4Ne corresponds to wasm.OpcodeVecI32x4NeName.
+ v128CmpTypeI32x4Ne
+ // v128CmpTypeI32x4LtS corresponds to wasm.OpcodeVecI32x4LtSName.
+ v128CmpTypeI32x4LtS
+ // v128CmpTypeI32x4LtU corresponds to wasm.OpcodeVecI32x4LtUName.
+ v128CmpTypeI32x4LtU
+ // v128CmpTypeI32x4GtS corresponds to wasm.OpcodeVecI32x4GtSName.
+ v128CmpTypeI32x4GtS
+ // v128CmpTypeI32x4GtU corresponds to wasm.OpcodeVecI32x4GtUName.
+ v128CmpTypeI32x4GtU
+ // v128CmpTypeI32x4LeS corresponds to wasm.OpcodeVecI32x4LeSName.
+ v128CmpTypeI32x4LeS
+ // v128CmpTypeI32x4LeU corresponds to wasm.OpcodeVecI32x4LeUName.
+ v128CmpTypeI32x4LeU
+ // v128CmpTypeI32x4GeS corresponds to wasm.OpcodeVecI32x4GeSName.
+ v128CmpTypeI32x4GeS
+ // v128CmpTypeI32x4GeU corresponds to wasm.OpcodeVecI32x4GeUName.
+ v128CmpTypeI32x4GeU
+ // v128CmpTypeI64x2Eq corresponds to wasm.OpcodeVecI64x2EqName.
+ v128CmpTypeI64x2Eq
+ // v128CmpTypeI64x2Ne corresponds to wasm.OpcodeVecI64x2NeName.
+ v128CmpTypeI64x2Ne
+ // v128CmpTypeI64x2LtS corresponds to wasm.OpcodeVecI64x2LtSName.
+ v128CmpTypeI64x2LtS
+ // v128CmpTypeI64x2GtS corresponds to wasm.OpcodeVecI64x2GtSName.
+ v128CmpTypeI64x2GtS
+ // v128CmpTypeI64x2LeS corresponds to wasm.OpcodeVecI64x2LeSName.
+ v128CmpTypeI64x2LeS
+ // v128CmpTypeI64x2GeS corresponds to wasm.OpcodeVecI64x2GeSName.
+ v128CmpTypeI64x2GeS
+ // v128CmpTypeF32x4Eq corresponds to wasm.OpcodeVecF32x4EqName.
+ v128CmpTypeF32x4Eq
+ // v128CmpTypeF32x4Ne corresponds to wasm.OpcodeVecF32x4NeName.
+ v128CmpTypeF32x4Ne
+ // v128CmpTypeF32x4Lt corresponds to wasm.OpcodeVecF32x4LtName.
+ v128CmpTypeF32x4Lt
+ // v128CmpTypeF32x4Gt corresponds to wasm.OpcodeVecF32x4GtName.
+ v128CmpTypeF32x4Gt
+ // v128CmpTypeF32x4Le corresponds to wasm.OpcodeVecF32x4LeName.
+ v128CmpTypeF32x4Le
+ // v128CmpTypeF32x4Ge corresponds to wasm.OpcodeVecF32x4GeName.
+ v128CmpTypeF32x4Ge
+ // v128CmpTypeF64x2Eq corresponds to wasm.OpcodeVecF64x2EqName.
+ v128CmpTypeF64x2Eq
+ // v128CmpTypeF64x2Ne corresponds to wasm.OpcodeVecF64x2NeName.
+ v128CmpTypeF64x2Ne
+ // v128CmpTypeF64x2Lt corresponds to wasm.OpcodeVecF64x2LtName.
+ v128CmpTypeF64x2Lt
+ // v128CmpTypeF64x2Gt corresponds to wasm.OpcodeVecF64x2GtName.
+ v128CmpTypeF64x2Gt
+ // v128CmpTypeF64x2Le corresponds to wasm.OpcodeVecF64x2LeName.
+ v128CmpTypeF64x2Le
+ // v128CmpTypeF64x2Ge corresponds to wasm.OpcodeVecF64x2GeName.
+ v128CmpTypeF64x2Ge
+)
+
+// NewOperationV128AddSat is a constructor for unionOperation with operationKindV128AddSat.
+//
+// This corresponds to wasm.OpcodeVecI8x16AddSatUName wasm.OpcodeVecI8x16AddSatSName
+// wasm.OpcodeVecI16x8AddSatUName wasm.OpcodeVecI16x8AddSatSName.
+//
+// shape is either shapeI8x16 or shapeI16x8.
+func newOperationV128AddSat(shape shape, signed bool) unionOperation {
+ return unionOperation{Kind: operationKindV128AddSat, B1: shape, B3: signed}
+}
+
+// NewOperationV128SubSat is a constructor for unionOperation with operationKindV128SubSat.
+//
+// This corresponds to wasm.OpcodeVecI8x16SubSatUName wasm.OpcodeVecI8x16SubSatSName
+// wasm.OpcodeVecI16x8SubSatUName wasm.OpcodeVecI16x8SubSatSName.
+//
+// shape is either shapeI8x16 or shapeI16x8.
+func newOperationV128SubSat(shape shape, signed bool) unionOperation {
+ return unionOperation{Kind: operationKindV128SubSat, B1: shape, B3: signed}
+}
+
+// NewOperationV128Mul is a constructor for unionOperation with operationKindV128Mul
+//
+// This corresponds to wasm.OpcodeVecF32x4MulName wasm.OpcodeVecF64x2MulName
+// wasm.OpcodeVecI16x8MulName wasm.OpcodeVecI32x4MulName wasm.OpcodeVecI64x2MulName.
+//
+// shape is either shapeI16x8, shapeI32x4, shapeI64x2, shapeF32x4 or shapeF64x2.
+func newOperationV128Mul(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Mul, B1: shape}
+}
+
+// NewOperationV128Div is a constructor for unionOperation with operationKindV128Div.
+//
+// This corresponds to wasm.OpcodeVecF32x4DivName wasm.OpcodeVecF64x2DivName.
+// shape is either shapeF32x4 or shapeF64x2.
+func newOperationV128Div(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Div, B1: shape}
+}
+
+// NewOperationV128Neg is a constructor for unionOperation with operationKindV128Neg.
+//
+// This corresponds to wasm.OpcodeVecI8x16NegName wasm.OpcodeVecI16x8NegName wasm.OpcodeVecI32x4NegName
+// wasm.OpcodeVecI64x2NegName wasm.OpcodeVecF32x4NegName wasm.OpcodeVecF64x2NegName.
+func newOperationV128Neg(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Neg, B1: shape}
+}
+
+// NewOperationV128Sqrt is a constructor for unionOperation with operationKindV128Sqrt.
+//
+// This corresponds to wasm.OpcodeVecF32x4SqrtName wasm.OpcodeVecF64x2SqrtName.
+// shape is either shapeF32x4 or shapeF64x2.
+func newOperationV128Sqrt(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Sqrt, B1: shape}
+}
+
+// NewOperationV128Abs is a constructor for unionOperation with operationKindV128Abs.
+//
+// This corresponds to wasm.OpcodeVecI8x16AbsName wasm.OpcodeVecI16x8AbsName wasm.OpcodeVecI32x4AbsName
+// wasm.OpcodeVecI64x2AbsName wasm.OpcodeVecF32x4AbsName wasm.OpcodeVecF64x2AbsName.
+func newOperationV128Abs(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Abs, B1: shape}
+}
+
+// NewOperationV128Popcnt is a constructor for unionOperation with operationKindV128Popcnt.
+//
+// This corresponds to wasm.OpcodeVecI8x16PopcntName.
+func newOperationV128Popcnt(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Popcnt, B1: shape}
+}
+
+// NewOperationV128Min is a constructor for unionOperation with operationKindV128Min.
+//
+// This corresponds to
+//
+//	wasm.OpcodeVecI8x16MinSName wasm.OpcodeVecI8x16MinUName wasm.OpcodeVecI16x8MinSName wasm.OpcodeVecI16x8MinUName
+//	wasm.OpcodeVecI32x4MinSName wasm.OpcodeVecI32x4MinUName
+//	wasm.OpcodeVecF32x4MinName wasm.OpcodeVecF64x2MinName
+func newOperationV128Min(shape shape, signed bool) unionOperation {
+ return unionOperation{Kind: operationKindV128Min, B1: shape, B3: signed}
+}
+
+// NewOperationV128Max is a constructor for unionOperation with operationKindV128Max.
+//
+// This corresponds to
+//
+//	wasm.OpcodeVecI8x16MaxSName wasm.OpcodeVecI8x16MaxUName wasm.OpcodeVecI16x8MaxSName wasm.OpcodeVecI16x8MaxUName
+//	wasm.OpcodeVecI32x4MaxSName wasm.OpcodeVecI32x4MaxUName
+//	wasm.OpcodeVecF32x4MaxName wasm.OpcodeVecF64x2MaxName.
+func newOperationV128Max(shape shape, signed bool) unionOperation {
+ return unionOperation{Kind: operationKindV128Max, B1: shape, B3: signed}
+}
+
+// NewOperationV128AvgrU is a constructor for unionOperation with operationKindV128AvgrU.
+//
+// This corresponds to wasm.OpcodeVecI8x16AvgrUName.
+func newOperationV128AvgrU(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128AvgrU, B1: shape}
+}
+
+// NewOperationV128Pmin is a constructor for unionOperation with operationKindV128Pmin.
+//
+// This corresponds to wasm.OpcodeVecF32x4PminName wasm.OpcodeVecF64x2PminName.
+func newOperationV128Pmin(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Pmin, B1: shape}
+}
+
+// NewOperationV128Pmax is a constructor for unionOperation with operationKindV128Pmax.
+//
+// This corresponds to wasm.OpcodeVecF32x4PmaxName wasm.OpcodeVecF64x2PmaxName.
+func newOperationV128Pmax(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Pmax, B1: shape}
+}
+
+// NewOperationV128Ceil is a constructor for unionOperation with operationKindV128Ceil.
+//
+// This corresponds to wasm.OpcodeVecF32x4CeilName wasm.OpcodeVecF64x2CeilName
+func newOperationV128Ceil(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Ceil, B1: shape}
+}
+
+// NewOperationV128Floor is a constructor for unionOperation with operationKindV128Floor.
+//
+// This corresponds to wasm.OpcodeVecF32x4FloorName wasm.OpcodeVecF64x2FloorName
+func newOperationV128Floor(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Floor, B1: shape}
+}
+
+// NewOperationV128Trunc is a constructor for unionOperation with operationKindV128Trunc.
+//
+// This corresponds to wasm.OpcodeVecF32x4TruncName wasm.OpcodeVecF64x2TruncName
+func newOperationV128Trunc(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Trunc, B1: shape}
+}
+
+// NewOperationV128Nearest is a constructor for unionOperation with operationKindV128Nearest.
+//
+// This corresponds to wasm.OpcodeVecF32x4NearestName wasm.OpcodeVecF64x2NearestName
+func newOperationV128Nearest(shape shape) unionOperation {
+ return unionOperation{Kind: operationKindV128Nearest, B1: shape}
+}
+
+// NewOperationV128Extend is a constructor for unionOperation with operationKindV128Extend.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI16x8ExtendLowI8x16SName wasm.OpcodeVecI16x8ExtendHighI8x16SName
+// wasm.OpcodeVecI16x8ExtendLowI8x16UName wasm.OpcodeVecI16x8ExtendHighI8x16UName
+// wasm.OpcodeVecI32x4ExtendLowI16x8SName wasm.OpcodeVecI32x4ExtendHighI16x8SName
+// wasm.OpcodeVecI32x4ExtendLowI16x8UName wasm.OpcodeVecI32x4ExtendHighI16x8UName
+// wasm.OpcodeVecI64x2ExtendLowI32x4SName wasm.OpcodeVecI64x2ExtendHighI32x4SName
+// wasm.OpcodeVecI64x2ExtendLowI32x4UName wasm.OpcodeVecI64x2ExtendHighI32x4UName
+//
+// originshape is the shape of the original lanes for extension which is
+// either shapeI8x16, shapeI16x8, or shapeI32x4.
+// useLow is true if it uses the lower half of the vector for extension.
+func newOperationV128Extend(originshape shape, signed bool, useLow bool) unionOperation {
+ op := unionOperation{Kind: operationKindV128Extend}
+ op.B1 = originshape
+ if signed {
+ op.B2 = 1
+ }
+ op.B3 = useLow
+ return op
+}
+
+// NewOperationV128ExtMul is a constructor for unionOperation with operationKindV128ExtMul.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI16x8ExtMulLowI8x16SName wasm.OpcodeVecI16x8ExtMulLowI8x16UName
+// wasm.OpcodeVecI16x8ExtMulHighI8x16SName wasm.OpcodeVecI16x8ExtMulHighI8x16UName
+// wasm.OpcodeVecI32x4ExtMulLowI16x8SName wasm.OpcodeVecI32x4ExtMulLowI16x8UName
+// wasm.OpcodeVecI32x4ExtMulHighI16x8SName wasm.OpcodeVecI32x4ExtMulHighI16x8UName
+// wasm.OpcodeVecI64x2ExtMulLowI32x4SName wasm.OpcodeVecI64x2ExtMulLowI32x4UName
+// wasm.OpcodeVecI64x2ExtMulHighI32x4SName wasm.OpcodeVecI64x2ExtMulHighI32x4UName.
+//
+// originshape is the shape of the original lanes for extension which is
+// either shapeI8x16, shapeI16x8, or shapeI32x4.
+// useLow is true if it uses the lower half of the vector for extension.
+func newOperationV128ExtMul(originshape shape, signed bool, useLow bool) unionOperation {
+ op := unionOperation{Kind: operationKindV128ExtMul}
+ op.B1 = originshape
+ if signed {
+ op.B2 = 1
+ }
+ op.B3 = useLow
+ return op
+}
+
+// NewOperationV128Q15mulrSatS is a constructor for unionOperation with operationKindV128Q15mulrSatS.
+//
+// This corresponds to wasm.OpcodeVecI16x8Q15mulrSatSName
+func newOperationV128Q15mulrSatS() unionOperation {
+ return unionOperation{Kind: operationKindV128Q15mulrSatS}
+}
+
+// NewOperationV128ExtAddPairwise is a constructor for unionOperation with operationKindV128ExtAddPairwise.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI16x8ExtaddPairwiseI8x16SName wasm.OpcodeVecI16x8ExtaddPairwiseI8x16UName
+// wasm.OpcodeVecI32x4ExtaddPairwiseI16x8SName wasm.OpcodeVecI32x4ExtaddPairwiseI16x8UName.
+//
+// originshape is the shape of the original lanes for extension which is
+// either shapeI8x16, or shapeI16x8.
+func newOperationV128ExtAddPairwise(originshape shape, signed bool) unionOperation {
+ return unionOperation{Kind: operationKindV128ExtAddPairwise, B1: originshape, B3: signed}
+}
+
+// NewOperationV128FloatPromote is a constructor for unionOperation with operationKindV128FloatPromote.
+//
+// This corresponds to wasm.OpcodeVecF64x2PromoteLowF32x4ZeroName.
+// This discards the higher 64-bit of a vector, and promotes the two
+// 32-bit floats in the lower 64-bit as two 64-bit floats.
+func newOperationV128FloatPromote() unionOperation {
+ return unionOperation{Kind: operationKindV128FloatPromote}
+}
+
+// NewOperationV128FloatDemote is a constructor for unionOperation with operationKindV128FloatDemote.
+//
+// This corresponds to wasm.OpcodeVecF32x4DemoteF64x2ZeroName.
+func newOperationV128FloatDemote() unionOperation {
+ return unionOperation{Kind: operationKindV128FloatDemote}
+}
+
+// NewOperationV128FConvertFromI is a constructor for unionOperation with operationKindV128FConvertFromI.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecF32x4ConvertI32x4SName wasm.OpcodeVecF32x4ConvertI32x4UName
+// wasm.OpcodeVecF64x2ConvertLowI32x4SName wasm.OpcodeVecF64x2ConvertLowI32x4UName.
+//
+// destinationshape is the shape of the destination lanes for conversion which is
+// either shapeF32x4, or shapeF64x2.
+func newOperationV128FConvertFromI(destinationshape shape, signed bool) unionOperation {
+ return unionOperation{Kind: operationKindV128FConvertFromI, B1: destinationshape, B3: signed}
+}
+
+// NewOperationV128Dot is a constructor for unionOperation with operationKindV128Dot.
+//
+// This corresponds to wasm.OpcodeVecI32x4DotI16x8SName
+func newOperationV128Dot() unionOperation {
+ return unionOperation{Kind: operationKindV128Dot}
+}
+
+// NewOperationV128Narrow is a constructor for unionOperation with operationKindV128Narrow.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI8x16NarrowI16x8SName wasm.OpcodeVecI8x16NarrowI16x8UName
+// wasm.OpcodeVecI16x8NarrowI32x4SName wasm.OpcodeVecI16x8NarrowI32x4UName.
+//
+// originshape is the shape of the original lanes for narrowing which is
+// either shapeI16x8, or shapeI32x4.
+func newOperationV128Narrow(originshape shape, signed bool) unionOperation {
+ return unionOperation{Kind: operationKindV128Narrow, B1: originshape, B3: signed}
+}
+
+// NewOperationV128ITruncSatFromF is a constructor for unionOperation with operationKindV128ITruncSatFromF.
+//
+// This corresponds to
+//
+// wasm.OpcodeVecI32x4TruncSatF64x2UZeroName wasm.OpcodeVecI32x4TruncSatF64x2SZeroName
+// wasm.OpcodeVecI32x4TruncSatF32x4UName wasm.OpcodeVecI32x4TruncSatF32x4SName.
+//
+// originshape is the shape of the original lanes for truncation which is
+// either shapeF32x4, or shapeF64x2.
+func newOperationV128ITruncSatFromF(originshape shape, signed bool) unionOperation {
+ return unionOperation{Kind: operationKindV128ITruncSatFromF, B1: originshape, B3: signed}
+}
+
+// atomicArithmeticOp is the type for the operation kind of atomic arithmetic operations.
+type atomicArithmeticOp byte
+
+const (
+ // atomicArithmeticOpAdd is the kind for an add operation.
+ atomicArithmeticOpAdd atomicArithmeticOp = iota
+ // atomicArithmeticOpSub is the kind for a sub operation.
+ atomicArithmeticOpSub
+ // atomicArithmeticOpAnd is the kind for a bitwise and operation.
+ atomicArithmeticOpAnd
+ // atomicArithmeticOpOr is the kind for a bitwise or operation.
+ atomicArithmeticOpOr
+ // atomicArithmeticOpXor is the kind for a bitwise xor operation.
+ atomicArithmeticOpXor
+ // atomicArithmeticOpNop is the kind for a nop operation.
+ atomicArithmeticOpNop
+)
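+
+// Illustrative sketch (not part of wazero; the apply32 helper is an
+// assumption): how an interpreter could apply an atomicArithmeticOp to
+// 32-bit operands. atomicArithmeticOpNop returns the operand unchanged,
+// which would suit a plain exchange (xchg), where the operand replaces
+// the old value.
+//
+//	func apply32(op atomicArithmeticOp, old, operand uint32) uint32 {
+//		switch op {
+//		case atomicArithmeticOpAdd:
+//			return old + operand
+//		case atomicArithmeticOpSub:
+//			return old - operand
+//		case atomicArithmeticOpAnd:
+//			return old & operand
+//		case atomicArithmeticOpOr:
+//			return old | operand
+//		case atomicArithmeticOpXor:
+//			return old ^ operand
+//		default: // atomicArithmeticOpNop: keep the operand as-is.
+//			return operand
+//		}
+//	}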
+
+// newOperationAtomicMemoryWait is a constructor for unionOperation with operationKindAtomicMemoryWait.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicMemoryWait32Name wasm.OpcodeAtomicMemoryWait64Name
+func newOperationAtomicMemoryWait(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindAtomicMemoryWait, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicMemoryNotify is a constructor for unionOperation with operationKindAtomicMemoryNotify.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicMemoryNotifyName
+func newOperationAtomicMemoryNotify(arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindAtomicMemoryNotify, U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicFence is a constructor for unionOperation with operationKindAtomicFence.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicFenceName
+func newOperationAtomicFence() unionOperation {
+ return unionOperation{Kind: operationKindAtomicFence}
+}
+
+// newOperationAtomicLoad is a constructor for unionOperation with operationKindAtomicLoad.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32LoadName wasm.OpcodeAtomicI64LoadName
+func newOperationAtomicLoad(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindAtomicLoad, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicLoad8 is a constructor for unionOperation with operationKindAtomicLoad8.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32Load8UName wasm.OpcodeAtomicI64Load8UName
+func newOperationAtomicLoad8(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindAtomicLoad8, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicLoad16 is a constructor for unionOperation with operationKindAtomicLoad16.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32Load16UName wasm.OpcodeAtomicI64Load16UName
+func newOperationAtomicLoad16(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindAtomicLoad16, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicStore is a constructor for unionOperation with operationKindAtomicStore.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32StoreName wasm.OpcodeAtomicI64StoreName
+func newOperationAtomicStore(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindAtomicStore, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicStore8 is a constructor for unionOperation with operationKindAtomicStore8.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32Store8UName wasm.OpcodeAtomicI64Store8UName
+func newOperationAtomicStore8(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindAtomicStore8, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicStore16 is a constructor for unionOperation with operationKindAtomicStore16.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32Store16UName wasm.OpcodeAtomicI64Store16UName
+func newOperationAtomicStore16(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindAtomicStore16, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicRMW is a constructor for unionOperation with operationKindAtomicRMW.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32RmwAddName wasm.OpcodeAtomicI64RmwAddName
+// wasm.OpcodeAtomicI32RmwSubName wasm.OpcodeAtomicI64RmwSubName
+// wasm.OpcodeAtomicI32RmwAndName wasm.OpcodeAtomicI64RmwAndName
+// wasm.OpcodeAtomicI32RmwOrName wasm.OpcodeAtomicI64RmwOrName
+// wasm.OpcodeAtomicI32RmwXorName wasm.OpcodeAtomicI64RmwXorName
+func newOperationAtomicRMW(unsignedType unsignedType, arg memoryArg, op atomicArithmeticOp) unionOperation {
+ return unionOperation{Kind: operationKindAtomicRMW, B1: byte(unsignedType), B2: byte(op), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
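+
+// Decoding sketch (not part of wazero; the variable names and the memoryArg
+// value arg are assumptions): the constructor above flattens its arguments
+// into unionOperation fields, which an interpreter loop would read back
+// roughly as:
+//
+//	op := newOperationAtomicRMW(unsignedTypeI32, arg, atomicArithmeticOpAdd)
+//	valueType := unsignedType(op.B1)   // operand type (i32 or i64)
+//	arith := atomicArithmeticOp(op.B2) // add/sub/and/or/xor/nop
+//	alignment, offset := op.U1, op.U2  // from memoryArg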
+
+// newOperationAtomicRMW8 is a constructor for unionOperation with operationKindAtomicRMW8.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32Rmw8AddUName wasm.OpcodeAtomicI64Rmw8AddUName
+// wasm.OpcodeAtomicI32Rmw8SubUName wasm.OpcodeAtomicI64Rmw8SubUName
+// wasm.OpcodeAtomicI32Rmw8AndUName wasm.OpcodeAtomicI64Rmw8AndUName
+// wasm.OpcodeAtomicI32Rmw8OrUName wasm.OpcodeAtomicI64Rmw8OrUName
+// wasm.OpcodeAtomicI32Rmw8XorUName wasm.OpcodeAtomicI64Rmw8XorUName
+func newOperationAtomicRMW8(unsignedType unsignedType, arg memoryArg, op atomicArithmeticOp) unionOperation {
+ return unionOperation{Kind: operationKindAtomicRMW8, B1: byte(unsignedType), B2: byte(op), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicRMW16 is a constructor for unionOperation with operationKindAtomicRMW16.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32Rmw16AddUName wasm.OpcodeAtomicI64Rmw16AddUName
+// wasm.OpcodeAtomicI32Rmw16SubUName wasm.OpcodeAtomicI64Rmw16SubUName
+// wasm.OpcodeAtomicI32Rmw16AndUName wasm.OpcodeAtomicI64Rmw16AndUName
+// wasm.OpcodeAtomicI32Rmw16OrUName wasm.OpcodeAtomicI64Rmw16OrUName
+// wasm.OpcodeAtomicI32Rmw16XorUName wasm.OpcodeAtomicI64Rmw16XorUName
+func newOperationAtomicRMW16(unsignedType unsignedType, arg memoryArg, op atomicArithmeticOp) unionOperation {
+ return unionOperation{Kind: operationKindAtomicRMW16, B1: byte(unsignedType), B2: byte(op), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicRMWCmpxchg is a constructor for unionOperation with operationKindAtomicRMWCmpxchg.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32RmwCmpxchgName wasm.OpcodeAtomicI64RmwCmpxchgName
+func newOperationAtomicRMWCmpxchg(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindAtomicRMWCmpxchg, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicRMW8Cmpxchg is a constructor for unionOperation with operationKindAtomicRMW8Cmpxchg.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32Rmw8CmpxchgUName wasm.OpcodeAtomicI64Rmw8CmpxchgUName
+func newOperationAtomicRMW8Cmpxchg(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindAtomicRMW8Cmpxchg, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
+
+// newOperationAtomicRMW16Cmpxchg is a constructor for unionOperation with operationKindAtomicRMW16Cmpxchg.
+//
+// This corresponds to
+//
+// wasm.OpcodeAtomicI32Rmw16CmpxchgUName wasm.OpcodeAtomicI64Rmw16CmpxchgUName
+func newOperationAtomicRMW16Cmpxchg(unsignedType unsignedType, arg memoryArg) unionOperation {
+ return unionOperation{Kind: operationKindAtomicRMW16Cmpxchg, B1: byte(unsignedType), U1: uint64(arg.Alignment), U2: uint64(arg.Offset)}
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/signature.go b/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/signature.go
new file mode 100644
index 000000000..7b9d5602d
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/interpreter/signature.go
@@ -0,0 +1,767 @@
+package interpreter
+
+import (
+ "fmt"
+
+ "github.com/tetratelabs/wazero/internal/wasm"
+)
+
+// signature represents how a Wasm opcode
+// manipulates the value stack in terms of value types.
+type signature struct {
+ in, out []unsignedType
+}
+
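+// A minimal sketch of how a signature drives stack validation (not part of
+// wazero; typeStack and its methods are assumptions). For example, i32.add
+// uses signature_I32I32_I32 below: pop two i32 values, then push one i32.
+//
+//	func applySignature(stack *typeStack, sig *signature) error {
+//		for i := len(sig.in) - 1; i >= 0; i-- { // pop inputs, top of stack first
+//			if err := stack.popExpecting(sig.in[i]); err != nil {
+//				return err
+//			}
+//		}
+//		for _, t := range sig.out {
+//			stack.push(t) // push results in order
+//		}
+//		return nil
+//	}
+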
+var (
+ signature_None_None = &signature{}
+ signature_Unknown_None = &signature{
+ in: []unsignedType{unsignedTypeUnknown},
+ }
+ signature_None_I32 = &signature{
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_None_I64 = &signature{
+ out: []unsignedType{unsignedTypeI64},
+ }
+ signature_None_V128 = &signature{
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_None_F32 = &signature{
+ out: []unsignedType{unsignedTypeF32},
+ }
+ signature_None_F64 = &signature{
+ out: []unsignedType{unsignedTypeF64},
+ }
+ signature_I32_None = &signature{
+ in: []unsignedType{unsignedTypeI32},
+ }
+ signature_I64_None = &signature{
+ in: []unsignedType{unsignedTypeI64},
+ }
+ signature_F32_None = &signature{
+ in: []unsignedType{unsignedTypeF32},
+ }
+ signature_F64_None = &signature{
+ in: []unsignedType{unsignedTypeF64},
+ }
+ signature_V128_None = &signature{
+ in: []unsignedType{unsignedTypeV128},
+ }
+ signature_I32_I32 = &signature{
+ in: []unsignedType{unsignedTypeI32},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_I32_I64 = &signature{
+ in: []unsignedType{unsignedTypeI32},
+ out: []unsignedType{unsignedTypeI64},
+ }
+ signature_I64_I64 = &signature{
+ in: []unsignedType{unsignedTypeI64},
+ out: []unsignedType{unsignedTypeI64},
+ }
+ signature_I32_F32 = &signature{
+ in: []unsignedType{unsignedTypeI32},
+ out: []unsignedType{unsignedTypeF32},
+ }
+ signature_I32_F64 = &signature{
+ in: []unsignedType{unsignedTypeI32},
+ out: []unsignedType{unsignedTypeF64},
+ }
+ signature_I64_I32 = &signature{
+ in: []unsignedType{unsignedTypeI64},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_I64_F32 = &signature{
+ in: []unsignedType{unsignedTypeI64},
+ out: []unsignedType{unsignedTypeF32},
+ }
+ signature_I64_F64 = &signature{
+ in: []unsignedType{unsignedTypeI64},
+ out: []unsignedType{unsignedTypeF64},
+ }
+ signature_F32_I32 = &signature{
+ in: []unsignedType{unsignedTypeF32},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_F32_I64 = &signature{
+ in: []unsignedType{unsignedTypeF32},
+ out: []unsignedType{unsignedTypeI64},
+ }
+ signature_F32_F64 = &signature{
+ in: []unsignedType{unsignedTypeF32},
+ out: []unsignedType{unsignedTypeF64},
+ }
+ signature_F32_F32 = &signature{
+ in: []unsignedType{unsignedTypeF32},
+ out: []unsignedType{unsignedTypeF32},
+ }
+ signature_F64_I32 = &signature{
+ in: []unsignedType{unsignedTypeF64},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_F64_F32 = &signature{
+ in: []unsignedType{unsignedTypeF64},
+ out: []unsignedType{unsignedTypeF32},
+ }
+ signature_F64_I64 = &signature{
+ in: []unsignedType{unsignedTypeF64},
+ out: []unsignedType{unsignedTypeI64},
+ }
+ signature_F64_F64 = &signature{
+ in: []unsignedType{unsignedTypeF64},
+ out: []unsignedType{unsignedTypeF64},
+ }
+ signature_I32I32_None = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeI32},
+ }
+
+ signature_I32I32_I32 = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeI32},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_I32I64_None = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeI64},
+ }
+ signature_I32F32_None = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeF32},
+ }
+ signature_I32F64_None = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeF64},
+ }
+ signature_I64I32_I32 = &signature{
+ in: []unsignedType{unsignedTypeI64, unsignedTypeI32},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_I64I64_I32 = &signature{
+ in: []unsignedType{unsignedTypeI64, unsignedTypeI64},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_I64I64_I64 = &signature{
+ in: []unsignedType{unsignedTypeI64, unsignedTypeI64},
+ out: []unsignedType{unsignedTypeI64},
+ }
+ signature_F32F32_I32 = &signature{
+ in: []unsignedType{unsignedTypeF32, unsignedTypeF32},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_F32F32_F32 = &signature{
+ in: []unsignedType{unsignedTypeF32, unsignedTypeF32},
+ out: []unsignedType{unsignedTypeF32},
+ }
+ signature_F64F64_I32 = &signature{
+ in: []unsignedType{unsignedTypeF64, unsignedTypeF64},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_F64F64_F64 = &signature{
+ in: []unsignedType{unsignedTypeF64, unsignedTypeF64},
+ out: []unsignedType{unsignedTypeF64},
+ }
+ signature_I32I32I32_None = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeI32, unsignedTypeI32},
+ }
+ signature_I32I64I32_None = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeI64, unsignedTypeI32},
+ }
+ signature_UnknownUnknownI32_Unknown = &signature{
+ in: []unsignedType{unsignedTypeUnknown, unsignedTypeUnknown, unsignedTypeI32},
+ out: []unsignedType{unsignedTypeUnknown},
+ }
+ signature_V128V128_V128 = &signature{
+ in: []unsignedType{unsignedTypeV128, unsignedTypeV128},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_V128V128V128_V128 = &signature{
+ in: []unsignedType{unsignedTypeV128, unsignedTypeV128, unsignedTypeV128},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_I32_V128 = &signature{
+ in: []unsignedType{unsignedTypeI32},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_I32V128_None = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeV128},
+ }
+ signature_I32V128_V128 = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeV128},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_V128I32_V128 = &signature{
+ in: []unsignedType{unsignedTypeV128, unsignedTypeI32},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_V128I64_V128 = &signature{
+ in: []unsignedType{unsignedTypeV128, unsignedTypeI64},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_V128F32_V128 = &signature{
+ in: []unsignedType{unsignedTypeV128, unsignedTypeF32},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_V128F64_V128 = &signature{
+ in: []unsignedType{unsignedTypeV128, unsignedTypeF64},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_V128_I32 = &signature{
+ in: []unsignedType{unsignedTypeV128},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_V128_I64 = &signature{
+ in: []unsignedType{unsignedTypeV128},
+ out: []unsignedType{unsignedTypeI64},
+ }
+ signature_V128_F32 = &signature{
+ in: []unsignedType{unsignedTypeV128},
+ out: []unsignedType{unsignedTypeF32},
+ }
+ signature_V128_F64 = &signature{
+ in: []unsignedType{unsignedTypeV128},
+ out: []unsignedType{unsignedTypeF64},
+ }
+ signature_V128_V128 = &signature{
+ in: []unsignedType{unsignedTypeV128},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_I64_V128 = &signature{
+ in: []unsignedType{unsignedTypeI64},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_F32_V128 = &signature{
+ in: []unsignedType{unsignedTypeF32},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_F64_V128 = &signature{
+ in: []unsignedType{unsignedTypeF64},
+ out: []unsignedType{unsignedTypeV128},
+ }
+ signature_I32I64_I64 = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeI64},
+ out: []unsignedType{unsignedTypeI64},
+ }
+ signature_I32I32I64_I32 = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeI32, unsignedTypeI64},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_I32I64I64_I32 = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeI64, unsignedTypeI64},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_I32I32I32_I32 = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeI32, unsignedTypeI32},
+ out: []unsignedType{unsignedTypeI32},
+ }
+ signature_I32I64I64_I64 = &signature{
+ in: []unsignedType{unsignedTypeI32, unsignedTypeI64, unsignedTypeI64},
+ out: []unsignedType{unsignedTypeI64},
+ }
+)
+
+// wasmOpcodeSignature returns the signature of the given Wasm opcode.
+// Note that some opcodes' signatures vary depending on
+// the function instance (for example, local types).
+// The "index" parameter is not used by most opcodes.
+// The returned signature is used for stack validation when lowering Wasm opcodes to interpreterir.
+func (c *compiler) wasmOpcodeSignature(op wasm.Opcode, index uint32) (*signature, error) {
+ switch op {
+ case wasm.OpcodeUnreachable, wasm.OpcodeNop, wasm.OpcodeBlock, wasm.OpcodeLoop:
+ return signature_None_None, nil
+ case wasm.OpcodeIf:
+ return signature_I32_None, nil
+ case wasm.OpcodeElse, wasm.OpcodeEnd, wasm.OpcodeBr:
+ return signature_None_None, nil
+ case wasm.OpcodeBrIf, wasm.OpcodeBrTable:
+ return signature_I32_None, nil
+ case wasm.OpcodeReturn:
+ return signature_None_None, nil
+ case wasm.OpcodeCall:
+ return c.funcTypeToSigs.get(c.funcs[index], false /* indirect */), nil
+ case wasm.OpcodeCallIndirect:
+ return c.funcTypeToSigs.get(index, true /* indirect */), nil
+ case wasm.OpcodeDrop:
+ return signature_Unknown_None, nil
+ case wasm.OpcodeSelect, wasm.OpcodeTypedSelect:
+ return signature_UnknownUnknownI32_Unknown, nil
+ case wasm.OpcodeLocalGet:
+ inputLen := uint32(len(c.sig.Params))
+ if l := uint32(len(c.localTypes)) + inputLen; index >= l {
+ return nil, fmt.Errorf("invalid local index for local.get %d >= %d", index, l)
+ }
+ var t wasm.ValueType
+ if index < inputLen {
+ t = c.sig.Params[index]
+ } else {
+ t = c.localTypes[index-inputLen]
+ }
+ return wasmValueTypeToUnsignedOutSignature(t), nil
+ case wasm.OpcodeLocalSet:
+ inputLen := uint32(len(c.sig.Params))
+ if l := uint32(len(c.localTypes)) + inputLen; index >= l {
+ return nil, fmt.Errorf("invalid local index for local.get %d >= %d", index, l)
+ }
+ var t wasm.ValueType
+ if index < inputLen {
+ t = c.sig.Params[index]
+ } else {
+ t = c.localTypes[index-inputLen]
+ }
+ return wasmValueTypeToUnsignedInSignature(t), nil
+ case wasm.OpcodeLocalTee:
+ inputLen := uint32(len(c.sig.Params))
+ if l := uint32(len(c.localTypes)) + inputLen; index >= l {
+ return nil, fmt.Errorf("invalid local index for local.get %d >= %d", index, l)
+ }
+ var t wasm.ValueType
+ if index < inputLen {
+ t = c.sig.Params[index]
+ } else {
+ t = c.localTypes[index-inputLen]
+ }
+ return wasmValueTypeToUnsignedInOutSignature(t), nil
+ case wasm.OpcodeGlobalGet:
+ if len(c.globals) <= int(index) {
+ return nil, fmt.Errorf("invalid global index for global.get %d >= %d", index, len(c.globals))
+ }
+ return wasmValueTypeToUnsignedOutSignature(c.globals[index].ValType), nil
+ case wasm.OpcodeGlobalSet:
+ if len(c.globals) <= int(index) {
+ return nil, fmt.Errorf("invalid global index for global.get %d >= %d", index, len(c.globals))
+ }
+ return wasmValueTypeToUnsignedInSignature(c.globals[index].ValType), nil
+ case wasm.OpcodeI32Load:
+ return signature_I32_I32, nil
+ case wasm.OpcodeI64Load:
+ return signature_I32_I64, nil
+ case wasm.OpcodeF32Load:
+ return signature_I32_F32, nil
+ case wasm.OpcodeF64Load:
+ return signature_I32_F64, nil
+ case wasm.OpcodeI32Load8S, wasm.OpcodeI32Load8U, wasm.OpcodeI32Load16S, wasm.OpcodeI32Load16U:
+ return signature_I32_I32, nil
+ case wasm.OpcodeI64Load8S, wasm.OpcodeI64Load8U, wasm.OpcodeI64Load16S, wasm.OpcodeI64Load16U,
+ wasm.OpcodeI64Load32S, wasm.OpcodeI64Load32U:
+ return signature_I32_I64, nil
+ case wasm.OpcodeI32Store:
+ return signature_I32I32_None, nil
+ case wasm.OpcodeI64Store:
+ return signature_I32I64_None, nil
+ case wasm.OpcodeF32Store:
+ return signature_I32F32_None, nil
+ case wasm.OpcodeF64Store:
+ return signature_I32F64_None, nil
+ case wasm.OpcodeI32Store8:
+ return signature_I32I32_None, nil
+ case wasm.OpcodeI32Store16:
+ return signature_I32I32_None, nil
+ case wasm.OpcodeI64Store8:
+ return signature_I32I64_None, nil
+ case wasm.OpcodeI64Store16:
+ return signature_I32I64_None, nil
+ case wasm.OpcodeI64Store32:
+ return signature_I32I64_None, nil
+ case wasm.OpcodeMemorySize:
+ return signature_None_I32, nil
+ case wasm.OpcodeMemoryGrow:
+ return signature_I32_I32, nil
+ case wasm.OpcodeI32Const:
+ return signature_None_I32, nil
+ case wasm.OpcodeI64Const:
+ return signature_None_I64, nil
+ case wasm.OpcodeF32Const:
+ return signature_None_F32, nil
+ case wasm.OpcodeF64Const:
+ return signature_None_F64, nil
+ case wasm.OpcodeI32Eqz:
+ return signature_I32_I32, nil
+ case wasm.OpcodeI32Eq, wasm.OpcodeI32Ne, wasm.OpcodeI32LtS,
+ wasm.OpcodeI32LtU, wasm.OpcodeI32GtS, wasm.OpcodeI32GtU,
+ wasm.OpcodeI32LeS, wasm.OpcodeI32LeU, wasm.OpcodeI32GeS,
+ wasm.OpcodeI32GeU:
+ return signature_I32I32_I32, nil
+ case wasm.OpcodeI64Eqz:
+ return signature_I64_I32, nil
+ case wasm.OpcodeI64Eq, wasm.OpcodeI64Ne, wasm.OpcodeI64LtS,
+ wasm.OpcodeI64LtU, wasm.OpcodeI64GtS, wasm.OpcodeI64GtU,
+ wasm.OpcodeI64LeS, wasm.OpcodeI64LeU, wasm.OpcodeI64GeS,
+ wasm.OpcodeI64GeU:
+ return signature_I64I64_I32, nil
+ case wasm.OpcodeF32Eq, wasm.OpcodeF32Ne, wasm.OpcodeF32Lt,
+ wasm.OpcodeF32Gt, wasm.OpcodeF32Le, wasm.OpcodeF32Ge:
+ return signature_F32F32_I32, nil
+ case wasm.OpcodeF64Eq, wasm.OpcodeF64Ne, wasm.OpcodeF64Lt,
+ wasm.OpcodeF64Gt, wasm.OpcodeF64Le, wasm.OpcodeF64Ge:
+ return signature_F64F64_I32, nil
+ case wasm.OpcodeI32Clz, wasm.OpcodeI32Ctz, wasm.OpcodeI32Popcnt:
+ return signature_I32_I32, nil
+ case wasm.OpcodeI32Add, wasm.OpcodeI32Sub, wasm.OpcodeI32Mul,
+ wasm.OpcodeI32DivS, wasm.OpcodeI32DivU, wasm.OpcodeI32RemS,
+ wasm.OpcodeI32RemU, wasm.OpcodeI32And, wasm.OpcodeI32Or,
+ wasm.OpcodeI32Xor, wasm.OpcodeI32Shl, wasm.OpcodeI32ShrS,
+ wasm.OpcodeI32ShrU, wasm.OpcodeI32Rotl, wasm.OpcodeI32Rotr:
+ return signature_I32I32_I32, nil
+ case wasm.OpcodeI64Clz, wasm.OpcodeI64Ctz, wasm.OpcodeI64Popcnt:
+ return signature_I64_I64, nil
+ case wasm.OpcodeI64Add, wasm.OpcodeI64Sub, wasm.OpcodeI64Mul,
+ wasm.OpcodeI64DivS, wasm.OpcodeI64DivU, wasm.OpcodeI64RemS,
+ wasm.OpcodeI64RemU, wasm.OpcodeI64And, wasm.OpcodeI64Or,
+ wasm.OpcodeI64Xor, wasm.OpcodeI64Shl, wasm.OpcodeI64ShrS,
+ wasm.OpcodeI64ShrU, wasm.OpcodeI64Rotl, wasm.OpcodeI64Rotr:
+ return signature_I64I64_I64, nil
+ case wasm.OpcodeF32Abs, wasm.OpcodeF32Neg, wasm.OpcodeF32Ceil,
+ wasm.OpcodeF32Floor, wasm.OpcodeF32Trunc, wasm.OpcodeF32Nearest,
+ wasm.OpcodeF32Sqrt:
+ return signature_F32_F32, nil
+ case wasm.OpcodeF32Add, wasm.OpcodeF32Sub, wasm.OpcodeF32Mul,
+ wasm.OpcodeF32Div, wasm.OpcodeF32Min, wasm.OpcodeF32Max,
+ wasm.OpcodeF32Copysign:
+ return signature_F32F32_F32, nil
+ case wasm.OpcodeF64Abs, wasm.OpcodeF64Neg, wasm.OpcodeF64Ceil,
+ wasm.OpcodeF64Floor, wasm.OpcodeF64Trunc, wasm.OpcodeF64Nearest,
+ wasm.OpcodeF64Sqrt:
+ return signature_F64_F64, nil
+ case wasm.OpcodeF64Add, wasm.OpcodeF64Sub, wasm.OpcodeF64Mul,
+ wasm.OpcodeF64Div, wasm.OpcodeF64Min, wasm.OpcodeF64Max,
+ wasm.OpcodeF64Copysign:
+ return signature_F64F64_F64, nil
+ case wasm.OpcodeI32WrapI64:
+ return signature_I64_I32, nil
+ case wasm.OpcodeI32TruncF32S, wasm.OpcodeI32TruncF32U:
+ return signature_F32_I32, nil
+ case wasm.OpcodeI32TruncF64S, wasm.OpcodeI32TruncF64U:
+ return signature_F64_I32, nil
+ case wasm.OpcodeI64ExtendI32S, wasm.OpcodeI64ExtendI32U:
+ return signature_I32_I64, nil
+ case wasm.OpcodeI64TruncF32S, wasm.OpcodeI64TruncF32U:
+ return signature_F32_I64, nil
+ case wasm.OpcodeI64TruncF64S, wasm.OpcodeI64TruncF64U:
+ return signature_F64_I64, nil
+ case wasm.OpcodeF32ConvertI32S, wasm.OpcodeF32ConvertI32U:
+ return signature_I32_F32, nil
+ case wasm.OpcodeF32ConvertI64S, wasm.OpcodeF32ConvertI64U:
+ return signature_I64_F32, nil
+ case wasm.OpcodeF32DemoteF64:
+ return signature_F64_F32, nil
+ case wasm.OpcodeF64ConvertI32S, wasm.OpcodeF64ConvertI32U:
+ return signature_I32_F64, nil
+ case wasm.OpcodeF64ConvertI64S, wasm.OpcodeF64ConvertI64U:
+ return signature_I64_F64, nil
+ case wasm.OpcodeF64PromoteF32:
+ return signature_F32_F64, nil
+ case wasm.OpcodeI32ReinterpretF32:
+ return signature_F32_I32, nil
+ case wasm.OpcodeI64ReinterpretF64:
+ return signature_F64_I64, nil
+ case wasm.OpcodeF32ReinterpretI32:
+ return signature_I32_F32, nil
+ case wasm.OpcodeF64ReinterpretI64:
+ return signature_I64_F64, nil
+ case wasm.OpcodeI32Extend8S, wasm.OpcodeI32Extend16S:
+ return signature_I32_I32, nil
+ case wasm.OpcodeI64Extend8S, wasm.OpcodeI64Extend16S, wasm.OpcodeI64Extend32S:
+ return signature_I64_I64, nil
+ case wasm.OpcodeTableGet:
+ // table.get takes the table's offset and pushes the ref type value (an opaque pointer) onto the stack as an i64 value.
+ return signature_I32_I64, nil
+ case wasm.OpcodeTableSet:
+ // table.set takes the table's offset and the ref type value (an opaque pointer) as an i64 value.
+ return signature_I32I64_None, nil
+ case wasm.OpcodeRefFunc:
+ // ref.func is translated as pushing the compiled function's opaque pointer (uint64) at the interpreterir layer.
+ return signature_None_I64, nil
+ case wasm.OpcodeRefIsNull:
+ // ref.is_null is translated as checking whether the uint64 on top of the stack (an opaque pointer) is zero.
+ return signature_I64_I32, nil
+ case wasm.OpcodeRefNull:
+ // ref.null is translated as i64.const 0.
+ return signature_None_I64, nil
+ case wasm.OpcodeMiscPrefix:
+ switch miscOp := c.body[c.pc+1]; miscOp {
+ case wasm.OpcodeMiscI32TruncSatF32S, wasm.OpcodeMiscI32TruncSatF32U:
+ return signature_F32_I32, nil
+ case wasm.OpcodeMiscI32TruncSatF64S, wasm.OpcodeMiscI32TruncSatF64U:
+ return signature_F64_I32, nil
+ case wasm.OpcodeMiscI64TruncSatF32S, wasm.OpcodeMiscI64TruncSatF32U:
+ return signature_F32_I64, nil
+ case wasm.OpcodeMiscI64TruncSatF64S, wasm.OpcodeMiscI64TruncSatF64U:
+ return signature_F64_I64, nil
+ case wasm.OpcodeMiscMemoryInit, wasm.OpcodeMiscMemoryCopy, wasm.OpcodeMiscMemoryFill,
+ wasm.OpcodeMiscTableInit, wasm.OpcodeMiscTableCopy:
+ return signature_I32I32I32_None, nil
+ case wasm.OpcodeMiscDataDrop, wasm.OpcodeMiscElemDrop:
+ return signature_None_None, nil
+ case wasm.OpcodeMiscTableGrow:
+ return signature_I64I32_I32, nil
+ case wasm.OpcodeMiscTableSize:
+ return signature_None_I32, nil
+ case wasm.OpcodeMiscTableFill:
+ return signature_I32I64I32_None, nil
+ default:
+ return nil, fmt.Errorf("unsupported misc instruction in interpreterir: 0x%x", op)
+ }
+ case wasm.OpcodeVecPrefix:
+ switch vecOp := c.body[c.pc+1]; vecOp {
+ case wasm.OpcodeVecV128Const:
+ return signature_None_V128, nil
+ case wasm.OpcodeVecV128Load, wasm.OpcodeVecV128Load8x8s, wasm.OpcodeVecV128Load8x8u,
+ wasm.OpcodeVecV128Load16x4s, wasm.OpcodeVecV128Load16x4u, wasm.OpcodeVecV128Load32x2s,
+ wasm.OpcodeVecV128Load32x2u, wasm.OpcodeVecV128Load8Splat, wasm.OpcodeVecV128Load16Splat,
+ wasm.OpcodeVecV128Load32Splat, wasm.OpcodeVecV128Load64Splat, wasm.OpcodeVecV128Load32zero,
+ wasm.OpcodeVecV128Load64zero:
+ return signature_I32_V128, nil
+ case wasm.OpcodeVecV128Load8Lane, wasm.OpcodeVecV128Load16Lane,
+ wasm.OpcodeVecV128Load32Lane, wasm.OpcodeVecV128Load64Lane:
+ return signature_I32V128_V128, nil
+ case wasm.OpcodeVecV128Store,
+ wasm.OpcodeVecV128Store8Lane,
+ wasm.OpcodeVecV128Store16Lane,
+ wasm.OpcodeVecV128Store32Lane,
+ wasm.OpcodeVecV128Store64Lane:
+ return signature_I32V128_None, nil
+ case wasm.OpcodeVecI8x16ExtractLaneS,
+ wasm.OpcodeVecI8x16ExtractLaneU,
+ wasm.OpcodeVecI16x8ExtractLaneS,
+ wasm.OpcodeVecI16x8ExtractLaneU,
+ wasm.OpcodeVecI32x4ExtractLane:
+ return signature_V128_I32, nil
+ case wasm.OpcodeVecI64x2ExtractLane:
+ return signature_V128_I64, nil
+ case wasm.OpcodeVecF32x4ExtractLane:
+ return signature_V128_F32, nil
+ case wasm.OpcodeVecF64x2ExtractLane:
+ return signature_V128_F64, nil
+ case wasm.OpcodeVecI8x16ReplaceLane, wasm.OpcodeVecI16x8ReplaceLane, wasm.OpcodeVecI32x4ReplaceLane,
+ wasm.OpcodeVecI8x16Shl, wasm.OpcodeVecI8x16ShrS, wasm.OpcodeVecI8x16ShrU,
+ wasm.OpcodeVecI16x8Shl, wasm.OpcodeVecI16x8ShrS, wasm.OpcodeVecI16x8ShrU,
+ wasm.OpcodeVecI32x4Shl, wasm.OpcodeVecI32x4ShrS, wasm.OpcodeVecI32x4ShrU,
+ wasm.OpcodeVecI64x2Shl, wasm.OpcodeVecI64x2ShrS, wasm.OpcodeVecI64x2ShrU:
+ return signature_V128I32_V128, nil
+ case wasm.OpcodeVecI64x2ReplaceLane:
+ return signature_V128I64_V128, nil
+ case wasm.OpcodeVecF32x4ReplaceLane:
+ return signature_V128F32_V128, nil
+ case wasm.OpcodeVecF64x2ReplaceLane:
+ return signature_V128F64_V128, nil
+ case wasm.OpcodeVecI8x16Splat,
+ wasm.OpcodeVecI16x8Splat,
+ wasm.OpcodeVecI32x4Splat:
+ return signature_I32_V128, nil
+ case wasm.OpcodeVecI64x2Splat:
+ return signature_I64_V128, nil
+ case wasm.OpcodeVecF32x4Splat:
+ return signature_F32_V128, nil
+ case wasm.OpcodeVecF64x2Splat:
+ return signature_F64_V128, nil
+ case wasm.OpcodeVecV128i8x16Shuffle, wasm.OpcodeVecI8x16Swizzle, wasm.OpcodeVecV128And, wasm.OpcodeVecV128Or, wasm.OpcodeVecV128Xor, wasm.OpcodeVecV128AndNot:
+ return signature_V128V128_V128, nil
+ case wasm.OpcodeVecI8x16AllTrue, wasm.OpcodeVecI16x8AllTrue, wasm.OpcodeVecI32x4AllTrue, wasm.OpcodeVecI64x2AllTrue,
+ wasm.OpcodeVecV128AnyTrue,
+ wasm.OpcodeVecI8x16BitMask, wasm.OpcodeVecI16x8BitMask, wasm.OpcodeVecI32x4BitMask, wasm.OpcodeVecI64x2BitMask:
+ return signature_V128_I32, nil
+ case wasm.OpcodeVecV128Not, wasm.OpcodeVecI8x16Neg, wasm.OpcodeVecI16x8Neg, wasm.OpcodeVecI32x4Neg, wasm.OpcodeVecI64x2Neg,
+ wasm.OpcodeVecF32x4Neg, wasm.OpcodeVecF64x2Neg, wasm.OpcodeVecF32x4Sqrt, wasm.OpcodeVecF64x2Sqrt,
+ wasm.OpcodeVecI8x16Abs, wasm.OpcodeVecI8x16Popcnt, wasm.OpcodeVecI16x8Abs, wasm.OpcodeVecI32x4Abs, wasm.OpcodeVecI64x2Abs,
+ wasm.OpcodeVecF32x4Abs, wasm.OpcodeVecF64x2Abs,
+ wasm.OpcodeVecF32x4Ceil, wasm.OpcodeVecF32x4Floor, wasm.OpcodeVecF32x4Trunc, wasm.OpcodeVecF32x4Nearest,
+ wasm.OpcodeVecF64x2Ceil, wasm.OpcodeVecF64x2Floor, wasm.OpcodeVecF64x2Trunc, wasm.OpcodeVecF64x2Nearest,
+ wasm.OpcodeVecI16x8ExtendLowI8x16S, wasm.OpcodeVecI16x8ExtendHighI8x16S, wasm.OpcodeVecI16x8ExtendLowI8x16U, wasm.OpcodeVecI16x8ExtendHighI8x16U,
+ wasm.OpcodeVecI32x4ExtendLowI16x8S, wasm.OpcodeVecI32x4ExtendHighI16x8S, wasm.OpcodeVecI32x4ExtendLowI16x8U, wasm.OpcodeVecI32x4ExtendHighI16x8U,
+ wasm.OpcodeVecI64x2ExtendLowI32x4S, wasm.OpcodeVecI64x2ExtendHighI32x4S, wasm.OpcodeVecI64x2ExtendLowI32x4U, wasm.OpcodeVecI64x2ExtendHighI32x4U,
+ wasm.OpcodeVecI16x8ExtaddPairwiseI8x16S, wasm.OpcodeVecI16x8ExtaddPairwiseI8x16U, wasm.OpcodeVecI32x4ExtaddPairwiseI16x8S, wasm.OpcodeVecI32x4ExtaddPairwiseI16x8U,
+ wasm.OpcodeVecF64x2PromoteLowF32x4Zero, wasm.OpcodeVecF32x4DemoteF64x2Zero,
+ wasm.OpcodeVecF32x4ConvertI32x4S, wasm.OpcodeVecF32x4ConvertI32x4U,
+ wasm.OpcodeVecF64x2ConvertLowI32x4S, wasm.OpcodeVecF64x2ConvertLowI32x4U,
+ wasm.OpcodeVecI32x4TruncSatF32x4S, wasm.OpcodeVecI32x4TruncSatF32x4U,
+ wasm.OpcodeVecI32x4TruncSatF64x2SZero, wasm.OpcodeVecI32x4TruncSatF64x2UZero:
+ return signature_V128_V128, nil
+ case wasm.OpcodeVecV128Bitselect:
+ return signature_V128V128V128_V128, nil
+ case wasm.OpcodeVecI8x16Eq, wasm.OpcodeVecI8x16Ne, wasm.OpcodeVecI8x16LtS, wasm.OpcodeVecI8x16LtU, wasm.OpcodeVecI8x16GtS,
+ wasm.OpcodeVecI8x16GtU, wasm.OpcodeVecI8x16LeS, wasm.OpcodeVecI8x16LeU, wasm.OpcodeVecI8x16GeS, wasm.OpcodeVecI8x16GeU,
+ wasm.OpcodeVecI16x8Eq, wasm.OpcodeVecI16x8Ne, wasm.OpcodeVecI16x8LtS, wasm.OpcodeVecI16x8LtU, wasm.OpcodeVecI16x8GtS,
+ wasm.OpcodeVecI16x8GtU, wasm.OpcodeVecI16x8LeS, wasm.OpcodeVecI16x8LeU, wasm.OpcodeVecI16x8GeS, wasm.OpcodeVecI16x8GeU,
+ wasm.OpcodeVecI32x4Eq, wasm.OpcodeVecI32x4Ne, wasm.OpcodeVecI32x4LtS, wasm.OpcodeVecI32x4LtU, wasm.OpcodeVecI32x4GtS,
+ wasm.OpcodeVecI32x4GtU, wasm.OpcodeVecI32x4LeS, wasm.OpcodeVecI32x4LeU, wasm.OpcodeVecI32x4GeS, wasm.OpcodeVecI32x4GeU,
+ wasm.OpcodeVecI64x2Eq, wasm.OpcodeVecI64x2Ne, wasm.OpcodeVecI64x2LtS, wasm.OpcodeVecI64x2GtS, wasm.OpcodeVecI64x2LeS,
+ wasm.OpcodeVecI64x2GeS, wasm.OpcodeVecF32x4Eq, wasm.OpcodeVecF32x4Ne, wasm.OpcodeVecF32x4Lt, wasm.OpcodeVecF32x4Gt,
+ wasm.OpcodeVecF32x4Le, wasm.OpcodeVecF32x4Ge, wasm.OpcodeVecF64x2Eq, wasm.OpcodeVecF64x2Ne, wasm.OpcodeVecF64x2Lt,
+ wasm.OpcodeVecF64x2Gt, wasm.OpcodeVecF64x2Le, wasm.OpcodeVecF64x2Ge,
+ wasm.OpcodeVecI8x16Add, wasm.OpcodeVecI8x16AddSatS, wasm.OpcodeVecI8x16AddSatU, wasm.OpcodeVecI8x16Sub,
+ wasm.OpcodeVecI8x16SubSatS, wasm.OpcodeVecI8x16SubSatU,
+ wasm.OpcodeVecI16x8Add, wasm.OpcodeVecI16x8AddSatS, wasm.OpcodeVecI16x8AddSatU, wasm.OpcodeVecI16x8Sub,
+ wasm.OpcodeVecI16x8SubSatS, wasm.OpcodeVecI16x8SubSatU, wasm.OpcodeVecI16x8Mul,
+ wasm.OpcodeVecI32x4Add, wasm.OpcodeVecI32x4Sub, wasm.OpcodeVecI32x4Mul,
+ wasm.OpcodeVecI64x2Add, wasm.OpcodeVecI64x2Sub, wasm.OpcodeVecI64x2Mul,
+ wasm.OpcodeVecF32x4Add, wasm.OpcodeVecF32x4Sub, wasm.OpcodeVecF32x4Mul, wasm.OpcodeVecF32x4Div,
+ wasm.OpcodeVecF64x2Add, wasm.OpcodeVecF64x2Sub, wasm.OpcodeVecF64x2Mul, wasm.OpcodeVecF64x2Div,
+ wasm.OpcodeVecI8x16MinS, wasm.OpcodeVecI8x16MinU, wasm.OpcodeVecI8x16MaxS, wasm.OpcodeVecI8x16MaxU, wasm.OpcodeVecI8x16AvgrU,
+ wasm.OpcodeVecI16x8MinS, wasm.OpcodeVecI16x8MinU, wasm.OpcodeVecI16x8MaxS, wasm.OpcodeVecI16x8MaxU, wasm.OpcodeVecI16x8AvgrU,
+ wasm.OpcodeVecI32x4MinS, wasm.OpcodeVecI32x4MinU, wasm.OpcodeVecI32x4MaxS, wasm.OpcodeVecI32x4MaxU,
+ wasm.OpcodeVecF32x4Min, wasm.OpcodeVecF32x4Max, wasm.OpcodeVecF64x2Min, wasm.OpcodeVecF64x2Max,
+ wasm.OpcodeVecF32x4Pmin, wasm.OpcodeVecF32x4Pmax, wasm.OpcodeVecF64x2Pmin, wasm.OpcodeVecF64x2Pmax,
+ wasm.OpcodeVecI16x8Q15mulrSatS,
+ wasm.OpcodeVecI16x8ExtMulLowI8x16S, wasm.OpcodeVecI16x8ExtMulHighI8x16S, wasm.OpcodeVecI16x8ExtMulLowI8x16U, wasm.OpcodeVecI16x8ExtMulHighI8x16U,
+ wasm.OpcodeVecI32x4ExtMulLowI16x8S, wasm.OpcodeVecI32x4ExtMulHighI16x8S, wasm.OpcodeVecI32x4ExtMulLowI16x8U, wasm.OpcodeVecI32x4ExtMulHighI16x8U,
+ wasm.OpcodeVecI64x2ExtMulLowI32x4S, wasm.OpcodeVecI64x2ExtMulHighI32x4S, wasm.OpcodeVecI64x2ExtMulLowI32x4U, wasm.OpcodeVecI64x2ExtMulHighI32x4U,
+ wasm.OpcodeVecI32x4DotI16x8S,
+ wasm.OpcodeVecI8x16NarrowI16x8S, wasm.OpcodeVecI8x16NarrowI16x8U, wasm.OpcodeVecI16x8NarrowI32x4S, wasm.OpcodeVecI16x8NarrowI32x4U:
+ return signature_V128V128_V128, nil
+ default:
+ return nil, fmt.Errorf("unsupported vector instruction in interpreterir: %s", wasm.VectorInstructionName(vecOp))
+ }
+ case wasm.OpcodeAtomicPrefix:
+ switch atomicOp := c.body[c.pc+1]; atomicOp {
+ case wasm.OpcodeAtomicMemoryNotify:
+ return signature_I32I32_I32, nil
+ case wasm.OpcodeAtomicMemoryWait32:
+ return signature_I32I32I64_I32, nil
+ case wasm.OpcodeAtomicMemoryWait64:
+ return signature_I32I64I64_I32, nil
+ case wasm.OpcodeAtomicFence:
+ return signature_None_None, nil
+ case wasm.OpcodeAtomicI32Load, wasm.OpcodeAtomicI32Load8U, wasm.OpcodeAtomicI32Load16U:
+ return signature_I32_I32, nil
+ case wasm.OpcodeAtomicI64Load, wasm.OpcodeAtomicI64Load8U, wasm.OpcodeAtomicI64Load16U, wasm.OpcodeAtomicI64Load32U:
+ return signature_I32_I64, nil
+ case wasm.OpcodeAtomicI32Store, wasm.OpcodeAtomicI32Store8, wasm.OpcodeAtomicI32Store16:
+ return signature_I32I32_None, nil
+ case wasm.OpcodeAtomicI64Store, wasm.OpcodeAtomicI64Store8, wasm.OpcodeAtomicI64Store16, wasm.OpcodeAtomicI64Store32:
+ return signature_I32I64_None, nil
+ case wasm.OpcodeAtomicI32RmwAdd, wasm.OpcodeAtomicI32RmwSub, wasm.OpcodeAtomicI32RmwAnd, wasm.OpcodeAtomicI32RmwOr, wasm.OpcodeAtomicI32RmwXor, wasm.OpcodeAtomicI32RmwXchg,
+ wasm.OpcodeAtomicI32Rmw8AddU, wasm.OpcodeAtomicI32Rmw8SubU, wasm.OpcodeAtomicI32Rmw8AndU, wasm.OpcodeAtomicI32Rmw8OrU, wasm.OpcodeAtomicI32Rmw8XorU, wasm.OpcodeAtomicI32Rmw8XchgU,
+ wasm.OpcodeAtomicI32Rmw16AddU, wasm.OpcodeAtomicI32Rmw16SubU, wasm.OpcodeAtomicI32Rmw16AndU, wasm.OpcodeAtomicI32Rmw16OrU, wasm.OpcodeAtomicI32Rmw16XorU, wasm.OpcodeAtomicI32Rmw16XchgU:
+ return signature_I32I32_I32, nil
+ case wasm.OpcodeAtomicI64RmwAdd, wasm.OpcodeAtomicI64RmwSub, wasm.OpcodeAtomicI64RmwAnd, wasm.OpcodeAtomicI64RmwOr, wasm.OpcodeAtomicI64RmwXor, wasm.OpcodeAtomicI64RmwXchg,
+ wasm.OpcodeAtomicI64Rmw8AddU, wasm.OpcodeAtomicI64Rmw8SubU, wasm.OpcodeAtomicI64Rmw8AndU, wasm.OpcodeAtomicI64Rmw8OrU, wasm.OpcodeAtomicI64Rmw8XorU, wasm.OpcodeAtomicI64Rmw8XchgU,
+ wasm.OpcodeAtomicI64Rmw16AddU, wasm.OpcodeAtomicI64Rmw16SubU, wasm.OpcodeAtomicI64Rmw16AndU, wasm.OpcodeAtomicI64Rmw16OrU, wasm.OpcodeAtomicI64Rmw16XorU, wasm.OpcodeAtomicI64Rmw16XchgU,
+ wasm.OpcodeAtomicI64Rmw32AddU, wasm.OpcodeAtomicI64Rmw32SubU, wasm.OpcodeAtomicI64Rmw32AndU, wasm.OpcodeAtomicI64Rmw32OrU, wasm.OpcodeAtomicI64Rmw32XorU, wasm.OpcodeAtomicI64Rmw32XchgU:
+ return signature_I32I64_I64, nil
+ case wasm.OpcodeAtomicI32RmwCmpxchg, wasm.OpcodeAtomicI32Rmw8CmpxchgU, wasm.OpcodeAtomicI32Rmw16CmpxchgU:
+ return signature_I32I32I32_I32, nil
+ case wasm.OpcodeAtomicI64RmwCmpxchg, wasm.OpcodeAtomicI64Rmw8CmpxchgU, wasm.OpcodeAtomicI64Rmw16CmpxchgU, wasm.OpcodeAtomicI64Rmw32CmpxchgU:
+ return signature_I32I64I64_I64, nil
+ default:
+ return nil, fmt.Errorf("unsupported atomic instruction in interpreterir: %s", wasm.AtomicInstructionName(atomicOp))
+ }
+ default:
+ return nil, fmt.Errorf("unsupported instruction in interpreterir: 0x%x", op)
+ }
+}
+
+// funcTypeToIRSignatures is the per-module cache for looking up the *signature
+// of direct and indirect function calls.
+type funcTypeToIRSignatures struct {
+ directCalls []*signature
+ indirectCalls []*signature
+ wasmTypes []wasm.FunctionType
+}
+
+// get returns the *signature for a direct or indirect call to functions whose type is at `typeIndex`.
+func (f *funcTypeToIRSignatures) get(typeIndex wasm.Index, indirect bool) *signature {
+ var sig *signature
+ if indirect {
+ sig = f.indirectCalls[typeIndex]
+ } else {
+ sig = f.directCalls[typeIndex]
+ }
+ if sig != nil {
+ return sig
+ }
+
+ tp := &f.wasmTypes[typeIndex]
+ if indirect {
+ sig = &signature{
+ in: make([]unsignedType, 0, len(tp.Params)+1), // +1 to reserve space for call indirect index.
+ out: make([]unsignedType, 0, len(tp.Results)),
+ }
+ } else {
+ sig = &signature{
+ in: make([]unsignedType, 0, len(tp.Params)),
+ out: make([]unsignedType, 0, len(tp.Results)),
+ }
+ }
+
+ for _, vt := range tp.Params {
+ sig.in = append(sig.in, wasmValueTypeTounsignedType(vt))
+ }
+ for _, vt := range tp.Results {
+ sig.out = append(sig.out, wasmValueTypeTounsignedType(vt))
+ }
+
+ if indirect {
+ sig.in = append(sig.in, unsignedTypeI32)
+ f.indirectCalls[typeIndex] = sig
+ } else {
+ f.directCalls[typeIndex] = sig
+ }
+ return sig
+}
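+
+// Usage sketch (not part of wazero; the module variable and the slice sizing
+// are assumptions): the per-module cache is allocated once with one slot per
+// function type, then the returned *signature values are shared across call
+// sites.
+//
+//	sigs := &funcTypeToIRSignatures{
+//		directCalls:   make([]*signature, len(module.TypeSection)),
+//		indirectCalls: make([]*signature, len(module.TypeSection)),
+//		wasmTypes:     module.TypeSection,
+//	}
+//	sig := sigs.get(typeIndex, true) // indirect: sig.in gains a trailing i32 for the table offset.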
+
+func wasmValueTypeTounsignedType(vt wasm.ValueType) unsignedType {
+ switch vt {
+ case wasm.ValueTypeI32:
+ return unsignedTypeI32
+ case wasm.ValueTypeI64,
+ // At the interpreterir layer, ref type values are opaque 64-bit pointers.
+ wasm.ValueTypeExternref, wasm.ValueTypeFuncref:
+ return unsignedTypeI64
+ case wasm.ValueTypeF32:
+ return unsignedTypeF32
+ case wasm.ValueTypeF64:
+ return unsignedTypeF64
+ case wasm.ValueTypeV128:
+ return unsignedTypeV128
+ }
+ panic("unreachable")
+}
+
+func wasmValueTypeToUnsignedOutSignature(vt wasm.ValueType) *signature {
+ switch vt {
+ case wasm.ValueTypeI32:
+ return signature_None_I32
+ case wasm.ValueTypeI64,
+ // At the interpreterir layer, ref type values are opaque 64-bit pointers.
+ wasm.ValueTypeExternref, wasm.ValueTypeFuncref:
+ return signature_None_I64
+ case wasm.ValueTypeF32:
+ return signature_None_F32
+ case wasm.ValueTypeF64:
+ return signature_None_F64
+ case wasm.ValueTypeV128:
+ return signature_None_V128
+ }
+ panic("unreachable")
+}
+
+func wasmValueTypeToUnsignedInSignature(vt wasm.ValueType) *signature {
+ switch vt {
+ case wasm.ValueTypeI32:
+ return signature_I32_None
+ case wasm.ValueTypeI64,
+ // At the interpreterir layer, ref type values are opaque 64-bit pointers.
+ wasm.ValueTypeExternref, wasm.ValueTypeFuncref:
+ return signature_I64_None
+ case wasm.ValueTypeF32:
+ return signature_F32_None
+ case wasm.ValueTypeF64:
+ return signature_F64_None
+ case wasm.ValueTypeV128:
+ return signature_V128_None
+ }
+ panic("unreachable")
+}
+
+func wasmValueTypeToUnsignedInOutSignature(vt wasm.ValueType) *signature {
+ switch vt {
+ case wasm.ValueTypeI32:
+ return signature_I32_I32
+ case wasm.ValueTypeI64,
+ // At the interpreterir layer, ref type values are opaque 64-bit pointers.
+ wasm.ValueTypeExternref, wasm.ValueTypeFuncref:
+ return signature_I64_I64
+ case wasm.ValueTypeF32:
+ return signature_F32_F32
+ case wasm.ValueTypeF64:
+ return signature_F64_F64
+ case wasm.ValueTypeV128:
+ return signature_V128_V128
+ }
+ panic("unreachable")
+}