author kim <89579420+NyaaaWhatsUpDoc@users.noreply.github.com> 2024-05-27 15:46:15 +0000
committer GitHub <noreply@github.com> 2024-05-27 17:46:15 +0200
commit 1e7b32490dfdccddd04f46d4b0416b48d749d51b (patch)
tree 62a11365933a5a11e0800af64cbdf9172e5e6e7a /vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa
parent [chore] Small styling + link issues (#2933) (diff)
[experiment] add alternative wasm sqlite3 implementation available via build-tag (#2863)
This allows for building GoToSocial with [SQLite transpiled to WASM](https://github.com/ncruces/go-sqlite3) and accessed through [Wazero](https://wazero.io/).
Diffstat (limited to 'vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa')
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block.go 407
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block_sort.go 34
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block_sort_old.go 24
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/builder.go 731
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/cmp.go 107
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/funcref.go 12
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/instructions.go 2967
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass.go 417
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass_blk_layouts.go 335
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass_cfg.go 312
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/signature.go 49
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/ssa.go 14
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/type.go 112
-rw-r--r-- vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/vs.go 87
14 files changed, 5608 insertions, 0 deletions
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block.go
new file mode 100644
index 000000000..10b6b4b62
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block.go
@@ -0,0 +1,407 @@
+package ssa
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
+)
+
+// BasicBlock represents the Basic Block of an SSA function.
+// Each BasicBlock always ends with branching instructions (e.g. Branch, Return, etc.),
+// and at most two branches are allowed. If there are two branches, they are placed together at the end of the block.
+// In other words, there's no branching instruction in the middle of the block.
+//
+// Note: we use the "block argument" variant of SSA, instead of PHI functions. See the package level doc comments.
+//
+// Note: we use "parameter/param" as a placeholder which represents a variant of PHI, and "argument/arg" as an actual
+// Value passed to that "parameter/param".
+type BasicBlock interface {
+ // ID returns the unique ID of this block.
+ ID() BasicBlockID
+
+ // Name returns the unique string ID of this block. e.g. blk0, blk1, ...
+ Name() string
+
+ // AddParam adds a parameter to the block whose type is specified by `t`.
+ AddParam(b Builder, t Type) Value
+
+ // Params returns the number of parameters to this block.
+ Params() int
+
+ // Param returns the Value which corresponds to the i-th parameter of this block.
+ // The returned Value is the definition of the param in this block.
+ Param(i int) Value
+
+ // InsertInstruction inserts an instruction that implements Value into the tail of this block.
+ InsertInstruction(raw *Instruction)
+
+ // Root returns the root instruction of this block.
+ Root() *Instruction
+
+ // Tail returns the tail instruction of this block.
+ Tail() *Instruction
+
+ // EntryBlock returns true if this block represents the function entry.
+ EntryBlock() bool
+
+ // ReturnBlock returns true if this block represents the function return.
+ ReturnBlock() bool
+
+ // FormatHeader returns the debug string of this block, not including instructions.
+ FormatHeader(b Builder) string
+
+ // Valid is true if this block is still valid even after optimizations.
+ Valid() bool
+
+ // Sealed is true if this block has been sealed.
+ Sealed() bool
+
+ // BeginPredIterator resets the predecessor iterator and returns the first predecessor of this block.
+ BeginPredIterator() BasicBlock
+
+ // NextPredIterator returns the next predecessor of this block.
+ NextPredIterator() BasicBlock
+
+ // Preds returns the number of predecessors of this block.
+ Preds() int
+
+ // Pred returns the i-th predecessor of this block.
+ Pred(i int) BasicBlock
+
+ // Succs returns the number of successors of this block.
+ Succs() int
+
+ // Succ returns the i-th successor of this block.
+ Succ(i int) BasicBlock
+
+ // LoopHeader returns true if this block is a loop header.
+ LoopHeader() bool
+
+ // LoopNestingForestChildren returns the children of this block in the loop nesting forest.
+ LoopNestingForestChildren() []BasicBlock
+}
+
+type (
+ // basicBlock is a basic block in an SSA-transformed function.
+ basicBlock struct {
+ id BasicBlockID
+ rootInstr, currentInstr *Instruction
+ params []blockParam
+ predIter int
+ preds []basicBlockPredecessorInfo
+ success []*basicBlock
+ // singlePred is the alias to preds[0] for fast lookup, and only set after Seal is called.
+ singlePred *basicBlock
+ // lastDefinitions maps Variable to its last definition in this block.
+ lastDefinitions map[Variable]Value
+ // unknownValues are used in builder.findValue. The usage is well-described in the paper.
+ unknownValues []unknownValue
+ // invalid is true if this block is made invalid during optimizations.
+ invalid bool
+ // sealed is true if this is sealed (all the predecessors are known).
+ sealed bool
+ // loopHeader is true if this block is a loop header:
+ //
+ // > A loop header (sometimes called the entry point of the loop) is a dominator that is the target
+ // > of a loop-forming back edge. The loop header dominates all blocks in the loop body.
+ // > A block may be a loop header for more than one loop. A loop may have multiple entry points,
+ // > in which case it has no "loop header".
+ //
+ // See https://en.wikipedia.org/wiki/Control-flow_graph for more details.
+ //
+ // This is modified during the subPassLoopDetection pass.
+ loopHeader bool
+
+ // loopNestingForestChildren holds the children of this block in the loop nesting forest.
+ // Non-empty if and only if this block is a loop header (i.e. loopHeader=true).
+ loopNestingForestChildren []BasicBlock
+
+ // reversePostOrder is used to sort all the blocks in the function in reverse post order.
+ // This is used in builder.LayoutBlocks.
+ reversePostOrder int
+
+ // child and sibling are the ones in the dominator tree.
+ child, sibling *basicBlock
+ }
+ // BasicBlockID is the unique ID of a basicBlock.
+ BasicBlockID uint32
+
+ // blockParam implements Value and represents a parameter to a basicBlock.
+ blockParam struct {
+ // value is the Value that corresponds to the parameter in this block,
+ // and can be considered as an output of PHI instruction in traditional SSA.
+ value Value
+ // typ is the type of the parameter.
+ typ Type
+ }
+
+ unknownValue struct {
+ // variable is the variable that this unknownValue represents.
+ variable Variable
+ // value is the value that this unknownValue represents.
+ value Value
+ }
+)
+
+const basicBlockIDReturnBlock = 0xffffffff
+
+// Name implements BasicBlock.Name.
+func (bb *basicBlock) Name() string {
+ if bb.id == basicBlockIDReturnBlock {
+ return "blk_ret"
+ } else {
+ return fmt.Sprintf("blk%d", bb.id)
+ }
+}
+
+// String implements fmt.Stringer for debugging.
+func (bid BasicBlockID) String() string {
+ if bid == basicBlockIDReturnBlock {
+ return "blk_ret"
+ } else {
+ return fmt.Sprintf("blk%d", bid)
+ }
+}
+
+// ID implements BasicBlock.ID.
+func (bb *basicBlock) ID() BasicBlockID {
+ return bb.id
+}
+
+// basicBlockPredecessorInfo is the information of a predecessor of a basicBlock.
+// A predecessor is identified by a pair of the predecessor block and the branch instruction used to jump to this block.
+type basicBlockPredecessorInfo struct {
+ blk *basicBlock
+ branch *Instruction
+}
+
+// EntryBlock implements BasicBlock.EntryBlock.
+func (bb *basicBlock) EntryBlock() bool {
+ return bb.id == 0
+}
+
+// ReturnBlock implements BasicBlock.ReturnBlock.
+func (bb *basicBlock) ReturnBlock() bool {
+ return bb.id == basicBlockIDReturnBlock
+}
+
+// AddParam implements BasicBlock.AddParam.
+func (bb *basicBlock) AddParam(b Builder, typ Type) Value {
+ paramValue := b.allocateValue(typ)
+ bb.params = append(bb.params, blockParam{typ: typ, value: paramValue})
+ return paramValue
+}
+
+// addParamOn adds a parameter to this block whose value is already allocated.
+func (bb *basicBlock) addParamOn(typ Type, value Value) {
+ bb.params = append(bb.params, blockParam{typ: typ, value: value})
+}
+
+// Params implements BasicBlock.Params.
+func (bb *basicBlock) Params() int {
+ return len(bb.params)
+}
+
+// Param implements BasicBlock.Param.
+func (bb *basicBlock) Param(i int) Value {
+ p := &bb.params[i]
+ return p.value
+}
+
+// Valid implements BasicBlock.Valid.
+func (bb *basicBlock) Valid() bool {
+ return !bb.invalid
+}
+
+// Sealed implements BasicBlock.Sealed.
+func (bb *basicBlock) Sealed() bool {
+ return bb.sealed
+}
+
+// InsertInstruction implements BasicBlock.InsertInstruction.
+func (bb *basicBlock) InsertInstruction(next *Instruction) {
+ current := bb.currentInstr
+ if current != nil {
+ current.next = next
+ next.prev = current
+ } else {
+ bb.rootInstr = next
+ }
+ bb.currentInstr = next
+
+ switch next.opcode {
+ case OpcodeJump, OpcodeBrz, OpcodeBrnz:
+ target := next.blk.(*basicBlock)
+ target.addPred(bb, next)
+ case OpcodeBrTable:
+ for _, _target := range next.targets {
+ target := _target.(*basicBlock)
+ target.addPred(bb, next)
+ }
+ }
+}
+
+// NumPreds implements BasicBlock.NumPreds.
+func (bb *basicBlock) NumPreds() int {
+ return len(bb.preds)
+}
+
+// BeginPredIterator implements BasicBlock.BeginPredIterator.
+func (bb *basicBlock) BeginPredIterator() BasicBlock {
+ bb.predIter = 0
+ return bb.NextPredIterator()
+}
+
+// NextPredIterator implements BasicBlock.NextPredIterator.
+func (bb *basicBlock) NextPredIterator() BasicBlock {
+ if bb.predIter >= len(bb.preds) {
+ return nil
+ }
+ pred := bb.preds[bb.predIter].blk
+ bb.predIter++
+ return pred
+}
+
+// Preds implements BasicBlock.Preds.
+func (bb *basicBlock) Preds() int {
+ return len(bb.preds)
+}
+
+// Pred implements BasicBlock.Pred.
+func (bb *basicBlock) Pred(i int) BasicBlock {
+ return bb.preds[i].blk
+}
+
+// Succs implements BasicBlock.Succs.
+func (bb *basicBlock) Succs() int {
+ return len(bb.success)
+}
+
+// Succ implements BasicBlock.Succ.
+func (bb *basicBlock) Succ(i int) BasicBlock {
+ return bb.success[i]
+}
+
+// Root implements BasicBlock.Root.
+func (bb *basicBlock) Root() *Instruction {
+ return bb.rootInstr
+}
+
+// Tail implements BasicBlock.Tail.
+func (bb *basicBlock) Tail() *Instruction {
+ return bb.currentInstr
+}
+
+// resetBasicBlock resets the basicBlock to its initial state so that it can be reused for another function.
+func resetBasicBlock(bb *basicBlock) {
+ bb.params = bb.params[:0]
+ bb.rootInstr, bb.currentInstr = nil, nil
+ bb.preds = bb.preds[:0]
+ bb.success = bb.success[:0]
+ bb.invalid, bb.sealed = false, false
+ bb.singlePred = nil
+ bb.unknownValues = bb.unknownValues[:0]
+ bb.lastDefinitions = wazevoapi.ResetMap(bb.lastDefinitions)
+ bb.reversePostOrder = -1
+ bb.loopNestingForestChildren = bb.loopNestingForestChildren[:0]
+ bb.loopHeader = false
+ bb.sibling = nil
+ bb.child = nil
+}
+
+// addPred adds the predecessor `blk`, reached via the given branch instruction, to this block.
+func (bb *basicBlock) addPred(blk BasicBlock, branch *Instruction) {
+ if bb.sealed {
+ panic("BUG: trying to add predecessor to a sealed block: " + bb.Name())
+ }
+
+ pred := blk.(*basicBlock)
+ for i := range bb.preds {
+ existingPred := &bb.preds[i]
+ if existingPred.blk == pred && existingPred.branch != branch {
+ // If the target is already added, then this must come from the same BrTable;
+ // otherwise such a redundant branch should have been eliminated by the frontend (which is simpler).
+ panic(fmt.Sprintf("BUG: redundant non BrTable jumps in %s whose targets are the same", bb.Name()))
+ }
+ }
+
+ bb.preds = append(bb.preds, basicBlockPredecessorInfo{
+ blk: pred,
+ branch: branch,
+ })
+
+ pred.success = append(pred.success, bb)
+}
+
+// FormatHeader implements BasicBlock.FormatHeader.
+func (bb *basicBlock) FormatHeader(b Builder) string {
+ ps := make([]string, len(bb.params))
+ for i, p := range bb.params {
+ ps[i] = p.value.formatWithType(b)
+ }
+
+ if len(bb.preds) > 0 {
+ preds := make([]string, 0, len(bb.preds))
+ for _, pred := range bb.preds {
+ if pred.blk.invalid {
+ continue
+ }
+ preds = append(preds, fmt.Sprintf("blk%d", pred.blk.id))
+
+ }
+ return fmt.Sprintf("blk%d: (%s) <-- (%s)",
+ bb.id, strings.Join(ps, ","), strings.Join(preds, ","))
+ } else {
+ return fmt.Sprintf("blk%d: (%s)", bb.id, strings.Join(ps, ", "))
+ }
+}
+
+// validate validates the basicBlock for debugging purposes.
+func (bb *basicBlock) validate(b *builder) {
+ if bb.invalid {
+ panic("BUG: trying to validate an invalid block: " + bb.Name())
+ }
+ if len(bb.preds) > 0 {
+ for _, pred := range bb.preds {
+ if pred.branch.opcode != OpcodeBrTable {
+ if target := pred.branch.blk; target != bb {
+ panic(fmt.Sprintf("BUG: '%s' is not branch to %s, but to %s",
+ pred.branch.Format(b), bb.Name(), target.Name()))
+ }
+ }
+
+ var exp int
+ if bb.ReturnBlock() {
+ exp = len(b.currentSignature.Results)
+ } else {
+ exp = len(bb.params)
+ }
+
+ if len(pred.branch.vs.View()) != exp {
+ panic(fmt.Sprintf(
+ "BUG: len(argument at %s) != len(params at %s): %d != %d: %s",
+ pred.blk.Name(), bb.Name(),
+ len(pred.branch.vs.View()), exp, pred.branch.Format(b),
+ ))
+ }
+
+ }
+ }
+}
+
+// String implements fmt.Stringer for debugging purpose only.
+func (bb *basicBlock) String() string {
+ return strconv.Itoa(int(bb.id))
+}
+
+// LoopNestingForestChildren implements BasicBlock.LoopNestingForestChildren.
+func (bb *basicBlock) LoopNestingForestChildren() []BasicBlock {
+ return bb.loopNestingForestChildren
+}
+
+// LoopHeader implements BasicBlock.LoopHeader.
+func (bb *basicBlock) LoopHeader() bool {
+ return bb.loopHeader
+}
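The predecessor iterator above keeps its cursor (`predIter`) on the block itself, so only one such iteration per block can be in flight at a time. A minimal sketch of how a pass might walk predecessors with this API, assuming the package were importable as `ssa` (the `dumpPreds` helper and the `fmt` import are illustrative, not part of the package):

	// dumpPreds prints the name of every predecessor of blk.
	// BeginPredIterator resets blk's internal cursor, so nested
	// iterations over the same block would interfere with each other.
	func dumpPreds(blk ssa.BasicBlock) {
		for pred := blk.BeginPredIterator(); pred != nil; pred = blk.NextPredIterator() {
			fmt.Println(pred.Name())
		}
	}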
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block_sort.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block_sort.go
new file mode 100644
index 000000000..e1471edc3
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block_sort.go
@@ -0,0 +1,34 @@
+//go:build go1.21
+
+package ssa
+
+import (
+ "slices"
+)
+
+func sortBlocks(blocks []*basicBlock) {
+ slices.SortFunc(blocks, func(i, j *basicBlock) int {
+ jIsReturn := j.ReturnBlock()
+ iIsReturn := i.ReturnBlock()
+ if iIsReturn && jIsReturn {
+ return 0
+ }
+ if jIsReturn {
+ return 1
+ }
+ if iIsReturn {
+ return -1
+ }
+ iRoot, jRoot := i.rootInstr, j.rootInstr
+ if iRoot == nil && jRoot == nil { // For testing.
+ return 0
+ }
+ if jRoot == nil {
+ return 1
+ }
+ if iRoot == nil {
+ return -1
+ }
+ return i.rootInstr.id - j.rootInstr.id
+ })
+}
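Note that slices.SortFunc takes a three-way comparator over elements: a negative result sorts i before j, a positive result sorts j before i, and zero leaves them tied. A tiny self-contained illustration of that convention (not from the package):

	package main

	import (
		"fmt"
		"slices"
	)

	func main() {
		ids := []int{3, 1, 2}
		// cmp(i, j) < 0 puts i first; here this yields ascending order.
		slices.SortFunc(ids, func(i, j int) int { return i - j })
		fmt.Println(ids) // [1 2 3]
	}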
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block_sort_old.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block_sort_old.go
new file mode 100644
index 000000000..9dc881dae
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/basic_block_sort_old.go
@@ -0,0 +1,24 @@
+//go:build !go1.21
+
+// TODO: delete after the floor Go version is 1.21
+
+package ssa
+
+import "sort"
+
+func sortBlocks(blocks []*basicBlock) {
+ sort.SliceStable(blocks, func(i, j int) bool {
+ iBlk, jBlk := blocks[i], blocks[j]
+ if jBlk.ReturnBlock() {
+ return true
+ }
+ if iBlk.ReturnBlock() {
+ return false
+ }
+ iRoot, jRoot := iBlk.rootInstr, jBlk.rootInstr
+ if iRoot == nil || jRoot == nil { // For testing.
+ return true
+ }
+ return iBlk.rootInstr.id < jBlk.rootInstr.id
+ })
+}
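Before Go 1.21, sort.SliceStable takes a boolean less(i, j) over indices rather than a three-way comparator over elements, which is why the two files above exist behind mutually exclusive build tags. The same ascending sort expressed in that older style, as a self-contained sketch:

	package main

	import (
		"fmt"
		"sort"
	)

	func main() {
		ids := []int{3, 1, 2}
		// less(i, j) reports whether ids[i] must sort before ids[j].
		sort.SliceStable(ids, func(i, j int) bool { return ids[i] < ids[j] })
		fmt.Println(ids) // [1 2 3]
	}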
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/builder.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/builder.go
new file mode 100644
index 000000000..1fc84d2ea
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/builder.go
@@ -0,0 +1,731 @@
+package ssa
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
+)
+
+// Builder is used to build SSA consisting of Basic Blocks per function.
+type Builder interface {
+ // Init must be called to reuse this builder for the next function.
+ Init(typ *Signature)
+
+ // Signature returns the Signature of the currently-compiled function.
+ Signature() *Signature
+
+ // BlockIDMax returns the maximum value of BasicBlockID existing in the currently-compiled function.
+ BlockIDMax() BasicBlockID
+
+ // AllocateBasicBlock creates a basic block in SSA function.
+ AllocateBasicBlock() BasicBlock
+
+ // CurrentBlock returns the currently handled BasicBlock which is set by the latest call to SetCurrentBlock.
+ CurrentBlock() BasicBlock
+
+ // EntryBlock returns the entry BasicBlock of the currently-compiled function.
+ EntryBlock() BasicBlock
+
+ // SetCurrentBlock sets the instruction insertion target to the BasicBlock `b`.
+ SetCurrentBlock(b BasicBlock)
+
+ // DeclareVariable declares a Variable of the given Type.
+ DeclareVariable(Type) Variable
+
+ // DefineVariable defines a variable in the `block` with value.
+ // The defining instruction will be inserted into the `block`.
+ DefineVariable(variable Variable, value Value, block BasicBlock)
+
+ // DefineVariableInCurrentBB is the same as DefineVariable except the definition is
+ // inserted into the current BasicBlock. Alias to DefineVariable(x, y, CurrentBlock()).
+ DefineVariableInCurrentBB(variable Variable, value Value)
+
+ // AllocateInstruction returns a new Instruction.
+ AllocateInstruction() *Instruction
+
+ // InsertInstruction executes BasicBlock.InsertInstruction for the currently handled basic block.
+ InsertInstruction(raw *Instruction)
+
+ // allocateValue allocates an unused Value.
+ allocateValue(typ Type) Value
+
+ // MustFindValue searches the latest definition of the given Variable and returns the result.
+ MustFindValue(variable Variable) Value
+
+ // MustFindValueInBlk is the same as MustFindValue except it searches the latest definition from the given BasicBlock.
+ MustFindValueInBlk(variable Variable, blk BasicBlock) Value
+
+ // FindValueInLinearPath tries to find the latest definition of the given Variable in the linear path to the current BasicBlock.
+ // If it cannot find the definition, or it's not sealed yet, it returns ValueInvalid.
+ FindValueInLinearPath(variable Variable) Value
+
+ // Seal declares that all the predecessors of this block are known and have been added via addPred.
+ // After calling this, adding a predecessor will be forbidden.
+ Seal(blk BasicBlock)
+
+ // AnnotateValue is for debugging purposes.
+ AnnotateValue(value Value, annotation string)
+
+ // DeclareSignature appends the *Signature to be referenced by various instructions (e.g. OpcodeCall).
+ DeclareSignature(signature *Signature)
+
+ // Signatures returns the slice of declared Signatures.
+ Signatures() []*Signature
+
+ // ResolveSignature returns the Signature which corresponds to SignatureID.
+ ResolveSignature(id SignatureID) *Signature
+
+ // RunPasses runs various passes on the constructed SSA function.
+ RunPasses()
+
+ // Format returns the debugging string of the SSA function.
+ Format() string
+
+ // BlockIteratorBegin initializes the state to iterate over all the valid BasicBlock(s) compiled.
+ // Combined with BlockIteratorNext, we can use this like:
+ //
+ // for blk := builder.BlockIteratorBegin(); blk != nil; blk = builder.BlockIteratorNext() {
+ // // ...
+ // }
+ //
+ // The returned blocks are ordered in the order of AllocateBasicBlock being called.
+ BlockIteratorBegin() BasicBlock
+
+ // BlockIteratorNext advances the state for iteration initialized by BlockIteratorBegin.
+ // Returns nil if there's no unseen BasicBlock.
+ BlockIteratorNext() BasicBlock
+
+ // ValueRefCounts returns the slice of reference counts, indexed by ValueID.
+ // The returned slice must not be modified.
+ ValueRefCounts() []int
+
+ // BlockIteratorReversePostOrderBegin is almost the same as BlockIteratorBegin except it returns the BasicBlock in the reverse post-order.
+ // This is available after RunPasses is run.
+ BlockIteratorReversePostOrderBegin() BasicBlock
+
+ // BlockIteratorReversePostOrderNext is almost the same as BlockIteratorNext except it returns the BasicBlock in the reverse post-order.
+ // This is available after RunPasses is run.
+ BlockIteratorReversePostOrderNext() BasicBlock
+
+ // ReturnBlock returns the BasicBlock which is used to return from the function.
+ ReturnBlock() BasicBlock
+
+ // InsertUndefined inserts an undefined instruction at the current position.
+ InsertUndefined()
+
+ // SetCurrentSourceOffset sets the current source offset. The incoming instruction will be annotated with this offset.
+ SetCurrentSourceOffset(line SourceOffset)
+
+ // LoopNestingForestRoots returns the roots of the loop nesting forest.
+ LoopNestingForestRoots() []BasicBlock
+
+ // LowestCommonAncestor returns the lowest common ancestor in the dominator tree of the given BasicBlock(s).
+ LowestCommonAncestor(blk1, blk2 BasicBlock) BasicBlock
+
+ // Idom returns the immediate dominator of the given BasicBlock.
+ Idom(blk BasicBlock) BasicBlock
+
+ VarLengthPool() *wazevoapi.VarLengthPool[Value]
+}
+
+// NewBuilder returns a new Builder implementation.
+func NewBuilder() Builder {
+ return &builder{
+ instructionsPool: wazevoapi.NewPool[Instruction](resetInstruction),
+ basicBlocksPool: wazevoapi.NewPool[basicBlock](resetBasicBlock),
+ varLengthPool: wazevoapi.NewVarLengthPool[Value](),
+ valueAnnotations: make(map[ValueID]string),
+ signatures: make(map[SignatureID]*Signature),
+ blkVisited: make(map[*basicBlock]int),
+ valueIDAliases: make(map[ValueID]Value),
+ redundantParameterIndexToValue: make(map[int]Value),
+ returnBlk: &basicBlock{id: basicBlockIDReturnBlock},
+ }
+}
+
+// builder implements Builder interface.
+type builder struct {
+ basicBlocksPool wazevoapi.Pool[basicBlock]
+ instructionsPool wazevoapi.Pool[Instruction]
+ varLengthPool wazevoapi.VarLengthPool[Value]
+ signatures map[SignatureID]*Signature
+ currentSignature *Signature
+
+ // reversePostOrderedBasicBlocks are the BasicBlock(s) ordered in the reverse post-order after passCalculateImmediateDominators.
+ reversePostOrderedBasicBlocks []*basicBlock
+ currentBB *basicBlock
+ returnBlk *basicBlock
+
+ // variables holds the type of each declared Variable, indexed by Variable.
+ variables []Type
+ // nextValueID is used by builder.AllocateValue.
+ nextValueID ValueID
+ // nextVariable is used by builder.AllocateVariable.
+ nextVariable Variable
+
+ valueIDAliases map[ValueID]Value
+ valueAnnotations map[ValueID]string
+
+ // valueRefCounts is used to lower the SSA in the backend, and will be calculated
+ // by the last SSA-level optimization pass.
+ valueRefCounts []int
+
+ // dominators stores the immediate dominator of each BasicBlock.
+ // The index is blockID of the BasicBlock.
+ dominators []*basicBlock
+ sparseTree dominatorSparseTree
+
+ // loopNestingForestRoots are the roots of the loop nesting forest.
+ loopNestingForestRoots []BasicBlock
+
+ // The following fields are used for optimization passes/deterministic compilation.
+ instStack []*Instruction
+ blkVisited map[*basicBlock]int
+ valueIDToInstruction []*Instruction
+ blkStack []*basicBlock
+ blkStack2 []*basicBlock
+ ints []int
+ redundantParameterIndexToValue map[int]Value
+
+ // blockIterCur is used to implement blockIteratorBegin and blockIteratorNext.
+ blockIterCur int
+
+ // donePreBlockLayoutPasses is true if all the passes before LayoutBlocks are called.
+ donePreBlockLayoutPasses bool
+ // doneBlockLayout is true if LayoutBlocks is called.
+ doneBlockLayout bool
+ // donePostBlockLayoutPasses is true if all the passes after LayoutBlocks are called.
+ donePostBlockLayoutPasses bool
+
+ currentSourceOffset SourceOffset
+}
+
+func (b *builder) VarLengthPool() *wazevoapi.VarLengthPool[Value] {
+ return &b.varLengthPool
+}
+
+// ReturnBlock implements Builder.ReturnBlock.
+func (b *builder) ReturnBlock() BasicBlock {
+ return b.returnBlk
+}
+
+// Init implements Builder.Init.
+func (b *builder) Init(s *Signature) {
+ b.nextVariable = 0
+ b.currentSignature = s
+ resetBasicBlock(b.returnBlk)
+ b.instructionsPool.Reset()
+ b.basicBlocksPool.Reset()
+ b.varLengthPool.Reset()
+ b.donePreBlockLayoutPasses = false
+ b.doneBlockLayout = false
+ b.donePostBlockLayoutPasses = false
+ for _, sig := range b.signatures {
+ sig.used = false
+ }
+
+ b.ints = b.ints[:0]
+ b.blkStack = b.blkStack[:0]
+ b.blkStack2 = b.blkStack2[:0]
+ b.dominators = b.dominators[:0]
+ b.loopNestingForestRoots = b.loopNestingForestRoots[:0]
+
+ for i := 0; i < b.basicBlocksPool.Allocated(); i++ {
+ blk := b.basicBlocksPool.View(i)
+ delete(b.blkVisited, blk)
+ }
+ b.basicBlocksPool.Reset()
+
+ for v := ValueID(0); v < b.nextValueID; v++ {
+ delete(b.valueAnnotations, v)
+ delete(b.valueIDAliases, v)
+ b.valueRefCounts[v] = 0
+ b.valueIDToInstruction[v] = nil
+ }
+ b.nextValueID = 0
+ b.reversePostOrderedBasicBlocks = b.reversePostOrderedBasicBlocks[:0]
+ b.doneBlockLayout = false
+ for i := range b.valueRefCounts {
+ b.valueRefCounts[i] = 0
+ }
+
+ b.currentSourceOffset = sourceOffsetUnknown
+}
+
+// Signature implements Builder.Signature.
+func (b *builder) Signature() *Signature {
+ return b.currentSignature
+}
+
+// AnnotateValue implements Builder.AnnotateValue.
+func (b *builder) AnnotateValue(value Value, a string) {
+ b.valueAnnotations[value.ID()] = a
+}
+
+// AllocateInstruction implements Builder.AllocateInstruction.
+func (b *builder) AllocateInstruction() *Instruction {
+ instr := b.instructionsPool.Allocate()
+ instr.id = b.instructionsPool.Allocated()
+ return instr
+}
+
+// DeclareSignature implements Builder.DeclareSignature.
+func (b *builder) DeclareSignature(s *Signature) {
+ b.signatures[s.ID] = s
+ s.used = false
+}
+
+// Signatures implements Builder.Signatures.
+func (b *builder) Signatures() (ret []*Signature) {
+ for _, sig := range b.signatures {
+ ret = append(ret, sig)
+ }
+ sort.Slice(ret, func(i, j int) bool {
+ return ret[i].ID < ret[j].ID
+ })
+ return
+}
+
+// SetCurrentSourceOffset implements Builder.SetCurrentSourceOffset.
+func (b *builder) SetCurrentSourceOffset(l SourceOffset) {
+ b.currentSourceOffset = l
+}
+
+func (b *builder) usedSignatures() (ret []*Signature) {
+ for _, sig := range b.signatures {
+ if sig.used {
+ ret = append(ret, sig)
+ }
+ }
+ sort.Slice(ret, func(i, j int) bool {
+ return ret[i].ID < ret[j].ID
+ })
+ return
+}
+
+// ResolveSignature implements Builder.ResolveSignature.
+func (b *builder) ResolveSignature(id SignatureID) *Signature {
+ return b.signatures[id]
+}
+
+// AllocateBasicBlock implements Builder.AllocateBasicBlock.
+func (b *builder) AllocateBasicBlock() BasicBlock {
+ return b.allocateBasicBlock()
+}
+
+// allocateBasicBlock allocates a new basicBlock.
+func (b *builder) allocateBasicBlock() *basicBlock {
+ id := BasicBlockID(b.basicBlocksPool.Allocated())
+ blk := b.basicBlocksPool.Allocate()
+ blk.id = id
+ return blk
+}
+
+// Idom implements Builder.Idom.
+func (b *builder) Idom(blk BasicBlock) BasicBlock {
+ return b.dominators[blk.ID()]
+}
+
+// InsertInstruction implements Builder.InsertInstruction.
+func (b *builder) InsertInstruction(instr *Instruction) {
+ b.currentBB.InsertInstruction(instr)
+
+ if l := b.currentSourceOffset; l.Valid() {
+ // Emit the source offset info only when the instruction has a side effect because
+ // these are the only instructions that are accessed by stack unwinding.
+ // This significantly reduces the amount of offset info in the binary.
+ if instr.sideEffect() != sideEffectNone {
+ instr.annotateSourceOffset(l)
+ }
+ }
+
+ resultTypesFn := instructionReturnTypes[instr.opcode]
+ if resultTypesFn == nil {
+ panic("TODO: " + instr.Format(b))
+ }
+
+ t1, ts := resultTypesFn(b, instr)
+ if t1.invalid() {
+ return
+ }
+
+ r1 := b.allocateValue(t1)
+ instr.rValue = r1
+
+ tsl := len(ts)
+ if tsl == 0 {
+ return
+ }
+
+ rValues := b.varLengthPool.Allocate(tsl)
+ for i := 0; i < tsl; i++ {
+ rValues = rValues.Append(&b.varLengthPool, b.allocateValue(ts[i]))
+ }
+ instr.rValues = rValues
+}
+
+// DefineVariable implements Builder.DefineVariable.
+func (b *builder) DefineVariable(variable Variable, value Value, block BasicBlock) {
+ if b.variables[variable].invalid() {
+ panic("BUG: trying to define variable " + variable.String() + " but is not declared yet")
+ }
+
+ if b.variables[variable] != value.Type() {
+ panic(fmt.Sprintf("BUG: inconsistent type for variable %d: expected %s but got %s", variable, b.variables[variable], value.Type()))
+ }
+ bb := block.(*basicBlock)
+ bb.lastDefinitions[variable] = value
+}
+
+// DefineVariableInCurrentBB implements Builder.DefineVariableInCurrentBB.
+func (b *builder) DefineVariableInCurrentBB(variable Variable, value Value) {
+ b.DefineVariable(variable, value, b.currentBB)
+}
+
+// SetCurrentBlock implements Builder.SetCurrentBlock.
+func (b *builder) SetCurrentBlock(bb BasicBlock) {
+ b.currentBB = bb.(*basicBlock)
+}
+
+// CurrentBlock implements Builder.CurrentBlock.
+func (b *builder) CurrentBlock() BasicBlock {
+ return b.currentBB
+}
+
+// EntryBlock implements Builder.EntryBlock.
+func (b *builder) EntryBlock() BasicBlock {
+ return b.entryBlk()
+}
+
+// DeclareVariable implements Builder.DeclareVariable.
+func (b *builder) DeclareVariable(typ Type) Variable {
+ v := b.allocateVariable()
+ iv := int(v)
+ if l := len(b.variables); l <= iv {
+ b.variables = append(b.variables, make([]Type, 2*(l+1))...)
+ }
+ b.variables[v] = typ
+ return v
+}
+
+// allocateVariable allocates a new variable.
+func (b *builder) allocateVariable() (ret Variable) {
+ ret = b.nextVariable
+ b.nextVariable++
+ return
+}
+
+// allocateValue implements Builder.allocateValue.
+func (b *builder) allocateValue(typ Type) (v Value) {
+ v = Value(b.nextValueID)
+ v = v.setType(typ)
+ b.nextValueID++
+ return
+}
+
+// FindValueInLinearPath implements Builder.FindValueInLinearPath.
+func (b *builder) FindValueInLinearPath(variable Variable) Value {
+ return b.findValueInLinearPath(variable, b.currentBB)
+}
+
+func (b *builder) findValueInLinearPath(variable Variable, blk *basicBlock) Value {
+ if val, ok := blk.lastDefinitions[variable]; ok {
+ return val
+ } else if !blk.sealed {
+ return ValueInvalid
+ }
+
+ if pred := blk.singlePred; pred != nil {
+ // If this block is sealed and has only one predecessor,
+ // we can use the value in that block without ambiguity on definition.
+ return b.findValueInLinearPath(variable, pred)
+ }
+ if len(blk.preds) == 1 {
+ panic("BUG")
+ }
+ return ValueInvalid
+}
+
+func (b *builder) MustFindValueInBlk(variable Variable, blk BasicBlock) Value {
+ typ := b.definedVariableType(variable)
+ return b.findValue(typ, variable, blk.(*basicBlock))
+}
+
+// MustFindValue implements Builder.MustFindValue.
+func (b *builder) MustFindValue(variable Variable) Value {
+ typ := b.definedVariableType(variable)
+ return b.findValue(typ, variable, b.currentBB)
+}
+
+// findValue recursively tries to find the latest definition of a `variable`. The algorithm is described in
+// Section 2 of the paper https://link.springer.com/content/pdf/10.1007/978-3-642-37051-9_6.pdf.
+//
+// TODO: reimplement this iteratively, not recursively, to avoid stack overflow.
+func (b *builder) findValue(typ Type, variable Variable, blk *basicBlock) Value {
+ if val, ok := blk.lastDefinitions[variable]; ok {
+ // The value is already defined in this block!
+ return val
+ } else if !blk.sealed { // Incomplete CFG as in the paper.
+ // If this block is not sealed, it might gain additional unknown predecessors later on.
+ // So we temporarily define a placeholder value here (not adding it as a parameter yet!),
+ // and record it as unknown.
+ // The unknown values are resolved when this block is sealed via Builder.Seal.
+ value := b.allocateValue(typ)
+ if wazevoapi.SSALoggingEnabled {
+ fmt.Printf("adding unknown value placeholder for %s at %d\n", variable, blk.id)
+ }
+ blk.lastDefinitions[variable] = value
+ blk.unknownValues = append(blk.unknownValues, unknownValue{
+ variable: variable,
+ value: value,
+ })
+ return value
+ }
+
+ if pred := blk.singlePred; pred != nil {
+ // If this block is sealed and has only one predecessor,
+ // we can use the value in that block without ambiguity on definition.
+ return b.findValue(typ, variable, pred)
+ } else if len(blk.preds) == 0 {
+ panic("BUG: value is not defined for " + variable.String())
+ }
+
+ // If this block has multiple predecessors, we have to gather the definitions,
+ // and treat them as an argument to this block.
+ //
+ // The first thing is to define a new parameter to this block which may or may not be redundant, but
+ // later we eliminate trivial params in an optimization pass. This must be done before finding the
+ // definitions in the predecessors so that we can break the cycle.
+ paramValue := blk.AddParam(b, typ)
+ b.DefineVariable(variable, paramValue, blk)
+
+ // After the new param is added, we have to manipulate the original branching instructions
+ // in predecessors so that they would pass the definition of `variable` as the argument to
+ // the newly added PHI.
+ for i := range blk.preds {
+ pred := &blk.preds[i]
+ value := b.findValue(typ, variable, pred.blk)
+ pred.branch.addArgumentBranchInst(b, value)
+ }
+ return paramValue
+}
+
+// Seal implements Builder.Seal.
+func (b *builder) Seal(raw BasicBlock) {
+ blk := raw.(*basicBlock)
+ if len(blk.preds) == 1 {
+ blk.singlePred = blk.preds[0].blk
+ }
+ blk.sealed = true
+
+ for _, v := range blk.unknownValues {
+ variable, phiValue := v.variable, v.value
+ typ := b.definedVariableType(variable)
+ blk.addParamOn(typ, phiValue)
+ for i := range blk.preds {
+ pred := &blk.preds[i]
+ predValue := b.findValue(typ, variable, pred.blk)
+ if !predValue.Valid() {
+ panic("BUG: value is not defined anywhere in the predecessors in the CFG")
+ }
+ pred.branch.addArgumentBranchInst(b, predValue)
+ }
+ }
+}
+
+// definedVariableType returns the type of the given variable. If the variable has not been declared yet, it panics.
+func (b *builder) definedVariableType(variable Variable) Type {
+ typ := b.variables[variable]
+ if typ.invalid() {
+ panic(fmt.Sprintf("%s is not defined yet", variable))
+ }
+ return typ
+}
+
+// Format implements Builder.Format.
+func (b *builder) Format() string {
+ str := strings.Builder{}
+ usedSigs := b.usedSignatures()
+ if len(usedSigs) > 0 {
+ str.WriteByte('\n')
+ str.WriteString("signatures:\n")
+ for _, sig := range usedSigs {
+ str.WriteByte('\t')
+ str.WriteString(sig.String())
+ str.WriteByte('\n')
+ }
+ }
+
+ var iterBegin, iterNext func() *basicBlock
+ if b.doneBlockLayout {
+ iterBegin, iterNext = b.blockIteratorReversePostOrderBegin, b.blockIteratorReversePostOrderNext
+ } else {
+ iterBegin, iterNext = b.blockIteratorBegin, b.blockIteratorNext
+ }
+ for bb := iterBegin(); bb != nil; bb = iterNext() {
+ str.WriteByte('\n')
+ str.WriteString(bb.FormatHeader(b))
+ str.WriteByte('\n')
+
+ for cur := bb.Root(); cur != nil; cur = cur.Next() {
+ str.WriteByte('\t')
+ str.WriteString(cur.Format(b))
+ str.WriteByte('\n')
+ }
+ }
+ return str.String()
+}
+
+// BlockIteratorNext implements Builder.BlockIteratorNext.
+func (b *builder) BlockIteratorNext() BasicBlock {
+ if blk := b.blockIteratorNext(); blk == nil {
+ return nil // BasicBlock((*basicBlock)(nil)) != BasicBlock(nil)
+ } else {
+ return blk
+ }
+}
+
+// blockIteratorNext implements the iteration behind Builder.BlockIteratorNext.
+func (b *builder) blockIteratorNext() *basicBlock {
+ index := b.blockIterCur
+ for {
+ if index == b.basicBlocksPool.Allocated() {
+ return nil
+ }
+ ret := b.basicBlocksPool.View(index)
+ index++
+ if !ret.invalid {
+ b.blockIterCur = index
+ return ret
+ }
+ }
+}
+
+// BlockIteratorBegin implements Builder.BlockIteratorBegin.
+func (b *builder) BlockIteratorBegin() BasicBlock {
+ return b.blockIteratorBegin()
+}
+
+// blockIteratorBegin implements the iteration behind Builder.BlockIteratorBegin.
+func (b *builder) blockIteratorBegin() *basicBlock {
+ b.blockIterCur = 0
+ return b.blockIteratorNext()
+}
+
+// BlockIteratorReversePostOrderBegin implements Builder.BlockIteratorReversePostOrderBegin.
+func (b *builder) BlockIteratorReversePostOrderBegin() BasicBlock {
+ return b.blockIteratorReversePostOrderBegin()
+}
+
+// blockIteratorReversePostOrderBegin implements the iteration behind Builder.BlockIteratorReversePostOrderBegin.
+func (b *builder) blockIteratorReversePostOrderBegin() *basicBlock {
+ b.blockIterCur = 0
+ return b.blockIteratorReversePostOrderNext()
+}
+
+// BlockIteratorReversePostOrderNext implements Builder.BlockIteratorReversePostOrderNext.
+func (b *builder) BlockIteratorReversePostOrderNext() BasicBlock {
+ if blk := b.blockIteratorReversePostOrderNext(); blk == nil {
+ return nil // BasicBlock((*basicBlock)(nil)) != BasicBlock(nil)
+ } else {
+ return blk
+ }
+}
+
+// blockIteratorReversePostOrderNext implements the iteration behind Builder.BlockIteratorReversePostOrderNext.
+func (b *builder) blockIteratorReversePostOrderNext() *basicBlock {
+ if b.blockIterCur >= len(b.reversePostOrderedBasicBlocks) {
+ return nil
+ } else {
+ ret := b.reversePostOrderedBasicBlocks[b.blockIterCur]
+ b.blockIterCur++
+ return ret
+ }
+}
+
+// ValueRefCounts implements Builder.ValueRefCounts.
+func (b *builder) ValueRefCounts() []int {
+ return b.valueRefCounts
+}
+
+// alias records that `dst` is an alias of `src`. The alias(es) will be
+// eliminated in the optimization pass via resolveArgumentAlias.
+func (b *builder) alias(dst, src Value) {
+ b.valueIDAliases[dst.ID()] = src
+}
+
+// resolveArgumentAlias resolves the alias of the arguments of the given instruction.
+func (b *builder) resolveArgumentAlias(instr *Instruction) {
+ if instr.v.Valid() {
+ instr.v = b.resolveAlias(instr.v)
+ }
+
+ if instr.v2.Valid() {
+ instr.v2 = b.resolveAlias(instr.v2)
+ }
+
+ if instr.v3.Valid() {
+ instr.v3 = b.resolveAlias(instr.v3)
+ }
+
+ view := instr.vs.View()
+ for i, v := range view {
+ view[i] = b.resolveAlias(v)
+ }
+}
+
+// resolveAlias resolves the alias of the given value.
+func (b *builder) resolveAlias(v Value) Value {
+ // Some aliases are chained, so we need to resolve them recursively.
+ for {
+ if src, ok := b.valueIDAliases[v.ID()]; ok {
+ v = src
+ } else {
+ break
+ }
+ }
+ return v
+}
+
+// entryBlk returns the entry block of the function.
+func (b *builder) entryBlk() *basicBlock {
+ return b.basicBlocksPool.View(0)
+}
+
+// isDominatedBy returns true if the given block `n` is dominated by the given block `d`.
+// Before calling this, the builder must have run passCalculateImmediateDominators.
+func (b *builder) isDominatedBy(n *basicBlock, d *basicBlock) bool {
+ if len(b.dominators) == 0 {
+ panic("BUG: passCalculateImmediateDominators must be called before calling isDominatedBy")
+ }
+ ent := b.entryBlk()
+ doms := b.dominators
+ for n != d && n != ent {
+ n = doms[n.id]
+ }
+ return n == d
+}
+
+// BlockIDMax implements Builder.BlockIDMax.
+func (b *builder) BlockIDMax() BasicBlockID {
+ return BasicBlockID(b.basicBlocksPool.Allocated())
+}
+
+// InsertUndefined implements Builder.InsertUndefined.
+func (b *builder) InsertUndefined() {
+ instr := b.AllocateInstruction()
+ instr.opcode = OpcodeUndefined
+ b.InsertInstruction(instr)
+}
+
+// LoopNestingForestRoots implements Builder.LoopNestingForestRoots.
+func (b *builder) LoopNestingForestRoots() []BasicBlock {
+ return b.loopNestingForestRoots
+}
+
+// LowestCommonAncestor implements Builder.LowestCommonAncestor.
+func (b *builder) LowestCommonAncestor(blk1, blk2 BasicBlock) BasicBlock {
+ return b.sparseTree.findLCA(blk1.ID(), blk2.ID())
+}
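Putting the Builder interface together, a frontend drives it roughly as follows. This is a minimal sketch, not runnable outside wazero since the package is internal; it also assumes `ssa.TypeI32` and an `ssa.Signature` literal exist as suggested by the diffstat's type.go and signature.go (not shown in this hunk):

	// Build a one-block function that declares a variable, defines it
	// as a block parameter, and reads it back.
	builder := ssa.NewBuilder()
	builder.Init(&ssa.Signature{}) // reset the builder for this function

	entry := builder.AllocateBasicBlock() // blk0 is the entry block
	builder.SetCurrentBlock(entry)

	v := builder.DeclareVariable(ssa.TypeI32) // must declare before defining
	p := entry.AddParam(builder, ssa.TypeI32) // block param: the PHI output
	builder.DefineVariable(v, p, entry)       // record the definition

	builder.Seal(entry)             // all (here: zero) predecessors are known
	val := builder.MustFindValue(v) // resolves to p via lastDefinitions
	_ = val
	fmt.Println(builder.Format()) // debug dump of the SSA function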
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/cmp.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/cmp.go
new file mode 100644
index 000000000..15b62ca8e
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/cmp.go
@@ -0,0 +1,107 @@
+package ssa
+
+// IntegerCmpCond represents a condition for integer comparison.
+type IntegerCmpCond byte
+
+const (
+ // IntegerCmpCondInvalid represents an invalid condition.
+ IntegerCmpCondInvalid IntegerCmpCond = iota
+ // IntegerCmpCondEqual represents "==".
+ IntegerCmpCondEqual
+ // IntegerCmpCondNotEqual represents "!=".
+ IntegerCmpCondNotEqual
+ // IntegerCmpCondSignedLessThan represents Signed "<".
+ IntegerCmpCondSignedLessThan
+ // IntegerCmpCondSignedGreaterThanOrEqual represents Signed ">=".
+ IntegerCmpCondSignedGreaterThanOrEqual
+ // IntegerCmpCondSignedGreaterThan represents Signed ">".
+ IntegerCmpCondSignedGreaterThan
+ // IntegerCmpCondSignedLessThanOrEqual represents Signed "<=".
+ IntegerCmpCondSignedLessThanOrEqual
+ // IntegerCmpCondUnsignedLessThan represents Unsigned "<".
+ IntegerCmpCondUnsignedLessThan
+ // IntegerCmpCondUnsignedGreaterThanOrEqual represents Unsigned ">=".
+ IntegerCmpCondUnsignedGreaterThanOrEqual
+ // IntegerCmpCondUnsignedGreaterThan represents Unsigned ">".
+ IntegerCmpCondUnsignedGreaterThan
+ // IntegerCmpCondUnsignedLessThanOrEqual represents Unsigned "<=".
+ IntegerCmpCondUnsignedLessThanOrEqual
+)
+
+// String implements fmt.Stringer.
+func (i IntegerCmpCond) String() string {
+ switch i {
+ case IntegerCmpCondEqual:
+ return "eq"
+ case IntegerCmpCondNotEqual:
+ return "neq"
+ case IntegerCmpCondSignedLessThan:
+ return "lt_s"
+ case IntegerCmpCondSignedGreaterThanOrEqual:
+ return "ge_s"
+ case IntegerCmpCondSignedGreaterThan:
+ return "gt_s"
+ case IntegerCmpCondSignedLessThanOrEqual:
+ return "le_s"
+ case IntegerCmpCondUnsignedLessThan:
+ return "lt_u"
+ case IntegerCmpCondUnsignedGreaterThanOrEqual:
+ return "ge_u"
+ case IntegerCmpCondUnsignedGreaterThan:
+ return "gt_u"
+ case IntegerCmpCondUnsignedLessThanOrEqual:
+ return "le_u"
+ default:
+ panic("invalid integer comparison condition")
+ }
+}
+
+// Signed returns true if the condition is a signed integer comparison.
+func (i IntegerCmpCond) Signed() bool {
+ switch i {
+ case IntegerCmpCondSignedLessThan, IntegerCmpCondSignedGreaterThanOrEqual,
+ IntegerCmpCondSignedGreaterThan, IntegerCmpCondSignedLessThanOrEqual:
+ return true
+ default:
+ return false
+ }
+}
+
+// FloatCmpCond represents a condition for floating-point comparison.
+type FloatCmpCond byte
+
+const (
+ // FloatCmpCondInvalid represents an invalid condition.
+ FloatCmpCondInvalid FloatCmpCond = iota
+ // FloatCmpCondEqual represents "==".
+ FloatCmpCondEqual
+ // FloatCmpCondNotEqual represents "!=".
+ FloatCmpCondNotEqual
+ // FloatCmpCondLessThan represents "<".
+ FloatCmpCondLessThan
+ // FloatCmpCondLessThanOrEqual represents "<=".
+ FloatCmpCondLessThanOrEqual
+ // FloatCmpCondGreaterThan represents ">".
+ FloatCmpCondGreaterThan
+ // FloatCmpCondGreaterThanOrEqual represents ">=".
+ FloatCmpCondGreaterThanOrEqual
+)
+
+// String implements fmt.Stringer.
+func (f FloatCmpCond) String() string {
+ switch f {
+ case FloatCmpCondEqual:
+ return "eq"
+ case FloatCmpCondNotEqual:
+ return "neq"
+ case FloatCmpCondLessThan:
+ return "lt"
+ case FloatCmpCondLessThanOrEqual:
+ return "le"
+ case FloatCmpCondGreaterThan:
+ return "gt"
+ case FloatCmpCondGreaterThanOrEqual:
+ return "ge"
+ default:
+ panic("invalid float comparison condition")
+ }
+}
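A condition's mnemonic and signedness drive both instruction formatting and lowering. A small sketch of inspecting a condition, again assuming the package were importable:

	c := ssa.IntegerCmpCondSignedLessThan
	fmt.Println(c)          // "lt_s" (via the Stringer implementation)
	fmt.Println(c.Signed()) // true

	u := ssa.IntegerCmpCondUnsignedLessThan
	fmt.Println(u)          // "lt_u"
	fmt.Println(u.Signed()) // false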
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/funcref.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/funcref.go
new file mode 100644
index 000000000..d9620762a
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/funcref.go
@@ -0,0 +1,12 @@
+package ssa
+
+import "fmt"
+
+// FuncRef is a unique identifier for a function of the frontend,
+// and is used to reference the function in function calls.
+type FuncRef uint32
+
+// String implements fmt.Stringer.
+func (r FuncRef) String() string {
+ return fmt.Sprintf("f%d", r)
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/instructions.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/instructions.go
new file mode 100644
index 000000000..3e3482efc
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/instructions.go
@@ -0,0 +1,2967 @@
+package ssa
+
+import (
+ "fmt"
+ "math"
+ "strings"
+
+ "github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
+)
+
+// Opcode represents an SSA instruction.
+type Opcode uint32
+
+// Instruction represents an instruction whose opcode is specified by
+// Opcode. Since Go doesn't have union types, we use this flattened type
+// for all instructions, and therefore each field has a different meaning
+// depending on Opcode.
+type Instruction struct {
+ // id is the unique ID of this instruction which ascends from 0 following the order of the program.
+ id int
+ opcode Opcode
+ u1, u2 uint64
+ v Value
+ v2 Value
+ v3 Value
+ vs Values
+ typ Type
+ blk BasicBlock
+ targets []BasicBlock
+ prev, next *Instruction
+
+ rValue Value
+ rValues Values
+ gid InstructionGroupID
+ sourceOffset SourceOffset
+ live bool
+ alreadyLowered bool
+}
+
+// SourceOffset represents the offset of the source of an instruction.
+type SourceOffset int64
+
+const sourceOffsetUnknown = -1
+
+// Valid returns true if this source offset is valid.
+func (l SourceOffset) Valid() bool {
+ return l != sourceOffsetUnknown
+}
+
+func (i *Instruction) annotateSourceOffset(line SourceOffset) {
+ i.sourceOffset = line
+}
+
+// SourceOffset returns the source offset of this instruction.
+func (i *Instruction) SourceOffset() SourceOffset {
+ return i.sourceOffset
+}
+
+// Opcode returns the opcode of this instruction.
+func (i *Instruction) Opcode() Opcode {
+ return i.opcode
+}
+
+// GroupID returns the InstructionGroupID of this instruction.
+func (i *Instruction) GroupID() InstructionGroupID {
+ return i.gid
+}
+
+// MarkLowered marks this instruction as already lowered.
+func (i *Instruction) MarkLowered() {
+ i.alreadyLowered = true
+}
+
+// Lowered returns true if this instruction is already lowered.
+func (i *Instruction) Lowered() bool {
+ return i.alreadyLowered
+}
+
+// resetInstruction resets this instruction to the initial state.
+func resetInstruction(i *Instruction) {
+ *i = Instruction{}
+ i.v = ValueInvalid
+ i.v2 = ValueInvalid
+ i.v3 = ValueInvalid
+ i.rValue = ValueInvalid
+ i.typ = typeInvalid
+ i.vs = ValuesNil
+ i.sourceOffset = sourceOffsetUnknown
+}
+
+// InstructionGroupID is assigned to each instruction and represents a group of instructions
+// where each instruction is interchangeable with others except for the last instruction
+// in the group which has side effects. In short, InstructionGroupID is determined by the side effects of instructions.
+// That means, if there's an instruction with a side effect between two instructions, then these two instructions
+// will have different InstructionGroupIDs. Note that each block always ends with a branching instruction, which has side effects;
+// therefore, instructions in different blocks always have different InstructionGroupID(s).
+//
+// The notable application of this is in lowering an SSA-level instruction to ISA-specific instructions,
+// where we eagerly try to merge multiple instructions into a single operation etc. Such merging cannot be done
+// if these instructions have different InstructionGroupIDs since it would change the semantics of the program.
+//
+// See passDeadCodeElimination.
+type InstructionGroupID uint32
+
+// Returns returns the Value(s) produced by this instruction, if any.
+// `first` is the first return value, and `rest` holds the rest of the values.
+func (i *Instruction) Returns() (first Value, rest []Value) {
+ return i.rValue, i.rValues.View()
+}
+
+// Return returns the first Value produced by this instruction, if any.
+// If there are multiple return values, only the first one is returned.
+func (i *Instruction) Return() (first Value) {
+ return i.rValue
+}
+
+// Args returns the arguments to this instruction.
+func (i *Instruction) Args() (v1, v2, v3 Value, vs []Value) {
+ return i.v, i.v2, i.v3, i.vs.View()
+}
+
+// Arg returns the first argument to this instruction.
+func (i *Instruction) Arg() Value {
+ return i.v
+}
+
+// Arg2 returns the first two arguments to this instruction.
+func (i *Instruction) Arg2() (Value, Value) {
+ return i.v, i.v2
+}
+
+// ArgWithLane returns the first argument to this instruction, and the lane type.
+func (i *Instruction) ArgWithLane() (Value, VecLane) {
+ return i.v, VecLane(i.u1)
+}
+
+// Arg2WithLane returns the first two arguments to this instruction, and the lane type.
+func (i *Instruction) Arg2WithLane() (Value, Value, VecLane) {
+ return i.v, i.v2, VecLane(i.u1)
+}
+
+// ShuffleData returns the first two arguments to this instruction and 2 uint64s `lo`, `hi`.
+//
+// Note: Each uint64 encodes a sequence of 8 bytes where each byte encodes a VecLane,
+// so that the 128bit integer `hi<<64|lo` packs a slice `[16]VecLane`,
+// where `lane[0]` is the least significant byte, and `lane[n]` is shifted to offset `n*8`.
+func (i *Instruction) ShuffleData() (v Value, v2 Value, lo uint64, hi uint64) {
+ return i.v, i.v2, i.u1, i.u2
+}
+
+// Arg3 returns the first three arguments to this instruction.
+func (i *Instruction) Arg3() (Value, Value, Value) {
+ return i.v, i.v2, i.v3
+}
+
+// Next returns the next instruction laid out next to itself.
+func (i *Instruction) Next() *Instruction {
+ return i.next
+}
+
+// Prev returns the previous instruction laid out prior to itself.
+func (i *Instruction) Prev() *Instruction {
+ return i.prev
+}
+
+// IsBranching returns true if this instruction is a branching instruction.
+func (i *Instruction) IsBranching() bool {
+ switch i.opcode {
+ case OpcodeJump, OpcodeBrz, OpcodeBrnz, OpcodeBrTable:
+ return true
+ default:
+ return false
+ }
+}
+
+// TODO: complete opcode comments.
+const (
+ OpcodeInvalid Opcode = iota
+
+ // OpcodeUndefined is a placeholder for an undefined opcode. This can be used for debugging to intentionally
+ // cause a crash at a certain point.
+ OpcodeUndefined
+
+ // OpcodeJump takes the list of args to the `block` and unconditionally jumps to it.
+ OpcodeJump
+
+ // OpcodeBrz branches into `blk` with `args` if the value `c` equals zero: `Brz c, blk, args`.
+ OpcodeBrz
+
+ // OpcodeBrnz branches into `blk` with `args` if the value `c` is not zero: `Brnz c, blk, args`.
+ OpcodeBrnz
+
+ // OpcodeBrTable takes the index value `index`, and branches into `labelX`. If the `index` is out of range,
+ // it branches into the last labelN: `BrTable index, [label1, label2, ... labelN]`.
+ OpcodeBrTable
+
+ // OpcodeExitWithCode exits the execution immediately.
+ OpcodeExitWithCode
+
+ // OpcodeExitIfTrueWithCode exits the execution immediately if the value `c` is not zero.
+ OpcodeExitIfTrueWithCode
+
+ // OpcodeReturn returns from the function: `return rvalues`.
+ OpcodeReturn
+
+ // OpcodeCall calls a function specified by the symbol FN with arguments `args`: `returnvals = Call FN, args...`
+ // This is a "near" call, which means the call target is known at compile time, and the target is relatively close
+ // to this function. If the target cannot be reached by near call, the backend fails to compile.
+ OpcodeCall
+
+ // OpcodeCallIndirect calls a function specified by `callee` which is a function address: `returnvals = call_indirect SIG, callee, args`.
+ // Note that this is different from call_indirect in Wasm, which also does type checking, etc.
+ OpcodeCallIndirect
+
+ // OpcodeSplat performs a vector splat operation: `v = Splat.lane x`.
+ OpcodeSplat
+
+ // OpcodeSwizzle performs a vector swizzle operation: `v = Swizzle.lane x, y`.
+ OpcodeSwizzle
+
+ // OpcodeInsertlane inserts a lane value into a vector: `v = InsertLane x, y, Idx`.
+ OpcodeInsertlane
+
+ // OpcodeExtractlane extracts a lane value from a vector: `v = ExtractLane x, Idx`.
+ OpcodeExtractlane
+
+ // OpcodeLoad loads a Type value from the [base + offset] address: `v = Load base, offset`.
+ OpcodeLoad
+
+ // OpcodeStore stores a Type value to the [base + offset] address: `Store v, base, offset`.
+ OpcodeStore
+
+ // OpcodeUload8 loads the 8-bit value from the [base + offset] address, zero-extended to 64 bits: `v = Uload8 base, offset`.
+ OpcodeUload8
+
+ // OpcodeSload8 loads the 8-bit value from the [base + offset] address, sign-extended to 64 bits: `v = Sload8 base, offset`.
+ OpcodeSload8
+
+ // OpcodeIstore8 stores the low 8 bits of the value to the [base + offset] address: `Istore8 v, base, offset`.
+ OpcodeIstore8
+
+ // OpcodeUload16 loads the 16-bit value from the [base + offset] address, zero-extended to 64 bits: `v = Uload16 base, offset`.
+ OpcodeUload16
+
+ // OpcodeSload16 loads the 16-bit value from the [base + offset] address, sign-extended to 64 bits: `v = Sload16 base, offset`.
+ OpcodeSload16
+
+ // OpcodeIstore16 stores the low 16 bits of the value to the [base + offset] address: `Istore16 v, base, offset`.
+ OpcodeIstore16
+
+ // OpcodeUload32 loads the 32-bit value from the [base + offset] address, zero-extended to 64 bits: `v = Uload32 base, offset`.
+ OpcodeUload32
+
+ // OpcodeSload32 loads the 32-bit value from the [base + offset] address, sign-extended to 64 bits: `v = Sload32 base, offset`.
+ OpcodeSload32
+
+ // OpcodeIstore32 stores the low 32 bits of the value to the [base + offset] address: `Istore32 v, base, offset`.
+ OpcodeIstore32
+
+ // OpcodeLoadSplat represents a load that replicates the loaded value to all lanes: `v = LoadSplat.lane p, Offset`.
+ OpcodeLoadSplat
+
+ // OpcodeVZeroExtLoad loads a scalar single/double precision floating point value from the [p + Offset] address,
+ // and zero-extends it to the V128 value: `v = VZeroExtLoad p, Offset`.
+ OpcodeVZeroExtLoad
+
+ // OpcodeIconst represents the integer const.
+ OpcodeIconst
+
+ // OpcodeF32const represents the single-precision const.
+ OpcodeF32const
+
+ // OpcodeF64const represents the double-precision const.
+ OpcodeF64const
+
+ // OpcodeVconst represents the 128bit vector const.
+ OpcodeVconst
+
+ // OpcodeVbor computes binary or between two 128bit vectors: `v = bor x, y`.
+ OpcodeVbor
+
+ // OpcodeVbxor computes binary xor between two 128bit vectors: `v = bxor x, y`.
+ OpcodeVbxor
+
+ // OpcodeVband computes binary and between two 128bit vectors: `v = band x, y`.
+ OpcodeVband
+
+ // OpcodeVbandnot computes binary and-not between two 128bit vectors: `v = bandnot x, y`.
+ OpcodeVbandnot
+
+ // OpcodeVbnot negates a 128bit vector: `v = bnot x`.
+ OpcodeVbnot
+
+ // OpcodeVbitselect uses the bits in the control mask c to select the corresponding bit from x when 1
+ // and y when 0: `v = bitselect c, x, y`.
+ OpcodeVbitselect
+
+ // OpcodeShuffle shuffles two vectors using the given 128-bit immediate: `v = shuffle imm, x, y`.
+ // For each byte in the immediate, a value i in [0, 15] selects the i-th byte in vector x;
+ // i in [16, 31] selects the (i-16)-th byte in vector y.
+ OpcodeShuffle
+
+ // OpcodeSelect chooses between two values based on a condition `c`: `v = Select c, x, y`.
+ OpcodeSelect
+
+ // OpcodeVanyTrue performs an any-true operation: `s = VanyTrue a`.
+ OpcodeVanyTrue
+
+ // OpcodeVallTrue performs a lane-wise all true operation: `s = VallTrue.lane a`.
+ OpcodeVallTrue
+
+ // OpcodeVhighBits performs a lane-wise extract of the high bits: `v = VhighBits.lane a`.
+ OpcodeVhighBits
+
+ // OpcodeIcmp compares two integer values with the given condition: `v = icmp Cond, x, y`.
+ OpcodeIcmp
+
+ // OpcodeVIcmp compares two integer values with the given condition: `v = vicmp Cond, x, y` on vector.
+ OpcodeVIcmp
+
+ // OpcodeIcmpImm compares an integer value with the immediate value on the given condition: `v = icmp_imm Cond, x, Y`.
+ OpcodeIcmpImm
+
+ // OpcodeIadd performs an integer addition: `v = Iadd x, y`.
+ OpcodeIadd
+
+ // OpcodeVIadd performs an integer addition: `v = VIadd.lane x, y` on vector.
+ OpcodeVIadd
+
+ // OpcodeVSaddSat performs a signed saturating vector addition: `v = VSaddSat.lane x, y` on vector.
+ OpcodeVSaddSat
+
+ // OpcodeVUaddSat performs an unsigned saturating vector addition: `v = VUaddSat.lane x, y` on vector.
+ OpcodeVUaddSat
+
+ // OpcodeIsub performs an integer subtraction: `v = Isub x, y`.
+ OpcodeIsub
+
+ // OpcodeVIsub performs an integer subtraction: `v = VIsub.lane x, y` on vector.
+ OpcodeVIsub
+
+ // OpcodeVSsubSat performs a signed saturating vector subtraction: `v = VSsubSat.lane x, y` on vector.
+ OpcodeVSsubSat
+
+ // OpcodeVUsubSat performs an unsigned saturating vector subtraction: `v = VUsubSat.lane x, y` on vector.
+ OpcodeVUsubSat
+
+ // OpcodeVImin performs a signed integer min: `v = VImin.lane x, y` on vector.
+ OpcodeVImin
+
+ // OpcodeVUmin performs an unsigned integer min: `v = VUmin.lane x, y` on vector.
+ OpcodeVUmin
+
+ // OpcodeVImax performs a signed integer max: `v = VImax.lane x, y` on vector.
+ OpcodeVImax
+
+ // OpcodeVUmax performs an unsigned integer max: `v = VUmax.lane x, y` on vector.
+ OpcodeVUmax
+
+	// OpcodeVAvgRound performs an unsigned rounding average, computed lane-wise as (x + y + 1) / 2: `v = VAvgRound.lane x, y` on vector.
+ OpcodeVAvgRound
+
+ // OpcodeVImul performs an integer multiplication: `v = VImul.lane x, y` on vector.
+ OpcodeVImul
+
+ // OpcodeVIneg negates the given integer vector value: `v = VIneg x`.
+ OpcodeVIneg
+
+ // OpcodeVIpopcnt counts the number of 1-bits in the given vector: `v = VIpopcnt x`.
+ OpcodeVIpopcnt
+
+ // OpcodeVIabs returns the absolute value for the given vector value: `v = VIabs.lane x`.
+ OpcodeVIabs
+
+ // OpcodeVIshl shifts x left by (y mod lane-width): `v = VIshl.lane x, y` on vector.
+ OpcodeVIshl
+
+ // OpcodeVUshr shifts x right by (y mod lane-width), unsigned: `v = VUshr.lane x, y` on vector.
+ OpcodeVUshr
+
+ // OpcodeVSshr shifts x right by (y mod lane-width), signed: `v = VSshr.lane x, y` on vector.
+ OpcodeVSshr
+
+	// OpcodeVFabs takes the absolute value of a floating point value: `v = VFabs.lane x` on vector.
+	OpcodeVFabs
+
+	// OpcodeVFmax takes the maximum of two floating point values: `v = VFmax.lane x, y` on vector.
+	OpcodeVFmax
+
+	// OpcodeVFmin takes the minimum of two floating point values: `v = VFmin.lane x, y` on vector.
+	OpcodeVFmin
+ OpcodeVFmin
+
+ // OpcodeVFneg negates the given floating point vector value: `v = VFneg x`.
+ OpcodeVFneg
+
+ // OpcodeVFadd performs a floating point addition: `v = VFadd.lane x, y` on vector.
+ OpcodeVFadd
+
+ // OpcodeVFsub performs a floating point subtraction: `v = VFsub.lane x, y` on vector.
+ OpcodeVFsub
+
+ // OpcodeVFmul performs a floating point multiplication: `v = VFmul.lane x, y` on vector.
+ OpcodeVFmul
+
+ // OpcodeVFdiv performs a floating point division: `v = VFdiv.lane x, y` on vector.
+ OpcodeVFdiv
+
+ // OpcodeVFcmp compares two float values with the given condition: `v = VFcmp.lane Cond, x, y` on float.
+ OpcodeVFcmp
+
+ // OpcodeVCeil takes the ceiling of the given floating point value: `v = ceil.lane x` on vector.
+ OpcodeVCeil
+
+ // OpcodeVFloor takes the floor of the given floating point value: `v = floor.lane x` on vector.
+ OpcodeVFloor
+
+ // OpcodeVTrunc takes the truncation of the given floating point value: `v = trunc.lane x` on vector.
+ OpcodeVTrunc
+
+ // OpcodeVNearest takes the nearest integer of the given floating point value: `v = nearest.lane x` on vector.
+ OpcodeVNearest
+
+	// OpcodeVMaxPseudo computes the lane-wise pseudo-maximum value `v = VMaxPseudo.lane x, y` on vector, defined as `x < y ? y : x`.
+	OpcodeVMaxPseudo
+
+	// OpcodeVMinPseudo computes the lane-wise pseudo-minimum value `v = VMinPseudo.lane x, y` on vector, defined as `y < x ? y : x`.
+	OpcodeVMinPseudo
+
+	// OpcodeVSqrt takes the square root of the given floating point value: `v = VSqrt.lane x` on vector.
+ OpcodeVSqrt
+
+ // OpcodeVFcvtToUintSat converts a floating point value to an unsigned integer: `v = FcvtToUintSat.lane x` on vector.
+ OpcodeVFcvtToUintSat
+
+ // OpcodeVFcvtToSintSat converts a floating point value to a signed integer: `v = VFcvtToSintSat.lane x` on vector.
+ OpcodeVFcvtToSintSat
+
+ // OpcodeVFcvtFromUint converts a floating point value from an unsigned integer: `v = FcvtFromUint.lane x` on vector.
+	// x is always a vector of 32-bit integer lanes, and the result is either a 32-bit or 64-bit floating point-sized vector.
+ OpcodeVFcvtFromUint
+
+ // OpcodeVFcvtFromSint converts a floating point value from a signed integer: `v = VFcvtFromSint.lane x` on vector.
+	// x is always a vector of 32-bit integer lanes, and the result is either a 32-bit or 64-bit floating point-sized vector.
+ OpcodeVFcvtFromSint
+
+ // OpcodeImul performs an integer multiplication: `v = Imul x, y`.
+ OpcodeImul
+
+ // OpcodeUdiv performs the unsigned integer division `v = Udiv x, y`.
+ OpcodeUdiv
+
+ // OpcodeSdiv performs the signed integer division `v = Sdiv x, y`.
+ OpcodeSdiv
+
+ // OpcodeUrem computes the remainder of the unsigned integer division `v = Urem x, y`.
+ OpcodeUrem
+
+ // OpcodeSrem computes the remainder of the signed integer division `v = Srem x, y`.
+ OpcodeSrem
+
+ // OpcodeBand performs a binary and: `v = Band x, y`.
+ OpcodeBand
+
+ // OpcodeBor performs a binary or: `v = Bor x, y`.
+ OpcodeBor
+
+ // OpcodeBxor performs a binary xor: `v = Bxor x, y`.
+ OpcodeBxor
+
+ // OpcodeBnot performs a binary not: `v = Bnot x`.
+ OpcodeBnot
+
+ // OpcodeRotl rotates the given integer value to the left: `v = Rotl x, y`.
+ OpcodeRotl
+
+ // OpcodeRotr rotates the given integer value to the right: `v = Rotr x, y`.
+ OpcodeRotr
+
+ // OpcodeIshl does logical shift left: `v = Ishl x, y`.
+ OpcodeIshl
+
+ // OpcodeUshr does logical shift right: `v = Ushr x, y`.
+ OpcodeUshr
+
+ // OpcodeSshr does arithmetic shift right: `v = Sshr x, y`.
+ OpcodeSshr
+
+ // OpcodeClz counts the number of leading zeros: `v = clz x`.
+ OpcodeClz
+
+ // OpcodeCtz counts the number of trailing zeros: `v = ctz x`.
+ OpcodeCtz
+
+ // OpcodePopcnt counts the number of 1-bits: `v = popcnt x`.
+ OpcodePopcnt
+
+ // OpcodeFcmp compares two floating point values: `v = fcmp Cond, x, y`.
+ OpcodeFcmp
+
+	// OpcodeFadd performs a floating point addition: `v = Fadd x, y`.
+ OpcodeFadd
+
+ // OpcodeFsub performs a floating point subtraction: `v = Fsub x, y`.
+ OpcodeFsub
+
+ // OpcodeFmul performs a floating point multiplication: `v = Fmul x, y`.
+ OpcodeFmul
+
+ // OpcodeSqmulRoundSat performs a lane-wise saturating rounding multiplication
+ // in Q15 format: `v = SqmulRoundSat.lane x,y` on vector.
+ OpcodeSqmulRoundSat
+
+ // OpcodeFdiv performs a floating point division: `v = Fdiv x, y`.
+ OpcodeFdiv
+
+ // OpcodeSqrt takes the square root of the given floating point value: `v = sqrt x`.
+ OpcodeSqrt
+
+ // OpcodeFneg negates the given floating point value: `v = Fneg x`.
+ OpcodeFneg
+
+ // OpcodeFabs takes the absolute value of the given floating point value: `v = fabs x`.
+ OpcodeFabs
+
+ // OpcodeFcopysign copies the sign of the second floating point value to the first floating point value:
+ // `v = Fcopysign x, y`.
+ OpcodeFcopysign
+
+ // OpcodeFmin takes the minimum of two floating point values: `v = fmin x, y`.
+ OpcodeFmin
+
+ // OpcodeFmax takes the maximum of two floating point values: `v = fmax x, y`.
+ OpcodeFmax
+
+ // OpcodeCeil takes the ceiling of the given floating point value: `v = ceil x`.
+ OpcodeCeil
+
+ // OpcodeFloor takes the floor of the given floating point value: `v = floor x`.
+ OpcodeFloor
+
+ // OpcodeTrunc takes the truncation of the given floating point value: `v = trunc x`.
+ OpcodeTrunc
+
+ // OpcodeNearest takes the nearest integer of the given floating point value: `v = nearest x`.
+ OpcodeNearest
+
+ // OpcodeBitcast is a bitcast operation: `v = bitcast x`.
+ OpcodeBitcast
+
+	// OpcodeIreduce narrows the given integer: `v = Ireduce x`.
+ OpcodeIreduce
+
+ // OpcodeSnarrow converts two input vectors x, y into a smaller lane vector by narrowing each lane, signed `v = Snarrow.lane x, y`.
+ OpcodeSnarrow
+
+ // OpcodeUnarrow converts two input vectors x, y into a smaller lane vector by narrowing each lane, unsigned `v = Unarrow.lane x, y`.
+ OpcodeUnarrow
+
+ // OpcodeSwidenLow converts low half of the smaller lane vector to a larger lane vector, sign extended: `v = SwidenLow.lane x`.
+ OpcodeSwidenLow
+
+ // OpcodeSwidenHigh converts high half of the smaller lane vector to a larger lane vector, sign extended: `v = SwidenHigh.lane x`.
+ OpcodeSwidenHigh
+
+ // OpcodeUwidenLow converts low half of the smaller lane vector to a larger lane vector, zero (unsigned) extended: `v = UwidenLow.lane x`.
+ OpcodeUwidenLow
+
+ // OpcodeUwidenHigh converts high half of the smaller lane vector to a larger lane vector, zero (unsigned) extended: `v = UwidenHigh.lane x`.
+ OpcodeUwidenHigh
+
+	// OpcodeExtIaddPairwise is a lane-wise integer extended pairwise addition producing extended results (twice as wide as the inputs): `v = extiadd_pairwise x` on vector.
+ OpcodeExtIaddPairwise
+
+ // OpcodeWideningPairwiseDotProductS is a lane-wise widening pairwise dot product with signed saturation: `v = WideningPairwiseDotProductS x, y` on vector.
+ // Currently, the only lane is i16, and the result is i32.
+ OpcodeWideningPairwiseDotProductS
+
+ // OpcodeUExtend zero-extends the given integer: `v = UExtend x, from->to`.
+ OpcodeUExtend
+
+ // OpcodeSExtend sign-extends the given integer: `v = SExtend x, from->to`.
+ OpcodeSExtend
+
+ // OpcodeFpromote promotes the given floating point value: `v = Fpromote x`.
+ OpcodeFpromote
+
+ // OpcodeFvpromoteLow converts the two lower single-precision floating point lanes
+ // to the two double-precision lanes of the result: `v = FvpromoteLow.lane x` on vector.
+ OpcodeFvpromoteLow
+
+	// OpcodeFdemote demotes the given floating point value: `v = Fdemote x`.
+ OpcodeFdemote
+
+	// OpcodeFvdemote converts the two double-precision floating point lanes
+	// to the two lower single-precision lanes of the result `v = Fvdemote.lane x`.
+ OpcodeFvdemote
+
+ // OpcodeFcvtToUint converts a floating point value to an unsigned integer: `v = FcvtToUint x`.
+ OpcodeFcvtToUint
+
+ // OpcodeFcvtToSint converts a floating point value to a signed integer: `v = FcvtToSint x`.
+ OpcodeFcvtToSint
+
+ // OpcodeFcvtToUintSat converts a floating point value to an unsigned integer: `v = FcvtToUintSat x` which saturates on overflow.
+ OpcodeFcvtToUintSat
+
+ // OpcodeFcvtToSintSat converts a floating point value to a signed integer: `v = FcvtToSintSat x` which saturates on overflow.
+ OpcodeFcvtToSintSat
+
+ // OpcodeFcvtFromUint converts an unsigned integer to a floating point value: `v = FcvtFromUint x`.
+ OpcodeFcvtFromUint
+
+ // OpcodeFcvtFromSint converts a signed integer to a floating point value: `v = FcvtFromSint x`.
+ OpcodeFcvtFromSint
+
+	// OpcodeAtomicRmw is an atomic read-modify-write operation: `v = atomic_rmw op, p, offset, value`.
+	OpcodeAtomicRmw
+
+	// OpcodeAtomicCas is an atomic compare-and-swap operation.
+	OpcodeAtomicCas
+
+	// OpcodeAtomicLoad is an atomic load operation.
+	OpcodeAtomicLoad
+
+	// OpcodeAtomicStore is an atomic store operation.
+	OpcodeAtomicStore
+
+ // OpcodeFence is a memory fence operation.
+ OpcodeFence
+
+ // opcodeEnd marks the end of the opcode list.
+ opcodeEnd
+)
+
+// AtomicRmwOp represents the atomic read-modify-write operation.
+type AtomicRmwOp byte
+
+const (
+ // AtomicRmwOpAdd is an atomic add operation.
+ AtomicRmwOpAdd AtomicRmwOp = iota
+ // AtomicRmwOpSub is an atomic sub operation.
+ AtomicRmwOpSub
+ // AtomicRmwOpAnd is an atomic and operation.
+ AtomicRmwOpAnd
+ // AtomicRmwOpOr is an atomic or operation.
+ AtomicRmwOpOr
+ // AtomicRmwOpXor is an atomic xor operation.
+ AtomicRmwOpXor
+ // AtomicRmwOpXchg is an atomic swap operation.
+ AtomicRmwOpXchg
+)
+
+// String implements fmt.Stringer.
+func (op AtomicRmwOp) String() string {
+ switch op {
+ case AtomicRmwOpAdd:
+ return "add"
+ case AtomicRmwOpSub:
+ return "sub"
+ case AtomicRmwOpAnd:
+ return "and"
+ case AtomicRmwOpOr:
+ return "or"
+ case AtomicRmwOpXor:
+ return "xor"
+ case AtomicRmwOpXchg:
+ return "xchg"
+ }
+ panic(fmt.Sprintf("unknown AtomicRmwOp: %d", op))
+}
+
+// returnTypesFn provides the info to determine the result types of an instruction.
+// t1 is the type of the first result, ts are the types of the remaining results.
+type returnTypesFn func(b *builder, instr *Instruction) (t1 Type, ts []Type)
+
+var (
+ returnTypesFnNoReturns returnTypesFn = func(b *builder, instr *Instruction) (t1 Type, ts []Type) { return typeInvalid, nil }
+ returnTypesFnSingle = func(b *builder, instr *Instruction) (t1 Type, ts []Type) { return instr.typ, nil }
+ returnTypesFnI32 = func(b *builder, instr *Instruction) (t1 Type, ts []Type) { return TypeI32, nil }
+ returnTypesFnF32 = func(b *builder, instr *Instruction) (t1 Type, ts []Type) { return TypeF32, nil }
+ returnTypesFnF64 = func(b *builder, instr *Instruction) (t1 Type, ts []Type) { return TypeF64, nil }
+ returnTypesFnV128 = func(b *builder, instr *Instruction) (t1 Type, ts []Type) { return TypeV128, nil }
+)
+
+// sideEffect classifies whether an instruction has side effects, which is used
+// to determine whether it can be optimized out, reordered with others, etc.
+type sideEffect byte
+
+const (
+ sideEffectUnknown sideEffect = iota
+	// sideEffectStrict represents an instruction with side effects: it must always stay alive and cannot be reordered.
+	sideEffectStrict
+	// sideEffectTraps represents an instruction that can trap: it must always stay alive but can be reordered within its group.
+	sideEffectTraps
+	// sideEffectNone represents an instruction without side effects: it can be eliminated if its result is unused, and can be reordered within its group.
+	sideEffectNone
+)
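+
+// As an illustrative sketch only (the real logic lives in the passes in
+// pass.go and may differ in detail), a dead-code elimination pass can use
+// this classification roughly as:
+//
+//	if instr.sideEffect() == sideEffectNone && !anyResultUsed(instr) {
+//		removeInstruction(instr) // no observable effect and no consumer.
+//	}
+//
+// where anyResultUsed and removeInstruction are hypothetical helpers.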
+
+// instructionSideEffects provides the info to determine if an instruction has side effects.
+// Instructions with side effects must not be eliminated regardless of whether their results are used or not.
+var instructionSideEffects = [opcodeEnd]sideEffect{
+ OpcodeUndefined: sideEffectStrict,
+ OpcodeJump: sideEffectStrict,
+ OpcodeIconst: sideEffectNone,
+ OpcodeCall: sideEffectStrict,
+ OpcodeCallIndirect: sideEffectStrict,
+ OpcodeIadd: sideEffectNone,
+ OpcodeImul: sideEffectNone,
+ OpcodeIsub: sideEffectNone,
+ OpcodeIcmp: sideEffectNone,
+ OpcodeExtractlane: sideEffectNone,
+ OpcodeInsertlane: sideEffectNone,
+ OpcodeBand: sideEffectNone,
+ OpcodeBor: sideEffectNone,
+ OpcodeBxor: sideEffectNone,
+ OpcodeRotl: sideEffectNone,
+ OpcodeRotr: sideEffectNone,
+ OpcodeFcmp: sideEffectNone,
+ OpcodeFadd: sideEffectNone,
+ OpcodeClz: sideEffectNone,
+ OpcodeCtz: sideEffectNone,
+ OpcodePopcnt: sideEffectNone,
+ OpcodeLoad: sideEffectNone,
+ OpcodeLoadSplat: sideEffectNone,
+ OpcodeUload8: sideEffectNone,
+ OpcodeUload16: sideEffectNone,
+ OpcodeUload32: sideEffectNone,
+ OpcodeSload8: sideEffectNone,
+ OpcodeSload16: sideEffectNone,
+ OpcodeSload32: sideEffectNone,
+ OpcodeSExtend: sideEffectNone,
+ OpcodeUExtend: sideEffectNone,
+ OpcodeSwidenLow: sideEffectNone,
+ OpcodeUwidenLow: sideEffectNone,
+ OpcodeSwidenHigh: sideEffectNone,
+ OpcodeUwidenHigh: sideEffectNone,
+ OpcodeSnarrow: sideEffectNone,
+ OpcodeUnarrow: sideEffectNone,
+ OpcodeSwizzle: sideEffectNone,
+ OpcodeShuffle: sideEffectNone,
+ OpcodeSplat: sideEffectNone,
+ OpcodeFsub: sideEffectNone,
+ OpcodeF32const: sideEffectNone,
+ OpcodeF64const: sideEffectNone,
+ OpcodeIshl: sideEffectNone,
+ OpcodeSshr: sideEffectNone,
+ OpcodeUshr: sideEffectNone,
+ OpcodeStore: sideEffectStrict,
+ OpcodeIstore8: sideEffectStrict,
+ OpcodeIstore16: sideEffectStrict,
+ OpcodeIstore32: sideEffectStrict,
+ OpcodeExitWithCode: sideEffectStrict,
+ OpcodeExitIfTrueWithCode: sideEffectStrict,
+ OpcodeReturn: sideEffectStrict,
+ OpcodeBrz: sideEffectStrict,
+ OpcodeBrnz: sideEffectStrict,
+ OpcodeBrTable: sideEffectStrict,
+ OpcodeFdiv: sideEffectNone,
+ OpcodeFmul: sideEffectNone,
+ OpcodeFmax: sideEffectNone,
+ OpcodeSqmulRoundSat: sideEffectNone,
+ OpcodeSelect: sideEffectNone,
+ OpcodeFmin: sideEffectNone,
+ OpcodeFneg: sideEffectNone,
+ OpcodeFcvtToSint: sideEffectTraps,
+ OpcodeFcvtToUint: sideEffectTraps,
+ OpcodeFcvtFromSint: sideEffectNone,
+ OpcodeFcvtFromUint: sideEffectNone,
+ OpcodeFcvtToSintSat: sideEffectNone,
+ OpcodeFcvtToUintSat: sideEffectNone,
+ OpcodeVFcvtFromUint: sideEffectNone,
+ OpcodeVFcvtFromSint: sideEffectNone,
+ OpcodeFdemote: sideEffectNone,
+ OpcodeFvpromoteLow: sideEffectNone,
+ OpcodeFvdemote: sideEffectNone,
+ OpcodeFpromote: sideEffectNone,
+ OpcodeBitcast: sideEffectNone,
+ OpcodeIreduce: sideEffectNone,
+ OpcodeSqrt: sideEffectNone,
+ OpcodeCeil: sideEffectNone,
+ OpcodeFloor: sideEffectNone,
+ OpcodeTrunc: sideEffectNone,
+ OpcodeNearest: sideEffectNone,
+ OpcodeSdiv: sideEffectTraps,
+ OpcodeSrem: sideEffectTraps,
+ OpcodeUdiv: sideEffectTraps,
+ OpcodeUrem: sideEffectTraps,
+ OpcodeFabs: sideEffectNone,
+ OpcodeFcopysign: sideEffectNone,
+ OpcodeExtIaddPairwise: sideEffectNone,
+ OpcodeVconst: sideEffectNone,
+ OpcodeVbor: sideEffectNone,
+ OpcodeVbxor: sideEffectNone,
+ OpcodeVband: sideEffectNone,
+ OpcodeVbandnot: sideEffectNone,
+ OpcodeVbnot: sideEffectNone,
+ OpcodeVbitselect: sideEffectNone,
+ OpcodeVanyTrue: sideEffectNone,
+ OpcodeVallTrue: sideEffectNone,
+ OpcodeVhighBits: sideEffectNone,
+ OpcodeVIadd: sideEffectNone,
+ OpcodeVSaddSat: sideEffectNone,
+ OpcodeVUaddSat: sideEffectNone,
+ OpcodeVIsub: sideEffectNone,
+ OpcodeVSsubSat: sideEffectNone,
+ OpcodeVUsubSat: sideEffectNone,
+ OpcodeVIcmp: sideEffectNone,
+ OpcodeVImin: sideEffectNone,
+ OpcodeVUmin: sideEffectNone,
+ OpcodeVImax: sideEffectNone,
+ OpcodeVUmax: sideEffectNone,
+ OpcodeVAvgRound: sideEffectNone,
+ OpcodeVImul: sideEffectNone,
+ OpcodeVIabs: sideEffectNone,
+ OpcodeVIneg: sideEffectNone,
+ OpcodeVIpopcnt: sideEffectNone,
+ OpcodeVIshl: sideEffectNone,
+ OpcodeVSshr: sideEffectNone,
+ OpcodeVUshr: sideEffectNone,
+ OpcodeVSqrt: sideEffectNone,
+ OpcodeVFabs: sideEffectNone,
+ OpcodeVFmin: sideEffectNone,
+ OpcodeVFmax: sideEffectNone,
+ OpcodeVFneg: sideEffectNone,
+ OpcodeVFadd: sideEffectNone,
+ OpcodeVFsub: sideEffectNone,
+ OpcodeVFmul: sideEffectNone,
+ OpcodeVFdiv: sideEffectNone,
+ OpcodeVFcmp: sideEffectNone,
+ OpcodeVCeil: sideEffectNone,
+ OpcodeVFloor: sideEffectNone,
+ OpcodeVTrunc: sideEffectNone,
+ OpcodeVNearest: sideEffectNone,
+ OpcodeVMaxPseudo: sideEffectNone,
+ OpcodeVMinPseudo: sideEffectNone,
+ OpcodeVFcvtToUintSat: sideEffectNone,
+ OpcodeVFcvtToSintSat: sideEffectNone,
+ OpcodeVZeroExtLoad: sideEffectNone,
+ OpcodeAtomicRmw: sideEffectStrict,
+ OpcodeAtomicLoad: sideEffectStrict,
+ OpcodeAtomicStore: sideEffectStrict,
+ OpcodeAtomicCas: sideEffectStrict,
+ OpcodeFence: sideEffectStrict,
+ OpcodeWideningPairwiseDotProductS: sideEffectNone,
+}
+
+// sideEffect returns the side effect classification of this instruction.
+func (i *Instruction) sideEffect() sideEffect {
+ if e := instructionSideEffects[i.opcode]; e == sideEffectUnknown {
+ panic("BUG: side effect info not registered for " + i.opcode.String())
+ } else {
+ return e
+ }
+}
+
+// instructionReturnTypes provides the function to determine the return types of an instruction.
+var instructionReturnTypes = [opcodeEnd]returnTypesFn{
+ OpcodeExtIaddPairwise: returnTypesFnV128,
+ OpcodeVbor: returnTypesFnV128,
+ OpcodeVbxor: returnTypesFnV128,
+ OpcodeVband: returnTypesFnV128,
+ OpcodeVbnot: returnTypesFnV128,
+ OpcodeVbandnot: returnTypesFnV128,
+ OpcodeVbitselect: returnTypesFnV128,
+ OpcodeVanyTrue: returnTypesFnI32,
+ OpcodeVallTrue: returnTypesFnI32,
+ OpcodeVhighBits: returnTypesFnI32,
+ OpcodeVIadd: returnTypesFnV128,
+ OpcodeVSaddSat: returnTypesFnV128,
+ OpcodeVUaddSat: returnTypesFnV128,
+ OpcodeVIsub: returnTypesFnV128,
+ OpcodeVSsubSat: returnTypesFnV128,
+ OpcodeVUsubSat: returnTypesFnV128,
+ OpcodeVIcmp: returnTypesFnV128,
+ OpcodeVImin: returnTypesFnV128,
+ OpcodeVUmin: returnTypesFnV128,
+ OpcodeVImax: returnTypesFnV128,
+ OpcodeVUmax: returnTypesFnV128,
+ OpcodeVImul: returnTypesFnV128,
+ OpcodeVAvgRound: returnTypesFnV128,
+ OpcodeVIabs: returnTypesFnV128,
+ OpcodeVIneg: returnTypesFnV128,
+ OpcodeVIpopcnt: returnTypesFnV128,
+ OpcodeVIshl: returnTypesFnV128,
+ OpcodeVSshr: returnTypesFnV128,
+ OpcodeVUshr: returnTypesFnV128,
+ OpcodeExtractlane: returnTypesFnSingle,
+ OpcodeInsertlane: returnTypesFnV128,
+ OpcodeBand: returnTypesFnSingle,
+ OpcodeFcopysign: returnTypesFnSingle,
+ OpcodeBitcast: returnTypesFnSingle,
+ OpcodeBor: returnTypesFnSingle,
+ OpcodeBxor: returnTypesFnSingle,
+ OpcodeRotl: returnTypesFnSingle,
+ OpcodeRotr: returnTypesFnSingle,
+ OpcodeIshl: returnTypesFnSingle,
+ OpcodeSshr: returnTypesFnSingle,
+ OpcodeSdiv: returnTypesFnSingle,
+ OpcodeSrem: returnTypesFnSingle,
+ OpcodeUdiv: returnTypesFnSingle,
+ OpcodeUrem: returnTypesFnSingle,
+ OpcodeUshr: returnTypesFnSingle,
+ OpcodeJump: returnTypesFnNoReturns,
+ OpcodeUndefined: returnTypesFnNoReturns,
+ OpcodeIconst: returnTypesFnSingle,
+ OpcodeSelect: returnTypesFnSingle,
+ OpcodeSExtend: returnTypesFnSingle,
+ OpcodeUExtend: returnTypesFnSingle,
+ OpcodeSwidenLow: returnTypesFnV128,
+ OpcodeUwidenLow: returnTypesFnV128,
+ OpcodeSwidenHigh: returnTypesFnV128,
+ OpcodeUwidenHigh: returnTypesFnV128,
+ OpcodeSnarrow: returnTypesFnV128,
+ OpcodeUnarrow: returnTypesFnV128,
+ OpcodeSwizzle: returnTypesFnSingle,
+ OpcodeShuffle: returnTypesFnV128,
+ OpcodeSplat: returnTypesFnV128,
+ OpcodeIreduce: returnTypesFnSingle,
+ OpcodeFabs: returnTypesFnSingle,
+ OpcodeSqrt: returnTypesFnSingle,
+ OpcodeCeil: returnTypesFnSingle,
+ OpcodeFloor: returnTypesFnSingle,
+ OpcodeTrunc: returnTypesFnSingle,
+ OpcodeNearest: returnTypesFnSingle,
+ OpcodeCallIndirect: func(b *builder, instr *Instruction) (t1 Type, ts []Type) {
+ sigID := SignatureID(instr.u1)
+ sig, ok := b.signatures[sigID]
+ if !ok {
+ panic("BUG")
+ }
+ switch len(sig.Results) {
+ case 0:
+ t1 = typeInvalid
+ case 1:
+ t1 = sig.Results[0]
+ default:
+ t1, ts = sig.Results[0], sig.Results[1:]
+ }
+ return
+ },
+ OpcodeCall: func(b *builder, instr *Instruction) (t1 Type, ts []Type) {
+ sigID := SignatureID(instr.u2)
+ sig, ok := b.signatures[sigID]
+ if !ok {
+ panic("BUG")
+ }
+ switch len(sig.Results) {
+ case 0:
+ t1 = typeInvalid
+ case 1:
+ t1 = sig.Results[0]
+ default:
+ t1, ts = sig.Results[0], sig.Results[1:]
+ }
+ return
+ },
+ OpcodeLoad: returnTypesFnSingle,
+ OpcodeVZeroExtLoad: returnTypesFnV128,
+ OpcodeLoadSplat: returnTypesFnV128,
+ OpcodeIadd: returnTypesFnSingle,
+ OpcodeIsub: returnTypesFnSingle,
+ OpcodeImul: returnTypesFnSingle,
+ OpcodeIcmp: returnTypesFnI32,
+ OpcodeFcmp: returnTypesFnI32,
+ OpcodeFadd: returnTypesFnSingle,
+ OpcodeFsub: returnTypesFnSingle,
+ OpcodeFdiv: returnTypesFnSingle,
+ OpcodeFmul: returnTypesFnSingle,
+ OpcodeFmax: returnTypesFnSingle,
+ OpcodeFmin: returnTypesFnSingle,
+ OpcodeSqmulRoundSat: returnTypesFnV128,
+ OpcodeF32const: returnTypesFnF32,
+ OpcodeF64const: returnTypesFnF64,
+ OpcodeClz: returnTypesFnSingle,
+ OpcodeCtz: returnTypesFnSingle,
+ OpcodePopcnt: returnTypesFnSingle,
+ OpcodeStore: returnTypesFnNoReturns,
+ OpcodeIstore8: returnTypesFnNoReturns,
+ OpcodeIstore16: returnTypesFnNoReturns,
+ OpcodeIstore32: returnTypesFnNoReturns,
+ OpcodeExitWithCode: returnTypesFnNoReturns,
+ OpcodeExitIfTrueWithCode: returnTypesFnNoReturns,
+ OpcodeReturn: returnTypesFnNoReturns,
+ OpcodeBrz: returnTypesFnNoReturns,
+ OpcodeBrnz: returnTypesFnNoReturns,
+ OpcodeBrTable: returnTypesFnNoReturns,
+ OpcodeUload8: returnTypesFnSingle,
+ OpcodeUload16: returnTypesFnSingle,
+ OpcodeUload32: returnTypesFnSingle,
+ OpcodeSload8: returnTypesFnSingle,
+ OpcodeSload16: returnTypesFnSingle,
+ OpcodeSload32: returnTypesFnSingle,
+ OpcodeFcvtToSint: returnTypesFnSingle,
+ OpcodeFcvtToUint: returnTypesFnSingle,
+ OpcodeFcvtFromSint: returnTypesFnSingle,
+ OpcodeFcvtFromUint: returnTypesFnSingle,
+ OpcodeFcvtToSintSat: returnTypesFnSingle,
+ OpcodeFcvtToUintSat: returnTypesFnSingle,
+ OpcodeVFcvtFromUint: returnTypesFnV128,
+ OpcodeVFcvtFromSint: returnTypesFnV128,
+ OpcodeFneg: returnTypesFnSingle,
+ OpcodeFdemote: returnTypesFnF32,
+ OpcodeFvdemote: returnTypesFnV128,
+ OpcodeFvpromoteLow: returnTypesFnV128,
+ OpcodeFpromote: returnTypesFnF64,
+ OpcodeVconst: returnTypesFnV128,
+ OpcodeVFabs: returnTypesFnV128,
+ OpcodeVSqrt: returnTypesFnV128,
+ OpcodeVFmax: returnTypesFnV128,
+ OpcodeVFmin: returnTypesFnV128,
+ OpcodeVFneg: returnTypesFnV128,
+ OpcodeVFadd: returnTypesFnV128,
+ OpcodeVFsub: returnTypesFnV128,
+ OpcodeVFmul: returnTypesFnV128,
+ OpcodeVFdiv: returnTypesFnV128,
+ OpcodeVFcmp: returnTypesFnV128,
+ OpcodeVCeil: returnTypesFnV128,
+ OpcodeVFloor: returnTypesFnV128,
+ OpcodeVTrunc: returnTypesFnV128,
+ OpcodeVNearest: returnTypesFnV128,
+ OpcodeVMaxPseudo: returnTypesFnV128,
+ OpcodeVMinPseudo: returnTypesFnV128,
+ OpcodeVFcvtToUintSat: returnTypesFnV128,
+ OpcodeVFcvtToSintSat: returnTypesFnV128,
+ OpcodeAtomicRmw: returnTypesFnSingle,
+ OpcodeAtomicLoad: returnTypesFnSingle,
+ OpcodeAtomicStore: returnTypesFnNoReturns,
+ OpcodeAtomicCas: returnTypesFnSingle,
+ OpcodeFence: returnTypesFnNoReturns,
+ OpcodeWideningPairwiseDotProductS: returnTypesFnV128,
+}
+
+// AsLoad initializes this instruction as a load instruction with OpcodeLoad.
+func (i *Instruction) AsLoad(ptr Value, offset uint32, typ Type) *Instruction {
+ i.opcode = OpcodeLoad
+ i.v = ptr
+ i.u1 = uint64(offset)
+ i.typ = typ
+ return i
+}
+
+// AsExtLoad initializes this instruction as an extending load instruction with the given opcode (one of OpcodeUload8/16/32 or OpcodeSload8/16/32).
+func (i *Instruction) AsExtLoad(op Opcode, ptr Value, offset uint32, dst64bit bool) *Instruction {
+ i.opcode = op
+ i.v = ptr
+ i.u1 = uint64(offset)
+ if dst64bit {
+ i.typ = TypeI64
+ } else {
+ i.typ = TypeI32
+ }
+ return i
+}
+
+// AsVZeroExtLoad initializes this instruction as a zero-extending scalar load instruction with OpcodeVZeroExtLoad.
+func (i *Instruction) AsVZeroExtLoad(ptr Value, offset uint32, scalarType Type) *Instruction {
+ i.opcode = OpcodeVZeroExtLoad
+ i.v = ptr
+ i.u1 = uint64(offset)
+ i.u2 = uint64(scalarType)
+ i.typ = TypeV128
+ return i
+}
+
+// VZeroExtLoadData returns the operands for a load instruction. The returned `typ` is the scalar type of the load target.
+func (i *Instruction) VZeroExtLoadData() (ptr Value, offset uint32, typ Type) {
+ return i.v, uint32(i.u1), Type(i.u2)
+}
+
+// AsLoadSplat initializes this instruction as a load-splat instruction with OpcodeLoadSplat.
+func (i *Instruction) AsLoadSplat(ptr Value, offset uint32, lane VecLane) *Instruction {
+ i.opcode = OpcodeLoadSplat
+ i.v = ptr
+ i.u1 = uint64(offset)
+ i.u2 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// LoadData returns the operands for a load instruction.
+func (i *Instruction) LoadData() (ptr Value, offset uint32, typ Type) {
+ return i.v, uint32(i.u1), i.typ
+}
+
+// LoadSplatData returns the operands for a load splat instruction.
+func (i *Instruction) LoadSplatData() (ptr Value, offset uint32, lane VecLane) {
+ return i.v, uint32(i.u1), VecLane(i.u2)
+}
+
+// AsStore initializes this instruction as a store instruction with OpcodeStore.
+func (i *Instruction) AsStore(storeOp Opcode, value, ptr Value, offset uint32) *Instruction {
+ i.opcode = storeOp
+ i.v = value
+ i.v2 = ptr
+
+ var dstSize uint64
+ switch storeOp {
+ case OpcodeStore:
+ dstSize = uint64(value.Type().Bits())
+ case OpcodeIstore8:
+ dstSize = 8
+ case OpcodeIstore16:
+ dstSize = 16
+ case OpcodeIstore32:
+ dstSize = 32
+ default:
+ panic("invalid store opcode" + storeOp.String())
+ }
+ i.u1 = uint64(offset) | dstSize<<32
+ return i
+}
+
+// StoreData returns the operands for a store instruction.
+func (i *Instruction) StoreData() (value, ptr Value, offset uint32, storeSizeInBits byte) {
+ return i.v, i.v2, uint32(i.u1), byte(i.u1 >> 32)
+}
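+
+// Note that the byte offset and the store width share the single u1 field:
+// the low 32 bits hold the offset and the bits above bit 32 hold the width
+// in bits, mirroring AsStore above. A round-trip sketch (illustrative only):
+//
+//	u1 := uint64(offset) | uint64(storeSizeInBits)<<32
+//	offset = uint32(u1)              // low 32 bits.
+//	storeSizeInBits = byte(u1 >> 32) // bits above bit 32.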
+
+// AsIconst64 initializes this instruction as a 64-bit integer constant instruction with OpcodeIconst.
+func (i *Instruction) AsIconst64(v uint64) *Instruction {
+ i.opcode = OpcodeIconst
+ i.typ = TypeI64
+ i.u1 = v
+ return i
+}
+
+// AsIconst32 initializes this instruction as a 32-bit integer constant instruction with OpcodeIconst.
+func (i *Instruction) AsIconst32(v uint32) *Instruction {
+ i.opcode = OpcodeIconst
+ i.typ = TypeI32
+ i.u1 = uint64(v)
+ return i
+}
+
+// AsIadd initializes this instruction as an integer addition instruction with OpcodeIadd.
+func (i *Instruction) AsIadd(x, y Value) *Instruction {
+ i.opcode = OpcodeIadd
+ i.v = x
+ i.v2 = y
+ i.typ = x.Type()
+ return i
+}
+
+// AsVIadd initializes this instruction as an integer addition instruction with OpcodeVIadd on a vector.
+func (i *Instruction) AsVIadd(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVIadd
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsWideningPairwiseDotProductS initializes this instruction as a lane-wise widening pairwise dot product instruction
+// with OpcodeWideningPairwiseDotProductS on a vector.
+func (i *Instruction) AsWideningPairwiseDotProductS(x, y Value) *Instruction {
+ i.opcode = OpcodeWideningPairwiseDotProductS
+ i.v = x
+ i.v2 = y
+ i.typ = TypeV128
+ return i
+}
+
+// AsExtIaddPairwise initializes this instruction as a lane-wise integer extended pairwise addition instruction
+// with OpcodeExtIaddPairwise on a vector.
+func (i *Instruction) AsExtIaddPairwise(x Value, srcLane VecLane, signed bool) *Instruction {
+ i.opcode = OpcodeExtIaddPairwise
+ i.v = x
+ i.u1 = uint64(srcLane)
+ if signed {
+ i.u2 = 1
+ }
+ i.typ = TypeV128
+ return i
+}
+
+// ExtIaddPairwiseData returns the operands for a lane-wise integer extended pairwise addition instruction.
+func (i *Instruction) ExtIaddPairwiseData() (x Value, srcLane VecLane, signed bool) {
+ return i.v, VecLane(i.u1), i.u2 != 0
+}
+
+// AsVSaddSat initializes this instruction as a vector addition with saturation instruction with OpcodeVSaddSat on a vector.
+func (i *Instruction) AsVSaddSat(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVSaddSat
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVUaddSat initializes this instruction as a vector addition with saturation instruction with OpcodeVUaddSat on a vector.
+func (i *Instruction) AsVUaddSat(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVUaddSat
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVIsub initializes this instruction as an integer subtraction instruction with OpcodeVIsub on a vector.
+func (i *Instruction) AsVIsub(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVIsub
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVSsubSat initializes this instruction as a vector subtraction with saturation instruction with OpcodeVSsubSat on a vector.
+func (i *Instruction) AsVSsubSat(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVSsubSat
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVUsubSat initializes this instruction as a vector subtraction with saturation instruction with OpcodeVUsubSat on a vector.
+func (i *Instruction) AsVUsubSat(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVUsubSat
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVImin initializes this instruction as a signed integer min instruction with OpcodeVImin on a vector.
+func (i *Instruction) AsVImin(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVImin
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVUmin initializes this instruction as an unsigned integer min instruction with OpcodeVUmin on a vector.
+func (i *Instruction) AsVUmin(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVUmin
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVImax initializes this instruction as a signed integer max instruction with OpcodeVImax on a vector.
+func (i *Instruction) AsVImax(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVImax
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVUmax initializes this instruction as an unsigned integer max instruction with OpcodeVUmax on a vector.
+func (i *Instruction) AsVUmax(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVUmax
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVAvgRound initializes this instruction as an unsigned integer avg instruction, truncating to zero with OpcodeVAvgRound on a vector.
+func (i *Instruction) AsVAvgRound(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVAvgRound
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVImul initializes this instruction as an integer multiplication with OpcodeVImul on a vector.
+func (i *Instruction) AsVImul(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVImul
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsSqmulRoundSat initializes this instruction as a lane-wise saturating rounding multiplication
+// in Q15 format with OpcodeSqmulRoundSat on a vector.
+func (i *Instruction) AsSqmulRoundSat(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeSqmulRoundSat
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVIabs initializes this instruction as a vector absolute value with OpcodeVIabs.
+func (i *Instruction) AsVIabs(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVIabs
+ i.v = x
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVIneg initializes this instruction as a vector negation with OpcodeVIneg.
+func (i *Instruction) AsVIneg(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVIneg
+ i.v = x
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVIpopcnt initializes this instruction as a Population Count instruction with OpcodeVIpopcnt on a vector.
+func (i *Instruction) AsVIpopcnt(x Value, lane VecLane) *Instruction {
+ if lane != VecLaneI8x16 {
+ panic("Unsupported lane type " + lane.String())
+ }
+ i.opcode = OpcodeVIpopcnt
+ i.v = x
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVSqrt initializes this instruction as a sqrt instruction with OpcodeVSqrt on a vector.
+func (i *Instruction) AsVSqrt(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVSqrt
+ i.v = x
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVFabs initializes this instruction as a float abs instruction with OpcodeVFabs on a vector.
+func (i *Instruction) AsVFabs(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVFabs
+ i.v = x
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVFneg initializes this instruction as a float neg instruction with OpcodeVFneg on a vector.
+func (i *Instruction) AsVFneg(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVFneg
+ i.v = x
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVFmax initializes this instruction as a float max instruction with OpcodeVFmax on a vector.
+func (i *Instruction) AsVFmax(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVFmax
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVFmin initializes this instruction as a float min instruction with OpcodeVFmin on a vector.
+func (i *Instruction) AsVFmin(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVFmin
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVFadd initializes this instruction as a floating point add instruction with OpcodeVFadd on a vector.
+func (i *Instruction) AsVFadd(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVFadd
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVFsub initializes this instruction as a floating point subtraction instruction with OpcodeVFsub on a vector.
+func (i *Instruction) AsVFsub(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVFsub
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVFmul initializes this instruction as a floating point multiplication instruction with OpcodeVFmul on a vector.
+func (i *Instruction) AsVFmul(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVFmul
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVFdiv initializes this instruction as a floating point division instruction with OpcodeVFdiv on a vector.
+func (i *Instruction) AsVFdiv(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVFdiv
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsImul initializes this instruction as an integer multiplication instruction with OpcodeImul.
+func (i *Instruction) AsImul(x, y Value) *Instruction {
+ i.opcode = OpcodeImul
+ i.v = x
+ i.v2 = y
+ i.typ = x.Type()
+ return i
+}
+
+// Insert inserts this instruction into the given builder and returns it,
+// which enables fluent-style instruction construction.
+func (i *Instruction) Insert(b Builder) *Instruction {
+ b.InsertInstruction(i)
+ return i
+}
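+
+// A minimal usage sketch (illustrative only), assuming a Builder b and
+// Values x, y, with instructions allocated via Builder.AllocateInstruction
+// as elsewhere in this package:
+//
+//	iadd := b.AllocateInstruction().AsIadd(x, y).Insert(b)
+//	sum := iadd.Return() // the single result of the addition.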
+
+// AsIsub initializes this instruction as an integer subtraction instruction with OpcodeIsub.
+func (i *Instruction) AsIsub(x, y Value) *Instruction {
+ i.opcode = OpcodeIsub
+ i.v = x
+ i.v2 = y
+ i.typ = x.Type()
+ return i
+}
+
+// AsIcmp initializes this instruction as an integer comparison instruction with OpcodeIcmp.
+func (i *Instruction) AsIcmp(x, y Value, c IntegerCmpCond) *Instruction {
+ i.opcode = OpcodeIcmp
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(c)
+ i.typ = TypeI32
+ return i
+}
+
+// AsFcmp initializes this instruction as a floating-point comparison instruction with OpcodeFcmp.
+func (i *Instruction) AsFcmp(x, y Value, c FloatCmpCond) {
+ i.opcode = OpcodeFcmp
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(c)
+ i.typ = TypeI32
+}
+
+// AsVIcmp initializes this instruction as an integer vector comparison instruction with OpcodeVIcmp.
+func (i *Instruction) AsVIcmp(x, y Value, c IntegerCmpCond, lane VecLane) *Instruction {
+ i.opcode = OpcodeVIcmp
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(c)
+ i.u2 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsVFcmp initializes this instruction as a float comparison instruction with OpcodeVFcmp on Vector.
+func (i *Instruction) AsVFcmp(x, y Value, c FloatCmpCond, lane VecLane) *Instruction {
+ i.opcode = OpcodeVFcmp
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(c)
+ i.typ = TypeV128
+ i.u2 = uint64(lane)
+ return i
+}
+
+// AsVCeil initializes this instruction as an instruction with OpcodeVCeil.
+func (i *Instruction) AsVCeil(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVCeil
+ i.v = x
+ i.typ = x.Type()
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsVFloor initializes this instruction as an instruction with OpcodeVFloor.
+func (i *Instruction) AsVFloor(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVFloor
+ i.v = x
+ i.typ = x.Type()
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsVTrunc initializes this instruction as an instruction with OpcodeVTrunc.
+func (i *Instruction) AsVTrunc(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVTrunc
+ i.v = x
+ i.typ = x.Type()
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsVNearest initializes this instruction as an instruction with OpcodeVNearest.
+func (i *Instruction) AsVNearest(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVNearest
+ i.v = x
+ i.typ = x.Type()
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsVMaxPseudo initializes this instruction as an instruction with OpcodeVMaxPseudo.
+func (i *Instruction) AsVMaxPseudo(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVMaxPseudo
+ i.typ = x.Type()
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsVMinPseudo initializes this instruction as an instruction with OpcodeVMinPseudo.
+func (i *Instruction) AsVMinPseudo(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVMinPseudo
+ i.typ = x.Type()
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsSDiv initializes this instruction as a signed integer division instruction with OpcodeSdiv.
+func (i *Instruction) AsSDiv(x, y, ctx Value) *Instruction {
+ i.opcode = OpcodeSdiv
+ i.v = x
+ i.v2 = y
+ i.v3 = ctx
+ i.typ = x.Type()
+ return i
+}
+
+// AsUDiv initializes this instruction as an unsigned integer division instruction with OpcodeUdiv.
+func (i *Instruction) AsUDiv(x, y, ctx Value) *Instruction {
+ i.opcode = OpcodeUdiv
+ i.v = x
+ i.v2 = y
+ i.v3 = ctx
+ i.typ = x.Type()
+ return i
+}
+
+// AsSRem initializes this instruction as a signed integer remainder instruction with OpcodeSrem.
+func (i *Instruction) AsSRem(x, y, ctx Value) *Instruction {
+ i.opcode = OpcodeSrem
+ i.v = x
+ i.v2 = y
+ i.v3 = ctx
+ i.typ = x.Type()
+ return i
+}
+
+// AsURem initializes this instruction as an unsigned integer remainder instruction with OpcodeUrem.
+func (i *Instruction) AsURem(x, y, ctx Value) *Instruction {
+ i.opcode = OpcodeUrem
+ i.v = x
+ i.v2 = y
+ i.v3 = ctx
+ i.typ = x.Type()
+ return i
+}
+
+// AsBand initializes this instruction as an integer bitwise and instruction with OpcodeBand.
+func (i *Instruction) AsBand(x, amount Value) *Instruction {
+ i.opcode = OpcodeBand
+ i.v = x
+ i.v2 = amount
+ i.typ = x.Type()
+ return i
+}
+
+// AsBor initializes this instruction as an integer bitwise or instruction with OpcodeBor.
+func (i *Instruction) AsBor(x, amount Value) {
+ i.opcode = OpcodeBor
+ i.v = x
+ i.v2 = amount
+ i.typ = x.Type()
+}
+
+// AsBxor initializes this instruction as an integer bitwise xor instruction with OpcodeBxor.
+func (i *Instruction) AsBxor(x, amount Value) {
+ i.opcode = OpcodeBxor
+ i.v = x
+ i.v2 = amount
+ i.typ = x.Type()
+}
+
+// AsIshl initializes this instruction as an integer shift left instruction with OpcodeIshl.
+func (i *Instruction) AsIshl(x, amount Value) *Instruction {
+ i.opcode = OpcodeIshl
+ i.v = x
+ i.v2 = amount
+ i.typ = x.Type()
+ return i
+}
+
+// AsVIshl initializes this instruction as an integer shift left instruction with OpcodeVIshl on vector.
+func (i *Instruction) AsVIshl(x, amount Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVIshl
+ i.v = x
+ i.v2 = amount
+ i.u1 = uint64(lane)
+ i.typ = x.Type()
+ return i
+}
+
+// AsUshr initializes this instruction as an integer unsigned shift right (logical shift right) instruction with OpcodeUshr.
+func (i *Instruction) AsUshr(x, amount Value) *Instruction {
+ i.opcode = OpcodeUshr
+ i.v = x
+ i.v2 = amount
+ i.typ = x.Type()
+ return i
+}
+
+// AsVUshr initializes this instruction as an integer unsigned shift right (logical shift right) instruction with OpcodeVUshr on vector.
+func (i *Instruction) AsVUshr(x, amount Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVUshr
+ i.v = x
+ i.v2 = amount
+ i.u1 = uint64(lane)
+ i.typ = x.Type()
+ return i
+}
+
+// AsSshr initializes this instruction as an integer signed shift right (arithmetic shift right) instruction with OpcodeSshr.
+func (i *Instruction) AsSshr(x, amount Value) *Instruction {
+ i.opcode = OpcodeSshr
+ i.v = x
+ i.v2 = amount
+ i.typ = x.Type()
+ return i
+}
+
+// AsVSshr initializes this instruction as an integer signed shift right (arithmetic shift right) instruction with OpcodeVSshr on vector.
+func (i *Instruction) AsVSshr(x, amount Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVSshr
+ i.v = x
+ i.v2 = amount
+ i.u1 = uint64(lane)
+ i.typ = x.Type()
+ return i
+}
+
+// AsExtractlane initializes this instruction as an extract lane instruction with OpcodeExtractlane on vector.
+func (i *Instruction) AsExtractlane(x Value, index byte, lane VecLane, signed bool) *Instruction {
+ i.opcode = OpcodeExtractlane
+ i.v = x
+ // We do not have a field for signedness, but `index` is a byte,
+ // so we just encode the flag in the high bits of `u1`.
+ i.u1 = uint64(index)
+ if signed {
+ i.u1 = i.u1 | 1<<32
+ }
+ i.u2 = uint64(lane)
+ switch lane {
+ case VecLaneI8x16, VecLaneI16x8, VecLaneI32x4:
+ i.typ = TypeI32
+ case VecLaneI64x2:
+ i.typ = TypeI64
+ case VecLaneF32x4:
+ i.typ = TypeF32
+ case VecLaneF64x2:
+ i.typ = TypeF64
+ }
+ return i
+}
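+
+// The signedness flag shares u1 with the lane index, mirroring the encoding
+// above; a round-trip sketch (illustrative only, see also ExtractlaneData):
+//
+//	u1 := uint64(index)
+//	if signed {
+//		u1 |= 1 << 32
+//	}
+//	index, signed = byte(u1), u1>>32 != 0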
+
+// AsInsertlane initializes this instruction as an insert lane instruction with OpcodeInsertlane on vector.
+func (i *Instruction) AsInsertlane(x, y Value, index byte, lane VecLane) *Instruction {
+ i.opcode = OpcodeInsertlane
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(index)
+ i.u2 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsShuffle initializes this instruction as a shuffle instruction with OpcodeShuffle on vector.
+func (i *Instruction) AsShuffle(x, y Value, lane []byte) *Instruction {
+ i.opcode = OpcodeShuffle
+ i.v = x
+ i.v2 = y
+ // Encode the 16 bytes as 8 bytes in u1, and 8 bytes in u2.
+ i.u1 = uint64(lane[7])<<56 | uint64(lane[6])<<48 | uint64(lane[5])<<40 | uint64(lane[4])<<32 | uint64(lane[3])<<24 | uint64(lane[2])<<16 | uint64(lane[1])<<8 | uint64(lane[0])
+ i.u2 = uint64(lane[15])<<56 | uint64(lane[14])<<48 | uint64(lane[13])<<40 | uint64(lane[12])<<32 | uint64(lane[11])<<24 | uint64(lane[10])<<16 | uint64(lane[9])<<8 | uint64(lane[8])
+ i.typ = TypeV128
+ return i
+}
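+
+// For example, the identity shuffle that selects bytes 0..15 of x in order
+// packs as follows (illustrative only):
+//
+//	lane := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+//	// u1 = 0x0706050403020100 (bytes 0..7, little-endian)
+//	// u2 = 0x0f0e0d0c0b0a0908 (bytes 8..15, little-endian)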
+
+// AsSwizzle initializes this instruction as a swizzle instruction with OpcodeSwizzle on vector.
+func (i *Instruction) AsSwizzle(x, y Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeSwizzle
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsSplat initializes this instruction as a splat instruction with OpcodeSplat on vector.
+func (i *Instruction) AsSplat(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeSplat
+ i.v = x
+ i.u1 = uint64(lane)
+ i.typ = TypeV128
+ return i
+}
+
+// AsRotl initializes this instruction as a word rotate left instruction with OpcodeRotl.
+func (i *Instruction) AsRotl(x, amount Value) {
+ i.opcode = OpcodeRotl
+ i.v = x
+ i.v2 = amount
+ i.typ = x.Type()
+}
+
+// AsRotr initializes this instruction as a word rotate right instruction with OpcodeRotr.
+func (i *Instruction) AsRotr(x, amount Value) {
+ i.opcode = OpcodeRotr
+ i.v = x
+ i.v2 = amount
+ i.typ = x.Type()
+}
+
+// IcmpData returns the operands and comparison condition of this integer comparison instruction.
+func (i *Instruction) IcmpData() (x, y Value, c IntegerCmpCond) {
+ return i.v, i.v2, IntegerCmpCond(i.u1)
+}
+
+// FcmpData returns the operands and comparison condition of this floating-point comparison instruction.
+func (i *Instruction) FcmpData() (x, y Value, c FloatCmpCond) {
+ return i.v, i.v2, FloatCmpCond(i.u1)
+}
+
+// VIcmpData returns the operands and comparison condition of this integer comparison instruction on vector.
+func (i *Instruction) VIcmpData() (x, y Value, c IntegerCmpCond, l VecLane) {
+ return i.v, i.v2, IntegerCmpCond(i.u1), VecLane(i.u2)
+}
+
+// VFcmpData returns the operands and comparison condition of this float comparison instruction on vector.
+func (i *Instruction) VFcmpData() (x, y Value, c FloatCmpCond, l VecLane) {
+ return i.v, i.v2, FloatCmpCond(i.u1), VecLane(i.u2)
+}
+
+// ExtractlaneData returns the operands and sign flag of Extractlane on vector.
+func (i *Instruction) ExtractlaneData() (x Value, index byte, signed bool, l VecLane) {
+ x = i.v
+ index = byte(0b00001111 & i.u1)
+ signed = i.u1>>32 != 0
+ l = VecLane(i.u2)
+ return
+}
+
+// InsertlaneData returns the operands and lane index of Insertlane on vector.
+func (i *Instruction) InsertlaneData() (x, y Value, index byte, l VecLane) {
+ x = i.v
+ y = i.v2
+ index = byte(i.u1)
+ l = VecLane(i.u2)
+ return
+}
+
+// AsFadd initializes this instruction as a floating-point addition instruction with OpcodeFadd.
+func (i *Instruction) AsFadd(x, y Value) {
+ i.opcode = OpcodeFadd
+ i.v = x
+ i.v2 = y
+ i.typ = x.Type()
+}
+
+// AsFsub initializes this instruction as a floating-point subtraction instruction with OpcodeFsub.
+func (i *Instruction) AsFsub(x, y Value) {
+ i.opcode = OpcodeFsub
+ i.v = x
+ i.v2 = y
+ i.typ = x.Type()
+}
+
+// AsFmul initializes this instruction as a floating-point multiplication instruction with OpcodeFmul.
+func (i *Instruction) AsFmul(x, y Value) {
+ i.opcode = OpcodeFmul
+ i.v = x
+ i.v2 = y
+ i.typ = x.Type()
+}
+
+// AsFdiv initializes this instruction as a floating-point division instruction with OpcodeFdiv.
+func (i *Instruction) AsFdiv(x, y Value) {
+ i.opcode = OpcodeFdiv
+ i.v = x
+ i.v2 = y
+ i.typ = x.Type()
+}
+
+// AsFmin initializes this instruction to take the minimum of two floating-points with OpcodeFmin.
+func (i *Instruction) AsFmin(x, y Value) {
+ i.opcode = OpcodeFmin
+ i.v = x
+ i.v2 = y
+ i.typ = x.Type()
+}
+
+// AsFmax initializes this instruction to take the maximum of two floating-points with OpcodeFmax.
+func (i *Instruction) AsFmax(x, y Value) {
+ i.opcode = OpcodeFmax
+ i.v = x
+ i.v2 = y
+ i.typ = x.Type()
+}
+
+// AsF32const initializes this instruction as a 32-bit floating-point constant instruction with OpcodeF32const.
+func (i *Instruction) AsF32const(f float32) *Instruction {
+	i.opcode = OpcodeF32const
+	i.typ = TypeF32
+ i.u1 = uint64(math.Float32bits(f))
+ return i
+}
+
+// AsF64const initializes this instruction as a 64-bit floating-point constant instruction with OpcodeF64const.
+func (i *Instruction) AsF64const(f float64) *Instruction {
+ i.opcode = OpcodeF64const
+ i.typ = TypeF64
+ i.u1 = math.Float64bits(f)
+ return i
+}
+
+// AsVconst initializes this instruction as a vector constant instruction with OpcodeVconst.
+func (i *Instruction) AsVconst(lo, hi uint64) *Instruction {
+ i.opcode = OpcodeVconst
+ i.typ = TypeV128
+ i.u1 = lo
+ i.u2 = hi
+ return i
+}
+
+// AsVbnot initializes this instruction as a vector bitwise-not instruction with OpcodeVbnot.
+func (i *Instruction) AsVbnot(v Value) *Instruction {
+ i.opcode = OpcodeVbnot
+ i.typ = TypeV128
+ i.v = v
+ return i
+}
+
+// AsVband initializes this instruction as a bitwise and vector instruction with OpcodeVband.
+func (i *Instruction) AsVband(x, y Value) *Instruction {
+ i.opcode = OpcodeVband
+ i.typ = TypeV128
+ i.v = x
+ i.v2 = y
+ return i
+}
+
+// AsVbor initializes this instruction as a bitwise or vector instruction with OpcodeVbor.
+func (i *Instruction) AsVbor(x, y Value) *Instruction {
+ i.opcode = OpcodeVbor
+ i.typ = TypeV128
+ i.v = x
+ i.v2 = y
+ return i
+}
+
+// AsVbxor initializes this instruction as a bitwise xor vector instruction with OpcodeVbxor.
+func (i *Instruction) AsVbxor(x, y Value) *Instruction {
+ i.opcode = OpcodeVbxor
+ i.typ = TypeV128
+ i.v = x
+ i.v2 = y
+ return i
+}
+
+// AsVbandnot initializes this instruction as an and-not vector instruction with OpcodeVbandnot.
+func (i *Instruction) AsVbandnot(x, y Value) *Instruction {
+ i.opcode = OpcodeVbandnot
+ i.typ = TypeV128
+ i.v = x
+ i.v2 = y
+ return i
+}
+
+// AsVbitselect initializes this instruction as a bit select vector instruction with OpcodeVbitselect.
+func (i *Instruction) AsVbitselect(c, x, y Value) *Instruction {
+ i.opcode = OpcodeVbitselect
+ i.typ = TypeV128
+ i.v = c
+ i.v2 = x
+ i.v3 = y
+ return i
+}
+
+// AsVanyTrue initializes this instruction as an anyTrue vector instruction with OpcodeVanyTrue.
+func (i *Instruction) AsVanyTrue(x Value) *Instruction {
+ i.opcode = OpcodeVanyTrue
+ i.typ = TypeI32
+ i.v = x
+ return i
+}
+
+// AsVallTrue initializes this instruction as an allTrue vector instruction with OpcodeVallTrue.
+func (i *Instruction) AsVallTrue(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVallTrue
+ i.typ = TypeI32
+ i.v = x
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsVhighBits initializes this instruction as a highBits vector instruction with OpcodeVhighBits.
+func (i *Instruction) AsVhighBits(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeVhighBits
+ i.typ = TypeI32
+ i.v = x
+ i.u1 = uint64(lane)
+ return i
+}
+
+// VconstData returns the operands of this vector constant instruction.
+func (i *Instruction) VconstData() (lo, hi uint64) {
+ return i.u1, i.u2
+}
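+
+// A 16-byte constant is split into the two uint64 halves; assuming a
+// little-endian byte layout (as Wasm v128 constants use), a construction
+// sketch (illustrative only):
+//
+//	lo := binary.LittleEndian.Uint64(bytes[0:8])
+//	hi := binary.LittleEndian.Uint64(bytes[8:16])
+//	instr.AsVconst(lo, hi)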
+
+// AsReturn initializes this instruction as a return instruction with OpcodeReturn.
+func (i *Instruction) AsReturn(vs wazevoapi.VarLength[Value]) *Instruction {
+ i.opcode = OpcodeReturn
+ i.vs = vs
+ return i
+}
+
+// AsIreduce initializes this instruction as an integer narrowing instruction with OpcodeIreduce.
+func (i *Instruction) AsIreduce(v Value, dstType Type) *Instruction {
+ i.opcode = OpcodeIreduce
+ i.v = v
+ i.typ = dstType
+ return i
+}
+
+// AsWiden initializes this instruction as a signed or unsigned widen instruction
+// on low half or high half of the given vector with OpcodeSwidenLow, OpcodeUwidenLow, OpcodeSwidenHigh, OpcodeUwidenHigh.
+func (i *Instruction) AsWiden(v Value, lane VecLane, signed, low bool) *Instruction {
+ switch {
+ case signed && low:
+ i.opcode = OpcodeSwidenLow
+ case !signed && low:
+ i.opcode = OpcodeUwidenLow
+ case signed && !low:
+ i.opcode = OpcodeSwidenHigh
+ case !signed && !low:
+ i.opcode = OpcodeUwidenHigh
+ }
+ i.v = v
+ i.u1 = uint64(lane)
+ return i
+}
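+
+// For example, lowering Wasm's i16x8.extend_low_i8x16_s would use (sketch
+// only, assuming `lane` names the source lane shape as in the callers):
+//
+//	instr.AsWiden(v, VecLaneI8x16, true /* signed */, true /* low */)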
+
+// AsAtomicLoad initializes this instruction as an atomic load.
+// The size is in bytes and must be 1, 2, 4, or 8.
+func (i *Instruction) AsAtomicLoad(addr Value, size uint64, typ Type) *Instruction {
+ i.opcode = OpcodeAtomicLoad
+ i.u1 = size
+ i.v = addr
+ i.typ = typ
+ return i
+}
+
+// AsAtomicStore initializes this instruction as an atomic store.
+// The size is in bytes and must be 1, 2, 4, or 8.
+func (i *Instruction) AsAtomicStore(addr, val Value, size uint64) *Instruction {
+ i.opcode = OpcodeAtomicStore
+ i.u1 = size
+ i.v = addr
+ i.v2 = val
+ i.typ = val.Type()
+ return i
+}
+
+// AsAtomicRmw initializes this instruction as an atomic read-modify-write.
+// The size is in bytes and must be 1, 2, 4, or 8.
+func (i *Instruction) AsAtomicRmw(op AtomicRmwOp, addr, val Value, size uint64) *Instruction {
+ i.opcode = OpcodeAtomicRmw
+ i.u1 = uint64(op)
+ i.u2 = size
+ i.v = addr
+ i.v2 = val
+ i.typ = val.Type()
+ return i
+}
+
+// AsAtomicCas initializes this instruction as an atomic compare-and-swap.
+// The size is in bytes and must be 1, 2, 4, or 8.
+func (i *Instruction) AsAtomicCas(addr, exp, repl Value, size uint64) *Instruction {
+ i.opcode = OpcodeAtomicCas
+ i.u1 = size
+ i.v = addr
+ i.v2 = exp
+ i.v3 = repl
+ i.typ = repl.Type()
+ return i
+}
+
+// AsFence initializes this instruction as a memory fence.
+// A single byte immediate may be used to indicate fence ordering in the future
+// but is currently always 0 and ignored.
+func (i *Instruction) AsFence(order byte) *Instruction {
+ i.opcode = OpcodeFence
+ i.u1 = uint64(order)
+ return i
+}
+
+// AtomicRmwData returns the data for this atomic read-modify-write instruction.
+func (i *Instruction) AtomicRmwData() (op AtomicRmwOp, size uint64) {
+ return AtomicRmwOp(i.u1), i.u2
+}
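+
+// A construction sketch for a 32-bit (4-byte) atomic add (illustrative only,
+// assuming a Builder b and address/value Values addr, val):
+//
+//	rmw := b.AllocateInstruction().AsAtomicRmw(AtomicRmwOpAdd, addr, val, 4).Insert(b)
+//	old := rmw.Return() // the value previously held in memory.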
+
+// AtomicTargetSize returns the target memory size of the atomic load, store, or cas instruction.
+func (i *Instruction) AtomicTargetSize() (size uint64) {
+ return i.u1
+}
+
+// ReturnVals returns the return values of OpcodeReturn.
+func (i *Instruction) ReturnVals() []Value {
+ return i.vs.View()
+}
+
+// AsExitWithCode initializes this instruction as a trap instruction with OpcodeExitWithCode.
+func (i *Instruction) AsExitWithCode(ctx Value, code wazevoapi.ExitCode) {
+ i.opcode = OpcodeExitWithCode
+ i.v = ctx
+ i.u1 = uint64(code)
+}
+
+// AsExitIfTrueWithCode initializes this instruction as a trap instruction with OpcodeExitIfTrueWithCode.
+func (i *Instruction) AsExitIfTrueWithCode(ctx, c Value, code wazevoapi.ExitCode) *Instruction {
+ i.opcode = OpcodeExitIfTrueWithCode
+ i.v = ctx
+ i.v2 = c
+ i.u1 = uint64(code)
+ return i
+}
+
+// ExitWithCodeData returns the context and exit code of OpcodeExitWithCode.
+func (i *Instruction) ExitWithCodeData() (ctx Value, code wazevoapi.ExitCode) {
+ return i.v, wazevoapi.ExitCode(i.u1)
+}
+
+// ExitIfTrueWithCodeData returns the context, condition value, and exit code of OpcodeExitIfTrueWithCode.
+func (i *Instruction) ExitIfTrueWithCodeData() (ctx, c Value, code wazevoapi.ExitCode) {
+ return i.v, i.v2, wazevoapi.ExitCode(i.u1)
+}
+
+// InvertBrx inverts either OpcodeBrz or OpcodeBrnz to the other.
+func (i *Instruction) InvertBrx() {
+ switch i.opcode {
+ case OpcodeBrz:
+ i.opcode = OpcodeBrnz
+ case OpcodeBrnz:
+ i.opcode = OpcodeBrz
+ default:
+ panic("BUG")
+ }
+}
+
+// BranchData returns the branch data for this instruction necessary for backends.
+func (i *Instruction) BranchData() (condVal Value, blockArgs []Value, target BasicBlock) {
+ switch i.opcode {
+ case OpcodeJump:
+ condVal = ValueInvalid
+ case OpcodeBrz, OpcodeBrnz:
+ condVal = i.v
+ default:
+ panic("BUG")
+ }
+ blockArgs = i.vs.View()
+ target = i.blk
+ return
+}
+
+// BrTableData returns the branch table data for this instruction necessary for backends.
+func (i *Instruction) BrTableData() (index Value, targets []BasicBlock) {
+ if i.opcode != OpcodeBrTable {
+ panic("BUG: BrTableData only available for OpcodeBrTable")
+ }
+ index = i.v
+ targets = i.targets
+ return
+}
+
+// AsJump initializes this instruction as a jump instruction with OpcodeJump.
+func (i *Instruction) AsJump(vs Values, target BasicBlock) *Instruction {
+ i.opcode = OpcodeJump
+ i.vs = vs
+ i.blk = target
+ return i
+}
+
+// IsFallthroughJump returns true if this instruction is a fallthrough jump.
+func (i *Instruction) IsFallthroughJump() bool {
+	if i.opcode != OpcodeJump {
+		panic("BUG: IsFallthroughJump only available for OpcodeJump")
+	}
+	return i.u1 != 0
+}
+
+// AsFallthroughJump marks this instruction as a fallthrough jump.
+func (i *Instruction) AsFallthroughJump() {
+ if i.opcode != OpcodeJump {
+ panic("BUG: AsFallthroughJump only available for OpcodeJump")
+ }
+ i.u1 = 1
+}
+
+// AsBrz initializes this instruction as a branch-if-zero instruction with OpcodeBrz.
+func (i *Instruction) AsBrz(v Value, args Values, target BasicBlock) {
+ i.opcode = OpcodeBrz
+ i.v = v
+ i.vs = args
+ i.blk = target
+}
+
+// AsBrnz initializes this instruction as a branch-if-not-zero instruction with OpcodeBrnz.
+func (i *Instruction) AsBrnz(v Value, args Values, target BasicBlock) *Instruction {
+ i.opcode = OpcodeBrnz
+ i.v = v
+ i.vs = args
+ i.blk = target
+ return i
+}
+
+// AsBrTable initializes this instruction as a branch-table instruction with OpcodeBrTable.
+func (i *Instruction) AsBrTable(index Value, targets []BasicBlock) {
+ i.opcode = OpcodeBrTable
+ i.v = index
+ i.targets = targets
+}
+
+// AsCall initializes this instruction as a call instruction with OpcodeCall.
+func (i *Instruction) AsCall(ref FuncRef, sig *Signature, args Values) {
+ i.opcode = OpcodeCall
+ i.u1 = uint64(ref)
+ i.vs = args
+ i.u2 = uint64(sig.ID)
+ sig.used = true
+}
+
+// CallData returns the call data for this instruction necessary for backends.
+func (i *Instruction) CallData() (ref FuncRef, sigID SignatureID, args []Value) {
+ if i.opcode != OpcodeCall {
+ panic("BUG: CallData only available for OpcodeCall")
+ }
+ ref = FuncRef(i.u1)
+ sigID = SignatureID(i.u2)
+ args = i.vs.View()
+ return
+}
+
+// AsCallIndirect initializes this instruction as a call-indirect instruction with OpcodeCallIndirect.
+func (i *Instruction) AsCallIndirect(funcPtr Value, sig *Signature, args Values) *Instruction {
+ i.opcode = OpcodeCallIndirect
+ i.typ = TypeF64
+ i.vs = args
+ i.v = funcPtr
+ i.u1 = uint64(sig.ID)
+ sig.used = true
+ return i
+}
+
+// AsCallGoRuntimeMemmove is the same as AsCallIndirect, but with a special flag set to indicate that it is a call to the Go runtime memmove function.
+func (i *Instruction) AsCallGoRuntimeMemmove(funcPtr Value, sig *Signature, args Values) *Instruction {
+ i.AsCallIndirect(funcPtr, sig, args)
+ i.u2 = 1
+ return i
+}
+
+// CallIndirectData returns the call indirect data for this instruction necessary for backends.
+func (i *Instruction) CallIndirectData() (funcPtr Value, sigID SignatureID, args []Value, isGoMemmove bool) {
+ if i.opcode != OpcodeCallIndirect {
+ panic("BUG: CallIndirectData only available for OpcodeCallIndirect")
+ }
+ funcPtr = i.v
+ sigID = SignatureID(i.u1)
+ args = i.vs.View()
+ isGoMemmove = i.u2 == 1
+ return
+}
+
+// AsClz initializes this instruction as a Count Leading Zeroes instruction with OpcodeClz.
+func (i *Instruction) AsClz(x Value) {
+ i.opcode = OpcodeClz
+ i.v = x
+ i.typ = x.Type()
+}
+
+// AsCtz initializes this instruction as a Count Trailing Zeroes instruction with OpcodeCtz.
+func (i *Instruction) AsCtz(x Value) {
+ i.opcode = OpcodeCtz
+ i.v = x
+ i.typ = x.Type()
+}
+
+// AsPopcnt initializes this instruction as a Population Count instruction with OpcodePopcnt.
+func (i *Instruction) AsPopcnt(x Value) {
+ i.opcode = OpcodePopcnt
+ i.v = x
+ i.typ = x.Type()
+}
+
+// AsFneg initializes this instruction as an instruction with OpcodeFneg.
+func (i *Instruction) AsFneg(x Value) *Instruction {
+ i.opcode = OpcodeFneg
+ i.v = x
+ i.typ = x.Type()
+ return i
+}
+
+// AsSqrt initializes this instruction as an instruction with OpcodeSqrt.
+func (i *Instruction) AsSqrt(x Value) *Instruction {
+ i.opcode = OpcodeSqrt
+ i.v = x
+ i.typ = x.Type()
+ return i
+}
+
+// AsFabs initializes this instruction as an instruction with OpcodeFabs.
+func (i *Instruction) AsFabs(x Value) *Instruction {
+ i.opcode = OpcodeFabs
+ i.v = x
+ i.typ = x.Type()
+ return i
+}
+
+// AsFcopysign initializes this instruction as an instruction with OpcodeFcopysign.
+func (i *Instruction) AsFcopysign(x, y Value) *Instruction {
+ i.opcode = OpcodeFcopysign
+ i.v = x
+ i.v2 = y
+ i.typ = x.Type()
+ return i
+}
+
+// AsCeil initializes this instruction as an instruction with OpcodeCeil.
+func (i *Instruction) AsCeil(x Value) *Instruction {
+ i.opcode = OpcodeCeil
+ i.v = x
+ i.typ = x.Type()
+ return i
+}
+
+// AsFloor initializes this instruction as an instruction with OpcodeFloor.
+func (i *Instruction) AsFloor(x Value) *Instruction {
+ i.opcode = OpcodeFloor
+ i.v = x
+ i.typ = x.Type()
+ return i
+}
+
+// AsTrunc initializes this instruction as an instruction with OpcodeTrunc.
+func (i *Instruction) AsTrunc(x Value) *Instruction {
+ i.opcode = OpcodeTrunc
+ i.v = x
+ i.typ = x.Type()
+ return i
+}
+
+// AsNearest initializes this instruction as an instruction with OpcodeNearest.
+func (i *Instruction) AsNearest(x Value) *Instruction {
+ i.opcode = OpcodeNearest
+ i.v = x
+ i.typ = x.Type()
+ return i
+}
+
+// AsBitcast initializes this instruction as an instruction with OpcodeBitcast.
+func (i *Instruction) AsBitcast(x Value, dstType Type) *Instruction {
+ i.opcode = OpcodeBitcast
+ i.v = x
+ i.typ = dstType
+ return i
+}
+
+// BitcastData returns the operands for a bitcast instruction.
+func (i *Instruction) BitcastData() (x Value, dstType Type) {
+ return i.v, i.typ
+}
+
+// AsFdemote initializes this instruction as an instruction with OpcodeFdemote.
+func (i *Instruction) AsFdemote(x Value) {
+ i.opcode = OpcodeFdemote
+ i.v = x
+ i.typ = TypeF32
+}
+
+// AsFpromote initializes this instruction as an instruction with OpcodeFpromote.
+func (i *Instruction) AsFpromote(x Value) {
+ i.opcode = OpcodeFpromote
+ i.v = x
+ i.typ = TypeF64
+}
+
+// AsFcvtFromInt initializes this instruction as an instruction with either OpcodeFcvtFromUint or OpcodeFcvtFromSint
+func (i *Instruction) AsFcvtFromInt(x Value, signed bool, dst64bit bool) *Instruction {
+ if signed {
+ i.opcode = OpcodeFcvtFromSint
+ } else {
+ i.opcode = OpcodeFcvtFromUint
+ }
+ i.v = x
+ if dst64bit {
+ i.typ = TypeF64
+ } else {
+ i.typ = TypeF32
+ }
+ return i
+}
+
+// AsFcvtToInt initializes this instruction as an instruction with either OpcodeFcvtToUint or OpcodeFcvtToSint
+func (i *Instruction) AsFcvtToInt(x, ctx Value, signed bool, dst64bit bool, sat bool) *Instruction {
+ switch {
+ case signed && !sat:
+ i.opcode = OpcodeFcvtToSint
+ case !signed && !sat:
+ i.opcode = OpcodeFcvtToUint
+ case signed && sat:
+ i.opcode = OpcodeFcvtToSintSat
+ case !signed && sat:
+ i.opcode = OpcodeFcvtToUintSat
+ }
+ i.v = x
+ i.v2 = ctx
+ if dst64bit {
+ i.typ = TypeI64
+ } else {
+ i.typ = TypeI32
+ }
+ return i
+}
+
+// AsVFcvtToIntSat initializes this instruction as an instruction with either OpcodeVFcvtToSintSat or OpcodeVFcvtToUintSat
+func (i *Instruction) AsVFcvtToIntSat(x Value, lane VecLane, signed bool) *Instruction {
+ if signed {
+ i.opcode = OpcodeVFcvtToSintSat
+ } else {
+ i.opcode = OpcodeVFcvtToUintSat
+ }
+ i.v = x
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsVFcvtFromInt initializes this instruction as an instruction with either OpcodeVFcvtFromSint or OpcodeVFcvtFromUint
+func (i *Instruction) AsVFcvtFromInt(x Value, lane VecLane, signed bool) *Instruction {
+ if signed {
+ i.opcode = OpcodeVFcvtFromSint
+ } else {
+ i.opcode = OpcodeVFcvtFromUint
+ }
+ i.v = x
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsNarrow initializes this instruction as an instruction with either OpcodeSnarrow or OpcodeUnarrow
+func (i *Instruction) AsNarrow(x, y Value, lane VecLane, signed bool) *Instruction {
+ if signed {
+ i.opcode = OpcodeSnarrow
+ } else {
+ i.opcode = OpcodeUnarrow
+ }
+ i.v = x
+ i.v2 = y
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsFvpromoteLow initializes this instruction as an instruction with OpcodeFvpromoteLow
+func (i *Instruction) AsFvpromoteLow(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeFvpromoteLow
+ i.v = x
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsFvdemote initializes this instruction as an instruction with OpcodeFvdemote
+func (i *Instruction) AsFvdemote(x Value, lane VecLane) *Instruction {
+ i.opcode = OpcodeFvdemote
+ i.v = x
+ i.u1 = uint64(lane)
+ return i
+}
+
+// AsSExtend initializes this instruction as a sign extension instruction with OpcodeSExtend.
+func (i *Instruction) AsSExtend(v Value, from, to byte) *Instruction {
+ i.opcode = OpcodeSExtend
+ i.v = v
+ i.u1 = uint64(from)<<8 | uint64(to)
+ if to == 64 {
+ i.typ = TypeI64
+ } else {
+ i.typ = TypeI32
+ }
+ return i
+}
+
+// AsUExtend initializes this instruction as an unsigned extension instruction with OpcodeUExtend.
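+//
+// The from/to bit widths are packed into u1 as from<<8 | to. For example (a
+// sketch with hypothetical values), AsUExtend(v, 32, 64) stores 0x2040 in u1,
+// and ExtendData then returns (32, 64, false).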
+func (i *Instruction) AsUExtend(v Value, from, to byte) *Instruction {
+ i.opcode = OpcodeUExtend
+ i.v = v
+ i.u1 = uint64(from)<<8 | uint64(to)
+ if to == 64 {
+ i.typ = TypeI64
+ } else {
+ i.typ = TypeI32
+ }
+ return i
+}
+
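+// ExtendData returns the operands of this extend instruction, i.e. the source
+// and destination bit widths and whether the extension is signed.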
+func (i *Instruction) ExtendData() (from, to byte, signed bool) {
+ if i.opcode != OpcodeSExtend && i.opcode != OpcodeUExtend {
+ panic("BUG: ExtendData only available for OpcodeSExtend and OpcodeUExtend")
+ }
+ from = byte(i.u1 >> 8)
+ to = byte(i.u1)
+ signed = i.opcode == OpcodeSExtend
+ return
+}
+
+// AsSelect initializes this instruction as a select instruction with OpcodeSelect.
+func (i *Instruction) AsSelect(c, x, y Value) *Instruction {
+ i.opcode = OpcodeSelect
+ i.v = c
+ i.v2 = x
+ i.v3 = y
+ i.typ = x.Type()
+ return i
+}
+
+// SelectData returns the select data for this instruction necessary for backends.
+func (i *Instruction) SelectData() (c, x, y Value) {
+ c = i.v
+ x = i.v2
+ y = i.v3
+ return
+}
+
+// ExtendFromToBits returns the from and to bit size for the extension instruction.
+func (i *Instruction) ExtendFromToBits() (from, to byte) {
+ from = byte(i.u1 >> 8)
+ to = byte(i.u1)
+ return
+}
+
+// Format returns a string representation of this instruction with the given builder.
+// For debugging purposes only.
+func (i *Instruction) Format(b Builder) string {
+ var instSuffix string
+ switch i.opcode {
+ case OpcodeExitWithCode:
+ instSuffix = fmt.Sprintf(" %s, %s", i.v.Format(b), wazevoapi.ExitCode(i.u1))
+ case OpcodeExitIfTrueWithCode:
+ instSuffix = fmt.Sprintf(" %s, %s, %s", i.v2.Format(b), i.v.Format(b), wazevoapi.ExitCode(i.u1))
+ case OpcodeIadd, OpcodeIsub, OpcodeImul, OpcodeFadd, OpcodeFsub, OpcodeFmin, OpcodeFmax, OpcodeFdiv, OpcodeFmul:
+ instSuffix = fmt.Sprintf(" %s, %s", i.v.Format(b), i.v2.Format(b))
+ case OpcodeIcmp:
+ instSuffix = fmt.Sprintf(" %s, %s, %s", IntegerCmpCond(i.u1), i.v.Format(b), i.v2.Format(b))
+ case OpcodeFcmp:
+ instSuffix = fmt.Sprintf(" %s, %s, %s", FloatCmpCond(i.u1), i.v.Format(b), i.v2.Format(b))
+ case OpcodeSExtend, OpcodeUExtend:
+ instSuffix = fmt.Sprintf(" %s, %d->%d", i.v.Format(b), i.u1>>8, i.u1&0xff)
+ case OpcodeCall, OpcodeCallIndirect:
+ view := i.vs.View()
+ vs := make([]string, len(view))
+ for idx := range vs {
+ vs[idx] = view[idx].Format(b)
+ }
+ if i.opcode == OpcodeCallIndirect {
+ instSuffix = fmt.Sprintf(" %s:%s, %s", i.v.Format(b), SignatureID(i.u1), strings.Join(vs, ", "))
+ } else {
+ instSuffix = fmt.Sprintf(" %s:%s, %s", FuncRef(i.u1), SignatureID(i.u2), strings.Join(vs, ", "))
+ }
+ case OpcodeStore, OpcodeIstore8, OpcodeIstore16, OpcodeIstore32:
+ instSuffix = fmt.Sprintf(" %s, %s, %#x", i.v.Format(b), i.v2.Format(b), uint32(i.u1))
+ case OpcodeLoad, OpcodeVZeroExtLoad:
+ instSuffix = fmt.Sprintf(" %s, %#x", i.v.Format(b), int32(i.u1))
+ case OpcodeLoadSplat:
+ instSuffix = fmt.Sprintf(".%s %s, %#x", VecLane(i.u2), i.v.Format(b), int32(i.u1))
+ case OpcodeUload8, OpcodeUload16, OpcodeUload32, OpcodeSload8, OpcodeSload16, OpcodeSload32:
+ instSuffix = fmt.Sprintf(" %s, %#x", i.v.Format(b), int32(i.u1))
+ case OpcodeSelect, OpcodeVbitselect:
+ instSuffix = fmt.Sprintf(" %s, %s, %s", i.v.Format(b), i.v2.Format(b), i.v3.Format(b))
+ case OpcodeIconst:
+ switch i.typ {
+ case TypeI32:
+ instSuffix = fmt.Sprintf("_32 %#x", uint32(i.u1))
+ case TypeI64:
+ instSuffix = fmt.Sprintf("_64 %#x", i.u1)
+ }
+ case OpcodeVconst:
+ instSuffix = fmt.Sprintf(" %016x %016x", i.u1, i.u2)
+ case OpcodeF32const:
+ instSuffix = fmt.Sprintf(" %f", math.Float32frombits(uint32(i.u1)))
+ case OpcodeF64const:
+ instSuffix = fmt.Sprintf(" %f", math.Float64frombits(i.u1))
+ case OpcodeReturn:
+ view := i.vs.View()
+ if len(view) == 0 {
+ break
+ }
+ vs := make([]string, len(view))
+ for idx := range vs {
+ vs[idx] = view[idx].Format(b)
+ }
+ instSuffix = fmt.Sprintf(" %s", strings.Join(vs, ", "))
+ case OpcodeJump:
+ view := i.vs.View()
+ vs := make([]string, len(view)+1)
+ if i.IsFallthroughJump() {
+ vs[0] = " fallthrough"
+ } else {
+ vs[0] = " " + i.blk.(*basicBlock).Name()
+ }
+ for idx := range view {
+ vs[idx+1] = view[idx].Format(b)
+ }
+
+ instSuffix = strings.Join(vs, ", ")
+ case OpcodeBrz, OpcodeBrnz:
+ view := i.vs.View()
+ vs := make([]string, len(view)+2)
+ vs[0] = " " + i.v.Format(b)
+ vs[1] = i.blk.(*basicBlock).Name()
+ for idx := range view {
+ vs[idx+2] = view[idx].Format(b)
+ }
+ instSuffix = strings.Join(vs, ", ")
+ case OpcodeBrTable:
+ // `BrTable index, [label1, label2, ... labelN]`
+ instSuffix = fmt.Sprintf(" %s", i.v.Format(b))
+ instSuffix += ", ["
+ for i, target := range i.targets {
+ blk := target.(*basicBlock)
+ if i == 0 {
+ instSuffix += blk.Name()
+ } else {
+ instSuffix += ", " + blk.Name()
+ }
+ }
+ instSuffix += "]"
+ case OpcodeBand, OpcodeBor, OpcodeBxor, OpcodeRotr, OpcodeRotl, OpcodeIshl, OpcodeSshr, OpcodeUshr,
+ OpcodeSdiv, OpcodeUdiv, OpcodeFcopysign, OpcodeSrem, OpcodeUrem,
+ OpcodeVbnot, OpcodeVbxor, OpcodeVbor, OpcodeVband, OpcodeVbandnot, OpcodeVIcmp, OpcodeVFcmp:
+ instSuffix = fmt.Sprintf(" %s, %s", i.v.Format(b), i.v2.Format(b))
+ case OpcodeUndefined:
+ case OpcodeClz, OpcodeCtz, OpcodePopcnt, OpcodeFneg, OpcodeFcvtToSint, OpcodeFcvtToUint, OpcodeFcvtFromSint,
+ OpcodeFcvtFromUint, OpcodeFcvtToSintSat, OpcodeFcvtToUintSat, OpcodeFdemote, OpcodeFpromote, OpcodeIreduce, OpcodeBitcast, OpcodeSqrt, OpcodeFabs,
+ OpcodeCeil, OpcodeFloor, OpcodeTrunc, OpcodeNearest:
+ instSuffix = " " + i.v.Format(b)
+ case OpcodeVIadd, OpcodeExtIaddPairwise, OpcodeVSaddSat, OpcodeVUaddSat, OpcodeVIsub, OpcodeVSsubSat, OpcodeVUsubSat,
+ OpcodeVImin, OpcodeVUmin, OpcodeVImax, OpcodeVUmax, OpcodeVImul, OpcodeVAvgRound,
+ OpcodeVFadd, OpcodeVFsub, OpcodeVFmul, OpcodeVFdiv,
+ OpcodeVIshl, OpcodeVSshr, OpcodeVUshr,
+ OpcodeVFmin, OpcodeVFmax, OpcodeVMinPseudo, OpcodeVMaxPseudo,
+ OpcodeSnarrow, OpcodeUnarrow, OpcodeSwizzle, OpcodeSqmulRoundSat:
+ instSuffix = fmt.Sprintf(".%s %s, %s", VecLane(i.u1), i.v.Format(b), i.v2.Format(b))
+ case OpcodeVIabs, OpcodeVIneg, OpcodeVIpopcnt, OpcodeVhighBits, OpcodeVallTrue, OpcodeVanyTrue,
+ OpcodeVFabs, OpcodeVFneg, OpcodeVSqrt, OpcodeVCeil, OpcodeVFloor, OpcodeVTrunc, OpcodeVNearest,
+ OpcodeVFcvtToUintSat, OpcodeVFcvtToSintSat, OpcodeVFcvtFromUint, OpcodeVFcvtFromSint,
+ OpcodeFvpromoteLow, OpcodeFvdemote, OpcodeSwidenLow, OpcodeUwidenLow, OpcodeSwidenHigh, OpcodeUwidenHigh,
+ OpcodeSplat:
+ instSuffix = fmt.Sprintf(".%s %s", VecLane(i.u1), i.v.Format(b))
+ case OpcodeExtractlane:
+ var signedness string
+ if i.u1 != 0 {
+ signedness = "signed"
+ } else {
+ signedness = "unsigned"
+ }
+ instSuffix = fmt.Sprintf(".%s %d, %s (%s)", VecLane(i.u2), 0x0000FFFF&i.u1, i.v.Format(b), signedness)
+ case OpcodeInsertlane:
+ instSuffix = fmt.Sprintf(".%s %d, %s, %s", VecLane(i.u2), i.u1, i.v.Format(b), i.v2.Format(b))
+ case OpcodeShuffle:
+ lanes := make([]byte, 16)
+ for idx := 0; idx < 8; idx++ {
+ lanes[idx] = byte(i.u1 >> (8 * idx))
+ }
+ for idx := 0; idx < 8; idx++ {
+ lanes[idx+8] = byte(i.u2 >> (8 * idx))
+ }
+ // Prints Shuffle.[0 1 2 3 4 5 6 7 ...] v2, v3
+ instSuffix = fmt.Sprintf(".%v %s, %s", lanes, i.v.Format(b), i.v2.Format(b))
+ case OpcodeAtomicRmw:
+ instSuffix = fmt.Sprintf(" %s_%d, %s, %s", AtomicRmwOp(i.u1), 8*i.u2, i.v.Format(b), i.v2.Format(b))
+ case OpcodeAtomicLoad:
+ instSuffix = fmt.Sprintf("_%d, %s", 8*i.u1, i.v.Format(b))
+ case OpcodeAtomicStore:
+ instSuffix = fmt.Sprintf("_%d, %s, %s", 8*i.u1, i.v.Format(b), i.v2.Format(b))
+ case OpcodeAtomicCas:
+ instSuffix = fmt.Sprintf("_%d, %s, %s, %s", 8*i.u1, i.v.Format(b), i.v2.Format(b), i.v3.Format(b))
+ case OpcodeFence:
+ instSuffix = fmt.Sprintf(" %d", i.u1)
+ case OpcodeWideningPairwiseDotProductS:
+ instSuffix = fmt.Sprintf(" %s, %s", i.v.Format(b), i.v2.Format(b))
+ default:
+ panic(fmt.Sprintf("TODO: format for %s", i.opcode))
+ }
+
+ instr := i.opcode.String() + instSuffix
+
+ var rvs []string
+ if rv := i.rValue; rv.Valid() {
+ rvs = append(rvs, rv.formatWithType(b))
+ }
+
+ for _, v := range i.rValues.View() {
+ rvs = append(rvs, v.formatWithType(b))
+ }
+
+ if len(rvs) > 0 {
+ return fmt.Sprintf("%s = %s", strings.Join(rvs, ", "), instr)
+ } else {
+ return instr
+ }
+}
+
+// addArgumentBranchInst adds an argument to this instruction.
+func (i *Instruction) addArgumentBranchInst(b *builder, v Value) {
+ switch i.opcode {
+ case OpcodeJump, OpcodeBrz, OpcodeBrnz:
+ i.vs = i.vs.Append(&b.varLengthPool, v)
+ default:
+ panic("BUG: " + i.opcode.String())
+ }
+}
+
+// Constant returns true if this instruction is a constant instruction.
+func (i *Instruction) Constant() bool {
+ switch i.opcode {
+ case OpcodeIconst, OpcodeF32const, OpcodeF64const:
+ return true
+ }
+ return false
+}
+
+// ConstantVal returns the constant value of this instruction.
+// How to interpret the return value depends on the opcode.
+func (i *Instruction) ConstantVal() (ret uint64) {
+ switch i.opcode {
+ case OpcodeIconst, OpcodeF32const, OpcodeF64const:
+ ret = i.u1
+ default:
+ panic("TODO")
+ }
+ return
+}
+
+// String implements fmt.Stringer.
+func (o Opcode) String() (ret string) {
+ switch o {
+ case OpcodeInvalid:
+ return "invalid"
+ case OpcodeUndefined:
+ return "Undefined"
+ case OpcodeJump:
+ return "Jump"
+ case OpcodeBrz:
+ return "Brz"
+ case OpcodeBrnz:
+ return "Brnz"
+ case OpcodeBrTable:
+ return "BrTable"
+ case OpcodeExitWithCode:
+ return "Exit"
+ case OpcodeExitIfTrueWithCode:
+ return "ExitIfTrue"
+ case OpcodeReturn:
+ return "Return"
+ case OpcodeCall:
+ return "Call"
+ case OpcodeCallIndirect:
+ return "CallIndirect"
+ case OpcodeSplat:
+ return "Splat"
+ case OpcodeSwizzle:
+ return "Swizzle"
+ case OpcodeInsertlane:
+ return "Insertlane"
+ case OpcodeExtractlane:
+ return "Extractlane"
+ case OpcodeLoad:
+ return "Load"
+ case OpcodeLoadSplat:
+ return "LoadSplat"
+ case OpcodeStore:
+ return "Store"
+ case OpcodeUload8:
+ return "Uload8"
+ case OpcodeSload8:
+ return "Sload8"
+ case OpcodeIstore8:
+ return "Istore8"
+ case OpcodeUload16:
+ return "Uload16"
+ case OpcodeSload16:
+ return "Sload16"
+ case OpcodeIstore16:
+ return "Istore16"
+ case OpcodeUload32:
+ return "Uload32"
+ case OpcodeSload32:
+ return "Sload32"
+ case OpcodeIstore32:
+ return "Istore32"
+ case OpcodeIconst:
+ return "Iconst"
+ case OpcodeF32const:
+ return "F32const"
+ case OpcodeF64const:
+ return "F64const"
+ case OpcodeVconst:
+ return "Vconst"
+ case OpcodeShuffle:
+ return "Shuffle"
+ case OpcodeSelect:
+ return "Select"
+ case OpcodeVanyTrue:
+ return "VanyTrue"
+ case OpcodeVallTrue:
+ return "VallTrue"
+ case OpcodeVhighBits:
+ return "VhighBits"
+ case OpcodeIcmp:
+ return "Icmp"
+ case OpcodeIcmpImm:
+ return "IcmpImm"
+ case OpcodeVIcmp:
+ return "VIcmp"
+ case OpcodeIadd:
+ return "Iadd"
+ case OpcodeIsub:
+ return "Isub"
+ case OpcodeImul:
+ return "Imul"
+ case OpcodeUdiv:
+ return "Udiv"
+ case OpcodeSdiv:
+ return "Sdiv"
+ case OpcodeUrem:
+ return "Urem"
+ case OpcodeSrem:
+ return "Srem"
+ case OpcodeBand:
+ return "Band"
+ case OpcodeBor:
+ return "Bor"
+ case OpcodeBxor:
+ return "Bxor"
+ case OpcodeBnot:
+ return "Bnot"
+ case OpcodeRotl:
+ return "Rotl"
+ case OpcodeRotr:
+ return "Rotr"
+ case OpcodeIshl:
+ return "Ishl"
+ case OpcodeUshr:
+ return "Ushr"
+ case OpcodeSshr:
+ return "Sshr"
+ case OpcodeClz:
+ return "Clz"
+ case OpcodeCtz:
+ return "Ctz"
+ case OpcodePopcnt:
+ return "Popcnt"
+ case OpcodeFcmp:
+ return "Fcmp"
+ case OpcodeFadd:
+ return "Fadd"
+ case OpcodeFsub:
+ return "Fsub"
+ case OpcodeFmul:
+ return "Fmul"
+ case OpcodeFdiv:
+ return "Fdiv"
+ case OpcodeSqmulRoundSat:
+ return "SqmulRoundSat"
+ case OpcodeSqrt:
+ return "Sqrt"
+ case OpcodeFneg:
+ return "Fneg"
+ case OpcodeFabs:
+ return "Fabs"
+ case OpcodeFcopysign:
+ return "Fcopysign"
+ case OpcodeFmin:
+ return "Fmin"
+ case OpcodeFmax:
+ return "Fmax"
+ case OpcodeCeil:
+ return "Ceil"
+ case OpcodeFloor:
+ return "Floor"
+ case OpcodeTrunc:
+ return "Trunc"
+ case OpcodeNearest:
+ return "Nearest"
+ case OpcodeBitcast:
+ return "Bitcast"
+ case OpcodeIreduce:
+ return "Ireduce"
+ case OpcodeSnarrow:
+ return "Snarrow"
+ case OpcodeUnarrow:
+ return "Unarrow"
+ case OpcodeSwidenLow:
+ return "SwidenLow"
+ case OpcodeSwidenHigh:
+ return "SwidenHigh"
+ case OpcodeUwidenLow:
+ return "UwidenLow"
+ case OpcodeUwidenHigh:
+ return "UwidenHigh"
+ case OpcodeExtIaddPairwise:
+ return "IaddPairwise"
+ case OpcodeWideningPairwiseDotProductS:
+ return "WideningPairwiseDotProductS"
+ case OpcodeUExtend:
+ return "UExtend"
+ case OpcodeSExtend:
+ return "SExtend"
+ case OpcodeFpromote:
+ return "Fpromote"
+ case OpcodeFdemote:
+ return "Fdemote"
+ case OpcodeFvdemote:
+ return "Fvdemote"
+ case OpcodeFcvtToUint:
+ return "FcvtToUint"
+ case OpcodeFcvtToSint:
+ return "FcvtToSint"
+ case OpcodeFcvtToUintSat:
+ return "FcvtToUintSat"
+ case OpcodeFcvtToSintSat:
+ return "FcvtToSintSat"
+ case OpcodeFcvtFromUint:
+ return "FcvtFromUint"
+ case OpcodeFcvtFromSint:
+ return "FcvtFromSint"
+ case OpcodeAtomicRmw:
+ return "AtomicRmw"
+ case OpcodeAtomicCas:
+ return "AtomicCas"
+ case OpcodeAtomicLoad:
+ return "AtomicLoad"
+ case OpcodeAtomicStore:
+ return "AtomicStore"
+ case OpcodeFence:
+ return "Fence"
+ case OpcodeVbor:
+ return "Vbor"
+ case OpcodeVbxor:
+ return "Vbxor"
+ case OpcodeVband:
+ return "Vband"
+ case OpcodeVbandnot:
+ return "Vbandnot"
+ case OpcodeVbnot:
+ return "Vbnot"
+ case OpcodeVbitselect:
+ return "Vbitselect"
+ case OpcodeVIadd:
+ return "VIadd"
+ case OpcodeVSaddSat:
+ return "VSaddSat"
+ case OpcodeVUaddSat:
+ return "VUaddSat"
+ case OpcodeVSsubSat:
+ return "VSsubSat"
+ case OpcodeVUsubSat:
+ return "VUsubSat"
+ case OpcodeVAvgRound:
+ return "OpcodeVAvgRound"
+ case OpcodeVIsub:
+ return "VIsub"
+ case OpcodeVImin:
+ return "VImin"
+ case OpcodeVUmin:
+ return "VUmin"
+ case OpcodeVImax:
+ return "VImax"
+ case OpcodeVUmax:
+ return "VUmax"
+ case OpcodeVImul:
+ return "VImul"
+ case OpcodeVIabs:
+ return "VIabs"
+ case OpcodeVIneg:
+ return "VIneg"
+ case OpcodeVIpopcnt:
+ return "VIpopcnt"
+ case OpcodeVIshl:
+ return "VIshl"
+ case OpcodeVUshr:
+ return "VUshr"
+ case OpcodeVSshr:
+ return "VSshr"
+ case OpcodeVFabs:
+ return "VFabs"
+ case OpcodeVFmax:
+ return "VFmax"
+ case OpcodeVFmin:
+ return "VFmin"
+ case OpcodeVFneg:
+ return "VFneg"
+ case OpcodeVFadd:
+ return "VFadd"
+ case OpcodeVFsub:
+ return "VFsub"
+ case OpcodeVFmul:
+ return "VFmul"
+ case OpcodeVFdiv:
+ return "VFdiv"
+ case OpcodeVFcmp:
+ return "VFcmp"
+ case OpcodeVCeil:
+ return "VCeil"
+ case OpcodeVFloor:
+ return "VFloor"
+ case OpcodeVTrunc:
+ return "VTrunc"
+ case OpcodeVNearest:
+ return "VNearest"
+ case OpcodeVMaxPseudo:
+ return "VMaxPseudo"
+ case OpcodeVMinPseudo:
+ return "VMinPseudo"
+ case OpcodeVSqrt:
+ return "VSqrt"
+ case OpcodeVFcvtToUintSat:
+ return "VFcvtToUintSat"
+ case OpcodeVFcvtToSintSat:
+ return "VFcvtToSintSat"
+ case OpcodeVFcvtFromUint:
+ return "VFcvtFromUint"
+ case OpcodeVFcvtFromSint:
+ return "VFcvtFromSint"
+ case OpcodeFvpromoteLow:
+ return "FvpromoteLow"
+ case OpcodeVZeroExtLoad:
+ return "VZeroExtLoad"
+ }
+ panic(fmt.Sprintf("unknown opcode %d", o))
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass.go
new file mode 100644
index 000000000..a2e986cd1
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass.go
@@ -0,0 +1,417 @@
+package ssa
+
+import (
+ "fmt"
+
+ "github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
+)
+
+// RunPasses implements Builder.RunPasses.
+//
+// The order here matters; some passes depend on the previous ones.
+//
+// Note that passes suffixed with "Opt" are the optimization passes, meaning that they edit the instructions and blocks,
+// while the other passes do not; e.g. passEstimateBranchProbabilities only calculates additional information without editing them.
+func (b *builder) RunPasses() {
+ b.runPreBlockLayoutPasses()
+ b.runBlockLayoutPass()
+ b.runPostBlockLayoutPasses()
+ b.runFinalizingPasses()
+}
+
+func (b *builder) runPreBlockLayoutPasses() {
+ passSortSuccessors(b)
+ passDeadBlockEliminationOpt(b)
+ passRedundantPhiEliminationOpt(b)
+ // The result of passCalculateImmediateDominators will be used by various passes below.
+ passCalculateImmediateDominators(b)
+ passNopInstElimination(b)
+
+ // TODO: implement either conversion of an irreducible CFG into a reducible one, or irreducible CFG detection where we panic.
+ // WebAssembly programs shouldn't result in an irreducible CFG, but we should handle it properly just in case.
+ // See FixIrreducible pass in LLVM: https://llvm.org/doxygen/FixIrreducible_8cpp_source.html
+
+ // TODO: implement more optimization passes like:
+ // block coalescing.
+ // Copy-propagation.
+ // Constant folding.
+ // Common subexpression elimination.
+ // Arithmetic simplifications.
+ // and more!
+
+ // passDeadCodeEliminationOpt could be more accurate if we do this after other optimizations.
+ passDeadCodeEliminationOpt(b)
+ b.donePreBlockLayoutPasses = true
+}
+
+func (b *builder) runBlockLayoutPass() {
+ if !b.donePreBlockLayoutPasses {
+ panic("runBlockLayoutPass must be called after all pre passes are done")
+ }
+ passLayoutBlocks(b)
+ b.doneBlockLayout = true
+}
+
+// runPostBlockLayoutPasses runs the post block layout passes. After this point, the CFG is somewhat stable,
+// but can still be modified before the finalizing passes. At this point, critical edges are split by passLayoutBlocks.
+func (b *builder) runPostBlockLayoutPasses() {
+ if !b.doneBlockLayout {
+ panic("runPostBlockLayoutPasses must be called after block layout pass is done")
+ }
+ // TODO: Do more. e.g. tail duplication, loop unrolling, etc.
+
+ b.donePostBlockLayoutPasses = true
+}
+
+// runFinalizingPasses runs the finalizing passes. After this point, CFG should not be modified.
+func (b *builder) runFinalizingPasses() {
+ if !b.donePostBlockLayoutPasses {
+ panic("runFinalizingPasses must be called after post block layout passes are done")
+ }
+ // Critical edges are split, so we fix the loop nesting forest.
+ passBuildLoopNestingForest(b)
+ passBuildDominatorTree(b)
+ // Now that we know the final placement of the blocks, we can explicitly mark the fallthrough jumps.
+ b.markFallthroughJumps()
+}
+
+// passDeadBlockEliminationOpt searches for unreachable blocks and sets the basicBlock.invalid flag to true on each one.
+func passDeadBlockEliminationOpt(b *builder) {
+ entryBlk := b.entryBlk()
+ b.clearBlkVisited()
+ b.blkStack = append(b.blkStack, entryBlk)
+ for len(b.blkStack) > 0 {
+ reachableBlk := b.blkStack[len(b.blkStack)-1]
+ b.blkStack = b.blkStack[:len(b.blkStack)-1]
+ b.blkVisited[reachableBlk] = 0 // the value won't be used in this pass.
+
+ if !reachableBlk.sealed && !reachableBlk.ReturnBlock() {
+ panic(fmt.Sprintf("%s is not sealed", reachableBlk))
+ }
+
+ if wazevoapi.SSAValidationEnabled {
+ reachableBlk.validate(b)
+ }
+
+ for _, succ := range reachableBlk.success {
+ if _, ok := b.blkVisited[succ]; ok {
+ continue
+ }
+ b.blkStack = append(b.blkStack, succ)
+ }
+ }
+
+ for blk := b.blockIteratorBegin(); blk != nil; blk = b.blockIteratorNext() {
+ if _, ok := b.blkVisited[blk]; !ok {
+ blk.invalid = true
+ }
+ }
+}
+
+// passRedundantPhiEliminationOpt eliminates the redundant PHIs (in our terminology, parameters of a block).
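+//
+// For example (sketch): if a block has a parameter p and every predecessor passes
+// either the same value v or p itself as the corresponding argument, then p is
+// redundant and is aliased to v.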
+func passRedundantPhiEliminationOpt(b *builder) {
+ redundantParameterIndexes := b.ints[:0] // reuse the slice from previous iterations.
+
+ // TODO: this might be costly for large programs, but at least in my experiments, it performs almost the
+ // same as the single-iteration version in terms of overall compilation time. That *might be* mostly because
+ // removing many PHIs reduces the total number of instructions, not because the number of iterations is
+ // relatively small. For example, the sqlite speedtest binary produces a large number of redundant PHIs and
+ // needed at most 22 iterations, which seems acceptable but not that small either, since the worst-case
+ // complexity here is O(BlockNum * Iterations) where BlockNum can be on the order of thousands.
+ for {
+ changed := false
+ _ = b.blockIteratorBegin() // skip entry block!
+ // Below, we intentionally use named iteration variables, as this comes with inevitably nested for loops!
+ for blk := b.blockIteratorNext(); blk != nil; blk = b.blockIteratorNext() {
+ paramNum := len(blk.params)
+
+ for paramIndex := 0; paramIndex < paramNum; paramIndex++ {
+ phiValue := blk.params[paramIndex].value
+ redundant := true
+
+ nonSelfReferencingValue := ValueInvalid
+ for predIndex := range blk.preds {
+ br := blk.preds[predIndex].branch
+ // Resolve the alias in the arguments so that we could use the previous iteration's result.
+ b.resolveArgumentAlias(br)
+ pred := br.vs.View()[paramIndex]
+ if pred == phiValue {
+ // This is self-referencing: PHI from the same PHI.
+ continue
+ }
+
+ if !nonSelfReferencingValue.Valid() {
+ nonSelfReferencingValue = pred
+ continue
+ }
+
+ if nonSelfReferencingValue != pred {
+ redundant = false
+ break
+ }
+ }
+
+ if !nonSelfReferencingValue.Valid() {
+ // This shouldn't happen, and must be a bug in builder.go.
+ panic("BUG: params added but only self-referencing")
+ }
+
+ if redundant {
+ b.redundantParameterIndexToValue[paramIndex] = nonSelfReferencingValue
+ redundantParameterIndexes = append(redundantParameterIndexes, paramIndex)
+ }
+ }
+
+ if len(b.redundantParameterIndexToValue) == 0 {
+ continue
+ }
+ changed = true
+
+ // Remove the redundant PHIs from the argument list of branching instructions.
+ for predIndex := range blk.preds {
+ var cur int
+ predBlk := blk.preds[predIndex]
+ branchInst := predBlk.branch
+ view := branchInst.vs.View()
+ for argIndex, value := range view {
+ if _, ok := b.redundantParameterIndexToValue[argIndex]; !ok {
+ view[cur] = value
+ cur++
+ }
+ }
+ branchInst.vs.Cut(cur)
+ }
+
+ // Still need to have the definition of the value of the PHI (previously the parameter).
+ for _, redundantParamIndex := range redundantParameterIndexes {
+ phiValue := blk.params[redundantParamIndex].value
+ onlyValue := b.redundantParameterIndexToValue[redundantParamIndex]
+ // Create an alias in this block from the only phi argument to the phi value.
+ b.alias(phiValue, onlyValue)
+ }
+
+ // Finally, remove the redundant params from the blk.
+ var cur int
+ for paramIndex := 0; paramIndex < paramNum; paramIndex++ {
+ param := blk.params[paramIndex]
+ if _, ok := b.redundantParameterIndexToValue[paramIndex]; !ok {
+ blk.params[cur] = param
+ cur++
+ }
+ }
+ blk.params = blk.params[:cur]
+
+ // Clears the map for the next iteration.
+ for _, paramIndex := range redundantParameterIndexes {
+ delete(b.redundantParameterIndexToValue, paramIndex)
+ }
+ redundantParameterIndexes = redundantParameterIndexes[:0]
+ }
+
+ if !changed {
+ break
+ }
+ }
+
+ // Reuse the slice for the future passes.
+ b.ints = redundantParameterIndexes
+}
+
+// passDeadCodeEliminationOpt traverses all the instructions, and calculates the reference count of each Value, and
+// eliminates all the unnecessary instructions whose ref count is zero.
+// The results are stored in builder.valueRefCounts. This also assigns an InstructionGroupID to each Instruction
+// during the process. This is the last SSA-level optimization pass and after this,
+// the SSA function is ready to be used by backends.
+//
+// TODO: the algorithm here might not be efficient. Get back to this later.
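+//
+// Sketch of the algorithm: seed a worklist with the instructions that have side
+// effects, transitively mark the producers of their arguments as live, and then
+// unlink every instruction that was never marked.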
+func passDeadCodeEliminationOpt(b *builder) {
+ nvid := int(b.nextValueID)
+ if nvid >= len(b.valueRefCounts) {
+ b.valueRefCounts = append(b.valueRefCounts, make([]int, b.nextValueID)...)
+ }
+ if nvid >= len(b.valueIDToInstruction) {
+ b.valueIDToInstruction = append(b.valueIDToInstruction, make([]*Instruction, b.nextValueID)...)
+ }
+
+ // First, we gather all the instructions with side effects.
+ liveInstructions := b.instStack[:0]
+ // During the process, we will assign InstructionGroupID to each instruction, which is not
+ // relevant to dead code elimination, but which we need in the backend.
+ var gid InstructionGroupID
+ for blk := b.blockIteratorBegin(); blk != nil; blk = b.blockIteratorNext() {
+ for cur := blk.rootInstr; cur != nil; cur = cur.next {
+ cur.gid = gid
+ switch cur.sideEffect() {
+ case sideEffectTraps:
+ // Trappable instructions should always be alive.
+ liveInstructions = append(liveInstructions, cur)
+ case sideEffectStrict:
+ liveInstructions = append(liveInstructions, cur)
+ // The strict side effect should create different instruction groups.
+ gid++
+ }
+
+ r1, rs := cur.Returns()
+ if r1.Valid() {
+ b.valueIDToInstruction[r1.ID()] = cur
+ }
+ for _, r := range rs {
+ b.valueIDToInstruction[r.ID()] = cur
+ }
+ }
+ }
+
+ // Find all the instructions referenced by live instructions transitively.
+ for len(liveInstructions) > 0 {
+ tail := len(liveInstructions) - 1
+ live := liveInstructions[tail]
+ liveInstructions = liveInstructions[:tail]
+ if live.live {
+ // If it's already marked alive, this is referenced multiple times,
+ // so we can skip it.
+ continue
+ }
+ live.live = true
+
+ // Before we walk, we need to resolve the alias first.
+ b.resolveArgumentAlias(live)
+
+ v1, v2, v3, vs := live.Args()
+ if v1.Valid() {
+ producingInst := b.valueIDToInstruction[v1.ID()]
+ if producingInst != nil {
+ liveInstructions = append(liveInstructions, producingInst)
+ }
+ }
+
+ if v2.Valid() {
+ producingInst := b.valueIDToInstruction[v2.ID()]
+ if producingInst != nil {
+ liveInstructions = append(liveInstructions, producingInst)
+ }
+ }
+
+ if v3.Valid() {
+ producingInst := b.valueIDToInstruction[v3.ID()]
+ if producingInst != nil {
+ liveInstructions = append(liveInstructions, producingInst)
+ }
+ }
+
+ for _, v := range vs {
+ producingInst := b.valueIDToInstruction[v.ID()]
+ if producingInst != nil {
+ liveInstructions = append(liveInstructions, producingInst)
+ }
+ }
+ }
+
+ // Now that all the live instructions are flagged as live=true, we eliminate all dead instructions.
+ for blk := b.blockIteratorBegin(); blk != nil; blk = b.blockIteratorNext() {
+ for cur := blk.rootInstr; cur != nil; cur = cur.next {
+ if !cur.live {
+ // Remove the instruction from the list.
+ if prev := cur.prev; prev != nil {
+ prev.next = cur.next
+ } else {
+ blk.rootInstr = cur.next
+ }
+ if next := cur.next; next != nil {
+ next.prev = cur.prev
+ }
+ continue
+ }
+
+ // If the value is alive, we can be sure that its arguments are definitely used.
+ // Hence, we can increment the value reference counts.
+ v1, v2, v3, vs := cur.Args()
+ if v1.Valid() {
+ b.incRefCount(v1.ID(), cur)
+ }
+ if v2.Valid() {
+ b.incRefCount(v2.ID(), cur)
+ }
+ if v3.Valid() {
+ b.incRefCount(v3.ID(), cur)
+ }
+ for _, v := range vs {
+ b.incRefCount(v.ID(), cur)
+ }
+ }
+ }
+
+ b.instStack = liveInstructions // we reuse the stack for the next iteration.
+}
+
+func (b *builder) incRefCount(id ValueID, from *Instruction) {
+ if wazevoapi.SSALoggingEnabled {
+ fmt.Printf("v%d referenced from %v\n", id, from.Format(b))
+ }
+ b.valueRefCounts[id]++
+}
+
+// clearBlkVisited clears the b.blkVisited map so that we can reuse it for multiple places.
+func (b *builder) clearBlkVisited() {
+ b.blkStack2 = b.blkStack2[:0]
+ for key := range b.blkVisited {
+ b.blkStack2 = append(b.blkStack2, key)
+ }
+ for _, blk := range b.blkStack2 {
+ delete(b.blkVisited, blk)
+ }
+ b.blkStack2 = b.blkStack2[:0]
+}
+
+// passNopInstElimination eliminates instructions that are essentially no-ops.
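+//
+// For example (sketch): in `v2 = Ishl v1, v0` where v0 is a constant whose value
+// modulo the operand's bit width is zero, v2 is aliased to v1 and the shift is
+// later removed as dead code.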
+func passNopInstElimination(b *builder) {
+ if int(b.nextValueID) >= len(b.valueIDToInstruction) {
+ b.valueIDToInstruction = append(b.valueIDToInstruction, make([]*Instruction, b.nextValueID)...)
+ }
+
+ for blk := b.blockIteratorBegin(); blk != nil; blk = b.blockIteratorNext() {
+ for cur := blk.rootInstr; cur != nil; cur = cur.next {
+ r1, rs := cur.Returns()
+ if r1.Valid() {
+ b.valueIDToInstruction[r1.ID()] = cur
+ }
+ for _, r := range rs {
+ b.valueIDToInstruction[r.ID()] = cur
+ }
+ }
+ }
+
+ for blk := b.blockIteratorBegin(); blk != nil; blk = b.blockIteratorNext() {
+ for cur := blk.rootInstr; cur != nil; cur = cur.next {
+ switch cur.Opcode() {
+ // TODO: add more logic here.
+ case OpcodeIshl, OpcodeSshr, OpcodeUshr:
+ x, amount := cur.Arg2()
+ definingInst := b.valueIDToInstruction[amount.ID()]
+ if definingInst == nil {
+ // If there's no defining instruction, that means the amount is coming from the parameter.
+ continue
+ }
+ if definingInst.Constant() {
+ v := definingInst.ConstantVal()
+
+ if x.Type().Bits() == 64 {
+ v = v % 64
+ } else {
+ v = v % 32
+ }
+ if v == 0 {
+ b.alias(cur.Return(), x)
+ }
+ }
+ }
+ }
+ }
+}
+
+// passSortSuccessors sorts the successors of each block in the natural program order.
+func passSortSuccessors(b *builder) {
+ for i := 0; i < b.basicBlocksPool.Allocated(); i++ {
+ blk := b.basicBlocksPool.View(i)
+ sortBlocks(blk.success)
+ }
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass_blk_layouts.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass_blk_layouts.go
new file mode 100644
index 000000000..9068180a0
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass_blk_layouts.go
@@ -0,0 +1,335 @@
+package ssa
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
+)
+
+// passLayoutBlocks implements Builder.LayoutBlocks. This re-organizes builder.reversePostOrderedBasicBlocks.
+//
+// TODO: there is tons of room for improvement here. e.g. LLVM has BlockPlacementPass using BlockFrequencyInfo,
+// BranchProbabilityInfo, and LoopInfo to do a much better job. Also, if we had profiling instrumentation
+// like the Ball-Larus algorithm, we could do profile-guided optimization. Basically, all of these approaches
+// try to maximize fall-through opportunities, which is most efficient.
+//
+// Here, a fallthrough happens when a block ends with a jump instruction whose target is the next block in
+// builder.reversePostOrderedBasicBlocks.
+//
+// Currently, we just place blocks using the DFS reverse post-order of the dominator tree with the following heuristics:
+//  1. a split-edge trampoline towards a loop header is placed as a fallthrough.
+//  2. we invert brz and brnz if doing so makes the fallthrough more likely.
+//
+// These heuristics are implemented in the maybeInvertBranches function.
+func passLayoutBlocks(b *builder) {
+ b.clearBlkVisited()
+
+ // We might end up splitting critical edges which adds more basic blocks,
+ // so we store the currently existing basic blocks in nonSplitBlocks temporarily.
+ // That way we can iterate over the original basic blocks while appending new ones into reversePostOrderedBasicBlocks.
+ nonSplitBlocks := b.blkStack[:0]
+ for i, blk := range b.reversePostOrderedBasicBlocks {
+ if !blk.Valid() {
+ continue
+ }
+ nonSplitBlocks = append(nonSplitBlocks, blk)
+ if i != len(b.reversePostOrderedBasicBlocks)-1 {
+ _ = maybeInvertBranches(blk, b.reversePostOrderedBasicBlocks[i+1])
+ }
+ }
+
+ var trampolines []*basicBlock
+
+ // Reset the order slice since we update on the fly by splitting critical edges.
+ b.reversePostOrderedBasicBlocks = b.reversePostOrderedBasicBlocks[:0]
+ uninsertedTrampolines := b.blkStack2[:0]
+ for _, blk := range nonSplitBlocks {
+ for i := range blk.preds {
+ pred := blk.preds[i].blk
+ if _, ok := b.blkVisited[pred]; ok || !pred.Valid() {
+ continue
+ } else if pred.reversePostOrder < blk.reversePostOrder {
+ // This means the edge is critical, and this pred is a trampoline that has yet to be inserted.
+ // Split edge trampolines must come before the destination in reverse post-order.
+ b.reversePostOrderedBasicBlocks = append(b.reversePostOrderedBasicBlocks, pred)
+ b.blkVisited[pred] = 0 // mark as inserted, the value is not used.
+ }
+ }
+
+ // Now that we've already added all the potential trampoline blocks incoming to this block,
+ // we can add this block itself.
+ b.reversePostOrderedBasicBlocks = append(b.reversePostOrderedBasicBlocks, blk)
+ b.blkVisited[blk] = 0 // mark as inserted, the value is not used.
+
+ if len(blk.success) < 2 {
+ // There won't be a critical edge originating from this block.
+ continue
+ } else if blk.currentInstr.opcode == OpcodeBrTable {
+ // We don't split critical edges here, because at the construction site of BrTable, we already split the edges.
+ continue
+ }
+
+ for sidx, succ := range blk.success {
+ if !succ.ReturnBlock() && // If the successor is a return block, we need to split the edge anyway because we need the "epilogue" to be inserted.
+ // Plus, if there aren't multiple incoming edges to this successor, (pred, succ) is not critical.
+ len(succ.preds) < 2 {
+ continue
+ }
+
+ // Otherwise, we are sure this is a critical edge. To modify the CFG, we need to find the predecessor info
+ // from the successor.
+ var predInfo *basicBlockPredecessorInfo
+ for i := range succ.preds { // This linear search should not be a problem since the number of predecessors should almost always be small.
+ pred := &succ.preds[i]
+ if pred.blk == blk {
+ predInfo = pred
+ break
+ }
+ }
+
+ if predInfo == nil {
+ // This must be a bug in somewhere around branch manipulation.
+ panic("BUG: predecessor info not found while the successor exists in successors list")
+ }
+
+ if wazevoapi.SSALoggingEnabled {
+ fmt.Printf("trying to split edge from %d->%d at %s\n",
+ blk.ID(), succ.ID(), predInfo.branch.Format(b))
+ }
+
+ trampoline := b.splitCriticalEdge(blk, succ, predInfo)
+ // Update the successors slice because the target is no longer the original `succ`.
+ blk.success[sidx] = trampoline
+
+ if wazevoapi.SSAValidationEnabled {
+ trampolines = append(trampolines, trampoline)
+ }
+
+ if wazevoapi.SSALoggingEnabled {
+ fmt.Printf("edge split from %d->%d at %s as %d->%d->%d \n",
+ blk.ID(), succ.ID(), predInfo.branch.Format(b),
+ blk.ID(), trampoline.ID(), succ.ID())
+ }
+
+ fallthroughBranch := blk.currentInstr
+ if fallthroughBranch.opcode == OpcodeJump && fallthroughBranch.blk == trampoline {
+ // This can be lowered as fallthrough at the end of the block.
+ b.reversePostOrderedBasicBlocks = append(b.reversePostOrderedBasicBlocks, trampoline)
+ b.blkVisited[trampoline] = 0 // mark as inserted, the value is not used.
+ } else {
+ uninsertedTrampolines = append(uninsertedTrampolines, trampoline)
+ }
+ }
+
+ for _, trampoline := range uninsertedTrampolines {
+ if trampoline.success[0].reversePostOrder <= trampoline.reversePostOrder { // "<=", not "<" because the target might be itself.
+ // This means the critical edge was backward, so we insert after the current block immediately.
+ b.reversePostOrderedBasicBlocks = append(b.reversePostOrderedBasicBlocks, trampoline)
+ b.blkVisited[trampoline] = 0 // mark as inserted, the value is not used.
+ } // If the target is forward, we can wait to insert until the target is inserted.
+ }
+ uninsertedTrampolines = uninsertedTrampolines[:0] // Reuse the stack for the next block.
+ }
+
+ if wazevoapi.SSALoggingEnabled {
+ var bs []string
+ for _, blk := range b.reversePostOrderedBasicBlocks {
+ bs = append(bs, blk.Name())
+ }
+ fmt.Println("ordered blocks: ", strings.Join(bs, ", "))
+ }
+
+ if wazevoapi.SSAValidationEnabled {
+ for _, trampoline := range trampolines {
+ if _, ok := b.blkVisited[trampoline]; !ok {
+ panic("BUG: trampoline block not inserted: " + trampoline.FormatHeader(b))
+ }
+ trampoline.validate(b)
+ }
+ }
+
+ // Reuse the stack for the next iteration.
+ b.blkStack2 = uninsertedTrampolines[:0]
+}
+
+// markFallthroughJumps finds the fallthrough jumps and marks them as such.
+func (b *builder) markFallthroughJumps() {
+ l := len(b.reversePostOrderedBasicBlocks) - 1
+ for i, blk := range b.reversePostOrderedBasicBlocks {
+ if i < l {
+ cur := blk.currentInstr
+ if cur.opcode == OpcodeJump && cur.blk == b.reversePostOrderedBasicBlocks[i+1] {
+ cur.AsFallthroughJump()
+ }
+ }
+ }
+}
+
+// maybeInvertBranches inverts the branch instructions if doing so is likely to make the fallthrough more likely, based on simple heuristics.
+// nextInRPO is the next block in the reverse post-order.
+//
+// Returns true if the branches are inverted, for testing purposes.
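+//
+// For example (sketch): a block ending with `Brz v, blkNext; Jump blkOther`, where
+// blkNext is the next block in the reverse post-order, is rewritten to
+// `Brnz v, blkOther; Jump blkNext` so that the jump can be lowered as a fallthrough.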
+func maybeInvertBranches(now *basicBlock, nextInRPO *basicBlock) bool {
+ fallthroughBranch := now.currentInstr
+ if fallthroughBranch.opcode == OpcodeBrTable {
+ return false
+ }
+
+ condBranch := fallthroughBranch.prev
+ if condBranch == nil || (condBranch.opcode != OpcodeBrnz && condBranch.opcode != OpcodeBrz) {
+ return false
+ }
+
+ if len(fallthroughBranch.vs.View()) != 0 || len(condBranch.vs.View()) != 0 {
+ // If either one of them has arguments, we don't invert the branches.
+ return false
+ }
+
+ // So this block has two branches (a conditional branch followed by an unconditional branch) at the end.
+ // We can invert the condition of the branch if it makes the fallthrough more likely.
+
+ fallthroughTarget, condTarget := fallthroughBranch.blk.(*basicBlock), condBranch.blk.(*basicBlock)
+
+ if fallthroughTarget.loopHeader {
+ // First, if the tail's target is a loopHeader, we don't need to do anything here,
+ // because the edge is likely to be a critical edge for complex loops (e.g. a loop with branches inside it).
+ // That means we will split the edge at the end of the LayoutBlocks function, and insert the trampoline block
+ // right after this block, which will be a fallthrough in any case.
+ return false
+ } else if condTarget.loopHeader {
+ // On the other hand, if the condBranch's target is loopHeader, we invert the condition of the branch
+ // so that we could get the fallthrough to the trampoline block.
+ goto invert
+ }
+
+ if fallthroughTarget == nextInRPO {
+ // Also, if the tail's target is the next block in the reverse post-order, we don't need to do anything here,
+ // because if this is not a critical edge, we would end up placing these two blocks adjacent to each other anyway.
+ // Even if it is a critical edge, we place the trampoline block right after this block, which will be a fallthrough in any case.
+ return false
+ } else if condTarget == nextInRPO {
+ // If the condBranch's target is the next block in the reverse post-order, we invert the condition of the branch
+ // so that we could get the fallthrough to the block.
+ goto invert
+ } else {
+ return false
+ }
+
+invert:
+ for i := range fallthroughTarget.preds {
+ pred := &fallthroughTarget.preds[i]
+ if pred.branch == fallthroughBranch {
+ pred.branch = condBranch
+ break
+ }
+ }
+ for i := range condTarget.preds {
+ pred := &condTarget.preds[i]
+ if pred.branch == condBranch {
+ pred.branch = fallthroughBranch
+ break
+ }
+ }
+
+ condBranch.InvertBrx()
+ condBranch.blk = fallthroughTarget
+ fallthroughBranch.blk = condTarget
+ if wazevoapi.SSALoggingEnabled {
+ fmt.Printf("inverting branches at %d->%d and %d->%d\n",
+ now.ID(), fallthroughTarget.ID(), now.ID(), condTarget.ID())
+ }
+
+ return true
+}
+
+// splitCriticalEdge splits the critical edge between the given predecessor (`pred`) and successor (owning `predInfo`).
+//
+// - `pred` is the source of the critical edge,
+// - `succ` is the destination of the critical edge,
+// - `predInfo` is the predecessor info in the succ.preds slice which represents the critical edge.
+//
+// Why is splitting critical edges important? See the following links:
+//
+// - https://en.wikipedia.org/wiki/Control-flow_graph
+// - https://nickdesaulniers.github.io/blog/2023/01/27/critical-edge-splitting/
+//
+// The returned basic block is the trampoline block which is inserted to split the critical edge.
+func (b *builder) splitCriticalEdge(pred, succ *basicBlock, predInfo *basicBlockPredecessorInfo) *basicBlock {
+ // In the following, we convert the following CFG:
+ //
+ // pred --(originalBranch)--> succ
+ //
+ // to the following CFG:
+ //
+ // pred --(newBranch)--> trampoline --(originalBranch)-> succ
+ //
+ // where trampoline is a new basic block which is created to split the critical edge.
+
+ trampoline := b.allocateBasicBlock()
+ if int(trampoline.id) >= len(b.dominators) {
+ b.dominators = append(b.dominators, make([]*basicBlock, trampoline.id+1)...)
+ }
+ b.dominators[trampoline.id] = pred
+
+ originalBranch := predInfo.branch
+
+ // Replace originalBranch with the newBranch.
+ newBranch := b.AllocateInstruction()
+ newBranch.opcode = originalBranch.opcode
+ newBranch.blk = trampoline
+ switch originalBranch.opcode {
+ case OpcodeJump:
+ case OpcodeBrz, OpcodeBrnz:
+ originalBranch.opcode = OpcodeJump // Trampoline consists of one unconditional branch.
+ newBranch.v = originalBranch.v
+ originalBranch.v = ValueInvalid
+ default:
+ panic("BUG: critical edge shouldn't be originated from br_table")
+ }
+ swapInstruction(pred, originalBranch, newBranch)
+
+ // Move the original branch into the trampoline.
+ trampoline.rootInstr = originalBranch
+ trampoline.currentInstr = originalBranch
+ trampoline.success = append(trampoline.success, succ) // Do not use []*basicBlock{succ} because we might have already allocated the slice.
+ trampoline.preds = append(trampoline.preds, // same as ^.
+ basicBlockPredecessorInfo{blk: pred, branch: newBranch})
+ b.Seal(trampoline)
+
+ // Update the predecessor info of succ to point to the trampoline.
+ predInfo.blk = trampoline
+ predInfo.branch = originalBranch
+
+ if wazevoapi.SSAValidationEnabled {
+ trampoline.validate(b)
+ }
+
+ if len(trampoline.params) > 0 {
+ panic("trampoline should not have params")
+ }
+
+ // Assign the same order as the original block so that this will be placed before the actual destination.
+ trampoline.reversePostOrder = pred.reversePostOrder
+ return trampoline
+}
+
+// swapInstruction replaces `old` in the block `blk` with `New`.
+func swapInstruction(blk *basicBlock, old, New *Instruction) {
+ if blk.rootInstr == old {
+ blk.rootInstr = New
+ next := old.next
+ New.next = next
+ next.prev = New
+ } else {
+ if blk.currentInstr == old {
+ blk.currentInstr = New
+ }
+ prev := old.prev
+ prev.next, New.prev = New, prev
+ if next := old.next; next != nil {
+ New.next, next.prev = next, New
+ }
+ }
+ old.prev, old.next = nil, nil
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass_cfg.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass_cfg.go
new file mode 100644
index 000000000..50cb9c475
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/pass_cfg.go
@@ -0,0 +1,312 @@
+package ssa
+
+import (
+ "fmt"
+ "math"
+ "strings"
+
+ "github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
+)
+
+// passCalculateImmediateDominators calculates immediate dominators for each basic block.
+// The result is stored in b.dominators. This makes it possible for the following passes to
+// use builder.isDominatedBy to check if a block is dominated by another block.
+//
+// At the end of the pass, this function also performs loop detection and sets the basicBlock.loopHeader flag.
+func passCalculateImmediateDominators(b *builder) {
+ reversePostOrder := b.reversePostOrderedBasicBlocks[:0]
+ exploreStack := b.blkStack[:0]
+ b.clearBlkVisited()
+
+ entryBlk := b.entryBlk()
+
+ // Store the reverse postorder from the entrypoint into reversePostOrder slice.
+ // This calculation of reverse postorder is not described in the paper,
+ // so we use a heuristic to calculate it so that we can potentially handle arbitrarily
+ // complex CFGs, under the assumption that success is sorted in the program's natural order.
+ // That means blk.success[i] always appears before blk.success[i+1] in the source program,
+ // which is a reasonable assumption as long as SSA Builder is properly used.
+ //
+ // First we push blocks in postorder, iteratively visiting successors starting from the entry block.
+ exploreStack = append(exploreStack, entryBlk)
+ const visitStateUnseen, visitStateSeen, visitStateDone = 0, 1, 2
+ b.blkVisited[entryBlk] = visitStateSeen
+ for len(exploreStack) > 0 {
+ tail := len(exploreStack) - 1
+ blk := exploreStack[tail]
+ exploreStack = exploreStack[:tail]
+ switch b.blkVisited[blk] {
+ case visitStateUnseen:
+ // This is likely a bug in the frontend.
+ panic("BUG: unsupported CFG")
+ case visitStateSeen:
+ // This is the first time to pop this block, and we have to see the successors first.
+ // So push this block again to the stack.
+ exploreStack = append(exploreStack, blk)
+ // And push the successors to the stack if necessary.
+ for _, succ := range blk.success {
+ if succ.ReturnBlock() || succ.invalid {
+ continue
+ }
+ if b.blkVisited[succ] == visitStateUnseen {
+ b.blkVisited[succ] = visitStateSeen
+ exploreStack = append(exploreStack, succ)
+ }
+ }
+ // Finally, we could pop this block once we pop all of its successors.
+ b.blkVisited[blk] = visitStateDone
+ case visitStateDone:
+ // Note: at this point we push blk in postorder despite its name.
+ reversePostOrder = append(reversePostOrder, blk)
+ }
+ }
+ // At this point, reversePostOrder has postorder actually, so we reverse it.
+ for i := len(reversePostOrder)/2 - 1; i >= 0; i-- {
+ j := len(reversePostOrder) - 1 - i
+ reversePostOrder[i], reversePostOrder[j] = reversePostOrder[j], reversePostOrder[i]
+ }
+
+ for i, blk := range reversePostOrder {
+ blk.reversePostOrder = i
+ }
+
+ // Reuse the dominators slice if possible from the computation of the previous function.
+ b.dominators = b.dominators[:cap(b.dominators)]
+ if len(b.dominators) < b.basicBlocksPool.Allocated() {
+ // Generously reserve space in the slice because the slice will be reused for future allocations.
+ b.dominators = append(b.dominators, make([]*basicBlock, b.basicBlocksPool.Allocated())...)
+ }
+ calculateDominators(reversePostOrder, b.dominators)
+
+ // Reuse the slices for future use.
+ b.blkStack = exploreStack
+
+ // For the following passes.
+ b.reversePostOrderedBasicBlocks = reversePostOrder
+
+ // Ready to detect loops!
+ subPassLoopDetection(b)
+}
+
+// calculateDominators calculates the immediate dominator of each node in the CFG, and stores the result in `doms`.
+// The algorithm is based on the one described in the paper "A Simple, Fast Dominance Algorithm"
+// https://www.cs.rice.edu/~keith/EMBED/dom.pdf which is a faster/simpler alternative to the well-known Lengauer-Tarjan algorithm.
+//
+// The following code almost matches the pseudocode in the paper with one exception (see the code comment below).
+//
+// The result slice `doms` must be pre-allocated with a size larger than the number of blocks in reversePostOrderedBlks.
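+//
+// As a sketch of one iteration: for each non-entry block in reverse post-order, the
+// new idom candidate is the intersection (via the intersect function below) of all
+// of its already-processed predecessors; the outer loop repeats until no doms entry
+// changes.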
+func calculateDominators(reversePostOrderedBlks []*basicBlock, doms []*basicBlock) {
+ entry, reversePostOrderedBlks := reversePostOrderedBlks[0], reversePostOrderedBlks[1: /* skips entry point */]
+ for _, blk := range reversePostOrderedBlks {
+ doms[blk.id] = nil
+ }
+ doms[entry.id] = entry
+
+ changed := true
+ for changed {
+ changed = false
+ for _, blk := range reversePostOrderedBlks {
+ var u *basicBlock
+ for i := range blk.preds {
+ pred := blk.preds[i].blk
+ // Skip if this pred is not reachable yet. Note that this is not described in the paper,
+ // but it is necessary to handle nested loops etc.
+ if doms[pred.id] == nil {
+ continue
+ }
+
+ if u == nil {
+ u = pred
+ } else {
+ u = intersect(doms, u, pred)
+ }
+ }
+ if doms[blk.id] != u {
+ doms[blk.id] = u
+ changed = true
+ }
+ }
+ }
+}
+
+// intersect returns the common dominator of blk1 and blk2.
+//
+// This is the `intersect` function in the paper.
+func intersect(doms []*basicBlock, blk1 *basicBlock, blk2 *basicBlock) *basicBlock {
+ finger1, finger2 := blk1, blk2
+ for finger1 != finger2 {
+ // Move the 'finger1' upwards to its immediate dominator.
+ for finger1.reversePostOrder > finger2.reversePostOrder {
+ finger1 = doms[finger1.id]
+ }
+ // Move the 'finger2' upwards to its immediate dominator.
+ for finger2.reversePostOrder > finger1.reversePostOrder {
+ finger2 = doms[finger2.id]
+ }
+ }
+ return finger1
+}
+
+// subPassLoopDetection detects loops in the function using the immediate dominators.
+//
+// This is run at the end of passCalculateImmediateDominators.
+func subPassLoopDetection(b *builder) {
+ for blk := b.blockIteratorBegin(); blk != nil; blk = b.blockIteratorNext() {
+ for i := range blk.preds {
+ pred := blk.preds[i].blk
+ if pred.invalid {
+ continue
+ }
+ if b.isDominatedBy(pred, blk) {
+ blk.loopHeader = true
+ }
+ }
+ }
+}
+
+// passBuildLoopNestingForest builds the loop nesting forest for the function.
+// This must be called after branch splitting since it relies on the CFG.
+func passBuildLoopNestingForest(b *builder) {
+ ent := b.entryBlk()
+ doms := b.dominators
+ for _, blk := range b.reversePostOrderedBasicBlocks {
+ n := doms[blk.id]
+ for !n.loopHeader && n != ent {
+ n = doms[n.id]
+ }
+
+ if n == ent && blk.loopHeader {
+ // A loop header not enclosed by any other loop: a root of the forest.
+ b.loopNestingForestRoots = append(b.loopNestingForestRoots, blk)
+ } else if n == ent {
+ // Not inside any loop; nothing to record.
+ } else if n.loopHeader {
+ n.loopNestingForestChildren = append(n.loopNestingForestChildren, blk)
+ }
+ }
+
+ if wazevoapi.SSALoggingEnabled {
+ for _, root := range b.loopNestingForestRoots {
+ printLoopNestingForest(root.(*basicBlock), 0)
+ }
+ }
+}
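+
+// As an illustrative example (not part of the original source), for two nested loops
+// with headers h1 (outer) and h2 (inner), h1 becomes a loop nesting forest root,
+// h2 and the other blocks of the outer loop body become children of h1, and the
+// blocks of the inner loop body become children of h2.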
+
+func printLoopNestingForest(root *basicBlock, depth int) {
+ fmt.Println(strings.Repeat("\t", depth), "loop nesting forest root:", root.ID())
+ for _, child := range root.loopNestingForestChildren {
+ fmt.Println(strings.Repeat("\t", depth+1), "child:", child.ID())
+ if child.LoopHeader() {
+ printLoopNestingForest(child.(*basicBlock), depth+2)
+ }
+ }
+}
+
+// dominatorSparseTree is a sparse-table-based structure for constant-time LCA queries
+// on the dominator tree, built from an Euler tour of that tree.
+type dominatorSparseTree struct {
+ time int
+ euler []*basicBlock
+ first, depth []int
+ table [][]int
+}
+
+// passBuildDominatorTree builds the dominator tree for the function, and constructs builder.sparseTree.
+func passBuildDominatorTree(b *builder) {
+ // First we materialize the children of each node in the dominator tree.
+ idoms := b.dominators
+ for _, blk := range b.reversePostOrderedBasicBlocks {
+ parent := idoms[blk.id]
+ if parent == nil {
+ panic("BUG")
+ } else if parent == blk {
+ // This is the entry block.
+ continue
+ }
+ // Prepend blk to parent's children list.
+ if prev := parent.child; prev != nil {
+ blk.sibling = prev
+ }
+ parent.child = blk
+ }
+
+ // Reset the state from the previous computation.
+ n := b.basicBlocksPool.Allocated()
+ st := &b.sparseTree
+ st.euler = append(st.euler[:0], make([]*basicBlock, 2*n-1)...)
+ st.first = append(st.first[:0], make([]int, n)...)
+ for i := range st.first {
+ st.first[i] = -1
+ }
+ st.depth = append(st.depth[:0], make([]int, 2*n-1)...)
+ st.time = 0
+
+ // Start building the sparse tree.
+ st.eulerTour(b.entryBlk(), 0)
+ st.buildSparseTable()
+}
+
+func (dt *dominatorSparseTree) eulerTour(node *basicBlock, height int) {
+ if wazevoapi.SSALoggingEnabled {
+ fmt.Println(strings.Repeat("\t", height), "euler tour:", node.ID())
+ }
+ dt.euler[dt.time] = node
+ dt.depth[dt.time] = height
+ if dt.first[node.id] == -1 {
+ dt.first[node.id] = dt.time
+ }
+ dt.time++
+
+ for child := node.child; child != nil; child = child.sibling {
+ dt.eulerTour(child, height+1)
+ dt.euler[dt.time] = node // add the current node again after visiting a child
+ dt.depth[dt.time] = height
+ dt.time++
+ }
+}
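+
+// As a worked example (not part of the original source), for the dominator tree
+// entry -> {A, B} and A -> {C}, the tour visits entry, A, C, A, entry, B, entry:
+// 2n-1 = 7 entries for n = 4 nodes, which is why euler and depth are sized 2*n-1
+// in passBuildDominatorTree above. first[node] records the index of the node's
+// first appearance in the tour, e.g. first[C] points at an entry with depth 2.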
+
+// buildSparseTable builds a sparse table over the Euler-tour depths for range minimum queries (RMQ).
+func (dt *dominatorSparseTree) buildSparseTable() {
+ n := len(dt.depth)
+ k := int(math.Log2(float64(n))) + 1
+ table := dt.table
+
+ if n >= len(table) {
+ table = append(table, make([][]int, n+1)...)
+ }
+ for i := range table {
+ if len(table[i]) < k {
+ table[i] = append(table[i], make([]int, k)...)
+ }
+ table[i][0] = i
+ }
+
+ for j := 1; 1<<j <= n; j++ {
+ for i := 0; i+(1<<j)-1 < n; i++ {
+ if dt.depth[table[i][j-1]] < dt.depth[table[i+(1<<(j-1))][j-1]] {
+ table[i][j] = table[i][j-1]
+ } else {
+ table[i][j] = table[i+(1<<(j-1))][j-1]
+ }
+ }
+ }
+ dt.table = table
+}
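+
+// To make the invariant concrete (illustrative only): table[i][j] holds the index,
+// into the depth slice, of the minimum depth over the window [i, i+2^j-1]. For
+// depth = [0, 1, 2, 1, 0, 1, 0], table[1][1] indexes min(depth[1], depth[2]), i.e.
+// index 1, while table[1][2] covers [1..4] and indexes depth[4] = 0, i.e. index 4.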
+
+// rmq performs a range minimum query over depth[l..r] on the sparse table, returning the index of the minimum entry.
+func (dt *dominatorSparseTree) rmq(l, r int) int {
+ table := dt.table
+ depth := dt.depth
+ j := int(math.Log2(float64(r - l + 1)))
+ if depth[table[l][j]] <= depth[table[r-(1<<j)+1][j]] {
+ return table[l][j]
+ }
+ return table[r-(1<<j)+1][j]
+}
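+
+// Continuing the illustrative depth = [0, 1, 2, 1, 0, 1, 0] example: rmq(1, 5)
+// computes j = floor(log2(5)) = 2 and compares the two overlapping windows [1..4]
+// and [2..5]; both have minimum depth 0 at index 4, so 4 is returned. Overlap is
+// harmless for minimum queries since min is idempotent.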
+
+// findLCA finds the lowest common ancestor (LCA) of u and v in the dominator tree using the Euler tour and RMQ.
+func (dt *dominatorSparseTree) findLCA(u, v BasicBlockID) *basicBlock {
+ first := dt.first
+ if first[u] > first[v] {
+ u, v = v, u
+ }
+ return dt.euler[dt.rmq(first[u], first[v])]
+}
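+
+// A hypothetical usage sketch (the blocks x and y are assumptions, not from the
+// original source): after the preprocessing above, the nearest common dominator of
+// two blocks can be queried in constant time:
+//
+//	lca := b.sparseTree.findLCA(x.id, y.id)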
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/signature.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/signature.go
new file mode 100644
index 000000000..43483395a
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/signature.go
@@ -0,0 +1,49 @@
+package ssa
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Signature is a function prototype.
+type Signature struct {
+ // ID is a unique identifier for this signature, used for lookup.
+ ID SignatureID
+ // Params and Results are the types of the parameters and results of the function.
+ Params, Results []Type
+
+ // used is true if this is used by the currently-compiled function.
+ // Debugging only.
+ used bool
+}
+
+// String implements fmt.Stringer.
+func (s *Signature) String() string {
+ str := strings.Builder{}
+ str.WriteString(s.ID.String())
+ str.WriteString(": ")
+ if len(s.Params) > 0 {
+ for _, typ := range s.Params {
+ str.WriteString(typ.String())
+ }
+ } else {
+ str.WriteByte('v')
+ }
+ str.WriteByte('_')
+ if len(s.Results) > 0 {
+ for _, typ := range s.Results {
+ str.WriteString(typ.String())
+ }
+ } else {
+ str.WriteByte('v')
+ }
+ return str.String()
+}
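+
+// For example (illustrative only), a signature with Params [i32, i64] and Results
+// [f64] prints as "sig5: i32i64_f64", and a signature with no params and no results
+// prints as "sig0: v_v":
+//
+//	s := &Signature{ID: 5, Params: []Type{TypeI32, TypeI64}, Results: []Type{TypeF64}}
+//	fmt.Println(s) // sig5: i32i64_f64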
+
+// SignatureID is a unique identifier used for lookup.
+type SignatureID int
+
+// String implements fmt.Stringer.
+func (s SignatureID) String() string {
+ return fmt.Sprintf("sig%d", s)
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/ssa.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/ssa.go
new file mode 100644
index 000000000..b477e58bd
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/ssa.go
@@ -0,0 +1,14 @@
+// Package ssa is used to construct SSA functions. By nature, this package is free of
+// Wasm-specific concepts and ISA details.
+//
+// We use the "block argument" variant of SSA: https://en.wikipedia.org/wiki/Static_single-assignment_form#Block_arguments
+// which is equivalent to the traditional PHI-function-based form, but more convenient during optimizations.
+// However, in this package's source code comments, we might use PHI whenever it seems necessary in order to be aligned with
+// the existing literature, e.g. SSA-level optimization algorithms are often described using PHI nodes.
+//
+// The rationale doc by LLVM's MLIR project on block arguments vs PHI nodes is worth a read:
+// https://mlir.llvm.org/docs/Rationale/Rationale/#block-arguments-vs-phi-nodes
+//
+// The algorithm to resolve variable definitions used here is based on the paper
+// "Simple and Efficient Construction of Static Single Assignment Form": https://link.springer.com/content/pdf/10.1007/978-3-642-37051-9_6.pdf.
+package ssa
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/type.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/type.go
new file mode 100644
index 000000000..e8c8cd9de
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/type.go
@@ -0,0 +1,112 @@
+package ssa
+
+// Type represents the type of an SSA Value.
+type Type byte
+
+const (
+ typeInvalid Type = iota
+
+ // TODO: add 8, 16 bit types when it's needed for optimizations.
+
+ // TypeI32 represents an integer type with 32 bits.
+ TypeI32
+
+ // TypeI64 represents an integer type with 64 bits.
+ TypeI64
+
+ // TypeF32 represents 32-bit IEEE 754 floating-point numbers.
+ TypeF32
+
+ // TypeF64 represents 64-bit IEEE 754 floating-point numbers.
+ TypeF64
+
+ // TypeV128 represents 128-bit SIMD vectors.
+ TypeV128
+)
+
+// String implements fmt.Stringer.
+func (t Type) String() (ret string) {
+ switch t {
+ case typeInvalid:
+ return "invalid"
+ case TypeI32:
+ return "i32"
+ case TypeI64:
+ return "i64"
+ case TypeF32:
+ return "f32"
+ case TypeF64:
+ return "f64"
+ case TypeV128:
+ return "v128"
+ default:
+ panic(int(t))
+ }
+}
+
+// IsInt returns true if the type is an integer type.
+func (t Type) IsInt() bool {
+ return t == TypeI32 || t == TypeI64
+}
+
+// IsFloat returns true if the type is a floating point type.
+func (t Type) IsFloat() bool {
+ return t == TypeF32 || t == TypeF64
+}
+
+// Bits returns the number of bits required to represent the type.
+func (t Type) Bits() byte {
+ switch t {
+ case TypeI32, TypeF32:
+ return 32
+ case TypeI64, TypeF64:
+ return 64
+ case TypeV128:
+ return 128
+ default:
+ panic(int(t))
+ }
+}
+
+// Size returns the number of bytes required to represent the type.
+func (t Type) Size() byte {
+ return t.Bits() / 8
+}
+
+func (t Type) invalid() bool {
+ return t == typeInvalid
+}
+
+// VecLane represents a lane in a SIMD vector.
+type VecLane byte
+
+const (
+ VecLaneInvalid VecLane = 1 + iota
+ VecLaneI8x16
+ VecLaneI16x8
+ VecLaneI32x4
+ VecLaneI64x2
+ VecLaneF32x4
+ VecLaneF64x2
+)
+
+// String implements fmt.Stringer.
+func (vl VecLane) String() (ret string) {
+ switch vl {
+ case VecLaneInvalid:
+ return "invalid"
+ case VecLaneI8x16:
+ return "i8x16"
+ case VecLaneI16x8:
+ return "i16x8"
+ case VecLaneI32x4:
+ return "i32x4"
+ case VecLaneI64x2:
+ return "i64x2"
+ case VecLaneF32x4:
+ return "f32x4"
+ case VecLaneF64x2:
+ return "f64x2"
+ default:
+ panic(int(vl))
+ }
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/vs.go b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/vs.go
new file mode 100644
index 000000000..bcf83cbf8
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/engine/wazevo/ssa/vs.go
@@ -0,0 +1,87 @@
+package ssa
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
+)
+
+// Variable is a unique identifier for a source program's variable and will correspond to
+// multiple ssa Value(s).
+//
+// For example, `Local 1` is a Variable in WebAssembly, and a new Value is created for it
+// whenever the function executes `local.set 1`.
+//
+// Variable is useful to track the SSA Values of a variable in the source program, and
+// can be used to find the corresponding latest SSA Value via Builder.FindValue.
+type Variable uint32
+
+// String implements fmt.Stringer.
+func (v Variable) String() string {
+ return fmt.Sprintf("var%d", v)
+}
+
+// Value represents an SSA value with type information. The relationship with Variable is 1:N (including 0);
+// that is, a single Value might correspond to multiple Variables.
+//
+// The upper 32 bits store the Type of this value.
+type Value uint64
+
+// ValueID is the lower 32 bits of a Value: the pure identifier of the Value without type info.
+type ValueID uint32
+
+const (
+ valueIDInvalid ValueID = math.MaxUint32
+ ValueInvalid Value = Value(valueIDInvalid)
+)
+
+// Format creates a debug string for this Value using the data stored in Builder.
+func (v Value) Format(b Builder) string {
+ if annotation, ok := b.(*builder).valueAnnotations[v.ID()]; ok {
+ return annotation
+ }
+ return fmt.Sprintf("v%d", v.ID())
+}
+
+func (v Value) formatWithType(b Builder) (ret string) {
+ if annotation, ok := b.(*builder).valueAnnotations[v.ID()]; ok {
+ ret = annotation + ":" + v.Type().String()
+ } else {
+ ret = fmt.Sprintf("v%d:%s", v.ID(), v.Type())
+ }
+
+ if wazevoapi.SSALoggingEnabled { // This is useful to check live value analysis bugs.
+ if bd := b.(*builder); bd.donePostBlockLayoutPasses {
+ id := v.ID()
+ ret += fmt.Sprintf("(ref=%d)", bd.valueRefCounts[id])
+ }
+ }
+ return ret
+}
+
+// Valid returns true if this value is valid.
+func (v Value) Valid() bool {
+ return v.ID() != valueIDInvalid
+}
+
+// Type returns the Type of this value.
+func (v Value) Type() Type {
+ return Type(v >> 32)
+}
+
+// ID returns the valueID of this value.
+func (v Value) ID() ValueID {
+ return ValueID(v)
+}
+
+// setType sets a type to this Value and returns the updated Value.
+func (v Value) setType(typ Type) Value {
+ return v | Value(typ)<<32
+}
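+
+// An illustrative sketch of the bit packing (not part of the original source):
+//
+//	v := Value(42).setType(TypeI64)
+//	_ = v.ID()    // ValueID(42): the low 32 bits.
+//	_ = v.Type()  // TypeI64: decoded from the upper 32 bits.
+//	_ = v.Valid() // true, since the ID is not math.MaxUint32.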
+
+// Values is a slice of Value. Use this instead of []Value to reuse the underlying memory.
+type Values = wazevoapi.VarLength[Value]
+
+// ValuesNil is a nil Values.
+var ValuesNil = wazevoapi.NewNilVarLength[Value]()