1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
|
package wazevo
import (
"context"
"fmt"
"reflect"
"runtime"
"sync/atomic"
"unsafe"
"github.com/tetratelabs/wazero/api"
"github.com/tetratelabs/wazero/experimental"
"github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi"
"github.com/tetratelabs/wazero/internal/expctxkeys"
"github.com/tetratelabs/wazero/internal/internalapi"
"github.com/tetratelabs/wazero/internal/wasm"
"github.com/tetratelabs/wazero/internal/wasmdebug"
"github.com/tetratelabs/wazero/internal/wasmruntime"
)
type (
	// callEngine implements api.Function.
	callEngine struct {
		internalapi.WazeroOnly
		// stack is the Go-allocated stack used by the compiled native code.
		stack []byte
		// stackTop is the pointer to the *aligned* top of the stack. This must be updated
		// whenever the stack is changed. This is passed to the assembly function
		// at the very beginning of api.Function Call/CallWithStack.
		stackTop uintptr
		// executable is the pointer to the executable code for this function.
		executable *byte
		// preambleExecutable is the pointer to the preamble which sets up the
		// native calling convention before jumping to executable.
		preambleExecutable *byte
		// parent is the *moduleEngine from which this callEngine is created.
		parent *moduleEngine
		// indexInModule is the index of the function in the module.
		indexInModule wasm.Index
		// sizeOfParamResultSlice is the size of the parameter/result slice.
		sizeOfParamResultSlice int
		// requiredParams is the number of parameters callers must pass to Call.
		requiredParams int
		// execCtx holds various information to be read/written by assembly functions.
		execCtx executionContext
		// execCtxPtr holds the pointer to the executionContext which doesn't change after callEngine is created.
		execCtxPtr uintptr
		// numberOfResults is the number of results this function returns.
		numberOfResults int
		// stackIteratorImpl is the reusable experimental.StackIterator handed to listeners.
		stackIteratorImpl stackIterator
	}

	// executionContext is the struct to be read/written by assembly functions.
	//
	// NOTE(review): the assembly entrypoints appear to address these fields by
	// offset, so field order/layout must not change — confirm against the
	// per-arch assembly before reordering anything here.
	executionContext struct {
		// exitCode holds the wazevoapi.ExitCode describing the state of the function execution.
		exitCode wazevoapi.ExitCode
		// callerModuleContextPtr holds the moduleContextOpaque for Go function calls.
		callerModuleContextPtr *byte
		// originalFramePointer holds the original frame pointer of the caller of the assembly function.
		originalFramePointer uintptr
		// originalStackPointer holds the original stack pointer of the caller of the assembly function.
		originalStackPointer uintptr
		// goReturnAddress holds the return address to go back to the caller of the assembly function.
		goReturnAddress uintptr
		// stackBottomPtr holds the pointer to the bottom of the stack.
		stackBottomPtr *byte
		// goCallReturnAddress holds the return address to go back to the caller of the Go function.
		goCallReturnAddress *byte
		// stackPointerBeforeGoCall holds the stack pointer before calling a Go function.
		stackPointerBeforeGoCall *uint64
		// stackGrowRequiredSize holds the required size of stack grow.
		stackGrowRequiredSize uintptr
		// memoryGrowTrampolineAddress holds the address of memory grow trampoline function.
		memoryGrowTrampolineAddress *byte
		// stackGrowCallTrampolineAddress holds the address of stack grow trampoline function.
		stackGrowCallTrampolineAddress *byte
		// checkModuleExitCodeTrampolineAddress holds the address of check-module-exit-code function.
		checkModuleExitCodeTrampolineAddress *byte
		// savedRegisters is the opaque spaces for save/restore registers.
		// We want to align 16 bytes for each register, so we use [64][2]uint64.
		savedRegisters [64][2]uint64
		// goFunctionCallCalleeModuleContextOpaque is the pointer to the target Go function's moduleContextOpaque.
		goFunctionCallCalleeModuleContextOpaque uintptr
		// tableGrowTrampolineAddress holds the address of table grow trampoline function.
		tableGrowTrampolineAddress *byte
		// refFuncTrampolineAddress holds the address of ref-func trampoline function.
		refFuncTrampolineAddress *byte
		// memmoveAddress holds the address of memmove function implemented by Go runtime. See memmove.go.
		memmoveAddress uintptr
		// framePointerBeforeGoCall holds the frame pointer before calling a Go function. Note: only used in amd64.
		framePointerBeforeGoCall uintptr
		// memoryWait32TrampolineAddress holds the address of memory_wait32 trampoline function.
		memoryWait32TrampolineAddress *byte
		// memoryWait64TrampolineAddress holds the address of memory_wait64 trampoline function.
		memoryWait64TrampolineAddress *byte
		// memoryNotifyTrampolineAddress holds the address of the memory_notify trampoline function.
		memoryNotifyTrampolineAddress *byte
	}
)
// requiredInitialStackSize returns the size in bytes of the initial stack to
// allocate for this callEngine: the default size, or more if the
// parameter/result area plus call-frame overhead would not fit.
func (c *callEngine) requiredInitialStackSize() int {
	const initialStackSizeDefault = 10240
	// 8 bytes per uint64 slot, times two because params and results occupy
	// separate regions; +32 for the call frame info, +16 as slack in case the
	// []byte backing store is not 16-byte aligned.
	required := c.sizeOfParamResultSlice*8*2 + 32 + 16
	if required < initialStackSizeDefault {
		return initialStackSizeDefault
	}
	return required
}
// init allocates the Go-managed stack for this callEngine and publishes its
// bottom pointer, aligned top, and the executionContext address for the
// assembly entrypoints to consume.
func (c *callEngine) init() {
	stackSize := c.requiredInitialStackSize()
	if wazevoapi.StackGuardCheckEnabled {
		// Reserve extra space at the low end so CheckStackGuardPage can detect
		// writes past the logical stack bottom.
		stackSize += wazevoapi.StackGuardCheckGuardPageSize
	}
	c.stack = make([]byte, stackSize)
	c.stackTop = alignedStackTop(c.stack)
	if wazevoapi.StackGuardCheckEnabled {
		// The logical bottom sits above the guard region.
		c.execCtx.stackBottomPtr = &c.stack[wazevoapi.StackGuardCheckGuardPageSize]
	} else {
		c.execCtx.stackBottomPtr = &c.stack[0]
	}
	// Cached once: the executionContext is embedded in callEngine, so its
	// address is stable for the lifetime of this callEngine.
	c.execCtxPtr = uintptr(unsafe.Pointer(&c.execCtx))
}
// alignedStackTop returns the address of the top of the given stack slice,
// rounded down to a 16-byte boundary. 16-byte alignment is sufficient for
// both arm64 and amd64 calling conventions.
func alignedStackTop(s []byte) uintptr {
	top := uintptr(unsafe.Pointer(&s[len(s)-1]))
	// Clear the low 4 bits to round down to a multiple of 16.
	return top &^ 15
}
// Definition implements api.Function.
func (c *callEngine) Definition() api.FunctionDefinition {
	source := c.parent.module.Source
	return source.FunctionDefinition(c.indexInModule)
}
// Call implements api.Function.
//
// Params are copied into a freshly allocated slice sized for both params and
// results, and the leading results are returned after the call completes.
func (c *callEngine) Call(ctx context.Context, params ...uint64) ([]uint64, error) {
	if len(params) != c.requiredParams {
		return nil, fmt.Errorf("expected %d params, but passed %d", c.requiredParams, len(params))
	}
	buf := make([]uint64, c.sizeOfParamResultSlice)
	copy(buf, params)
	err := c.callWithStack(ctx, buf)
	if err != nil {
		return nil, err
	}
	return buf[:c.numberOfResults], nil
}
// addFrame resolves the compiled module owning the return address addr,
// appends the corresponding stack frame (with DWARF source lines when
// available) to builder, and returns the function definition plus the
// listener registered for that function, if any. Both returns are nil when
// addr cannot be attributed to any known module.
func (c *callEngine) addFrame(builder wasmdebug.ErrorBuilder, addr uintptr) (def api.FunctionDefinition, listener experimental.FunctionListener) {
	eng := c.parent.parent.parent
	cm := eng.compiledModuleOfAddr(addr)
	if cm == nil {
		// This case, the module might have been closed and deleted from the engine.
		// We fall back to searching the imported modules that can be referenced from this callEngine.

		// First, we check itself.
		if checkAddrInBytes(addr, c.parent.parent.executable) {
			cm = c.parent.parent
		} else {
			// Otherwise, search all imported modules. TODO: maybe recursive, but not sure it's useful in practice.
			p := c.parent
			for i := range p.importedFunctions {
				candidate := p.importedFunctions[i].me.parent
				if checkAddrInBytes(addr, candidate.executable) {
					cm = candidate
					break
				}
			}
		}
	}

	if cm != nil {
		index := cm.functionIndexOf(addr)
		// functionIndexOf yields a local index; offset by the import count to
		// get the module-wide function index for definition lookup.
		def = cm.module.FunctionDefinition(cm.module.ImportFunctionCount + index)
		var sources []string
		if dw := cm.module.DWARFLines; dw != nil {
			sourceOffset := cm.getSourceOffset(addr)
			sources = dw.Line(sourceOffset)
		}
		builder.AddFrame(def.DebugName(), def.ParamTypes(), def.ResultTypes(), sources)
		if len(cm.listeners) > 0 {
			listener = cm.listeners[index]
		}
	}
	return
}
// CallWithStack implements api.Function.
//
// The supplied slice is used in place for both parameters and results, so it
// must be at least sizeOfParamResultSlice long.
func (c *callEngine) CallWithStack(ctx context.Context, paramResultStack []uint64) (err error) {
	if len(paramResultStack) < c.sizeOfParamResultSlice {
		return fmt.Errorf("need %d params, but stack size is %d", c.sizeOfParamResultSlice, len(paramResultStack))
	}
	return c.callWithStack(ctx, paramResultStack)
}
// callWithStack is the shared implementation behind Call and CallWithStack.
//
// It enters the compiled native code via entrypoint, then loops servicing
// "exit codes" the native code raises — stack growth, memory/table grow,
// host (Go) function calls, listener hooks, atomic waits/notify, and traps —
// resuming native execution with afterGoFunctionCallEntrypoint after each
// one, until the function returns (ExitCodeOK) or a trap panics.
func (c *callEngine) callWithStack(ctx context.Context, paramResultStack []uint64) (err error) {
	snapshotEnabled := ctx.Value(expctxkeys.EnableSnapshotterKey{}) != nil
	if snapshotEnabled {
		// Expose this callEngine as the Snapshotter for nested host calls.
		ctx = context.WithValue(ctx, expctxkeys.SnapshotterKey{}, c)
	}

	if wazevoapi.StackGuardCheckEnabled {
		defer func() {
			wazevoapi.CheckStackGuardPage(c.stack)
		}()
	}

	p := c.parent
	ensureTermination := p.parent.ensureTermination
	m := p.module
	if ensureTermination {
		select {
		case <-ctx.Done():
			// If the provided context is already done, close the module and return the error.
			m.CloseWithCtxErr(ctx)
			return m.FailIfClosed()
		default:
		}
	}

	var paramResultPtr *uint64
	if len(paramResultStack) > 0 {
		paramResultPtr = &paramResultStack[0]
	}

	// On panic (wasm trap, host panic, or snapshot restore escaping), build a
	// wasm-aware error with a stack trace, notify any listeners via Abort, and
	// reset the exit code so this callEngine remains reusable.
	defer func() {
		r := recover()
		if s, ok := r.(*snapshot); ok {
			// A snapshot that wasn't handled was created by a different call engine possibly from a nested wasm invocation,
			// let it propagate up to be handled by the caller.
			panic(s)
		}
		if r != nil {
			type listenerForAbort struct {
				def api.FunctionDefinition
				lsn experimental.FunctionListener
			}

			var listeners []listenerForAbort
			builder := wasmdebug.NewErrorBuilder()
			// The innermost frame is identified by the Go-call return address.
			def, lsn := c.addFrame(builder, uintptr(unsafe.Pointer(c.execCtx.goCallReturnAddress)))
			if lsn != nil {
				listeners = append(listeners, listenerForAbort{def, lsn})
			}
			// Walk the remaining native frames via the saved SP/FP.
			returnAddrs := unwindStack(
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)),
				c.execCtx.framePointerBeforeGoCall,
				c.stackTop,
				nil,
			)
			for _, retAddr := range returnAddrs[:len(returnAddrs)-1] { // the last return addr is the trampoline, so we skip it.
				def, lsn = c.addFrame(builder, retAddr)
				if lsn != nil {
					listeners = append(listeners, listenerForAbort{def, lsn})
				}
			}
			err = builder.FromRecovered(r)
			for _, lsn := range listeners {
				lsn.lsn.Abort(ctx, m, lsn.def, err)
			}
		} else {
			if err != wasmruntime.ErrRuntimeStackOverflow { // Stackoverflow case shouldn't be panic (to avoid extreme stack unwinding).
				err = c.parent.module.FailIfClosed()
			}
		}

		if err != nil {
			// Ensures that we can reuse this callEngine even after an error.
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
		}
	}()

	if ensureTermination {
		done := m.CloseModuleOnCanceledOrTimeout(ctx)
		defer done()
	}

	if c.stackTop&(16-1) != 0 {
		panic("BUG: stack must be aligned to 16 bytes")
	}
	entrypoint(c.preambleExecutable, c.executable, c.execCtxPtr, c.parent.opaquePtr, paramResultPtr, c.stackTop)
	for {
		// The low bits of the exit code carry the reason; for Go-function
		// calls the high bits additionally carry the function index.
		switch ec := c.execCtx.exitCode; ec & wazevoapi.ExitCodeMask {
		case wazevoapi.ExitCodeOK:
			return nil
		case wazevoapi.ExitCodeGrowStack:
			oldsp := uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall))
			oldTop := c.stackTop
			oldStack := c.stack
			var newsp, newfp uintptr
			if wazevoapi.StackGuardCheckEnabled {
				newsp, newfp, err = c.growStackWithGuarded()
			} else {
				newsp, newfp, err = c.growStack()
			}
			if err != nil {
				return err
			}
			// Rewrite frame pointers on the cloned stack so they point into
			// the new allocation instead of the old one.
			adjustClonedStack(oldsp, oldTop, newsp, newfp, c.stackTop)
			// Old stack must be alive until the new stack is adjusted.
			runtime.KeepAlive(oldStack)
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr, newsp, newfp)
		case wazevoapi.ExitCodeGrowMemory:
			mod := c.callerModuleInstance()
			mem := mod.MemoryInstance
			s := goCallStackView(c.execCtx.stackPointerBeforeGoCall)
			argRes := &s[0]
			if res, ok := mem.Grow(uint32(*argRes)); !ok {
				*argRes = uint64(0xffffffff) // = -1 in signed 32-bit integer.
			} else {
				*argRes = uint64(res)
			}
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr, uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeTableGrow:
			mod := c.callerModuleInstance()
			s := goCallStackView(c.execCtx.stackPointerBeforeGoCall)
			tableIndex, num, ref := uint32(s[0]), uint32(s[1]), uintptr(s[2])
			table := mod.Tables[tableIndex]
			s[0] = uint64(uint32(int32(table.Grow(num, ref))))
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeCallGoFunction:
			index := wazevoapi.GoFunctionIndexFromExitCode(ec)
			f := hostModuleGoFuncFromOpaque[api.GoFunction](index, c.execCtx.goFunctionCallCalleeModuleContextOpaque)
			func() {
				// A snapshot restore panics through the host call; recover and
				// apply it here when snapshotting is enabled.
				if snapshotEnabled {
					defer snapshotRecoverFn(c)
				}
				f.Call(ctx, goCallStackView(c.execCtx.stackPointerBeforeGoCall))
			}()
			// Back to the native code.
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeCallGoFunctionWithListener:
			index := wazevoapi.GoFunctionIndexFromExitCode(ec)
			f := hostModuleGoFuncFromOpaque[api.GoFunction](index, c.execCtx.goFunctionCallCalleeModuleContextOpaque)
			listeners := hostModuleListenersSliceFromOpaque(c.execCtx.goFunctionCallCalleeModuleContextOpaque)
			s := goCallStackView(c.execCtx.stackPointerBeforeGoCall)
			// Call Listener.Before.
			callerModule := c.callerModuleInstance()
			listener := listeners[index]
			hostModule := hostModuleFromOpaque(c.execCtx.goFunctionCallCalleeModuleContextOpaque)
			def := hostModule.FunctionDefinition(wasm.Index(index))
			listener.Before(ctx, callerModule, def, s, c.stackIterator(true))
			// Call into the Go function.
			func() {
				if snapshotEnabled {
					defer snapshotRecoverFn(c)
				}
				f.Call(ctx, s)
			}()
			// Call Listener.After.
			listener.After(ctx, callerModule, def, s)
			// Back to the native code.
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeCallGoModuleFunction:
			index := wazevoapi.GoFunctionIndexFromExitCode(ec)
			f := hostModuleGoFuncFromOpaque[api.GoModuleFunction](index, c.execCtx.goFunctionCallCalleeModuleContextOpaque)
			mod := c.callerModuleInstance()
			func() {
				if snapshotEnabled {
					defer snapshotRecoverFn(c)
				}
				f.Call(ctx, mod, goCallStackView(c.execCtx.stackPointerBeforeGoCall))
			}()
			// Back to the native code.
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeCallGoModuleFunctionWithListener:
			index := wazevoapi.GoFunctionIndexFromExitCode(ec)
			f := hostModuleGoFuncFromOpaque[api.GoModuleFunction](index, c.execCtx.goFunctionCallCalleeModuleContextOpaque)
			listeners := hostModuleListenersSliceFromOpaque(c.execCtx.goFunctionCallCalleeModuleContextOpaque)
			s := goCallStackView(c.execCtx.stackPointerBeforeGoCall)
			// Call Listener.Before.
			callerModule := c.callerModuleInstance()
			listener := listeners[index]
			hostModule := hostModuleFromOpaque(c.execCtx.goFunctionCallCalleeModuleContextOpaque)
			def := hostModule.FunctionDefinition(wasm.Index(index))
			listener.Before(ctx, callerModule, def, s, c.stackIterator(true))
			// Call into the Go function.
			func() {
				if snapshotEnabled {
					defer snapshotRecoverFn(c)
				}
				f.Call(ctx, callerModule, s)
			}()
			// Call Listener.After.
			listener.After(ctx, callerModule, def, s)
			// Back to the native code.
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeCallListenerBefore:
			stack := goCallStackView(c.execCtx.stackPointerBeforeGoCall)
			index := wasm.Index(stack[0])
			mod := c.callerModuleInstance()
			listener := mod.Engine.(*moduleEngine).listeners[index]
			def := mod.Source.FunctionDefinition(index + mod.Source.ImportFunctionCount)
			// stack[0] is the function index; the actual params start at stack[1].
			listener.Before(ctx, mod, def, stack[1:], c.stackIterator(false))
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeCallListenerAfter:
			stack := goCallStackView(c.execCtx.stackPointerBeforeGoCall)
			index := wasm.Index(stack[0])
			mod := c.callerModuleInstance()
			listener := mod.Engine.(*moduleEngine).listeners[index]
			def := mod.Source.FunctionDefinition(index + mod.Source.ImportFunctionCount)
			listener.After(ctx, mod, def, stack[1:])
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeCheckModuleExitCode:
			// Note: this operation must be done in Go, not native code. The reason is that
			// native code cannot be preempted and that means it can block forever if there are not
			// enough OS threads (which we don't have control over).
			if err := m.FailIfClosed(); err != nil {
				panic(err)
			}
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeRefFunc:
			mod := c.callerModuleInstance()
			s := goCallStackView(c.execCtx.stackPointerBeforeGoCall)
			funcIndex := wasm.Index(s[0])
			ref := mod.Engine.FunctionInstanceReference(funcIndex)
			s[0] = uint64(ref)
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeMemoryWait32:
			mod := c.callerModuleInstance()
			mem := mod.MemoryInstance
			if !mem.Shared {
				panic(wasmruntime.ErrRuntimeExpectedSharedMemory)
			}

			s := goCallStackView(c.execCtx.stackPointerBeforeGoCall)
			timeout, exp, addr := int64(s[0]), uint32(s[1]), uintptr(s[2])
			// addr is an absolute pointer into the memory buffer; convert back
			// to an offset for the MemoryInstance API.
			base := uintptr(unsafe.Pointer(&mem.Buffer[0]))
			offset := uint32(addr - base)
			res := mem.Wait32(offset, exp, timeout, func(mem *wasm.MemoryInstance, offset uint32) uint32 {
				addr := unsafe.Add(unsafe.Pointer(&mem.Buffer[0]), offset)
				return atomic.LoadUint32((*uint32)(addr))
			})
			s[0] = res
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeMemoryWait64:
			mod := c.callerModuleInstance()
			mem := mod.MemoryInstance
			if !mem.Shared {
				panic(wasmruntime.ErrRuntimeExpectedSharedMemory)
			}

			s := goCallStackView(c.execCtx.stackPointerBeforeGoCall)
			timeout, exp, addr := int64(s[0]), uint64(s[1]), uintptr(s[2])
			base := uintptr(unsafe.Pointer(&mem.Buffer[0]))
			offset := uint32(addr - base)
			res := mem.Wait64(offset, exp, timeout, func(mem *wasm.MemoryInstance, offset uint32) uint64 {
				addr := unsafe.Add(unsafe.Pointer(&mem.Buffer[0]), offset)
				return atomic.LoadUint64((*uint64)(addr))
			})
			s[0] = uint64(res)
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		case wazevoapi.ExitCodeMemoryNotify:
			mod := c.callerModuleInstance()
			mem := mod.MemoryInstance

			s := goCallStackView(c.execCtx.stackPointerBeforeGoCall)
			count, addr := uint32(s[0]), s[1]
			offset := uint32(uintptr(addr) - uintptr(unsafe.Pointer(&mem.Buffer[0])))
			res := mem.Notify(offset, count)
			s[0] = uint64(res)
			c.execCtx.exitCode = wazevoapi.ExitCodeOK
			afterGoFunctionCallEntrypoint(c.execCtx.goCallReturnAddress, c.execCtxPtr,
				uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall)
		// Trap exit codes: panic with the corresponding wasmruntime error; the
		// deferred recovery above converts it to an error with a stack trace.
		case wazevoapi.ExitCodeUnreachable:
			panic(wasmruntime.ErrRuntimeUnreachable)
		case wazevoapi.ExitCodeMemoryOutOfBounds:
			panic(wasmruntime.ErrRuntimeOutOfBoundsMemoryAccess)
		case wazevoapi.ExitCodeTableOutOfBounds:
			panic(wasmruntime.ErrRuntimeInvalidTableAccess)
		case wazevoapi.ExitCodeIndirectCallNullPointer:
			panic(wasmruntime.ErrRuntimeInvalidTableAccess)
		case wazevoapi.ExitCodeIndirectCallTypeMismatch:
			panic(wasmruntime.ErrRuntimeIndirectCallTypeMismatch)
		case wazevoapi.ExitCodeIntegerOverflow:
			panic(wasmruntime.ErrRuntimeIntegerOverflow)
		case wazevoapi.ExitCodeIntegerDivisionByZero:
			panic(wasmruntime.ErrRuntimeIntegerDivideByZero)
		case wazevoapi.ExitCodeInvalidConversionToInteger:
			panic(wasmruntime.ErrRuntimeInvalidConversionToInteger)
		case wazevoapi.ExitCodeUnalignedAtomic:
			panic(wasmruntime.ErrRuntimeUnalignedAtomic)
		default:
			panic("BUG")
		}
	}
}
// callerModuleInstance returns the module instance of the current caller,
// recovered from the opaque module context pointer that the native code
// writes into the executionContext before exiting to Go.
func (c *callEngine) callerModuleInstance() *wasm.ModuleInstance {
	return moduleInstanceFromOpaquePtr(c.execCtx.callerModuleContextPtr)
}
const callStackCeiling = uintptr(50000000) // in uint64 (8 bytes) == 400000000 bytes in total == 400mb.
// growStackWithGuarded is growStack plus guard-page bookkeeping: it validates
// the guard page before growing, and re-publishes the logical stack bottom
// (above the guard region) after a successful grow.
func (c *callEngine) growStackWithGuarded() (newSP uintptr, newFP uintptr, err error) {
	if wazevoapi.StackGuardCheckEnabled {
		wazevoapi.CheckStackGuardPage(c.stack)
	}
	newSP, newFP, err = c.growStack()
	if err == nil && wazevoapi.StackGuardCheckEnabled {
		c.execCtx.stackBottomPtr = &c.stack[wazevoapi.StackGuardCheckGuardPageSize]
	}
	return
}
// growStack grows the stack, and returns the new stack pointer and frame
// pointer relocated into the freshly cloned stack. It fails with
// ErrRuntimeStackOverflow once the current stack exceeds callStackCeiling.
func (c *callEngine) growStack() (newSP, newFP uintptr, err error) {
	cur := uintptr(len(c.stack))
	if cur > callStackCeiling {
		return 0, 0, wasmruntime.ErrRuntimeStackOverflow
	}

	// Double the current size plus whatever the native code asked for;
	// +16 because alignment may shave up to 15 bytes off the usable top.
	grown := 2*cur + c.execCtx.stackGrowRequiredSize + 16
	newSP, newFP, c.stackTop, c.stack = c.cloneStack(grown)
	c.execCtx.stackBottomPtr = &c.stack[0]
	return newSP, newFP, nil
}
// cloneStack allocates a new stack of l bytes and copies the in-use region of
// the current stack (from the saved stack pointer up to stackTop) into the
// same relative position under the new aligned top. It returns the relocated
// stack pointer, frame pointer, aligned top, and the new backing slice.
//
// Note: frame pointers stored *within* the copied region still point into the
// old stack; callers must fix them up (see adjustClonedStack).
func (c *callEngine) cloneStack(l uintptr) (newSP, newFP, newTop uintptr, newStack []byte) {
	newStack = make([]byte, l)

	// Distances of SP/FP from the aligned top; these are preserved across the
	// clone so the copied frames keep their layout.
	relSp := c.stackTop - uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall))
	relFp := c.stackTop - c.execCtx.framePointerBeforeGoCall

	// Copy the existing contents in the previous Go-allocated stack into the new one.
	var prevStackAligned, newStackAligned []byte
	{
		//nolint:staticcheck
		sh := (*reflect.SliceHeader)(unsafe.Pointer(&prevStackAligned))
		sh.Data = c.stackTop - relSp
		sh.Len = int(relSp)
		sh.Cap = int(relSp)
	}
	newTop = alignedStackTop(newStack)
	{
		newSP = newTop - relSp
		newFP = newTop - relFp
		//nolint:staticcheck
		sh := (*reflect.SliceHeader)(unsafe.Pointer(&newStackAligned))
		sh.Data = newSP
		sh.Len = int(relSp)
		sh.Cap = int(relSp)
	}
	copy(newStackAligned, prevStackAligned)
	return
}
// stackIterator returns the callEngine's reusable stack iterator, reset to
// walk the frames of the call currently suspended in native code.
func (c *callEngine) stackIterator(onHostCall bool) experimental.StackIterator {
	it := &c.stackIteratorImpl
	it.reset(c, onHostCall)
	return it
}
// stackIterator implements experimental.StackIterator.
type stackIterator struct {
	// retAddrs holds the return addresses collected by unwinding the native stack.
	retAddrs []uintptr
	// retAddrCursor is the index of the next entry of retAddrs to visit.
	retAddrCursor int
	// eng is the owning engine, used to map addresses back to compiled modules.
	eng *engine
	// pc is the program counter of the frame Next most recently yielded.
	pc uint64
	// currentDef is the function definition of the frame Next most recently yielded.
	currentDef *wasm.FunctionDefinition
}
// reset re-seeds the iterator from c's saved native state. When onHostCall is
// true, the Go-call return address is included as the innermost frame.
func (si *stackIterator) reset(c *callEngine, onHostCall bool) {
	si.retAddrs = si.retAddrs[:0]
	if onHostCall {
		si.retAddrs = append(si.retAddrs, uintptr(unsafe.Pointer(c.execCtx.goCallReturnAddress)))
	}
	si.retAddrs = unwindStack(uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall)), c.execCtx.framePointerBeforeGoCall, c.stackTop, si.retAddrs)
	// The final entry is the trampoline's return address, not a wasm frame; drop it.
	si.retAddrs = si.retAddrs[:len(si.retAddrs)-1]
	si.retAddrCursor = 0
	si.eng = c.parent.parent.parent
}
// Next implements the same method as documented on experimental.StackIterator.
//
// Iteration also stops early if an address cannot be attributed to any
// compiled module known to the engine.
func (si *stackIterator) Next() bool {
	if si.retAddrCursor >= len(si.retAddrs) {
		return false
	}

	frameAddr := si.retAddrs[si.retAddrCursor]
	cm := si.eng.compiledModuleOfAddr(frameAddr)
	if cm == nil {
		return false
	}

	localIndex := cm.functionIndexOf(frameAddr)
	si.currentDef = cm.module.FunctionDefinition(cm.module.ImportFunctionCount + localIndex)
	si.pc = uint64(frameAddr)
	si.retAddrCursor++
	return true
}
// ProgramCounter implements the same method as documented on experimental.StackIterator.
// It reports the return address of the frame most recently yielded by Next.
func (si *stackIterator) ProgramCounter() experimental.ProgramCounter {
	return experimental.ProgramCounter(si.pc)
}
// Function implements the same method as documented on experimental.StackIterator.
// The iterator itself satisfies experimental.InternalFunction, so it returns itself.
func (si *stackIterator) Function() experimental.InternalFunction {
	return si
}
// Definition implements the same method as documented on experimental.InternalFunction.
// It reports the definition of the frame most recently yielded by Next.
func (si *stackIterator) Definition() api.FunctionDefinition {
	return si.currentDef
}
// SourceOffsetForPC implements the same method as documented on experimental.InternalFunction.
//
// Returns 0 when pc cannot be attributed to any compiled module known to the
// engine (e.g. the module was closed while frames referencing it were live).
// The sibling addFrame handles that nil case explicitly; without this guard,
// cm.getSourceOffset would dereference a nil pointer.
func (si *stackIterator) SourceOffsetForPC(pc experimental.ProgramCounter) uint64 {
	upc := uintptr(pc)
	cm := si.eng.compiledModuleOfAddr(upc)
	if cm == nil {
		return 0
	}
	return cm.getSourceOffset(upc)
}
// snapshot implements experimental.Snapshot
type snapshot struct {
	// sp, fp, top are the stack pointer, frame pointer, and aligned stack top
	// captured (relocated into the cloned stack) at Snapshot time.
	sp, fp, top uintptr
	// returnAddress is the Go-call return address at Snapshot time.
	returnAddress *byte
	// stack is the cloned copy of the Go-allocated stack.
	stack []byte
	// savedRegisters is a copy of the register save area at Snapshot time.
	savedRegisters [64][2]uint64
	// ret holds the values to place on the Go-call stack view on Restore.
	ret []uint64
	// c is the callEngine this snapshot was taken from; Restore is only valid
	// on the same engine (see snapshotRecoverFn).
	c *callEngine
}
// Snapshot implements the same method as documented on experimental.Snapshotter.
//
// It clones the current stack (with a little headroom) and fixes up the
// cloned frame pointers so the snapshot remains valid even after the live
// stack is mutated or grown.
func (c *callEngine) Snapshot() experimental.Snapshot {
	returnAddress := c.execCtx.goCallReturnAddress
	oldTop, oldSp := c.stackTop, uintptr(unsafe.Pointer(c.execCtx.stackPointerBeforeGoCall))
	newSP, newFP, newTop, newStack := c.cloneStack(uintptr(len(c.stack)) + 16)
	// Frame pointers inside the clone still reference the old stack; rewrite
	// them to point into the clone before the old stack can change.
	adjustClonedStack(oldSp, oldTop, newSP, newFP, newTop)
	return &snapshot{
		sp:             newSP,
		fp:             newFP,
		top:            newTop,
		savedRegisters: c.execCtx.savedRegisters,
		returnAddress:  returnAddress,
		stack:          newStack,
		c:              c,
	}
}
// Restore implements the same method as documented on experimental.Snapshot.
//
// It panics with the snapshot itself as the value; snapshotRecoverFn (deferred
// around host calls) recovers it and applies doRestore on the owning engine.
// A snapshot that escapes to a different engine propagates as an error.
func (s *snapshot) Restore(ret []uint64) {
	s.ret = ret
	panic(s)
}
// doRestore swaps the snapshot's cloned stack and saved execution state back
// into the callEngine, and writes s.ret into the Go-call stack view so the
// resumed native code observes them as the host call's results.
func (s *snapshot) doRestore() {
	// Reinterpret the saved stack pointer as *uint64 to obtain the stack view.
	spp := *(**uint64)(unsafe.Pointer(&s.sp))
	view := goCallStackView(spp)
	copy(view, s.ret)

	c := s.c
	c.stack = s.stack
	c.stackTop = s.top
	ec := &c.execCtx
	ec.stackBottomPtr = &c.stack[0]
	ec.stackPointerBeforeGoCall = spp
	ec.framePointerBeforeGoCall = s.fp
	ec.goCallReturnAddress = s.returnAddress
	ec.savedRegisters = s.savedRegisters
}
// Error implements the same method on error.
//
// A snapshot doubles as an error so that, when Restore is called from the
// wrong exported-function invocation and the panic escapes to the top-level
// recovery, the user sees this message instead of an opaque panic value.
func (s *snapshot) Error() string {
	return "unhandled snapshot restore, this generally indicates restore was called from a different " +
		"exported function invocation than snapshot"
}
// snapshotRecoverFn is deferred around host-function calls when snapshotting
// is enabled. It catches a panicking *snapshot belonging to c and applies it;
// any other panic value (or a snapshot from a different engine) is re-raised.
func snapshotRecoverFn(c *callEngine) {
	r := recover()
	if r == nil {
		return
	}
	s, ok := r.(*snapshot)
	if !ok || s.c != c {
		panic(r)
	}
	s.doRestore()
}
|