summaryrefslogtreecommitdiff
path: root/vendor/modernc.org/libc
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/modernc.org/libc')
-rw-r--r--vendor/modernc.org/libc/Makefile8
-rw-r--r--vendor/modernc.org/libc/abi0_linux_amd64.go (renamed from vendor/modernc.org/libc/asm_linux_amd64.go)1805
-rw-r--r--vendor/modernc.org/libc/abi0_linux_amd64.s (renamed from vendor/modernc.org/libc/asm_linux_amd64.s)8335
-rw-r--r--vendor/modernc.org/libc/ccgo_linux_386.go305
-rw-r--r--vendor/modernc.org/libc/ccgo_linux_amd64.go303
-rw-r--r--vendor/modernc.org/libc/ccgo_linux_arm.go311
-rw-r--r--vendor/modernc.org/libc/ccgo_linux_arm64.go297
-rw-r--r--vendor/modernc.org/libc/ccgo_linux_loong64.go241
-rw-r--r--vendor/modernc.org/libc/ccgo_linux_ppc64le.go320
-rw-r--r--vendor/modernc.org/libc/ccgo_linux_riscv64.go241
-rw-r--r--vendor/modernc.org/libc/ccgo_linux_s390x.go297
-rw-r--r--vendor/modernc.org/libc/libc.go6
-rw-r--r--vendor/modernc.org/libc/libc_all.go26
-rw-r--r--vendor/modernc.org/libc/libc_freebsd.go8
-rw-r--r--vendor/modernc.org/libc/tls_linux_amd64.go14
-rw-r--r--vendor/modernc.org/libc/tls_linux_amd64.s16
16 files changed, 10874 insertions, 1659 deletions
diff --git a/vendor/modernc.org/libc/Makefile b/vendor/modernc.org/libc/Makefile
index 0365ae772..507d4ef95 100644
--- a/vendor/modernc.org/libc/Makefile
+++ b/vendor/modernc.org/libc/Makefile
@@ -2,7 +2,7 @@
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
-.PHONY: all build_all_targets check clean download edit editor generate dev membrk-test test work xtest short-test xlibc libc-test surface
+.PHONY: all build_all_targets check clean download edit editor generate dev membrk-test test work xtest short-test xlibc libc-test surface vet
SHELL=/bin/bash -o pipefail
@@ -14,7 +14,7 @@ all: editor
golint 2>&1
staticcheck 2>&1
-build_all_targets:
+build_all_targets: vet
./build_all_targets.sh
echo done
@@ -38,7 +38,6 @@ editor:
# gofmt -l -s -w *.go
go test -c -o /dev/null
go build -o /dev/null -v generator*.go
- go build -o /dev/null -v genasm.go
go vet 2>&1 | grep -n 'asm_' || true
generate: download
@@ -115,3 +114,6 @@ work:
surface:
surface > surface.new
surface surface.old surface.new > log-todo-surface || true
+
+vet:
+ go vet 2>&1 | grep abi0 | grep -v 'misuse' || true
diff --git a/vendor/modernc.org/libc/asm_linux_amd64.go b/vendor/modernc.org/libc/abi0_linux_amd64.go
index 8ed09ec2f..379224800 100644
--- a/vendor/modernc.org/libc/asm_linux_amd64.go
+++ b/vendor/modernc.org/libc/abi0_linux_amd64.go
@@ -1,18 +1,274 @@
-// Code generated for linux/amd64 by 'genasm', DO NOT EDIT.
+// Code generated for linux/amd64 by 'qbecc --abi0wrap .', DO NOT EDIT.
package libc
+import "unsafe"
+
+var _ unsafe.Pointer
+
+//go:noescape
+func Y_Exit(tls *TLS, ec int32)
+
+//go:noescape
+func Y_IO_feof_unlocked(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y_IO_ferror_unlocked(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y_IO_getc(tls *TLS, f1 uintptr) (r int32)
+
+//go:noescape
+func Y_IO_getc_unlocked(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y_IO_putc(tls *TLS, c1 int32, f1 uintptr) (r int32)
+
+//go:noescape
+func Y_IO_putc_unlocked(tls *TLS, c int32, f uintptr) (r int32)
+
+//go:noescape
+func Y___errno_location(tls *TLS) (r uintptr)
+
+//go:noescape
+func Y__aio_close(tls *TLS, fd int32) (_2 int32)
+
+//go:noescape
+func Y__asctime_r(tls *TLS, tm uintptr, buf uintptr) (r uintptr)
+
//go:noescape
func Y__assert_fail(tls *TLS, expr uintptr, file uintptr, line int32, func1 uintptr)
//go:noescape
+func Y__atomic_compare_exchangeInt16(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+
+//go:noescape
+func Y__atomic_compare_exchangeInt32(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+
+//go:noescape
+func Y__atomic_compare_exchangeInt64(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+
+//go:noescape
+func Y__atomic_compare_exchangeInt8(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+
+//go:noescape
+func Y__atomic_compare_exchangeUint16(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+
+//go:noescape
+func Y__atomic_compare_exchangeUint32(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+
+//go:noescape
+func Y__atomic_compare_exchangeUint64(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+
+//go:noescape
+func Y__atomic_compare_exchangeUint8(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+
+//go:noescape
+func Y__atomic_exchangeInt16(t *TLS, ptr, val, ret uintptr, _ int32)
+
+//go:noescape
+func Y__atomic_exchangeInt32(t *TLS, ptr, val, ret uintptr, _ int32)
+
+//go:noescape
+func Y__atomic_exchangeInt64(t *TLS, ptr, val, ret uintptr, _ int32)
+
+//go:noescape
+func Y__atomic_exchangeInt8(t *TLS, ptr, val, ret uintptr, _ int32)
+
+//go:noescape
+func Y__atomic_exchangeUint16(t *TLS, ptr, val, ret uintptr, _ int32)
+
+//go:noescape
+func Y__atomic_exchangeUint32(t *TLS, ptr, val, ret uintptr, _ int32)
+
+//go:noescape
+func Y__atomic_exchangeUint64(t *TLS, ptr, val, ret uintptr, _ int32)
+
+//go:noescape
+func Y__atomic_exchangeUint8(t *TLS, ptr, val, ret uintptr, _ int32)
+
+//go:noescape
+func Y__atomic_fetch_addInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+
+//go:noescape
+func Y__atomic_fetch_addInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+
+//go:noescape
+func Y__atomic_fetch_addInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+
+//go:noescape
+func Y__atomic_fetch_addInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+
+//go:noescape
+func Y__atomic_fetch_addUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+
+//go:noescape
+func Y__atomic_fetch_addUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+
+//go:noescape
+func Y__atomic_fetch_addUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+
+//go:noescape
+func Y__atomic_fetch_addUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+
+//go:noescape
+func Y__atomic_fetch_andInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+
+//go:noescape
+func Y__atomic_fetch_andInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+
+//go:noescape
+func Y__atomic_fetch_andInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+
+//go:noescape
+func Y__atomic_fetch_andInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+
+//go:noescape
+func Y__atomic_fetch_andUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+
+//go:noescape
+func Y__atomic_fetch_andUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+
+//go:noescape
+func Y__atomic_fetch_andUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+
+//go:noescape
+func Y__atomic_fetch_andUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+
+//go:noescape
+func Y__atomic_fetch_orInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+
+//go:noescape
+func Y__atomic_fetch_orInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+
+//go:noescape
+func Y__atomic_fetch_orInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+
+//go:noescape
+func Y__atomic_fetch_orInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+
+//go:noescape
+func Y__atomic_fetch_orUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+
+//go:noescape
+func Y__atomic_fetch_orUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+
+//go:noescape
+func Y__atomic_fetch_orUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+
+//go:noescape
+func Y__atomic_fetch_orUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+
+//go:noescape
+func Y__atomic_fetch_subInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+
+//go:noescape
+func Y__atomic_fetch_subInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+
+//go:noescape
+func Y__atomic_fetch_subInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+
+//go:noescape
+func Y__atomic_fetch_subInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+
+//go:noescape
+func Y__atomic_fetch_subUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+
+//go:noescape
+func Y__atomic_fetch_subUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+
+//go:noescape
+func Y__atomic_fetch_subUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+
+//go:noescape
+func Y__atomic_fetch_subUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+
+//go:noescape
+func Y__atomic_fetch_xorInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+
+//go:noescape
+func Y__atomic_fetch_xorInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+
+//go:noescape
+func Y__atomic_fetch_xorInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+
+//go:noescape
+func Y__atomic_fetch_xorInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+
+//go:noescape
+func Y__atomic_fetch_xorUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+
+//go:noescape
+func Y__atomic_fetch_xorUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+
+//go:noescape
+func Y__atomic_fetch_xorUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+
+//go:noescape
+func Y__atomic_fetch_xorUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+
+//go:noescape
+func Y__atomic_loadInt16(t *TLS, ptr, ret uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_loadInt32(t *TLS, ptr, ret uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_loadInt64(t *TLS, ptr, ret uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_loadInt8(t *TLS, ptr, ret uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_loadUint16(t *TLS, ptr, ret uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_loadUint32(t *TLS, ptr, ret uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_loadUint64(t *TLS, ptr, ret uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_loadUint8(t *TLS, ptr, ret uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_storeInt16(t *TLS, ptr, val uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_storeInt32(t *TLS, ptr, val uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_storeInt64(t *TLS, ptr, val uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_storeInt8(t *TLS, ptr, val uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_storeUint16(t *TLS, ptr, val uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_storeUint32(t *TLS, ptr, val uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_storeUint64(t *TLS, ptr, val uintptr, memorder int32)
+
+//go:noescape
+func Y__atomic_storeUint8(t *TLS, ptr, val uintptr, memorder int32)
+
+//go:noescape
+func Y__block_all_sigs(tls *TLS, set uintptr)
+
+//go:noescape
+func Y__block_app_sigs(tls *TLS, set uintptr)
+
+//go:noescape
func Y__builtin___memcpy_chk(t *TLS, dest, src uintptr, n, os Tsize_t) (r uintptr)
//go:noescape
-func Y__builtin___memmove_chk(t *TLS, dest, src uintptr, n, os Tsize_t) uintptr
+func Y__builtin___memmove_chk(t *TLS, dest, src uintptr, n, os Tsize_t) (_3 uintptr)
//go:noescape
-func Y__builtin___memset_chk(t *TLS, s uintptr, c int32, n, os Tsize_t) uintptr
+func Y__builtin___memset_chk(t *TLS, s uintptr, c int32, n, os Tsize_t) (_4 uintptr)
//go:noescape
func Y__builtin___snprintf_chk(t *TLS, str uintptr, maxlen Tsize_t, flag int32, os Tsize_t, format, args uintptr) (r int32)
@@ -24,7 +280,7 @@ func Y__builtin___sprintf_chk(t *TLS, s uintptr, flag int32, os Tsize_t, format,
func Y__builtin___strcat_chk(t *TLS, dest, src uintptr, os Tsize_t) (r uintptr)
//go:noescape
-func Y__builtin___strcpy_chk(t *TLS, dest, src uintptr, os Tsize_t) uintptr
+func Y__builtin___strcpy_chk(t *TLS, dest, src uintptr, os Tsize_t) (_3 uintptr)
//go:noescape
func Y__builtin___strncpy_chk(t *TLS, dest, src uintptr, n, os Tsize_t) (r uintptr)
@@ -36,70 +292,70 @@ func Y__builtin___vsnprintf_chk(t *TLS, str uintptr, maxlen Tsize_t, flag int32,
func Y__builtin_abort(t *TLS)
//go:noescape
-func Y__builtin_abs(t *TLS, j int32) int32
+func Y__builtin_abs(t *TLS, j int32) (_2 int32)
//go:noescape
-func Y__builtin_add_overflowInt64(t *TLS, a, b int64, res uintptr) int32
+func Y__builtin_add_overflowInt64(t *TLS, a, b int64, res uintptr) (_3 int32)
//go:noescape
-func Y__builtin_add_overflowUint32(t *TLS, a, b uint32, res uintptr) int32
+func Y__builtin_add_overflowUint32(t *TLS, a, b uint32, res uintptr) (_3 int32)
//go:noescape
-func Y__builtin_add_overflowUint64(t *TLS, a, b uint64, res uintptr) int32
+func Y__builtin_add_overflowUint64(t *TLS, a, b uint64, res uintptr) (_3 int32)
//go:noescape
-func Y__builtin_alloca(tls *TLS, size Tsize_t) uintptr
+func Y__builtin_alloca(tls *TLS, size Tsize_t) (_2 uintptr)
//go:noescape
-func Y__builtin_bswap16(t *TLS, x uint16) uint16
+func Y__builtin_bswap16(t *TLS, x uint16) (_2 uint16)
//go:noescape
-func Y__builtin_bswap32(t *TLS, x uint32) uint32
+func Y__builtin_bswap32(t *TLS, x uint32) (_2 uint32)
//go:noescape
-func Y__builtin_bswap64(t *TLS, x uint64) uint64
+func Y__builtin_bswap64(t *TLS, x uint64) (_2 uint64)
//go:noescape
func Y__builtin_bzero(t *TLS, s uintptr, n Tsize_t)
//go:noescape
-func Y__builtin_clz(t *TLS, n uint32) int32
+func Y__builtin_clz(t *TLS, n uint32) (_2 int32)
//go:noescape
-func Y__builtin_clzl(t *TLS, n ulong) int32
+func Y__builtin_clzl(t *TLS, n ulong) (_2 int32)
//go:noescape
-func Y__builtin_clzll(t *TLS, n uint64) int32
+func Y__builtin_clzll(t *TLS, n uint64) (_2 int32)
//go:noescape
-func Y__builtin_copysign(t *TLS, x, y float64) float64
+func Y__builtin_copysign(t *TLS, x, y float64) (_2 float64)
//go:noescape
-func Y__builtin_copysignf(t *TLS, x, y float32) float32
+func Y__builtin_copysignf(t *TLS, x, y float32) (_2 float32)
//go:noescape
-func Y__builtin_copysignl(t *TLS, x, y float64) float64
+func Y__builtin_copysignl(t *TLS, x, y float64) (_2 float64)
//go:noescape
-func Y__builtin_ctz(t *TLS, n uint32) int32
+func Y__builtin_ctz(t *TLS, n uint32) (_2 int32)
//go:noescape
-func Y__builtin_ctzl(tls *TLS, x ulong) int32
+func Y__builtin_ctzl(tls *TLS, x ulong) (_2 int32)
//go:noescape
func Y__builtin_exit(t *TLS, status int32)
//go:noescape
-func Y__builtin_expect(t *TLS, exp, c long) long
+func Y__builtin_expect(t *TLS, exp, c long) (_2 long)
//go:noescape
-func Y__builtin_fabs(t *TLS, x float64) float64
+func Y__builtin_fabs(t *TLS, x float64) (_2 float64)
//go:noescape
-func Y__builtin_fabsf(t *TLS, x float32) float32
+func Y__builtin_fabsf(t *TLS, x float32) (_2 float32)
//go:noescape
-func Y__builtin_fabsl(t *TLS, x float64) float64
+func Y__builtin_fabsl(t *TLS, x float64) (_2 float64)
//go:noescape
func Y__builtin_ffs(tls *TLS, i int32) (r int32)
@@ -117,49 +373,49 @@ func Y__builtin_fmin(tls *TLS, x float64, y float64) (r float64)
func Y__builtin_free(t *TLS, ptr uintptr)
//go:noescape
-func Y__builtin_getentropy(t *TLS, buf uintptr, n Tsize_t) int32
+func Y__builtin_getentropy(t *TLS, buf uintptr, n Tsize_t) (_3 int32)
//go:noescape
-func Y__builtin_huge_val(t *TLS) float64
+func Y__builtin_huge_val(t *TLS) (_1 float64)
//go:noescape
-func Y__builtin_huge_valf(t *TLS) float32
+func Y__builtin_huge_valf(t *TLS) (_1 float32)
//go:noescape
func Y__builtin_hypot(tls *TLS, x float64, y float64) (r float64)
//go:noescape
-func Y__builtin_inf(t *TLS) float64
+func Y__builtin_inf(t *TLS) (_1 float64)
//go:noescape
-func Y__builtin_inff(tls *TLS) float32
+func Y__builtin_inff(tls *TLS) (_1 float32)
//go:noescape
-func Y__builtin_infl(t *TLS) float64
+func Y__builtin_infl(t *TLS) (_1 float64)
//go:noescape
func Y__builtin_isblank(tls *TLS, c int32) (r int32)
//go:noescape
-func Y__builtin_isnan(t *TLS, x float64) int32
+func Y__builtin_isnan(t *TLS, x float64) (_2 int32)
//go:noescape
-func Y__builtin_isnanf(t *TLS, x float32) int32
+func Y__builtin_isnanf(t *TLS, x float32) (_2 int32)
//go:noescape
-func Y__builtin_isnanl(t *TLS, x float64) int32
+func Y__builtin_isnanl(t *TLS, x float64) (_2 int32)
//go:noescape
func Y__builtin_isprint(tls *TLS, c int32) (r int32)
//go:noescape
-func Y__builtin_isunordered(t *TLS, a, b float64) int32
+func Y__builtin_isunordered(t *TLS, a, b float64) (_2 int32)
//go:noescape
-func Y__builtin_llabs(tls *TLS, a int64) int64
+func Y__builtin_llabs(tls *TLS, a int64) (_2 int64)
//go:noescape
-func Y__builtin_log2(t *TLS, x float64) float64
+func Y__builtin_log2(t *TLS, x float64) (_2 float64)
//go:noescape
func Y__builtin_lrint(tls *TLS, x float64) (r long)
@@ -171,46 +427,46 @@ func Y__builtin_lrintf(tls *TLS, x float32) (r long)
func Y__builtin_lround(tls *TLS, x float64) (r long)
//go:noescape
-func Y__builtin_malloc(t *TLS, size Tsize_t) uintptr
+func Y__builtin_malloc(t *TLS, size Tsize_t) (_2 uintptr)
//go:noescape
-func Y__builtin_memcmp(t *TLS, s1, s2 uintptr, n Tsize_t) int32
+func Y__builtin_memcmp(t *TLS, s1, s2 uintptr, n Tsize_t) (_3 int32)
//go:noescape
func Y__builtin_memcpy(t *TLS, dest, src uintptr, n Tsize_t) (r uintptr)
//go:noescape
-func Y__builtin_memset(t *TLS, s uintptr, c int32, n Tsize_t) uintptr
+func Y__builtin_memset(t *TLS, s uintptr, c int32, n Tsize_t) (_4 uintptr)
//go:noescape
-func Y__builtin_mmap(t *TLS, addr uintptr, length Tsize_t, prot, flags, fd int32, offset Toff_t) uintptr
+func Y__builtin_mmap(t *TLS, addr uintptr, length Tsize_t, prot, flags, fd int32, offset Toff_t) (_5 uintptr)
//go:noescape
-func Y__builtin_mul_overflowInt64(t *TLS, a, b int64, res uintptr) int32
+func Y__builtin_mul_overflowInt64(t *TLS, a, b int64, res uintptr) (_3 int32)
//go:noescape
-func Y__builtin_mul_overflowUint128(t *TLS, a, b Uint128, res uintptr) int32
+func Y__builtin_mul_overflowUint128(t *TLS, a, b Uint128, res uintptr) (_3 int32)
//go:noescape
-func Y__builtin_mul_overflowUint64(t *TLS, a, b uint64, res uintptr) int32
+func Y__builtin_mul_overflowUint64(t *TLS, a, b uint64, res uintptr) (_3 int32)
//go:noescape
-func Y__builtin_nan(t *TLS, s uintptr) float64
+func Y__builtin_nan(t *TLS, s uintptr) (_2 float64)
//go:noescape
-func Y__builtin_nanf(tls *TLS, s uintptr) float32
+func Y__builtin_nanf(tls *TLS, s uintptr) (_2 float32)
//go:noescape
-func Y__builtin_nanl(t *TLS, s uintptr) float64
+func Y__builtin_nanl(t *TLS, s uintptr) (_2 float64)
//go:noescape
-func Y__builtin_object_size(t *TLS, p uintptr, typ int32) Tsize_t
+func Y__builtin_object_size(t *TLS, p uintptr, typ int32) (_3 Tsize_t)
//go:noescape
-func Y__builtin_popcount(t *TLS, x uint32) int32
+func Y__builtin_popcount(t *TLS, x uint32) (_2 int32)
//go:noescape
-func Y__builtin_popcountl(t *TLS, x ulong) int32
+func Y__builtin_popcountl(t *TLS, x ulong) (_2 int32)
//go:noescape
func Y__builtin_prefetch(t *TLS, addr, args uintptr)
@@ -228,25 +484,25 @@ func Y__builtin_round(tls *TLS, x float64) (r float64)
func Y__builtin_roundf(tls *TLS, x float32) (r float32)
//go:noescape
-func Y__builtin_snprintf(t *TLS, str uintptr, size Tsize_t, format, args uintptr) int32
+func Y__builtin_snprintf(t *TLS, str uintptr, size Tsize_t, format, args uintptr) (_4 int32)
//go:noescape
func Y__builtin_sprintf(t *TLS, str, format, args uintptr) (r int32)
//go:noescape
-func Y__builtin_strchr(t *TLS, s uintptr, c int32) uintptr
+func Y__builtin_strchr(t *TLS, s uintptr, c int32) (_3 uintptr)
//go:noescape
-func Y__builtin_strcmp(t *TLS, s1, s2 uintptr) int32
+func Y__builtin_strcmp(t *TLS, s1, s2 uintptr) (_2 int32)
//go:noescape
-func Y__builtin_strcpy(t *TLS, dest, src uintptr) uintptr
+func Y__builtin_strcpy(t *TLS, dest, src uintptr) (_2 uintptr)
//go:noescape
-func Y__builtin_strlen(t *TLS, s uintptr) Tsize_t
+func Y__builtin_strlen(t *TLS, s uintptr) (_2 Tsize_t)
//go:noescape
-func Y__builtin_sub_overflowInt64(t *TLS, a, b int64, res uintptr) int32
+func Y__builtin_sub_overflowInt64(t *TLS, a, b int64, res uintptr) (_3 int32)
//go:noescape
func Y__builtin_trap(t *TLS)
@@ -258,7 +514,1144 @@ func Y__builtin_trunc(tls *TLS, x float64) (r float64)
func Y__builtin_unreachable(t *TLS)
//go:noescape
-func Y__builtin_vsnprintf(t *TLS, str uintptr, size Tsize_t, format, va uintptr) int32
+func Y__builtin_vsnprintf(t *TLS, str uintptr, size Tsize_t, format, va uintptr) (_4 int32)
+
+//go:noescape
+func Y__c11_atomic_compare_exchange_strongInt16(t *TLS, ptr, expected uintptr, desired int16, success, failure int32) (_4 int32)
+
+//go:noescape
+func Y__c11_atomic_compare_exchange_strongInt32(t *TLS, ptr, expected uintptr, desired, success, failure int32) (_3 int32)
+
+//go:noescape
+func Y__c11_atomic_compare_exchange_strongInt64(t *TLS, ptr, expected uintptr, desired int64, success, failure int32) (_4 int32)
+
+//go:noescape
+func Y__c11_atomic_compare_exchange_strongInt8(t *TLS, ptr, expected uintptr, desired int8, success, failure int32) (_4 int32)
+
+//go:noescape
+func Y__c11_atomic_compare_exchange_strongUint16(t *TLS, ptr, expected uintptr, desired uint16, success, failure int32) (_4 int32)
+
+//go:noescape
+func Y__c11_atomic_compare_exchange_strongUint32(t *TLS, ptr, expected uintptr, desired uint32, success, failure int32) (_4 int32)
+
+//go:noescape
+func Y__c11_atomic_compare_exchange_strongUint64(t *TLS, ptr, expected uintptr, desired uint64, success, failure int32) (_4 int32)
+
+//go:noescape
+func Y__c11_atomic_compare_exchange_strongUint8(t *TLS, ptr, expected uintptr, desired uint8, success, failure int32) (_4 int32)
+
+//go:noescape
+func Y__c11_atomic_exchangeInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+
+//go:noescape
+func Y__c11_atomic_exchangeInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+
+//go:noescape
+func Y__c11_atomic_exchangeInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+
+//go:noescape
+func Y__c11_atomic_exchangeInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+
+//go:noescape
+func Y__c11_atomic_exchangeUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+
+//go:noescape
+func Y__c11_atomic_exchangeUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+
+//go:noescape
+func Y__c11_atomic_exchangeUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+
+//go:noescape
+func Y__c11_atomic_exchangeUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+
+//go:noescape
+func Y__c11_atomic_fetch_addInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+
+//go:noescape
+func Y__c11_atomic_fetch_addInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+
+//go:noescape
+func Y__c11_atomic_fetch_addInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+
+//go:noescape
+func Y__c11_atomic_fetch_addInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+
+//go:noescape
+func Y__c11_atomic_fetch_addUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+
+//go:noescape
+func Y__c11_atomic_fetch_addUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+
+//go:noescape
+func Y__c11_atomic_fetch_addUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+
+//go:noescape
+func Y__c11_atomic_fetch_addUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+
+//go:noescape
+func Y__c11_atomic_fetch_andInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+
+//go:noescape
+func Y__c11_atomic_fetch_andInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+
+//go:noescape
+func Y__c11_atomic_fetch_andInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+
+//go:noescape
+func Y__c11_atomic_fetch_andInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+
+//go:noescape
+func Y__c11_atomic_fetch_andUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+
+//go:noescape
+func Y__c11_atomic_fetch_andUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+
+//go:noescape
+func Y__c11_atomic_fetch_andUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+
+//go:noescape
+func Y__c11_atomic_fetch_andUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+
+//go:noescape
+func Y__c11_atomic_fetch_orInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+
+//go:noescape
+func Y__c11_atomic_fetch_orInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+
+//go:noescape
+func Y__c11_atomic_fetch_orInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+
+//go:noescape
+func Y__c11_atomic_fetch_orInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+
+//go:noescape
+func Y__c11_atomic_fetch_orUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+
+//go:noescape
+func Y__c11_atomic_fetch_orUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+
+//go:noescape
+func Y__c11_atomic_fetch_orUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+
+//go:noescape
+func Y__c11_atomic_fetch_orUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+
+//go:noescape
+func Y__c11_atomic_fetch_subInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+
+//go:noescape
+func Y__c11_atomic_fetch_subInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+
+//go:noescape
+func Y__c11_atomic_fetch_subInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+
+//go:noescape
+func Y__c11_atomic_fetch_subInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+
+//go:noescape
+func Y__c11_atomic_fetch_subUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+
+//go:noescape
+func Y__c11_atomic_fetch_subUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+
+//go:noescape
+func Y__c11_atomic_fetch_subUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+
+//go:noescape
+func Y__c11_atomic_fetch_subUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+
+//go:noescape
+func Y__c11_atomic_fetch_xorInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+
+//go:noescape
+func Y__c11_atomic_fetch_xorInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+
+//go:noescape
+func Y__c11_atomic_fetch_xorInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+
+//go:noescape
+func Y__c11_atomic_fetch_xorInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+
+//go:noescape
+func Y__c11_atomic_fetch_xorUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+
+//go:noescape
+func Y__c11_atomic_fetch_xorUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+
+//go:noescape
+func Y__c11_atomic_fetch_xorUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+
+//go:noescape
+func Y__c11_atomic_fetch_xorUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+
+//go:noescape
+func Y__c11_atomic_loadInt16(t *TLS, ptr uintptr, memorder int32) (r int16)
+
+//go:noescape
+func Y__c11_atomic_loadInt32(t *TLS, ptr uintptr, memorder int32) (r int32)
+
+//go:noescape
+func Y__c11_atomic_loadInt64(t *TLS, ptr uintptr, memorder int32) (r int64)
+
+//go:noescape
+func Y__c11_atomic_loadInt8(t *TLS, ptr uintptr, memorder int32) (r int8)
+
+//go:noescape
+func Y__c11_atomic_loadUint16(t *TLS, ptr uintptr, memorder int32) (r uint16)
+
+//go:noescape
+func Y__c11_atomic_loadUint32(t *TLS, ptr uintptr, memorder int32) (r uint32)
+
+//go:noescape
+func Y__c11_atomic_loadUint64(t *TLS, ptr uintptr, memorder int32) (r uint64)
+
+//go:noescape
+func Y__c11_atomic_loadUint8(t *TLS, ptr uintptr, memorder int32) (r uint8)
+
+//go:noescape
+func Y__c11_atomic_storeInt16(t *TLS, ptr uintptr, val int16, memorder int32)
+
+//go:noescape
+func Y__c11_atomic_storeInt32(t *TLS, ptr uintptr, val int32, memorder int32)
+
+//go:noescape
+func Y__c11_atomic_storeInt64(t *TLS, ptr uintptr, val int64, memorder int32)
+
+//go:noescape
+func Y__c11_atomic_storeInt8(t *TLS, ptr uintptr, val int8, memorder int32)
+
+//go:noescape
+func Y__c11_atomic_storeUint16(t *TLS, ptr uintptr, val uint16, memorder int32)
+
+//go:noescape
+func Y__c11_atomic_storeUint32(t *TLS, ptr uintptr, val uint32, memorder int32)
+
+//go:noescape
+func Y__c11_atomic_storeUint64(t *TLS, ptr uintptr, val uint64, memorder int32)
+
+//go:noescape
+func Y__c11_atomic_storeUint8(t *TLS, ptr uintptr, val uint8, memorder int32)
+
+//go:noescape
+func Y__ccgo_dmesg(t *TLS, fmt uintptr, va uintptr)
+
+//go:noescape
+func Y__ccgo_getMutexType(tls *TLS, m uintptr) (_2 int32)
+
+//go:noescape
+func Y__ccgo_in6addr_anyp(t *TLS) (_1 uintptr)
+
+//go:noescape
+func Y__ccgo_pthreadAttrGetDetachState(tls *TLS, a uintptr) (_2 int32)
+
+//go:noescape
+func Y__ccgo_pthreadMutexattrGettype(tls *TLS, a uintptr) (_2 int32)
+
+//go:noescape
+func Y__ccgo_sqlite3_log(t *TLS, iErrCode int32, zFormat uintptr, args uintptr)
+
+//go:noescape
+func Y__clock_gettime(tls *TLS, clk Tclockid_t, ts uintptr) (r1 int32)
+
+//go:noescape
+func Y__clock_nanosleep(tls *TLS, clk Tclockid_t, flags int32, req uintptr, rem uintptr) (r int32)
+
+//go:noescape
+func Y__cmsg_nxthdr(t *TLS, msgh, cmsg uintptr) (_2 uintptr)
+
+//go:noescape
+func Y__convert_scm_timestamps(tls *TLS, msg uintptr, csize Tsocklen_t)
+
+//go:noescape
+func Y__cos(tls *TLS, x float64, y float64) (r1 float64)
+
+//go:noescape
+func Y__cosdf(tls *TLS, x float64) (r1 float32)
+
+//go:noescape
+func Y__crypt_blowfish(tls *TLS, key uintptr, setting uintptr, output uintptr) (r uintptr)
+
+//go:noescape
+func Y__crypt_des(tls *TLS, key uintptr, setting uintptr, output uintptr) (r uintptr)
+
+//go:noescape
+func Y__crypt_md5(tls *TLS, key uintptr, setting uintptr, output uintptr) (r uintptr)
+
+//go:noescape
+func Y__crypt_r(tls *TLS, key uintptr, salt uintptr, data uintptr) (r uintptr)
+
+//go:noescape
+func Y__crypt_sha256(tls *TLS, key uintptr, setting uintptr, output uintptr) (r uintptr)
+
+//go:noescape
+func Y__crypt_sha512(tls *TLS, key uintptr, setting uintptr, output uintptr) (r uintptr)
+
+//go:noescape
+func Y__ctype_b_loc(tls *TLS) (r uintptr)
+
+//go:noescape
+func Y__ctype_get_mb_cur_max(tls *TLS) (r Tsize_t)
+
+//go:noescape
+func Y__ctype_tolower_loc(tls *TLS) (r uintptr)
+
+//go:noescape
+func Y__ctype_toupper_loc(tls *TLS) (r uintptr)
+
+//go:noescape
+func Y__des_setkey(tls *TLS, key uintptr, ekey uintptr)
+
+//go:noescape
+func Y__dn_expand(tls *TLS, base uintptr, end uintptr, src uintptr, dest uintptr, space int32) (r int32)
+
+//go:noescape
+func Y__dns_parse(tls *TLS, r uintptr, rlen int32, __ccgo_fp_callback uintptr, ctx uintptr) (r1 int32)
+
+//go:noescape
+func __ccgo_abi0___dns_parse_2(_0 *TLS, _1 uintptr, _2 int32, _3 uintptr, _4 int32, _5 uintptr, _6 int32, __ccgo_fp uintptr) (_7 int32)
+
+func __ccgo_abiInternal___dns_parse_2(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 int32, _3 uintptr, _4 int32, _5 uintptr, _6 int32) (_7 int32) {
+ return __ccgo_abi0___dns_parse_2(_0, _1, _2, _3, _4, _5, _6, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
+
+//go:noescape
+func Y__do_des(tls *TLS, l_in Tuint32_t, r_in Tuint32_t, l_out uintptr, r_out uintptr, count Tuint32_t, saltbits Tuint32_t, ekey uintptr)
+
+//go:noescape
+func Y__do_orphaned_stdio_locks(tls *TLS)
+
+//go:noescape
+func Y__dup3(tls *TLS, old int32, new1 int32, flags int32) (r1 int32)
+
+//go:noescape
+func Y__duplocale(tls *TLS, old Tlocale_t) (r Tlocale_t)
+
+//go:noescape
+func Y__env_rm_add(tls *TLS, old uintptr, new1 uintptr)
+
+//go:noescape
+func Y__errno_location(tls *TLS) (r uintptr)
+
+//go:noescape
+func Y__execvpe(tls *TLS, file uintptr, argv uintptr, envp uintptr) (r int32)
+
+//go:noescape
+func Y__expo2(tls *TLS, x float64, sign float64) (r float64)
+
+//go:noescape
+func Y__expo2f(tls *TLS, x float32, sign float32) (r float32)
+
+//go:noescape
+func Y__fbufsize(tls *TLS, f uintptr) (r Tsize_t)
+
+//go:noescape
+func Y__fclose_ca(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__fdopen(tls *TLS, fd int32, mode uintptr) (r uintptr)
+
+//go:noescape
+func Y__fesetround(tls *TLS, r int32) (r1 int32)
+
+//go:noescape
+func Y__fgetwc_unlocked(tls *TLS, f uintptr) (r Twint_t)
+
+//go:noescape
+func Y__flbf(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__floatscan(tls *TLS, f uintptr, prec int32, pok int32) (r float64)
+
+//go:noescape
+func Y__fmodeflags(tls *TLS, mode uintptr) (r int32)
+
+//go:noescape
+func Y__fopen_rb_ca(tls *TLS, filename uintptr, f uintptr, buf uintptr, len1 Tsize_t) (r uintptr)
+
+//go:noescape
+func Y__fpclassify(tls *TLS, x float64) (r int32)
+
+//go:noescape
+func Y__fpclassifyf(tls *TLS, x float32) (r int32)
+
+//go:noescape
+func Y__fpclassifyl(tls *TLS, x float64) (r int32)
+
+//go:noescape
+func Y__fpending(tls *TLS, f uintptr) (r Tsize_t)
+
+//go:noescape
+func Y__fpurge(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__fputwc_unlocked(tls *TLS, c Twchar_t, f uintptr) (r Twint_t)
+
+//go:noescape
+func Y__freadable(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__freadahead(tls *TLS, f uintptr) (r Tsize_t)
+
+//go:noescape
+func Y__freading(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__freadptr(tls *TLS, f uintptr, sizep uintptr) (r uintptr)
+
+//go:noescape
+func Y__freadptrinc(tls *TLS, f uintptr, inc Tsize_t)
+
+//go:noescape
+func Y__freelocale(tls *TLS, l Tlocale_t)
+
+//go:noescape
+func Y__fseeko(tls *TLS, f uintptr, off Toff_t, whence int32) (r int32)
+
+//go:noescape
+func Y__fseeko_unlocked(tls *TLS, f uintptr, off Toff_t, whence int32) (r int32)
+
+//go:noescape
+func Y__fseterr(tls *TLS, f uintptr)
+
+//go:noescape
+func Y__fsetlocking(tls *TLS, f uintptr, type1 int32) (r int32)
+
+//go:noescape
+func Y__fstat(tls *TLS, fd int32, st uintptr) (r int32)
+
+//go:noescape
+func Y__fstatat(tls *TLS, fd int32, path uintptr, st uintptr, flag int32) (r int32)
+
+//go:noescape
+func Y__ftello(tls *TLS, f uintptr) (r Toff_t)
+
+//go:noescape
+func Y__ftello_unlocked(tls *TLS, f uintptr) (r Toff_t)
+
+//go:noescape
+func Y__funcs_on_quick_exit(tls *TLS)
+
+//go:noescape
+func Y__futimesat(tls *TLS, dirfd int32, pathname uintptr, times uintptr) (r int32)
+
+//go:noescape
+func Y__fwritable(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__fwritex(tls *TLS, s uintptr, l Tsize_t, f uintptr) (r Tsize_t)
+
+//go:noescape
+func Y__fwriting(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__fxstat(tls *TLS, ver int32, fd int32, buf uintptr) (r int32)
+
+//go:noescape
+func Y__fxstatat(tls *TLS, ver int32, fd int32, path uintptr, buf uintptr, flag int32) (r int32)
+
+//go:noescape
+func Y__get_handler_set(tls *TLS, set uintptr)
+
+//go:noescape
+func Y__get_locale(tls *TLS, cat int32, val uintptr) (r uintptr)
+
+//go:noescape
+func Y__get_resolv_conf(tls *TLS, conf uintptr, search uintptr, search_sz Tsize_t) (r int32)
+
+//go:noescape
+func Y__getauxval(tls *TLS, item uint64) (r uint64)
+
+//go:noescape
+func Y__getdelim(tls *TLS, s uintptr, n uintptr, delim int32, f uintptr) (r Tssize_t)
+
+//go:noescape
+func Y__getgr_a(tls *TLS, name uintptr, gid Tgid_t, gr uintptr, buf uintptr, size uintptr, mem uintptr, nmem uintptr, res uintptr) (r int32)
+
+//go:noescape
+func Y__getgrent_a(tls *TLS, f uintptr, gr uintptr, line uintptr, size uintptr, mem uintptr, nmem uintptr, res uintptr) (r int32)
+
+//go:noescape
+func Y__getopt_msg(tls *TLS, a uintptr, b uintptr, c uintptr, l Tsize_t)
+
+//go:noescape
+func Y__getpw_a(tls *TLS, name uintptr, uid Tuid_t, pw uintptr, buf uintptr, size uintptr, res uintptr) (r int32)
+
+//go:noescape
+func Y__getpwent_a(tls *TLS, f uintptr, pw uintptr, line uintptr, size uintptr, res uintptr) (r int32)
+
+//go:noescape
+func Y__gettextdomain(tls *TLS) (r uintptr)
+
+//go:noescape
+func Y__gmtime_r(tls *TLS, t uintptr, tm uintptr) (r uintptr)
+
+//go:noescape
+func Y__h_errno_location(tls *TLS) (r uintptr)
+
+//go:noescape
+func Y__inet_aton(tls *TLS, s0 uintptr, dest uintptr) (r int32)
+
+//go:noescape
+func Y__init_ssp(tls *TLS, entropy uintptr)
+
+//go:noescape
+func Y__intscan(tls *TLS, f uintptr, base uint32, pok int32, lim uint64) (r uint64)
+
+//go:noescape
+func Y__isalnum_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__isalpha_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__isblank_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iscntrl_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__isdigit_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__isfinite(tls *TLS, d float64) (_2 int32)
+
+//go:noescape
+func Y__isfinitef(tls *TLS, f float32) (_2 int32)
+
+//go:noescape
+func Y__isfinitel(tls *TLS, d float64) (_2 int32)
+
+//go:noescape
+func Y__isgraph_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__islower_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__isnan(t *TLS, x float64) (_2 int32)
+
+//go:noescape
+func Y__isnanf(t *TLS, arg float32) (_2 int32)
+
+//go:noescape
+func Y__isnanl(t *TLS, arg float64) (_2 int32)
+
+//go:noescape
+func Y__isoc99_fscanf(tls *TLS, f uintptr, fmt uintptr, va uintptr) (r int32)
+
+//go:noescape
+func Y__isoc99_fwscanf(tls *TLS, f uintptr, fmt uintptr, va uintptr) (r int32)
+
+//go:noescape
+func Y__isoc99_scanf(tls *TLS, fmt uintptr, va uintptr) (r int32)
+
+//go:noescape
+func Y__isoc99_sscanf(tls *TLS, s uintptr, fmt uintptr, va uintptr) (r int32)
+
+//go:noescape
+func Y__isoc99_swscanf(tls *TLS, s uintptr, fmt uintptr, va uintptr) (r int32)
+
+//go:noescape
+func Y__isoc99_vfscanf(tls *TLS, f uintptr, fmt uintptr, ap Tva_list) (r int32)
+
+//go:noescape
+func Y__isoc99_vfwscanf(tls *TLS, f uintptr, fmt uintptr, ap Tva_list) (r int32)
+
+//go:noescape
+func Y__isoc99_vscanf(tls *TLS, fmt uintptr, ap Tva_list) (r int32)
+
+//go:noescape
+func Y__isoc99_vsscanf(tls *TLS, s uintptr, fmt uintptr, ap Tva_list) (r int32)
+
+//go:noescape
+func Y__isoc99_vswscanf(tls *TLS, s uintptr, fmt uintptr, ap Tva_list) (r int32)
+
+//go:noescape
+func Y__isoc99_vwscanf(tls *TLS, fmt uintptr, ap Tva_list) (r int32)
+
+//go:noescape
+func Y__isoc99_wscanf(tls *TLS, fmt uintptr, va uintptr) (r int32)
+
+//go:noescape
+func Y__isprint_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__ispunct_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__isspace_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__isupper_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswalnum_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswalpha_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswblank_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswcntrl_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswctype_l(tls *TLS, c Twint_t, t Twctype_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswdigit_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswgraph_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswlower_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswprint_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswpunct_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswspace_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswupper_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__iswxdigit_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__isxdigit_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__lctrans(tls *TLS, msg uintptr, lm uintptr) (r uintptr)
+
+//go:noescape
+func Y__lctrans_cur(tls *TLS, msg uintptr) (r uintptr)
+
+//go:noescape
+func Y__lctrans_impl(tls *TLS, msg uintptr, lm uintptr) (r uintptr)
+
+//go:noescape
+func Y__ldexp_cexp(tls *TLS, z complex128, expt int32) (r complex128)
+
+//go:noescape
+func Y__ldexp_cexpf(tls *TLS, z complex64, expt int32) (r complex64)
+
+//go:noescape
+func Y__lgamma_r(tls *TLS, x float64, signgamp uintptr) (r1 float64)
+
+//go:noescape
+func Y__lgammaf_r(tls *TLS, x float32, signgamp uintptr) (r1 float32)
+
+//go:noescape
+func Y__lgammal_r(tls *TLS, x float64, sg uintptr) (r float64)
+
+//go:noescape
+func Y__libc_current_sigrtmax(tls *TLS) (r int32)
+
+//go:noescape
+func Y__libc_current_sigrtmin(tls *TLS) (r int32)
+
+//go:noescape
+func Y__libc_sigaction(tls *TLS, sig int32, sa uintptr, old uintptr) (r1 int32)
+
+//go:noescape
+func Y__loc_is_allocated(tls *TLS, loc Tlocale_t) (r int32)
+
+//go:noescape
+func Y__localtime_r(tls *TLS, t uintptr, tm uintptr) (r uintptr)
+
+//go:noescape
+func Y__lockfile(tls *TLS, file uintptr) (_2 int32)
+
+//go:noescape
+func Y__lookup_ipliteral(tls *TLS, buf uintptr, name uintptr, family int32) (r int32)
+
+//go:noescape
+func Y__lookup_name(tls *TLS, buf uintptr, canon uintptr, name uintptr, family int32, flags int32) (r int32)
+
+//go:noescape
+func Y__lookup_serv(tls *TLS, buf uintptr, name uintptr, proto int32, socktype int32, flags int32) (r int32)
+
+//go:noescape
+func Y__lseek(tls *TLS, fd int32, offset Toff_t, whence int32) (r Toff_t)
+
+//go:noescape
+func Y__lsysinfo(tls *TLS, info uintptr) (r int32)
+
+//go:noescape
+func Y__lxstat(tls *TLS, ver int32, path uintptr, buf uintptr) (r int32)
+
+//go:noescape
+func Y__madvise(tls *TLS, addr uintptr, len1 Tsize_t, advice int32) (r int32)
+
+//go:noescape
+func Y__map_file(tls *TLS, pathname uintptr, size uintptr) (r uintptr)
+
+//go:noescape
+func Y__math_divzero(tls *TLS, sign Tuint32_t) (r float64)
+
+//go:noescape
+func Y__math_divzerof(tls *TLS, sign Tuint32_t) (r float32)
+
+//go:noescape
+func Y__math_invalid(tls *TLS, x float64) (r float64)
+
+//go:noescape
+func Y__math_invalidf(tls *TLS, x float32) (r float32)
+
+//go:noescape
+func Y__math_oflow(tls *TLS, sign Tuint32_t) (r float64)
+
+//go:noescape
+func Y__math_oflowf(tls *TLS, sign Tuint32_t) (r float32)
+
+//go:noescape
+func Y__math_uflow(tls *TLS, sign Tuint32_t) (r float64)
+
+//go:noescape
+func Y__math_uflowf(tls *TLS, sign Tuint32_t) (r float32)
+
+//go:noescape
+func Y__math_xflow(tls *TLS, sign Tuint32_t, y2 float64) (r float64)
+
+//go:noescape
+func Y__math_xflowf(tls *TLS, sign Tuint32_t, y2 float32) (r float32)
+
+//go:noescape
+func Y__memrchr(tls *TLS, m uintptr, c int32, n Tsize_t) (r uintptr)
+
+//go:noescape
+func Y__mkostemps(tls *TLS, template uintptr, len1 int32, flags int32) (r int32)
+
+//go:noescape
+func Y__mmap(tls *TLS, start uintptr, len1 Tsize_t, prot int32, flags int32, fd int32, off Toff_t) (r uintptr)
+
+//go:noescape
+func Y__mo_lookup(tls *TLS, p uintptr, size Tsize_t, s uintptr) (r uintptr)
+
+//go:noescape
+func Y__month_to_secs(tls *TLS, month int32, is_leap int32) (r int32)
+
+//go:noescape
+func Y__mprotect(tls *TLS, addr uintptr, len1 Tsize_t, prot int32) (r int32)
+
+//go:noescape
+func Y__mremap(tls *TLS, old_addr uintptr, old_len Tsize_t, new_len Tsize_t, flags int32, va uintptr) (r uintptr)
+
+//go:noescape
+func Y__munmap(tls *TLS, start uintptr, len1 Tsize_t) (r int32)
+
+//go:noescape
+func Y__newlocale(tls *TLS, mask int32, name uintptr, loc Tlocale_t) (r Tlocale_t)
+
+//go:noescape
+func Y__nl_langinfo(tls *TLS, item Tnl_item) (r uintptr)
+
+//go:noescape
+func Y__nl_langinfo_l(tls *TLS, item Tnl_item, loc Tlocale_t) (r uintptr)
+
+//go:noescape
+func Y__nscd_query(tls *TLS, req Tint32_t, key uintptr, buf uintptr, len1 Tsize_t, swap uintptr) (r uintptr)
+
+//go:noescape
+func Y__ofl_add(tls *TLS, f uintptr) (r uintptr)
+
+//go:noescape
+func Y__ofl_lock(tls *TLS) (r uintptr)
+
+//go:noescape
+func Y__ofl_unlock(tls *TLS)
+
+//go:noescape
+func Y__overflow(tls *TLS, f uintptr, _c int32) (r int32)
+
+//go:noescape
+func Y__pleval(tls *TLS, s uintptr, n uint64) (r uint64)
+
+//go:noescape
+func Y__posix_getopt(tls *TLS, argc int32, argv uintptr, optstring uintptr) (r int32)
+
+//go:noescape
+func Y__procfdname(tls *TLS, buf uintptr, fd uint32)
+
+//go:noescape
+func Y__ptsname_r(tls *TLS, fd int32, buf uintptr, len1 Tsize_t) (r int32)
+
+//go:noescape
+func Y__putenv(tls *TLS, s uintptr, l Tsize_t, r uintptr) (r1 int32)
+
+//go:noescape
+func Y__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr)
+
+//go:noescape
+func __ccgo_abi0___qsort_r_3(_0 *TLS, _1 uintptr, _2 uintptr, _3 uintptr, __ccgo_fp uintptr) (_4 int32)
+
+func __ccgo_abiInternal___qsort_r_3(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr, _3 uintptr) (_4 int32) {
+ return __ccgo_abi0___qsort_r_3(_0, _1, _2, _3, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
+
+//go:noescape
+func Y__rand48_step(tls *TLS, xi uintptr, lc uintptr) (r Tuint64_t)
+
+//go:noescape
+func Y__register_locked_file(tls *TLS, f uintptr, self Tpthread_t)
+
+//go:noescape
+func Y__rem_pio2(tls *TLS, x float64, y uintptr) (r1 int32)
+
+//go:noescape
+func Y__rem_pio2_large(tls *TLS, x uintptr, y uintptr, e0 int32, nx int32, prec int32) (r int32)
+
+//go:noescape
+func Y__rem_pio2f(tls *TLS, x float32, y uintptr) (r int32)
+
+//go:noescape
+func Y__res_mkquery(tls *TLS, op int32, dname uintptr, class int32, type1 int32, data uintptr, datalen int32, newrr uintptr, buf uintptr, buflen int32) (r int32)
+
+//go:noescape
+func Y__res_msend(tls *TLS, nqueries int32, queries uintptr, qlens uintptr, answers uintptr, alens uintptr, asize int32) (r int32)
+
+//go:noescape
+func Y__res_msend_rc(tls *TLS, nqueries int32, queries uintptr, qlens uintptr, answers uintptr, alens uintptr, asize int32, conf uintptr) (r1 int32)
+
+//go:noescape
+func Y__res_send(tls *TLS, _msg uintptr, _msglen int32, _answer uintptr, _anslen int32) (r1 int32)
+
+//go:noescape
+func Y__res_state(tls *TLS) (r uintptr)
+
+//go:noescape
+func Y__reset_tls(tls *TLS)
+
+//go:noescape
+func Y__restore(tls *TLS)
+
+//go:noescape
+func Y__restore_rt(tls *TLS)
+
+//go:noescape
+func Y__restore_sigs(tls *TLS, set uintptr)
+
+//go:noescape
+func Y__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32)
+
+//go:noescape
+func __ccgo_abi0___rtnetlink_enumerate_2(_0 *TLS, _1 uintptr, _2 uintptr, __ccgo_fp uintptr) (_3 int32)
+
+func __ccgo_abiInternal___rtnetlink_enumerate_2(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr) (_3 int32) {
+ return __ccgo_abi0___rtnetlink_enumerate_2(_0, _1, _2, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
+
+//go:noescape
+func Y__secs_to_tm(tls *TLS, t int64, tm uintptr) (r int32)
+
+//go:noescape
+func Y__secs_to_zone(tls *TLS, t int64, local int32, isdst uintptr, offset uintptr, oppoff uintptr, zonename uintptr)
+
+//go:noescape
+func Y__setxid(tls *TLS, nr int32, id int32, eid int32, sid int32) (r int32)
+
+//go:noescape
+func Y__shgetc(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__shlim(tls *TLS, f uintptr, lim Toff_t)
+
+//go:noescape
+func Y__shm_mapname(tls *TLS, name uintptr, buf uintptr) (r uintptr)
+
+//go:noescape
+func Y__sigaction(tls *TLS, sig int32, sa uintptr, old uintptr) (r1 int32)
+
+//go:noescape
+func Y__signbit(tls *TLS, x float64) (r int32)
+
+//go:noescape
+func Y__signbitf(tls *TLS, x float32) (r int32)
+
+//go:noescape
+func Y__signbitl(tls *TLS, x float64) (r int32)
+
+//go:noescape
+func Y__sigsetjmp_tail(tls *TLS, jb uintptr, ret int32) (r int32)
+
+//go:noescape
+func Y__sin(tls *TLS, x float64, y float64, iy int32) (r1 float64)
+
+//go:noescape
+func Y__sindf(tls *TLS, x float64) (r1 float32)
+
+//go:noescape
+func Y__stack_chk_fail(tls *TLS)
+
+//go:noescape
+func Y__stack_chk_fail_local(tls *TLS)
+
+//go:noescape
+func Y__stdio_close(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__stdio_exit(tls *TLS)
+
+//go:noescape
+func Y__stdio_exit_needed(tls *TLS)
+
+//go:noescape
+func Y__stdio_read(tls *TLS, f uintptr, buf uintptr, len1 Tsize_t) (r Tsize_t)
+
+//go:noescape
+func Y__stdio_seek(tls *TLS, f uintptr, off Toff_t, whence int32) (r Toff_t)
+
+//go:noescape
+func Y__stdio_write(tls *TLS, f uintptr, buf uintptr, len1 Tsize_t) (r Tsize_t)
+
+//go:noescape
+func Y__stdout_write(tls *TLS, f uintptr, buf uintptr, len1 Tsize_t) (r Tsize_t)
+
+//go:noescape
+func Y__stpcpy(tls *TLS, d uintptr, s uintptr) (r uintptr)
+
+//go:noescape
+func Y__stpncpy(tls *TLS, d uintptr, s uintptr, n Tsize_t) (r uintptr)
+
+//go:noescape
+func Y__strcasecmp_l(tls *TLS, l uintptr, r uintptr, loc Tlocale_t) (r1 int32)
+
+//go:noescape
+func Y__strchrnul(tls *TLS, s uintptr, c int32) (r uintptr)
+
+//go:noescape
+func Y__strcoll_l(tls *TLS, l uintptr, r uintptr, loc Tlocale_t) (r1 int32)
+
+//go:noescape
+func Y__strerror_l(tls *TLS, e int32, loc Tlocale_t) (r uintptr)
+
+//go:noescape
+func Y__strftime_fmt_1(tls *TLS, s uintptr, l uintptr, f int32, tm uintptr, loc Tlocale_t, pad int32) (r uintptr)
+
+//go:noescape
+func Y__strftime_l(tls *TLS, s uintptr, n Tsize_t, f uintptr, tm uintptr, loc Tlocale_t) (r Tsize_t)
+
+//go:noescape
+func Y__strncasecmp_l(tls *TLS, l uintptr, r uintptr, n Tsize_t, loc Tlocale_t) (r1 int32)
+
+//go:noescape
+func Y__strtod_l(tls *TLS, s uintptr, p uintptr, l Tlocale_t) (r float64)
+
+//go:noescape
+func Y__strtof_l(tls *TLS, s uintptr, p uintptr, l Tlocale_t) (r float32)
+
+//go:noescape
+func Y__strtoimax_internal(tls *TLS, s uintptr, p uintptr, base int32) (r Tintmax_t)
+
+//go:noescape
+func Y__strtol_internal(tls *TLS, s uintptr, p uintptr, base int32) (r int64)
+
+//go:noescape
+func Y__strtold_l(tls *TLS, s uintptr, p uintptr, l Tlocale_t) (r float64)
+
+//go:noescape
+func Y__strtoll_internal(tls *TLS, s uintptr, p uintptr, base int32) (r int64)
+
+//go:noescape
+func Y__strtoul_internal(tls *TLS, s uintptr, p uintptr, base int32) (r uint64)
+
+//go:noescape
+func Y__strtoull_internal(tls *TLS, s uintptr, p uintptr, base int32) (r uint64)
+
+//go:noescape
+func Y__strtoumax_internal(tls *TLS, s uintptr, p uintptr, base int32) (r Tuintmax_t)
+
+//go:noescape
+func Y__strxfrm_l(tls *TLS, dest uintptr, src uintptr, n Tsize_t, loc Tlocale_t) (r Tsize_t)
+
+//go:noescape
+func Y__sync_synchronize(t *TLS)
+
+//go:noescape
+func Y__sync_val_compare_and_swapInt16(t *TLS, ptr uintptr, oldval, newval int16) (r int16)
+
+//go:noescape
+func Y__sync_val_compare_and_swapInt32(t *TLS, ptr uintptr, oldval, newval int32) (r int32)
+
+//go:noescape
+func Y__sync_val_compare_and_swapInt64(t *TLS, ptr uintptr, oldval, newval int64) (r int64)
+
+//go:noescape
+func Y__sync_val_compare_and_swapInt8(t *TLS, ptr uintptr, oldval, newval int8) (r int8)
+
+//go:noescape
+func Y__sync_val_compare_and_swapUint16(t *TLS, ptr uintptr, oldval, newval uint16) (r uint16)
+
+//go:noescape
+func Y__sync_val_compare_and_swapUint32(t *TLS, ptr uintptr, oldval, newval uint32) (r uint32)
+
+//go:noescape
+func Y__sync_val_compare_and_swapUint64(t *TLS, ptr uintptr, oldval, newval uint64) (r uint64)
+
+//go:noescape
+func Y__sync_val_compare_and_swapUint8(t *TLS, ptr uintptr, oldval, newval uint8) (r uint8)
+
+//go:noescape
+func Y__syscall0(tls *TLS, n long) (_2 long)
+
+//go:noescape
+func Y__syscall1(tls *TLS, n, a1 long) (_2 long)
+
+//go:noescape
+func Y__syscall2(tls *TLS, n, a1, a2 long) (_2 long)
+
+//go:noescape
+func Y__syscall3(tls *TLS, n, a1, a2, a3 long) (_2 long)
+
+//go:noescape
+func Y__syscall4(tls *TLS, n, a1, a2, a3, a4 long) (_2 long)
+
+//go:noescape
+func Y__syscall5(tls *TLS, n, a1, a2, a3, a4, a5 long) (_2 long)
+
+//go:noescape
+func Y__syscall6(tls *TLS, n, a1, a2, a3, a4, a5, a6 long) (_2 long)
+
+//go:noescape
+func Y__syscall_ret(tls *TLS, r uint64) (r1 int64)
+
+//go:noescape
+func Y__tan(tls *TLS, x float64, y float64, odd int32) (r1 float64)
+
+//go:noescape
+func Y__tandf(tls *TLS, x float64, odd int32) (r1 float32)
+
+//go:noescape
+func Y__tm_to_secs(tls *TLS, tm uintptr) (r int64)
+
+//go:noescape
+func Y__tm_to_tzname(tls *TLS, tm uintptr) (r uintptr)
+
+//go:noescape
+func Y__tolower_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__toread(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__toread_needs_stdio_exit(tls *TLS)
+
+//go:noescape
+func Y__toupper_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+
+//go:noescape
+func Y__towctrans_l(tls *TLS, c Twint_t, t Twctrans_t, l Tlocale_t) (r Twint_t)
+
+//go:noescape
+func Y__towlower_l(tls *TLS, c Twint_t, l Tlocale_t) (r Twint_t)
+
+//go:noescape
+func Y__towrite(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__towrite_needs_stdio_exit(tls *TLS)
+
+//go:noescape
+func Y__towupper_l(tls *TLS, c Twint_t, l Tlocale_t) (r Twint_t)
+
+//go:noescape
+func Y__tre_mem_alloc_impl(tls *TLS, mem Ttre_mem_t, provided int32, provided_block uintptr, zero int32, size Tsize_t) (r uintptr)
+
+//go:noescape
+func Y__tre_mem_destroy(tls *TLS, mem Ttre_mem_t)
+
+//go:noescape
+func Y__tre_mem_new_impl(tls *TLS, provided int32, provided_block uintptr) (r Ttre_mem_t)
+
+//go:noescape
+func Y__tsearch_balance(tls *TLS, p uintptr) (r int32)
+
+//go:noescape
+func Y__uflow(tls *TLS, f uintptr) (r int32)
+
+//go:noescape
+func Y__unlist_locked_file(tls *TLS, f uintptr)
+
+//go:noescape
+func Y__unlockfile(tls *TLS, file uintptr)
+
+//go:noescape
+func Y__uselocale(tls *TLS, new1 Tlocale_t) (r Tlocale_t)
+
+//go:noescape
+func Y__vm_wait(tls *TLS)
+
+//go:noescape
+func Y__wcscoll_l(tls *TLS, l uintptr, r uintptr, locale Tlocale_t) (r1 int32)
+
+//go:noescape
+func Y__wcsftime_l(tls *TLS, s uintptr, n Tsize_t, f uintptr, tm uintptr, loc Tlocale_t) (r Tsize_t)
+
+//go:noescape
+func Y__wcsxfrm_l(tls *TLS, dest uintptr, src uintptr, n Tsize_t, loc Tlocale_t) (r Tsize_t)
+
+//go:noescape
+func Y__wctrans_l(tls *TLS, s uintptr, l Tlocale_t) (r Twctrans_t)
+
+//go:noescape
+func Y__wctype_l(tls *TLS, s uintptr, l Tlocale_t) (r Twctype_t)
+
+//go:noescape
+func Y__xmknod(tls *TLS, ver int32, path uintptr, mode Tmode_t, dev uintptr) (r int32)
+
+//go:noescape
+func Y__xmknodat(tls *TLS, ver int32, fd int32, path uintptr, mode Tmode_t, dev uintptr) (r int32)
+
+//go:noescape
+func Y__xpg_basename(tls *TLS, s uintptr) (r uintptr)
+
+//go:noescape
+func Y__xpg_strerror_r(tls *TLS, err int32, buf uintptr, buflen Tsize_t) (r int32)
+
+//go:noescape
+func Y__xstat(tls *TLS, ver int32, path uintptr, buf uintptr) (r int32)
+
+//go:noescape
+func Y__year_to_secs(tls *TLS, year int64, is_leap uintptr) (r int64)
+
+//go:noescape
+func Y_exit(tls *TLS, status int32)
+
+//go:noescape
+func Y_flushlbf(tls *TLS)
+
+//go:noescape
+func Y_longjmp(t *TLS, env uintptr, val int32)
+
+//go:noescape
+func Y_obstack_begin(t *TLS, obstack uintptr, size, alignment int32, chunkfun, freefun uintptr) (_4 int32)
+
+//go:noescape
+func Y_obstack_newchunk(t *TLS, obstack uintptr, length int32) (_3 int32)
+
+//go:noescape
+func Y_pthread_cleanup_pop(tls *TLS, _ uintptr, run int32)
+
+//go:noescape
+func Y_pthread_cleanup_push(tls *TLS, _, f, x uintptr)
+
+//go:noescape
+func Y_setjmp(t *TLS, env uintptr) (_2 int32)
//go:noescape
func Ya64l(tls *TLS, s uintptr) (r int64)
@@ -312,7 +1705,7 @@ func Yadjtimex(tls *TLS, tx uintptr) (r int32)
func Yalarm(tls *TLS, seconds uint32) (r uint32)
//go:noescape
-func Yalloca(tls *TLS, size Tsize_t) uintptr
+func Yalloca(tls *TLS, size Tsize_t) (_2 uintptr)
//go:noescape
func Yalphasort(tls *TLS, a uintptr, b uintptr) (r int32)
@@ -348,7 +1741,17 @@ func Yasinl(tls *TLS, x float64) (r float64)
func Yasprintf(tls *TLS, s uintptr, fmt uintptr, va uintptr) (r int32)
//go:noescape
-func Yat_quick_exit(tls *TLS, func1 uintptr) (r1 int32)
+func Yat_quick_exit(tls *TLS, __ccgo_fp_func uintptr) (r1 int32)
+
+//go:noescape
+func __ccgo_abi0_at_quick_exit_0(_0 *TLS, __ccgo_fp uintptr)
+
+func __ccgo_abiInternal_at_quick_exit_0(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS) {
+ __ccgo_abi0_at_quick_exit_0(_0, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Yatan(tls *TLS, x3 float64) (r float64)
@@ -393,7 +1796,7 @@ func Yatol(tls *TLS, s uintptr) (r int64)
func Yatoll(tls *TLS, s uintptr) (r int64)
//go:noescape
-func Ybacktrace(t *TLS, buf uintptr, size int32) int32
+func Ybacktrace(t *TLS, buf uintptr, size int32) (_3 int32)
//go:noescape
func Ybacktrace_symbols_fd(t *TLS, buffer uintptr, size, fd int32)
@@ -420,7 +1823,17 @@ func Ybindtextdomain(tls *TLS, domainname uintptr, dirname uintptr) (r uintptr)
func Ybrk(tls *TLS, end uintptr) (r int32)
//go:noescape
-func Ybsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, cmp uintptr) (r uintptr)
+func Ybsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp uintptr) (r uintptr)
+
+//go:noescape
+func __ccgo_abi0_bsearch_4(_0 *TLS, _1 uintptr, _2 uintptr, __ccgo_fp uintptr) (_3 int32)
+
+func __ccgo_abiInternal_bsearch_4(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr) (_3 int32) {
+ return __ccgo_abi0_bsearch_4(_0, _1, _2, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Ybtowc(tls *TLS, c int32) (r Twint_t)
@@ -819,16 +2232,16 @@ func Ydirname(tls *TLS, s uintptr) (r uintptr)
func Ydiv(tls *TLS, num int32, den int32) (r Tdiv_t)
//go:noescape
-func Ydlclose(t *TLS, handle uintptr) int32
+func Ydlclose(t *TLS, handle uintptr) (_2 int32)
//go:noescape
-func Ydlerror(t *TLS) uintptr
+func Ydlerror(t *TLS) (_1 uintptr)
//go:noescape
-func Ydlopen(t *TLS, filename uintptr, flags int32) uintptr
+func Ydlopen(t *TLS, filename uintptr, flags int32) (_3 uintptr)
//go:noescape
-func Ydlsym(t *TLS, handle, symbol uintptr) uintptr
+func Ydlsym(t *TLS, handle, symbol uintptr) (_2 uintptr)
//go:noescape
func Ydn_comp(tls *TLS, src uintptr, dst uintptr, space int32, dnptrs uintptr, lastdnptr uintptr) (r int32)
@@ -1281,7 +2694,7 @@ func Yfopen64(tls *TLS, filename uintptr, mode uintptr) (r uintptr)
func Yfopencookie(tls *TLS, cookie uintptr, mode uintptr, iofuncs Tcookie_io_functions_t) (r uintptr)
//go:noescape
-func Yfork(t *TLS) int32
+func Yfork(t *TLS) (_1 int32)
//go:noescape
func Yfpathconf(tls *TLS, fd int32, name int32) (r int64)
@@ -1404,25 +2817,35 @@ func Yftruncate64(tls *TLS, fd int32, length Toff_t) (r int32)
func Yftrylockfile(tls *TLS, f uintptr) (r int32)
//go:noescape
-func Yfts64_close(t *TLS, ftsp uintptr) int32
+func Yfts64_close(t *TLS, ftsp uintptr) (_2 int32)
+
+//go:noescape
+func Yfts64_open(t *TLS, path_argv uintptr, options int32, compar uintptr) (_4 uintptr)
//go:noescape
-func Yfts64_open(t *TLS, path_argv uintptr, options int32, compar uintptr) uintptr
+func Yfts64_read(t *TLS, ftsp uintptr) (_2 uintptr)
//go:noescape
-func Yfts64_read(t *TLS, ftsp uintptr) uintptr
+func Yfts_close(t *TLS, ftsp uintptr) (_2 int32)
//go:noescape
-func Yfts_close(t *TLS, ftsp uintptr) int32
+func Yfts_open(t *TLS, path_argv uintptr, options int32, compar uintptr) (_4 uintptr)
//go:noescape
-func Yfts_open(t *TLS, path_argv uintptr, options int32, compar uintptr) uintptr
+func Yfts_read(t *TLS, ftsp uintptr) (_2 uintptr)
//go:noescape
-func Yfts_read(t *TLS, ftsp uintptr) uintptr
+func Yftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32) (r int32)
//go:noescape
-func Yftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32)
+func __ccgo_abi0_ftw_1(_0 *TLS, _1 uintptr, _2 uintptr, _3 int32, __ccgo_fp uintptr) (_4 int32)
+
+func __ccgo_abiInternal_ftw_1(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr, _3 int32) (_4 int32) {
+ return __ccgo_abi0_ftw_1(_0, _1, _2, _3, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Yfunlockfile(tls *TLS, f uintptr)
@@ -1758,7 +3181,17 @@ func Ygetwchar_unlocked(tls *TLS) (r Twint_t)
func Ygetxattr(tls *TLS, path uintptr, name uintptr, value uintptr, size Tsize_t) (r Tssize_t)
//go:noescape
-func Yglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g_ uintptr) (r int32)
+func Yglob(tls *TLS, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, g_ uintptr) (r int32)
+
+//go:noescape
+func __ccgo_abi0_glob_2(_0 *TLS, _1 uintptr, _2 int32, __ccgo_fp uintptr) (_3 int32)
+
+func __ccgo_abiInternal_glob_2(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 int32) (_3 int32) {
+ return __ccgo_abi0_glob_2(_0, _1, _2, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Yglobfree(tls *TLS, g_ uintptr)
@@ -1878,7 +3311,7 @@ func Yinit_module(tls *TLS, a uintptr, b uint64, c uintptr) (r int32)
func Yinitstate(tls *TLS, seed uint32, state uintptr, size Tsize_t) (r uintptr)
//go:noescape
-func Yinitstate_r(t *TLS, seed uint32, statebuf uintptr, statelen Tsize_t, buf uintptr) int32
+func Yinitstate_r(t *TLS, seed uint32, statebuf uintptr, statelen Tsize_t, buf uintptr) (_5 int32)
//go:noescape
func Yinotify_add_watch(tls *TLS, fd int32, pathname uintptr, mask Tuint32_t) (r int32)
@@ -1956,13 +3389,13 @@ func Yislower(tls *TLS, c int32) (r int32)
func Yislower_l(tls *TLS, c int32, l Tlocale_t) (r int32)
//go:noescape
-func Yisnan(t *TLS, x float64) int32
+func Yisnan(t *TLS, x float64) (_2 int32)
//go:noescape
-func Yisnanf(t *TLS, arg float32) int32
+func Yisnanf(t *TLS, arg float32) (_2 int32)
//go:noescape
-func Yisnanl(t *TLS, arg float64) int32
+func Yisnanl(t *TLS, arg float64) (_2 int32)
//go:noescape
func Yisprint(tls *TLS, c int32) (r int32)
@@ -2136,7 +3569,17 @@ func Yldexpl(tls *TLS, x float64, n int32) (r float64)
func Yldiv(tls *TLS, num int64, den int64) (r Tldiv_t)
//go:noescape
-func Ylfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr)
+func Ylfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr)
+
+//go:noescape
+func __ccgo_abi0_lfind_4(_0 *TLS, _1 uintptr, _2 uintptr, __ccgo_fp uintptr) (_3 int32)
+
+func __ccgo_abiInternal_lfind_4(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr) (_3 int32) {
+ return __ccgo_abi0_lfind_4(_0, _1, _2, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Ylgamma(tls *TLS, x float64) (r float64)
@@ -2286,7 +3729,17 @@ func Ylroundf(tls *TLS, x float32) (r int64)
func Ylroundl(tls *TLS, x float64) (r int64)
//go:noescape
-func Ylsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr)
+func Ylsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr)
+
+//go:noescape
+func __ccgo_abi0_lsearch_4(_0 *TLS, _1 uintptr, _2 uintptr, __ccgo_fp uintptr) (_3 int32)
+
+func __ccgo_abiInternal_lsearch_4(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr) (_3 int32) {
+ return __ccgo_abi0_lsearch_4(_0, _1, _2, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Ylseek(tls *TLS, fd int32, offset Toff_t, whence int32) (r Toff_t)
@@ -2520,7 +3973,17 @@ func Ynexttowardf(tls *TLS, x3 float32, y3 float64) (r float32)
func Ynexttowardl(tls *TLS, x float64, y float64) (r float64)
//go:noescape
-func Ynftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1 int32)
+func Ynftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32) (r1 int32)
+
+//go:noescape
+func __ccgo_abi0_nftw_1(_0 *TLS, _1 uintptr, _2 uintptr, _3 int32, _4 uintptr, __ccgo_fp uintptr) (_5 int32)
+
+func __ccgo_abiInternal_nftw_1(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr, _3 int32, _4 uintptr) (_5 int32) {
+ return __ccgo_abi0_nftw_1(_0, _1, _2, _3, _4, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Yngettext(tls *TLS, msgid1 uintptr, msgid2 uintptr, n uint64) (r uintptr)
@@ -2571,7 +4034,7 @@ func Yntohs(tls *TLS, n Tuint16_t) (r Tuint16_t)
func Yobstack_free(t *TLS, obstack, obj uintptr)
//go:noescape
-func Yobstack_vprintf(t *TLS, obstack, template, va uintptr) int32
+func Yobstack_vprintf(t *TLS, obstack, template, va uintptr) (_2 int32)
//go:noescape
func Yopen(tls *TLS, filename uintptr, flags int32, va uintptr) (r int32)
@@ -2628,7 +4091,7 @@ func Ypivot_root(tls *TLS, new1 uintptr, old uintptr) (r int32)
func Ypoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32)
//go:noescape
-func Ypopen(t *TLS, command, type1 uintptr) uintptr
+func Ypopen(t *TLS, command, type1 uintptr) (_2 uintptr)
//go:noescape
func Yposix_close(tls *TLS, fd int32, flags int32) (r int32)
@@ -2763,25 +4226,25 @@ func Ypsiginfo(tls *TLS, si uintptr, msg uintptr)
func Ypsignal(tls *TLS, sig int32, msg uintptr)
//go:noescape
-func Ypthread_atfork(tls *TLS, prepare, parent, child uintptr) int32
+func Ypthread_atfork(tls *TLS, prepare, parent, child uintptr) (_2 int32)
//go:noescape
-func Ypthread_attr_destroy(tls *TLS, a uintptr) int32
+func Ypthread_attr_destroy(tls *TLS, a uintptr) (_2 int32)
//go:noescape
-func Ypthread_attr_getdetachstate(tls *TLS, a uintptr, state uintptr) int32
+func Ypthread_attr_getdetachstate(tls *TLS, a uintptr, state uintptr) (_3 int32)
//go:noescape
-func Ypthread_attr_init(tls *TLS, a uintptr) int32
+func Ypthread_attr_init(tls *TLS, a uintptr) (_2 int32)
//go:noescape
func Ypthread_attr_setdetachstate(tls *TLS, a uintptr, state int32) (r int32)
//go:noescape
-func Ypthread_attr_setscope(tls *TLS, a uintptr, scope int32) int32
+func Ypthread_attr_setscope(tls *TLS, a uintptr, scope int32) (_3 int32)
//go:noescape
-func Ypthread_attr_setstacksize(tls *TLS, a uintptr, stacksite Tsize_t) int32
+func Ypthread_attr_setstacksize(tls *TLS, a uintptr, stacksite Tsize_t) (_3 int32)
//go:noescape
func Ypthread_cleanup_pop(tls *TLS, run int32)
@@ -2790,82 +4253,82 @@ func Ypthread_cleanup_pop(tls *TLS, run int32)
func Ypthread_cleanup_push(tls *TLS, f, x uintptr)
//go:noescape
-func Ypthread_cond_broadcast(tls *TLS, c uintptr) int32
+func Ypthread_cond_broadcast(tls *TLS, c uintptr) (_2 int32)
//go:noescape
-func Ypthread_cond_destroy(tls *TLS, c uintptr) int32
+func Ypthread_cond_destroy(tls *TLS, c uintptr) (_2 int32)
//go:noescape
-func Ypthread_cond_init(tls *TLS, c, a uintptr) int32
+func Ypthread_cond_init(tls *TLS, c, a uintptr) (_2 int32)
//go:noescape
-func Ypthread_cond_signal(tls *TLS, c uintptr) int32
+func Ypthread_cond_signal(tls *TLS, c uintptr) (_2 int32)
//go:noescape
func Ypthread_cond_timedwait(tls *TLS, c, m, ts uintptr) (r int32)
//go:noescape
-func Ypthread_cond_wait(tls *TLS, c, m uintptr) int32
+func Ypthread_cond_wait(tls *TLS, c, m uintptr) (_2 int32)
//go:noescape
-func Ypthread_create(tls *TLS, res, attrp, entry, arg uintptr) int32
+func Ypthread_create(tls *TLS, res, attrp, entry, arg uintptr) (_2 int32)
//go:noescape
-func Ypthread_detach(tls *TLS, t uintptr) int32
+func Ypthread_detach(tls *TLS, t uintptr) (_2 int32)
//go:noescape
-func Ypthread_equal(tls *TLS, t, u uintptr) int32
+func Ypthread_equal(tls *TLS, t, u uintptr) (_2 int32)
//go:noescape
func Ypthread_exit(tls *TLS, result uintptr)
//go:noescape
-func Ypthread_getspecific(tls *TLS, k Tpthread_key_t) uintptr
+func Ypthread_getspecific(tls *TLS, k Tpthread_key_t) (_2 uintptr)
//go:noescape
func Ypthread_join(tls *TLS, t Tpthread_t, res uintptr) (r int32)
//go:noescape
-func Ypthread_key_create(tls *TLS, k uintptr, dtor uintptr) int32
+func Ypthread_key_create(tls *TLS, k uintptr, dtor uintptr) (_3 int32)
//go:noescape
-func Ypthread_key_delete(tls *TLS, k Tpthread_key_t) int32
+func Ypthread_key_delete(tls *TLS, k Tpthread_key_t) (_2 int32)
//go:noescape
-func Ypthread_mutex_destroy(tls *TLS, m uintptr) int32
+func Ypthread_mutex_destroy(tls *TLS, m uintptr) (_2 int32)
//go:noescape
-func Ypthread_mutex_init(tls *TLS, m, a uintptr) int32
+func Ypthread_mutex_init(tls *TLS, m, a uintptr) (_2 int32)
//go:noescape
-func Ypthread_mutex_lock(tls *TLS, m uintptr) int32
+func Ypthread_mutex_lock(tls *TLS, m uintptr) (_2 int32)
//go:noescape
-func Ypthread_mutex_trylock(tls *TLS, m uintptr) int32
+func Ypthread_mutex_trylock(tls *TLS, m uintptr) (_2 int32)
//go:noescape
-func Ypthread_mutex_unlock(tls *TLS, m uintptr) int32
+func Ypthread_mutex_unlock(tls *TLS, m uintptr) (_2 int32)
//go:noescape
-func Ypthread_mutexattr_destroy(tls *TLS, a uintptr) int32
+func Ypthread_mutexattr_destroy(tls *TLS, a uintptr) (_2 int32)
//go:noescape
-func Ypthread_mutexattr_init(tls *TLS, a uintptr) int32
+func Ypthread_mutexattr_init(tls *TLS, a uintptr) (_2 int32)
//go:noescape
-func Ypthread_mutexattr_settype(tls *TLS, a uintptr, typ int32) int32
+func Ypthread_mutexattr_settype(tls *TLS, a uintptr, typ int32) (_3 int32)
//go:noescape
-func Ypthread_self(tls *TLS) uintptr
+func Ypthread_self(tls *TLS) (_1 uintptr)
//go:noescape
-func Ypthread_setcancelstate(tls *TLS, new int32, old uintptr) int32
+func Ypthread_setcancelstate(tls *TLS, new int32, old uintptr) (_3 int32)
//go:noescape
-func Ypthread_setspecific(tls *TLS, k Tpthread_key_t, x uintptr) int32
+func Ypthread_setspecific(tls *TLS, k Tpthread_key_t, x uintptr) (_3 int32)
//go:noescape
-func Ypthread_sigmask(tls *TLS, now int32, set, old uintptr) int32
+func Ypthread_sigmask(tls *TLS, now int32, set, old uintptr) (_3 int32)
//go:noescape
func Yptrace(tls *TLS, req int32, va uintptr) (r int64)
@@ -2934,10 +4397,30 @@ func Ypwritev(tls *TLS, fd int32, iov uintptr, count int32, ofs Toff_t) (r Tssiz
func Ypwritev2(tls *TLS, fd int32, iov uintptr, count int32, ofs Toff_t, flags int32) (r Tssize_t)
//go:noescape
-func Yqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun)
+func Yqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun)
+
+//go:noescape
+func __ccgo_abi0_qsort_3(_0 *TLS, _1 uintptr, _2 uintptr, __ccgo_fp uintptr) (_3 int32)
+
+func __ccgo_abiInternal_qsort_3(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr) (_3 int32) {
+ return __ccgo_abi0_qsort_3(_0, _1, _2, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
-func Yqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr)
+func Yqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr)
+
+//go:noescape
+func __ccgo_abi0_qsort_r_3(_0 *TLS, _1 uintptr, _2 uintptr, _3 uintptr, __ccgo_fp uintptr) (_4 int32)
+
+func __ccgo_abiInternal_qsort_r_3(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr, _3 uintptr) (_4 int32) {
+ return __ccgo_abi0_qsort_r_3(_0, _1, _2, _3, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Yquick_exit(tls *TLS, code int32)
@@ -2958,7 +4441,7 @@ func Yrand_r(tls *TLS, seed uintptr) (r int32)
func Yrandom(tls *TLS) (r int64)
//go:noescape
-func Yrandom_r(t *TLS, buf, result uintptr) int32
+func Yrandom_r(t *TLS, buf, result uintptr) (_2 int32)
//go:noescape
func Yread(tls *TLS, fd int32, buf uintptr, count Tsize_t) (r Tssize_t)
@@ -3057,7 +4540,7 @@ func Yrename(tls *TLS, old uintptr, new1 uintptr) (r int32)
func Yrenameat(tls *TLS, oldfd int32, old uintptr, newfd int32, new1 uintptr) (r int32)
//go:noescape
-func Yrenameat2(t *TLS, olddirfd int32, oldpath uintptr, newdirfd int32, newpath uintptr, flags int32) int32
+func Yrenameat2(t *TLS, olddirfd int32, oldpath uintptr, newdirfd int32, newpath uintptr, flags int32) (_6 int32)
//go:noescape
func Yres_init(tls *TLS) (r int32)
@@ -3126,13 +4609,33 @@ func Yscalbnf(tls *TLS, x float32, n int32) (r float32)
func Yscalbnl(tls *TLS, x float64, n int32) (r float64)
//go:noescape
-func Yscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r int32)
+func Yscandir(tls *TLS, path uintptr, res uintptr, __ccgo_fp_sel uintptr, __ccgo_fp_cmp uintptr) (r int32)
+
+//go:noescape
+func __ccgo_abi0_scandir_2(_0 *TLS, _1 uintptr, __ccgo_fp uintptr) (_2 int32)
+
+func __ccgo_abiInternal_scandir_2(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr) (_2 int32) {
+ return __ccgo_abi0_scandir_2(_0, _1, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
+
+//go:noescape
+func __ccgo_abi0_scandir_3(_0 *TLS, _1 uintptr, _2 uintptr, __ccgo_fp uintptr) (_3 int32)
+
+func __ccgo_abiInternal_scandir_3(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr) (_3 int32) {
+ return __ccgo_abi0_scandir_3(_0, _1, _2, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Yscanf(tls *TLS, fmt uintptr, va uintptr) (r int32)
//go:noescape
-func Ysched_yield(tls *TLS) int32
+func Ysched_yield(tls *TLS) (_1 int32)
//go:noescape
func Ysecure_getenv(tls *TLS, name uintptr) (r uintptr)
@@ -3207,7 +4710,7 @@ func Ysethostname(tls *TLS, name uintptr, len1 Tsize_t) (r int32)
func Ysetitimer(tls *TLS, which int32, new1 uintptr, old uintptr) (r1 int32)
//go:noescape
-func Ysetjmp(t *TLS, env uintptr) int32
+func Ysetjmp(t *TLS, env uintptr) (_2 int32)
//go:noescape
func Ysetkey(tls *TLS, key uintptr)
@@ -3657,7 +5160,7 @@ func Ysyscall(tls *TLS, n int64, va uintptr) (r int64)
func Ysysconf(tls *TLS, name int32) (r int64)
//go:noescape
-func Ysysctlbyname(t *TLS, name, oldp, oldlenp, newp uintptr, newlen Tsize_t) int32
+func Ysysctlbyname(t *TLS, name, oldp, oldlenp, newp uintptr, newlen Tsize_t) (_3 int32)
//go:noescape
func Ysysinfo(tls *TLS, info uintptr) (r int32)
@@ -3666,7 +5169,7 @@ func Ysysinfo(tls *TLS, info uintptr) (r int32)
func Ysyslog(tls *TLS, priority int32, message uintptr, va uintptr)
//go:noescape
-func Ysystem(t *TLS, command uintptr) int32
+func Ysystem(t *TLS, command uintptr) (_2 int32)
//go:noescape
func Ytan(tls *TLS, x3 float64) (r float64)
@@ -3720,10 +5223,30 @@ func Ytcsetpgrp(tls *TLS, fd int32, pgrp Tpid_t) (r int32)
func Ytcsetwinsize(tls *TLS, fd int32, wsz uintptr) (r int32)
//go:noescape
-func Ytdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr)
+func Ytdelete(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr)
//go:noescape
-func Ytdestroy(tls *TLS, root uintptr, freekey uintptr)
+func __ccgo_abi0_tdelete_2(_0 *TLS, _1 uintptr, _2 uintptr, __ccgo_fp uintptr) (_3 int32)
+
+func __ccgo_abiInternal_tdelete_2(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr) (_3 int32) {
+ return __ccgo_abi0_tdelete_2(_0, _1, _2, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
+
+//go:noescape
+func Ytdestroy(tls *TLS, root uintptr, __ccgo_fp_freekey uintptr)
+
+//go:noescape
+func __ccgo_abi0_tdestroy_1(_0 *TLS, _1 uintptr, __ccgo_fp uintptr)
+
+func __ccgo_abiInternal_tdestroy_1(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr) {
+ __ccgo_abi0_tdestroy_1(_0, _1, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Ytee(tls *TLS, src int32, dest int32, len1 Tsize_t, flags uint32) (r Tssize_t)
@@ -3738,7 +5261,17 @@ func Ytempnam(tls *TLS, dir uintptr, pfx uintptr) (r1 uintptr)
func Ytextdomain(tls *TLS, domainname uintptr) (r uintptr)
//go:noescape
-func Ytfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr)
+func Ytfind(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr)
+
+//go:noescape
+func __ccgo_abi0_tfind_2(_0 *TLS, _1 uintptr, _2 uintptr, __ccgo_fp uintptr) (_3 int32)
+
+func __ccgo_abiInternal_tfind_2(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr) (_3 int32) {
+ return __ccgo_abi0_tfind_2(_0, _1, _2, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Ytgamma(tls *TLS, x3 float64) (r1 float64)
@@ -3834,7 +5367,17 @@ func Ytruncf(tls *TLS, x3 float32) (r float32)
func Ytruncl(tls *TLS, x float64) (r float64)
//go:noescape
-func Ytsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr)
+func Ytsearch(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r1 uintptr)
+
+//go:noescape
+func __ccgo_abi0_tsearch_2(_0 *TLS, _1 uintptr, _2 uintptr, __ccgo_fp uintptr) (_3 int32)
+
+func __ccgo_abiInternal_tsearch_2(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 uintptr) (_3 int32) {
+ return __ccgo_abi0_tsearch_2(_0, _1, _2, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Yttyname(tls *TLS, fd int32) (r uintptr)
@@ -3843,7 +5386,17 @@ func Yttyname(tls *TLS, fd int32) (r uintptr)
func Yttyname_r(tls *TLS, fd int32, name uintptr, size Tsize_t) (r int32)
//go:noescape
-func Ytwalk(tls *TLS, root uintptr, action uintptr)
+func Ytwalk(tls *TLS, root uintptr, __ccgo_fp_action uintptr)
+
+//go:noescape
+func __ccgo_abi0_twalk_1(_0 *TLS, _1 uintptr, _2 int32, _3 int32, __ccgo_fp uintptr)
+
+func __ccgo_abiInternal_twalk_1(tls *TLS, dest, abi0CodePtr uintptr) {
+ f := func(_0 *TLS, _1 uintptr, _2 int32, _3 int32) {
+ __ccgo_abi0_twalk_1(_0, _1, _2, _3, abi0CodePtr)
+ }
+ *(*[2]uintptr)(unsafe.Pointer(dest)) = *(*[2]uintptr)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(&f))))
+}
//go:noescape
func Ytzset(tls *TLS)
@@ -3918,7 +5471,7 @@ func Yuuid_copy(t *TLS, dst, src uintptr)
func Yuuid_generate_random(t *TLS, out uintptr)
//go:noescape
-func Yuuid_parse(t *TLS, in uintptr, uu uintptr) int32
+func Yuuid_parse(t *TLS, in uintptr, uu uintptr) (_3 int32)
//go:noescape
func Yuuid_unparse(t *TLS, uu, out uintptr)
diff --git a/vendor/modernc.org/libc/asm_linux_amd64.s b/vendor/modernc.org/libc/abi0_linux_amd64.s
index 0264b405b..0f896ff25 100644
--- a/vendor/modernc.org/libc/asm_linux_amd64.s
+++ b/vendor/modernc.org/libc/abi0_linux_amd64.s
@@ -1,8 +1,140 @@
-// Code generated for linux/amd64 by 'genasm', DO NOT EDIT.
+// Code generated for linux/amd64 by 'qbecc --abi0wrap .', DO NOT EDIT.
#include "funcdata.h"
#include "textflag.h"
+// func Y_Exit(tls *TLS, ec int32)
+TEXT ·Y_Exit(SB),$16-12
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL ec+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X_Exit(SB)
+ RET
+
+// func Y_IO_feof_unlocked(tls *TLS, f uintptr) (r int32)
+TEXT ·Y_IO_feof_unlocked(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X_IO_feof_unlocked(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y_IO_ferror_unlocked(tls *TLS, f uintptr) (r int32)
+TEXT ·Y_IO_ferror_unlocked(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X_IO_ferror_unlocked(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y_IO_getc(tls *TLS, f1 uintptr) (r int32)
+TEXT ·Y_IO_getc(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f1+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X_IO_getc(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y_IO_getc_unlocked(tls *TLS, f uintptr) (r int32)
+TEXT ·Y_IO_getc_unlocked(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X_IO_getc_unlocked(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y_IO_putc(tls *TLS, c1 int32, f1 uintptr) (r int32)
+TEXT ·Y_IO_putc(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c1+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ f1+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X_IO_putc(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y_IO_putc_unlocked(tls *TLS, c int32, f uintptr) (r int32)
+TEXT ·Y_IO_putc_unlocked(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ f+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X_IO_putc_unlocked(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y___errno_location(tls *TLS) (r uintptr)
+TEXT ·Y___errno_location(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X___errno_location(SB)
+ MOVQ 8(SP), AX
+ MOVQ AX, r+8(FP)
+ RET
+
+// func Y__aio_close(tls *TLS, fd int32) (_2 int32)
+TEXT ·Y__aio_close(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL fd+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__aio_close(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
+ RET
+
+// func Y__asctime_r(tls *TLS, tm uintptr, buf uintptr) (r uintptr)
+TEXT ·Y__asctime_r(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ tm+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ buf+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__asctime_r(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
// func Y__assert_fail(tls *TLS, expr uintptr, file uintptr, line int32, func1 uintptr)
TEXT ·Y__assert_fail(SB),$40-40
GO_ARGS
@@ -20,6 +152,1268 @@ TEXT ·Y__assert_fail(SB),$40-40
CALL ·X__assert_fail(SB)
RET
+// func Y__atomic_compare_exchangeInt16(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+TEXT ·Y__atomic_compare_exchangeInt16(SB),$56-52
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ desired+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL weak+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL success+36(FP), AX
+ MOVL AX, 36(SP)
+ MOVL failure+40(FP), AX
+ MOVL AX, 40(SP)
+ CALL ·X__atomic_compare_exchangeInt16(SB)
+ MOVL 48(SP), AX
+ MOVL AX, _3+48(FP)
+ RET
+
+// func Y__atomic_compare_exchangeInt32(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+TEXT ·Y__atomic_compare_exchangeInt32(SB),$56-52
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ desired+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL weak+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL success+36(FP), AX
+ MOVL AX, 36(SP)
+ MOVL failure+40(FP), AX
+ MOVL AX, 40(SP)
+ CALL ·X__atomic_compare_exchangeInt32(SB)
+ MOVL 48(SP), AX
+ MOVL AX, _3+48(FP)
+ RET
+
+// func Y__atomic_compare_exchangeInt64(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+TEXT ·Y__atomic_compare_exchangeInt64(SB),$56-52
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ desired+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL weak+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL success+36(FP), AX
+ MOVL AX, 36(SP)
+ MOVL failure+40(FP), AX
+ MOVL AX, 40(SP)
+ CALL ·X__atomic_compare_exchangeInt64(SB)
+ MOVL 48(SP), AX
+ MOVL AX, _3+48(FP)
+ RET
+
+// func Y__atomic_compare_exchangeInt8(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+TEXT ·Y__atomic_compare_exchangeInt8(SB),$56-52
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ desired+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL weak+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL success+36(FP), AX
+ MOVL AX, 36(SP)
+ MOVL failure+40(FP), AX
+ MOVL AX, 40(SP)
+ CALL ·X__atomic_compare_exchangeInt8(SB)
+ MOVL 48(SP), AX
+ MOVL AX, _3+48(FP)
+ RET
+
+// func Y__atomic_compare_exchangeUint16(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+TEXT ·Y__atomic_compare_exchangeUint16(SB),$56-52
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ desired+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL weak+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL success+36(FP), AX
+ MOVL AX, 36(SP)
+ MOVL failure+40(FP), AX
+ MOVL AX, 40(SP)
+ CALL ·X__atomic_compare_exchangeUint16(SB)
+ MOVL 48(SP), AX
+ MOVL AX, _3+48(FP)
+ RET
+
+// func Y__atomic_compare_exchangeUint32(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+TEXT ·Y__atomic_compare_exchangeUint32(SB),$56-52
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ desired+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL weak+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL success+36(FP), AX
+ MOVL AX, 36(SP)
+ MOVL failure+40(FP), AX
+ MOVL AX, 40(SP)
+ CALL ·X__atomic_compare_exchangeUint32(SB)
+ MOVL 48(SP), AX
+ MOVL AX, _3+48(FP)
+ RET
+
+// func Y__atomic_compare_exchangeUint64(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+TEXT ·Y__atomic_compare_exchangeUint64(SB),$56-52
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ desired+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL weak+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL success+36(FP), AX
+ MOVL AX, 36(SP)
+ MOVL failure+40(FP), AX
+ MOVL AX, 40(SP)
+ CALL ·X__atomic_compare_exchangeUint64(SB)
+ MOVL 48(SP), AX
+ MOVL AX, _3+48(FP)
+ RET
+
+// func Y__atomic_compare_exchangeUint8(t *TLS, ptr, expected, desired uintptr, weak, success, failure int32) (_3 int32)
+TEXT ·Y__atomic_compare_exchangeUint8(SB),$56-52
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ desired+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL weak+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL success+36(FP), AX
+ MOVL AX, 36(SP)
+ MOVL failure+40(FP), AX
+ MOVL AX, 40(SP)
+ CALL ·X__atomic_compare_exchangeUint8(SB)
+ MOVL 48(SP), AX
+ MOVL AX, _3+48(FP)
+ RET
+
+// func Y__atomic_exchangeInt16(t *TLS, ptr, val, ret uintptr, _ int32)
+TEXT ·Y__atomic_exchangeInt16(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ret+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL _+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__atomic_exchangeInt16(SB)
+ RET
+
+// func Y__atomic_exchangeInt32(t *TLS, ptr, val, ret uintptr, _ int32)
+TEXT ·Y__atomic_exchangeInt32(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ret+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL _+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__atomic_exchangeInt32(SB)
+ RET
+
+// func Y__atomic_exchangeInt64(t *TLS, ptr, val, ret uintptr, _ int32)
+TEXT ·Y__atomic_exchangeInt64(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ret+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL _+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__atomic_exchangeInt64(SB)
+ RET
+
+// func Y__atomic_exchangeInt8(t *TLS, ptr, val, ret uintptr, _ int32)
+TEXT ·Y__atomic_exchangeInt8(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ret+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL _+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__atomic_exchangeInt8(SB)
+ RET
+
+// func Y__atomic_exchangeUint16(t *TLS, ptr, val, ret uintptr, _ int32)
+TEXT ·Y__atomic_exchangeUint16(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ret+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL _+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__atomic_exchangeUint16(SB)
+ RET
+
+// func Y__atomic_exchangeUint32(t *TLS, ptr, val, ret uintptr, _ int32)
+TEXT ·Y__atomic_exchangeUint32(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ret+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL _+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__atomic_exchangeUint32(SB)
+ RET
+
+// func Y__atomic_exchangeUint64(t *TLS, ptr, val, ret uintptr, _ int32)
+TEXT ·Y__atomic_exchangeUint64(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ret+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL _+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__atomic_exchangeUint64(SB)
+ RET
+
+// func Y__atomic_exchangeUint8(t *TLS, ptr, val, ret uintptr, _ int32)
+TEXT ·Y__atomic_exchangeUint8(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ret+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL _+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__atomic_exchangeUint8(SB)
+ RET
+
+// func Y__atomic_fetch_addInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+TEXT ·Y__atomic_fetch_addInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_addInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_addInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+TEXT ·Y__atomic_fetch_addInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_addInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_addInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+TEXT ·Y__atomic_fetch_addInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_fetch_addInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__atomic_fetch_addInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+TEXT ·Y__atomic_fetch_addInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_addInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_addUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+TEXT ·Y__atomic_fetch_addUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_addUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_addUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+TEXT ·Y__atomic_fetch_addUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_addUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_addUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+TEXT ·Y__atomic_fetch_addUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_fetch_addUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__atomic_fetch_addUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+TEXT ·Y__atomic_fetch_addUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_addUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_andInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+TEXT ·Y__atomic_fetch_andInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_andInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_andInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+TEXT ·Y__atomic_fetch_andInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_andInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_andInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+TEXT ·Y__atomic_fetch_andInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_fetch_andInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__atomic_fetch_andInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+TEXT ·Y__atomic_fetch_andInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_andInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_andUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+TEXT ·Y__atomic_fetch_andUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_andUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_andUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+TEXT ·Y__atomic_fetch_andUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_andUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_andUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+TEXT ·Y__atomic_fetch_andUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_fetch_andUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__atomic_fetch_andUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+TEXT ·Y__atomic_fetch_andUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_andUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_orInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+TEXT ·Y__atomic_fetch_orInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_orInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_orInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+TEXT ·Y__atomic_fetch_orInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_orInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_orInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+TEXT ·Y__atomic_fetch_orInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_fetch_orInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__atomic_fetch_orInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+TEXT ·Y__atomic_fetch_orInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_orInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_orUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+TEXT ·Y__atomic_fetch_orUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_orUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_orUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+TEXT ·Y__atomic_fetch_orUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_orUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_orUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+TEXT ·Y__atomic_fetch_orUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_fetch_orUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__atomic_fetch_orUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+TEXT ·Y__atomic_fetch_orUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_orUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_subInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+TEXT ·Y__atomic_fetch_subInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_subInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_subInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+TEXT ·Y__atomic_fetch_subInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_subInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_subInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+TEXT ·Y__atomic_fetch_subInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_fetch_subInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__atomic_fetch_subInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+TEXT ·Y__atomic_fetch_subInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_subInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_subUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+TEXT ·Y__atomic_fetch_subUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_subUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_subUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+TEXT ·Y__atomic_fetch_subUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_subUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_subUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+TEXT ·Y__atomic_fetch_subUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_fetch_subUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__atomic_fetch_subUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+TEXT ·Y__atomic_fetch_subUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_subUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_xorInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+TEXT ·Y__atomic_fetch_xorInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_xorInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_xorInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+TEXT ·Y__atomic_fetch_xorInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_xorInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_xorInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+TEXT ·Y__atomic_fetch_xorInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_fetch_xorInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__atomic_fetch_xorInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+TEXT ·Y__atomic_fetch_xorInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_xorInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_xorUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+TEXT ·Y__atomic_fetch_xorUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_xorUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_xorUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+TEXT ·Y__atomic_fetch_xorUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_xorUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__atomic_fetch_xorUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+TEXT ·Y__atomic_fetch_xorUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_fetch_xorUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__atomic_fetch_xorUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+TEXT ·Y__atomic_fetch_xorUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__atomic_fetch_xorUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__atomic_loadInt16(t *TLS, ptr, ret uintptr, memorder int32)
+TEXT ·Y__atomic_loadInt16(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ ret+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_loadInt16(SB)
+ RET
+
+// func Y__atomic_loadInt32(t *TLS, ptr, ret uintptr, memorder int32)
+TEXT ·Y__atomic_loadInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ ret+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_loadInt32(SB)
+ RET
+
+// func Y__atomic_loadInt64(t *TLS, ptr, ret uintptr, memorder int32)
+TEXT ·Y__atomic_loadInt64(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ ret+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_loadInt64(SB)
+ RET
+
+// func Y__atomic_loadInt8(t *TLS, ptr, ret uintptr, memorder int32)
+TEXT ·Y__atomic_loadInt8(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ ret+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_loadInt8(SB)
+ RET
+
+// func Y__atomic_loadUint16(t *TLS, ptr, ret uintptr, memorder int32)
+TEXT ·Y__atomic_loadUint16(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ ret+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_loadUint16(SB)
+ RET
+
+// func Y__atomic_loadUint32(t *TLS, ptr, ret uintptr, memorder int32)
+TEXT ·Y__atomic_loadUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ ret+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_loadUint32(SB)
+ RET
+
+// func Y__atomic_loadUint64(t *TLS, ptr, ret uintptr, memorder int32)
+TEXT ·Y__atomic_loadUint64(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ ret+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_loadUint64(SB)
+ RET
+
+// func Y__atomic_loadUint8(t *TLS, ptr, ret uintptr, memorder int32)
+TEXT ·Y__atomic_loadUint8(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ ret+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_loadUint8(SB)
+ RET
+
+// func Y__atomic_storeInt16(t *TLS, ptr, val uintptr, memorder int32)
+TEXT ·Y__atomic_storeInt16(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_storeInt16(SB)
+ RET
+
+// func Y__atomic_storeInt32(t *TLS, ptr, val uintptr, memorder int32)
+TEXT ·Y__atomic_storeInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_storeInt32(SB)
+ RET
+
+// func Y__atomic_storeInt64(t *TLS, ptr, val uintptr, memorder int32)
+TEXT ·Y__atomic_storeInt64(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_storeInt64(SB)
+ RET
+
+// func Y__atomic_storeInt8(t *TLS, ptr, val uintptr, memorder int32)
+TEXT ·Y__atomic_storeInt8(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_storeInt8(SB)
+ RET
+
+// func Y__atomic_storeUint16(t *TLS, ptr, val uintptr, memorder int32)
+TEXT ·Y__atomic_storeUint16(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_storeUint16(SB)
+ RET
+
+// func Y__atomic_storeUint32(t *TLS, ptr, val uintptr, memorder int32)
+TEXT ·Y__atomic_storeUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_storeUint32(SB)
+ RET
+
+// func Y__atomic_storeUint64(t *TLS, ptr, val uintptr, memorder int32)
+TEXT ·Y__atomic_storeUint64(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_storeUint64(SB)
+ RET
+
+// func Y__atomic_storeUint8(t *TLS, ptr, val uintptr, memorder int32)
+TEXT ·Y__atomic_storeUint8(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__atomic_storeUint8(SB)
+ RET
+
+// func Y__block_all_sigs(tls *TLS, set uintptr)
+TEXT ·Y__block_all_sigs(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ set+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__block_all_sigs(SB)
+ RET
+
+// func Y__block_app_sigs(tls *TLS, set uintptr)
+TEXT ·Y__block_app_sigs(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ set+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__block_app_sigs(SB)
+ RET
+
// func Y__builtin___memcpy_chk(t *TLS, dest, src uintptr, n, os Tsize_t) (r uintptr)
TEXT ·Y__builtin___memcpy_chk(SB),$48-48
GO_ARGS
@@ -39,7 +1433,7 @@ TEXT ·Y__builtin___memcpy_chk(SB),$48-48
MOVQ AX, r+40(FP)
RET
-// func Y__builtin___memmove_chk(t *TLS, dest, src uintptr, n, os Tsize_t) uintptr
+// func Y__builtin___memmove_chk(t *TLS, dest, src uintptr, n, os Tsize_t) (_3 uintptr)
TEXT ·Y__builtin___memmove_chk(SB),$48-48
GO_ARGS
NO_LOCAL_POINTERS
@@ -55,10 +1449,10 @@ TEXT ·Y__builtin___memmove_chk(SB),$48-48
MOVQ AX, 32(SP)
CALL ·X__builtin___memmove_chk(SB)
MOVQ 40(SP), AX
- MOVQ AX, ret+40(FP)
+ MOVQ AX, _3+40(FP)
RET
-// func Y__builtin___memset_chk(t *TLS, s uintptr, c int32, n, os Tsize_t) uintptr
+// func Y__builtin___memset_chk(t *TLS, s uintptr, c int32, n, os Tsize_t) (_4 uintptr)
TEXT ·Y__builtin___memset_chk(SB),$48-48
GO_ARGS
NO_LOCAL_POINTERS
@@ -74,7 +1468,7 @@ TEXT ·Y__builtin___memset_chk(SB),$48-48
MOVQ AX, 32(SP)
CALL ·X__builtin___memset_chk(SB)
MOVQ 40(SP), AX
- MOVQ AX, ret+40(FP)
+ MOVQ AX, _4+40(FP)
RET
// func Y__builtin___snprintf_chk(t *TLS, str uintptr, maxlen Tsize_t, flag int32, os Tsize_t, format, args uintptr) (r int32)
@@ -138,7 +1532,7 @@ TEXT ·Y__builtin___strcat_chk(SB),$40-40
MOVQ AX, r+32(FP)
RET
-// func Y__builtin___strcpy_chk(t *TLS, dest, src uintptr, os Tsize_t) uintptr
+// func Y__builtin___strcpy_chk(t *TLS, dest, src uintptr, os Tsize_t) (_3 uintptr)
TEXT ·Y__builtin___strcpy_chk(SB),$40-40
GO_ARGS
NO_LOCAL_POINTERS
@@ -152,7 +1546,7 @@ TEXT ·Y__builtin___strcpy_chk(SB),$40-40
MOVQ AX, 24(SP)
CALL ·X__builtin___strcpy_chk(SB)
MOVQ 32(SP), AX
- MOVQ AX, ret+32(FP)
+ MOVQ AX, _3+32(FP)
RET
// func Y__builtin___strncpy_chk(t *TLS, dest, src uintptr, n, os Tsize_t) (r uintptr)
@@ -206,7 +1600,7 @@ TEXT ·Y__builtin_abort(SB),$8-8
CALL ·X__builtin_abort(SB)
RET
-// func Y__builtin_abs(t *TLS, j int32) int32
+// func Y__builtin_abs(t *TLS, j int32) (_2 int32)
TEXT ·Y__builtin_abs(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -216,10 +1610,10 @@ TEXT ·Y__builtin_abs(SB),$24-20
MOVL AX, 8(SP)
CALL ·X__builtin_abs(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_add_overflowInt64(t *TLS, a, b int64, res uintptr) int32
+// func Y__builtin_add_overflowInt64(t *TLS, a, b int64, res uintptr) (_3 int32)
TEXT ·Y__builtin_add_overflowInt64(SB),$40-36
GO_ARGS
NO_LOCAL_POINTERS
@@ -233,10 +1627,10 @@ TEXT ·Y__builtin_add_overflowInt64(SB),$40-36
MOVQ AX, 24(SP)
CALL ·X__builtin_add_overflowInt64(SB)
MOVL 32(SP), AX
- MOVL AX, ret+32(FP)
+ MOVL AX, _3+32(FP)
RET
-// func Y__builtin_add_overflowUint32(t *TLS, a, b uint32, res uintptr) int32
+// func Y__builtin_add_overflowUint32(t *TLS, a, b uint32, res uintptr) (_3 int32)
TEXT ·Y__builtin_add_overflowUint32(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -250,10 +1644,10 @@ TEXT ·Y__builtin_add_overflowUint32(SB),$32-28
MOVQ AX, 16(SP)
CALL ·X__builtin_add_overflowUint32(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _3+24(FP)
RET
-// func Y__builtin_add_overflowUint64(t *TLS, a, b uint64, res uintptr) int32
+// func Y__builtin_add_overflowUint64(t *TLS, a, b uint64, res uintptr) (_3 int32)
TEXT ·Y__builtin_add_overflowUint64(SB),$40-36
GO_ARGS
NO_LOCAL_POINTERS
@@ -267,10 +1661,10 @@ TEXT ·Y__builtin_add_overflowUint64(SB),$40-36
MOVQ AX, 24(SP)
CALL ·X__builtin_add_overflowUint64(SB)
MOVL 32(SP), AX
- MOVL AX, ret+32(FP)
+ MOVL AX, _3+32(FP)
RET
-// func Y__builtin_alloca(tls *TLS, size Tsize_t) uintptr
+// func Y__builtin_alloca(tls *TLS, size Tsize_t) (_2 uintptr)
TEXT ·Y__builtin_alloca(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -280,10 +1674,10 @@ TEXT ·Y__builtin_alloca(SB),$24-24
MOVQ AX, 8(SP)
CALL ·X__builtin_alloca(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
-// func Y__builtin_bswap16(t *TLS, x uint16) uint16
+// func Y__builtin_bswap16(t *TLS, x uint16) (_2 uint16)
TEXT ·Y__builtin_bswap16(SB),$24-18
GO_ARGS
NO_LOCAL_POINTERS
@@ -293,10 +1687,10 @@ TEXT ·Y__builtin_bswap16(SB),$24-18
MOVW AX, 8(SP)
CALL ·X__builtin_bswap16(SB)
MOVW 16(SP), AX
- MOVW AX, ret+16(FP)
+ MOVW AX, _2+16(FP)
RET
-// func Y__builtin_bswap32(t *TLS, x uint32) uint32
+// func Y__builtin_bswap32(t *TLS, x uint32) (_2 uint32)
TEXT ·Y__builtin_bswap32(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -306,10 +1700,10 @@ TEXT ·Y__builtin_bswap32(SB),$24-20
MOVL AX, 8(SP)
CALL ·X__builtin_bswap32(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_bswap64(t *TLS, x uint64) uint64
+// func Y__builtin_bswap64(t *TLS, x uint64) (_2 uint64)
TEXT ·Y__builtin_bswap64(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -319,7 +1713,7 @@ TEXT ·Y__builtin_bswap64(SB),$24-24
MOVQ AX, 8(SP)
CALL ·X__builtin_bswap64(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
// func Y__builtin_bzero(t *TLS, s uintptr, n Tsize_t)
@@ -335,7 +1729,7 @@ TEXT ·Y__builtin_bzero(SB),$24-24
CALL ·X__builtin_bzero(SB)
RET
-// func Y__builtin_clz(t *TLS, n uint32) int32
+// func Y__builtin_clz(t *TLS, n uint32) (_2 int32)
TEXT ·Y__builtin_clz(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -345,10 +1739,10 @@ TEXT ·Y__builtin_clz(SB),$24-20
MOVL AX, 8(SP)
CALL ·X__builtin_clz(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_clzl(t *TLS, n ulong) int32
+// func Y__builtin_clzl(t *TLS, n ulong) (_2 int32)
TEXT ·Y__builtin_clzl(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -358,10 +1752,10 @@ TEXT ·Y__builtin_clzl(SB),$24-20
MOVQ AX, 8(SP)
CALL ·X__builtin_clzl(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_clzll(t *TLS, n uint64) int32
+// func Y__builtin_clzll(t *TLS, n uint64) (_2 int32)
TEXT ·Y__builtin_clzll(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -371,10 +1765,10 @@ TEXT ·Y__builtin_clzll(SB),$24-20
MOVQ AX, 8(SP)
CALL ·X__builtin_clzll(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_copysign(t *TLS, x, y float64) float64
+// func Y__builtin_copysign(t *TLS, x, y float64) (_2 float64)
TEXT ·Y__builtin_copysign(SB),$32-32
GO_ARGS
NO_LOCAL_POINTERS
@@ -386,10 +1780,10 @@ TEXT ·Y__builtin_copysign(SB),$32-32
MOVQ AX, 16(SP)
CALL ·X__builtin_copysign(SB)
MOVQ 24(SP), AX
- MOVQ AX, ret+24(FP)
+ MOVQ AX, _2+24(FP)
RET
-// func Y__builtin_copysignf(t *TLS, x, y float32) float32
+// func Y__builtin_copysignf(t *TLS, x, y float32) (_2 float32)
TEXT ·Y__builtin_copysignf(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -401,10 +1795,10 @@ TEXT ·Y__builtin_copysignf(SB),$24-20
MOVL AX, 12(SP)
CALL ·X__builtin_copysignf(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_copysignl(t *TLS, x, y float64) float64
+// func Y__builtin_copysignl(t *TLS, x, y float64) (_2 float64)
TEXT ·Y__builtin_copysignl(SB),$32-32
GO_ARGS
NO_LOCAL_POINTERS
@@ -416,10 +1810,10 @@ TEXT ·Y__builtin_copysignl(SB),$32-32
MOVQ AX, 16(SP)
CALL ·X__builtin_copysignl(SB)
MOVQ 24(SP), AX
- MOVQ AX, ret+24(FP)
+ MOVQ AX, _2+24(FP)
RET
-// func Y__builtin_ctz(t *TLS, n uint32) int32
+// func Y__builtin_ctz(t *TLS, n uint32) (_2 int32)
TEXT ·Y__builtin_ctz(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -429,10 +1823,10 @@ TEXT ·Y__builtin_ctz(SB),$24-20
MOVL AX, 8(SP)
CALL ·X__builtin_ctz(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_ctzl(tls *TLS, x ulong) int32
+// func Y__builtin_ctzl(tls *TLS, x ulong) (_2 int32)
TEXT ·Y__builtin_ctzl(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -442,7 +1836,7 @@ TEXT ·Y__builtin_ctzl(SB),$24-20
MOVQ AX, 8(SP)
CALL ·X__builtin_ctzl(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
// func Y__builtin_exit(t *TLS, status int32)
@@ -456,7 +1850,7 @@ TEXT ·Y__builtin_exit(SB),$16-12
CALL ·X__builtin_exit(SB)
RET
-// func Y__builtin_expect(t *TLS, exp, c long) long
+// func Y__builtin_expect(t *TLS, exp, c long) (_2 long)
TEXT ·Y__builtin_expect(SB),$32-32
GO_ARGS
NO_LOCAL_POINTERS
@@ -468,10 +1862,10 @@ TEXT ·Y__builtin_expect(SB),$32-32
MOVQ AX, 16(SP)
CALL ·X__builtin_expect(SB)
MOVQ 24(SP), AX
- MOVQ AX, ret+24(FP)
+ MOVQ AX, _2+24(FP)
RET
-// func Y__builtin_fabs(t *TLS, x float64) float64
+// func Y__builtin_fabs(t *TLS, x float64) (_2 float64)
TEXT ·Y__builtin_fabs(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -481,10 +1875,10 @@ TEXT ·Y__builtin_fabs(SB),$24-24
MOVQ AX, 8(SP)
CALL ·X__builtin_fabs(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
-// func Y__builtin_fabsf(t *TLS, x float32) float32
+// func Y__builtin_fabsf(t *TLS, x float32) (_2 float32)
TEXT ·Y__builtin_fabsf(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -494,10 +1888,10 @@ TEXT ·Y__builtin_fabsf(SB),$24-20
MOVL AX, 8(SP)
CALL ·X__builtin_fabsf(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_fabsl(t *TLS, x float64) float64
+// func Y__builtin_fabsl(t *TLS, x float64) (_2 float64)
TEXT ·Y__builtin_fabsl(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -507,7 +1901,7 @@ TEXT ·Y__builtin_fabsl(SB),$24-24
MOVQ AX, 8(SP)
CALL ·X__builtin_fabsl(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
// func Y__builtin_ffs(tls *TLS, i int32) (r int32)
@@ -581,7 +1975,7 @@ TEXT ·Y__builtin_free(SB),$16-16
CALL ·X__builtin_free(SB)
RET
-// func Y__builtin_getentropy(t *TLS, buf uintptr, n Tsize_t) int32
+// func Y__builtin_getentropy(t *TLS, buf uintptr, n Tsize_t) (_3 int32)
TEXT ·Y__builtin_getentropy(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -593,10 +1987,10 @@ TEXT ·Y__builtin_getentropy(SB),$32-28
MOVQ AX, 16(SP)
CALL ·X__builtin_getentropy(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _3+24(FP)
RET
-// func Y__builtin_huge_val(t *TLS) float64
+// func Y__builtin_huge_val(t *TLS) (_1 float64)
TEXT ·Y__builtin_huge_val(SB),$16-16
GO_ARGS
NO_LOCAL_POINTERS
@@ -604,10 +1998,10 @@ TEXT ·Y__builtin_huge_val(SB),$16-16
MOVQ AX, 0(SP)
CALL ·X__builtin_huge_val(SB)
MOVQ 8(SP), AX
- MOVQ AX, ret+8(FP)
+ MOVQ AX, _1+8(FP)
RET
-// func Y__builtin_huge_valf(t *TLS) float32
+// func Y__builtin_huge_valf(t *TLS) (_1 float32)
TEXT ·Y__builtin_huge_valf(SB),$16-12
GO_ARGS
NO_LOCAL_POINTERS
@@ -615,7 +2009,7 @@ TEXT ·Y__builtin_huge_valf(SB),$16-12
MOVQ AX, 0(SP)
CALL ·X__builtin_huge_valf(SB)
MOVL 8(SP), AX
- MOVL AX, ret+8(FP)
+ MOVL AX, _1+8(FP)
RET
// func Y__builtin_hypot(tls *TLS, x float64, y float64) (r float64)
@@ -633,7 +2027,7 @@ TEXT ·Y__builtin_hypot(SB),$32-32
MOVQ AX, r+24(FP)
RET
-// func Y__builtin_inf(t *TLS) float64
+// func Y__builtin_inf(t *TLS) (_1 float64)
TEXT ·Y__builtin_inf(SB),$16-16
GO_ARGS
NO_LOCAL_POINTERS
@@ -641,10 +2035,10 @@ TEXT ·Y__builtin_inf(SB),$16-16
MOVQ AX, 0(SP)
CALL ·X__builtin_inf(SB)
MOVQ 8(SP), AX
- MOVQ AX, ret+8(FP)
+ MOVQ AX, _1+8(FP)
RET
-// func Y__builtin_inff(tls *TLS) float32
+// func Y__builtin_inff(tls *TLS) (_1 float32)
TEXT ·Y__builtin_inff(SB),$16-12
GO_ARGS
NO_LOCAL_POINTERS
@@ -652,10 +2046,10 @@ TEXT ·Y__builtin_inff(SB),$16-12
MOVQ AX, 0(SP)
CALL ·X__builtin_inff(SB)
MOVL 8(SP), AX
- MOVL AX, ret+8(FP)
+ MOVL AX, _1+8(FP)
RET
-// func Y__builtin_infl(t *TLS) float64
+// func Y__builtin_infl(t *TLS) (_1 float64)
TEXT ·Y__builtin_infl(SB),$16-16
GO_ARGS
NO_LOCAL_POINTERS
@@ -663,7 +2057,7 @@ TEXT ·Y__builtin_infl(SB),$16-16
MOVQ AX, 0(SP)
CALL ·X__builtin_infl(SB)
MOVQ 8(SP), AX
- MOVQ AX, ret+8(FP)
+ MOVQ AX, _1+8(FP)
RET
// func Y__builtin_isblank(tls *TLS, c int32) (r int32)
@@ -679,7 +2073,7 @@ TEXT ·Y__builtin_isblank(SB),$24-20
MOVL AX, r+16(FP)
RET
-// func Y__builtin_isnan(t *TLS, x float64) int32
+// func Y__builtin_isnan(t *TLS, x float64) (_2 int32)
TEXT ·Y__builtin_isnan(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -689,10 +2083,10 @@ TEXT ·Y__builtin_isnan(SB),$24-20
MOVQ AX, 8(SP)
CALL ·X__builtin_isnan(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_isnanf(t *TLS, x float32) int32
+// func Y__builtin_isnanf(t *TLS, x float32) (_2 int32)
TEXT ·Y__builtin_isnanf(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -702,10 +2096,10 @@ TEXT ·Y__builtin_isnanf(SB),$24-20
MOVL AX, 8(SP)
CALL ·X__builtin_isnanf(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_isnanl(t *TLS, x float64) int32
+// func Y__builtin_isnanl(t *TLS, x float64) (_2 int32)
TEXT ·Y__builtin_isnanl(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -715,7 +2109,7 @@ TEXT ·Y__builtin_isnanl(SB),$24-20
MOVQ AX, 8(SP)
CALL ·X__builtin_isnanl(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
// func Y__builtin_isprint(tls *TLS, c int32) (r int32)
@@ -731,7 +2125,7 @@ TEXT ·Y__builtin_isprint(SB),$24-20
MOVL AX, r+16(FP)
RET
-// func Y__builtin_isunordered(t *TLS, a, b float64) int32
+// func Y__builtin_isunordered(t *TLS, a, b float64) (_2 int32)
TEXT ·Y__builtin_isunordered(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -743,10 +2137,10 @@ TEXT ·Y__builtin_isunordered(SB),$32-28
MOVQ AX, 16(SP)
CALL ·X__builtin_isunordered(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _2+24(FP)
RET
-// func Y__builtin_llabs(tls *TLS, a int64) int64
+// func Y__builtin_llabs(tls *TLS, a int64) (_2 int64)
TEXT ·Y__builtin_llabs(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -756,10 +2150,10 @@ TEXT ·Y__builtin_llabs(SB),$24-24
MOVQ AX, 8(SP)
CALL ·X__builtin_llabs(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
-// func Y__builtin_log2(t *TLS, x float64) float64
+// func Y__builtin_log2(t *TLS, x float64) (_2 float64)
TEXT ·Y__builtin_log2(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -769,7 +2163,7 @@ TEXT ·Y__builtin_log2(SB),$24-24
MOVQ AX, 8(SP)
CALL ·X__builtin_log2(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
// func Y__builtin_lrint(tls *TLS, x float64) (r long)
@@ -811,7 +2205,7 @@ TEXT ·Y__builtin_lround(SB),$24-24
MOVQ AX, r+16(FP)
RET
-// func Y__builtin_malloc(t *TLS, size Tsize_t) uintptr
+// func Y__builtin_malloc(t *TLS, size Tsize_t) (_2 uintptr)
TEXT ·Y__builtin_malloc(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -821,10 +2215,10 @@ TEXT ·Y__builtin_malloc(SB),$24-24
MOVQ AX, 8(SP)
CALL ·X__builtin_malloc(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
-// func Y__builtin_memcmp(t *TLS, s1, s2 uintptr, n Tsize_t) int32
+// func Y__builtin_memcmp(t *TLS, s1, s2 uintptr, n Tsize_t) (_3 int32)
TEXT ·Y__builtin_memcmp(SB),$40-36
GO_ARGS
NO_LOCAL_POINTERS
@@ -838,7 +2232,7 @@ TEXT ·Y__builtin_memcmp(SB),$40-36
MOVQ AX, 24(SP)
CALL ·X__builtin_memcmp(SB)
MOVL 32(SP), AX
- MOVL AX, ret+32(FP)
+ MOVL AX, _3+32(FP)
RET
// func Y__builtin_memcpy(t *TLS, dest, src uintptr, n Tsize_t) (r uintptr)
@@ -858,7 +2252,7 @@ TEXT ·Y__builtin_memcpy(SB),$40-40
MOVQ AX, r+32(FP)
RET
-// func Y__builtin_memset(t *TLS, s uintptr, c int32, n Tsize_t) uintptr
+// func Y__builtin_memset(t *TLS, s uintptr, c int32, n Tsize_t) (_4 uintptr)
TEXT ·Y__builtin_memset(SB),$40-40
GO_ARGS
NO_LOCAL_POINTERS
@@ -872,10 +2266,10 @@ TEXT ·Y__builtin_memset(SB),$40-40
MOVQ AX, 24(SP)
CALL ·X__builtin_memset(SB)
MOVQ 32(SP), AX
- MOVQ AX, ret+32(FP)
+ MOVQ AX, _4+32(FP)
RET
-// func Y__builtin_mmap(t *TLS, addr uintptr, length Tsize_t, prot, flags, fd int32, offset Toff_t) uintptr
+// func Y__builtin_mmap(t *TLS, addr uintptr, length Tsize_t, prot, flags, fd int32, offset Toff_t) (_5 uintptr)
TEXT ·Y__builtin_mmap(SB),$56-56
GO_ARGS
NO_LOCAL_POINTERS
@@ -895,10 +2289,10 @@ TEXT ·Y__builtin_mmap(SB),$56-56
MOVQ AX, 40(SP)
CALL ·X__builtin_mmap(SB)
MOVQ 48(SP), AX
- MOVQ AX, ret+48(FP)
+ MOVQ AX, _5+48(FP)
RET
-// func Y__builtin_mul_overflowInt64(t *TLS, a, b int64, res uintptr) int32
+// func Y__builtin_mul_overflowInt64(t *TLS, a, b int64, res uintptr) (_3 int32)
TEXT ·Y__builtin_mul_overflowInt64(SB),$40-36
GO_ARGS
NO_LOCAL_POINTERS
@@ -912,10 +2306,10 @@ TEXT ·Y__builtin_mul_overflowInt64(SB),$40-36
MOVQ AX, 24(SP)
CALL ·X__builtin_mul_overflowInt64(SB)
MOVL 32(SP), AX
- MOVL AX, ret+32(FP)
+ MOVL AX, _3+32(FP)
RET
-// func Y__builtin_mul_overflowUint128(t *TLS, a, b Uint128, res uintptr) int32
+// func Y__builtin_mul_overflowUint128(t *TLS, a, b Uint128, res uintptr) (_3 int32)
TEXT ·Y__builtin_mul_overflowUint128(SB),$56-52
GO_ARGS
NO_LOCAL_POINTERS
@@ -933,10 +2327,10 @@ TEXT ·Y__builtin_mul_overflowUint128(SB),$56-52
MOVQ AX, 40(SP)
CALL ·X__builtin_mul_overflowUint128(SB)
MOVL 48(SP), AX
- MOVL AX, ret+48(FP)
+ MOVL AX, _3+48(FP)
RET
-// func Y__builtin_mul_overflowUint64(t *TLS, a, b uint64, res uintptr) int32
+// func Y__builtin_mul_overflowUint64(t *TLS, a, b uint64, res uintptr) (_3 int32)
TEXT ·Y__builtin_mul_overflowUint64(SB),$40-36
GO_ARGS
NO_LOCAL_POINTERS
@@ -950,10 +2344,10 @@ TEXT ·Y__builtin_mul_overflowUint64(SB),$40-36
MOVQ AX, 24(SP)
CALL ·X__builtin_mul_overflowUint64(SB)
MOVL 32(SP), AX
- MOVL AX, ret+32(FP)
+ MOVL AX, _3+32(FP)
RET
-// func Y__builtin_nan(t *TLS, s uintptr) float64
+// func Y__builtin_nan(t *TLS, s uintptr) (_2 float64)
TEXT ·Y__builtin_nan(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -963,10 +2357,10 @@ TEXT ·Y__builtin_nan(SB),$24-24
MOVQ AX, 8(SP)
CALL ·X__builtin_nan(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
-// func Y__builtin_nanf(tls *TLS, s uintptr) float32
+// func Y__builtin_nanf(tls *TLS, s uintptr) (_2 float32)
TEXT ·Y__builtin_nanf(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -976,10 +2370,10 @@ TEXT ·Y__builtin_nanf(SB),$24-20
MOVQ AX, 8(SP)
CALL ·X__builtin_nanf(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_nanl(t *TLS, s uintptr) float64
+// func Y__builtin_nanl(t *TLS, s uintptr) (_2 float64)
TEXT ·Y__builtin_nanl(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -989,10 +2383,10 @@ TEXT ·Y__builtin_nanl(SB),$24-24
MOVQ AX, 8(SP)
CALL ·X__builtin_nanl(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
-// func Y__builtin_object_size(t *TLS, p uintptr, typ int32) Tsize_t
+// func Y__builtin_object_size(t *TLS, p uintptr, typ int32) (_3 Tsize_t)
TEXT ·Y__builtin_object_size(SB),$32-32
GO_ARGS
NO_LOCAL_POINTERS
@@ -1004,10 +2398,10 @@ TEXT ·Y__builtin_object_size(SB),$32-32
MOVL AX, 16(SP)
CALL ·X__builtin_object_size(SB)
MOVQ 24(SP), AX
- MOVQ AX, ret+24(FP)
+ MOVQ AX, _3+24(FP)
RET
-// func Y__builtin_popcount(t *TLS, x uint32) int32
+// func Y__builtin_popcount(t *TLS, x uint32) (_2 int32)
TEXT ·Y__builtin_popcount(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -1017,10 +2411,10 @@ TEXT ·Y__builtin_popcount(SB),$24-20
MOVL AX, 8(SP)
CALL ·X__builtin_popcount(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Y__builtin_popcountl(t *TLS, x ulong) int32
+// func Y__builtin_popcountl(t *TLS, x ulong) (_2 int32)
TEXT ·Y__builtin_popcountl(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -1030,7 +2424,7 @@ TEXT ·Y__builtin_popcountl(SB),$24-20
MOVQ AX, 8(SP)
CALL ·X__builtin_popcountl(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
// func Y__builtin_prefetch(t *TLS, addr, args uintptr)
@@ -1100,7 +2494,7 @@ TEXT ·Y__builtin_roundf(SB),$24-20
MOVL AX, r+16(FP)
RET
-// func Y__builtin_snprintf(t *TLS, str uintptr, size Tsize_t, format, args uintptr) int32
+// func Y__builtin_snprintf(t *TLS, str uintptr, size Tsize_t, format, args uintptr) (_4 int32)
TEXT ·Y__builtin_snprintf(SB),$48-44
GO_ARGS
NO_LOCAL_POINTERS
@@ -1116,7 +2510,7 @@ TEXT ·Y__builtin_snprintf(SB),$48-44
MOVQ AX, 32(SP)
CALL ·X__builtin_snprintf(SB)
MOVL 40(SP), AX
- MOVL AX, ret+40(FP)
+ MOVL AX, _4+40(FP)
RET
// func Y__builtin_sprintf(t *TLS, str, format, args uintptr) (r int32)
@@ -1136,7 +2530,7 @@ TEXT ·Y__builtin_sprintf(SB),$40-36
MOVL AX, r+32(FP)
RET
-// func Y__builtin_strchr(t *TLS, s uintptr, c int32) uintptr
+// func Y__builtin_strchr(t *TLS, s uintptr, c int32) (_3 uintptr)
TEXT ·Y__builtin_strchr(SB),$32-32
GO_ARGS
NO_LOCAL_POINTERS
@@ -1148,10 +2542,10 @@ TEXT ·Y__builtin_strchr(SB),$32-32
MOVL AX, 16(SP)
CALL ·X__builtin_strchr(SB)
MOVQ 24(SP), AX
- MOVQ AX, ret+24(FP)
+ MOVQ AX, _3+24(FP)
RET
-// func Y__builtin_strcmp(t *TLS, s1, s2 uintptr) int32
+// func Y__builtin_strcmp(t *TLS, s1, s2 uintptr) (_2 int32)
TEXT ·Y__builtin_strcmp(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -1163,10 +2557,10 @@ TEXT ·Y__builtin_strcmp(SB),$32-28
MOVQ AX, 16(SP)
CALL ·X__builtin_strcmp(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _2+24(FP)
RET
-// func Y__builtin_strcpy(t *TLS, dest, src uintptr) uintptr
+// func Y__builtin_strcpy(t *TLS, dest, src uintptr) (_2 uintptr)
TEXT ·Y__builtin_strcpy(SB),$32-32
GO_ARGS
NO_LOCAL_POINTERS
@@ -1178,10 +2572,10 @@ TEXT ·Y__builtin_strcpy(SB),$32-32
MOVQ AX, 16(SP)
CALL ·X__builtin_strcpy(SB)
MOVQ 24(SP), AX
- MOVQ AX, ret+24(FP)
+ MOVQ AX, _2+24(FP)
RET
-// func Y__builtin_strlen(t *TLS, s uintptr) Tsize_t
+// func Y__builtin_strlen(t *TLS, s uintptr) (_2 Tsize_t)
TEXT ·Y__builtin_strlen(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -1191,10 +2585,10 @@ TEXT ·Y__builtin_strlen(SB),$24-24
MOVQ AX, 8(SP)
CALL ·X__builtin_strlen(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
-// func Y__builtin_sub_overflowInt64(t *TLS, a, b int64, res uintptr) int32
+// func Y__builtin_sub_overflowInt64(t *TLS, a, b int64, res uintptr) (_3 int32)
TEXT ·Y__builtin_sub_overflowInt64(SB),$40-36
GO_ARGS
NO_LOCAL_POINTERS
@@ -1208,7 +2602,7 @@ TEXT ·Y__builtin_sub_overflowInt64(SB),$40-36
MOVQ AX, 24(SP)
CALL ·X__builtin_sub_overflowInt64(SB)
MOVL 32(SP), AX
- MOVL AX, ret+32(FP)
+ MOVL AX, _3+32(FP)
RET
// func Y__builtin_trap(t *TLS)
@@ -1242,7 +2636,7 @@ TEXT ·Y__builtin_unreachable(SB),$8-8
CALL ·X__builtin_unreachable(SB)
RET
-// func Y__builtin_vsnprintf(t *TLS, str uintptr, size Tsize_t, format, va uintptr) int32
+// func Y__builtin_vsnprintf(t *TLS, str uintptr, size Tsize_t, format, va uintptr) (_4 int32)
TEXT ·Y__builtin_vsnprintf(SB),$48-44
GO_ARGS
NO_LOCAL_POINTERS
@@ -1258,7 +2652,5884 @@ TEXT ·Y__builtin_vsnprintf(SB),$48-44
MOVQ AX, 32(SP)
CALL ·X__builtin_vsnprintf(SB)
MOVL 40(SP), AX
- MOVL AX, ret+40(FP)
+ MOVL AX, _4+40(FP)
+ RET
+
+// func Y__c11_atomic_compare_exchange_strongInt16(t *TLS, ptr, expected uintptr, desired int16, success, failure int32) (_4 int32)
+TEXT ·Y__c11_atomic_compare_exchange_strongInt16(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVW desired+24(FP), AX
+ MOVW AX, 24(SP)
+ MOVL success+28(FP), AX
+ MOVL AX, 28(SP)
+ MOVL failure+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__c11_atomic_compare_exchange_strongInt16(SB)
+ MOVL 40(SP), AX
+ MOVL AX, _4+40(FP)
+ RET
+
+// func Y__c11_atomic_compare_exchange_strongInt32(t *TLS, ptr, expected uintptr, desired, success, failure int32) (_3 int32)
+TEXT ·Y__c11_atomic_compare_exchange_strongInt32(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL desired+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVL success+28(FP), AX
+ MOVL AX, 28(SP)
+ MOVL failure+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__c11_atomic_compare_exchange_strongInt32(SB)
+ MOVL 40(SP), AX
+ MOVL AX, _3+40(FP)
+ RET
+
+// func Y__c11_atomic_compare_exchange_strongInt64(t *TLS, ptr, expected uintptr, desired int64, success, failure int32) (_4 int32)
+TEXT ·Y__c11_atomic_compare_exchange_strongInt64(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ desired+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL success+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL failure+36(FP), AX
+ MOVL AX, 36(SP)
+ CALL ·X__c11_atomic_compare_exchange_strongInt64(SB)
+ MOVL 40(SP), AX
+ MOVL AX, _4+40(FP)
+ RET
+
+// func Y__c11_atomic_compare_exchange_strongInt8(t *TLS, ptr, expected uintptr, desired int8, success, failure int32) (_4 int32)
+TEXT ·Y__c11_atomic_compare_exchange_strongInt8(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVB desired+24(FP), AX
+ MOVB AX, 24(SP)
+ MOVL success+28(FP), AX
+ MOVL AX, 28(SP)
+ MOVL failure+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__c11_atomic_compare_exchange_strongInt8(SB)
+ MOVL 40(SP), AX
+ MOVL AX, _4+40(FP)
+ RET
+
+// func Y__c11_atomic_compare_exchange_strongUint16(t *TLS, ptr, expected uintptr, desired uint16, success, failure int32) (_4 int32)
+TEXT ·Y__c11_atomic_compare_exchange_strongUint16(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVW desired+24(FP), AX
+ MOVW AX, 24(SP)
+ MOVL success+28(FP), AX
+ MOVL AX, 28(SP)
+ MOVL failure+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__c11_atomic_compare_exchange_strongUint16(SB)
+ MOVL 40(SP), AX
+ MOVL AX, _4+40(FP)
+ RET
+
+// func Y__c11_atomic_compare_exchange_strongUint32(t *TLS, ptr, expected uintptr, desired uint32, success, failure int32) (_4 int32)
+TEXT ·Y__c11_atomic_compare_exchange_strongUint32(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL desired+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVL success+28(FP), AX
+ MOVL AX, 28(SP)
+ MOVL failure+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__c11_atomic_compare_exchange_strongUint32(SB)
+ MOVL 40(SP), AX
+ MOVL AX, _4+40(FP)
+ RET
+
+// func Y__c11_atomic_compare_exchange_strongUint64(t *TLS, ptr, expected uintptr, desired uint64, success, failure int32) (_4 int32)
+TEXT ·Y__c11_atomic_compare_exchange_strongUint64(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ desired+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL success+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL failure+36(FP), AX
+ MOVL AX, 36(SP)
+ CALL ·X__c11_atomic_compare_exchange_strongUint64(SB)
+ MOVL 40(SP), AX
+ MOVL AX, _4+40(FP)
+ RET
+
+// func Y__c11_atomic_compare_exchange_strongUint8(t *TLS, ptr, expected uintptr, desired uint8, success, failure int32) (_4 int32)
+TEXT ·Y__c11_atomic_compare_exchange_strongUint8(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ expected+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVB desired+24(FP), AX
+ MOVB AX, 24(SP)
+ MOVL success+28(FP), AX
+ MOVL AX, 28(SP)
+ MOVL failure+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__c11_atomic_compare_exchange_strongUint8(SB)
+ MOVL 40(SP), AX
+ MOVL AX, _4+40(FP)
+ RET
+
+// func Y__c11_atomic_exchangeInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+TEXT ·Y__c11_atomic_exchangeInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_exchangeInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_exchangeInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+TEXT ·Y__c11_atomic_exchangeInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_exchangeInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_exchangeInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+TEXT ·Y__c11_atomic_exchangeInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_exchangeInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_exchangeInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+TEXT ·Y__c11_atomic_exchangeInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_exchangeInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_exchangeUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+TEXT ·Y__c11_atomic_exchangeUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_exchangeUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_exchangeUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+TEXT ·Y__c11_atomic_exchangeUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_exchangeUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_exchangeUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+TEXT ·Y__c11_atomic_exchangeUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_exchangeUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_exchangeUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+TEXT ·Y__c11_atomic_exchangeUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_exchangeUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_addInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+TEXT ·Y__c11_atomic_fetch_addInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_addInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_addInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+TEXT ·Y__c11_atomic_fetch_addInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_addInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_addInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+TEXT ·Y__c11_atomic_fetch_addInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_fetch_addInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_fetch_addInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+TEXT ·Y__c11_atomic_fetch_addInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_addInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_addUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+TEXT ·Y__c11_atomic_fetch_addUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_addUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_addUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+TEXT ·Y__c11_atomic_fetch_addUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_addUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_addUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+TEXT ·Y__c11_atomic_fetch_addUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_fetch_addUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_fetch_addUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+TEXT ·Y__c11_atomic_fetch_addUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_addUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_andInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+TEXT ·Y__c11_atomic_fetch_andInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_andInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_andInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+TEXT ·Y__c11_atomic_fetch_andInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_andInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_andInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+TEXT ·Y__c11_atomic_fetch_andInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_fetch_andInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_fetch_andInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+TEXT ·Y__c11_atomic_fetch_andInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_andInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_andUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+TEXT ·Y__c11_atomic_fetch_andUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_andUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_andUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+TEXT ·Y__c11_atomic_fetch_andUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_andUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_andUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+TEXT ·Y__c11_atomic_fetch_andUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_fetch_andUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_fetch_andUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+TEXT ·Y__c11_atomic_fetch_andUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_andUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_orInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+TEXT ·Y__c11_atomic_fetch_orInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_orInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_orInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+TEXT ·Y__c11_atomic_fetch_orInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_orInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_orInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+TEXT ·Y__c11_atomic_fetch_orInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_fetch_orInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_fetch_orInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+TEXT ·Y__c11_atomic_fetch_orInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_orInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_orUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+TEXT ·Y__c11_atomic_fetch_orUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_orUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_orUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+TEXT ·Y__c11_atomic_fetch_orUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_orUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_orUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+TEXT ·Y__c11_atomic_fetch_orUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_fetch_orUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_fetch_orUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+TEXT ·Y__c11_atomic_fetch_orUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_orUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_subInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+TEXT ·Y__c11_atomic_fetch_subInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_subInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_subInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+TEXT ·Y__c11_atomic_fetch_subInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_subInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_subInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+TEXT ·Y__c11_atomic_fetch_subInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_fetch_subInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_fetch_subInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+TEXT ·Y__c11_atomic_fetch_subInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_subInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_subUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+TEXT ·Y__c11_atomic_fetch_subUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_subUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_subUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+TEXT ·Y__c11_atomic_fetch_subUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_subUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_subUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+TEXT ·Y__c11_atomic_fetch_subUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_fetch_subUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_fetch_subUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+TEXT ·Y__c11_atomic_fetch_subUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_subUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_xorInt16(t *TLS, ptr uintptr, val int16, _ int32) (r int16)
+TEXT ·Y__c11_atomic_fetch_xorInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_xorInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_xorInt32(t *TLS, ptr uintptr, val int32, _ int32) (r int32)
+TEXT ·Y__c11_atomic_fetch_xorInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_xorInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_xorInt64(t *TLS, ptr uintptr, val int64, _ int32) (r int64)
+TEXT ·Y__c11_atomic_fetch_xorInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_fetch_xorInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_fetch_xorInt8(t *TLS, ptr uintptr, val int8, _ int32) (r int8)
+TEXT ·Y__c11_atomic_fetch_xorInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_xorInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_xorUint16(t *TLS, ptr uintptr, val uint16, _ int32) (r uint16)
+TEXT ·Y__c11_atomic_fetch_xorUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_xorUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_xorUint32(t *TLS, ptr uintptr, val uint32, _ int32) (r uint32)
+TEXT ·Y__c11_atomic_fetch_xorUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_xorUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_fetch_xorUint64(t *TLS, ptr uintptr, val uint64, _ int32) (r uint64)
+TEXT ·Y__c11_atomic_fetch_xorUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_fetch_xorUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__c11_atomic_fetch_xorUint8(t *TLS, ptr uintptr, val uint8, _ int32) (r uint8)
+TEXT ·Y__c11_atomic_fetch_xorUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL _+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_fetch_xorUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_loadInt16(t *TLS, ptr uintptr, memorder int32) (r int16)
+TEXT ·Y__c11_atomic_loadInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL memorder+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__c11_atomic_loadInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_loadInt32(t *TLS, ptr uintptr, memorder int32) (r int32)
+TEXT ·Y__c11_atomic_loadInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL memorder+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__c11_atomic_loadInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_loadInt64(t *TLS, ptr uintptr, memorder int32) (r int64)
+TEXT ·Y__c11_atomic_loadInt64(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL memorder+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__c11_atomic_loadInt64(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_loadInt8(t *TLS, ptr uintptr, memorder int32) (r int8)
+TEXT ·Y__c11_atomic_loadInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL memorder+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__c11_atomic_loadInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_loadUint16(t *TLS, ptr uintptr, memorder int32) (r uint16)
+TEXT ·Y__c11_atomic_loadUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL memorder+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__c11_atomic_loadUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_loadUint32(t *TLS, ptr uintptr, memorder int32) (r uint32)
+TEXT ·Y__c11_atomic_loadUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL memorder+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__c11_atomic_loadUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_loadUint64(t *TLS, ptr uintptr, memorder int32) (r uint64)
+TEXT ·Y__c11_atomic_loadUint64(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL memorder+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__c11_atomic_loadUint64(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_loadUint8(t *TLS, ptr uintptr, memorder int32) (r uint8)
+TEXT ·Y__c11_atomic_loadUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL memorder+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__c11_atomic_loadUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__c11_atomic_storeInt16(t *TLS, ptr uintptr, val int16, memorder int32)
+TEXT ·Y__c11_atomic_storeInt16(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL memorder+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_storeInt16(SB)
+ RET
+
+// func Y__c11_atomic_storeInt32(t *TLS, ptr uintptr, val int32, memorder int32)
+TEXT ·Y__c11_atomic_storeInt32(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL memorder+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_storeInt32(SB)
+ RET
+
+// func Y__c11_atomic_storeInt64(t *TLS, ptr uintptr, val int64, memorder int32)
+TEXT ·Y__c11_atomic_storeInt64(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_storeInt64(SB)
+ RET
+
+// func Y__c11_atomic_storeInt8(t *TLS, ptr uintptr, val int8, memorder int32)
+TEXT ·Y__c11_atomic_storeInt8(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL memorder+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_storeInt8(SB)
+ RET
+
+// func Y__c11_atomic_storeUint16(t *TLS, ptr uintptr, val uint16, memorder int32)
+TEXT ·Y__c11_atomic_storeUint16(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW val+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVL memorder+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_storeUint16(SB)
+ RET
+
+// func Y__c11_atomic_storeUint32(t *TLS, ptr uintptr, val uint32, memorder int32)
+TEXT ·Y__c11_atomic_storeUint32(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL memorder+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_storeUint32(SB)
+ RET
+
+// func Y__c11_atomic_storeUint64(t *TLS, ptr uintptr, val uint64, memorder int32)
+TEXT ·Y__c11_atomic_storeUint64(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL memorder+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__c11_atomic_storeUint64(SB)
+ RET
+
+// func Y__c11_atomic_storeUint8(t *TLS, ptr uintptr, val uint8, memorder int32)
+TEXT ·Y__c11_atomic_storeUint8(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB val+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVL memorder+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__c11_atomic_storeUint8(SB)
+ RET
+
+// func Y__ccgo_dmesg(t *TLS, fmt uintptr, va uintptr)
+TEXT ·Y__ccgo_dmesg(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ fmt+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ va+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__ccgo_dmesg(SB)
+ RET
+
+// func Y__ccgo_getMutexType(tls *TLS, m uintptr) (_2 int32)
+TEXT ·Y__ccgo_getMutexType(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ m+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__ccgo_getMutexType(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
+ RET
+
+// func Y__ccgo_in6addr_anyp(t *TLS) (_1 uintptr)
+TEXT ·Y__ccgo_in6addr_anyp(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__ccgo_in6addr_anyp(SB)
+ MOVQ 8(SP), AX
+ MOVQ AX, _1+8(FP)
+ RET
+
+// func Y__ccgo_pthreadAttrGetDetachState(tls *TLS, a uintptr) (_2 int32)
+TEXT ·Y__ccgo_pthreadAttrGetDetachState(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ a+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__ccgo_pthreadAttrGetDetachState(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
+ RET
+
+// func Y__ccgo_pthreadMutexattrGettype(tls *TLS, a uintptr) (_2 int32)
+TEXT ·Y__ccgo_pthreadMutexattrGettype(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ a+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__ccgo_pthreadMutexattrGettype(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
+ RET
+
+// func Y__ccgo_sqlite3_log(t *TLS, iErrCode int32, zFormat uintptr, args uintptr)
+TEXT ·Y__ccgo_sqlite3_log(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL iErrCode+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ zFormat+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ args+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__ccgo_sqlite3_log(SB)
+ RET
+
+// func Y__clock_gettime(tls *TLS, clk Tclockid_t, ts uintptr) (r1 int32)
+TEXT ·Y__clock_gettime(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL clk+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ ts+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__clock_gettime(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r1+24(FP)
+ RET
+
+// func Y__clock_nanosleep(tls *TLS, clk Tclockid_t, flags int32, req uintptr, rem uintptr) (r int32)
+TEXT ·Y__clock_nanosleep(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL clk+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL flags+12(FP), AX
+ MOVL AX, 12(SP)
+ MOVQ req+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ rem+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__clock_nanosleep(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__cmsg_nxthdr(t *TLS, msgh, cmsg uintptr) (_2 uintptr)
+TEXT ·Y__cmsg_nxthdr(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ msgh+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ cmsg+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__cmsg_nxthdr(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, _2+24(FP)
+ RET
+
+// func Y__convert_scm_timestamps(tls *TLS, msg uintptr, csize Tsocklen_t)
+TEXT ·Y__convert_scm_timestamps(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ msg+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL csize+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__convert_scm_timestamps(SB)
+ RET
+
+// func Y__cos(tls *TLS, x float64, y float64) (r1 float64)
+TEXT ·Y__cos(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ y+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__cos(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r1+24(FP)
+ RET
+
+// func Y__cosdf(tls *TLS, x float64) (r1 float32)
+TEXT ·Y__cosdf(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__cosdf(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r1+16(FP)
+ RET
+
+// func Y__crypt_blowfish(tls *TLS, key uintptr, setting uintptr, output uintptr) (r uintptr)
+TEXT ·Y__crypt_blowfish(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ key+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ setting+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ output+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__crypt_blowfish(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__crypt_des(tls *TLS, key uintptr, setting uintptr, output uintptr) (r uintptr)
+TEXT ·Y__crypt_des(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ key+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ setting+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ output+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__crypt_des(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__crypt_md5(tls *TLS, key uintptr, setting uintptr, output uintptr) (r uintptr)
+TEXT ·Y__crypt_md5(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ key+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ setting+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ output+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__crypt_md5(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__crypt_r(tls *TLS, key uintptr, salt uintptr, data uintptr) (r uintptr)
+TEXT ·Y__crypt_r(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ key+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ salt+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ data+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__crypt_r(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__crypt_sha256(tls *TLS, key uintptr, setting uintptr, output uintptr) (r uintptr)
+TEXT ·Y__crypt_sha256(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ key+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ setting+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ output+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__crypt_sha256(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__crypt_sha512(tls *TLS, key uintptr, setting uintptr, output uintptr) (r uintptr)
+TEXT ·Y__crypt_sha512(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ key+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ setting+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ output+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__crypt_sha512(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__ctype_b_loc(tls *TLS) (r uintptr)
+TEXT ·Y__ctype_b_loc(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__ctype_b_loc(SB)
+ MOVQ 8(SP), AX
+ MOVQ AX, r+8(FP)
+ RET
+
+// func Y__ctype_get_mb_cur_max(tls *TLS) (r Tsize_t)
+TEXT ·Y__ctype_get_mb_cur_max(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__ctype_get_mb_cur_max(SB)
+ MOVQ 8(SP), AX
+ MOVQ AX, r+8(FP)
+ RET
+
+// func Y__ctype_tolower_loc(tls *TLS) (r uintptr)
+TEXT ·Y__ctype_tolower_loc(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__ctype_tolower_loc(SB)
+ MOVQ 8(SP), AX
+ MOVQ AX, r+8(FP)
+ RET
+
+// func Y__ctype_toupper_loc(tls *TLS) (r uintptr)
+TEXT ·Y__ctype_toupper_loc(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__ctype_toupper_loc(SB)
+ MOVQ 8(SP), AX
+ MOVQ AX, r+8(FP)
+ RET
+
+// func Y__des_setkey(tls *TLS, key uintptr, ekey uintptr)
+TEXT ·Y__des_setkey(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ key+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ ekey+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__des_setkey(SB)
+ RET
+
+// func Y__dn_expand(tls *TLS, base uintptr, end uintptr, src uintptr, dest uintptr, space int32) (r int32)
+TEXT ·Y__dn_expand(SB),$56-52
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ base+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ end+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ src+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ dest+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVL space+40(FP), AX
+ MOVL AX, 40(SP)
+ CALL ·X__dn_expand(SB)
+ MOVL 48(SP), AX
+ MOVL AX, r+48(FP)
+ RET
+
+// func Y__dns_parse(tls *TLS, r uintptr, rlen int32, __ccgo_fp_callback uintptr, ctx uintptr) (r1 int32)
+TEXT ·Y__dns_parse(SB),$56-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_callback+24(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal___dns_parse_2(SB) // Create the closure for calling __ccgo_fp_callback
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ r+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL rlen+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 24(SP)
+ MOVQ ctx+32(FP), AX
+ MOVQ AX, 32(SP)
+ CALL ·X__dns_parse(SB)
+ MOVL 40(SP), AX
+ MOVL AX, r1+40(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0___dns_parse_2(SB),$72-68
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL _2+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVQ _3+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL _4+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVQ _5+40(FP), AX
+ MOVQ AX, 40(SP)
+ MOVL _6+48(FP), AX
+ MOVL AX, 48(SP)
+ MOVQ __ccgo_fp+56(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 56(SP), AX
+ MOVL AX, _7+64(FP)
+ RET
+
+// func Y__do_des(tls *TLS, l_in Tuint32_t, r_in Tuint32_t, l_out uintptr, r_out uintptr, count Tuint32_t, saltbits Tuint32_t, ekey uintptr)
+TEXT ·Y__do_des(SB),$48-48
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL l_in+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL r_in+12(FP), AX
+ MOVL AX, 12(SP)
+ MOVQ l_out+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ r_out+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL count+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL saltbits+36(FP), AX
+ MOVL AX, 36(SP)
+ MOVQ ekey+40(FP), AX
+ MOVQ AX, 40(SP)
+ CALL ·X__do_des(SB)
+ RET
+
+// func Y__do_orphaned_stdio_locks(tls *TLS)
+TEXT ·Y__do_orphaned_stdio_locks(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__do_orphaned_stdio_locks(SB)
+ RET
+
+// func Y__dup3(tls *TLS, old int32, new1 int32, flags int32) (r1 int32)
+TEXT ·Y__dup3(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL old+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL new1+12(FP), AX
+ MOVL AX, 12(SP)
+ MOVL flags+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__dup3(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r1+24(FP)
+ RET
+
+// func Y__duplocale(tls *TLS, old Tlocale_t) (r Tlocale_t)
+TEXT ·Y__duplocale(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ old+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__duplocale(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__env_rm_add(tls *TLS, old uintptr, new1 uintptr)
+TEXT ·Y__env_rm_add(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ old+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ new1+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__env_rm_add(SB)
+ RET
+
+// func Y__errno_location(tls *TLS) (r uintptr)
+TEXT ·Y__errno_location(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__errno_location(SB)
+ MOVQ 8(SP), AX
+ MOVQ AX, r+8(FP)
+ RET
+
+// func Y__execvpe(tls *TLS, file uintptr, argv uintptr, envp uintptr) (r int32)
+TEXT ·Y__execvpe(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ file+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ argv+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ envp+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__execvpe(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__expo2(tls *TLS, x float64, sign float64) (r float64)
+TEXT ·Y__expo2(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ sign+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__expo2(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__expo2f(tls *TLS, x float32, sign float32) (r float32)
+TEXT ·Y__expo2f(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL x+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL sign+12(FP), AX
+ MOVL AX, 12(SP)
+ CALL ·X__expo2f(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__fbufsize(tls *TLS, f uintptr) (r Tsize_t)
+TEXT ·Y__fbufsize(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__fbufsize(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__fclose_ca(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__fclose_ca(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__fclose_ca(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__fdopen(tls *TLS, fd int32, mode uintptr) (r uintptr)
+TEXT ·Y__fdopen(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL fd+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ mode+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__fdopen(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__fesetround(tls *TLS, r int32) (r1 int32)
+TEXT ·Y__fesetround(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL r+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__fesetround(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r1+16(FP)
+ RET
+
+// func Y__fgetwc_unlocked(tls *TLS, f uintptr) (r Twint_t)
+TEXT ·Y__fgetwc_unlocked(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__fgetwc_unlocked(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__flbf(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__flbf(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__flbf(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__floatscan(tls *TLS, f uintptr, prec int32, pok int32) (r float64)
+TEXT ·Y__floatscan(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL prec+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL pok+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__floatscan(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__fmodeflags(tls *TLS, mode uintptr) (r int32)
+TEXT ·Y__fmodeflags(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ mode+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__fmodeflags(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__fopen_rb_ca(tls *TLS, filename uintptr, f uintptr, buf uintptr, len1 Tsize_t) (r uintptr)
+TEXT ·Y__fopen_rb_ca(SB),$48-48
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ filename+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ f+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ buf+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ len1+32(FP), AX
+ MOVQ AX, 32(SP)
+ CALL ·X__fopen_rb_ca(SB)
+ MOVQ 40(SP), AX
+ MOVQ AX, r+40(FP)
+ RET
+
+// func Y__fpclassify(tls *TLS, x float64) (r int32)
+TEXT ·Y__fpclassify(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__fpclassify(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__fpclassifyf(tls *TLS, x float32) (r int32)
+TEXT ·Y__fpclassifyf(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL x+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__fpclassifyf(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__fpclassifyl(tls *TLS, x float64) (r int32)
+TEXT ·Y__fpclassifyl(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__fpclassifyl(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__fpending(tls *TLS, f uintptr) (r Tsize_t)
+TEXT ·Y__fpending(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__fpending(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__fpurge(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__fpurge(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__fpurge(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__fputwc_unlocked(tls *TLS, c Twchar_t, f uintptr) (r Twint_t)
+TEXT ·Y__fputwc_unlocked(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ f+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__fputwc_unlocked(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__freadable(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__freadable(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__freadable(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__freadahead(tls *TLS, f uintptr) (r Tsize_t)
+TEXT ·Y__freadahead(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__freadahead(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__freading(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__freading(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__freading(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__freadptr(tls *TLS, f uintptr, sizep uintptr) (r uintptr)
+TEXT ·Y__freadptr(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ sizep+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__freadptr(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__freadptrinc(tls *TLS, f uintptr, inc Tsize_t)
+TEXT ·Y__freadptrinc(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ inc+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__freadptrinc(SB)
+ RET
+
+// func Y__freelocale(tls *TLS, l Tlocale_t)
+TEXT ·Y__freelocale(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ l+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__freelocale(SB)
+ RET
+
+// func Y__fseeko(tls *TLS, f uintptr, off Toff_t, whence int32) (r int32)
+TEXT ·Y__fseeko(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ off+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL whence+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__fseeko(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__fseeko_unlocked(tls *TLS, f uintptr, off Toff_t, whence int32) (r int32)
+TEXT ·Y__fseeko_unlocked(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ off+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL whence+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__fseeko_unlocked(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__fseterr(tls *TLS, f uintptr)
+TEXT ·Y__fseterr(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__fseterr(SB)
+ RET
+
+// func Y__fsetlocking(tls *TLS, f uintptr, type1 int32) (r int32)
+TEXT ·Y__fsetlocking(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL type1+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__fsetlocking(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__fstat(tls *TLS, fd int32, st uintptr) (r int32)
+TEXT ·Y__fstat(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL fd+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ st+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__fstat(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__fstatat(tls *TLS, fd int32, path uintptr, st uintptr, flag int32) (r int32)
+TEXT ·Y__fstatat(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL fd+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ path+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ st+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL flag+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__fstatat(SB)
+ MOVL 40(SP), AX
+ MOVL AX, r+40(FP)
+ RET
+
+// func Y__ftello(tls *TLS, f uintptr) (r Toff_t)
+TEXT ·Y__ftello(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__ftello(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__ftello_unlocked(tls *TLS, f uintptr) (r Toff_t)
+TEXT ·Y__ftello_unlocked(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__ftello_unlocked(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__funcs_on_quick_exit(tls *TLS)
+TEXT ·Y__funcs_on_quick_exit(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__funcs_on_quick_exit(SB)
+ RET
+
+// func Y__futimesat(tls *TLS, dirfd int32, pathname uintptr, times uintptr) (r int32)
+TEXT ·Y__futimesat(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL dirfd+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ pathname+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ times+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__futimesat(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__fwritable(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__fwritable(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__fwritable(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__fwritex(tls *TLS, s uintptr, l Tsize_t, f uintptr) (r Tsize_t)
+TEXT ·Y__fwritex(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ f+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__fwritex(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__fwriting(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__fwriting(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__fwriting(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__fxstat(tls *TLS, ver int32, fd int32, buf uintptr) (r int32)
+TEXT ·Y__fxstat(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL ver+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL fd+12(FP), AX
+ MOVL AX, 12(SP)
+ MOVQ buf+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__fxstat(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__fxstatat(tls *TLS, ver int32, fd int32, path uintptr, buf uintptr, flag int32) (r int32)
+TEXT ·Y__fxstatat(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL ver+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL fd+12(FP), AX
+ MOVL AX, 12(SP)
+ MOVQ path+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ buf+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL flag+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__fxstatat(SB)
+ MOVL 40(SP), AX
+ MOVL AX, r+40(FP)
+ RET
+
+// func Y__get_handler_set(tls *TLS, set uintptr)
+TEXT ·Y__get_handler_set(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ set+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__get_handler_set(SB)
+ RET
+
+// func Y__get_locale(tls *TLS, cat int32, val uintptr) (r uintptr)
+TEXT ·Y__get_locale(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL cat+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ val+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__get_locale(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__get_resolv_conf(tls *TLS, conf uintptr, search uintptr, search_sz Tsize_t) (r int32)
+TEXT ·Y__get_resolv_conf(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ conf+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ search+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ search_sz+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__get_resolv_conf(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__getauxval(tls *TLS, item uint64) (r uint64)
+TEXT ·Y__getauxval(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ item+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__getauxval(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__getdelim(tls *TLS, s uintptr, n uintptr, delim int32, f uintptr) (r Tssize_t)
+TEXT ·Y__getdelim(SB),$48-48
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ n+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL delim+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVQ f+32(FP), AX
+ MOVQ AX, 32(SP)
+ CALL ·X__getdelim(SB)
+ MOVQ 40(SP), AX
+ MOVQ AX, r+40(FP)
+ RET
+
+// func Y__getgr_a(tls *TLS, name uintptr, gid Tgid_t, gr uintptr, buf uintptr, size uintptr, mem uintptr, nmem uintptr, res uintptr) (r int32)
+TEXT ·Y__getgr_a(SB),$80-76
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ name+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL gid+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVQ gr+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ buf+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ size+40(FP), AX
+ MOVQ AX, 40(SP)
+ MOVQ mem+48(FP), AX
+ MOVQ AX, 48(SP)
+ MOVQ nmem+56(FP), AX
+ MOVQ AX, 56(SP)
+ MOVQ res+64(FP), AX
+ MOVQ AX, 64(SP)
+ CALL ·X__getgr_a(SB)
+ MOVL 72(SP), AX
+ MOVL AX, r+72(FP)
+ RET
+
+// func Y__getgrent_a(tls *TLS, f uintptr, gr uintptr, line uintptr, size uintptr, mem uintptr, nmem uintptr, res uintptr) (r int32)
+TEXT ·Y__getgrent_a(SB),$72-68
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ gr+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ line+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ size+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ mem+40(FP), AX
+ MOVQ AX, 40(SP)
+ MOVQ nmem+48(FP), AX
+ MOVQ AX, 48(SP)
+ MOVQ res+56(FP), AX
+ MOVQ AX, 56(SP)
+ CALL ·X__getgrent_a(SB)
+ MOVL 64(SP), AX
+ MOVL AX, r+64(FP)
+ RET
+
+// func Y__getopt_msg(tls *TLS, a uintptr, b uintptr, c uintptr, l Tsize_t)
+TEXT ·Y__getopt_msg(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ a+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ b+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ c+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ l+32(FP), AX
+ MOVQ AX, 32(SP)
+ CALL ·X__getopt_msg(SB)
+ RET
+
+// func Y__getpw_a(tls *TLS, name uintptr, uid Tuid_t, pw uintptr, buf uintptr, size uintptr, res uintptr) (r int32)
+TEXT ·Y__getpw_a(SB),$64-60
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ name+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL uid+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVQ pw+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ buf+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ size+40(FP), AX
+ MOVQ AX, 40(SP)
+ MOVQ res+48(FP), AX
+ MOVQ AX, 48(SP)
+ CALL ·X__getpw_a(SB)
+ MOVL 56(SP), AX
+ MOVL AX, r+56(FP)
+ RET
+
+// func Y__getpwent_a(tls *TLS, f uintptr, pw uintptr, line uintptr, size uintptr, res uintptr) (r int32)
+TEXT ·Y__getpwent_a(SB),$56-52
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ pw+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ line+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ size+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ res+40(FP), AX
+ MOVQ AX, 40(SP)
+ CALL ·X__getpwent_a(SB)
+ MOVL 48(SP), AX
+ MOVL AX, r+48(FP)
+ RET
+
+// func Y__gettextdomain(tls *TLS) (r uintptr)
+TEXT ·Y__gettextdomain(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__gettextdomain(SB)
+ MOVQ 8(SP), AX
+ MOVQ AX, r+8(FP)
+ RET
+
+// func Y__gmtime_r(tls *TLS, t uintptr, tm uintptr) (r uintptr)
+TEXT ·Y__gmtime_r(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ t+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ tm+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__gmtime_r(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__h_errno_location(tls *TLS) (r uintptr)
+TEXT ·Y__h_errno_location(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__h_errno_location(SB)
+ MOVQ 8(SP), AX
+ MOVQ AX, r+8(FP)
+ RET
+
+// func Y__inet_aton(tls *TLS, s0 uintptr, dest uintptr) (r int32)
+TEXT ·Y__inet_aton(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s0+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ dest+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__inet_aton(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__init_ssp(tls *TLS, entropy uintptr)
+TEXT ·Y__init_ssp(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ entropy+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__init_ssp(SB)
+ RET
+
+// func Y__intscan(tls *TLS, f uintptr, base uint32, pok int32, lim uint64) (r uint64)
+TEXT ·Y__intscan(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL base+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL pok+20(FP), AX
+ MOVL AX, 20(SP)
+ MOVQ lim+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__intscan(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__isalnum_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__isalnum_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isalnum_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isalpha_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__isalpha_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isalpha_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isblank_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__isblank_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isblank_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iscntrl_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__iscntrl_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iscntrl_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isdigit_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__isdigit_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isdigit_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isfinite(tls *TLS, d float64) (_2 int32)
+TEXT ·Y__isfinite(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ d+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__isfinite(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
+ RET
+
+// func Y__isfinitef(tls *TLS, f float32) (_2 int32)
+TEXT ·Y__isfinitef(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL f+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__isfinitef(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
+ RET
+
+// func Y__isfinitel(tls *TLS, d float64) (_2 int32)
+TEXT ·Y__isfinitel(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ d+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__isfinitel(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
+ RET
+
+// func Y__isgraph_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__isgraph_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isgraph_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__islower_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__islower_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__islower_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isnan(t *TLS, x float64) (_2 int32)
+TEXT ·Y__isnan(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__isnan(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
+ RET
+
+// func Y__isnanf(t *TLS, arg float32) (_2 int32)
+TEXT ·Y__isnanf(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL arg+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__isnanf(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
+ RET
+
+// func Y__isnanl(t *TLS, arg float64) (_2 int32)
+TEXT ·Y__isnanl(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ arg+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__isnanl(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
+ RET
+
+// func Y__isoc99_fscanf(tls *TLS, f uintptr, fmt uintptr, va uintptr) (r int32)
+TEXT ·Y__isoc99_fscanf(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ fmt+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ va+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__isoc99_fscanf(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__isoc99_fwscanf(tls *TLS, f uintptr, fmt uintptr, va uintptr) (r int32)
+TEXT ·Y__isoc99_fwscanf(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ fmt+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ va+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__isoc99_fwscanf(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__isoc99_scanf(tls *TLS, fmt uintptr, va uintptr) (r int32)
+TEXT ·Y__isoc99_scanf(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ fmt+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ va+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isoc99_scanf(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isoc99_sscanf(tls *TLS, s uintptr, fmt uintptr, va uintptr) (r int32)
+TEXT ·Y__isoc99_sscanf(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ fmt+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ va+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__isoc99_sscanf(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__isoc99_swscanf(tls *TLS, s uintptr, fmt uintptr, va uintptr) (r int32)
+TEXT ·Y__isoc99_swscanf(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ fmt+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ va+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__isoc99_swscanf(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__isoc99_vfscanf(tls *TLS, f uintptr, fmt uintptr, ap Tva_list) (r int32)
+TEXT ·Y__isoc99_vfscanf(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ fmt+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ap+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__isoc99_vfscanf(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__isoc99_vfwscanf(tls *TLS, f uintptr, fmt uintptr, ap Tva_list) (r int32)
+TEXT ·Y__isoc99_vfwscanf(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ fmt+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ap+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__isoc99_vfwscanf(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__isoc99_vscanf(tls *TLS, fmt uintptr, ap Tva_list) (r int32)
+TEXT ·Y__isoc99_vscanf(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ fmt+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ ap+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isoc99_vscanf(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isoc99_vsscanf(tls *TLS, s uintptr, fmt uintptr, ap Tva_list) (r int32)
+TEXT ·Y__isoc99_vsscanf(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ fmt+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ap+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__isoc99_vsscanf(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__isoc99_vswscanf(tls *TLS, s uintptr, fmt uintptr, ap Tva_list) (r int32)
+TEXT ·Y__isoc99_vswscanf(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ fmt+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ ap+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__isoc99_vswscanf(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__isoc99_vwscanf(tls *TLS, fmt uintptr, ap Tva_list) (r int32)
+TEXT ·Y__isoc99_vwscanf(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ fmt+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ ap+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isoc99_vwscanf(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isoc99_wscanf(tls *TLS, fmt uintptr, va uintptr) (r int32)
+TEXT ·Y__isoc99_wscanf(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ fmt+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ va+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isoc99_wscanf(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isprint_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__isprint_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isprint_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__ispunct_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__ispunct_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__ispunct_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isspace_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__isspace_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isspace_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isupper_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__isupper_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isupper_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswalnum_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswalnum_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswalnum_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswalpha_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswalpha_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswalpha_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswblank_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswblank_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswblank_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswcntrl_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswcntrl_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswcntrl_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswctype_l(tls *TLS, c Twint_t, t Twctype_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswctype_l(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ t+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ l+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__iswctype_l(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__iswdigit_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswdigit_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswdigit_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswgraph_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswgraph_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswgraph_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswlower_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswlower_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswlower_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswprint_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswprint_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswprint_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswpunct_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswpunct_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswpunct_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswspace_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswspace_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswspace_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswupper_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswupper_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswupper_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__iswxdigit_l(tls *TLS, c Twint_t, l Tlocale_t) (r int32)
+TEXT ·Y__iswxdigit_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__iswxdigit_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__isxdigit_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__isxdigit_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__isxdigit_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__lctrans(tls *TLS, msg uintptr, lm uintptr) (r uintptr)
+TEXT ·Y__lctrans(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ msg+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ lm+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__lctrans(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__lctrans_cur(tls *TLS, msg uintptr) (r uintptr)
+TEXT ·Y__lctrans_cur(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ msg+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__lctrans_cur(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__lctrans_impl(tls *TLS, msg uintptr, lm uintptr) (r uintptr)
+TEXT ·Y__lctrans_impl(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ msg+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ lm+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__lctrans_impl(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__ldexp_cexp(tls *TLS, z complex128, expt int32) (r complex128)
+TEXT ·Y__ldexp_cexp(SB),$48-48
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ z_real+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ z_imag+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL expt+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__ldexp_cexp(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r_real+32(FP)
+ MOVQ 40(SP), AX
+ MOVQ AX, r_imag+40(FP)
+ RET
+
+// func Y__ldexp_cexpf(tls *TLS, z complex64, expt int32) (r complex64)
+TEXT ·Y__ldexp_cexpf(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL z_real+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL z_imag+12(FP), AX
+ MOVL AX, 12(SP)
+ MOVL expt+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__ldexp_cexpf(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r_real+24(FP)
+ MOVL 28(SP), AX
+ MOVL AX, r_imag+28(FP)
+ RET
+
+// func Y__lgamma_r(tls *TLS, x float64, signgamp uintptr) (r1 float64)
+TEXT ·Y__lgamma_r(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ signgamp+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__lgamma_r(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r1+24(FP)
+ RET
+
+// func Y__lgammaf_r(tls *TLS, x float32, signgamp uintptr) (r1 float32)
+TEXT ·Y__lgammaf_r(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL x+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ signgamp+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__lgammaf_r(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r1+24(FP)
+ RET
+
+// func Y__lgammal_r(tls *TLS, x float64, sg uintptr) (r float64)
+TEXT ·Y__lgammal_r(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ sg+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__lgammal_r(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__libc_current_sigrtmax(tls *TLS) (r int32)
+TEXT ·Y__libc_current_sigrtmax(SB),$16-12
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__libc_current_sigrtmax(SB)
+ MOVL 8(SP), AX
+ MOVL AX, r+8(FP)
+ RET
+
+// func Y__libc_current_sigrtmin(tls *TLS) (r int32)
+TEXT ·Y__libc_current_sigrtmin(SB),$16-12
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__libc_current_sigrtmin(SB)
+ MOVL 8(SP), AX
+ MOVL AX, r+8(FP)
+ RET
+
+// func Y__libc_sigaction(tls *TLS, sig int32, sa uintptr, old uintptr) (r1 int32)
+TEXT ·Y__libc_sigaction(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL sig+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ sa+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ old+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__libc_sigaction(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r1+32(FP)
+ RET
+
+// func Y__loc_is_allocated(tls *TLS, loc Tlocale_t) (r int32)
+TEXT ·Y__loc_is_allocated(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ loc+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__loc_is_allocated(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__localtime_r(tls *TLS, t uintptr, tm uintptr) (r uintptr)
+TEXT ·Y__localtime_r(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ t+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ tm+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__localtime_r(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__lockfile(tls *TLS, file uintptr) (_2 int32)
+TEXT ·Y__lockfile(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ file+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__lockfile(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
+ RET
+
+// func Y__lookup_ipliteral(tls *TLS, buf uintptr, name uintptr, family int32) (r int32)
+TEXT ·Y__lookup_ipliteral(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ buf+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ name+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL family+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__lookup_ipliteral(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__lookup_name(tls *TLS, buf uintptr, canon uintptr, name uintptr, family int32, flags int32) (r int32)
+TEXT ·Y__lookup_name(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ buf+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ canon+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ name+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL family+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVL flags+36(FP), AX
+ MOVL AX, 36(SP)
+ CALL ·X__lookup_name(SB)
+ MOVL 40(SP), AX
+ MOVL AX, r+40(FP)
+ RET
+
+// func Y__lookup_serv(tls *TLS, buf uintptr, name uintptr, proto int32, socktype int32, flags int32) (r int32)
+TEXT ·Y__lookup_serv(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ buf+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ name+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL proto+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVL socktype+28(FP), AX
+ MOVL AX, 28(SP)
+ MOVL flags+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__lookup_serv(SB)
+ MOVL 40(SP), AX
+ MOVL AX, r+40(FP)
+ RET
+
+// func Y__lseek(tls *TLS, fd int32, offset Toff_t, whence int32) (r Toff_t)
+TEXT ·Y__lseek(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL fd+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ offset+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL whence+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__lseek(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__lsysinfo(tls *TLS, info uintptr) (r int32)
+TEXT ·Y__lsysinfo(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ info+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__lsysinfo(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__lxstat(tls *TLS, ver int32, path uintptr, buf uintptr) (r int32)
+TEXT ·Y__lxstat(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL ver+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ path+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ buf+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__lxstat(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__madvise(tls *TLS, addr uintptr, len1 Tsize_t, advice int32) (r int32)
+TEXT ·Y__madvise(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ addr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ len1+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL advice+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__madvise(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__map_file(tls *TLS, pathname uintptr, size uintptr) (r uintptr)
+TEXT ·Y__map_file(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ pathname+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ size+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__map_file(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__math_divzero(tls *TLS, sign Tuint32_t) (r float64)
+TEXT ·Y__math_divzero(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL sign+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__math_divzero(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__math_divzerof(tls *TLS, sign Tuint32_t) (r float32)
+TEXT ·Y__math_divzerof(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL sign+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__math_divzerof(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__math_invalid(tls *TLS, x float64) (r float64)
+TEXT ·Y__math_invalid(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__math_invalid(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__math_invalidf(tls *TLS, x float32) (r float32)
+TEXT ·Y__math_invalidf(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL x+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__math_invalidf(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__math_oflow(tls *TLS, sign Tuint32_t) (r float64)
+TEXT ·Y__math_oflow(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL sign+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__math_oflow(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__math_oflowf(tls *TLS, sign Tuint32_t) (r float32)
+TEXT ·Y__math_oflowf(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL sign+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__math_oflowf(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__math_uflow(tls *TLS, sign Tuint32_t) (r float64)
+TEXT ·Y__math_uflow(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL sign+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__math_uflow(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__math_uflowf(tls *TLS, sign Tuint32_t) (r float32)
+TEXT ·Y__math_uflowf(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL sign+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__math_uflowf(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__math_xflow(tls *TLS, sign Tuint32_t, y2 float64) (r float64)
+TEXT ·Y__math_xflow(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL sign+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ y2+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__math_xflow(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__math_xflowf(tls *TLS, sign Tuint32_t, y2 float32) (r float32)
+TEXT ·Y__math_xflowf(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL sign+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL y2+12(FP), AX
+ MOVL AX, 12(SP)
+ CALL ·X__math_xflowf(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__memrchr(tls *TLS, m uintptr, c int32, n Tsize_t) (r uintptr)
+TEXT ·Y__memrchr(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ m+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL c+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVQ n+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__memrchr(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__mkostemps(tls *TLS, template uintptr, len1 int32, flags int32) (r int32)
+TEXT ·Y__mkostemps(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ template+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL len1+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL flags+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__mkostemps(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__mmap(tls *TLS, start uintptr, len1 Tsize_t, prot int32, flags int32, fd int32, off Toff_t) (r uintptr)
+TEXT ·Y__mmap(SB),$56-56
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ start+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ len1+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL prot+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVL flags+28(FP), AX
+ MOVL AX, 28(SP)
+ MOVL fd+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVQ off+40(FP), AX
+ MOVQ AX, 40(SP)
+ CALL ·X__mmap(SB)
+ MOVQ 48(SP), AX
+ MOVQ AX, r+48(FP)
+ RET
+
+// func Y__mo_lookup(tls *TLS, p uintptr, size Tsize_t, s uintptr) (r uintptr)
+TEXT ·Y__mo_lookup(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ p+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ size+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ s+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__mo_lookup(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__month_to_secs(tls *TLS, month int32, is_leap int32) (r int32)
+TEXT ·Y__month_to_secs(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL month+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL is_leap+12(FP), AX
+ MOVL AX, 12(SP)
+ CALL ·X__month_to_secs(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__mprotect(tls *TLS, addr uintptr, len1 Tsize_t, prot int32) (r int32)
+TEXT ·Y__mprotect(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ addr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ len1+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL prot+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__mprotect(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__mremap(tls *TLS, old_addr uintptr, old_len Tsize_t, new_len Tsize_t, flags int32, va uintptr) (r uintptr)
+TEXT ·Y__mremap(SB),$56-56
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ old_addr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ old_len+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ new_len+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL flags+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVQ va+40(FP), AX
+ MOVQ AX, 40(SP)
+ CALL ·X__mremap(SB)
+ MOVQ 48(SP), AX
+ MOVQ AX, r+48(FP)
+ RET
+
+// func Y__munmap(tls *TLS, start uintptr, len1 Tsize_t) (r int32)
+TEXT ·Y__munmap(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ start+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ len1+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__munmap(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__newlocale(tls *TLS, mask int32, name uintptr, loc Tlocale_t) (r Tlocale_t)
+TEXT ·Y__newlocale(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL mask+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ name+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ loc+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__newlocale(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__nl_langinfo(tls *TLS, item Tnl_item) (r uintptr)
+TEXT ·Y__nl_langinfo(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL item+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__nl_langinfo(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__nl_langinfo_l(tls *TLS, item Tnl_item, loc Tlocale_t) (r uintptr)
+TEXT ·Y__nl_langinfo_l(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL item+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ loc+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__nl_langinfo_l(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__nscd_query(tls *TLS, req Tint32_t, key uintptr, buf uintptr, len1 Tsize_t, swap uintptr) (r uintptr)
+TEXT ·Y__nscd_query(SB),$56-56
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL req+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ key+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ buf+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ len1+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ swap+40(FP), AX
+ MOVQ AX, 40(SP)
+ CALL ·X__nscd_query(SB)
+ MOVQ 48(SP), AX
+ MOVQ AX, r+48(FP)
+ RET
+
+// func Y__ofl_add(tls *TLS, f uintptr) (r uintptr)
+TEXT ·Y__ofl_add(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__ofl_add(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__ofl_lock(tls *TLS) (r uintptr)
+TEXT ·Y__ofl_lock(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__ofl_lock(SB)
+ MOVQ 8(SP), AX
+ MOVQ AX, r+8(FP)
+ RET
+
+// func Y__ofl_unlock(tls *TLS)
+TEXT ·Y__ofl_unlock(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__ofl_unlock(SB)
+ RET
+
+// func Y__overflow(tls *TLS, f uintptr, _c int32) (r int32)
+TEXT ·Y__overflow(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL _c+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__overflow(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__pleval(tls *TLS, s uintptr, n uint64) (r uint64)
+TEXT ·Y__pleval(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ n+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__pleval(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__posix_getopt(tls *TLS, argc int32, argv uintptr, optstring uintptr) (r int32)
+TEXT ·Y__posix_getopt(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL argc+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ argv+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ optstring+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__posix_getopt(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__procfdname(tls *TLS, buf uintptr, fd uint32)
+TEXT ·Y__procfdname(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ buf+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL fd+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__procfdname(SB)
+ RET
+
+// func Y__ptsname_r(tls *TLS, fd int32, buf uintptr, len1 Tsize_t) (r int32)
+TEXT ·Y__ptsname_r(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL fd+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ buf+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ len1+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__ptsname_r(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__putenv(tls *TLS, s uintptr, l Tsize_t, r uintptr) (r1 int32)
+TEXT ·Y__putenv(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ r+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__putenv(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r1+32(FP)
+ RET
+
+// func Y__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr)
+TEXT ·Y__qsort_r(SB),$56-48
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_cmp+32(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal___qsort_r_3(SB) // Create the closure for calling __ccgo_fp_cmp
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ base+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ nel+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ width+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 32(SP)
+ MOVQ arg+40(FP), AX
+ MOVQ AX, 40(SP)
+ CALL ·X__qsort_r(SB)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0___qsort_r_3(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ _3+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ __ccgo_fp+32(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 32(SP), AX
+ MOVL AX, _4+40(FP)
+ RET
+
+// func Y__rand48_step(tls *TLS, xi uintptr, lc uintptr) (r Tuint64_t)
+TEXT ·Y__rand48_step(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ xi+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ lc+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__rand48_step(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__register_locked_file(tls *TLS, f uintptr, self Tpthread_t)
+TEXT ·Y__register_locked_file(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ self+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__register_locked_file(SB)
+ RET
+
+// func Y__rem_pio2(tls *TLS, x float64, y uintptr) (r1 int32)
+TEXT ·Y__rem_pio2(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ y+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__rem_pio2(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r1+24(FP)
+ RET
+
+// func Y__rem_pio2_large(tls *TLS, x uintptr, y uintptr, e0 int32, nx int32, prec int32) (r int32)
+TEXT ·Y__rem_pio2_large(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ y+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL e0+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVL nx+28(FP), AX
+ MOVL AX, 28(SP)
+ MOVL prec+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__rem_pio2_large(SB)
+ MOVL 40(SP), AX
+ MOVL AX, r+40(FP)
+ RET
+
+// func Y__rem_pio2f(tls *TLS, x float32, y uintptr) (r int32)
+TEXT ·Y__rem_pio2f(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL x+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ y+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__rem_pio2f(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__res_mkquery(tls *TLS, op int32, dname uintptr, class int32, type1 int32, data uintptr, datalen int32, newrr uintptr, buf uintptr, buflen int32) (r int32)
+TEXT ·Y__res_mkquery(SB),$80-76
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL op+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ dname+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL class+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVL type1+28(FP), AX
+ MOVL AX, 28(SP)
+ MOVQ data+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVL datalen+40(FP), AX
+ MOVL AX, 40(SP)
+ MOVQ newrr+48(FP), AX
+ MOVQ AX, 48(SP)
+ MOVQ buf+56(FP), AX
+ MOVQ AX, 56(SP)
+ MOVL buflen+64(FP), AX
+ MOVL AX, 64(SP)
+ CALL ·X__res_mkquery(SB)
+ MOVL 72(SP), AX
+ MOVL AX, r+72(FP)
+ RET
+
+// func Y__res_msend(tls *TLS, nqueries int32, queries uintptr, qlens uintptr, answers uintptr, alens uintptr, asize int32) (r int32)
+TEXT ·Y__res_msend(SB),$64-60
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL nqueries+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ queries+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ qlens+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ answers+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ alens+40(FP), AX
+ MOVQ AX, 40(SP)
+ MOVL asize+48(FP), AX
+ MOVL AX, 48(SP)
+ CALL ·X__res_msend(SB)
+ MOVL 56(SP), AX
+ MOVL AX, r+56(FP)
+ RET
+
+// func Y__res_msend_rc(tls *TLS, nqueries int32, queries uintptr, qlens uintptr, answers uintptr, alens uintptr, asize int32, conf uintptr) (r1 int32)
+TEXT ·Y__res_msend_rc(SB),$72-68
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL nqueries+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ queries+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ qlens+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ answers+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ alens+40(FP), AX
+ MOVQ AX, 40(SP)
+ MOVL asize+48(FP), AX
+ MOVL AX, 48(SP)
+ MOVQ conf+56(FP), AX
+ MOVQ AX, 56(SP)
+ CALL ·X__res_msend_rc(SB)
+ MOVL 64(SP), AX
+ MOVL AX, r1+64(FP)
+ RET
+
+// func Y__res_send(tls *TLS, _msg uintptr, _msglen int32, _answer uintptr, _anslen int32) (r1 int32)
+TEXT ·Y__res_send(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _msg+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL _msglen+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVQ _answer+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL _anslen+32(FP), AX
+ MOVL AX, 32(SP)
+ CALL ·X__res_send(SB)
+ MOVL 40(SP), AX
+ MOVL AX, r1+40(FP)
+ RET
+
+// func Y__res_state(tls *TLS) (r uintptr)
+TEXT ·Y__res_state(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__res_state(SB)
+ MOVQ 8(SP), AX
+ MOVQ AX, r+8(FP)
+ RET
+
+// func Y__reset_tls(tls *TLS)
+TEXT ·Y__reset_tls(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__reset_tls(SB)
+ RET
+
+// func Y__restore(tls *TLS)
+TEXT ·Y__restore(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__restore(SB)
+ RET
+
+// func Y__restore_rt(tls *TLS)
+TEXT ·Y__restore_rt(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__restore_rt(SB)
+ RET
+
+// func Y__restore_sigs(tls *TLS, set uintptr)
+TEXT ·Y__restore_sigs(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ set+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__restore_sigs(SB)
+ RET
+
+// func Y__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32)
+//
+// Same bridge pattern as Y__qsort_r: allocate one TLS trampoline slot, wrap
+// the raw ABI0 callback pointer in a closure, call X__rtnetlink_enumerate
+// with that closure, store the int32 result, free the trampoline slot.
+//
+// X__rtnetlink_enumerate's outgoing frame layout: tls 0(SP), link_af 8(SP),
+// addr_af 12(SP), cb 16(SP), ctx 24(SP), result 32(SP).
+TEXT ·Y__rtnetlink_enumerate(SB),$48-36
+	GO_ARGS
+	NO_LOCAL_POINTERS
+	MOVQ tls+0(FP), AX // alloc all ABI trampolines
+	MOVQ AX, 0(SP)
+	MOVQ $16, 8(SP) // 16*(number of func ptrs in signature) -- one func ptr here
+	CALL modernc·org∕libc·TLSAlloc(SB)
+	MOVQ 16(SP), AX
+	MOVQ AX, -8(BP) // Trampolines[0]
+	MOVQ tls+0(FP), AX
+	MOVQ AX, 0(SP)
+	MOVQ -8(BP), AX
+	ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+	MOVQ AX, 8(SP)
+	MOVQ __ccgo_fp_cb+16(FP), AX // ABI0 code ptr
+	MOVQ AX, 16(SP)
+	CALL ·__ccgo_abiInternal___rtnetlink_enumerate_2(SB) // Create the closure for calling __ccgo_fp_cb
+	MOVQ tls+0(FP), AX
+	MOVQ AX, 0(SP)
+	MOVL link_af+8(FP), AX
+	MOVL AX, 8(SP)
+	MOVL addr_af+12(FP), AX
+	MOVL AX, 12(SP)
+	MOVQ -8(BP), AX // Trampolines[0]
+	ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+	MOVQ AX, 16(SP) // BUGFIX: closure goes in the cb slot 16(SP); it was stored to 24(SP), where the ctx store below immediately overwrote it, leaving cb uninitialized
+	MOVQ ctx+24(FP), AX
+	MOVQ AX, 24(SP)
+	CALL ·X__rtnetlink_enumerate(SB)
+	MOVL 32(SP), AX
+	MOVL AX, r1+32(FP)
+	MOVQ tls+0(FP), AX
+	MOVQ AX, 0(SP)
+	MOVQ $0, 8(SP)
+	CALL modernc·org∕libc·TLSFree(SB)
+	RET
+
+// __ccgo_abi0___rtnetlink_enumerate_2 is the callback thunk behind the
+// closure created in Y__rtnetlink_enumerate: it forwards the three word-sized
+// arguments (_0.._2, presumably tls, the netlink message ptr, and ctx --
+// matches the enumerate callback's arity) to the raw ABI0 callback code
+// pointer passed in __ccgo_fp, and returns that callback's int32 result
+// in _3.
+TEXT ·__ccgo_abi0___rtnetlink_enumerate_2(SB),$40-36
+	GO_ARGS
+	NO_LOCAL_POINTERS
+	MOVQ _0+0(FP), AX
+	MOVQ AX, 0(SP)
+	MOVQ _1+8(FP), AX
+	MOVQ AX, 8(SP)
+	MOVQ _2+16(FP), AX
+	MOVQ AX, 16(SP)
+	MOVQ __ccgo_fp+24(FP), AX
+	CALL *AX // Call the ABI0 code ptr
+	MOVL 24(SP), AX // callback's int32 result slot
+	MOVL AX, _3+32(FP)
+	RET
+
+// func Y__secs_to_tm(tls *TLS, t int64, tm uintptr) (r int32)
+TEXT ·Y__secs_to_tm(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ t+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ tm+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__secs_to_tm(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__secs_to_zone(tls *TLS, t int64, local int32, isdst uintptr, offset uintptr, oppoff uintptr, zonename uintptr)
+TEXT ·Y__secs_to_zone(SB),$56-56
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ t+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL local+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVQ isdst+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ offset+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ oppoff+40(FP), AX
+ MOVQ AX, 40(SP)
+ MOVQ zonename+48(FP), AX
+ MOVQ AX, 48(SP)
+ CALL ·X__secs_to_zone(SB)
+ RET
+
+// func Y__setxid(tls *TLS, nr int32, id int32, eid int32, sid int32) (r int32)
+TEXT ·Y__setxid(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL nr+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL id+12(FP), AX
+ MOVL AX, 12(SP)
+ MOVL eid+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL sid+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__setxid(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__shgetc(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__shgetc(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__shgetc(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__shlim(tls *TLS, f uintptr, lim Toff_t)
+TEXT ·Y__shlim(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ lim+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__shlim(SB)
+ RET
+
+// func Y__shm_mapname(tls *TLS, name uintptr, buf uintptr) (r uintptr)
+TEXT ·Y__shm_mapname(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ name+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ buf+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__shm_mapname(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__sigaction(tls *TLS, sig int32, sa uintptr, old uintptr) (r1 int32)
+TEXT ·Y__sigaction(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL sig+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ sa+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ old+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__sigaction(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r1+32(FP)
+ RET
+
+// func Y__signbit(tls *TLS, x float64) (r int32)
+TEXT ·Y__signbit(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__signbit(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__signbitf(tls *TLS, x float32) (r int32)
+TEXT ·Y__signbitf(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL x+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X__signbitf(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__signbitl(tls *TLS, x float64) (r int32)
+TEXT ·Y__signbitl(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__signbitl(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__sigsetjmp_tail(tls *TLS, jb uintptr, ret int32) (r int32)
+TEXT ·Y__sigsetjmp_tail(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ jb+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL ret+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__sigsetjmp_tail(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__sin(tls *TLS, x float64, y float64, iy int32) (r1 float64)
+TEXT ·Y__sin(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ y+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL iy+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__sin(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r1+32(FP)
+ RET
+
+// func Y__sindf(tls *TLS, x float64) (r1 float32)
+TEXT ·Y__sindf(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__sindf(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r1+16(FP)
+ RET
+
+// func Y__stack_chk_fail(tls *TLS)
+TEXT ·Y__stack_chk_fail(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__stack_chk_fail(SB)
+ RET
+
+// func Y__stack_chk_fail_local(tls *TLS)
+TEXT ·Y__stack_chk_fail_local(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__stack_chk_fail_local(SB)
+ RET
+
+// func Y__stdio_close(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__stdio_close(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__stdio_close(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__stdio_exit(tls *TLS)
+TEXT ·Y__stdio_exit(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__stdio_exit(SB)
+ RET
+
+// func Y__stdio_exit_needed(tls *TLS)
+TEXT ·Y__stdio_exit_needed(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__stdio_exit_needed(SB)
+ RET
+
+// func Y__stdio_read(tls *TLS, f uintptr, buf uintptr, len1 Tsize_t) (r Tsize_t)
+TEXT ·Y__stdio_read(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ buf+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ len1+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__stdio_read(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__stdio_seek(tls *TLS, f uintptr, off Toff_t, whence int32) (r Toff_t)
+TEXT ·Y__stdio_seek(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ off+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL whence+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__stdio_seek(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__stdio_write(tls *TLS, f uintptr, buf uintptr, len1 Tsize_t) (r Tsize_t)
+TEXT ·Y__stdio_write(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ buf+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ len1+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__stdio_write(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__stdout_write(tls *TLS, f uintptr, buf uintptr, len1 Tsize_t) (r Tsize_t)
+TEXT ·Y__stdout_write(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ buf+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ len1+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__stdout_write(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__stpcpy(tls *TLS, d uintptr, s uintptr) (r uintptr)
+TEXT ·Y__stpcpy(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ d+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ s+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__stpcpy(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__stpncpy(tls *TLS, d uintptr, s uintptr, n Tsize_t) (r uintptr)
+TEXT ·Y__stpncpy(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ d+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ s+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ n+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__stpncpy(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__strcasecmp_l(tls *TLS, l uintptr, r uintptr, loc Tlocale_t) (r1 int32)
+TEXT ·Y__strcasecmp_l(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ l+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ r+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ loc+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__strcasecmp_l(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r1+32(FP)
+ RET
+
+// func Y__strchrnul(tls *TLS, s uintptr, c int32) (r uintptr)
+TEXT ·Y__strchrnul(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL c+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__strchrnul(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__strcoll_l(tls *TLS, l uintptr, r uintptr, loc Tlocale_t) (r1 int32)
+TEXT ·Y__strcoll_l(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ l+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ r+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ loc+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__strcoll_l(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r1+32(FP)
+ RET
+
+// func Y__strerror_l(tls *TLS, e int32, loc Tlocale_t) (r uintptr)
+TEXT ·Y__strerror_l(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL e+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ loc+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__strerror_l(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__strftime_fmt_1(tls *TLS, s uintptr, l uintptr, f int32, tm uintptr, loc Tlocale_t, pad int32) (r uintptr)
+TEXT ·Y__strftime_fmt_1(SB),$64-64
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL f+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVQ tm+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ loc+40(FP), AX
+ MOVQ AX, 40(SP)
+ MOVL pad+48(FP), AX
+ MOVL AX, 48(SP)
+ CALL ·X__strftime_fmt_1(SB)
+ MOVQ 56(SP), AX
+ MOVQ AX, r+56(FP)
+ RET
+
+// func Y__strftime_l(tls *TLS, s uintptr, n Tsize_t, f uintptr, tm uintptr, loc Tlocale_t) (r Tsize_t)
+TEXT ·Y__strftime_l(SB),$56-56
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ n+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ f+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ tm+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ loc+40(FP), AX
+ MOVQ AX, 40(SP)
+ CALL ·X__strftime_l(SB)
+ MOVQ 48(SP), AX
+ MOVQ AX, r+48(FP)
+ RET
+
+// func Y__strncasecmp_l(tls *TLS, l uintptr, r uintptr, n Tsize_t, loc Tlocale_t) (r1 int32)
+TEXT ·Y__strncasecmp_l(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ l+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ r+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ n+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ loc+32(FP), AX
+ MOVQ AX, 32(SP)
+ CALL ·X__strncasecmp_l(SB)
+ MOVL 40(SP), AX
+ MOVL AX, r1+40(FP)
+ RET
+
+// func Y__strtod_l(tls *TLS, s uintptr, p uintptr, l Tlocale_t) (r float64)
+TEXT ·Y__strtod_l(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ p+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ l+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__strtod_l(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__strtof_l(tls *TLS, s uintptr, p uintptr, l Tlocale_t) (r float32)
+TEXT ·Y__strtof_l(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ p+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ l+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__strtof_l(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__strtoimax_internal(tls *TLS, s uintptr, p uintptr, base int32) (r Tintmax_t)
+TEXT ·Y__strtoimax_internal(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ p+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL base+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__strtoimax_internal(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__strtol_internal(tls *TLS, s uintptr, p uintptr, base int32) (r int64)
+TEXT ·Y__strtol_internal(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ p+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL base+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__strtol_internal(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__strtold_l(tls *TLS, s uintptr, p uintptr, l Tlocale_t) (r float64)
+TEXT ·Y__strtold_l(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ p+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ l+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__strtold_l(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__strtoll_internal(tls *TLS, s uintptr, p uintptr, base int32) (r int64)
+TEXT ·Y__strtoll_internal(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ p+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL base+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__strtoll_internal(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__strtoul_internal(tls *TLS, s uintptr, p uintptr, base int32) (r uint64)
+TEXT ·Y__strtoul_internal(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ p+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL base+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__strtoul_internal(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__strtoull_internal(tls *TLS, s uintptr, p uintptr, base int32) (r uint64)
+TEXT ·Y__strtoull_internal(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ p+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL base+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__strtoull_internal(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__strtoumax_internal(tls *TLS, s uintptr, p uintptr, base int32) (r Tuintmax_t)
+TEXT ·Y__strtoumax_internal(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ p+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL base+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__strtoumax_internal(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__strxfrm_l(tls *TLS, dest uintptr, src uintptr, n Tsize_t, loc Tlocale_t) (r Tsize_t)
+TEXT ·Y__strxfrm_l(SB),$48-48
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ dest+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ src+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ n+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ loc+32(FP), AX
+ MOVQ AX, 32(SP)
+ CALL ·X__strxfrm_l(SB)
+ MOVQ 40(SP), AX
+ MOVQ AX, r+40(FP)
+ RET
+
+// func Y__sync_synchronize(t *TLS)
+TEXT ·Y__sync_synchronize(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__sync_synchronize(SB)
+ RET
+
+// func Y__sync_val_compare_and_swapInt16(t *TLS, ptr uintptr, oldval, newval int16) (r int16)
+TEXT ·Y__sync_val_compare_and_swapInt16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW oldval+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVW newval+18(FP), AX
+ MOVW AX, 18(SP)
+ CALL ·X__sync_val_compare_and_swapInt16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__sync_val_compare_and_swapInt32(t *TLS, ptr uintptr, oldval, newval int32) (r int32)
+TEXT ·Y__sync_val_compare_and_swapInt32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL oldval+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL newval+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__sync_val_compare_and_swapInt32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__sync_val_compare_and_swapInt64(t *TLS, ptr uintptr, oldval, newval int64) (r int64)
+TEXT ·Y__sync_val_compare_and_swapInt64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ oldval+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ newval+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__sync_val_compare_and_swapInt64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__sync_val_compare_and_swapInt8(t *TLS, ptr uintptr, oldval, newval int8) (r int8)
+TEXT ·Y__sync_val_compare_and_swapInt8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB oldval+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVB newval+17(FP), AX
+ MOVB AX, 17(SP)
+ CALL ·X__sync_val_compare_and_swapInt8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__sync_val_compare_and_swapUint16(t *TLS, ptr uintptr, oldval, newval uint16) (r uint16)
+TEXT ·Y__sync_val_compare_and_swapUint16(SB),$32-26
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVW oldval+16(FP), AX
+ MOVW AX, 16(SP)
+ MOVW newval+18(FP), AX
+ MOVW AX, 18(SP)
+ CALL ·X__sync_val_compare_and_swapUint16(SB)
+ MOVW 24(SP), AX
+ MOVW AX, r+24(FP)
+ RET
+
+// func Y__sync_val_compare_and_swapUint32(t *TLS, ptr uintptr, oldval, newval uint32) (r uint32)
+TEXT ·Y__sync_val_compare_and_swapUint32(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL oldval+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL newval+20(FP), AX
+ MOVL AX, 20(SP)
+ CALL ·X__sync_val_compare_and_swapUint32(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__sync_val_compare_and_swapUint64(t *TLS, ptr uintptr, oldval, newval uint64) (r uint64)
+TEXT ·Y__sync_val_compare_and_swapUint64(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ oldval+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ newval+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__sync_val_compare_and_swapUint64(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r+32(FP)
+ RET
+
+// func Y__sync_val_compare_and_swapUint8(t *TLS, ptr uintptr, oldval, newval uint8) (r uint8)
+TEXT ·Y__sync_val_compare_and_swapUint8(SB),$32-25
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ ptr+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVB oldval+16(FP), AX
+ MOVB AX, 16(SP)
+ MOVB newval+17(FP), AX
+ MOVB AX, 17(SP)
+ CALL ·X__sync_val_compare_and_swapUint8(SB)
+ MOVB 24(SP), AX
+ MOVB AX, r+24(FP)
+ RET
+
+// func Y__syscall0(tls *TLS, n long) (_2 long)
+TEXT ·Y__syscall0(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ n+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__syscall0(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, _2+16(FP)
+ RET
+
+// func Y__syscall1(tls *TLS, n, a1 long) (_2 long)
+TEXT ·Y__syscall1(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ n+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ a1+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__syscall1(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, _2+24(FP)
+ RET
+
+// func Y__syscall2(tls *TLS, n, a1, a2 long) (_2 long)
+TEXT ·Y__syscall2(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ n+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ a1+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ a2+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__syscall2(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, _2+32(FP)
+ RET
+
+// func Y__syscall3(tls *TLS, n, a1, a2, a3 long) (_2 long)
+TEXT ·Y__syscall3(SB),$48-48
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ n+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ a1+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ a2+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ a3+32(FP), AX
+ MOVQ AX, 32(SP)
+ CALL ·X__syscall3(SB)
+ MOVQ 40(SP), AX
+ MOVQ AX, _2+40(FP)
+ RET
+
+// func Y__syscall4(tls *TLS, n, a1, a2, a3, a4 long) (_2 long)
+TEXT ·Y__syscall4(SB),$56-56
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ n+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ a1+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ a2+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ a3+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ a4+40(FP), AX
+ MOVQ AX, 40(SP)
+ CALL ·X__syscall4(SB)
+ MOVQ 48(SP), AX
+ MOVQ AX, _2+48(FP)
+ RET
+
+// func Y__syscall5(tls *TLS, n, a1, a2, a3, a4, a5 long) (_2 long)
+TEXT ·Y__syscall5(SB),$64-64
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ n+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ a1+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ a2+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ a3+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ a4+40(FP), AX
+ MOVQ AX, 40(SP)
+ MOVQ a5+48(FP), AX
+ MOVQ AX, 48(SP)
+ CALL ·X__syscall5(SB)
+ MOVQ 56(SP), AX
+ MOVQ AX, _2+56(FP)
+ RET
+
+// func Y__syscall6(tls *TLS, n, a1, a2, a3, a4, a5, a6 long) (_2 long)
+TEXT ·Y__syscall6(SB),$72-72
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ n+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ a1+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ a2+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ a3+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ a4+40(FP), AX
+ MOVQ AX, 40(SP)
+ MOVQ a5+48(FP), AX
+ MOVQ AX, 48(SP)
+ MOVQ a6+56(FP), AX
+ MOVQ AX, 56(SP)
+ CALL ·X__syscall6(SB)
+ MOVQ 64(SP), AX
+ MOVQ AX, _2+64(FP)
+ RET
+
+// func Y__syscall_ret(tls *TLS, r uint64) (r1 int64)
+TEXT ·Y__syscall_ret(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ r+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__syscall_ret(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r1+16(FP)
+ RET
+
+// func Y__tan(tls *TLS, x float64, y float64, odd int32) (r1 float64)
+TEXT ·Y__tan(SB),$40-40
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ y+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL odd+24(FP), AX
+ MOVL AX, 24(SP)
+ CALL ·X__tan(SB)
+ MOVQ 32(SP), AX
+ MOVQ AX, r1+32(FP)
+ RET
+
+// func Y__tandf(tls *TLS, x float64, odd int32) (r1 float32)
+TEXT ·Y__tandf(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ x+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL odd+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X__tandf(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r1+24(FP)
+ RET
+
+// func Y__tm_to_secs(tls *TLS, tm uintptr) (r int64)
+TEXT ·Y__tm_to_secs(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ tm+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__tm_to_secs(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__tm_to_tzname(tls *TLS, tm uintptr) (r uintptr)
+TEXT ·Y__tm_to_tzname(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ tm+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__tm_to_tzname(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__tolower_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__tolower_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__tolower_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__toread(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__toread(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__toread(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__toread_needs_stdio_exit(tls *TLS)
+TEXT ·Y__toread_needs_stdio_exit(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__toread_needs_stdio_exit(SB)
+ RET
+
+// func Y__toupper_l(tls *TLS, c int32, l Tlocale_t) (r int32)
+TEXT ·Y__toupper_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__toupper_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__towctrans_l(tls *TLS, c Twint_t, t Twctrans_t, l Tlocale_t) (r Twint_t)
+TEXT ·Y__towctrans_l(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ t+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ l+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__towctrans_l(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__towlower_l(tls *TLS, c Twint_t, l Tlocale_t) (r Twint_t)
+TEXT ·Y__towlower_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__towlower_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__towrite(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__towrite(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__towrite(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__towrite_needs_stdio_exit(tls *TLS)
+TEXT ·Y__towrite_needs_stdio_exit(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__towrite_needs_stdio_exit(SB)
+ RET
+
+// func Y__towupper_l(tls *TLS, c Twint_t, l Tlocale_t) (r Twint_t)
+TEXT ·Y__towupper_l(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL c+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__towupper_l(SB)
+ MOVL 24(SP), AX
+ MOVL AX, r+24(FP)
+ RET
+
+// func Y__tre_mem_alloc_impl(tls *TLS, mem Ttre_mem_t, provided int32, provided_block uintptr, zero int32, size Tsize_t) (r uintptr)
+TEXT ·Y__tre_mem_alloc_impl(SB),$56-56
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ mem+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL provided+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVQ provided_block+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVL zero+32(FP), AX
+ MOVL AX, 32(SP)
+ MOVQ size+40(FP), AX
+ MOVQ AX, 40(SP)
+ CALL ·X__tre_mem_alloc_impl(SB)
+ MOVQ 48(SP), AX
+ MOVQ AX, r+48(FP)
+ RET
+
+// func Y__tre_mem_destroy(tls *TLS, mem Ttre_mem_t)
+TEXT ·Y__tre_mem_destroy(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ mem+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__tre_mem_destroy(SB)
+ RET
+
+// func Y__tre_mem_new_impl(tls *TLS, provided int32, provided_block uintptr) (r Ttre_mem_t)
+TEXT ·Y__tre_mem_new_impl(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL provided+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ provided_block+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__tre_mem_new_impl(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__tsearch_balance(tls *TLS, p uintptr) (r int32)
+TEXT ·Y__tsearch_balance(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ p+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__tsearch_balance(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__uflow(tls *TLS, f uintptr) (r int32)
+TEXT ·Y__uflow(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__uflow(SB)
+ MOVL 16(SP), AX
+ MOVL AX, r+16(FP)
+ RET
+
+// func Y__unlist_locked_file(tls *TLS, f uintptr)
+TEXT ·Y__unlist_locked_file(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ f+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__unlist_locked_file(SB)
+ RET
+
+// func Y__unlockfile(tls *TLS, file uintptr)
+TEXT ·Y__unlockfile(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ file+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__unlockfile(SB)
+ RET
+
+// func Y__uselocale(tls *TLS, new1 Tlocale_t) (r Tlocale_t)
+TEXT ·Y__uselocale(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ new1+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__uselocale(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__vm_wait(tls *TLS)
+TEXT ·Y__vm_wait(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X__vm_wait(SB)
+ RET
+
+// func Y__wcscoll_l(tls *TLS, l uintptr, r uintptr, locale Tlocale_t) (r1 int32)
+TEXT ·Y__wcscoll_l(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ l+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ r+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ locale+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__wcscoll_l(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r1+32(FP)
+ RET
+
+// func Y__wcsftime_l(tls *TLS, s uintptr, n Tsize_t, f uintptr, tm uintptr, loc Tlocale_t) (r Tsize_t)
+TEXT ·Y__wcsftime_l(SB),$56-56
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ n+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ f+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ tm+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ loc+40(FP), AX
+ MOVQ AX, 40(SP)
+ CALL ·X__wcsftime_l(SB)
+ MOVQ 48(SP), AX
+ MOVQ AX, r+48(FP)
+ RET
+
+// func Y__wcsxfrm_l(tls *TLS, dest uintptr, src uintptr, n Tsize_t, loc Tlocale_t) (r Tsize_t)
+TEXT ·Y__wcsxfrm_l(SB),$48-48
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ dest+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ src+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ n+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ loc+32(FP), AX
+ MOVQ AX, 32(SP)
+ CALL ·X__wcsxfrm_l(SB)
+ MOVQ 40(SP), AX
+ MOVQ AX, r+40(FP)
+ RET
+
+// func Y__wctrans_l(tls *TLS, s uintptr, l Tlocale_t) (r Twctrans_t)
+TEXT ·Y__wctrans_l(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__wctrans_l(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__wctype_l(tls *TLS, s uintptr, l Tlocale_t) (r Twctype_t)
+TEXT ·Y__wctype_l(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ l+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__wctype_l(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y__xmknod(tls *TLS, ver int32, path uintptr, mode Tmode_t, dev uintptr) (r int32)
+TEXT ·Y__xmknod(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL ver+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ path+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL mode+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVQ dev+32(FP), AX
+ MOVQ AX, 32(SP)
+ CALL ·X__xmknod(SB)
+ MOVL 40(SP), AX
+ MOVL AX, r+40(FP)
+ RET
+
+// func Y__xmknodat(tls *TLS, ver int32, fd int32, path uintptr, mode Tmode_t, dev uintptr) (r int32)
+TEXT ·Y__xmknodat(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL ver+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL fd+12(FP), AX
+ MOVL AX, 12(SP)
+ MOVQ path+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL mode+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVQ dev+32(FP), AX
+ MOVQ AX, 32(SP)
+ CALL ·X__xmknodat(SB)
+ MOVL 40(SP), AX
+ MOVL AX, r+40(FP)
+ RET
+
+// func Y__xpg_basename(tls *TLS, s uintptr) (r uintptr)
+TEXT ·Y__xpg_basename(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ s+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X__xpg_basename(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, r+16(FP)
+ RET
+
+// func Y__xpg_strerror_r(tls *TLS, err int32, buf uintptr, buflen Tsize_t) (r int32)
+TEXT ·Y__xpg_strerror_r(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL err+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ buf+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ buflen+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__xpg_strerror_r(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__xstat(tls *TLS, ver int32, path uintptr, buf uintptr) (r int32)
+TEXT ·Y__xstat(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL ver+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVQ path+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ buf+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X__xstat(SB)
+ MOVL 32(SP), AX
+ MOVL AX, r+32(FP)
+ RET
+
+// func Y__year_to_secs(tls *TLS, year int64, is_leap uintptr) (r int64)
+TEXT ·Y__year_to_secs(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ year+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ is_leap+16(FP), AX
+ MOVQ AX, 16(SP)
+ CALL ·X__year_to_secs(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, r+24(FP)
+ RET
+
+// func Y_exit(tls *TLS, status int32)
+TEXT ·Y_exit(SB),$16-12
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVL status+8(FP), AX
+ MOVL AX, 8(SP)
+ CALL ·X_exit(SB)
+ RET
+
+// func Y_flushlbf(tls *TLS)
+TEXT ·Y_flushlbf(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·X_flushlbf(SB)
+ RET
+
+// func Y_longjmp(t *TLS, env uintptr, val int32)
+TEXT ·Y_longjmp(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ env+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL val+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X_longjmp(SB)
+ RET
+
+// func Y_obstack_begin(t *TLS, obstack uintptr, size, alignment int32, chunkfun, freefun uintptr) (_4 int32)
+TEXT ·Y_obstack_begin(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ obstack+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL size+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL alignment+20(FP), AX
+ MOVL AX, 20(SP)
+ MOVQ chunkfun+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ freefun+32(FP), AX
+ MOVQ AX, 32(SP)
+ CALL ·X_obstack_begin(SB)
+ MOVL 40(SP), AX
+ MOVL AX, _4+40(FP)
+ RET
+
+// func Y_obstack_newchunk(t *TLS, obstack uintptr, length int32) (_3 int32)
+TEXT ·Y_obstack_newchunk(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ obstack+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL length+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X_obstack_newchunk(SB)
+ MOVL 24(SP), AX
+ MOVL AX, _3+24(FP)
+ RET
+
+// func Y_pthread_cleanup_pop(tls *TLS, _ uintptr, run int32)
+TEXT ·Y_pthread_cleanup_pop(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL run+16(FP), AX
+ MOVL AX, 16(SP)
+ CALL ·X_pthread_cleanup_pop(SB)
+ RET
+
+// func Y_pthread_cleanup_push(tls *TLS, _, f, x uintptr)
+TEXT ·Y_pthread_cleanup_push(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ f+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ x+24(FP), AX
+ MOVQ AX, 24(SP)
+ CALL ·X_pthread_cleanup_push(SB)
+ RET
+
+// func Y_setjmp(t *TLS, env uintptr) (_2 int32)
+TEXT ·Y_setjmp(SB),$24-20
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ t+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ env+8(FP), AX
+ MOVQ AX, 8(SP)
+ CALL ·X_setjmp(SB)
+ MOVL 16(SP), AX
+ MOVL AX, _2+16(FP)
RET
// func Ya64l(tls *TLS, s uintptr) (r int64)
@@ -1494,7 +8765,7 @@ TEXT ·Yalarm(SB),$24-20
MOVL AX, r+16(FP)
RET
-// func Yalloca(tls *TLS, size Tsize_t) uintptr
+// func Yalloca(tls *TLS, size Tsize_t) (_2 uintptr)
TEXT ·Yalloca(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -1504,7 +8775,7 @@ TEXT ·Yalloca(SB),$24-24
MOVQ AX, 8(SP)
CALL ·Xalloca(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
// func Yalphasort(tls *TLS, a uintptr, b uintptr) (r int32)
@@ -1660,17 +8931,45 @@ TEXT ·Yasprintf(SB),$40-36
MOVL AX, r+32(FP)
RET
-// func Yat_quick_exit(tls *TLS, func1 uintptr) (r1 int32)
-TEXT ·Yat_quick_exit(SB),$24-20
+// func Yat_quick_exit(tls *TLS, __ccgo_fp_func uintptr) (r1 int32)
+TEXT ·Yat_quick_exit(SB),$32-20
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
- MOVQ func1+8(FP), AX
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_func+8(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_at_quick_exit_0(SB) // Create the closure for calling __ccgo_fp_func
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 8(SP)
CALL ·Xat_quick_exit(SB)
MOVL 16(SP), AX
MOVL AX, r1+16(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_at_quick_exit_0(SB),$16-16
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ __ccgo_fp+8(FP), AX
+ CALL *AX // Call the ABI0 code ptr
RET
// func Yatan(tls *TLS, x3 float64) (r float64)
@@ -1861,7 +9160,7 @@ TEXT ·Yatoll(SB),$24-24
MOVQ AX, r+16(FP)
RET
-// func Ybacktrace(t *TLS, buf uintptr, size int32) int32
+// func Ybacktrace(t *TLS, buf uintptr, size int32) (_3 int32)
TEXT ·Ybacktrace(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -1873,7 +9172,7 @@ TEXT ·Ybacktrace(SB),$32-28
MOVL AX, 16(SP)
CALL ·Xbacktrace(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _3+24(FP)
RET
// func Ybacktrace_symbols_fd(t *TLS, buffer uintptr, size, fd int32)
@@ -1996,10 +9295,24 @@ TEXT ·Ybrk(SB),$24-20
MOVL AX, r+16(FP)
RET
-// func Ybsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, cmp uintptr) (r uintptr)
-TEXT ·Ybsearch(SB),$56-56
+// func Ybsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp uintptr) (r uintptr)
+TEXT ·Ybsearch(SB),$64-56
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_cmp+40(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_bsearch_4(SB) // Create the closure for calling __ccgo_fp_cmp
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ key+8(FP), AX
@@ -2010,11 +9323,31 @@ TEXT ·Ybsearch(SB),$56-56
MOVQ AX, 24(SP)
MOVQ width+32(FP), AX
MOVQ AX, 32(SP)
- MOVQ cmp+40(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 40(SP)
CALL ·Xbsearch(SB)
MOVQ 48(SP), AX
MOVQ AX, r+48(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_bsearch_4(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ __ccgo_fp+24(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 24(SP), AX
+ MOVL AX, _3+32(FP)
RET
// func Ybtowc(tls *TLS, c int32) (r Twint_t)
@@ -4077,7 +11410,7 @@ TEXT ·Ydiv(SB),$24-24
MOVL AX, r_Frem+20(FP)
RET
-// func Ydlclose(t *TLS, handle uintptr) int32
+// func Ydlclose(t *TLS, handle uintptr) (_2 int32)
TEXT ·Ydlclose(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -4087,10 +11420,10 @@ TEXT ·Ydlclose(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xdlclose(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ydlerror(t *TLS) uintptr
+// func Ydlerror(t *TLS) (_1 uintptr)
TEXT ·Ydlerror(SB),$16-16
GO_ARGS
NO_LOCAL_POINTERS
@@ -4098,10 +11431,10 @@ TEXT ·Ydlerror(SB),$16-16
MOVQ AX, 0(SP)
CALL ·Xdlerror(SB)
MOVQ 8(SP), AX
- MOVQ AX, ret+8(FP)
+ MOVQ AX, _1+8(FP)
RET
-// func Ydlopen(t *TLS, filename uintptr, flags int32) uintptr
+// func Ydlopen(t *TLS, filename uintptr, flags int32) (_3 uintptr)
TEXT ·Ydlopen(SB),$32-32
GO_ARGS
NO_LOCAL_POINTERS
@@ -4113,10 +11446,10 @@ TEXT ·Ydlopen(SB),$32-32
MOVL AX, 16(SP)
CALL ·Xdlopen(SB)
MOVQ 24(SP), AX
- MOVQ AX, ret+24(FP)
+ MOVQ AX, _3+24(FP)
RET
-// func Ydlsym(t *TLS, handle, symbol uintptr) uintptr
+// func Ydlsym(t *TLS, handle, symbol uintptr) (_2 uintptr)
TEXT ·Ydlsym(SB),$32-32
GO_ARGS
NO_LOCAL_POINTERS
@@ -4128,7 +11461,7 @@ TEXT ·Ydlsym(SB),$32-32
MOVQ AX, 16(SP)
CALL ·Xdlsym(SB)
MOVQ 24(SP), AX
- MOVQ AX, ret+24(FP)
+ MOVQ AX, _2+24(FP)
RET
// func Ydn_comp(tls *TLS, src uintptr, dst uintptr, space int32, dnptrs uintptr, lastdnptr uintptr) (r int32)
@@ -6305,7 +13638,7 @@ TEXT ·Yfopencookie(SB),$64-64
MOVQ AX, r+56(FP)
RET
-// func Yfork(t *TLS) int32
+// func Yfork(t *TLS) (_1 int32)
TEXT ·Yfork(SB),$16-12
GO_ARGS
NO_LOCAL_POINTERS
@@ -6313,7 +13646,7 @@ TEXT ·Yfork(SB),$16-12
MOVQ AX, 0(SP)
CALL ·Xfork(SB)
MOVL 8(SP), AX
- MOVL AX, ret+8(FP)
+ MOVL AX, _1+8(FP)
RET
// func Yfpathconf(tls *TLS, fd int32, name int32) (r int64)
@@ -6916,7 +14249,7 @@ TEXT ·Yftrylockfile(SB),$24-20
MOVL AX, r+16(FP)
RET
-// func Yfts64_close(t *TLS, ftsp uintptr) int32
+// func Yfts64_close(t *TLS, ftsp uintptr) (_2 int32)
TEXT ·Yfts64_close(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -6926,10 +14259,10 @@ TEXT ·Yfts64_close(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xfts64_close(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Yfts64_open(t *TLS, path_argv uintptr, options int32, compar uintptr) uintptr
+// func Yfts64_open(t *TLS, path_argv uintptr, options int32, compar uintptr) (_4 uintptr)
TEXT ·Yfts64_open(SB),$40-40
GO_ARGS
NO_LOCAL_POINTERS
@@ -6943,10 +14276,10 @@ TEXT ·Yfts64_open(SB),$40-40
MOVQ AX, 24(SP)
CALL ·Xfts64_open(SB)
MOVQ 32(SP), AX
- MOVQ AX, ret+32(FP)
+ MOVQ AX, _4+32(FP)
RET
-// func Yfts64_read(t *TLS, ftsp uintptr) uintptr
+// func Yfts64_read(t *TLS, ftsp uintptr) (_2 uintptr)
TEXT ·Yfts64_read(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -6956,10 +14289,10 @@ TEXT ·Yfts64_read(SB),$24-24
MOVQ AX, 8(SP)
CALL ·Xfts64_read(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
-// func Yfts_close(t *TLS, ftsp uintptr) int32
+// func Yfts_close(t *TLS, ftsp uintptr) (_2 int32)
TEXT ·Yfts_close(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -6969,10 +14302,10 @@ TEXT ·Yfts_close(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xfts_close(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Yfts_open(t *TLS, path_argv uintptr, options int32, compar uintptr) uintptr
+// func Yfts_open(t *TLS, path_argv uintptr, options int32, compar uintptr) (_4 uintptr)
TEXT ·Yfts_open(SB),$40-40
GO_ARGS
NO_LOCAL_POINTERS
@@ -6986,10 +14319,10 @@ TEXT ·Yfts_open(SB),$40-40
MOVQ AX, 24(SP)
CALL ·Xfts_open(SB)
MOVQ 32(SP), AX
- MOVQ AX, ret+32(FP)
+ MOVQ AX, _4+32(FP)
RET
-// func Yfts_read(t *TLS, ftsp uintptr) uintptr
+// func Yfts_read(t *TLS, ftsp uintptr) (_2 uintptr)
TEXT ·Yfts_read(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -6999,24 +14332,60 @@ TEXT ·Yfts_read(SB),$24-24
MOVQ AX, 8(SP)
CALL ·Xfts_read(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
-// func Yftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32)
-TEXT ·Yftw(SB),$40-36
+// func Yftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32) (r int32)
+TEXT ·Yftw(SB),$48-36
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_fn+16(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_ftw_1(SB) // Create the closure for calling __ccgo_fp_fn
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ path+8(FP), AX
MOVQ AX, 8(SP)
- MOVQ fn+16(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 16(SP)
MOVL fd_limit+24(FP), AX
MOVL AX, 24(SP)
CALL ·Xftw(SB)
MOVL 32(SP), AX
MOVL AX, r+32(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_ftw_1(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _3+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVQ __ccgo_fp+32(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 32(SP), AX
+ MOVL AX, _4+40(FP)
RET
// func Yfunlockfile(tls *TLS, f uintptr)
@@ -8650,23 +16019,57 @@ TEXT ·Ygetxattr(SB),$48-48
MOVQ AX, r+40(FP)
RET
-// func Yglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g_ uintptr) (r int32)
-TEXT ·Yglob(SB),$48-44
+// func Yglob(tls *TLS, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, g_ uintptr) (r int32)
+TEXT ·Yglob(SB),$56-44
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_errfunc+24(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_glob_2(SB) // Create the closure for calling __ccgo_fp_errfunc
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ pat+8(FP), AX
MOVQ AX, 8(SP)
MOVL flags+16(FP), AX
MOVL AX, 16(SP)
- MOVQ errfunc+24(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 24(SP)
MOVQ g_+32(FP), AX
MOVQ AX, 32(SP)
CALL ·Xglob(SB)
MOVL 40(SP), AX
MOVL AX, r+40(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_glob_2(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL _2+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVQ __ccgo_fp+24(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 24(SP), AX
+ MOVL AX, _3+32(FP)
RET
// func Yglobfree(tls *TLS, g_ uintptr)
@@ -9218,7 +16621,7 @@ TEXT ·Yinitstate(SB),$40-40
MOVQ AX, r+32(FP)
RET
-// func Yinitstate_r(t *TLS, seed uint32, statebuf uintptr, statelen Tsize_t, buf uintptr) int32
+// func Yinitstate_r(t *TLS, seed uint32, statebuf uintptr, statelen Tsize_t, buf uintptr) (_5 int32)
TEXT ·Yinitstate_r(SB),$48-44
GO_ARGS
NO_LOCAL_POINTERS
@@ -9234,7 +16637,7 @@ TEXT ·Yinitstate_r(SB),$48-44
MOVQ AX, 32(SP)
CALL ·Xinitstate_r(SB)
MOVL 40(SP), AX
- MOVL AX, ret+40(FP)
+ MOVL AX, _5+40(FP)
RET
// func Yinotify_add_watch(tls *TLS, fd int32, pathname uintptr, mask Tuint32_t) (r int32)
@@ -9588,7 +16991,7 @@ TEXT ·Yislower_l(SB),$32-28
MOVL AX, r+24(FP)
RET
-// func Yisnan(t *TLS, x float64) int32
+// func Yisnan(t *TLS, x float64) (_2 int32)
TEXT ·Yisnan(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -9598,10 +17001,10 @@ TEXT ·Yisnan(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xisnan(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Yisnanf(t *TLS, arg float32) int32
+// func Yisnanf(t *TLS, arg float32) (_2 int32)
TEXT ·Yisnanf(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -9611,10 +17014,10 @@ TEXT ·Yisnanf(SB),$24-20
MOVL AX, 8(SP)
CALL ·Xisnanf(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Yisnanl(t *TLS, arg float64) int32
+// func Yisnanl(t *TLS, arg float64) (_2 int32)
TEXT ·Yisnanl(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -9624,7 +17027,7 @@ TEXT ·Yisnanl(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xisnanl(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
// func Yisprint(tls *TLS, c int32) (r int32)
@@ -10430,10 +17833,24 @@ TEXT ·Yldiv(SB),$40-40
MOVQ AX, r_Frem+32(FP)
RET
-// func Ylfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr)
-TEXT ·Ylfind(SB),$56-56
+// func Ylfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr)
+TEXT ·Ylfind(SB),$64-56
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_compar+40(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_lfind_4(SB) // Create the closure for calling __ccgo_fp_compar
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ key+8(FP), AX
@@ -10444,11 +17861,31 @@ TEXT ·Ylfind(SB),$56-56
MOVQ AX, 24(SP)
MOVQ width+32(FP), AX
MOVQ AX, 32(SP)
- MOVQ compar+40(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 40(SP)
CALL ·Xlfind(SB)
MOVQ 48(SP), AX
MOVQ AX, r+48(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_lfind_4(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ __ccgo_fp+24(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 24(SP), AX
+ MOVL AX, _3+32(FP)
RET
// func Ylgamma(tls *TLS, x float64) (r float64)
@@ -11128,10 +18565,24 @@ TEXT ·Ylroundl(SB),$24-24
MOVQ AX, r+16(FP)
RET
-// func Ylsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr)
-TEXT ·Ylsearch(SB),$56-56
+// func Ylsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr)
+TEXT ·Ylsearch(SB),$64-56
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_compar+40(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_lsearch_4(SB) // Create the closure for calling __ccgo_fp_compar
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ key+8(FP), AX
@@ -11142,11 +18593,31 @@ TEXT ·Ylsearch(SB),$56-56
MOVQ AX, 24(SP)
MOVQ width+32(FP), AX
MOVQ AX, 32(SP)
- MOVQ compar+40(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 40(SP)
CALL ·Xlsearch(SB)
MOVQ 48(SP), AX
MOVQ AX, r+48(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_lsearch_4(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ __ccgo_fp+24(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 24(SP), AX
+ MOVL AX, _3+32(FP)
RET
// func Ylseek(tls *TLS, fd int32, offset Toff_t, whence int32) (r Toff_t)
@@ -12402,15 +19873,30 @@ TEXT ·Ynexttowardl(SB),$32-32
MOVQ AX, r+24(FP)
RET
-// func Ynftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1 int32)
-TEXT ·Ynftw(SB),$40-36
+// func Ynftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32) (r1 int32)
+TEXT ·Ynftw(SB),$48-36
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_fn+16(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_nftw_1(SB) // Create the closure for calling __ccgo_fp_fn
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ path+8(FP), AX
MOVQ AX, 8(SP)
- MOVQ fn+16(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 16(SP)
MOVL fd_limit+24(FP), AX
MOVL AX, 24(SP)
@@ -12419,6 +19905,29 @@ TEXT ·Ynftw(SB),$40-36
CALL ·Xnftw(SB)
MOVL 32(SP), AX
MOVL AX, r1+32(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_nftw_1(SB),$56-52
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVL _3+24(FP), AX
+ MOVL AX, 24(SP)
+ MOVQ _4+32(FP), AX
+ MOVQ AX, 32(SP)
+ MOVQ __ccgo_fp+40(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 40(SP), AX
+ MOVL AX, _5+48(FP)
RET
// func Yngettext(tls *TLS, msgid1 uintptr, msgid2 uintptr, n uint64) (r uintptr)
@@ -12659,7 +20168,7 @@ TEXT ·Yobstack_free(SB),$24-24
CALL ·Xobstack_free(SB)
RET
-// func Yobstack_vprintf(t *TLS, obstack, template, va uintptr) int32
+// func Yobstack_vprintf(t *TLS, obstack, template, va uintptr) (_2 int32)
TEXT ·Yobstack_vprintf(SB),$40-36
GO_ARGS
NO_LOCAL_POINTERS
@@ -12673,7 +20182,7 @@ TEXT ·Yobstack_vprintf(SB),$40-36
MOVQ AX, 24(SP)
CALL ·Xobstack_vprintf(SB)
MOVL 32(SP), AX
- MOVL AX, ret+32(FP)
+ MOVL AX, _2+32(FP)
RET
// func Yopen(tls *TLS, filename uintptr, flags int32, va uintptr) (r int32)
@@ -12948,7 +20457,7 @@ TEXT ·Ypoll(SB),$40-36
MOVL AX, r+32(FP)
RET
-// func Ypopen(t *TLS, command, type1 uintptr) uintptr
+// func Ypopen(t *TLS, command, type1 uintptr) (_2 uintptr)
TEXT ·Ypopen(SB),$32-32
GO_ARGS
NO_LOCAL_POINTERS
@@ -12960,7 +20469,7 @@ TEXT ·Ypopen(SB),$32-32
MOVQ AX, 16(SP)
CALL ·Xpopen(SB)
MOVQ 24(SP), AX
- MOVQ AX, ret+24(FP)
+ MOVQ AX, _2+24(FP)
RET
// func Yposix_close(tls *TLS, fd int32, flags int32) (r int32)
@@ -13665,7 +21174,7 @@ TEXT ·Ypsignal(SB),$24-24
CALL ·Xpsignal(SB)
RET
-// func Ypthread_atfork(tls *TLS, prepare, parent, child uintptr) int32
+// func Ypthread_atfork(tls *TLS, prepare, parent, child uintptr) (_2 int32)
TEXT ·Ypthread_atfork(SB),$40-36
GO_ARGS
NO_LOCAL_POINTERS
@@ -13679,10 +21188,10 @@ TEXT ·Ypthread_atfork(SB),$40-36
MOVQ AX, 24(SP)
CALL ·Xpthread_atfork(SB)
MOVL 32(SP), AX
- MOVL AX, ret+32(FP)
+ MOVL AX, _2+32(FP)
RET
-// func Ypthread_attr_destroy(tls *TLS, a uintptr) int32
+// func Ypthread_attr_destroy(tls *TLS, a uintptr) (_2 int32)
TEXT ·Ypthread_attr_destroy(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -13692,10 +21201,10 @@ TEXT ·Ypthread_attr_destroy(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_attr_destroy(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ypthread_attr_getdetachstate(tls *TLS, a uintptr, state uintptr) int32
+// func Ypthread_attr_getdetachstate(tls *TLS, a uintptr, state uintptr) (_3 int32)
TEXT ·Ypthread_attr_getdetachstate(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -13707,10 +21216,10 @@ TEXT ·Ypthread_attr_getdetachstate(SB),$32-28
MOVQ AX, 16(SP)
CALL ·Xpthread_attr_getdetachstate(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _3+24(FP)
RET
-// func Ypthread_attr_init(tls *TLS, a uintptr) int32
+// func Ypthread_attr_init(tls *TLS, a uintptr) (_2 int32)
TEXT ·Ypthread_attr_init(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -13720,7 +21229,7 @@ TEXT ·Ypthread_attr_init(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_attr_init(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
// func Ypthread_attr_setdetachstate(tls *TLS, a uintptr, state int32) (r int32)
@@ -13738,7 +21247,7 @@ TEXT ·Ypthread_attr_setdetachstate(SB),$32-28
MOVL AX, r+24(FP)
RET
-// func Ypthread_attr_setscope(tls *TLS, a uintptr, scope int32) int32
+// func Ypthread_attr_setscope(tls *TLS, a uintptr, scope int32) (_3 int32)
TEXT ·Ypthread_attr_setscope(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -13750,10 +21259,10 @@ TEXT ·Ypthread_attr_setscope(SB),$32-28
MOVL AX, 16(SP)
CALL ·Xpthread_attr_setscope(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _3+24(FP)
RET
-// func Ypthread_attr_setstacksize(tls *TLS, a uintptr, stacksite Tsize_t) int32
+// func Ypthread_attr_setstacksize(tls *TLS, a uintptr, stacksite Tsize_t) (_3 int32)
TEXT ·Ypthread_attr_setstacksize(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -13765,7 +21274,7 @@ TEXT ·Ypthread_attr_setstacksize(SB),$32-28
MOVQ AX, 16(SP)
CALL ·Xpthread_attr_setstacksize(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _3+24(FP)
RET
// func Ypthread_cleanup_pop(tls *TLS, run int32)
@@ -13792,7 +21301,7 @@ TEXT ·Ypthread_cleanup_push(SB),$24-24
CALL ·Xpthread_cleanup_push(SB)
RET
-// func Ypthread_cond_broadcast(tls *TLS, c uintptr) int32
+// func Ypthread_cond_broadcast(tls *TLS, c uintptr) (_2 int32)
TEXT ·Ypthread_cond_broadcast(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -13802,10 +21311,10 @@ TEXT ·Ypthread_cond_broadcast(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_cond_broadcast(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ypthread_cond_destroy(tls *TLS, c uintptr) int32
+// func Ypthread_cond_destroy(tls *TLS, c uintptr) (_2 int32)
TEXT ·Ypthread_cond_destroy(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -13815,10 +21324,10 @@ TEXT ·Ypthread_cond_destroy(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_cond_destroy(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ypthread_cond_init(tls *TLS, c, a uintptr) int32
+// func Ypthread_cond_init(tls *TLS, c, a uintptr) (_2 int32)
TEXT ·Ypthread_cond_init(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -13830,10 +21339,10 @@ TEXT ·Ypthread_cond_init(SB),$32-28
MOVQ AX, 16(SP)
CALL ·Xpthread_cond_init(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _2+24(FP)
RET
-// func Ypthread_cond_signal(tls *TLS, c uintptr) int32
+// func Ypthread_cond_signal(tls *TLS, c uintptr) (_2 int32)
TEXT ·Ypthread_cond_signal(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -13843,7 +21352,7 @@ TEXT ·Ypthread_cond_signal(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_cond_signal(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
// func Ypthread_cond_timedwait(tls *TLS, c, m, ts uintptr) (r int32)
@@ -13863,7 +21372,7 @@ TEXT ·Ypthread_cond_timedwait(SB),$40-36
MOVL AX, r+32(FP)
RET
-// func Ypthread_cond_wait(tls *TLS, c, m uintptr) int32
+// func Ypthread_cond_wait(tls *TLS, c, m uintptr) (_2 int32)
TEXT ·Ypthread_cond_wait(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -13875,10 +21384,10 @@ TEXT ·Ypthread_cond_wait(SB),$32-28
MOVQ AX, 16(SP)
CALL ·Xpthread_cond_wait(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _2+24(FP)
RET
-// func Ypthread_create(tls *TLS, res, attrp, entry, arg uintptr) int32
+// func Ypthread_create(tls *TLS, res, attrp, entry, arg uintptr) (_2 int32)
TEXT ·Ypthread_create(SB),$48-44
GO_ARGS
NO_LOCAL_POINTERS
@@ -13894,10 +21403,10 @@ TEXT ·Ypthread_create(SB),$48-44
MOVQ AX, 32(SP)
CALL ·Xpthread_create(SB)
MOVL 40(SP), AX
- MOVL AX, ret+40(FP)
+ MOVL AX, _2+40(FP)
RET
-// func Ypthread_detach(tls *TLS, t uintptr) int32
+// func Ypthread_detach(tls *TLS, t uintptr) (_2 int32)
TEXT ·Ypthread_detach(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -13907,10 +21416,10 @@ TEXT ·Ypthread_detach(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_detach(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ypthread_equal(tls *TLS, t, u uintptr) int32
+// func Ypthread_equal(tls *TLS, t, u uintptr) (_2 int32)
TEXT ·Ypthread_equal(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -13922,7 +21431,7 @@ TEXT ·Ypthread_equal(SB),$32-28
MOVQ AX, 16(SP)
CALL ·Xpthread_equal(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _2+24(FP)
RET
// func Ypthread_exit(tls *TLS, result uintptr)
@@ -13936,7 +21445,7 @@ TEXT ·Ypthread_exit(SB),$16-16
CALL ·Xpthread_exit(SB)
RET
-// func Ypthread_getspecific(tls *TLS, k Tpthread_key_t) uintptr
+// func Ypthread_getspecific(tls *TLS, k Tpthread_key_t) (_2 uintptr)
TEXT ·Ypthread_getspecific(SB),$24-24
GO_ARGS
NO_LOCAL_POINTERS
@@ -13946,7 +21455,7 @@ TEXT ·Ypthread_getspecific(SB),$24-24
MOVL AX, 8(SP)
CALL ·Xpthread_getspecific(SB)
MOVQ 16(SP), AX
- MOVQ AX, ret+16(FP)
+ MOVQ AX, _2+16(FP)
RET
// func Ypthread_join(tls *TLS, t Tpthread_t, res uintptr) (r int32)
@@ -13964,7 +21473,7 @@ TEXT ·Ypthread_join(SB),$32-28
MOVL AX, r+24(FP)
RET
-// func Ypthread_key_create(tls *TLS, k uintptr, dtor uintptr) int32
+// func Ypthread_key_create(tls *TLS, k uintptr, dtor uintptr) (_3 int32)
TEXT ·Ypthread_key_create(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -13976,10 +21485,10 @@ TEXT ·Ypthread_key_create(SB),$32-28
MOVQ AX, 16(SP)
CALL ·Xpthread_key_create(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _3+24(FP)
RET
-// func Ypthread_key_delete(tls *TLS, k Tpthread_key_t) int32
+// func Ypthread_key_delete(tls *TLS, k Tpthread_key_t) (_2 int32)
TEXT ·Ypthread_key_delete(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -13989,10 +21498,10 @@ TEXT ·Ypthread_key_delete(SB),$24-20
MOVL AX, 8(SP)
CALL ·Xpthread_key_delete(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ypthread_mutex_destroy(tls *TLS, m uintptr) int32
+// func Ypthread_mutex_destroy(tls *TLS, m uintptr) (_2 int32)
TEXT ·Ypthread_mutex_destroy(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -14002,10 +21511,10 @@ TEXT ·Ypthread_mutex_destroy(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_mutex_destroy(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ypthread_mutex_init(tls *TLS, m, a uintptr) int32
+// func Ypthread_mutex_init(tls *TLS, m, a uintptr) (_2 int32)
TEXT ·Ypthread_mutex_init(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -14017,10 +21526,10 @@ TEXT ·Ypthread_mutex_init(SB),$32-28
MOVQ AX, 16(SP)
CALL ·Xpthread_mutex_init(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _2+24(FP)
RET
-// func Ypthread_mutex_lock(tls *TLS, m uintptr) int32
+// func Ypthread_mutex_lock(tls *TLS, m uintptr) (_2 int32)
TEXT ·Ypthread_mutex_lock(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -14030,10 +21539,10 @@ TEXT ·Ypthread_mutex_lock(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_mutex_lock(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ypthread_mutex_trylock(tls *TLS, m uintptr) int32
+// func Ypthread_mutex_trylock(tls *TLS, m uintptr) (_2 int32)
TEXT ·Ypthread_mutex_trylock(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -14043,10 +21552,10 @@ TEXT ·Ypthread_mutex_trylock(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_mutex_trylock(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ypthread_mutex_unlock(tls *TLS, m uintptr) int32
+// func Ypthread_mutex_unlock(tls *TLS, m uintptr) (_2 int32)
TEXT ·Ypthread_mutex_unlock(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -14056,10 +21565,10 @@ TEXT ·Ypthread_mutex_unlock(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_mutex_unlock(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ypthread_mutexattr_destroy(tls *TLS, a uintptr) int32
+// func Ypthread_mutexattr_destroy(tls *TLS, a uintptr) (_2 int32)
TEXT ·Ypthread_mutexattr_destroy(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -14069,10 +21578,10 @@ TEXT ·Ypthread_mutexattr_destroy(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_mutexattr_destroy(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ypthread_mutexattr_init(tls *TLS, a uintptr) int32
+// func Ypthread_mutexattr_init(tls *TLS, a uintptr) (_2 int32)
TEXT ·Ypthread_mutexattr_init(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -14082,10 +21591,10 @@ TEXT ·Ypthread_mutexattr_init(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xpthread_mutexattr_init(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
-// func Ypthread_mutexattr_settype(tls *TLS, a uintptr, typ int32) int32
+// func Ypthread_mutexattr_settype(tls *TLS, a uintptr, typ int32) (_3 int32)
TEXT ·Ypthread_mutexattr_settype(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -14097,10 +21606,10 @@ TEXT ·Ypthread_mutexattr_settype(SB),$32-28
MOVL AX, 16(SP)
CALL ·Xpthread_mutexattr_settype(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _3+24(FP)
RET
-// func Ypthread_self(tls *TLS) uintptr
+// func Ypthread_self(tls *TLS) (_1 uintptr)
TEXT ·Ypthread_self(SB),$16-16
GO_ARGS
NO_LOCAL_POINTERS
@@ -14108,10 +21617,10 @@ TEXT ·Ypthread_self(SB),$16-16
MOVQ AX, 0(SP)
CALL ·Xpthread_self(SB)
MOVQ 8(SP), AX
- MOVQ AX, ret+8(FP)
+ MOVQ AX, _1+8(FP)
RET
-// func Ypthread_setcancelstate(tls *TLS, new int32, old uintptr) int32
+// func Ypthread_setcancelstate(tls *TLS, new int32, old uintptr) (_3 int32)
TEXT ·Ypthread_setcancelstate(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -14123,10 +21632,10 @@ TEXT ·Ypthread_setcancelstate(SB),$32-28
MOVQ AX, 16(SP)
CALL ·Xpthread_setcancelstate(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _3+24(FP)
RET
-// func Ypthread_setspecific(tls *TLS, k Tpthread_key_t, x uintptr) int32
+// func Ypthread_setspecific(tls *TLS, k Tpthread_key_t, x uintptr) (_3 int32)
TEXT ·Ypthread_setspecific(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -14138,10 +21647,10 @@ TEXT ·Ypthread_setspecific(SB),$32-28
MOVQ AX, 16(SP)
CALL ·Xpthread_setspecific(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _3+24(FP)
RET
-// func Ypthread_sigmask(tls *TLS, now int32, set, old uintptr) int32
+// func Ypthread_sigmask(tls *TLS, now int32, set, old uintptr) (_3 int32)
TEXT ·Ypthread_sigmask(SB),$40-36
GO_ARGS
NO_LOCAL_POINTERS
@@ -14155,7 +21664,7 @@ TEXT ·Ypthread_sigmask(SB),$40-36
MOVQ AX, 24(SP)
CALL ·Xpthread_sigmask(SB)
MOVL 32(SP), AX
- MOVL AX, ret+32(FP)
+ MOVL AX, _3+32(FP)
RET
// func Yptrace(tls *TLS, req int32, va uintptr) (r int64)
@@ -14486,10 +21995,24 @@ TEXT ·Ypwritev2(SB),$56-56
MOVQ AX, r+48(FP)
RET
-// func Yqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun)
-TEXT ·Yqsort(SB),$40-40
+// func Yqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun)
+TEXT ·Yqsort(SB),$48-40
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_cmp+32(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_qsort_3(SB) // Create the closure for calling __ccgo_fp_cmp
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ base+8(FP), AX
@@ -14498,15 +22021,49 @@ TEXT ·Yqsort(SB),$40-40
MOVQ AX, 16(SP)
MOVQ width+24(FP), AX
MOVQ AX, 24(SP)
- MOVQ cmp+32(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 32(SP)
CALL ·Xqsort(SB)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
RET
-// func Yqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr)
-TEXT ·Yqsort_r(SB),$48-48
+TEXT ·__ccgo_abi0_qsort_3(SB),$40-36
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ __ccgo_fp+24(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 24(SP), AX
+ MOVL AX, _3+32(FP)
+ RET
+
+// func Yqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr)
+TEXT ·Yqsort_r(SB),$56-48
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_cmp+32(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_qsort_r_3(SB) // Create the closure for calling __ccgo_fp_cmp
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ base+8(FP), AX
@@ -14515,11 +22072,33 @@ TEXT ·Yqsort_r(SB),$48-48
MOVQ AX, 16(SP)
MOVQ width+24(FP), AX
MOVQ AX, 24(SP)
- MOVQ cmp+32(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 32(SP)
MOVQ arg+40(FP), AX
MOVQ AX, 40(SP)
CALL ·Xqsort_r(SB)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_qsort_r_3(SB),$48-44
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ _3+24(FP), AX
+ MOVQ AX, 24(SP)
+ MOVQ __ccgo_fp+32(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 32(SP), AX
+ MOVL AX, _4+40(FP)
RET
// func Yquick_exit(tls *TLS, code int32)
@@ -14600,7 +22179,7 @@ TEXT ·Yrandom(SB),$16-16
MOVQ AX, r+8(FP)
RET
-// func Yrandom_r(t *TLS, buf, result uintptr) int32
+// func Yrandom_r(t *TLS, buf, result uintptr) (_2 int32)
TEXT ·Yrandom_r(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -14612,7 +22191,7 @@ TEXT ·Yrandom_r(SB),$32-28
MOVQ AX, 16(SP)
CALL ·Xrandom_r(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _2+24(FP)
RET
// func Yread(tls *TLS, fd int32, buf uintptr, count Tsize_t) (r Tssize_t)
@@ -15143,7 +22722,7 @@ TEXT ·Yrenameat(SB),$48-44
MOVL AX, r+40(FP)
RET
-// func Yrenameat2(t *TLS, olddirfd int32, oldpath uintptr, newdirfd int32, newpath uintptr, flags int32) int32
+// func Yrenameat2(t *TLS, olddirfd int32, oldpath uintptr, newdirfd int32, newpath uintptr, flags int32) (_6 int32)
TEXT ·Yrenameat2(SB),$56-52
GO_ARGS
NO_LOCAL_POINTERS
@@ -15161,7 +22740,7 @@ TEXT ·Yrenameat2(SB),$56-52
MOVL AX, 40(SP)
CALL ·Xrenameat2(SB)
MOVL 48(SP), AX
- MOVL AX, ret+48(FP)
+ MOVL AX, _6+48(FP)
RET
// func Yres_init(tls *TLS) (r int32)
@@ -15484,23 +23063,79 @@ TEXT ·Yscalbnl(SB),$32-32
MOVQ AX, r+24(FP)
RET
-// func Yscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r int32)
-TEXT ·Yscandir(SB),$48-44
+// func Yscandir(tls *TLS, path uintptr, res uintptr, __ccgo_fp_sel uintptr, __ccgo_fp_cmp uintptr) (r int32)
+TEXT ·Yscandir(SB),$56-44
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $32, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_sel+24(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_scandir_2(SB) // Create the closure for calling __ccgo_fp_sel
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $16, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_cmp+32(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_scandir_3(SB) // Create the closure for calling __ccgo_fp_cmp
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ path+8(FP), AX
MOVQ AX, 8(SP)
MOVQ res+16(FP), AX
MOVQ AX, 16(SP)
- MOVQ sel+24(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 24(SP)
- MOVQ cmp+32(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $16, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 32(SP)
CALL ·Xscandir(SB)
MOVL 40(SP), AX
MOVL AX, r+40(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_scandir_2(SB),$32-28
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp+16(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 16(SP), AX
+ MOVL AX, _2+24(FP)
+ RET
+
+TEXT ·__ccgo_abi0_scandir_3(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ __ccgo_fp+24(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 24(SP), AX
+ MOVL AX, _3+32(FP)
RET
// func Yscanf(tls *TLS, fmt uintptr, va uintptr) (r int32)
@@ -15518,7 +23153,7 @@ TEXT ·Yscanf(SB),$32-28
MOVL AX, r+24(FP)
RET
-// func Ysched_yield(tls *TLS) int32
+// func Ysched_yield(tls *TLS) (_1 int32)
TEXT ·Ysched_yield(SB),$16-12
GO_ARGS
NO_LOCAL_POINTERS
@@ -15526,7 +23161,7 @@ TEXT ·Ysched_yield(SB),$16-12
MOVQ AX, 0(SP)
CALL ·Xsched_yield(SB)
MOVL 8(SP), AX
- MOVL AX, ret+8(FP)
+ MOVL AX, _1+8(FP)
RET
// func Ysecure_getenv(tls *TLS, name uintptr) (r uintptr)
@@ -15909,7 +23544,7 @@ TEXT ·Ysetitimer(SB),$40-36
MOVL AX, r1+32(FP)
RET
-// func Ysetjmp(t *TLS, env uintptr) int32
+// func Ysetjmp(t *TLS, env uintptr) (_2 int32)
TEXT ·Ysetjmp(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -15919,7 +23554,7 @@ TEXT ·Ysetjmp(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xsetjmp(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
// func Ysetkey(tls *TLS, key uintptr)
@@ -18179,7 +25814,7 @@ TEXT ·Ysysconf(SB),$24-24
MOVQ AX, r+16(FP)
RET
-// func Ysysctlbyname(t *TLS, name, oldp, oldlenp, newp uintptr, newlen Tsize_t) int32
+// func Ysysctlbyname(t *TLS, name, oldp, oldlenp, newp uintptr, newlen Tsize_t) (_3 int32)
TEXT ·Ysysctlbyname(SB),$56-52
GO_ARGS
NO_LOCAL_POINTERS
@@ -18197,7 +25832,7 @@ TEXT ·Ysysctlbyname(SB),$56-52
MOVQ AX, 40(SP)
CALL ·Xsysctlbyname(SB)
MOVL 48(SP), AX
- MOVL AX, ret+48(FP)
+ MOVL AX, _3+48(FP)
RET
// func Ysysinfo(tls *TLS, info uintptr) (r int32)
@@ -18228,7 +25863,7 @@ TEXT ·Ysyslog(SB),$32-32
CALL ·Xsyslog(SB)
RET
-// func Ysystem(t *TLS, command uintptr) int32
+// func Ysystem(t *TLS, command uintptr) (_2 int32)
TEXT ·Ysystem(SB),$24-20
GO_ARGS
NO_LOCAL_POINTERS
@@ -18238,7 +25873,7 @@ TEXT ·Ysystem(SB),$24-20
MOVQ AX, 8(SP)
CALL ·Xsystem(SB)
MOVL 16(SP), AX
- MOVL AX, ret+16(FP)
+ MOVL AX, _2+16(FP)
RET
// func Ytan(tls *TLS, x3 float64) (r float64)
@@ -18480,34 +26115,98 @@ TEXT ·Ytcsetwinsize(SB),$32-28
MOVL AX, r+24(FP)
RET
-// func Ytdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr)
-TEXT ·Ytdelete(SB),$40-40
+// func Ytdelete(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr)
+TEXT ·Ytdelete(SB),$48-40
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_cmp+24(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_tdelete_2(SB) // Create the closure for calling __ccgo_fp_cmp
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ key+8(FP), AX
MOVQ AX, 8(SP)
MOVQ rootp+16(FP), AX
MOVQ AX, 16(SP)
- MOVQ cmp+24(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 24(SP)
CALL ·Xtdelete(SB)
MOVQ 32(SP), AX
MOVQ AX, r+32(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
RET
-// func Ytdestroy(tls *TLS, root uintptr, freekey uintptr)
-TEXT ·Ytdestroy(SB),$24-24
+TEXT ·__ccgo_abi0_tdelete_2(SB),$40-36
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ __ccgo_fp+24(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 24(SP), AX
+ MOVL AX, _3+32(FP)
+ RET
+
+// func Ytdestroy(tls *TLS, root uintptr, __ccgo_fp_freekey uintptr)
+TEXT ·Ytdestroy(SB),$32-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_freekey+16(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_tdestroy_1(SB) // Create the closure for calling __ccgo_fp_freekey
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ root+8(FP), AX
MOVQ AX, 8(SP)
- MOVQ freekey+16(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 16(SP)
CALL ·Xtdestroy(SB)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_tdestroy_1(SB),$24-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp+16(FP), AX
+ CALL *AX // Call the ABI0 code ptr
RET
// func Ytee(tls *TLS, src int32, dest int32, len1 Tsize_t, flags uint32) (r Tssize_t)
@@ -18570,21 +26269,55 @@ TEXT ·Ytextdomain(SB),$24-24
MOVQ AX, r+16(FP)
RET
-// func Ytfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr)
-TEXT ·Ytfind(SB),$40-40
+// func Ytfind(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr)
+TEXT ·Ytfind(SB),$48-40
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_cmp+24(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_tfind_2(SB) // Create the closure for calling __ccgo_fp_cmp
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ key+8(FP), AX
MOVQ AX, 8(SP)
MOVQ rootp+16(FP), AX
MOVQ AX, 16(SP)
- MOVQ cmp+24(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 24(SP)
CALL ·Xtfind(SB)
MOVQ 32(SP), AX
MOVQ AX, r+32(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_tfind_2(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ __ccgo_fp+24(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 24(SP), AX
+ MOVL AX, _3+32(FP)
RET
// func Ytgamma(tls *TLS, x3 float64) (r1 float64)
@@ -19024,21 +26757,55 @@ TEXT ·Ytruncl(SB),$24-24
MOVQ AX, r+16(FP)
RET
-// func Ytsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr)
-TEXT ·Ytsearch(SB),$40-40
+// func Ytsearch(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r1 uintptr)
+TEXT ·Ytsearch(SB),$48-40
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_cmp+24(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_tsearch_2(SB) // Create the closure for calling __ccgo_fp_cmp
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ key+8(FP), AX
MOVQ AX, 8(SP)
MOVQ rootp+16(FP), AX
MOVQ AX, 16(SP)
- MOVQ cmp+24(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 24(SP)
CALL ·Xtsearch(SB)
MOVQ 32(SP), AX
MOVQ AX, r1+32(FP)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_tsearch_2(SB),$40-36
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVQ _2+16(FP), AX
+ MOVQ AX, 16(SP)
+ MOVQ __ccgo_fp+24(FP), AX
+ CALL *AX // Call the ABI0 code ptr
+ MOVL 24(SP), AX
+ MOVL AX, _3+32(FP)
RET
// func Yttyname(tls *TLS, fd int32) (r uintptr)
@@ -19071,17 +26838,51 @@ TEXT ·Yttyname_r(SB),$40-36
MOVL AX, r+32(FP)
RET
-// func Ytwalk(tls *TLS, root uintptr, action uintptr)
-TEXT ·Ytwalk(SB),$24-24
+// func Ytwalk(tls *TLS, root uintptr, __ccgo_fp_action uintptr)
+TEXT ·Ytwalk(SB),$32-24
GO_ARGS
NO_LOCAL_POINTERS
+ MOVQ tls+0(FP), AX // alloc all ABI trampolines
+ MOVQ AX, 0(SP)
+ MOVQ $16, 8(SP) // 16*(number of func ptrs in signature)
+ CALL modernc·org∕libc·TLSAlloc(SB)
+ MOVQ 16(SP), AX
+ MOVQ AX, -8(BP) // Trampolines[0]
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ -8(BP), AX
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
+ MOVQ AX, 8(SP)
+ MOVQ __ccgo_fp_action+16(FP), AX // ABI0 code ptr
+ MOVQ AX, 16(SP)
+ CALL ·__ccgo_abiInternal_twalk_1(SB) // Create the closure for calling __ccgo_fp_action
MOVQ tls+0(FP), AX
MOVQ AX, 0(SP)
MOVQ root+8(FP), AX
MOVQ AX, 8(SP)
- MOVQ action+16(FP), AX
+ MOVQ -8(BP), AX // Trampolines[0]
+ ADDQ $0, AX // 16*(0-based ordinal number of the func ptr in signature)
MOVQ AX, 16(SP)
CALL ·Xtwalk(SB)
+ MOVQ tls+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ $0, 8(SP)
+ CALL modernc·org∕libc·TLSFree(SB)
+ RET
+
+TEXT ·__ccgo_abi0_twalk_1(SB),$32-32
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ _0+0(FP), AX
+ MOVQ AX, 0(SP)
+ MOVQ _1+8(FP), AX
+ MOVQ AX, 8(SP)
+ MOVL _2+16(FP), AX
+ MOVL AX, 16(SP)
+ MOVL _3+20(FP), AX
+ MOVL AX, 20(SP)
+ MOVQ __ccgo_fp+24(FP), AX
+ CALL *AX // Call the ABI0 code ptr
RET
// func Ytzset(tls *TLS)
@@ -19412,7 +27213,7 @@ TEXT ·Yuuid_generate_random(SB),$16-16
CALL ·Xuuid_generate_random(SB)
RET
-// func Yuuid_parse(t *TLS, in uintptr, uu uintptr) int32
+// func Yuuid_parse(t *TLS, in uintptr, uu uintptr) (_3 int32)
TEXT ·Yuuid_parse(SB),$32-28
GO_ARGS
NO_LOCAL_POINTERS
@@ -19424,7 +27225,7 @@ TEXT ·Yuuid_parse(SB),$32-28
MOVQ AX, 16(SP)
CALL ·Xuuid_parse(SB)
MOVL 24(SP), AX
- MOVL AX, ret+24(FP)
+ MOVL AX, _3+24(FP)
RET
// func Yuuid_unparse(t *TLS, uu, out uintptr)
diff --git a/vendor/modernc.org/libc/ccgo_linux_386.go b/vendor/modernc.org/libc/ccgo_linux_386.go
index d3ac8f649..91ec9c90e 100644
--- a/vendor/modernc.org/libc/ccgo_linux_386.go
+++ b/vendor/modernc.org/libc/ccgo_linux_386.go
@@ -25041,9 +25041,13 @@ func Xrewinddir(tls *TLS, dir uintptr) {
___unlock(tls, dir+20)
}
-func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r int32) {
+type t__ccgo_fp__Xscandir_2 = func(*TLS, uintptr) int32
+
+type t__ccgo_fp__Xscandir_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xscandir(tls *TLS, path uintptr, res uintptr, __ccgo_fp_sel uintptr, __ccgo_fp_cmp uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v res=%v sel=%v cmp=%v, (%v:)", tls, path, res, sel, cmp, origin(2))
+ trc("tls=%v path=%v res=%v __ccgo_fp_sel=%v __ccgo_fp_cmp=%v, (%v:)", tls, path, res, __ccgo_fp_sel, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var cnt, len1, v2, v3 Tsize_t
@@ -25065,7 +25069,7 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
if !(v1 != 0) {
break
}
- if sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{sel})))(tls, de) != 0) {
+ if __ccgo_fp_sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_sel})))(tls, de) != 0) {
continue
}
if cnt >= len1 {
@@ -25103,8 +25107,8 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
return -int32(1)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
- if cmp != 0 {
- Xqsort(tls, names, cnt, uint32(4), cmp)
+ if __ccgo_fp_cmp != 0 {
+ Xqsort(tls, names, cnt, uint32(4), __ccgo_fp_cmp)
}
*(*uintptr)(unsafe.Pointer(res)) = names
return Int32FromUint32(cnt)
@@ -25844,9 +25848,11 @@ func X__funcs_on_quick_exit(tls *TLS) {
}
}
-func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
+type t__ccgo_fp__Xat_quick_exit_0 = func(*TLS)
+
+func Xat_quick_exit(tls *TLS, __ccgo_fp_func uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v func1=%v, (%v:)", tls, func1, origin(2))
+ trc("tls=%v __ccgo_fp_func=%v, (%v:)", tls, __ccgo_fp_func, origin(2))
defer func() { trc("-> %v", r1) }()
}
var r, v1 int32
@@ -25858,7 +25864,7 @@ func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
} else {
v1 = _count
_count++
- _funcs[v1] = func1
+ _funcs[v1] = __ccgo_fp_func
}
___unlock(tls, uintptr(unsafe.Pointer(&_lock)))
return r
@@ -28457,15 +28463,17 @@ type TFTW = struct {
Flevel int32
}
-func Xftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32) {
+type t__ccgo_fp__Xftw_1 = func(*TLS, uintptr, uintptr, int32) int32
+
+func Xftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v, (%v:)", tls, path, fn, fd_limit, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, origin(2))
defer func() { trc("-> %v", r) }()
}
/* The following cast assumes that calling a function with one
* argument more than it needs behaves as expected. This is
* actually undefined, but works on all real-world machines. */
- return Xnftw(tls, path, fn, fd_limit, int32(FTW_PHYS))
+ return Xnftw(tls, path, __ccgo_fp_fn, fd_limit, int32(FTW_PHYS))
}
const STATX_ALL = 4095
@@ -31556,15 +31564,6 @@ func Xfremovexattr(tls *TLS, fd int32, name uintptr) (r int32) {
return X__syscall_ret(tls, Uint32FromInt32(X__syscall2(tls, int32(SYS_fremovexattr), fd, int32(name))))
}
-type Tucontext_t2 = struct {
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [28]uint32
-}
-
func _dummy4(tls *TLS, msg uintptr, lm uintptr) (r uintptr) {
return msg
}
@@ -114504,16 +114503,7 @@ func Xgethostid(tls *TLS) (r int32) {
const optpos = 0
-type Tucontext_t3 = struct {
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [28]uint32
-}
-
-type t__ucontext1 = Tucontext_t3
+type t__ucontext1 = Tucontext_t1
func X__getopt_msg(tls *TLS, a uintptr, b uintptr, c uintptr, l Tsize_t) {
if __ccgo_strace {
@@ -115661,7 +115651,7 @@ type Thistory = struct {
Fbase int32
}
-func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
+func _do_nftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
bp := tls.Alloc(192)
defer tls.Free(192)
var d, de, v10 uintptr
@@ -115767,7 +115757,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
}
if v8 = !(flags&Int32FromInt32(FTW_DEPTH) != 0); v8 {
- v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+176)
+ v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+176)
r = v7
}
if v8 && v7 != 0 {
@@ -115808,7 +115798,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*int8)(unsafe.Pointer(path + uintptr(j))) = int8('/')
Xstrcpy(tls, path+uintptr(j)+uintptr(1), de+19)
- v11 = _do_nftw(tls, path, fn, fd_limit-int32(1), flags, bp+144)
+ v11 = _do_nftw(tls, path, __ccgo_fp_fn, fd_limit-int32(1), flags, bp+144)
r = v11
if v11 != 0 {
Xclosedir(tls, d)
@@ -115823,7 +115813,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*int8)(unsafe.Pointer(path + uintptr(l))) = 0
if v13 = flags&int32(FTW_DEPTH) != 0; v13 {
- v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+176)
+ v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+176)
r = v12
}
if v13 && v12 != 0 {
@@ -115832,9 +115822,11 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
return 0
}
-func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1 int32) {
+type t__ccgo_fp__Xnftw_1 = func(*TLS, uintptr, uintptr, int32, uintptr) int32
+
+func Xnftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, fn, fd_limit, flags, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, flags, origin(2))
defer func() { trc("-> %v", r1) }()
}
bp := tls.Alloc(4112)
@@ -115854,7 +115846,7 @@ func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1
}
Xmemcpy(tls, bp+4, path, l+uint32(1))
_pthread_setcancelstate(tls, int32(PTHREAD_CANCEL_DISABLE), bp)
- r = _do_nftw(tls, bp+4, fn, fd_limit, flags, UintptrFromInt32(0))
+ r = _do_nftw(tls, bp+4, __ccgo_fp_fn, fd_limit, flags, UintptrFromInt32(0))
_pthread_setcancelstate(tls, *(*int32)(unsafe.Pointer(bp)), uintptr(0))
return r
}
@@ -116631,8 +116623,6 @@ type Tsockaddr_storage = struct {
F__ss_align uint32
}
-type t__ucontext2 = Tucontext_t2
-
var _lock2 [1]int32
var _log_ident [32]int8
var _log_opt int32
@@ -117070,15 +117060,6 @@ const SA = 194
const SB = 244
const bittab = 0
-type Tucontext_t4 = struct {
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [28]uint32
-}
-
func Xbtowc(tls *TLS, c int32) (r Twint_t) {
if __ccgo_strace {
trc("tls=%v c=%v, (%v:)", tls, c, origin(2))
@@ -118890,13 +118871,6 @@ const _ns_uop_delete = 0
const _ns_uop_add = 1
const _ns_uop_max = 2
-type Tns_tsig_key1 = struct {
- Fname [1025]int8
- Falg [1025]int8
- Fdata uintptr
- Flen1 int32
-}
-
type Tns_tsig_key = struct {
Fname [1025]int8
Falg [1025]int8
@@ -118904,14 +118878,6 @@ type Tns_tsig_key = struct {
Flen1 int32
}
-type Tns_tcp_tsig_state1 = struct {
- Fcounter int32
- Fkey uintptr
- Fctx uintptr
- Fsig [512]uint8
- Fsiglen int32
-}
-
type Tns_tcp_tsig_state = struct {
Fcounter int32
Fkey uintptr
@@ -119560,9 +119526,11 @@ type Tresolvconf = struct {
Ftimeout uint32
}
-func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__dns_parse_2 = func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32
+
+func X__dns_parse(tls *TLS, r uintptr, rlen int32, __ccgo_fp_callback uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v r=%v rlen=%v callback=%v ctx=%v, (%v:)", tls, r, rlen, callback, ctx, origin(2))
+ trc("tls=%v r=%v rlen=%v __ccgo_fp_callback=%v ctx=%v, (%v:)", tls, r, rlen, __ccgo_fp_callback, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var ancount, len1, qdcount, v1, v2 int32
@@ -119608,7 +119576,7 @@ func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr
if len1+int32(10) > int32(r+uintptr(rlen))-int32(p) {
return -int32(1)
}
- if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
+ if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
return -int32(1)
}
p += uintptr(int32(10) + len1)
@@ -121822,19 +121790,6 @@ func Xherror(tls *TLS, msg uintptr) {
Xfprintf(tls, uintptr(unsafe.Pointer(&X__stderr_FILE)), __ccgo_ts+987, VaList(bp+8, v1, v2, Xhstrerror(tls, *(*int32)(unsafe.Pointer(X__h_errno_location(tls))))))
}
-type Tcpu_set_t1 = struct {
- F__bits [32]uint32
-}
-
-type Tucontext_t5 = struct {
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [28]uint32
-}
-
var _msgs1 = [84]int8{'H', 'o', 's', 't', ' ', 'n', 'o', 't', ' ', 'f', 'o', 'u', 'n', 'd', 0, 'T', 'r', 'y', ' ', 'a', 'g', 'a', 'i', 'n', 0, 'N', 'o', 'n', '-', 'r', 'e', 'c', 'o', 'v', 'e', 'r', 'a', 'b', 'l', 'e', ' ', 'e', 'r', 'r', 'o', 'r', 0, 'A', 'd', 'd', 'r', 'e', 's', 's', ' ', 'n', 'o', 't', ' ', 'a', 'v', 'a', 'i', 'l', 'a', 'b', 'l', 'e', 0, 0, 'U', 'n', 'k', 'n', 'o', 'w', 'n', ' ', 'e', 'r', 'r', 'o', 'r'}
func Xhstrerror(tls *TLS, ecode int32) (r uintptr) {
@@ -123645,7 +123600,7 @@ func X__lookup_serv(tls *TLS, buf uintptr, name uintptr, proto int32, socktype i
return v22
}
-func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, cb uintptr, ctx uintptr) (r1 int32) {
+func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
bp := tls.Alloc(8192)
defer tls.Free(8192)
var h uintptr
@@ -123735,7 +123690,7 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
if Int32FromUint16((*Tnlmsghdr)(unsafe.Pointer(h)).Fnlmsg_type) == int32(NLMSG_ERROR) {
return -int32(1)
}
- ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cb})))(tls, ctx, h)
+ ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cb})))(tls, ctx, h)
if ret != 0 {
return ret
}
@@ -123748,9 +123703,11 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
return r1
}
-func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__rtnetlink_enumerate_2 = func(*TLS, uintptr, uintptr) int32
+
+func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v link_af=%v addr_af=%v cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, cb, ctx, origin(2))
+ trc("tls=%v link_af=%v addr_af=%v __ccgo_fp_cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, __ccgo_fp_cb, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var fd, r int32
@@ -123759,9 +123716,9 @@ func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr,
if fd < 0 {
return -int32(1)
}
- r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, __ccgo_fp_cb, ctx)
if !(r != 0) {
- r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, __ccgo_fp_cb, ctx)
}
X__syscall1(tls, int32(SYS_close), fd)
return r
@@ -129099,7 +129056,7 @@ func _append(tls *TLS, tail uintptr, name uintptr, len1 Tsize_t, mark int32) (r
return 0
}
-func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, errfunc uintptr, tail uintptr) (r1 int32) {
+func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, tail uintptr) (r1 int32) {
bp := tls.Alloc(144)
defer tls.Free(144)
var de, dir, p, p2, v11, v2, v7, v8 uintptr
@@ -129208,7 +129165,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
}
if !(type1 != 0) && Xlstat(tls, buf, bp) != 0 {
- if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
return 0
@@ -129245,7 +129202,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
dir = Xopendir(tls, v7)
if !(dir != 0) {
- if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
+ if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
return int32(GLOB_ABORTED)
}
return 0
@@ -129297,7 +129254,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
} else {
v11 = __ccgo_ts
}
- r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, errfunc, tail)
+ r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, __ccgo_fp_errfunc, tail)
if r != 0 {
Xclosedir(tls, dir)
return r
@@ -129308,7 +129265,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
*(*int8)(unsafe.Pointer(p2)) = saved_sep
}
Xclosedir(tls, dir)
- if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
@@ -129420,9 +129377,11 @@ func _expand_tilde(tls *TLS, pat uintptr, buf uintptr, pos uintptr) (r int32) {
return 0
}
-func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r int32) {
+type t__ccgo_fp__Xglob_2 = func(*TLS, uintptr, int32) int32
+
+func Xglob(tls *TLS, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, g uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v pat=%v flags=%v errfunc=%v g=%v, (%v:)", tls, pat, flags, errfunc, g, origin(2))
+ trc("tls=%v pat=%v flags=%v __ccgo_fp_errfunc=%v g=%v, (%v:)", tls, pat, flags, __ccgo_fp_errfunc, g, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(4112)
@@ -129448,8 +129407,8 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
}
offs = v1
error1 = 0
- if !(errfunc != 0) {
- errfunc = __ccgo_fp(_ignore_err)
+ if !(__ccgo_fp_errfunc != 0) {
+ __ccgo_fp_errfunc = __ccgo_fp(_ignore_err)
}
if !(flags&Int32FromInt32(GLOB_APPEND) != 0) {
(*Tglob_t)(unsafe.Pointer(g)).Fgl_offs = offs
@@ -129468,7 +129427,7 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
error1 = _expand_tilde(tls, bp+4108, bp+8, bp+4104)
}
if !(error1 != 0) {
- error1 = _do_glob(tls, bp+8, *(*Tsize_t)(unsafe.Pointer(bp + 4104)), 0, *(*uintptr)(unsafe.Pointer(bp + 4108)), flags, errfunc, bp+4)
+ error1 = _do_glob(tls, bp+8, *(*Tsize_t)(unsafe.Pointer(bp + 4104)), 0, *(*uintptr)(unsafe.Pointer(bp + 4108)), flags, __ccgo_fp_errfunc, bp+4)
}
Xfree(tls, p)
}
@@ -134693,9 +134652,11 @@ func Xremque(tls *TLS, element uintptr) {
}
}
-func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -134711,7 +134672,7 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -134723,9 +134684,11 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
return Xmemcpy(tls, p+uintptr(n)*uintptr(v1), key, width)
}
-func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlfind_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -134741,7 +134704,7 @@ func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, co
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -134760,9 +134723,11 @@ type Tnode1 = struct {
Fh int32
}
-func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtdelete_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtdelete(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var a [49]uintptr
@@ -134786,7 +134751,7 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
return uintptr(0)
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -134831,9 +134796,11 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
return parent
}
-func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
+type t__ccgo_fp__Xtdestroy_1 = func(*TLS, uintptr)
+
+func Xtdestroy(tls *TLS, root uintptr, __ccgo_fp_freekey uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v freekey=%v, (%v:)", tls, root, freekey, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_freekey=%v, (%v:)", tls, root, __ccgo_fp_freekey, origin(2))
}
var r uintptr
_ = r
@@ -134841,17 +134808,19 @@ func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
if r == uintptr(0) {
return
}
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 4)), freekey)
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 4 + 1*4)), freekey)
- if freekey != 0 {
- (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 4)), __ccgo_fp_freekey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 4 + 1*4)), __ccgo_fp_freekey)
+ if __ccgo_fp_freekey != 0 {
+ (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
}
Xfree(tls, r)
}
-func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtfind_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtfind(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var c int32
@@ -134865,7 +134834,7 @@ func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -134953,9 +134922,11 @@ func X__tsearch_balance(tls *TLS, p uintptr) (r int32) {
return _rot(tls, p, n, BoolInt32(h0 < h1))
}
-func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
+type t__ccgo_fp__Xtsearch_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtsearch(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r1 uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r1) }()
}
var a [48]uintptr
@@ -134975,7 +134946,7 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
return n
}
@@ -135011,26 +134982,28 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
return r
}
-func _walk(tls *TLS, r uintptr, action uintptr, d int32) {
+func _walk(tls *TLS, r uintptr, __ccgo_fp_action uintptr, d int32) {
if !(r != 0) {
return
}
if (*Tnode1)(unsafe.Pointer(r)).Fh == int32(1) {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_leaf), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_leaf), d)
} else {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_preorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 4)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_postorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 4 + 1*4)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_endorder), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_preorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 4)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_postorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 4 + 1*4)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_endorder), d)
}
}
-func Xtwalk(tls *TLS, root uintptr, action uintptr) {
+type t__ccgo_fp__Xtwalk_1 = func(*TLS, uintptr, int32, int32)
+
+func Xtwalk(tls *TLS, root uintptr, __ccgo_fp_action uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v action=%v, (%v:)", tls, root, action, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_action=%v, (%v:)", tls, root, __ccgo_fp_action, origin(2))
}
- _walk(tls, root, action, 0)
+ _walk(tls, root, __ccgo_fp_action, 0)
}
func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
@@ -135041,8 +135014,6 @@ func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
return X__syscall_ret(tls, Uint32FromInt32(___syscall_cp(tls, int32(SYS_poll), int32(fds), Int32FromUint32(n), timeout, 0, 0, 0)))
}
-type t__ucontext3 = Tucontext_t5
-
func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v fds=%v n=%v to=%v mask=%v, (%v:)", tls, fds, n, to, mask, origin(2))
@@ -135102,8 +135073,6 @@ func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r1 int3
return X__syscall_ret(tls, Uint32FromInt32(___syscall_cp(tls, int32(SYS_ppoll), int32(fds), Int32FromUint32(n), int32(v5), int32(mask), int32(Int32FromInt32(_NSIG)/Int32FromInt32(8)), 0)))
}
-type t__ucontext4 = Tucontext_t4
-
func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts uintptr, mask uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v ts=%v mask=%v, (%v:)", tls, n, rfds, wfds, efds, ts, mask, origin(2))
@@ -135168,15 +135137,6 @@ func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts ui
return X__syscall_ret(tls, Uint32FromInt32(___syscall_cp(tls, int32(SYS_pselect6), n, int32(rfds), int32(wfds), int32(efds), int32(v5), int32(bp+24))))
}
-type Tucontext_t6 = struct {
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [28]uint32
-}
-
func Xselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, tv uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v tv=%v, (%v:)", tls, n, rfds, wfds, efds, tv, origin(2))
@@ -135623,15 +135583,6 @@ func Xsigaltstack(tls *TLS, ss uintptr, old uintptr) (r int32) {
const SST_SIZE = 8
-type Tucontext_t7 = struct {
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [28]uint32
-}
-
func Xsigandset(tls *TLS, dest uintptr, left uintptr, right uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v dest=%v left=%v right=%v, (%v:)", tls, dest, left, right, origin(2))
@@ -144658,9 +144609,11 @@ func Xatoll(tls *TLS, s uintptr) (r int64) {
return v5
}
-func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xbsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, key, base, nel, width, cmp, origin(2))
+ trc("tls=%v key=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, base, nel, width, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var sign int32
@@ -144668,7 +144621,7 @@ func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, c
_, _ = sign, try
for nel > uint32(0) {
try = base + uintptr(width*(nel/uint32(2)))
- sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, try)
+ sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, try)
if sign < 0 {
nel /= uint32(2)
} else {
@@ -144938,7 +144891,7 @@ func _shr(tls *TLS, p uintptr, n int32) {
*(*Tsize_t)(unsafe.Pointer(p + 1*4)) >>= Uint32FromInt32(n)
}
-func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
+func _sift(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
bp := tls.Alloc(240)
defer tls.Free(240)
var i, v1, v2 int32
@@ -144950,10 +144903,10 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
for pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*4)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[57]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[57]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[57]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[57]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
break
}
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, rt, arg) >= 0 {
v1 = i
i++
(*(*[57]uintptr)(unsafe.Pointer(bp)))[v1] = lf
@@ -144970,7 +144923,7 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
_cycle(tls, width, bp, i)
}
-func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
+func _trinkle(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
bp := tls.Alloc(240)
defer tls.Free(240)
var i, trail, v1 int32
@@ -144984,13 +144937,13 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
(*(*[57]uintptr)(unsafe.Pointer(bp + 8)))[0] = head
for (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[0] != uint32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[int32(1)] != uint32(0) {
stepson = head - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift)*4)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, stepson, (*(*[57]uintptr)(unsafe.Pointer(bp + 8)))[0], arg) <= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, stepson, (*(*[57]uintptr)(unsafe.Pointer(bp + 8)))[0], arg) <= 0 {
break
}
if !(trusty != 0) && pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*4)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, stepson, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, stepson, arg) >= 0 {
break
}
}
@@ -145005,13 +144958,15 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
}
if !(trusty != 0) {
_cycle(tls, width, bp+8, i)
- _sift(tls, head, width, cmp, arg, pshift, lp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, lp)
}
}
-func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__X__qsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
bp := tls.Alloc(208)
defer tls.Free(208)
@@ -145049,14 +145004,14 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
}
for head < high {
if (*(*[2]Tsize_t)(unsafe.Pointer(bp + 192)))[0]&uint32(3) == uint32(3) {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
_shr(tls, bp+192, int32(2))
pshift += int32(2)
} else {
if (*(*[48]Tsize_t)(unsafe.Pointer(bp)))[pshift-int32(1)] >= Uint32FromInt32(int32(high)-int32(head)) {
- _trinkle(tls, head, width, cmp, arg, bp+192, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+192, pshift, 0, bp)
} else {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
}
if pshift == int32(1) {
_shl(tls, bp+192, int32(1))
@@ -145069,7 +145024,7 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
*(*Tsize_t)(unsafe.Pointer(bp + 192)) |= uint32(1)
head += uintptr(width)
}
- _trinkle(tls, head, width, cmp, arg, bp+192, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+192, pshift, 0, bp)
for pshift != int32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 192)))[0] != uint32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 192)))[int32(1)] != uint32(0) {
if pshift <= int32(1) {
trail = _pntz(tls, bp+192)
@@ -145080,31 +145035,35 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
pshift -= int32(2)
*(*Tsize_t)(unsafe.Pointer(bp + 192)) ^= uint32(7)
_shr(tls, bp+192, int32(1))
- _trinkle(tls, head-uintptr((*(*[48]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, cmp, arg, bp+192, pshift+int32(1), int32(1), bp)
+ _trinkle(tls, head-uintptr((*(*[48]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, __ccgo_fp_cmp, arg, bp+192, pshift+int32(1), int32(1), bp)
_shl(tls, bp+192, int32(1))
*(*Tsize_t)(unsafe.Pointer(bp + 192)) |= uint32(1)
- _trinkle(tls, head-uintptr(width), width, cmp, arg, bp+192, pshift, int32(1), bp)
+ _trinkle(tls, head-uintptr(width), width, __ccgo_fp_cmp, arg, bp+192, pshift, int32(1), bp)
}
head -= uintptr(width)
}
}
-func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__Xqsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
- X__qsort_r(tls, base, nel, width, cmp, arg)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp_cmp, arg)
}
func _wrapper_cmp(tls *TLS, v1 uintptr, v2 uintptr, cmp uintptr) (r int32) {
return (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, v1, v2)
}
-func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun) {
+type t__ccgo_fp__Xqsort_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, base, nel, width, cmp, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, origin(2))
}
- X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), cmp)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), __ccgo_fp_cmp)
}
func _strtox(tls *TLS, s uintptr, p uintptr, prec int32) (r float64) {
@@ -153810,8 +153769,6 @@ func X__utimes_time32(tls *TLS, path uintptr, times32 uintptr) (r int32) {
return Xutimes(tls, path, v1)
}
-type t__ucontext5 = Tucontext_t7
-
func X__wait3_time32(tls *TLS, status uintptr, options int32, usage uintptr) (r1 Tpid_t) {
if __ccgo_strace {
trc("tls=%v status=%v options=%v usage=%v, (%v:)", tls, status, options, usage, origin(2))
diff --git a/vendor/modernc.org/libc/ccgo_linux_amd64.go b/vendor/modernc.org/libc/ccgo_linux_amd64.go
index 15c013442..a93029e55 100644
--- a/vendor/modernc.org/libc/ccgo_linux_amd64.go
+++ b/vendor/modernc.org/libc/ccgo_linux_amd64.go
@@ -24913,9 +24913,13 @@ func Xrewinddir(tls *TLS, dir uintptr) {
___unlock(tls, dir+20)
}
-func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r int32) {
+type t__ccgo_fp__Xscandir_2 = func(*TLS, uintptr) int32
+
+type t__ccgo_fp__Xscandir_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xscandir(tls *TLS, path uintptr, res uintptr, __ccgo_fp_sel uintptr, __ccgo_fp_cmp uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v res=%v sel=%v cmp=%v, (%v:)", tls, path, res, sel, cmp, origin(2))
+ trc("tls=%v path=%v res=%v __ccgo_fp_sel=%v __ccgo_fp_cmp=%v, (%v:)", tls, path, res, __ccgo_fp_sel, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var cnt, len1, v2, v3 Tsize_t
@@ -24937,7 +24941,7 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
if !(v1 != 0) {
break
}
- if sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{sel})))(tls, de) != 0) {
+ if __ccgo_fp_sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_sel})))(tls, de) != 0) {
continue
}
if cnt >= len1 {
@@ -24975,8 +24979,8 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
return -int32(1)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
- if cmp != 0 {
- Xqsort(tls, names, cnt, uint64(8), cmp)
+ if __ccgo_fp_cmp != 0 {
+ Xqsort(tls, names, cnt, uint64(8), __ccgo_fp_cmp)
}
*(*uintptr)(unsafe.Pointer(res)) = names
return Int32FromUint64(cnt)
@@ -25723,9 +25727,11 @@ func X__funcs_on_quick_exit(tls *TLS) {
}
}
-func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
+type t__ccgo_fp__Xat_quick_exit_0 = func(*TLS)
+
+func Xat_quick_exit(tls *TLS, __ccgo_fp_func uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v func1=%v, (%v:)", tls, func1, origin(2))
+ trc("tls=%v __ccgo_fp_func=%v, (%v:)", tls, __ccgo_fp_func, origin(2))
defer func() { trc("-> %v", r1) }()
}
var r, v1 int32
@@ -25737,7 +25743,7 @@ func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
} else {
v1 = _count
_count++
- _funcs[v1] = func1
+ _funcs[v1] = __ccgo_fp_func
}
___unlock(tls, uintptr(unsafe.Pointer(&_lock)))
return r
@@ -28206,15 +28212,17 @@ type TFTW = struct {
Flevel int32
}
-func Xftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32) {
+type t__ccgo_fp__Xftw_1 = func(*TLS, uintptr, uintptr, int32) int32
+
+func Xftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v, (%v:)", tls, path, fn, fd_limit, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, origin(2))
defer func() { trc("-> %v", r) }()
}
/* The following cast assumes that calling a function with one
* argument more than it needs behaves as expected. This is
* actually undefined, but works on all real-world machines. */
- return Xnftw(tls, path, fn, fd_limit, int32(FTW_PHYS))
+ return Xnftw(tls, path, __ccgo_fp_fn, fd_limit, int32(FTW_PHYS))
}
const STATX_ALL = 4095
@@ -31199,15 +31207,6 @@ func Xfremovexattr(tls *TLS, fd int32, name uintptr) (r int32) {
return int32(X__syscall_ret(tls, Uint64FromInt64(X__syscall2(tls, int64(SYS_fremovexattr), int64(fd), int64(name)))))
}
-type Tucontext_t2 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [64]uint64
-}
-
func _dummy4(tls *TLS, msg uintptr, lm uintptr) (r uintptr) {
return msg
}
@@ -114152,16 +114151,7 @@ func Xgethostid(tls *TLS) (r int64) {
const optpos = 0
-type Tucontext_t3 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [64]uint64
-}
-
-type t__ucontext1 = Tucontext_t3
+type t__ucontext1 = Tucontext_t1
func X__getopt_msg(tls *TLS, a uintptr, b uintptr, c uintptr, l Tsize_t) {
if __ccgo_strace {
@@ -115309,7 +115299,7 @@ type Thistory = struct {
Fbase int32
}
-func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
+func _do_nftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
bp := tls.Alloc(192)
defer tls.Free(192)
var d, de, v10 uintptr
@@ -115415,7 +115405,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
}
if v8 = !(flags&Int32FromInt32(FTW_DEPTH) != 0); v8 {
- v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+176)
+ v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+176)
r = v7
}
if v8 && v7 != 0 {
@@ -115456,7 +115446,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*int8)(unsafe.Pointer(path + uintptr(j))) = int8('/')
Xstrcpy(tls, path+uintptr(j)+uintptr(1), de+19)
- v11 = _do_nftw(tls, path, fn, fd_limit-int32(1), flags, bp+144)
+ v11 = _do_nftw(tls, path, __ccgo_fp_fn, fd_limit-int32(1), flags, bp+144)
r = v11
if v11 != 0 {
Xclosedir(tls, d)
@@ -115471,7 +115461,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*int8)(unsafe.Pointer(path + uintptr(l))) = 0
if v13 = flags&int32(FTW_DEPTH) != 0; v13 {
- v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+176)
+ v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+176)
r = v12
}
if v13 && v12 != 0 {
@@ -115480,9 +115470,11 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
return 0
}
-func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1 int32) {
+type t__ccgo_fp__Xnftw_1 = func(*TLS, uintptr, uintptr, int32, uintptr) int32
+
+func Xnftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, fn, fd_limit, flags, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, flags, origin(2))
defer func() { trc("-> %v", r1) }()
}
bp := tls.Alloc(4112)
@@ -115502,7 +115494,7 @@ func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1
}
Xmemcpy(tls, bp+4, path, l+uint64(1))
_pthread_setcancelstate(tls, int32(PTHREAD_CANCEL_DISABLE), bp)
- r = _do_nftw(tls, bp+4, fn, fd_limit, flags, UintptrFromInt32(0))
+ r = _do_nftw(tls, bp+4, __ccgo_fp_fn, fd_limit, flags, UintptrFromInt32(0))
_pthread_setcancelstate(tls, *(*int32)(unsafe.Pointer(bp)), uintptr(0))
return r
}
@@ -116282,8 +116274,6 @@ type Tsockaddr_storage = struct {
F__ss_align uint64
}
-type t__ucontext2 = Tucontext_t2
-
var _lock2 [1]int32
var _log_ident [32]int8
var _log_opt int32
@@ -116721,15 +116711,6 @@ const SA = 194
const SB = 244
const bittab = 0
-type Tucontext_t4 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [64]uint64
-}
-
func Xbtowc(tls *TLS, c int32) (r Twint_t) {
if __ccgo_strace {
trc("tls=%v c=%v, (%v:)", tls, c, origin(2))
@@ -118449,13 +118430,6 @@ const _ns_uop_delete = 0
const _ns_uop_add = 1
const _ns_uop_max = 2
-type Tns_tsig_key1 = struct {
- Fname [1025]int8
- Falg [1025]int8
- Fdata uintptr
- Flen1 int32
-}
-
type Tns_tsig_key = struct {
Fname [1025]int8
Falg [1025]int8
@@ -118463,14 +118437,6 @@ type Tns_tsig_key = struct {
Flen1 int32
}
-type Tns_tcp_tsig_state1 = struct {
- Fcounter int32
- Fkey uintptr
- Fctx uintptr
- Fsig [512]uint8
- Fsiglen int32
-}
-
type Tns_tcp_tsig_state = struct {
Fcounter int32
Fkey uintptr
@@ -119120,9 +119086,11 @@ type Tresolvconf = struct {
Ftimeout uint32
}
-func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__dns_parse_2 = func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32
+
+func X__dns_parse(tls *TLS, r uintptr, rlen int32, __ccgo_fp_callback uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v r=%v rlen=%v callback=%v ctx=%v, (%v:)", tls, r, rlen, callback, ctx, origin(2))
+ trc("tls=%v r=%v rlen=%v __ccgo_fp_callback=%v ctx=%v, (%v:)", tls, r, rlen, __ccgo_fp_callback, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var ancount, len1, qdcount, v1, v2 int32
@@ -119168,7 +119136,7 @@ func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr
if int64(len1+int32(10)) > int64(r+uintptr(rlen))-int64(p) {
return -int32(1)
}
- if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
+ if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
return -int32(1)
}
p += uintptr(int32(10) + len1)
@@ -121272,19 +121240,6 @@ func Xherror(tls *TLS, msg uintptr) {
Xfprintf(tls, uintptr(unsafe.Pointer(&X__stderr_FILE)), __ccgo_ts+1089, VaList(bp+8, v1, v2, Xhstrerror(tls, *(*int32)(unsafe.Pointer(X__h_errno_location(tls))))))
}
-type Tcpu_set_t1 = struct {
- F__bits [16]uint64
-}
-
-type Tucontext_t5 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [64]uint64
-}
-
var _msgs1 = [84]int8{'H', 'o', 's', 't', ' ', 'n', 'o', 't', ' ', 'f', 'o', 'u', 'n', 'd', 0, 'T', 'r', 'y', ' ', 'a', 'g', 'a', 'i', 'n', 0, 'N', 'o', 'n', '-', 'r', 'e', 'c', 'o', 'v', 'e', 'r', 'a', 'b', 'l', 'e', ' ', 'e', 'r', 'r', 'o', 'r', 0, 'A', 'd', 'd', 'r', 'e', 's', 's', ' ', 'n', 'o', 't', ' ', 'a', 'v', 'a', 'i', 'l', 'a', 'b', 'l', 'e', 0, 0, 'U', 'n', 'k', 'n', 'o', 'w', 'n', ' ', 'e', 'r', 'r', 'o', 'r'}
func Xhstrerror(tls *TLS, ecode int32) (r uintptr) {
@@ -123072,7 +123027,7 @@ func X__lookup_serv(tls *TLS, buf uintptr, name uintptr, proto int32, socktype i
return v22
}
-func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, cb uintptr, ctx uintptr) (r1 int32) {
+func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
bp := tls.Alloc(8192)
defer tls.Free(8192)
var h uintptr
@@ -123162,7 +123117,7 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
if Int32FromUint16((*Tnlmsghdr)(unsafe.Pointer(h)).Fnlmsg_type) == int32(NLMSG_ERROR) {
return -int32(1)
}
- ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cb})))(tls, ctx, h)
+ ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cb})))(tls, ctx, h)
if ret != 0 {
return ret
}
@@ -123175,9 +123130,11 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
return r1
}
-func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__rtnetlink_enumerate_2 = func(*TLS, uintptr, uintptr) int32
+
+func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v link_af=%v addr_af=%v cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, cb, ctx, origin(2))
+ trc("tls=%v link_af=%v addr_af=%v __ccgo_fp_cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, __ccgo_fp_cb, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var fd, r int32
@@ -123186,9 +123143,9 @@ func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr,
if fd < 0 {
return -int32(1)
}
- r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, __ccgo_fp_cb, ctx)
if !(r != 0) {
- r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, __ccgo_fp_cb, ctx)
}
X__syscall1(tls, int64(SYS_close), int64(fd))
return r
@@ -128295,7 +128252,7 @@ func _append(tls *TLS, tail uintptr, name uintptr, len1 Tsize_t, mark int32) (r
return 0
}
-func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, errfunc uintptr, tail uintptr) (r1 int32) {
+func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, tail uintptr) (r1 int32) {
bp := tls.Alloc(144)
defer tls.Free(144)
var de, dir, p, p2, v11, v2, v7, v8 uintptr
@@ -128404,7 +128361,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
}
if !(type1 != 0) && Xlstat(tls, buf, bp) != 0 {
- if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
return 0
@@ -128441,7 +128398,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
dir = Xopendir(tls, v7)
if !(dir != 0) {
- if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
+ if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
return int32(GLOB_ABORTED)
}
return 0
@@ -128493,7 +128450,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
} else {
v11 = __ccgo_ts
}
- r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, errfunc, tail)
+ r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, __ccgo_fp_errfunc, tail)
if r != 0 {
Xclosedir(tls, dir)
return r
@@ -128504,7 +128461,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
*(*int8)(unsafe.Pointer(p2)) = saved_sep
}
Xclosedir(tls, dir)
- if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
@@ -128616,9 +128573,11 @@ func _expand_tilde(tls *TLS, pat uintptr, buf uintptr, pos uintptr) (r int32) {
return 0
}
-func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r int32) {
+type t__ccgo_fp__Xglob_2 = func(*TLS, uintptr, int32) int32
+
+func Xglob(tls *TLS, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, g uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v pat=%v flags=%v errfunc=%v g=%v, (%v:)", tls, pat, flags, errfunc, g, origin(2))
+ trc("tls=%v pat=%v flags=%v __ccgo_fp_errfunc=%v g=%v, (%v:)", tls, pat, flags, __ccgo_fp_errfunc, g, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(4128)
@@ -128644,8 +128603,8 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
}
offs = v1
error1 = 0
- if !(errfunc != 0) {
- errfunc = __ccgo_fp(_ignore_err)
+ if !(__ccgo_fp_errfunc != 0) {
+ __ccgo_fp_errfunc = __ccgo_fp(_ignore_err)
}
if !(flags&Int32FromInt32(GLOB_APPEND) != 0) {
(*Tglob_t)(unsafe.Pointer(g)).Fgl_offs = offs
@@ -128664,7 +128623,7 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
error1 = _expand_tilde(tls, bp+4120, bp+16, bp+4112)
}
if !(error1 != 0) {
- error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, errfunc, bp+8)
+ error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, __ccgo_fp_errfunc, bp+8)
}
Xfree(tls, p)
}
@@ -133888,9 +133847,11 @@ func Xremque(tls *TLS, element uintptr) {
}
}
-func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -133906,7 +133867,7 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -133918,9 +133879,11 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
return Xmemcpy(tls, p+uintptr(n)*uintptr(v1), key, width)
}
-func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlfind_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -133936,7 +133899,7 @@ func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, co
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -133955,9 +133918,11 @@ type Tnode1 = struct {
Fh int32
}
-func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtdelete_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtdelete(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var a [97]uintptr
@@ -133981,7 +133946,7 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
return uintptr(0)
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -134026,9 +133991,11 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
return parent
}
-func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
+type t__ccgo_fp__Xtdestroy_1 = func(*TLS, uintptr)
+
+func Xtdestroy(tls *TLS, root uintptr, __ccgo_fp_freekey uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v freekey=%v, (%v:)", tls, root, freekey, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_freekey=%v, (%v:)", tls, root, __ccgo_fp_freekey, origin(2))
}
var r uintptr
_ = r
@@ -134036,17 +134003,19 @@ func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
if r == uintptr(0) {
return
}
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), freekey)
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), freekey)
- if freekey != 0 {
- (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_freekey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_freekey)
+ if __ccgo_fp_freekey != 0 {
+ (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
}
Xfree(tls, r)
}
-func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtfind_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtfind(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var c int32
@@ -134060,7 +134029,7 @@ func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -134148,9 +134117,11 @@ func X__tsearch_balance(tls *TLS, p uintptr) (r int32) {
return _rot(tls, p, n, BoolInt32(h0 < h1))
}
-func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
+type t__ccgo_fp__Xtsearch_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtsearch(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r1 uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r1) }()
}
var a [96]uintptr
@@ -134170,7 +134141,7 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
return n
}
@@ -134206,26 +134177,28 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
return r
}
-func _walk(tls *TLS, r uintptr, action uintptr, d int32) {
+func _walk(tls *TLS, r uintptr, __ccgo_fp_action uintptr, d int32) {
if !(r != 0) {
return
}
if (*Tnode1)(unsafe.Pointer(r)).Fh == int32(1) {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_leaf), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_leaf), d)
} else {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_preorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_postorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_endorder), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_preorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_postorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_endorder), d)
}
}
-func Xtwalk(tls *TLS, root uintptr, action uintptr) {
+type t__ccgo_fp__Xtwalk_1 = func(*TLS, uintptr, int32, int32)
+
+func Xtwalk(tls *TLS, root uintptr, __ccgo_fp_action uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v action=%v, (%v:)", tls, root, action, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_action=%v, (%v:)", tls, root, __ccgo_fp_action, origin(2))
}
- _walk(tls, root, action, 0)
+ _walk(tls, root, __ccgo_fp_action, 0)
}
func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
@@ -134236,8 +134209,6 @@ func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_poll), int64(fds), Int64FromUint64(n), int64(timeout), 0, 0, 0))))
}
-type t__ucontext3 = Tucontext_t5
-
func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v fds=%v n=%v to=%v mask=%v, (%v:)", tls, fds, n, to, mask, origin(2))
@@ -134273,8 +134244,6 @@ func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r int32
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_ppoll), int64(fds), Int64FromUint64(n), int64(v3), int64(mask), int64(Int32FromInt32(_NSIG)/Int32FromInt32(8)), 0))))
}
-type t__ucontext4 = Tucontext_t4
-
func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts uintptr, mask uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v ts=%v mask=%v, (%v:)", tls, n, rfds, wfds, efds, ts, mask, origin(2))
@@ -134315,15 +134284,6 @@ func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts ui
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_pselect6), int64(n), int64(rfds), int64(wfds), int64(efds), int64(v3), int64(bp+16)))))
}
-type Tucontext_t6 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [64]uint64
-}
-
func Xselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, tv uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v tv=%v, (%v:)", tls, n, rfds, wfds, efds, tv, origin(2))
@@ -134739,15 +134699,6 @@ func Xsigaltstack(tls *TLS, ss uintptr, old uintptr) (r int32) {
const SST_SIZE = 8
-type Tucontext_t7 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
- F__fpregs_mem [64]uint64
-}
-
func Xsigandset(tls *TLS, dest uintptr, left uintptr, right uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v dest=%v left=%v right=%v, (%v:)", tls, dest, left, right, origin(2))
@@ -143794,9 +143745,11 @@ func Xatoll(tls *TLS, s uintptr) (r int64) {
return v5
}
-func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xbsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, key, base, nel, width, cmp, origin(2))
+ trc("tls=%v key=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, base, nel, width, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var sign int32
@@ -143804,7 +143757,7 @@ func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, c
_, _ = sign, try
for nel > uint64(0) {
try = base + uintptr(width*(nel/uint64(2)))
- sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, try)
+ sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, try)
if sign < 0 {
nel /= uint64(2)
} else {
@@ -144065,7 +144018,7 @@ func _shr(tls *TLS, p uintptr, n int32) {
*(*Tsize_t)(unsafe.Pointer(p + 1*8)) >>= Uint64FromInt32(n)
}
-func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
+func _sift(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
bp := tls.Alloc(912)
defer tls.Free(912)
var i, v1, v2 int32
@@ -144077,10 +144030,10 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
for pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
break
}
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, rt, arg) >= 0 {
v1 = i
i++
(*(*[113]uintptr)(unsafe.Pointer(bp)))[v1] = lf
@@ -144097,7 +144050,7 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
_cycle(tls, width, bp, i)
}
-func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
+func _trinkle(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
bp := tls.Alloc(928)
defer tls.Free(928)
var i, trail, v1 int32
@@ -144111,13 +144064,13 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
(*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0] = head
for (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[int32(1)] != uint64(0) {
stepson = head - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift)*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
break
}
if !(trusty != 0) && pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, stepson, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, stepson, arg) >= 0 {
break
}
}
@@ -144132,13 +144085,15 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
}
if !(trusty != 0) {
_cycle(tls, width, bp+16, i)
- _sift(tls, head, width, cmp, arg, pshift, lp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, lp)
}
}
-func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__X__qsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
bp := tls.Alloc(784)
defer tls.Free(784)
@@ -144176,14 +144131,14 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
}
for head < high {
if (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0]&uint64(3) == uint64(3) {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
_shr(tls, bp+768, int32(2))
pshift += int32(2)
} else {
if (*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift-int32(1)] >= Uint64FromInt64(int64(high)-int64(head)) {
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
} else {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
}
if pshift == int32(1) {
_shl(tls, bp+768, int32(1))
@@ -144196,7 +144151,7 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
head += uintptr(width)
}
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
for pshift != int32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[int32(1)] != uint64(0) {
if pshift <= int32(1) {
trail = _pntz(tls, bp+768)
@@ -144207,31 +144162,35 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
pshift -= int32(2)
*(*Tsize_t)(unsafe.Pointer(bp + 768)) ^= uint64(7)
_shr(tls, bp+768, int32(1))
- _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
+ _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
_shl(tls, bp+768, int32(1))
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
- _trinkle(tls, head-uintptr(width), width, cmp, arg, bp+768, pshift, int32(1), bp)
+ _trinkle(tls, head-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift, int32(1), bp)
}
head -= uintptr(width)
}
}
-func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__Xqsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
- X__qsort_r(tls, base, nel, width, cmp, arg)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp_cmp, arg)
}
func _wrapper_cmp(tls *TLS, v1 uintptr, v2 uintptr, cmp uintptr) (r int32) {
return (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, v1, v2)
}
-func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun) {
+type t__ccgo_fp__Xqsort_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, base, nel, width, cmp, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, origin(2))
}
- X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), cmp)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), __ccgo_fp_cmp)
}
func _strtox(tls *TLS, s uintptr, p uintptr, prec int32) (r float64) {
diff --git a/vendor/modernc.org/libc/ccgo_linux_arm.go b/vendor/modernc.org/libc/ccgo_linux_arm.go
index e62167633..311fbccf0 100644
--- a/vendor/modernc.org/libc/ccgo_linux_arm.go
+++ b/vendor/modernc.org/libc/ccgo_linux_arm.go
@@ -25135,9 +25135,13 @@ func Xrewinddir(tls *TLS, dir uintptr) {
___unlock(tls, dir+20)
}
-func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r int32) {
+type t__ccgo_fp__Xscandir_2 = func(*TLS, uintptr) int32
+
+type t__ccgo_fp__Xscandir_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xscandir(tls *TLS, path uintptr, res uintptr, __ccgo_fp_sel uintptr, __ccgo_fp_cmp uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v res=%v sel=%v cmp=%v, (%v:)", tls, path, res, sel, cmp, origin(2))
+ trc("tls=%v path=%v res=%v __ccgo_fp_sel=%v __ccgo_fp_cmp=%v, (%v:)", tls, path, res, __ccgo_fp_sel, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var cnt, len1, v2, v3 Tsize_t
@@ -25159,7 +25163,7 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
if !(v1 != 0) {
break
}
- if sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{sel})))(tls, de) != 0) {
+ if __ccgo_fp_sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_sel})))(tls, de) != 0) {
continue
}
if cnt >= len1 {
@@ -25197,8 +25201,8 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
return -int32(1)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
- if cmp != 0 {
- Xqsort(tls, names, cnt, uint32(4), cmp)
+ if __ccgo_fp_cmp != 0 {
+ Xqsort(tls, names, cnt, uint32(4), __ccgo_fp_cmp)
}
*(*uintptr)(unsafe.Pointer(res)) = names
return Int32FromUint32(cnt)
@@ -25938,9 +25942,11 @@ func X__funcs_on_quick_exit(tls *TLS) {
}
}
-func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
+type t__ccgo_fp__Xat_quick_exit_0 = func(*TLS)
+
+func Xat_quick_exit(tls *TLS, __ccgo_fp_func uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v func1=%v, (%v:)", tls, func1, origin(2))
+ trc("tls=%v __ccgo_fp_func=%v, (%v:)", tls, __ccgo_fp_func, origin(2))
defer func() { trc("-> %v", r1) }()
}
var r, v1 int32
@@ -25952,7 +25958,7 @@ func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
} else {
v1 = _count
_count++
- _funcs[v1] = func1
+ _funcs[v1] = __ccgo_fp_func
}
___unlock(tls, uintptr(unsafe.Pointer(&_lock)))
return r
@@ -28457,15 +28463,17 @@ type TFTW = struct {
Flevel int32
}
-func Xftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32) {
+type t__ccgo_fp__Xftw_1 = func(*TLS, uintptr, uintptr, int32) int32
+
+func Xftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v, (%v:)", tls, path, fn, fd_limit, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, origin(2))
defer func() { trc("-> %v", r) }()
}
/* The following cast assumes that calling a function with one
* argument more than it needs behaves as expected. This is
* actually undefined, but works on all real-world machines. */
- return Xnftw(tls, path, fn, fd_limit, int32(FTW_PHYS))
+ return Xnftw(tls, path, __ccgo_fp_fn, fd_limit, int32(FTW_PHYS))
}
const STATX_ALL = 4095
@@ -31529,16 +31537,6 @@ func Xfremovexattr(tls *TLS, fd int32, name uintptr) (r int32) {
return X__syscall_ret(tls, Uint32FromInt32(X__syscall2(tls, int32(SYS_fremovexattr), fd, int32(name))))
}
-type Tucontext_t2 = struct {
- F__ccgo_align [0]uint32
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
- Fuc_regspace [64]uint64
-}
-
func _dummy4(tls *TLS, msg uintptr, lm uintptr) (r uintptr) {
return msg
}
@@ -114778,17 +114776,7 @@ func Xgethostid(tls *TLS) (r int32) {
const optpos = 0
-type Tucontext_t3 = struct {
- F__ccgo_align [0]uint32
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
- Fuc_regspace [64]uint64
-}
-
-type t__ucontext1 = Tucontext_t3
+type t__ucontext1 = Tucontext_t1
func X__getopt_msg(tls *TLS, a uintptr, b uintptr, c uintptr, l Tsize_t) {
if __ccgo_strace {
@@ -115943,7 +115931,7 @@ type Thistory = struct {
Fbase int32
}
-func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
+func _do_nftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
bp := tls.Alloc(192)
defer tls.Free(192)
var d, de, v10 uintptr
@@ -116049,7 +116037,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
}
if v8 = !(flags&Int32FromInt32(FTW_DEPTH) != 0); v8 {
- v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+184)
+ v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+184)
r = v7
}
if v8 && v7 != 0 {
@@ -116090,7 +116078,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*uint8)(unsafe.Pointer(path + uintptr(j))) = uint8('/')
Xstrcpy(tls, path+uintptr(j)+uintptr(1), de+19)
- v11 = _do_nftw(tls, path, fn, fd_limit-int32(1), flags, bp+152)
+ v11 = _do_nftw(tls, path, __ccgo_fp_fn, fd_limit-int32(1), flags, bp+152)
r = v11
if v11 != 0 {
Xclosedir(tls, d)
@@ -116105,7 +116093,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*uint8)(unsafe.Pointer(path + uintptr(l))) = uint8(0)
if v13 = flags&int32(FTW_DEPTH) != 0; v13 {
- v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+184)
+ v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+184)
r = v12
}
if v13 && v12 != 0 {
@@ -116114,9 +116102,11 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
return 0
}
-func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1 int32) {
+type t__ccgo_fp__Xnftw_1 = func(*TLS, uintptr, uintptr, int32, uintptr) int32
+
+func Xnftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, fn, fd_limit, flags, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, flags, origin(2))
defer func() { trc("-> %v", r1) }()
}
bp := tls.Alloc(4112)
@@ -116136,7 +116126,7 @@ func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1
}
_memcpy(tls, bp+4, path, l+uint32(1))
_pthread_setcancelstate(tls, int32(PTHREAD_CANCEL_DISABLE), bp)
- r = _do_nftw(tls, bp+4, fn, fd_limit, flags, UintptrFromInt32(0))
+ r = _do_nftw(tls, bp+4, __ccgo_fp_fn, fd_limit, flags, UintptrFromInt32(0))
_pthread_setcancelstate(tls, *(*int32)(unsafe.Pointer(bp)), uintptr(0))
return r
}
@@ -116913,8 +116903,6 @@ type Tsockaddr_storage = struct {
F__ss_align uint32
}
-type t__ucontext2 = Tucontext_t2
-
var _lock2 [1]int32
var _log_ident [32]uint8
var _log_opt int32
@@ -117352,16 +117340,6 @@ const SA = 194
const SB = 244
const bittab = 0
-type Tucontext_t4 = struct {
- F__ccgo_align [0]uint32
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
- Fuc_regspace [64]uint64
-}
-
func Xbtowc(tls *TLS, c int32) (r Twint_t) {
if __ccgo_strace {
trc("tls=%v c=%v, (%v:)", tls, c, origin(2))
@@ -119077,13 +119055,6 @@ const _ns_uop_delete = 0
const _ns_uop_add = 1
const _ns_uop_max = 2
-type Tns_tsig_key1 = struct {
- Fname [1025]uint8
- Falg [1025]uint8
- Fdata uintptr
- Flen1 int32
-}
-
type Tns_tsig_key = struct {
Fname [1025]uint8
Falg [1025]uint8
@@ -119091,14 +119062,6 @@ type Tns_tsig_key = struct {
Flen1 int32
}
-type Tns_tcp_tsig_state1 = struct {
- Fcounter int32
- Fkey uintptr
- Fctx uintptr
- Fsig [512]uint8
- Fsiglen int32
-}
-
type Tns_tcp_tsig_state = struct {
Fcounter int32
Fkey uintptr
@@ -119748,9 +119711,11 @@ type Tresolvconf = struct {
Ftimeout uint32
}
-func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__dns_parse_2 = func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32
+
+func X__dns_parse(tls *TLS, r uintptr, rlen int32, __ccgo_fp_callback uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v r=%v rlen=%v callback=%v ctx=%v, (%v:)", tls, r, rlen, callback, ctx, origin(2))
+ trc("tls=%v r=%v rlen=%v __ccgo_fp_callback=%v ctx=%v, (%v:)", tls, r, rlen, __ccgo_fp_callback, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var ancount, len1, qdcount, v1, v2 int32
@@ -119796,7 +119761,7 @@ func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr
if len1+int32(10) > int32(r+uintptr(rlen))-int32(p) {
return -int32(1)
}
- if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
+ if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
return -int32(1)
}
p += uintptr(int32(10) + len1)
@@ -121896,20 +121861,6 @@ func Xherror(tls *TLS, msg uintptr) {
Xfprintf(tls, uintptr(unsafe.Pointer(&X__stderr_FILE)), __ccgo_ts+987, VaList(bp+8, v1, v2, Xhstrerror(tls, *(*int32)(unsafe.Pointer(X__h_errno_location(tls))))))
}
-type Tcpu_set_t1 = struct {
- F__bits [32]uint32
-}
-
-type Tucontext_t5 = struct {
- F__ccgo_align [0]uint32
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
- Fuc_regspace [64]uint64
-}
-
var _msgs1 = [84]uint8{'H', 'o', 's', 't', ' ', 'n', 'o', 't', ' ', 'f', 'o', 'u', 'n', 'd', 0, 'T', 'r', 'y', ' ', 'a', 'g', 'a', 'i', 'n', 0, 'N', 'o', 'n', '-', 'r', 'e', 'c', 'o', 'v', 'e', 'r', 'a', 'b', 'l', 'e', ' ', 'e', 'r', 'r', 'o', 'r', 0, 'A', 'd', 'd', 'r', 'e', 's', 's', ' ', 'n', 'o', 't', ' ', 'a', 'v', 'a', 'i', 'l', 'a', 'b', 'l', 'e', 0, 0, 'U', 'n', 'k', 'n', 'o', 'w', 'n', ' ', 'e', 'r', 'r', 'o', 'r'}
func Xhstrerror(tls *TLS, ecode int32) (r uintptr) {
@@ -123696,7 +123647,7 @@ func X__lookup_serv(tls *TLS, buf uintptr, name uintptr, proto int32, socktype i
return v22
}
-func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, cb uintptr, ctx uintptr) (r1 int32) {
+func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
bp := tls.Alloc(8192)
defer tls.Free(8192)
var h uintptr
@@ -123786,7 +123737,7 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
if Int32FromUint16((*Tnlmsghdr)(unsafe.Pointer(h)).Fnlmsg_type) == int32(NLMSG_ERROR) {
return -int32(1)
}
- ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cb})))(tls, ctx, h)
+ ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cb})))(tls, ctx, h)
if ret != 0 {
return ret
}
@@ -123799,9 +123750,11 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
return r1
}
-func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__rtnetlink_enumerate_2 = func(*TLS, uintptr, uintptr) int32
+
+func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v link_af=%v addr_af=%v cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, cb, ctx, origin(2))
+ trc("tls=%v link_af=%v addr_af=%v __ccgo_fp_cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, __ccgo_fp_cb, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var fd, r int32
@@ -123810,9 +123763,9 @@ func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr,
if fd < 0 {
return -int32(1)
}
- r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, __ccgo_fp_cb, ctx)
if !(r != 0) {
- r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, __ccgo_fp_cb, ctx)
}
X__syscall1(tls, int32(SYS_close), fd)
return r
@@ -128875,7 +128828,7 @@ func _append(tls *TLS, tail uintptr, name uintptr, len1 Tsize_t, mark int32) (r
return 0
}
-func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, errfunc uintptr, tail uintptr) (r1 int32) {
+func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, tail uintptr) (r1 int32) {
bp := tls.Alloc(160)
defer tls.Free(160)
var de, dir, p, p2, v11, v2, v7, v8 uintptr
@@ -128984,7 +128937,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
}
if !(type1 != 0) && Xlstat(tls, buf, bp) != 0 {
- if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
return 0
@@ -129021,7 +128974,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
dir = Xopendir(tls, v7)
if !(dir != 0) {
- if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
+ if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
return int32(GLOB_ABORTED)
}
return 0
@@ -129073,7 +129026,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
} else {
v11 = __ccgo_ts
}
- r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, errfunc, tail)
+ r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, __ccgo_fp_errfunc, tail)
if r != 0 {
Xclosedir(tls, dir)
return r
@@ -129084,7 +129037,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
*(*uint8)(unsafe.Pointer(p2)) = saved_sep
}
Xclosedir(tls, dir)
- if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
@@ -129196,9 +129149,11 @@ func _expand_tilde(tls *TLS, pat uintptr, buf uintptr, pos uintptr) (r int32) {
return 0
}
-func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r int32) {
+type t__ccgo_fp__Xglob_2 = func(*TLS, uintptr, int32) int32
+
+func Xglob(tls *TLS, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, g uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v pat=%v flags=%v errfunc=%v g=%v, (%v:)", tls, pat, flags, errfunc, g, origin(2))
+ trc("tls=%v pat=%v flags=%v __ccgo_fp_errfunc=%v g=%v, (%v:)", tls, pat, flags, __ccgo_fp_errfunc, g, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(4112)
@@ -129224,8 +129179,8 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
}
offs = v1
error1 = 0
- if !(errfunc != 0) {
- errfunc = __ccgo_fp(_ignore_err)
+ if !(__ccgo_fp_errfunc != 0) {
+ __ccgo_fp_errfunc = __ccgo_fp(_ignore_err)
}
if !(flags&Int32FromInt32(GLOB_APPEND) != 0) {
(*Tglob_t)(unsafe.Pointer(g)).Fgl_offs = offs
@@ -129244,7 +129199,7 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
error1 = _expand_tilde(tls, bp+4108, bp+8, bp+4104)
}
if !(error1 != 0) {
- error1 = _do_glob(tls, bp+8, *(*Tsize_t)(unsafe.Pointer(bp + 4104)), 0, *(*uintptr)(unsafe.Pointer(bp + 4108)), flags, errfunc, bp+4)
+ error1 = _do_glob(tls, bp+8, *(*Tsize_t)(unsafe.Pointer(bp + 4104)), 0, *(*uintptr)(unsafe.Pointer(bp + 4108)), flags, __ccgo_fp_errfunc, bp+4)
}
Xfree(tls, p)
}
@@ -134469,9 +134424,11 @@ func Xremque(tls *TLS, element uintptr) {
}
}
-func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -134487,7 +134444,7 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -134499,9 +134456,11 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
return _memcpy(tls, p+uintptr(n)*uintptr(v1), key, width)
}
-func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlfind_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -134517,7 +134476,7 @@ func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, co
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -134536,9 +134495,11 @@ type Tnode1 = struct {
Fh int32
}
-func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtdelete_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtdelete(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var a [49]uintptr
@@ -134562,7 +134523,7 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
return uintptr(0)
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -134607,9 +134568,11 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
return parent
}
-func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
+type t__ccgo_fp__Xtdestroy_1 = func(*TLS, uintptr)
+
+func Xtdestroy(tls *TLS, root uintptr, __ccgo_fp_freekey uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v freekey=%v, (%v:)", tls, root, freekey, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_freekey=%v, (%v:)", tls, root, __ccgo_fp_freekey, origin(2))
}
var r uintptr
_ = r
@@ -134617,17 +134580,19 @@ func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
if r == uintptr(0) {
return
}
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 4)), freekey)
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 4 + 1*4)), freekey)
- if freekey != 0 {
- (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 4)), __ccgo_fp_freekey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 4 + 1*4)), __ccgo_fp_freekey)
+ if __ccgo_fp_freekey != 0 {
+ (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
}
Xfree(tls, r)
}
-func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtfind_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtfind(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var c int32
@@ -134641,7 +134606,7 @@ func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -134729,9 +134694,11 @@ func X__tsearch_balance(tls *TLS, p uintptr) (r int32) {
return _rot(tls, p, n, BoolInt32(h0 < h1))
}
-func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
+type t__ccgo_fp__Xtsearch_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtsearch(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r1 uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r1) }()
}
var a [48]uintptr
@@ -134751,7 +134718,7 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
return n
}
@@ -134787,26 +134754,28 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
return r
}
-func _walk(tls *TLS, r uintptr, action uintptr, d int32) {
+func _walk(tls *TLS, r uintptr, __ccgo_fp_action uintptr, d int32) {
if !(r != 0) {
return
}
if (*Tnode1)(unsafe.Pointer(r)).Fh == int32(1) {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_leaf), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_leaf), d)
} else {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_preorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 4)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_postorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 4 + 1*4)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_endorder), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_preorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 4)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_postorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 4 + 1*4)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_endorder), d)
}
}
-func Xtwalk(tls *TLS, root uintptr, action uintptr) {
+type t__ccgo_fp__Xtwalk_1 = func(*TLS, uintptr, int32, int32)
+
+func Xtwalk(tls *TLS, root uintptr, __ccgo_fp_action uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v action=%v, (%v:)", tls, root, action, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_action=%v, (%v:)", tls, root, __ccgo_fp_action, origin(2))
}
- _walk(tls, root, action, 0)
+ _walk(tls, root, __ccgo_fp_action, 0)
}
func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
@@ -134817,8 +134786,6 @@ func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
return X__syscall_ret(tls, Uint32FromInt32(___syscall_cp(tls, int32(SYS_poll), int32(fds), Int32FromUint32(n), timeout, 0, 0, 0)))
}
-type t__ucontext3 = Tucontext_t5
-
func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v fds=%v n=%v to=%v mask=%v, (%v:)", tls, fds, n, to, mask, origin(2))
@@ -134878,8 +134845,6 @@ func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r1 int3
return X__syscall_ret(tls, Uint32FromInt32(___syscall_cp(tls, int32(SYS_ppoll), int32(fds), Int32FromUint32(n), int32(v5), int32(mask), int32(Int32FromInt32(_NSIG)/Int32FromInt32(8)), 0)))
}
-type t__ucontext4 = Tucontext_t4
-
func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts uintptr, mask uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v ts=%v mask=%v, (%v:)", tls, n, rfds, wfds, efds, ts, mask, origin(2))
@@ -134944,16 +134909,6 @@ func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts ui
return X__syscall_ret(tls, Uint32FromInt32(___syscall_cp(tls, int32(SYS_pselect6), n, int32(rfds), int32(wfds), int32(efds), int32(v5), int32(bp+24))))
}
-type Tucontext_t6 = struct {
- F__ccgo_align [0]uint32
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
- Fuc_regspace [64]uint64
-}
-
func Xselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, tv uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v tv=%v, (%v:)", tls, n, rfds, wfds, efds, tv, origin(2))
@@ -135429,16 +135384,6 @@ func Xsigaltstack(tls *TLS, ss uintptr, old uintptr) (r int32) {
const SST_SIZE = 8
-type Tucontext_t7 = struct {
- F__ccgo_align [0]uint32
- Fuc_flags uint32
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
- Fuc_regspace [64]uint64
-}
-
func Xsigandset(tls *TLS, dest uintptr, left uintptr, right uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v dest=%v left=%v right=%v, (%v:)", tls, dest, left, right, origin(2))
@@ -144607,9 +144552,11 @@ func Xatoll(tls *TLS, s uintptr) (r int64) {
return v5
}
-func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xbsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, key, base, nel, width, cmp, origin(2))
+ trc("tls=%v key=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, base, nel, width, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var sign int32
@@ -144617,7 +144564,7 @@ func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, c
_, _ = sign, try
for nel > uint32(0) {
try = base + uintptr(width*(nel/uint32(2)))
- sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, try)
+ sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, try)
if sign < 0 {
nel /= uint32(2)
} else {
@@ -144896,7 +144843,7 @@ func _shr(tls *TLS, p uintptr, n int32) {
*(*Tsize_t)(unsafe.Pointer(p + 1*4)) >>= Uint32FromInt32(n)
}
-func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
+func _sift(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
bp := tls.Alloc(240)
defer tls.Free(240)
var i, v1, v2 int32
@@ -144908,10 +144855,10 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
for pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*4)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[57]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[57]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[57]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[57]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
break
}
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, rt, arg) >= 0 {
v1 = i
i++
(*(*[57]uintptr)(unsafe.Pointer(bp)))[v1] = lf
@@ -144928,7 +144875,7 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
_cycle(tls, width, bp, i)
}
-func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
+func _trinkle(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
bp := tls.Alloc(240)
defer tls.Free(240)
var i, trail, v1 int32
@@ -144942,13 +144889,13 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
(*(*[57]uintptr)(unsafe.Pointer(bp + 8)))[0] = head
for (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[0] != uint32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[int32(1)] != uint32(0) {
stepson = head - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift)*4)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, stepson, (*(*[57]uintptr)(unsafe.Pointer(bp + 8)))[0], arg) <= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, stepson, (*(*[57]uintptr)(unsafe.Pointer(bp + 8)))[0], arg) <= 0 {
break
}
if !(trusty != 0) && pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*4)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, stepson, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, stepson, arg) >= 0 {
break
}
}
@@ -144963,13 +144910,15 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
}
if !(trusty != 0) {
_cycle(tls, width, bp+8, i)
- _sift(tls, head, width, cmp, arg, pshift, lp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, lp)
}
}
-func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__X__qsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
bp := tls.Alloc(208)
defer tls.Free(208)
@@ -145007,14 +144956,14 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
}
for head < high {
if (*(*[2]Tsize_t)(unsafe.Pointer(bp + 192)))[0]&uint32(3) == uint32(3) {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
_shr(tls, bp+192, int32(2))
pshift += int32(2)
} else {
if (*(*[48]Tsize_t)(unsafe.Pointer(bp)))[pshift-int32(1)] >= Uint32FromInt32(int32(high)-int32(head)) {
- _trinkle(tls, head, width, cmp, arg, bp+192, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+192, pshift, 0, bp)
} else {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
}
if pshift == int32(1) {
_shl(tls, bp+192, int32(1))
@@ -145027,7 +144976,7 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
*(*Tsize_t)(unsafe.Pointer(bp + 192)) |= uint32(1)
head += uintptr(width)
}
- _trinkle(tls, head, width, cmp, arg, bp+192, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+192, pshift, 0, bp)
for pshift != int32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 192)))[0] != uint32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 192)))[int32(1)] != uint32(0) {
if pshift <= int32(1) {
trail = _pntz(tls, bp+192)
@@ -145038,31 +144987,35 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
pshift -= int32(2)
*(*Tsize_t)(unsafe.Pointer(bp + 192)) ^= uint32(7)
_shr(tls, bp+192, int32(1))
- _trinkle(tls, head-uintptr((*(*[48]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, cmp, arg, bp+192, pshift+int32(1), int32(1), bp)
+ _trinkle(tls, head-uintptr((*(*[48]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, __ccgo_fp_cmp, arg, bp+192, pshift+int32(1), int32(1), bp)
_shl(tls, bp+192, int32(1))
*(*Tsize_t)(unsafe.Pointer(bp + 192)) |= uint32(1)
- _trinkle(tls, head-uintptr(width), width, cmp, arg, bp+192, pshift, int32(1), bp)
+ _trinkle(tls, head-uintptr(width), width, __ccgo_fp_cmp, arg, bp+192, pshift, int32(1), bp)
}
head -= uintptr(width)
}
}
-func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__Xqsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
- X__qsort_r(tls, base, nel, width, cmp, arg)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp_cmp, arg)
}
func _wrapper_cmp(tls *TLS, v1 uintptr, v2 uintptr, cmp uintptr) (r int32) {
return (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, v1, v2)
}
-func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun) {
+type t__ccgo_fp__Xqsort_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, base, nel, width, cmp, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, origin(2))
}
- X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), cmp)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), __ccgo_fp_cmp)
}
func _strtox(tls *TLS, s uintptr, p uintptr, prec int32) (r float64) {
@@ -153422,8 +153375,6 @@ func X__utimes_time32(tls *TLS, path uintptr, times32 uintptr) (r int32) {
return Xutimes(tls, path, v1)
}
-type t__ucontext5 = Tucontext_t7
-
func X__wait3_time32(tls *TLS, status uintptr, options int32, usage uintptr) (r1 Tpid_t) {
if __ccgo_strace {
trc("tls=%v status=%v options=%v usage=%v, (%v:)", tls, status, options, usage, origin(2))
diff --git a/vendor/modernc.org/libc/ccgo_linux_arm64.go b/vendor/modernc.org/libc/ccgo_linux_arm64.go
index cd1f7cb31..aef720a69 100644
--- a/vendor/modernc.org/libc/ccgo_linux_arm64.go
+++ b/vendor/modernc.org/libc/ccgo_linux_arm64.go
@@ -24841,9 +24841,13 @@ func Xrewinddir(tls *TLS, dir uintptr) {
___unlock(tls, dir+20)
}
-func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r int32) {
+type t__ccgo_fp__Xscandir_2 = func(*TLS, uintptr) int32
+
+type t__ccgo_fp__Xscandir_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xscandir(tls *TLS, path uintptr, res uintptr, __ccgo_fp_sel uintptr, __ccgo_fp_cmp uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v res=%v sel=%v cmp=%v, (%v:)", tls, path, res, sel, cmp, origin(2))
+ trc("tls=%v path=%v res=%v __ccgo_fp_sel=%v __ccgo_fp_cmp=%v, (%v:)", tls, path, res, __ccgo_fp_sel, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var cnt, len1, v2, v3 Tsize_t
@@ -24865,7 +24869,7 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
if !(v1 != 0) {
break
}
- if sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{sel})))(tls, de) != 0) {
+ if __ccgo_fp_sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_sel})))(tls, de) != 0) {
continue
}
if cnt >= len1 {
@@ -24903,8 +24907,8 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
return -int32(1)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
- if cmp != 0 {
- Xqsort(tls, names, cnt, uint64(8), cmp)
+ if __ccgo_fp_cmp != 0 {
+ Xqsort(tls, names, cnt, uint64(8), __ccgo_fp_cmp)
}
*(*uintptr)(unsafe.Pointer(res)) = names
return Int32FromUint64(cnt)
@@ -25650,9 +25654,11 @@ func X__funcs_on_quick_exit(tls *TLS) {
}
}
-func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
+type t__ccgo_fp__Xat_quick_exit_0 = func(*TLS)
+
+func Xat_quick_exit(tls *TLS, __ccgo_fp_func uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v func1=%v, (%v:)", tls, func1, origin(2))
+ trc("tls=%v __ccgo_fp_func=%v, (%v:)", tls, __ccgo_fp_func, origin(2))
defer func() { trc("-> %v", r1) }()
}
var r, v1 int32
@@ -25664,7 +25670,7 @@ func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
} else {
v1 = _count
_count++
- _funcs[v1] = func1
+ _funcs[v1] = __ccgo_fp_func
}
___unlock(tls, uintptr(unsafe.Pointer(&_lock)))
return r
@@ -28116,15 +28122,17 @@ type TFTW = struct {
Flevel int32
}
-func Xftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32) {
+type t__ccgo_fp__Xftw_1 = func(*TLS, uintptr, uintptr, int32) int32
+
+func Xftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v, (%v:)", tls, path, fn, fd_limit, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, origin(2))
defer func() { trc("-> %v", r) }()
}
/* The following cast assumes that calling a function with one
* argument more than it needs behaves as expected. This is
* actually undefined, but works on all real-world machines. */
- return Xnftw(tls, path, fn, fd_limit, int32(FTW_PHYS))
+ return Xnftw(tls, path, __ccgo_fp_fn, fd_limit, int32(FTW_PHYS))
}
const STATX_ALL = 4095
@@ -31028,14 +31036,6 @@ func Xfremovexattr(tls *TLS, fd int32, name uintptr) (r int32) {
return int32(X__syscall_ret(tls, Uint64FromInt64(X__syscall2(tls, int64(SYS_fremovexattr), int64(fd), int64(name)))))
}
-type Tucontext_t2 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t
-}
-
func _dummy4(tls *TLS, msg uintptr, lm uintptr) (r uintptr) {
return msg
}
@@ -113983,15 +113983,7 @@ func Xgethostid(tls *TLS) (r int64) {
const optpos = 0
-type Tucontext_t3 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t1
-}
-
-type t__ucontext1 = Tucontext_t3
+type t__ucontext1 = Tucontext_t1
func X__getopt_msg(tls *TLS, a uintptr, b uintptr, c uintptr, l Tsize_t) {
if __ccgo_strace {
@@ -115139,7 +115131,7 @@ type Thistory = struct {
Fbase int32
}
-func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
+func _do_nftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
bp := tls.Alloc(176)
defer tls.Free(176)
var d, de, v10 uintptr
@@ -115245,7 +115237,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
}
if v8 = !(flags&Int32FromInt32(FTW_DEPTH) != 0); v8 {
- v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+160)
+ v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+160)
r = v7
}
if v8 && v7 != 0 {
@@ -115286,7 +115278,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*uint8)(unsafe.Pointer(path + uintptr(j))) = uint8('/')
Xstrcpy(tls, path+uintptr(j)+uintptr(1), de+19)
- v11 = _do_nftw(tls, path, fn, fd_limit-int32(1), flags, bp+128)
+ v11 = _do_nftw(tls, path, __ccgo_fp_fn, fd_limit-int32(1), flags, bp+128)
r = v11
if v11 != 0 {
Xclosedir(tls, d)
@@ -115301,7 +115293,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*uint8)(unsafe.Pointer(path + uintptr(l))) = uint8(0)
if v13 = flags&int32(FTW_DEPTH) != 0; v13 {
- v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+160)
+ v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+160)
r = v12
}
if v13 && v12 != 0 {
@@ -115310,9 +115302,11 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
return 0
}
-func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1 int32) {
+type t__ccgo_fp__Xnftw_1 = func(*TLS, uintptr, uintptr, int32, uintptr) int32
+
+func Xnftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, fn, fd_limit, flags, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, flags, origin(2))
defer func() { trc("-> %v", r1) }()
}
bp := tls.Alloc(4112)
@@ -115332,7 +115326,7 @@ func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1
}
Xmemcpy(tls, bp+4, path, l+uint64(1))
_pthread_setcancelstate(tls, int32(PTHREAD_CANCEL_DISABLE), bp)
- r = _do_nftw(tls, bp+4, fn, fd_limit, flags, UintptrFromInt32(0))
+ r = _do_nftw(tls, bp+4, __ccgo_fp_fn, fd_limit, flags, UintptrFromInt32(0))
_pthread_setcancelstate(tls, *(*int32)(unsafe.Pointer(bp)), uintptr(0))
return r
}
@@ -116112,8 +116106,6 @@ type Tsockaddr_storage = struct {
F__ss_align uint64
}
-type t__ucontext2 = Tucontext_t2
-
var _lock2 [1]int32
var _log_ident [32]uint8
var _log_opt int32
@@ -116551,14 +116543,6 @@ const SA = 194
const SB = 244
const bittab = 0
-type Tucontext_t4 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t
-}
-
func Xbtowc(tls *TLS, c int32) (r Twint_t) {
if __ccgo_strace {
trc("tls=%v c=%v, (%v:)", tls, c, origin(2))
@@ -118278,13 +118262,6 @@ const _ns_uop_delete = 0
const _ns_uop_add = 1
const _ns_uop_max = 2
-type Tns_tsig_key1 = struct {
- Fname [1025]uint8
- Falg [1025]uint8
- Fdata uintptr
- Flen1 int32
-}
-
type Tns_tsig_key = struct {
Fname [1025]uint8
Falg [1025]uint8
@@ -118292,14 +118269,6 @@ type Tns_tsig_key = struct {
Flen1 int32
}
-type Tns_tcp_tsig_state1 = struct {
- Fcounter int32
- Fkey uintptr
- Fctx uintptr
- Fsig [512]uint8
- Fsiglen int32
-}
-
type Tns_tcp_tsig_state = struct {
Fcounter int32
Fkey uintptr
@@ -118949,9 +118918,11 @@ type Tresolvconf = struct {
Ftimeout uint32
}
-func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__dns_parse_2 = func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32
+
+func X__dns_parse(tls *TLS, r uintptr, rlen int32, __ccgo_fp_callback uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v r=%v rlen=%v callback=%v ctx=%v, (%v:)", tls, r, rlen, callback, ctx, origin(2))
+ trc("tls=%v r=%v rlen=%v __ccgo_fp_callback=%v ctx=%v, (%v:)", tls, r, rlen, __ccgo_fp_callback, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var ancount, len1, qdcount, v1, v2 int32
@@ -118997,7 +118968,7 @@ func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr
if int64(len1+int32(10)) > int64(r+uintptr(rlen))-int64(p) {
return -int32(1)
}
- if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
+ if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
return -int32(1)
}
p += uintptr(int32(10) + len1)
@@ -121101,18 +121072,6 @@ func Xherror(tls *TLS, msg uintptr) {
Xfprintf(tls, uintptr(unsafe.Pointer(&X__stderr_FILE)), __ccgo_ts+1072, VaList(bp+8, v1, v2, Xhstrerror(tls, *(*int32)(unsafe.Pointer(X__h_errno_location(tls))))))
}
-type Tcpu_set_t1 = struct {
- F__bits [16]uint64
-}
-
-type Tucontext_t5 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t1
-}
-
var _msgs1 = [84]uint8{'H', 'o', 's', 't', ' ', 'n', 'o', 't', ' ', 'f', 'o', 'u', 'n', 'd', 0, 'T', 'r', 'y', ' ', 'a', 'g', 'a', 'i', 'n', 0, 'N', 'o', 'n', '-', 'r', 'e', 'c', 'o', 'v', 'e', 'r', 'a', 'b', 'l', 'e', ' ', 'e', 'r', 'r', 'o', 'r', 0, 'A', 'd', 'd', 'r', 'e', 's', 's', ' ', 'n', 'o', 't', ' ', 'a', 'v', 'a', 'i', 'l', 'a', 'b', 'l', 'e', 0, 0, 'U', 'n', 'k', 'n', 'o', 'w', 'n', ' ', 'e', 'r', 'r', 'o', 'r'}
func Xhstrerror(tls *TLS, ecode int32) (r uintptr) {
@@ -122900,7 +122859,7 @@ func X__lookup_serv(tls *TLS, buf uintptr, name uintptr, proto int32, socktype i
return v22
}
-func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, cb uintptr, ctx uintptr) (r1 int32) {
+func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
bp := tls.Alloc(8192)
defer tls.Free(8192)
var h uintptr
@@ -122990,7 +122949,7 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
if Int32FromUint16((*Tnlmsghdr)(unsafe.Pointer(h)).Fnlmsg_type) == int32(NLMSG_ERROR) {
return -int32(1)
}
- ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cb})))(tls, ctx, h)
+ ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cb})))(tls, ctx, h)
if ret != 0 {
return ret
}
@@ -123003,9 +122962,11 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
return r1
}
-func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__rtnetlink_enumerate_2 = func(*TLS, uintptr, uintptr) int32
+
+func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v link_af=%v addr_af=%v cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, cb, ctx, origin(2))
+ trc("tls=%v link_af=%v addr_af=%v __ccgo_fp_cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, __ccgo_fp_cb, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var fd, r int32
@@ -123014,9 +122975,9 @@ func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr,
if fd < 0 {
return -int32(1)
}
- r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, __ccgo_fp_cb, ctx)
if !(r != 0) {
- r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, __ccgo_fp_cb, ctx)
}
X__syscall1(tls, int64(SYS_close), int64(fd))
return r
@@ -128123,7 +128084,7 @@ func _append(tls *TLS, tail uintptr, name uintptr, len1 Tsize_t, mark int32) (r
return 0
}
-func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, errfunc uintptr, tail uintptr) (r1 int32) {
+func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, tail uintptr) (r1 int32) {
bp := tls.Alloc(128)
defer tls.Free(128)
var de, dir, p, p2, v11, v2, v7, v8 uintptr
@@ -128232,7 +128193,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
}
if !(type1 != 0) && Xlstat(tls, buf, bp) != 0 {
- if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
return 0
@@ -128269,7 +128230,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
dir = Xopendir(tls, v7)
if !(dir != 0) {
- if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
+ if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
return int32(GLOB_ABORTED)
}
return 0
@@ -128321,7 +128282,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
} else {
v11 = __ccgo_ts
}
- r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, errfunc, tail)
+ r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, __ccgo_fp_errfunc, tail)
if r != 0 {
Xclosedir(tls, dir)
return r
@@ -128332,7 +128293,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
*(*uint8)(unsafe.Pointer(p2)) = saved_sep
}
Xclosedir(tls, dir)
- if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
@@ -128444,9 +128405,11 @@ func _expand_tilde(tls *TLS, pat uintptr, buf uintptr, pos uintptr) (r int32) {
return 0
}
-func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r int32) {
+type t__ccgo_fp__Xglob_2 = func(*TLS, uintptr, int32) int32
+
+func Xglob(tls *TLS, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, g uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v pat=%v flags=%v errfunc=%v g=%v, (%v:)", tls, pat, flags, errfunc, g, origin(2))
+ trc("tls=%v pat=%v flags=%v __ccgo_fp_errfunc=%v g=%v, (%v:)", tls, pat, flags, __ccgo_fp_errfunc, g, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(4128)
@@ -128472,8 +128435,8 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
}
offs = v1
error1 = 0
- if !(errfunc != 0) {
- errfunc = __ccgo_fp(_ignore_err)
+ if !(__ccgo_fp_errfunc != 0) {
+ __ccgo_fp_errfunc = __ccgo_fp(_ignore_err)
}
if !(flags&Int32FromInt32(GLOB_APPEND) != 0) {
(*Tglob_t)(unsafe.Pointer(g)).Fgl_offs = offs
@@ -128492,7 +128455,7 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
error1 = _expand_tilde(tls, bp+4120, bp+16, bp+4112)
}
if !(error1 != 0) {
- error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, errfunc, bp+8)
+ error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, __ccgo_fp_errfunc, bp+8)
}
Xfree(tls, p)
}
@@ -133716,9 +133679,11 @@ func Xremque(tls *TLS, element uintptr) {
}
}
-func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -133734,7 +133699,7 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -133746,9 +133711,11 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
return Xmemcpy(tls, p+uintptr(n)*uintptr(v1), key, width)
}
-func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlfind_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -133764,7 +133731,7 @@ func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, co
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -133783,9 +133750,11 @@ type Tnode1 = struct {
Fh int32
}
-func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtdelete_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtdelete(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var a [97]uintptr
@@ -133809,7 +133778,7 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
return uintptr(0)
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -133854,9 +133823,11 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
return parent
}
-func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
+type t__ccgo_fp__Xtdestroy_1 = func(*TLS, uintptr)
+
+func Xtdestroy(tls *TLS, root uintptr, __ccgo_fp_freekey uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v freekey=%v, (%v:)", tls, root, freekey, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_freekey=%v, (%v:)", tls, root, __ccgo_fp_freekey, origin(2))
}
var r uintptr
_ = r
@@ -133864,17 +133835,19 @@ func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
if r == uintptr(0) {
return
}
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), freekey)
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), freekey)
- if freekey != 0 {
- (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_freekey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_freekey)
+ if __ccgo_fp_freekey != 0 {
+ (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
}
Xfree(tls, r)
}
-func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtfind_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtfind(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var c int32
@@ -133888,7 +133861,7 @@ func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -133976,9 +133949,11 @@ func X__tsearch_balance(tls *TLS, p uintptr) (r int32) {
return _rot(tls, p, n, BoolInt32(h0 < h1))
}
-func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
+type t__ccgo_fp__Xtsearch_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtsearch(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r1 uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r1) }()
}
var a [96]uintptr
@@ -133998,7 +133973,7 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
return n
}
@@ -134034,26 +134009,28 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
return r
}
-func _walk(tls *TLS, r uintptr, action uintptr, d int32) {
+func _walk(tls *TLS, r uintptr, __ccgo_fp_action uintptr, d int32) {
if !(r != 0) {
return
}
if (*Tnode1)(unsafe.Pointer(r)).Fh == int32(1) {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_leaf), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_leaf), d)
} else {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_preorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_postorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_endorder), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_preorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_postorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_endorder), d)
}
}
-func Xtwalk(tls *TLS, root uintptr, action uintptr) {
+type t__ccgo_fp__Xtwalk_1 = func(*TLS, uintptr, int32, int32)
+
+func Xtwalk(tls *TLS, root uintptr, __ccgo_fp_action uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v action=%v, (%v:)", tls, root, action, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_action=%v, (%v:)", tls, root, __ccgo_fp_action, origin(2))
}
- _walk(tls, root, action, 0)
+ _walk(tls, root, __ccgo_fp_action, 0)
}
func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
@@ -134077,8 +134054,6 @@ func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_ppoll), int64(fds), Int64FromUint64(n), int64(v1), int64(Int32FromInt32(0)), int64(Int32FromInt32(_NSIG)/Int32FromInt32(8)), 0))))
}
-type t__ucontext3 = Tucontext_t5
-
func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v fds=%v n=%v to=%v mask=%v, (%v:)", tls, fds, n, to, mask, origin(2))
@@ -134114,8 +134089,6 @@ func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r int32
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_ppoll), int64(fds), Int64FromUint64(n), int64(v3), int64(mask), int64(Int32FromInt32(_NSIG)/Int32FromInt32(8)), 0))))
}
-type t__ucontext4 = Tucontext_t4
-
func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts uintptr, mask uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v ts=%v mask=%v, (%v:)", tls, n, rfds, wfds, efds, ts, mask, origin(2))
@@ -134156,14 +134129,6 @@ func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts ui
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_pselect6), int64(n), int64(rfds), int64(wfds), int64(efds), int64(v3), int64(bp+16)))))
}
-type Tucontext_t6 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t
-}
-
func Xselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, tv uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v tv=%v, (%v:)", tls, n, rfds, wfds, efds, tv, origin(2))
@@ -134659,14 +134624,6 @@ func Xsigaltstack(tls *TLS, ss uintptr, old uintptr) (r int32) {
const SST_SIZE = 8
-type Tucontext_t7 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t1
-}
-
func Xsigandset(tls *TLS, dest uintptr, left uintptr, right uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v dest=%v left=%v right=%v, (%v:)", tls, dest, left, right, origin(2))
@@ -143903,9 +143860,11 @@ func Xatoll(tls *TLS, s uintptr) (r int64) {
return v5
}
-func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xbsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, key, base, nel, width, cmp, origin(2))
+ trc("tls=%v key=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, base, nel, width, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var sign int32
@@ -143913,7 +143872,7 @@ func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, c
_, _ = sign, try
for nel > uint64(0) {
try = base + uintptr(width*(nel/uint64(2)))
- sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, try)
+ sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, try)
if sign < 0 {
nel /= uint64(2)
} else {
@@ -144174,7 +144133,7 @@ func _shr(tls *TLS, p uintptr, n int32) {
*(*Tsize_t)(unsafe.Pointer(p + 1*8)) >>= Uint64FromInt32(n)
}
-func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
+func _sift(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
bp := tls.Alloc(912)
defer tls.Free(912)
var i, v1, v2 int32
@@ -144186,10 +144145,10 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
for pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
break
}
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, rt, arg) >= 0 {
v1 = i
i++
(*(*[113]uintptr)(unsafe.Pointer(bp)))[v1] = lf
@@ -144206,7 +144165,7 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
_cycle(tls, width, bp, i)
}
-func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
+func _trinkle(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
bp := tls.Alloc(928)
defer tls.Free(928)
var i, trail, v1 int32
@@ -144220,13 +144179,13 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
(*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0] = head
for (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[int32(1)] != uint64(0) {
stepson = head - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift)*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
break
}
if !(trusty != 0) && pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, stepson, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, stepson, arg) >= 0 {
break
}
}
@@ -144241,13 +144200,15 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
}
if !(trusty != 0) {
_cycle(tls, width, bp+16, i)
- _sift(tls, head, width, cmp, arg, pshift, lp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, lp)
}
}
-func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__X__qsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
bp := tls.Alloc(784)
defer tls.Free(784)
@@ -144285,14 +144246,14 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
}
for head < high {
if (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0]&uint64(3) == uint64(3) {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
_shr(tls, bp+768, int32(2))
pshift += int32(2)
} else {
if (*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift-int32(1)] >= Uint64FromInt64(int64(high)-int64(head)) {
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
} else {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
}
if pshift == int32(1) {
_shl(tls, bp+768, int32(1))
@@ -144305,7 +144266,7 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
head += uintptr(width)
}
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
for pshift != int32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[int32(1)] != uint64(0) {
if pshift <= int32(1) {
trail = _pntz(tls, bp+768)
@@ -144316,31 +144277,35 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
pshift -= int32(2)
*(*Tsize_t)(unsafe.Pointer(bp + 768)) ^= uint64(7)
_shr(tls, bp+768, int32(1))
- _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
+ _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
_shl(tls, bp+768, int32(1))
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
- _trinkle(tls, head-uintptr(width), width, cmp, arg, bp+768, pshift, int32(1), bp)
+ _trinkle(tls, head-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift, int32(1), bp)
}
head -= uintptr(width)
}
}
-func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__Xqsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
- X__qsort_r(tls, base, nel, width, cmp, arg)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp_cmp, arg)
}
func _wrapper_cmp(tls *TLS, v1 uintptr, v2 uintptr, cmp uintptr) (r int32) {
return (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, v1, v2)
}
-func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun) {
+type t__ccgo_fp__Xqsort_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, base, nel, width, cmp, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, origin(2))
}
- X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), cmp)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), __ccgo_fp_cmp)
}
func _strtox(tls *TLS, s uintptr, p uintptr, prec int32) (r float64) {
diff --git a/vendor/modernc.org/libc/ccgo_linux_loong64.go b/vendor/modernc.org/libc/ccgo_linux_loong64.go
index 48b0b26f8..73ee111eb 100644
--- a/vendor/modernc.org/libc/ccgo_linux_loong64.go
+++ b/vendor/modernc.org/libc/ccgo_linux_loong64.go
@@ -24905,9 +24905,13 @@ func Xrewinddir(tls *TLS, dir uintptr) {
___unlock(tls, dir+20)
}
-func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r int32) {
+type t__ccgo_fp__Xscandir_2 = func(*TLS, uintptr) int32
+
+type t__ccgo_fp__Xscandir_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xscandir(tls *TLS, path uintptr, res uintptr, __ccgo_fp_sel uintptr, __ccgo_fp_cmp uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v res=%v sel=%v cmp=%v, (%v:)", tls, path, res, sel, cmp, origin(2))
+ trc("tls=%v path=%v res=%v __ccgo_fp_sel=%v __ccgo_fp_cmp=%v, (%v:)", tls, path, res, __ccgo_fp_sel, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var cnt, len1, v2, v3 Tsize_t
@@ -24929,7 +24933,7 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
if !(v1 != 0) {
break
}
- if sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{sel})))(tls, de) != 0) {
+ if __ccgo_fp_sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_sel})))(tls, de) != 0) {
continue
}
if cnt >= len1 {
@@ -24967,8 +24971,8 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
return -int32(1)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
- if cmp != 0 {
- Xqsort(tls, names, cnt, uint64(8), cmp)
+ if __ccgo_fp_cmp != 0 {
+ Xqsort(tls, names, cnt, uint64(8), __ccgo_fp_cmp)
}
*(*uintptr)(unsafe.Pointer(res)) = names
return Int32FromUint64(cnt)
@@ -25714,9 +25718,11 @@ func X__funcs_on_quick_exit(tls *TLS) {
}
}
-func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
+type t__ccgo_fp__Xat_quick_exit_0 = func(*TLS)
+
+func Xat_quick_exit(tls *TLS, __ccgo_fp_func uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v func1=%v, (%v:)", tls, func1, origin(2))
+ trc("tls=%v __ccgo_fp_func=%v, (%v:)", tls, __ccgo_fp_func, origin(2))
defer func() { trc("-> %v", r1) }()
}
var r, v1 int32
@@ -25728,7 +25734,7 @@ func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
} else {
v1 = _count
_count++
- _funcs[v1] = func1
+ _funcs[v1] = __ccgo_fp_func
}
___unlock(tls, uintptr(unsafe.Pointer(&_lock)))
return r
@@ -28104,15 +28110,17 @@ type TFTW = struct {
Flevel int32
}
-func Xftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32) {
+type t__ccgo_fp__Xftw_1 = func(*TLS, uintptr, uintptr, int32) int32
+
+func Xftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v, (%v:)", tls, path, fn, fd_limit, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, origin(2))
defer func() { trc("-> %v", r) }()
}
/* The following cast assumes that calling a function with one
* argument more than it needs behaves as expected. This is
* actually undefined, but works on all real-world machines. */
- return Xnftw(tls, path, fn, fd_limit, int32(FTW_PHYS))
+ return Xnftw(tls, path, __ccgo_fp_fn, fd_limit, int32(FTW_PHYS))
}
const STATX_ALL = 4095
@@ -115115,7 +115123,7 @@ type Thistory = struct {
Fbase int32
}
-func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
+func _do_nftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
bp := tls.Alloc(176)
defer tls.Free(176)
var d, de, v10 uintptr
@@ -115221,7 +115229,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
}
if v8 = !(flags&Int32FromInt32(FTW_DEPTH) != 0); v8 {
- v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+160)
+ v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+160)
r = v7
}
if v8 && v7 != 0 {
@@ -115262,7 +115270,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*int8)(unsafe.Pointer(path + uintptr(j))) = int8('/')
Xstrcpy(tls, path+uintptr(j)+uintptr(1), de+19)
- v11 = _do_nftw(tls, path, fn, fd_limit-int32(1), flags, bp+128)
+ v11 = _do_nftw(tls, path, __ccgo_fp_fn, fd_limit-int32(1), flags, bp+128)
r = v11
if v11 != 0 {
Xclosedir(tls, d)
@@ -115277,7 +115285,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*int8)(unsafe.Pointer(path + uintptr(l))) = 0
if v13 = flags&int32(FTW_DEPTH) != 0; v13 {
- v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+160)
+ v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+160)
r = v12
}
if v13 && v12 != 0 {
@@ -115286,9 +115294,11 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
return 0
}
-func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1 int32) {
+type t__ccgo_fp__Xnftw_1 = func(*TLS, uintptr, uintptr, int32, uintptr) int32
+
+func Xnftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, fn, fd_limit, flags, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, flags, origin(2))
defer func() { trc("-> %v", r1) }()
}
bp := tls.Alloc(4112)
@@ -115308,7 +115318,7 @@ func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1
}
Xmemcpy(tls, bp+4, path, l+uint64(1))
_pthread_setcancelstate(tls, int32(PTHREAD_CANCEL_DISABLE), bp)
- r = _do_nftw(tls, bp+4, fn, fd_limit, flags, UintptrFromInt32(0))
+ r = _do_nftw(tls, bp+4, __ccgo_fp_fn, fd_limit, flags, UintptrFromInt32(0))
_pthread_setcancelstate(tls, *(*int32)(unsafe.Pointer(bp)), uintptr(0))
return r
}
@@ -118204,13 +118214,6 @@ const _ns_uop_delete = 0
const _ns_uop_add = 1
const _ns_uop_max = 2
-type Tns_tsig_key1 = struct {
- Fname [1025]int8
- Falg [1025]int8
- Fdata uintptr
- Flen1 int32
-}
-
type Tns_tsig_key = struct {
Fname [1025]int8
Falg [1025]int8
@@ -118218,14 +118221,6 @@ type Tns_tsig_key = struct {
Flen1 int32
}
-type Tns_tcp_tsig_state1 = struct {
- Fcounter int32
- Fkey uintptr
- Fctx uintptr
- Fsig [512]uint8
- Fsiglen int32
-}
-
type Tns_tcp_tsig_state = struct {
Fcounter int32
Fkey uintptr
@@ -118875,9 +118870,11 @@ type Tresolvconf = struct {
Ftimeout uint32
}
-func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__dns_parse_2 = func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32
+
+func X__dns_parse(tls *TLS, r uintptr, rlen int32, __ccgo_fp_callback uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v r=%v rlen=%v callback=%v ctx=%v, (%v:)", tls, r, rlen, callback, ctx, origin(2))
+ trc("tls=%v r=%v rlen=%v __ccgo_fp_callback=%v ctx=%v, (%v:)", tls, r, rlen, __ccgo_fp_callback, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var ancount, len1, qdcount, v1, v2 int32
@@ -118923,7 +118920,7 @@ func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr
if int64(len1+int32(10)) > int64(r+uintptr(rlen))-int64(p) {
return -int32(1)
}
- if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
+ if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
return -int32(1)
}
p += uintptr(int32(10) + len1)
@@ -121027,10 +121024,6 @@ func Xherror(tls *TLS, msg uintptr) {
Xfprintf(tls, uintptr(unsafe.Pointer(&X__stderr_FILE)), __ccgo_ts+1062, VaList(bp+8, v1, v2, Xhstrerror(tls, *(*int32)(unsafe.Pointer(X__h_errno_location(tls))))))
}
-type Tcpu_set_t1 = struct {
- F__bits [16]uint64
-}
-
var _msgs1 = [84]int8{'H', 'o', 's', 't', ' ', 'n', 'o', 't', ' ', 'f', 'o', 'u', 'n', 'd', 0, 'T', 'r', 'y', ' ', 'a', 'g', 'a', 'i', 'n', 0, 'N', 'o', 'n', '-', 'r', 'e', 'c', 'o', 'v', 'e', 'r', 'a', 'b', 'l', 'e', ' ', 'e', 'r', 'r', 'o', 'r', 0, 'A', 'd', 'd', 'r', 'e', 's', 's', ' ', 'n', 'o', 't', ' ', 'a', 'v', 'a', 'i', 'l', 'a', 'b', 'l', 'e', 0, 0, 'U', 'n', 'k', 'n', 'o', 'w', 'n', ' ', 'e', 'r', 'r', 'o', 'r'}
func Xhstrerror(tls *TLS, ecode int32) (r uintptr) {
@@ -122818,7 +122811,7 @@ func X__lookup_serv(tls *TLS, buf uintptr, name uintptr, proto int32, socktype i
return v22
}
-func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, cb uintptr, ctx uintptr) (r1 int32) {
+func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
bp := tls.Alloc(8192)
defer tls.Free(8192)
var h uintptr
@@ -122908,7 +122901,7 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
if Int32FromUint16((*Tnlmsghdr)(unsafe.Pointer(h)).Fnlmsg_type) == int32(NLMSG_ERROR) {
return -int32(1)
}
- ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cb})))(tls, ctx, h)
+ ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cb})))(tls, ctx, h)
if ret != 0 {
return ret
}
@@ -122921,9 +122914,11 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
return r1
}
-func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__rtnetlink_enumerate_2 = func(*TLS, uintptr, uintptr) int32
+
+func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v link_af=%v addr_af=%v cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, cb, ctx, origin(2))
+ trc("tls=%v link_af=%v addr_af=%v __ccgo_fp_cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, __ccgo_fp_cb, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var fd, r int32
@@ -122932,9 +122927,9 @@ func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr,
if fd < 0 {
return -int32(1)
}
- r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, __ccgo_fp_cb, ctx)
if !(r != 0) {
- r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, __ccgo_fp_cb, ctx)
}
X__syscall1(tls, int64(SYS_close), int64(fd))
return r
@@ -128041,7 +128036,7 @@ func _append(tls *TLS, tail uintptr, name uintptr, len1 Tsize_t, mark int32) (r
return 0
}
-func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, errfunc uintptr, tail uintptr) (r1 int32) {
+func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, tail uintptr) (r1 int32) {
bp := tls.Alloc(128)
defer tls.Free(128)
var de, dir, p, p2, v11, v2, v7, v8 uintptr
@@ -128150,7 +128145,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
}
if !(type1 != 0) && Xlstat(tls, buf, bp) != 0 {
- if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
return 0
@@ -128187,7 +128182,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
dir = Xopendir(tls, v7)
if !(dir != 0) {
- if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
+ if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
return int32(GLOB_ABORTED)
}
return 0
@@ -128239,7 +128234,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
} else {
v11 = __ccgo_ts
}
- r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, errfunc, tail)
+ r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, __ccgo_fp_errfunc, tail)
if r != 0 {
Xclosedir(tls, dir)
return r
@@ -128250,7 +128245,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
*(*int8)(unsafe.Pointer(p2)) = saved_sep
}
Xclosedir(tls, dir)
- if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
@@ -128362,9 +128357,11 @@ func _expand_tilde(tls *TLS, pat uintptr, buf uintptr, pos uintptr) (r int32) {
return 0
}
-func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r int32) {
+type t__ccgo_fp__Xglob_2 = func(*TLS, uintptr, int32) int32
+
+func Xglob(tls *TLS, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, g uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v pat=%v flags=%v errfunc=%v g=%v, (%v:)", tls, pat, flags, errfunc, g, origin(2))
+ trc("tls=%v pat=%v flags=%v __ccgo_fp_errfunc=%v g=%v, (%v:)", tls, pat, flags, __ccgo_fp_errfunc, g, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(4128)
@@ -128390,8 +128387,8 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
}
offs = v1
error1 = 0
- if !(errfunc != 0) {
- errfunc = __ccgo_fp(_ignore_err)
+ if !(__ccgo_fp_errfunc != 0) {
+ __ccgo_fp_errfunc = __ccgo_fp(_ignore_err)
}
if !(flags&Int32FromInt32(GLOB_APPEND) != 0) {
(*Tglob_t)(unsafe.Pointer(g)).Fgl_offs = offs
@@ -128410,7 +128407,7 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
error1 = _expand_tilde(tls, bp+4120, bp+16, bp+4112)
}
if !(error1 != 0) {
- error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, errfunc, bp+8)
+ error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, __ccgo_fp_errfunc, bp+8)
}
Xfree(tls, p)
}
@@ -133634,9 +133631,11 @@ func Xremque(tls *TLS, element uintptr) {
}
}
-func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -133652,7 +133651,7 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -133664,9 +133663,11 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
return Xmemcpy(tls, p+uintptr(n)*uintptr(v1), key, width)
}
-func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlfind_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -133682,7 +133683,7 @@ func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, co
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -133701,9 +133702,11 @@ type Tnode1 = struct {
Fh int32
}
-func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtdelete_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtdelete(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var a [97]uintptr
@@ -133727,7 +133730,7 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
return uintptr(0)
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -133772,9 +133775,11 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
return parent
}
-func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
+type t__ccgo_fp__Xtdestroy_1 = func(*TLS, uintptr)
+
+func Xtdestroy(tls *TLS, root uintptr, __ccgo_fp_freekey uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v freekey=%v, (%v:)", tls, root, freekey, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_freekey=%v, (%v:)", tls, root, __ccgo_fp_freekey, origin(2))
}
var r uintptr
_ = r
@@ -133782,17 +133787,19 @@ func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
if r == uintptr(0) {
return
}
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), freekey)
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), freekey)
- if freekey != 0 {
- (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_freekey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_freekey)
+ if __ccgo_fp_freekey != 0 {
+ (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
}
Xfree(tls, r)
}
-func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtfind_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtfind(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var c int32
@@ -133806,7 +133813,7 @@ func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -133894,9 +133901,11 @@ func X__tsearch_balance(tls *TLS, p uintptr) (r int32) {
return _rot(tls, p, n, BoolInt32(h0 < h1))
}
-func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
+type t__ccgo_fp__Xtsearch_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtsearch(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r1 uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r1) }()
}
var a [96]uintptr
@@ -133916,7 +133925,7 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
return n
}
@@ -133952,26 +133961,28 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
return r
}
-func _walk(tls *TLS, r uintptr, action uintptr, d int32) {
+func _walk(tls *TLS, r uintptr, __ccgo_fp_action uintptr, d int32) {
if !(r != 0) {
return
}
if (*Tnode1)(unsafe.Pointer(r)).Fh == int32(1) {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_leaf), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_leaf), d)
} else {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_preorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_postorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_endorder), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_preorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_postorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_endorder), d)
}
}
-func Xtwalk(tls *TLS, root uintptr, action uintptr) {
+type t__ccgo_fp__Xtwalk_1 = func(*TLS, uintptr, int32, int32)
+
+func Xtwalk(tls *TLS, root uintptr, __ccgo_fp_action uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v action=%v, (%v:)", tls, root, action, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_action=%v, (%v:)", tls, root, __ccgo_fp_action, origin(2))
}
- _walk(tls, root, action, 0)
+ _walk(tls, root, __ccgo_fp_action, 0)
}
func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
@@ -143887,9 +143898,11 @@ func Xatoll(tls *TLS, s uintptr) (r int64) {
return v5
}
-func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xbsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, key, base, nel, width, cmp, origin(2))
+ trc("tls=%v key=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, base, nel, width, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var sign int32
@@ -143897,7 +143910,7 @@ func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, c
_, _ = sign, try
for nel > uint64(0) {
try = base + uintptr(width*(nel/uint64(2)))
- sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, try)
+ sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, try)
if sign < 0 {
nel /= uint64(2)
} else {
@@ -144167,7 +144180,7 @@ func _shr(tls *TLS, p uintptr, n int32) {
*(*Tsize_t)(unsafe.Pointer(p + 1*8)) >>= Uint64FromInt32(n)
}
-func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
+func _sift(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
bp := tls.Alloc(912)
defer tls.Free(912)
var i, v1, v2 int32
@@ -144179,10 +144192,10 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
for pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
break
}
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, rt, arg) >= 0 {
v1 = i
i++
(*(*[113]uintptr)(unsafe.Pointer(bp)))[v1] = lf
@@ -144199,7 +144212,7 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
_cycle(tls, width, bp, i)
}
-func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
+func _trinkle(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
bp := tls.Alloc(928)
defer tls.Free(928)
var i, trail, v1 int32
@@ -144213,13 +144226,13 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
(*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0] = head
for (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[int32(1)] != uint64(0) {
stepson = head - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift)*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
break
}
if !(trusty != 0) && pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, stepson, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, stepson, arg) >= 0 {
break
}
}
@@ -144234,13 +144247,15 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
}
if !(trusty != 0) {
_cycle(tls, width, bp+16, i)
- _sift(tls, head, width, cmp, arg, pshift, lp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, lp)
}
}
-func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__X__qsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
bp := tls.Alloc(784)
defer tls.Free(784)
@@ -144278,14 +144293,14 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
}
for head < high {
if (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0]&uint64(3) == uint64(3) {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
_shr(tls, bp+768, int32(2))
pshift += int32(2)
} else {
if (*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift-int32(1)] >= Uint64FromInt64(int64(high)-int64(head)) {
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
} else {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
}
if pshift == int32(1) {
_shl(tls, bp+768, int32(1))
@@ -144298,7 +144313,7 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
head += uintptr(width)
}
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
for pshift != int32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[int32(1)] != uint64(0) {
if pshift <= int32(1) {
trail = _pntz(tls, bp+768)
@@ -144309,31 +144324,35 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
pshift -= int32(2)
*(*Tsize_t)(unsafe.Pointer(bp + 768)) ^= uint64(7)
_shr(tls, bp+768, int32(1))
- _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
+ _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
_shl(tls, bp+768, int32(1))
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
- _trinkle(tls, head-uintptr(width), width, cmp, arg, bp+768, pshift, int32(1), bp)
+ _trinkle(tls, head-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift, int32(1), bp)
}
head -= uintptr(width)
}
}
-func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__Xqsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
- X__qsort_r(tls, base, nel, width, cmp, arg)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp_cmp, arg)
}
func _wrapper_cmp(tls *TLS, v1 uintptr, v2 uintptr, cmp uintptr) (r int32) {
return (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, v1, v2)
}
-func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun) {
+type t__ccgo_fp__Xqsort_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, base, nel, width, cmp, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, origin(2))
}
- X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), cmp)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), __ccgo_fp_cmp)
}
func _strtox(tls *TLS, s uintptr, p uintptr, prec int32) (r float64) {
diff --git a/vendor/modernc.org/libc/ccgo_linux_ppc64le.go b/vendor/modernc.org/libc/ccgo_linux_ppc64le.go
index 9244f4445..594b075ab 100644
--- a/vendor/modernc.org/libc/ccgo_linux_ppc64le.go
+++ b/vendor/modernc.org/libc/ccgo_linux_ppc64le.go
@@ -178,6 +178,7 @@ const __DBL_DIG__ = 15
const __DBL_HAS_DENORM__ = 1
const __DBL_HAS_INFINITY__ = 1
const __DBL_HAS_QUIET_NAN__ = 1
+const __DBL_IS_IEC_60559__ = 1
const __DBL_MANT_DIG__ = 53
const __DBL_MAX_10_EXP__ = 308
const __DBL_MAX_EXP__ = 1024
@@ -218,6 +219,7 @@ const __FLT128_EPSILON__ = 0
const __FLT128_HAS_DENORM__ = 1
const __FLT128_HAS_INFINITY__ = 1
const __FLT128_HAS_QUIET_NAN__ = 1
+const __FLT128_IS_IEC_60559__ = 1
const __FLT128_MANT_DIG__ = 113
const __FLT128_MAX_10_EXP__ = 4932
const __FLT128_MAX_EXP__ = 16384
@@ -233,6 +235,7 @@ const __FLT32X_EPSILON__ = 0
const __FLT32X_HAS_DENORM__ = 1
const __FLT32X_HAS_INFINITY__ = 1
const __FLT32X_HAS_QUIET_NAN__ = 1
+const __FLT32X_IS_IEC_60559__ = 1
const __FLT32X_MANT_DIG__ = 53
const __FLT32X_MAX_10_EXP__ = 308
const __FLT32X_MAX_EXP__ = 1024
@@ -248,6 +251,7 @@ const __FLT32_EPSILON__ = 0
const __FLT32_HAS_DENORM__ = 1
const __FLT32_HAS_INFINITY__ = 1
const __FLT32_HAS_QUIET_NAN__ = 1
+const __FLT32_IS_IEC_60559__ = 1
const __FLT32_MANT_DIG__ = 24
const __FLT32_MAX_10_EXP__ = 38
const __FLT32_MAX_EXP__ = 128
@@ -263,6 +267,7 @@ const __FLT64X_EPSILON__ = 0
const __FLT64X_HAS_DENORM__ = 1
const __FLT64X_HAS_INFINITY__ = 1
const __FLT64X_HAS_QUIET_NAN__ = 1
+const __FLT64X_IS_IEC_60559__ = 1
const __FLT64X_MANT_DIG__ = 113
const __FLT64X_MAX_10_EXP__ = 4932
const __FLT64X_MAX_EXP__ = 16384
@@ -278,6 +283,7 @@ const __FLT64_EPSILON__ = 0
const __FLT64_HAS_DENORM__ = 1
const __FLT64_HAS_INFINITY__ = 1
const __FLT64_HAS_QUIET_NAN__ = 1
+const __FLT64_IS_IEC_60559__ = 1
const __FLT64_MANT_DIG__ = 53
const __FLT64_MAX_10_EXP__ = 308
const __FLT64_MAX_EXP__ = 1024
@@ -295,6 +301,7 @@ const __FLT_EVAL_METHOD__ = 0
const __FLT_HAS_DENORM__ = 1
const __FLT_HAS_INFINITY__ = 1
const __FLT_HAS_QUIET_NAN__ = 1
+const __FLT_IS_IEC_60559__ = 1
const __FLT_MANT_DIG__ = 24
const __FLT_MAX_10_EXP__ = 38
const __FLT_MAX_EXP__ = 128
@@ -330,14 +337,15 @@ const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 = 1
const __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 = 1
const __GCC_IEC_559 = 2
const __GCC_IEC_559_COMPLEX = 2
+const __GNUC_EXECUTION_CHARSET_NAME = "UTF-8"
const __GNUC_MINOR__ = 2
-const __GNUC_PATCHLEVEL__ = 1
+const __GNUC_PATCHLEVEL__ = 0
const __GNUC_STDC_INLINE__ = 1
-const __GNUC__ = 10
-const __GXX_ABI_VERSION = 1014
+const __GNUC_WIDE_EXECUTION_CHARSET_NAME = "UTF-32LE"
+const __GNUC__ = 14
+const __GXX_ABI_VERSION = 1019
const __HAVE_BSWAP__ = 1
const __HAVE_SPECULATION_SAFE_VALUE = 1
-const __HTM__ = 1
const __INT16_MAX__ = 32767
const __INT32_MAX__ = 2147483647
const __INT32_TYPE__ = 0
@@ -373,6 +381,7 @@ const __LDBL_EPSILON__ = 0
const __LDBL_HAS_DENORM__ = 1
const __LDBL_HAS_INFINITY__ = 1
const __LDBL_HAS_QUIET_NAN__ = 1
+const __LDBL_IS_IEC_60559__ = 1
const __LDBL_MANT_DIG__ = 53
const __LDBL_MAX_10_EXP__ = 308
const __LDBL_MAX_EXP__ = 1024
@@ -411,6 +420,7 @@ const __RSQRTEF__ = 1
const __RSQRTE__ = 1
const __SCHAR_MAX__ = 127
const __SCHAR_WIDTH__ = 8
+const __SET_FPSCR_RN_RETURNS_FPSCR__ = 1
const __SHRT_MAX__ = 32767
const __SHRT_WIDTH__ = 16
const __SIG_ATOMIC_MAX__ = 2147483647
@@ -418,7 +428,9 @@ const __SIG_ATOMIC_MIN__ = -2147483648
const __SIG_ATOMIC_TYPE__ = 0
const __SIG_ATOMIC_WIDTH__ = 32
const __SIZEOF_DOUBLE__ = 8
+const __SIZEOF_FLOAT128__ = 16
const __SIZEOF_FLOAT__ = 4
+const __SIZEOF_IEEE128__ = 16
const __SIZEOF_INT128__ = 16
const __SIZEOF_INT__ = 4
const __SIZEOF_LONG_DOUBLE__ = 8
@@ -437,7 +449,6 @@ const __STDC_VERSION__ = 199901
const __STDC__ = 1
const __STRICT_ANSI__ = 1
const __STRUCT_PARM_ALIGN__ = 16
-const __TM_FENCE__ = 1
const __UINT16_MAX__ = 65535
const __UINT32_MAX__ = 4294967295
const __UINT64_MAX__ = 18446744073709551615
@@ -455,7 +466,7 @@ const __UINT_LEAST8_MAX__ = 255
const __USE_TIME_BITS64 = 1
const __VEC_ELEMENT_REG_ORDER__ = 1234
const __VEC__ = 10206
-const __VERSION__ = "10.2.1 20210110"
+const __VERSION__ = "14.2.0"
const __VSX__ = 1
const __WCHAR_MAX__ = 2147483647
const __WCHAR_MIN__ = -2147483648
@@ -25053,9 +25064,13 @@ func Xrewinddir(tls *TLS, dir uintptr) {
___unlock(tls, dir+20)
}
-func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r int32) {
+type t__ccgo_fp__Xscandir_2 = func(*TLS, uintptr) int32
+
+type t__ccgo_fp__Xscandir_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xscandir(tls *TLS, path uintptr, res uintptr, __ccgo_fp_sel uintptr, __ccgo_fp_cmp uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v res=%v sel=%v cmp=%v, (%v:)", tls, path, res, sel, cmp, origin(2))
+ trc("tls=%v path=%v res=%v __ccgo_fp_sel=%v __ccgo_fp_cmp=%v, (%v:)", tls, path, res, __ccgo_fp_sel, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var cnt, len1, v2, v3 Tsize_t
@@ -25077,7 +25092,7 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
if !(v1 != 0) {
break
}
- if sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{sel})))(tls, de) != 0) {
+ if __ccgo_fp_sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_sel})))(tls, de) != 0) {
continue
}
if cnt >= len1 {
@@ -25115,8 +25130,8 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
return -int32(1)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
- if cmp != 0 {
- Xqsort(tls, names, cnt, uint64(8), cmp)
+ if __ccgo_fp_cmp != 0 {
+ Xqsort(tls, names, cnt, uint64(8), __ccgo_fp_cmp)
}
*(*uintptr)(unsafe.Pointer(res)) = names
return Int32FromUint64(cnt)
@@ -25863,9 +25878,11 @@ func X__funcs_on_quick_exit(tls *TLS) {
}
}
-func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
+type t__ccgo_fp__Xat_quick_exit_0 = func(*TLS)
+
+func Xat_quick_exit(tls *TLS, __ccgo_fp_func uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v func1=%v, (%v:)", tls, func1, origin(2))
+ trc("tls=%v __ccgo_fp_func=%v, (%v:)", tls, __ccgo_fp_func, origin(2))
defer func() { trc("-> %v", r1) }()
}
var r, v1 int32
@@ -25877,7 +25894,7 @@ func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
} else {
v1 = _count
_count++
- _funcs[v1] = func1
+ _funcs[v1] = __ccgo_fp_func
}
___unlock(tls, uintptr(unsafe.Pointer(&_lock)))
return r
@@ -28398,15 +28415,17 @@ type TFTW = struct {
Flevel int32
}
-func Xftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32) {
+type t__ccgo_fp__Xftw_1 = func(*TLS, uintptr, uintptr, int32) int32
+
+func Xftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v, (%v:)", tls, path, fn, fd_limit, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, origin(2))
defer func() { trc("-> %v", r) }()
}
/* The following cast assumes that calling a function with one
* argument more than it needs behaves as expected. This is
* actually undefined, but works on all real-world machines. */
- return Xnftw(tls, path, fn, fd_limit, int32(FTW_PHYS))
+ return Xnftw(tls, path, __ccgo_fp_fn, fd_limit, int32(FTW_PHYS))
}
const STATX_ALL = 4095
@@ -31343,14 +31362,6 @@ func Xfremovexattr(tls *TLS, fd int32, name uintptr) (r int32) {
return int32(X__syscall_ret(tls, Uint64FromInt64(X__syscall2(tls, int64(SYS_fremovexattr), int64(fd), int64(name)))))
}
-type Tucontext_t2 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t
-}
-
func _dummy4(tls *TLS, msg uintptr, lm uintptr) (r uintptr) {
return msg
}
@@ -114310,15 +114321,7 @@ func Xgethostid(tls *TLS) (r int64) {
const optpos = 0
-type Tucontext_t3 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t1
-}
-
-type t__ucontext1 = Tucontext_t3
+type t__ucontext1 = Tucontext_t1
func X__getopt_msg(tls *TLS, a uintptr, b uintptr, c uintptr, l Tsize_t) {
if __ccgo_strace {
@@ -115466,7 +115469,7 @@ type Thistory = struct {
Fbase int32
}
-func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
+func _do_nftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
bp := tls.Alloc(192)
defer tls.Free(192)
var d, de, v10 uintptr
@@ -115572,7 +115575,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
}
if v8 = !(flags&Int32FromInt32(FTW_DEPTH) != 0); v8 {
- v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+176)
+ v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+176)
r = v7
}
if v8 && v7 != 0 {
@@ -115613,7 +115616,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*uint8)(unsafe.Pointer(path + uintptr(j))) = uint8('/')
Xstrcpy(tls, path+uintptr(j)+uintptr(1), de+19)
- v11 = _do_nftw(tls, path, fn, fd_limit-int32(1), flags, bp+144)
+ v11 = _do_nftw(tls, path, __ccgo_fp_fn, fd_limit-int32(1), flags, bp+144)
r = v11
if v11 != 0 {
Xclosedir(tls, d)
@@ -115628,7 +115631,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*uint8)(unsafe.Pointer(path + uintptr(l))) = uint8(0)
if v13 = flags&int32(FTW_DEPTH) != 0; v13 {
- v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+176)
+ v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+176)
r = v12
}
if v13 && v12 != 0 {
@@ -115637,9 +115640,11 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
return 0
}
-func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1 int32) {
+type t__ccgo_fp__Xnftw_1 = func(*TLS, uintptr, uintptr, int32, uintptr) int32
+
+func Xnftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, fn, fd_limit, flags, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, flags, origin(2))
defer func() { trc("-> %v", r1) }()
}
bp := tls.Alloc(4112)
@@ -115659,7 +115664,7 @@ func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1
}
Xmemcpy(tls, bp+4, path, l+uint64(1))
_pthread_setcancelstate(tls, int32(PTHREAD_CANCEL_DISABLE), bp)
- r = _do_nftw(tls, bp+4, fn, fd_limit, flags, UintptrFromInt32(0))
+ r = _do_nftw(tls, bp+4, __ccgo_fp_fn, fd_limit, flags, UintptrFromInt32(0))
_pthread_setcancelstate(tls, *(*int32)(unsafe.Pointer(bp)), uintptr(0))
return r
}
@@ -116439,8 +116444,6 @@ type Tsockaddr_storage = struct {
F__ss_align uint64
}
-type t__ucontext2 = Tucontext_t2
-
var _lock2 [1]int32
var _log_ident [32]uint8
var _log_opt int32
@@ -116878,14 +116881,6 @@ const SA = 194
const SB = 244
const bittab = 0
-type Tucontext_t4 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t
-}
-
func Xbtowc(tls *TLS, c int32) (r Twint_t) {
if __ccgo_strace {
trc("tls=%v c=%v, (%v:)", tls, c, origin(2))
@@ -118701,13 +118696,6 @@ const _ns_uop_delete = 0
const _ns_uop_add = 1
const _ns_uop_max = 2
-type Tns_tsig_key1 = struct {
- Fname [1025]uint8
- Falg [1025]uint8
- Fdata uintptr
- Flen1 int32
-}
-
type Tns_tsig_key = struct {
Fname [1025]uint8
Falg [1025]uint8
@@ -118715,14 +118703,6 @@ type Tns_tsig_key = struct {
Flen1 int32
}
-type Tns_tcp_tsig_state1 = struct {
- Fcounter int32
- Fkey uintptr
- Fctx uintptr
- Fsig [512]uint8
- Fsiglen int32
-}
-
type Tns_tcp_tsig_state = struct {
Fcounter int32
Fkey uintptr
@@ -119372,9 +119352,11 @@ type Tresolvconf = struct {
Ftimeout uint32
}
-func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__dns_parse_2 = func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32
+
+func X__dns_parse(tls *TLS, r uintptr, rlen int32, __ccgo_fp_callback uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v r=%v rlen=%v callback=%v ctx=%v, (%v:)", tls, r, rlen, callback, ctx, origin(2))
+ trc("tls=%v r=%v rlen=%v __ccgo_fp_callback=%v ctx=%v, (%v:)", tls, r, rlen, __ccgo_fp_callback, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var ancount, len1, qdcount, v1, v2 int32
@@ -119420,7 +119402,7 @@ func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr
if int64(len1+int32(10)) > int64(r+uintptr(rlen))-int64(p) {
return -int32(1)
}
- if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
+ if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
return -int32(1)
}
p += uintptr(int32(10) + len1)
@@ -121638,18 +121620,6 @@ func Xherror(tls *TLS, msg uintptr) {
Xfprintf(tls, uintptr(unsafe.Pointer(&X__stderr_FILE)), __ccgo_ts+1114, VaList(bp+8, v1, v2, Xhstrerror(tls, *(*int32)(unsafe.Pointer(X__h_errno_location(tls))))))
}
-type Tcpu_set_t1 = struct {
- F__bits [16]uint64
-}
-
-type Tucontext_t5 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t1
-}
-
var _msgs1 = [84]uint8{'H', 'o', 's', 't', ' ', 'n', 'o', 't', ' ', 'f', 'o', 'u', 'n', 'd', 0, 'T', 'r', 'y', ' ', 'a', 'g', 'a', 'i', 'n', 0, 'N', 'o', 'n', '-', 'r', 'e', 'c', 'o', 'v', 'e', 'r', 'a', 'b', 'l', 'e', ' ', 'e', 'r', 'r', 'o', 'r', 0, 'A', 'd', 'd', 'r', 'e', 's', 's', ' ', 'n', 'o', 't', ' ', 'a', 'v', 'a', 'i', 'l', 'a', 'b', 'l', 'e', 0, 0, 'U', 'n', 'k', 'n', 'o', 'w', 'n', ' ', 'e', 'r', 'r', 'o', 'r'}
func Xhstrerror(tls *TLS, ecode int32) (r uintptr) {
@@ -123461,7 +123431,7 @@ func X__lookup_serv(tls *TLS, buf uintptr, name uintptr, proto int32, socktype i
return v22
}
-func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, cb uintptr, ctx uintptr) (r1 int32) {
+func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
bp := tls.Alloc(8192)
defer tls.Free(8192)
var h uintptr
@@ -123551,7 +123521,7 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
if Int32FromUint16((*Tnlmsghdr)(unsafe.Pointer(h)).Fnlmsg_type) == int32(NLMSG_ERROR) {
return -int32(1)
}
- ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cb})))(tls, ctx, h)
+ ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cb})))(tls, ctx, h)
if ret != 0 {
return ret
}
@@ -123564,9 +123534,11 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
return r1
}
-func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__rtnetlink_enumerate_2 = func(*TLS, uintptr, uintptr) int32
+
+func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v link_af=%v addr_af=%v cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, cb, ctx, origin(2))
+ trc("tls=%v link_af=%v addr_af=%v __ccgo_fp_cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, __ccgo_fp_cb, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var fd, r int32
@@ -123575,9 +123547,9 @@ func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr,
if fd < 0 {
return -int32(1)
}
- r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, __ccgo_fp_cb, ctx)
if !(r != 0) {
- r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, __ccgo_fp_cb, ctx)
}
X__syscall1(tls, int64(SYS_close), int64(fd))
return r
@@ -128958,7 +128930,7 @@ func _append(tls *TLS, tail uintptr, name uintptr, len1 Tsize_t, mark int32) (r
return 0
}
-func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, errfunc uintptr, tail uintptr) (r1 int32) {
+func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, tail uintptr) (r1 int32) {
bp := tls.Alloc(144)
defer tls.Free(144)
var de, dir, p, p2, v11, v2, v7, v8 uintptr
@@ -129067,7 +129039,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
}
if !(type1 != 0) && Xlstat(tls, buf, bp) != 0 {
- if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
return 0
@@ -129104,7 +129076,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
dir = Xopendir(tls, v7)
if !(dir != 0) {
- if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
+ if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
return int32(GLOB_ABORTED)
}
return 0
@@ -129156,7 +129128,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
} else {
v11 = __ccgo_ts
}
- r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, errfunc, tail)
+ r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, __ccgo_fp_errfunc, tail)
if r != 0 {
Xclosedir(tls, dir)
return r
@@ -129167,7 +129139,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
*(*uint8)(unsafe.Pointer(p2)) = saved_sep
}
Xclosedir(tls, dir)
- if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
@@ -129279,9 +129251,11 @@ func _expand_tilde(tls *TLS, pat uintptr, buf uintptr, pos uintptr) (r int32) {
return 0
}
-func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r int32) {
+type t__ccgo_fp__Xglob_2 = func(*TLS, uintptr, int32) int32
+
+func Xglob(tls *TLS, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, g uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v pat=%v flags=%v errfunc=%v g=%v, (%v:)", tls, pat, flags, errfunc, g, origin(2))
+ trc("tls=%v pat=%v flags=%v __ccgo_fp_errfunc=%v g=%v, (%v:)", tls, pat, flags, __ccgo_fp_errfunc, g, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(4128)
@@ -129307,8 +129281,8 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
}
offs = v1
error1 = 0
- if !(errfunc != 0) {
- errfunc = __ccgo_fp(_ignore_err)
+ if !(__ccgo_fp_errfunc != 0) {
+ __ccgo_fp_errfunc = __ccgo_fp(_ignore_err)
}
if !(flags&Int32FromInt32(GLOB_APPEND) != 0) {
(*Tglob_t)(unsafe.Pointer(g)).Fgl_offs = offs
@@ -129327,7 +129301,7 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
error1 = _expand_tilde(tls, bp+4120, bp+16, bp+4112)
}
if !(error1 != 0) {
- error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, errfunc, bp+8)
+ error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, __ccgo_fp_errfunc, bp+8)
}
Xfree(tls, p)
}
@@ -134551,9 +134525,11 @@ func Xremque(tls *TLS, element uintptr) {
}
}
-func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -134569,7 +134545,7 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -134581,9 +134557,11 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
return Xmemcpy(tls, p+uintptr(n)*uintptr(v1), key, width)
}
-func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlfind_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -134599,7 +134577,7 @@ func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, co
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -134618,9 +134596,11 @@ type Tnode1 = struct {
Fh int32
}
-func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtdelete_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtdelete(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var a [97]uintptr
@@ -134644,7 +134624,7 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
return uintptr(0)
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -134689,9 +134669,11 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
return parent
}
-func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
+type t__ccgo_fp__Xtdestroy_1 = func(*TLS, uintptr)
+
+func Xtdestroy(tls *TLS, root uintptr, __ccgo_fp_freekey uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v freekey=%v, (%v:)", tls, root, freekey, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_freekey=%v, (%v:)", tls, root, __ccgo_fp_freekey, origin(2))
}
var r uintptr
_ = r
@@ -134699,17 +134681,19 @@ func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
if r == uintptr(0) {
return
}
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), freekey)
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), freekey)
- if freekey != 0 {
- (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_freekey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_freekey)
+ if __ccgo_fp_freekey != 0 {
+ (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
}
Xfree(tls, r)
}
-func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtfind_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtfind(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var c int32
@@ -134723,7 +134707,7 @@ func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -134811,9 +134795,11 @@ func X__tsearch_balance(tls *TLS, p uintptr) (r int32) {
return _rot(tls, p, n, BoolInt32(h0 < h1))
}
-func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
+type t__ccgo_fp__Xtsearch_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtsearch(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r1 uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r1) }()
}
var a [96]uintptr
@@ -134833,7 +134819,7 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
return n
}
@@ -134869,26 +134855,28 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
return r
}
-func _walk(tls *TLS, r uintptr, action uintptr, d int32) {
+func _walk(tls *TLS, r uintptr, __ccgo_fp_action uintptr, d int32) {
if !(r != 0) {
return
}
if (*Tnode1)(unsafe.Pointer(r)).Fh == int32(1) {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_leaf), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_leaf), d)
} else {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_preorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_postorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_endorder), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_preorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_postorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_endorder), d)
}
}
-func Xtwalk(tls *TLS, root uintptr, action uintptr) {
+type t__ccgo_fp__Xtwalk_1 = func(*TLS, uintptr, int32, int32)
+
+func Xtwalk(tls *TLS, root uintptr, __ccgo_fp_action uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v action=%v, (%v:)", tls, root, action, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_action=%v, (%v:)", tls, root, __ccgo_fp_action, origin(2))
}
- _walk(tls, root, action, 0)
+ _walk(tls, root, __ccgo_fp_action, 0)
}
func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
@@ -134899,8 +134887,6 @@ func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_poll), int64(fds), Int64FromUint64(n), int64(timeout), 0, 0, 0))))
}
-type t__ucontext3 = Tucontext_t5
-
func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v fds=%v n=%v to=%v mask=%v, (%v:)", tls, fds, n, to, mask, origin(2))
@@ -134936,8 +134922,6 @@ func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r int32
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_ppoll), int64(fds), Int64FromUint64(n), int64(v3), int64(mask), int64(Int32FromInt32(_NSIG)/Int32FromInt32(8)), 0))))
}
-type t__ucontext4 = Tucontext_t4
-
func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts uintptr, mask uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v ts=%v mask=%v, (%v:)", tls, n, rfds, wfds, efds, ts, mask, origin(2))
@@ -134978,14 +134962,6 @@ func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts ui
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_pselect6), int64(n), int64(rfds), int64(wfds), int64(efds), int64(v3), int64(bp+16)))))
}
-type Tucontext_t6 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t
-}
-
func Xselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, tv uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v tv=%v, (%v:)", tls, n, rfds, wfds, efds, tv, origin(2))
@@ -135505,14 +135481,6 @@ func Xsigaltstack(tls *TLS, ss uintptr, old uintptr) (r int32) {
const SST_SIZE = 8
-type Tucontext_t7 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_sigmask Tsigset_t
- Fuc_mcontext Tmcontext_t1
-}
-
func Xsigandset(tls *TLS, dest uintptr, left uintptr, right uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v dest=%v left=%v right=%v, (%v:)", tls, dest, left, right, origin(2))
@@ -144886,9 +144854,11 @@ func Xatoll(tls *TLS, s uintptr) (r int64) {
return v5
}
-func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xbsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, key, base, nel, width, cmp, origin(2))
+ trc("tls=%v key=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, base, nel, width, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var sign int32
@@ -144896,7 +144866,7 @@ func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, c
_, _ = sign, try
for nel > uint64(0) {
try = base + uintptr(width*(nel/uint64(2)))
- sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, try)
+ sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, try)
if sign < 0 {
nel /= uint64(2)
} else {
@@ -145166,7 +145136,7 @@ func _shr(tls *TLS, p uintptr, n int32) {
*(*Tsize_t)(unsafe.Pointer(p + 1*8)) >>= Uint64FromInt32(n)
}
-func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
+func _sift(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
bp := tls.Alloc(912)
defer tls.Free(912)
var i, v1, v2 int32
@@ -145178,10 +145148,10 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
for pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
break
}
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, rt, arg) >= 0 {
v1 = i
i++
(*(*[113]uintptr)(unsafe.Pointer(bp)))[v1] = lf
@@ -145198,7 +145168,7 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
_cycle(tls, width, bp, i)
}
-func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
+func _trinkle(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
bp := tls.Alloc(928)
defer tls.Free(928)
var i, trail, v1 int32
@@ -145212,13 +145182,13 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
(*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0] = head
for (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[int32(1)] != uint64(0) {
stepson = head - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift)*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
break
}
if !(trusty != 0) && pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, stepson, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, stepson, arg) >= 0 {
break
}
}
@@ -145233,13 +145203,15 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
}
if !(trusty != 0) {
_cycle(tls, width, bp+16, i)
- _sift(tls, head, width, cmp, arg, pshift, lp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, lp)
}
}
-func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__X__qsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
bp := tls.Alloc(784)
defer tls.Free(784)
@@ -145277,14 +145249,14 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
}
for head < high {
if (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0]&uint64(3) == uint64(3) {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
_shr(tls, bp+768, int32(2))
pshift += int32(2)
} else {
if (*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift-int32(1)] >= Uint64FromInt64(int64(high)-int64(head)) {
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
} else {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
}
if pshift == int32(1) {
_shl(tls, bp+768, int32(1))
@@ -145297,7 +145269,7 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
head += uintptr(width)
}
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
for pshift != int32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[int32(1)] != uint64(0) {
if pshift <= int32(1) {
trail = _pntz(tls, bp+768)
@@ -145308,31 +145280,35 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
pshift -= int32(2)
*(*Tsize_t)(unsafe.Pointer(bp + 768)) ^= uint64(7)
_shr(tls, bp+768, int32(1))
- _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
+ _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
_shl(tls, bp+768, int32(1))
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
- _trinkle(tls, head-uintptr(width), width, cmp, arg, bp+768, pshift, int32(1), bp)
+ _trinkle(tls, head-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift, int32(1), bp)
}
head -= uintptr(width)
}
}
-func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__Xqsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
- X__qsort_r(tls, base, nel, width, cmp, arg)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp_cmp, arg)
}
func _wrapper_cmp(tls *TLS, v1 uintptr, v2 uintptr, cmp uintptr) (r int32) {
return (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, v1, v2)
}
-func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun) {
+type t__ccgo_fp__Xqsort_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, base, nel, width, cmp, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, origin(2))
}
- X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), cmp)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), __ccgo_fp_cmp)
}
func _strtox(tls *TLS, s uintptr, p uintptr, prec int32) (r float64) {
diff --git a/vendor/modernc.org/libc/ccgo_linux_riscv64.go b/vendor/modernc.org/libc/ccgo_linux_riscv64.go
index 7a0edb178..5d28c7f72 100644
--- a/vendor/modernc.org/libc/ccgo_linux_riscv64.go
+++ b/vendor/modernc.org/libc/ccgo_linux_riscv64.go
@@ -24811,9 +24811,13 @@ func Xrewinddir(tls *TLS, dir uintptr) {
___unlock(tls, dir+20)
}
-func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r int32) {
+type t__ccgo_fp__Xscandir_2 = func(*TLS, uintptr) int32
+
+type t__ccgo_fp__Xscandir_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xscandir(tls *TLS, path uintptr, res uintptr, __ccgo_fp_sel uintptr, __ccgo_fp_cmp uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v res=%v sel=%v cmp=%v, (%v:)", tls, path, res, sel, cmp, origin(2))
+ trc("tls=%v path=%v res=%v __ccgo_fp_sel=%v __ccgo_fp_cmp=%v, (%v:)", tls, path, res, __ccgo_fp_sel, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var cnt, len1, v2, v3 Tsize_t
@@ -24835,7 +24839,7 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
if !(v1 != 0) {
break
}
- if sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{sel})))(tls, de) != 0) {
+ if __ccgo_fp_sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_sel})))(tls, de) != 0) {
continue
}
if cnt >= len1 {
@@ -24873,8 +24877,8 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
return -int32(1)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
- if cmp != 0 {
- Xqsort(tls, names, cnt, uint64(8), cmp)
+ if __ccgo_fp_cmp != 0 {
+ Xqsort(tls, names, cnt, uint64(8), __ccgo_fp_cmp)
}
*(*uintptr)(unsafe.Pointer(res)) = names
return Int32FromUint64(cnt)
@@ -25620,9 +25624,11 @@ func X__funcs_on_quick_exit(tls *TLS) {
}
}
-func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
+type t__ccgo_fp__Xat_quick_exit_0 = func(*TLS)
+
+func Xat_quick_exit(tls *TLS, __ccgo_fp_func uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v func1=%v, (%v:)", tls, func1, origin(2))
+ trc("tls=%v __ccgo_fp_func=%v, (%v:)", tls, __ccgo_fp_func, origin(2))
defer func() { trc("-> %v", r1) }()
}
var r, v1 int32
@@ -25634,7 +25640,7 @@ func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
} else {
v1 = _count
_count++
- _funcs[v1] = func1
+ _funcs[v1] = __ccgo_fp_func
}
___unlock(tls, uintptr(unsafe.Pointer(&_lock)))
return r
@@ -28198,15 +28204,17 @@ type TFTW = struct {
Flevel int32
}
-func Xftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32) {
+type t__ccgo_fp__Xftw_1 = func(*TLS, uintptr, uintptr, int32) int32
+
+func Xftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v, (%v:)", tls, path, fn, fd_limit, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, origin(2))
defer func() { trc("-> %v", r) }()
}
/* The following cast assumes that calling a function with one
* argument more than it needs behaves as expected. This is
* actually undefined, but works on all real-world machines. */
- return Xnftw(tls, path, fn, fd_limit, int32(FTW_PHYS))
+ return Xnftw(tls, path, __ccgo_fp_fn, fd_limit, int32(FTW_PHYS))
}
const STATX_ALL = 4095
@@ -115234,7 +115242,7 @@ type Thistory = struct {
Fbase int32
}
-func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
+func _do_nftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
bp := tls.Alloc(176)
defer tls.Free(176)
var d, de, v10 uintptr
@@ -115340,7 +115348,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
}
if v8 = !(flags&Int32FromInt32(FTW_DEPTH) != 0); v8 {
- v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+160)
+ v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+160)
r = v7
}
if v8 && v7 != 0 {
@@ -115381,7 +115389,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*uint8)(unsafe.Pointer(path + uintptr(j))) = uint8('/')
Xstrcpy(tls, path+uintptr(j)+uintptr(1), de+19)
- v11 = _do_nftw(tls, path, fn, fd_limit-int32(1), flags, bp+128)
+ v11 = _do_nftw(tls, path, __ccgo_fp_fn, fd_limit-int32(1), flags, bp+128)
r = v11
if v11 != 0 {
Xclosedir(tls, d)
@@ -115396,7 +115404,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*uint8)(unsafe.Pointer(path + uintptr(l))) = uint8(0)
if v13 = flags&int32(FTW_DEPTH) != 0; v13 {
- v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+160)
+ v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+160)
r = v12
}
if v13 && v12 != 0 {
@@ -115405,9 +115413,11 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
return 0
}
-func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1 int32) {
+type t__ccgo_fp__Xnftw_1 = func(*TLS, uintptr, uintptr, int32, uintptr) int32
+
+func Xnftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, fn, fd_limit, flags, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, flags, origin(2))
defer func() { trc("-> %v", r1) }()
}
bp := tls.Alloc(4112)
@@ -115427,7 +115437,7 @@ func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1
}
Xmemcpy(tls, bp+4, path, l+uint64(1))
_pthread_setcancelstate(tls, int32(PTHREAD_CANCEL_DISABLE), bp)
- r = _do_nftw(tls, bp+4, fn, fd_limit, flags, UintptrFromInt32(0))
+ r = _do_nftw(tls, bp+4, __ccgo_fp_fn, fd_limit, flags, UintptrFromInt32(0))
_pthread_setcancelstate(tls, *(*int32)(unsafe.Pointer(bp)), uintptr(0))
return r
}
@@ -118363,13 +118373,6 @@ const _ns_uop_delete = 0
const _ns_uop_add = 1
const _ns_uop_max = 2
-type Tns_tsig_key1 = struct {
- Fname [1025]uint8
- Falg [1025]uint8
- Fdata uintptr
- Flen1 int32
-}
-
type Tns_tsig_key = struct {
Fname [1025]uint8
Falg [1025]uint8
@@ -118377,14 +118380,6 @@ type Tns_tsig_key = struct {
Flen1 int32
}
-type Tns_tcp_tsig_state1 = struct {
- Fcounter int32
- Fkey uintptr
- Fctx uintptr
- Fsig [512]uint8
- Fsiglen int32
-}
-
type Tns_tcp_tsig_state = struct {
Fcounter int32
Fkey uintptr
@@ -119034,9 +119029,11 @@ type Tresolvconf = struct {
Ftimeout uint32
}
-func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__dns_parse_2 = func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32
+
+func X__dns_parse(tls *TLS, r uintptr, rlen int32, __ccgo_fp_callback uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v r=%v rlen=%v callback=%v ctx=%v, (%v:)", tls, r, rlen, callback, ctx, origin(2))
+ trc("tls=%v r=%v rlen=%v __ccgo_fp_callback=%v ctx=%v, (%v:)", tls, r, rlen, __ccgo_fp_callback, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var ancount, len1, qdcount, v1, v2 int32
@@ -119082,7 +119079,7 @@ func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr
if int64(len1+int32(10)) > int64(r+uintptr(rlen))-int64(p) {
return -int32(1)
}
- if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
+ if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
return -int32(1)
}
p += uintptr(int32(10) + len1)
@@ -121186,10 +121183,6 @@ func Xherror(tls *TLS, msg uintptr) {
Xfprintf(tls, uintptr(unsafe.Pointer(&X__stderr_FILE)), __ccgo_ts+1111, VaList(bp+8, v1, v2, Xhstrerror(tls, *(*int32)(unsafe.Pointer(X__h_errno_location(tls))))))
}
-type Tcpu_set_t1 = struct {
- F__bits [16]uint64
-}
-
var _msgs1 = [84]uint8{'H', 'o', 's', 't', ' ', 'n', 'o', 't', ' ', 'f', 'o', 'u', 'n', 'd', 0, 'T', 'r', 'y', ' ', 'a', 'g', 'a', 'i', 'n', 0, 'N', 'o', 'n', '-', 'r', 'e', 'c', 'o', 'v', 'e', 'r', 'a', 'b', 'l', 'e', ' ', 'e', 'r', 'r', 'o', 'r', 0, 'A', 'd', 'd', 'r', 'e', 's', 's', ' ', 'n', 'o', 't', ' ', 'a', 'v', 'a', 'i', 'l', 'a', 'b', 'l', 'e', 0, 0, 'U', 'n', 'k', 'n', 'o', 'w', 'n', ' ', 'e', 'r', 'r', 'o', 'r'}
func Xhstrerror(tls *TLS, ecode int32) (r uintptr) {
@@ -122977,7 +122970,7 @@ func X__lookup_serv(tls *TLS, buf uintptr, name uintptr, proto int32, socktype i
return v22
}
-func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, cb uintptr, ctx uintptr) (r1 int32) {
+func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
bp := tls.Alloc(8192)
defer tls.Free(8192)
var h uintptr
@@ -123067,7 +123060,7 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
if Int32FromUint16((*Tnlmsghdr)(unsafe.Pointer(h)).Fnlmsg_type) == int32(NLMSG_ERROR) {
return -int32(1)
}
- ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cb})))(tls, ctx, h)
+ ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cb})))(tls, ctx, h)
if ret != 0 {
return ret
}
@@ -123080,9 +123073,11 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
return r1
}
-func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__rtnetlink_enumerate_2 = func(*TLS, uintptr, uintptr) int32
+
+func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v link_af=%v addr_af=%v cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, cb, ctx, origin(2))
+ trc("tls=%v link_af=%v addr_af=%v __ccgo_fp_cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, __ccgo_fp_cb, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var fd, r int32
@@ -123091,9 +123086,9 @@ func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr,
if fd < 0 {
return -int32(1)
}
- r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, __ccgo_fp_cb, ctx)
if !(r != 0) {
- r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, __ccgo_fp_cb, ctx)
}
X__syscall1(tls, int64(SYS_close), int64(fd))
return r
@@ -128200,7 +128195,7 @@ func _append(tls *TLS, tail uintptr, name uintptr, len1 Tsize_t, mark int32) (r
return 0
}
-func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, errfunc uintptr, tail uintptr) (r1 int32) {
+func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, tail uintptr) (r1 int32) {
bp := tls.Alloc(128)
defer tls.Free(128)
var de, dir, p, p2, v11, v2, v7, v8 uintptr
@@ -128309,7 +128304,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
}
if !(type1 != 0) && Xlstat(tls, buf, bp) != 0 {
- if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
return 0
@@ -128346,7 +128341,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
dir = Xopendir(tls, v7)
if !(dir != 0) {
- if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
+ if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
return int32(GLOB_ABORTED)
}
return 0
@@ -128398,7 +128393,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
} else {
v11 = __ccgo_ts
}
- r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, errfunc, tail)
+ r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, __ccgo_fp_errfunc, tail)
if r != 0 {
Xclosedir(tls, dir)
return r
@@ -128409,7 +128404,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
*(*uint8)(unsafe.Pointer(p2)) = saved_sep
}
Xclosedir(tls, dir)
- if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
@@ -128521,9 +128516,11 @@ func _expand_tilde(tls *TLS, pat uintptr, buf uintptr, pos uintptr) (r int32) {
return 0
}
-func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r int32) {
+type t__ccgo_fp__Xglob_2 = func(*TLS, uintptr, int32) int32
+
+func Xglob(tls *TLS, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, g uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v pat=%v flags=%v errfunc=%v g=%v, (%v:)", tls, pat, flags, errfunc, g, origin(2))
+ trc("tls=%v pat=%v flags=%v __ccgo_fp_errfunc=%v g=%v, (%v:)", tls, pat, flags, __ccgo_fp_errfunc, g, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(4128)
@@ -128549,8 +128546,8 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
}
offs = v1
error1 = 0
- if !(errfunc != 0) {
- errfunc = __ccgo_fp(_ignore_err)
+ if !(__ccgo_fp_errfunc != 0) {
+ __ccgo_fp_errfunc = __ccgo_fp(_ignore_err)
}
if !(flags&Int32FromInt32(GLOB_APPEND) != 0) {
(*Tglob_t)(unsafe.Pointer(g)).Fgl_offs = offs
@@ -128569,7 +128566,7 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
error1 = _expand_tilde(tls, bp+4120, bp+16, bp+4112)
}
if !(error1 != 0) {
- error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, errfunc, bp+8)
+ error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, __ccgo_fp_errfunc, bp+8)
}
Xfree(tls, p)
}
@@ -133793,9 +133790,11 @@ func Xremque(tls *TLS, element uintptr) {
}
}
-func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -133811,7 +133810,7 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -133823,9 +133822,11 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
return Xmemcpy(tls, p+uintptr(n)*uintptr(v1), key, width)
}
-func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlfind_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -133841,7 +133842,7 @@ func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, co
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -133860,9 +133861,11 @@ type Tnode1 = struct {
Fh int32
}
-func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtdelete_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtdelete(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var a [97]uintptr
@@ -133886,7 +133889,7 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
return uintptr(0)
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -133931,9 +133934,11 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
return parent
}
-func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
+type t__ccgo_fp__Xtdestroy_1 = func(*TLS, uintptr)
+
+func Xtdestroy(tls *TLS, root uintptr, __ccgo_fp_freekey uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v freekey=%v, (%v:)", tls, root, freekey, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_freekey=%v, (%v:)", tls, root, __ccgo_fp_freekey, origin(2))
}
var r uintptr
_ = r
@@ -133941,17 +133946,19 @@ func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
if r == uintptr(0) {
return
}
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), freekey)
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), freekey)
- if freekey != 0 {
- (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_freekey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_freekey)
+ if __ccgo_fp_freekey != 0 {
+ (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
}
Xfree(tls, r)
}
-func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtfind_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtfind(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var c int32
@@ -133965,7 +133972,7 @@ func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -134053,9 +134060,11 @@ func X__tsearch_balance(tls *TLS, p uintptr) (r int32) {
return _rot(tls, p, n, BoolInt32(h0 < h1))
}
-func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
+type t__ccgo_fp__Xtsearch_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtsearch(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r1 uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r1) }()
}
var a [96]uintptr
@@ -134075,7 +134084,7 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
return n
}
@@ -134111,26 +134120,28 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
return r
}
-func _walk(tls *TLS, r uintptr, action uintptr, d int32) {
+func _walk(tls *TLS, r uintptr, __ccgo_fp_action uintptr, d int32) {
if !(r != 0) {
return
}
if (*Tnode1)(unsafe.Pointer(r)).Fh == int32(1) {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_leaf), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_leaf), d)
} else {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_preorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_postorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_endorder), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_preorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_postorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_endorder), d)
}
}
-func Xtwalk(tls *TLS, root uintptr, action uintptr) {
+type t__ccgo_fp__Xtwalk_1 = func(*TLS, uintptr, int32, int32)
+
+func Xtwalk(tls *TLS, root uintptr, __ccgo_fp_action uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v action=%v, (%v:)", tls, root, action, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_action=%v, (%v:)", tls, root, __ccgo_fp_action, origin(2))
}
- _walk(tls, root, action, 0)
+ _walk(tls, root, __ccgo_fp_action, 0)
}
func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
@@ -143819,9 +143830,11 @@ func Xatoll(tls *TLS, s uintptr) (r int64) {
return v5
}
-func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xbsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, key, base, nel, width, cmp, origin(2))
+ trc("tls=%v key=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, base, nel, width, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var sign int32
@@ -143829,7 +143842,7 @@ func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, c
_, _ = sign, try
for nel > uint64(0) {
try = base + uintptr(width*(nel/uint64(2)))
- sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, try)
+ sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, try)
if sign < 0 {
nel /= uint64(2)
} else {
@@ -144090,7 +144103,7 @@ func _shr(tls *TLS, p uintptr, n int32) {
*(*Tsize_t)(unsafe.Pointer(p + 1*8)) >>= Uint64FromInt32(n)
}
-func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
+func _sift(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
bp := tls.Alloc(912)
defer tls.Free(912)
var i, v1, v2 int32
@@ -144102,10 +144115,10 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
for pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
break
}
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, rt, arg) >= 0 {
v1 = i
i++
(*(*[113]uintptr)(unsafe.Pointer(bp)))[v1] = lf
@@ -144122,7 +144135,7 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
_cycle(tls, width, bp, i)
}
-func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
+func _trinkle(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
bp := tls.Alloc(928)
defer tls.Free(928)
var i, trail, v1 int32
@@ -144136,13 +144149,13 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
(*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0] = head
for (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[int32(1)] != uint64(0) {
stepson = head - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift)*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
break
}
if !(trusty != 0) && pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, stepson, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, stepson, arg) >= 0 {
break
}
}
@@ -144157,13 +144170,15 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
}
if !(trusty != 0) {
_cycle(tls, width, bp+16, i)
- _sift(tls, head, width, cmp, arg, pshift, lp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, lp)
}
}
-func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__X__qsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
bp := tls.Alloc(784)
defer tls.Free(784)
@@ -144201,14 +144216,14 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
}
for head < high {
if (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0]&uint64(3) == uint64(3) {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
_shr(tls, bp+768, int32(2))
pshift += int32(2)
} else {
if (*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift-int32(1)] >= Uint64FromInt64(int64(high)-int64(head)) {
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
} else {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
}
if pshift == int32(1) {
_shl(tls, bp+768, int32(1))
@@ -144221,7 +144236,7 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
head += uintptr(width)
}
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
for pshift != int32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[int32(1)] != uint64(0) {
if pshift <= int32(1) {
trail = _pntz(tls, bp+768)
@@ -144232,31 +144247,35 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
pshift -= int32(2)
*(*Tsize_t)(unsafe.Pointer(bp + 768)) ^= uint64(7)
_shr(tls, bp+768, int32(1))
- _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
+ _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
_shl(tls, bp+768, int32(1))
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
- _trinkle(tls, head-uintptr(width), width, cmp, arg, bp+768, pshift, int32(1), bp)
+ _trinkle(tls, head-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift, int32(1), bp)
}
head -= uintptr(width)
}
}
-func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__Xqsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
- X__qsort_r(tls, base, nel, width, cmp, arg)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp_cmp, arg)
}
func _wrapper_cmp(tls *TLS, v1 uintptr, v2 uintptr, cmp uintptr) (r int32) {
return (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, v1, v2)
}
-func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun) {
+type t__ccgo_fp__Xqsort_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, base, nel, width, cmp, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, origin(2))
}
- X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), cmp)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), __ccgo_fp_cmp)
}
func _strtox(tls *TLS, s uintptr, p uintptr, prec int32) (r float64) {
diff --git a/vendor/modernc.org/libc/ccgo_linux_s390x.go b/vendor/modernc.org/libc/ccgo_linux_s390x.go
index 9adb2e4c8..994152718 100644
--- a/vendor/modernc.org/libc/ccgo_linux_s390x.go
+++ b/vendor/modernc.org/libc/ccgo_linux_s390x.go
@@ -24882,9 +24882,13 @@ func Xrewinddir(tls *TLS, dir uintptr) {
___unlock(tls, dir+20)
}
-func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r int32) {
+type t__ccgo_fp__Xscandir_2 = func(*TLS, uintptr) int32
+
+type t__ccgo_fp__Xscandir_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xscandir(tls *TLS, path uintptr, res uintptr, __ccgo_fp_sel uintptr, __ccgo_fp_cmp uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v res=%v sel=%v cmp=%v, (%v:)", tls, path, res, sel, cmp, origin(2))
+ trc("tls=%v path=%v res=%v __ccgo_fp_sel=%v __ccgo_fp_cmp=%v, (%v:)", tls, path, res, __ccgo_fp_sel, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var cnt, len1, v2, v3 Tsize_t
@@ -24906,7 +24910,7 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
if !(v1 != 0) {
break
}
- if sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{sel})))(tls, de) != 0) {
+ if __ccgo_fp_sel != 0 && !((*(*func(*TLS, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_sel})))(tls, de) != 0) {
continue
}
if cnt >= len1 {
@@ -24944,8 +24948,8 @@ func Xscandir(tls *TLS, path uintptr, res uintptr, sel uintptr, cmp uintptr) (r
return -int32(1)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
- if cmp != 0 {
- Xqsort(tls, names, cnt, uint64(8), cmp)
+ if __ccgo_fp_cmp != 0 {
+ Xqsort(tls, names, cnt, uint64(8), __ccgo_fp_cmp)
}
*(*uintptr)(unsafe.Pointer(res)) = names
return Int32FromUint64(cnt)
@@ -25692,9 +25696,11 @@ func X__funcs_on_quick_exit(tls *TLS) {
}
}
-func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
+type t__ccgo_fp__Xat_quick_exit_0 = func(*TLS)
+
+func Xat_quick_exit(tls *TLS, __ccgo_fp_func uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v func1=%v, (%v:)", tls, func1, origin(2))
+ trc("tls=%v __ccgo_fp_func=%v, (%v:)", tls, __ccgo_fp_func, origin(2))
defer func() { trc("-> %v", r1) }()
}
var r, v1 int32
@@ -25706,7 +25712,7 @@ func Xat_quick_exit(tls *TLS, func1 uintptr) (r1 int32) {
} else {
v1 = _count
_count++
- _funcs[v1] = func1
+ _funcs[v1] = __ccgo_fp_func
}
___unlock(tls, uintptr(unsafe.Pointer(&_lock)))
return r
@@ -28199,15 +28205,17 @@ type TFTW = struct {
Flevel int32
}
-func Xftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32) (r int32) {
+type t__ccgo_fp__Xftw_1 = func(*TLS, uintptr, uintptr, int32) int32
+
+func Xftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32) (r int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v, (%v:)", tls, path, fn, fd_limit, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, origin(2))
defer func() { trc("-> %v", r) }()
}
/* The following cast assumes that calling a function with one
* argument more than it needs behaves as expected. This is
* actually undefined, but works on all real-world machines. */
- return Xnftw(tls, path, fn, fd_limit, int32(FTW_PHYS))
+ return Xnftw(tls, path, __ccgo_fp_fn, fd_limit, int32(FTW_PHYS))
}
const STATX_ALL = 4095
@@ -31109,14 +31117,6 @@ func Xfremovexattr(tls *TLS, fd int32, name uintptr) (r int32) {
return int32(X__syscall_ret(tls, Uint64FromInt64(X__syscall2(tls, int64(SYS_fremovexattr), int64(fd), int64(name)))))
}
-type Tucontext_t2 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
-}
-
func _dummy4(tls *TLS, msg uintptr, lm uintptr) (r uintptr) {
return msg
}
@@ -114017,15 +114017,7 @@ func Xgethostid(tls *TLS) (r int64) {
const optpos = 0
-type Tucontext_t3 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
-}
-
-type t__ucontext1 = Tucontext_t3
+type t__ucontext1 = Tucontext_t1
func X__getopt_msg(tls *TLS, a uintptr, b uintptr, c uintptr, l Tsize_t) {
if __ccgo_strace {
@@ -115173,7 +115165,7 @@ type Thistory = struct {
Fbase int32
}
-func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
+func _do_nftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32, h uintptr) (r1 int32) {
bp := tls.Alloc(192)
defer tls.Free(192)
var d, de, v10 uintptr
@@ -115279,7 +115271,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
}
if v8 = !(flags&Int32FromInt32(FTW_DEPTH) != 0); v8 {
- v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+176)
+ v7 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+176)
r = v7
}
if v8 && v7 != 0 {
@@ -115320,7 +115312,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*uint8)(unsafe.Pointer(path + uintptr(j))) = uint8('/')
Xstrcpy(tls, path+uintptr(j)+uintptr(1), de+19)
- v11 = _do_nftw(tls, path, fn, fd_limit-int32(1), flags, bp+144)
+ v11 = _do_nftw(tls, path, __ccgo_fp_fn, fd_limit-int32(1), flags, bp+144)
r = v11
if v11 != 0 {
Xclosedir(tls, d)
@@ -115335,7 +115327,7 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
}
*(*uint8)(unsafe.Pointer(path + uintptr(l))) = uint8(0)
if v13 = flags&int32(FTW_DEPTH) != 0; v13 {
- v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{fn})))(tls, path, bp, type1, bp+176)
+ v12 = (*(*func(*TLS, uintptr, uintptr, int32, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_fn})))(tls, path, bp, type1, bp+176)
r = v12
}
if v13 && v12 != 0 {
@@ -115344,9 +115336,11 @@ func _do_nftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32, h
return 0
}
-func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1 int32) {
+type t__ccgo_fp__Xnftw_1 = func(*TLS, uintptr, uintptr, int32, uintptr) int32
+
+func Xnftw(tls *TLS, path uintptr, __ccgo_fp_fn uintptr, fd_limit int32, flags int32) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v path=%v fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, fn, fd_limit, flags, origin(2))
+ trc("tls=%v path=%v __ccgo_fp_fn=%v fd_limit=%v flags=%v, (%v:)", tls, path, __ccgo_fp_fn, fd_limit, flags, origin(2))
defer func() { trc("-> %v", r1) }()
}
bp := tls.Alloc(4112)
@@ -115366,7 +115360,7 @@ func Xnftw(tls *TLS, path uintptr, fn uintptr, fd_limit int32, flags int32) (r1
}
Xmemcpy(tls, bp+4, path, l+uint64(1))
_pthread_setcancelstate(tls, int32(PTHREAD_CANCEL_DISABLE), bp)
- r = _do_nftw(tls, bp+4, fn, fd_limit, flags, UintptrFromInt32(0))
+ r = _do_nftw(tls, bp+4, __ccgo_fp_fn, fd_limit, flags, UintptrFromInt32(0))
_pthread_setcancelstate(tls, *(*int32)(unsafe.Pointer(bp)), uintptr(0))
return r
}
@@ -116146,8 +116140,6 @@ type Tsockaddr_storage = struct {
F__ss_align uint64
}
-type t__ucontext2 = Tucontext_t2
-
var _lock2 [1]int32
var _log_ident [32]uint8
var _log_opt int32
@@ -116551,14 +116543,6 @@ const SA = 194
const SB = 244
const bittab = 0
-type Tucontext_t4 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
-}
-
func Xbtowc(tls *TLS, c int32) (r Twint_t) {
if __ccgo_strace {
trc("tls=%v c=%v, (%v:)", tls, c, origin(2))
@@ -118374,13 +118358,6 @@ const _ns_uop_delete = 0
const _ns_uop_add = 1
const _ns_uop_max = 2
-type Tns_tsig_key1 = struct {
- Fname [1025]uint8
- Falg [1025]uint8
- Fdata uintptr
- Flen1 int32
-}
-
type Tns_tsig_key = struct {
Fname [1025]uint8
Falg [1025]uint8
@@ -118388,14 +118365,6 @@ type Tns_tsig_key = struct {
Flen1 int32
}
-type Tns_tcp_tsig_state1 = struct {
- Fcounter int32
- Fkey uintptr
- Fctx uintptr
- Fsig [512]uint8
- Fsiglen int32
-}
-
type Tns_tcp_tsig_state = struct {
Fcounter int32
Fkey uintptr
@@ -119045,9 +119014,11 @@ type Tresolvconf = struct {
Ftimeout uint32
}
-func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__dns_parse_2 = func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32
+
+func X__dns_parse(tls *TLS, r uintptr, rlen int32, __ccgo_fp_callback uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v r=%v rlen=%v callback=%v ctx=%v, (%v:)", tls, r, rlen, callback, ctx, origin(2))
+ trc("tls=%v r=%v rlen=%v __ccgo_fp_callback=%v ctx=%v, (%v:)", tls, r, rlen, __ccgo_fp_callback, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var ancount, len1, qdcount, v1, v2 int32
@@ -119093,7 +119064,7 @@ func X__dns_parse(tls *TLS, r uintptr, rlen int32, callback uintptr, ctx uintptr
if int64(len1+int32(10)) > int64(r+uintptr(rlen))-int64(p) {
return -int32(1)
}
- if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
+ if (*(*func(*TLS, uintptr, int32, uintptr, int32, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_callback})))(tls, ctx, Int32FromUint8(*(*uint8)(unsafe.Pointer(p + 1))), p+uintptr(10), len1, r, rlen) < 0 {
return -int32(1)
}
p += uintptr(int32(10) + len1)
@@ -121311,18 +121282,6 @@ func Xherror(tls *TLS, msg uintptr) {
Xfprintf(tls, uintptr(unsafe.Pointer(&X__stderr_FILE)), __ccgo_ts+1102, VaList(bp+8, v1, v2, Xhstrerror(tls, *(*int32)(unsafe.Pointer(X__h_errno_location(tls))))))
}
-type Tcpu_set_t1 = struct {
- F__bits [16]uint64
-}
-
-type Tucontext_t5 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
-}
-
var _msgs1 = [84]uint8{'H', 'o', 's', 't', ' ', 'n', 'o', 't', ' ', 'f', 'o', 'u', 'n', 'd', 0, 'T', 'r', 'y', ' ', 'a', 'g', 'a', 'i', 'n', 0, 'N', 'o', 'n', '-', 'r', 'e', 'c', 'o', 'v', 'e', 'r', 'a', 'b', 'l', 'e', ' ', 'e', 'r', 'r', 'o', 'r', 0, 'A', 'd', 'd', 'r', 'e', 's', 's', ' ', 'n', 'o', 't', ' ', 'a', 'v', 'a', 'i', 'l', 'a', 'b', 'l', 'e', 0, 0, 'U', 'n', 'k', 'n', 'o', 'w', 'n', ' ', 'e', 'r', 'r', 'o', 'r'}
func Xhstrerror(tls *TLS, ecode int32) (r uintptr) {
@@ -123134,7 +123093,7 @@ func X__lookup_serv(tls *TLS, buf uintptr, name uintptr, proto int32, socktype i
return v22
}
-func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, cb uintptr, ctx uintptr) (r1 int32) {
+func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
bp := tls.Alloc(8192)
defer tls.Free(8192)
var h uintptr
@@ -123224,7 +123183,7 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
if Int32FromUint16((*Tnlmsghdr)(unsafe.Pointer(h)).Fnlmsg_type) == int32(NLMSG_ERROR) {
return -int32(1)
}
- ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cb})))(tls, ctx, h)
+ ret = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cb})))(tls, ctx, h)
if ret != 0 {
return ret
}
@@ -123237,9 +123196,11 @@ func ___netlink_enumerate(tls *TLS, fd int32, seq uint32, type1 int32, af int32,
return r1
}
-func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr, ctx uintptr) (r1 int32) {
+type t__ccgo_fp__X__rtnetlink_enumerate_2 = func(*TLS, uintptr, uintptr) int32
+
+func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, __ccgo_fp_cb uintptr, ctx uintptr) (r1 int32) {
if __ccgo_strace {
- trc("tls=%v link_af=%v addr_af=%v cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, cb, ctx, origin(2))
+ trc("tls=%v link_af=%v addr_af=%v __ccgo_fp_cb=%v ctx=%v, (%v:)", tls, link_af, addr_af, __ccgo_fp_cb, ctx, origin(2))
defer func() { trc("-> %v", r1) }()
}
var fd, r int32
@@ -123248,9 +123209,9 @@ func X__rtnetlink_enumerate(tls *TLS, link_af int32, addr_af int32, cb uintptr,
if fd < 0 {
return -int32(1)
}
- r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(1), int32(RTM_GETLINK), link_af, __ccgo_fp_cb, ctx)
if !(r != 0) {
- r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, cb, ctx)
+ r = ___netlink_enumerate(tls, fd, uint32(2), int32(RTM_GETADDR), addr_af, __ccgo_fp_cb, ctx)
}
X__syscall1(tls, int64(SYS_close), int64(fd))
return r
@@ -128631,7 +128592,7 @@ func _append(tls *TLS, tail uintptr, name uintptr, len1 Tsize_t, mark int32) (r
return 0
}
-func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, errfunc uintptr, tail uintptr) (r1 int32) {
+func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, tail uintptr) (r1 int32) {
bp := tls.Alloc(144)
defer tls.Free(144)
var de, dir, p, p2, v11, v2, v7, v8 uintptr
@@ -128740,7 +128701,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
}
if !(type1 != 0) && Xlstat(tls, buf, bp) != 0 {
- if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if *(*int32)(unsafe.Pointer(X__errno_location(tls))) != int32(ENOENT) && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
return 0
@@ -128777,7 +128738,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
}
dir = Xopendir(tls, v7)
if !(dir != 0) {
- if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
+ if (*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0 {
return int32(GLOB_ABORTED)
}
return 0
@@ -128829,7 +128790,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
} else {
v11 = __ccgo_ts
}
- r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, errfunc, tail)
+ r = _do_glob(tls, buf, pos+l, Int32FromUint8((*Tdirent)(unsafe.Pointer(de)).Fd_type), v11, flags, __ccgo_fp_errfunc, tail)
if r != 0 {
Xclosedir(tls, dir)
return r
@@ -128840,7 +128801,7 @@ func _do_glob(tls *TLS, buf uintptr, pos Tsize_t, type1 int32, pat uintptr, flag
*(*uint8)(unsafe.Pointer(p2)) = saved_sep
}
Xclosedir(tls, dir)
- if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
+ if readerr != 0 && ((*(*func(*TLS, uintptr, int32) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_errfunc})))(tls, buf, *(*int32)(unsafe.Pointer(X__errno_location(tls)))) != 0 || flags&int32(GLOB_ERR) != 0) {
return int32(GLOB_ABORTED)
}
*(*int32)(unsafe.Pointer(X__errno_location(tls))) = old_errno
@@ -128952,9 +128913,11 @@ func _expand_tilde(tls *TLS, pat uintptr, buf uintptr, pos uintptr) (r int32) {
return 0
}
-func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r int32) {
+type t__ccgo_fp__Xglob_2 = func(*TLS, uintptr, int32) int32
+
+func Xglob(tls *TLS, pat uintptr, flags int32, __ccgo_fp_errfunc uintptr, g uintptr) (r int32) {
if __ccgo_strace {
- trc("tls=%v pat=%v flags=%v errfunc=%v g=%v, (%v:)", tls, pat, flags, errfunc, g, origin(2))
+ trc("tls=%v pat=%v flags=%v __ccgo_fp_errfunc=%v g=%v, (%v:)", tls, pat, flags, __ccgo_fp_errfunc, g, origin(2))
defer func() { trc("-> %v", r) }()
}
bp := tls.Alloc(4128)
@@ -128980,8 +128943,8 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
}
offs = v1
error1 = 0
- if !(errfunc != 0) {
- errfunc = __ccgo_fp(_ignore_err)
+ if !(__ccgo_fp_errfunc != 0) {
+ __ccgo_fp_errfunc = __ccgo_fp(_ignore_err)
}
if !(flags&Int32FromInt32(GLOB_APPEND) != 0) {
(*Tglob_t)(unsafe.Pointer(g)).Fgl_offs = offs
@@ -129000,7 +128963,7 @@ func Xglob(tls *TLS, pat uintptr, flags int32, errfunc uintptr, g uintptr) (r in
error1 = _expand_tilde(tls, bp+4120, bp+16, bp+4112)
}
if !(error1 != 0) {
- error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, errfunc, bp+8)
+ error1 = _do_glob(tls, bp+16, *(*Tsize_t)(unsafe.Pointer(bp + 4112)), 0, *(*uintptr)(unsafe.Pointer(bp + 4120)), flags, __ccgo_fp_errfunc, bp+8)
}
Xfree(tls, p)
}
@@ -134224,9 +134187,11 @@ func Xremque(tls *TLS, element uintptr) {
}
}
-func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -134242,7 +134207,7 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -134254,9 +134219,11 @@ func Xlsearch(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t,
return Xmemcpy(tls, p+uintptr(n)*uintptr(v1), key, width)
}
-func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, compar uintptr) (r uintptr) {
+type t__ccgo_fp__Xlfind_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, __ccgo_fp_compar uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nelp=%v width=%v compar=%v, (%v:)", tls, key, base, nelp, width, compar, origin(2))
+ trc("tls=%v key=%v base=%v nelp=%v width=%v __ccgo_fp_compar=%v, (%v:)", tls, key, base, nelp, width, __ccgo_fp_compar, origin(2))
defer func() { trc("-> %v", r) }()
}
var i, n Tsize_t
@@ -134272,7 +134239,7 @@ func Xlfind(tls *TLS, key uintptr, base uintptr, nelp uintptr, width Tsize_t, co
if !(i < n) {
break
}
- if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
+ if (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_compar})))(tls, key, p+uintptr(i)*uintptr(v1)) == 0 {
return p + uintptr(i)*uintptr(v1)
}
goto _2
@@ -134291,9 +134258,11 @@ type Tnode1 = struct {
Fh int32
}
-func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtdelete_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtdelete(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var a [97]uintptr
@@ -134317,7 +134286,7 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
return uintptr(0)
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -134362,9 +134331,11 @@ func Xtdelete(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
return parent
}
-func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
+type t__ccgo_fp__Xtdestroy_1 = func(*TLS, uintptr)
+
+func Xtdestroy(tls *TLS, root uintptr, __ccgo_fp_freekey uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v freekey=%v, (%v:)", tls, root, freekey, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_freekey=%v, (%v:)", tls, root, __ccgo_fp_freekey, origin(2))
}
var r uintptr
_ = r
@@ -134372,17 +134343,19 @@ func Xtdestroy(tls *TLS, root uintptr, freekey uintptr) {
if r == uintptr(0) {
return
}
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), freekey)
- Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), freekey)
- if freekey != 0 {
- (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_freekey)
+ Xtdestroy(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_freekey)
+ if __ccgo_fp_freekey != 0 {
+ (*(*func(*TLS, uintptr))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_freekey})))(tls, (*Tnode1)(unsafe.Pointer(r)).Fkey)
}
Xfree(tls, r)
}
-func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xtfind_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtfind(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var c int32
@@ -134396,7 +134369,7 @@ func Xtfind(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
break
}
@@ -134484,9 +134457,11 @@ func X__tsearch_balance(tls *TLS, p uintptr) (r int32) {
return _rot(tls, p, n, BoolInt32(h0 < h1))
}
-func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
+type t__ccgo_fp__Xtsearch_2 = func(*TLS, uintptr, uintptr) int32
+
+func Xtsearch(tls *TLS, key uintptr, rootp uintptr, __ccgo_fp_cmp uintptr) (r1 uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v rootp=%v cmp=%v, (%v:)", tls, key, rootp, cmp, origin(2))
+ trc("tls=%v key=%v rootp=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, rootp, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r1) }()
}
var a [96]uintptr
@@ -134506,7 +134481,7 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
if !(n != 0) {
break
}
- c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
+ c = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, (*Tnode1)(unsafe.Pointer(n)).Fkey)
if !(c != 0) {
return n
}
@@ -134542,26 +134517,28 @@ func Xtsearch(tls *TLS, key uintptr, rootp uintptr, cmp uintptr) (r1 uintptr) {
return r
}
-func _walk(tls *TLS, r uintptr, action uintptr, d int32) {
+func _walk(tls *TLS, r uintptr, __ccgo_fp_action uintptr, d int32) {
if !(r != 0) {
return
}
if (*Tnode1)(unsafe.Pointer(r)).Fh == int32(1) {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_leaf), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_leaf), d)
} else {
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_preorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_postorder), d)
- _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), action, d+int32(1))
- (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{action})))(tls, r, int32(_endorder), d)
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_preorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_postorder), d)
+ _walk(tls, *(*uintptr)(unsafe.Pointer(r + 8 + 1*8)), __ccgo_fp_action, d+int32(1))
+ (*(*func(*TLS, uintptr, TVISIT, int32))(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_action})))(tls, r, int32(_endorder), d)
}
}
-func Xtwalk(tls *TLS, root uintptr, action uintptr) {
+type t__ccgo_fp__Xtwalk_1 = func(*TLS, uintptr, int32, int32)
+
+func Xtwalk(tls *TLS, root uintptr, __ccgo_fp_action uintptr) {
if __ccgo_strace {
- trc("tls=%v root=%v action=%v, (%v:)", tls, root, action, origin(2))
+ trc("tls=%v root=%v __ccgo_fp_action=%v, (%v:)", tls, root, __ccgo_fp_action, origin(2))
}
- _walk(tls, root, action, 0)
+ _walk(tls, root, __ccgo_fp_action, 0)
}
func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
@@ -134572,8 +134549,6 @@ func Xpoll(tls *TLS, fds uintptr, n Tnfds_t, timeout int32) (r int32) {
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_poll), int64(fds), Int64FromUint64(n), int64(timeout), 0, 0, 0))))
}
-type t__ucontext3 = Tucontext_t5
-
func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v fds=%v n=%v to=%v mask=%v, (%v:)", tls, fds, n, to, mask, origin(2))
@@ -134609,8 +134584,6 @@ func Xppoll(tls *TLS, fds uintptr, n Tnfds_t, to uintptr, mask uintptr) (r int32
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_ppoll), int64(fds), Int64FromUint64(n), int64(v3), int64(mask), int64(Int32FromInt32(_NSIG)/Int32FromInt32(8)), 0))))
}
-type t__ucontext4 = Tucontext_t4
-
func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts uintptr, mask uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v ts=%v mask=%v, (%v:)", tls, n, rfds, wfds, efds, ts, mask, origin(2))
@@ -134651,14 +134624,6 @@ func Xpselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, ts ui
return int32(X__syscall_ret(tls, Uint64FromInt64(___syscall_cp(tls, int64(SYS_pselect6), int64(n), int64(rfds), int64(wfds), int64(efds), int64(v3), int64(bp+16)))))
}
-type Tucontext_t6 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t
- Fuc_sigmask Tsigset_t
-}
-
func Xselect(tls *TLS, n int32, rfds uintptr, wfds uintptr, efds uintptr, tv uintptr) (r int32) {
if __ccgo_strace {
trc("tls=%v n=%v rfds=%v wfds=%v efds=%v tv=%v, (%v:)", tls, n, rfds, wfds, efds, tv, origin(2))
@@ -135148,14 +135113,6 @@ func Xsigaltstack(tls *TLS, ss uintptr, old uintptr) (r int32) {
const SST_SIZE = 8
-type Tucontext_t7 = struct {
- Fuc_flags uint64
- Fuc_link uintptr
- Fuc_stack Tstack_t
- Fuc_mcontext Tmcontext_t1
- Fuc_sigmask Tsigset_t
-}
-
func Xsigandset(tls *TLS, dest uintptr, left uintptr, right uintptr) (r1 int32) {
if __ccgo_strace {
trc("tls=%v dest=%v left=%v right=%v, (%v:)", tls, dest, left, right, origin(2))
@@ -144285,9 +144242,11 @@ func Xatoll(tls *TLS, s uintptr) (r int64) {
return v5
}
-func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, cmp uintptr) (r uintptr) {
+type t__ccgo_fp__Xbsearch_4 = func(*TLS, uintptr, uintptr) int32
+
+func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp uintptr) (r uintptr) {
if __ccgo_strace {
- trc("tls=%v key=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, key, base, nel, width, cmp, origin(2))
+ trc("tls=%v key=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, key, base, nel, width, __ccgo_fp_cmp, origin(2))
defer func() { trc("-> %v", r) }()
}
var sign int32
@@ -144295,7 +144254,7 @@ func Xbsearch(tls *TLS, key uintptr, base uintptr, nel Tsize_t, width Tsize_t, c
_, _ = sign, try
for nel > uint64(0) {
try = base + uintptr(width*(nel/uint64(2)))
- sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, key, try)
+ sign = (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, key, try)
if sign < 0 {
nel /= uint64(2)
} else {
@@ -144565,7 +144524,7 @@ func _shr(tls *TLS, p uintptr, n int32) {
*(*Tsize_t)(unsafe.Pointer(p + 1*8)) >>= Uint64FromInt32(n)
}
-func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
+func _sift(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pshift int32, lp uintptr) {
bp := tls.Alloc(912)
defer tls.Free(912)
var i, v1, v2 int32
@@ -144577,10 +144536,10 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
for pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], lf, arg) >= 0 && (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, (*(*[113]uintptr)(unsafe.Pointer(bp)))[0], rt, arg) >= 0 {
break
}
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, rt, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, rt, arg) >= 0 {
v1 = i
i++
(*(*[113]uintptr)(unsafe.Pointer(bp)))[v1] = lf
@@ -144597,7 +144556,7 @@ func _sift(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pshi
_cycle(tls, width, bp, i)
}
-func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
+func _trinkle(tls *TLS, head uintptr, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr, pp uintptr, pshift int32, trusty int32, lp uintptr) {
bp := tls.Alloc(928)
defer tls.Free(928)
var i, trail, v1 int32
@@ -144611,13 +144570,13 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
(*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0] = head
for (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp)))[int32(1)] != uint64(0) {
stepson = head - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift)*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, stepson, (*(*[113]uintptr)(unsafe.Pointer(bp + 16)))[0], arg) <= 0 {
break
}
if !(trusty != 0) && pshift > int32(1) {
rt = head - uintptr(width)
lf = head - uintptr(width) - uintptr(*(*Tsize_t)(unsafe.Pointer(lp + uintptr(pshift-int32(2))*8)))
- if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, lf, stepson, arg) >= 0 {
+ if (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, rt, stepson, arg) >= 0 || (*(*func(*TLS, uintptr, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{__ccgo_fp_cmp})))(tls, lf, stepson, arg) >= 0 {
break
}
}
@@ -144632,13 +144591,15 @@ func _trinkle(tls *TLS, head uintptr, width Tsize_t, cmp Tcmpfun, arg uintptr, p
}
if !(trusty != 0) {
_cycle(tls, width, bp+16, i)
- _sift(tls, head, width, cmp, arg, pshift, lp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, lp)
}
}
-func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__X__qsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
bp := tls.Alloc(784)
defer tls.Free(784)
@@ -144676,14 +144637,14 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
}
for head < high {
if (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0]&uint64(3) == uint64(3) {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
_shr(tls, bp+768, int32(2))
pshift += int32(2)
} else {
if (*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift-int32(1)] >= Uint64FromInt64(int64(high)-int64(head)) {
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
} else {
- _sift(tls, head, width, cmp, arg, pshift, bp)
+ _sift(tls, head, width, __ccgo_fp_cmp, arg, pshift, bp)
}
if pshift == int32(1) {
_shl(tls, bp+768, int32(1))
@@ -144696,7 +144657,7 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
head += uintptr(width)
}
- _trinkle(tls, head, width, cmp, arg, bp+768, pshift, 0, bp)
+ _trinkle(tls, head, width, __ccgo_fp_cmp, arg, bp+768, pshift, 0, bp)
for pshift != int32(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[0] != uint64(1) || (*(*[2]Tsize_t)(unsafe.Pointer(bp + 768)))[int32(1)] != uint64(0) {
if pshift <= int32(1) {
trail = _pntz(tls, bp+768)
@@ -144707,31 +144668,35 @@ func X__qsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun,
pshift -= int32(2)
*(*Tsize_t)(unsafe.Pointer(bp + 768)) ^= uint64(7)
_shr(tls, bp+768, int32(1))
- _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
+ _trinkle(tls, head-uintptr((*(*[96]Tsize_t)(unsafe.Pointer(bp)))[pshift])-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift+int32(1), int32(1), bp)
_shl(tls, bp+768, int32(1))
*(*Tsize_t)(unsafe.Pointer(bp + 768)) |= uint64(1)
- _trinkle(tls, head-uintptr(width), width, cmp, arg, bp+768, pshift, int32(1), bp)
+ _trinkle(tls, head-uintptr(width), width, __ccgo_fp_cmp, arg, bp+768, pshift, int32(1), bp)
}
head -= uintptr(width)
}
}
-func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun, arg uintptr) {
+type t__ccgo_fp__Xqsort_r_3 = func(*TLS, uintptr, uintptr, uintptr) int32
+
+func Xqsort_r(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun, arg uintptr) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v arg=%v, (%v:)", tls, base, nel, width, cmp, arg, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v arg=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, arg, origin(2))
}
- X__qsort_r(tls, base, nel, width, cmp, arg)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp_cmp, arg)
}
func _wrapper_cmp(tls *TLS, v1 uintptr, v2 uintptr, cmp uintptr) (r int32) {
return (*(*func(*TLS, uintptr, uintptr) int32)(unsafe.Pointer(&struct{ uintptr }{cmp})))(tls, v1, v2)
}
-func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, cmp Tcmpfun) {
+type t__ccgo_fp__Xqsort_3 = func(*TLS, uintptr, uintptr) int32
+
+func Xqsort(tls *TLS, base uintptr, nel Tsize_t, width Tsize_t, __ccgo_fp_cmp Tcmpfun) {
if __ccgo_strace {
- trc("tls=%v base=%v nel=%v width=%v cmp=%v, (%v:)", tls, base, nel, width, cmp, origin(2))
+ trc("tls=%v base=%v nel=%v width=%v __ccgo_fp_cmp=%v, (%v:)", tls, base, nel, width, __ccgo_fp_cmp, origin(2))
}
- X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), cmp)
+ X__qsort_r(tls, base, nel, width, __ccgo_fp(_wrapper_cmp), __ccgo_fp_cmp)
}
func _strtox(tls *TLS, s uintptr, p uintptr, prec int32) (r float64) {
diff --git a/vendor/modernc.org/libc/libc.go b/vendor/modernc.org/libc/libc.go
index dab50ebf3..6dfcd57a0 100644
--- a/vendor/modernc.org/libc/libc.go
+++ b/vendor/modernc.org/libc/libc.go
@@ -4,9 +4,9 @@
//go:build !linux || mips64le
-// go.generate echo package libc > ccgo.go
-//
-//go:generate go fmt -l -s -w ./...
+///go.generate echo package libc > ccgo.go
+///go:generate go fmt -l -s -w ./...
+
package libc // import "modernc.org/libc"
//TODO use O_RDONLY etc. from fcntl header
diff --git a/vendor/modernc.org/libc/libc_all.go b/vendor/modernc.org/libc/libc_all.go
index 001687e3c..0e20d95e2 100644
--- a/vendor/modernc.org/libc/libc_all.go
+++ b/vendor/modernc.org/libc/libc_all.go
@@ -6,6 +6,7 @@
package libc // import "modernc.org/libc"
import (
+ "math"
"sync/atomic"
"unsafe"
@@ -51,3 +52,28 @@ func GoString(s uintptr) string {
func GoBytes(s uintptr, len int) []byte {
return unsafe.Slice((*byte)(unsafe.Pointer(s)), len)
}
+
+func X__isfinitef(tls *TLS, f float32) int32 {
+ d := float64(f)
+ if !math.IsInf(d, 0) && !math.IsNaN(d) {
+ return 1
+ }
+
+ return 0
+}
+
+func X__isfinite(tls *TLS, d float64) int32 {
+ if !math.IsInf(d, 0) && !math.IsNaN(d) {
+ return 1
+ }
+
+ return 0
+}
+
+func X__isfinitel(tls *TLS, d float64) int32 {
+ if !math.IsInf(d, 0) && !math.IsNaN(d) {
+ return 1
+ }
+
+ return 0
+}
diff --git a/vendor/modernc.org/libc/libc_freebsd.go b/vendor/modernc.org/libc/libc_freebsd.go
index d177340ce..259325ff6 100644
--- a/vendor/modernc.org/libc/libc_freebsd.go
+++ b/vendor/modernc.org/libc/libc_freebsd.go
@@ -2250,14 +2250,6 @@ __3:
return Xtolower(tls, int32(*(*uint8)(unsafe.Pointer(l)))) - Xtolower(tls, int32(*(*uint8)(unsafe.Pointer(r))))
}
-func X__isfinite(tls *TLS, d float64) int32 {
- if !math.IsInf(d, 0) && !math.IsNaN(d) {
- return 1
- }
-
- return 0
-}
-
func X__signbit(tls *TLS, x float64) (r int32) {
return int32(math.Float64bits(x) >> 63)
}
diff --git a/vendor/modernc.org/libc/tls_linux_amd64.go b/vendor/modernc.org/libc/tls_linux_amd64.go
index d001bada0..cba2c4437 100644
--- a/vendor/modernc.org/libc/tls_linux_amd64.go
+++ b/vendor/modernc.org/libc/tls_linux_amd64.go
@@ -10,6 +10,12 @@ func TLSAlloc(p0 *TLS, p1 int) uintptr
//go:noescape
func TLSFree(p0 *TLS, p1 int)
+//go:noescape
+func TLSAllocaEntry(p0 *TLS)
+
+//go:noescape
+func TLSAllocaExit(p0 *TLS)
+
func tlsAlloc(tls *TLS, n int) uintptr {
return tls.Alloc(n)
}
@@ -17,3 +23,11 @@ func tlsAlloc(tls *TLS, n int) uintptr {
func tlsFree(tls *TLS, n int) {
tls.Free(n)
}
+
+func tlsAllocaEntry(tls *TLS) {
+ tls.AllocaEntry()
+}
+
+func tlsAllocaExit(tls *TLS) {
+ tls.AllocaExit()
+}
diff --git a/vendor/modernc.org/libc/tls_linux_amd64.s b/vendor/modernc.org/libc/tls_linux_amd64.s
index 9d55f5f04..d72678788 100644
--- a/vendor/modernc.org/libc/tls_linux_amd64.s
+++ b/vendor/modernc.org/libc/tls_linux_amd64.s
@@ -22,3 +22,19 @@ TEXT ·TLSFree(SB),$16-16
MOVQ AX, 8(SP)
CALL ·tlsFree(SB)
RET
+
+TEXT ·TLSAllocaEntry(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ p0+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·tlsAllocaEntry(SB)
+ RET
+
+TEXT ·TLSAllocaExit(SB),$8-8
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ p0+0(FP), AX
+ MOVQ AX, 0(SP)
+ CALL ·tlsAllocaExit(SB)
+ RET