summaryrefslogtreecommitdiff
path: root/vendor/github.com/cilium/ebpf
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/cilium/ebpf')
-rw-r--r--vendor/github.com/cilium/ebpf/.gitignore1
-rw-r--r--vendor/github.com/cilium/ebpf/.golangci.yaml28
-rw-r--r--vendor/github.com/cilium/ebpf/ARCHITECTURE.md10
-rw-r--r--vendor/github.com/cilium/ebpf/CONTRIBUTING.md25
-rw-r--r--vendor/github.com/cilium/ebpf/MAINTAINERS.md8
-rw-r--r--vendor/github.com/cilium/ebpf/Makefile93
-rw-r--r--vendor/github.com/cilium/ebpf/README.md71
-rw-r--r--vendor/github.com/cilium/ebpf/asm/func.go101
-rw-r--r--vendor/github.com/cilium/ebpf/asm/func_string.go98
-rw-r--r--vendor/github.com/cilium/ebpf/asm/instruction.go581
-rw-r--r--vendor/github.com/cilium/ebpf/asm/jump.go74
-rw-r--r--vendor/github.com/cilium/ebpf/asm/load_store.go2
-rw-r--r--vendor/github.com/cilium/ebpf/asm/metadata.go80
-rw-r--r--vendor/github.com/cilium/ebpf/asm/opcode.go120
-rw-r--r--vendor/github.com/cilium/ebpf/asm/opcode_string.go18
-rw-r--r--vendor/github.com/cilium/ebpf/asm/register.go1
-rw-r--r--vendor/github.com/cilium/ebpf/attachtype_string.go65
-rw-r--r--vendor/github.com/cilium/ebpf/btf/btf.go897
-rw-r--r--vendor/github.com/cilium/ebpf/btf/btf_types.go (renamed from vendor/github.com/cilium/ebpf/internal/btf/btf_types.go)120
-rw-r--r--vendor/github.com/cilium/ebpf/btf/btf_types_string.go44
-rw-r--r--vendor/github.com/cilium/ebpf/btf/core.go972
-rw-r--r--vendor/github.com/cilium/ebpf/btf/doc.go (renamed from vendor/github.com/cilium/ebpf/internal/btf/doc.go)3
-rw-r--r--vendor/github.com/cilium/ebpf/btf/ext_info.go721
-rw-r--r--vendor/github.com/cilium/ebpf/btf/format.go319
-rw-r--r--vendor/github.com/cilium/ebpf/btf/handle.go121
-rw-r--r--vendor/github.com/cilium/ebpf/btf/strings.go128
-rw-r--r--vendor/github.com/cilium/ebpf/btf/types.go1212
-rw-r--r--vendor/github.com/cilium/ebpf/collection.go699
-rw-r--r--vendor/github.com/cilium/ebpf/doc.go9
-rw-r--r--vendor/github.com/cilium/ebpf/elf_reader.go829
-rw-r--r--vendor/github.com/cilium/ebpf/elf_reader_fuzz.go21
-rw-r--r--vendor/github.com/cilium/ebpf/info.go158
-rw-r--r--vendor/github.com/cilium/ebpf/internal/align.go6
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/btf.go791
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/core.go388
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/ext_info.go281
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/fuzz.go49
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/strings.go60
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/types.go871
-rw-r--r--vendor/github.com/cilium/ebpf/internal/cpu.go4
-rw-r--r--vendor/github.com/cilium/ebpf/internal/elf.go50
-rw-r--r--vendor/github.com/cilium/ebpf/internal/endian.go24
-rw-r--r--vendor/github.com/cilium/ebpf/internal/endian_be.go13
-rw-r--r--vendor/github.com/cilium/ebpf/internal/endian_le.go13
-rw-r--r--vendor/github.com/cilium/ebpf/internal/errors.go205
-rw-r--r--vendor/github.com/cilium/ebpf/internal/fd.go69
-rw-r--r--vendor/github.com/cilium/ebpf/internal/feature.go48
-rw-r--r--vendor/github.com/cilium/ebpf/internal/io.go48
-rw-r--r--vendor/github.com/cilium/ebpf/internal/output.go84
-rw-r--r--vendor/github.com/cilium/ebpf/internal/pinning.go77
-rw-r--r--vendor/github.com/cilium/ebpf/internal/ptr_64.go14
-rw-r--r--vendor/github.com/cilium/ebpf/internal/sys/doc.go6
-rw-r--r--vendor/github.com/cilium/ebpf/internal/sys/fd.go96
-rw-r--r--vendor/github.com/cilium/ebpf/internal/sys/ptr.go (renamed from vendor/github.com/cilium/ebpf/internal/ptr.go)24
-rw-r--r--vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go (renamed from vendor/github.com/cilium/ebpf/internal/ptr_32_be.go)3
-rw-r--r--vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go (renamed from vendor/github.com/cilium/ebpf/internal/ptr_32_le.go)3
-rw-r--r--vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go14
-rw-r--r--vendor/github.com/cilium/ebpf/internal/sys/syscall.go126
-rw-r--r--vendor/github.com/cilium/ebpf/internal/sys/types.go1052
-rw-r--r--vendor/github.com/cilium/ebpf/internal/syscall.go179
-rw-r--r--vendor/github.com/cilium/ebpf/internal/syscall_string.go56
-rw-r--r--vendor/github.com/cilium/ebpf/internal/unix/types_linux.go72
-rw-r--r--vendor/github.com/cilium/ebpf/internal/unix/types_other.go64
-rw-r--r--vendor/github.com/cilium/ebpf/internal/vdso.go150
-rw-r--r--vendor/github.com/cilium/ebpf/internal/version.go122
-rw-r--r--vendor/github.com/cilium/ebpf/link/cgroup.go24
-rw-r--r--vendor/github.com/cilium/ebpf/link/iter.go86
-rw-r--r--vendor/github.com/cilium/ebpf/link/kprobe.go568
-rw-r--r--vendor/github.com/cilium/ebpf/link/link.go251
-rw-r--r--vendor/github.com/cilium/ebpf/link/netns.go28
-rw-r--r--vendor/github.com/cilium/ebpf/link/perf_event.go394
-rw-r--r--vendor/github.com/cilium/ebpf/link/platform.go25
-rw-r--r--vendor/github.com/cilium/ebpf/link/program.go14
-rw-r--r--vendor/github.com/cilium/ebpf/link/raw_tracepoint.go62
-rw-r--r--vendor/github.com/cilium/ebpf/link/socket_filter.go40
-rw-r--r--vendor/github.com/cilium/ebpf/link/syscalls.go108
-rw-r--r--vendor/github.com/cilium/ebpf/link/tracepoint.go77
-rw-r--r--vendor/github.com/cilium/ebpf/link/tracing.go141
-rw-r--r--vendor/github.com/cilium/ebpf/link/uprobe.go373
-rw-r--r--vendor/github.com/cilium/ebpf/link/xdp.go54
-rw-r--r--vendor/github.com/cilium/ebpf/linker.go271
-rw-r--r--vendor/github.com/cilium/ebpf/map.go708
-rw-r--r--vendor/github.com/cilium/ebpf/marshalers.go83
-rw-r--r--vendor/github.com/cilium/ebpf/pinning.go42
-rw-r--r--vendor/github.com/cilium/ebpf/prog.go675
-rw-r--r--vendor/github.com/cilium/ebpf/run-tests.sh156
-rw-r--r--vendor/github.com/cilium/ebpf/syscalls.go491
-rw-r--r--vendor/github.com/cilium/ebpf/types.go75
-rw-r--r--vendor/github.com/cilium/ebpf/types_string.go72
89 files changed, 12367 insertions, 5142 deletions
diff --git a/vendor/github.com/cilium/ebpf/.gitignore b/vendor/github.com/cilium/ebpf/.gitignore
index 38b15653c..b46162b8e 100644
--- a/vendor/github.com/cilium/ebpf/.gitignore
+++ b/vendor/github.com/cilium/ebpf/.gitignore
@@ -5,6 +5,7 @@
*.so
*.dylib
*.o
+!*_bpf*.o
# Test binary, build with `go test -c`
*.test
diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml
new file mode 100644
index 000000000..dc62dd6d0
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.golangci.yaml
@@ -0,0 +1,28 @@
+---
+issues:
+ exclude-rules:
+ # syscall param structs will have unused fields in Go code.
+ - path: syscall.*.go
+ linters:
+ - structcheck
+
+linters:
+ disable-all: true
+ enable:
+ - deadcode
+ - errcheck
+ - goimports
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - staticcheck
+ - structcheck
+ - typecheck
+ - unused
+ - varcheck
+
+ # Could be enabled later:
+ # - gocyclo
+ # - maligned
+ # - gosec
diff --git a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
index aee9c0a0d..8cd7e2486 100644
--- a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
+++ b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
@@ -57,7 +57,7 @@ Objects
loading a spec will fail because the kernel is too old, or a feature is not
enabled. There are multiple ways the library deals with that:
-* Fallback: older kernels don't allowing naming programs and maps. The library
+* Fallback: older kernels don't allow naming programs and maps. The library
automatically detects support for names, and omits them during load if
necessary. This works since name is primarily a debug aid.
@@ -68,7 +68,7 @@ enabled. There are multiple ways the library deals with that:
Once program and map objects are loaded they expose the kernel's low-level API,
e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer
wrappers on top of the low-level API, like `MapIterator`. The low-level API is
-useful as an out when our higher-level API doesn't support a particular use case.
+useful when our higher-level API doesn't support a particular use case.
Links
---
@@ -78,3 +78,9 @@ tend to use bpf_link to do so. Older hooks unfortunately use a combination of
syscalls, netlink messages, etc. Adding support for a new link type should not
pull in large dependencies like netlink, so XDP programs or tracepoints are
out of scope.
+
+Each bpf_link_type has one corresponding Go type, e.g. `link.tracing` corresponds
+to BPF_LINK_TRACING. In general, these types should be unexported as long as they
+don't export methods outside of the Link interface. Each Go type may have multiple
+exported constructors. For example `AttachTracing` and `AttachLSM` create a
+tracing link, but are distinct functions since they may require different arguments.
diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
index 97c794f3a..0d29eae81 100644
--- a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
+++ b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
@@ -6,8 +6,8 @@ are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get
a better understanding for the high-level goals.
New features must be accompanied by tests. Before starting work on any large
-feature, please [join](https://cilium.herokuapp.com/) the
-[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack to
+feature, please [join](https://ebpf.io/slack) the
+[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack to
discuss the design first.
When submitting pull requests, consider writing details about what problem you
@@ -18,6 +18,23 @@ reason about the proposed changes.
## Running the tests
Many of the tests require privileges to set resource limits and load eBPF code.
-The easiest way to obtain these is to run the tests with `sudo`:
+The easiest way to obtain these is to run the tests with `sudo`.
+
+To test the current package with your local kernel you can simply run:
+```
+go test -exec sudo ./...
+```
+
+To test the current package with a different kernel version you can use the [run-tests.sh](run-tests.sh) script.
+It requires [virtme](https://github.com/amluto/virtme) and qemu to be installed.
+
+Examples:
+
+```bash
+# Run all tests on a 5.4 kernel
+./run-tests.sh 5.4
+
+# Run a subset of tests:
+./run-tests.sh 5.4 go test ./link
+```
- sudo go test ./... \ No newline at end of file
diff --git a/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/vendor/github.com/cilium/ebpf/MAINTAINERS.md
new file mode 100644
index 000000000..9c18e7e76
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/MAINTAINERS.md
@@ -0,0 +1,8 @@
+# Maintainers
+
+ * [Lorenz Bauer]
+ * [Timo Beckers] (Isovalent)
+
+
+[Lorenz Bauer]: https://github.com/lmb
+[Timo Beckers]: https://github.com/ti-mo
diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile
index 5d4195833..2d5f04c37 100644
--- a/vendor/github.com/cilium/ebpf/Makefile
+++ b/vendor/github.com/cilium/ebpf/Makefile
@@ -1,67 +1,110 @@
# The development version of clang is distributed as the 'clang' binary,
# while stable/released versions have a version number attached.
# Pin the default clang to a stable version.
-CLANG ?= clang-11
-CFLAGS := -target bpf -O2 -g -Wall -Werror $(CFLAGS)
+CLANG ?= clang-14
+STRIP ?= llvm-strip-14
+OBJCOPY ?= llvm-objcopy-14
+CFLAGS := -O2 -g -Wall -Werror $(CFLAGS)
+
+CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/
# Obtain an absolute path to the directory of the Makefile.
# Assume the Makefile is in the root of the repository.
REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
UIDGID := $(shell stat -c '%u:%g' ${REPODIR})
+# Prefer podman if installed, otherwise use docker.
+# Note: Setting the var at runtime will always override.
+CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker)
+CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), --log-driver=none, --user "${UIDGID}")
+
IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE)
VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION)
+
# clang <8 doesn't tag relocs properly (STT_NOTYPE)
# clang 9 is the first version emitting BTF
TARGETS := \
testdata/loader-clang-7 \
testdata/loader-clang-9 \
- testdata/loader-clang-11 \
+ testdata/loader-$(CLANG) \
+ testdata/btf_map_init \
testdata/invalid_map \
testdata/raw_tracepoint \
testdata/invalid_map_static \
- testdata/initialized_btf_map \
+ testdata/invalid_btf_map_init \
testdata/strings \
- internal/btf/testdata/relocs
+ testdata/freplace \
+ testdata/iproute2_map_compat \
+ testdata/map_spin_lock \
+ testdata/subprog_reloc \
+ testdata/fwd_decl \
+ btf/testdata/relocs \
+ btf/testdata/relocs_read \
+ btf/testdata/relocs_read_tgt
-.PHONY: all clean docker-all docker-shell
+.PHONY: all clean container-all container-shell generate
-.DEFAULT_TARGET = docker-all
+.DEFAULT_TARGET = container-all
-# Build all ELF binaries using a Dockerized LLVM toolchain.
-docker-all:
- docker run --rm --user "${UIDGID}" \
+# Build all ELF binaries using a containerized LLVM toolchain.
+container-all:
+ ${CONTAINER_ENGINE} run --rm ${CONTAINER_RUN_ARGS} \
-v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
+ --env CFLAGS="-fdebug-prefix-map=/ebpf=." \
+ --env HOME="/tmp" \
"${IMAGE}:${VERSION}" \
- make all
+ $(MAKE) all
-# (debug) Drop the user into a shell inside the Docker container as root.
-docker-shell:
- docker run --rm -ti \
+# (debug) Drop the user into a shell inside the container as root.
+container-shell:
+ ${CONTAINER_ENGINE} run --rm -ti \
-v "${REPODIR}":/ebpf -w /ebpf \
"${IMAGE}:${VERSION}"
clean:
-$(RM) testdata/*.elf
- -$(RM) internal/btf/testdata/*.elf
+ -$(RM) btf/testdata/*.elf
+
+format:
+ find . -type f -name "*.c" | xargs clang-format -i
+
+all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate
+ ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf
+ ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf
-all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS))
+# $BPF_CLANG is used in go:generate invocations.
+generate: export BPF_CLANG := $(CLANG)
+generate: export BPF_CFLAGS := $(CFLAGS)
+generate:
+ go generate ./cmd/bpf2go/test
+ go generate ./internal/sys
+ cd examples/ && go generate ./...
testdata/loader-%-el.elf: testdata/loader.c
- $* $(CFLAGS) -mlittle-endian -c $< -o $@
+ $* $(CFLAGS) -target bpfel -c $< -o $@
+ $(STRIP) -g $@
testdata/loader-%-eb.elf: testdata/loader.c
- $* $(CFLAGS) -mbig-endian -c $< -o $@
+ $* $(CFLAGS) -target bpfeb -c $< -o $@
+ $(STRIP) -g $@
%-el.elf: %.c
- $(CLANG) $(CFLAGS) -mlittle-endian -c $< -o $@
+ $(CLANG) $(CFLAGS) -target bpfel -c $< -o $@
+ $(STRIP) -g $@
%-eb.elf : %.c
- $(CLANG) $(CFLAGS) -mbig-endian -c $< -o $@
+ $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@
+ $(STRIP) -g $@
-# Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf
-.PHONY: vmlinux-btf
-vmlinux-btf: internal/btf/testdata/vmlinux-btf.gz
-internal/btf/testdata/vmlinux-btf.gz: $(VMLINUX)
- objcopy --dump-section .BTF=/dev/stdout "$<" /dev/null | gzip > "$@"
+.PHONY: generate-btf
+generate-btf: KERNEL_VERSION?=5.18
+generate-btf:
+ $(eval TMP := $(shell mktemp -d))
+ curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION).bz" -o "$(TMP)/bzImage"
+ ./testdata/extract-vmlinux "$(TMP)/bzImage" > "$(TMP)/vmlinux"
+ $(OBJCOPY) --dump-section .BTF=/dev/stdout "$(TMP)/vmlinux" /dev/null | gzip > "btf/testdata/vmlinux.btf.gz"
+ curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION)-selftests-bpf.tgz" -o "$(TMP)/selftests.tgz"
+ tar -xf "$(TMP)/selftests.tgz" --to-stdout tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.ko | \
+ $(OBJCOPY) --dump-section .BTF="btf/testdata/btf_testmod.btf" - /dev/null
+ $(RM) -r "$(TMP)"
diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md
index 7f504d334..3e490de71 100644
--- a/vendor/github.com/cilium/ebpf/README.md
+++ b/vendor/github.com/cilium/ebpf/README.md
@@ -2,44 +2,60 @@
[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf)
+![HoneyGopher](.github/images/cilium-ebpf.png)
+
eBPF is a pure Go library that provides utilities for loading, compiling, and
debugging eBPF programs. It has minimal external dependencies and is intended to
be used in long running processes.
+The library is maintained by [Cloudflare](https://www.cloudflare.com) and
+[Cilium](https://www.cilium.io).
+
+See [ebpf.io](https://ebpf.io) for other projects from the eBPF ecosystem.
+
+## Getting Started
+
+A small collection of Go and eBPF programs that serve as examples for building
+your own tools can be found under [examples/](examples/).
+
+Contributions are highly encouraged, as they highlight certain use cases of
+eBPF and the library, and help shape the future of the project.
+
+## Getting Help
+
+Please
+[join](https://ebpf.io/slack) the
+[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you
+have questions regarding the library.
+
+## Packages
+
+This library includes the following packages:
+
* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic
- assembler
+ assembler, allowing you to write eBPF assembly instructions directly
+ within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.)
+* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows
+ compiling and embedding eBPF programs written in C within Go code. As well as
+ compiling the C code, it auto-generates Go code for loading and manipulating
+ the eBPF program and map objects.
* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF
to various hooks
* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a
`PERF_EVENT_ARRAY`
-* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows
- embedding eBPF in Go
-
-The library is maintained by [Cloudflare](https://www.cloudflare.com) and
-[Cilium](https://www.cilium.io). Feel free to
-[join](https://cilium.herokuapp.com/) the
-[#libbpf-go](https://cilium.slack.com/messages/libbpf-go) channel on Slack.
-
-## Current status
-
-The package is production ready, but **the API is explicitly unstable right
-now**. Expect to update your code if you want to follow along.
+* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a
+ `BPF_MAP_TYPE_RINGBUF` map
+* [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent
+ of `bpftool feature probe` for discovering BPF-related kernel features using native Go.
+* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift
+ the `RLIMIT_MEMLOCK` constraint on kernels before 5.11.
## Requirements
* A version of Go that is [supported by
upstream](https://golang.org/doc/devel/release.html#policy)
-* Linux 4.9, 4.19 or 5.4 (versions in-between should work, but are not tested)
-
-## Useful resources
-
-* [eBPF.io](https://ebpf.io) (recommended)
-* [Cilium eBPF documentation](https://docs.cilium.io/en/latest/bpf/#bpf-guide)
- (recommended)
-* [Linux documentation on
- BPF](https://www.kernel.org/doc/html/latest/networking/filter.html)
-* [eBPF features by Linux
- version](https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md)
+* Linux >= 4.9. CI is run against kernel.org LTS releases. 4.4 should work but is
+ not tested against.
## Regenerating Testdata
@@ -47,8 +63,15 @@ Run `make` in the root of this repository to rebuild testdata in all
subpackages. This requires Docker, as it relies on a standardized build
environment to keep the build output stable.
+It is possible to regenerate data using Podman by overriding the `CONTAINER_*`
+variables: `CONTAINER_ENGINE=podman CONTAINER_RUN_ARGS= make`.
+
The toolchain image build files are kept in [testdata/docker/](testdata/docker/).
## License
MIT
+
+### eBPF Gopher
+
+The eBPF honeygopher is based on the Go gopher designed by Renee French.
diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go
index 97f794cdb..a14e9e2c3 100644
--- a/vendor/github.com/cilium/ebpf/asm/func.go
+++ b/vendor/github.com/cilium/ebpf/asm/func.go
@@ -5,9 +5,13 @@ package asm
// BuiltinFunc is a built-in eBPF function.
type BuiltinFunc int32
+func (_ BuiltinFunc) Max() BuiltinFunc {
+ return maxBuiltinFunc - 1
+}
+
// eBPF built-in functions
//
-// You can renegerate this list using the following gawk script:
+// You can regenerate this list using the following gawk script:
//
// /FN\(.+\),/ {
// match($1, /\((.+)\)/, r)
@@ -132,6 +136,101 @@ const (
FnSkStorageDelete
FnSendSignal
FnTcpGenSyncookie
+ FnSkbOutput
+ FnProbeReadUser
+ FnProbeReadKernel
+ FnProbeReadUserStr
+ FnProbeReadKernelStr
+ FnTcpSendAck
+ FnSendSignalThread
+ FnJiffies64
+ FnReadBranchRecords
+ FnGetNsCurrentPidTgid
+ FnXdpOutput
+ FnGetNetnsCookie
+ FnGetCurrentAncestorCgroupId
+ FnSkAssign
+ FnKtimeGetBootNs
+ FnSeqPrintf
+ FnSeqWrite
+ FnSkCgroupId
+ FnSkAncestorCgroupId
+ FnRingbufOutput
+ FnRingbufReserve
+ FnRingbufSubmit
+ FnRingbufDiscard
+ FnRingbufQuery
+ FnCsumLevel
+ FnSkcToTcp6Sock
+ FnSkcToTcpSock
+ FnSkcToTcpTimewaitSock
+ FnSkcToTcpRequestSock
+ FnSkcToUdp6Sock
+ FnGetTaskStack
+ FnLoadHdrOpt
+ FnStoreHdrOpt
+ FnReserveHdrOpt
+ FnInodeStorageGet
+ FnInodeStorageDelete
+ FnDPath
+ FnCopyFromUser
+ FnSnprintfBtf
+ FnSeqPrintfBtf
+ FnSkbCgroupClassid
+ FnRedirectNeigh
+ FnPerCpuPtr
+ FnThisCpuPtr
+ FnRedirectPeer
+ FnTaskStorageGet
+ FnTaskStorageDelete
+ FnGetCurrentTaskBtf
+ FnBprmOptsSet
+ FnKtimeGetCoarseNs
+ FnImaInodeHash
+ FnSockFromFile
+ FnCheckMtu
+ FnForEachMapElem
+ FnSnprintf
+ FnSysBpf
+ FnBtfFindByNameKind
+ FnSysClose
+ FnTimerInit
+ FnTimerSetCallback
+ FnTimerStart
+ FnTimerCancel
+ FnGetFuncIp
+ FnGetAttachCookie
+ FnTaskPtRegs
+ FnGetBranchSnapshot
+ FnTraceVprintk
+ FnSkcToUnixSock
+ FnKallsymsLookupName
+ FnFindVma
+ FnLoop
+ FnStrncmp
+ FnGetFuncArg
+ FnGetFuncRet
+ FnGetFuncArgCnt
+ FnGetRetval
+ FnSetRetval
+ FnXdpGetBuffLen
+ FnXdpLoadBytes
+ FnXdpStoreBytes
+ FnCopyFromUserTask
+ FnSkbSetTstamp
+ FnImaFileHash
+ FnKptrXchg
+ FnMapLookupPercpuElem
+ FnSkcToMptcpSock
+ FnDynptrFromMem
+ FnRingbufReserveDynptr
+ FnRingbufSubmitDynptr
+ FnRingbufDiscardDynptr
+ FnDynptrRead
+ FnDynptrWrite
+ FnDynptrData
+
+ maxBuiltinFunc
)
// Call emits a function call.
diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go
index 8860b9fdb..b7431b7f6 100644
--- a/vendor/github.com/cilium/ebpf/asm/func_string.go
+++ b/vendor/github.com/cilium/ebpf/asm/func_string.go
@@ -119,11 +119,105 @@ func _() {
_ = x[FnSkStorageDelete-108]
_ = x[FnSendSignal-109]
_ = x[FnTcpGenSyncookie-110]
+ _ = x[FnSkbOutput-111]
+ _ = x[FnProbeReadUser-112]
+ _ = x[FnProbeReadKernel-113]
+ _ = x[FnProbeReadUserStr-114]
+ _ = x[FnProbeReadKernelStr-115]
+ _ = x[FnTcpSendAck-116]
+ _ = x[FnSendSignalThread-117]
+ _ = x[FnJiffies64-118]
+ _ = x[FnReadBranchRecords-119]
+ _ = x[FnGetNsCurrentPidTgid-120]
+ _ = x[FnXdpOutput-121]
+ _ = x[FnGetNetnsCookie-122]
+ _ = x[FnGetCurrentAncestorCgroupId-123]
+ _ = x[FnSkAssign-124]
+ _ = x[FnKtimeGetBootNs-125]
+ _ = x[FnSeqPrintf-126]
+ _ = x[FnSeqWrite-127]
+ _ = x[FnSkCgroupId-128]
+ _ = x[FnSkAncestorCgroupId-129]
+ _ = x[FnRingbufOutput-130]
+ _ = x[FnRingbufReserve-131]
+ _ = x[FnRingbufSubmit-132]
+ _ = x[FnRingbufDiscard-133]
+ _ = x[FnRingbufQuery-134]
+ _ = x[FnCsumLevel-135]
+ _ = x[FnSkcToTcp6Sock-136]
+ _ = x[FnSkcToTcpSock-137]
+ _ = x[FnSkcToTcpTimewaitSock-138]
+ _ = x[FnSkcToTcpRequestSock-139]
+ _ = x[FnSkcToUdp6Sock-140]
+ _ = x[FnGetTaskStack-141]
+ _ = x[FnLoadHdrOpt-142]
+ _ = x[FnStoreHdrOpt-143]
+ _ = x[FnReserveHdrOpt-144]
+ _ = x[FnInodeStorageGet-145]
+ _ = x[FnInodeStorageDelete-146]
+ _ = x[FnDPath-147]
+ _ = x[FnCopyFromUser-148]
+ _ = x[FnSnprintfBtf-149]
+ _ = x[FnSeqPrintfBtf-150]
+ _ = x[FnSkbCgroupClassid-151]
+ _ = x[FnRedirectNeigh-152]
+ _ = x[FnPerCpuPtr-153]
+ _ = x[FnThisCpuPtr-154]
+ _ = x[FnRedirectPeer-155]
+ _ = x[FnTaskStorageGet-156]
+ _ = x[FnTaskStorageDelete-157]
+ _ = x[FnGetCurrentTaskBtf-158]
+ _ = x[FnBprmOptsSet-159]
+ _ = x[FnKtimeGetCoarseNs-160]
+ _ = x[FnImaInodeHash-161]
+ _ = x[FnSockFromFile-162]
+ _ = x[FnCheckMtu-163]
+ _ = x[FnForEachMapElem-164]
+ _ = x[FnSnprintf-165]
+ _ = x[FnSysBpf-166]
+ _ = x[FnBtfFindByNameKind-167]
+ _ = x[FnSysClose-168]
+ _ = x[FnTimerInit-169]
+ _ = x[FnTimerSetCallback-170]
+ _ = x[FnTimerStart-171]
+ _ = x[FnTimerCancel-172]
+ _ = x[FnGetFuncIp-173]
+ _ = x[FnGetAttachCookie-174]
+ _ = x[FnTaskPtRegs-175]
+ _ = x[FnGetBranchSnapshot-176]
+ _ = x[FnTraceVprintk-177]
+ _ = x[FnSkcToUnixSock-178]
+ _ = x[FnKallsymsLookupName-179]
+ _ = x[FnFindVma-180]
+ _ = x[FnLoop-181]
+ _ = x[FnStrncmp-182]
+ _ = x[FnGetFuncArg-183]
+ _ = x[FnGetFuncRet-184]
+ _ = x[FnGetFuncArgCnt-185]
+ _ = x[FnGetRetval-186]
+ _ = x[FnSetRetval-187]
+ _ = x[FnXdpGetBuffLen-188]
+ _ = x[FnXdpLoadBytes-189]
+ _ = x[FnXdpStoreBytes-190]
+ _ = x[FnCopyFromUserTask-191]
+ _ = x[FnSkbSetTstamp-192]
+ _ = x[FnImaFileHash-193]
+ _ = x[FnKptrXchg-194]
+ _ = x[FnMapLookupPercpuElem-195]
+ _ = x[FnSkcToMptcpSock-196]
+ _ = x[FnDynptrFromMem-197]
+ _ = x[FnRingbufReserveDynptr-198]
+ _ = x[FnRingbufSubmitDynptr-199]
+ _ = x[FnRingbufDiscardDynptr-200]
+ _ = x[FnDynptrRead-201]
+ _ = x[FnDynptrWrite-202]
+ _ = x[FnDynptrData-203]
+ _ = x[maxBuiltinFunc-204]
}
-const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookie"
+const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubm
itFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDatamaxBuiltinFunc"
-var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632}
+var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3011}
func (i BuiltinFunc) String() string {
if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) {
diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go
index 5d9d820e5..f17d88b51 100644
--- a/vendor/github.com/cilium/ebpf/asm/instruction.go
+++ b/vendor/github.com/cilium/ebpf/asm/instruction.go
@@ -8,8 +8,10 @@ import (
"fmt"
"io"
"math"
+ "sort"
"strings"
+ "github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
@@ -19,6 +21,10 @@ const InstructionSize = 8
// RawInstructionOffset is an offset in units of raw BPF instructions.
type RawInstructionOffset uint64
+var ErrUnreferencedSymbol = errors.New("unreferenced symbol")
+var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference")
+var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference")
+
// Bytes returns the offset of an instruction in bytes.
func (rio RawInstructionOffset) Bytes() uint64 {
return uint64(rio) * InstructionSize
@@ -26,50 +32,57 @@ func (rio RawInstructionOffset) Bytes() uint64 {
// Instruction is a single eBPF instruction.
type Instruction struct {
- OpCode OpCode
- Dst Register
- Src Register
- Offset int16
- Constant int64
- Reference string
- Symbol string
-}
-
-// Sym creates a symbol.
-func (ins Instruction) Sym(name string) Instruction {
- ins.Symbol = name
- return ins
+ OpCode OpCode
+ Dst Register
+ Src Register
+ Offset int16
+ Constant int64
+
+ // Metadata contains optional metadata about this instruction.
+ Metadata Metadata
}
// Unmarshal decodes a BPF instruction.
func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) {
- var bi bpfInstruction
- err := binary.Read(r, bo, &bi)
- if err != nil {
+ data := make([]byte, InstructionSize)
+ if _, err := io.ReadFull(r, data); err != nil {
return 0, err
}
- ins.OpCode = bi.OpCode
- ins.Offset = bi.Offset
- ins.Constant = int64(bi.Constant)
- ins.Dst, ins.Src, err = bi.Registers.Unmarshal(bo)
- if err != nil {
- return 0, fmt.Errorf("can't unmarshal registers: %s", err)
+ ins.OpCode = OpCode(data[0])
+
+ regs := data[1]
+ switch bo {
+ case binary.LittleEndian:
+ ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4)
+ case binary.BigEndian:
+ ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf)
}
- if !bi.OpCode.isDWordLoad() {
+ ins.Offset = int16(bo.Uint16(data[2:4]))
+ // Convert to int32 before widening to int64
+ // to ensure the signed bit is carried over.
+ ins.Constant = int64(int32(bo.Uint32(data[4:8])))
+
+ if !ins.OpCode.IsDWordLoad() {
return InstructionSize, nil
}
- var bi2 bpfInstruction
- if err := binary.Read(r, bo, &bi2); err != nil {
+ // Pull another instruction from the stream to retrieve the second
+ // half of the 64-bit immediate value.
+ if _, err := io.ReadFull(r, data); err != nil {
// No Wrap, to avoid io.EOF clash
return 0, errors.New("64bit immediate is missing second half")
}
- if bi2.OpCode != 0 || bi2.Offset != 0 || bi2.Registers != 0 {
+
+ // Require that all fields other than the value are zero.
+ if bo.Uint32(data[0:4]) != 0 {
return 0, errors.New("64bit immediate has non-zero fields")
}
- ins.Constant = int64(uint64(uint32(bi2.Constant))<<32 | uint64(uint32(bi.Constant)))
+
+ cons1 := uint32(ins.Constant)
+ cons2 := int32(bo.Uint32(data[4:8]))
+ ins.Constant = int64(cons2)<<32 | int64(cons1)
return 2 * InstructionSize, nil
}
@@ -80,7 +93,7 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
return 0, errors.New("invalid opcode")
}
- isDWordLoad := ins.OpCode.isDWordLoad()
+ isDWordLoad := ins.OpCode.IsDWordLoad()
cons := int32(ins.Constant)
if isDWordLoad {
@@ -93,14 +106,12 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
return 0, fmt.Errorf("can't marshal registers: %s", err)
}
- bpfi := bpfInstruction{
- ins.OpCode,
- regs,
- ins.Offset,
- cons,
- }
-
- if err := binary.Write(w, bo, &bpfi); err != nil {
+ data := make([]byte, InstructionSize)
+ data[0] = byte(ins.OpCode)
+ data[1] = byte(regs)
+ bo.PutUint16(data[2:4], uint16(ins.Offset))
+ bo.PutUint32(data[4:8], uint32(cons))
+ if _, err := w.Write(data); err != nil {
return 0, err
}
@@ -108,45 +119,83 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error)
return InstructionSize, nil
}
- bpfi = bpfInstruction{
- Constant: int32(ins.Constant >> 32),
- }
-
- if err := binary.Write(w, bo, &bpfi); err != nil {
+ // The first half of the second part of a double-wide instruction
+ // must be zero. The second half carries the value.
+ bo.PutUint32(data[0:4], 0)
+ bo.PutUint32(data[4:8], uint32(ins.Constant>>32))
+ if _, err := w.Write(data); err != nil {
return 0, err
}
return 2 * InstructionSize, nil
}
+// AssociateMap associates a Map with this Instruction.
+//
+// Implicitly clears the Instruction's Reference field.
+//
+// Returns an error if the Instruction is not a map load.
+func (ins *Instruction) AssociateMap(m FDer) error {
+ if !ins.IsLoadFromMap() {
+ return errors.New("not a load from a map")
+ }
+
+ ins.Metadata.Set(referenceMeta{}, nil)
+ ins.Metadata.Set(mapMeta{}, m)
+
+ return nil
+}
+
// RewriteMapPtr changes an instruction to use a new map fd.
//
// Returns an error if the instruction doesn't load a map.
+//
+// Deprecated: use AssociateMap instead. If you cannot provide a Map,
+// wrap an fd in a type implementing FDer.
func (ins *Instruction) RewriteMapPtr(fd int) error {
- if !ins.OpCode.isDWordLoad() {
- return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
- }
-
- if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue {
+ if !ins.IsLoadFromMap() {
return errors.New("not a load from a map")
}
+ ins.encodeMapFD(fd)
+
+ return nil
+}
+
+func (ins *Instruction) encodeMapFD(fd int) {
// Preserve the offset value for direct map loads.
offset := uint64(ins.Constant) & (math.MaxUint32 << 32)
rawFd := uint64(uint32(fd))
ins.Constant = int64(offset | rawFd)
- return nil
}
-func (ins *Instruction) mapPtr() uint32 {
- return uint32(uint64(ins.Constant) & math.MaxUint32)
+// MapPtr returns the map fd for this instruction.
+//
+// The result is undefined if the instruction is not a load from a map,
+// see IsLoadFromMap.
+//
+// Deprecated: use Map() instead.
+func (ins *Instruction) MapPtr() int {
+ // If there is a map associated with the instruction, return its FD.
+ if fd := ins.Metadata.Get(mapMeta{}); fd != nil {
+ return fd.(FDer).FD()
+ }
+
+ // Fall back to the fd stored in the Constant field
+ return ins.mapFd()
+}
+
+// mapFd returns the map file descriptor stored in the 32 least significant
+// bits of ins' Constant field.
+func (ins *Instruction) mapFd() int {
+ return int(int32(ins.Constant))
}
// RewriteMapOffset changes the offset of a direct load from a map.
//
// Returns an error if the instruction is not a direct load.
func (ins *Instruction) RewriteMapOffset(offset uint32) error {
- if !ins.OpCode.isDWordLoad() {
+ if !ins.OpCode.IsDWordLoad() {
return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
}
@@ -163,10 +212,10 @@ func (ins *Instruction) mapOffset() uint32 {
return uint32(uint64(ins.Constant) >> 32)
}
-// isLoadFromMap returns true if the instruction loads from a map.
+// IsLoadFromMap returns true if the instruction loads from a map.
//
// This covers both loading the map pointer and direct map value loads.
-func (ins *Instruction) isLoadFromMap() bool {
+func (ins *Instruction) IsLoadFromMap() bool {
return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue)
}
@@ -177,6 +226,29 @@ func (ins *Instruction) IsFunctionCall() bool {
return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
}
+// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer.
+func (ins *Instruction) IsLoadOfFunctionPointer() bool {
+ return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc
+}
+
+// IsFunctionReference returns true if the instruction references another BPF
+// function, either by invoking a Call jump operation or by loading a function
+// pointer.
+func (ins *Instruction) IsFunctionReference() bool {
+ return ins.IsFunctionCall() || ins.IsLoadOfFunctionPointer()
+}
+
+// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call.
+func (ins *Instruction) IsBuiltinCall() bool {
+ return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0
+}
+
+// IsConstantLoad returns true if the instruction loads a constant of the
+// given size.
+func (ins *Instruction) IsConstantLoad(size Size) bool {
+ return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0
+}
+
// Format implements fmt.Formatter.
func (ins Instruction) Format(f fmt.State, c rune) {
if c != 'v' {
@@ -197,22 +269,31 @@ func (ins Instruction) Format(f fmt.State, c rune) {
return
}
- if ins.isLoadFromMap() {
- fd := int32(ins.mapPtr())
+ if ins.IsLoadFromMap() {
+ fd := ins.mapFd()
+ m := ins.Map()
switch ins.Src {
case PseudoMapFD:
- fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
+ if m != nil {
+ fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m)
+ } else {
+ fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
+ }
case PseudoMapValue:
- fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset())
+ if m != nil {
+ fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset())
+ } else {
+ fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset())
+ }
}
goto ref
}
fmt.Fprintf(f, "%v ", op)
- switch cls := op.Class(); cls {
- case LdClass, LdXClass, StClass, StXClass:
+ switch cls := op.Class(); {
+ case cls.isLoadOrStore():
switch op.Mode() {
case ImmMode:
fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant)
@@ -226,7 +307,7 @@ func (ins Instruction) Format(f fmt.State, c rune) {
fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src)
}
- case ALU64Class, ALUClass:
+ case cls.IsALU():
fmt.Fprintf(f, "dst: %s ", ins.Dst)
if op.ALUOp() == Swap || op.Source() == ImmSource {
fmt.Fprintf(f, "imm: %d", ins.Constant)
@@ -234,7 +315,7 @@ func (ins Instruction) Format(f fmt.State, c rune) {
fmt.Fprintf(f, "src: %s", ins.Src)
}
- case JumpClass:
+ case cls.IsJump():
switch jop := op.JumpOp(); jop {
case Call:
if ins.Src == PseudoCall {
@@ -255,42 +336,212 @@ func (ins Instruction) Format(f fmt.State, c rune) {
}
ref:
- if ins.Reference != "" {
- fmt.Fprintf(f, " <%s>", ins.Reference)
+ if ins.Reference() != "" {
+ fmt.Fprintf(f, " <%s>", ins.Reference())
}
}
+func (ins Instruction) equal(other Instruction) bool {
+ return ins.OpCode == other.OpCode &&
+ ins.Dst == other.Dst &&
+ ins.Src == other.Src &&
+ ins.Offset == other.Offset &&
+ ins.Constant == other.Constant
+}
+
+// Size returns the amount of bytes ins would occupy in binary form.
+func (ins Instruction) Size() uint64 {
+ return uint64(InstructionSize * ins.OpCode.rawInstructions())
+}
+
+type symbolMeta struct{}
+
+// WithSymbol marks the Instruction as a Symbol, which other Instructions
+// can point to using corresponding calls to WithReference.
+func (ins Instruction) WithSymbol(name string) Instruction {
+ ins.Metadata.Set(symbolMeta{}, name)
+ return ins
+}
+
+// Sym creates a symbol.
+//
+// Deprecated: use WithSymbol instead.
+func (ins Instruction) Sym(name string) Instruction {
+ return ins.WithSymbol(name)
+}
+
+// Symbol returns the value ins has been marked with using WithSymbol,
+// otherwise returns an empty string. A symbol is often an Instruction
+// at the start of a function body.
+func (ins Instruction) Symbol() string {
+ sym, _ := ins.Metadata.Get(symbolMeta{}).(string)
+ return sym
+}
+
+type referenceMeta struct{}
+
+// WithReference makes ins reference another Symbol or map by name.
+func (ins Instruction) WithReference(ref string) Instruction {
+ ins.Metadata.Set(referenceMeta{}, ref)
+ return ins
+}
+
+// Reference returns the Symbol or map name referenced by ins, if any.
+func (ins Instruction) Reference() string {
+ ref, _ := ins.Metadata.Get(referenceMeta{}).(string)
+ return ref
+}
+
+type mapMeta struct{}
+
+// Map returns the Map referenced by ins, if any.
+// An Instruction will contain a Map if e.g. it references an existing,
+// pinned map that was opened during ELF loading.
+func (ins Instruction) Map() FDer {
+ fd, _ := ins.Metadata.Get(mapMeta{}).(FDer)
+ return fd
+}
+
+type sourceMeta struct{}
+
+// WithSource adds source information about the Instruction.
+func (ins Instruction) WithSource(src fmt.Stringer) Instruction {
+ ins.Metadata.Set(sourceMeta{}, src)
+ return ins
+}
+
+// Source returns source information about the Instruction. The field is
+// present when the compiler emits BTF line info about the Instruction and
+// usually contains the line of source code responsible for it.
+func (ins Instruction) Source() fmt.Stringer {
+ str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer)
+ return str
+}
+
+// A Comment can be passed to Instruction.WithSource to add a comment
+// to an instruction.
+type Comment string
+
+func (s Comment) String() string {
+ return string(s)
+}
+
+// FDer represents a resource tied to an underlying file descriptor.
+// Used as a stand-in for e.g. ebpf.Map since that type cannot be
+// imported here and FD() is the only method we rely on.
+type FDer interface {
+ FD() int
+}
+
// Instructions is an eBPF program.
type Instructions []Instruction
+// Unmarshal unmarshals an Instructions from a binary instruction stream.
+// All instructions in insns are replaced by instructions decoded from r.
+func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error {
+ if len(*insns) > 0 {
+ *insns = nil
+ }
+
+ var offset uint64
+ for {
+ var ins Instruction
+ n, err := ins.Unmarshal(r, bo)
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return fmt.Errorf("offset %d: %w", offset, err)
+ }
+
+ *insns = append(*insns, ins)
+ offset += n
+ }
+
+ return nil
+}
+
+// Name returns the name of the function insns belongs to, if any.
+func (insns Instructions) Name() string {
+ if len(insns) == 0 {
+ return ""
+ }
+ return insns[0].Symbol()
+}
+
func (insns Instructions) String() string {
return fmt.Sprint(insns)
}
+// Size returns the amount of bytes insns would occupy in binary form.
+func (insns Instructions) Size() uint64 {
+ var sum uint64
+ for _, ins := range insns {
+ sum += ins.Size()
+ }
+ return sum
+}
+
+// AssociateMap updates all Instructions that Reference the given symbol
+// to point to an existing Map m instead.
+//
+// Returns ErrUnreferencedSymbol error if no references to symbol are found
+// in insns. If symbol is anything else than the symbol name of map (e.g.
+// a bpf2bpf subprogram), an error is returned.
+func (insns Instructions) AssociateMap(symbol string, m FDer) error {
+ if symbol == "" {
+ return errors.New("empty symbol")
+ }
+
+ var found bool
+ for i := range insns {
+ ins := &insns[i]
+ if ins.Reference() != symbol {
+ continue
+ }
+
+ if err := ins.AssociateMap(m); err != nil {
+ return err
+ }
+
+ found = true
+ }
+
+ if !found {
+ return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol)
+ }
+
+ return nil
+}
+
// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd.
//
-// Returns an error if the symbol isn't used, see IsUnreferencedSymbol.
+// Returns ErrUnreferencedSymbol if the symbol isn't used.
+//
+// Deprecated: use AssociateMap instead.
func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
if symbol == "" {
return errors.New("empty symbol")
}
- found := false
+ var found bool
for i := range insns {
ins := &insns[i]
- if ins.Reference != symbol {
+ if ins.Reference() != symbol {
continue
}
- if err := ins.RewriteMapPtr(fd); err != nil {
- return err
+ if !ins.IsLoadFromMap() {
+ return errors.New("not a load from a map")
}
+ ins.encodeMapFD(fd)
+
found = true
}
if !found {
- return &unreferencedSymbolError{symbol}
+ return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol)
}
return nil
@@ -302,31 +553,61 @@ func (insns Instructions) SymbolOffsets() (map[string]int, error) {
offsets := make(map[string]int)
for i, ins := range insns {
- if ins.Symbol == "" {
+ if ins.Symbol() == "" {
continue
}
- if _, ok := offsets[ins.Symbol]; ok {
- return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol)
+ if _, ok := offsets[ins.Symbol()]; ok {
+ return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol())
}
- offsets[ins.Symbol] = i
+ offsets[ins.Symbol()] = i
}
return offsets, nil
}
+// FunctionReferences returns a set of symbol names these Instructions make
+// bpf-to-bpf calls to.
+func (insns Instructions) FunctionReferences() []string {
+ calls := make(map[string]struct{})
+ for _, ins := range insns {
+ if ins.Constant != -1 {
+ // BPF-to-BPF calls have -1 constants.
+ continue
+ }
+
+ if ins.Reference() == "" {
+ continue
+ }
+
+ if !ins.IsFunctionReference() {
+ continue
+ }
+
+ calls[ins.Reference()] = struct{}{}
+ }
+
+ result := make([]string, 0, len(calls))
+ for call := range calls {
+ result = append(result, call)
+ }
+
+ sort.Strings(result)
+ return result
+}
+
// ReferenceOffsets returns the set of references and their offset in
// the instructions.
func (insns Instructions) ReferenceOffsets() map[string][]int {
offsets := make(map[string][]int)
for i, ins := range insns {
- if ins.Reference == "" {
+ if ins.Reference() == "" {
continue
}
- offsets[ins.Reference] = append(offsets[ins.Reference], i)
+ offsets[ins.Reference()] = append(offsets[ins.Reference()], i)
}
return offsets
@@ -337,7 +618,7 @@ func (insns Instructions) ReferenceOffsets() map[string][]int {
// You can control indentation of symbols by
// specifying a width. Setting a precision controls the indentation of
// instructions.
-// The default character is a tab, which can be overriden by specifying
+// The default character is a tab, which can be overridden by specifying
// the ' ' space flag.
func (insns Instructions) Format(f fmt.State, c rune) {
if c != 's' && c != 'v' {
@@ -377,20 +658,36 @@ func (insns Instructions) Format(f fmt.State, c rune) {
iter := insns.Iterate()
for iter.Next() {
- if iter.Ins.Symbol != "" {
- fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol)
+ if iter.Ins.Symbol() != "" {
+ fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol())
+ }
+ if src := iter.Ins.Source(); src != nil {
+ line := strings.TrimSpace(src.String())
+ if line != "" {
+ fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line)
+ }
}
fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins)
}
-
- return
}
// Marshal encodes a BPF program into the kernel format.
+//
+// insns may be modified if there are unresolved jumps or bpf2bpf calls.
+//
+// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction
+// without a matching Symbol Instruction within insns.
func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
+ if err := insns.encodeFunctionReferences(); err != nil {
+ return err
+ }
+
+ if err := insns.encodeMapPointers(); err != nil {
+ return err
+ }
+
for i, ins := range insns {
- _, err := ins.Marshal(w, bo)
- if err != nil {
+ if _, err := ins.Marshal(w, bo); err != nil {
return fmt.Errorf("instruction %d: %w", i, err)
}
}
@@ -405,7 +702,7 @@ func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
h := sha1.New()
for i, ins := range insns {
- if ins.isLoadFromMap() {
+ if ins.IsLoadFromMap() {
ins.Constant = 0
}
_, err := ins.Marshal(h, bo)
@@ -416,6 +713,95 @@ func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil
}
+// encodeFunctionReferences populates the Offset (or Constant, depending on
+// the instruction type) field of instructions with a Reference field to point
+// to the offset of the corresponding instruction with a matching Symbol field.
+//
+// Only Reference Instructions that are either jumps or BPF function references
+// (calls or function pointer loads) are populated.
+//
+// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction
+// without at least one corresponding Symbol Instruction within insns.
+func (insns Instructions) encodeFunctionReferences() error {
+ // Index the offsets of instructions tagged as a symbol.
+ symbolOffsets := make(map[string]RawInstructionOffset)
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+
+ if ins.Symbol() == "" {
+ continue
+ }
+
+ if _, ok := symbolOffsets[ins.Symbol()]; ok {
+ return fmt.Errorf("duplicate symbol %s", ins.Symbol())
+ }
+
+ symbolOffsets[ins.Symbol()] = iter.Offset
+ }
+
+ // Find all instructions tagged as references to other symbols.
+ // Depending on the instruction type, populate their constant or offset
+ // fields to point to the symbol they refer to within the insn stream.
+ iter = insns.Iterate()
+ for iter.Next() {
+ i := iter.Index
+ offset := iter.Offset
+ ins := iter.Ins
+
+ if ins.Reference() == "" {
+ continue
+ }
+
+ switch {
+ case ins.IsFunctionReference() && ins.Constant == -1:
+ symOffset, ok := symbolOffsets[ins.Reference()]
+ if !ok {
+ return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference)
+ }
+
+ ins.Constant = int64(symOffset - offset - 1)
+
+ case ins.OpCode.Class().IsJump() && ins.Offset == -1:
+ symOffset, ok := symbolOffsets[ins.Reference()]
+ if !ok {
+ return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference)
+ }
+
+ ins.Offset = int16(symOffset - offset - 1)
+ }
+ }
+
+ return nil
+}
+
+// encodeMapPointers finds all Map Instructions and encodes their FDs
+// into their Constant fields.
+func (insns Instructions) encodeMapPointers() error {
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+
+ if !ins.IsLoadFromMap() {
+ continue
+ }
+
+ m := ins.Map()
+ if m == nil {
+ continue
+ }
+
+ fd := m.FD()
+ if fd < 0 {
+ return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd)
+ }
+
+ ins.encodeMapFD(m.FD())
+ }
+
+ return nil
+}
+
// Iterate allows iterating a BPF program while keeping track of
// various offsets.
//
@@ -451,13 +837,6 @@ func (iter *InstructionIterator) Next() bool {
return true
}
-type bpfInstruction struct {
- OpCode OpCode
- Registers bpfRegisters
- Offset int16
- Constant int32
-}
-
type bpfRegisters uint8
func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) {
@@ -471,28 +850,10 @@ func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, erro
}
}
-func (r bpfRegisters) Unmarshal(bo binary.ByteOrder) (dst, src Register, err error) {
- switch bo {
- case binary.LittleEndian:
- return Register(r & 0xF), Register(r >> 4), nil
- case binary.BigEndian:
- return Register(r >> 4), Register(r & 0xf), nil
- default:
- return 0, 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
- }
-}
-
-type unreferencedSymbolError struct {
- symbol string
-}
-
-func (use *unreferencedSymbolError) Error() string {
- return fmt.Sprintf("unreferenced symbol %s", use.symbol)
-}
-
// IsUnreferencedSymbol returns true if err was caused by
// an unreferenced symbol.
+//
+// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol).
func IsUnreferencedSymbol(err error) bool {
- _, ok := err.(*unreferencedSymbolError)
- return ok
+ return errors.Is(err, ErrUnreferencedSymbol)
}
diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go
index 7757179de..e31e42cac 100644
--- a/vendor/github.com/cilium/ebpf/asm/jump.go
+++ b/vendor/github.com/cilium/ebpf/asm/jump.go
@@ -60,50 +60,68 @@ func (op JumpOp) Op(source Source) OpCode {
return OpCode(JumpClass).SetJumpOp(op).SetSource(source)
}
-// Imm compares dst to value, and adjusts PC by offset if the condition is fulfilled.
+// Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled.
func (op JumpOp) Imm(dst Register, value int32, label string) Instruction {
- if op == Exit || op == Call || op == Ja {
- return Instruction{OpCode: InvalidOpCode}
- }
+ return Instruction{
+ OpCode: op.opCode(JumpClass, ImmSource),
+ Dst: dst,
+ Offset: -1,
+ Constant: int64(value),
+ }.WithReference(label)
+}
+// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled.
+// Requires kernel 5.1.
+func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction {
return Instruction{
- OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(ImmSource),
- Dst: dst,
- Offset: -1,
- Constant: int64(value),
- Reference: label,
- }
+ OpCode: op.opCode(Jump32Class, ImmSource),
+ Dst: dst,
+ Offset: -1,
+ Constant: int64(value),
+ }.WithReference(label)
}
-// Reg compares dst to src, and adjusts PC by offset if the condition is fulfilled.
+// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled.
func (op JumpOp) Reg(dst, src Register, label string) Instruction {
- if op == Exit || op == Call || op == Ja {
- return Instruction{OpCode: InvalidOpCode}
- }
+ return Instruction{
+ OpCode: op.opCode(JumpClass, RegSource),
+ Dst: dst,
+ Src: src,
+ Offset: -1,
+ }.WithReference(label)
+}
+// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled.
+// Requires kernel 5.1.
+func (op JumpOp) Reg32(dst, src Register, label string) Instruction {
return Instruction{
- OpCode: OpCode(JumpClass).SetJumpOp(op).SetSource(RegSource),
- Dst: dst,
- Src: src,
- Offset: -1,
- Reference: label,
+ OpCode: op.opCode(Jump32Class, RegSource),
+ Dst: dst,
+ Src: src,
+ Offset: -1,
+ }.WithReference(label)
+}
+
+func (op JumpOp) opCode(class Class, source Source) OpCode {
+ if op == Exit || op == Call || op == Ja {
+ return InvalidOpCode
}
+
+ return OpCode(class).SetJumpOp(op).SetSource(source)
}
// Label adjusts PC to the address of the label.
func (op JumpOp) Label(label string) Instruction {
if op == Call {
return Instruction{
- OpCode: OpCode(JumpClass).SetJumpOp(Call),
- Src: PseudoCall,
- Constant: -1,
- Reference: label,
- }
+ OpCode: OpCode(JumpClass).SetJumpOp(Call),
+ Src: PseudoCall,
+ Constant: -1,
+ }.WithReference(label)
}
return Instruction{
- OpCode: OpCode(JumpClass).SetJumpOp(op),
- Offset: -1,
- Reference: label,
- }
+ OpCode: OpCode(JumpClass).SetJumpOp(op),
+ Offset: -1,
+ }.WithReference(label)
}
diff --git a/vendor/github.com/cilium/ebpf/asm/load_store.go b/vendor/github.com/cilium/ebpf/asm/load_store.go
index 2d0ec648e..85ed286b0 100644
--- a/vendor/github.com/cilium/ebpf/asm/load_store.go
+++ b/vendor/github.com/cilium/ebpf/asm/load_store.go
@@ -111,7 +111,7 @@ func LoadMapPtr(dst Register, fd int) Instruction {
OpCode: LoadImmOp(DWord),
Dst: dst,
Src: PseudoMapFD,
- Constant: int64(fd),
+ Constant: int64(uint32(fd)),
}
}
diff --git a/vendor/github.com/cilium/ebpf/asm/metadata.go b/vendor/github.com/cilium/ebpf/asm/metadata.go
new file mode 100644
index 000000000..dd368a936
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/metadata.go
@@ -0,0 +1,80 @@
+package asm
+
+// Metadata contains metadata about an instruction.
+type Metadata struct {
+ head *metaElement
+}
+
+type metaElement struct {
+ next *metaElement
+ key, value interface{}
+}
+
+// Find the element containing key.
+//
+// Returns nil if there is no such element.
+func (m *Metadata) find(key interface{}) *metaElement {
+ for e := m.head; e != nil; e = e.next {
+ if e.key == key {
+ return e
+ }
+ }
+ return nil
+}
+
+// Remove an element from the linked list.
+//
+// Copies as many elements of the list as necessary to remove r, but doesn't
+// perform a full copy.
+func (m *Metadata) remove(r *metaElement) {
+ current := &m.head
+ for e := m.head; e != nil; e = e.next {
+ if e == r {
+ // We've found the element we want to remove.
+ *current = e.next
+
+ // No need to copy the tail.
+ return
+ }
+
+ // There is another element in front of the one we want to remove.
+ // We have to copy it to be able to change metaElement.next.
+ cpy := &metaElement{key: e.key, value: e.value}
+ *current = cpy
+ current = &cpy.next
+ }
+}
+
+// Set a key to a value.
+//
+// If value is nil, the key is removed. Avoids modifying old metadata by
+// copying if necessary.
+func (m *Metadata) Set(key, value interface{}) {
+ if e := m.find(key); e != nil {
+ if e.value == value {
+ // Key is present and the value is the same. Nothing to do.
+ return
+ }
+
+ // Key is present with a different value. Create a copy of the list
+ // which doesn't have the element in it.
+ m.remove(e)
+ }
+
+ // m.head is now a linked list that doesn't contain key.
+ if value == nil {
+ return
+ }
+
+ m.head = &metaElement{key: key, value: value, next: m.head}
+}
+
+// Get the value of a key.
+//
+// Returns nil if no value with the given key is present.
+func (m *Metadata) Get(key interface{}) interface{} {
+ if e := m.find(key); e != nil {
+ return e.value
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go
index dc4564a98..b11917e18 100644
--- a/vendor/github.com/cilium/ebpf/asm/opcode.go
+++ b/vendor/github.com/cilium/ebpf/asm/opcode.go
@@ -7,14 +7,6 @@ import (
//go:generate stringer -output opcode_string.go -type=Class
-type encoding int
-
-const (
- unknownEncoding encoding = iota
- loadOrStore
- jumpOrALU
-)
-
// Class of operations
//
// msb lsb
@@ -26,31 +18,52 @@ type Class uint8
const classMask OpCode = 0x07
const (
- // LdClass load memory
+ // LdClass loads immediate values into registers.
+ // Also used for non-standard load operations from cBPF.
LdClass Class = 0x00
- // LdXClass load memory from constant
+ // LdXClass loads memory into registers.
LdXClass Class = 0x01
- // StClass load register from memory
+ // StClass stores immediate values to memory.
StClass Class = 0x02
- // StXClass load register from constant
+ // StXClass stores registers to memory.
StXClass Class = 0x03
- // ALUClass arithmetic operators
+ // ALUClass describes arithmetic operators.
ALUClass Class = 0x04
- // JumpClass jump operators
+ // JumpClass describes jump operators.
JumpClass Class = 0x05
- // ALU64Class arithmetic in 64 bit mode
+ // Jump32Class describes jump operators with 32-bit comparisons.
+ // Requires kernel 5.1.
+ Jump32Class Class = 0x06
+ // ALU64Class describes arithmetic operators in 64-bit mode.
ALU64Class Class = 0x07
)
-func (cls Class) encoding() encoding {
- switch cls {
- case LdClass, LdXClass, StClass, StXClass:
- return loadOrStore
- case ALU64Class, ALUClass, JumpClass:
- return jumpOrALU
- default:
- return unknownEncoding
- }
+// IsLoad checks if this is either LdClass or LdXClass.
+func (cls Class) IsLoad() bool {
+ return cls == LdClass || cls == LdXClass
+}
+
+// IsStore checks if this is either StClass or StXClass.
+func (cls Class) IsStore() bool {
+ return cls == StClass || cls == StXClass
+}
+
+func (cls Class) isLoadOrStore() bool {
+ return cls.IsLoad() || cls.IsStore()
+}
+
+// IsALU checks if this is either ALUClass or ALU64Class.
+func (cls Class) IsALU() bool {
+ return cls == ALUClass || cls == ALU64Class
+}
+
+// IsJump checks if this is either JumpClass or Jump32Class.
+func (cls Class) IsJump() bool {
+ return cls == JumpClass || cls == Jump32Class
+}
+
+func (cls Class) isJumpOrALU() bool {
+ return cls.IsJump() || cls.IsALU()
}
// OpCode is a packed eBPF opcode.
@@ -69,13 +82,13 @@ const InvalidOpCode OpCode = 0xff
// rawInstructions returns the number of BPF instructions required
// to encode this opcode.
func (op OpCode) rawInstructions() int {
- if op.isDWordLoad() {
+ if op.IsDWordLoad() {
return 2
}
return 1
}
-func (op OpCode) isDWordLoad() bool {
+func (op OpCode) IsDWordLoad() bool {
return op == LoadImmOp(DWord)
}
@@ -86,7 +99,7 @@ func (op OpCode) Class() Class {
// Mode returns the mode for load and store operations.
func (op OpCode) Mode() Mode {
- if op.Class().encoding() != loadOrStore {
+ if !op.Class().isLoadOrStore() {
return InvalidMode
}
return Mode(op & modeMask)
@@ -94,7 +107,7 @@ func (op OpCode) Mode() Mode {
// Size returns the size for load and store operations.
func (op OpCode) Size() Size {
- if op.Class().encoding() != loadOrStore {
+ if !op.Class().isLoadOrStore() {
return InvalidSize
}
return Size(op & sizeMask)
@@ -102,7 +115,7 @@ func (op OpCode) Size() Size {
// Source returns the source for branch and ALU operations.
func (op OpCode) Source() Source {
- if op.Class().encoding() != jumpOrALU || op.ALUOp() == Swap {
+ if !op.Class().isJumpOrALU() || op.ALUOp() == Swap {
return InvalidSource
}
return Source(op & sourceMask)
@@ -110,7 +123,7 @@ func (op OpCode) Source() Source {
// ALUOp returns the ALUOp.
func (op OpCode) ALUOp() ALUOp {
- if op.Class().encoding() != jumpOrALU {
+ if !op.Class().IsALU() {
return InvalidALUOp
}
return ALUOp(op & aluMask)
@@ -125,18 +138,27 @@ func (op OpCode) Endianness() Endianness {
}
// JumpOp returns the JumpOp.
+// Returns InvalidJumpOp if it doesn't encode a jump.
func (op OpCode) JumpOp() JumpOp {
- if op.Class().encoding() != jumpOrALU {
+ if !op.Class().IsJump() {
return InvalidJumpOp
}
- return JumpOp(op & jumpMask)
+
+ jumpOp := JumpOp(op & jumpMask)
+
+ // Some JumpOps are only supported by JumpClass, not Jump32Class.
+ if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call || jumpOp == Ja) {
+ return InvalidJumpOp
+ }
+
+ return jumpOp
}
// SetMode sets the mode on load and store operations.
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetMode(mode Mode) OpCode {
- if op.Class().encoding() != loadOrStore || !valid(OpCode(mode), modeMask) {
+ if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) {
return InvalidOpCode
}
return (op & ^modeMask) | OpCode(mode)
@@ -146,7 +168,7 @@ func (op OpCode) SetMode(mode Mode) OpCode {
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetSize(size Size) OpCode {
- if op.Class().encoding() != loadOrStore || !valid(OpCode(size), sizeMask) {
+ if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) {
return InvalidOpCode
}
return (op & ^sizeMask) | OpCode(size)
@@ -156,7 +178,7 @@ func (op OpCode) SetSize(size Size) OpCode {
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetSource(source Source) OpCode {
- if op.Class().encoding() != jumpOrALU || !valid(OpCode(source), sourceMask) {
+ if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) {
return InvalidOpCode
}
return (op & ^sourceMask) | OpCode(source)
@@ -166,8 +188,7 @@ func (op OpCode) SetSource(source Source) OpCode {
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetALUOp(alu ALUOp) OpCode {
- class := op.Class()
- if (class != ALUClass && class != ALU64Class) || !valid(OpCode(alu), aluMask) {
+ if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) {
return InvalidOpCode
}
return (op & ^aluMask) | OpCode(alu)
@@ -177,17 +198,25 @@ func (op OpCode) SetALUOp(alu ALUOp) OpCode {
//
// Returns InvalidOpCode if op is of the wrong class.
func (op OpCode) SetJumpOp(jump JumpOp) OpCode {
- if op.Class() != JumpClass || !valid(OpCode(jump), jumpMask) {
+ if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) {
+ return InvalidOpCode
+ }
+
+ newOp := (op & ^jumpMask) | OpCode(jump)
+
+ // Check newOp is legal.
+ if newOp.JumpOp() == InvalidJumpOp {
return InvalidOpCode
}
- return (op & ^jumpMask) | OpCode(jump)
+
+ return newOp
}
func (op OpCode) String() string {
var f strings.Builder
- switch class := op.Class(); class {
- case LdClass, LdXClass, StClass, StXClass:
+ switch class := op.Class(); {
+ case class.isLoadOrStore():
f.WriteString(strings.TrimSuffix(class.String(), "Class"))
mode := op.Mode()
@@ -204,7 +233,7 @@ func (op OpCode) String() string {
f.WriteString("B")
}
- case ALU64Class, ALUClass:
+ case class.IsALU():
f.WriteString(op.ALUOp().String())
if op.ALUOp() == Swap {
@@ -218,8 +247,13 @@ func (op OpCode) String() string {
f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
}
- case JumpClass:
+ case class.IsJump():
f.WriteString(op.JumpOp().String())
+
+ if class == Jump32Class {
+ f.WriteString("32")
+ }
+
if jop := op.JumpOp(); jop != Exit && jop != Call {
f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
}
diff --git a/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/vendor/github.com/cilium/ebpf/asm/opcode_string.go
index 079ce1db0..58bc3e7e7 100644
--- a/vendor/github.com/cilium/ebpf/asm/opcode_string.go
+++ b/vendor/github.com/cilium/ebpf/asm/opcode_string.go
@@ -14,25 +14,17 @@ func _() {
_ = x[StXClass-3]
_ = x[ALUClass-4]
_ = x[JumpClass-5]
+ _ = x[Jump32Class-6]
_ = x[ALU64Class-7]
}
-const (
- _Class_name_0 = "LdClassLdXClassStClassStXClassALUClassJumpClass"
- _Class_name_1 = "ALU64Class"
-)
+const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class"
-var (
- _Class_index_0 = [...]uint8{0, 7, 15, 22, 30, 38, 47}
-)
+var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68}
func (i Class) String() string {
- switch {
- case 0 <= i && i <= 5:
- return _Class_name_0[_Class_index_0[i]:_Class_index_0[i+1]]
- case i == 7:
- return _Class_name_1
- default:
+ if i >= Class(len(_Class_index)-1) {
return "Class(" + strconv.FormatInt(int64(i), 10) + ")"
}
+ return _Class_name[_Class_index[i]:_Class_index[i+1]]
}
diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go
index 76cb44bff..dd5d44f1c 100644
--- a/vendor/github.com/cilium/ebpf/asm/register.go
+++ b/vendor/github.com/cilium/ebpf/asm/register.go
@@ -38,6 +38,7 @@ const (
PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD
PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE
PseudoCall = R1 // BPF_PSEUDO_CALL
+ PseudoFunc = R4 // BPF_PSEUDO_FUNC
)
func (r Register) String() string {
diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go
new file mode 100644
index 000000000..de355ed90
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/attachtype_string.go
@@ -0,0 +1,65 @@
+// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT.
+
+package ebpf
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[AttachNone-0]
+ _ = x[AttachCGroupInetIngress-0]
+ _ = x[AttachCGroupInetEgress-1]
+ _ = x[AttachCGroupInetSockCreate-2]
+ _ = x[AttachCGroupSockOps-3]
+ _ = x[AttachSkSKBStreamParser-4]
+ _ = x[AttachSkSKBStreamVerdict-5]
+ _ = x[AttachCGroupDevice-6]
+ _ = x[AttachSkMsgVerdict-7]
+ _ = x[AttachCGroupInet4Bind-8]
+ _ = x[AttachCGroupInet6Bind-9]
+ _ = x[AttachCGroupInet4Connect-10]
+ _ = x[AttachCGroupInet6Connect-11]
+ _ = x[AttachCGroupInet4PostBind-12]
+ _ = x[AttachCGroupInet6PostBind-13]
+ _ = x[AttachCGroupUDP4Sendmsg-14]
+ _ = x[AttachCGroupUDP6Sendmsg-15]
+ _ = x[AttachLircMode2-16]
+ _ = x[AttachFlowDissector-17]
+ _ = x[AttachCGroupSysctl-18]
+ _ = x[AttachCGroupUDP4Recvmsg-19]
+ _ = x[AttachCGroupUDP6Recvmsg-20]
+ _ = x[AttachCGroupGetsockopt-21]
+ _ = x[AttachCGroupSetsockopt-22]
+ _ = x[AttachTraceRawTp-23]
+ _ = x[AttachTraceFEntry-24]
+ _ = x[AttachTraceFExit-25]
+ _ = x[AttachModifyReturn-26]
+ _ = x[AttachLSMMac-27]
+ _ = x[AttachTraceIter-28]
+ _ = x[AttachCgroupInet4GetPeername-29]
+ _ = x[AttachCgroupInet6GetPeername-30]
+ _ = x[AttachCgroupInet4GetSockname-31]
+ _ = x[AttachCgroupInet6GetSockname-32]
+ _ = x[AttachXDPDevMap-33]
+ _ = x[AttachCgroupInetSockRelease-34]
+ _ = x[AttachXDPCPUMap-35]
+ _ = x[AttachSkLookup-36]
+ _ = x[AttachXDP-37]
+ _ = x[AttachSkSKBVerdict-38]
+ _ = x[AttachSkReuseportSelect-39]
+ _ = x[AttachSkReuseportSelectOrMigrate-40]
+ _ = x[AttachPerfEvent-41]
+}
+
+const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEvent"
+
+var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610}
+
+func (i AttachType) String() string {
+ if i >= AttachType(len(_AttachType_index)-1) {
+ return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go
new file mode 100644
index 000000000..a5969332a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/btf.go
@@ -0,0 +1,897 @@
+package btf
+
+import (
+ "bufio"
+ "bytes"
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "reflect"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+const btfMagic = 0xeB9F
+
+// Errors returned by BTF functions.
+var (
+ ErrNotSupported = internal.ErrNotSupported
+ ErrNotFound = errors.New("not found")
+ ErrNoExtendedInfo = errors.New("no extended info")
+)
+
+// ID represents the unique ID of a BTF object.
+type ID = sys.BTFID
+
+// Spec represents decoded BTF.
+type Spec struct {
+ // Data from .BTF.
+ rawTypes []rawType
+ strings *stringTable
+
+ // All types contained by the spec. For the base type, the position of
+ // a type in the slice is its ID.
+ types types
+
+ // Type IDs indexed by type.
+ typeIDs map[Type]TypeID
+
+ // Types indexed by essential name.
+ // Includes all struct flavors and types with the same name.
+ namedTypes map[essentialName][]Type
+
+ byteOrder binary.ByteOrder
+}
+
+type btfHeader struct {
+ Magic uint16
+ Version uint8
+ Flags uint8
+ HdrLen uint32
+
+ TypeOff uint32
+ TypeLen uint32
+ StringOff uint32
+ StringLen uint32
+}
+
+// typeStart returns the offset from the beginning of the .BTF section
+// to the start of its type entries.
+func (h *btfHeader) typeStart() int64 {
+ return int64(h.HdrLen + h.TypeOff)
+}
+
+// stringStart returns the offset from the beginning of the .BTF section
+// to the start of its string table.
+func (h *btfHeader) stringStart() int64 {
+ return int64(h.HdrLen + h.StringOff)
+}
+
+// LoadSpec opens file and calls LoadSpecFromReader on it.
+func LoadSpec(file string) (*Spec, error) {
+ fh, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer fh.Close()
+
+ return LoadSpecFromReader(fh)
+}
+
+// LoadSpecFromReader reads from an ELF or a raw BTF blob.
+//
+// Returns ErrNotFound if reading from an ELF which contains no BTF. ExtInfos
+// may be nil.
+func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ if bo := guessRawBTFByteOrder(rd); bo != nil {
+ // Try to parse a naked BTF blob. This will return an error if
+ // we encounter a Datasec, since we can't fix it up.
+ spec, err := loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
+ return spec, err
+ }
+
+ return nil, err
+ }
+
+ return loadSpecFromELF(file)
+}
+
+// LoadSpecAndExtInfosFromReader reads from an ELF.
+//
+// ExtInfos may be nil if the ELF doesn't contain section metadata.
+// Returns ErrNotFound if the ELF contains no BTF.
+func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ spec, err := loadSpecFromELF(file)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ extInfos, err := loadExtInfosFromELF(file, spec.types, spec.strings)
+ if err != nil && !errors.Is(err, ErrNotFound) {
+ return nil, nil, err
+ }
+
+ return spec, extInfos, nil
+}
+
+// variableOffsets extracts all symbols offsets from an ELF and indexes them by
+// section and variable name.
+//
+// References to variables in BTF data sections carry unsigned 32-bit offsets.
+// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
+// beyond this range. Since these symbols cannot be described by BTF info,
+// ignore them here.
+func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) {
+ symbols, err := file.Symbols()
+ if err != nil {
+ return nil, fmt.Errorf("can't read symbols: %v", err)
+ }
+
+ variableOffsets := make(map[variable]uint32)
+ for _, symbol := range symbols {
+ if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
+ // Ignore things like SHN_ABS
+ continue
+ }
+
+ if symbol.Value > math.MaxUint32 {
+ // VarSecinfo offset is u32, cannot reference symbols in higher regions.
+ continue
+ }
+
+ if int(symbol.Section) >= len(file.Sections) {
+ return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
+ }
+
+ secName := file.Sections[symbol.Section].Name
+ variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
+ }
+
+ return variableOffsets, nil
+}
+
+func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
+ var (
+ btfSection *elf.Section
+ sectionSizes = make(map[string]uint32)
+ )
+
+ for _, sec := range file.Sections {
+ switch sec.Name {
+ case ".BTF":
+ btfSection = sec
+ default:
+ if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
+ break
+ }
+
+ if sec.Size > math.MaxUint32 {
+ return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
+ }
+
+ sectionSizes[sec.Name] = uint32(sec.Size)
+ }
+ }
+
+ if btfSection == nil {
+ return nil, fmt.Errorf("btf: %w", ErrNotFound)
+ }
+
+ vars, err := variableOffsets(file)
+ if err != nil {
+ return nil, err
+ }
+
+ if btfSection.ReaderAt == nil {
+ return nil, fmt.Errorf("compressed BTF is not supported")
+ }
+
+ rawTypes, rawStrings, err := parseBTF(btfSection.ReaderAt, file.ByteOrder, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ err = fixupDatasec(rawTypes, rawStrings, sectionSizes, vars)
+ if err != nil {
+ return nil, err
+ }
+
+ return inflateSpec(rawTypes, rawStrings, file.ByteOrder, nil)
+}
+
+func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder,
+ baseTypes types, baseStrings *stringTable) (*Spec, error) {
+
+ rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings)
+ if err != nil {
+ return nil, err
+ }
+
+ return inflateSpec(rawTypes, rawStrings, bo, baseTypes)
+}
+
+func inflateSpec(rawTypes []rawType, rawStrings *stringTable, bo binary.ByteOrder,
+ baseTypes types) (*Spec, error) {
+
+ types, err := inflateRawTypes(rawTypes, baseTypes, rawStrings)
+ if err != nil {
+ return nil, err
+ }
+
+ typeIDs, typesByName := indexTypes(types, TypeID(len(baseTypes)))
+
+ return &Spec{
+ rawTypes: rawTypes,
+ namedTypes: typesByName,
+ typeIDs: typeIDs,
+ types: types,
+ strings: rawStrings,
+ byteOrder: bo,
+ }, nil
+}
+
+func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essentialName][]Type) {
+ namedTypes := 0
+ for _, typ := range types {
+ if typ.TypeName() != "" {
+ // Do a pre-pass to figure out how big types by name has to be.
+ // Most types have unique names, so it's OK to ignore essentialName
+ // here.
+ namedTypes++
+ }
+ }
+
+ typeIDs := make(map[Type]TypeID, len(types))
+ typesByName := make(map[essentialName][]Type, namedTypes)
+
+ for i, typ := range types {
+ if name := newEssentialName(typ.TypeName()); name != "" {
+ typesByName[name] = append(typesByName[name], typ)
+ }
+ typeIDs[typ] = TypeID(i) + typeIDOffset
+ }
+
+ return typeIDs, typesByName
+}
+
+// LoadKernelSpec returns the current kernel's BTF information.
+//
+// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
+// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
+func LoadKernelSpec() (*Spec, error) {
+ fh, err := os.Open("/sys/kernel/btf/vmlinux")
+ if err == nil {
+ defer fh.Close()
+
+ return loadRawSpec(fh, internal.NativeEndian, nil, nil)
+ }
+
+ file, err := findVMLinux()
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return loadSpecFromELF(file)
+}
+
+// findVMLinux scans multiple well-known paths for vmlinux kernel images.
+func findVMLinux() (*internal.SafeELFFile, error) {
+ release, err := internal.KernelRelease()
+ if err != nil {
+ return nil, err
+ }
+
+ // use same list of locations as libbpf
+ // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
+ locations := []string{
+ "/boot/vmlinux-%s",
+ "/lib/modules/%s/vmlinux-%[1]s",
+ "/lib/modules/%s/build/vmlinux",
+ "/usr/lib/modules/%s/kernel/vmlinux",
+ "/usr/lib/debug/boot/vmlinux-%s",
+ "/usr/lib/debug/boot/vmlinux-%s.debug",
+ "/usr/lib/debug/lib/modules/%s/vmlinux",
+ }
+
+ for _, loc := range locations {
+ file, err := internal.OpenSafeELFFile(fmt.Sprintf(loc, release))
+ if errors.Is(err, os.ErrNotExist) {
+ continue
+ }
+ return file, err
+ }
+
+ return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported)
+}
+
+// parseBTFHeader parses the header of the .BTF section.
+func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) {
+ var header btfHeader
+ if err := binary.Read(r, bo, &header); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ if header.Magic != btfMagic {
+ return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
+ }
+
+ if header.Version != 1 {
+ return nil, fmt.Errorf("unexpected version %v", header.Version)
+ }
+
+ if header.Flags != 0 {
+ return nil, fmt.Errorf("unsupported flags %v", header.Flags)
+ }
+
+ remainder := int64(header.HdrLen) - int64(binary.Size(&header))
+ if remainder < 0 {
+ return nil, errors.New("header length shorter than btfHeader size")
+ }
+
+ if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil {
+ return nil, fmt.Errorf("header padding: %v", err)
+ }
+
+ return &header, nil
+}
+
+func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder {
+ buf := new(bufio.Reader)
+ for _, bo := range []binary.ByteOrder{
+ binary.LittleEndian,
+ binary.BigEndian,
+ } {
+ buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64))
+ if _, err := parseBTFHeader(buf, bo); err == nil {
+ return bo
+ }
+ }
+
+ return nil
+}
+
+// parseBTF reads a .BTF section into memory and parses it into a list of
+// raw types and a string table.
+func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([]rawType, *stringTable, error) {
+ buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64)
+ header, err := parseBTFHeader(buf, bo)
+ if err != nil {
+ return nil, nil, fmt.Errorf("parsing .BTF header: %v", err)
+ }
+
+ rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)),
+ baseStrings)
+ if err != nil {
+ return nil, nil, fmt.Errorf("can't read type names: %w", err)
+ }
+
+ buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen)))
+ rawTypes, err := readTypes(buf, bo, header.TypeLen)
+ if err != nil {
+ return nil, nil, fmt.Errorf("can't read types: %w", err)
+ }
+
+ return rawTypes, rawStrings, nil
+}
+
+type variable struct {
+ section string
+ name string
+}
+
+func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
+ for i, rawType := range rawTypes {
+ if rawType.Kind() != kindDatasec {
+ continue
+ }
+
+ name, err := rawStrings.Lookup(rawType.NameOff)
+ if err != nil {
+ return err
+ }
+
+ if name == ".kconfig" || name == ".ksyms" {
+ return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
+ }
+
+ if rawTypes[i].SizeType != 0 {
+ continue
+ }
+
+ size, ok := sectionSizes[name]
+ if !ok {
+ return fmt.Errorf("data section %s: missing size", name)
+ }
+
+ rawTypes[i].SizeType = size
+
+ secinfos := rawType.data.([]btfVarSecinfo)
+ for j, secInfo := range secinfos {
+ id := int(secInfo.Type - 1)
+ if id >= len(rawTypes) {
+ return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
+ }
+
+ varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
+ if err != nil {
+ return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
+ }
+
+ offset, ok := variableOffsets[variable{name, varName}]
+ if !ok {
+ return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
+ }
+
+ secinfos[j].Offset = offset
+ }
+ }
+
+ return nil
+}
+
+// Copy creates a copy of Spec.
+func (s *Spec) Copy() *Spec {
+ types := copyTypes(s.types, nil)
+
+ typeIDOffset := TypeID(0)
+ if len(s.types) != 0 {
+ typeIDOffset = s.typeIDs[s.types[0]]
+ }
+ typeIDs, typesByName := indexTypes(types, typeIDOffset)
+
+ // NB: Other parts of spec are not copied since they are immutable.
+ return &Spec{
+ s.rawTypes,
+ s.strings,
+ types,
+ typeIDs,
+ typesByName,
+ s.byteOrder,
+ }
+}
+
+type marshalOpts struct {
+ ByteOrder binary.ByteOrder
+ StripFuncLinkage bool
+}
+
+func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
+ var (
+ buf bytes.Buffer
+ header = new(btfHeader)
+ headerLen = binary.Size(header)
+ )
+
+ // Reserve space for the header. We have to write it last since
+ // we don't know the size of the type section yet.
+ _, _ = buf.Write(make([]byte, headerLen))
+
+ // Write type section, just after the header.
+ for _, raw := range s.rawTypes {
+ switch {
+ case opts.StripFuncLinkage && raw.Kind() == kindFunc:
+ raw.SetLinkage(StaticFunc)
+ }
+
+ if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
+ return nil, fmt.Errorf("can't marshal BTF: %w", err)
+ }
+ }
+
+ typeLen := uint32(buf.Len() - headerLen)
+
+ // Write string section after type section.
+ stringsLen := s.strings.Length()
+ buf.Grow(stringsLen)
+ if err := s.strings.Marshal(&buf); err != nil {
+ return nil, err
+ }
+
+ // Fill out the header, and write it out.
+ header = &btfHeader{
+ Magic: btfMagic,
+ Version: 1,
+ Flags: 0,
+ HdrLen: uint32(headerLen),
+ TypeOff: 0,
+ TypeLen: typeLen,
+ StringOff: typeLen,
+ StringLen: uint32(stringsLen),
+ }
+
+ raw := buf.Bytes()
+ err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
+ if err != nil {
+ return nil, fmt.Errorf("can't write header: %v", err)
+ }
+
+ return raw, nil
+}
+
+type sliceWriter []byte
+
+func (sw sliceWriter) Write(p []byte) (int, error) {
+ if len(p) != len(sw) {
+ return 0, errors.New("size doesn't match")
+ }
+
+ return copy(sw, p), nil
+}
+
+// TypeByID returns the BTF Type with the given type ID.
+//
+// Returns an error wrapping ErrNotFound if a Type with the given ID
+// does not exist in the Spec.
+func (s *Spec) TypeByID(id TypeID) (Type, error) {
+ return s.types.ByID(id)
+}
+
+// TypeID returns the ID for a given Type.
+//
+// Returns an error wrapping ErrNotFound if the type isn't part of the Spec.
+func (s *Spec) TypeID(typ Type) (TypeID, error) {
+ if _, ok := typ.(*Void); ok {
+ // Equality is weird for void, since it is a zero sized type.
+ return 0, nil
+ }
+
+ id, ok := s.typeIDs[typ]
+ if !ok {
+ return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
+ }
+
+ return id, nil
+}
+
+// AnyTypesByName returns a list of BTF Types with the given name.
+//
+// If the BTF blob describes multiple compilation units like vmlinux, multiple
+// Types with the same name and kind can exist, but might not describe the same
+// data structure.
+//
+// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
+func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
+ types := s.namedTypes[newEssentialName(name)]
+ if len(types) == 0 {
+ return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
+ }
+
+ // Return a copy to prevent changes to namedTypes.
+ result := make([]Type, 0, len(types))
+ for _, t := range types {
+ // Match against the full name, not just the essential one
+ // in case the type being looked up is a struct flavor.
+ if t.TypeName() == name {
+ result = append(result, t)
+ }
+ }
+ return result, nil
+}
+
+// AnyTypeByName returns a Type with the given name.
+//
+// Returns an error if multiple types of that name exist.
+func (s *Spec) AnyTypeByName(name string) (Type, error) {
+ types, err := s.AnyTypesByName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(types) > 1 {
+ return nil, fmt.Errorf("found multiple types: %v", types)
+ }
+
+ return types[0], nil
+}
+
+// TypeByName searches for a Type with a specific name. Since multiple
+// Types with the same name can exist, the parameter typ is taken to
+// narrow down the search in case of a clash.
+//
+// typ must be a non-nil pointer to an implementation of a Type.
+// On success, the address of the found Type will be copied to typ.
+//
+// Returns an error wrapping ErrNotFound if no matching
+// Type exists in the Spec. If multiple candidates are found,
+// an error is returned.
+func (s *Spec) TypeByName(name string, typ interface{}) error {
+ typValue := reflect.ValueOf(typ)
+ if typValue.Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer", typ)
+ }
+
+ typPtr := typValue.Elem()
+ if !typPtr.CanSet() {
+ return fmt.Errorf("%T cannot be set", typ)
+ }
+
+ wanted := typPtr.Type()
+ if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) {
+ return fmt.Errorf("%T does not satisfy Type interface", typ)
+ }
+
+ types, err := s.AnyTypesByName(name)
+ if err != nil {
+ return err
+ }
+
+ var candidate Type
+ for _, typ := range types {
+ if reflect.TypeOf(typ) != wanted {
+ continue
+ }
+
+ if candidate != nil {
+ return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
+ }
+
+ candidate = typ
+ }
+
+ if candidate == nil {
+ return fmt.Errorf("type %s: %w", name, ErrNotFound)
+ }
+
+ typPtr.Set(reflect.ValueOf(candidate))
+
+ return nil
+}
+
+// LoadSplitSpecFromReader loads split BTF from a reader.
+//
+// Types from base are used to resolve references in the split BTF.
+// The returned Spec only contains types from the split BTF, not from the base.
+func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
+ return loadRawSpec(r, internal.NativeEndian, base.types, base.strings)
+}
+
+// TypesIterator iterates over types of a given spec.
+type TypesIterator struct {
+ spec *Spec
+ index int
+ // The last visited type in the spec.
+ Type Type
+}
+
+// Iterate returns the types iterator.
+func (s *Spec) Iterate() *TypesIterator {
+ return &TypesIterator{spec: s, index: 0}
+}
+
+// Next returns true as long as there are any remaining types.
+func (iter *TypesIterator) Next() bool {
+ if len(iter.spec.types) <= iter.index {
+ return false
+ }
+
+ iter.Type = iter.spec.types[iter.index]
+ iter.index++
+ return true
+}
+
+// Handle is a reference to BTF loaded into the kernel.
+type Handle struct {
+ fd *sys.FD
+
+ // Size of the raw BTF in bytes.
+ size uint32
+}
+
+// NewHandle loads BTF into the kernel.
+//
+// Returns ErrNotSupported if BTF is not supported.
+func NewHandle(spec *Spec) (*Handle, error) {
+ if err := haveBTF(); err != nil {
+ return nil, err
+ }
+
+ if spec.byteOrder != internal.NativeEndian {
+ return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
+ }
+
+ btf, err := spec.marshal(marshalOpts{
+ ByteOrder: internal.NativeEndian,
+ StripFuncLinkage: haveFuncLinkage() != nil,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("can't marshal BTF: %w", err)
+ }
+
+ if uint64(len(btf)) > math.MaxUint32 {
+ return nil, errors.New("BTF exceeds the maximum size")
+ }
+
+ attr := &sys.BtfLoadAttr{
+ Btf: sys.NewSlicePointer(btf),
+ BtfSize: uint32(len(btf)),
+ }
+
+ fd, err := sys.BtfLoad(attr)
+ if err != nil {
+ logBuf := make([]byte, 64*1024)
+ attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
+ attr.BtfLogSize = uint32(len(logBuf))
+ attr.BtfLogLevel = 1
+ // NB: The syscall will never return ENOSPC as of 5.18-rc4.
+ _, _ = sys.BtfLoad(attr)
+ return nil, internal.ErrorWithLog(err, logBuf)
+ }
+
+ return &Handle{fd, attr.BtfSize}, nil
+}
+
+// NewHandleFromID returns the BTF handle for a given id.
+//
+// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
+//
+// Returns ErrNotExist, if there is no BTF with the given id.
+//
+// Requires CAP_SYS_ADMIN.
+func NewHandleFromID(id ID) (*Handle, error) {
+ fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
+ Id: uint32(id),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
+ }
+
+ info, err := newHandleInfoFromFD(fd)
+ if err != nil {
+ _ = fd.Close()
+ return nil, err
+ }
+
+ return &Handle{fd, info.size}, nil
+}
+
+// Spec parses the kernel BTF into Go types.
+//
+// base is used to decode split BTF and may be nil.
+func (h *Handle) Spec(base *Spec) (*Spec, error) {
+ var btfInfo sys.BtfInfo
+ btfBuffer := make([]byte, h.size)
+ btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
+
+ if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
+ return nil, err
+ }
+
+ var baseTypes types
+ var baseStrings *stringTable
+ if base != nil {
+ baseTypes = base.types
+ baseStrings = base.strings
+ }
+
+ return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, baseTypes, baseStrings)
+}
+
+// Close destroys the handle.
+//
+// Subsequent calls to FD will return an invalid value.
+func (h *Handle) Close() error {
+ if h == nil {
+ return nil
+ }
+
+ return h.fd.Close()
+}
+
+// FD returns the file descriptor for the handle.
+func (h *Handle) FD() int {
+ return h.fd.Int()
+}
+
+// Info returns metadata about the handle.
+func (h *Handle) Info() (*HandleInfo, error) {
+ return newHandleInfoFromFD(h.fd)
+}
+
+func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
+ const minHeaderLength = 24
+
+ typesLen := uint32(binary.Size(types))
+ header := btfHeader{
+ Magic: btfMagic,
+ Version: 1,
+ HdrLen: minHeaderLength,
+ TypeOff: 0,
+ TypeLen: typesLen,
+ StringOff: typesLen,
+ StringLen: uint32(len(strings)),
+ }
+
+ buf := new(bytes.Buffer)
+ _ = binary.Write(buf, bo, &header)
+ _ = binary.Write(buf, bo, types)
+ buf.Write(strings)
+
+ return buf.Bytes()
+}
+
+var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
+ var (
+ types struct {
+ Integer btfType
+ Var btfType
+ btfVar struct{ Linkage uint32 }
+ }
+ strings = []byte{0, 'a', 0}
+ )
+
+ // We use a BTF_KIND_VAR here, to make sure that
+ // the kernel understands BTF at least as well as we
+ // do. BTF_KIND_VAR was introduced ~5.1.
+ types.Integer.SetKind(kindPointer)
+ types.Var.NameOff = 1
+ types.Var.SetKind(kindVar)
+ types.Var.SizeType = 1
+
+ btf := marshalBTF(&types, strings, internal.NativeEndian)
+
+ fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
+ Btf: sys.NewSlicePointer(btf),
+ BtfSize: uint32(len(btf)),
+ })
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
+ // Treat both EINVAL and EPERM as not supported: loading the program
+ // might still succeed without BTF.
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+
+ fd.Close()
+ return nil
+})
+
+var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
+ if err := haveBTF(); err != nil {
+ return err
+ }
+
+ var (
+ types struct {
+ FuncProto btfType
+ Func btfType
+ }
+ strings = []byte{0, 'a', 0}
+ )
+
+ types.FuncProto.SetKind(kindFuncProto)
+ types.Func.SetKind(kindFunc)
+ types.Func.SizeType = 1 // aka FuncProto
+ types.Func.NameOff = 1
+ types.Func.SetLinkage(GlobalFunc)
+
+ btf := marshalBTF(&types, strings, internal.NativeEndian)
+
+ fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
+ Btf: sys.NewSlicePointer(btf),
+ BtfSize: uint32(len(btf)),
+ })
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+
+ fd.Close()
+ return nil
+})
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go b/vendor/github.com/cilium/ebpf/btf/btf_types.go
index a4cde3fe8..481018049 100644
--- a/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go
+++ b/vendor/github.com/cilium/ebpf/btf/btf_types.go
@@ -6,6 +6,8 @@ import (
"io"
)
+//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage
+
// btfKind describes a Type.
type btfKind uint8
@@ -29,19 +31,32 @@ const (
// Added ~5.1
kindVar
kindDatasec
+ // Added ~5.13
+ kindFloat
+)
+
+// FuncLinkage describes BTF function linkage metadata.
+type FuncLinkage int
+
+// Equivalent of enum btf_func_linkage.
+const (
+ StaticFunc FuncLinkage = iota // static
+ GlobalFunc // global
+ ExternFunc // extern
)
-type btfFuncLinkage uint8
+// VarLinkage describes BTF variable linkage metadata.
+type VarLinkage int
const (
- linkageStatic btfFuncLinkage = iota
- linkageGlobal
- linkageExtern
+ StaticVar VarLinkage = iota // static
+ GlobalVar // global
+ ExternVar // extern
)
const (
btfTypeKindShift = 24
- btfTypeKindLen = 4
+ btfTypeKindLen = 5
btfTypeVlenShift = 0
btfTypeVlenMask = 16
btfTypeKindFlagShift = 31
@@ -54,8 +69,8 @@ type btfType struct {
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members), linkage
* bits 16-23: unused
- * bits 24-27: kind (e.g. int, ptr, array...etc)
- * bits 28-30: unused
+ * bits 24-28: kind (e.g. int, ptr, array...etc)
+ * bits 29-30: unused
* bit 31: kind_flag, currently used by
* struct, union and fwd
*/
@@ -104,6 +119,8 @@ func (k btfKind) String() string {
return "Variable"
case kindDatasec:
return "Section"
+ case kindFloat:
+ return "Float"
default:
return fmt.Sprintf("Unknown (%d)", k)
}
@@ -113,13 +130,22 @@ func mask(len uint32) uint32 {
return (1 << len) - 1
}
+func readBits(value, len, shift uint32) uint32 {
+ return (value >> shift) & mask(len)
+}
+
+func writeBits(value, len, shift, new uint32) uint32 {
+ value &^= mask(len) << shift
+ value |= (new & mask(len)) << shift
+ return value
+}
+
func (bt *btfType) info(len, shift uint32) uint32 {
- return (bt.Info >> shift) & mask(len)
+ return readBits(bt.Info, len, shift)
}
func (bt *btfType) setInfo(value, len, shift uint32) {
- bt.Info &^= mask(len) << shift
- bt.Info |= (value & mask(len)) << shift
+ bt.Info = writeBits(bt.Info, len, shift, value)
}
func (bt *btfType) Kind() btfKind {
@@ -142,11 +168,11 @@ func (bt *btfType) KindFlag() bool {
return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}
-func (bt *btfType) Linkage() btfFuncLinkage {
- return btfFuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
+func (bt *btfType) Linkage() FuncLinkage {
+ return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}
-func (bt *btfType) SetLinkage(linkage btfFuncLinkage) {
+func (bt *btfType) SetLinkage(linkage FuncLinkage) {
bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
}
@@ -160,6 +186,10 @@ func (bt *btfType) Size() uint32 {
return bt.SizeType
}
+func (bt *btfType) SetSize(size uint32) {
+ bt.SizeType = size
+}
+
type rawType struct {
btfType
data interface{}
@@ -177,6 +207,50 @@ func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
return binary.Write(w, bo, rt.data)
}
+// btfInt encodes additional data for integers.
+//
+// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b
+// ? = undefined
+// e = encoding
+// o = offset (bitfields?)
+// b = bits (bitfields)
+type btfInt struct {
+ Raw uint32
+}
+
+const (
+ btfIntEncodingLen = 4
+ btfIntEncodingShift = 24
+ btfIntOffsetLen = 8
+ btfIntOffsetShift = 16
+ btfIntBitsLen = 8
+ btfIntBitsShift = 0
+)
+
+func (bi btfInt) Encoding() IntEncoding {
+ return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift))
+}
+
+func (bi *btfInt) SetEncoding(e IntEncoding) {
+ bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e))
+}
+
+func (bi btfInt) Offset() Bits {
+ return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift))
+}
+
+func (bi *btfInt) SetOffset(offset uint32) {
+ bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset)
+}
+
+func (bi btfInt) Bits() Bits {
+ return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift))
+}
+
+func (bi *btfInt) SetBits(bits byte) {
+ bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits))
+}
+
type btfArray struct {
Type TypeID
IndexType TypeID
@@ -209,11 +283,14 @@ type btfParam struct {
Type TypeID
}
-func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
- var (
- header btfType
- types []rawType
- )
+func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, error) {
+ var header btfType
+ // because of the interleaving between types and struct members it is difficult to
+ // precompute the numbers of raw types this will parse
+ // this "guess" is a good first estimation
+ sizeOfbtfType := uintptr(binary.Size(btfType{}))
+ tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2
+ types := make([]rawType, 0, tyMaxCount)
for id := TypeID(1); ; id++ {
if err := binary.Read(r, bo, &header); err == io.EOF {
@@ -225,7 +302,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
var data interface{}
switch header.Kind() {
case kindInt:
- data = new(uint32)
+ data = new(btfInt)
case kindPointer:
case kindArray:
data = new(btfArray)
@@ -247,6 +324,7 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
data = new(btfVariable)
case kindDatasec:
data = make([]btfVarSecinfo, header.Vlen())
+ case kindFloat:
default:
return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
}
@@ -263,7 +341,3 @@ func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
types = append(types, rawType{header, data})
}
}
-
-func intEncoding(raw uint32) (IntEncoding, uint32, byte) {
- return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff)
-}
diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types_string.go b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
new file mode 100644
index 000000000..0e0c17d68
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
@@ -0,0 +1,44 @@
+// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT.
+
+package btf
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[StaticFunc-0]
+ _ = x[GlobalFunc-1]
+ _ = x[ExternFunc-2]
+}
+
+const _FuncLinkage_name = "staticglobalextern"
+
+var _FuncLinkage_index = [...]uint8{0, 6, 12, 18}
+
+func (i FuncLinkage) String() string {
+ if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) {
+ return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[StaticVar-0]
+ _ = x[GlobalVar-1]
+ _ = x[ExternVar-2]
+}
+
+const _VarLinkage_name = "staticglobalextern"
+
+var _VarLinkage_index = [...]uint8{0, 6, 12, 18}
+
+func (i VarLinkage) String() string {
+ if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) {
+ return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go
new file mode 100644
index 000000000..c48754809
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/core.go
@@ -0,0 +1,972 @@
+package btf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+)
+
+// Code in this file is derived from libbpf, which is available under a BSD
+// 2-Clause license.
+
+// COREFixup is the result of computing a CO-RE relocation for a target.
+type COREFixup struct {
+ kind coreKind
+ local uint32
+ target uint32
+ // True if there is no valid fixup. The instruction is replaced with an
+ // invalid dummy.
+ poison bool
+ // True if the validation of the local value should be skipped. Used by
+ // some kinds of bitfield relocations.
+ skipLocalValidation bool
+}
+
+func (f *COREFixup) equal(other COREFixup) bool {
+ return f.local == other.local && f.target == other.target
+}
+
+func (f *COREFixup) String() string {
+ if f.poison {
+ return fmt.Sprintf("%s=poison", f.kind)
+ }
+ return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target)
+}
+
+func (f *COREFixup) Apply(ins *asm.Instruction) error {
+ if f.poison {
+ const badRelo = 0xbad2310
+
+ *ins = asm.BuiltinFunc(badRelo).Call()
+ return nil
+ }
+
+ switch class := ins.OpCode.Class(); class {
+ case asm.LdXClass, asm.StClass, asm.StXClass:
+ if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset {
+ return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local)
+ }
+
+ if f.target > math.MaxInt16 {
+ return fmt.Errorf("offset %d exceeds MaxInt16", f.target)
+ }
+
+ ins.Offset = int16(f.target)
+
+ case asm.LdClass:
+ if !ins.IsConstantLoad(asm.DWord) {
+ return fmt.Errorf("not a dword-sized immediate load")
+ }
+
+ if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
+ return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
+ }
+
+ ins.Constant = int64(f.target)
+
+ case asm.ALUClass:
+ if ins.OpCode.ALUOp() == asm.Swap {
+ return fmt.Errorf("relocation against swap")
+ }
+
+ fallthrough
+
+ case asm.ALU64Class:
+ if src := ins.OpCode.Source(); src != asm.ImmSource {
+ return fmt.Errorf("invalid source %s", src)
+ }
+
+ if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
+ return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins)
+ }
+
+ if f.target > math.MaxInt32 {
+ return fmt.Errorf("immediate %d exceeds MaxInt32", f.target)
+ }
+
+ ins.Constant = int64(f.target)
+
+ default:
+ return fmt.Errorf("invalid class %s", class)
+ }
+
+ return nil
+}
+
+func (f COREFixup) isNonExistant() bool {
+ return f.kind.checksForExistence() && f.target == 0
+}
+
+// coreKind is the type of CO-RE relocation as specified in BPF source code.
+type coreKind uint32
+
+const (
+ reloFieldByteOffset coreKind = iota /* field byte offset */
+ reloFieldByteSize /* field size in bytes */
+ reloFieldExists /* field existence in target kernel */
+ reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
+ reloFieldLShiftU64 /* bitfield-specific left bitshift */
+ reloFieldRShiftU64 /* bitfield-specific right bitshift */
+ reloTypeIDLocal /* type ID in local BPF object */
+ reloTypeIDTarget /* type ID in target kernel */
+ reloTypeExists /* type existence in target kernel */
+ reloTypeSize /* type size in bytes */
+ reloEnumvalExists /* enum value existence in target kernel */
+ reloEnumvalValue /* enum value integer value */
+)
+
+func (k coreKind) checksForExistence() bool {
+ return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
+}
+
+func (k coreKind) String() string {
+ switch k {
+ case reloFieldByteOffset:
+ return "byte_off"
+ case reloFieldByteSize:
+ return "byte_sz"
+ case reloFieldExists:
+ return "field_exists"
+ case reloFieldSigned:
+ return "signed"
+ case reloFieldLShiftU64:
+ return "lshift_u64"
+ case reloFieldRShiftU64:
+ return "rshift_u64"
+ case reloTypeIDLocal:
+ return "local_type_id"
+ case reloTypeIDTarget:
+ return "target_type_id"
+ case reloTypeExists:
+ return "type_exists"
+ case reloTypeSize:
+ return "type_size"
+ case reloEnumvalExists:
+ return "enumval_exists"
+ case reloEnumvalValue:
+ return "enumval_value"
+ default:
+ return "unknown"
+ }
+}
+
+// CORERelocate calculates the difference in types between local and target.
+//
+// Returns a list of fixups which can be applied to instructions to make them
+// match the target type(s).
+//
+// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
+// for relos[i].
+func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, error) {
+ if local.byteOrder != target.byteOrder {
+ return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
+ }
+
+ type reloGroup struct {
+ relos []*CORERelocation
+ // Position of each relocation in relos.
+ indices []int
+ }
+
+ // Split relocations into per Type lists.
+ relosByType := make(map[Type]*reloGroup)
+ result := make([]COREFixup, len(relos))
+ for i, relo := range relos {
+ if relo.kind == reloTypeIDLocal {
+ // Filtering out reloTypeIDLocal here makes our lives a lot easier
+ // down the line, since it doesn't have a target at all.
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+ return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+ }
+
+ id, err := local.TypeID(relo.typ)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", relo.kind, err)
+ }
+
+ result[i] = COREFixup{
+ kind: relo.kind,
+ local: uint32(id),
+ target: uint32(id),
+ }
+ continue
+ }
+
+ group, ok := relosByType[relo.typ]
+ if !ok {
+ group = &reloGroup{}
+ relosByType[relo.typ] = group
+ }
+ group.relos = append(group.relos, relo)
+ group.indices = append(group.indices, i)
+ }
+
+ for localType, group := range relosByType {
+ localTypeName := localType.TypeName()
+ if localTypeName == "" {
+ return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
+ }
+
+ targets := target.namedTypes[newEssentialName(localTypeName)]
+ fixups, err := coreCalculateFixups(local, target, localType, targets, group.relos)
+ if err != nil {
+ return nil, fmt.Errorf("relocate %s: %w", localType, err)
+ }
+
+ for j, index := range group.indices {
+ result[index] = fixups[j]
+ }
+ }
+
+ return result, nil
+}
+
+var errAmbiguousRelocation = errors.New("ambiguous relocation")
+var errImpossibleRelocation = errors.New("impossible relocation")
+
+// coreCalculateFixups calculates the fixups for the given relocations using
+// the "best" target.
+//
+// The best target is determined by scoring: the less poisoning we have to do
+// the better the target is.
+func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type, relos []*CORERelocation) ([]COREFixup, error) {
+ localID, err := localSpec.TypeID(local)
+ if err != nil {
+ return nil, fmt.Errorf("local type ID: %w", err)
+ }
+ local = Copy(local, UnderlyingType)
+
+ bestScore := len(relos)
+ var bestFixups []COREFixup
+ for i := range targets {
+ targetID, err := targetSpec.TypeID(targets[i])
+ if err != nil {
+ return nil, fmt.Errorf("target type ID: %w", err)
+ }
+ target := Copy(targets[i], UnderlyingType)
+
+ score := 0 // lower is better
+ fixups := make([]COREFixup, 0, len(relos))
+ for _, relo := range relos {
+ fixup, err := coreCalculateFixup(localSpec.byteOrder, local, localID, target, targetID, relo)
+ if err != nil {
+ return nil, fmt.Errorf("target %s: %w", target, err)
+ }
+ if fixup.poison || fixup.isNonExistant() {
+ score++
+ }
+ fixups = append(fixups, fixup)
+ }
+
+ if score > bestScore {
+ // We have a better target already, ignore this one.
+ continue
+ }
+
+ if score < bestScore {
+ // This is the best target yet, use it.
+ bestScore = score
+ bestFixups = fixups
+ continue
+ }
+
+ // Some other target has the same score as the current one. Make sure
+ // the fixups agree with each other.
+ for i, fixup := range bestFixups {
+ if !fixup.equal(fixups[i]) {
+ return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation)
+ }
+ }
+ }
+
+ if bestFixups == nil {
+ // Nothing at all matched, probably because there are no suitable
+ // targets at all.
+ //
+ // Poison everything except checksForExistence.
+ bestFixups = make([]COREFixup, len(relos))
+ for i, relo := range relos {
+ if relo.kind.checksForExistence() {
+ bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0}
+ } else {
+ bestFixups[i] = COREFixup{kind: relo.kind, poison: true}
+ }
+ }
+ }
+
+ return bestFixups, nil
+}
+
+// coreCalculateFixup calculates the fixup for a single local type, target type
+// and relocation.
+func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo *CORERelocation) (COREFixup, error) {
+ fixup := func(local, target uint32) (COREFixup, error) {
+ return COREFixup{kind: relo.kind, local: local, target: target}, nil
+ }
+ fixupWithoutValidation := func(local, target uint32) (COREFixup, error) {
+ return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil
+ }
+ poison := func() (COREFixup, error) {
+ if relo.kind.checksForExistence() {
+ return fixup(1, 0)
+ }
+ return COREFixup{kind: relo.kind, poison: true}, nil
+ }
+ zero := COREFixup{}
+
+ switch relo.kind {
+ case reloTypeIDTarget, reloTypeSize, reloTypeExists:
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+ return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+ }
+
+ err := coreAreTypesCompatible(local, target)
+ if errors.Is(err, errImpossibleRelocation) {
+ return poison()
+ }
+ if err != nil {
+ return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
+ }
+
+ switch relo.kind {
+ case reloTypeExists:
+ return fixup(1, 1)
+
+ case reloTypeIDTarget:
+ return fixup(uint32(localID), uint32(targetID))
+
+ case reloTypeSize:
+ localSize, err := Sizeof(local)
+ if err != nil {
+ return zero, err
+ }
+
+ targetSize, err := Sizeof(target)
+ if err != nil {
+ return zero, err
+ }
+
+ return fixup(uint32(localSize), uint32(targetSize))
+ }
+
+ case reloEnumvalValue, reloEnumvalExists:
+ localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
+ if errors.Is(err, errImpossibleRelocation) {
+ return poison()
+ }
+ if err != nil {
+ return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
+ }
+
+ switch relo.kind {
+ case reloEnumvalExists:
+ return fixup(1, 1)
+
+ case reloEnumvalValue:
+ return fixup(uint32(localValue.Value), uint32(targetValue.Value))
+ }
+
+ case reloFieldSigned:
+ switch local.(type) {
+ case *Enum:
+ return fixup(1, 1)
+ case *Int:
+ return fixup(
+ uint32(local.(*Int).Encoding&Signed),
+ uint32(target.(*Int).Encoding&Signed),
+ )
+ default:
+ return fixupWithoutValidation(0, 0)
+ }
+
+ case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
+ if _, ok := target.(*Fwd); ok {
+ // We can't relocate fields using a forward declaration, so
+ // skip it. If a non-forward declaration is present in the BTF
+ // we'll find it in one of the other iterations.
+ return poison()
+ }
+
+ localField, targetField, err := coreFindField(local, relo.accessor, target)
+ if errors.Is(err, errImpossibleRelocation) {
+ return poison()
+ }
+ if err != nil {
+ return zero, fmt.Errorf("target %s: %w", target, err)
+ }
+
+ maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
+ f.skipLocalValidation = localField.bitfieldSize > 0
+ return f, err
+ }
+
+ switch relo.kind {
+ case reloFieldExists:
+ return fixup(1, 1)
+
+ case reloFieldByteOffset:
+ return maybeSkipValidation(fixup(localField.offset, targetField.offset))
+
+ case reloFieldByteSize:
+ localSize, err := Sizeof(localField.Type)
+ if err != nil {
+ return zero, err
+ }
+
+ targetSize, err := Sizeof(targetField.Type)
+ if err != nil {
+ return zero, err
+ }
+ return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize)))
+
+ case reloFieldLShiftU64:
+ var target uint32
+ if byteOrder == binary.LittleEndian {
+ targetSize, err := targetField.sizeBits()
+ if err != nil {
+ return zero, err
+ }
+
+ target = uint32(64 - targetField.bitfieldOffset - targetSize)
+ } else {
+ loadWidth, err := Sizeof(targetField.Type)
+ if err != nil {
+ return zero, err
+ }
+
+ target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
+ }
+ return fixupWithoutValidation(0, target)
+
+ case reloFieldRShiftU64:
+ targetSize, err := targetField.sizeBits()
+ if err != nil {
+ return zero, err
+ }
+
+ return fixupWithoutValidation(0, uint32(64-targetSize))
+ }
+ }
+
+ return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
+}
+
+/* coreAccessor contains a path through a struct. It contains at least one index.
+ *
+ * The interpretation depends on the kind of the relocation. The following is
+ * taken from struct bpf_core_relo in libbpf_internal.h:
+ *
+ * - for field-based relocations, string encodes an accessed field using
+ * a sequence of field and array indices, separated by colon (:). It's
+ * conceptually very close to LLVM's getelementptr ([0]) instruction's
+ * arguments for identifying offset to a field.
+ * - for type-based relocations, strings is expected to be just "0";
+ * - for enum value-based relocations, string contains an index of enum
+ * value within its enum type;
+ *
+ * Example to provide a better feel.
+ *
+ * struct sample {
+ * int a;
+ * struct {
+ * int b[10];
+ * };
+ * };
+ *
+ * struct sample s = ...;
+ * int x = &s->a; // encoded as "0:0" (a is field #0)
+ * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
+ * // b is field #0 inside anon struct, accessing elem #5)
+ * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ */
+type coreAccessor []int
+
+func parseCOREAccessor(accessor string) (coreAccessor, error) {
+ if accessor == "" {
+ return nil, fmt.Errorf("empty accessor")
+ }
+
+ parts := strings.Split(accessor, ":")
+ result := make(coreAccessor, 0, len(parts))
+ for _, part := range parts {
+ // 31 bits to avoid overflowing int on 32 bit platforms.
+ index, err := strconv.ParseUint(part, 10, 31)
+ if err != nil {
+ return nil, fmt.Errorf("accessor index %q: %s", part, err)
+ }
+
+ result = append(result, int(index))
+ }
+
+ return result, nil
+}
+
+func (ca coreAccessor) String() string {
+ strs := make([]string, 0, len(ca))
+ for _, i := range ca {
+ strs = append(strs, strconv.Itoa(i))
+ }
+ return strings.Join(strs, ":")
+}
+
+func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
+ e, ok := t.(*Enum)
+ if !ok {
+ return nil, fmt.Errorf("not an enum: %s", t)
+ }
+
+ if len(ca) > 1 {
+ return nil, fmt.Errorf("invalid accessor %s for enum", ca)
+ }
+
+ i := ca[0]
+ if i >= len(e.Values) {
+ return nil, fmt.Errorf("invalid index %d for %s", i, e)
+ }
+
+ return &e.Values[i], nil
+}
+
+// coreField represents the position of a "child" of a composite type from the
+// start of that type.
+//
+// /- start of composite
+// | offset * 8 | bitfieldOffset | bitfieldSize | ... |
+// \- start of field end of field -/
+type coreField struct {
+ Type Type
+
+ // The position of the field from the start of the composite type in bytes.
+ offset uint32
+
+ // The offset of the bitfield in bits from the start of the field.
+ bitfieldOffset Bits
+
+ // The size of the bitfield in bits.
+ //
+ // Zero if the field is not a bitfield.
+ bitfieldSize Bits
+}
+
+func (cf *coreField) adjustOffsetToNthElement(n int) error {
+ size, err := Sizeof(cf.Type)
+ if err != nil {
+ return err
+ }
+
+ cf.offset += uint32(n) * uint32(size)
+ return nil
+}
+
+func (cf *coreField) adjustOffsetBits(offset Bits) error {
+ align, err := alignof(cf.Type)
+ if err != nil {
+ return err
+ }
+
+ // We can compute the load offset by:
+ // 1) converting the bit offset to bytes with a flooring division.
+ // 2) dividing and multiplying that offset by the alignment, yielding the
+ // load size aligned offset.
+ offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)
+
+ // The number of bits remaining is the bit offset less the number of bits
+ // we can "skip" with the aligned offset.
+ cf.bitfieldOffset = offset - Bits(offsetBytes*8)
+
+ // We know that cf.offset is aligned at to at least align since we get it
+ // from the compiler via BTF. Adding an aligned offsetBytes preserves the
+ // alignment.
+ cf.offset += offsetBytes
+ return nil
+}
+
+func (cf *coreField) sizeBits() (Bits, error) {
+ if cf.bitfieldSize > 0 {
+ return cf.bitfieldSize, nil
+ }
+
+ // Someone is trying to access a non-bitfield via a bit shift relocation.
+ // This happens when a field changes from a bitfield to a regular field
+ // between kernel versions. Synthesise the size to make the shifts work.
+ size, err := Sizeof(cf.Type)
+ if err != nil {
+ return 0, nil
+ }
+ return Bits(size * 8), nil
+}
+
+// coreFindField descends into the local type using the accessor and tries to
+// find an equivalent field in target at each step.
+//
+// Returns the field and the offset of the field from the start of
+// target in bits.
+func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) {
+ local := coreField{Type: localT}
+ target := coreField{Type: targetT}
+
+ // The first index is used to offset a pointer of the base type like
+ // when accessing an array.
+ if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
+ return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
+ }
+
+ var localMaybeFlex, targetMaybeFlex bool
+ for i, acc := range localAcc[1:] {
+ switch localType := local.Type.(type) {
+ case composite:
+ // For composite types acc is used to find the field in the local type,
+ // and then we try to find a field in target with the same name.
+ localMembers := localType.members()
+ if acc >= len(localMembers) {
+ return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType)
+ }
+
+ localMember := localMembers[acc]
+ if localMember.Name == "" {
+ _, ok := localMember.Type.(composite)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
+ }
+
+ // This is an anonymous struct or union, ignore it.
+ local = coreField{
+ Type: localMember.Type,
+ offset: local.offset + localMember.Offset.Bytes(),
+ }
+ localMaybeFlex = false
+ continue
+ }
+
+ targetType, ok := target.Type.(composite)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
+ }
+
+ targetMember, last, err := coreFindMember(targetType, localMember.Name)
+ if err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ local = coreField{
+ Type: localMember.Type,
+ offset: local.offset,
+ bitfieldSize: localMember.BitfieldSize,
+ }
+ localMaybeFlex = acc == len(localMembers)-1
+
+ target = coreField{
+ Type: targetMember.Type,
+ offset: target.offset,
+ bitfieldSize: targetMember.BitfieldSize,
+ }
+ targetMaybeFlex = last
+
+ if local.bitfieldSize == 0 && target.bitfieldSize == 0 {
+ local.offset += localMember.Offset.Bytes()
+ target.offset += targetMember.Offset.Bytes()
+ break
+ }
+
+ // Either of the members is a bitfield. Make sure we're at the
+ // end of the accessor.
+ if next := i + 1; next < len(localAcc[1:]) {
+ return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield")
+ }
+
+ if err := local.adjustOffsetBits(localMember.Offset); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ if err := target.adjustOffsetBits(targetMember.Offset); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ case *Array:
+ // For arrays, acc is the index in the target.
+ targetType, ok := target.Type.(*Array)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
+ }
+
+ if localType.Nelems == 0 && !localMaybeFlex {
+ return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
+ }
+ if targetType.Nelems == 0 && !targetMaybeFlex {
+ return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
+ }
+
+ if localType.Nelems > 0 && acc >= int(localType.Nelems) {
+ return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
+ }
+ if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
+ return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
+ }
+
+ local = coreField{
+ Type: localType.Type,
+ offset: local.offset,
+ }
+ localMaybeFlex = false
+
+ if err := local.adjustOffsetToNthElement(acc); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ target = coreField{
+ Type: targetType.Type,
+ offset: target.offset,
+ }
+ targetMaybeFlex = false
+
+ if err := target.adjustOffsetToNthElement(acc); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ default:
+ return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
+ }
+
+ if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
+ return coreField{}, coreField{}, err
+ }
+ }
+
+ return local, target, nil
+}
+
+// coreFindMember finds a member in a composite type while handling anonymous
+// structs and unions.
+func coreFindMember(typ composite, name string) (Member, bool, error) {
+ if name == "" {
+ return Member{}, false, errors.New("can't search for anonymous member")
+ }
+
+ type offsetTarget struct {
+ composite
+ offset Bits
+ }
+
+ targets := []offsetTarget{{typ, 0}}
+ visited := make(map[composite]bool)
+
+ for i := 0; i < len(targets); i++ {
+ target := targets[i]
+
+ // Only visit targets once to prevent infinite recursion.
+ if visited[target] {
+ continue
+ }
+ if len(visited) >= maxTypeDepth {
+ // This check is different than libbpf, which restricts the entire
+ // path to BPF_CORE_SPEC_MAX_LEN items.
+ return Member{}, false, fmt.Errorf("type is nested too deep")
+ }
+ visited[target] = true
+
+ members := target.members()
+ for j, member := range members {
+ if member.Name == name {
+ // NB: This is safe because member is a copy.
+ member.Offset += target.offset
+ return member, j == len(members)-1, nil
+ }
+
+ // The names don't match, but this member could be an anonymous struct
+ // or union.
+ if member.Name != "" {
+ continue
+ }
+
+ comp, ok := member.Type.(composite)
+ if !ok {
+ return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
+ }
+
+ targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
+ }
+ }
+
+ return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
+}
+
+// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
+func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
+ localValue, err := localAcc.enumValue(local)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ targetEnum, ok := target.(*Enum)
+ if !ok {
+ return nil, nil, errImpossibleRelocation
+ }
+
+ localName := newEssentialName(localValue.Name)
+ for i, targetValue := range targetEnum.Values {
+ if newEssentialName(targetValue.Name) != localName {
+ continue
+ }
+
+ return localValue, &targetEnum.Values[i], nil
+ }
+
+ return nil, nil, errImpossibleRelocation
+}
+
+/* The comment below is from bpf_core_types_are_compat in libbpf.c:
+ *
+ * Check local and target types for compatibility. This check is used for
+ * type-based CO-RE relocations and follow slightly different rules than
+ * field-based relocations. This function assumes that root types were already
+ * checked for name match. Beyond that initial root-level name check, names
+ * are completely ignored. Compatibility rules are as follows:
+ * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
+ * kind should match for local and target types (i.e., STRUCT is not
+ * compatible with UNION);
+ * - for ENUMs, the size is ignored;
+ * - for INT, size and signedness are ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - CONST/VOLATILE/RESTRICT modifiers are ignored;
+ * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
+ * - FUNC_PROTOs are compatible if they have compatible signature: same
+ * number of input args and compatible return and argument types.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errImpossibleRelocation if types are not compatible.
+ */
+func coreAreTypesCompatible(localType Type, targetType Type) error {
+ var (
+ localTs, targetTs typeDeque
+ l, t = &localType, &targetType
+ depth = 0
+ )
+
+ for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
+ if depth >= maxTypeDepth {
+ return errors.New("types are nested too deep")
+ }
+
+ localType = *l
+ targetType = *t
+
+ if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+ return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
+ }
+
+ switch lv := (localType).(type) {
+ case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
+ // Nothing to do here
+
+ case *Pointer, *Array:
+ depth++
+ localType.walk(&localTs)
+ targetType.walk(&targetTs)
+
+ case *FuncProto:
+ tv := targetType.(*FuncProto)
+ if len(lv.Params) != len(tv.Params) {
+ return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
+ }
+
+ depth++
+ localType.walk(&localTs)
+ targetType.walk(&targetTs)
+
+ default:
+ return fmt.Errorf("unsupported type %T", localType)
+ }
+ }
+
+ if l != nil {
+ return fmt.Errorf("dangling local type %T", *l)
+ }
+
+ if t != nil {
+ return fmt.Errorf("dangling target type %T", *t)
+ }
+
+ return nil
+}
+
+/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
+ *
+ * The comment below is from bpf_core_fields_are_compat in libbpf.c:
+ *
+ * Check two types for compatibility for the purpose of field access
+ * relocation. const/volatile/restrict and typedefs are skipped to ensure we
+ * are relocating semantically compatible entities:
+ * - any two STRUCTs/UNIONs are compatible and can be mixed;
+ * - any two FWDs are compatible, if their names match (modulo flavor suffix);
+ * - any two PTRs are always compatible;
+ * - for ENUMs, names should be the same (ignoring flavor suffix) or at
+ * least one of enums should be anonymous;
+ * - for ENUMs, check sizes, names are ignored;
+ * - for INT, size and signedness are ignored;
+ * - any two FLOATs are always compatible;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * [ NB: coreAreMembersCompatible doesn't recurse, this check is done
+ * by coreFindField. ]
+ * - everything else shouldn't be ever a target of relocation.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errImpossibleRelocation if the members are not compatible.
+ */
+func coreAreMembersCompatible(localType Type, targetType Type) error {
+ doNamesMatch := func(a, b string) error {
+ if a == "" || b == "" {
+ // allow anonymous and named type to match
+ return nil
+ }
+
+ if newEssentialName(a) == newEssentialName(b) {
+ return nil
+ }
+
+ return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
+ }
+
+ _, lok := localType.(composite)
+ _, tok := targetType.(composite)
+ if lok && tok {
+ return nil
+ }
+
+ if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+ return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
+ }
+
+ switch lv := localType.(type) {
+ case *Array, *Pointer, *Float, *Int:
+ return nil
+
+ case *Enum:
+ tv := targetType.(*Enum)
+ return doNamesMatch(lv.Name, tv.Name)
+
+ case *Fwd:
+ tv := targetType.(*Fwd)
+ return doNamesMatch(lv.Name, tv.Name)
+
+ default:
+ return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/doc.go b/vendor/github.com/cilium/ebpf/btf/doc.go
index ad2576cb2..b1f4b1fc3 100644
--- a/vendor/github.com/cilium/ebpf/internal/btf/doc.go
+++ b/vendor/github.com/cilium/ebpf/btf/doc.go
@@ -2,7 +2,4 @@
//
// The canonical documentation lives in the Linux kernel repository and is
// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
-//
-// The API is very much unstable. You should only use this via the main
-// ebpf library.
package btf
diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go
new file mode 100644
index 000000000..2c0e1afe2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go
@@ -0,0 +1,721 @@
+package btf
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+)
+
// ExtInfos contains ELF section metadata.
type ExtInfos struct {
	// The slices are sorted by offset in ascending order.
	// All maps are keyed by ELF section name.
	funcInfos map[string][]funcInfo
	lineInfos map[string][]lineInfo
	relocationInfos map[string][]coreRelocationInfo
}
+
+// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF.
+//
+// Returns an error wrapping ErrNotFound if no ext infos are present.
+func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTable) (*ExtInfos, error) {
+ section := file.Section(".BTF.ext")
+ if section == nil {
+ return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound)
+ }
+
+ if section.ReaderAt == nil {
+ return nil, fmt.Errorf("compressed ext_info is not supported")
+ }
+
+ return loadExtInfos(section.ReaderAt, file.ByteOrder, ts, strings)
+}
+
// loadExtInfos parses bare ext infos.
//
// The resulting ExtInfos maps are keyed by ELF section name.
func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringTable) (*ExtInfos, error) {
	// Open unbuffered section reader. binary.Read() calls io.ReadFull on
	// the header structs, resulting in one syscall per header.
	headerRd := io.NewSectionReader(r, 0, math.MaxInt64)
	extHeader, err := parseBTFExtHeader(headerRd, bo)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF extension header: %w", err)
	}

	// coreHeader is nil when HdrLen indicates no CO-RE extension header.
	coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err)
	}

	// Each sub-section is read through its own buffered section reader.
	buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen))
	btfFuncInfos, err := parseFuncInfos(buf, bo, strings)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF function info: %w", err)
	}

	// Resolve raw type IDs into *Func, per section.
	funcInfos := make(map[string][]funcInfo, len(btfFuncInfos))
	for section, bfis := range btfFuncInfos {
		funcInfos[section], err = newFuncInfos(bfis, ts)
		if err != nil {
			return nil, fmt.Errorf("section %s: func infos: %w", section, err)
		}
	}

	buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen))
	btfLineInfos, err := parseLineInfos(buf, bo, strings)
	if err != nil {
		return nil, fmt.Errorf("parsing BTF line info: %w", err)
	}

	lineInfos := make(map[string][]lineInfo, len(btfLineInfos))
	for section, blis := range btfLineInfos {
		lineInfos[section], err = newLineInfos(blis, strings)
		if err != nil {
			return nil, fmt.Errorf("section %s: line infos: %w", section, err)
		}
	}

	// CO-RE relocations are optional; bail out early when absent.
	if coreHeader == nil || coreHeader.COREReloLen == 0 {
		return &ExtInfos{funcInfos, lineInfos, nil}, nil
	}

	var btfCORERelos map[string][]bpfCORERelo
	buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen))
	btfCORERelos, err = parseCORERelos(buf, bo, strings)
	if err != nil {
		return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err)
	}

	coreRelos := make(map[string][]coreRelocationInfo, len(btfCORERelos))
	for section, brs := range btfCORERelos {
		coreRelos[section], err = newRelocationInfos(brs, ts, strings)
		if err != nil {
			return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err)
		}
	}

	return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil
}
+
// funcInfoMeta and coreRelocationMeta are unique keys for per-instruction
// metadata attached via asm.Metadata.Set.
type funcInfoMeta struct{}
type coreRelocationMeta struct{}

// Assign per-section metadata from BTF to a section's instructions.
//
// Relies on the info slices being sorted by ascending offset (see ExtInfos),
// so a single pass over the instructions suffices.
func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
	funcInfos := ei.funcInfos[section]
	lineInfos := ei.lineInfos[section]
	reloInfos := ei.relocationInfos[section]

	iter := insns.Iterate()
	for iter.Next() {
		// Pop the head of each list when its offset matches the current
		// instruction.
		if len(funcInfos) > 0 && funcInfos[0].offset == iter.Offset {
			iter.Ins.Metadata.Set(funcInfoMeta{}, funcInfos[0].fn)
			funcInfos = funcInfos[1:]
		}

		if len(lineInfos) > 0 && lineInfos[0].offset == iter.Offset {
			*iter.Ins = iter.Ins.WithSource(lineInfos[0].line)
			lineInfos = lineInfos[1:]
		}

		if len(reloInfos) > 0 && reloInfos[0].offset == iter.Offset {
			iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos[0].relo)
			reloInfos = reloInfos[1:]
		}
	}
}
+
+// MarshalExtInfos encodes function and line info embedded in insns into kernel
+// wire format.
+func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error)) (funcInfos, lineInfos []byte, _ error) {
+ iter := insns.Iterate()
+ var fiBuf, liBuf bytes.Buffer
+ for iter.Next() {
+ if fn := FuncMetadata(iter.Ins); fn != nil {
+ fi := &funcInfo{
+ fn: fn,
+ offset: iter.Offset,
+ }
+ if err := fi.marshal(&fiBuf, typeID); err != nil {
+ return nil, nil, fmt.Errorf("write func info: %w", err)
+ }
+ }
+
+ if line, ok := iter.Ins.Source().(*Line); ok {
+ li := &lineInfo{
+ line: line,
+ offset: iter.Offset,
+ }
+ if err := li.marshal(&liBuf); err != nil {
+ return nil, nil, fmt.Errorf("write line info: %w", err)
+ }
+ }
+ }
+ return fiBuf.Bytes(), liBuf.Bytes(), nil
+}
+
// btfExtHeader is found at the start of the .BTF.ext section.
type btfExtHeader struct {
	Magic uint16
	Version uint8
	Flags uint8

	// HdrLen is larger than the size of struct btfExtHeader when it is
	// immediately followed by a btfExtCOREHeader.
	HdrLen uint32

	// Offsets are relative to the end of the header (HdrLen bytes in);
	// lengths are in bytes.
	FuncInfoOff uint32
	FuncInfoLen uint32
	LineInfoOff uint32
	LineInfoLen uint32
}
+
+// parseBTFExtHeader parses the header of the .BTF.ext section.
+func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) {
+ var header btfExtHeader
+ if err := binary.Read(r, bo, &header); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ if header.Magic != btfMagic {
+ return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
+ }
+
+ if header.Version != 1 {
+ return nil, fmt.Errorf("unexpected version %v", header.Version)
+ }
+
+ if header.Flags != 0 {
+ return nil, fmt.Errorf("unsupported flags %v", header.Flags)
+ }
+
+ if int64(header.HdrLen) < int64(binary.Size(&header)) {
+ return nil, fmt.Errorf("header length shorter than btfExtHeader size")
+ }
+
+ return &header, nil
+}
+
// funcInfoStart returns the offset from the beginning of the .BTF.ext section
// to the start of its func_info entries.
//
// NB: the sum is computed in uint32 and wraps silently for adversarial
// headers.
func (h *btfExtHeader) funcInfoStart() int64 {
	return int64(h.HdrLen + h.FuncInfoOff)
}

// lineInfoStart returns the offset from the beginning of the .BTF.ext section
// to the start of its line_info entries.
func (h *btfExtHeader) lineInfoStart() int64 {
	return int64(h.HdrLen + h.LineInfoOff)
}

// coreReloStart returns the offset from the beginning of the .BTF.ext section
// to the start of its CO-RE relocation entries.
func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 {
	return int64(h.HdrLen + ch.COREReloOff)
}

// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen
// field is larger than its size.
type btfExtCOREHeader struct {
	COREReloOff uint32
	COREReloLen uint32
}
+
+// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional
+// header bytes are present, extHeader.HdrLen will be larger than the struct,
+// indicating the presence of a CO-RE extension header.
+func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) {
+ extHdrSize := int64(binary.Size(&extHeader))
+ remainder := int64(extHeader.HdrLen) - extHdrSize
+
+ if remainder == 0 {
+ return nil, nil
+ }
+
+ var coreHeader btfExtCOREHeader
+ if err := binary.Read(r, bo, &coreHeader); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ return &coreHeader, nil
+}
+
// btfExtInfoSec is the on-disk header preceding each per-section group of
// ext info records.
type btfExtInfoSec struct {
	SecNameOff uint32
	NumInfo uint32
}

// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext,
// appearing within func_info and line_info sub-sections.
// These headers appear once for each program section in the ELF and are
// followed by one or more func/line_info records for the section.
func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) {
	var infoHeader btfExtInfoSec
	if err := binary.Read(r, bo, &infoHeader); err != nil {
		return "", nil, fmt.Errorf("read ext info header: %w", err)
	}

	// The section name lives in the BTF string table.
	secName, err := strings.Lookup(infoHeader.SecNameOff)
	if err != nil {
		return "", nil, fmt.Errorf("get section name: %w", err)
	}
	if secName == "" {
		return "", nil, fmt.Errorf("extinfo header refers to empty section name")
	}

	if infoHeader.NumInfo == 0 {
		return "", nil, fmt.Errorf("section %s has zero records", secName)
	}

	return secName, &infoHeader, nil
}
+
+// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos
+// or line_infos segment that describes the length of all extInfoRecords in
+// that segment.
+func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) {
+ const maxRecordSize = 256
+
+ var recordSize uint32
+ if err := binary.Read(r, bo, &recordSize); err != nil {
+ return 0, fmt.Errorf("can't read record size: %v", err)
+ }
+
+ if recordSize < 4 {
+ // Need at least InsnOff worth of bytes per record.
+ return 0, errors.New("record size too short")
+ }
+ if recordSize > maxRecordSize {
+ return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
+ }
+
+ return recordSize, nil
+}
+
// The size of a FuncInfo in BTF wire format.
var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{}))

// funcInfo pairs a resolved *Func with the instruction offset it applies to.
type funcInfo struct {
	fn *Func
	offset asm.RawInstructionOffset
}

// bpfFuncInfo is the on-disk format of a single func_info record.
type bpfFuncInfo struct {
	// Instruction offset of the function within an ELF section.
	InsnOff uint32
	TypeID TypeID
}
+
+func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) {
+ typ, err := ts.ByID(fi.TypeID)
+ if err != nil {
+ return nil, err
+ }
+
+ fn, ok := typ.(*Func)
+ if !ok {
+ return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ)
+ }
+
+ // C doesn't have anonymous functions, but check just in case.
+ if fn.Name == "" {
+ return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID)
+ }
+
+ return &funcInfo{
+ fn,
+ asm.RawInstructionOffset(fi.InsnOff),
+ }, nil
+}
+
+func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
+ fis := make([]funcInfo, 0, len(bfis))
+ for _, bfi := range bfis {
+ fi, err := newFuncInfo(bfi, ts)
+ if err != nil {
+ return nil, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
+ }
+ fis = append(fis, *fi)
+ }
+ sort.Slice(fis, func(i, j int) bool {
+ return fis[i].offset <= fis[j].offset
+ })
+ return fis, nil
+}
+
+// marshal into the BTF wire format.
+func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) error {
+ id, err := typeID(fi.fn)
+ if err != nil {
+ return err
+ }
+ bfi := bpfFuncInfo{
+ InsnOff: uint32(fi.offset),
+ TypeID: id,
+ }
+ return binary.Write(w, internal.NativeEndian, &bfi)
+}
+
// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
// func infos indexed by section name.
func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
	recordSize, err := parseExtInfoRecordSize(r, bo)
	if err != nil {
		return nil, err
	}

	result := make(map[string][]bpfFuncInfo)
	for {
		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
		if errors.Is(err, io.EOF) {
			// Ran out of per-section groups: done.
			return result, nil
		}
		if err != nil {
			return nil, err
		}

		records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
		if err != nil {
			return nil, fmt.Errorf("section %v: %w", secName, err)
		}

		result[secName] = records
	}
}

// parseFuncInfoRecords parses a stream of func_infos into a funcInfos.
// These records appear after a btf_ext_info_sec header in the func_info
// sub-section of .BTF.ext.
func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfFuncInfo, error) {
	var out []bpfFuncInfo
	var fi bpfFuncInfo

	if exp, got := FuncInfoSize, recordSize; exp != got {
		// The BTF blob's record size doesn't match the struct we know how
		// to parse.
		return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got)
	}

	for i := uint32(0); i < recordNum; i++ {
		if err := binary.Read(r, bo, &fi); err != nil {
			return nil, fmt.Errorf("can't read function info: %v", err)
		}

		if fi.InsnOff%asm.InstructionSize != 0 {
			return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff)
		}

		// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
		// Convert as early as possible.
		fi.InsnOff /= asm.InstructionSize

		out = append(out, fi)
	}

	return out, nil
}
+
// The size of a LineInfo in BTF wire format.
var LineInfoSize = uint32(binary.Size(bpfLineInfo{}))

// Line represents the location and contents of a single line of source
// code a BPF ELF was compiled from.
type Line struct {
	fileName string
	line string
	lineNumber uint32
	lineColumn uint32

	// TODO: We should get rid of the fields below, but for that we need to be
	// able to write BTF.

	fileNameOff uint32
	lineOff uint32
}

// FileName returns the name of the source file.
func (li *Line) FileName() string {
	return li.fileName
}

// Line returns the contents of the source line.
func (li *Line) Line() string {
	return li.line
}

// LineNumber returns the line number within the source file.
func (li *Line) LineNumber() uint32 {
	return li.lineNumber
}

// LineColumn returns the column within the source line.
func (li *Line) LineColumn() uint32 {
	return li.lineColumn
}

// String implements fmt.Stringer, returning the source line contents.
func (li *Line) String() string {
	return li.line
}

// lineInfo pairs a *Line with the instruction offset it applies to.
type lineInfo struct {
	line *Line
	offset asm.RawInstructionOffset
}

// Constants for the format of bpfLineInfo.LineCol: the upper 22 bits hold
// the line number, the lower 10 bits the column.
const (
	bpfLineShift = 10
	bpfLineMax   = (1 << (32 - bpfLineShift)) - 1
	bpfColumnMax = (1 << bpfLineShift) - 1
)

// bpfLineInfo is the on-disk format of a single line_info record.
type bpfLineInfo struct {
	// Instruction offset of the line within the whole instruction stream, in instructions.
	InsnOff uint32
	FileNameOff uint32
	LineOff uint32
	LineCol uint32
}
+
+func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) {
+ line, err := strings.Lookup(li.LineOff)
+ if err != nil {
+ return nil, fmt.Errorf("lookup of line: %w", err)
+ }
+
+ fileName, err := strings.Lookup(li.FileNameOff)
+ if err != nil {
+ return nil, fmt.Errorf("lookup of filename: %w", err)
+ }
+
+ lineNumber := li.LineCol >> bpfLineShift
+ lineColumn := li.LineCol & bpfColumnMax
+
+ return &lineInfo{
+ &Line{
+ fileName,
+ line,
+ lineNumber,
+ lineColumn,
+ li.FileNameOff,
+ li.LineOff,
+ },
+ asm.RawInstructionOffset(li.InsnOff),
+ }, nil
+}
+
+func newLineInfos(blis []bpfLineInfo, strings *stringTable) ([]lineInfo, error) {
+ lis := make([]lineInfo, 0, len(blis))
+ for _, bli := range blis {
+ li, err := newLineInfo(bli, strings)
+ if err != nil {
+ return nil, fmt.Errorf("offset %d: %w", bli.InsnOff, err)
+ }
+ lis = append(lis, *li)
+ }
+ sort.Slice(lis, func(i, j int) bool {
+ return lis[i].offset <= lis[j].offset
+ })
+ return lis, nil
+}
+
+// marshal writes the binary representation of the LineInfo to w.
+func (li *lineInfo) marshal(w io.Writer) error {
+ line := li.line
+ if line.lineNumber > bpfLineMax {
+ return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
+ }
+
+ if line.lineColumn > bpfColumnMax {
+ return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
+ }
+
+ bli := bpfLineInfo{
+ uint32(li.offset),
+ line.fileNameOff,
+ line.lineOff,
+ (line.lineNumber << bpfLineShift) | line.lineColumn,
+ }
+ return binary.Write(w, internal.NativeEndian, &bli)
+}
+
// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
// line infos indexed by section name.
func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) {
	recordSize, err := parseExtInfoRecordSize(r, bo)
	if err != nil {
		return nil, err
	}

	result := make(map[string][]bpfLineInfo)
	for {
		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
		if errors.Is(err, io.EOF) {
			// Ran out of per-section groups: done.
			return result, nil
		}
		if err != nil {
			return nil, err
		}

		records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
		if err != nil {
			return nil, fmt.Errorf("section %v: %w", secName, err)
		}

		result[secName] = records
	}
}

// parseLineInfoRecords parses a stream of line_infos into a lineInfos.
// These records appear after a btf_ext_info_sec header in the line_info
// sub-section of .BTF.ext.
func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfLineInfo, error) {
	var out []bpfLineInfo
	var li bpfLineInfo

	if exp, got := uint32(binary.Size(li)), recordSize; exp != got {
		// The BTF blob's record size doesn't match the struct we know how
		// to parse.
		return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got)
	}

	for i := uint32(0); i < recordNum; i++ {
		if err := binary.Read(r, bo, &li); err != nil {
			return nil, fmt.Errorf("can't read line info: %v", err)
		}

		if li.InsnOff%asm.InstructionSize != 0 {
			return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff)
		}

		// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
		// Convert as early as possible.
		li.InsnOff /= asm.InstructionSize

		out = append(out, li)
	}

	return out, nil
}
+
// bpfCORERelo matches the kernel's struct bpf_core_relo.
type bpfCORERelo struct {
	InsnOff uint32
	TypeID TypeID
	AccessStrOff uint32
	Kind coreKind
}

// CORERelocation describes a parsed CO-RE relocation: the type it is
// anchored to, the accessor path into that type and the relocation kind.
type CORERelocation struct {
	typ Type
	accessor coreAccessor
	kind coreKind
}

// CORERelocationMetadata returns the CO-RE relocation attached to ins by
// ExtInfos.Assign, or nil if there is none.
func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
	relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation)
	return relo
}

// coreRelocationInfo pairs a relocation with the instruction offset it
// applies to.
type coreRelocationInfo struct {
	relo *CORERelocation
	offset asm.RawInstructionOffset
}
+
+func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreRelocationInfo, error) {
+ typ, err := ts.ByID(relo.TypeID)
+ if err != nil {
+ return nil, err
+ }
+
+ accessorStr, err := strings.Lookup(relo.AccessStrOff)
+ if err != nil {
+ return nil, err
+ }
+
+ accessor, err := parseCOREAccessor(accessorStr)
+ if err != nil {
+ return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
+ }
+
+ return &coreRelocationInfo{
+ &CORERelocation{
+ typ,
+ accessor,
+ relo.Kind,
+ },
+ asm.RawInstructionOffset(relo.InsnOff),
+ }, nil
+}
+
+func newRelocationInfos(brs []bpfCORERelo, ts types, strings *stringTable) ([]coreRelocationInfo, error) {
+ rs := make([]coreRelocationInfo, 0, len(brs))
+ for _, br := range brs {
+ relo, err := newRelocationInfo(br, ts, strings)
+ if err != nil {
+ return nil, fmt.Errorf("offset %d: %w", br.InsnOff, err)
+ }
+ rs = append(rs, *relo)
+ }
+ sort.Slice(rs, func(i, j int) bool {
+ return rs[i].offset < rs[j].offset
+ })
+ return rs, nil
+}
+
// extInfoReloSize is the on-disk size of a single bpfCORERelo record.
var extInfoReloSize = binary.Size(bpfCORERelo{})

// parseCORERelos parses a core_relos sub-section within .BTF.ext into a map of
// CO-RE relocations indexed by section name.
func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) {
	recordSize, err := parseExtInfoRecordSize(r, bo)
	if err != nil {
		return nil, err
	}

	if recordSize != uint32(extInfoReloSize) {
		return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
	}

	result := make(map[string][]bpfCORERelo)
	for {
		secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
		if errors.Is(err, io.EOF) {
			// Ran out of per-section groups: done.
			return result, nil
		}
		if err != nil {
			return nil, err
		}

		records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo)
		if err != nil {
			return nil, fmt.Errorf("section %v: %w", secName, err)
		}

		result[secName] = records
	}
}

// parseCOREReloRecords parses a stream of CO-RE relocation entries into a
// coreRelos. These records appear after a btf_ext_info_sec header in the
// core_relos sub-section of .BTF.ext.
func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) {
	var out []bpfCORERelo

	var relo bpfCORERelo
	for i := uint32(0); i < recordNum; i++ {
		if err := binary.Read(r, bo, &relo); err != nil {
			return nil, fmt.Errorf("can't read CO-RE relocation: %v", err)
		}

		if relo.InsnOff%asm.InstructionSize != 0 {
			return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff)
		}

		// ELF tracks offset in bytes, the kernel expects raw BPF instructions.
		// Convert as early as possible.
		relo.InsnOff /= asm.InstructionSize

		out = append(out, relo)
	}

	return out, nil
}
diff --git a/vendor/github.com/cilium/ebpf/btf/format.go b/vendor/github.com/cilium/ebpf/btf/format.go
new file mode 100644
index 000000000..e7688a2a6
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/format.go
@@ -0,0 +1,319 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
// errNestedTooDeep is returned when type nesting exceeds maxTypeDepth,
// which may indicate a cycle.
var errNestedTooDeep = errors.New("nested too deep")

// GoFormatter converts a Type to Go syntax.
//
// A zero GoFormatter is valid to use.
type GoFormatter struct {
	// w accumulates generated output; reset at the start of each
	// TypeDeclaration call.
	w strings.Builder

	// Types present in this map are referred to using the given name if they
	// are encountered when outputting another type.
	Names map[Type]string

	// Identifier is called for each field of struct-like types. By default the
	// field name is used as is.
	Identifier func(string) string

	// EnumIdentifier is called for each element of an enum. By default the
	// name of the enum type is concatenated with Identifier(element).
	EnumIdentifier func(name, element string) string
}
+
+// TypeDeclaration generates a Go type declaration for a BTF type.
+func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) {
+ gf.w.Reset()
+ if err := gf.writeTypeDecl(name, typ); err != nil {
+ return "", err
+ }
+ return gf.w.String(), nil
+}
+
+func (gf *GoFormatter) identifier(s string) string {
+ if gf.Identifier != nil {
+ return gf.Identifier(s)
+ }
+
+ return s
+}
+
+func (gf *GoFormatter) enumIdentifier(name, element string) string {
+ if gf.EnumIdentifier != nil {
+ return gf.EnumIdentifier(name, element)
+ }
+
+ return name + gf.identifier(element)
+}
+
// writeTypeDecl outputs a declaration of the given type.
//
// It encodes https://golang.org/ref/spec#Type_declarations:
//
//	type foo struct { bar uint32; }
//	type bar int32
func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error {
	if name == "" {
		return fmt.Errorf("need a name for type %s", typ)
	}

	switch v := skipQualifiers(typ).(type) {
	case *Enum:
		// Enums are declared as a sized integer type, plus a const block
		// for the enumerators when there are any.
		fmt.Fprintf(&gf.w, "type %s ", name)
		switch v.Size {
		case 1:
			gf.w.WriteString("int8")
		case 2:
			gf.w.WriteString("int16")
		case 4:
			gf.w.WriteString("int32")
		case 8:
			gf.w.WriteString("int64")
		default:
			return fmt.Errorf("%s: invalid enum size %d", typ, v.Size)
		}

		if len(v.Values) == 0 {
			return nil
		}

		gf.w.WriteString("; const ( ")
		for _, ev := range v.Values {
			id := gf.enumIdentifier(name, ev.Name)
			fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value)
		}
		gf.w.WriteString(")")

		return nil

	default:
		// Everything else is emitted as a type literal.
		fmt.Fprintf(&gf.w, "type %s ", name)
		return gf.writeTypeLit(v, 0)
	}
}
+
+// writeType outputs the name of a named type or a literal describing the type.
+//
+// It encodes https://golang.org/ref/spec#Types.
+//
+// foo (if foo is a named type)
+// uint32
+func (gf *GoFormatter) writeType(typ Type, depth int) error {
+ typ = skipQualifiers(typ)
+
+ name := gf.Names[typ]
+ if name != "" {
+ gf.w.WriteString(name)
+ return nil
+ }
+
+ return gf.writeTypeLit(typ, depth)
+}
+
// writeTypeLit outputs a literal describing the type.
//
// The function ignores named types.
//
// It encodes https://golang.org/ref/spec#TypeLit.
//
//	struct { bar uint32; }
//	uint32
func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
	depth++
	if depth > maxTypeDepth {
		return errNestedTooDeep
	}

	var err error
	switch v := skipQualifiers(typ).(type) {
	case *Int:
		gf.writeIntLit(v)

	case *Enum:
		// NB: in literal position enums are always rendered as int32,
		// ignoring v.Size (unlike writeTypeDecl).
		gf.w.WriteString("int32")

	case *Typedef:
		err = gf.writeType(v.Type, depth)

	case *Array:
		fmt.Fprintf(&gf.w, "[%d]", v.Nelems)
		err = gf.writeType(v.Type, depth)

	case *Struct:
		err = gf.writeStructLit(v.Size, v.Members, depth)

	case *Union:
		// Always choose the first member to represent the union in Go.
		err = gf.writeStructLit(v.Size, v.Members[:1], depth)

	case *Datasec:
		err = gf.writeDatasecLit(v, depth)

	default:
		return fmt.Errorf("type %T: %w", v, ErrNotSupported)
	}

	if err != nil {
		return fmt.Errorf("%s: %w", typ, err)
	}

	return nil
}
+
+func (gf *GoFormatter) writeIntLit(i *Int) {
+ // NB: Encoding.IsChar is ignored.
+ if i.Encoding.IsBool() && i.Size == 1 {
+ gf.w.WriteString("bool")
+ return
+ }
+
+ bits := i.Size * 8
+ if i.Encoding.IsSigned() {
+ fmt.Fprintf(&gf.w, "int%d", bits)
+ } else {
+ fmt.Fprintf(&gf.w, "uint%d", bits)
+ }
+}
+
// writeStructLit outputs a struct literal covering size bytes, inserting
// anonymous padding fields so that emitted field offsets match BTF layout.
func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error {
	gf.w.WriteString("struct { ")

	prevOffset := uint32(0)
	skippedBitfield := false
	for i, m := range members {
		if m.BitfieldSize > 0 {
			// Bitfields can't be represented in Go; skip them and let the
			// next regular field account for the gap.
			skippedBitfield = true
			continue
		}

		offset := m.Offset.Bytes()
		if n := offset - prevOffset; skippedBitfield && n > 0 {
			fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n)
		} else {
			gf.writePadding(n)
		}
		// NOTE(review): skippedBitfield is never reset, so every gap after
		// the first bitfield is attributed to bitfields — confirm intended.

		size, err := Sizeof(m.Type)
		if err != nil {
			return fmt.Errorf("field %d: %w", i, err)
		}
		prevOffset = offset + uint32(size)

		if err := gf.writeStructField(m, depth); err != nil {
			return fmt.Errorf("field %d: %w", i, err)
		}
	}

	// Trailing padding up to the declared struct size.
	gf.writePadding(size - prevOffset)
	gf.w.WriteString("}")
	return nil
}

// writeStructField outputs a single struct field, substituting the first
// member for anonymous unions.
func (gf *GoFormatter) writeStructField(m Member, depth int) error {
	if m.BitfieldSize > 0 {
		return fmt.Errorf("bitfields are not supported")
	}
	// Offsets are in bits; only byte-aligned fields are representable.
	if m.Offset%8 != 0 {
		return fmt.Errorf("unsupported offset %d", m.Offset)
	}

	if m.Name == "" {
		// Special case a nested anonymous union like
		//     struct foo { union { int bar; int baz }; }
		// by replacing the whole union with its first member.
		union, ok := m.Type.(*Union)
		if !ok {
			return fmt.Errorf("anonymous fields are not supported")

		}

		if len(union.Members) == 0 {
			return errors.New("empty anonymous union")
		}

		depth++
		if depth > maxTypeDepth {
			return errNestedTooDeep
		}

		m := union.Members[0]
		size, err := Sizeof(m.Type)
		if err != nil {
			return err
		}

		if err := gf.writeStructField(m, depth); err != nil {
			return err
		}

		// Pad out the rest of the union.
		gf.writePadding(union.Size - uint32(size))
		return nil

	}

	fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name))

	if err := gf.writeType(m.Type, depth); err != nil {
		return err
	}

	gf.w.WriteString("; ")
	return nil
}

// writeDatasecLit outputs a Datasec as a struct literal containing its
// global variables, padded to their offsets within the section.
func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error {
	gf.w.WriteString("struct { ")

	prevOffset := uint32(0)
	for i, vsi := range ds.Vars {
		v := vsi.Type.(*Var)
		if v.Linkage != GlobalVar {
			// Ignore static, extern, etc. for now.
			continue
		}

		if v.Name == "" {
			return fmt.Errorf("variable %d: empty name", i)
		}

		gf.writePadding(vsi.Offset - prevOffset)
		prevOffset = vsi.Offset + vsi.Size

		fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name))

		if err := gf.writeType(v.Type, depth); err != nil {
			return fmt.Errorf("variable %d: %w", i, err)
		}

		gf.w.WriteString("; ")
	}

	// Trailing padding up to the declared section size.
	gf.writePadding(ds.Size - prevOffset)
	gf.w.WriteString("}")
	return nil
}
+
+func (gf *GoFormatter) writePadding(bytes uint32) {
+ if bytes > 0 {
+ fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes)
+ }
+}
+
+func skipQualifiers(typ Type) Type {
+ result := typ
+ for depth := 0; depth <= maxTypeDepth; depth++ {
+ switch v := (result).(type) {
+ case qualifier:
+ result = v.qualify()
+ default:
+ return result
+ }
+ }
+ return &cycle{typ}
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/handle.go b/vendor/github.com/cilium/ebpf/btf/handle.go
new file mode 100644
index 000000000..128e9b35c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/handle.go
@@ -0,0 +1,121 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// HandleInfo describes a Handle.
+type HandleInfo struct {
+ // ID of this handle in the kernel. The ID is only valid as long as the
+ // associated handle is kept alive.
+ ID ID
+
+ // Name is an identifying name for the BTF, currently only used by the
+ // kernel.
+ Name string
+
+ // IsKernel is true if the BTF originated with the kernel and not
+ // userspace.
+ IsKernel bool
+
+ // Size of the raw BTF in bytes.
+ size uint32
+}
+
+func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
+ // We invoke the syscall once with a empty BTF and name buffers to get size
+ // information to allocate buffers. Then we invoke it a second time with
+ // buffers to receive the data.
+ var btfInfo sys.BtfInfo
+ if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+ return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err)
+ }
+
+ if btfInfo.NameLen > 0 {
+ // NameLen doesn't account for the terminating NUL.
+ btfInfo.NameLen++
+ }
+
+ // Don't pull raw BTF by default, since it may be quite large.
+ btfSize := btfInfo.BtfSize
+ btfInfo.BtfSize = 0
+
+ nameBuffer := make([]byte, btfInfo.NameLen)
+ btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer)
+ if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+ return nil, err
+ }
+
+ return &HandleInfo{
+ ID: ID(btfInfo.Id),
+ Name: unix.ByteSliceToString(nameBuffer),
+ IsKernel: btfInfo.KernelBtf != 0,
+ size: btfSize,
+ }, nil
+}
+
+// IsModule returns true if the BTF is for the kernel itself.
+func (i *HandleInfo) IsVmlinux() bool {
+ return i.IsKernel && i.Name == "vmlinux"
+}
+
+// IsModule returns true if the BTF is for a kernel module.
+func (i *HandleInfo) IsModule() bool {
+ return i.IsKernel && i.Name != "vmlinux"
+}
+
+// HandleIterator allows enumerating BTF blobs loaded into the kernel.
+type HandleIterator struct {
+ // The ID of the last retrieved handle. Only valid after a call to Next.
+ ID ID
+ err error
+}
+
+// Next retrieves a handle for the next BTF blob.
+//
+// [Handle.Close] is called if *handle is non-nil to avoid leaking fds.
+//
+// Returns true if another BTF blob was found. Call [HandleIterator.Err] after
+// the function returns false.
+func (it *HandleIterator) Next(handle **Handle) bool {
+ if *handle != nil {
+ (*handle).Close()
+ *handle = nil
+ }
+
+ id := it.ID
+ for {
+ attr := &sys.BtfGetNextIdAttr{Id: id}
+ err := sys.BtfGetNextId(attr)
+ if errors.Is(err, os.ErrNotExist) {
+ // There are no more BTF objects.
+ return false
+ } else if err != nil {
+ it.err = fmt.Errorf("get next BTF ID: %w", err)
+ return false
+ }
+
+ id = attr.NextId
+ *handle, err = NewHandleFromID(id)
+ if errors.Is(err, os.ErrNotExist) {
+ // Try again with the next ID.
+ continue
+ } else if err != nil {
+ it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err)
+ return false
+ }
+
+ it.ID = id
+ return true
+ }
+}
+
+// Err returns an error if iteration failed for some reason.
+func (it *HandleIterator) Err() error {
+ return it.err
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go
new file mode 100644
index 000000000..67626e0dd
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/strings.go
@@ -0,0 +1,128 @@
+package btf
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+)
+
+type stringTable struct {
+ base *stringTable
+ offsets []uint32
+ strings []string
+}
+
+// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc.
+type sizedReader interface {
+ io.Reader
+ Size() int64
+}
+
+func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) {
+ // When parsing split BTF's string table, the first entry offset is derived
+ // from the last entry offset of the base BTF.
+ firstStringOffset := uint32(0)
+ if base != nil {
+ idx := len(base.offsets) - 1
+ firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1
+ }
+
+ // Derived from vmlinux BTF.
+ const averageStringLength = 16
+
+ n := int(r.Size() / averageStringLength)
+ offsets := make([]uint32, 0, n)
+ strings := make([]string, 0, n)
+
+ offset := firstStringOffset
+ scanner := bufio.NewScanner(r)
+ scanner.Split(splitNull)
+ for scanner.Scan() {
+ str := scanner.Text()
+ offsets = append(offsets, offset)
+ strings = append(strings, str)
+ offset += uint32(len(str)) + 1
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ if len(strings) == 0 {
+ return nil, errors.New("string table is empty")
+ }
+
+ if firstStringOffset == 0 && strings[0] != "" {
+ return nil, errors.New("first item in string table is non-empty")
+ }
+
+ return &stringTable{base, offsets, strings}, nil
+}
+
+// splitNull is a bufio.SplitFunc that tokenizes NUL-terminated strings.
+// The terminating NUL is consumed but excluded from the returned token.
+// Trailing data without a NUL terminator is an error.
+func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ i := bytes.IndexByte(data, 0)
+ if i == -1 {
+ if atEOF && len(data) > 0 {
+ return 0, nil, errors.New("string table isn't null terminated")
+ }
+ return 0, nil, nil
+ }
+
+ return i + 1, data[:i], nil
+}
+
+// Lookup returns the string at the given offset.
+//
+// Offsets at or below the last offset of the base table (if any) are
+// resolved against the base table; all others against this table.
+func (st *stringTable) Lookup(offset uint32) (string, error) {
+ if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] {
+ return st.base.lookup(offset)
+ }
+ return st.lookup(offset)
+}
+
+// lookup resolves an offset within this table only, without consulting
+// the base table. The offset must be the exact start of a string.
+func (st *stringTable) lookup(offset uint32) (string, error) {
+ i := search(st.offsets, offset)
+ if i == len(st.offsets) || st.offsets[i] != offset {
+ return "", fmt.Errorf("offset %d isn't start of a string", offset)
+ }
+
+ return st.strings[i], nil
+}
+
+// Length returns the size of the string table in bytes, including the
+// NUL terminator of the last string.
+func (st *stringTable) Length() int {
+ last := len(st.offsets) - 1
+ return int(st.offsets[last]) + len(st.strings[last]) + 1
+}
+
+func (st *stringTable) Marshal(w io.Writer) error {
+ for _, str := range st.strings {
+ _, err := io.WriteString(w, str)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write([]byte{0})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// search is a copy of sort.Search specialised for uint32.
+//
+// Licensed under https://go.dev/LICENSE
+func search(ints []uint32, needle uint32) int {
+ // Define f(-1) == false and f(n) == true.
+ // Invariant: f(i-1) == false, f(j) == true.
+ i, j := 0, len(ints)
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if !(ints[h] >= needle) {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+ return i
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go
new file mode 100644
index 000000000..402a363c2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/types.go
@@ -0,0 +1,1212 @@
+package btf
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+)
+
+const maxTypeDepth = 32
+
+// TypeID identifies a type in a BTF section.
+type TypeID uint32
+
+// Type represents a type described by BTF.
+type Type interface {
+ // Type can be formatted using the %s and %v verbs. %s outputs only the
+ // identity of the type, without any detail. %v outputs additional detail.
+ //
+ // Use the '+' flag to include the address of the type.
+ //
+ // Use the width to specify how many levels of detail to output, for example
+ // %1v will output detail for the root type and a short description of its
+ // children. %2v would output details of the root type and its children
+ // as well as a short description of the grandchildren.
+ fmt.Formatter
+
+ // Name of the type, empty for anonymous types and types that cannot
+ // carry a name, like Void and Pointer.
+ TypeName() string
+
+ // Make a copy of the type, without copying Type members.
+ copy() Type
+
+ // Enumerate all nested Types. Repeated calls must visit nested
+ // types in the same order.
+ walk(*typeDeque)
+}
+
+var (
+ _ Type = (*Int)(nil)
+ _ Type = (*Struct)(nil)
+ _ Type = (*Union)(nil)
+ _ Type = (*Enum)(nil)
+ _ Type = (*Fwd)(nil)
+ _ Type = (*Func)(nil)
+ _ Type = (*Typedef)(nil)
+ _ Type = (*Var)(nil)
+ _ Type = (*Datasec)(nil)
+ _ Type = (*Float)(nil)
+)
+
+// types is a list of Type.
+//
+// The order determines the ID of a type.
+type types []Type
+
+// ByID returns the type with the given ID, or an error wrapping ErrNotFound
+// if the ID is out of range.
+func (ts types) ByID(id TypeID) (Type, error) {
+ // Use >=, not >: id == len(ts) is also out of range and would
+ // otherwise panic on the index expression below.
+ if int(id) >= len(ts) {
+ return nil, fmt.Errorf("type ID %d: %w", id, ErrNotFound)
+ }
+ return ts[id], nil
+}
+
+// Void is the unit type of BTF.
+type Void struct{}
+
+func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) }
+func (v *Void) TypeName() string { return "" }
+func (v *Void) size() uint32 { return 0 }
+func (v *Void) copy() Type { return (*Void)(nil) }
+func (v *Void) walk(*typeDeque) {}
+
+type IntEncoding byte
+
+const (
+ Signed IntEncoding = 1 << iota
+ Char
+ Bool
+)
+
+func (ie IntEncoding) IsSigned() bool {
+ return ie&Signed != 0
+}
+
+func (ie IntEncoding) IsChar() bool {
+ return ie&Char != 0
+}
+
+func (ie IntEncoding) IsBool() bool {
+ return ie&Bool != 0
+}
+
+func (ie IntEncoding) String() string {
+ switch {
+ case ie.IsChar() && ie.IsSigned():
+ return "char"
+ case ie.IsChar() && !ie.IsSigned():
+ return "uchar"
+ case ie.IsBool():
+ return "bool"
+ case ie.IsSigned():
+ return "signed"
+ default:
+ return "unsigned"
+ }
+}
+
+// Int is an integer of a given length.
+//
+// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
+type Int struct {
+ Name string
+
+ // The size of the integer in bytes.
+ Size uint32
+ Encoding IntEncoding
+}
+
+func (i *Int) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, i, i.Encoding, "size=", i.Size*8)
+}
+
+func (i *Int) TypeName() string { return i.Name }
+func (i *Int) size() uint32 { return i.Size }
+func (i *Int) walk(*typeDeque) {}
+func (i *Int) copy() Type {
+ cpy := *i
+ return &cpy
+}
+
+// Pointer is a pointer to another type.
+type Pointer struct {
+ Target Type
+}
+
+func (p *Pointer) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, p, "target=", p.Target)
+}
+
+func (p *Pointer) TypeName() string { return "" }
+func (p *Pointer) size() uint32 { return 8 }
+func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) }
+func (p *Pointer) copy() Type {
+ cpy := *p
+ return &cpy
+}
+
+// Array is an array with a fixed number of elements.
+type Array struct {
+ Index Type
+ Type Type
+ Nelems uint32
+}
+
+func (arr *Array) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems)
+}
+
+func (arr *Array) TypeName() string { return "" }
+
+func (arr *Array) walk(tdq *typeDeque) {
+ tdq.push(&arr.Index)
+ tdq.push(&arr.Type)
+}
+
+func (arr *Array) copy() Type {
+ cpy := *arr
+ return &cpy
+}
+
+// Struct is a compound type of consecutive members.
+type Struct struct {
+ Name string
+ // The size of the struct including padding, in bytes
+ Size uint32
+ Members []Member
+}
+
+func (s *Struct) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, s, "fields=", len(s.Members))
+}
+
+func (s *Struct) TypeName() string { return s.Name }
+
+func (s *Struct) size() uint32 { return s.Size }
+
+func (s *Struct) walk(tdq *typeDeque) {
+ for i := range s.Members {
+ tdq.push(&s.Members[i].Type)
+ }
+}
+
+func (s *Struct) copy() Type {
+ cpy := *s
+ cpy.Members = copyMembers(s.Members)
+ return &cpy
+}
+
+func (s *Struct) members() []Member {
+ return s.Members
+}
+
+// Union is a compound type where members occupy the same memory.
+type Union struct {
+ Name string
+ // The size of the union including padding, in bytes.
+ Size uint32
+ Members []Member
+}
+
+func (u *Union) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, u, "fields=", len(u.Members))
+}
+
+func (u *Union) TypeName() string { return u.Name }
+
+func (u *Union) size() uint32 { return u.Size }
+
+func (u *Union) walk(tdq *typeDeque) {
+ for i := range u.Members {
+ tdq.push(&u.Members[i].Type)
+ }
+}
+
+func (u *Union) copy() Type {
+ cpy := *u
+ cpy.Members = copyMembers(u.Members)
+ return &cpy
+}
+
+func (u *Union) members() []Member {
+ return u.Members
+}
+
+func copyMembers(orig []Member) []Member {
+ cpy := make([]Member, len(orig))
+ copy(cpy, orig)
+ return cpy
+}
+
+type composite interface {
+ members() []Member
+}
+
+var (
+ _ composite = (*Struct)(nil)
+ _ composite = (*Union)(nil)
+)
+
+// A value in bits.
+type Bits uint32
+
+// Bytes converts a bit value into bytes.
+func (b Bits) Bytes() uint32 {
+ return uint32(b / 8)
+}
+
+// Member is part of a Struct or Union.
+//
+// It is not a valid Type.
+type Member struct {
+ Name string
+ Type Type
+ Offset Bits
+ BitfieldSize Bits
+}
+
+// Enum lists possible values.
+type Enum struct {
+ Name string
+ // Size of the enum value in bytes.
+ Size uint32
+ Values []EnumValue
+}
+
+func (e *Enum) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values))
+}
+
+func (e *Enum) TypeName() string { return e.Name }
+
+// EnumValue is part of an Enum
+//
+// It is not a valid Type.
+type EnumValue struct {
+ Name string
+ Value int32
+}
+
+func (e *Enum) size() uint32 { return e.Size }
+func (e *Enum) walk(*typeDeque) {}
+func (e *Enum) copy() Type {
+ cpy := *e
+ cpy.Values = make([]EnumValue, len(e.Values))
+ copy(cpy.Values, e.Values)
+ return &cpy
+}
+
+// FwdKind is the type of forward declaration.
+type FwdKind int
+
+// Valid types of forward declaration.
+const (
+ FwdStruct FwdKind = iota
+ FwdUnion
+)
+
+func (fk FwdKind) String() string {
+ switch fk {
+ case FwdStruct:
+ return "struct"
+ case FwdUnion:
+ return "union"
+ default:
+ return fmt.Sprintf("%T(%d)", fk, int(fk))
+ }
+}
+
+// Fwd is a forward declaration of a Type.
+type Fwd struct {
+ Name string
+ Kind FwdKind
+}
+
+func (f *Fwd) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, f.Kind)
+}
+
+func (f *Fwd) TypeName() string { return f.Name }
+
+func (f *Fwd) walk(*typeDeque) {}
+func (f *Fwd) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// Typedef is an alias of a Type.
+type Typedef struct {
+ Name string
+ Type Type
+}
+
+func (td *Typedef) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, td, td.Type)
+}
+
+func (td *Typedef) TypeName() string { return td.Name }
+
+func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
+func (td *Typedef) copy() Type {
+ cpy := *td
+ return &cpy
+}
+
+// Volatile is a qualifier.
+type Volatile struct {
+ Type Type
+}
+
+func (v *Volatile) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, v, v.Type)
+}
+
+func (v *Volatile) TypeName() string { return "" }
+
+func (v *Volatile) qualify() Type { return v.Type }
+func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
+func (v *Volatile) copy() Type {
+ cpy := *v
+ return &cpy
+}
+
+// Const is a qualifier.
+type Const struct {
+ Type Type
+}
+
+func (c *Const) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, c, c.Type)
+}
+
+func (c *Const) TypeName() string { return "" }
+
+func (c *Const) qualify() Type { return c.Type }
+func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
+func (c *Const) copy() Type {
+ cpy := *c
+ return &cpy
+}
+
+// Restrict is a qualifier.
+type Restrict struct {
+ Type Type
+}
+
+func (r *Restrict) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, r, r.Type)
+}
+
+func (r *Restrict) TypeName() string { return "" }
+
+func (r *Restrict) qualify() Type { return r.Type }
+func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
+func (r *Restrict) copy() Type {
+ cpy := *r
+ return &cpy
+}
+
+// Func is a function definition.
+type Func struct {
+ Name string
+ Type Type
+ Linkage FuncLinkage
+}
+
+func FuncMetadata(ins *asm.Instruction) *Func {
+ fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func)
+ return fn
+}
+
+func (f *Func) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, f.Linkage, "proto=", f.Type)
+}
+
+func (f *Func) TypeName() string { return f.Name }
+
+func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
+func (f *Func) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// FuncProto is a function declaration.
+type FuncProto struct {
+ Return Type
+ Params []FuncParam
+}
+
+func (fp *FuncProto) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return)
+}
+
+func (fp *FuncProto) TypeName() string { return "" }
+
+func (fp *FuncProto) walk(tdq *typeDeque) {
+ tdq.push(&fp.Return)
+ for i := range fp.Params {
+ tdq.push(&fp.Params[i].Type)
+ }
+}
+
+func (fp *FuncProto) copy() Type {
+ cpy := *fp
+ cpy.Params = make([]FuncParam, len(fp.Params))
+ copy(cpy.Params, fp.Params)
+ return &cpy
+}
+
+type FuncParam struct {
+ Name string
+ Type Type
+}
+
+// Var is a global variable.
+type Var struct {
+ Name string
+ Type Type
+ Linkage VarLinkage
+}
+
+func (v *Var) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, v, v.Linkage)
+}
+
+func (v *Var) TypeName() string { return v.Name }
+
+func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
+func (v *Var) copy() Type {
+ cpy := *v
+ return &cpy
+}
+
+// Datasec is a global program section containing data.
+type Datasec struct {
+ Name string
+ Size uint32
+ Vars []VarSecinfo
+}
+
+func (ds *Datasec) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, ds)
+}
+
+func (ds *Datasec) TypeName() string { return ds.Name }
+
+func (ds *Datasec) size() uint32 { return ds.Size }
+
+func (ds *Datasec) walk(tdq *typeDeque) {
+ for i := range ds.Vars {
+ tdq.push(&ds.Vars[i].Type)
+ }
+}
+
+func (ds *Datasec) copy() Type {
+ cpy := *ds
+ cpy.Vars = make([]VarSecinfo, len(ds.Vars))
+ copy(cpy.Vars, ds.Vars)
+ return &cpy
+}
+
+// VarSecinfo describes a variable in a Datasec.
+//
+// It is not a valid Type.
+type VarSecinfo struct {
+ Type Type
+ Offset uint32
+ Size uint32
+}
+
+// Float is a float of a given length.
+type Float struct {
+ Name string
+
+ // The size of the float in bytes.
+ Size uint32
+}
+
+func (f *Float) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, "size=", f.Size*8)
+}
+
+func (f *Float) TypeName() string { return f.Name }
+func (f *Float) size() uint32 { return f.Size }
+func (f *Float) walk(*typeDeque) {}
+func (f *Float) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// cycle is a type which had to be elided since it exceeded maxTypeDepth.
+type cycle struct {
+ root Type
+}
+
+func (c *cycle) ID() TypeID { return math.MaxUint32 }
+func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) }
+func (c *cycle) TypeName() string { return "" }
+func (c *cycle) walk(*typeDeque) {}
+func (c *cycle) copy() Type {
+ cpy := *c
+ return &cpy
+}
+
+type sizer interface {
+ size() uint32
+}
+
+var (
+ _ sizer = (*Int)(nil)
+ _ sizer = (*Pointer)(nil)
+ _ sizer = (*Struct)(nil)
+ _ sizer = (*Union)(nil)
+ _ sizer = (*Enum)(nil)
+ _ sizer = (*Datasec)(nil)
+)
+
+type qualifier interface {
+ qualify() Type
+}
+
+var (
+ _ qualifier = (*Const)(nil)
+ _ qualifier = (*Restrict)(nil)
+ _ qualifier = (*Volatile)(nil)
+)
+
+// Sizeof returns the size of a type in bytes.
+//
+// Returns an error if the size can't be computed.
+func Sizeof(typ Type) (int, error) {
+ var (
+ n = int64(1)
+ elem int64
+ )
+
+ for i := 0; i < maxTypeDepth; i++ {
+ switch v := typ.(type) {
+ case *Array:
+ if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ // Arrays may be of zero length, which allows
+ // n to be zero as well.
+ n *= int64(v.Nelems)
+ typ = v.Type
+ continue
+
+ case sizer:
+ elem = int64(v.size())
+
+ case *Typedef:
+ typ = v.Type
+ continue
+
+ case qualifier:
+ typ = v.qualify()
+ continue
+
+ default:
+ return 0, fmt.Errorf("unsized type %T", typ)
+ }
+
+ if n > 0 && elem > math.MaxInt64/n {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ size := n * elem
+ if int64(int(size)) != size {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ return int(size), nil
+ }
+
+ return 0, fmt.Errorf("type %s: exceeded type depth", typ)
+}
+
+// alignof returns the alignment of a type.
+//
+// Currently only supports the subset of types necessary for bitfield relocations.
+func alignof(typ Type) (int, error) {
+ switch t := UnderlyingType(typ).(type) {
+ case *Enum:
+ return int(t.size()), nil
+ case *Int:
+ return int(t.Size), nil
+ default:
+ return 0, fmt.Errorf("can't calculate alignment of %T", t)
+ }
+}
+
+// Transformer modifies a given Type and returns the result.
+//
+// For example, UnderlyingType removes any qualifiers or typedefs from a type.
+// See the example on Copy for how to use a transform.
+type Transformer func(Type) Type
+
+// Copy a Type recursively.
+//
+// typ may form a cycle. If transform is not nil, it is called with the
+// to be copied type, and the returned value is copied instead.
+func Copy(typ Type, transform Transformer) Type {
+ copies := make(copier)
+ copies.copy(&typ, transform)
+ return typ
+}
+
+// copy a slice of Types recursively.
+//
+// See Copy for the semantics.
+func copyTypes(types []Type, transform Transformer) []Type {
+ result := make([]Type, len(types))
+ copy(result, types)
+
+ copies := make(copier)
+ for i := range result {
+ copies.copy(&result[i], transform)
+ }
+
+ return result
+}
+
+type copier map[Type]Type
+
+func (c copier) copy(typ *Type, transform Transformer) {
+ var work typeDeque
+ for t := typ; t != nil; t = work.pop() {
+ // *t is the identity of the type.
+ if cpy := c[*t]; cpy != nil {
+ *t = cpy
+ continue
+ }
+
+ var cpy Type
+ if transform != nil {
+ cpy = transform(*t).copy()
+ } else {
+ cpy = (*t).copy()
+ }
+
+ c[*t] = cpy
+ *t = cpy
+
+ // Mark any nested types for copying.
+ cpy.walk(&work)
+ }
+}
+
+// typeDeque keeps track of pointers to types which still
+// need to be visited.
+type typeDeque struct {
+ types []*Type
+ read, write uint64
+ mask uint64
+}
+
+func (dq *typeDeque) empty() bool {
+ return dq.read == dq.write
+}
+
+// push adds a type to the stack.
+func (dq *typeDeque) push(t *Type) {
+ if dq.write-dq.read < uint64(len(dq.types)) {
+ dq.types[dq.write&dq.mask] = t
+ dq.write++
+ return
+ }
+
+ new := len(dq.types) * 2
+ if new == 0 {
+ new = 8
+ }
+
+ types := make([]*Type, new)
+ pivot := dq.read & dq.mask
+ n := copy(types, dq.types[pivot:])
+ n += copy(types[n:], dq.types[:pivot])
+ types[n] = t
+
+ dq.types = types
+ dq.mask = uint64(new) - 1
+ dq.read, dq.write = 0, uint64(n+1)
+}
+
+// shift returns the first element or nil.
+func (dq *typeDeque) shift() *Type {
+ if dq.empty() {
+ return nil
+ }
+
+ index := dq.read & dq.mask
+ t := dq.types[index]
+ dq.types[index] = nil
+ dq.read++
+ return t
+}
+
+// pop returns the last element or nil.
+func (dq *typeDeque) pop() *Type {
+ if dq.empty() {
+ return nil
+ }
+
+ dq.write--
+ index := dq.write & dq.mask
+ t := dq.types[index]
+ dq.types[index] = nil
+ return t
+}
+
+// all returns all elements.
+//
+// The deque is empty after calling this method.
+func (dq *typeDeque) all() []*Type {
+ length := dq.write - dq.read
+ types := make([]*Type, 0, length)
+ for t := dq.shift(); t != nil; t = dq.shift() {
+ types = append(types, t)
+ }
+ return types
+}
+
+// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
+// it into a graph of Types connected via pointers.
+//
+// If baseTypes are provided, then the raw types are
+// considered to be of a split BTF (e.g., a kernel module).
+//
+// Returns a slice of types indexed by TypeID. Since BTF ignores compilation
+// units, multiple types may share the same name. A Type may form a cyclic graph
+// by pointing at itself.
+func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTable) ([]Type, error) {
+ types := make([]Type, 0, len(rawTypes)+1) // +1 for Void added to base types
+
+ typeIDOffset := TypeID(1) // Void is TypeID(0), so the rest starts from TypeID(1)
+
+ if baseTypes == nil {
+ // Void is defined to always be type ID 0, and is thus omitted from BTF.
+ types = append(types, (*Void)(nil))
+ } else {
+ // For split BTF, the next ID is max base BTF type ID + 1
+ typeIDOffset = TypeID(len(baseTypes))
+ }
+
+ type fixupDef struct {
+ id TypeID
+ typ *Type
+ }
+
+ var fixups []fixupDef
+ fixup := func(id TypeID, typ *Type) {
+ if id < TypeID(len(baseTypes)) {
+ *typ = baseTypes[id]
+ return
+ }
+
+ idx := id
+ if baseTypes != nil {
+ idx = id - TypeID(len(baseTypes))
+ }
+ if idx < TypeID(len(types)) {
+ // We've already inflated this type, fix it up immediately.
+ *typ = types[idx]
+ return
+ }
+ fixups = append(fixups, fixupDef{id, typ})
+ }
+
+ type assertion struct {
+ typ *Type
+ want reflect.Type
+ }
+
+ var assertions []assertion
+ assert := func(typ *Type, want reflect.Type) error {
+ if *typ != nil {
+ // The type has already been fixed up, check the type immediately.
+ if reflect.TypeOf(*typ) != want {
+ return fmt.Errorf("expected %s, got %T", want, *typ)
+ }
+ return nil
+ }
+ assertions = append(assertions, assertion{typ, want})
+ return nil
+ }
+
+ type bitfieldFixupDef struct {
+ id TypeID
+ m *Member
+ }
+
+ var (
+ legacyBitfields = make(map[TypeID][2]Bits) // offset, size
+ bitfieldFixups []bitfieldFixupDef
+ )
+ convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
+ // NB: The fixup below relies on pre-allocating this array to
+ // work, since otherwise append might re-allocate members.
+ members := make([]Member, 0, len(raw))
+ for i, btfMember := range raw {
+ name, err := rawStrings.Lookup(btfMember.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
+ }
+
+ members = append(members, Member{
+ Name: name,
+ Offset: Bits(btfMember.Offset),
+ })
+
+ m := &members[i]
+ fixup(raw[i].Type, &m.Type)
+
+ if kindFlag {
+ m.BitfieldSize = Bits(btfMember.Offset >> 24)
+ m.Offset &= 0xffffff
+ // We ignore legacy bitfield definitions if the current composite
+ // is a new-style bitfield. This is kind of safe since offset and
+ // size on the type of the member must be zero if kindFlat is set
+ // according to spec.
+ continue
+ }
+
+ // This may be a legacy bitfield, try to fix it up.
+ data, ok := legacyBitfields[raw[i].Type]
+ if ok {
+ // Bingo!
+ m.Offset += data[0]
+ m.BitfieldSize = data[1]
+ continue
+ }
+
+ if m.Type != nil {
+ // We couldn't find a legacy bitfield, but we know that the member's
+ // type has already been inflated. Hence we know that it can't be
+ // a legacy bitfield and there is nothing left to do.
+ continue
+ }
+
+ // We don't have fixup data, and the type we're pointing
+ // at hasn't been inflated yet. No choice but to defer
+ // the fixup.
+ bitfieldFixups = append(bitfieldFixups, bitfieldFixupDef{
+ raw[i].Type,
+ m,
+ })
+ }
+ return members, nil
+ }
+
+ for i, raw := range rawTypes {
+ var (
+ id = typeIDOffset + TypeID(i)
+ typ Type
+ )
+
+ name, err := rawStrings.Lookup(raw.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for type id %d: %w", id, err)
+ }
+
+ switch raw.Kind() {
+ case kindInt:
+ size := raw.Size()
+ bi := raw.data.(*btfInt)
+ if bi.Offset() > 0 || bi.Bits().Bytes() != size {
+ legacyBitfields[id] = [2]Bits{bi.Offset(), bi.Bits()}
+ }
+ typ = &Int{name, raw.Size(), bi.Encoding()}
+
+ case kindPointer:
+ ptr := &Pointer{nil}
+ fixup(raw.Type(), &ptr.Target)
+ typ = ptr
+
+ case kindArray:
+ btfArr := raw.data.(*btfArray)
+ arr := &Array{nil, nil, btfArr.Nelems}
+ fixup(btfArr.IndexType, &arr.Index)
+ fixup(btfArr.Type, &arr.Type)
+ typ = arr
+
+ case kindStruct:
+ members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
+ if err != nil {
+ return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
+ }
+ typ = &Struct{name, raw.Size(), members}
+
+ case kindUnion:
+ members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
+ if err != nil {
+ return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
+ }
+ typ = &Union{name, raw.Size(), members}
+
+ case kindEnum:
+ rawvals := raw.data.([]btfEnum)
+ vals := make([]EnumValue, 0, len(rawvals))
+ for i, btfVal := range rawvals {
+ name, err := rawStrings.Lookup(btfVal.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for enum value %d: %s", i, err)
+ }
+ vals = append(vals, EnumValue{
+ Name: name,
+ Value: btfVal.Val,
+ })
+ }
+ typ = &Enum{name, raw.Size(), vals}
+
+ case kindForward:
+ if raw.KindFlag() {
+ typ = &Fwd{name, FwdUnion}
+ } else {
+ typ = &Fwd{name, FwdStruct}
+ }
+
+ case kindTypedef:
+ typedef := &Typedef{name, nil}
+ fixup(raw.Type(), &typedef.Type)
+ typ = typedef
+
+ case kindVolatile:
+ volatile := &Volatile{nil}
+ fixup(raw.Type(), &volatile.Type)
+ typ = volatile
+
+ case kindConst:
+ cnst := &Const{nil}
+ fixup(raw.Type(), &cnst.Type)
+ typ = cnst
+
+ case kindRestrict:
+ restrict := &Restrict{nil}
+ fixup(raw.Type(), &restrict.Type)
+ typ = restrict
+
+ case kindFunc:
+ fn := &Func{name, nil, raw.Linkage()}
+ fixup(raw.Type(), &fn.Type)
+ if err := assert(&fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil {
+ return nil, err
+ }
+ typ = fn
+
+ case kindFuncProto:
+ rawparams := raw.data.([]btfParam)
+ params := make([]FuncParam, 0, len(rawparams))
+ for i, param := range rawparams {
+ name, err := rawStrings.Lookup(param.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
+ }
+ params = append(params, FuncParam{
+ Name: name,
+ })
+ }
+ for i := range params {
+ fixup(rawparams[i].Type, &params[i].Type)
+ }
+
+ fp := &FuncProto{nil, params}
+ fixup(raw.Type(), &fp.Return)
+ typ = fp
+
+ case kindVar:
+ variable := raw.data.(*btfVariable)
+ v := &Var{name, nil, VarLinkage(variable.Linkage)}
+ fixup(raw.Type(), &v.Type)
+ typ = v
+
+ case kindDatasec:
+ btfVars := raw.data.([]btfVarSecinfo)
+ vars := make([]VarSecinfo, 0, len(btfVars))
+ for _, btfVar := range btfVars {
+ vars = append(vars, VarSecinfo{
+ Offset: btfVar.Offset,
+ Size: btfVar.Size,
+ })
+ }
+ for i := range vars {
+ fixup(btfVars[i].Type, &vars[i].Type)
+ if err := assert(&vars[i].Type, reflect.TypeOf((*Var)(nil))); err != nil {
+ return nil, err
+ }
+ }
+ typ = &Datasec{name, raw.SizeType, vars}
+
+ case kindFloat:
+ typ = &Float{name, raw.Size()}
+
+ default:
+ return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
+ }
+
+ types = append(types, typ)
+ }
+
+ for _, fixup := range fixups {
+ i := int(fixup.id)
+ if i >= len(types)+len(baseTypes) {
+ return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
+ }
+ if i < len(baseTypes) {
+ return nil, fmt.Errorf("fixup for base type id %d is not expected", i)
+ }
+
+ *fixup.typ = types[i-len(baseTypes)]
+ }
+
+ for _, bitfieldFixup := range bitfieldFixups {
+ if bitfieldFixup.id < TypeID(len(baseTypes)) {
+ return nil, fmt.Errorf("bitfield fixup from split to base types is not expected")
+ }
+
+ data, ok := legacyBitfields[bitfieldFixup.id]
+ if ok {
+ // This is indeed a legacy bitfield, fix it up.
+ bitfieldFixup.m.Offset += data[0]
+ bitfieldFixup.m.BitfieldSize = data[1]
+ }
+ }
+
+ for _, assertion := range assertions {
+ if reflect.TypeOf(*assertion.typ) != assertion.want {
+ return nil, fmt.Errorf("expected %s, got %T", assertion.want, *assertion.typ)
+ }
+ }
+
+ return types, nil
+}
+
+// essentialName represents the name of a BTF type stripped of any flavor
+// suffixes after a ___ delimiter.
+type essentialName string
+
+// newEssentialName returns name without a ___ suffix.
+//
+// CO-RE has the concept of 'struct flavors', which are used to deal with
+// changes in kernel data structures. Anything after three underscores
+// in a type name is ignored for the purpose of finding a candidate type
+// in the kernel's BTF.
+func newEssentialName(name string) essentialName {
+ if name == "" {
+ return ""
+ }
+ lastIdx := strings.LastIndex(name, "___")
+ if lastIdx > 0 {
+ return essentialName(name[:lastIdx])
+ }
+ return essentialName(name)
+}
+
+// UnderlyingType skips qualifiers and Typedefs.
+//
+// If the chain of qualifiers/typedefs exceeds maxTypeDepth (e.g. due to a
+// cycle), an internal cycle marker wrapping the original type is returned.
+func UnderlyingType(typ Type) Type {
+ result := typ
+ for depth := 0; depth <= maxTypeDepth; depth++ {
+ switch v := (result).(type) {
+ case qualifier:
+ result = v.qualify()
+ case *Typedef:
+ result = v.Type
+ default:
+ return result
+ }
+ }
+ return &cycle{typ}
+}
+
+type formatState struct {
+ fmt.State
+ depth int
+}
+
+// formattableType is a subset of Type, to ease unit testing of formatType.
+type formattableType interface {
+ fmt.Formatter
+ TypeName() string
+}
+
+// formatType formats a type in a canonical form.
+//
+// Handles cyclical types by only printing cycles up to a certain depth. Elements
+// in extra are separated by spaces unless the preceding element is a string
+// ending in '='.
+func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) {
+ if verb != 'v' && verb != 's' {
+ fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb)
+ return
+ }
+
+ // This is the same as %T, but elides the package name. Assumes that
+ // formattableType is implemented by a pointer receiver.
+ goTypeName := reflect.TypeOf(t).Elem().Name()
+ _, _ = io.WriteString(f, goTypeName)
+
+ if name := t.TypeName(); name != "" {
+ // Output BTF type name if present.
+ fmt.Fprintf(f, ":%q", name)
+ }
+
+ if f.Flag('+') {
+ // Output address if requested.
+ fmt.Fprintf(f, ":%#p", t)
+ }
+
+ if verb == 's' {
+ // %s omits details.
+ return
+ }
+
+ var depth int
+ if ps, ok := f.(*formatState); ok {
+ depth = ps.depth
+ f = ps.State
+ }
+
+ maxDepth, ok := f.Width()
+ if !ok {
+ maxDepth = 0
+ }
+
+ if depth > maxDepth {
+ // We've reached the maximum depth. This avoids infinite recursion even
+ // for cyclical types.
+ return
+ }
+
+ if len(extra) == 0 {
+ return
+ }
+
+ wantSpace := false
+ _, _ = io.WriteString(f, "[")
+ for _, arg := range extra {
+ if wantSpace {
+ _, _ = io.WriteString(f, " ")
+ }
+
+ switch v := arg.(type) {
+ case string:
+ _, _ = io.WriteString(f, v)
+ wantSpace = len(v) > 0 && v[len(v)-1] != '='
+ continue
+
+ case formattableType:
+ v.Format(&formatState{f, depth + 1}, verb)
+
+ default:
+ fmt.Fprint(f, arg)
+ }
+
+ wantSpace = true
+ }
+ _, _ = io.WriteString(f, "]")
+}
diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go
index 8e3629003..8c2ddc380 100644
--- a/vendor/github.com/cilium/ebpf/collection.go
+++ b/vendor/github.com/cilium/ebpf/collection.go
@@ -1,15 +1,14 @@
package ebpf
import (
+ "encoding/binary"
"errors"
"fmt"
- "math"
"reflect"
"strings"
"github.com/cilium/ebpf/asm"
- "github.com/cilium/ebpf/internal"
- "github.com/cilium/ebpf/internal/btf"
+ "github.com/cilium/ebpf/btf"
)
// CollectionOptions control loading a collection into the kernel.
@@ -18,12 +17,31 @@ import (
type CollectionOptions struct {
Maps MapOptions
Programs ProgramOptions
+
+ // MapReplacements takes a set of Maps that will be used instead of
+ // creating new ones when loading the CollectionSpec.
+ //
+ // For each given Map, there must be a corresponding MapSpec in
+ // CollectionSpec.Maps, and its type, key/value size, max entries and flags
+ // must match the values of the MapSpec.
+ //
+ // The given Maps are Clone()d before being used in the Collection, so the
+ // caller can Close() them freely when they are no longer needed.
+ MapReplacements map[string]*Map
}
// CollectionSpec describes a collection.
type CollectionSpec struct {
Maps map[string]*MapSpec
Programs map[string]*ProgramSpec
+
+ // Types holds type information about Maps and Programs.
+ // Modifications to Types are currently undefined behaviour.
+ Types *btf.Spec
+
+ // ByteOrder specifies whether the ELF was compiled for
+ // big-endian or little-endian architectures.
+ ByteOrder binary.ByteOrder
}
// Copy returns a recursive copy of the spec.
@@ -33,8 +51,10 @@ func (cs *CollectionSpec) Copy() *CollectionSpec {
}
cpy := CollectionSpec{
- Maps: make(map[string]*MapSpec, len(cs.Maps)),
- Programs: make(map[string]*ProgramSpec, len(cs.Programs)),
+ Maps: make(map[string]*MapSpec, len(cs.Maps)),
+ Programs: make(map[string]*ProgramSpec, len(cs.Programs)),
+ ByteOrder: cs.ByteOrder,
+ Types: cs.Types,
}
for name, spec := range cs.Maps {
@@ -54,19 +74,21 @@ func (cs *CollectionSpec) Copy() *CollectionSpec {
// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps.
//
// Returns an error if a named map isn't used in at least one program.
+//
+// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection
+// instead.
func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
for symbol, m := range maps {
// have we seen a program that uses this symbol / map
seen := false
- fd := m.FD()
for progName, progSpec := range cs.Programs {
- err := progSpec.Instructions.RewriteMapPtr(symbol, fd)
+ err := progSpec.Instructions.AssociateMap(symbol, m)
switch {
case err == nil:
seen = true
- case asm.IsUnreferencedSymbol(err):
+ case errors.Is(err, asm.ErrUnreferencedSymbol):
// Not all programs need to use the map
default:
@@ -89,8 +111,8 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
//
// The constant must be defined like so in the C program:
//
-// static volatile const type foobar;
-// static volatile const type foobar = default;
+// volatile const type foobar;
+// volatile const type foobar = default;
//
// Replacement values must be of the same length as the C sizeof(type).
// If necessary, they are marshalled according to the same rules as
@@ -100,48 +122,81 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
//
// Returns an error if a constant doesn't exist.
func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
- rodata := cs.Maps[".rodata"]
- if rodata == nil {
- return errors.New("missing .rodata section")
- }
+ replaced := make(map[string]bool)
- if rodata.BTF == nil {
- return errors.New(".rodata section has no BTF")
- }
+ for name, spec := range cs.Maps {
+ if !strings.HasPrefix(name, ".rodata") {
+ continue
+ }
- if n := len(rodata.Contents); n != 1 {
- return fmt.Errorf("expected one key in .rodata, found %d", n)
- }
+ b, ds, err := spec.dataSection()
+ if errors.Is(err, errMapNoBTFValue) {
+ // Data sections without a BTF Datasec are valid, but don't support
+ // constant replacements.
+ continue
+ }
+ if err != nil {
+ return fmt.Errorf("map %s: %w", name, err)
+ }
+
+ // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice
+ // to avoid any changes affecting other copies of the MapSpec.
+ cpy := make([]byte, len(b))
+ copy(cpy, b)
+
+ for _, v := range ds.Vars {
+ vname := v.Type.TypeName()
+ replacement, ok := consts[vname]
+ if !ok {
+ continue
+ }
- kv := rodata.Contents[0]
- value, ok := kv.Value.([]byte)
- if !ok {
- return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value)
+ if replaced[vname] {
+ return fmt.Errorf("section %s: duplicate variable %s", name, vname)
+ }
+
+ if int(v.Offset+v.Size) > len(cpy) {
+ return fmt.Errorf("section %s: offset %d(+%d) for variable %s is out of bounds", name, v.Offset, v.Size, vname)
+ }
+
+ b, err := marshalBytes(replacement, int(v.Size))
+ if err != nil {
+ return fmt.Errorf("marshaling constant replacement %s: %w", vname, err)
+ }
+
+ copy(cpy[v.Offset:v.Offset+v.Size], b)
+
+ replaced[vname] = true
+ }
+
+ spec.Contents[0] = MapKV{Key: uint32(0), Value: cpy}
}
- buf := make([]byte, len(value))
- copy(buf, value)
+ var missing []string
+ for c := range consts {
+ if !replaced[c] {
+ missing = append(missing, c)
+ }
+ }
- err := patchValue(buf, btf.MapValue(rodata.BTF), consts)
- if err != nil {
- return err
+ if len(missing) != 0 {
+ return fmt.Errorf("spec is missing one or more constants: %s", strings.Join(missing, ","))
}
- rodata.Contents[0] = MapKV{kv.Key, buf}
return nil
}
// Assign the contents of a CollectionSpec to a struct.
//
-// This function is a short-cut to manually checking the presence
-// of maps and programs in a collection spec. Consider using bpf2go if this
-// sounds useful.
+// This function is a shortcut to manually checking the presence
+// of maps and programs in a CollectionSpec. Consider using bpf2go
+// if this sounds useful.
//
-// The argument to must be a pointer to a struct. A field of the
+// 'to' must be a pointer to a struct. A field of the
// struct is updated with values from Programs or Maps if it
// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec.
-// The tag gives the name of the program or map as found in
-// the CollectionSpec.
+// The tag's value specifies the name of the program or map as
+// found in the CollectionSpec.
//
// struct {
// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"`
@@ -149,42 +204,50 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error
// Ignored int
// }
//
-// Returns an error if any of the fields can't be found, or
-// if the same map or program is assigned multiple times.
+// Returns an error if any of the eBPF objects can't be found, or
+// if the same MapSpec or ProgramSpec is assigned multiple times.
func (cs *CollectionSpec) Assign(to interface{}) error {
- valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
+ // Assign() only supports assigning ProgramSpecs and MapSpecs,
+ // so doesn't load any resources into the kernel.
+ getValue := func(typ reflect.Type, name string) (interface{}, error) {
switch typ {
+
case reflect.TypeOf((*ProgramSpec)(nil)):
- p := cs.Programs[name]
- if p == nil {
- return reflect.Value{}, fmt.Errorf("missing program %q", name)
+ if p := cs.Programs[name]; p != nil {
+ return p, nil
}
- return reflect.ValueOf(p), nil
+ return nil, fmt.Errorf("missing program %q", name)
+
case reflect.TypeOf((*MapSpec)(nil)):
- m := cs.Maps[name]
- if m == nil {
- return reflect.Value{}, fmt.Errorf("missing map %q", name)
+ if m := cs.Maps[name]; m != nil {
+ return m, nil
}
- return reflect.ValueOf(m), nil
+ return nil, fmt.Errorf("missing map %q", name)
+
default:
- return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
+ return nil, fmt.Errorf("unsupported type %s", typ)
}
}
- return assignValues(to, valueOf)
+ return assignValues(to, getValue)
}
-// LoadAndAssign maps and programs into the kernel and assign them to a struct.
+// LoadAndAssign loads Maps and Programs into the kernel and assigns them
+// to a struct.
//
-// This function is a short-cut to manually checking the presence
-// of maps and programs in a collection spec. Consider using bpf2go if this
-// sounds useful.
+// Omitting Map/Program.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
//
-// The argument to must be a pointer to a struct. A field of the
-// struct is updated with values from Programs or Maps if it
-// has an `ebpf` tag and its type is *Program or *Map.
-// The tag gives the name of the program or map as found in
-// the CollectionSpec.
+// This function is a shortcut to manually checking the presence
+// of maps and programs in a CollectionSpec. Consider using bpf2go
+// if this sounds useful.
+//
+// 'to' must be a pointer to a struct. A field of the struct is updated with
+// a Program or Map if it has an `ebpf` tag and its type is *Program or *Map.
+// The tag's value specifies the name of the program or map as found in the
+// CollectionSpec. Before updating the struct, the requested objects and their
+// dependent resources are loaded into the kernel and populated with values if
+// specified.
//
// struct {
// Foo *ebpf.Program `ebpf:"xdp_foo"`
@@ -195,39 +258,70 @@ func (cs *CollectionSpec) Assign(to interface{}) error {
// opts may be nil.
//
// Returns an error if any of the fields can't be found, or
-// if the same map or program is assigned multiple times.
+// if the same Map or Program is assigned multiple times.
func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error {
- if opts == nil {
- opts = &CollectionOptions{}
+ loader, err := newCollectionLoader(cs, opts)
+ if err != nil {
+ return err
}
+ defer loader.close()
- loadMap, loadProgram, done, cleanup := lazyLoadCollection(cs, opts)
- defer cleanup()
+ // Support assigning Programs and Maps, lazy-loading the required objects.
+ assignedMaps := make(map[string]bool)
+ assignedProgs := make(map[string]bool)
- valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
+ getValue := func(typ reflect.Type, name string) (interface{}, error) {
switch typ {
+
case reflect.TypeOf((*Program)(nil)):
- p, err := loadProgram(name)
- if err != nil {
- return reflect.Value{}, err
- }
- return reflect.ValueOf(p), nil
+ assignedProgs[name] = true
+ return loader.loadProgram(name)
+
case reflect.TypeOf((*Map)(nil)):
- m, err := loadMap(name)
- if err != nil {
- return reflect.Value{}, err
- }
- return reflect.ValueOf(m), nil
+ assignedMaps[name] = true
+ return loader.loadMap(name)
+
default:
- return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
+ return nil, fmt.Errorf("unsupported type %s", typ)
}
}
- if err := assignValues(to, valueOf); err != nil {
+ // Load the Maps and Programs requested by the annotated struct.
+ if err := assignValues(to, getValue); err != nil {
return err
}
- done()
+ // Populate the requested maps. Has a chance of lazy-loading other dependent maps.
+ if err := loader.populateMaps(); err != nil {
+ return err
+ }
+
+ // Evaluate the loader's objects after all (lazy)loading has taken place.
+ for n, m := range loader.maps {
+ switch m.typ {
+ case ProgramArray:
+ // Require all lazy-loaded ProgramArrays to be assigned to the given object.
+ // The kernel empties a ProgramArray once the last user space reference
+ // to it closes, which leads to failed tail calls. Combined with the library
+ // closing map fds via GC finalizers this can lead to surprising behaviour.
+ // Only allow unassigned ProgramArrays when the library hasn't pre-populated
+ // any entries from static value declarations. At this point, we know the map
+ // is empty and there's no way for the caller to interact with the map going
+ // forward.
+ if !assignedMaps[n] && len(cs.Maps[n].Contents) > 0 {
+ return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n)
+ }
+ }
+ }
+
+ // Prevent loader.cleanup() from closing assigned Maps and Programs.
+ for m := range assignedMaps {
+ delete(loader.maps, m)
+ }
+ for p := range assignedProgs {
+ delete(loader.programs, p)
+ }
+
return nil
}
@@ -238,42 +332,73 @@ type Collection struct {
Maps map[string]*Map
}
-// NewCollection creates a Collection from a specification.
+// NewCollection creates a Collection from the given spec, creating and
+// loading its declared resources into the kernel.
+//
+// Omitting Collection.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
func NewCollection(spec *CollectionSpec) (*Collection, error) {
return NewCollectionWithOptions(spec, CollectionOptions{})
}
-// NewCollectionWithOptions creates a Collection from a specification.
+// NewCollectionWithOptions creates a Collection from the given spec using
+// options, creating and loading its declared resources into the kernel.
+//
+// Omitting Collection.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) {
- loadMap, loadProgram, done, cleanup := lazyLoadCollection(spec, &opts)
- defer cleanup()
+ loader, err := newCollectionLoader(spec, &opts)
+ if err != nil {
+ return nil, err
+ }
+ defer loader.close()
+ // Create maps first, as their fds need to be linked into programs.
for mapName := range spec.Maps {
- _, err := loadMap(mapName)
- if err != nil {
+ if _, err := loader.loadMap(mapName); err != nil {
return nil, err
}
}
- for progName := range spec.Programs {
- _, err := loadProgram(progName)
- if err != nil {
+ for progName, prog := range spec.Programs {
+ if prog.Type == UnspecifiedProgram {
+ continue
+ }
+
+ if _, err := loader.loadProgram(progName); err != nil {
return nil, err
}
}
- maps, progs := done()
+ // Maps can contain Program and Map stubs, so populate them after
+ // all Maps and Programs have been successfully loaded.
+ if err := loader.populateMaps(); err != nil {
+ return nil, err
+ }
+
+ // Prevent loader.cleanup from closing maps and programs.
+ maps, progs := loader.maps, loader.programs
+ loader.maps, loader.programs = nil, nil
+
return &Collection{
progs,
maps,
}, nil
}
-type btfHandleCache map[*btf.Spec]*btf.Handle
+type handleCache struct {
+ btfHandles map[*btf.Spec]*btf.Handle
+}
-func (btfs btfHandleCache) load(spec *btf.Spec) (*btf.Handle, error) {
- if btfs[spec] != nil {
- return btfs[spec], nil
+func newHandleCache() *handleCache {
+ return &handleCache{
+ btfHandles: make(map[*btf.Spec]*btf.Handle),
+ }
+}
+
+func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) {
+ if hc.btfHandles[spec] != nil {
+ return hc.btfHandles[spec], nil
}
handle, err := btf.NewHandle(spec)
@@ -281,122 +406,202 @@ func (btfs btfHandleCache) load(spec *btf.Spec) (*btf.Handle, error) {
return nil, err
}
- btfs[spec] = handle
+ hc.btfHandles[spec] = handle
return handle, nil
}
-func (btfs btfHandleCache) close() {
- for _, handle := range btfs {
+func (hc handleCache) close() {
+ for _, handle := range hc.btfHandles {
handle.Close()
}
}
-func lazyLoadCollection(coll *CollectionSpec, opts *CollectionOptions) (
- loadMap func(string) (*Map, error),
- loadProgram func(string) (*Program, error),
- done func() (map[string]*Map, map[string]*Program),
- cleanup func(),
-) {
- var (
- maps = make(map[string]*Map)
- progs = make(map[string]*Program)
- btfs = make(btfHandleCache)
- skipMapsAndProgs = false
- )
-
- cleanup = func() {
- btfs.close()
-
- if skipMapsAndProgs {
- return
- }
+type collectionLoader struct {
+ coll *CollectionSpec
+ opts *CollectionOptions
+ maps map[string]*Map
+ programs map[string]*Program
+ handles *handleCache
+}
- for _, m := range maps {
- m.Close()
+func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) {
+ if opts == nil {
+ opts = &CollectionOptions{}
+ }
+
+ // Check for existing MapSpecs in the CollectionSpec for all provided replacement maps.
+ for name, m := range opts.MapReplacements {
+ spec, ok := coll.Maps[name]
+ if !ok {
+ return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name)
}
- for _, p := range progs {
- p.Close()
+ if err := spec.checkCompatibility(m); err != nil {
+ return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err)
}
}
- done = func() (map[string]*Map, map[string]*Program) {
- skipMapsAndProgs = true
- return maps, progs
+ return &collectionLoader{
+ coll,
+ opts,
+ make(map[string]*Map),
+ make(map[string]*Program),
+ newHandleCache(),
+ }, nil
+}
+
+// close all resources left over in the collectionLoader.
+func (cl *collectionLoader) close() {
+ cl.handles.close()
+ for _, m := range cl.maps {
+ m.Close()
+ }
+ for _, p := range cl.programs {
+ p.Close()
+ }
+}
+
+func (cl *collectionLoader) loadMap(mapName string) (*Map, error) {
+ if m := cl.maps[mapName]; m != nil {
+ return m, nil
}
- loadMap = func(mapName string) (*Map, error) {
- if m := maps[mapName]; m != nil {
- return m, nil
- }
+ mapSpec := cl.coll.Maps[mapName]
+ if mapSpec == nil {
+ return nil, fmt.Errorf("missing map %s", mapName)
+ }
- mapSpec := coll.Maps[mapName]
- if mapSpec == nil {
- return nil, fmt.Errorf("missing map %s", mapName)
- }
+ if mapSpec.BTF != nil && cl.coll.Types != mapSpec.BTF {
+ return nil, fmt.Errorf("map %s: BTF doesn't match collection", mapName)
+ }
- m, err := newMapWithOptions(mapSpec, opts.Maps, btfs)
+ if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok {
+ // Clone the map to avoid closing user's map later on.
+ m, err := replaceMap.Clone()
if err != nil {
- return nil, fmt.Errorf("map %s: %w", mapName, err)
+ return nil, err
}
- maps[mapName] = m
+ cl.maps[mapName] = m
return m, nil
}
- loadProgram = func(progName string) (*Program, error) {
- if prog := progs[progName]; prog != nil {
- return prog, nil
- }
+ m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.handles)
+ if err != nil {
+ return nil, fmt.Errorf("map %s: %w", mapName, err)
+ }
- progSpec := coll.Programs[progName]
- if progSpec == nil {
- return nil, fmt.Errorf("unknown program %s", progName)
- }
+ cl.maps[mapName] = m
+ return m, nil
+}
- progSpec = progSpec.Copy()
+func (cl *collectionLoader) loadProgram(progName string) (*Program, error) {
+ if prog := cl.programs[progName]; prog != nil {
+ return prog, nil
+ }
- // Rewrite any reference to a valid map.
- for i := range progSpec.Instructions {
- ins := &progSpec.Instructions[i]
+ progSpec := cl.coll.Programs[progName]
+ if progSpec == nil {
+ return nil, fmt.Errorf("unknown program %s", progName)
+ }
- if ins.OpCode != asm.LoadImmOp(asm.DWord) || ins.Reference == "" {
- continue
- }
+ // Bail out early if we know the kernel is going to reject the program.
+ // This skips loading map dependencies, saving some cleanup work later.
+ if progSpec.Type == UnspecifiedProgram {
+ return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName)
+ }
- if uint32(ins.Constant) != math.MaxUint32 {
- // Don't overwrite maps already rewritten, users can
- // rewrite programs in the spec themselves
- continue
- }
+ if progSpec.BTF != nil && cl.coll.Types != progSpec.BTF {
+ return nil, fmt.Errorf("program %s: BTF doesn't match collection", progName)
+ }
- m, err := loadMap(ins.Reference)
- if err != nil {
- return nil, fmt.Errorf("program %s: %s", progName, err)
- }
+ progSpec = progSpec.Copy()
- fd := m.FD()
- if fd < 0 {
- return nil, fmt.Errorf("map %s: %w", ins.Reference, internal.ErrClosedFd)
- }
- if err := ins.RewriteMapPtr(m.FD()); err != nil {
- return nil, fmt.Errorf("progam %s: map %s: %w", progName, ins.Reference, err)
- }
+ // Rewrite any reference to a valid map in the program's instructions,
+ // which includes all of its dependencies.
+ for i := range progSpec.Instructions {
+ ins := &progSpec.Instructions[i]
+
+ if !ins.IsLoadFromMap() || ins.Reference() == "" {
+ continue
}
- prog, err := newProgramWithOptions(progSpec, opts.Programs, btfs)
+ // Don't overwrite map loads containing non-zero map fd's,
+ // they can be manually included by the caller.
+ // Map FDs/IDs are placed in the lower 32 bits of Constant.
+ if int32(ins.Constant) > 0 {
+ continue
+ }
+
+ m, err := cl.loadMap(ins.Reference())
if err != nil {
return nil, fmt.Errorf("program %s: %w", progName, err)
}
- progs[progName] = prog
- return prog, nil
+ if err := ins.AssociateMap(m); err != nil {
+ return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err)
+ }
+ }
+
+ prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.handles)
+ if err != nil {
+ return nil, fmt.Errorf("program %s: %w", progName, err)
+ }
+
+ cl.programs[progName] = prog
+ return prog, nil
+}
+
+func (cl *collectionLoader) populateMaps() error {
+ for mapName, m := range cl.maps {
+ mapSpec, ok := cl.coll.Maps[mapName]
+ if !ok {
+ return fmt.Errorf("missing map spec %s", mapName)
+ }
+
+ mapSpec = mapSpec.Copy()
+
+ // MapSpecs that refer to inner maps or programs within the same
+ // CollectionSpec do so using strings. These strings are used as the key
+ // to look up the respective object in the Maps or Programs fields.
+ // Resolve those references to actual Map or Program resources that
+ // have been loaded into the kernel.
+ for i, kv := range mapSpec.Contents {
+ if objName, ok := kv.Value.(string); ok {
+ switch mapSpec.Type {
+ case ProgramArray:
+ // loadProgram is idempotent and could return an existing Program.
+ prog, err := cl.loadProgram(objName)
+ if err != nil {
+ return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err)
+ }
+ mapSpec.Contents[i] = MapKV{kv.Key, prog}
+
+ case ArrayOfMaps, HashOfMaps:
+ // loadMap is idempotent and could return an existing Map.
+ innerMap, err := cl.loadMap(objName)
+ if err != nil {
+ return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err)
+ }
+ mapSpec.Contents[i] = MapKV{kv.Key, innerMap}
+ }
+ }
+ }
+
+ // Populate and freeze the map if specified.
+ if err := m.finalize(mapSpec); err != nil {
+ return fmt.Errorf("populating map %s: %w", mapName, err)
+ }
}
- return
+ return nil
}
-// LoadCollection parses an object file and converts it to a collection.
+// LoadCollection reads an object file and creates and loads its declared
+// resources into the kernel.
+//
+// Omitting Collection.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
func LoadCollection(file string) (*Collection, error) {
spec, err := LoadCollectionSpec(file)
if err != nil {
@@ -439,108 +644,81 @@ func (coll *Collection) DetachProgram(name string) *Program {
return p
}
-// Assign the contents of a collection to a struct.
-//
-// Deprecated: use CollectionSpec.Assign instead. It provides the same
-// functionality but creates only the maps and programs requested.
-func (coll *Collection) Assign(to interface{}) error {
- assignedMaps := make(map[string]struct{})
- assignedPrograms := make(map[string]struct{})
- valueOf := func(typ reflect.Type, name string) (reflect.Value, error) {
- switch typ {
- case reflect.TypeOf((*Program)(nil)):
- p := coll.Programs[name]
- if p == nil {
- return reflect.Value{}, fmt.Errorf("missing program %q", name)
- }
- assignedPrograms[name] = struct{}{}
- return reflect.ValueOf(p), nil
- case reflect.TypeOf((*Map)(nil)):
- m := coll.Maps[name]
- if m == nil {
- return reflect.Value{}, fmt.Errorf("missing map %q", name)
- }
- assignedMaps[name] = struct{}{}
- return reflect.ValueOf(m), nil
- default:
- return reflect.Value{}, fmt.Errorf("unsupported type %s", typ)
- }
- }
-
- if err := assignValues(to, valueOf); err != nil {
- return err
- }
+// structField represents a struct field containing the ebpf struct tag.
+type structField struct {
+ reflect.StructField
+ value reflect.Value
+}
- for name := range assignedPrograms {
- coll.DetachProgram(name)
+// ebpfFields extracts field names tagged with 'ebpf' from a struct type.
+// Keep track of visited types to avoid infinite recursion.
+func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) {
+ if visited == nil {
+ visited = make(map[reflect.Type]bool)
}
- for name := range assignedMaps {
- coll.DetachMap(name)
+ structType := structVal.Type()
+ if structType.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("%s is not a struct", structType)
}
- return nil
-}
-
-func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Value, error)) error {
- type structField struct {
- reflect.StructField
- value reflect.Value
+ if visited[structType] {
+ return nil, fmt.Errorf("recursion on type %s", structType)
}
- var (
- fields []structField
- visitedTypes = make(map[reflect.Type]bool)
- flattenStruct func(reflect.Value) error
- )
-
- flattenStruct = func(structVal reflect.Value) error {
- structType := structVal.Type()
- if structType.Kind() != reflect.Struct {
- return fmt.Errorf("%s is not a struct", structType)
- }
+ fields := make([]structField, 0, structType.NumField())
+ for i := 0; i < structType.NumField(); i++ {
+ field := structField{structType.Field(i), structVal.Field(i)}
- if visitedTypes[structType] {
- return fmt.Errorf("recursion on type %s", structType)
+ // If the field is tagged, gather it and move on.
+ name := field.Tag.Get("ebpf")
+ if name != "" {
+ fields = append(fields, field)
+ continue
}
- for i := 0; i < structType.NumField(); i++ {
- field := structField{structType.Field(i), structVal.Field(i)}
-
- name := field.Tag.Get("ebpf")
- if name != "" {
- fields = append(fields, field)
+ // If the field does not have an ebpf tag, but is a struct or a pointer
+ // to a struct, attempt to gather its fields as well.
+ var v reflect.Value
+ switch field.Type.Kind() {
+ case reflect.Ptr:
+ if field.Type.Elem().Kind() != reflect.Struct {
continue
}
- var err error
- switch field.Type.Kind() {
- case reflect.Ptr:
- if field.Type.Elem().Kind() != reflect.Struct {
- continue
- }
-
- if field.value.IsNil() {
- return fmt.Errorf("nil pointer to %s", structType)
- }
+ if field.value.IsNil() {
+ return nil, fmt.Errorf("nil pointer to %s", structType)
+ }
- err = flattenStruct(field.value.Elem())
+ // Obtain the destination type of the pointer.
+ v = field.value.Elem()
- case reflect.Struct:
- err = flattenStruct(field.value)
+ case reflect.Struct:
+ // Reference the value's type directly.
+ v = field.value
- default:
- continue
- }
+ default:
+ continue
+ }
- if err != nil {
- return fmt.Errorf("field %s: %s", field.Name, err)
- }
+ inner, err := ebpfFields(v, visited)
+ if err != nil {
+ return nil, fmt.Errorf("field %s: %w", field.Name, err)
}
- return nil
+ fields = append(fields, inner...)
}
+ return fields, nil
+}
+
+// assignValues attempts to populate all fields of 'to' tagged with 'ebpf'.
+//
+// getValue is called for every tagged field of 'to' and must return the value
+// to be assigned to the field with the given typ and name.
+func assignValues(to interface{},
+ getValue func(typ reflect.Type, name string) (interface{}, error)) error {
+
toValue := reflect.ValueOf(to)
if toValue.Type().Kind() != reflect.Ptr {
return fmt.Errorf("%T is not a pointer to struct", to)
@@ -550,7 +728,8 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va
return fmt.Errorf("nil pointer to %T", to)
}
- if err := flattenStruct(toValue.Elem()); err != nil {
+ fields, err := ebpfFields(toValue.Elem(), nil)
+ if err != nil {
return err
}
@@ -560,19 +739,23 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va
name string
}
- assignedTo := make(map[elem]string)
+ assigned := make(map[elem]string)
for _, field := range fields {
- name := field.Tag.Get("ebpf")
- if strings.Contains(name, ",") {
+ // Get string value the field is tagged with.
+ tag := field.Tag.Get("ebpf")
+ if strings.Contains(tag, ",") {
return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name)
}
- e := elem{field.Type, name}
- if assignedField := assignedTo[e]; assignedField != "" {
- return fmt.Errorf("field %s: %q was already assigned to %s", field.Name, name, assignedField)
+ // Check if the eBPF object with the requested
+ // type and tag was already assigned elsewhere.
+ e := elem{field.Type, tag}
+ if af := assigned[e]; af != "" {
+ return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af)
}
- value, err := valueOf(field.Type, name)
+ // Get the eBPF object referred to by the tag.
+ value, err := getValue(field.Type, tag)
if err != nil {
return fmt.Errorf("field %s: %w", field.Name, err)
}
@@ -580,9 +763,9 @@ func assignValues(to interface{}, valueOf func(reflect.Type, string) (reflect.Va
if !field.value.CanSet() {
return fmt.Errorf("field %s: can't set value", field.Name)
}
+ field.value.Set(reflect.ValueOf(value))
- field.value.Set(value)
- assignedTo[e] = field.Name
+ assigned[e] = field.Name
}
return nil
diff --git a/vendor/github.com/cilium/ebpf/doc.go b/vendor/github.com/cilium/ebpf/doc.go
index f7f34da8f..396b3394d 100644
--- a/vendor/github.com/cilium/ebpf/doc.go
+++ b/vendor/github.com/cilium/ebpf/doc.go
@@ -13,4 +13,13 @@
// your application as any other resource.
//
// Use the link subpackage to attach a loaded program to a hook in the kernel.
+//
+// Note that losing all references to Map and Program resources will cause
+// their underlying file descriptors to be closed, potentially removing those
+// objects from the kernel. Always retain a reference by e.g. deferring a
+// Close() of a Collection or LoadAndAssign object until application exit.
+//
+// Special care needs to be taken when handling maps of type ProgramArray,
+// as the kernel erases its contents when the last userspace or bpffs
+// reference disappears, regardless of the map being in active use.
package ebpf
diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go
index 943142c49..df278895c 100644
--- a/vendor/github.com/cilium/ebpf/elf_reader.go
+++ b/vendor/github.com/cilium/ebpf/elf_reader.go
@@ -13,19 +13,20 @@ import (
"strings"
"github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
- "github.com/cilium/ebpf/internal/btf"
"github.com/cilium/ebpf/internal/unix"
)
// elfCode is a convenience to reduce the amount of arguments that have to
-// be passed around explicitly. You should treat it's contents as immutable.
+// be passed around explicitly. You should treat its contents as immutable.
type elfCode struct {
*internal.SafeELFFile
sections map[elf.SectionIndex]*elfSection
license string
version uint32
btf *btf.Spec
+ extInfo *btf.ExtInfos
}
// LoadCollectionSpec parses an ELF file into a CollectionSpec.
@@ -49,7 +50,6 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
if err != nil {
return nil, err
}
- defer f.Close()
var (
licenseSection *elf.Section
@@ -95,77 +95,29 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
return nil, fmt.Errorf("load version: %w", err)
}
- btfSpec, err := btf.LoadSpecFromReader(rd)
- if err != nil {
+ btfSpec, btfExtInfo, err := btf.LoadSpecAndExtInfosFromReader(rd)
+ if err != nil && !errors.Is(err, btf.ErrNotFound) {
return nil, fmt.Errorf("load BTF: %w", err)
}
- // Assign symbols to all the sections we're interested in.
- symbols, err := f.Symbols()
- if err != nil {
- return nil, fmt.Errorf("load symbols: %v", err)
- }
-
- for _, symbol := range symbols {
- idx := symbol.Section
- symType := elf.ST_TYPE(symbol.Info)
-
- section := sections[idx]
- if section == nil {
- continue
- }
-
- // Older versions of LLVM don't tag symbols correctly, so keep
- // all NOTYPE ones.
- keep := symType == elf.STT_NOTYPE
- switch section.kind {
- case mapSection, btfMapSection, dataSection:
- keep = keep || symType == elf.STT_OBJECT
- case programSection:
- keep = keep || symType == elf.STT_FUNC
- }
- if !keep || symbol.Name == "" {
- continue
- }
-
- section.symbols[symbol.Value] = symbol
- }
-
ec := &elfCode{
SafeELFFile: f,
sections: sections,
license: license,
version: version,
btf: btfSpec,
+ extInfo: btfExtInfo,
}
- // Go through relocation sections, and parse the ones for sections we're
- // interested in. Make sure that relocations point at valid sections.
- for idx, relSection := range relSections {
- section := sections[idx]
- if section == nil {
- continue
- }
-
- rels, err := ec.loadRelocations(relSection, symbols)
- if err != nil {
- return nil, fmt.Errorf("relocation for section %q: %w", section.Name, err)
- }
-
- for _, rel := range rels {
- target := sections[rel.Section]
- if target == nil {
- return nil, fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported)
- }
+ symbols, err := f.Symbols()
+ if err != nil {
+ return nil, fmt.Errorf("load symbols: %v", err)
+ }
- if target.Flags&elf.SHF_STRINGS > 0 {
- return nil, fmt.Errorf("section %q: string %q is not stack allocated: %w", section.Name, rel.Name, ErrNotSupported)
- }
+ ec.assignSymbols(symbols)
- target.references++
- }
-
- section.relocations = rels
+ if err := ec.loadRelocations(relSections, symbols); err != nil {
+ return nil, fmt.Errorf("load relocations: %w", err)
}
// Collect all the various ways to define maps.
@@ -183,12 +135,12 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
}
// Finally, collect programs and link them.
- progs, err := ec.loadPrograms()
+ progs, err := ec.loadProgramSections()
if err != nil {
return nil, fmt.Errorf("load programs: %w", err)
}
- return &CollectionSpec{maps, progs}, nil
+ return &CollectionSpec{maps, progs, btfSpec, ec.ByteOrder}, nil
}
func loadLicense(sec *elf.Section) (string, error) {
@@ -247,100 +199,238 @@ func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection {
}
}
-func (ec *elfCode) loadPrograms() (map[string]*ProgramSpec, error) {
- var (
- progs []*ProgramSpec
- libs []*ProgramSpec
- )
+// assignSymbols takes a list of symbols and assigns them to their
+// respective sections, indexed by name.
+func (ec *elfCode) assignSymbols(symbols []elf.Symbol) {
+ for _, symbol := range symbols {
+ symType := elf.ST_TYPE(symbol.Info)
+ symSection := ec.sections[symbol.Section]
+ if symSection == nil {
+ continue
+ }
- for _, sec := range ec.sections {
- if sec.kind != programSection {
+ // Anonymous symbols only occur in debug sections which we don't process
+ // relocations for. Anonymous symbols are not referenced from other sections.
+ if symbol.Name == "" {
continue
}
- if len(sec.symbols) == 0 {
- return nil, fmt.Errorf("section %v: missing symbols", sec.Name)
+ // Older versions of LLVM don't tag symbols correctly, so keep
+ // all NOTYPE ones.
+ switch symSection.kind {
+ case mapSection, btfMapSection, dataSection:
+ if symType != elf.STT_NOTYPE && symType != elf.STT_OBJECT {
+ continue
+ }
+ case programSection:
+ if symType != elf.STT_NOTYPE && symType != elf.STT_FUNC {
+ continue
+ }
+ // LLVM emits LBB_ (Local Basic Block) symbols that seem to be jump
+ // targets within sections, but BPF has no use for them.
+ if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL &&
+ strings.HasPrefix(symbol.Name, "LBB") {
+ continue
+ }
+ // Only collect symbols that occur in program/maps/data sections.
+ default:
+ continue
}
- funcSym, ok := sec.symbols[0]
- if !ok {
- return nil, fmt.Errorf("section %v: no label at start", sec.Name)
+ symSection.symbols[symbol.Value] = symbol
+ }
+}
+
+// loadRelocations iterates .rel* sections and extracts relocation entries for
+// sections of interest. Makes sure relocations point at valid sections.
+func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section, symbols []elf.Symbol) error {
+ for idx, relSection := range relSections {
+ section := ec.sections[idx]
+ if section == nil {
+ continue
}
- insns, length, err := ec.loadInstructions(sec)
+ rels, err := ec.loadSectionRelocations(relSection, symbols)
if err != nil {
- return nil, fmt.Errorf("program %s: %w", funcSym.Name, err)
+ return fmt.Errorf("relocation for section %q: %w", section.Name, err)
}
- progType, attachType, attachTo := getProgType(sec.Name)
+ for _, rel := range rels {
+ target := ec.sections[rel.Section]
+ if target == nil {
+ return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported)
+ }
+
+ if target.Flags&elf.SHF_STRINGS > 0 {
+ return fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported)
+ }
- spec := &ProgramSpec{
- Name: funcSym.Name,
- Type: progType,
- AttachType: attachType,
- AttachTo: attachTo,
- License: ec.license,
- KernelVersion: ec.version,
- Instructions: insns,
- ByteOrder: ec.ByteOrder,
+ target.references++
}
- if ec.btf != nil {
- spec.BTF, err = ec.btf.Program(sec.Name, length)
- if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) {
- return nil, fmt.Errorf("program %s: %w", funcSym.Name, err)
- }
+ section.relocations = rels
+ }
+
+ return nil
+}
+
+// loadProgramSections iterates ec's sections and emits a ProgramSpec
+// for each function it finds.
+//
+// The resulting map is indexed by function name.
+func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) {
+
+ progs := make(map[string]*ProgramSpec)
+
+ // Generate a ProgramSpec for each function found in each program section.
+ var export []string
+ for _, sec := range ec.sections {
+ if sec.kind != programSection {
+ continue
}
- if spec.Type == UnspecifiedProgram {
- // There is no single name we can use for "library" sections,
- // since they may contain multiple functions. We'll decode the
- // labels they contain later on, and then link sections that way.
- libs = append(libs, spec)
- } else {
- progs = append(progs, spec)
+ if len(sec.symbols) == 0 {
+ return nil, fmt.Errorf("section %v: missing symbols", sec.Name)
}
- }
- res := make(map[string]*ProgramSpec, len(progs))
- for _, prog := range progs {
- err := link(prog, libs)
+ funcs, err := ec.loadFunctions(sec)
if err != nil {
- return nil, fmt.Errorf("program %s: %w", prog.Name, err)
+ return nil, fmt.Errorf("section %v: %w", sec.Name, err)
}
- res[prog.Name] = prog
- }
- return res, nil
-}
+ progType, attachType, progFlags, attachTo := getProgType(sec.Name)
+
+ for name, insns := range funcs {
+ spec := &ProgramSpec{
+ Name: name,
+ Type: progType,
+ Flags: progFlags,
+ AttachType: attachType,
+ AttachTo: attachTo,
+ SectionName: sec.Name,
+ License: ec.license,
+ KernelVersion: ec.version,
+ Instructions: insns,
+ ByteOrder: ec.ByteOrder,
+ BTF: ec.btf,
+ }
-func (ec *elfCode) loadInstructions(section *elfSection) (asm.Instructions, uint64, error) {
- var (
- r = bufio.NewReader(section.Open())
- insns asm.Instructions
- offset uint64
- )
- for {
- var ins asm.Instruction
- n, err := ins.Unmarshal(r, ec.ByteOrder)
- if err == io.EOF {
- return insns, offset, nil
+ // Function names must be unique within a single ELF blob.
+ if progs[name] != nil {
+ return nil, fmt.Errorf("duplicate program name %s", name)
+ }
+ progs[name] = spec
+
+ if spec.SectionName != ".text" {
+ export = append(export, name)
+ }
}
- if err != nil {
- return nil, 0, fmt.Errorf("offset %d: %w", offset, err)
+ }
+
+ flattenPrograms(progs, export)
+
+ // Hide programs (e.g. library functions) that were not explicitly emitted
+ // to an ELF section. These could be exposed in a separate CollectionSpec
+ // field later to allow them to be modified.
+ for n, p := range progs {
+ if p.SectionName == ".text" {
+ delete(progs, n)
}
+ }
+
+ return progs, nil
+}
- ins.Symbol = section.symbols[offset].Name
+// loadFunctions extracts instruction streams from the given program section
+// starting at each symbol in the section. The section's symbols must already
+// be narrowed down to STT_NOTYPE (emitted by clang <8) or STT_FUNC.
+//
+// The resulting map is indexed by function name.
+func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) {
+ r := bufio.NewReader(section.Open())
+
+ // Decode the section's instruction stream.
+ var insns asm.Instructions
+ if err := insns.Unmarshal(r, ec.ByteOrder); err != nil {
+ return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err)
+ }
+ if len(insns) == 0 {
+ return nil, fmt.Errorf("no instructions found in section %s", section.Name)
+ }
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+ offset := iter.Offset.Bytes()
+
+ // Tag Symbol Instructions.
+ if sym, ok := section.symbols[offset]; ok {
+ *ins = ins.WithSymbol(sym.Name)
+ }
+
+ // Apply any relocations for the current instruction.
+ // If no relocation is present, resolve any section-relative function calls.
if rel, ok := section.relocations[offset]; ok {
- if err = ec.relocateInstruction(&ins, rel); err != nil {
- return nil, 0, fmt.Errorf("offset %d: relocate instruction: %w", offset, err)
+ if err := ec.relocateInstruction(ins, rel); err != nil {
+ return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err)
+ }
+ } else {
+ if err := referenceRelativeJump(ins, offset, section.symbols); err != nil {
+ return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err)
}
}
+ }
+
+ if ec.extInfo != nil {
+ ec.extInfo.Assign(insns, section.Name)
+ }
+
+ return splitSymbols(insns)
+}
+
+// referenceRelativeJump turns a relative jump to another bpf subprogram within
+// the same ELF section into a Reference Instruction.
+//
+// Up to LLVM 9, calls to subprograms within the same ELF section are sometimes
+// encoded using relative jumps instead of relocation entries. These jumps go
+// out of bounds of the current program, so their targets must be memoized
+// before the section's instruction stream is split.
+//
+// The relative jump Constant is blinded to -1 and the target Symbol is set as
+// the Instruction's Reference so it can be resolved by the linker.
+func referenceRelativeJump(ins *asm.Instruction, offset uint64, symbols map[uint64]elf.Symbol) error {
+ if !ins.IsFunctionReference() || ins.Constant == -1 {
+ return nil
+ }
- insns = append(insns, ins)
- offset += n
+ tgt := jumpTarget(offset, *ins)
+ sym := symbols[tgt].Name
+ if sym == "" {
+ return fmt.Errorf("no jump target found at offset %d", tgt)
}
+
+ *ins = ins.WithReference(sym)
+ ins.Constant = -1
+
+ return nil
+}
+
+// jumpTarget takes ins' offset within an instruction stream (in bytes)
+// and returns its absolute jump destination (in bytes) within the
+// instruction stream.
+func jumpTarget(offset uint64, ins asm.Instruction) uint64 {
+ // A relative jump instruction describes the amount of raw BPF instructions
+ // to jump, convert the offset into bytes.
+ dest := ins.Constant * asm.InstructionSize
+
+ // The starting point of the jump is the end of the current instruction.
+ dest += int64(offset + asm.InstructionSize)
+
+ if dest < 0 {
+ return 0
+ }
+
+ return uint64(dest)
}
func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error {
@@ -366,24 +456,35 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
ins.Src = asm.PseudoMapFD
- // Mark the instruction as needing an update when creating the
- // collection.
- if err := ins.RewriteMapPtr(-1); err != nil {
- return err
- }
-
case dataSection:
+ var offset uint32
switch typ {
case elf.STT_SECTION:
if bind != elf.STB_LOCAL {
- return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
+ return fmt.Errorf("direct load: %s: unsupported section relocation %s", name, bind)
}
+ // This is really a reference to a static symbol, which clang doesn't
+ // emit a symbol table entry for. Instead it encodes the offset in
+ // the instruction itself.
+ offset = uint32(uint64(ins.Constant))
+
case elf.STT_OBJECT:
- if bind != elf.STB_GLOBAL {
- return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind)
+ // LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants.
+ if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL {
+ return fmt.Errorf("direct load: %s: unsupported object relocation %s", name, bind)
}
+ offset = uint32(rel.Value)
+
+ case elf.STT_NOTYPE:
+ // LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants.
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("direct load: %s: unsupported untyped relocation %s", name, bind)
+ }
+
+ offset = uint32(rel.Value)
+
default:
return fmt.Errorf("incorrect relocation type %v for direct map load", typ)
}
@@ -393,57 +494,75 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
// it's not clear how to encode that into Instruction.
name = target.Name
- // For some reason, clang encodes the offset of the symbol its
- // section in the first basic BPF instruction, while the kernel
- // expects it in the second one.
- ins.Constant <<= 32
+ // The kernel expects the offset in the second basic BPF instruction.
+ ins.Constant = int64(uint64(offset) << 32)
ins.Src = asm.PseudoMapValue
- // Mark the instruction as needing an update when creating the
- // collection.
- if err := ins.RewriteMapPtr(-1); err != nil {
- return err
- }
-
case programSection:
- if ins.OpCode.JumpOp() != asm.Call {
- return fmt.Errorf("not a call instruction: %s", ins)
- }
+ switch opCode := ins.OpCode; {
+ case opCode.JumpOp() == asm.Call:
+ if ins.Src != asm.PseudoCall {
+ return fmt.Errorf("call: %s: incorrect source register", name)
+ }
- if ins.Src != asm.PseudoCall {
- return fmt.Errorf("call: %s: incorrect source register", name)
- }
+ switch typ {
+ case elf.STT_NOTYPE, elf.STT_FUNC:
+ if bind != elf.STB_GLOBAL {
+ return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
+ }
- switch typ {
- case elf.STT_NOTYPE, elf.STT_FUNC:
- if bind != elf.STB_GLOBAL {
- return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
- }
+ case elf.STT_SECTION:
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
+ }
- case elf.STT_SECTION:
- if bind != elf.STB_LOCAL {
- return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
+ // The function we want to call is in the indicated section,
+ // at the offset encoded in the instruction itself. Reverse
+ // the calculation to find the real function we're looking for.
+ // A value of -1 references the first instruction in the section.
+ offset := int64(int32(ins.Constant)+1) * asm.InstructionSize
+ sym, ok := target.symbols[uint64(offset)]
+ if !ok {
+ return fmt.Errorf("call: no symbol at offset %d", offset)
+ }
+
+ name = sym.Name
+ ins.Constant = -1
+
+ default:
+ return fmt.Errorf("call: %s: invalid symbol type %s", name, typ)
}
+ case opCode.IsDWordLoad():
+ switch typ {
+ case elf.STT_FUNC:
+ if bind != elf.STB_GLOBAL {
+ return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
+ }
+
+ case elf.STT_SECTION:
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
+ }
- // The function we want to call is in the indicated section,
- // at the offset encoded in the instruction itself. Reverse
- // the calculation to find the real function we're looking for.
- // A value of -1 references the first instruction in the section.
- offset := int64(int32(ins.Constant)+1) * asm.InstructionSize
- if offset < 0 {
- return fmt.Errorf("call: %s: invalid offset %d", name, offset)
+ // ins.Constant already contains the offset in bytes from the
+ // start of the section. This is different than a call to a
+ // static function.
+
+ default:
+ return fmt.Errorf("load: %s: invalid symbol type %s", name, typ)
}
- sym, ok := target.symbols[uint64(offset)]
+ sym, ok := target.symbols[uint64(ins.Constant)]
if !ok {
- return fmt.Errorf("call: %s: no symbol at offset %d", name, offset)
+ return fmt.Errorf("load: no symbol at offset %d", ins.Constant)
}
- ins.Constant = -1
name = sym.Name
+ ins.Constant = -1
+ ins.Src = asm.PseudoFunc
default:
- return fmt.Errorf("call: %s: invalid symbol type %s", name, typ)
+ return fmt.Errorf("neither a call nor a load instruction: %v", ins)
}
case undefSection:
@@ -461,7 +580,7 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err
return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported)
}
- ins.Reference = name
+ *ins = ins.WithReference(name)
return nil
}
@@ -490,39 +609,51 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error {
return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
}
- if maps[mapSym.Name] != nil {
+ mapName := mapSym.Name
+ if maps[mapName] != nil {
return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
}
lr := io.LimitReader(r, int64(size))
spec := MapSpec{
- Name: SanitizeName(mapSym.Name, -1),
+ Name: SanitizeName(mapName, -1),
}
switch {
case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
- return fmt.Errorf("map %v: missing type", mapSym)
+ return fmt.Errorf("map %s: missing type", mapName)
case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
- return fmt.Errorf("map %v: missing key size", mapSym)
+ return fmt.Errorf("map %s: missing key size", mapName)
case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
- return fmt.Errorf("map %v: missing value size", mapSym)
+ return fmt.Errorf("map %s: missing value size", mapName)
case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
- return fmt.Errorf("map %v: missing max entries", mapSym)
+ return fmt.Errorf("map %s: missing max entries", mapName)
case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
- return fmt.Errorf("map %v: missing flags", mapSym)
+ return fmt.Errorf("map %s: missing flags", mapName)
+ }
+
+ extra, err := io.ReadAll(lr)
+ if err != nil {
+ return fmt.Errorf("map %s: reading map tail: %w", mapName, err)
+ }
+ if len(extra) > 0 {
+ spec.Extra = bytes.NewReader(extra)
}
- if _, err := io.Copy(internal.DiscardZeroes{}, lr); err != nil {
- return fmt.Errorf("map %v: unknown and non-zero fields in definition", mapSym)
+ if err := spec.clampPerfEventArraySize(); err != nil {
+ return fmt.Errorf("map %s: %w", mapName, err)
}
- maps[mapSym.Name] = &spec
+ maps[mapName] = &spec
}
}
return nil
}
+// loadBTFMaps iterates over all ELF sections marked as BTF map sections
+// (like .maps) and parses them into MapSpecs. Dump the .maps section and
+// any relocations with `readelf -x .maps -r <elf_file>`.
func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
for _, sec := range ec.sections {
if sec.kind != btfMapSection {
@@ -533,42 +664,66 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
return fmt.Errorf("missing BTF")
}
- if len(sec.symbols) == 0 {
- return fmt.Errorf("section %v: no symbols", sec.Name)
+ // Each section must appear as a DataSec in the ELF's BTF blob.
+ var ds *btf.Datasec
+ if err := ec.btf.TypeByName(sec.Name, &ds); err != nil {
+ return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err)
}
- _, err := io.Copy(internal.DiscardZeroes{}, bufio.NewReader(sec.Open()))
- if err != nil {
- return fmt.Errorf("section %v: initializing BTF map definitions: %w", sec.Name, internal.ErrNotSupported)
- }
+ // Open a Reader to the ELF's raw section bytes so we can assert that all
+ // of them are zero on a per-map (per-Var) basis. For now, the section's
+ // sole purpose is to receive relocations, so all must be zero.
+ rs := sec.Open()
- for _, sym := range sec.symbols {
- name := sym.Name
- if maps[name] != nil {
- return fmt.Errorf("section %v: map %v already exists", sec.Name, sym)
+ for _, vs := range ds.Vars {
+ // BPF maps are declared as and assigned to global variables,
+ // so iterate over each Var in the DataSec and validate their types.
+ v, ok := vs.Type.(*btf.Var)
+ if !ok {
+ return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type)
}
+ name := string(v.Name)
- // A global Var is created by declaring a struct with a 'structure variable',
- // as is common in eBPF C to declare eBPF maps. For example,
- // `struct { ... } map_name ...;` emits a global variable `map_name`
- // with the type of said struct (which can be anonymous).
- var v btf.Var
- if err := ec.btf.FindType(name, &v); err != nil {
- return fmt.Errorf("cannot find global variable '%s' in BTF: %w", name, err)
+ // The BTF metadata for each Var contains the full length of the map
+ // declaration, so read the corresponding amount of bytes from the ELF.
+ // This way, we can pinpoint which map declaration contains unexpected
+ // (and therefore unsupported) data.
+ _, err := io.Copy(internal.DiscardZeroes{}, io.LimitReader(rs, int64(vs.Size)))
+ if err != nil {
+ return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported)
}
+ if maps[name] != nil {
+ return fmt.Errorf("section %v: map %s already exists", sec.Name, name)
+ }
+
+ // Each Var representing a BTF map definition contains a Struct.
mapStruct, ok := v.Type.(*btf.Struct)
if !ok {
return fmt.Errorf("expected struct, got %s", v.Type)
}
- mapSpec, err := mapSpecFromBTF(name, mapStruct, false, ec.btf)
+ mapSpec, err := mapSpecFromBTF(sec, &vs, mapStruct, ec.btf, name, false)
if err != nil {
return fmt.Errorf("map %v: %w", name, err)
}
+ if err := mapSpec.clampPerfEventArraySize(); err != nil {
+ return fmt.Errorf("map %v: %w", name, err)
+ }
+
maps[name] = mapSpec
}
+
+ // Drain the ELF section reader to make sure all bytes are accounted for
+ // with BTF metadata.
+ i, err := io.Copy(io.Discard, rs)
+ if err != nil {
+ return fmt.Errorf("section %v: unexpected error reading remainder of ELF section: %w", sec.Name, err)
+ }
+ if i > 0 {
+ return fmt.Errorf("section %v: %d unexpected remaining bytes in ELF section, invalid BTF?", sec.Name, i)
+ }
}
return nil
@@ -577,24 +732,26 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error {
// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing
// a BTF map definition. The name and spec arguments will be copied to the
// resulting MapSpec, and inner must be true on any recursive invocations.
-func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (*MapSpec, error) {
-
+func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) {
var (
- key, value btf.Type
- keySize, valueSize uint32
- mapType, flags, maxEntries uint32
- pinType PinType
- innerMapSpec *MapSpec
- err error
+ key, value btf.Type
+ keySize, valueSize uint32
+ mapType MapType
+ flags, maxEntries uint32
+ pinType PinType
+ innerMapSpec *MapSpec
+ contents []MapKV
+ err error
)
for i, member := range def.Members {
switch member.Name {
case "type":
- mapType, err = uintFromBTF(member.Type)
+ mt, err := uintFromBTF(member.Type)
if err != nil {
return nil, fmt.Errorf("can't get type: %w", err)
}
+ mapType = MapType(mt)
case "map_flags":
flags, err = uintFromBTF(member.Type)
@@ -704,7 +861,7 @@ func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (*
case *btf.Struct:
// The values member pointing to an array of structs means we're expecting
// a map-in-map declaration.
- if MapType(mapType) != ArrayOfMaps && MapType(mapType) != HashOfMaps {
+ if mapType != ArrayOfMaps && mapType != HashOfMaps {
return nil, errors.New("outer map needs to be an array or a hash of maps")
}
if inner {
@@ -718,21 +875,38 @@ func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (*
// on kernels 5.2 and up)
// Pass the BTF spec from the parent object, since both parent and
// child must be created from the same BTF blob (on kernels that support BTF).
- innerMapSpec, err = mapSpecFromBTF(name+"_inner", t, true, spec)
+ innerMapSpec, err = mapSpecFromBTF(es, vs, t, spec, name+"_inner", true)
if err != nil {
return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err)
}
+ case *btf.FuncProto:
+ // The values member contains an array of function pointers, meaning an
+ // autopopulated PROG_ARRAY.
+ if mapType != ProgramArray {
+ return nil, errors.New("map needs to be a program array")
+ }
+
default:
return nil, fmt.Errorf("unsupported value type %q in 'values' field", t)
}
+ contents, err = resolveBTFValuesContents(es, vs, member)
+ if err != nil {
+ return nil, fmt.Errorf("resolving values contents: %w", err)
+ }
+
default:
return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name)
}
}
- bm := btf.NewMap(spec, key, value)
+ if key == nil {
+ key = &btf.Void{}
+ }
+ if value == nil {
+ value = &btf.Void{}
+ }
return &MapSpec{
Name: SanitizeName(name, -1),
@@ -741,9 +915,12 @@ func mapSpecFromBTF(name string, def *btf.Struct, inner bool, spec *btf.Spec) (*
ValueSize: valueSize,
MaxEntries: maxEntries,
Flags: flags,
- BTF: &bm,
+ Key: key,
+ Value: value,
+ BTF: spec,
Pinning: pinType,
InnerMap: innerMapSpec,
+ Contents: contents,
}, nil
}
@@ -780,6 +957,64 @@ func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) {
return ptr.Target, nil
}
+// resolveBTFValuesContents resolves relocations into ELF sections belonging
+// to btf.VarSecinfo's. This can be used on the 'values' member in BTF map
+// definitions to extract static declarations of map contents.
+func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Member) ([]MapKV, error) {
+ // The elements of a .values pointer array are not encoded in BTF.
+ // Instead, relocations are generated into each array index.
+ // However, it's possible to leave certain array indices empty, so all
+ // indices' offsets need to be checked for emitted relocations.
+
+ // The offset of the 'values' member within the _struct_ (in bits)
+ // is the starting point of the array. Convert to bytes. Add VarSecinfo
+ // offset to get the absolute position in the ELF blob.
+ start := member.Offset.Bytes() + vs.Offset
+ // 'values' is encoded in BTF as a zero (variable) length struct
+ // member, and its contents run until the end of the VarSecinfo.
+ // Add VarSecinfo offset to get the absolute position in the ELF blob.
+ end := vs.Size + vs.Offset
+ // The size of an address in this section. This determines the width of
+ // an index in the array.
+ align := uint32(es.SectionHeader.Addralign)
+
+ // Check if variable-length section is aligned.
+ if (end-start)%align != 0 {
+ return nil, errors.New("unaligned static values section")
+ }
+ elems := (end - start) / align
+
+ if elems == 0 {
+ return nil, nil
+ }
+
+ contents := make([]MapKV, 0, elems)
+
+ // k is the array index, off is its corresponding ELF section offset.
+ for k, off := uint32(0), start; k < elems; k, off = k+1, off+align {
+ r, ok := es.relocations[uint64(off)]
+ if !ok {
+ continue
+ }
+
+ // Relocation exists for the current offset in the ELF section.
+ // Emit a value stub based on the type of relocation to be replaced by
+ // a real fd later in the pipeline before populating the map.
+ // Map keys are encoded in MapKV entries, so empty array indices are
+ // skipped here.
+ switch t := elf.ST_TYPE(r.Info); t {
+ case elf.STT_FUNC:
+ contents = append(contents, MapKV{uint32(k), r.Name})
+ case elf.STT_OBJECT:
+ contents = append(contents, MapKV{uint32(k), r.Name})
+ default:
+ return nil, fmt.Errorf("unknown relocation type %v", t)
+ }
+ }
+
+ return contents, nil
+}
+
func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
for _, sec := range ec.sections {
if sec.kind != dataSection {
@@ -792,15 +1027,6 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
continue
}
- if ec.btf == nil {
- return errors.New("data sections require BTF, make sure all consts are marked as static")
- }
-
- btfMap, err := ec.btf.Datasec(sec.Name)
- if err != nil {
- return err
- }
-
data, err := sec.Data()
if err != nil {
return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
@@ -817,14 +1043,25 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
ValueSize: uint32(len(data)),
MaxEntries: 1,
Contents: []MapKV{{uint32(0), data}},
- BTF: btfMap,
}
- switch sec.Name {
- case ".rodata":
+ // It is possible for a data section to exist without a corresponding BTF Datasec
+ // if it only contains anonymous values like macro-defined arrays.
+ if ec.btf != nil {
+ var ds *btf.Datasec
+ if ec.btf.TypeByName(sec.Name, &ds) == nil {
+ // Assign the spec's key and BTF only if the Datasec lookup was successful.
+ mapSpec.BTF = ec.btf
+ mapSpec.Key = &btf.Void{}
+ mapSpec.Value = ds
+ }
+ }
+
+ switch n := sec.Name; {
+ case strings.HasPrefix(n, ".rodata"):
mapSpec.Flags = unix.BPF_F_RDONLY_PROG
mapSpec.Freeze = true
- case ".bss":
+ case n == ".bss":
// The kernel already zero-initializes the map
mapSpec.Contents = nil
}
@@ -834,74 +1071,104 @@ func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error {
return nil
}
-func getProgType(sectionName string) (ProgramType, AttachType, string) {
- types := map[string]struct {
+func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
+ types := []struct {
+ prefix string
progType ProgramType
attachType AttachType
+ progFlags uint32
}{
- // From https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c
- "socket": {SocketFilter, AttachNone},
- "seccomp": {SocketFilter, AttachNone},
- "kprobe/": {Kprobe, AttachNone},
- "uprobe/": {Kprobe, AttachNone},
- "kretprobe/": {Kprobe, AttachNone},
- "uretprobe/": {Kprobe, AttachNone},
- "tracepoint/": {TracePoint, AttachNone},
- "raw_tracepoint/": {RawTracepoint, AttachNone},
- "xdp": {XDP, AttachNone},
- "perf_event": {PerfEvent, AttachNone},
- "lwt_in": {LWTIn, AttachNone},
- "lwt_out": {LWTOut, AttachNone},
- "lwt_xmit": {LWTXmit, AttachNone},
- "lwt_seg6local": {LWTSeg6Local, AttachNone},
- "sockops": {SockOps, AttachCGroupSockOps},
- "sk_skb/stream_parser": {SkSKB, AttachSkSKBStreamParser},
- "sk_skb/stream_verdict": {SkSKB, AttachSkSKBStreamParser},
- "sk_msg": {SkMsg, AttachSkSKBStreamVerdict},
- "lirc_mode2": {LircMode2, AttachLircMode2},
- "flow_dissector": {FlowDissector, AttachFlowDissector},
- "iter/": {Tracing, AttachTraceIter},
- "sk_lookup/": {SkLookup, AttachSkLookup},
- "lsm/": {LSM, AttachLSMMac},
-
- "cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress},
- "cgroup_skb/egress": {CGroupSKB, AttachCGroupInetEgress},
- "cgroup/dev": {CGroupDevice, AttachCGroupDevice},
- "cgroup/skb": {CGroupSKB, AttachNone},
- "cgroup/sock": {CGroupSock, AttachCGroupInetSockCreate},
- "cgroup/post_bind4": {CGroupSock, AttachCGroupInet4PostBind},
- "cgroup/post_bind6": {CGroupSock, AttachCGroupInet6PostBind},
- "cgroup/bind4": {CGroupSockAddr, AttachCGroupInet4Bind},
- "cgroup/bind6": {CGroupSockAddr, AttachCGroupInet6Bind},
- "cgroup/connect4": {CGroupSockAddr, AttachCGroupInet4Connect},
- "cgroup/connect6": {CGroupSockAddr, AttachCGroupInet6Connect},
- "cgroup/sendmsg4": {CGroupSockAddr, AttachCGroupUDP4Sendmsg},
- "cgroup/sendmsg6": {CGroupSockAddr, AttachCGroupUDP6Sendmsg},
- "cgroup/recvmsg4": {CGroupSockAddr, AttachCGroupUDP4Recvmsg},
- "cgroup/recvmsg6": {CGroupSockAddr, AttachCGroupUDP6Recvmsg},
- "cgroup/sysctl": {CGroupSysctl, AttachCGroupSysctl},
- "cgroup/getsockopt": {CGroupSockopt, AttachCGroupGetsockopt},
- "cgroup/setsockopt": {CGroupSockopt, AttachCGroupSetsockopt},
- "classifier": {SchedCLS, AttachNone},
- "action": {SchedACT, AttachNone},
- }
-
- for prefix, t := range types {
- if !strings.HasPrefix(sectionName, prefix) {
+ // Please update the types from libbpf.c and follow the order of it.
+ // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c
+ {"socket", SocketFilter, AttachNone, 0},
+ {"sk_reuseport/migrate", SkReuseport, AttachSkReuseportSelectOrMigrate, 0},
+ {"sk_reuseport", SkReuseport, AttachSkReuseportSelect, 0},
+ {"kprobe/", Kprobe, AttachNone, 0},
+ {"uprobe/", Kprobe, AttachNone, 0},
+ {"kretprobe/", Kprobe, AttachNone, 0},
+ {"uretprobe/", Kprobe, AttachNone, 0},
+ {"tc", SchedCLS, AttachNone, 0},
+ {"classifier", SchedCLS, AttachNone, 0},
+ {"action", SchedACT, AttachNone, 0},
+ {"tracepoint/", TracePoint, AttachNone, 0},
+ {"tp/", TracePoint, AttachNone, 0},
+ {"raw_tracepoint/", RawTracepoint, AttachNone, 0},
+ {"raw_tp/", RawTracepoint, AttachNone, 0},
+ {"raw_tracepoint.w/", RawTracepointWritable, AttachNone, 0},
+ {"raw_tp.w/", RawTracepointWritable, AttachNone, 0},
+ {"tp_btf/", Tracing, AttachTraceRawTp, 0},
+ {"fentry/", Tracing, AttachTraceFEntry, 0},
+ {"fmod_ret/", Tracing, AttachModifyReturn, 0},
+ {"fexit/", Tracing, AttachTraceFExit, 0},
+ {"fentry.s/", Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE},
+ {"fmod_ret.s/", Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE},
+ {"fexit.s/", Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE},
+ {"freplace/", Extension, AttachNone, 0},
+ {"lsm/", LSM, AttachLSMMac, 0},
+ {"lsm.s/", LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE},
+ {"iter/", Tracing, AttachTraceIter, 0},
+ {"syscall", Syscall, AttachNone, 0},
+ {"xdp_devmap/", XDP, AttachXDPDevMap, 0},
+ {"xdp_cpumap/", XDP, AttachXDPCPUMap, 0},
+ {"xdp", XDP, AttachNone, 0},
+ {"perf_event", PerfEvent, AttachNone, 0},
+ {"lwt_in", LWTIn, AttachNone, 0},
+ {"lwt_out", LWTOut, AttachNone, 0},
+ {"lwt_xmit", LWTXmit, AttachNone, 0},
+ {"lwt_seg6local", LWTSeg6Local, AttachNone, 0},
+ {"cgroup_skb/ingress", CGroupSKB, AttachCGroupInetIngress, 0},
+ {"cgroup_skb/egress", CGroupSKB, AttachCGroupInetEgress, 0},
+ {"cgroup/skb", CGroupSKB, AttachNone, 0},
+ {"cgroup/sock_create", CGroupSock, AttachCGroupInetSockCreate, 0},
+ {"cgroup/sock_release", CGroupSock, AttachCgroupInetSockRelease, 0},
+ {"cgroup/sock", CGroupSock, AttachCGroupInetSockCreate, 0},
+ {"cgroup/post_bind4", CGroupSock, AttachCGroupInet4PostBind, 0},
+ {"cgroup/post_bind6", CGroupSock, AttachCGroupInet6PostBind, 0},
+ {"cgroup/dev", CGroupDevice, AttachCGroupDevice, 0},
+ {"sockops", SockOps, AttachCGroupSockOps, 0},
+ {"sk_skb/stream_parser", SkSKB, AttachSkSKBStreamParser, 0},
+ {"sk_skb/stream_verdict", SkSKB, AttachSkSKBStreamVerdict, 0},
+ {"sk_skb", SkSKB, AttachNone, 0},
+ {"sk_msg", SkMsg, AttachSkMsgVerdict, 0},
+ {"lirc_mode2", LircMode2, AttachLircMode2, 0},
+ {"flow_dissector", FlowDissector, AttachFlowDissector, 0},
+ {"cgroup/bind4", CGroupSockAddr, AttachCGroupInet4Bind, 0},
+ {"cgroup/bind6", CGroupSockAddr, AttachCGroupInet6Bind, 0},
+ {"cgroup/connect4", CGroupSockAddr, AttachCGroupInet4Connect, 0},
+ {"cgroup/connect6", CGroupSockAddr, AttachCGroupInet6Connect, 0},
+ {"cgroup/sendmsg4", CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0},
+ {"cgroup/sendmsg6", CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0},
+ {"cgroup/recvmsg4", CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0},
+ {"cgroup/recvmsg6", CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0},
+ {"cgroup/getpeername4", CGroupSockAddr, AttachCgroupInet4GetPeername, 0},
+ {"cgroup/getpeername6", CGroupSockAddr, AttachCgroupInet6GetPeername, 0},
+ {"cgroup/getsockname4", CGroupSockAddr, AttachCgroupInet4GetSockname, 0},
+ {"cgroup/getsockname6", CGroupSockAddr, AttachCgroupInet6GetSockname, 0},
+ {"cgroup/sysctl", CGroupSysctl, AttachCGroupSysctl, 0},
+ {"cgroup/getsockopt", CGroupSockopt, AttachCGroupGetsockopt, 0},
+ {"cgroup/setsockopt", CGroupSockopt, AttachCGroupSetsockopt, 0},
+ {"struct_ops+", StructOps, AttachNone, 0},
+ {"sk_lookup/", SkLookup, AttachSkLookup, 0},
+
+ {"seccomp", SocketFilter, AttachNone, 0},
+ }
+
+ for _, t := range types {
+ if !strings.HasPrefix(sectionName, t.prefix) {
continue
}
- if !strings.HasSuffix(prefix, "/") {
- return t.progType, t.attachType, ""
+ if !strings.HasSuffix(t.prefix, "/") {
+ return t.progType, t.attachType, t.progFlags, ""
}
- return t.progType, t.attachType, sectionName[len(prefix):]
+ return t.progType, t.attachType, t.progFlags, sectionName[len(t.prefix):]
}
- return UnspecifiedProgram, AttachNone, ""
+ return UnspecifiedProgram, AttachNone, 0, ""
}
-func (ec *elfCode) loadRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
+func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
rels := make(map[uint64]elf.Symbol)
if sec.Entsize < 16 {
diff --git a/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go b/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go
deleted file mode 100644
index d46d135f2..000000000
--- a/vendor/github.com/cilium/ebpf/elf_reader_fuzz.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build gofuzz
-
-// Use with https://github.com/dvyukov/go-fuzz
-
-package ebpf
-
-import "bytes"
-
-func FuzzLoadCollectionSpec(data []byte) int {
- spec, err := LoadCollectionSpecFromReader(bytes.NewReader(data))
- if err != nil {
- if spec != nil {
- panic("spec is not nil")
- }
- return 0
- }
- if spec == nil {
- panic("spec is nil")
- }
- return 1
-}
diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go
index b95131ef5..ae77bc619 100644
--- a/vendor/github.com/cilium/ebpf/info.go
+++ b/vendor/github.com/cilium/ebpf/info.go
@@ -2,6 +2,7 @@ package ebpf
import (
"bufio"
+ "bytes"
"encoding/hex"
"errors"
"fmt"
@@ -10,8 +11,13 @@ import (
"strings"
"syscall"
"time"
+ "unsafe"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
)
// MapInfo describes a map.
@@ -22,12 +28,13 @@ type MapInfo struct {
ValueSize uint32
MaxEntries uint32
Flags uint32
- // Name as supplied by user space at load time.
+ // Name as supplied by user space at load time. Available from 4.15.
Name string
}
-func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) {
- info, err := bpfGetMapInfoByFD(fd)
+func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) {
+ var info sys.MapInfo
+ err := sys.ObjInfo(fd, &info)
if errors.Is(err, syscall.EINVAL) {
return newMapInfoFromProc(fd)
}
@@ -36,18 +43,17 @@ func newMapInfoFromFd(fd *internal.FD) (*MapInfo, error) {
}
return &MapInfo{
- MapType(info.map_type),
- MapID(info.id),
- info.key_size,
- info.value_size,
- info.max_entries,
- info.map_flags,
- // name is available from 4.15.
- internal.CString(info.name[:]),
+ MapType(info.Type),
+ MapID(info.Id),
+ info.KeySize,
+ info.ValueSize,
+ info.MaxEntries,
+ info.MapFlags,
+ unix.ByteSliceToString(info.Name[:]),
}, nil
}
-func newMapInfoFromProc(fd *internal.FD) (*MapInfo, error) {
+func newMapInfoFromProc(fd *sys.FD) (*MapInfo, error) {
var mi MapInfo
err := scanFdInfo(fd, map[string]interface{}{
"map_type": &mi.Type,
@@ -83,16 +89,21 @@ type programStats struct {
type ProgramInfo struct {
Type ProgramType
id ProgramID
- // Truncated hash of the BPF bytecode.
+ // Truncated hash of the BPF bytecode. Available from 4.13.
Tag string
- // Name as supplied by user space at load time.
+ // Name as supplied by user space at load time. Available from 4.15.
Name string
+ btf btf.ID
stats *programStats
+
+ maps []MapID
+ insns []byte
}
-func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) {
- info, err := bpfGetProgInfoByFD(fd)
+func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) {
+ var info sys.ProgInfo
+ err := sys.ObjInfo(fd, &info)
if errors.Is(err, syscall.EINVAL) {
return newProgramInfoFromProc(fd)
}
@@ -100,21 +111,43 @@ func newProgramInfoFromFd(fd *internal.FD) (*ProgramInfo, error) {
return nil, err
}
- return &ProgramInfo{
- Type: ProgramType(info.prog_type),
- id: ProgramID(info.id),
- // tag is available if the kernel supports BPF_PROG_GET_INFO_BY_FD.
- Tag: hex.EncodeToString(info.tag[:]),
- // name is available from 4.15.
- Name: internal.CString(info.name[:]),
+ pi := ProgramInfo{
+ Type: ProgramType(info.Type),
+ id: ProgramID(info.Id),
+ Tag: hex.EncodeToString(info.Tag[:]),
+ Name: unix.ByteSliceToString(info.Name[:]),
+ btf: btf.ID(info.BtfId),
stats: &programStats{
- runtime: time.Duration(info.run_time_ns),
- runCount: info.run_cnt,
+ runtime: time.Duration(info.RunTimeNs),
+ runCount: info.RunCnt,
},
- }, nil
+ }
+
+ // Start with a clean struct for the second call, otherwise we may get EFAULT.
+ var info2 sys.ProgInfo
+
+ if info.NrMapIds > 0 {
+ pi.maps = make([]MapID, info.NrMapIds)
+ info2.NrMapIds = info.NrMapIds
+ info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0]))
+ }
+
+ if info.XlatedProgLen > 0 {
+ pi.insns = make([]byte, info.XlatedProgLen)
+ info2.XlatedProgLen = info.XlatedProgLen
+ info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns)
+ }
+
+ if info.NrMapIds > 0 || info.XlatedProgLen > 0 {
+ if err := sys.ObjInfo(fd, &info2); err != nil {
+ return nil, err
+ }
+ }
+
+ return &pi, nil
}
-func newProgramInfoFromProc(fd *internal.FD) (*ProgramInfo, error) {
+func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) {
var info ProgramInfo
err := scanFdInfo(fd, map[string]interface{}{
"prog_type": &info.Type,
@@ -142,6 +175,18 @@ func (pi *ProgramInfo) ID() (ProgramID, bool) {
return pi.id, pi.id > 0
}
+// BTFID returns the BTF ID associated with the program.
+//
+// The ID is only valid as long as the associated program is kept alive.
+// Available from 5.0.
+//
+// The bool return value indicates whether this optional field is available and
+// populated. (The field may be available but not populated if the kernel
+// supports the field but the program was loaded without BTF information.)
+func (pi *ProgramInfo) BTFID() (btf.ID, bool) {
+ return pi.btf, pi.btf > 0
+}
+
// RunCount returns the total number of times the program was called.
//
// Can return 0 if the collection of statistics is not enabled. See EnableStats().
@@ -164,13 +209,50 @@ func (pi *ProgramInfo) Runtime() (time.Duration, bool) {
return time.Duration(0), false
}
-func scanFdInfo(fd *internal.FD, fields map[string]interface{}) error {
- raw, err := fd.Value()
- if err != nil {
- return err
+// Instructions returns the 'xlated' instruction stream of the program
+// after it has been verified and rewritten by the kernel. These instructions
+// cannot be loaded back into the kernel as-is, this is mainly used for
+// inspecting loaded programs for troubleshooting, dumping, etc.
+//
+// For example, map accesses are made to reference their kernel map IDs,
+// not the FDs they had when the program was inserted. Note that before
+// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated
+// instructions were not sanitized, making the output even less reusable
+// and less likely to round-trip or evaluate to the same program Tag.
+//
+// The first instruction is marked as a symbol using the Program's name.
+//
+// Available from 4.13. Requires CAP_BPF or equivalent.
+func (pi *ProgramInfo) Instructions() (asm.Instructions, error) {
+ // If the calling process is not BPF-capable or if the kernel doesn't
+ // support getting xlated instructions, the field will be zero.
+ if len(pi.insns) == 0 {
+ return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported)
+ }
+
+ r := bytes.NewReader(pi.insns)
+ var insns asm.Instructions
+ if err := insns.Unmarshal(r, internal.NativeEndian); err != nil {
+ return nil, fmt.Errorf("unmarshaling instructions: %w", err)
}
- fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", raw))
+ // Tag the first instruction with the name of the program, if available.
+ insns[0] = insns[0].WithSymbol(pi.Name)
+
+ return insns, nil
+}
+
+// MapIDs returns the maps related to the program.
+//
+// Available from 4.15.
+//
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) MapIDs() ([]MapID, bool) {
+ return pi.maps, pi.maps != nil
+}
+
+func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error {
+ fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int()))
if err != nil {
return err
}
@@ -213,6 +295,10 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
return err
}
+ if len(fields) > 0 && scanned == 0 {
+ return ErrNotSupported
+ }
+
if scanned != len(fields) {
return errMissingFields
}
@@ -227,11 +313,9 @@ func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
//
// Requires at least 5.8.
func EnableStats(which uint32) (io.Closer, error) {
- attr := internal.BPFEnableStatsAttr{
- StatsType: which,
- }
-
- fd, err := internal.BPFEnableStats(&attr)
+ fd, err := sys.EnableStats(&sys.EnableStatsAttr{
+ Type: which,
+ })
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/cilium/ebpf/internal/align.go b/vendor/github.com/cilium/ebpf/internal/align.go
new file mode 100644
index 000000000..8b4f2658e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/align.go
@@ -0,0 +1,6 @@
+package internal
+
+// Align returns 'n' updated to 'alignment' boundary.
+func Align(n, alignment int) int {
+ return (int(n) + alignment - 1) / alignment * alignment
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf.go b/vendor/github.com/cilium/ebpf/internal/btf/btf.go
deleted file mode 100644
index 1e66d9476..000000000
--- a/vendor/github.com/cilium/ebpf/internal/btf/btf.go
+++ /dev/null
@@ -1,791 +0,0 @@
-package btf
-
-import (
- "bytes"
- "debug/elf"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "os"
- "reflect"
- "sync"
- "unsafe"
-
- "github.com/cilium/ebpf/internal"
- "github.com/cilium/ebpf/internal/unix"
-)
-
-const btfMagic = 0xeB9F
-
-// Errors returned by BTF functions.
-var (
- ErrNotSupported = internal.ErrNotSupported
- ErrNotFound = errors.New("not found")
- ErrNoExtendedInfo = errors.New("no extended info")
-)
-
-// Spec represents decoded BTF.
-type Spec struct {
- rawTypes []rawType
- strings stringTable
- types []Type
- namedTypes map[string][]namedType
- funcInfos map[string]extInfo
- lineInfos map[string]extInfo
- coreRelos map[string]bpfCoreRelos
- byteOrder binary.ByteOrder
-}
-
-type btfHeader struct {
- Magic uint16
- Version uint8
- Flags uint8
- HdrLen uint32
-
- TypeOff uint32
- TypeLen uint32
- StringOff uint32
- StringLen uint32
-}
-
-// LoadSpecFromReader reads BTF sections from an ELF.
-//
-// Returns a nil Spec and no error if no BTF was present.
-func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
- file, err := internal.NewSafeELFFile(rd)
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- btfSection, btfExtSection, sectionSizes, err := findBtfSections(file)
- if err != nil {
- return nil, err
- }
-
- if btfSection == nil {
- return nil, nil
- }
-
- symbols, err := file.Symbols()
- if err != nil {
- return nil, fmt.Errorf("can't read symbols: %v", err)
- }
-
- variableOffsets := make(map[variable]uint32)
- for _, symbol := range symbols {
- if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
- // Ignore things like SHN_ABS
- continue
- }
-
- if int(symbol.Section) >= len(file.Sections) {
- return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
- }
-
- secName := file.Sections[symbol.Section].Name
- if _, ok := sectionSizes[secName]; !ok {
- continue
- }
-
- if symbol.Value > math.MaxUint32 {
- return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name)
- }
-
- variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
- }
-
- spec, err := loadNakedSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets)
- if err != nil {
- return nil, err
- }
-
- if btfExtSection == nil {
- return spec, nil
- }
-
- spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
- if err != nil {
- return nil, fmt.Errorf("can't read ext info: %w", err)
- }
-
- return spec, nil
-}
-
-func findBtfSections(file *internal.SafeELFFile) (*elf.Section, *elf.Section, map[string]uint32, error) {
- var (
- btfSection *elf.Section
- btfExtSection *elf.Section
- sectionSizes = make(map[string]uint32)
- )
-
- for _, sec := range file.Sections {
- switch sec.Name {
- case ".BTF":
- btfSection = sec
- case ".BTF.ext":
- btfExtSection = sec
- default:
- if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
- break
- }
-
- if sec.Size > math.MaxUint32 {
- return nil, nil, nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
- }
-
- sectionSizes[sec.Name] = uint32(sec.Size)
- }
- }
- return btfSection, btfExtSection, sectionSizes, nil
-}
-
-func loadSpecFromVmlinux(rd io.ReaderAt) (*Spec, error) {
- file, err := internal.NewSafeELFFile(rd)
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- btfSection, _, _, err := findBtfSections(file)
- if err != nil {
- return nil, fmt.Errorf(".BTF ELF section: %s", err)
- }
- if btfSection == nil {
- return nil, fmt.Errorf("unable to find .BTF ELF section")
- }
- return loadNakedSpec(btfSection.Open(), file.ByteOrder, nil, nil)
-}
-
-func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) {
- rawTypes, rawStrings, err := parseBTF(btf, bo)
- if err != nil {
- return nil, err
- }
-
- err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets)
- if err != nil {
- return nil, err
- }
-
- types, typesByName, err := inflateRawTypes(rawTypes, rawStrings)
- if err != nil {
- return nil, err
- }
-
- return &Spec{
- rawTypes: rawTypes,
- namedTypes: typesByName,
- types: types,
- strings: rawStrings,
- byteOrder: bo,
- }, nil
-}
-
-var kernelBTF struct {
- sync.Mutex
- *Spec
-}
-
-// LoadKernelSpec returns the current kernel's BTF information.
-//
-// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns
-// ErrNotSupported if BTF is not enabled.
-func LoadKernelSpec() (*Spec, error) {
- kernelBTF.Lock()
- defer kernelBTF.Unlock()
-
- if kernelBTF.Spec != nil {
- return kernelBTF.Spec, nil
- }
-
- var err error
- kernelBTF.Spec, err = loadKernelSpec()
- return kernelBTF.Spec, err
-}
-
-func loadKernelSpec() (*Spec, error) {
- release, err := unix.KernelRelease()
- if err != nil {
- return nil, fmt.Errorf("can't read kernel release number: %w", err)
- }
-
- fh, err := os.Open("/sys/kernel/btf/vmlinux")
- if err == nil {
- defer fh.Close()
-
- return loadNakedSpec(fh, internal.NativeEndian, nil, nil)
- }
-
- // use same list of locations as libbpf
- // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
- locations := []string{
- "/boot/vmlinux-%s",
- "/lib/modules/%s/vmlinux-%[1]s",
- "/lib/modules/%s/build/vmlinux",
- "/usr/lib/modules/%s/kernel/vmlinux",
- "/usr/lib/debug/boot/vmlinux-%s",
- "/usr/lib/debug/boot/vmlinux-%s.debug",
- "/usr/lib/debug/lib/modules/%s/vmlinux",
- }
-
- for _, loc := range locations {
- path := fmt.Sprintf(loc, release)
-
- fh, err := os.Open(path)
- if err != nil {
- continue
- }
- defer fh.Close()
-
- return loadSpecFromVmlinux(fh)
- }
-
- return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported)
-}
-
-func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) {
- rawBTF, err := ioutil.ReadAll(btf)
- if err != nil {
- return nil, nil, fmt.Errorf("can't read BTF: %v", err)
- }
-
- rd := bytes.NewReader(rawBTF)
-
- var header btfHeader
- if err := binary.Read(rd, bo, &header); err != nil {
- return nil, nil, fmt.Errorf("can't read header: %v", err)
- }
-
- if header.Magic != btfMagic {
- return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
- }
-
- if header.Version != 1 {
- return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
- }
-
- if header.Flags != 0 {
- return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
- }
-
- remainder := int64(header.HdrLen) - int64(binary.Size(&header))
- if remainder < 0 {
- return nil, nil, errors.New("header is too short")
- }
-
- if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil {
- return nil, nil, fmt.Errorf("header padding: %v", err)
- }
-
- if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil {
- return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err)
- }
-
- rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen)))
- if err != nil {
- return nil, nil, fmt.Errorf("can't read type names: %w", err)
- }
-
- if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil {
- return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err)
- }
-
- rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo)
- if err != nil {
- return nil, nil, fmt.Errorf("can't read types: %w", err)
- }
-
- return rawTypes, rawStrings, nil
-}
-
-type variable struct {
- section string
- name string
-}
-
-func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
- for i, rawType := range rawTypes {
- if rawType.Kind() != kindDatasec {
- continue
- }
-
- name, err := rawStrings.Lookup(rawType.NameOff)
- if err != nil {
- return err
- }
-
- if name == ".kconfig" || name == ".ksyms" {
- return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
- }
-
- if rawTypes[i].SizeType != 0 {
- continue
- }
-
- size, ok := sectionSizes[name]
- if !ok {
- return fmt.Errorf("data section %s: missing size", name)
- }
-
- rawTypes[i].SizeType = size
-
- secinfos := rawType.data.([]btfVarSecinfo)
- for j, secInfo := range secinfos {
- id := int(secInfo.Type - 1)
- if id >= len(rawTypes) {
- return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
- }
-
- varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
- if err != nil {
- return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
- }
-
- offset, ok := variableOffsets[variable{name, varName}]
- if !ok {
- return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
- }
-
- secinfos[j].Offset = offset
- }
- }
-
- return nil
-}
-
-type marshalOpts struct {
- ByteOrder binary.ByteOrder
- StripFuncLinkage bool
-}
-
-func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
- var (
- buf bytes.Buffer
- header = new(btfHeader)
- headerLen = binary.Size(header)
- )
-
- // Reserve space for the header. We have to write it last since
- // we don't know the size of the type section yet.
- _, _ = buf.Write(make([]byte, headerLen))
-
- // Write type section, just after the header.
- for _, raw := range s.rawTypes {
- switch {
- case opts.StripFuncLinkage && raw.Kind() == kindFunc:
- raw.SetLinkage(linkageStatic)
- }
-
- if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
- return nil, fmt.Errorf("can't marshal BTF: %w", err)
- }
- }
-
- typeLen := uint32(buf.Len() - headerLen)
-
- // Write string section after type section.
- _, _ = buf.Write(s.strings)
-
- // Fill out the header, and write it out.
- header = &btfHeader{
- Magic: btfMagic,
- Version: 1,
- Flags: 0,
- HdrLen: uint32(headerLen),
- TypeOff: 0,
- TypeLen: typeLen,
- StringOff: typeLen,
- StringLen: uint32(len(s.strings)),
- }
-
- raw := buf.Bytes()
- err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
- if err != nil {
- return nil, fmt.Errorf("can't write header: %v", err)
- }
-
- return raw, nil
-}
-
-type sliceWriter []byte
-
-func (sw sliceWriter) Write(p []byte) (int, error) {
- if len(p) != len(sw) {
- return 0, errors.New("size doesn't match")
- }
-
- return copy(sw, p), nil
-}
-
-// Program finds the BTF for a specific section.
-//
-// Length is the number of bytes in the raw BPF instruction stream.
-//
-// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't
-// contain extended BTF info.
-func (s *Spec) Program(name string, length uint64) (*Program, error) {
- if length == 0 {
- return nil, errors.New("length musn't be zero")
- }
-
- if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil {
- return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo)
- }
-
- funcInfos, funcOK := s.funcInfos[name]
- lineInfos, lineOK := s.lineInfos[name]
- coreRelos, coreOK := s.coreRelos[name]
-
- if !funcOK && !lineOK && !coreOK {
- return nil, fmt.Errorf("no extended BTF info for section %s", name)
- }
-
- return &Program{s, length, funcInfos, lineInfos, coreRelos}, nil
-}
-
-// Datasec returns the BTF required to create maps which represent data sections.
-func (s *Spec) Datasec(name string) (*Map, error) {
- var datasec Datasec
- if err := s.FindType(name, &datasec); err != nil {
- return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err)
- }
-
- m := NewMap(s, &Void{}, &datasec)
- return &m, nil
-}
-
-// FindType searches for a type with a specific name.
-//
-// hint determines the type of the returned Type.
-//
-// Returns an error wrapping ErrNotFound if no matching
-// type exists in spec.
-func (s *Spec) FindType(name string, typ Type) error {
- var (
- wanted = reflect.TypeOf(typ)
- candidate Type
- )
-
- for _, typ := range s.namedTypes[essentialName(name)] {
- if reflect.TypeOf(typ) != wanted {
- continue
- }
-
- // Match against the full name, not just the essential one.
- if typ.name() != name {
- continue
- }
-
- if candidate != nil {
- return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
- }
-
- candidate = typ
- }
-
- if candidate == nil {
- return fmt.Errorf("type %s: %w", name, ErrNotFound)
- }
-
- value := reflect.Indirect(reflect.ValueOf(copyType(candidate)))
- reflect.Indirect(reflect.ValueOf(typ)).Set(value)
- return nil
-}
-
-// Handle is a reference to BTF loaded into the kernel.
-type Handle struct {
- fd *internal.FD
-}
-
-// NewHandle loads BTF into the kernel.
-//
-// Returns ErrNotSupported if BTF is not supported.
-func NewHandle(spec *Spec) (*Handle, error) {
- if err := haveBTF(); err != nil {
- return nil, err
- }
-
- if spec.byteOrder != internal.NativeEndian {
- return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
- }
-
- btf, err := spec.marshal(marshalOpts{
- ByteOrder: internal.NativeEndian,
- StripFuncLinkage: haveFuncLinkage() != nil,
- })
- if err != nil {
- return nil, fmt.Errorf("can't marshal BTF: %w", err)
- }
-
- if uint64(len(btf)) > math.MaxUint32 {
- return nil, errors.New("BTF exceeds the maximum size")
- }
-
- attr := &bpfLoadBTFAttr{
- btf: internal.NewSlicePointer(btf),
- btfSize: uint32(len(btf)),
- }
-
- fd, err := bpfLoadBTF(attr)
- if err != nil {
- logBuf := make([]byte, 64*1024)
- attr.logBuf = internal.NewSlicePointer(logBuf)
- attr.btfLogSize = uint32(len(logBuf))
- attr.btfLogLevel = 1
- _, logErr := bpfLoadBTF(attr)
- return nil, internal.ErrorWithLog(err, logBuf, logErr)
- }
-
- return &Handle{fd}, nil
-}
-
-// Close destroys the handle.
-//
-// Subsequent calls to FD will return an invalid value.
-func (h *Handle) Close() error {
- return h.fd.Close()
-}
-
-// FD returns the file descriptor for the handle.
-func (h *Handle) FD() int {
- value, err := h.fd.Value()
- if err != nil {
- return -1
- }
-
- return int(value)
-}
-
-// Map is the BTF for a map.
-type Map struct {
- spec *Spec
- key, value Type
-}
-
-// NewMap returns a new Map containing the given values.
-// The key and value arguments are initialized to Void if nil values are given.
-func NewMap(spec *Spec, key Type, value Type) Map {
- if key == nil {
- key = &Void{}
- }
- if value == nil {
- value = &Void{}
- }
-
- return Map{
- spec: spec,
- key: key,
- value: value,
- }
-}
-
-// MapSpec should be a method on Map, but is a free function
-// to hide it from users of the ebpf package.
-func MapSpec(m *Map) *Spec {
- return m.spec
-}
-
-// MapKey should be a method on Map, but is a free function
-// to hide it from users of the ebpf package.
-func MapKey(m *Map) Type {
- return m.key
-}
-
-// MapValue should be a method on Map, but is a free function
-// to hide it from users of the ebpf package.
-func MapValue(m *Map) Type {
- return m.value
-}
-
-// Program is the BTF information for a stream of instructions.
-type Program struct {
- spec *Spec
- length uint64
- funcInfos, lineInfos extInfo
- coreRelos bpfCoreRelos
-}
-
-// ProgramSpec returns the Spec needed for loading function and line infos into the kernel.
-//
-// This is a free function instead of a method to hide it from users
-// of package ebpf.
-func ProgramSpec(s *Program) *Spec {
- return s.spec
-}
-
-// ProgramAppend the information from other to the Program.
-//
-// This is a free function instead of a method to hide it from users
-// of package ebpf.
-func ProgramAppend(s, other *Program) error {
- funcInfos, err := s.funcInfos.append(other.funcInfos, s.length)
- if err != nil {
- return fmt.Errorf("func infos: %w", err)
- }
-
- lineInfos, err := s.lineInfos.append(other.lineInfos, s.length)
- if err != nil {
- return fmt.Errorf("line infos: %w", err)
- }
-
- s.funcInfos = funcInfos
- s.lineInfos = lineInfos
- s.coreRelos = s.coreRelos.append(other.coreRelos, s.length)
- s.length += other.length
- return nil
-}
-
-// ProgramFuncInfos returns the binary form of BTF function infos.
-//
-// This is a free function instead of a method to hide it from users
-// of package ebpf.
-func ProgramFuncInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
- bytes, err = s.funcInfos.MarshalBinary()
- if err != nil {
- return 0, nil, err
- }
-
- return s.funcInfos.recordSize, bytes, nil
-}
-
-// ProgramLineInfos returns the binary form of BTF line infos.
-//
-// This is a free function instead of a method to hide it from users
-// of package ebpf.
-func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
- bytes, err = s.lineInfos.MarshalBinary()
- if err != nil {
- return 0, nil, err
- }
-
- return s.lineInfos.recordSize, bytes, nil
-}
-
-// ProgramRelocations returns the CO-RE relocations required to adjust the
-// program to the target.
-//
-// This is a free function instead of a method to hide it from users
-// of package ebpf.
-func ProgramRelocations(s *Program, target *Spec) (map[uint64]Relocation, error) {
- if len(s.coreRelos) == 0 {
- return nil, nil
- }
-
- return coreRelocate(s.spec, target, s.coreRelos)
-}
-
-type bpfLoadBTFAttr struct {
- btf internal.Pointer
- logBuf internal.Pointer
- btfSize uint32
- btfLogSize uint32
- btfLogLevel uint32
-}
-
-func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) {
- fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
- if err != nil {
- return nil, err
- }
-
- return internal.NewFD(uint32(fd)), nil
-}
-
-func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
- const minHeaderLength = 24
-
- typesLen := uint32(binary.Size(types))
- header := btfHeader{
- Magic: btfMagic,
- Version: 1,
- HdrLen: minHeaderLength,
- TypeOff: 0,
- TypeLen: typesLen,
- StringOff: typesLen,
- StringLen: uint32(len(strings)),
- }
-
- buf := new(bytes.Buffer)
- _ = binary.Write(buf, bo, &header)
- _ = binary.Write(buf, bo, types)
- buf.Write(strings)
-
- return buf.Bytes()
-}
-
-var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
- var (
- types struct {
- Integer btfType
- Var btfType
- btfVar struct{ Linkage uint32 }
- }
- strings = []byte{0, 'a', 0}
- )
-
- // We use a BTF_KIND_VAR here, to make sure that
- // the kernel understands BTF at least as well as we
- // do. BTF_KIND_VAR was introduced ~5.1.
- types.Integer.SetKind(kindPointer)
- types.Var.NameOff = 1
- types.Var.SetKind(kindVar)
- types.Var.SizeType = 1
-
- btf := marshalBTF(&types, strings, internal.NativeEndian)
-
- fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
- btf: internal.NewSlicePointer(btf),
- btfSize: uint32(len(btf)),
- })
- if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
- // Treat both EINVAL and EPERM as not supported: loading the program
- // might still succeed without BTF.
- return internal.ErrNotSupported
- }
- if err != nil {
- return err
- }
-
- fd.Close()
- return nil
-})
-
-var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
- if err := haveBTF(); err != nil {
- return err
- }
-
- var (
- types struct {
- FuncProto btfType
- Func btfType
- }
- strings = []byte{0, 'a', 0}
- )
-
- types.FuncProto.SetKind(kindFuncProto)
- types.Func.SetKind(kindFunc)
- types.Func.SizeType = 1 // aka FuncProto
- types.Func.NameOff = 1
- types.Func.SetLinkage(linkageGlobal)
-
- btf := marshalBTF(&types, strings, internal.NativeEndian)
-
- fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
- btf: internal.NewSlicePointer(btf),
- btfSize: uint32(len(btf)),
- })
- if errors.Is(err, unix.EINVAL) {
- return internal.ErrNotSupported
- }
- if err != nil {
- return err
- }
-
- fd.Close()
- return nil
-})
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/core.go b/vendor/github.com/cilium/ebpf/internal/btf/core.go
deleted file mode 100644
index 52b59ed18..000000000
--- a/vendor/github.com/cilium/ebpf/internal/btf/core.go
+++ /dev/null
@@ -1,388 +0,0 @@
-package btf
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-// Code in this file is derived from libbpf, which is available under a BSD
-// 2-Clause license.
-
-// Relocation describes a CO-RE relocation.
-type Relocation struct {
- Current uint32
- New uint32
-}
-
-func (r Relocation) equal(other Relocation) bool {
- return r.Current == other.Current && r.New == other.New
-}
-
-// coreReloKind is the type of CO-RE relocation
-type coreReloKind uint32
-
-const (
- reloFieldByteOffset coreReloKind = iota /* field byte offset */
- reloFieldByteSize /* field size in bytes */
- reloFieldExists /* field existence in target kernel */
- reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
- reloFieldLShiftU64 /* bitfield-specific left bitshift */
- reloFieldRShiftU64 /* bitfield-specific right bitshift */
- reloTypeIDLocal /* type ID in local BPF object */
- reloTypeIDTarget /* type ID in target kernel */
- reloTypeExists /* type existence in target kernel */
- reloTypeSize /* type size in bytes */
- reloEnumvalExists /* enum value existence in target kernel */
- reloEnumvalValue /* enum value integer value */
-)
-
-func (k coreReloKind) String() string {
- switch k {
- case reloFieldByteOffset:
- return "byte_off"
- case reloFieldByteSize:
- return "byte_sz"
- case reloFieldExists:
- return "field_exists"
- case reloFieldSigned:
- return "signed"
- case reloFieldLShiftU64:
- return "lshift_u64"
- case reloFieldRShiftU64:
- return "rshift_u64"
- case reloTypeIDLocal:
- return "local_type_id"
- case reloTypeIDTarget:
- return "target_type_id"
- case reloTypeExists:
- return "type_exists"
- case reloTypeSize:
- return "type_size"
- case reloEnumvalExists:
- return "enumval_exists"
- case reloEnumvalValue:
- return "enumval_value"
- default:
- return "unknown"
- }
-}
-
-func coreRelocate(local, target *Spec, coreRelos bpfCoreRelos) (map[uint64]Relocation, error) {
- if target == nil {
- var err error
- target, err = loadKernelSpec()
- if err != nil {
- return nil, err
- }
- }
-
- if local.byteOrder != target.byteOrder {
- return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
- }
-
- relocations := make(map[uint64]Relocation, len(coreRelos))
- for _, relo := range coreRelos {
- accessorStr, err := local.strings.Lookup(relo.AccessStrOff)
- if err != nil {
- return nil, err
- }
-
- accessor, err := parseCoreAccessor(accessorStr)
- if err != nil {
- return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
- }
-
- if int(relo.TypeID) >= len(local.types) {
- return nil, fmt.Errorf("invalid type id %d", relo.TypeID)
- }
-
- typ := local.types[relo.TypeID]
-
- if relo.ReloKind == reloTypeIDLocal {
- relocations[uint64(relo.InsnOff)] = Relocation{
- uint32(typ.ID()),
- uint32(typ.ID()),
- }
- continue
- }
-
- named, ok := typ.(namedType)
- if !ok || named.name() == "" {
- return nil, fmt.Errorf("relocate anonymous type %s: %w", typ.String(), ErrNotSupported)
- }
-
- name := essentialName(named.name())
- res, err := coreCalculateRelocation(typ, target.namedTypes[name], relo.ReloKind, accessor)
- if err != nil {
- return nil, fmt.Errorf("relocate %s: %w", name, err)
- }
-
- relocations[uint64(relo.InsnOff)] = res
- }
-
- return relocations, nil
-}
-
-var errAmbiguousRelocation = errors.New("ambiguous relocation")
-
-func coreCalculateRelocation(local Type, targets []namedType, kind coreReloKind, localAccessor coreAccessor) (Relocation, error) {
- var relos []Relocation
- var matches []Type
- for _, target := range targets {
- switch kind {
- case reloTypeIDTarget:
- if localAccessor[0] != 0 {
- return Relocation{}, fmt.Errorf("%s: unexpected non-zero accessor", kind)
- }
-
- if compat, err := coreAreTypesCompatible(local, target); err != nil {
- return Relocation{}, fmt.Errorf("%s: %s", kind, err)
- } else if !compat {
- continue
- }
-
- relos = append(relos, Relocation{uint32(target.ID()), uint32(target.ID())})
-
- default:
- return Relocation{}, fmt.Errorf("relocation %s: %w", kind, ErrNotSupported)
- }
- matches = append(matches, target)
- }
-
- if len(relos) == 0 {
- // TODO: Add switch for existence checks like reloEnumvalExists here.
-
- // TODO: This might have to be poisoned.
- return Relocation{}, fmt.Errorf("no relocation found, tried %v", targets)
- }
-
- relo := relos[0]
- for _, altRelo := range relos[1:] {
- if !altRelo.equal(relo) {
- return Relocation{}, fmt.Errorf("multiple types %v match: %w", matches, errAmbiguousRelocation)
- }
- }
-
- return relo, nil
-}
-
-/* coreAccessor contains a path through a struct. It contains at least one index.
- *
- * The interpretation depends on the kind of the relocation. The following is
- * taken from struct bpf_core_relo in libbpf_internal.h:
- *
- * - for field-based relocations, string encodes an accessed field using
- * a sequence of field and array indices, separated by colon (:). It's
- * conceptually very close to LLVM's getelementptr ([0]) instruction's
- * arguments for identifying offset to a field.
- * - for type-based relocations, strings is expected to be just "0";
- * - for enum value-based relocations, string contains an index of enum
- * value within its enum type;
- *
- * Example to provide a better feel.
- *
- * struct sample {
- * int a;
- * struct {
- * int b[10];
- * };
- * };
- *
- * struct sample s = ...;
- * int x = &s->a; // encoded as "0:0" (a is field #0)
- * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
- * // b is field #0 inside anon struct, accessing elem #5)
- * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
- */
-type coreAccessor []int
-
-func parseCoreAccessor(accessor string) (coreAccessor, error) {
- if accessor == "" {
- return nil, fmt.Errorf("empty accessor")
- }
-
- var result coreAccessor
- parts := strings.Split(accessor, ":")
- for _, part := range parts {
- // 31 bits to avoid overflowing int on 32 bit platforms.
- index, err := strconv.ParseUint(part, 10, 31)
- if err != nil {
- return nil, fmt.Errorf("accessor index %q: %s", part, err)
- }
-
- result = append(result, int(index))
- }
-
- return result, nil
-}
-
-/* The comment below is from bpf_core_types_are_compat in libbpf.c:
- *
- * Check local and target types for compatibility. This check is used for
- * type-based CO-RE relocations and follow slightly different rules than
- * field-based relocations. This function assumes that root types were already
- * checked for name match. Beyond that initial root-level name check, names
- * are completely ignored. Compatibility rules are as follows:
- * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
- * kind should match for local and target types (i.e., STRUCT is not
- * compatible with UNION);
- * - for ENUMs, the size is ignored;
- * - for INT, size and signedness are ignored;
- * - for ARRAY, dimensionality is ignored, element types are checked for
- * compatibility recursively;
- * - CONST/VOLATILE/RESTRICT modifiers are ignored;
- * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
- * - FUNC_PROTOs are compatible if they have compatible signature: same
- * number of input args and compatible return and argument types.
- * These rules are not set in stone and probably will be adjusted as we get
- * more experience with using BPF CO-RE relocations.
- */
-func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
- var (
- localTs, targetTs typeDeque
- l, t = &localType, &targetType
- depth = 0
- )
-
- for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
- if depth >= maxTypeDepth {
- return false, errors.New("types are nested too deep")
- }
-
- localType = skipQualifierAndTypedef(*l)
- targetType = skipQualifierAndTypedef(*t)
-
- if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
- return false, nil
- }
-
- switch lv := (localType).(type) {
- case *Void, *Struct, *Union, *Enum, *Fwd:
- // Nothing to do here
-
- case *Int:
- tv := targetType.(*Int)
- if lv.isBitfield() || tv.isBitfield() {
- return false, nil
- }
-
- case *Pointer, *Array:
- depth++
- localType.walk(&localTs)
- targetType.walk(&targetTs)
-
- case *FuncProto:
- tv := targetType.(*FuncProto)
- if len(lv.Params) != len(tv.Params) {
- return false, nil
- }
-
- depth++
- localType.walk(&localTs)
- targetType.walk(&targetTs)
-
- default:
- return false, fmt.Errorf("unsupported type %T", localType)
- }
- }
-
- if l != nil {
- return false, fmt.Errorf("dangling local type %T", *l)
- }
-
- if t != nil {
- return false, fmt.Errorf("dangling target type %T", *t)
- }
-
- return true, nil
-}
-
-/* The comment below is from bpf_core_fields_are_compat in libbpf.c:
- *
- * Check two types for compatibility for the purpose of field access
- * relocation. const/volatile/restrict and typedefs are skipped to ensure we
- * are relocating semantically compatible entities:
- * - any two STRUCTs/UNIONs are compatible and can be mixed;
- * - any two FWDs are compatible, if their names match (modulo flavor suffix);
- * - any two PTRs are always compatible;
- * - for ENUMs, names should be the same (ignoring flavor suffix) or at
- * least one of enums should be anonymous;
- * - for ENUMs, check sizes, names are ignored;
- * - for INT, size and signedness are ignored;
- * - for ARRAY, dimensionality is ignored, element types are checked for
- * compatibility recursively;
- * - everything else shouldn't be ever a target of relocation.
- * These rules are not set in stone and probably will be adjusted as we get
- * more experience with using BPF CO-RE relocations.
- */
-func coreAreMembersCompatible(localType Type, targetType Type) (bool, error) {
- doNamesMatch := func(a, b string) bool {
- if a == "" || b == "" {
- // allow anonymous and named type to match
- return true
- }
-
- return essentialName(a) == essentialName(b)
- }
-
- for depth := 0; depth <= maxTypeDepth; depth++ {
- localType = skipQualifierAndTypedef(localType)
- targetType = skipQualifierAndTypedef(targetType)
-
- _, lok := localType.(composite)
- _, tok := targetType.(composite)
- if lok && tok {
- return true, nil
- }
-
- if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
- return false, nil
- }
-
- switch lv := localType.(type) {
- case *Pointer:
- return true, nil
-
- case *Enum:
- tv := targetType.(*Enum)
- return doNamesMatch(lv.name(), tv.name()), nil
-
- case *Fwd:
- tv := targetType.(*Fwd)
- return doNamesMatch(lv.name(), tv.name()), nil
-
- case *Int:
- tv := targetType.(*Int)
- return !lv.isBitfield() && !tv.isBitfield(), nil
-
- case *Array:
- tv := targetType.(*Array)
-
- localType = lv.Type
- targetType = tv.Type
-
- default:
- return false, fmt.Errorf("unsupported type %T", localType)
- }
- }
-
- return false, errors.New("types are nested too deep")
-}
-
-func skipQualifierAndTypedef(typ Type) Type {
- result := typ
- for depth := 0; depth <= maxTypeDepth; depth++ {
- switch v := (result).(type) {
- case qualifier:
- result = v.qualify()
- case *Typedef:
- result = v.Type
- default:
- return result
- }
- }
- return typ
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go
deleted file mode 100644
index 6a21b6bda..000000000
--- a/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go
+++ /dev/null
@@ -1,281 +0,0 @@
-package btf
-
-import (
- "bufio"
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
-
- "github.com/cilium/ebpf/asm"
- "github.com/cilium/ebpf/internal"
-)
-
-type btfExtHeader struct {
- Magic uint16
- Version uint8
- Flags uint8
- HdrLen uint32
-
- FuncInfoOff uint32
- FuncInfoLen uint32
- LineInfoOff uint32
- LineInfoLen uint32
-}
-
-type btfExtCoreHeader struct {
- CoreReloOff uint32
- CoreReloLen uint32
-}
-
-func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, coreRelos map[string]bpfCoreRelos, err error) {
- var header btfExtHeader
- var coreHeader btfExtCoreHeader
- if err := binary.Read(r, bo, &header); err != nil {
- return nil, nil, nil, fmt.Errorf("can't read header: %v", err)
- }
-
- if header.Magic != btfMagic {
- return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
- }
-
- if header.Version != 1 {
- return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version)
- }
-
- if header.Flags != 0 {
- return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
- }
-
- remainder := int64(header.HdrLen) - int64(binary.Size(&header))
- if remainder < 0 {
- return nil, nil, nil, errors.New("header is too short")
- }
-
- coreHdrSize := int64(binary.Size(&coreHeader))
- if remainder >= coreHdrSize {
- if err := binary.Read(r, bo, &coreHeader); err != nil {
- return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err)
- }
- remainder -= coreHdrSize
- }
-
- // Of course, the .BTF.ext header has different semantics than the
- // .BTF ext header. We need to ignore non-null values.
- _, err = io.CopyN(ioutil.Discard, r, remainder)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("header padding: %v", err)
- }
-
- if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
- return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err)
- }
-
- buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen)))
- funcInfo, err = parseExtInfo(buf, bo, strings)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("function info: %w", err)
- }
-
- if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
- return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err)
- }
-
- buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen)))
- lineInfo, err = parseExtInfo(buf, bo, strings)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("line info: %w", err)
- }
-
- if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 {
- if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil {
- return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err)
- }
-
- coreRelos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings)
- if err != nil {
- return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err)
- }
- }
-
- return funcInfo, lineInfo, coreRelos, nil
-}
-
-type btfExtInfoSec struct {
- SecNameOff uint32
- NumInfo uint32
-}
-
-type extInfoRecord struct {
- InsnOff uint64
- Opaque []byte
-}
-
-type extInfo struct {
- recordSize uint32
- records []extInfoRecord
-}
-
-func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) {
- if other.recordSize != ei.recordSize {
- return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
- }
-
- records := make([]extInfoRecord, 0, len(ei.records)+len(other.records))
- records = append(records, ei.records...)
- for _, info := range other.records {
- records = append(records, extInfoRecord{
- InsnOff: info.InsnOff + offset,
- Opaque: info.Opaque,
- })
- }
- return extInfo{ei.recordSize, records}, nil
-}
-
-func (ei extInfo) MarshalBinary() ([]byte, error) {
- if len(ei.records) == 0 {
- return nil, nil
- }
-
- buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records)))
- for _, info := range ei.records {
- // The kernel expects offsets in number of raw bpf instructions,
- // while the ELF tracks it in bytes.
- insnOff := uint32(info.InsnOff / asm.InstructionSize)
- if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil {
- return nil, fmt.Errorf("can't write instruction offset: %v", err)
- }
-
- buf.Write(info.Opaque)
- }
-
- return buf.Bytes(), nil
-}
-
-func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
- const maxRecordSize = 256
-
- var recordSize uint32
- if err := binary.Read(r, bo, &recordSize); err != nil {
- return nil, fmt.Errorf("can't read record size: %v", err)
- }
-
- if recordSize < 4 {
- // Need at least insnOff
- return nil, errors.New("record size too short")
- }
- if recordSize > maxRecordSize {
- return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
- }
-
- result := make(map[string]extInfo)
- for {
- secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
- if errors.Is(err, io.EOF) {
- return result, nil
- }
-
- var records []extInfoRecord
- for i := uint32(0); i < infoHeader.NumInfo; i++ {
- var byteOff uint32
- if err := binary.Read(r, bo, &byteOff); err != nil {
- return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err)
- }
-
- buf := make([]byte, int(recordSize-4))
- if _, err := io.ReadFull(r, buf); err != nil {
- return nil, fmt.Errorf("section %v: can't read record: %v", secName, err)
- }
-
- if byteOff%asm.InstructionSize != 0 {
- return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
- }
-
- records = append(records, extInfoRecord{uint64(byteOff), buf})
- }
-
- result[secName] = extInfo{
- recordSize,
- records,
- }
- }
-}
-
-// bpfCoreRelo matches `struct bpf_core_relo` from the kernel
-type bpfCoreRelo struct {
- InsnOff uint32
- TypeID TypeID
- AccessStrOff uint32
- ReloKind coreReloKind
-}
-
-type bpfCoreRelos []bpfCoreRelo
-
-// append two slices of extInfoRelo to each other. The InsnOff of b are adjusted
-// by offset.
-func (r bpfCoreRelos) append(other bpfCoreRelos, offset uint64) bpfCoreRelos {
- result := make([]bpfCoreRelo, 0, len(r)+len(other))
- result = append(result, r...)
- for _, relo := range other {
- relo.InsnOff += uint32(offset)
- result = append(result, relo)
- }
- return result
-}
-
-var extInfoReloSize = binary.Size(bpfCoreRelo{})
-
-func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]bpfCoreRelos, error) {
- var recordSize uint32
- if err := binary.Read(r, bo, &recordSize); err != nil {
- return nil, fmt.Errorf("read record size: %v", err)
- }
-
- if recordSize != uint32(extInfoReloSize) {
- return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
- }
-
- result := make(map[string]bpfCoreRelos)
- for {
- secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
- if errors.Is(err, io.EOF) {
- return result, nil
- }
-
- var relos []bpfCoreRelo
- for i := uint32(0); i < infoHeader.NumInfo; i++ {
- var relo bpfCoreRelo
- if err := binary.Read(r, bo, &relo); err != nil {
- return nil, fmt.Errorf("section %v: read record: %v", secName, err)
- }
-
- if relo.InsnOff%asm.InstructionSize != 0 {
- return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff)
- }
-
- relos = append(relos, relo)
- }
-
- result[secName] = relos
- }
-}
-
-func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) {
- var infoHeader btfExtInfoSec
- if err := binary.Read(r, bo, &infoHeader); err != nil {
- return "", nil, fmt.Errorf("read ext info header: %w", err)
- }
-
- secName, err := strings.Lookup(infoHeader.SecNameOff)
- if err != nil {
- return "", nil, fmt.Errorf("get section name: %w", err)
- }
-
- if infoHeader.NumInfo == 0 {
- return "", nil, fmt.Errorf("section %s has zero records", secName)
- }
-
- return secName, &infoHeader, nil
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go b/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go
deleted file mode 100644
index 37e043fd3..000000000
--- a/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// +build gofuzz
-
-// Use with https://github.com/dvyukov/go-fuzz
-
-package btf
-
-import (
- "bytes"
- "encoding/binary"
-
- "github.com/cilium/ebpf/internal"
-)
-
-func FuzzSpec(data []byte) int {
- if len(data) < binary.Size(btfHeader{}) {
- return -1
- }
-
- spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil)
- if err != nil {
- if spec != nil {
- panic("spec is not nil")
- }
- return 0
- }
- if spec == nil {
- panic("spec is nil")
- }
- return 1
-}
-
-func FuzzExtInfo(data []byte) int {
- if len(data) < binary.Size(btfExtHeader{}) {
- return -1
- }
-
- table := stringTable("\x00foo\x00barfoo\x00")
- info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table)
- if err != nil {
- if info != nil {
- panic("info is not nil")
- }
- return 0
- }
- if info == nil {
- panic("info is nil")
- }
- return 1
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/strings.go b/vendor/github.com/cilium/ebpf/internal/btf/strings.go
deleted file mode 100644
index 8782643a0..000000000
--- a/vendor/github.com/cilium/ebpf/internal/btf/strings.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package btf
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
-)
-
-type stringTable []byte
-
-func readStringTable(r io.Reader) (stringTable, error) {
- contents, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, fmt.Errorf("can't read string table: %v", err)
- }
-
- if len(contents) < 1 {
- return nil, errors.New("string table is empty")
- }
-
- if contents[0] != '\x00' {
- return nil, errors.New("first item in string table is non-empty")
- }
-
- if contents[len(contents)-1] != '\x00' {
- return nil, errors.New("string table isn't null terminated")
- }
-
- return stringTable(contents), nil
-}
-
-func (st stringTable) Lookup(offset uint32) (string, error) {
- if int64(offset) > int64(^uint(0)>>1) {
- return "", fmt.Errorf("offset %d overflows int", offset)
- }
-
- pos := int(offset)
- if pos >= len(st) {
- return "", fmt.Errorf("offset %d is out of bounds", offset)
- }
-
- if pos > 0 && st[pos-1] != '\x00' {
- return "", fmt.Errorf("offset %d isn't start of a string", offset)
- }
-
- str := st[pos:]
- end := bytes.IndexByte(str, '\x00')
- if end == -1 {
- return "", fmt.Errorf("offset %d isn't null terminated", offset)
- }
-
- return string(str[:end]), nil
-}
-
-func (st stringTable) LookupName(offset uint32) (Name, error) {
- str, err := st.Lookup(offset)
- return Name(str), err
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/types.go b/vendor/github.com/cilium/ebpf/internal/btf/types.go
deleted file mode 100644
index 9e1fd8d0b..000000000
--- a/vendor/github.com/cilium/ebpf/internal/btf/types.go
+++ /dev/null
@@ -1,871 +0,0 @@
-package btf
-
-import (
- "errors"
- "fmt"
- "math"
- "strings"
-)
-
-const maxTypeDepth = 32
-
-// TypeID identifies a type in a BTF section.
-type TypeID uint32
-
-// ID implements part of the Type interface.
-func (tid TypeID) ID() TypeID {
- return tid
-}
-
-// Type represents a type described by BTF.
-type Type interface {
- ID() TypeID
-
- String() string
-
- // Make a copy of the type, without copying Type members.
- copy() Type
-
- // Enumerate all nested Types. Repeated calls must visit nested
- // types in the same order.
- walk(*typeDeque)
-}
-
-// namedType is a type with a name.
-//
-// Most named types simply embed Name.
-type namedType interface {
- Type
- name() string
-}
-
-// Name identifies a type.
-//
-// Anonymous types have an empty name.
-type Name string
-
-func (n Name) name() string {
- return string(n)
-}
-
-// Void is the unit type of BTF.
-type Void struct{}
-
-func (v *Void) ID() TypeID { return 0 }
-func (v *Void) String() string { return "void#0" }
-func (v *Void) size() uint32 { return 0 }
-func (v *Void) copy() Type { return (*Void)(nil) }
-func (v *Void) walk(*typeDeque) {}
-
-type IntEncoding byte
-
-const (
- Signed IntEncoding = 1 << iota
- Char
- Bool
-)
-
-// Int is an integer of a given length.
-type Int struct {
- TypeID
- Name
-
- // The size of the integer in bytes.
- Size uint32
- Encoding IntEncoding
- // Offset is the starting bit offset. Currently always 0.
- // See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
- Offset uint32
- Bits byte
-}
-
-var _ namedType = (*Int)(nil)
-
-func (i *Int) String() string {
- var s strings.Builder
-
- switch {
- case i.Encoding&Char != 0:
- s.WriteString("char")
- case i.Encoding&Bool != 0:
- s.WriteString("bool")
- default:
- if i.Encoding&Signed == 0 {
- s.WriteRune('u')
- }
- s.WriteString("int")
- fmt.Fprintf(&s, "%d", i.Size*8)
- }
-
- fmt.Fprintf(&s, "#%d", i.TypeID)
-
- if i.Bits > 0 {
- fmt.Fprintf(&s, "[bits=%d]", i.Bits)
- }
-
- return s.String()
-}
-
-func (i *Int) size() uint32 { return i.Size }
-func (i *Int) walk(*typeDeque) {}
-func (i *Int) copy() Type {
- cpy := *i
- return &cpy
-}
-
-func (i *Int) isBitfield() bool {
- return i.Offset > 0
-}
-
-// Pointer is a pointer to another type.
-type Pointer struct {
- TypeID
- Target Type
-}
-
-func (p *Pointer) String() string {
- return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID())
-}
-
-func (p *Pointer) size() uint32 { return 8 }
-func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) }
-func (p *Pointer) copy() Type {
- cpy := *p
- return &cpy
-}
-
-// Array is an array with a fixed number of elements.
-type Array struct {
- TypeID
- Type Type
- Nelems uint32
-}
-
-func (arr *Array) String() string {
- return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems)
-}
-
-func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) }
-func (arr *Array) copy() Type {
- cpy := *arr
- return &cpy
-}
-
-// Struct is a compound type of consecutive members.
-type Struct struct {
- TypeID
- Name
- // The size of the struct including padding, in bytes
- Size uint32
- Members []Member
-}
-
-func (s *Struct) String() string {
- return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name)
-}
-
-func (s *Struct) size() uint32 { return s.Size }
-
-func (s *Struct) walk(tdq *typeDeque) {
- for i := range s.Members {
- tdq.push(&s.Members[i].Type)
- }
-}
-
-func (s *Struct) copy() Type {
- cpy := *s
- cpy.Members = make([]Member, len(s.Members))
- copy(cpy.Members, s.Members)
- return &cpy
-}
-
-func (s *Struct) members() []Member {
- return s.Members
-}
-
-// Union is a compound type where members occupy the same memory.
-type Union struct {
- TypeID
- Name
- // The size of the union including padding, in bytes.
- Size uint32
- Members []Member
-}
-
-func (u *Union) String() string {
- return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name)
-}
-
-func (u *Union) size() uint32 { return u.Size }
-
-func (u *Union) walk(tdq *typeDeque) {
- for i := range u.Members {
- tdq.push(&u.Members[i].Type)
- }
-}
-
-func (u *Union) copy() Type {
- cpy := *u
- cpy.Members = make([]Member, len(u.Members))
- copy(cpy.Members, u.Members)
- return &cpy
-}
-
-func (u *Union) members() []Member {
- return u.Members
-}
-
-type composite interface {
- members() []Member
-}
-
-var (
- _ composite = (*Struct)(nil)
- _ composite = (*Union)(nil)
-)
-
-// Member is part of a Struct or Union.
-//
-// It is not a valid Type.
-type Member struct {
- Name
- Type Type
- // Offset is the bit offset of this member
- Offset uint32
- BitfieldSize uint32
-}
-
-// Enum lists possible values.
-type Enum struct {
- TypeID
- Name
- Values []EnumValue
-}
-
-func (e *Enum) String() string {
- return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name)
-}
-
-// EnumValue is part of an Enum
-//
-// Is is not a valid Type
-type EnumValue struct {
- Name
- Value int32
-}
-
-func (e *Enum) size() uint32 { return 4 }
-func (e *Enum) walk(*typeDeque) {}
-func (e *Enum) copy() Type {
- cpy := *e
- cpy.Values = make([]EnumValue, len(e.Values))
- copy(cpy.Values, e.Values)
- return &cpy
-}
-
-// FwdKind is the type of forward declaration.
-type FwdKind int
-
-// Valid types of forward declaration.
-const (
- FwdStruct FwdKind = iota
- FwdUnion
-)
-
-func (fk FwdKind) String() string {
- switch fk {
- case FwdStruct:
- return "struct"
- case FwdUnion:
- return "union"
- default:
- return fmt.Sprintf("%T(%d)", fk, int(fk))
- }
-}
-
-// Fwd is a forward declaration of a Type.
-type Fwd struct {
- TypeID
- Name
- Kind FwdKind
-}
-
-func (f *Fwd) String() string {
- return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name)
-}
-
-func (f *Fwd) walk(*typeDeque) {}
-func (f *Fwd) copy() Type {
- cpy := *f
- return &cpy
-}
-
-// Typedef is an alias of a Type.
-type Typedef struct {
- TypeID
- Name
- Type Type
-}
-
-func (td *Typedef) String() string {
- return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID())
-}
-
-func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
-func (td *Typedef) copy() Type {
- cpy := *td
- return &cpy
-}
-
-// Volatile is a qualifier.
-type Volatile struct {
- TypeID
- Type Type
-}
-
-func (v *Volatile) String() string {
- return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID())
-}
-
-func (v *Volatile) qualify() Type { return v.Type }
-func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
-func (v *Volatile) copy() Type {
- cpy := *v
- return &cpy
-}
-
-// Const is a qualifier.
-type Const struct {
- TypeID
- Type Type
-}
-
-func (c *Const) String() string {
- return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID())
-}
-
-func (c *Const) qualify() Type { return c.Type }
-func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
-func (c *Const) copy() Type {
- cpy := *c
- return &cpy
-}
-
-// Restrict is a qualifier.
-type Restrict struct {
- TypeID
- Type Type
-}
-
-func (r *Restrict) String() string {
- return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID())
-}
-
-func (r *Restrict) qualify() Type { return r.Type }
-func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
-func (r *Restrict) copy() Type {
- cpy := *r
- return &cpy
-}
-
-// Func is a function definition.
-type Func struct {
- TypeID
- Name
- Type Type
-}
-
-func (f *Func) String() string {
- return fmt.Sprintf("func#%d[%q proto=#%d]", f.TypeID, f.Name, f.Type.ID())
-}
-
-func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
-func (f *Func) copy() Type {
- cpy := *f
- return &cpy
-}
-
-// FuncProto is a function declaration.
-type FuncProto struct {
- TypeID
- Return Type
- Params []FuncParam
-}
-
-func (fp *FuncProto) String() string {
- var s strings.Builder
- fmt.Fprintf(&s, "proto#%d[", fp.TypeID)
- for _, param := range fp.Params {
- fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID())
- }
- fmt.Fprintf(&s, "return=#%d]", fp.Return.ID())
- return s.String()
-}
-
-func (fp *FuncProto) walk(tdq *typeDeque) {
- tdq.push(&fp.Return)
- for i := range fp.Params {
- tdq.push(&fp.Params[i].Type)
- }
-}
-
-func (fp *FuncProto) copy() Type {
- cpy := *fp
- cpy.Params = make([]FuncParam, len(fp.Params))
- copy(cpy.Params, fp.Params)
- return &cpy
-}
-
-type FuncParam struct {
- Name
- Type Type
-}
-
-// Var is a global variable.
-type Var struct {
- TypeID
- Name
- Type Type
-}
-
-func (v *Var) String() string {
- // TODO: Linkage
- return fmt.Sprintf("var#%d[%q]", v.TypeID, v.Name)
-}
-
-func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
-func (v *Var) copy() Type {
- cpy := *v
- return &cpy
-}
-
-// Datasec is a global program section containing data.
-type Datasec struct {
- TypeID
- Name
- Size uint32
- Vars []VarSecinfo
-}
-
-func (ds *Datasec) String() string {
- return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name)
-}
-
-func (ds *Datasec) size() uint32 { return ds.Size }
-
-func (ds *Datasec) walk(tdq *typeDeque) {
- for i := range ds.Vars {
- tdq.push(&ds.Vars[i].Type)
- }
-}
-
-func (ds *Datasec) copy() Type {
- cpy := *ds
- cpy.Vars = make([]VarSecinfo, len(ds.Vars))
- copy(cpy.Vars, ds.Vars)
- return &cpy
-}
-
-// VarSecinfo describes variable in a Datasec
-//
-// It is not a valid Type.
-type VarSecinfo struct {
- Type Type
- Offset uint32
- Size uint32
-}
-
-type sizer interface {
- size() uint32
-}
-
-var (
- _ sizer = (*Int)(nil)
- _ sizer = (*Pointer)(nil)
- _ sizer = (*Struct)(nil)
- _ sizer = (*Union)(nil)
- _ sizer = (*Enum)(nil)
- _ sizer = (*Datasec)(nil)
-)
-
-type qualifier interface {
- qualify() Type
-}
-
-var (
- _ qualifier = (*Const)(nil)
- _ qualifier = (*Restrict)(nil)
- _ qualifier = (*Volatile)(nil)
-)
-
-// Sizeof returns the size of a type in bytes.
-//
-// Returns an error if the size can't be computed.
-func Sizeof(typ Type) (int, error) {
- var (
- n = int64(1)
- elem int64
- )
-
- for i := 0; i < maxTypeDepth; i++ {
- switch v := typ.(type) {
- case *Array:
- if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
- return 0, errors.New("overflow")
- }
-
- // Arrays may be of zero length, which allows
- // n to be zero as well.
- n *= int64(v.Nelems)
- typ = v.Type
- continue
-
- case sizer:
- elem = int64(v.size())
-
- case *Typedef:
- typ = v.Type
- continue
-
- case qualifier:
- typ = v.qualify()
- continue
-
- default:
- return 0, fmt.Errorf("unrecognized type %T", typ)
- }
-
- if n > 0 && elem > math.MaxInt64/n {
- return 0, errors.New("overflow")
- }
-
- size := n * elem
- if int64(int(size)) != size {
- return 0, errors.New("overflow")
- }
-
- return int(size), nil
- }
-
- return 0, errors.New("exceeded type depth")
-}
-
-// copy a Type recursively.
-//
-// typ may form a cycle.
-func copyType(typ Type) Type {
- var (
- copies = make(map[Type]Type)
- work typeDeque
- )
-
- for t := &typ; t != nil; t = work.pop() {
- // *t is the identity of the type.
- if cpy := copies[*t]; cpy != nil {
- *t = cpy
- continue
- }
-
- cpy := (*t).copy()
- copies[*t] = cpy
- *t = cpy
-
- // Mark any nested types for copying.
- cpy.walk(&work)
- }
-
- return typ
-}
-
-// typeDeque keeps track of pointers to types which still
-// need to be visited.
-type typeDeque struct {
- types []*Type
- read, write uint64
- mask uint64
-}
-
-// push adds a type to the stack.
-func (dq *typeDeque) push(t *Type) {
- if dq.write-dq.read < uint64(len(dq.types)) {
- dq.types[dq.write&dq.mask] = t
- dq.write++
- return
- }
-
- new := len(dq.types) * 2
- if new == 0 {
- new = 8
- }
-
- types := make([]*Type, new)
- pivot := dq.read & dq.mask
- n := copy(types, dq.types[pivot:])
- n += copy(types[n:], dq.types[:pivot])
- types[n] = t
-
- dq.types = types
- dq.mask = uint64(new) - 1
- dq.read, dq.write = 0, uint64(n+1)
-}
-
-// shift returns the first element or null.
-func (dq *typeDeque) shift() *Type {
- if dq.read == dq.write {
- return nil
- }
-
- index := dq.read & dq.mask
- t := dq.types[index]
- dq.types[index] = nil
- dq.read++
- return t
-}
-
-// pop returns the last element or null.
-func (dq *typeDeque) pop() *Type {
- if dq.read == dq.write {
- return nil
- }
-
- dq.write--
- index := dq.write & dq.mask
- t := dq.types[index]
- dq.types[index] = nil
- return t
-}
-
-// all returns all elements.
-//
-// The deque is empty after calling this method.
-func (dq *typeDeque) all() []*Type {
- length := dq.write - dq.read
- types := make([]*Type, 0, length)
- for t := dq.shift(); t != nil; t = dq.shift() {
- types = append(types, t)
- }
- return types
-}
-
-// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
-// it into a graph of Types connected via pointers.
-//
-// Returns a map of named types (so, where NameOff is non-zero) and a slice of types
-// indexed by TypeID. Since BTF ignores compilation units, multiple types may share
-// the same name. A Type may form a cyclic graph by pointing at itself.
-func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]namedType, err error) {
- type fixupDef struct {
- id TypeID
- expectedKind btfKind
- typ *Type
- }
-
- var fixups []fixupDef
- fixup := func(id TypeID, expectedKind btfKind, typ *Type) {
- fixups = append(fixups, fixupDef{id, expectedKind, typ})
- }
-
- convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
- // NB: The fixup below relies on pre-allocating this array to
- // work, since otherwise append might re-allocate members.
- members := make([]Member, 0, len(raw))
- for i, btfMember := range raw {
- name, err := rawStrings.LookupName(btfMember.NameOff)
- if err != nil {
- return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
- }
- m := Member{
- Name: name,
- Offset: btfMember.Offset,
- }
- if kindFlag {
- m.BitfieldSize = btfMember.Offset >> 24
- m.Offset &= 0xffffff
- }
- members = append(members, m)
- }
- for i := range members {
- fixup(raw[i].Type, kindUnknown, &members[i].Type)
- }
- return members, nil
- }
-
- types = make([]Type, 0, len(rawTypes))
- types = append(types, (*Void)(nil))
- namedTypes = make(map[string][]namedType)
-
- for i, raw := range rawTypes {
- var (
- // Void is defined to always be type ID 0, and is thus
- // omitted from BTF.
- id = TypeID(i + 1)
- typ Type
- )
-
- name, err := rawStrings.LookupName(raw.NameOff)
- if err != nil {
- return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err)
- }
-
- switch raw.Kind() {
- case kindInt:
- encoding, offset, bits := intEncoding(*raw.data.(*uint32))
- typ = &Int{id, name, raw.Size(), encoding, offset, bits}
-
- case kindPointer:
- ptr := &Pointer{id, nil}
- fixup(raw.Type(), kindUnknown, &ptr.Target)
- typ = ptr
-
- case kindArray:
- btfArr := raw.data.(*btfArray)
-
- // IndexType is unused according to btf.rst.
- // Don't make it available right now.
- arr := &Array{id, nil, btfArr.Nelems}
- fixup(btfArr.Type, kindUnknown, &arr.Type)
- typ = arr
-
- case kindStruct:
- members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
- if err != nil {
- return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
- }
- typ = &Struct{id, name, raw.Size(), members}
-
- case kindUnion:
- members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
- if err != nil {
- return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
- }
- typ = &Union{id, name, raw.Size(), members}
-
- case kindEnum:
- rawvals := raw.data.([]btfEnum)
- vals := make([]EnumValue, 0, len(rawvals))
- for i, btfVal := range rawvals {
- name, err := rawStrings.LookupName(btfVal.NameOff)
- if err != nil {
- return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err)
- }
- vals = append(vals, EnumValue{
- Name: name,
- Value: btfVal.Val,
- })
- }
- typ = &Enum{id, name, vals}
-
- case kindForward:
- if raw.KindFlag() {
- typ = &Fwd{id, name, FwdUnion}
- } else {
- typ = &Fwd{id, name, FwdStruct}
- }
-
- case kindTypedef:
- typedef := &Typedef{id, name, nil}
- fixup(raw.Type(), kindUnknown, &typedef.Type)
- typ = typedef
-
- case kindVolatile:
- volatile := &Volatile{id, nil}
- fixup(raw.Type(), kindUnknown, &volatile.Type)
- typ = volatile
-
- case kindConst:
- cnst := &Const{id, nil}
- fixup(raw.Type(), kindUnknown, &cnst.Type)
- typ = cnst
-
- case kindRestrict:
- restrict := &Restrict{id, nil}
- fixup(raw.Type(), kindUnknown, &restrict.Type)
- typ = restrict
-
- case kindFunc:
- fn := &Func{id, name, nil}
- fixup(raw.Type(), kindFuncProto, &fn.Type)
- typ = fn
-
- case kindFuncProto:
- rawparams := raw.data.([]btfParam)
- params := make([]FuncParam, 0, len(rawparams))
- for i, param := range rawparams {
- name, err := rawStrings.LookupName(param.NameOff)
- if err != nil {
- return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
- }
- params = append(params, FuncParam{
- Name: name,
- })
- }
- for i := range params {
- fixup(rawparams[i].Type, kindUnknown, &params[i].Type)
- }
-
- fp := &FuncProto{id, nil, params}
- fixup(raw.Type(), kindUnknown, &fp.Return)
- typ = fp
-
- case kindVar:
- v := &Var{id, name, nil}
- fixup(raw.Type(), kindUnknown, &v.Type)
- typ = v
-
- case kindDatasec:
- btfVars := raw.data.([]btfVarSecinfo)
- vars := make([]VarSecinfo, 0, len(btfVars))
- for _, btfVar := range btfVars {
- vars = append(vars, VarSecinfo{
- Offset: btfVar.Offset,
- Size: btfVar.Size,
- })
- }
- for i := range vars {
- fixup(btfVars[i].Type, kindVar, &vars[i].Type)
- }
- typ = &Datasec{id, name, raw.SizeType, vars}
-
- default:
- return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
- }
-
- types = append(types, typ)
-
- if named, ok := typ.(namedType); ok {
- if name := essentialName(named.name()); name != "" {
- namedTypes[name] = append(namedTypes[name], named)
- }
- }
- }
-
- for _, fixup := range fixups {
- i := int(fixup.id)
- if i >= len(types) {
- return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
- }
-
- // Default void (id 0) to unknown
- rawKind := kindUnknown
- if i > 0 {
- rawKind = rawTypes[i-1].Kind()
- }
-
- if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected {
- return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
- }
-
- *fixup.typ = types[i]
- }
-
- return types, namedTypes, nil
-}
-
-// essentialName returns name without a ___ suffix.
-func essentialName(name string) string {
- lastIdx := strings.LastIndex(name, "___")
- if lastIdx > 0 {
- return name[:lastIdx]
- }
- return name
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/cpu.go b/vendor/github.com/cilium/ebpf/internal/cpu.go
index d3424ba43..3affa1efb 100644
--- a/vendor/github.com/cilium/ebpf/internal/cpu.go
+++ b/vendor/github.com/cilium/ebpf/internal/cpu.go
@@ -2,7 +2,7 @@ package internal
import (
"fmt"
- "io/ioutil"
+ "os"
"strings"
"sync"
)
@@ -24,7 +24,7 @@ func PossibleCPUs() (int, error) {
}
func parseCPUsFromFile(path string) (int, error) {
- spec, err := ioutil.ReadFile(path)
+ spec, err := os.ReadFile(path)
if err != nil {
return 0, err
}
diff --git a/vendor/github.com/cilium/ebpf/internal/elf.go b/vendor/github.com/cilium/ebpf/internal/elf.go
index c3f9ea0f8..011581938 100644
--- a/vendor/github.com/cilium/ebpf/internal/elf.go
+++ b/vendor/github.com/cilium/ebpf/internal/elf.go
@@ -35,6 +35,29 @@ func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) {
return &SafeELFFile{file}, nil
}
+// OpenSafeELFFile reads an ELF from a file.
+//
+// It works like NewSafeELFFile, with the exception that safe.Close will
+// close the underlying file.
+func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ safe = nil
+ err = fmt.Errorf("reading ELF file panicked: %s", r)
+ }()
+
+ file, err := elf.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SafeELFFile{file}, nil
+}
+
// Symbols is the safe version of elf.File.Symbols.
func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
defer func() {
@@ -50,3 +73,30 @@ func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
syms, err = se.File.Symbols()
return
}
+
+// DynamicSymbols is the safe version of elf.File.DynamicSymbols.
+func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ syms = nil
+ err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r)
+ }()
+
+ syms, err = se.File.DynamicSymbols()
+ return
+}
+
+// SectionsByType returns all sections in the file with the specified section type.
+func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section {
+ sections := make([]*elf.Section, 0, 1)
+ for _, section := range se.Sections {
+ if section.Type == typ {
+ sections = append(sections, section)
+ }
+ }
+ return sections
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/endian.go b/vendor/github.com/cilium/ebpf/internal/endian.go
deleted file mode 100644
index ac8a94e51..000000000
--- a/vendor/github.com/cilium/ebpf/internal/endian.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package internal
-
-import (
- "encoding/binary"
- "unsafe"
-)
-
-// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
-// depending on the host's endianness.
-var NativeEndian binary.ByteOrder
-
-func init() {
- if isBigEndian() {
- NativeEndian = binary.BigEndian
- } else {
- NativeEndian = binary.LittleEndian
- }
-}
-
-func isBigEndian() (ret bool) {
- i := int(0x1)
- bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i))
- return bs[0] == 0
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/endian_be.go b/vendor/github.com/cilium/ebpf/internal/endian_be.go
new file mode 100644
index 000000000..ad33cda85
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/endian_be.go
@@ -0,0 +1,13 @@
+//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64
+// +build armbe arm64be mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64
+
+package internal
+
+import "encoding/binary"
+
+// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
+// depending on the host's endianness.
+var NativeEndian binary.ByteOrder = binary.BigEndian
+
+// ClangEndian is set to either "el" or "eb" depending on the host's endianness.
+const ClangEndian = "eb"
diff --git a/vendor/github.com/cilium/ebpf/internal/endian_le.go b/vendor/github.com/cilium/ebpf/internal/endian_le.go
new file mode 100644
index 000000000..41a68224c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/endian_le.go
@@ -0,0 +1,13 @@
+//go:build 386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64
+// +build 386 amd64 amd64p32 arm arm64 mipsle mips64le mips64p32le ppc64le riscv64
+
+package internal
+
+import "encoding/binary"
+
+// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
+// depending on the host's endianness.
+var NativeEndian binary.ByteOrder = binary.LittleEndian
+
+// ClangEndian is set to either "el" or "eb" depending on the host's endianness.
+const ClangEndian = "el"
diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go
index b6aee81f7..b5ccdd7d0 100644
--- a/vendor/github.com/cilium/ebpf/internal/errors.go
+++ b/vendor/github.com/cilium/ebpf/internal/errors.go
@@ -2,46 +2,205 @@ package internal
import (
"bytes"
- "errors"
"fmt"
+ "io"
"strings"
-
- "github.com/cilium/ebpf/internal/unix"
)
-// ErrorWithLog returns an error that includes logs from the
-// kernel verifier.
+// ErrorWithLog returns an error which includes logs from the kernel verifier.
+//
+// The default error output is a summary of the full log. The latter can be
+// accessed via VerifierError.Log or by formatting the error, see Format.
//
-// logErr should be the error returned by the syscall that generated
-// the log. It is used to check for truncation of the output.
-func ErrorWithLog(err error, log []byte, logErr error) error {
- logStr := strings.Trim(CString(log), "\t\r\n ")
- if errors.Is(logErr, unix.ENOSPC) {
- logStr += " (truncated...)"
+// A set of heuristics is used to determine whether the log has been truncated.
+func ErrorWithLog(err error, log []byte) *VerifierError {
+ const whitespace = "\t\r\v\n "
+
+ // Convert verifier log C string by truncating it on the first 0 byte
+ // and trimming trailing whitespace before interpreting as a Go string.
+ truncated := false
+ if i := bytes.IndexByte(log, 0); i != -1 {
+ if i == len(log)-1 && !bytes.HasSuffix(log[:i], []byte{'\n'}) {
+ // The null byte is at the end of the buffer and it's not preceded
+ // by a newline character. Most likely the buffer was too short.
+ truncated = true
+ }
+
+ log = log[:i]
+ } else if len(log) > 0 {
+ // No null byte? Dodgy!
+ truncated = true
+ }
+
+ log = bytes.Trim(log, whitespace)
+ logLines := bytes.Split(log, []byte{'\n'})
+ lines := make([]string, 0, len(logLines))
+ for _, line := range logLines {
+ // Don't remove leading white space on individual lines. We rely on it
+ // when outputting logs.
+ lines = append(lines, string(bytes.TrimRight(line, whitespace)))
}
- return &VerifierError{err, logStr}
+ return &VerifierError{err, lines, truncated}
}
// VerifierError includes information from the eBPF verifier.
+//
+// It summarises the log output, see Format if you want to output the full contents.
type VerifierError struct {
- cause error
- log string
+ // The error which caused this error.
+ Cause error
+ // The verifier output split into lines.
+ Log []string
+ // Whether the log output is truncated, based on several heuristics.
+ Truncated bool
+}
+
+func (le *VerifierError) Unwrap() error {
+ return le.Cause
}
func (le *VerifierError) Error() string {
- if le.log == "" {
- return le.cause.Error()
+ log := le.Log
+ if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") {
+ // Get rid of "processed 39 insns (limit 1000000) ..." from summary.
+ log = log[:n-1]
+ }
+
+ n := len(log)
+ if n == 0 {
+ return le.Cause.Error()
+ }
+
+ lines := log[n-1:]
+ if n >= 2 && (includePreviousLine(log[n-1]) || le.Truncated) {
+ // Add one more line of context if it aids understanding the error.
+ lines = log[n-2:]
+ }
+
+ var b strings.Builder
+ fmt.Fprintf(&b, "%s: ", le.Cause.Error())
+
+ for i, line := range lines {
+ b.WriteString(strings.TrimSpace(line))
+ if i != len(lines)-1 {
+ b.WriteString(": ")
+ }
+ }
+
+ omitted := len(le.Log) - len(lines)
+ if omitted == 0 && !le.Truncated {
+ return b.String()
+ }
+
+ b.WriteString(" (")
+ if le.Truncated {
+ b.WriteString("truncated")
+ }
+
+ if omitted > 0 {
+ if le.Truncated {
+ b.WriteString(", ")
+ }
+ fmt.Fprintf(&b, "%d line(s) omitted", omitted)
}
+ b.WriteString(")")
- return fmt.Sprintf("%s: %s", le.cause, le.log)
+ return b.String()
}
-// CString turns a NUL / zero terminated byte buffer into a string.
-func CString(in []byte) string {
- inLen := bytes.IndexByte(in, 0)
- if inLen == -1 {
- return ""
+// includePreviousLine returns true if the given line likely is better
+// understood with additional context from the preceding line.
+func includePreviousLine(line string) bool {
+ // We need to find a good trade off between understandable error messages
+ // and too much complexity here. Checking the string prefix is ok, requiring
+ // regular expressions to do it is probably overkill.
+
+ if strings.HasPrefix(line, "\t") {
+ // [13] STRUCT drm_rect size=16 vlen=4
+ // \tx1 type_id=2
+ return true
+ }
+
+ if len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9' {
+ // 0: (95) exit
+ // R0 !read_ok
+ return true
+ }
+
+ if strings.HasPrefix(line, "invalid bpf_context access") {
+ // 0: (79) r6 = *(u64 *)(r1 +0)
+ // func '__x64_sys_recvfrom' arg0 type FWD is not a struct
+ // invalid bpf_context access off=0 size=8
+ return true
+ }
+
+ return false
+}
+
+// Format the error.
+//
+// Understood verbs are %s and %v, which are equivalent to calling Error(). %v
+// allows outputting additional information using the following flags:
+//
+// + Output the first <width> lines, or all lines if no width is given.
+// - Output the last <width> lines, or all lines if no width is given.
+//
+// Use width to specify how many lines to output. Use the '-' flag to output
+// lines from the end of the log instead of the beginning.
+func (le *VerifierError) Format(f fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ _, _ = io.WriteString(f, le.Error())
+
+ case 'v':
+ n, haveWidth := f.Width()
+ if !haveWidth || n > len(le.Log) {
+ n = len(le.Log)
+ }
+
+ if !f.Flag('+') && !f.Flag('-') {
+ if haveWidth {
+ _, _ = io.WriteString(f, "%!v(BADWIDTH)")
+ return
+ }
+
+ _, _ = io.WriteString(f, le.Error())
+ return
+ }
+
+ if f.Flag('+') && f.Flag('-') {
+ _, _ = io.WriteString(f, "%!v(BADFLAG)")
+ return
+ }
+
+ fmt.Fprintf(f, "%s:", le.Cause.Error())
+
+ omitted := len(le.Log) - n
+ lines := le.Log[:n]
+ if f.Flag('-') {
+ // Print last instead of first lines.
+ lines = le.Log[len(le.Log)-n:]
+ if omitted > 0 {
+ fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted)
+ }
+ }
+
+ for _, line := range lines {
+ fmt.Fprintf(f, "\n\t%s", line)
+ }
+
+ if !f.Flag('-') {
+ if omitted > 0 {
+ fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted)
+ }
+ }
+
+ if le.Truncated {
+ fmt.Fprintf(f, "\n\t(truncated)")
+ }
+
+ default:
+ fmt.Fprintf(f, "%%!%c(BADVERB)", verb)
}
- return string(in[:inLen])
}
diff --git a/vendor/github.com/cilium/ebpf/internal/fd.go b/vendor/github.com/cilium/ebpf/internal/fd.go
deleted file mode 100644
index af04955bd..000000000
--- a/vendor/github.com/cilium/ebpf/internal/fd.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package internal
-
-import (
- "errors"
- "fmt"
- "os"
- "runtime"
- "strconv"
-
- "github.com/cilium/ebpf/internal/unix"
-)
-
-var ErrClosedFd = errors.New("use of closed file descriptor")
-
-type FD struct {
- raw int64
-}
-
-func NewFD(value uint32) *FD {
- fd := &FD{int64(value)}
- runtime.SetFinalizer(fd, (*FD).Close)
- return fd
-}
-
-func (fd *FD) String() string {
- return strconv.FormatInt(fd.raw, 10)
-}
-
-func (fd *FD) Value() (uint32, error) {
- if fd.raw < 0 {
- return 0, ErrClosedFd
- }
-
- return uint32(fd.raw), nil
-}
-
-func (fd *FD) Close() error {
- if fd.raw < 0 {
- return nil
- }
-
- value := int(fd.raw)
- fd.raw = -1
-
- fd.Forget()
- return unix.Close(value)
-}
-
-func (fd *FD) Forget() {
- runtime.SetFinalizer(fd, nil)
-}
-
-func (fd *FD) Dup() (*FD, error) {
- if fd.raw < 0 {
- return nil, ErrClosedFd
- }
-
- dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0)
- if err != nil {
- return nil, fmt.Errorf("can't dup fd: %v", err)
- }
-
- return NewFD(uint32(dup)), nil
-}
-
-func (fd *FD) File(name string) *os.File {
- fd.Forget()
- return os.NewFile(uintptr(fd.raw), name)
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/feature.go b/vendor/github.com/cilium/ebpf/internal/feature.go
index ec62ed39b..0a6c2d1d5 100644
--- a/vendor/github.com/cilium/ebpf/internal/feature.go
+++ b/vendor/github.com/cilium/ebpf/internal/feature.go
@@ -54,11 +54,6 @@ type FeatureTestFn func() error
//
// Returns an error wrapping ErrNotSupported if the feature is not supported.
func FeatureTest(name, version string, fn FeatureTestFn) func() error {
- v, err := NewVersion(version)
- if err != nil {
- return func() error { return err }
- }
-
ft := new(featureTest)
return func() error {
ft.RLock()
@@ -79,6 +74,11 @@ func FeatureTest(name, version string, fn FeatureTestFn) func() error {
err := fn()
switch {
case errors.Is(err, ErrNotSupported):
+ v, err := NewVersion(version)
+ if err != nil {
+ return err
+ }
+
ft.result = &UnsupportedFeatureError{
MinimumVersion: v,
Name: name,
@@ -98,41 +98,3 @@ func FeatureTest(name, version string, fn FeatureTestFn) func() error {
return ft.result
}
}
-
-// A Version in the form Major.Minor.Patch.
-type Version [3]uint16
-
-// NewVersion creates a version from a string like "Major.Minor.Patch".
-//
-// Patch is optional.
-func NewVersion(ver string) (Version, error) {
- var major, minor, patch uint16
- n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch)
- if n < 2 {
- return Version{}, fmt.Errorf("invalid version: %s", ver)
- }
- return Version{major, minor, patch}, nil
-}
-
-func (v Version) String() string {
- if v[2] == 0 {
- return fmt.Sprintf("v%d.%d", v[0], v[1])
- }
- return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2])
-}
-
-// Less returns true if the version is less than another version.
-func (v Version) Less(other Version) bool {
- for i, a := range v {
- if a == other[i] {
- continue
- }
- return a < other[i]
- }
- return false
-}
-
-// Unspecified returns true if the version is all zero.
-func (v Version) Unspecified() bool {
- return v[0] == 0 && v[1] == 0 && v[2] == 0
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go
index fa7402782..30b6641f0 100644
--- a/vendor/github.com/cilium/ebpf/internal/io.go
+++ b/vendor/github.com/cilium/ebpf/internal/io.go
@@ -1,6 +1,35 @@
package internal
-import "errors"
+import (
+ "bufio"
+ "compress/gzip"
+ "errors"
+ "io"
+ "os"
+)
+
+// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized
+// buffered reader. It is a convenience function for reading subsections of
+// ELF sections while minimizing the amount of read() syscalls made.
+//
+// Syscall overhead is non-negligible in continuous integration context
+// where ELFs might be accessed over virtual filesystems with poor random
+// access performance. Buffering reads makes sense because (sub)sections
+// end up being read completely anyway.
+//
+// Use instead of the r.Seek() + io.LimitReader() pattern.
+func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader {
+ // Clamp the size of the buffer to one page to avoid slurping large parts
+ // of a file into memory. bufio.NewReader uses a hardcoded default buffer
+ // of 4096. Allow arches with larger pages to allocate more, but don't
+ // allocate a fixed 4k buffer if we only need to read a small segment.
+ buf := n
+ if ps := int64(os.Getpagesize()); n > ps {
+ buf = ps
+ }
+
+ return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf))
+}
// DiscardZeroes makes sure that all written bytes are zero
// before discarding them.
@@ -14,3 +43,20 @@ func (DiscardZeroes) Write(p []byte) (int, error) {
}
return len(p), nil
}
+
+// ReadAllCompressed decompresses a gzipped file into memory.
+func ReadAllCompressed(file string) ([]byte, error) {
+ fh, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer fh.Close()
+
+ gz, err := gzip.NewReader(fh)
+ if err != nil {
+ return nil, err
+ }
+ defer gz.Close()
+
+ return io.ReadAll(gz)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/output.go b/vendor/github.com/cilium/ebpf/internal/output.go
new file mode 100644
index 000000000..aeab37fcf
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/output.go
@@ -0,0 +1,84 @@
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "go/format"
+ "go/scanner"
+ "io"
+ "strings"
+ "unicode"
+)
+
+// Identifier turns a C style type or field name into an exportable Go equivalent.
+func Identifier(str string) string {
+ prev := rune(-1)
+ return strings.Map(func(r rune) rune {
+ // See https://golang.org/ref/spec#Identifiers
+ switch {
+ case unicode.IsLetter(r):
+ if prev == -1 {
+ r = unicode.ToUpper(r)
+ }
+
+ case r == '_':
+ switch {
+ // The previous rune was deleted, or we are at the
+ // beginning of the string.
+ case prev == -1:
+ fallthrough
+
+ // The previous rune is a lower case letter or a digit.
+ case unicode.IsDigit(prev) || (unicode.IsLetter(prev) && unicode.IsLower(prev)):
+ // delete the current rune, and force the
+ // next character to be uppercased.
+ r = -1
+ }
+
+ case unicode.IsDigit(r):
+
+ default:
+ // Delete the current rune. prev is unchanged.
+ return -1
+ }
+
+ prev = r
+ return r
+ }, str)
+}
+
+// WriteFormatted outputs a formatted src into out.
+//
+// If formatting fails it returns an informative error message.
+func WriteFormatted(src []byte, out io.Writer) error {
+ formatted, err := format.Source(src)
+ if err == nil {
+ _, err = out.Write(formatted)
+ return err
+ }
+
+ var el scanner.ErrorList
+ if !errors.As(err, &el) {
+ return err
+ }
+
+ var nel scanner.ErrorList
+ for _, err := range el {
+ if !err.Pos.IsValid() {
+ nel = append(nel, err)
+ continue
+ }
+
+ buf := src[err.Pos.Offset:]
+ nl := bytes.IndexRune(buf, '\n')
+ if nl == -1 {
+ nel = append(nel, err)
+ continue
+ }
+
+ err.Msg += ": " + string(buf[:nl])
+ nel = append(nel, err)
+ }
+
+ return nel
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/pinning.go b/vendor/github.com/cilium/ebpf/internal/pinning.go
new file mode 100644
index 000000000..c711353c3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/pinning.go
@@ -0,0 +1,77 @@
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+func Pin(currentPath, newPath string, fd *sys.FD) error {
+ const bpfFSType = 0xcafe4a11
+
+ if newPath == "" {
+ return errors.New("given pinning path cannot be empty")
+ }
+ if currentPath == newPath {
+ return nil
+ }
+
+ var statfs unix.Statfs_t
+ if err := unix.Statfs(filepath.Dir(newPath), &statfs); err != nil {
+ return err
+ }
+
+ fsType := int64(statfs.Type)
+ if unsafe.Sizeof(statfs.Type) == 4 {
+ // We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a
+ // negative number when interpreted as int32 so we need to cast via
+ // uint32 to avoid sign extension.
+ fsType = int64(uint32(statfs.Type))
+ }
+
+ if fsType != bpfFSType {
+ return fmt.Errorf("%s is not on a bpf filesystem", newPath)
+ }
+
+ defer runtime.KeepAlive(fd)
+
+ if currentPath == "" {
+ return sys.ObjPin(&sys.ObjPinAttr{
+ Pathname: sys.NewStringPointer(newPath),
+ BpfFd: fd.Uint(),
+ })
+ }
+
+ // Renameat2 is used instead of os.Rename to disallow the new path replacing
+ // an existing path.
+ err := unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE)
+ if err == nil {
+ // Object is now moved to the new pinning path.
+ return nil
+ }
+ if !os.IsNotExist(err) {
+ return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err)
+ }
+ // Internal state not in sync with the file system so let's fix it.
+ return sys.ObjPin(&sys.ObjPinAttr{
+ Pathname: sys.NewStringPointer(newPath),
+ BpfFd: fd.Uint(),
+ })
+}
+
+func Unpin(pinnedPath string) error {
+ if pinnedPath == "" {
+ return nil
+ }
+ err := os.Remove(pinnedPath)
+ if err == nil || os.IsNotExist(err) {
+ return nil
+ }
+ return err
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/ptr_64.go
deleted file mode 100644
index 69452dceb..000000000
--- a/vendor/github.com/cilium/ebpf/internal/ptr_64.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le
-// +build !armbe,!mips,!mips64p32
-
-package internal
-
-import (
- "unsafe"
-)
-
-// Pointer wraps an unsafe.Pointer to be 64bit to
-// conform to the syscall specification.
-type Pointer struct {
- ptr unsafe.Pointer
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/doc.go b/vendor/github.com/cilium/ebpf/internal/sys/doc.go
new file mode 100644
index 000000000..dfe174448
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/doc.go
@@ -0,0 +1,6 @@
+// Package sys contains bindings for the BPF syscall.
+package sys
+
+// Regenerate types.go by invoking go generate in the current directory.
+
+//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../../btf/testdata/vmlinux.btf.gz
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/vendor/github.com/cilium/ebpf/internal/sys/fd.go
new file mode 100644
index 000000000..65517d45e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/fd.go
@@ -0,0 +1,96 @@
+package sys
+
+import (
+ "fmt"
+ "math"
+ "os"
+ "runtime"
+ "strconv"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var ErrClosedFd = unix.EBADF
+
+type FD struct {
+ raw int
+}
+
+func newFD(value int) *FD {
+ fd := &FD{value}
+ runtime.SetFinalizer(fd, (*FD).Close)
+ return fd
+}
+
+// NewFD wraps a raw fd with a finalizer.
+//
+// You must not use the raw fd after calling this function, since the underlying
+// file descriptor number may change. This is because the BPF UAPI assumes that
+// zero is not a valid fd value.
+func NewFD(value int) (*FD, error) {
+ if value < 0 {
+ return nil, fmt.Errorf("invalid fd %d", value)
+ }
+
+ fd := newFD(value)
+ if value != 0 {
+ return fd, nil
+ }
+
+ dup, err := fd.Dup()
+ _ = fd.Close()
+ return dup, err
+}
+
+func (fd *FD) String() string {
+ return strconv.FormatInt(int64(fd.raw), 10)
+}
+
+func (fd *FD) Int() int {
+ return fd.raw
+}
+
+func (fd *FD) Uint() uint32 {
+ if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 {
+ // Best effort: this is the number most likely to be an invalid file
+ // descriptor. It is equal to -1 (on two's complement arches).
+ return math.MaxUint32
+ }
+ return uint32(fd.raw)
+}
+
+func (fd *FD) Close() error {
+ if fd.raw < 0 {
+ return nil
+ }
+
+ value := int(fd.raw)
+ fd.raw = -1
+
+ fd.Forget()
+ return unix.Close(value)
+}
+
+func (fd *FD) Forget() {
+ runtime.SetFinalizer(fd, nil)
+}
+
+func (fd *FD) Dup() (*FD, error) {
+ if fd.raw < 0 {
+ return nil, ErrClosedFd
+ }
+
+ // Always require the fd to be larger than zero: the BPF API treats the value
+ // as "no argument provided".
+ dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1)
+ if err != nil {
+ return nil, fmt.Errorf("can't dup fd: %v", err)
+ }
+
+ return newFD(dup), nil
+}
+
+func (fd *FD) File(name string) *os.File {
+ fd.Forget()
+ return os.NewFile(uintptr(fd.raw), name)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/ptr.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
index a7f12b2db..a22100688 100644
--- a/vendor/github.com/cilium/ebpf/internal/ptr.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
@@ -1,6 +1,10 @@
-package internal
+package sys
-import "unsafe"
+import (
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
// NewPointer creates a 64-bit pointer from an unsafe Pointer.
func NewPointer(ptr unsafe.Pointer) Pointer {
@@ -16,15 +20,19 @@ func NewSlicePointer(buf []byte) Pointer {
return Pointer{ptr: unsafe.Pointer(&buf[0])}
}
+// NewSlicePointerLen creates a 64-bit pointer from a byte slice.
+//
+// Useful to assign both the pointer and the length in one go.
+func NewSlicePointerLen(buf []byte) (Pointer, uint32) {
+ return NewSlicePointer(buf), uint32(len(buf))
+}
+
// NewStringPointer creates a 64-bit pointer from a string.
func NewStringPointer(str string) Pointer {
- if str == "" {
+ p, err := unix.BytePtrFromString(str)
+ if err != nil {
return Pointer{}
}
- // The kernel expects strings to be zero terminated
- buf := make([]byte, len(str)+1)
- copy(buf, str)
-
- return Pointer{ptr: unsafe.Pointer(&buf[0])}
+ return Pointer{ptr: unsafe.Pointer(p)}
}
diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
index a56fbcc8e..df903d780 100644
--- a/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
@@ -1,6 +1,7 @@
+//go:build armbe || mips || mips64p32
// +build armbe mips mips64p32
-package internal
+package sys
import (
"unsafe"
diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
index be2ecfca7..a6a51edb6 100644
--- a/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
@@ -1,6 +1,7 @@
+//go:build 386 || amd64p32 || arm || mipsle || mips64p32le
// +build 386 amd64p32 arm mipsle mips64p32le
-package internal
+package sys
import (
"unsafe"
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
new file mode 100644
index 000000000..7c0279e48
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
@@ -0,0 +1,14 @@
+//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32
+// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le,!armbe,!mips,!mips64p32
+
+package sys
+
+import (
+ "unsafe"
+)
+
+// Pointer wraps an unsafe.Pointer to be 64bit to
+// conform to the syscall specification.
+type Pointer struct {
+ ptr unsafe.Pointer
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
new file mode 100644
index 000000000..2a5935dc9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
@@ -0,0 +1,126 @@
+package sys
+
+import (
+ "runtime"
+ "syscall"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// BPF wraps SYS_BPF.
+//
+// Any pointers contained in attr must use the Pointer type from this package.
+func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
+ for {
+ r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
+ runtime.KeepAlive(attr)
+
+ // As of ~4.20 the verifier can be interrupted by a signal,
+ // and returns EAGAIN in that case.
+ if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD {
+ continue
+ }
+
+ var err error
+ if errNo != 0 {
+ err = wrappedErrno{errNo}
+ }
+
+ return r1, err
+ }
+}
+
+// Info is implemented by all structs that can be passed to the ObjInfo syscall.
+//
+// MapInfo
+// ProgInfo
+// LinkInfo
+// BtfInfo
+type Info interface {
+ info() (unsafe.Pointer, uint32)
+}
+
+var _ Info = (*MapInfo)(nil)
+
+func (i *MapInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*ProgInfo)(nil)
+
+func (i *ProgInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*LinkInfo)(nil)
+
+func (i *LinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*BtfInfo)(nil)
+
+func (i *BtfInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+// ObjInfo retrieves information about a BPF Fd.
+//
+// info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo.
+func ObjInfo(fd *FD, info Info) error {
+ ptr, len := info.info()
+ err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{
+ BpfFd: fd.Uint(),
+ InfoLen: len,
+ Info: NewPointer(ptr),
+ })
+ runtime.KeepAlive(fd)
+ return err
+}
+
+// ObjName is a null-terminated string made up of
+// 'A-Za-z0-9_' characters.
+type ObjName [unix.BPF_OBJ_NAME_LEN]byte
+
+// NewObjName truncates the result if it is too long.
+func NewObjName(name string) ObjName {
+ var result ObjName
+ copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
+ return result
+}
+
+// LinkID uniquely identifies a bpf_link.
+type LinkID uint32
+
+// BTFID uniquely identifies a BTF blob loaded into the kernel.
+type BTFID uint32
+
+// wrappedErrno wraps syscall.Errno to prevent direct comparisons with
+// syscall.E* or unix.E* constants.
+//
+// You should never export an error of this type.
+type wrappedErrno struct {
+ syscall.Errno
+}
+
+func (we wrappedErrno) Unwrap() error {
+ return we.Errno
+}
+
+type syscallError struct {
+ error
+ errno syscall.Errno
+}
+
+func Error(err error, errno syscall.Errno) error {
+ return &syscallError{err, errno}
+}
+
+func (se *syscallError) Is(target error) bool {
+ return target == se.error
+}
+
+func (se *syscallError) Unwrap() error {
+ return se.errno
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go
new file mode 100644
index 000000000..291e3a619
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go
@@ -0,0 +1,1052 @@
+// Code generated by internal/cmd/gentypes; DO NOT EDIT.
+
+package sys
+
+import (
+ "unsafe"
+)
+
+type AdjRoomMode int32
+
+const (
+ BPF_ADJ_ROOM_NET AdjRoomMode = 0
+ BPF_ADJ_ROOM_MAC AdjRoomMode = 1
+)
+
+type AttachType int32
+
+const (
+ BPF_CGROUP_INET_INGRESS AttachType = 0
+ BPF_CGROUP_INET_EGRESS AttachType = 1
+ BPF_CGROUP_INET_SOCK_CREATE AttachType = 2
+ BPF_CGROUP_SOCK_OPS AttachType = 3
+ BPF_SK_SKB_STREAM_PARSER AttachType = 4
+ BPF_SK_SKB_STREAM_VERDICT AttachType = 5
+ BPF_CGROUP_DEVICE AttachType = 6
+ BPF_SK_MSG_VERDICT AttachType = 7
+ BPF_CGROUP_INET4_BIND AttachType = 8
+ BPF_CGROUP_INET6_BIND AttachType = 9
+ BPF_CGROUP_INET4_CONNECT AttachType = 10
+ BPF_CGROUP_INET6_CONNECT AttachType = 11
+ BPF_CGROUP_INET4_POST_BIND AttachType = 12
+ BPF_CGROUP_INET6_POST_BIND AttachType = 13
+ BPF_CGROUP_UDP4_SENDMSG AttachType = 14
+ BPF_CGROUP_UDP6_SENDMSG AttachType = 15
+ BPF_LIRC_MODE2 AttachType = 16
+ BPF_FLOW_DISSECTOR AttachType = 17
+ BPF_CGROUP_SYSCTL AttachType = 18
+ BPF_CGROUP_UDP4_RECVMSG AttachType = 19
+ BPF_CGROUP_UDP6_RECVMSG AttachType = 20
+ BPF_CGROUP_GETSOCKOPT AttachType = 21
+ BPF_CGROUP_SETSOCKOPT AttachType = 22
+ BPF_TRACE_RAW_TP AttachType = 23
+ BPF_TRACE_FENTRY AttachType = 24
+ BPF_TRACE_FEXIT AttachType = 25
+ BPF_MODIFY_RETURN AttachType = 26
+ BPF_LSM_MAC AttachType = 27
+ BPF_TRACE_ITER AttachType = 28
+ BPF_CGROUP_INET4_GETPEERNAME AttachType = 29
+ BPF_CGROUP_INET6_GETPEERNAME AttachType = 30
+ BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31
+ BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32
+ BPF_XDP_DEVMAP AttachType = 33
+ BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34
+ BPF_XDP_CPUMAP AttachType = 35
+ BPF_SK_LOOKUP AttachType = 36
+ BPF_XDP AttachType = 37
+ BPF_SK_SKB_VERDICT AttachType = 38
+ BPF_SK_REUSEPORT_SELECT AttachType = 39
+ BPF_SK_REUSEPORT_SELECT_OR_MIGRATE AttachType = 40
+ BPF_PERF_EVENT AttachType = 41
+ BPF_TRACE_KPROBE_MULTI AttachType = 42
+ __MAX_BPF_ATTACH_TYPE AttachType = 43
+)
+
+type Cmd int32
+
+const (
+ BPF_MAP_CREATE Cmd = 0
+ BPF_MAP_LOOKUP_ELEM Cmd = 1
+ BPF_MAP_UPDATE_ELEM Cmd = 2
+ BPF_MAP_DELETE_ELEM Cmd = 3
+ BPF_MAP_GET_NEXT_KEY Cmd = 4
+ BPF_PROG_LOAD Cmd = 5
+ BPF_OBJ_PIN Cmd = 6
+ BPF_OBJ_GET Cmd = 7
+ BPF_PROG_ATTACH Cmd = 8
+ BPF_PROG_DETACH Cmd = 9
+ BPF_PROG_TEST_RUN Cmd = 10
+ BPF_PROG_RUN Cmd = 10
+ BPF_PROG_GET_NEXT_ID Cmd = 11
+ BPF_MAP_GET_NEXT_ID Cmd = 12
+ BPF_PROG_GET_FD_BY_ID Cmd = 13
+ BPF_MAP_GET_FD_BY_ID Cmd = 14
+ BPF_OBJ_GET_INFO_BY_FD Cmd = 15
+ BPF_PROG_QUERY Cmd = 16
+ BPF_RAW_TRACEPOINT_OPEN Cmd = 17
+ BPF_BTF_LOAD Cmd = 18
+ BPF_BTF_GET_FD_BY_ID Cmd = 19
+ BPF_TASK_FD_QUERY Cmd = 20
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM Cmd = 21
+ BPF_MAP_FREEZE Cmd = 22
+ BPF_BTF_GET_NEXT_ID Cmd = 23
+ BPF_MAP_LOOKUP_BATCH Cmd = 24
+ BPF_MAP_LOOKUP_AND_DELETE_BATCH Cmd = 25
+ BPF_MAP_UPDATE_BATCH Cmd = 26
+ BPF_MAP_DELETE_BATCH Cmd = 27
+ BPF_LINK_CREATE Cmd = 28
+ BPF_LINK_UPDATE Cmd = 29
+ BPF_LINK_GET_FD_BY_ID Cmd = 30
+ BPF_LINK_GET_NEXT_ID Cmd = 31
+ BPF_ENABLE_STATS Cmd = 32
+ BPF_ITER_CREATE Cmd = 33
+ BPF_LINK_DETACH Cmd = 34
+ BPF_PROG_BIND_MAP Cmd = 35
+)
+
+type FunctionId int32
+
+const (
+ BPF_FUNC_unspec FunctionId = 0
+ BPF_FUNC_map_lookup_elem FunctionId = 1
+ BPF_FUNC_map_update_elem FunctionId = 2
+ BPF_FUNC_map_delete_elem FunctionId = 3
+ BPF_FUNC_probe_read FunctionId = 4
+ BPF_FUNC_ktime_get_ns FunctionId = 5
+ BPF_FUNC_trace_printk FunctionId = 6
+ BPF_FUNC_get_prandom_u32 FunctionId = 7
+ BPF_FUNC_get_smp_processor_id FunctionId = 8
+ BPF_FUNC_skb_store_bytes FunctionId = 9
+ BPF_FUNC_l3_csum_replace FunctionId = 10
+ BPF_FUNC_l4_csum_replace FunctionId = 11
+ BPF_FUNC_tail_call FunctionId = 12
+ BPF_FUNC_clone_redirect FunctionId = 13
+ BPF_FUNC_get_current_pid_tgid FunctionId = 14
+ BPF_FUNC_get_current_uid_gid FunctionId = 15
+ BPF_FUNC_get_current_comm FunctionId = 16
+ BPF_FUNC_get_cgroup_classid FunctionId = 17
+ BPF_FUNC_skb_vlan_push FunctionId = 18
+ BPF_FUNC_skb_vlan_pop FunctionId = 19
+ BPF_FUNC_skb_get_tunnel_key FunctionId = 20
+ BPF_FUNC_skb_set_tunnel_key FunctionId = 21
+ BPF_FUNC_perf_event_read FunctionId = 22
+ BPF_FUNC_redirect FunctionId = 23
+ BPF_FUNC_get_route_realm FunctionId = 24
+ BPF_FUNC_perf_event_output FunctionId = 25
+ BPF_FUNC_skb_load_bytes FunctionId = 26
+ BPF_FUNC_get_stackid FunctionId = 27
+ BPF_FUNC_csum_diff FunctionId = 28
+ BPF_FUNC_skb_get_tunnel_opt FunctionId = 29
+ BPF_FUNC_skb_set_tunnel_opt FunctionId = 30
+ BPF_FUNC_skb_change_proto FunctionId = 31
+ BPF_FUNC_skb_change_type FunctionId = 32
+ BPF_FUNC_skb_under_cgroup FunctionId = 33
+ BPF_FUNC_get_hash_recalc FunctionId = 34
+ BPF_FUNC_get_current_task FunctionId = 35
+ BPF_FUNC_probe_write_user FunctionId = 36
+ BPF_FUNC_current_task_under_cgroup FunctionId = 37
+ BPF_FUNC_skb_change_tail FunctionId = 38
+ BPF_FUNC_skb_pull_data FunctionId = 39
+ BPF_FUNC_csum_update FunctionId = 40
+ BPF_FUNC_set_hash_invalid FunctionId = 41
+ BPF_FUNC_get_numa_node_id FunctionId = 42
+ BPF_FUNC_skb_change_head FunctionId = 43
+ BPF_FUNC_xdp_adjust_head FunctionId = 44
+ BPF_FUNC_probe_read_str FunctionId = 45
+ BPF_FUNC_get_socket_cookie FunctionId = 46
+ BPF_FUNC_get_socket_uid FunctionId = 47
+ BPF_FUNC_set_hash FunctionId = 48
+ BPF_FUNC_setsockopt FunctionId = 49
+ BPF_FUNC_skb_adjust_room FunctionId = 50
+ BPF_FUNC_redirect_map FunctionId = 51
+ BPF_FUNC_sk_redirect_map FunctionId = 52
+ BPF_FUNC_sock_map_update FunctionId = 53
+ BPF_FUNC_xdp_adjust_meta FunctionId = 54
+ BPF_FUNC_perf_event_read_value FunctionId = 55
+ BPF_FUNC_perf_prog_read_value FunctionId = 56
+ BPF_FUNC_getsockopt FunctionId = 57
+ BPF_FUNC_override_return FunctionId = 58
+ BPF_FUNC_sock_ops_cb_flags_set FunctionId = 59
+ BPF_FUNC_msg_redirect_map FunctionId = 60
+ BPF_FUNC_msg_apply_bytes FunctionId = 61
+ BPF_FUNC_msg_cork_bytes FunctionId = 62
+ BPF_FUNC_msg_pull_data FunctionId = 63
+ BPF_FUNC_bind FunctionId = 64
+ BPF_FUNC_xdp_adjust_tail FunctionId = 65
+ BPF_FUNC_skb_get_xfrm_state FunctionId = 66
+ BPF_FUNC_get_stack FunctionId = 67
+ BPF_FUNC_skb_load_bytes_relative FunctionId = 68
+ BPF_FUNC_fib_lookup FunctionId = 69
+ BPF_FUNC_sock_hash_update FunctionId = 70
+ BPF_FUNC_msg_redirect_hash FunctionId = 71
+ BPF_FUNC_sk_redirect_hash FunctionId = 72
+ BPF_FUNC_lwt_push_encap FunctionId = 73
+ BPF_FUNC_lwt_seg6_store_bytes FunctionId = 74
+ BPF_FUNC_lwt_seg6_adjust_srh FunctionId = 75
+ BPF_FUNC_lwt_seg6_action FunctionId = 76
+ BPF_FUNC_rc_repeat FunctionId = 77
+ BPF_FUNC_rc_keydown FunctionId = 78
+ BPF_FUNC_skb_cgroup_id FunctionId = 79
+ BPF_FUNC_get_current_cgroup_id FunctionId = 80
+ BPF_FUNC_get_local_storage FunctionId = 81
+ BPF_FUNC_sk_select_reuseport FunctionId = 82
+ BPF_FUNC_skb_ancestor_cgroup_id FunctionId = 83
+ BPF_FUNC_sk_lookup_tcp FunctionId = 84
+ BPF_FUNC_sk_lookup_udp FunctionId = 85
+ BPF_FUNC_sk_release FunctionId = 86
+ BPF_FUNC_map_push_elem FunctionId = 87
+ BPF_FUNC_map_pop_elem FunctionId = 88
+ BPF_FUNC_map_peek_elem FunctionId = 89
+ BPF_FUNC_msg_push_data FunctionId = 90
+ BPF_FUNC_msg_pop_data FunctionId = 91
+ BPF_FUNC_rc_pointer_rel FunctionId = 92
+ BPF_FUNC_spin_lock FunctionId = 93
+ BPF_FUNC_spin_unlock FunctionId = 94
+ BPF_FUNC_sk_fullsock FunctionId = 95
+ BPF_FUNC_tcp_sock FunctionId = 96
+ BPF_FUNC_skb_ecn_set_ce FunctionId = 97
+ BPF_FUNC_get_listener_sock FunctionId = 98
+ BPF_FUNC_skc_lookup_tcp FunctionId = 99
+ BPF_FUNC_tcp_check_syncookie FunctionId = 100
+ BPF_FUNC_sysctl_get_name FunctionId = 101
+ BPF_FUNC_sysctl_get_current_value FunctionId = 102
+ BPF_FUNC_sysctl_get_new_value FunctionId = 103
+ BPF_FUNC_sysctl_set_new_value FunctionId = 104
+ BPF_FUNC_strtol FunctionId = 105
+ BPF_FUNC_strtoul FunctionId = 106
+ BPF_FUNC_sk_storage_get FunctionId = 107
+ BPF_FUNC_sk_storage_delete FunctionId = 108
+ BPF_FUNC_send_signal FunctionId = 109
+ BPF_FUNC_tcp_gen_syncookie FunctionId = 110
+ BPF_FUNC_skb_output FunctionId = 111
+ BPF_FUNC_probe_read_user FunctionId = 112
+ BPF_FUNC_probe_read_kernel FunctionId = 113
+ BPF_FUNC_probe_read_user_str FunctionId = 114
+ BPF_FUNC_probe_read_kernel_str FunctionId = 115
+ BPF_FUNC_tcp_send_ack FunctionId = 116
+ BPF_FUNC_send_signal_thread FunctionId = 117
+ BPF_FUNC_jiffies64 FunctionId = 118
+ BPF_FUNC_read_branch_records FunctionId = 119
+ BPF_FUNC_get_ns_current_pid_tgid FunctionId = 120
+ BPF_FUNC_xdp_output FunctionId = 121
+ BPF_FUNC_get_netns_cookie FunctionId = 122
+ BPF_FUNC_get_current_ancestor_cgroup_id FunctionId = 123
+ BPF_FUNC_sk_assign FunctionId = 124
+ BPF_FUNC_ktime_get_boot_ns FunctionId = 125
+ BPF_FUNC_seq_printf FunctionId = 126
+ BPF_FUNC_seq_write FunctionId = 127
+ BPF_FUNC_sk_cgroup_id FunctionId = 128
+ BPF_FUNC_sk_ancestor_cgroup_id FunctionId = 129
+ BPF_FUNC_ringbuf_output FunctionId = 130
+ BPF_FUNC_ringbuf_reserve FunctionId = 131
+ BPF_FUNC_ringbuf_submit FunctionId = 132
+ BPF_FUNC_ringbuf_discard FunctionId = 133
+ BPF_FUNC_ringbuf_query FunctionId = 134
+ BPF_FUNC_csum_level FunctionId = 135
+ BPF_FUNC_skc_to_tcp6_sock FunctionId = 136
+ BPF_FUNC_skc_to_tcp_sock FunctionId = 137
+ BPF_FUNC_skc_to_tcp_timewait_sock FunctionId = 138
+ BPF_FUNC_skc_to_tcp_request_sock FunctionId = 139
+ BPF_FUNC_skc_to_udp6_sock FunctionId = 140
+ BPF_FUNC_get_task_stack FunctionId = 141
+ BPF_FUNC_load_hdr_opt FunctionId = 142
+ BPF_FUNC_store_hdr_opt FunctionId = 143
+ BPF_FUNC_reserve_hdr_opt FunctionId = 144
+ BPF_FUNC_inode_storage_get FunctionId = 145
+ BPF_FUNC_inode_storage_delete FunctionId = 146
+ BPF_FUNC_d_path FunctionId = 147
+ BPF_FUNC_copy_from_user FunctionId = 148
+ BPF_FUNC_snprintf_btf FunctionId = 149
+ BPF_FUNC_seq_printf_btf FunctionId = 150
+ BPF_FUNC_skb_cgroup_classid FunctionId = 151
+ BPF_FUNC_redirect_neigh FunctionId = 152
+ BPF_FUNC_per_cpu_ptr FunctionId = 153
+ BPF_FUNC_this_cpu_ptr FunctionId = 154
+ BPF_FUNC_redirect_peer FunctionId = 155
+ BPF_FUNC_task_storage_get FunctionId = 156
+ BPF_FUNC_task_storage_delete FunctionId = 157
+ BPF_FUNC_get_current_task_btf FunctionId = 158
+ BPF_FUNC_bprm_opts_set FunctionId = 159
+ BPF_FUNC_ktime_get_coarse_ns FunctionId = 160
+ BPF_FUNC_ima_inode_hash FunctionId = 161
+ BPF_FUNC_sock_from_file FunctionId = 162
+ BPF_FUNC_check_mtu FunctionId = 163
+ BPF_FUNC_for_each_map_elem FunctionId = 164
+ BPF_FUNC_snprintf FunctionId = 165
+ BPF_FUNC_sys_bpf FunctionId = 166
+ BPF_FUNC_btf_find_by_name_kind FunctionId = 167
+ BPF_FUNC_sys_close FunctionId = 168
+ BPF_FUNC_timer_init FunctionId = 169
+ BPF_FUNC_timer_set_callback FunctionId = 170
+ BPF_FUNC_timer_start FunctionId = 171
+ BPF_FUNC_timer_cancel FunctionId = 172
+ BPF_FUNC_get_func_ip FunctionId = 173
+ BPF_FUNC_get_attach_cookie FunctionId = 174
+ BPF_FUNC_task_pt_regs FunctionId = 175
+ BPF_FUNC_get_branch_snapshot FunctionId = 176
+ BPF_FUNC_trace_vprintk FunctionId = 177
+ BPF_FUNC_skc_to_unix_sock FunctionId = 178
+ BPF_FUNC_kallsyms_lookup_name FunctionId = 179
+ BPF_FUNC_find_vma FunctionId = 180
+ BPF_FUNC_loop FunctionId = 181
+ BPF_FUNC_strncmp FunctionId = 182
+ BPF_FUNC_get_func_arg FunctionId = 183
+ BPF_FUNC_get_func_ret FunctionId = 184
+ BPF_FUNC_get_func_arg_cnt FunctionId = 185
+ BPF_FUNC_get_retval FunctionId = 186
+ BPF_FUNC_set_retval FunctionId = 187
+ BPF_FUNC_xdp_get_buff_len FunctionId = 188
+ BPF_FUNC_xdp_load_bytes FunctionId = 189
+ BPF_FUNC_xdp_store_bytes FunctionId = 190
+ BPF_FUNC_copy_from_user_task FunctionId = 191
+ BPF_FUNC_skb_set_tstamp FunctionId = 192
+ BPF_FUNC_ima_file_hash FunctionId = 193
+ __BPF_FUNC_MAX_ID FunctionId = 194
+)
+
+type HdrStartOff int32
+
+const (
+ BPF_HDR_START_MAC HdrStartOff = 0
+ BPF_HDR_START_NET HdrStartOff = 1
+)
+
+type LinkType int32
+
+const (
+ BPF_LINK_TYPE_UNSPEC LinkType = 0
+ BPF_LINK_TYPE_RAW_TRACEPOINT LinkType = 1
+ BPF_LINK_TYPE_TRACING LinkType = 2
+ BPF_LINK_TYPE_CGROUP LinkType = 3
+ BPF_LINK_TYPE_ITER LinkType = 4
+ BPF_LINK_TYPE_NETNS LinkType = 5
+ BPF_LINK_TYPE_XDP LinkType = 6
+ BPF_LINK_TYPE_PERF_EVENT LinkType = 7
+ BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8
+ MAX_BPF_LINK_TYPE LinkType = 9
+)
+
+type MapType int32
+
+const (
+ BPF_MAP_TYPE_UNSPEC MapType = 0
+ BPF_MAP_TYPE_HASH MapType = 1
+ BPF_MAP_TYPE_ARRAY MapType = 2
+ BPF_MAP_TYPE_PROG_ARRAY MapType = 3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4
+ BPF_MAP_TYPE_PERCPU_HASH MapType = 5
+ BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6
+ BPF_MAP_TYPE_STACK_TRACE MapType = 7
+ BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8
+ BPF_MAP_TYPE_LRU_HASH MapType = 9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10
+ BPF_MAP_TYPE_LPM_TRIE MapType = 11
+ BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12
+ BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13
+ BPF_MAP_TYPE_DEVMAP MapType = 14
+ BPF_MAP_TYPE_SOCKMAP MapType = 15
+ BPF_MAP_TYPE_CPUMAP MapType = 16
+ BPF_MAP_TYPE_XSKMAP MapType = 17
+ BPF_MAP_TYPE_SOCKHASH MapType = 18
+ BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21
+ BPF_MAP_TYPE_QUEUE MapType = 22
+ BPF_MAP_TYPE_STACK MapType = 23
+ BPF_MAP_TYPE_SK_STORAGE MapType = 24
+ BPF_MAP_TYPE_DEVMAP_HASH MapType = 25
+ BPF_MAP_TYPE_STRUCT_OPS MapType = 26
+ BPF_MAP_TYPE_RINGBUF MapType = 27
+ BPF_MAP_TYPE_INODE_STORAGE MapType = 28
+ BPF_MAP_TYPE_TASK_STORAGE MapType = 29
+ BPF_MAP_TYPE_BLOOM_FILTER MapType = 30
+)
+
+type ProgType int32
+
+const (
+ BPF_PROG_TYPE_UNSPEC ProgType = 0
+ BPF_PROG_TYPE_SOCKET_FILTER ProgType = 1
+ BPF_PROG_TYPE_KPROBE ProgType = 2
+ BPF_PROG_TYPE_SCHED_CLS ProgType = 3
+ BPF_PROG_TYPE_SCHED_ACT ProgType = 4
+ BPF_PROG_TYPE_TRACEPOINT ProgType = 5
+ BPF_PROG_TYPE_XDP ProgType = 6
+ BPF_PROG_TYPE_PERF_EVENT ProgType = 7
+ BPF_PROG_TYPE_CGROUP_SKB ProgType = 8
+ BPF_PROG_TYPE_CGROUP_SOCK ProgType = 9
+ BPF_PROG_TYPE_LWT_IN ProgType = 10
+ BPF_PROG_TYPE_LWT_OUT ProgType = 11
+ BPF_PROG_TYPE_LWT_XMIT ProgType = 12
+ BPF_PROG_TYPE_SOCK_OPS ProgType = 13
+ BPF_PROG_TYPE_SK_SKB ProgType = 14
+ BPF_PROG_TYPE_CGROUP_DEVICE ProgType = 15
+ BPF_PROG_TYPE_SK_MSG ProgType = 16
+ BPF_PROG_TYPE_RAW_TRACEPOINT ProgType = 17
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR ProgType = 18
+ BPF_PROG_TYPE_LWT_SEG6LOCAL ProgType = 19
+ BPF_PROG_TYPE_LIRC_MODE2 ProgType = 20
+ BPF_PROG_TYPE_SK_REUSEPORT ProgType = 21
+ BPF_PROG_TYPE_FLOW_DISSECTOR ProgType = 22
+ BPF_PROG_TYPE_CGROUP_SYSCTL ProgType = 23
+ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE ProgType = 24
+ BPF_PROG_TYPE_CGROUP_SOCKOPT ProgType = 25
+ BPF_PROG_TYPE_TRACING ProgType = 26
+ BPF_PROG_TYPE_STRUCT_OPS ProgType = 27
+ BPF_PROG_TYPE_EXT ProgType = 28
+ BPF_PROG_TYPE_LSM ProgType = 29
+ BPF_PROG_TYPE_SK_LOOKUP ProgType = 30
+ BPF_PROG_TYPE_SYSCALL ProgType = 31
+)
+
+type RetCode int32
+
+const (
+ BPF_OK RetCode = 0
+ BPF_DROP RetCode = 2
+ BPF_REDIRECT RetCode = 7
+ BPF_LWT_REROUTE RetCode = 128
+)
+
+type SkAction int32
+
+const (
+ SK_DROP SkAction = 0
+ SK_PASS SkAction = 1
+)
+
+type StackBuildIdStatus int32
+
+const (
+ BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0
+ BPF_STACK_BUILD_ID_VALID StackBuildIdStatus = 1
+ BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2
+)
+
+type StatsType int32
+
+const (
+ BPF_STATS_RUN_TIME StatsType = 0
+)
+
+type XdpAction int32
+
+const (
+ XDP_ABORTED XdpAction = 0
+ XDP_DROP XdpAction = 1
+ XDP_PASS XdpAction = 2
+ XDP_TX XdpAction = 3
+ XDP_REDIRECT XdpAction = 4
+)
+
+type BtfInfo struct {
+ Btf Pointer
+ BtfSize uint32
+ Id BTFID
+ Name Pointer
+ NameLen uint32
+ KernelBtf uint32
+}
+
+type FuncInfo struct {
+ InsnOff uint32
+ TypeId uint32
+}
+
+type LineInfo struct {
+ InsnOff uint32
+ FileNameOff uint32
+ LineOff uint32
+ LineCol uint32
+}
+
+type LinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ Extra [16]uint8
+}
+
+type MapInfo struct {
+ Type uint32
+ Id uint32
+ KeySize uint32
+ ValueSize uint32
+ MaxEntries uint32
+ MapFlags uint32
+ Name ObjName
+ Ifindex uint32
+ BtfVmlinuxValueTypeId uint32
+ NetnsDev uint64
+ NetnsIno uint64
+ BtfId uint32
+ BtfKeyTypeId uint32
+ BtfValueTypeId uint32
+ _ [4]byte
+ MapExtra uint64
+}
+
+type ProgInfo struct {
+ Type uint32
+ Id uint32
+ Tag [8]uint8
+ JitedProgLen uint32
+ XlatedProgLen uint32
+ JitedProgInsns uint64
+ XlatedProgInsns Pointer
+ LoadTime uint64
+ CreatedByUid uint32
+ NrMapIds uint32
+ MapIds Pointer
+ Name ObjName
+ Ifindex uint32
+ _ [4]byte /* unsupported bitfield */
+ NetnsDev uint64
+ NetnsIno uint64
+ NrJitedKsyms uint32
+ NrJitedFuncLens uint32
+ JitedKsyms uint64
+ JitedFuncLens uint64
+ BtfId uint32
+ FuncInfoRecSize uint32
+ FuncInfo uint64
+ NrFuncInfo uint32
+ NrLineInfo uint32
+ LineInfo uint64
+ JitedLineInfo uint64
+ NrJitedLineInfo uint32
+ LineInfoRecSize uint32
+ JitedLineInfoRecSize uint32
+ NrProgTags uint32
+ ProgTags uint64
+ RunTimeNs uint64
+ RunCnt uint64
+ RecursionMisses uint64
+ VerifiedInsns uint32
+ _ [4]byte
+}
+
+type SkLookup struct {
+ Cookie uint64
+ Family uint32
+ Protocol uint32
+ RemoteIp4 [4]uint8
+ RemoteIp6 [16]uint8
+ RemotePort uint16
+ _ [2]byte
+ LocalIp4 [4]uint8
+ LocalIp6 [16]uint8
+ LocalPort uint32
+ IngressIfindex uint32
+ _ [4]byte
+}
+
+type XdpMd struct {
+ Data uint32
+ DataEnd uint32
+ DataMeta uint32
+ IngressIfindex uint32
+ RxQueueIndex uint32
+ EgressIfindex uint32
+}
+
+type BtfGetFdByIdAttr struct{ Id uint32 }
+
+func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) {
+ fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type BtfGetNextIdAttr struct {
+ Id BTFID
+ NextId BTFID
+}
+
+func BtfGetNextId(attr *BtfGetNextIdAttr) error {
+ _, err := BPF(BPF_BTF_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type BtfLoadAttr struct {
+ Btf Pointer
+ BtfLogBuf Pointer
+ BtfSize uint32
+ BtfLogSize uint32
+ BtfLogLevel uint32
+ _ [4]byte
+}
+
+func BtfLoad(attr *BtfLoadAttr) (*FD, error) {
+ fd, err := BPF(BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type EnableStatsAttr struct{ Type uint32 }
+
+func EnableStats(attr *EnableStatsAttr) (*FD, error) {
+ fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type IterCreateAttr struct {
+ LinkFd uint32
+ Flags uint32
+}
+
+func IterCreate(attr *IterCreateAttr) (*FD, error) {
+ fd, err := BPF(BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ TargetBtfId uint32
+ _ [28]byte
+}
+
+func LinkCreate(attr *LinkCreateAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateIterAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ IterInfo Pointer
+ IterInfoLen uint32
+ _ [20]byte
+}
+
+func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreatePerfEventAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ BpfCookie uint64
+ _ [24]byte
+}
+
+func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkUpdateAttr struct {
+ LinkFd uint32
+ NewProgFd uint32
+ Flags uint32
+ OldProgFd uint32
+}
+
+func LinkUpdate(attr *LinkUpdateAttr) error {
+ _, err := BPF(BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapCreateAttr struct {
+ MapType MapType
+ KeySize uint32
+ ValueSize uint32
+ MaxEntries uint32
+ MapFlags uint32
+ InnerMapFd uint32
+ NumaNode uint32
+ MapName ObjName
+ MapIfindex uint32
+ BtfFd uint32
+ BtfKeyTypeId uint32
+ BtfValueTypeId uint32
+ BtfVmlinuxValueTypeId uint32
+ MapExtra uint64
+}
+
+func MapCreate(attr *MapCreateAttr) (*FD, error) {
+ fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type MapDeleteBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapDeleteBatch(attr *MapDeleteBatchAttr) error {
+ _, err := BPF(BPF_MAP_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapDeleteElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapDeleteElem(attr *MapDeleteElemAttr) error {
+ _, err := BPF(BPF_MAP_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapFreezeAttr struct{ MapFd uint32 }
+
+func MapFreeze(attr *MapFreezeAttr) error {
+ _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapGetFdByIdAttr struct{ Id uint32 }
+
+func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) {
+ fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type MapGetNextIdAttr struct {
+ Id uint32
+ NextId uint32
+}
+
+func MapGetNextId(attr *MapGetNextIdAttr) error {
+ _, err := BPF(BPF_MAP_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapGetNextKeyAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ NextKey Pointer
+}
+
+func MapGetNextKey(attr *MapGetNextKeyAttr) error {
+ _, err := BPF(BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupAndDeleteBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupAndDeleteElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapLookupBatch(attr *MapLookupBatchAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapLookupElem(attr *MapLookupElemAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapUpdateBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapUpdateBatch(attr *MapUpdateBatchAttr) error {
+ _, err := BPF(BPF_MAP_UPDATE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapUpdateElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapUpdateElem(attr *MapUpdateElemAttr) error {
+ _, err := BPF(BPF_MAP_UPDATE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ObjGetAttr struct {
+ Pathname Pointer
+ BpfFd uint32
+ FileFlags uint32
+}
+
+func ObjGet(attr *ObjGetAttr) (*FD, error) {
+ fd, err := BPF(BPF_OBJ_GET, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type ObjGetInfoByFdAttr struct {
+ BpfFd uint32
+ InfoLen uint32
+ Info Pointer
+}
+
+func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error {
+ _, err := BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ObjPinAttr struct {
+ Pathname Pointer
+ BpfFd uint32
+ FileFlags uint32
+}
+
+func ObjPin(attr *ObjPinAttr) error {
+ _, err := BPF(BPF_OBJ_PIN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgAttachAttr struct {
+ TargetFd uint32
+ AttachBpfFd uint32
+ AttachType uint32
+ AttachFlags uint32
+ ReplaceBpfFd uint32
+}
+
+func ProgAttach(attr *ProgAttachAttr) error {
+ _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgBindMapAttr struct {
+ ProgFd uint32
+ MapFd uint32
+ Flags uint32
+}
+
+func ProgBindMap(attr *ProgBindMapAttr) error {
+ _, err := BPF(BPF_PROG_BIND_MAP, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgDetachAttr struct {
+ TargetFd uint32
+ AttachBpfFd uint32
+ AttachType uint32
+}
+
+func ProgDetach(attr *ProgDetachAttr) error {
+ _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgGetFdByIdAttr struct{ Id uint32 }
+
+func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) {
+ fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type ProgGetNextIdAttr struct {
+ Id uint32
+ NextId uint32
+}
+
+func ProgGetNextId(attr *ProgGetNextIdAttr) error {
+ _, err := BPF(BPF_PROG_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgLoadAttr struct {
+ ProgType ProgType
+ InsnCnt uint32
+ Insns Pointer
+ License Pointer
+ LogLevel uint32
+ LogSize uint32
+ LogBuf Pointer
+ KernVersion uint32
+ ProgFlags uint32
+ ProgName ObjName
+ ProgIfindex uint32
+ ExpectedAttachType AttachType
+ ProgBtfFd uint32
+ FuncInfoRecSize uint32
+ FuncInfo Pointer
+ FuncInfoCnt uint32
+ LineInfoRecSize uint32
+ LineInfo Pointer
+ LineInfoCnt uint32
+ AttachBtfId uint32
+ AttachProgFd uint32
+ CoreReloCnt uint32
+ FdArray Pointer
+ CoreRelos Pointer
+ CoreReloRecSize uint32
+ _ [4]byte
+}
+
+func ProgLoad(attr *ProgLoadAttr) (*FD, error) {
+ fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type ProgRunAttr struct {
+ ProgFd uint32
+ Retval uint32
+ DataSizeIn uint32
+ DataSizeOut uint32
+ DataIn Pointer
+ DataOut Pointer
+ Repeat uint32
+ Duration uint32
+ CtxSizeIn uint32
+ CtxSizeOut uint32
+ CtxIn Pointer
+ CtxOut Pointer
+ Flags uint32
+ Cpu uint32
+ BatchSize uint32
+ _ [4]byte
+}
+
+func ProgRun(attr *ProgRunAttr) error {
+ _, err := BPF(BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type RawTracepointOpenAttr struct {
+ Name Pointer
+ ProgFd uint32
+ _ [4]byte
+}
+
+func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) {
+ fd, err := BPF(BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type CgroupLinkInfo struct {
+ CgroupId uint64
+ AttachType AttachType
+ _ [4]byte
+}
+
+type IterLinkInfo struct {
+ TargetName Pointer
+ TargetNameLen uint32
+}
+
+type NetNsLinkInfo struct {
+ NetnsIno uint32
+ AttachType AttachType
+}
+
+type RawTracepointLinkInfo struct {
+ TpName Pointer
+ TpNameLen uint32
+ _ [4]byte
+}
+
+type TracingLinkInfo struct {
+ AttachType AttachType
+ TargetObjId uint32
+ TargetBtfId uint32
+}
+
+type XDPLinkInfo struct{ Ifindex uint32 }
diff --git a/vendor/github.com/cilium/ebpf/internal/syscall.go b/vendor/github.com/cilium/ebpf/internal/syscall.go
deleted file mode 100644
index c80815131..000000000
--- a/vendor/github.com/cilium/ebpf/internal/syscall.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package internal
-
-import (
- "fmt"
- "path/filepath"
- "runtime"
- "unsafe"
-
- "github.com/cilium/ebpf/internal/unix"
-)
-
-//go:generate stringer -output syscall_string.go -type=BPFCmd
-
-// BPFCmd identifies a subcommand of the bpf syscall.
-type BPFCmd int
-
-// Well known BPF commands.
-const (
- BPF_MAP_CREATE BPFCmd = iota
- BPF_MAP_LOOKUP_ELEM
- BPF_MAP_UPDATE_ELEM
- BPF_MAP_DELETE_ELEM
- BPF_MAP_GET_NEXT_KEY
- BPF_PROG_LOAD
- BPF_OBJ_PIN
- BPF_OBJ_GET
- BPF_PROG_ATTACH
- BPF_PROG_DETACH
- BPF_PROG_TEST_RUN
- BPF_PROG_GET_NEXT_ID
- BPF_MAP_GET_NEXT_ID
- BPF_PROG_GET_FD_BY_ID
- BPF_MAP_GET_FD_BY_ID
- BPF_OBJ_GET_INFO_BY_FD
- BPF_PROG_QUERY
- BPF_RAW_TRACEPOINT_OPEN
- BPF_BTF_LOAD
- BPF_BTF_GET_FD_BY_ID
- BPF_TASK_FD_QUERY
- BPF_MAP_LOOKUP_AND_DELETE_ELEM
- BPF_MAP_FREEZE
- BPF_BTF_GET_NEXT_ID
- BPF_MAP_LOOKUP_BATCH
- BPF_MAP_LOOKUP_AND_DELETE_BATCH
- BPF_MAP_UPDATE_BATCH
- BPF_MAP_DELETE_BATCH
- BPF_LINK_CREATE
- BPF_LINK_UPDATE
- BPF_LINK_GET_FD_BY_ID
- BPF_LINK_GET_NEXT_ID
- BPF_ENABLE_STATS
- BPF_ITER_CREATE
-)
-
-// BPF wraps SYS_BPF.
-//
-// Any pointers contained in attr must use the Pointer type from this package.
-func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
- r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
- runtime.KeepAlive(attr)
-
- var err error
- if errNo != 0 {
- err = errNo
- }
-
- return r1, err
-}
-
-type BPFProgAttachAttr struct {
- TargetFd uint32
- AttachBpfFd uint32
- AttachType uint32
- AttachFlags uint32
- ReplaceBpfFd uint32
-}
-
-func BPFProgAttach(attr *BPFProgAttachAttr) error {
- _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
- return err
-}
-
-type BPFProgDetachAttr struct {
- TargetFd uint32
- AttachBpfFd uint32
- AttachType uint32
-}
-
-func BPFProgDetach(attr *BPFProgDetachAttr) error {
- _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
- return err
-}
-
-type BPFEnableStatsAttr struct {
- StatsType uint32
-}
-
-func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) {
- ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
- if err != nil {
- return nil, fmt.Errorf("enable stats: %w", err)
- }
- return NewFD(uint32(ptr)), nil
-
-}
-
-type bpfObjAttr struct {
- fileName Pointer
- fd uint32
- fileFlags uint32
-}
-
-const bpfFSType = 0xcafe4a11
-
-// BPFObjPin wraps BPF_OBJ_PIN.
-func BPFObjPin(fileName string, fd *FD) error {
- dirName := filepath.Dir(fileName)
- var statfs unix.Statfs_t
- if err := unix.Statfs(dirName, &statfs); err != nil {
- return err
- }
- if uint64(statfs.Type) != bpfFSType {
- return fmt.Errorf("%s is not on a bpf filesystem", fileName)
- }
-
- value, err := fd.Value()
- if err != nil {
- return err
- }
-
- attr := bpfObjAttr{
- fileName: NewStringPointer(fileName),
- fd: value,
- }
- _, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
- if err != nil {
- return fmt.Errorf("pin object %s: %w", fileName, err)
- }
- return nil
-}
-
-// BPFObjGet wraps BPF_OBJ_GET.
-func BPFObjGet(fileName string) (*FD, error) {
- attr := bpfObjAttr{
- fileName: NewStringPointer(fileName),
- }
- ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
- if err != nil {
- return nil, fmt.Errorf("get object %s: %w", fileName, err)
- }
- return NewFD(uint32(ptr)), nil
-}
-
-type bpfObjGetInfoByFDAttr struct {
- fd uint32
- infoLen uint32
- info Pointer
-}
-
-// BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD.
-//
-// Available from 4.13.
-func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error {
- value, err := fd.Value()
- if err != nil {
- return err
- }
-
- attr := bpfObjGetInfoByFDAttr{
- fd: value,
- infoLen: uint32(size),
- info: NewPointer(info),
- }
- _, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
- if err != nil {
- return fmt.Errorf("fd %v: %w", fd, err)
- }
- return nil
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/syscall_string.go b/vendor/github.com/cilium/ebpf/internal/syscall_string.go
deleted file mode 100644
index 85df04779..000000000
--- a/vendor/github.com/cilium/ebpf/internal/syscall_string.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT.
-
-package internal
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[BPF_MAP_CREATE-0]
- _ = x[BPF_MAP_LOOKUP_ELEM-1]
- _ = x[BPF_MAP_UPDATE_ELEM-2]
- _ = x[BPF_MAP_DELETE_ELEM-3]
- _ = x[BPF_MAP_GET_NEXT_KEY-4]
- _ = x[BPF_PROG_LOAD-5]
- _ = x[BPF_OBJ_PIN-6]
- _ = x[BPF_OBJ_GET-7]
- _ = x[BPF_PROG_ATTACH-8]
- _ = x[BPF_PROG_DETACH-9]
- _ = x[BPF_PROG_TEST_RUN-10]
- _ = x[BPF_PROG_GET_NEXT_ID-11]
- _ = x[BPF_MAP_GET_NEXT_ID-12]
- _ = x[BPF_PROG_GET_FD_BY_ID-13]
- _ = x[BPF_MAP_GET_FD_BY_ID-14]
- _ = x[BPF_OBJ_GET_INFO_BY_FD-15]
- _ = x[BPF_PROG_QUERY-16]
- _ = x[BPF_RAW_TRACEPOINT_OPEN-17]
- _ = x[BPF_BTF_LOAD-18]
- _ = x[BPF_BTF_GET_FD_BY_ID-19]
- _ = x[BPF_TASK_FD_QUERY-20]
- _ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21]
- _ = x[BPF_MAP_FREEZE-22]
- _ = x[BPF_BTF_GET_NEXT_ID-23]
- _ = x[BPF_MAP_LOOKUP_BATCH-24]
- _ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25]
- _ = x[BPF_MAP_UPDATE_BATCH-26]
- _ = x[BPF_MAP_DELETE_BATCH-27]
- _ = x[BPF_LINK_CREATE-28]
- _ = x[BPF_LINK_UPDATE-29]
- _ = x[BPF_LINK_GET_FD_BY_ID-30]
- _ = x[BPF_LINK_GET_NEXT_ID-31]
- _ = x[BPF_ENABLE_STATS-32]
- _ = x[BPF_ITER_CREATE-33]
-}
-
-const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE"
-
-var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617}
-
-func (i BPFCmd) String() string {
- if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) {
- return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]]
-}
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
index 86d2a10f9..db4a1f5bf 100644
--- a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
@@ -1,9 +1,9 @@
+//go:build linux
// +build linux
package unix
import (
- "bytes"
"syscall"
linux "golang.org/x/sys/unix"
@@ -20,16 +20,27 @@ const (
EPERM = linux.EPERM
ESRCH = linux.ESRCH
ENODEV = linux.ENODEV
+ EBADF = linux.EBADF
+ E2BIG = linux.E2BIG
+ EFAULT = linux.EFAULT
+ EACCES = linux.EACCES
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP
ENOTSUPP = syscall.Errno(0x20c)
- EBADF = linux.EBADF
BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC
BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE
+ BPF_F_RDONLY = linux.BPF_F_RDONLY
+ BPF_F_WRONLY = linux.BPF_F_WRONLY
BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG
BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG
+ BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE
+ BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE
+ BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP
BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
BPF_TAG_SIZE = linux.BPF_TAG_SIZE
+ BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT
+ BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT
+ BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ
SYS_BPF = linux.SYS_BPF
F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC
EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD
@@ -39,27 +50,36 @@ const (
PROT_READ = linux.PROT_READ
PROT_WRITE = linux.PROT_WRITE
MAP_SHARED = linux.MAP_SHARED
+ PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1
PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE
+ PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT
PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT
+ PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE
+ PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE
+ PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF
PerfBitWatermark = linux.PerfBitWatermark
PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW
PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC
RLIM_INFINITY = linux.RLIM_INFINITY
RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK
BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME
+ PERF_RECORD_LOST = linux.PERF_RECORD_LOST
+ PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE
+ AT_FDCWD = linux.AT_FDCWD
+ RENAME_NOREPLACE = linux.RENAME_NOREPLACE
+ SO_ATTACH_BPF = linux.SO_ATTACH_BPF
+ SO_DETACH_BPF = linux.SO_DETACH_BPF
+ SOL_SOCKET = linux.SOL_SOCKET
)
// Statfs_t is a wrapper
type Statfs_t = linux.Statfs_t
+type Stat_t = linux.Stat_t
+
// Rlimit is a wrapper
type Rlimit = linux.Rlimit
-// Setrlimit is a wrapper
-func Setrlimit(resource int, rlim *Rlimit) (err error) {
- return linux.Setrlimit(resource, rlim)
-}
-
// Syscall is a wrapper
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
return linux.Syscall(trap, a1, a2, a3)
@@ -70,6 +90,11 @@ func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
return linux.FcntlInt(fd, cmd, arg)
}
+// IoctlSetInt is a wrapper
+func IoctlSetInt(fd int, req uint, value int) error {
+ return linux.IoctlSetInt(fd, req, value)
+}
+
// Statfs is a wrapper
func Statfs(path string, buf *Statfs_t) (err error) {
return linux.Statfs(path, buf)
@@ -157,14 +182,29 @@ func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
return linux.Tgkill(tgid, tid, sig)
}
-func KernelRelease() (string, error) {
- var uname Utsname
- err := Uname(&uname)
- if err != nil {
- return "", err
- }
+// BytePtrFromString is a wrapper
+func BytePtrFromString(s string) (*byte, error) {
+ return linux.BytePtrFromString(s)
+}
+
+// ByteSliceToString is a wrapper
+func ByteSliceToString(s []byte) string {
+ return linux.ByteSliceToString(s)
+}
+
+// Renameat2 is a wrapper
+func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
+ return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags)
+}
+
+func Prlimit(pid, resource int, new, old *Rlimit) error {
+ return linux.Prlimit(pid, resource, new, old)
+}
+
+func Open(path string, mode int, perm uint32) (int, error) {
+ return linux.Open(path, mode, perm)
+}
- end := bytes.IndexByte(uname.Release[:], 0)
- release := string(uname.Release[:end])
- return release, nil
+func Fstat(fd int, stat *Stat_t) error {
+ return linux.Fstat(fd, stat)
}
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
index 8c291796a..133c267db 100644
--- a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
@@ -1,3 +1,4 @@
+//go:build !linux
// +build !linux
package unix
@@ -21,15 +22,26 @@ const (
ESRCH = syscall.ESRCH
ENODEV = syscall.ENODEV
EBADF = syscall.Errno(0)
+ E2BIG = syscall.Errno(0)
+ EFAULT = syscall.EFAULT
+ EACCES = syscall.Errno(0)
// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP
ENOTSUPP = syscall.Errno(0x20c)
BPF_F_NO_PREALLOC = 0
BPF_F_NUMA_NODE = 0
+ BPF_F_RDONLY = 0
+ BPF_F_WRONLY = 0
BPF_F_RDONLY_PROG = 0
BPF_F_WRONLY_PROG = 0
+ BPF_F_SLEEPABLE = 0
+ BPF_F_MMAPABLE = 0
+ BPF_F_INNER_MAP = 0
BPF_OBJ_NAME_LEN = 0x10
BPF_TAG_SIZE = 0x8
+ BPF_RINGBUF_BUSY_BIT = 0
+ BPF_RINGBUF_DISCARD_BIT = 0
+ BPF_RINGBUF_HDR_SZ = 0
SYS_BPF = 321
F_DUPFD_CLOEXEC = 0x406
EPOLLIN = 0x1
@@ -40,14 +52,26 @@ const (
PROT_READ = 0x1
PROT_WRITE = 0x2
MAP_SHARED = 0x1
+ PERF_ATTR_SIZE_VER1 = 0
PERF_TYPE_SOFTWARE = 0x1
+ PERF_TYPE_TRACEPOINT = 0
PERF_COUNT_SW_BPF_OUTPUT = 0xa
+ PERF_EVENT_IOC_DISABLE = 0
+ PERF_EVENT_IOC_ENABLE = 0
+ PERF_EVENT_IOC_SET_BPF = 0
PerfBitWatermark = 0x4000
PERF_SAMPLE_RAW = 0x400
PERF_FLAG_FD_CLOEXEC = 0x8
RLIM_INFINITY = 0x7fffffffffffffff
RLIMIT_MEMLOCK = 8
BPF_STATS_RUN_TIME = 0
+ PERF_RECORD_LOST = 2
+ PERF_RECORD_SAMPLE = 9
+ AT_FDCWD = -0x2
+ RENAME_NOREPLACE = 0x1
+ SO_ATTACH_BPF = 0x32
+ SO_DETACH_BPF = 0x1b
+ SOL_SOCKET = 0x1
)
// Statfs_t is a wrapper
@@ -66,17 +90,14 @@ type Statfs_t struct {
Spare [4]int64
}
+type Stat_t struct{}
+
// Rlimit is a wrapper
type Rlimit struct {
Cur uint64
Max uint64
}
-// Setrlimit is a wrapper
-func Setrlimit(resource int, rlim *Rlimit) (err error) {
- return errNonLinux
-}
-
// Syscall is a wrapper
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
return 0, 0, syscall.Errno(1)
@@ -87,6 +108,11 @@ func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
return -1, errNonLinux
}
+// IoctlSetInt is a wrapper
+func IoctlSetInt(fd int, req uint, value int) error {
+ return errNonLinux
+}
+
// Statfs is a wrapper
func Statfs(path string, buf *Statfs_t) error {
return errNonLinux
@@ -201,6 +227,7 @@ func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int
// Utsname is a wrapper
type Utsname struct {
Release [65]byte
+ Version [65]byte
}
// Uname is a wrapper
@@ -223,6 +250,29 @@ func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
return errNonLinux
}
-func KernelRelease() (string, error) {
- return "", errNonLinux
+// BytePtrFromString is a wrapper
+func BytePtrFromString(s string) (*byte, error) {
+ return nil, errNonLinux
+}
+
+// ByteSliceToString is a wrapper
+func ByteSliceToString(s []byte) string {
+ return ""
+}
+
+// Renameat2 is a wrapper
+func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
+ return errNonLinux
+}
+
+func Prlimit(pid, resource int, new, old *Rlimit) error {
+ return errNonLinux
+}
+
+func Open(path string, mode int, perm uint32) (int, error) {
+ return -1, errNonLinux
+}
+
+func Fstat(fd int, stat *Stat_t) error {
+ return errNonLinux
}
diff --git a/vendor/github.com/cilium/ebpf/internal/vdso.go b/vendor/github.com/cilium/ebpf/internal/vdso.go
new file mode 100644
index 000000000..ae4821de2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/vdso.go
@@ -0,0 +1,150 @@
+package internal
+
+import (
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ errAuxvNoVDSO = errors.New("no vdso address found in auxv")
+)
+
+// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library
+// linked into the current process image.
+func vdsoVersion() (uint32, error) {
+ // Read data from the auxiliary vector, which is normally passed directly
+ // to the process. Go does not expose that data, so we must read it from procfs.
+ // https://man7.org/linux/man-pages/man3/getauxval.3.html
+ av, err := os.Open("/proc/self/auxv")
+ if err != nil {
+ return 0, fmt.Errorf("opening auxv: %w", err)
+ }
+ defer av.Close()
+
+ vdsoAddr, err := vdsoMemoryAddress(av)
+ if err != nil {
+ return 0, fmt.Errorf("finding vDSO memory address: %w", err)
+ }
+
+ // Use /proc/self/mem rather than unsafe.Pointer tricks.
+ mem, err := os.Open("/proc/self/mem")
+ if err != nil {
+ return 0, fmt.Errorf("opening mem: %w", err)
+ }
+ defer mem.Close()
+
+ // Open ELF at provided memory address, as offset into /proc/self/mem.
+ c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64))
+ if err != nil {
+ return 0, fmt.Errorf("reading linux version code: %w", err)
+ }
+
+ return c, nil
+}
+
+// vdsoMemoryAddress returns the memory address of the vDSO library
+// linked into the current process image. r is an io.Reader into an auxv blob.
+func vdsoMemoryAddress(r io.Reader) (uint64, error) {
+ const (
+ _AT_NULL = 0 // End of vector
+ _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image
+ )
+
+ // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`,
+ // the address of a page containing the virtual Dynamic Shared Object (vDSO).
+ aux := struct{ Tag, Val uint64 }{}
+ for {
+ if err := binary.Read(r, NativeEndian, &aux); err != nil {
+ return 0, fmt.Errorf("reading auxv entry: %w", err)
+ }
+
+ switch aux.Tag {
+ case _AT_SYSINFO_EHDR:
+ if aux.Val != 0 {
+ return aux.Val, nil
+ }
+ return 0, fmt.Errorf("invalid vDSO address in auxv")
+ // _AT_NULL is always the last tag/val pair in the aux vector
+ // and can be treated like EOF.
+ case _AT_NULL:
+ return 0, errAuxvNoVDSO
+ }
+ }
+}
+
+// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)'
+type elfNoteHeader struct {
+ NameSize int32
+ DescSize int32
+ Type int32
+}
+
+// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in
+// the ELF notes section of the binary provided by the reader.
+func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) {
+ hdr, err := NewSafeELFFile(r)
+ if err != nil {
+ return 0, fmt.Errorf("reading vDSO ELF: %w", err)
+ }
+
+ sections := hdr.SectionsByType(elf.SHT_NOTE)
+ if len(sections) == 0 {
+ return 0, fmt.Errorf("no note section found in vDSO ELF")
+ }
+
+ for _, sec := range sections {
+ sr := sec.Open()
+ var n elfNoteHeader
+
+ // Read notes until we find one named 'Linux'.
+ for {
+ if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil {
+ if errors.Is(err, io.EOF) {
+ // We looked at all the notes in this section
+ break
+ }
+ return 0, fmt.Errorf("reading note header: %w", err)
+ }
+
+ // If a note name is defined, it follows the note header.
+ var name string
+ if n.NameSize > 0 {
+ // Read the note name, aligned to 4 bytes.
+ buf := make([]byte, Align(int(n.NameSize), 4))
+ if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil {
+ return 0, fmt.Errorf("reading note name: %w", err)
+ }
+
+ // Read nul-terminated string.
+ name = unix.ByteSliceToString(buf[:n.NameSize])
+ }
+
+ // If a note descriptor is defined, it follows the name.
+ // It is possible for a note to have a descriptor but not a name.
+ if n.DescSize > 0 {
+ // LINUX_VERSION_CODE is a uint32 value.
+ if name == "Linux" && n.DescSize == 4 && n.Type == 0 {
+ var version uint32
+ if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil {
+ return 0, fmt.Errorf("reading note descriptor: %w", err)
+ }
+ return version, nil
+ }
+
+ // Discard the note descriptor if it exists but we're not interested in it.
+ if _, err := io.CopyN(io.Discard, sr, int64(Align(int(n.DescSize), 4))); err != nil {
+ return 0, err
+ }
+ }
+ }
+ }
+
+ return 0, fmt.Errorf("no Linux note in ELF")
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go
new file mode 100644
index 000000000..370e01e44
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/version.go
@@ -0,0 +1,122 @@
+package internal
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+const (
+ // Version constant used in ELF binaries indicating that the loader needs to
+ // substitute the eBPF program's version with the value of the kernel's
+ // KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf
+ // and RedSift.
+ MagicKernelVersion = 0xFFFFFFFE
+)
+
+var (
+ kernelVersion = struct {
+ once sync.Once
+ version Version
+ err error
+ }{}
+)
+
+// A Version in the form Major.Minor.Patch.
+type Version [3]uint16
+
+// NewVersion creates a version from a string like "Major.Minor.Patch".
+//
+// Patch is optional.
+func NewVersion(ver string) (Version, error) {
+ var major, minor, patch uint16
+ n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch)
+ if n < 2 {
+ return Version{}, fmt.Errorf("invalid version: %s", ver)
+ }
+ return Version{major, minor, patch}, nil
+}
+
+// NewVersionFromCode creates a version from a LINUX_VERSION_CODE.
+func NewVersionFromCode(code uint32) Version {
+ return Version{
+ uint16(uint8(code >> 16)),
+ uint16(uint8(code >> 8)),
+ uint16(uint8(code)),
+ }
+}
+
+func (v Version) String() string {
+ if v[2] == 0 {
+ return fmt.Sprintf("v%d.%d", v[0], v[1])
+ }
+ return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2])
+}
+
+// Less returns true if the version is less than another version.
+func (v Version) Less(other Version) bool {
+ for i, a := range v {
+ if a == other[i] {
+ continue
+ }
+ return a < other[i]
+ }
+ return false
+}
+
+// Unspecified returns true if the version is all zero.
+func (v Version) Unspecified() bool {
+ return v[0] == 0 && v[1] == 0 && v[2] == 0
+}
+
+// Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h.
+// It represents the kernel version and patch level as a single value.
+func (v Version) Kernel() uint32 {
+
+ // Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid
+ // overflowing into PATCHLEVEL.
+ // See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255").
+ s := v[2]
+ if s > 255 {
+ s = 255
+ }
+
+ // Truncate members to uint8 to prevent them from spilling over into
+ // each other when overflowing 8 bits.
+ return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s))
+}
+
+// KernelVersion returns the version of the currently running kernel.
+func KernelVersion() (Version, error) {
+ kernelVersion.once.Do(func() {
+ kernelVersion.version, kernelVersion.err = detectKernelVersion()
+ })
+
+ if kernelVersion.err != nil {
+ return Version{}, kernelVersion.err
+ }
+ return kernelVersion.version, nil
+}
+
+// detectKernelVersion returns the version of the running kernel.
+func detectKernelVersion() (Version, error) {
+ vc, err := vdsoVersion()
+ if err != nil {
+ return Version{}, err
+ }
+ return NewVersionFromCode(vc), nil
+}
+
+// KernelRelease returns the release string of the running kernel.
+// Its format depends on the Linux distribution and corresponds to directory
+// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and
+// 4.19.0-16-amd64.
+func KernelRelease() (string, error) {
+ var uname unix.Utsname
+ if err := unix.Uname(&uname); err != nil {
+ return "", fmt.Errorf("uname failed: %w", err)
+ }
+
+ return unix.ByteSliceToString(uname.Release[:]), nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go
index 16a943930..003b0638e 100644
--- a/vendor/github.com/cilium/ebpf/link/cgroup.go
+++ b/vendor/github.com/cilium/ebpf/link/cgroup.go
@@ -56,16 +56,6 @@ func AttachCgroup(opts CgroupOptions) (Link, error) {
return cg, nil
}
-// LoadPinnedCgroup loads a pinned cgroup from a bpffs.
-func LoadPinnedCgroup(fileName string) (Link, error) {
- link, err := LoadPinnedRawLink(fileName)
- if err != nil {
- return nil, err
- }
-
- return &linkCgroup{link}, nil
-}
-
type progAttachCgroup struct {
cgroup *os.File
current *ebpf.Program
@@ -147,14 +137,20 @@ func (cg *progAttachCgroup) Pin(string) error {
return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported)
}
+func (cg *progAttachCgroup) Unpin() error {
+ return fmt.Errorf("can't unpin cgroup: %w", ErrNotSupported)
+}
+
+func (cg *progAttachCgroup) Info() (*Info, error) {
+ return nil, fmt.Errorf("can't get cgroup info: %w", ErrNotSupported)
+}
+
type linkCgroup struct {
- *RawLink
+ RawLink
}
var _ Link = (*linkCgroup)(nil)
-func (cg *linkCgroup) isLink() {}
-
func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) {
link, err := AttachRawLink(RawLinkOptions{
Target: int(cgroup.Fd()),
@@ -165,5 +161,5 @@ func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program)
return nil, err
}
- return &linkCgroup{link}, err
+ return &linkCgroup{*link}, err
}
diff --git a/vendor/github.com/cilium/ebpf/link/iter.go b/vendor/github.com/cilium/ebpf/link/iter.go
index 2b5f2846a..d2b32ef33 100644
--- a/vendor/github.com/cilium/ebpf/link/iter.go
+++ b/vendor/github.com/cilium/ebpf/link/iter.go
@@ -3,8 +3,10 @@ package link
import (
"fmt"
"io"
+ "unsafe"
"github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
)
type IterOptions struct {
@@ -15,77 +17,69 @@ type IterOptions struct {
// AttachTo requires the kernel to include BTF of itself,
// and it to be compiled with a recent pahole (>= 1.16).
Program *ebpf.Program
+
+ // Map specifies the target map for bpf_map_elem and sockmap iterators.
+ // It may be nil.
+ Map *ebpf.Map
}
// AttachIter attaches a BPF seq_file iterator.
func AttachIter(opts IterOptions) (*Iter, error) {
- link, err := AttachRawLink(RawLinkOptions{
- Program: opts.Program,
- Attach: ebpf.AttachTraceIter,
- })
- if err != nil {
- return nil, fmt.Errorf("can't link iterator: %w", err)
+ if err := haveBPFLink(); err != nil {
+ return nil, err
}
- return &Iter{link}, err
-}
+ progFd := opts.Program.FD()
+ if progFd < 0 {
+ return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
+ }
+
+ var info bpfIterLinkInfoMap
+ if opts.Map != nil {
+ mapFd := opts.Map.FD()
+ if mapFd < 0 {
+ return nil, fmt.Errorf("invalid map: %w", sys.ErrClosedFd)
+ }
+ info.map_fd = uint32(mapFd)
+ }
+
+ attr := sys.LinkCreateIterAttr{
+ ProgFd: uint32(progFd),
+ AttachType: sys.AttachType(ebpf.AttachTraceIter),
+ IterInfo: sys.NewPointer(unsafe.Pointer(&info)),
+ IterInfoLen: uint32(unsafe.Sizeof(info)),
+ }
-// LoadPinnedIter loads a pinned iterator from a bpffs.
-func LoadPinnedIter(fileName string) (*Iter, error) {
- link, err := LoadPinnedRawLink(fileName)
+ fd, err := sys.LinkCreateIter(&attr)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("can't link iterator: %w", err)
}
- return &Iter{link}, err
+ return &Iter{RawLink{fd, ""}}, err
}
// Iter represents an attached bpf_iter.
type Iter struct {
- link *RawLink
-}
-
-var _ Link = (*Iter)(nil)
-
-func (it *Iter) isLink() {}
-
-// FD returns the underlying file descriptor.
-func (it *Iter) FD() int {
- return it.link.FD()
-}
-
-// Close implements Link.
-func (it *Iter) Close() error {
- return it.link.Close()
-}
-
-// Pin implements Link.
-func (it *Iter) Pin(fileName string) error {
- return it.link.Pin(fileName)
-}
-
-// Update implements Link.
-func (it *Iter) Update(new *ebpf.Program) error {
- return it.link.Update(new)
+ RawLink
}
// Open creates a new instance of the iterator.
//
// Reading from the returned reader triggers the BPF program.
func (it *Iter) Open() (io.ReadCloser, error) {
- linkFd, err := it.link.fd.Value()
- if err != nil {
- return nil, err
+ attr := &sys.IterCreateAttr{
+ LinkFd: it.fd.Uint(),
}
- attr := &bpfIterCreateAttr{
- linkFd: linkFd,
- }
-
- fd, err := bpfIterCreate(attr)
+ fd, err := sys.IterCreate(attr)
if err != nil {
return nil, fmt.Errorf("can't create iterator: %w", err)
}
return fd.File("bpf_iter"), nil
}
+
+// union bpf_iter_link_info.map
+type bpfIterLinkInfoMap struct {
+ map_fd uint32
+}
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go
new file mode 100644
index 000000000..fdf622a0c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/kprobe.go
@@ -0,0 +1,568 @@
+package link
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "syscall"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events")
+
+ kprobeRetprobeBit = struct {
+ once sync.Once
+ value uint64
+ err error
+ }{}
+)
+
+type probeType uint8
+
+type probeArgs struct {
+ symbol, group, path string
+ offset, refCtrOffset, cookie uint64
+ pid int
+ ret bool
+}
+
+// KprobeOptions defines additional parameters that will be used
+// when loading Kprobes.
+type KprobeOptions struct {
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ //
+ // Needs kernel 5.15+.
+ Cookie uint64
+ // Offset of the kprobe relative to the traced symbol.
+ // Can be used to insert kprobes at arbitrary offsets in kernel functions,
+ // e.g. in places where functions have been inlined.
+ Offset uint64
+}
+
+const (
+ kprobeType probeType = iota
+ uprobeType
+)
+
+func (pt probeType) String() string {
+ if pt == kprobeType {
+ return "kprobe"
+ }
+ return "uprobe"
+}
+
+func (pt probeType) EventsPath() string {
+ if pt == kprobeType {
+ return kprobeEventsPath
+ }
+ return uprobeEventsPath
+}
+
+func (pt probeType) PerfEventType(ret bool) perfEventType {
+ if pt == kprobeType {
+ if ret {
+ return kretprobeEvent
+ }
+ return kprobeEvent
+ }
+ if ret {
+ return uretprobeEvent
+ }
+ return uprobeEvent
+}
+
+func (pt probeType) RetprobeBit() (uint64, error) {
+ if pt == kprobeType {
+ return kretprobeBit()
+ }
+ return uretprobeBit()
+}
+
+// Kprobe attaches the given eBPF program to a perf event that fires when the
+// given kernel symbol starts executing. See /proc/kallsyms for available
+// symbols. For example, printk():
+//
+// kp, err := Kprobe("printk", prog, nil)
+//
+// Losing the reference to the resulting Link (kp) will close the Kprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
+ k, err := kprobe(symbol, prog, opts, false)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(k, prog)
+ if err != nil {
+ k.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// Kretprobe attaches the given eBPF program to a perf event that fires right
+// before the given kernel symbol exits, with the function stack left intact.
+// See /proc/kallsyms for available symbols. For example, printk():
+//
+// kp, err := Kretprobe("printk", prog, nil)
+//
+// Losing the reference to the resulting Link (kp) will close the Kretprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
+ k, err := kprobe(symbol, prog, opts, true)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(k, prog)
+ if err != nil {
+ k.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// isValidKprobeSymbol implements the equivalent of a regex match
+// against "^[a-zA-Z_][0-9a-zA-Z_.]*$".
+func isValidKprobeSymbol(s string) bool {
+ if len(s) < 1 {
+ return false
+ }
+
+ for i, c := range []byte(s) {
+ switch {
+ case c >= 'a' && c <= 'z':
+ case c >= 'A' && c <= 'Z':
+ case c == '_':
+ case i > 0 && c >= '0' && c <= '9':
+
+ // Allow `.` in symbol name. GCC-compiled kernel may change symbol name
+ // to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`.
+ // See: https://gcc.gnu.org/gcc-10/changes.html
+ case i > 0 && c == '.':
+
+ default:
+ return false
+ }
+ }
+
+ return true
+}
+
+// kprobe opens a perf event on the given symbol and attaches prog to it.
+// If ret is true, create a kretprobe.
+func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) {
+ if symbol == "" {
+ return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput)
+ }
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if !isValidKprobeSymbol(symbol) {
+ return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput)
+ }
+ if prog.Type() != ebpf.Kprobe {
+ return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput)
+ }
+
+ args := probeArgs{
+ pid: perfAllThreads,
+ symbol: symbol,
+ ret: ret,
+ }
+
+ if opts != nil {
+ args.cookie = opts.Cookie
+ args.offset = opts.Offset
+ }
+
+ // Use kprobe PMU if the kernel has it available.
+ tp, err := pmuKprobe(args)
+ if errors.Is(err, os.ErrNotExist) {
+ args.symbol = platformPrefix(symbol)
+ tp, err = pmuKprobe(args)
+ }
+ if err == nil {
+ return tp, nil
+ }
+ if err != nil && !errors.Is(err, ErrNotSupported) {
+ return nil, fmt.Errorf("creating perf_kprobe PMU: %w", err)
+ }
+
+ // Use tracefs if kprobe PMU is missing.
+ args.symbol = symbol
+ tp, err = tracefsKprobe(args)
+ if errors.Is(err, os.ErrNotExist) {
+ args.symbol = platformPrefix(symbol)
+ tp, err = tracefsKprobe(args)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err)
+ }
+
+ return tp, nil
+}
+
+// pmuKprobe opens a perf event based on the kprobe PMU.
+// Returns os.ErrNotExist if the given symbol does not exist in the kernel.
+func pmuKprobe(args probeArgs) (*perfEvent, error) {
+ return pmuProbe(kprobeType, args)
+}
+
+// pmuProbe opens a perf event based on a Performance Monitoring Unit.
+//
+// Requires at least a 4.17 kernel.
+// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU"
+// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU"
+//
+// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU
+func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) {
+ // Getting the PMU type will fail if the kernel doesn't support
+ // the perf_[k,u]probe PMU.
+ et, err := getPMUEventType(typ)
+ if err != nil {
+ return nil, err
+ }
+
+ var config uint64
+ if args.ret {
+ bit, err := typ.RetprobeBit()
+ if err != nil {
+ return nil, err
+ }
+ config |= 1 << bit
+ }
+
+ var (
+ attr unix.PerfEventAttr
+ sp unsafe.Pointer
+ )
+ switch typ {
+ case kprobeType:
+ // Create a pointer to a NUL-terminated string for the kernel.
+ sp, err = unsafeStringPtr(args.symbol)
+ if err != nil {
+ return nil, err
+ }
+
+ attr = unix.PerfEventAttr{
+ // The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1,
+ // since it added the config2 (Ext2) field. Use Ext2 as probe_offset.
+ Size: unix.PERF_ATTR_SIZE_VER1,
+ Type: uint32(et), // PMU event type read from sysfs
+ Ext1: uint64(uintptr(sp)), // Kernel symbol to trace
+ Ext2: args.offset, // Kernel symbol offset
+ Config: config, // Retprobe flag
+ }
+ case uprobeType:
+ sp, err = unsafeStringPtr(args.path)
+ if err != nil {
+ return nil, err
+ }
+
+ if args.refCtrOffset != 0 {
+ config |= args.refCtrOffset << uprobeRefCtrOffsetShift
+ }
+
+ attr = unix.PerfEventAttr{
+ // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1,
+ // since it added the config2 (Ext2) field. The Size field controls the
+ // size of the internal buffer the kernel allocates for reading the
+ // perf_event_attr argument from userspace.
+ Size: unix.PERF_ATTR_SIZE_VER1,
+ Type: uint32(et), // PMU event type read from sysfs
+ Ext1: uint64(uintptr(sp)), // Uprobe path
+ Ext2: args.offset, // Uprobe offset
+ Config: config, // RefCtrOffset, Retprobe flag
+ }
+ }
+
+ rawFd, err := unix.PerfEventOpen(&attr, args.pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
+
+ // On some old kernels, kprobe PMU doesn't allow `.` in symbol names and
+ // return -EINVAL. Return ErrNotSupported to allow falling back to tracefs.
+ // https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343
+ if errors.Is(err, unix.EINVAL) && strings.Contains(args.symbol, ".") {
+ return nil, fmt.Errorf("symbol '%s+%#x': older kernels don't accept dots: %w", args.symbol, args.offset, ErrNotSupported)
+ }
+ // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
+ // when trying to create a kretprobe for a missing symbol. Make sure ENOENT
+ // is returned to the caller.
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
+ return nil, fmt.Errorf("symbol '%s+%#x' not found: %w", args.symbol, args.offset, os.ErrNotExist)
+ }
+ // Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved
+ // to an invalid insn boundary.
+ if errors.Is(err, syscall.EILSEQ) {
+ return nil, fmt.Errorf("symbol '%s+%#x' not found (bad insn boundary): %w", args.symbol, args.offset, os.ErrNotExist)
+ }
+ // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned
+ // when attempting to set a uprobe on a trap instruction.
+ if errors.Is(err, unix.ENOTSUPP) {
+ return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", args.offset, err)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("opening perf event: %w", err)
+ }
+
+ // Ensure the string pointer is not collected before PerfEventOpen returns.
+ runtime.KeepAlive(sp)
+
+ fd, err := sys.NewFD(rawFd)
+ if err != nil {
+ return nil, err
+ }
+
+ // Kernel has perf_[k,u]probe PMU available, initialize perf event.
+ return &perfEvent{
+ typ: typ.PerfEventType(args.ret),
+ name: args.symbol,
+ pmuID: et,
+ cookie: args.cookie,
+ fd: fd,
+ }, nil
+}
+
+// tracefsKprobe creates a Kprobe tracefs entry.
+func tracefsKprobe(args probeArgs) (*perfEvent, error) {
+ return tracefsProbe(kprobeType, args)
+}
+
+// tracefsProbe creates a trace event by writing an entry to <tracefs>/[k,u]probe_events.
+// A new trace event group name is generated on every call to support creating
+// multiple trace events for the same kernel or userspace symbol.
+// Path and offset are only set in the case of uprobe(s) and are used to set
+// the executable/library path on the filesystem and the offset where the probe is inserted.
+// A perf event is then opened on the newly-created trace event and returned to the caller.
+func tracefsProbe(typ probeType, args probeArgs) (_ *perfEvent, err error) {
+ // Generate a random string for each trace event we attempt to create.
+ // This value is used as the 'group' token in tracefs to allow creating
+ // multiple kprobe trace events with the same name.
+ group, err := randomGroup("ebpf")
+ if err != nil {
+ return nil, fmt.Errorf("randomizing group name: %w", err)
+ }
+ args.group = group
+
+ // Before attempting to create a trace event through tracefs,
+ // check if an event with the same group and name already exists.
+ // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
+ // entry, so we need to rely on reads for detecting uniqueness.
+ _, err = getTraceEventID(group, args.symbol)
+ if err == nil {
+ return nil, fmt.Errorf("trace event already exists: %s/%s", group, args.symbol)
+ }
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("checking trace event %s/%s: %w", group, args.symbol, err)
+ }
+
+ // Create the [k,u]probe trace event using tracefs.
+ if err := createTraceFSProbeEvent(typ, args); err != nil {
+ return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
+ }
+ defer func() {
+ if err != nil {
+ // Make sure we clean up the created tracefs event when we return error.
+ // If a livepatch handler is already active on the symbol, the write to
+ // tracefs will succeed, a trace event will show up, but creating the
+ // perf event will fail with EBUSY.
+ _ = closeTraceFSProbeEvent(typ, args.group, args.symbol)
+ }
+ }()
+
+ // Get the newly-created trace event's id.
+ tid, err := getTraceEventID(group, args.symbol)
+ if err != nil {
+ return nil, fmt.Errorf("getting trace event id: %w", err)
+ }
+
+ // Kprobes are ephemeral tracepoints and share the same perf event type.
+ fd, err := openTracepointPerfEvent(tid, args.pid)
+ if err != nil {
+ return nil, err
+ }
+
+ return &perfEvent{
+ typ: typ.PerfEventType(args.ret),
+ group: group,
+ name: args.symbol,
+ tracefsID: tid,
+ cookie: args.cookie,
+ fd: fd,
+ }, nil
+}
+
+// createTraceFSProbeEvent creates a new ephemeral trace event by writing to
+// <tracefs>/[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid
+// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
+// if a probe with the same group and symbol already exists.
+func createTraceFSProbeEvent(typ probeType, args probeArgs) error {
+ // Open the kprobe_events file in tracefs.
+ f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
+ if err != nil {
+ return fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err)
+ }
+ defer f.Close()
+
+ var pe, token string
+ switch typ {
+ case kprobeType:
+ // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
+ // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
+ // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
+ // -:[GRP/]EVENT : Clear a probe
+ //
+ // Some examples:
+ // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
+ // p:ebpf_5678/p_my_kprobe __x64_sys_execve
+ //
+ // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
+ // kernel default to NR_CPUS. This is desired in most eBPF cases since
+ // subsampling or rate limiting logic can be more accurately implemented in
+ // the eBPF program itself.
+ // See Documentation/kprobes.txt for more details.
+ token = kprobeToken(args)
+ pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, sanitizeSymbol(args.symbol), token)
+ case uprobeType:
+ // The uprobe_events syntax is as follows:
+ // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
+ // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
+ // -:[GRP/]EVENT : Clear a probe
+ //
+ // Some examples:
+ // r:ebpf_1234/readline /bin/bash:0x12345
+ // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123)
+ //
+ // See Documentation/trace/uprobetracer.txt for more details.
+ token = uprobeToken(args)
+ pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, token)
+ }
+ _, err = f.WriteString(pe)
+ // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
+ // when trying to create a kretprobe for a missing symbol. Make sure ENOENT
+ // is returned to the caller.
+ // EINVAL is also returned on pre-5.2 kernels when the `SYM[+offs]` token
+ // is resolved to an invalid insn boundary.
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
+ return fmt.Errorf("token %s: %w", token, os.ErrNotExist)
+ }
+ // Since commit ab105a4fb894, -EILSEQ is returned when a kprobe sym+offset is resolved
+ // to an invalid insn boundary.
+ if errors.Is(err, syscall.EILSEQ) {
+ return fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
+ }
+ // ERANGE is returned when the `SYM[+offs]` token is too big and cannot
+ // be resolved.
+ if errors.Is(err, syscall.ERANGE) {
+ return fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist)
+ }
+ if err != nil {
+ return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
+ }
+
+ return nil
+}
+
+// closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol
+// from <tracefs>/[k,u]probe_events.
+func closeTraceFSProbeEvent(typ probeType, group, symbol string) error {
+ f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666)
+ if err != nil {
+ return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err)
+ }
+ defer f.Close()
+
+ // See [k,u]probe_events syntax above. The probe type does not need to be specified
+ // for removals.
+ pe := fmt.Sprintf("-:%s/%s", group, sanitizeSymbol(symbol))
+ if _, err = f.WriteString(pe); err != nil {
+ return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err)
+ }
+
+ return nil
+}
+
+// randomGroup generates a pseudorandom string for use as a tracefs group name.
+// Returns an error when the output string would exceed 63 characters (kernel
+// limitation), when rand.Read() fails or when prefix contains characters not
+// allowed by isValidTraceID.
+func randomGroup(prefix string) (string, error) {
+ if !isValidTraceID(prefix) {
+ return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput)
+ }
+
+ b := make([]byte, 8)
+ if _, err := rand.Read(b); err != nil {
+ return "", fmt.Errorf("reading random bytes: %w", err)
+ }
+
+ group := fmt.Sprintf("%s_%x", prefix, b)
+ if len(group) > 63 {
+ return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, errInvalidInput)
+ }
+
+ return group, nil
+}
+
+func probePrefix(ret bool) string {
+ if ret {
+ return "r"
+ }
+ return "p"
+}
+
+// determineRetprobeBit reads a Performance Monitoring Unit's retprobe bit
+// from /sys/bus/event_source/devices/<pmu>/format/retprobe.
+func determineRetprobeBit(typ probeType) (uint64, error) {
+ p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe")
+
+ data, err := os.ReadFile(p)
+ if err != nil {
+ return 0, err
+ }
+
+ var rp uint64
+ n, err := fmt.Sscanf(string(bytes.TrimSpace(data)), "config:%d", &rp)
+ if err != nil {
+ return 0, fmt.Errorf("parse retprobe bit: %w", err)
+ }
+ if n != 1 {
+ return 0, fmt.Errorf("parse retprobe bit: expected 1 item, got %d", n)
+ }
+
+ return rp, nil
+}
+
+func kretprobeBit() (uint64, error) {
+ kprobeRetprobeBit.once.Do(func() {
+ kprobeRetprobeBit.value, kprobeRetprobeBit.err = determineRetprobeBit(kprobeType)
+ })
+ return kprobeRetprobeBit.value, kprobeRetprobeBit.err
+}
+
+// kprobeToken creates the SYM[+offs] token for the tracefs api.
+func kprobeToken(args probeArgs) string {
+ po := args.symbol
+
+ if args.offset != 0 {
+ po += fmt.Sprintf("+%#x", args.offset)
+ }
+
+ return po
+}
diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go
index 48f1a5529..067d0101a 100644
--- a/vendor/github.com/cilium/ebpf/link/link.go
+++ b/vendor/github.com/cilium/ebpf/link/link.go
@@ -1,11 +1,14 @@
package link
import (
+ "bytes"
+ "encoding/binary"
"fmt"
- "unsafe"
"github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
)
var ErrNotSupported = internal.ErrNotSupported
@@ -22,19 +25,65 @@ type Link interface {
// May return an error wrapping ErrNotSupported.
Pin(string) error
+ // Undo a previous call to Pin.
+ //
+ // May return an error wrapping ErrNotSupported.
+ Unpin() error
+
// Close frees resources.
//
- // The link will be broken unless it has been pinned. A link
- // may continue past the lifetime of the process if Close is
+ // The link will be broken unless it has been successfully pinned.
+ // A link may continue past the lifetime of the process if Close is
// not called.
Close() error
+ // Info returns metadata on a link.
+ //
+ // May return an error wrapping ErrNotSupported.
+ Info() (*Info, error)
+
// Prevent external users from implementing this interface.
isLink()
}
+// LoadPinnedLink loads a link that was persisted into a bpffs.
+func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) {
+ raw, err := loadPinnedRawLink(fileName, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return wrapRawLink(raw)
+}
+
+// wrapRawLink wraps a RawLink in a more specific type if possible.
+//
+// The function takes ownership of raw and closes it on error.
+func wrapRawLink(raw *RawLink) (Link, error) {
+ info, err := raw.Info()
+ if err != nil {
+ raw.Close()
+ return nil, err
+ }
+
+ switch info.Type {
+ case RawTracepointType:
+ return &rawTracepoint{*raw}, nil
+ case TracingType:
+ return &tracing{*raw}, nil
+ case CgroupType:
+ return &linkCgroup{*raw}, nil
+ case IterType:
+ return &Iter{*raw}, nil
+ case NetNsType:
+ return &NetNsLink{*raw}, nil
+ default:
+ return raw, nil
+ }
+}
+
// ID uniquely identifies a BPF link.
-type ID uint32
+type ID = sys.LinkID
// RawLinkOptions control the creation of a raw link.
type RawLinkOptions struct {
@@ -44,13 +93,55 @@ type RawLinkOptions struct {
Program *ebpf.Program
// Attach must match the attach type of Program.
Attach ebpf.AttachType
+ // BTF is the BTF of the attachment target.
+ BTF btf.TypeID
+ // Flags control the attach behaviour.
+ Flags uint32
}
-// RawLinkInfo contains metadata on a link.
-type RawLinkInfo struct {
+// Info contains metadata on a link.
+type Info struct {
Type Type
ID ID
Program ebpf.ProgramID
+ extra interface{}
+}
+
+type TracingInfo sys.TracingLinkInfo
+type CgroupInfo sys.CgroupLinkInfo
+type NetNsInfo sys.NetNsLinkInfo
+type XDPInfo sys.XDPLinkInfo
+
+// Tracing returns tracing type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) Tracing() *TracingInfo {
+ e, _ := r.extra.(*TracingInfo)
+ return e
+}
+
+// Cgroup returns cgroup type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) Cgroup() *CgroupInfo {
+ e, _ := r.extra.(*CgroupInfo)
+ return e
+}
+
+// NetNs returns netns type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) NetNs() *NetNsInfo {
+ e, _ := r.extra.(*NetNsInfo)
+ return e
+}
+
+// XDP returns XDP type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) XDP() *XDPInfo {
+ e, _ := r.extra.(*XDPInfo)
+ return e
}
// RawLink is the low-level API to bpf_link.
@@ -58,7 +149,8 @@ type RawLinkInfo struct {
// You should consider using the higher level interfaces in this
// package instead.
type RawLink struct {
- fd *internal.FD
+ fd *sys.FD
+ pinnedPath string
}
// AttachRawLink creates a raw link.
@@ -68,66 +160,46 @@ func AttachRawLink(opts RawLinkOptions) (*RawLink, error) {
}
if opts.Target < 0 {
- return nil, fmt.Errorf("invalid target: %s", internal.ErrClosedFd)
+ return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd)
}
progFd := opts.Program.FD()
if progFd < 0 {
- return nil, fmt.Errorf("invalid program: %s", internal.ErrClosedFd)
+ return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
}
- attr := bpfLinkCreateAttr{
- targetFd: uint32(opts.Target),
- progFd: uint32(progFd),
- attachType: opts.Attach,
+ attr := sys.LinkCreateAttr{
+ TargetFd: uint32(opts.Target),
+ ProgFd: uint32(progFd),
+ AttachType: sys.AttachType(opts.Attach),
+ TargetBtfId: uint32(opts.BTF),
+ Flags: opts.Flags,
}
- fd, err := bpfLinkCreate(&attr)
+ fd, err := sys.LinkCreate(&attr)
if err != nil {
return nil, fmt.Errorf("can't create link: %s", err)
}
- return &RawLink{fd}, nil
-}
-
-// LoadPinnedRawLink loads a persisted link from a bpffs.
-func LoadPinnedRawLink(fileName string) (*RawLink, error) {
- return loadPinnedRawLink(fileName, UnspecifiedType)
+ return &RawLink{fd, ""}, nil
}
-func loadPinnedRawLink(fileName string, typ Type) (*RawLink, error) {
- fd, err := internal.BPFObjGet(fileName)
- if err != nil {
- return nil, fmt.Errorf("load pinned link: %s", err)
- }
-
- link := &RawLink{fd}
- if typ == UnspecifiedType {
- return link, nil
- }
-
- info, err := link.Info()
+func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) {
+ fd, err := sys.ObjGet(&sys.ObjGetAttr{
+ Pathname: sys.NewStringPointer(fileName),
+ FileFlags: opts.Marshal(),
+ })
if err != nil {
- link.Close()
- return nil, fmt.Errorf("get pinned link info: %s", err)
- }
-
- if info.Type != typ {
- link.Close()
- return nil, fmt.Errorf("link type %v doesn't match %v", info.Type, typ)
+ return nil, fmt.Errorf("load pinned link: %w", err)
}
- return link, nil
+ return &RawLink{fd, fileName}, nil
}
func (l *RawLink) isLink() {}
// FD returns the raw file descriptor.
func (l *RawLink) FD() int {
- fd, err := l.fd.Value()
- if err != nil {
- return -1
- }
- return int(fd)
+ return l.fd.Int()
}
// Close breaks the link.
@@ -142,13 +214,23 @@ func (l *RawLink) Close() error {
// Calling Close on a pinned Link will not break the link
// until the pin is removed.
func (l *RawLink) Pin(fileName string) error {
- if err := internal.BPFObjPin(fileName, l.fd); err != nil {
- return fmt.Errorf("can't pin link: %s", err)
+ if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil {
+ return err
}
+ l.pinnedPath = fileName
return nil
}
-// Update implements Link.
+// Unpin implements the Link interface.
+func (l *RawLink) Unpin() error {
+ if err := internal.Unpin(l.pinnedPath); err != nil {
+ return err
+ }
+ l.pinnedPath = ""
+ return nil
+}
+
+// Update implements the Link interface.
func (l *RawLink) Update(new *ebpf.Program) error {
return l.UpdateArgs(RawLinkUpdateOptions{
New: new,
@@ -166,49 +248,66 @@ type RawLinkUpdateOptions struct {
func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error {
newFd := opts.New.FD()
if newFd < 0 {
- return fmt.Errorf("invalid program: %s", internal.ErrClosedFd)
+ return fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
}
var oldFd int
if opts.Old != nil {
oldFd = opts.Old.FD()
if oldFd < 0 {
- return fmt.Errorf("invalid replacement program: %s", internal.ErrClosedFd)
+ return fmt.Errorf("invalid replacement program: %s", sys.ErrClosedFd)
}
}
- linkFd, err := l.fd.Value()
- if err != nil {
- return fmt.Errorf("can't update link: %s", err)
- }
-
- attr := bpfLinkUpdateAttr{
- linkFd: linkFd,
- newProgFd: uint32(newFd),
- oldProgFd: uint32(oldFd),
- flags: opts.Flags,
+ attr := sys.LinkUpdateAttr{
+ LinkFd: l.fd.Uint(),
+ NewProgFd: uint32(newFd),
+ OldProgFd: uint32(oldFd),
+ Flags: opts.Flags,
}
- return bpfLinkUpdate(&attr)
-}
-
-// struct bpf_link_info
-type bpfLinkInfo struct {
- typ uint32
- id uint32
- prog_id uint32
+ return sys.LinkUpdate(&attr)
}
// Info returns metadata about the link.
-func (l *RawLink) Info() (*RawLinkInfo, error) {
- var info bpfLinkInfo
- err := internal.BPFObjGetInfoByFD(l.fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
- if err != nil {
+func (l *RawLink) Info() (*Info, error) {
+ var info sys.LinkInfo
+
+ if err := sys.ObjInfo(l.fd, &info); err != nil {
return nil, fmt.Errorf("link info: %s", err)
}
- return &RawLinkInfo{
- Type(info.typ),
- ID(info.id),
- ebpf.ProgramID(info.prog_id),
+ var extra interface{}
+ switch info.Type {
+ case CgroupType:
+ extra = &CgroupInfo{}
+ case IterType:
+ // not supported
+ case NetNsType:
+ extra = &NetNsInfo{}
+ case RawTracepointType:
+ // not supported
+ case TracingType:
+ extra = &TracingInfo{}
+ case XDPType:
+ extra = &XDPInfo{}
+ case PerfEventType:
+ // no extra
+ default:
+ return nil, fmt.Errorf("unknown link info type: %d", info.Type)
+ }
+
+ if info.Type != RawTracepointType && info.Type != IterType && info.Type != PerfEventType {
+ buf := bytes.NewReader(info.Extra[:])
+ err := binary.Read(buf, internal.NativeEndian, extra)
+ if err != nil {
+ return nil, fmt.Errorf("can not read extra link info: %w", err)
+ }
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ extra,
}, nil
}
diff --git a/vendor/github.com/cilium/ebpf/link/netns.go b/vendor/github.com/cilium/ebpf/link/netns.go
index 3533ff0fa..344ecced6 100644
--- a/vendor/github.com/cilium/ebpf/link/netns.go
+++ b/vendor/github.com/cilium/ebpf/link/netns.go
@@ -6,14 +6,9 @@ import (
"github.com/cilium/ebpf"
)
-// NetNsInfo contains metadata about a network namespace link.
-type NetNsInfo struct {
- RawLinkInfo
-}
-
// NetNsLink is a program attached to a network namespace.
type NetNsLink struct {
- *RawLink
+ RawLink
}
// AttachNetNs attaches a program to a network namespace.
@@ -37,24 +32,5 @@ func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) {
return nil, err
}
- return &NetNsLink{link}, nil
-}
-
-// LoadPinnedNetNs loads a network namespace link from bpffs.
-func LoadPinnedNetNs(fileName string) (*NetNsLink, error) {
- link, err := loadPinnedRawLink(fileName, NetNsType)
- if err != nil {
- return nil, err
- }
-
- return &NetNsLink{link}, nil
-}
-
-// Info returns information about the link.
-func (nns *NetNsLink) Info() (*NetNsInfo, error) {
- info, err := nns.RawLink.Info()
- if err != nil {
- return nil, err
- }
- return &NetNsInfo{*info}, nil
+ return &NetNsLink{*link}, nil
}
diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go
new file mode 100644
index 000000000..0e5bd4791
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/perf_event.go
@@ -0,0 +1,394 @@
+package link
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// Getting the terminology right is usually the hardest part. For posterity and
+// for staying sane during implementation:
+//
+// - trace event: Representation of a kernel runtime hook. Filesystem entries
+// under <tracefs>/events. Can be tracepoints (static), kprobes or uprobes.
+// Can be instantiated into perf events (see below).
+// - tracepoint: A predetermined hook point in the kernel. Exposed as trace
+// events in (sub)directories under <tracefs>/events. Cannot be closed or
+// removed, they are static.
+// - k(ret)probe: Ephemeral trace events based on entry or exit points of
+// exported kernel symbols. kprobe-based (tracefs) trace events can be
+// created system-wide by writing to the <tracefs>/kprobe_events file, or
+// they can be scoped to the current process by creating PMU perf events.
+// - u(ret)probe: Ephemeral trace events based on user provides ELF binaries
+// and offsets. uprobe-based (tracefs) trace events can be
+// created system-wide by writing to the <tracefs>/uprobe_events file, or
+// they can be scoped to the current process by creating PMU perf events.
+// - perf event: An object instantiated based on an existing trace event or
+// kernel symbol. Referred to by fd in userspace.
+// Exactly one eBPF program can be attached to a perf event. Multiple perf
+// events can be created from a single trace event. Closing a perf event
+// stops any further invocations of the attached eBPF program.
+
+var (
+ tracefsPath = "/sys/kernel/debug/tracing"
+
+ errInvalidInput = errors.New("invalid input")
+)
+
+const (
+ perfAllThreads = -1
+)
+
+type perfEventType uint8
+
+const (
+ tracepointEvent perfEventType = iota
+ kprobeEvent
+ kretprobeEvent
+ uprobeEvent
+ uretprobeEvent
+)
+
+// A perfEvent represents a perf event kernel object. Exactly one eBPF program
+// can be attached to it. It is created based on a tracefs trace event or a
+// Performance Monitoring Unit (PMU).
+type perfEvent struct {
+ // The event type determines the types of programs that can be attached.
+ typ perfEventType
+
+ // Group and name of the tracepoint/kprobe/uprobe.
+ group string
+ name string
+
+ // PMU event ID read from sysfs. Valid IDs are non-zero.
+ pmuID uint64
+ // ID of the trace event read from tracefs. Valid IDs are non-zero.
+ tracefsID uint64
+
+ // User provided arbitrary value.
+ cookie uint64
+
+ // This is the perf event FD.
+ fd *sys.FD
+}
+
+func (pe *perfEvent) Close() error {
+ if err := pe.fd.Close(); err != nil {
+ return fmt.Errorf("closing perf event fd: %w", err)
+ }
+
+ switch pe.typ {
+ case kprobeEvent, kretprobeEvent:
+ // Clean up kprobe tracefs entry.
+ if pe.tracefsID != 0 {
+ return closeTraceFSProbeEvent(kprobeType, pe.group, pe.name)
+ }
+ case uprobeEvent, uretprobeEvent:
+ // Clean up uprobe tracefs entry.
+ if pe.tracefsID != 0 {
+ return closeTraceFSProbeEvent(uprobeType, pe.group, pe.name)
+ }
+ case tracepointEvent:
+ // Tracepoint trace events don't hold any extra resources.
+ return nil
+ }
+
+ return nil
+}
+
+// perfEventLink represents a bpf perf link.
+type perfEventLink struct {
+ RawLink
+ pe *perfEvent
+}
+
+func (pl *perfEventLink) isLink() {}
+
+// Pinning requires the underlying perf event FD to stay open.
+//
+// | PerfEvent FD | BpfLink FD | Works |
+// |--------------|------------|-------|
+// | Open | Open | Yes |
+// | Closed | Open | No |
+// | Open | Closed | No (Pin() -> EINVAL) |
+// | Closed | Closed | No (Pin() -> EINVAL) |
+//
+// There is currently no pretty way to recover the perf event FD
+// when loading a pinned link, so leave as not supported for now.
+func (pl *perfEventLink) Pin(string) error {
+ return fmt.Errorf("perf event link pin: %w", ErrNotSupported)
+}
+
+func (pl *perfEventLink) Unpin() error {
+ return fmt.Errorf("perf event link unpin: %w", ErrNotSupported)
+}
+
+func (pl *perfEventLink) Close() error {
+ if err := pl.pe.Close(); err != nil {
+ return fmt.Errorf("perf event link close: %w", err)
+ }
+ return pl.fd.Close()
+}
+
+func (pl *perfEventLink) Update(prog *ebpf.Program) error {
+ return fmt.Errorf("perf event link update: %w", ErrNotSupported)
+}
+
+// perfEventIoctl implements Link and handles the perf event lifecycle
+// via ioctl().
+type perfEventIoctl struct {
+ *perfEvent
+}
+
+func (pi *perfEventIoctl) isLink() {}
+
+// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"),
+// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array
+// owned by the perf event, which means multiple programs can be attached
+// simultaneously.
+//
+// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event
+// returns EEXIST.
+//
+// Detaching a program from a perf event is currently not possible, so a
+// program replacement mechanism cannot be implemented for perf events.
+func (pi *perfEventIoctl) Update(prog *ebpf.Program) error {
+ return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported)
+}
+
+func (pi *perfEventIoctl) Pin(string) error {
+ return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported)
+}
+
+func (pi *perfEventIoctl) Unpin() error {
+ return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported)
+}
+
+func (pi *perfEventIoctl) Info() (*Info, error) {
+ return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported)
+}
+
+// attach the given eBPF prog to the perf event stored in pe.
+// pe must contain a valid perf event fd.
+// prog's type must match the program type stored in pe.
+func attachPerfEvent(pe *perfEvent, prog *ebpf.Program) (Link, error) {
+ if prog == nil {
+ return nil, errors.New("cannot attach a nil program")
+ }
+ if prog.FD() < 0 {
+ return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
+ }
+
+ switch pe.typ {
+ case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent:
+ if t := prog.Type(); t != ebpf.Kprobe {
+ return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t)
+ }
+ case tracepointEvent:
+ if t := prog.Type(); t != ebpf.TracePoint {
+ return nil, fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t)
+ }
+ default:
+ return nil, fmt.Errorf("unknown perf event type: %d", pe.typ)
+ }
+
+ if err := haveBPFLinkPerfEvent(); err == nil {
+ return attachPerfEventLink(pe, prog)
+ }
+ return attachPerfEventIoctl(pe, prog)
+}
+
+func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) {
+ if pe.cookie != 0 {
+ return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported)
+ }
+
+ // Assign the eBPF program to the perf event.
+ err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD())
+ if err != nil {
+ return nil, fmt.Errorf("setting perf event bpf program: %w", err)
+ }
+
+ // PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values.
+ if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil {
+ return nil, fmt.Errorf("enable perf event: %s", err)
+ }
+
+ pi := &perfEventIoctl{pe}
+
+ // Close the perf event when its reference is lost to avoid leaking system resources.
+ runtime.SetFinalizer(pi, (*perfEventIoctl).Close)
+ return pi, nil
+}
+
+// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+).
+//
+// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
+func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program) (*perfEventLink, error) {
+ fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
+ ProgFd: uint32(prog.FD()),
+ TargetFd: pe.fd.Uint(),
+ AttachType: sys.BPF_PERF_EVENT,
+ BpfCookie: pe.cookie,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("cannot create bpf perf link: %v", err)
+ }
+
+ pl := &perfEventLink{RawLink{fd: fd}, pe}
+
+ // Close the perf event when its reference is lost to avoid leaking system resources.
+ runtime.SetFinalizer(pl, (*perfEventLink).Close)
+ return pl, nil
+}
+
+// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str.
+func unsafeStringPtr(str string) (unsafe.Pointer, error) {
+ p, err := unix.BytePtrFromString(str)
+ if err != nil {
+ return nil, err
+ }
+ return unsafe.Pointer(p), nil
+}
+
+// getTraceEventID reads a trace event's ID from tracefs given its group and name.
+// The kernel requires group and name to be alphanumeric or underscore.
+//
+// name automatically has its invalid symbols converted to underscores so the caller
+// can pass a raw symbol name, e.g. a kernel symbol containing dots.
+func getTraceEventID(group, name string) (uint64, error) {
+ name = sanitizeSymbol(name)
+ tid, err := uint64FromFile(tracefsPath, "events", group, name, "id")
+ if errors.Is(err, os.ErrNotExist) {
+ return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist)
+ }
+ if err != nil {
+ return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
+ }
+
+ return tid, nil
+}
+
+// getPMUEventType reads a Performance Monitoring Unit's type (numeric identifier)
+// from /sys/bus/event_source/devices/<pmu>/type.
+//
+// Returns ErrNotSupported if the pmu type is not supported.
+func getPMUEventType(typ probeType) (uint64, error) {
+ et, err := uint64FromFile("/sys/bus/event_source/devices", typ.String(), "type")
+ if errors.Is(err, os.ErrNotExist) {
+ return 0, fmt.Errorf("pmu type %s: %w", typ, ErrNotSupported)
+ }
+ if err != nil {
+ return 0, fmt.Errorf("reading pmu type %s: %w", typ, err)
+ }
+
+ return et, nil
+}
+
+// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide
+// [k,u]probes created by writing to <tracefs>/[k,u]probe_events are tracepoints
+// behind the scenes, and can be attached to using these perf events.
+func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) {
+ attr := unix.PerfEventAttr{
+ Type: unix.PERF_TYPE_TRACEPOINT,
+ Config: tid,
+ Sample_type: unix.PERF_SAMPLE_RAW,
+ Sample: 1,
+ Wakeup: 1,
+ }
+
+ fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
+ if err != nil {
+ return nil, fmt.Errorf("opening tracepoint perf event: %w", err)
+ }
+
+ return sys.NewFD(fd)
+}
+
+// uint64FromFile reads a uint64 from a file. All elements of path are sanitized
+// and joined onto base. Returns error if base no longer prefixes the path after
+// joining all components.
+func uint64FromFile(base string, path ...string) (uint64, error) {
+ l := filepath.Join(path...)
+ p := filepath.Join(base, l)
+ if !strings.HasPrefix(p, base) {
+ return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput)
+ }
+
+ data, err := os.ReadFile(p)
+ if err != nil {
+ return 0, fmt.Errorf("reading file %s: %w", p, err)
+ }
+
+ et := bytes.TrimSpace(data)
+ return strconv.ParseUint(string(et), 10, 64)
+}
+
+// Probe BPF perf link.
+//
+// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307
+// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
+var haveBPFLinkPerfEvent = internal.FeatureTest("bpf_link_perf_event", "5.15", func() error {
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Name: "probe_bpf_perf_link",
+ Type: ebpf.Kprobe,
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ License: "MIT",
+ })
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ _, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.BPF_PERF_EVENT,
+ })
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ return err
+})
+
+// isValidTraceID implements the equivalent of a regex match
+// against "^[a-zA-Z_][0-9a-zA-Z_]*$".
+//
+// Trace event groups, names and kernel symbols must adhere to this set
+// of characters. Non-empty, first character must not be a number, all
+// characters must be alphanumeric or underscore.
+func isValidTraceID(s string) bool {
+ if len(s) < 1 {
+ return false
+ }
+ for i, c := range []byte(s) {
+ switch {
+ case c >= 'a' && c <= 'z':
+ case c >= 'A' && c <= 'Z':
+ case c == '_':
+ case i > 0 && c >= '0' && c <= '9':
+
+ default:
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/ebpf/link/platform.go b/vendor/github.com/cilium/ebpf/link/platform.go
new file mode 100644
index 000000000..eb6f7b7a3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/platform.go
@@ -0,0 +1,25 @@
+package link
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func platformPrefix(symbol string) string {
+
+ prefix := runtime.GOARCH
+
+ // per https://github.com/golang/go/blob/master/src/go/build/syslist.go
+ switch prefix {
+ case "386":
+ prefix = "ia32"
+ case "amd64", "amd64p32":
+ prefix = "x64"
+ case "arm64", "arm64be":
+ prefix = "arm64"
+ default:
+ return symbol
+ }
+
+ return fmt.Sprintf("__%s_%s", prefix, symbol)
+}
diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go
index 0fe9d37c4..ea3181737 100644
--- a/vendor/github.com/cilium/ebpf/link/program.go
+++ b/vendor/github.com/cilium/ebpf/link/program.go
@@ -4,7 +4,7 @@ import (
"fmt"
"github.com/cilium/ebpf"
- "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
)
type RawAttachProgramOptions struct {
@@ -34,7 +34,7 @@ func RawAttachProgram(opts RawAttachProgramOptions) error {
replaceFd = uint32(opts.Replace.FD())
}
- attr := internal.BPFProgAttachAttr{
+ attr := sys.ProgAttachAttr{
TargetFd: uint32(opts.Target),
AttachBpfFd: uint32(opts.Program.FD()),
ReplaceBpfFd: replaceFd,
@@ -42,8 +42,8 @@ func RawAttachProgram(opts RawAttachProgramOptions) error {
AttachFlags: uint32(opts.Flags),
}
- if err := internal.BPFProgAttach(&attr); err != nil {
- return fmt.Errorf("can't attach program: %s", err)
+ if err := sys.ProgAttach(&attr); err != nil {
+ return fmt.Errorf("can't attach program: %w", err)
}
return nil
}
@@ -63,13 +63,13 @@ func RawDetachProgram(opts RawDetachProgramOptions) error {
return err
}
- attr := internal.BPFProgDetachAttr{
+ attr := sys.ProgDetachAttr{
TargetFd: uint32(opts.Target),
AttachBpfFd: uint32(opts.Program.FD()),
AttachType: uint32(opts.Attach),
}
- if err := internal.BPFProgDetach(&attr); err != nil {
- return fmt.Errorf("can't detach program: %s", err)
+ if err := sys.ProgDetach(&attr); err != nil {
+ return fmt.Errorf("can't detach program: %w", err)
}
return nil
diff --git a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go
index 65652486f..925e621cb 100644
--- a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go
+++ b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go
@@ -1,10 +1,11 @@
package link
import (
+ "errors"
"fmt"
"github.com/cilium/ebpf"
- "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
)
type RawTracepointOptions struct {
@@ -22,36 +23,65 @@ func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) {
return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t)
}
if opts.Program.FD() < 0 {
- return nil, fmt.Errorf("invalid program: %w", internal.ErrClosedFd)
+ return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
}
- fd, err := bpfRawTracepointOpen(&bpfRawTracepointOpenAttr{
- name: internal.NewStringPointer(opts.Name),
- fd: uint32(opts.Program.FD()),
+ fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
+ Name: sys.NewStringPointer(opts.Name),
+ ProgFd: uint32(opts.Program.FD()),
})
if err != nil {
return nil, err
}
- return &progAttachRawTracepoint{fd: fd}, nil
+ err = haveBPFLink()
+ if errors.Is(err, ErrNotSupported) {
+ // Prior to commit 70ed506c3bbc ("bpf: Introduce pinnable bpf_link abstraction")
+ // raw_tracepoints are just a plain fd.
+ return &simpleRawTracepoint{fd}, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &rawTracepoint{RawLink{fd: fd}}, nil
}
-type progAttachRawTracepoint struct {
- fd *internal.FD
+type simpleRawTracepoint struct {
+ fd *sys.FD
}
-var _ Link = (*progAttachRawTracepoint)(nil)
+var _ Link = (*simpleRawTracepoint)(nil)
-func (rt *progAttachRawTracepoint) isLink() {}
+func (frt *simpleRawTracepoint) isLink() {}
+
+func (frt *simpleRawTracepoint) Close() error {
+ return frt.fd.Close()
+}
-func (rt *progAttachRawTracepoint) Close() error {
- return rt.fd.Close()
+func (frt *simpleRawTracepoint) Update(_ *ebpf.Program) error {
+ return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported)
}
-func (rt *progAttachRawTracepoint) Update(_ *ebpf.Program) error {
- return fmt.Errorf("can't update raw_tracepoint: %w", ErrNotSupported)
+func (frt *simpleRawTracepoint) Pin(string) error {
+ return fmt.Errorf("pin raw_tracepoint: %w", ErrNotSupported)
}
-func (rt *progAttachRawTracepoint) Pin(_ string) error {
- return fmt.Errorf("can't pin raw_tracepoint: %w", ErrNotSupported)
+func (frt *simpleRawTracepoint) Unpin() error {
+ return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported)
+}
+
+func (frt *simpleRawTracepoint) Info() (*Info, error) {
+ return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported)
+}
+
+type rawTracepoint struct {
+ RawLink
+}
+
+var _ Link = (*rawTracepoint)(nil)
+
+func (rt *rawTracepoint) Update(_ *ebpf.Program) error {
+ return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported)
}
diff --git a/vendor/github.com/cilium/ebpf/link/socket_filter.go b/vendor/github.com/cilium/ebpf/link/socket_filter.go
new file mode 100644
index 000000000..94f3958cc
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/socket_filter.go
@@ -0,0 +1,40 @@
+package link
+
+import (
+ "syscall"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// AttachSocketFilter attaches a SocketFilter BPF program to a socket.
+func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error {
+ rawConn, err := conn.SyscallConn()
+ if err != nil {
+ return err
+ }
+ var ssoErr error
+ err = rawConn.Control(func(fd uintptr) {
+ ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD())
+ })
+ if ssoErr != nil {
+ return ssoErr
+ }
+ return err
+}
+
+// DetachSocketFilter detaches a SocketFilter BPF program from a socket.
+func DetachSocketFilter(conn syscall.Conn) error {
+ rawConn, err := conn.SyscallConn()
+ if err != nil {
+ return err
+ }
+ var ssoErr error
+ err = rawConn.Control(func(fd uintptr) {
+ ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0)
+ })
+ if ssoErr != nil {
+ return ssoErr
+ }
+ return err
+}
diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go
index 19326c8af..a661395b3 100644
--- a/vendor/github.com/cilium/ebpf/link/syscalls.go
+++ b/vendor/github.com/cilium/ebpf/link/syscalls.go
@@ -2,35 +2,33 @@ package link
import (
"errors"
- "unsafe"
"github.com/cilium/ebpf"
"github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
// Type is the kind of link.
-type Type uint32
+type Type = sys.LinkType
// Valid link types.
-//
-// Equivalent to enum bpf_link_type.
const (
- UnspecifiedType Type = iota
- RawTracepointType
- TracingType
- CgroupType
- IterType
- NetNsType
- XDPType
+ UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC
+ RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT
+ TracingType = sys.BPF_LINK_TYPE_TRACING
+ CgroupType = sys.BPF_LINK_TYPE_CGROUP
+ IterType = sys.BPF_LINK_TYPE_ITER
+ NetNsType = sys.BPF_LINK_TYPE_NETNS
+ XDPType = sys.BPF_LINK_TYPE_XDP
+ PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT
)
var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
- Type: ebpf.CGroupSKB,
- AttachType: ebpf.AttachCGroupInetIngress,
- License: "MIT",
+ Type: ebpf.CGroupSKB,
+ License: "MIT",
Instructions: asm.Instructions{
asm.Mov.Imm(asm.R0, 0),
asm.Return(),
@@ -69,7 +67,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace
// We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs.
// If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't
// present.
- attr := internal.BPFProgAttachAttr{
+ attr := sys.ProgAttachAttr{
// We rely on this being checked after attachFlags.
TargetFd: ^uint32(0),
AttachBpfFd: uint32(prog.FD()),
@@ -77,7 +75,7 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace
AttachFlags: uint32(flagReplace),
}
- err = internal.BPFProgAttach(&attr)
+ err = sys.ProgAttach(&attr)
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
@@ -87,55 +85,14 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace
return err
})
-type bpfLinkCreateAttr struct {
- progFd uint32
- targetFd uint32
- attachType ebpf.AttachType
- flags uint32
-}
-
-func bpfLinkCreate(attr *bpfLinkCreateAttr) (*internal.FD, error) {
- ptr, err := internal.BPF(internal.BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
- if err != nil {
- return nil, err
- }
- return internal.NewFD(uint32(ptr)), nil
-}
-
-type bpfLinkUpdateAttr struct {
- linkFd uint32
- newProgFd uint32
- flags uint32
- oldProgFd uint32
-}
-
-func bpfLinkUpdate(attr *bpfLinkUpdateAttr) error {
- _, err := internal.BPF(internal.BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
- return err
-}
-
var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error {
- prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
- Type: ebpf.CGroupSKB,
- AttachType: ebpf.AttachCGroupInetIngress,
- License: "MIT",
- Instructions: asm.Instructions{
- asm.Mov.Imm(asm.R0, 0),
- asm.Return(),
- },
- })
- if err != nil {
- return internal.ErrNotSupported
- }
- defer prog.Close()
-
- attr := bpfLinkCreateAttr{
+ attr := sys.LinkCreateAttr{
// This is a hopefully invalid file descriptor, which triggers EBADF.
- targetFd: ^uint32(0),
- progFd: uint32(prog.FD()),
- attachType: ebpf.AttachCGroupInetIngress,
+ TargetFd: ^uint32(0),
+ ProgFd: ^uint32(0),
+ AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress),
}
- _, err = bpfLinkCreate(&attr)
+ _, err := sys.LinkCreate(&attr)
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
}
@@ -144,30 +101,3 @@ var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error {
}
return err
})
-
-type bpfIterCreateAttr struct {
- linkFd uint32
- flags uint32
-}
-
-func bpfIterCreate(attr *bpfIterCreateAttr) (*internal.FD, error) {
- ptr, err := internal.BPF(internal.BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
- if err == nil {
- return internal.NewFD(uint32(ptr)), nil
- }
- return nil, err
-}
-
-type bpfRawTracepointOpenAttr struct {
- name internal.Pointer
- fd uint32
- _ uint32
-}
-
-func bpfRawTracepointOpen(attr *bpfRawTracepointOpenAttr) (*internal.FD, error) {
- ptr, err := internal.BPF(internal.BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
- if err == nil {
- return internal.NewFD(uint32(ptr)), nil
- }
- return nil, err
-}
diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go
new file mode 100644
index 000000000..a59ef9d1c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/tracepoint.go
@@ -0,0 +1,77 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+)
+
+// TracepointOptions defines additional parameters that will be used
+// when loading Tracepoints.
+type TracepointOptions struct {
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ //
+ // Needs kernel 5.15+.
+ Cookie uint64
+}
+
+// Tracepoint attaches the given eBPF program to the tracepoint with the given
+// group and name. See /sys/kernel/debug/tracing/events to find available
+// tracepoints. The top-level directory is the group, the event's subdirectory
+// is the name. Example:
+//
+// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil)
+//
+// Losing the reference to the resulting Link (tp) will close the Tracepoint
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is
+// only possible as of kernel 4.14 (commit cf5f5ce).
+func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) {
+ if group == "" || name == "" {
+ return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput)
+ }
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if !isValidTraceID(group) || !isValidTraceID(name) {
+ return nil, fmt.Errorf("group and name '%s/%s' must be alphanumeric or underscore: %w", group, name, errInvalidInput)
+ }
+ if prog.Type() != ebpf.TracePoint {
+ return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput)
+ }
+
+ tid, err := getTraceEventID(group, name)
+ if err != nil {
+ return nil, err
+ }
+
+ fd, err := openTracepointPerfEvent(tid, perfAllThreads)
+ if err != nil {
+ return nil, err
+ }
+
+ var cookie uint64
+ if opts != nil {
+ cookie = opts.Cookie
+ }
+
+ pe := &perfEvent{
+ typ: tracepointEvent,
+ group: group,
+ name: name,
+ tracefsID: tid,
+ cookie: cookie,
+ fd: fd,
+ }
+
+ lnk, err := attachPerfEvent(pe, prog)
+ if err != nil {
+ pe.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/tracing.go b/vendor/github.com/cilium/ebpf/link/tracing.go
new file mode 100644
index 000000000..e47e61a3b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/tracing.go
@@ -0,0 +1,141 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type tracing struct {
+ RawLink
+}
+
+func (f *tracing) Update(new *ebpf.Program) error {
+ return fmt.Errorf("tracing update: %w", ErrNotSupported)
+}
+
+// AttachFreplace attaches the given eBPF program to the function it replaces.
+//
+// The program and name can either be provided at link time, or can be provided
+// at program load time. If they were provided at load time, they should be nil
+// and empty respectively here, as they will be ignored by the kernel.
+// Examples:
+//
+// AttachFreplace(dispatcher, "function", replacement)
+// AttachFreplace(nil, "", replacement)
+func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (Link, error) {
+ if (name == "") != (targetProg == nil) {
+ return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput)
+ }
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if prog.Type() != ebpf.Extension {
+ return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput)
+ }
+
+ var (
+ target int
+ typeID btf.TypeID
+ )
+ if targetProg != nil {
+ btfHandle, err := targetProg.Handle()
+ if err != nil {
+ return nil, err
+ }
+ defer btfHandle.Close()
+
+ spec, err := btfHandle.Spec(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var function *btf.Func
+ if err := spec.TypeByName(name, &function); err != nil {
+ return nil, err
+ }
+
+ target = targetProg.FD()
+ typeID, err = spec.TypeID(function)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ link, err := AttachRawLink(RawLinkOptions{
+ Target: target,
+ Program: prog,
+ Attach: ebpf.AttachNone,
+ BTF: typeID,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &tracing{*link}, nil
+}
+
+type TracingOptions struct {
+ // Program must be of type Tracing with attach type
+ // AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or
+ // AttachTraceRawTp.
+ Program *ebpf.Program
+}
+
+type LSMOptions struct {
+ // Program must be of type LSM with attach type
+ // AttachLSMMac.
+ Program *ebpf.Program
+}
+
+// attachBTFID links all BPF program types (Tracing/LSM) that they attach to a btf_id.
+func attachBTFID(program *ebpf.Program) (Link, error) {
+ if program.FD() < 0 {
+ return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd)
+ }
+
+ fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
+ ProgFd: uint32(program.FD()),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ raw := RawLink{fd: fd}
+ info, err := raw.Info()
+ if err != nil {
+ raw.Close()
+ return nil, err
+ }
+
+ if info.Type == RawTracepointType {
+ // Sadness upon sadness: a Tracing program with AttachRawTp returns
+ // a raw_tracepoint link. Other types return a tracing link.
+ return &rawTracepoint{raw}, nil
+ }
+
+ return &tracing{RawLink: RawLink{fd: fd}}, nil
+}
+
+// AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or
+// a BTF-powered raw tracepoint (tp_btf) BPF Program to a BPF hook defined
+// in kernel modules.
+func AttachTracing(opts TracingOptions) (Link, error) {
+ if t := opts.Program.Type(); t != ebpf.Tracing {
+ return nil, fmt.Errorf("invalid program type %s, expected Tracing", t)
+ }
+
+ return attachBTFID(opts.Program)
+}
+
+// AttachLSM links a Linux security module (LSM) BPF Program to a BPF
+// hook defined in kernel modules.
+func AttachLSM(opts LSMOptions) (Link, error) {
+ if t := opts.Program.Type(); t != ebpf.LSM {
+ return nil, fmt.Errorf("invalid program type %s, expected LSM", t)
+ }
+
+ return attachBTFID(opts.Program)
+}
diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go
new file mode 100644
index 000000000..edf925b57
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/uprobe.go
@@ -0,0 +1,373 @@
+package link
+
+import (
+ "debug/elf"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal"
+)
+
+var (
+ uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events")
+
+ uprobeRetprobeBit = struct {
+ once sync.Once
+ value uint64
+ err error
+ }{}
+
+ uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset"
+ // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799
+ uprobeRefCtrOffsetShift = 32
+ haveRefCtrOffsetPMU = internal.FeatureTest("RefCtrOffsetPMU", "4.20", func() error {
+ _, err := os.Stat(uprobeRefCtrOffsetPMUPath)
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ return nil
+ })
+
+ // ErrNoSymbol indicates that the given symbol was not found
+ // in the ELF symbols table.
+ ErrNoSymbol = errors.New("not found")
+)
+
+// Executable defines an executable program on the filesystem.
+type Executable struct {
+ // Path of the executable on the filesystem.
+ path string
+ // Parsed ELF and dynamic symbols' addresses.
+ addresses map[string]uint64
+}
+
+// UprobeOptions defines additional parameters that will be used
+// when loading Uprobes.
+type UprobeOptions struct {
+ // Symbol address. Must be provided in case of external symbols (shared libs).
+ // If set, overrides the address eventually parsed from the executable.
+ Address uint64
+ // The offset relative to given symbol. Useful when tracing an arbitrary point
+ // inside the frame of given symbol.
+ //
+ // Note: this field changed from being an absolute offset to being relative
+ // to Address.
+ Offset uint64
+ // Only set the uprobe on the given process ID. Useful when tracing
+ // shared library calls or programs that have many running instances.
+ PID int
+ // Automatically manage SDT reference counts (semaphores).
+ //
+ // If this field is set, the Kernel will increment/decrement the
+ // semaphore located in the process memory at the provided address on
+ // probe attach/detach.
+ //
+ // See also:
+ // sourceware.org/systemtap/wiki/UserSpaceProbeImplementation (Semaphore Handling)
+ // github.com/torvalds/linux/commit/1cc33161a83d
+ // github.com/torvalds/linux/commit/a6ca88b241d5
+ RefCtrOffset uint64
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ //
+ // Needs kernel 5.15+.
+ Cookie uint64
+}
+
+// To open a new Executable, use:
+//
+// OpenExecutable("/bin/bash")
+//
+// The returned value can then be used to open Uprobe(s).
+func OpenExecutable(path string) (*Executable, error) {
+ if path == "" {
+ return nil, fmt.Errorf("path cannot be empty")
+ }
+
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("open file '%s': %w", path, err)
+ }
+ defer f.Close()
+
+ se, err := internal.NewSafeELFFile(f)
+ if err != nil {
+ return nil, fmt.Errorf("parse ELF file: %w", err)
+ }
+
+ if se.Type != elf.ET_EXEC && se.Type != elf.ET_DYN {
+ // ELF is not an executable or a shared object.
+ return nil, errors.New("the given file is not an executable or a shared object")
+ }
+
+ ex := Executable{
+ path: path,
+ addresses: make(map[string]uint64),
+ }
+
+ if err := ex.load(se); err != nil {
+ return nil, err
+ }
+
+ return &ex, nil
+}
+
+func (ex *Executable) load(f *internal.SafeELFFile) error {
+ syms, err := f.Symbols()
+ if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+ return err
+ }
+
+ dynsyms, err := f.DynamicSymbols()
+ if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+ return err
+ }
+
+ syms = append(syms, dynsyms...)
+
+ for _, s := range syms {
+ if elf.ST_TYPE(s.Info) != elf.STT_FUNC {
+ // Symbol not associated with a function or other executable code.
+ continue
+ }
+
+ address := s.Value
+
+ // Loop over ELF segments.
+ for _, prog := range f.Progs {
+ // Skip uninteresting segments.
+ if prog.Type != elf.PT_LOAD || (prog.Flags&elf.PF_X) == 0 {
+ continue
+ }
+
+ if prog.Vaddr <= s.Value && s.Value < (prog.Vaddr+prog.Memsz) {
+ // If the symbol value is contained in the segment, calculate
+ // the symbol offset.
+ //
+ // fn symbol offset = fn symbol VA - .text VA + .text offset
+ //
+ // stackoverflow.com/a/40249502
+ address = s.Value - prog.Vaddr + prog.Off
+ break
+ }
+ }
+
+ ex.addresses[s.Name] = address
+ }
+
+ return nil
+}
+
+// address calculates the address of a symbol in the executable.
+//
+// opts must not be nil.
+func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error) {
+ if opts.Address > 0 {
+ return opts.Address + opts.Offset, nil
+ }
+
+ address, ok := ex.addresses[symbol]
+ if !ok {
+ return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol)
+ }
+
+ // Symbols with location 0 from section undef are shared library calls and
+ // are relocated before the binary is executed. Dynamic linking is not
+ // implemented by the library, so mark this as unsupported for now.
+ //
+ // Since only offset values are stored and not elf.Symbol, if the value is 0,
+ // assume it's an external symbol.
+ if address == 0 {
+ return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+
+ "(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported)
+ }
+
+ return address + opts.Offset, nil
+}
+
+// Uprobe attaches the given eBPF program to a perf event that fires when the
+// given symbol starts executing in the given Executable.
+// For example, /bin/bash::main():
+//
+// ex, _ = OpenExecutable("/bin/bash")
+// ex.Uprobe("main", prog, nil)
+//
+// When using symbols which belongs to shared libraries,
+// an offset must be provided via options:
+//
+// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Note: Setting the Offset field in the options supersedes the symbol's offset.
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+ u, err := ex.uprobe(symbol, prog, opts, false)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(u, prog)
+ if err != nil {
+ u.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// Uretprobe attaches the given eBPF program to a perf event that fires right
+// before the given symbol exits. For example, /bin/bash::main():
+//
+// ex, _ = OpenExecutable("/bin/bash")
+// ex.Uretprobe("main", prog, nil)
+//
+// When using symbols which belongs to shared libraries,
+// an offset must be provided via options:
+//
+// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Note: Setting the Offset field in the options supersedes the symbol's offset.
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+ u, err := ex.uprobe(symbol, prog, opts, true)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(u, prog)
+ if err != nil {
+ u.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// uprobe opens a perf event for the given binary/symbol and attaches prog to it.
+// If ret is true, create a uretprobe.
+func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions, ret bool) (*perfEvent, error) {
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if prog.Type() != ebpf.Kprobe {
+ return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput)
+ }
+ if opts == nil {
+ opts = &UprobeOptions{}
+ }
+
+ offset, err := ex.address(symbol, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ pid := opts.PID
+ if pid == 0 {
+ pid = perfAllThreads
+ }
+
+ if opts.RefCtrOffset != 0 {
+ if err := haveRefCtrOffsetPMU(); err != nil {
+ return nil, fmt.Errorf("uprobe ref_ctr_offset: %w", err)
+ }
+ }
+
+ args := probeArgs{
+ symbol: symbol,
+ path: ex.path,
+ offset: offset,
+ pid: pid,
+ refCtrOffset: opts.RefCtrOffset,
+ ret: ret,
+ cookie: opts.Cookie,
+ }
+
+ // Use uprobe PMU if the kernel has it available.
+ tp, err := pmuUprobe(args)
+ if err == nil {
+ return tp, nil
+ }
+ if err != nil && !errors.Is(err, ErrNotSupported) {
+ return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err)
+ }
+
+ // Use tracefs if uprobe PMU is missing.
+ args.symbol = sanitizeSymbol(symbol)
+ tp, err = tracefsUprobe(args)
+ if err != nil {
+ return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err)
+ }
+
+ return tp, nil
+}
+
+// pmuUprobe opens a perf event based on the uprobe PMU.
+func pmuUprobe(args probeArgs) (*perfEvent, error) {
+ return pmuProbe(uprobeType, args)
+}
+
+// tracefsUprobe creates a Uprobe tracefs entry.
+func tracefsUprobe(args probeArgs) (*perfEvent, error) {
+ return tracefsProbe(uprobeType, args)
+}
+
+// sanitizeSymbol replaces every invalid character for the tracefs api with an underscore.
+// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_").
+func sanitizeSymbol(s string) string {
+ var b strings.Builder
+ b.Grow(len(s))
+ var skip bool
+ for _, c := range []byte(s) {
+ switch {
+ case c >= 'a' && c <= 'z',
+ c >= 'A' && c <= 'Z',
+ c >= '0' && c <= '9':
+ skip = false
+ b.WriteByte(c)
+
+ default:
+ if !skip {
+ b.WriteByte('_')
+ skip = true
+ }
+ }
+ }
+
+ return b.String()
+}
+
+// uprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api.
+func uprobeToken(args probeArgs) string {
+ po := fmt.Sprintf("%s:%#x", args.path, args.offset)
+
+ if args.refCtrOffset != 0 {
+ // This is not documented in Documentation/trace/uprobetracer.txt.
+ // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564
+ po += fmt.Sprintf("(%#x)", args.refCtrOffset)
+ }
+
+ return po
+}
+
+func uretprobeBit() (uint64, error) {
+ uprobeRetprobeBit.once.Do(func() {
+ uprobeRetprobeBit.value, uprobeRetprobeBit.err = determineRetprobeBit(uprobeType)
+ })
+ return uprobeRetprobeBit.value, uprobeRetprobeBit.err
+}
diff --git a/vendor/github.com/cilium/ebpf/link/xdp.go b/vendor/github.com/cilium/ebpf/link/xdp.go
new file mode 100644
index 000000000..aa8dd3a4c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/xdp.go
@@ -0,0 +1,54 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+)
+
+// XDPAttachFlags represents how XDP program will be attached to interface.
+type XDPAttachFlags uint32
+
+const (
+ // XDPGenericMode (SKB) links XDP BPF program for drivers which do
+ // not yet support native XDP.
+ XDPGenericMode XDPAttachFlags = 1 << (iota + 1)
+ // XDPDriverMode links XDP BPF program into the driver’s receive path.
+ XDPDriverMode
+ // XDPOffloadMode offloads the entire XDP BPF program into hardware.
+ XDPOffloadMode
+)
+
+type XDPOptions struct {
+ // Program must be an XDP BPF program.
+ Program *ebpf.Program
+
+ // Interface is the interface index to attach program to.
+ Interface int
+
+ // Flags is one of XDPAttachFlags (optional).
+ //
+ // Only one XDP mode should be set, without flag defaults
+ // to driver/generic mode (best effort).
+ Flags XDPAttachFlags
+}
+
+// AttachXDP links an XDP BPF program to an XDP hook.
+func AttachXDP(opts XDPOptions) (Link, error) {
+ if t := opts.Program.Type(); t != ebpf.XDP {
+ return nil, fmt.Errorf("invalid program type %s, expected XDP", t)
+ }
+
+ if opts.Interface < 1 {
+ return nil, fmt.Errorf("invalid interface index: %d", opts.Interface)
+ }
+
+ rawLink, err := AttachRawLink(RawLinkOptions{
+ Program: opts.Program,
+ Attach: ebpf.AttachXDP,
+ Target: opts.Interface,
+ Flags: uint32(opts.Flags),
+ })
+
+ return rawLink, err
+}
diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go
index f843bb25e..e6276b182 100644
--- a/vendor/github.com/cilium/ebpf/linker.go
+++ b/vendor/github.com/cilium/ebpf/linker.go
@@ -1,133 +1,238 @@
package ebpf
import (
+ "errors"
"fmt"
+ "sync"
"github.com/cilium/ebpf/asm"
- "github.com/cilium/ebpf/internal/btf"
+ "github.com/cilium/ebpf/btf"
)
-// link resolves bpf-to-bpf calls.
+// splitSymbols splits insns into subsections delimited by Symbol Instructions.
+// insns cannot be empty and must start with a Symbol Instruction.
//
-// Each library may contain multiple functions / labels, and is only linked
-// if prog references one of these functions.
-//
-// Libraries also linked.
-func link(prog *ProgramSpec, libs []*ProgramSpec) error {
- var (
- linked = make(map[*ProgramSpec]bool)
- pending = []asm.Instructions{prog.Instructions}
- insns asm.Instructions
- )
- for len(pending) > 0 {
- insns, pending = pending[0], pending[1:]
- for _, lib := range libs {
- if linked[lib] {
- continue
- }
+// The resulting map is indexed by Symbol name.
+func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) {
+ if len(insns) == 0 {
+ return nil, errors.New("insns is empty")
+ }
- needed, err := needSection(insns, lib.Instructions)
- if err != nil {
- return fmt.Errorf("linking %s: %w", lib.Name, err)
- }
+ if insns[0].Symbol() == "" {
+ return nil, errors.New("insns must start with a Symbol")
+ }
- if !needed {
- continue
+ var name string
+ progs := make(map[string]asm.Instructions)
+ for _, ins := range insns {
+ if sym := ins.Symbol(); sym != "" {
+ if progs[sym] != nil {
+ return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym)
}
+ name = sym
+ }
+
+ progs[name] = append(progs[name], ins)
+ }
- linked[lib] = true
- prog.Instructions = append(prog.Instructions, lib.Instructions...)
- pending = append(pending, lib.Instructions)
+ return progs, nil
+}
- if prog.BTF != nil && lib.BTF != nil {
- if err := btf.ProgramAppend(prog.BTF, lib.BTF); err != nil {
- return fmt.Errorf("linking BTF of %s: %w", lib.Name, err)
- }
- }
+// The linker is responsible for resolving bpf-to-bpf calls between programs
+// within an ELF. Each BPF program must be a self-contained binary blob,
+// so when an instruction in one ELF program section wants to jump to
+// a function in another, the linker needs to pull in the bytecode
+// (and BTF info) of the target function and concatenate the instruction
+// streams.
+//
+// Later on in the pipeline, all call sites are fixed up with relative jumps
+// within this newly-created instruction stream to then finally hand off to
+// the kernel with BPF_PROG_LOAD.
+//
+// Each function is denoted by an ELF symbol and the compiler takes care of
+// register setup before each jump instruction.
+
+// hasFunctionReferences returns true if insns contains one or more bpf2bpf
+// function references.
+func hasFunctionReferences(insns asm.Instructions) bool {
+ for _, i := range insns {
+ if i.IsFunctionReference() {
+ return true
}
}
-
- return nil
+ return false
}
-func needSection(insns, section asm.Instructions) (bool, error) {
- // A map of symbols to the libraries which contain them.
- symbols, err := section.SymbolOffsets()
+// applyRelocations collects and applies any CO-RE relocations in insns.
+//
+// Passing a nil target will relocate against the running kernel. insns are
+// modified in place.
+func applyRelocations(insns asm.Instructions, local, target *btf.Spec) error {
+ var relos []*btf.CORERelocation
+ var reloInsns []*asm.Instruction
+ iter := insns.Iterate()
+ for iter.Next() {
+ if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil {
+ relos = append(relos, relo)
+ reloInsns = append(reloInsns, iter.Ins)
+ }
+ }
+
+ if len(relos) == 0 {
+ return nil
+ }
+
+ target, err := maybeLoadKernelBTF(target)
if err != nil {
- return false, err
+ return err
}
- for _, ins := range insns {
- if ins.Reference == "" {
- continue
- }
+ fixups, err := btf.CORERelocate(local, target, relos)
+ if err != nil {
+ return err
+ }
- if ins.OpCode.JumpOp() != asm.Call || ins.Src != asm.PseudoCall {
- continue
+ for i, fixup := range fixups {
+ if err := fixup.Apply(reloInsns[i]); err != nil {
+ return fmt.Errorf("apply fixup %s: %w", &fixup, err)
}
+ }
+
+ return nil
+}
+
+// flattenPrograms resolves bpf-to-bpf calls for a set of programs.
+//
+// Links all programs in names by modifying their ProgramSpec in progs.
+func flattenPrograms(progs map[string]*ProgramSpec, names []string) {
+ // Pre-calculate all function references.
+ refs := make(map[*ProgramSpec][]string)
+ for _, prog := range progs {
+ refs[prog] = prog.Instructions.FunctionReferences()
+ }
+
+ // Create a flattened instruction stream, but don't modify progs yet to
+ // avoid linking multiple times.
+ flattened := make([]asm.Instructions, 0, len(names))
+ for _, name := range names {
+ flattened = append(flattened, flattenInstructions(name, progs, refs))
+ }
+
+ // Finally, assign the flattened instructions.
+ for i, name := range names {
+ progs[name].Instructions = flattened[i]
+ }
+}
+
+// flattenInstructions resolves bpf-to-bpf calls for a single program.
+//
+// Flattens the instructions of prog by concatenating the instructions of all
+// direct and indirect dependencies.
+//
+// progs contains all referenceable programs, while refs contain the direct
+// dependencies of each program.
+func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions {
+ prog := progs[name]
+
+ insns := make(asm.Instructions, len(prog.Instructions))
+ copy(insns, prog.Instructions)
+
+ // Add all direct references of prog to the list of to be linked programs.
+ pending := make([]string, len(refs[prog]))
+ copy(pending, refs[prog])
- if ins.Constant != -1 {
- // This is already a valid call, no need to link again.
+ // All references for which we've appended instructions.
+ linked := make(map[string]bool)
+
+ // Iterate all pending references. We can't use a range since pending is
+ // modified in the body below.
+ for len(pending) > 0 {
+ var ref string
+ ref, pending = pending[0], pending[1:]
+
+ if linked[ref] {
+ // We've already linked this ref, don't append instructions again.
continue
}
- if _, ok := symbols[ins.Reference]; !ok {
- // Symbol isn't available in this section
+ progRef := progs[ref]
+ if progRef == nil {
+ // We don't have instructions that go with this reference. This
+ // happens when calling extern functions.
continue
}
- // At this point we know that at least one function in the
- // library is called from insns, so we have to link it.
- return true, nil
+ insns = append(insns, progRef.Instructions...)
+ linked[ref] = true
+
+ // Make sure we link indirect references.
+ pending = append(pending, refs[progRef]...)
}
- // None of the functions in the section are called.
- return false, nil
+ return insns
}
-func fixupJumpsAndCalls(insns asm.Instructions) error {
- symbolOffsets := make(map[string]asm.RawInstructionOffset)
+// fixupAndValidate is called by the ELF reader right before marshaling the
+// instruction stream. It performs last-minute adjustments to the program and
+// runs some sanity checks before sending it off to the kernel.
+func fixupAndValidate(insns asm.Instructions) error {
iter := insns.Iterate()
for iter.Next() {
ins := iter.Ins
- if ins.Symbol == "" {
- continue
+ // Map load was tagged with a Reference, but does not contain a Map pointer.
+ if ins.IsLoadFromMap() && ins.Reference() != "" && ins.Map() == nil {
+ return fmt.Errorf("instruction %d: map %s: %w", iter.Index, ins.Reference(), asm.ErrUnsatisfiedMapReference)
}
- if _, ok := symbolOffsets[ins.Symbol]; ok {
- return fmt.Errorf("duplicate symbol %s", ins.Symbol)
- }
+ fixupProbeReadKernel(ins)
+ }
- symbolOffsets[ins.Symbol] = iter.Offset
+ return nil
+}
+
+// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str)
+// with bpf_probe_read(_str) on kernels that don't support it yet.
+func fixupProbeReadKernel(ins *asm.Instruction) {
+ if !ins.IsBuiltinCall() {
+ return
}
- iter = insns.Iterate()
- for iter.Next() {
- i := iter.Index
- offset := iter.Offset
- ins := iter.Ins
+ // Kernel supports bpf_probe_read_kernel, nothing to do.
+ if haveProbeReadKernel() == nil {
+ return
+ }
- switch {
- case ins.IsFunctionCall() && ins.Constant == -1:
- // Rewrite bpf to bpf call
- callOffset, ok := symbolOffsets[ins.Reference]
- if !ok {
- return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference)
- }
+ switch asm.BuiltinFunc(ins.Constant) {
+ case asm.FnProbeReadKernel, asm.FnProbeReadUser:
+ ins.Constant = int64(asm.FnProbeRead)
+ case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr:
+ ins.Constant = int64(asm.FnProbeReadStr)
+ }
+}
- ins.Constant = int64(callOffset - offset - 1)
+var kernelBTF struct {
+ sync.Mutex
+ spec *btf.Spec
+}
- case ins.OpCode.Class() == asm.JumpClass && ins.Offset == -1:
- // Rewrite jump to label
- jumpOffset, ok := symbolOffsets[ins.Reference]
- if !ok {
- return fmt.Errorf("instruction %d: reference to missing symbol %q", i, ins.Reference)
- }
+// maybeLoadKernelBTF loads the current kernel's BTF if spec is nil, otherwise
+// it returns spec unchanged.
+//
+// The kernel BTF is cached for the lifetime of the process.
+func maybeLoadKernelBTF(spec *btf.Spec) (*btf.Spec, error) {
+ if spec != nil {
+ return spec, nil
+ }
- ins.Offset = int16(jumpOffset - offset - 1)
- }
+ kernelBTF.Lock()
+ defer kernelBTF.Unlock()
+
+ if kernelBTF.spec != nil {
+ return kernelBTF.spec, nil
}
- return nil
+ var err error
+ kernelBTF.spec, err = btf.LoadKernelSpec()
+ return kernelBTF.spec, err
}
diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go
index 316fc37b1..e4a6c87e9 100644
--- a/vendor/github.com/cilium/ebpf/map.go
+++ b/vendor/github.com/cilium/ebpf/map.go
@@ -1,15 +1,19 @@
package ebpf
import (
+ "bytes"
"errors"
"fmt"
"io"
+ "math/rand"
"path/filepath"
"reflect"
- "strings"
+ "time"
+ "unsafe"
+ "github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
- "github.com/cilium/ebpf/internal/btf"
+ "github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
@@ -18,6 +22,8 @@ var (
ErrKeyNotExist = errors.New("key does not exist")
ErrKeyExist = errors.New("key already exists")
ErrIterationAborted = errors.New("iteration aborted")
+ ErrMapIncompatible = errors.New("map spec is incompatible with existing map")
+ errMapNoBTFValue = errors.New("map spec does not contain a BTF Value")
)
// MapOptions control loading a map into the kernel.
@@ -25,7 +31,8 @@ type MapOptions struct {
// The base path to pin maps in if requested via PinByName.
// Existing maps will be re-used if they are compatible, otherwise an
// error is returned.
- PinPath string
+ PinPath string
+ LoadPinOptions LoadPinOptions
}
// MapID represents the unique ID of an eBPF map
@@ -40,7 +47,10 @@ type MapSpec struct {
KeySize uint32
ValueSize uint32
MaxEntries uint32
- Flags uint32
+
+ // Flags is passed to the kernel and specifies additional map
+ // creation attributes.
+ Flags uint32
// Automatically pin and load a map from MapOptions.PinPath.
// Generates an error if an existing pinned map is incompatible with the MapSpec.
@@ -60,8 +70,16 @@ type MapSpec struct {
// InnerMap is used as a template for ArrayOfMaps and HashOfMaps
InnerMap *MapSpec
+ // Extra trailing bytes found in the ELF map definition when using structs
+ // larger than libbpf's bpf_map_def. nil if no trailing bytes were present.
+ // Must be nil or empty before instantiating the MapSpec into a Map.
+ Extra *bytes.Reader
+
+ // The key and value type of this map. May be nil.
+ Key, Value btf.Type
+
// The BTF associated with this map.
- BTF *btf.Map
+ BTF *btf.Spec
}
func (ms *MapSpec) String() string {
@@ -77,12 +95,63 @@ func (ms *MapSpec) Copy() *MapSpec {
}
cpy := *ms
+
cpy.Contents = make([]MapKV, len(ms.Contents))
copy(cpy.Contents, ms.Contents)
+
cpy.InnerMap = ms.InnerMap.Copy()
+
return &cpy
}
+// hasBTF returns true if the MapSpec has a valid BTF spec and if its
+// map type supports associated BTF metadata in the kernel.
+func (ms *MapSpec) hasBTF() bool {
+ return ms.BTF != nil && ms.Type.hasBTF()
+}
+
+func (ms *MapSpec) clampPerfEventArraySize() error {
+ if ms.Type != PerfEventArray {
+ return nil
+ }
+
+ n, err := internal.PossibleCPUs()
+ if err != nil {
+ return fmt.Errorf("perf event array: %w", err)
+ }
+
+ if n := uint32(n); ms.MaxEntries > n {
+ ms.MaxEntries = n
+ }
+
+ return nil
+}
+
+// dataSection returns the contents and BTF Datasec descriptor of the spec.
+func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) {
+
+ if ms.Value == nil {
+ return nil, nil, errMapNoBTFValue
+ }
+
+ ds, ok := ms.Value.(*btf.Datasec)
+ if !ok {
+ return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value)
+ }
+
+ if n := len(ms.Contents); n != 1 {
+ return nil, nil, fmt.Errorf("expected one key, found %d", n)
+ }
+
+ kv := ms.Contents[0]
+ value, ok := kv.Value.([]byte)
+ if !ok {
+ return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value)
+ }
+
+ return value, ds, nil
+}
+
// MapKV is used to initialize the contents of a Map.
type MapKV struct {
Key interface{}
@@ -92,19 +161,20 @@ type MapKV struct {
func (ms *MapSpec) checkCompatibility(m *Map) error {
switch {
case m.typ != ms.Type:
- return fmt.Errorf("expected type %v, got %v", ms.Type, m.typ)
+ return fmt.Errorf("expected type %v, got %v: %w", ms.Type, m.typ, ErrMapIncompatible)
case m.keySize != ms.KeySize:
- return fmt.Errorf("expected key size %v, got %v", ms.KeySize, m.keySize)
+ return fmt.Errorf("expected key size %v, got %v: %w", ms.KeySize, m.keySize, ErrMapIncompatible)
case m.valueSize != ms.ValueSize:
- return fmt.Errorf("expected value size %v, got %v", ms.ValueSize, m.valueSize)
+ return fmt.Errorf("expected value size %v, got %v: %w", ms.ValueSize, m.valueSize, ErrMapIncompatible)
- case m.maxEntries != ms.MaxEntries:
- return fmt.Errorf("expected max entries %v, got %v", ms.MaxEntries, m.maxEntries)
+ case !(ms.Type == PerfEventArray && ms.MaxEntries == 0) &&
+ m.maxEntries != ms.MaxEntries:
+ return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible)
case m.flags != ms.Flags:
- return fmt.Errorf("expected flags %v, got %v", ms.Flags, m.flags)
+ return fmt.Errorf("expected flags %v, got %v: %w", ms.Flags, m.flags, ErrMapIncompatible)
}
return nil
}
@@ -120,7 +190,7 @@ func (ms *MapSpec) checkCompatibility(m *Map) error {
// if you require custom encoding.
type Map struct {
name string
- fd *internal.FD
+ fd *sys.FD
typ MapType
keySize uint32
valueSize uint32
@@ -135,18 +205,19 @@ type Map struct {
//
// You should not use fd after calling this function.
func NewMapFromFD(fd int) (*Map, error) {
- if fd < 0 {
- return nil, errors.New("invalid fd")
+ f, err := sys.NewFD(fd)
+ if err != nil {
+ return nil, err
}
- return newMapFromFD(internal.NewFD(uint32(fd)))
+ return newMapFromFD(f)
}
-func newMapFromFD(fd *internal.FD) (*Map, error) {
+func newMapFromFD(fd *sys.FD) (*Map, error) {
info, err := newMapInfoFromFd(fd)
if err != nil {
fd.Close()
- return nil, fmt.Errorf("get map info: %s", err)
+ return nil, fmt.Errorf("get map info: %w", err)
}
return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags)
@@ -166,32 +237,55 @@ func NewMap(spec *MapSpec) (*Map, error) {
//
// The caller is responsible for ensuring the process' rlimit is set
// sufficiently high for locking memory during map creation. This can be done
-// by calling unix.Setrlimit with unix.RLIMIT_MEMLOCK prior to calling NewMapWithOptions.
+// by calling rlimit.RemoveMemlock() prior to calling NewMapWithOptions.
+//
+// May return an error wrapping ErrMapIncompatible.
func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) {
- btfs := make(btfHandleCache)
- defer btfs.close()
+ handles := newHandleCache()
+ defer handles.close()
+
+ m, err := newMapWithOptions(spec, opts, handles)
+ if err != nil {
+ return nil, fmt.Errorf("creating map: %w", err)
+ }
+
+ if err := m.finalize(spec); err != nil {
+ m.Close()
+ return nil, fmt.Errorf("populating map: %w", err)
+ }
- return newMapWithOptions(spec, opts, btfs)
+ return m, nil
}
-func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (*Map, error) {
+func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ *Map, err error) {
+ closeOnError := func(c io.Closer) {
+ if err != nil {
+ c.Close()
+ }
+ }
+
switch spec.Pinning {
case PinByName:
- if spec.Name == "" || opts.PinPath == "" {
- return nil, fmt.Errorf("pin by name: missing Name or PinPath")
+ if spec.Name == "" {
+ return nil, fmt.Errorf("pin by name: missing Name")
+ }
+
+ if opts.PinPath == "" {
+ return nil, fmt.Errorf("pin by name: missing MapOptions.PinPath")
}
- m, err := LoadPinnedMap(filepath.Join(opts.PinPath, spec.Name))
+ path := filepath.Join(opts.PinPath, spec.Name)
+ m, err := LoadPinnedMap(path, &opts.LoadPinOptions)
if errors.Is(err, unix.ENOENT) {
break
}
if err != nil {
- return nil, fmt.Errorf("load pinned map: %s", err)
+ return nil, fmt.Errorf("load pinned map: %w", err)
}
+ defer closeOnError(m)
if err := spec.checkCompatibility(m); err != nil {
- m.Close()
- return nil, fmt.Errorf("use pinned map %s: %s", spec.Name, err)
+ return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err)
}
return m, nil
@@ -200,10 +294,10 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (*Ma
// Nothing to do here
default:
- return nil, fmt.Errorf("unsupported pin type %d", int(spec.Pinning))
+ return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported)
}
- var innerFd *internal.FD
+ var innerFd *sys.FD
if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps {
if spec.InnerMap == nil {
return nil, fmt.Errorf("%s requires InnerMap", spec.Type)
@@ -213,31 +307,37 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, btfs btfHandleCache) (*Ma
return nil, errors.New("inner maps cannot be pinned")
}
- template, err := createMap(spec.InnerMap, nil, opts, btfs)
+ template, err := spec.InnerMap.createMap(nil, opts, handles)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("inner map: %w", err)
}
defer template.Close()
+ // Intentionally skip populating and freezing (finalizing)
+ // the inner map template since it will be removed shortly.
+
innerFd = template.fd
}
- m, err := createMap(spec, innerFd, opts, btfs)
+ m, err := spec.createMap(innerFd, opts, handles)
if err != nil {
return nil, err
}
+ defer closeOnError(m)
if spec.Pinning == PinByName {
- if err := m.Pin(filepath.Join(opts.PinPath, spec.Name)); err != nil {
- m.Close()
- return nil, fmt.Errorf("pin map: %s", err)
+ path := filepath.Join(opts.PinPath, spec.Name)
+ if err := m.Pin(path); err != nil {
+ return nil, fmt.Errorf("pin map: %w", err)
}
}
return m, nil
}
-func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, btfs btfHandleCache) (_ *Map, err error) {
+// createMap validates the spec's properties and creates the map in the kernel
+// using the given opts. It does not populate or freeze the map.
+func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) {
closeOnError := func(closer io.Closer) {
if err != nil {
closer.Close()
@@ -246,10 +346,18 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, btfs btfHandl
spec = spec.Copy()
+ // Kernels 4.13 through 5.4 used a struct bpf_map_def that contained
+ // additional 'inner_map_idx' and later 'numa_node' fields.
+ // In order to support loading these definitions, tolerate the presence of
+ // extra bytes, but require them to be zeroes.
+ if spec.Extra != nil {
+ if _, err := io.Copy(internal.DiscardZeroes{}, spec.Extra); err != nil {
+ return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map")
+ }
+ }
+
switch spec.Type {
- case ArrayOfMaps:
- fallthrough
- case HashOfMaps:
+ case ArrayOfMaps, HashOfMaps:
if err := haveNestedMaps(); err != nil {
return nil, err
}
@@ -284,51 +392,73 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, btfs btfHandl
return nil, fmt.Errorf("map create: %w", err)
}
}
+ if spec.Flags&unix.BPF_F_MMAPABLE > 0 {
+ if err := haveMmapableMaps(); err != nil {
+ return nil, fmt.Errorf("map create: %w", err)
+ }
+ }
+ if spec.Flags&unix.BPF_F_INNER_MAP > 0 {
+ if err := haveInnerMaps(); err != nil {
+ return nil, fmt.Errorf("map create: %w", err)
+ }
+ }
+ if spec.Flags&unix.BPF_F_NO_PREALLOC > 0 {
+ if err := haveNoPreallocMaps(); err != nil {
+ return nil, fmt.Errorf("map create: %w", err)
+ }
+ }
- attr := bpfMapCreateAttr{
- mapType: spec.Type,
- keySize: spec.KeySize,
- valueSize: spec.ValueSize,
- maxEntries: spec.MaxEntries,
- flags: spec.Flags,
- numaNode: spec.NumaNode,
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(spec.Type),
+ KeySize: spec.KeySize,
+ ValueSize: spec.ValueSize,
+ MaxEntries: spec.MaxEntries,
+ MapFlags: spec.Flags,
+ NumaNode: spec.NumaNode,
}
if inner != nil {
- var err error
- attr.innerMapFd, err = inner.Value()
- if err != nil {
- return nil, fmt.Errorf("map create: %w", err)
- }
+ attr.InnerMapFd = inner.Uint()
}
if haveObjName() == nil {
- attr.mapName = newBPFObjName(spec.Name)
+ attr.MapName = sys.NewObjName(spec.Name)
}
- var btfDisabled bool
- if spec.BTF != nil {
- handle, err := btfs.load(btf.MapSpec(spec.BTF))
- btfDisabled = errors.Is(err, btf.ErrNotSupported)
- if err != nil && !btfDisabled {
+ if spec.hasBTF() {
+ handle, err := handles.btfHandle(spec.BTF)
+ if err != nil && !errors.Is(err, btf.ErrNotSupported) {
return nil, fmt.Errorf("load BTF: %w", err)
}
if handle != nil {
- attr.btfFd = uint32(handle.FD())
- attr.btfKeyTypeID = btf.MapKey(spec.BTF).ID()
- attr.btfValueTypeID = btf.MapValue(spec.BTF).ID()
+ keyTypeID, err := spec.BTF.TypeID(spec.Key)
+ if err != nil {
+ return nil, err
+ }
+
+ valueTypeID, err := spec.BTF.TypeID(spec.Value)
+ if err != nil {
+ return nil, err
+ }
+
+ attr.BtfFd = uint32(handle.FD())
+ attr.BtfKeyTypeId = uint32(keyTypeID)
+ attr.BtfValueTypeId = uint32(valueTypeID)
}
}
- fd, err := bpfMapCreate(&attr)
+ fd, err := sys.MapCreate(&attr)
if err != nil {
if errors.Is(err, unix.EPERM) {
- return nil, fmt.Errorf("map create: RLIMIT_MEMLOCK may be too low: %w", err)
+ return nil, fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err)
}
- if btfDisabled {
+ if !spec.hasBTF() {
return nil, fmt.Errorf("map create without BTF: %w", err)
}
+ if errors.Is(err, unix.EINVAL) && attr.MaxEntries == 0 {
+ return nil, fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err)
+ }
return nil, fmt.Errorf("map create: %w", err)
}
defer closeOnError(fd)
@@ -338,20 +468,12 @@ func createMap(spec *MapSpec, inner *internal.FD, opts MapOptions, btfs btfHandl
return nil, fmt.Errorf("map create: %w", err)
}
- if err := m.populate(spec.Contents); err != nil {
- return nil, fmt.Errorf("map create: can't set initial contents: %w", err)
- }
-
- if spec.Freeze {
- if err := m.Freeze(); err != nil {
- return nil, fmt.Errorf("can't freeze map: %w", err)
- }
- }
-
return m, nil
}
-func newMap(fd *internal.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) {
+// newMap allocates and returns a new Map structure.
+// Sets the fullValueSize on per-CPU maps.
+func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) {
m := &Map{
name,
fd,
@@ -373,7 +495,7 @@ func newMap(fd *internal.FD, name string, typ MapType, keySize, valueSize, maxEn
return nil, err
}
- m.fullValueSize = align(int(valueSize), 8) * possibleCPUs
+ m.fullValueSize = internal.Align(int(valueSize), 8) * possibleCPUs
return m, nil
}
@@ -414,6 +536,12 @@ func (m *Map) Info() (*MapInfo, error) {
return newMapInfoFromFd(m.fd)
}
+// MapLookupFlags controls the behaviour of the map lookup calls.
+type MapLookupFlags uint64
+
+// LookupLock look up the value of a spin-locked map.
+const LookupLock MapLookupFlags = 4
+
// Lookup retrieves a value from a Map.
//
// Calls Close() on valueOut if it is of type **Map or **Program,
@@ -422,39 +550,58 @@ func (m *Map) Info() (*MapInfo, error) {
// Returns an error if the key doesn't exist, see ErrKeyNotExist.
func (m *Map) Lookup(key, valueOut interface{}) error {
valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
- if err := m.lookup(key, valuePtr); err != nil {
+ if err := m.lookup(key, valuePtr, 0); err != nil {
return err
}
return m.unmarshalValue(valueOut, valueBytes)
}
-// LookupAndDelete retrieves and deletes a value from a Map.
+// LookupWithFlags retrieves a value from a Map with flags.
//
-// Returns ErrKeyNotExist if the key doesn't exist.
-func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
+// Passing LookupLock flag will look up the value of a spin-locked
+// map without returning the lock. This must be specified if the
+// elements contain a spinlock.
+//
+// Calls Close() on valueOut if it is of type **Map or **Program,
+// and *valueOut is not nil.
+//
+// Returns an error if the key doesn't exist, see ErrKeyNotExist.
+func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error {
valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
-
- keyPtr, err := m.marshalKey(key)
- if err != nil {
- return fmt.Errorf("can't marshal key: %w", err)
- }
-
- if err := bpfMapLookupAndDelete(m.fd, keyPtr, valuePtr); err != nil {
- return fmt.Errorf("lookup and delete failed: %w", err)
+ if err := m.lookup(key, valuePtr, flags); err != nil {
+ return err
}
return m.unmarshalValue(valueOut, valueBytes)
}
+// LookupAndDelete retrieves and deletes a value from a Map.
+//
+// Returns ErrKeyNotExist if the key doesn't exist.
+func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
+ return m.lookupAndDelete(key, valueOut, 0)
+}
+
+// LookupAndDeleteWithFlags retrieves and deletes a value from a Map.
+//
+// Passing LookupLock flag will look up and delete the value of a spin-locked
+// map without returning the lock. This must be specified if the elements
+// contain a spinlock.
+//
+// Returns ErrKeyNotExist if the key doesn't exist.
+func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error {
+ return m.lookupAndDelete(key, valueOut, flags)
+}
+
// LookupBytes gets a value from Map.
//
// Returns a nil value if a key doesn't exist.
func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
valueBytes := make([]byte, m.fullValueSize)
- valuePtr := internal.NewSlicePointer(valueBytes)
+ valuePtr := sys.NewSlicePointer(valueBytes)
- err := m.lookup(key, valuePtr)
+ err := m.lookup(key, valuePtr, 0)
if errors.Is(err, ErrKeyNotExist) {
return nil, nil
}
@@ -462,18 +609,47 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
return valueBytes, err
}
-func (m *Map) lookup(key interface{}, valueOut internal.Pointer) error {
+func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error {
keyPtr, err := m.marshalKey(key)
if err != nil {
return fmt.Errorf("can't marshal key: %w", err)
}
- if err = bpfMapLookupElem(m.fd, keyPtr, valueOut); err != nil {
- return fmt.Errorf("lookup failed: %w", err)
+ attr := sys.MapLookupElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ Value: valueOut,
+ Flags: uint64(flags),
+ }
+
+ if err = sys.MapLookupElem(&attr); err != nil {
+ return fmt.Errorf("lookup: %w", wrapMapError(err))
}
return nil
}
+func (m *Map) lookupAndDelete(key, valueOut interface{}, flags MapLookupFlags) error {
+ valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize)
+
+ keyPtr, err := m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("can't marshal key: %w", err)
+ }
+
+ attr := sys.MapLookupAndDeleteElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ Value: valuePtr,
+ Flags: uint64(flags),
+ }
+
+ if err := sys.MapLookupAndDeleteElem(&attr); err != nil {
+ return fmt.Errorf("lookup and delete: %w", wrapMapError(err))
+ }
+
+ return m.unmarshalValue(valueOut, valueBytes)
+}
+
// MapUpdateFlags controls the behaviour of the Map.Update call.
//
// The exact semantics depend on the specific MapType.
@@ -486,6 +662,8 @@ const (
UpdateNoExist MapUpdateFlags = 1 << (iota - 1)
// UpdateExist updates an existing element.
UpdateExist
+ // UpdateLock updates elements under bpf_spin_lock.
+ UpdateLock
)
// Put replaces or creates a value in map.
@@ -507,8 +685,15 @@ func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error {
return fmt.Errorf("can't marshal value: %w", err)
}
- if err = bpfMapUpdateElem(m.fd, keyPtr, valuePtr, uint64(flags)); err != nil {
- return fmt.Errorf("update failed: %w", err)
+ attr := sys.MapUpdateElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ Value: valuePtr,
+ Flags: uint64(flags),
+ }
+
+ if err = sys.MapUpdateElem(&attr); err != nil {
+ return fmt.Errorf("update: %w", wrapMapError(err))
}
return nil
@@ -523,8 +708,13 @@ func (m *Map) Delete(key interface{}) error {
return fmt.Errorf("can't marshal key: %w", err)
}
- if err = bpfMapDeleteElem(m.fd, keyPtr); err != nil {
- return fmt.Errorf("delete failed: %w", err)
+ attr := sys.MapDeleteElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ }
+
+ if err = sys.MapDeleteElem(&attr); err != nil {
+ return fmt.Errorf("delete: %w", wrapMapError(err))
}
return nil
}
@@ -556,7 +746,7 @@ func (m *Map) NextKey(key, nextKeyOut interface{}) error {
// Returns nil if there are no more keys.
func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
nextKey := make([]byte, m.keySize)
- nextKeyPtr := internal.NewSlicePointer(nextKey)
+ nextKeyPtr := sys.NewSlicePointer(nextKey)
err := m.nextKey(key, nextKeyPtr)
if errors.Is(err, ErrKeyNotExist) {
@@ -566,9 +756,9 @@ func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
return nextKey, err
}
-func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error {
+func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error {
var (
- keyPtr internal.Pointer
+ keyPtr sys.Pointer
err error
)
@@ -579,12 +769,77 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error {
}
}
- if err = bpfMapGetNextKey(m.fd, keyPtr, nextKeyOut); err != nil {
- return fmt.Errorf("next key failed: %w", err)
+ attr := sys.MapGetNextKeyAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ NextKey: nextKeyOut,
+ }
+
+ if err = sys.MapGetNextKey(&attr); err != nil {
+ // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the
+ // first map element when a nil key pointer is specified.
+ if key == nil && errors.Is(err, unix.EFAULT) {
+ var guessKey []byte
+ guessKey, err = m.guessNonExistentKey()
+ if err != nil {
+ return err
+ }
+
+ // Retry the syscall with a valid non-existing key.
+ attr.Key = sys.NewSlicePointer(guessKey)
+ if err = sys.MapGetNextKey(&attr); err == nil {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("next key: %w", wrapMapError(err))
}
+
return nil
}
+// guessNonExistentKey attempts to perform a map lookup that returns ENOENT.
+// This is necessary on kernels before 4.4.132, since those don't support
+// iterating maps from the start by providing an invalid key pointer.
+func (m *Map) guessNonExistentKey() ([]byte, error) {
+ // Provide an invalid value pointer to prevent a copy on the kernel side.
+ valuePtr := sys.NewPointer(unsafe.Pointer(^uintptr(0)))
+ randKey := make([]byte, int(m.keySize))
+
+ for i := 0; i < 4; i++ {
+ switch i {
+ // For hash maps, the 0 key is less likely to be occupied. They're often
+ // used for storing data related to pointers, and their access pattern is
+ // generally scattered across the keyspace.
+ case 0:
+ // An all-0xff key is guaranteed to be out of bounds of any array, since
+ // those have a fixed key size of 4 bytes. The only corner case being
+ // arrays with 2^32 max entries, but those are prohibitively expensive
+ // in many environments.
+ case 1:
+ for r := range randKey {
+ randKey[r] = 0xff
+ }
+ // Inspired by BCC, 0x55 is an alternating binary pattern (0101), so
+ // is unlikely to be taken.
+ case 2:
+ for r := range randKey {
+ randKey[r] = 0x55
+ }
+ // Last ditch effort, generate a random key.
+ case 3:
+ rand.New(rand.NewSource(time.Now().UnixNano())).Read(randKey)
+ }
+
+ err := m.lookup(randKey, valuePtr, 0)
+ if errors.Is(err, ErrKeyNotExist) {
+ return randKey, nil
+ }
+ }
+
+ return nil, errors.New("couldn't find non-existing key")
+}
+
// BatchLookup looks up many elements in a map at once.
//
// "keysOut" and "valuesOut" must be of type slice, a pointer
@@ -596,7 +851,7 @@ func (m *Map) nextKey(key interface{}, nextKeyOut internal.Pointer) error {
// the end of all possible results, even when partial results
// are returned. It should be used to evaluate when lookup is "done".
func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
- return m.batchLookup(internal.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
+ return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
}
// BatchLookupAndDelete looks up many elements in a map at once,
@@ -611,10 +866,10 @@ func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, o
// the end of all possible results, even when partial results
// are returned. It should be used to evaluate when lookup is "done".
func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
- return m.batchLookup(internal.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
+ return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
}
-func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
if err := haveBatchAPI(); err != nil {
return 0, err
}
@@ -634,29 +889,36 @@ func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, va
return 0, fmt.Errorf("keysOut and valuesOut must be the same length")
}
keyBuf := make([]byte, count*int(m.keySize))
- keyPtr := internal.NewSlicePointer(keyBuf)
+ keyPtr := sys.NewSlicePointer(keyBuf)
valueBuf := make([]byte, count*int(m.fullValueSize))
- valuePtr := internal.NewSlicePointer(valueBuf)
+ valuePtr := sys.NewSlicePointer(valueBuf)
+ nextPtr, nextBuf := makeBuffer(nextKeyOut, int(m.keySize))
- var (
- startPtr internal.Pointer
- err error
- retErr error
- )
+ attr := sys.MapLookupBatchAttr{
+ MapFd: m.fd.Uint(),
+ Keys: keyPtr,
+ Values: valuePtr,
+ Count: uint32(count),
+ OutBatch: nextPtr,
+ }
+
+ if opts != nil {
+ attr.ElemFlags = opts.ElemFlags
+ attr.Flags = opts.Flags
+ }
+
+ var err error
if startKey != nil {
- startPtr, err = marshalPtr(startKey, int(m.keySize))
+ attr.InBatch, err = marshalPtr(startKey, int(m.keySize))
if err != nil {
return 0, err
}
}
- nextPtr, nextBuf := makeBuffer(nextKeyOut, int(m.keySize))
- ct, err := bpfMapBatch(cmd, m.fd, startPtr, nextPtr, keyPtr, valuePtr, uint32(count), opts)
- if err != nil {
- if !errors.Is(err, ErrKeyNotExist) {
- return 0, err
- }
- retErr = ErrKeyNotExist
+ _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+ sysErr = wrapMapError(sysErr)
+ if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) {
+ return 0, sysErr
}
err = m.unmarshalKey(nextKeyOut, nextBuf)
@@ -669,9 +931,10 @@ func (m *Map) batchLookup(cmd internal.BPFCmd, startKey, nextKeyOut, keysOut, va
}
err = unmarshalBytes(valuesOut, valueBuf)
if err != nil {
- retErr = err
+ return 0, err
}
- return int(ct), retErr
+
+ return int(attr.Count), sysErr
}
// BatchUpdate updates the map with multiple keys and values
@@ -695,7 +958,7 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er
}
var (
count = keysValue.Len()
- valuePtr internal.Pointer
+ valuePtr sys.Pointer
err error
)
if count != valuesValue.Len() {
@@ -709,9 +972,24 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er
if err != nil {
return 0, err
}
- var nilPtr internal.Pointer
- ct, err := bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, valuePtr, uint32(count), opts)
- return int(ct), err
+
+ attr := sys.MapUpdateBatchAttr{
+ MapFd: m.fd.Uint(),
+ Keys: keyPtr,
+ Values: valuePtr,
+ Count: uint32(count),
+ }
+ if opts != nil {
+ attr.ElemFlags = opts.ElemFlags
+ attr.Flags = opts.Flags
+ }
+
+ err = sys.MapUpdateBatch(&attr)
+ if err != nil {
+ return int(attr.Count), fmt.Errorf("batch update: %w", wrapMapError(err))
+ }
+
+ return int(attr.Count), nil
}
// BatchDelete batch deletes entries in the map by keys.
@@ -732,9 +1010,23 @@ func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) {
if err != nil {
return 0, fmt.Errorf("cannot marshal keys: %v", err)
}
- var nilPtr internal.Pointer
- ct, err := bpfMapBatch(internal.BPF_MAP_DELETE_BATCH, m.fd, nilPtr, nilPtr, keyPtr, nilPtr, uint32(count), opts)
- return int(ct), err
+
+ attr := sys.MapDeleteBatchAttr{
+ MapFd: m.fd.Uint(),
+ Keys: keyPtr,
+ Count: uint32(count),
+ }
+
+ if opts != nil {
+ attr.ElemFlags = opts.ElemFlags
+ attr.Flags = opts.Flags
+ }
+
+ if err = sys.MapDeleteBatch(&attr); err != nil {
+ return int(attr.Count), fmt.Errorf("batch delete: %w", wrapMapError(err))
+ }
+
+ return int(attr.Count), nil
}
// Iterate traverses a map.
@@ -747,7 +1039,8 @@ func (m *Map) Iterate() *MapIterator {
return newMapIterator(m)
}
-// Close removes a Map
+// Close the Map's underlying file descriptor, which could unload the
+// Map from the kernel if it is not pinned or in use by a loaded Program.
func (m *Map) Close() error {
if m == nil {
// This makes it easier to clean up when iterating maps
@@ -762,14 +1055,7 @@ func (m *Map) Close() error {
//
// Calling this function is invalid after Close has been called.
func (m *Map) FD() int {
- fd, err := m.fd.Value()
- if err != nil {
- // Best effort: -1 is the number most likely to be an
- // invalid file descriptor.
- return -1
- }
-
- return int(fd)
+ return m.fd.Int()
}
// Clone creates a duplicate of the Map.
@@ -805,12 +1091,13 @@ func (m *Map) Clone() (*Map, error) {
// Pin persists the map on the BPF virtual file system past the lifetime of
// the process that created it .
//
-// Calling Pin on a previously pinned map will override the path.
+// Calling Pin on a previously pinned map will overwrite the path, except when
+// the new path already exists. Re-pinning across filesystems is not supported.
// You can Clone a map to pin it to a different path.
//
// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs
func (m *Map) Pin(fileName string) error {
- if err := pin(m.pinnedPath, fileName, m.fd); err != nil {
+ if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil {
return err
}
m.pinnedPath = fileName
@@ -823,7 +1110,7 @@ func (m *Map) Pin(fileName string) error {
//
// Unpinning an unpinned Map returns nil.
func (m *Map) Unpin() error {
- if err := unpin(m.pinnedPath); err != nil {
+ if err := internal.Unpin(m.pinnedPath); err != nil {
return err
}
m.pinnedPath = ""
@@ -832,10 +1119,7 @@ func (m *Map) Unpin() error {
// IsPinned returns true if the map has a non-empty pinned path.
func (m *Map) IsPinned() bool {
- if m.pinnedPath == "" {
- return false
- }
- return true
+ return m.pinnedPath != ""
}
// Freeze prevents a map to be modified from user space.
@@ -846,28 +1130,41 @@ func (m *Map) Freeze() error {
return fmt.Errorf("can't freeze map: %w", err)
}
- if err := bpfMapFreeze(m.fd); err != nil {
+ attr := sys.MapFreezeAttr{
+ MapFd: m.fd.Uint(),
+ }
+
+ if err := sys.MapFreeze(&attr); err != nil {
return fmt.Errorf("can't freeze map: %w", err)
}
return nil
}
-func (m *Map) populate(contents []MapKV) error {
- for _, kv := range contents {
+// finalize populates the Map according to the Contents specified
+// in spec and freezes the Map if requested by spec.
+func (m *Map) finalize(spec *MapSpec) error {
+ for _, kv := range spec.Contents {
if err := m.Put(kv.Key, kv.Value); err != nil {
- return fmt.Errorf("key %v: %w", kv.Key, err)
+ return fmt.Errorf("putting value: key %v: %w", kv.Key, err)
+ }
+ }
+
+ if spec.Freeze {
+ if err := m.Freeze(); err != nil {
+ return fmt.Errorf("freezing map: %w", err)
}
}
+
return nil
}
-func (m *Map) marshalKey(data interface{}) (internal.Pointer, error) {
+func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) {
if data == nil {
if m.keySize == 0 {
// Queues have a key length of zero, so passing nil here is valid.
- return internal.NewPointer(nil), nil
+ return sys.NewPointer(nil), nil
}
- return internal.Pointer{}, errors.New("can't use nil as key of map")
+ return sys.Pointer{}, errors.New("can't use nil as key of map")
}
return marshalPtr(data, int(m.keySize))
@@ -882,7 +1179,7 @@ func (m *Map) unmarshalKey(data interface{}, buf []byte) error {
return unmarshalBytes(data, buf)
}
-func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) {
+func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) {
if m.typ.hasPerCPUValue() {
return marshalPerCPUValue(data, int(m.valueSize))
}
@@ -895,13 +1192,13 @@ func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) {
switch value := data.(type) {
case *Map:
if !m.typ.canStoreMap() {
- return internal.Pointer{}, fmt.Errorf("can't store map in %s", m.typ)
+ return sys.Pointer{}, fmt.Errorf("can't store map in %s", m.typ)
}
buf, err = marshalMap(value, int(m.valueSize))
case *Program:
if !m.typ.canStoreProgram() {
- return internal.Pointer{}, fmt.Errorf("can't store program in %s", m.typ)
+ return sys.Pointer{}, fmt.Errorf("can't store program in %s", m.typ)
}
buf, err = marshalProgram(value, int(m.valueSize))
@@ -910,10 +1207,10 @@ func (m *Map) marshalValue(data interface{}) (internal.Pointer, error) {
}
if err != nil {
- return internal.Pointer{}, err
+ return sys.Pointer{}, err
}
- return internal.NewSlicePointer(buf), nil
+ return sys.NewSlicePointer(buf), nil
}
func (m *Map) unmarshalValue(value interface{}, buf []byte) error {
@@ -937,7 +1234,9 @@ func (m *Map) unmarshalValue(value interface{}, buf []byte) error {
return err
}
- (*value).Close()
+ // The caller might close the map externally, so ignore errors.
+ _ = (*value).Close()
+
*value = other
return nil
@@ -957,7 +1256,9 @@ func (m *Map) unmarshalValue(value interface{}, buf []byte) error {
return err
}
- (*value).Close()
+ // The caller might close the program externally, so ignore errors.
+ _ = (*value).Close()
+
*value = other
return nil
@@ -971,9 +1272,12 @@ func (m *Map) unmarshalValue(value interface{}, buf []byte) error {
return unmarshalBytes(value, buf)
}
-// LoadPinnedMap load a Map from a BPF file.
-func LoadPinnedMap(fileName string) (*Map, error) {
- fd, err := internal.BPFObjGet(fileName)
+// LoadPinnedMap loads a Map from a BPF file.
+func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) {
+ fd, err := sys.ObjGet(&sys.ObjGetAttr{
+ Pathname: sys.NewStringPointer(fileName),
+ FileFlags: opts.Marshal(),
+ })
if err != nil {
return nil, err
}
@@ -1002,70 +1306,11 @@ func marshalMap(m *Map, length int) ([]byte, error) {
return nil, fmt.Errorf("can't marshal map to %d bytes", length)
}
- fd, err := m.fd.Value()
- if err != nil {
- return nil, err
- }
-
buf := make([]byte, 4)
- internal.NativeEndian.PutUint32(buf, fd)
+ internal.NativeEndian.PutUint32(buf, m.fd.Uint())
return buf, nil
}
-func patchValue(value []byte, typ btf.Type, replacements map[string]interface{}) error {
- replaced := make(map[string]bool)
- replace := func(name string, offset, size int, replacement interface{}) error {
- if offset+size > len(value) {
- return fmt.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size)
- }
-
- buf, err := marshalBytes(replacement, size)
- if err != nil {
- return fmt.Errorf("marshal %s: %w", name, err)
- }
-
- copy(value[offset:offset+size], buf)
- replaced[name] = true
- return nil
- }
-
- switch parent := typ.(type) {
- case *btf.Datasec:
- for _, secinfo := range parent.Vars {
- name := string(secinfo.Type.(*btf.Var).Name)
- replacement, ok := replacements[name]
- if !ok {
- continue
- }
-
- err := replace(name, int(secinfo.Offset), int(secinfo.Size), replacement)
- if err != nil {
- return err
- }
- }
-
- default:
- return fmt.Errorf("patching %T is not supported", typ)
- }
-
- if len(replaced) == len(replacements) {
- return nil
- }
-
- var missing []string
- for name := range replacements {
- if !replaced[name] {
- missing = append(missing, name)
- }
- }
-
- if len(missing) == 1 {
- return fmt.Errorf("unknown field: %s", missing[0])
- }
-
- return fmt.Errorf("unknown fields: %s", strings.Join(missing, ","))
-}
-
// MapIterator iterates a Map.
//
// See Map.Iterate.
@@ -1160,29 +1405,20 @@ func (mi *MapIterator) Err() error {
//
// Returns ErrNotExist, if there is no next eBPF map.
func MapGetNextID(startID MapID) (MapID, error) {
- id, err := objGetNextID(internal.BPF_MAP_GET_NEXT_ID, uint32(startID))
- return MapID(id), err
+ attr := &sys.MapGetNextIdAttr{Id: uint32(startID)}
+ return MapID(attr.NextId), sys.MapGetNextId(attr)
}
// NewMapFromID returns the map for a given id.
//
// Returns ErrNotExist, if there is no eBPF map with the given id.
func NewMapFromID(id MapID) (*Map, error) {
- fd, err := bpfObjGetFDByID(internal.BPF_MAP_GET_FD_BY_ID, uint32(id))
+ fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{
+ Id: uint32(id),
+ })
if err != nil {
return nil, err
}
return newMapFromFD(fd)
}
-
-// ID returns the systemwide unique ID of the map.
-//
-// Deprecated: use MapInfo.ID() instead.
-func (m *Map) ID() (MapID, error) {
- info, err := bpfGetMapInfoByFD(m.fd)
- if err != nil {
- return MapID(0), err
- }
- return MapID(info.id), nil
-}
diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go
index 3ea1021a8..544d17f35 100644
--- a/vendor/github.com/cilium/ebpf/marshalers.go
+++ b/vendor/github.com/cilium/ebpf/marshalers.go
@@ -8,9 +8,11 @@ import (
"fmt"
"reflect"
"runtime"
+ "sync"
"unsafe"
"github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
)
// marshalPtr converts an arbitrary value into a pointer suitable
@@ -18,17 +20,17 @@ import (
//
// As an optimization, it returns the original value if it is an
// unsafe.Pointer.
-func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
+func marshalPtr(data interface{}, length int) (sys.Pointer, error) {
if ptr, ok := data.(unsafe.Pointer); ok {
- return internal.NewPointer(ptr), nil
+ return sys.NewPointer(ptr), nil
}
buf, err := marshalBytes(data, length)
if err != nil {
- return internal.Pointer{}, err
+ return sys.Pointer{}, err
}
- return internal.NewSlicePointer(buf), nil
+ return sys.NewSlicePointer(buf), nil
}
// marshalBytes converts an arbitrary value into a byte buffer.
@@ -39,6 +41,10 @@ func marshalPtr(data interface{}, length int) (internal.Pointer, error) {
// Returns an error if the given value isn't representable in exactly
// length bytes.
func marshalBytes(data interface{}, length int) (buf []byte, err error) {
+ if data == nil {
+ return nil, errors.New("can't marshal a nil value")
+ }
+
switch value := data.(type) {
case encoding.BinaryMarshaler:
buf, err = value.MarshalBinary()
@@ -68,29 +74,32 @@ func marshalBytes(data interface{}, length int) (buf []byte, err error) {
return buf, nil
}
-func makeBuffer(dst interface{}, length int) (internal.Pointer, []byte) {
+func makeBuffer(dst interface{}, length int) (sys.Pointer, []byte) {
if ptr, ok := dst.(unsafe.Pointer); ok {
- return internal.NewPointer(ptr), nil
+ return sys.NewPointer(ptr), nil
}
buf := make([]byte, length)
- return internal.NewSlicePointer(buf), buf
+ return sys.NewSlicePointer(buf), buf
+}
+
+var bytesReaderPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Reader)
+ },
}
// unmarshalBytes converts a byte buffer into an arbitrary value.
//
// Prefer using Map.unmarshalKey and Map.unmarshalValue if possible, since
// those have special cases that allow more types to be encoded.
+//
+// The common int32 and int64 types are directly handled to avoid
+// unnecessary heap allocations as happening in the default case.
func unmarshalBytes(data interface{}, buf []byte) error {
switch value := data.(type) {
case unsafe.Pointer:
- sh := &reflect.SliceHeader{
- Data: uintptr(value),
- Len: len(buf),
- Cap: len(buf),
- }
-
- dst := *(*[]byte)(unsafe.Pointer(sh))
+ dst := unsafe.Slice((*byte)(value), len(buf))
copy(dst, buf)
runtime.KeepAlive(value)
return nil
@@ -104,12 +113,38 @@ func unmarshalBytes(data interface{}, buf []byte) error {
case *[]byte:
*value = buf
return nil
+ case *int32:
+ if len(buf) < 4 {
+ return errors.New("int32 requires 4 bytes")
+ }
+ *value = int32(internal.NativeEndian.Uint32(buf))
+ return nil
+ case *uint32:
+ if len(buf) < 4 {
+ return errors.New("uint32 requires 4 bytes")
+ }
+ *value = internal.NativeEndian.Uint32(buf)
+ return nil
+ case *int64:
+ if len(buf) < 8 {
+ return errors.New("int64 requires 8 bytes")
+ }
+ *value = int64(internal.NativeEndian.Uint64(buf))
+ return nil
+ case *uint64:
+ if len(buf) < 8 {
+ return errors.New("uint64 requires 8 bytes")
+ }
+ *value = internal.NativeEndian.Uint64(buf)
+ return nil
case string:
return errors.New("require pointer to string")
case []byte:
return errors.New("require pointer to []byte")
default:
- rd := bytes.NewReader(buf)
+ rd := bytesReaderPool.Get().(*bytes.Reader)
+ rd.Reset(buf)
+ defer bytesReaderPool.Put(rd)
if err := binary.Read(rd, internal.NativeEndian, value); err != nil {
return fmt.Errorf("decoding %T: %v", value, err)
}
@@ -123,38 +158,38 @@ func unmarshalBytes(data interface{}, buf []byte) error {
// Values are initialized to zero if the slice has less elements than CPUs.
//
// slice must have a type like []elementType.
-func marshalPerCPUValue(slice interface{}, elemLength int) (internal.Pointer, error) {
+func marshalPerCPUValue(slice interface{}, elemLength int) (sys.Pointer, error) {
sliceType := reflect.TypeOf(slice)
if sliceType.Kind() != reflect.Slice {
- return internal.Pointer{}, errors.New("per-CPU value requires slice")
+ return sys.Pointer{}, errors.New("per-CPU value requires slice")
}
possibleCPUs, err := internal.PossibleCPUs()
if err != nil {
- return internal.Pointer{}, err
+ return sys.Pointer{}, err
}
sliceValue := reflect.ValueOf(slice)
sliceLen := sliceValue.Len()
if sliceLen > possibleCPUs {
- return internal.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs")
+ return sys.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs")
}
- alignedElemLength := align(elemLength, 8)
+ alignedElemLength := internal.Align(elemLength, 8)
buf := make([]byte, alignedElemLength*possibleCPUs)
for i := 0; i < sliceLen; i++ {
elem := sliceValue.Index(i).Interface()
elemBytes, err := marshalBytes(elem, elemLength)
if err != nil {
- return internal.Pointer{}, err
+ return sys.Pointer{}, err
}
offset := i * alignedElemLength
copy(buf[offset:offset+elemLength], elemBytes)
}
- return internal.NewSlicePointer(buf), nil
+ return sys.NewSlicePointer(buf), nil
}
// unmarshalPerCPUValue decodes a buffer into a slice containing one value per
@@ -210,7 +245,3 @@ func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) erro
reflect.ValueOf(slicePtr).Elem().Set(slice)
return nil
}
-
-func align(n, alignment int) int {
- return (int(n) + alignment - 1) / alignment * alignment
-}
diff --git a/vendor/github.com/cilium/ebpf/pinning.go b/vendor/github.com/cilium/ebpf/pinning.go
deleted file mode 100644
index 78812364a..000000000
--- a/vendor/github.com/cilium/ebpf/pinning.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package ebpf
-
-import (
- "errors"
- "fmt"
- "os"
-
- "github.com/cilium/ebpf/internal"
-)
-
-func pin(currentPath, newPath string, fd *internal.FD) error {
- if newPath == "" {
- return errors.New("given pinning path cannot be empty")
- }
- if currentPath == "" {
- return internal.BPFObjPin(newPath, fd)
- }
- if currentPath == newPath {
- return nil
- }
- var err error
- // Object is now moved to the new pinning path.
- if err = os.Rename(currentPath, newPath); err == nil {
- return nil
- }
- if !os.IsNotExist(err) {
- return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err)
- }
- // Internal state not in sync with the file system so let's fix it.
- return internal.BPFObjPin(newPath, fd)
-}
-
-func unpin(pinnedPath string) error {
- if pinnedPath == "" {
- return nil
- }
- err := os.Remove(pinnedPath)
- if err == nil || os.IsNotExist(err) {
- return nil
- }
- return err
-}
diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go
index 4b65f23b2..675edc711 100644
--- a/vendor/github.com/cilium/ebpf/prog.go
+++ b/vendor/github.com/cilium/ebpf/prog.go
@@ -7,12 +7,14 @@ import (
"fmt"
"math"
"path/filepath"
+ "runtime"
"strings"
"time"
"github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
"github.com/cilium/ebpf/internal"
- "github.com/cilium/ebpf/internal/btf"
+ "github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
@@ -41,6 +43,13 @@ type ProgramOptions struct {
// Controls the output buffer size for the verifier. Defaults to
// DefaultVerifierLogSize.
LogSize int
+ // Type information used for CO-RE relocations and when attaching to
+ // kernel functions.
+ //
+ // This is useful in environments where the kernel BTF is not available
+ // (containers) or where it is in a non-standard location. Defaults to
+ // use the kernel BTF from a well-known location if nil.
+ KernelTypes *btf.Spec
}
// ProgramSpec defines a Program.
@@ -48,29 +57,48 @@ type ProgramSpec struct {
// Name is passed to the kernel as a debug aid. Must only contain
// alpha numeric and '_' characters.
Name string
+
// Type determines at which hook in the kernel a program will run.
- Type ProgramType
+ Type ProgramType
+
+ // AttachType of the program, needed to differentiate allowed context
+ // accesses in some newer program types like CGroupSockAddr.
+ //
+ // Available on kernels 4.17 and later.
AttachType AttachType
- // Name of a kernel data structure to attach to. It's interpretation
- // depends on Type and AttachType.
- AttachTo string
+
+ // Name of a kernel data structure or function to attach to. Its
+ // interpretation depends on Type and AttachType.
+ AttachTo string
+
+ // The program to attach to. Must be provided manually.
+ AttachTarget *Program
+
+ // The name of the ELF section this program originated from.
+ SectionName string
+
Instructions asm.Instructions
+ // Flags is passed to the kernel and specifies additional program
+ // load attributes.
+ Flags uint32
+
// License of the program. Some helpers are only available if
// the license is deemed compatible with the GPL.
//
// See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1
License string
- // Version used by tracing programs.
+ // Version used by Kprobe programs.
//
- // Deprecated: superseded by BTF.
+ // Deprecated on kernels 5.0 and later. Leave empty to let the library
+ // detect this value automatically.
KernelVersion uint32
// The BTF associated with this program. Changing Instructions
// will most likely invalidate the contained data, and may
// result in errors when attempting to load it into the kernel.
- BTF *btf.Program
+ BTF *btf.Spec
// The byte order this program was compiled for, may be nil.
ByteOrder binary.ByteOrder
@@ -95,6 +123,8 @@ func (ps *ProgramSpec) Tag() (string, error) {
return ps.Instructions.Tag(internal.NativeEndian)
}
+type VerifierError = internal.VerifierError
+
// Program represents BPF program loaded into the kernel.
//
// It is not safe to close a Program which is used by other goroutines.
@@ -103,7 +133,7 @@ type Program struct {
// otherwise it is empty.
VerifierLog string
- fd *internal.FD
+ fd *sys.FD
name string
pinnedPath string
typ ProgramType
@@ -111,8 +141,7 @@ type Program struct {
// NewProgram creates a new Program.
//
-// Loading a program for the first time will perform
-// feature detection by loading small, temporary programs.
+// See NewProgramWithOptions for details.
func NewProgram(spec *ProgramSpec) (*Program, error) {
return NewProgramWithOptions(spec, ProgramOptions{})
}
@@ -121,97 +150,129 @@ func NewProgram(spec *ProgramSpec) (*Program, error) {
//
// Loading a program for the first time will perform
// feature detection by loading small, temporary programs.
+//
+// Returns an error wrapping VerifierError if the program or its BTF is rejected
+// by the kernel.
func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
- btfs := make(btfHandleCache)
- defer btfs.close()
+ if spec == nil {
+ return nil, errors.New("can't load a program from a nil spec")
+ }
+
+ handles := newHandleCache()
+ defer handles.close()
- return newProgramWithOptions(spec, opts, btfs)
+ prog, err := newProgramWithOptions(spec, opts, handles)
+ if errors.Is(err, asm.ErrUnsatisfiedMapReference) {
+ return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err)
+ }
+ return prog, err
}
-func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandleCache) (*Program, error) {
+func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *handleCache) (*Program, error) {
if len(spec.Instructions) == 0 {
- return nil, errors.New("Instructions cannot be empty")
+ return nil, errors.New("instructions cannot be empty")
}
- if len(spec.License) == 0 {
- return nil, errors.New("License cannot be empty")
+ if spec.Type == UnspecifiedProgram {
+ return nil, errors.New("can't load program of unspecified type")
}
if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian {
return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
}
- insns := make(asm.Instructions, len(spec.Instructions))
- copy(insns, spec.Instructions)
-
- if err := fixupJumpsAndCalls(insns); err != nil {
- return nil, err
- }
-
- buf := bytes.NewBuffer(make([]byte, 0, len(spec.Instructions)*asm.InstructionSize))
- err := insns.Marshal(buf, internal.NativeEndian)
- if err != nil {
- return nil, err
+ // Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load")
+ // require the version field to be set to the value of the KERNEL_VERSION
+ // macro for kprobe-type programs.
+ // Overwrite Kprobe program version if set to zero or the magic version constant.
+ kv := spec.KernelVersion
+ if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) {
+ v, err := internal.KernelVersion()
+ if err != nil {
+ return nil, fmt.Errorf("detecting kernel version: %w", err)
+ }
+ kv = v.Kernel()
}
- bytecode := buf.Bytes()
- insCount := uint32(len(bytecode) / asm.InstructionSize)
- attr := &bpfProgLoadAttr{
- progType: spec.Type,
- expectedAttachType: spec.AttachType,
- insCount: insCount,
- instructions: internal.NewSlicePointer(bytecode),
- license: internal.NewStringPointer(spec.License),
- kernelVersion: spec.KernelVersion,
+ attr := &sys.ProgLoadAttr{
+ ProgType: sys.ProgType(spec.Type),
+ ProgFlags: spec.Flags,
+ ExpectedAttachType: sys.AttachType(spec.AttachType),
+ License: sys.NewStringPointer(spec.License),
+ KernVersion: kv,
}
if haveObjName() == nil {
- attr.progName = newBPFObjName(spec.Name)
+ attr.ProgName = sys.NewObjName(spec.Name)
}
+ kernelTypes := opts.KernelTypes
+
+ insns := make(asm.Instructions, len(spec.Instructions))
+ copy(insns, spec.Instructions)
+
var btfDisabled bool
if spec.BTF != nil {
- if relos, err := btf.ProgramRelocations(spec.BTF, nil); err != nil {
- return nil, fmt.Errorf("CO-RE relocations: %s", err)
- } else if len(relos) > 0 {
- return nil, fmt.Errorf("applying CO-RE relocations: %w", ErrNotSupported)
+ if err := applyRelocations(insns, spec.BTF, kernelTypes); err != nil {
+ return nil, fmt.Errorf("apply CO-RE relocations: %w", err)
}
- handle, err := btfs.load(btf.ProgramSpec(spec.BTF))
+ handle, err := handles.btfHandle(spec.BTF)
btfDisabled = errors.Is(err, btf.ErrNotSupported)
if err != nil && !btfDisabled {
return nil, fmt.Errorf("load BTF: %w", err)
}
if handle != nil {
- attr.progBTFFd = uint32(handle.FD())
+ attr.ProgBtfFd = uint32(handle.FD())
- recSize, bytes, err := btf.ProgramLineInfos(spec.BTF)
+ fib, lib, err := btf.MarshalExtInfos(insns, spec.BTF.TypeID)
if err != nil {
- return nil, fmt.Errorf("get BTF line infos: %w", err)
+ return nil, err
}
- attr.lineInfoRecSize = recSize
- attr.lineInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
- attr.lineInfo = internal.NewSlicePointer(bytes)
- recSize, bytes, err = btf.ProgramFuncInfos(spec.BTF)
- if err != nil {
- return nil, fmt.Errorf("get BTF function infos: %w", err)
- }
- attr.funcInfoRecSize = recSize
- attr.funcInfoCnt = uint32(uint64(len(bytes)) / uint64(recSize))
- attr.funcInfo = internal.NewSlicePointer(bytes)
+ attr.FuncInfoRecSize = btf.FuncInfoSize
+ attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize
+ attr.FuncInfo = sys.NewSlicePointer(fib)
+
+ attr.LineInfoRecSize = btf.LineInfoSize
+ attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize
+ attr.LineInfo = sys.NewSlicePointer(lib)
}
}
- if spec.AttachTo != "" {
- target, err := resolveBTFType(spec.AttachTo, spec.Type, spec.AttachType)
+ if err := fixupAndValidate(insns); err != nil {
+ return nil, err
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
+ err := insns.Marshal(buf, internal.NativeEndian)
+ if err != nil {
+ return nil, err
+ }
+
+ bytecode := buf.Bytes()
+ attr.Insns = sys.NewSlicePointer(bytecode)
+ attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize)
+
+ if spec.AttachTarget != nil {
+ targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
}
- if target != nil {
- attr.attachBTFID = target.ID()
+
+ attr.AttachBtfId = uint32(targetID)
+ attr.AttachProgFd = uint32(spec.AttachTarget.FD())
+ defer runtime.KeepAlive(spec.AttachTarget)
+ } else if spec.AttachTo != "" {
+ targetID, err := findTargetInKernel(kernelTypes, spec.AttachTo, spec.Type, spec.AttachType)
+ if err != nil && !errors.Is(err, errUnrecognizedAttachType) {
+ // We ignore errUnrecognizedAttachType since AttachTo may be non-empty
+ // for programs that don't attach anywhere.
+ return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
}
+
+ attr.AttachBtfId = uint32(targetID)
}
logSize := DefaultVerifierLogSize
@@ -222,36 +283,46 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandl
var logBuf []byte
if opts.LogLevel > 0 {
logBuf = make([]byte, logSize)
- attr.logLevel = opts.LogLevel
- attr.logSize = uint32(len(logBuf))
- attr.logBuf = internal.NewSlicePointer(logBuf)
+ attr.LogLevel = opts.LogLevel
+ attr.LogSize = uint32(len(logBuf))
+ attr.LogBuf = sys.NewSlicePointer(logBuf)
}
- fd, err := bpfProgLoad(attr)
+ fd, err := sys.ProgLoad(attr)
if err == nil {
- return &Program{internal.CString(logBuf), fd, spec.Name, "", spec.Type}, nil
+ return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil
}
- logErr := err
- if opts.LogLevel == 0 {
+ if opts.LogLevel == 0 && opts.LogSize >= 0 {
// Re-run with the verifier enabled to get better error messages.
logBuf = make([]byte, logSize)
- attr.logLevel = 1
- attr.logSize = uint32(len(logBuf))
- attr.logBuf = internal.NewSlicePointer(logBuf)
+ attr.LogLevel = 1
+ attr.LogSize = uint32(len(logBuf))
+ attr.LogBuf = sys.NewSlicePointer(logBuf)
+ _, _ = sys.ProgLoad(attr)
+ }
+
+ switch {
+ case errors.Is(err, unix.EPERM):
+ if len(logBuf) > 0 && logBuf[0] == 0 {
+ // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can
+ // check that the log is empty to reduce false positives.
+ return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err)
+ }
- _, logErr = bpfProgLoad(attr)
- }
+ fallthrough
- if errors.Is(logErr, unix.EPERM) && logBuf[0] == 0 {
- // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can
- // check that the log is empty to reduce false positives.
- return nil, fmt.Errorf("load program: RLIMIT_MEMLOCK may be too low: %w", logErr)
+ case errors.Is(err, unix.EINVAL):
+ if hasFunctionReferences(spec.Instructions) {
+ if err := haveBPFToBPFCalls(); err != nil {
+ return nil, fmt.Errorf("load program: %w", err)
+ }
+ }
}
- err = internal.ErrorWithLog(err, logBuf, logErr)
+ err = internal.ErrorWithLog(err, logBuf)
if btfDisabled {
- return nil, fmt.Errorf("load program without BTF: %w", err)
+ return nil, fmt.Errorf("load program: %w (BTF disabled)", err)
}
return nil, fmt.Errorf("load program: %w", err)
}
@@ -262,18 +333,21 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, btfs btfHandl
//
// Requires at least Linux 4.10.
func NewProgramFromFD(fd int) (*Program, error) {
- if fd < 0 {
- return nil, errors.New("invalid fd")
+ f, err := sys.NewFD(fd)
+ if err != nil {
+ return nil, err
}
- return newProgramFromFD(internal.NewFD(uint32(fd)))
+ return newProgramFromFD(f)
}
// NewProgramFromID returns the program for a given id.
//
// Returns ErrNotExist, if there is no eBPF program with the given id.
func NewProgramFromID(id ProgramID) (*Program, error) {
- fd, err := bpfObjGetFDByID(internal.BPF_PROG_GET_FD_BY_ID, uint32(id))
+ fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{
+ Id: uint32(id),
+ })
if err != nil {
return nil, fmt.Errorf("get program by id: %w", err)
}
@@ -281,7 +355,7 @@ func NewProgramFromID(id ProgramID) (*Program, error) {
return newProgramFromFD(fd)
}
-func newProgramFromFD(fd *internal.FD) (*Program, error) {
+func newProgramFromFD(fd *sys.FD) (*Program, error) {
info, err := newProgramInfoFromFd(fd)
if err != nil {
fd.Close()
@@ -310,18 +384,29 @@ func (p *Program) Info() (*ProgramInfo, error) {
return newProgramInfoFromFd(p.fd)
}
-// FD gets the file descriptor of the Program.
+// Handle returns a reference to the program's type information in the kernel.
//
-// It is invalid to call this function after Close has been called.
-func (p *Program) FD() int {
- fd, err := p.fd.Value()
+// Returns ErrNotSupported if the kernel has no BTF support, or if there is no
+// BTF associated with the program.
+func (p *Program) Handle() (*btf.Handle, error) {
+ info, err := p.Info()
if err != nil {
- // Best effort: -1 is the number most likely to be an
- // invalid file descriptor.
- return -1
+ return nil, err
}
- return int(fd)
+ id, ok := info.BTFID()
+ if !ok {
+ return nil, fmt.Errorf("program %s: retrieve BTF ID: %w", p, ErrNotSupported)
+ }
+
+ return btf.NewHandleFromID(id)
+}
+
+// FD gets the file descriptor of the Program.
+//
+// It is invalid to call this function after Close has been called.
+func (p *Program) FD() int {
+ return p.fd.Int()
}
// Clone creates a duplicate of the Program.
@@ -345,9 +430,12 @@ func (p *Program) Clone() (*Program, error) {
// Pin persists the Program on the BPF virtual file system past the lifetime of
// the process that created it
//
+// Calling Pin on a previously pinned program will overwrite the path, except when
+// the new path already exists. Re-pinning across filesystems is not supported.
+//
// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs
func (p *Program) Pin(fileName string) error {
- if err := pin(p.pinnedPath, fileName, p.fd); err != nil {
+ if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil {
return err
}
p.pinnedPath = fileName
@@ -360,7 +448,7 @@ func (p *Program) Pin(fileName string) error {
//
// Unpinning an unpinned Program returns nil.
func (p *Program) Unpin() error {
- if err := unpin(p.pinnedPath); err != nil {
+ if err := internal.Unpin(p.pinnedPath); err != nil {
return err
}
p.pinnedPath = ""
@@ -369,13 +457,12 @@ func (p *Program) Unpin() error {
// IsPinned returns true if the Program has a non-empty pinned path.
func (p *Program) IsPinned() bool {
- if p.pinnedPath == "" {
- return false
- }
- return true
+ return p.pinnedPath != ""
}
-// Close unloads the program from the kernel.
+// Close the Program's underlying file descriptor, which could unload
+// the program from the kernel if it is not pinned or attached to a
+// kernel hook.
func (p *Program) Close() error {
if p == nil {
return nil
@@ -384,6 +471,28 @@ func (p *Program) Close() error {
return p.fd.Close()
}
+// Various options for Run'ing a Program
+type RunOptions struct {
+ // Program's data input. Required field.
+ Data []byte
+ // Program's data after Program has run. Caller must allocate. Optional field.
+ DataOut []byte
+ // Program's context input. Optional field.
+ Context interface{}
+ // Program's context after Program has run. Must be a pointer or slice. Optional field.
+ ContextOut interface{}
+ // Number of times to run Program. Optional field. Defaults to 1.
+ Repeat uint32
+ // Optional flags.
+ Flags uint32
+ // CPU to run Program on. Optional field.
+ // Note not all program types support this field.
+ CPU uint32
+ // Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer
+ // or similar. Typically used during benchmarking. Optional field.
+ Reset func()
+}
+
// Test runs the Program in the kernel with the given input and returns the
// value returned by the eBPF program. outLen may be zero.
//
@@ -392,11 +501,38 @@ func (p *Program) Close() error {
//
// This function requires at least Linux 4.12.
func (p *Program) Test(in []byte) (uint32, []byte, error) {
- ret, out, _, err := p.testRun(in, 1, nil)
+ // Older kernels ignore the dataSizeOut argument when copying to user space.
+ // Combined with things like bpf_xdp_adjust_head() we don't really know what the final
+ // size will be. Hence we allocate an output buffer which we hope will always be large
+ // enough, and panic if the kernel wrote past the end of the allocation.
+ // See https://patchwork.ozlabs.org/cover/1006822/
+ var out []byte
+ if len(in) > 0 {
+ out = make([]byte, len(in)+outputPad)
+ }
+
+ opts := RunOptions{
+ Data: in,
+ DataOut: out,
+ Repeat: 1,
+ }
+
+ ret, _, err := p.testRun(&opts)
if err != nil {
return ret, nil, fmt.Errorf("can't test program: %w", err)
}
- return ret, out, nil
+ return ret, opts.DataOut, nil
+}
+
+// Run runs the Program in kernel with given RunOptions.
+//
+// Note: the same restrictions from Test apply.
+func (p *Program) Run(opts *RunOptions) (uint32, error) {
+ ret, _, err := p.testRun(opts)
+ if err != nil {
+ return ret, fmt.Errorf("can't test program: %w", err)
+ }
+ return ret, nil
}
// Benchmark runs the Program with the given input for a number of times
@@ -411,7 +547,17 @@ func (p *Program) Test(in []byte) (uint32, []byte, error) {
//
// This function requires at least Linux 4.12.
func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) {
- ret, _, total, err := p.testRun(in, repeat, reset)
+ if uint(repeat) > math.MaxUint32 {
+ return 0, 0, fmt.Errorf("repeat is too high")
+ }
+
+ opts := RunOptions{
+ Data: in,
+ Repeat: uint32(repeat),
+ Reset: reset,
+ }
+
+ ret, total, err := p.testRun(&opts)
if err != nil {
return ret, total, fmt.Errorf("can't benchmark program: %w", err)
}
@@ -420,6 +566,7 @@ func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.D
var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error {
prog, err := NewProgram(&ProgramSpec{
+ // SocketFilter does not require privileges on newer kernels.
Type: SocketFilter,
Instructions: asm.Instructions{
asm.LoadImm(asm.R0, 0, asm.DWord),
@@ -435,88 +582,109 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() e
// Programs require at least 14 bytes input
in := make([]byte, 14)
- attr := bpfProgTestRunAttr{
- fd: uint32(prog.FD()),
- dataSizeIn: uint32(len(in)),
- dataIn: internal.NewSlicePointer(in),
+ attr := sys.ProgRunAttr{
+ ProgFd: uint32(prog.FD()),
+ DataSizeIn: uint32(len(in)),
+ DataIn: sys.NewSlicePointer(in),
}
- err = bpfProgTestRun(&attr)
- if errors.Is(err, unix.EINVAL) {
+ err = sys.ProgRun(&attr)
+ switch {
+ case errors.Is(err, unix.EINVAL):
// Check for EINVAL specifically, rather than err != nil since we
// otherwise misdetect due to insufficient permissions.
return internal.ErrNotSupported
- }
- if errors.Is(err, unix.EINTR) {
+
+ case errors.Is(err, unix.EINTR):
// We know that PROG_TEST_RUN is supported if we get EINTR.
return nil
- }
- return err
-})
-func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, time.Duration, error) {
- if uint(repeat) > math.MaxUint32 {
- return 0, nil, 0, fmt.Errorf("repeat is too high")
+ case errors.Is(err, unix.ENOTSUPP):
+ // The first PROG_TEST_RUN patches shipped in 4.12 didn't include
+ // a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is
+ // supported, but not for the program type used in the probe.
+ return nil
}
- if len(in) == 0 {
- return 0, nil, 0, fmt.Errorf("missing input")
- }
+ return err
+})
- if uint(len(in)) > math.MaxUint32 {
- return 0, nil, 0, fmt.Errorf("input is too long")
+func (p *Program) testRun(opts *RunOptions) (uint32, time.Duration, error) {
+ if uint(len(opts.Data)) > math.MaxUint32 {
+ return 0, 0, fmt.Errorf("input is too long")
}
if err := haveProgTestRun(); err != nil {
- return 0, nil, 0, err
+ return 0, 0, err
}
- // Older kernels ignore the dataSizeOut argument when copying to user space.
- // Combined with things like bpf_xdp_adjust_head() we don't really know what the final
- // size will be. Hence we allocate an output buffer which we hope will always be large
- // enough, and panic if the kernel wrote past the end of the allocation.
- // See https://patchwork.ozlabs.org/cover/1006822/
- out := make([]byte, len(in)+outputPad)
+ var ctxBytes []byte
+ if opts.Context != nil {
+ ctx := new(bytes.Buffer)
+ if err := binary.Write(ctx, internal.NativeEndian, opts.Context); err != nil {
+ return 0, 0, fmt.Errorf("cannot serialize context: %v", err)
+ }
+ ctxBytes = ctx.Bytes()
+ }
- fd, err := p.fd.Value()
- if err != nil {
- return 0, nil, 0, err
+ var ctxOut []byte
+ if opts.ContextOut != nil {
+ ctxOut = make([]byte, binary.Size(opts.ContextOut))
}
- attr := bpfProgTestRunAttr{
- fd: fd,
- dataSizeIn: uint32(len(in)),
- dataSizeOut: uint32(len(out)),
- dataIn: internal.NewSlicePointer(in),
- dataOut: internal.NewSlicePointer(out),
- repeat: uint32(repeat),
+ attr := sys.ProgRunAttr{
+ ProgFd: p.fd.Uint(),
+ DataSizeIn: uint32(len(opts.Data)),
+ DataSizeOut: uint32(len(opts.DataOut)),
+ DataIn: sys.NewSlicePointer(opts.Data),
+ DataOut: sys.NewSlicePointer(opts.DataOut),
+ Repeat: uint32(opts.Repeat),
+ CtxSizeIn: uint32(len(ctxBytes)),
+ CtxSizeOut: uint32(len(ctxOut)),
+ CtxIn: sys.NewSlicePointer(ctxBytes),
+ CtxOut: sys.NewSlicePointer(ctxOut),
+ Flags: opts.Flags,
+ Cpu: opts.CPU,
}
for {
- err = bpfProgTestRun(&attr)
+ err := sys.ProgRun(&attr)
if err == nil {
break
}
if errors.Is(err, unix.EINTR) {
- if reset != nil {
- reset()
+ if opts.Reset != nil {
+ opts.Reset()
}
continue
}
- return 0, nil, 0, fmt.Errorf("can't run test: %w", err)
+ if errors.Is(err, unix.ENOTSUPP) {
+ return 0, 0, fmt.Errorf("kernel doesn't support testing program type %s: %w", p.Type(), ErrNotSupported)
+ }
+
+ return 0, 0, fmt.Errorf("can't run test: %w", err)
+ }
+
+ if opts.DataOut != nil {
+ if int(attr.DataSizeOut) > cap(opts.DataOut) {
+ // Houston, we have a problem. The program created more data than we allocated,
+ // and the kernel wrote past the end of our buffer.
+ panic("kernel wrote past end of output buffer")
+ }
+ opts.DataOut = opts.DataOut[:int(attr.DataSizeOut)]
}
- if int(attr.dataSizeOut) > cap(out) {
- // Houston, we have a problem. The program created more data than we allocated,
- // and the kernel wrote past the end of our buffer.
- panic("kernel wrote past end of output buffer")
+ if len(ctxOut) != 0 {
+ b := bytes.NewReader(ctxOut)
+ if err := binary.Read(b, internal.NativeEndian, opts.ContextOut); err != nil {
+ return 0, 0, fmt.Errorf("failed to decode ContextOut: %v", err)
+ }
}
- out = out[:int(attr.dataSizeOut)]
- total := time.Duration(attr.duration) * time.Nanosecond
- return attr.retval, out, total, nil
+ total := time.Duration(attr.Duration) * time.Nanosecond
+ return attr.Retval, total, nil
}
func unmarshalProgram(buf []byte) (*Program, error) {
@@ -535,70 +703,19 @@ func marshalProgram(p *Program, length int) ([]byte, error) {
return nil, fmt.Errorf("can't marshal program to %d bytes", length)
}
- value, err := p.fd.Value()
- if err != nil {
- return nil, err
- }
-
buf := make([]byte, 4)
- internal.NativeEndian.PutUint32(buf, value)
+ internal.NativeEndian.PutUint32(buf, p.fd.Uint())
return buf, nil
}
-// Attach a Program.
-//
-// Deprecated: use link.RawAttachProgram instead.
-func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error {
- if fd < 0 {
- return errors.New("invalid fd")
- }
-
- pfd, err := p.fd.Value()
- if err != nil {
- return err
- }
-
- attr := internal.BPFProgAttachAttr{
- TargetFd: uint32(fd),
- AttachBpfFd: pfd,
- AttachType: uint32(typ),
- AttachFlags: uint32(flags),
- }
-
- return internal.BPFProgAttach(&attr)
-}
-
-// Detach a Program.
-//
-// Deprecated: use link.RawDetachProgram instead.
-func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error {
- if fd < 0 {
- return errors.New("invalid fd")
- }
-
- if flags != 0 {
- return errors.New("flags must be zero")
- }
-
- pfd, err := p.fd.Value()
- if err != nil {
- return err
- }
-
- attr := internal.BPFProgDetachAttr{
- TargetFd: uint32(fd),
- AttachBpfFd: pfd,
- AttachType: uint32(typ),
- }
-
- return internal.BPFProgDetach(&attr)
-}
-
// LoadPinnedProgram loads a Program from a BPF file.
//
// Requires at least Linux 4.11.
-func LoadPinnedProgram(fileName string) (*Program, error) {
- fd, err := internal.BPFObjGet(fileName)
+func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) {
+ fd, err := sys.ObjGet(&sys.ObjGetAttr{
+ Pathname: sys.NewStringPointer(fileName),
+ FileFlags: opts.Marshal(),
+ })
if err != nil {
return nil, err
}
@@ -609,7 +726,7 @@ func LoadPinnedProgram(fileName string) (*Program, error) {
return nil, fmt.Errorf("info for %s: %w", fileName, err)
}
- return &Program{"", fd, filepath.Base(fileName), "", info.Type}, nil
+ return &Program{"", fd, filepath.Base(fileName), fileName, info.Type}, nil
}
// SanitizeName replaces all invalid characters in name with replacement.
@@ -632,67 +749,127 @@ func SanitizeName(name string, replacement rune) string {
//
// Returns ErrNotExist, if there is no next eBPF program.
func ProgramGetNextID(startID ProgramID) (ProgramID, error) {
- id, err := objGetNextID(internal.BPF_PROG_GET_NEXT_ID, uint32(startID))
- return ProgramID(id), err
+ attr := &sys.ProgGetNextIdAttr{Id: uint32(startID)}
+ return ProgramID(attr.NextId), sys.ProgGetNextId(attr)
}
-// ID returns the systemwide unique ID of the program.
+// BindMap binds map to the program and is only released once program is released.
//
-// Deprecated: use ProgramInfo.ID() instead.
-func (p *Program) ID() (ProgramID, error) {
- info, err := bpfGetProgInfoByFD(p.fd)
- if err != nil {
- return ProgramID(0), err
- }
- return ProgramID(info.id), nil
-}
-
-func findKernelType(name string, typ btf.Type) error {
- kernel, err := btf.LoadKernelSpec()
- if err != nil {
- return fmt.Errorf("can't load kernel spec: %w", err)
+// This may be used in cases where metadata should be associated with the program
+// which otherwise does not contain any references to the map.
+func (p *Program) BindMap(m *Map) error {
+ attr := &sys.ProgBindMapAttr{
+ ProgFd: uint32(p.FD()),
+ MapFd: uint32(m.FD()),
}
- return kernel.FindType(name, typ)
+ return sys.ProgBindMap(attr)
}
-func resolveBTFType(name string, progType ProgramType, attachType AttachType) (btf.Type, error) {
+var errUnrecognizedAttachType = errors.New("unrecognized attach type")
+
+// find an attach target type in the kernel.
+//
+// spec may be nil and defaults to the canonical kernel BTF. name together with
+// progType and attachType determine which type we need to attach to.
+//
+// Returns errUnrecognizedAttachType.
+func findTargetInKernel(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) {
type match struct {
p ProgramType
a AttachType
}
- target := match{progType, attachType}
- switch target {
+ var (
+ typeName, featureName string
+ isBTFTypeFunc = true
+ )
+
+ switch (match{progType, attachType}) {
case match{LSM, AttachLSMMac}:
- var target btf.Func
- err := findKernelType("bpf_lsm_"+name, &target)
- if errors.Is(err, btf.ErrNotFound) {
- return nil, &internal.UnsupportedFeatureError{
- Name: name + " LSM hook",
- }
- }
- if err != nil {
- return nil, fmt.Errorf("resolve BTF for LSM hook %s: %w", name, err)
- }
+ typeName = "bpf_lsm_" + name
+ featureName = name + " LSM hook"
+ case match{Tracing, AttachTraceIter}:
+ typeName = "bpf_iter_" + name
+ featureName = name + " iterator"
+ case match{Tracing, AttachTraceFEntry}:
+ typeName = name
+ featureName = fmt.Sprintf("fentry %s", name)
+ case match{Tracing, AttachTraceFExit}:
+ typeName = name
+ featureName = fmt.Sprintf("fexit %s", name)
+ case match{Tracing, AttachModifyReturn}:
+ typeName = name
+ featureName = fmt.Sprintf("fmod_ret %s", name)
+ case match{Tracing, AttachTraceRawTp}:
+ typeName = fmt.Sprintf("btf_trace_%s", name)
+ featureName = fmt.Sprintf("raw_tp %s", name)
+ isBTFTypeFunc = false
+ default:
+ return 0, errUnrecognizedAttachType
+ }
- return &target, nil
+ spec, err := maybeLoadKernelBTF(spec)
+ if err != nil {
+ return 0, fmt.Errorf("load kernel spec: %w", err)
+ }
- case match{Tracing, AttachTraceIter}:
- var target btf.Func
- err := findKernelType("bpf_iter_"+name, &target)
+ var target btf.Type
+ if isBTFTypeFunc {
+ var targetFunc *btf.Func
+ err = spec.TypeByName(typeName, &targetFunc)
+ target = targetFunc
+ } else {
+ var targetTypedef *btf.Typedef
+ err = spec.TypeByName(typeName, &targetTypedef)
+ target = targetTypedef
+ }
+
+ if err != nil {
if errors.Is(err, btf.ErrNotFound) {
- return nil, &internal.UnsupportedFeatureError{
- Name: name + " iterator",
+ return 0, &internal.UnsupportedFeatureError{
+ Name: featureName,
}
}
- if err != nil {
- return nil, fmt.Errorf("resolve BTF for iterator %s: %w", name, err)
- }
+ return 0, fmt.Errorf("find target for %s: %w", featureName, err)
+ }
+
+ return spec.TypeID(target)
+}
- return &target, nil
+// find an attach target type in a program.
+//
+// Returns errUnrecognizedAttachType.
+func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) {
+ type match struct {
+ p ProgramType
+ a AttachType
+ }
+ var typeName string
+ switch (match{progType, attachType}) {
+ case match{Extension, AttachNone}:
+ typeName = name
default:
- return nil, nil
+ return 0, errUnrecognizedAttachType
+ }
+
+ btfHandle, err := prog.Handle()
+ if err != nil {
+ return 0, fmt.Errorf("load target BTF: %w", err)
}
+ defer btfHandle.Close()
+
+ spec, err := btfHandle.Spec(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ var targetFunc *btf.Func
+ err = spec.TypeByName(typeName, &targetFunc)
+ if err != nil {
+ return 0, fmt.Errorf("find target %s: %w", typeName, err)
+ }
+
+ return spec.TypeID(targetFunc)
}
diff --git a/vendor/github.com/cilium/ebpf/run-tests.sh b/vendor/github.com/cilium/ebpf/run-tests.sh
index 647a61aab..c21cca9e5 100644
--- a/vendor/github.com/cilium/ebpf/run-tests.sh
+++ b/vendor/github.com/cilium/ebpf/run-tests.sh
@@ -1,91 +1,145 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Test the current package under a different kernel.
# Requires virtme and qemu to be installed.
+# Examples:
+# Run all tests on a 5.4 kernel
+# $ ./run-tests.sh 5.4
+# Run a subset of tests:
+# $ ./run-tests.sh 5.4 ./link
+
+set -euo pipefail
+
+script="$(realpath "$0")"
+readonly script
+
+# This script is a bit like a Matryoshka doll since it keeps re-executing itself
+# in various different contexts:
+#
+# 1. invoked by the user like run-tests.sh 5.4
+# 2. invoked by go test like run-tests.sh --exec-vm
+# 3. invoked by init in the vm like run-tests.sh --exec-test
+#
+# This allows us to use all available CPU on the host machine to compile our
+# code, and then only use the VM to execute the test. This is because the VM
+# is usually slower at compiling than the host.
+if [[ "${1:-}" = "--exec-vm" ]]; then
+ shift
+
+ input="$1"
+ shift
-set -eu
-set -o pipefail
+ # Use sudo if /dev/kvm isn't accessible by the current user.
+ sudo=""
+ if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then
+ sudo="sudo"
+ fi
+ readonly sudo
+
+ testdir="$(dirname "$1")"
+ output="$(mktemp -d)"
+ printf -v cmd "%q " "$@"
+
+ if [[ "$(stat -c '%t:%T' -L /proc/$$/fd/0)" == "1:3" ]]; then
+ # stdin is /dev/null, which doesn't play well with qemu. Use a fifo as a
+ # blocking substitute.
+ mkfifo "${output}/fake-stdin"
+ # Open for reading and writing to avoid blocking.
+ exec 0<> "${output}/fake-stdin"
+ rm "${output}/fake-stdin"
+ fi
-if [[ "${1:-}" = "--in-vm" ]]; then
+ for ((i = 0; i < 3; i++)); do
+ if ! $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \
+ --rwdir="${testdir}=${testdir}" \
+ --rodir=/run/input="${input}" \
+ --rwdir=/run/output="${output}" \
+ --script-sh "PATH=\"$PATH\" CI_MAX_KERNEL_VERSION="${CI_MAX_KERNEL_VERSION:-}" \"$script\" --exec-test $cmd" \
+ --kopt possible_cpus=2; then # need at least two CPUs for some tests
+ exit 23
+ fi
+
+ if [[ -e "${output}/status" ]]; then
+ break
+ fi
+
+ if [[ -v CI ]]; then
+ echo "Retrying test run due to qemu crash"
+ continue
+ fi
+
+ exit 42
+ done
+
+ rc=$(<"${output}/status")
+ $sudo rm -r "$output"
+ exit $rc
+elif [[ "${1:-}" = "--exec-test" ]]; then
shift
mount -t bpf bpf /sys/fs/bpf
- export CGO_ENABLED=0
- export GOFLAGS=-mod=readonly
- export GOPATH=/run/go-path
- export GOPROXY=file:///run/go-path/pkg/mod/cache/download
- export GOSUMDB=off
- export GOCACHE=/run/go-cache
+ mount -t tracefs tracefs /sys/kernel/debug/tracing
if [[ -d "/run/input/bpf" ]]; then
export KERNEL_SELFTESTS="/run/input/bpf"
fi
- readonly output="${1}"
- shift
-
- echo Running tests...
- go test -v -coverpkg=./... -coverprofile="$output/coverage.txt" -count 1 ./...
- touch "$output/success"
- exit 0
-fi
-
-# Pull all dependencies, so that we can run tests without the
-# vm having network access.
-go mod download
+ if [[ -f "/run/input/bpf/bpf_testmod/bpf_testmod.ko" ]]; then
+ insmod "/run/input/bpf/bpf_testmod/bpf_testmod.ko"
+ fi
-# Use sudo if /dev/kvm isn't accessible by the current user.
-sudo=""
-if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then
- sudo="sudo"
+ dmesg --clear
+ rc=0
+ "$@" || rc=$?
+ dmesg
+ echo $rc > "/run/output/status"
+ exit $rc # this return code is "swallowed" by qemu
fi
-readonly sudo
readonly kernel_version="${1:-}"
if [[ -z "${kernel_version}" ]]; then
echo "Expecting kernel version as first argument"
exit 1
fi
+shift
readonly kernel="linux-${kernel_version}.bz"
-readonly selftests="linux-${kernel_version}-selftests-bpf.bz"
+readonly selftests="linux-${kernel_version}-selftests-bpf.tgz"
readonly input="$(mktemp -d)"
-readonly output="$(mktemp -d)"
readonly tmp_dir="${TMPDIR:-/tmp}"
readonly branch="${BRANCH:-master}"
fetch() {
echo Fetching "${1}"
- wget -nv -N -P "${tmp_dir}" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}"
+ pushd "${tmp_dir}" > /dev/null
+ curl -s -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}"
+ local ret=$?
+ popd > /dev/null
+ return $ret
}
fetch "${kernel}"
+cp "${tmp_dir}/${kernel}" "${input}/bzImage"
if fetch "${selftests}"; then
+ echo "Decompressing selftests"
mkdir "${input}/bpf"
- tar --strip-components=4 -xjf "${tmp_dir}/${selftests}" -C "${input}/bpf"
+ tar --strip-components=4 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf"
else
echo "No selftests found, disabling"
fi
-echo Testing on "${kernel_version}"
-$sudo virtme-run --kimg "${tmp_dir}/${kernel}" --memory 512M --pwd \
- --rw \
- --rwdir=/run/input="${input}" \
- --rwdir=/run/output="${output}" \
- --rodir=/run/go-path="$(go env GOPATH)" \
- --rwdir=/run/go-cache="$(go env GOCACHE)" \
- --script-sh "PATH=\"$PATH\" $(realpath "$0") --in-vm /run/output" \
- --qemu-opts -smp 2 # need at least two CPUs for some tests
-
-if [[ ! -e "${output}/success" ]]; then
- echo "Test failed on ${kernel_version}"
- exit 1
-else
- echo "Test successful on ${kernel_version}"
- if [[ -v COVERALLS_TOKEN ]]; then
- goveralls -coverprofile="${output}/coverage.txt" -service=semaphore -repotoken "$COVERALLS_TOKEN"
- fi
+args=(-short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...)
+if (( $# > 0 )); then
+ args=("$@")
fi
-$sudo rm -r "${input}"
-$sudo rm -r "${output}"
+export GOFLAGS=-mod=readonly
+export CGO_ENABLED=0
+# LINUX_VERSION_CODE test compares this to discovered value.
+export KERNEL_VERSION="${kernel_version}"
+
+echo Testing on "${kernel_version}"
+go test -exec "$script --exec-vm $input" "${args[@]}"
+echo "Test successful on ${kernel_version}"
+
+rm -r "${input}"
diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go
index 1cba1d747..e5c270a55 100644
--- a/vendor/github.com/cilium/ebpf/syscalls.go
+++ b/vendor/github.com/cilium/ebpf/syscalls.go
@@ -1,29 +1,16 @@
package ebpf
import (
+ "bytes"
"errors"
"fmt"
- "unsafe"
+ "github.com/cilium/ebpf/asm"
"github.com/cilium/ebpf/internal"
- "github.com/cilium/ebpf/internal/btf"
+ "github.com/cilium/ebpf/internal/sys"
"github.com/cilium/ebpf/internal/unix"
)
-// Generic errors returned by BPF syscalls.
-var ErrNotExist = errors.New("requested object does not exist")
-
-// bpfObjName is a null-terminated string made up of
-// 'A-Za-z0-9_' characters.
-type bpfObjName [unix.BPF_OBJ_NAME_LEN]byte
-
-// newBPFObjName truncates the result if it is too long.
-func newBPFObjName(name string) bpfObjName {
- var result bpfObjName
- copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
- return result
-}
-
// invalidBPFObjNameChar returns true if char may not appear in
// a BPF object name.
func invalidBPFObjNameChar(char rune) bool {
@@ -45,183 +32,29 @@ func invalidBPFObjNameChar(char rune) bool {
}
}
-type bpfMapCreateAttr struct {
- mapType MapType
- keySize uint32
- valueSize uint32
- maxEntries uint32
- flags uint32
- innerMapFd uint32 // since 4.12 56f668dfe00d
- numaNode uint32 // since 4.14 96eabe7a40aa
- mapName bpfObjName // since 4.15 ad5b177bd73f
- mapIfIndex uint32
- btfFd uint32
- btfKeyTypeID btf.TypeID
- btfValueTypeID btf.TypeID
-}
-
-type bpfMapOpAttr struct {
- mapFd uint32
- padding uint32
- key internal.Pointer
- value internal.Pointer
- flags uint64
-}
-
-type bpfBatchMapOpAttr struct {
- inBatch internal.Pointer
- outBatch internal.Pointer
- keys internal.Pointer
- values internal.Pointer
- count uint32
- mapFd uint32
- elemFlags uint64
- flags uint64
-}
-
-type bpfMapInfo struct {
- map_type uint32 // since 4.12 1e2709769086
- id uint32
- key_size uint32
- value_size uint32
- max_entries uint32
- map_flags uint32
- name bpfObjName // since 4.15 ad5b177bd73f
- ifindex uint32 // since 4.16 52775b33bb50
- btf_vmlinux_value_type_id uint32 // since 5.6 85d33df357b6
- netns_dev uint64 // since 4.16 52775b33bb50
- netns_ino uint64
- btf_id uint32 // since 4.18 78958fca7ead
- btf_key_type_id uint32 // since 4.18 9b2cf328b2ec
- btf_value_type_id uint32
-}
-
-type bpfProgLoadAttr struct {
- progType ProgramType
- insCount uint32
- instructions internal.Pointer
- license internal.Pointer
- logLevel uint32
- logSize uint32
- logBuf internal.Pointer
- kernelVersion uint32 // since 4.1 2541517c32be
- progFlags uint32 // since 4.11 e07b98d9bffe
- progName bpfObjName // since 4.15 067cae47771c
- progIfIndex uint32 // since 4.15 1f6f4cb7ba21
- expectedAttachType AttachType // since 4.17 5e43f899b03a
- progBTFFd uint32
- funcInfoRecSize uint32
- funcInfo internal.Pointer
- funcInfoCnt uint32
- lineInfoRecSize uint32
- lineInfo internal.Pointer
- lineInfoCnt uint32
- attachBTFID btf.TypeID
- attachProgFd uint32
-}
-
-type bpfProgInfo struct {
- prog_type uint32
- id uint32
- tag [unix.BPF_TAG_SIZE]byte
- jited_prog_len uint32
- xlated_prog_len uint32
- jited_prog_insns internal.Pointer
- xlated_prog_insns internal.Pointer
- load_time uint64 // since 4.15 cb4d2b3f03d8
- created_by_uid uint32
- nr_map_ids uint32
- map_ids internal.Pointer
- name bpfObjName // since 4.15 067cae47771c
- ifindex uint32
- gpl_compatible uint32
- netns_dev uint64
- netns_ino uint64
- nr_jited_ksyms uint32
- nr_jited_func_lens uint32
- jited_ksyms internal.Pointer
- jited_func_lens internal.Pointer
- btf_id uint32
- func_info_rec_size uint32
- func_info internal.Pointer
- nr_func_info uint32
- nr_line_info uint32
- line_info internal.Pointer
- jited_line_info internal.Pointer
- nr_jited_line_info uint32
- line_info_rec_size uint32
- jited_line_info_rec_size uint32
- nr_prog_tags uint32
- prog_tags internal.Pointer
- run_time_ns uint64
- run_cnt uint64
-}
-
-type bpfProgTestRunAttr struct {
- fd uint32
- retval uint32
- dataSizeIn uint32
- dataSizeOut uint32
- dataIn internal.Pointer
- dataOut internal.Pointer
- repeat uint32
- duration uint32
-}
-
-type bpfGetFDByIDAttr struct {
- id uint32
- next uint32
-}
-
-type bpfMapFreezeAttr struct {
- mapFd uint32
-}
-
-type bpfObjGetNextIDAttr struct {
- startID uint32
- nextID uint32
- openFlags uint32
-}
-
-func bpfProgLoad(attr *bpfProgLoadAttr) (*internal.FD, error) {
- for {
- fd, err := internal.BPF(internal.BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
- // As of ~4.20 the verifier can be interrupted by a signal,
- // and returns EAGAIN in that case.
- if err == unix.EAGAIN {
- continue
- }
-
- if err != nil {
- return nil, err
- }
-
- return internal.NewFD(uint32(fd)), nil
- }
-}
-
-func bpfProgTestRun(attr *bpfProgTestRunAttr) error {
- _, err := internal.BPF(internal.BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
- return err
-}
-
-func bpfMapCreate(attr *bpfMapCreateAttr) (*internal.FD, error) {
- fd, err := internal.BPF(internal.BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
- if err != nil {
+func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) {
+ buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
+ if err := insns.Marshal(buf, internal.NativeEndian); err != nil {
return nil, err
}
+ bytecode := buf.Bytes()
- return internal.NewFD(uint32(fd)), nil
+ return sys.ProgLoad(&sys.ProgLoadAttr{
+ ProgType: sys.ProgType(typ),
+ License: sys.NewStringPointer(license),
+ Insns: sys.NewSlicePointer(bytecode),
+ InsnCnt: uint32(len(bytecode) / asm.InstructionSize),
+ })
}
var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
- _, err := bpfMapCreate(&bpfMapCreateAttr{
- mapType: ArrayOfMaps,
- keySize: 4,
- valueSize: 4,
- maxEntries: 1,
+ _, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(ArrayOfMaps),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
// Invalid file descriptor.
- innerMapFd: ^uint32(0),
+ InnerMapFd: ^uint32(0),
})
if errors.Is(err, unix.EINVAL) {
return internal.ErrNotSupported
@@ -235,12 +68,12 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error {
var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error {
// This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since
// BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check.
- m, err := bpfMapCreate(&bpfMapCreateAttr{
- mapType: Array,
- keySize: 4,
- valueSize: 4,
- maxEntries: 1,
- flags: unix.BPF_F_RDONLY_PROG,
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_RDONLY_PROG,
})
if err != nil {
return internal.ErrNotSupported
@@ -249,122 +82,53 @@ var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps
return nil
})
-func bpfMapLookupElem(m *internal.FD, key, valueOut internal.Pointer) error {
- fd, err := m.Value()
- if err != nil {
- return err
- }
-
- attr := bpfMapOpAttr{
- mapFd: fd,
- key: key,
- value: valueOut,
- }
- _, err = internal.BPF(internal.BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
- return wrapMapError(err)
-}
-
-func bpfMapLookupAndDelete(m *internal.FD, key, valueOut internal.Pointer) error {
- fd, err := m.Value()
- if err != nil {
- return err
- }
-
- attr := bpfMapOpAttr{
- mapFd: fd,
- key: key,
- value: valueOut,
- }
- _, err = internal.BPF(internal.BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
- return wrapMapError(err)
-}
-
-func bpfMapUpdateElem(m *internal.FD, key, valueOut internal.Pointer, flags uint64) error {
- fd, err := m.Value()
- if err != nil {
- return err
- }
-
- attr := bpfMapOpAttr{
- mapFd: fd,
- key: key,
- value: valueOut,
- flags: flags,
- }
- _, err = internal.BPF(internal.BPF_MAP_UPDATE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
- return wrapMapError(err)
-}
-
-func bpfMapDeleteElem(m *internal.FD, key internal.Pointer) error {
- fd, err := m.Value()
+var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error {
+ // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps.
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_MMAPABLE,
+ })
if err != nil {
- return err
- }
-
- attr := bpfMapOpAttr{
- mapFd: fd,
- key: key,
+ return internal.ErrNotSupported
}
- _, err = internal.BPF(internal.BPF_MAP_DELETE_ELEM, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
- return wrapMapError(err)
-}
+ _ = m.Close()
+ return nil
+})
-func bpfMapGetNextKey(m *internal.FD, key, nextKeyOut internal.Pointer) error {
- fd, err := m.Value()
+var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error {
+ // This checks BPF_F_INNER_MAP, which appeared in 5.10.
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_INNER_MAP,
+ })
if err != nil {
- return err
- }
-
- attr := bpfMapOpAttr{
- mapFd: fd,
- key: key,
- value: nextKeyOut,
- }
- _, err = internal.BPF(internal.BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
- return wrapMapError(err)
-}
-
-func objGetNextID(cmd internal.BPFCmd, start uint32) (uint32, error) {
- attr := bpfObjGetNextIDAttr{
- startID: start,
+ return internal.ErrNotSupported
}
- _, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
- return attr.nextID, wrapObjError(err)
-}
+ _ = m.Close()
+ return nil
+})
-func bpfMapBatch(cmd internal.BPFCmd, m *internal.FD, inBatch, outBatch, keys, values internal.Pointer, count uint32, opts *BatchOptions) (uint32, error) {
- fd, err := m.Value()
+var haveNoPreallocMaps = internal.FeatureTest("prealloc maps", "4.6", func() error {
+ // This checks BPF_F_NO_PREALLOC, which appeared in 4.6.
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Hash),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_NO_PREALLOC,
+ })
if err != nil {
- return 0, err
- }
-
- attr := bpfBatchMapOpAttr{
- inBatch: inBatch,
- outBatch: outBatch,
- keys: keys,
- values: values,
- count: count,
- mapFd: fd,
- }
- if opts != nil {
- attr.elemFlags = opts.ElemFlags
- attr.flags = opts.Flags
- }
- _, err = internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
- // always return count even on an error, as things like update might partially be fulfilled.
- return attr.count, wrapMapError(err)
-}
-
-func wrapObjError(err error) error {
- if err == nil {
- return nil
- }
- if errors.Is(err, unix.ENOENT) {
- return fmt.Errorf("%w", ErrNotExist)
+ return internal.ErrNotSupported
}
-
- return errors.New(err.Error())
-}
+ _ = m.Close()
+ return nil
+})
func wrapMapError(err error) error {
if err == nil {
@@ -372,60 +136,34 @@ func wrapMapError(err error) error {
}
if errors.Is(err, unix.ENOENT) {
- return ErrKeyNotExist
+ return sys.Error(ErrKeyNotExist, unix.ENOENT)
}
if errors.Is(err, unix.EEXIST) {
- return ErrKeyExist
+ return sys.Error(ErrKeyExist, unix.EEXIST)
}
if errors.Is(err, unix.ENOTSUPP) {
- return ErrNotSupported
+ return sys.Error(ErrNotSupported, unix.ENOTSUPP)
}
- return errors.New(err.Error())
-}
-
-func bpfMapFreeze(m *internal.FD) error {
- fd, err := m.Value()
- if err != nil {
- return err
+ if errors.Is(err, unix.E2BIG) {
+ return fmt.Errorf("key too big for map: %w", err)
}
- attr := bpfMapFreezeAttr{
- mapFd: fd,
- }
- _, err = internal.BPF(internal.BPF_MAP_FREEZE, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
return err
}
-func bpfGetProgInfoByFD(fd *internal.FD) (*bpfProgInfo, error) {
- var info bpfProgInfo
- if err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info)); err != nil {
- return nil, fmt.Errorf("can't get program info: %w", err)
- }
- return &info, nil
-}
-
-func bpfGetMapInfoByFD(fd *internal.FD) (*bpfMapInfo, error) {
- var info bpfMapInfo
- err := internal.BPFObjGetInfoByFD(fd, unsafe.Pointer(&info), unsafe.Sizeof(info))
- if err != nil {
- return nil, fmt.Errorf("can't get map info: %w", err)
- }
- return &info, nil
-}
-
var haveObjName = internal.FeatureTest("object names", "4.15", func() error {
- attr := bpfMapCreateAttr{
- mapType: Array,
- keySize: 4,
- valueSize: 4,
- maxEntries: 1,
- mapName: newBPFObjName("feature_test"),
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapName: sys.NewObjName("feature_test"),
}
- fd, err := bpfMapCreate(&attr)
+ fd, err := sys.MapCreate(&attr)
if err != nil {
return internal.ErrNotSupported
}
@@ -439,15 +177,15 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func()
return err
}
- attr := bpfMapCreateAttr{
- mapType: Array,
- keySize: 4,
- valueSize: 4,
- maxEntries: 1,
- mapName: newBPFObjName(".test"),
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapName: sys.NewObjName(".test"),
}
- fd, err := bpfMapCreate(&attr)
+ fd, err := sys.MapCreate(&attr)
if err != nil {
return internal.ErrNotSupported
}
@@ -458,34 +196,69 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func()
var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error {
var maxEntries uint32 = 2
- attr := bpfMapCreateAttr{
- mapType: Hash,
- keySize: 4,
- valueSize: 4,
- maxEntries: maxEntries,
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(Hash),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: maxEntries,
}
- fd, err := bpfMapCreate(&attr)
+ fd, err := sys.MapCreate(&attr)
if err != nil {
return internal.ErrNotSupported
}
defer fd.Close()
+
keys := []uint32{1, 2}
values := []uint32{3, 4}
kp, _ := marshalPtr(keys, 8)
vp, _ := marshalPtr(values, 8)
- nilPtr := internal.NewPointer(nil)
- _, err = bpfMapBatch(internal.BPF_MAP_UPDATE_BATCH, fd, nilPtr, nilPtr, kp, vp, maxEntries, nil)
+
+ err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{
+ MapFd: fd.Uint(),
+ Keys: kp,
+ Values: vp,
+ Count: maxEntries,
+ })
if err != nil {
return internal.ErrNotSupported
}
return nil
})
-func bpfObjGetFDByID(cmd internal.BPFCmd, id uint32) (*internal.FD, error) {
- attr := bpfGetFDByIDAttr{
- id: id,
+var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", func() error {
+ insns := asm.Instructions{
+ asm.Mov.Reg(asm.R1, asm.R10),
+ asm.Add.Imm(asm.R1, -8),
+ asm.Mov.Imm(asm.R2, 8),
+ asm.Mov.Imm(asm.R3, 0),
+ asm.FnProbeReadKernel.Call(),
+ asm.Return(),
}
- ptr, err := internal.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
- return internal.NewFD(uint32(ptr)), wrapObjError(err)
-}
+
+ fd, err := progLoad(insns, Kprobe, "GPL")
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = fd.Close()
+ return nil
+})
+
+var haveBPFToBPFCalls = internal.FeatureTest("bpf2bpf calls", "4.16", func() error {
+ insns := asm.Instructions{
+ asm.Call.Label("prog2").WithSymbol("prog1"),
+ asm.Return(),
+ asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"),
+ asm.Return(),
+ }
+
+ fd, err := progLoad(insns, SocketFilter, "MIT")
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+ _ = fd.Close()
+ return nil
+})
diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go
index 3191ba1e0..a27b44247 100644
--- a/vendor/github.com/cilium/ebpf/types.go
+++ b/vendor/github.com/cilium/ebpf/types.go
@@ -1,11 +1,20 @@
package ebpf
-//go:generate stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType
+import (
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+//go:generate stringer -output types_string.go -type=MapType,ProgramType,PinType
// MapType indicates the type map structure
// that will be initialized in the kernel.
type MapType uint32
+// Max returns the latest supported MapType.
+func (MapType) Max() MapType {
+ return maxMapType - 1
+}
+
// All the various map types that can be created
const (
UnspecifiedMap MapType = iota
@@ -81,11 +90,22 @@ const (
SkStorage
// DevMapHash - Hash-based indexing scheme for references to network devices.
DevMapHash
+ // StructOpsMap - This map holds a kernel struct with its function pointer implemented in a BPF
+ // program.
+ StructOpsMap
+ // RingBuf - Similar to PerfEventArray, but shared across all CPUs.
+ RingBuf
+ // InodeStorage - Specialized local storage map for inodes.
+ InodeStorage
+ // TaskStorage - Specialized local storage map for task_struct.
+ TaskStorage
+ // maxMapType - Bound enum of MapTypes, has to be last in enum.
+ maxMapType
)
// hasPerCPUValue returns true if the Map stores a value per CPU.
func (mt MapType) hasPerCPUValue() bool {
- return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash
+ return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage
}
// canStoreMap returns true if the map type accepts a map fd
@@ -100,9 +120,25 @@ func (mt MapType) canStoreProgram() bool {
return mt == ProgramArray
}
+// hasBTF returns true if the map type supports BTF key/value metadata.
+func (mt MapType) hasBTF() bool {
+ switch mt {
+ case PerfEventArray, CGroupArray, StackTrace, ArrayOfMaps, HashOfMaps, DevMap,
+ DevMapHash, CPUMap, XSKMap, SockMap, SockHash, Queue, Stack, RingBuf:
+ return false
+ default:
+ return true
+ }
+}
+
// ProgramType of the eBPF program
type ProgramType uint32
+// Max return the latest supported ProgramType.
+func (ProgramType) Max() ProgramType {
+ return maxProgramType - 1
+}
+
// eBPF program types
const (
UnspecifiedProgram ProgramType = iota
@@ -136,6 +172,8 @@ const (
Extension
LSM
SkLookup
+ Syscall
+ maxProgramType
)
// AttachType of the eBPF program, needed to differentiate allowed context accesses in
@@ -143,6 +181,8 @@ const (
// Will cause invalid argument (EINVAL) at program load time if set incorrectly.
type AttachType uint32
+//go:generate stringer -type AttachType -trimprefix Attach
+
// AttachNone is an alias for AttachCGroupInetIngress for readability reasons.
const AttachNone AttachType = 0
@@ -185,6 +225,10 @@ const (
AttachXDPCPUMap
AttachSkLookup
AttachXDP
+ AttachSkSKBVerdict
+ AttachSkReuseportSelect
+ AttachSkReuseportSelectOrMigrate
+ AttachPerfEvent
)
// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command
@@ -202,6 +246,33 @@ const (
PinByName
)
+// LoadPinOptions control how a pinned object is loaded.
+type LoadPinOptions struct {
+ // Request a read-only or write-only object. The default is a read-write
+ // object. Only one of the flags may be set.
+ ReadOnly bool
+ WriteOnly bool
+
+ // Raw flags for the syscall. Other fields of this struct take precedence.
+ Flags uint32
+}
+
+// Marshal returns a value suitable for BPF_OBJ_GET syscall file_flags parameter.
+func (lpo *LoadPinOptions) Marshal() uint32 {
+ if lpo == nil {
+ return 0
+ }
+
+ flags := lpo.Flags
+ if lpo.ReadOnly {
+ flags |= unix.BPF_F_RDONLY
+ }
+ if lpo.WriteOnly {
+ flags |= unix.BPF_F_WRONLY
+ }
+ return flags
+}
+
// BatchOptions batch map operations options
//
// Mirrors libbpf struct bpf_map_batch_opts
diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go
index 976bd76be..e80b948b0 100644
--- a/vendor/github.com/cilium/ebpf/types_string.go
+++ b/vendor/github.com/cilium/ebpf/types_string.go
@@ -1,4 +1,4 @@
-// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,AttachType,PinType"; DO NOT EDIT.
+// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,PinType"; DO NOT EDIT.
package ebpf
@@ -34,11 +34,16 @@ func _() {
_ = x[Stack-23]
_ = x[SkStorage-24]
_ = x[DevMapHash-25]
+ _ = x[StructOpsMap-26]
+ _ = x[RingBuf-27]
+ _ = x[InodeStorage-28]
+ _ = x[TaskStorage-29]
+ _ = x[maxMapType-30]
}
-const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHash"
+const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStoragemaxMapType"
-var _MapType_index = [...]uint8{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248}
+var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290, 300}
func (i MapType) String() string {
if i >= MapType(len(_MapType_index)-1) {
@@ -81,11 +86,13 @@ func _() {
_ = x[Extension-28]
_ = x[LSM-29]
_ = x[SkLookup-30]
+ _ = x[Syscall-31]
+ _ = x[maxProgramType-32]
}
-const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookup"
+const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallmaxProgramType"
-var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294}
+var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 315}
func (i ProgramType) String() string {
if i >= ProgramType(len(_ProgramType_index)-1) {
@@ -97,61 +104,6 @@ func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
- _ = x[AttachNone-0]
- _ = x[AttachCGroupInetIngress-0]
- _ = x[AttachCGroupInetEgress-1]
- _ = x[AttachCGroupInetSockCreate-2]
- _ = x[AttachCGroupSockOps-3]
- _ = x[AttachSkSKBStreamParser-4]
- _ = x[AttachSkSKBStreamVerdict-5]
- _ = x[AttachCGroupDevice-6]
- _ = x[AttachSkMsgVerdict-7]
- _ = x[AttachCGroupInet4Bind-8]
- _ = x[AttachCGroupInet6Bind-9]
- _ = x[AttachCGroupInet4Connect-10]
- _ = x[AttachCGroupInet6Connect-11]
- _ = x[AttachCGroupInet4PostBind-12]
- _ = x[AttachCGroupInet6PostBind-13]
- _ = x[AttachCGroupUDP4Sendmsg-14]
- _ = x[AttachCGroupUDP6Sendmsg-15]
- _ = x[AttachLircMode2-16]
- _ = x[AttachFlowDissector-17]
- _ = x[AttachCGroupSysctl-18]
- _ = x[AttachCGroupUDP4Recvmsg-19]
- _ = x[AttachCGroupUDP6Recvmsg-20]
- _ = x[AttachCGroupGetsockopt-21]
- _ = x[AttachCGroupSetsockopt-22]
- _ = x[AttachTraceRawTp-23]
- _ = x[AttachTraceFEntry-24]
- _ = x[AttachTraceFExit-25]
- _ = x[AttachModifyReturn-26]
- _ = x[AttachLSMMac-27]
- _ = x[AttachTraceIter-28]
- _ = x[AttachCgroupInet4GetPeername-29]
- _ = x[AttachCgroupInet6GetPeername-30]
- _ = x[AttachCgroupInet4GetSockname-31]
- _ = x[AttachCgroupInet6GetSockname-32]
- _ = x[AttachXDPDevMap-33]
- _ = x[AttachCgroupInetSockRelease-34]
- _ = x[AttachXDPCPUMap-35]
- _ = x[AttachSkLookup-36]
- _ = x[AttachXDP-37]
-}
-
-const _AttachType_name = "AttachNoneAttachCGroupInetEgressAttachCGroupInetSockCreateAttachCGroupSockOpsAttachSkSKBStreamParserAttachSkSKBStreamVerdictAttachCGroupDeviceAttachSkMsgVerdictAttachCGroupInet4BindAttachCGroupInet6BindAttachCGroupInet4ConnectAttachCGroupInet6ConnectAttachCGroupInet4PostBindAttachCGroupInet6PostBindAttachCGroupUDP4SendmsgAttachCGroupUDP6SendmsgAttachLircMode2AttachFlowDissectorAttachCGroupSysctlAttachCGroupUDP4RecvmsgAttachCGroupUDP6RecvmsgAttachCGroupGetsockoptAttachCGroupSetsockoptAttachTraceRawTpAttachTraceFEntryAttachTraceFExitAttachModifyReturnAttachLSMMacAttachTraceIterAttachCgroupInet4GetPeernameAttachCgroupInet6GetPeernameAttachCgroupInet4GetSocknameAttachCgroupInet6GetSocknameAttachXDPDevMapAttachCgroupInetSockReleaseAttachXDPCPUMapAttachSkLookupAttachXDP"
-
-var _AttachType_index = [...]uint16{0, 10, 32, 58, 77, 100, 124, 142, 160, 181, 202, 226, 250, 275, 300, 323, 346, 361, 380, 398, 421, 444, 466, 488, 504, 521, 537, 555, 567, 582, 610, 638, 666, 694, 709, 736, 751, 765, 774}
-
-func (i AttachType) String() string {
- if i >= AttachType(len(_AttachType_index)-1) {
- return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]]
-}
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
_ = x[PinNone-0]
_ = x[PinByName-1]
}