summary refs log tree commit diff
path: root/vendor/github.com/cilium/ebpf/internal
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/cilium/ebpf/internal')
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/btf.go791
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/btf_types.go269
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/core.go388
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/doc.go8
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/ext_info.go281
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/fuzz.go49
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/strings.go60
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/types.go871
-rw-r--r--vendor/github.com/cilium/ebpf/internal/cpu.go62
-rw-r--r--vendor/github.com/cilium/ebpf/internal/elf.go52
-rw-r--r--vendor/github.com/cilium/ebpf/internal/endian.go24
-rw-r--r--vendor/github.com/cilium/ebpf/internal/errors.go47
-rw-r--r--vendor/github.com/cilium/ebpf/internal/fd.go69
-rw-r--r--vendor/github.com/cilium/ebpf/internal/feature.go138
-rw-r--r--vendor/github.com/cilium/ebpf/internal/io.go16
-rw-r--r--vendor/github.com/cilium/ebpf/internal/ptr.go30
-rw-r--r--vendor/github.com/cilium/ebpf/internal/ptr_32_be.go14
-rw-r--r--vendor/github.com/cilium/ebpf/internal/ptr_32_le.go14
-rw-r--r--vendor/github.com/cilium/ebpf/internal/ptr_64.go14
-rw-r--r--vendor/github.com/cilium/ebpf/internal/syscall.go179
-rw-r--r--vendor/github.com/cilium/ebpf/internal/syscall_string.go56
-rw-r--r--vendor/github.com/cilium/ebpf/internal/unix/types_linux.go170
-rw-r--r--vendor/github.com/cilium/ebpf/internal/unix/types_other.go228
23 files changed, 3830 insertions, 0 deletions
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf.go b/vendor/github.com/cilium/ebpf/internal/btf/btf.go
new file mode 100644
index 000000000..1e66d9476
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/btf.go
@@ -0,0 +1,791 @@
+package btf
+
+import (
+ "bytes"
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "reflect"
+ "sync"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
// btfMagic is the magic number at the start of every BTF blob ("eB9F").
// parseBTF rejects input whose header does not carry it.
const btfMagic = 0xeB9F

// Errors returned by BTF functions.
var (
	ErrNotSupported   = internal.ErrNotSupported
	ErrNotFound       = errors.New("not found")
	ErrNoExtendedInfo = errors.New("no extended info")
)

// Spec represents decoded BTF.
type Spec struct {
	// rawTypes holds the wire-format types, kept around so the Spec can be
	// re-marshaled (see Spec.marshal).
	rawTypes []rawType
	// strings is the raw string table the type name offsets point into.
	strings stringTable
	types   []Type
	// namedTypes indexes types by their essential name (see essentialName
	// in FindType); multiple candidates may share one key.
	namedTypes map[string][]namedType
	// funcInfos, lineInfos and coreRelos come from the .BTF.ext section and
	// are keyed by ELF section name. They are nil if no .BTF.ext was present.
	funcInfos map[string]extInfo
	lineInfos map[string]extInfo
	coreRelos map[string]bpfCoreRelos
	byteOrder binary.ByteOrder
}

// btfHeader is the fixed-size header at the start of a BTF blob, mirroring
// struct btf_header in the kernel. TypeOff/StringOff are relative to the end
// of the header (HdrLen), not the start of the blob.
type btfHeader struct {
	Magic   uint16
	Version uint8
	Flags   uint8
	HdrLen  uint32

	TypeOff   uint32
	TypeLen   uint32
	StringOff uint32
	StringLen uint32
}
+
// LoadSpecFromReader reads BTF sections from an ELF.
//
// Returns a nil Spec and no error if no BTF was present.
func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
	file, err := internal.NewSafeELFFile(rd)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	btfSection, btfExtSection, sectionSizes, err := findBtfSections(file)
	if err != nil {
		return nil, err
	}

	// No .BTF section at all: not an error, just no BTF.
	if btfSection == nil {
		return nil, nil
	}

	symbols, err := file.Symbols()
	if err != nil {
		return nil, fmt.Errorf("can't read symbols: %v", err)
	}

	// Collect the offset of every symbol that lives in one of the sections
	// recorded by findBtfSections. These are used later to fix up the
	// (compiler-omitted) offsets inside DATASEC types.
	variableOffsets := make(map[variable]uint32)
	for _, symbol := range symbols {
		if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
			// Ignore things like SHN_ABS
			continue
		}

		if int(symbol.Section) >= len(file.Sections) {
			return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
		}

		secName := file.Sections[symbol.Section].Name
		if _, ok := sectionSizes[secName]; !ok {
			continue
		}

		// DATASEC offsets are uint32 on the wire, so larger values can't
		// be represented.
		if symbol.Value > math.MaxUint32 {
			return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name)
		}

		variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
	}

	spec, err := loadNakedSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets)
	if err != nil {
		return nil, err
	}

	// .BTF.ext (func/line info and CO-RE relocations) is optional.
	if btfExtSection == nil {
		return spec, nil
	}

	spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
	if err != nil {
		return nil, fmt.Errorf("can't read ext info: %w", err)
	}

	return spec, nil
}
+
+func findBtfSections(file *internal.SafeELFFile) (*elf.Section, *elf.Section, map[string]uint32, error) {
+ var (
+ btfSection *elf.Section
+ btfExtSection *elf.Section
+ sectionSizes = make(map[string]uint32)
+ )
+
+ for _, sec := range file.Sections {
+ switch sec.Name {
+ case ".BTF":
+ btfSection = sec
+ case ".BTF.ext":
+ btfExtSection = sec
+ default:
+ if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
+ break
+ }
+
+ if sec.Size > math.MaxUint32 {
+ return nil, nil, nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
+ }
+
+ sectionSizes[sec.Name] = uint32(sec.Size)
+ }
+ }
+ return btfSection, btfExtSection, sectionSizes, nil
+}
+
+func loadSpecFromVmlinux(rd io.ReaderAt) (*Spec, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ btfSection, _, _, err := findBtfSections(file)
+ if err != nil {
+ return nil, fmt.Errorf(".BTF ELF section: %s", err)
+ }
+ if btfSection == nil {
+ return nil, fmt.Errorf("unable to find .BTF ELF section")
+ }
+ return loadNakedSpec(btfSection.Open(), file.ByteOrder, nil, nil)
+}
+
+func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) {
+ rawTypes, rawStrings, err := parseBTF(btf, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets)
+ if err != nil {
+ return nil, err
+ }
+
+ types, typesByName, err := inflateRawTypes(rawTypes, rawStrings)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Spec{
+ rawTypes: rawTypes,
+ namedTypes: typesByName,
+ types: types,
+ strings: rawStrings,
+ byteOrder: bo,
+ }, nil
+}
+
// kernelBTF caches the result of loadKernelSpec; parsing vmlinux BTF is
// expensive, so it is done at most once. Guarded by the embedded mutex.
var kernelBTF struct {
	sync.Mutex
	*Spec
}

// LoadKernelSpec returns the current kernel's BTF information.
//
// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns
// ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
	kernelBTF.Lock()
	defer kernelBTF.Unlock()

	if kernelBTF.Spec != nil {
		return kernelBTF.Spec, nil
	}

	// NOTE: a failed load is not cached; the next call retries.
	var err error
	kernelBTF.Spec, err = loadKernelSpec()
	return kernelBTF.Spec, err
}
+
// loadKernelSpec reads the running kernel's BTF, preferring the sysfs
// export and falling back to on-disk vmlinux images.
func loadKernelSpec() (*Spec, error) {
	release, err := unix.KernelRelease()
	if err != nil {
		return nil, fmt.Errorf("can't read kernel release number: %w", err)
	}

	// Fast path: kernels with CONFIG_DEBUG_INFO_BTF expose the raw blob here.
	fh, err := os.Open("/sys/kernel/btf/vmlinux")
	if err == nil {
		defer fh.Close()

		return loadNakedSpec(fh, internal.NativeEndian, nil, nil)
	}

	// use same list of locations as libbpf
	// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
	locations := []string{
		"/boot/vmlinux-%s",
		"/lib/modules/%s/vmlinux-%[1]s",
		"/lib/modules/%s/build/vmlinux",
		"/usr/lib/modules/%s/kernel/vmlinux",
		"/usr/lib/debug/boot/vmlinux-%s",
		"/usr/lib/debug/boot/vmlinux-%s.debug",
		"/usr/lib/debug/lib/modules/%s/vmlinux",
	}

	for _, loc := range locations {
		path := fmt.Sprintf(loc, release)

		fh, err := os.Open(path)
		if err != nil {
			continue
		}
		// defer inside the loop is safe here: a successful Open is
		// immediately followed by return, so at most one defer accumulates.
		defer fh.Close()

		return loadSpecFromVmlinux(fh)
	}

	return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported)
}
+
// parseBTF decodes a raw BTF blob into its type entries and string table.
//
// The blob is read fully into memory so that the header's TypeOff/StringOff
// (both relative to the end of the header) can be followed with Seek.
func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) {
	rawBTF, err := ioutil.ReadAll(btf)
	if err != nil {
		return nil, nil, fmt.Errorf("can't read BTF: %v", err)
	}

	rd := bytes.NewReader(rawBTF)

	var header btfHeader
	if err := binary.Read(rd, bo, &header); err != nil {
		return nil, nil, fmt.Errorf("can't read header: %v", err)
	}

	if header.Magic != btfMagic {
		return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
	}

	if header.Version != 1 {
		return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
	}

	if header.Flags != 0 {
		return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
	}

	// The header may be longer than the struct we know about; any trailing
	// bytes must be zero padding (enforced via DiscardZeroes).
	remainder := int64(header.HdrLen) - int64(binary.Size(&header))
	if remainder < 0 {
		return nil, nil, errors.New("header is too short")
	}

	if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil {
		return nil, nil, fmt.Errorf("header padding: %v", err)
	}

	// Read the string table first: type decoding needs name offsets to be
	// resolvable later.
	if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil {
		return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err)
	}

	rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen)))
	if err != nil {
		return nil, nil, fmt.Errorf("can't read type names: %w", err)
	}

	if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil {
		return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err)
	}

	rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo)
	if err != nil {
		return nil, nil, fmt.Errorf("can't read types: %w", err)
	}

	return rawTypes, rawStrings, nil
}
+
+type variable struct {
+ section string
+ name string
+}
+
+func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
+ for i, rawType := range rawTypes {
+ if rawType.Kind() != kindDatasec {
+ continue
+ }
+
+ name, err := rawStrings.Lookup(rawType.NameOff)
+ if err != nil {
+ return err
+ }
+
+ if name == ".kconfig" || name == ".ksyms" {
+ return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
+ }
+
+ if rawTypes[i].SizeType != 0 {
+ continue
+ }
+
+ size, ok := sectionSizes[name]
+ if !ok {
+ return fmt.Errorf("data section %s: missing size", name)
+ }
+
+ rawTypes[i].SizeType = size
+
+ secinfos := rawType.data.([]btfVarSecinfo)
+ for j, secInfo := range secinfos {
+ id := int(secInfo.Type - 1)
+ if id >= len(rawTypes) {
+ return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
+ }
+
+ varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
+ if err != nil {
+ return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
+ }
+
+ offset, ok := variableOffsets[variable{name, varName}]
+ if !ok {
+ return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
+ }
+
+ secinfos[j].Offset = offset
+ }
+ }
+
+ return nil
+}
+
// marshalOpts controls how a Spec is serialized back into a BTF blob.
type marshalOpts struct {
	ByteOrder binary.ByteOrder
	// StripFuncLinkage rewrites all FUNC linkages to static, for kernels
	// that don't understand global linkage (see haveFuncLinkage).
	StripFuncLinkage bool
}

// marshal serializes the Spec into the wire format accepted by BPF_BTF_LOAD:
// header, type section, string section.
func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
	var (
		buf       bytes.Buffer
		header    = new(btfHeader)
		headerLen = binary.Size(header)
	)

	// Reserve space for the header. We have to write it last since
	// we don't know the size of the type section yet.
	_, _ = buf.Write(make([]byte, headerLen))

	// Write type section, just after the header.
	for _, raw := range s.rawTypes {
		switch {
		case opts.StripFuncLinkage && raw.Kind() == kindFunc:
			// raw is a copy, so mutating it doesn't affect s.rawTypes.
			raw.SetLinkage(linkageStatic)
		}

		if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
			return nil, fmt.Errorf("can't marshal BTF: %w", err)
		}
	}

	typeLen := uint32(buf.Len() - headerLen)

	// Write string section after type section.
	_, _ = buf.Write(s.strings)

	// Fill out the header, and write it out.
	header = &btfHeader{
		Magic:     btfMagic,
		Version:   1,
		Flags:     0,
		HdrLen:    uint32(headerLen),
		TypeOff:   0,
		TypeLen:   typeLen,
		StringOff: typeLen,
		StringLen: uint32(len(s.strings)),
	}

	// Overwrite the reserved space in place; sliceWriter errors if the
	// encoded header doesn't exactly fill it.
	raw := buf.Bytes()
	err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
	if err != nil {
		return nil, fmt.Errorf("can't write header: %v", err)
	}

	return raw, nil
}
+
// sliceWriter is an io.Writer that accepts exactly one Write filling the
// whole underlying slice. Any other size is an error.
type sliceWriter []byte

// Write copies p into the slice. It fails unless len(p) == len(sw).
func (sw sliceWriter) Write(p []byte) (int, error) {
	if len(p) == len(sw) {
		return copy(sw, p), nil
	}
	return 0, errors.New("size doesn't match")
}
+
+// Program finds the BTF for a specific section.
+//
+// Length is the number of bytes in the raw BPF instruction stream.
+//
+// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't
+// contain extended BTF info.
+func (s *Spec) Program(name string, length uint64) (*Program, error) {
+ if length == 0 {
+ return nil, errors.New("length musn't be zero")
+ }
+
+ if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil {
+ return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo)
+ }
+
+ funcInfos, funcOK := s.funcInfos[name]
+ lineInfos, lineOK := s.lineInfos[name]
+ coreRelos, coreOK := s.coreRelos[name]
+
+ if !funcOK && !lineOK && !coreOK {
+ return nil, fmt.Errorf("no extended BTF info for section %s", name)
+ }
+
+ return &Program{s, length, funcInfos, lineInfos, coreRelos}, nil
+}
+
+// Datasec returns the BTF required to create maps which represent data sections.
+func (s *Spec) Datasec(name string) (*Map, error) {
+ var datasec Datasec
+ if err := s.FindType(name, &datasec); err != nil {
+ return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err)
+ }
+
+ m := NewMap(s, &Void{}, &datasec)
+ return &m, nil
+}
+
// FindType searches for a type with a specific name.
//
// hint determines the type of the returned Type.
//
// Returns an error wrapping ErrNotFound if no matching
// type exists in spec.
func (s *Spec) FindType(name string, typ Type) error {
	var (
		wanted    = reflect.TypeOf(typ)
		candidate Type
	)

	// namedTypes is keyed by essential name, so flavored variants
	// (e.g. "foo___v2") of the same type share a bucket.
	for _, typ := range s.namedTypes[essentialName(name)] {
		// Only consider candidates of the same Go type as the hint.
		if reflect.TypeOf(typ) != wanted {
			continue
		}

		// Match against the full name, not just the essential one.
		if typ.name() != name {
			continue
		}

		// Ambiguity is an error rather than picking an arbitrary winner.
		if candidate != nil {
			return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
		}

		candidate = typ
	}

	if candidate == nil {
		return fmt.Errorf("type %s: %w", name, ErrNotFound)
	}

	// Copy the found type into the caller's hint value, so the Spec's own
	// types are never aliased by callers.
	value := reflect.Indirect(reflect.ValueOf(copyType(candidate)))
	reflect.Indirect(reflect.ValueOf(typ)).Set(value)
	return nil
}
+
// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
	fd *internal.FD
}

// NewHandle loads BTF into the kernel.
//
// Returns ErrNotSupported if BTF is not supported.
func NewHandle(spec *Spec) (*Handle, error) {
	if err := haveBTF(); err != nil {
		return nil, err
	}

	// The kernel interprets the blob in native byte order; cross-endian
	// BTF cannot be loaded.
	if spec.byteOrder != internal.NativeEndian {
		return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
	}

	btf, err := spec.marshal(marshalOpts{
		ByteOrder: internal.NativeEndian,
		// Older kernels reject non-static function linkage; strip it there.
		StripFuncLinkage: haveFuncLinkage() != nil,
	})
	if err != nil {
		return nil, fmt.Errorf("can't marshal BTF: %w", err)
	}

	// btfSize is a uint32 in the syscall attr.
	if uint64(len(btf)) > math.MaxUint32 {
		return nil, errors.New("BTF exceeds the maximum size")
	}

	attr := &bpfLoadBTFAttr{
		btf:     internal.NewSlicePointer(btf),
		btfSize: uint32(len(btf)),
	}

	fd, err := bpfLoadBTF(attr)
	if err != nil {
		// Retry the load with the verifier log enabled, purely to obtain
		// a useful error message; the original error is what's reported.
		logBuf := make([]byte, 64*1024)
		attr.logBuf = internal.NewSlicePointer(logBuf)
		attr.btfLogSize = uint32(len(logBuf))
		attr.btfLogLevel = 1
		_, logErr := bpfLoadBTF(attr)
		return nil, internal.ErrorWithLog(err, logBuf, logErr)
	}

	return &Handle{fd}, nil
}

// Close destroys the handle.
//
// Subsequent calls to FD will return an invalid value.
func (h *Handle) Close() error {
	return h.fd.Close()
}

// FD returns the file descriptor for the handle.
//
// Returns -1 if the handle has been closed.
func (h *Handle) FD() int {
	value, err := h.fd.Value()
	if err != nil {
		return -1
	}

	return int(value)
}
+
+// Map is the BTF for a map.
+type Map struct {
+ spec *Spec
+ key, value Type
+}
+
+// NewMap returns a new Map containing the given values.
+// The key and value arguments are initialized to Void if nil values are given.
+func NewMap(spec *Spec, key Type, value Type) Map {
+ if key == nil {
+ key = &Void{}
+ }
+ if value == nil {
+ value = &Void{}
+ }
+
+ return Map{
+ spec: spec,
+ key: key,
+ value: value,
+ }
+}
+
+// MapSpec should be a method on Map, but is a free function
+// to hide it from users of the ebpf package.
+func MapSpec(m *Map) *Spec {
+ return m.spec
+}
+
+// MapKey should be a method on Map, but is a free function
+// to hide it from users of the ebpf package.
+func MapKey(m *Map) Type {
+ return m.key
+}
+
+// MapValue should be a method on Map, but is a free function
+// to hide it from users of the ebpf package.
+func MapValue(m *Map) Type {
+ return m.value
+}
+
// Program is the BTF information for a stream of instructions.
type Program struct {
	spec *Spec
	// length is the number of bytes in the instruction stream; extended
	// info offsets of appended programs are shifted by it.
	length               uint64
	funcInfos, lineInfos extInfo
	coreRelos            bpfCoreRelos
}

// ProgramSpec returns the Spec needed for loading function and line infos into the kernel.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramSpec(s *Program) *Spec {
	return s.spec
}

// ProgramAppend the information from other to the Program.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramAppend(s, other *Program) error {
	// Offsets in other's infos are shifted by s.length when appending.
	funcInfos, err := s.funcInfos.append(other.funcInfos, s.length)
	if err != nil {
		return fmt.Errorf("func infos: %w", err)
	}

	lineInfos, err := s.lineInfos.append(other.lineInfos, s.length)
	if err != nil {
		return fmt.Errorf("line infos: %w", err)
	}

	// Only commit once both appends have succeeded, so s is unchanged
	// on error.
	s.funcInfos = funcInfos
	s.lineInfos = lineInfos
	s.coreRelos = s.coreRelos.append(other.coreRelos, s.length)
	s.length += other.length
	return nil
}
+
+// ProgramFuncInfos returns the binary form of BTF function infos.
+//
+// This is a free function instead of a method to hide it from users
+// of package ebpf.
+func ProgramFuncInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
+ bytes, err = s.funcInfos.MarshalBinary()
+ if err != nil {
+ return 0, nil, err
+ }
+
+ return s.funcInfos.recordSize, bytes, nil
+}
+
+// ProgramLineInfos returns the binary form of BTF line infos.
+//
+// This is a free function instead of a method to hide it from users
+// of package ebpf.
+func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
+ bytes, err = s.lineInfos.MarshalBinary()
+ if err != nil {
+ return 0, nil, err
+ }
+
+ return s.lineInfos.recordSize, bytes, nil
+}
+
// ProgramRelocations returns the CO-RE relocations required to adjust the
// program to the target.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramRelocations(s *Program, target *Spec) (map[uint64]Relocation, error) {
	if len(s.coreRelos) == 0 {
		return nil, nil
	}

	return coreRelocate(s.spec, target, s.coreRelos)
}

// bpfLoadBTFAttr mirrors the BPF_BTF_LOAD variant of union bpf_attr.
// Field order and sizes must match the kernel ABI exactly.
type bpfLoadBTFAttr struct {
	btf         internal.Pointer
	logBuf      internal.Pointer
	btfSize     uint32
	btfLogSize  uint32
	btfLogLevel uint32
}

// bpfLoadBTF wraps the BPF_BTF_LOAD syscall, returning the new BTF fd.
func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) {
	fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
	if err != nil {
		return nil, err
	}

	return internal.NewFD(uint32(fd)), nil
}
+
+func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
+ const minHeaderLength = 24
+
+ typesLen := uint32(binary.Size(types))
+ header := btfHeader{
+ Magic: btfMagic,
+ Version: 1,
+ HdrLen: minHeaderLength,
+ TypeOff: 0,
+ TypeLen: typesLen,
+ StringOff: typesLen,
+ StringLen: uint32(len(strings)),
+ }
+
+ buf := new(bytes.Buffer)
+ _ = binary.Write(buf, bo, &header)
+ _ = binary.Write(buf, bo, types)
+ buf.Write(strings)
+
+ return buf.Bytes()
+}
+
// haveBTF probes whether the kernel accepts BPF_BTF_LOAD at all.
var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
	var (
		types struct {
			Integer btfType
			Var     btfType
			btfVar  struct{ Linkage uint32 }
		}
		// Minimal string table: empty string at offset 0, "a" at offset 1.
		strings = []byte{0, 'a', 0}
	)

	// We use a BTF_KIND_VAR here, to make sure that
	// the kernel understands BTF at least as well as we
	// do. BTF_KIND_VAR was introduced ~5.1.
	types.Integer.SetKind(kindPointer)
	types.Var.NameOff = 1
	types.Var.SetKind(kindVar)
	types.Var.SizeType = 1

	btf := marshalBTF(&types, strings, internal.NativeEndian)

	fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
		btf:     internal.NewSlicePointer(btf),
		btfSize: uint32(len(btf)),
	})
	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
		// Treat both EINVAL and EPERM as not supported: loading the program
		// might still succeed without BTF.
		return internal.ErrNotSupported
	}
	if err != nil {
		return err
	}

	fd.Close()
	return nil
})

// haveFuncLinkage probes whether the kernel accepts non-static function
// linkage on BTF_KIND_FUNC; older kernels reject it with EINVAL.
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
	if err := haveBTF(); err != nil {
		return err
	}

	var (
		types struct {
			FuncProto btfType
			Func      btfType
		}
		strings = []byte{0, 'a', 0}
	)

	types.FuncProto.SetKind(kindFuncProto)
	types.Func.SetKind(kindFunc)
	types.Func.SizeType = 1 // aka FuncProto
	types.Func.NameOff = 1
	types.Func.SetLinkage(linkageGlobal)

	btf := marshalBTF(&types, strings, internal.NativeEndian)

	fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
		btf:     internal.NewSlicePointer(btf),
		btfSize: uint32(len(btf)),
	})
	if errors.Is(err, unix.EINVAL) {
		return internal.ErrNotSupported
	}
	if err != nil {
		return err
	}

	fd.Close()
	return nil
})
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go b/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go
new file mode 100644
index 000000000..a4cde3fe8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go
@@ -0,0 +1,269 @@
+package btf
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
// btfKind describes a Type.
type btfKind uint8

// Equivalents of the BTF_KIND_* constants.
const (
	kindUnknown btfKind = iota
	kindInt
	kindPointer
	kindArray
	kindStruct
	kindUnion
	kindEnum
	kindForward
	kindTypedef
	kindVolatile
	kindConst
	kindRestrict
	// Added ~4.20
	kindFunc
	kindFuncProto
	// Added ~5.1
	kindVar
	kindDatasec
)

// btfFuncLinkage mirrors the BTF_FUNC_* linkage values stored in the vlen
// bits of a FUNC type's Info field.
type btfFuncLinkage uint8

const (
	linkageStatic btfFuncLinkage = iota
	linkageGlobal
	linkageExtern
)

// Bit positions and widths within btfType.Info. NOTE: despite the name,
// btfTypeVlenMask is a bit *length* (16), not a mask — it is passed to
// mask() below, like btfTypeKindLen.
const (
	btfTypeKindShift     = 24
	btfTypeKindLen       = 4
	btfTypeVlenShift     = 0
	btfTypeVlenMask      = 16
	btfTypeKindFlagShift = 31
	btfTypeKindFlagMask  = 1
)

// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
type btfType struct {
	NameOff uint32
	/* "info" bits arrangement
	 * bits 0-15: vlen (e.g. # of struct's members), linkage
	 * bits 16-23: unused
	 * bits 24-27: kind (e.g. int, ptr, array...etc)
	 * bits 28-30: unused
	 * bit 31: kind_flag, currently used by
	 *	struct, union and fwd
	 */
	Info uint32
	/* "size" is used by INT, ENUM, STRUCT and UNION.
	 * "size" tells the size of the type it is describing.
	 *
	 * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
	 * FUNC and FUNC_PROTO.
	 * "type" is a type_id referring to another type.
	 */
	SizeType uint32
}
+
+func (k btfKind) String() string {
+ switch k {
+ case kindUnknown:
+ return "Unknown"
+ case kindInt:
+ return "Integer"
+ case kindPointer:
+ return "Pointer"
+ case kindArray:
+ return "Array"
+ case kindStruct:
+ return "Struct"
+ case kindUnion:
+ return "Union"
+ case kindEnum:
+ return "Enumeration"
+ case kindForward:
+ return "Forward"
+ case kindTypedef:
+ return "Typedef"
+ case kindVolatile:
+ return "Volatile"
+ case kindConst:
+ return "Const"
+ case kindRestrict:
+ return "Restrict"
+ case kindFunc:
+ return "Function"
+ case kindFuncProto:
+ return "Function Proto"
+ case kindVar:
+ return "Variable"
+ case kindDatasec:
+ return "Section"
+ default:
+ return fmt.Sprintf("Unknown (%d)", k)
+ }
+}
+
// mask returns a value with the low len bits set.
func mask(len uint32) uint32 {
	return (1 << len) - 1
}

// info extracts a len-bit wide field at the given shift from Info.
func (bt *btfType) info(len, shift uint32) uint32 {
	return (bt.Info >> shift) & mask(len)
}

// setInfo overwrites a len-bit wide field at the given shift in Info,
// clearing the old bits first.
func (bt *btfType) setInfo(value, len, shift uint32) {
	bt.Info &^= mask(len) << shift
	bt.Info |= (value & mask(len)) << shift
}

func (bt *btfType) Kind() btfKind {
	return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift))
}

func (bt *btfType) SetKind(kind btfKind) {
	bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift)
}

func (bt *btfType) Vlen() int {
	return int(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}

func (bt *btfType) SetVlen(vlen int) {
	bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}

func (bt *btfType) KindFlag() bool {
	return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}

// Linkage reads a FUNC's linkage, which the kernel stores in the same
// bits as vlen.
func (bt *btfType) Linkage() btfFuncLinkage {
	return btfFuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}

func (bt *btfType) SetLinkage(linkage btfFuncLinkage) {
	bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
}

// Type returns SizeType interpreted as a type ID (PTR, TYPEDEF, etc.).
func (bt *btfType) Type() TypeID {
	// TODO: Panic here if wrong kind?
	return TypeID(bt.SizeType)
}

// Size returns SizeType interpreted as a byte size (INT, ENUM, STRUCT, UNION).
func (bt *btfType) Size() uint32 {
	// TODO: Panic here if wrong kind?
	return bt.SizeType
}
// rawType is a btfType header plus its kind-specific trailing payload
// (nil for kinds without one; see readTypes).
type rawType struct {
	btfType
	data interface{}
}

// Marshal writes the wire representation of the type: the fixed header
// followed by the payload, if any.
func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
	if err := binary.Write(w, bo, &rt.btfType); err != nil {
		return err
	}

	if rt.data == nil {
		return nil
	}

	return binary.Write(w, bo, rt.data)
}

// btfArray is the payload of an ARRAY type (struct btf_array).
type btfArray struct {
	Type      TypeID
	IndexType TypeID
	Nelems    uint32
}

// btfMember is one STRUCT/UNION member (struct btf_member).
type btfMember struct {
	NameOff uint32
	Type    TypeID
	Offset  uint32
}

// btfVarSecinfo is one variable entry of a DATASEC (struct btf_var_secinfo).
type btfVarSecinfo struct {
	Type   TypeID
	Offset uint32
	Size   uint32
}

// btfVariable is the payload of a VAR type (struct btf_var).
type btfVariable struct {
	Linkage uint32
}

// btfEnum is one ENUM value (struct btf_enum).
type btfEnum struct {
	NameOff uint32
	Val     int32
}

// btfParam is one FUNC_PROTO parameter (struct btf_param).
type btfParam struct {
	NameOff uint32
	Type    TypeID
}
+
+func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
+ var (
+ header btfType
+ types []rawType
+ )
+
+ for id := TypeID(1); ; id++ {
+ if err := binary.Read(r, bo, &header); err == io.EOF {
+ return types, nil
+ } else if err != nil {
+ return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
+ }
+
+ var data interface{}
+ switch header.Kind() {
+ case kindInt:
+ data = new(uint32)
+ case kindPointer:
+ case kindArray:
+ data = new(btfArray)
+ case kindStruct:
+ fallthrough
+ case kindUnion:
+ data = make([]btfMember, header.Vlen())
+ case kindEnum:
+ data = make([]btfEnum, header.Vlen())
+ case kindForward:
+ case kindTypedef:
+ case kindVolatile:
+ case kindConst:
+ case kindRestrict:
+ case kindFunc:
+ case kindFuncProto:
+ data = make([]btfParam, header.Vlen())
+ case kindVar:
+ data = new(btfVariable)
+ case kindDatasec:
+ data = make([]btfVarSecinfo, header.Vlen())
+ default:
+ return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
+ }
+
+ if data == nil {
+ types = append(types, rawType{header, nil})
+ continue
+ }
+
+ if err := binary.Read(r, bo, data); err != nil {
+ return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
+ }
+
+ types = append(types, rawType{header, data})
+ }
+}
+
// intEncoding unpacks the BTF_INT_* payload word of an INT type into its
// encoding (bits 24-27), bit offset (bits 16-23) and bit size (bits 0-7).
func intEncoding(raw uint32) (IntEncoding, uint32, byte) {
	return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff)
}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/core.go b/vendor/github.com/cilium/ebpf/internal/btf/core.go
new file mode 100644
index 000000000..52b59ed18
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/core.go
@@ -0,0 +1,388 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// Code in this file is derived from libbpf, which is available under a BSD
+// 2-Clause license.
+
// Relocation describes a CO-RE relocation: the value currently encoded
// in the instruction (Current) and the value to substitute (New).
type Relocation struct {
	Current uint32
	New     uint32
}

// equal reports whether both relocations carry identical values.
func (r Relocation) equal(other Relocation) bool {
	return r == other
}
+
// coreReloKind is the type of CO-RE relocation
type coreReloKind uint32

const (
	reloFieldByteOffset coreReloKind = iota /* field byte offset */
	reloFieldByteSize                       /* field size in bytes */
	reloFieldExists                         /* field existence in target kernel */
	reloFieldSigned                         /* field signedness (0 - unsigned, 1 - signed) */
	reloFieldLShiftU64                      /* bitfield-specific left bitshift */
	reloFieldRShiftU64                      /* bitfield-specific right bitshift */
	reloTypeIDLocal                         /* type ID in local BPF object */
	reloTypeIDTarget                        /* type ID in target kernel */
	reloTypeExists                          /* type existence in target kernel */
	reloTypeSize                            /* type size in bytes */
	reloEnumvalExists                       /* enum value existence in target kernel */
	reloEnumvalValue                        /* enum value integer value */
)

// coreReloKindNames maps each relocation kind to the name libbpf uses.
var coreReloKindNames = map[coreReloKind]string{
	reloFieldByteOffset: "byte_off",
	reloFieldByteSize:   "byte_sz",
	reloFieldExists:     "field_exists",
	reloFieldSigned:     "signed",
	reloFieldLShiftU64:  "lshift_u64",
	reloFieldRShiftU64:  "rshift_u64",
	reloTypeIDLocal:     "local_type_id",
	reloTypeIDTarget:    "target_type_id",
	reloTypeExists:      "type_exists",
	reloTypeSize:        "type_size",
	reloEnumvalExists:   "enumval_exists",
	reloEnumvalValue:    "enumval_value",
}

// String returns the libbpf name of the relocation kind, or "unknown"
// for values outside the defined range.
func (k coreReloKind) String() string {
	if name, ok := coreReloKindNames[k]; ok {
		return name
	}
	return "unknown"
}
+
// coreRelocate computes CO-RE relocations for coreRelos against the
// target BTF, returning a map of instruction offset to relocation.
// If target is nil, the running kernel's BTF is loaded and used.
func coreRelocate(local, target *Spec, coreRelos bpfCoreRelos) (map[uint64]Relocation, error) {
	if target == nil {
		var err error
		target, err = loadKernelSpec()
		if err != nil {
			return nil, err
		}
	}

	if local.byteOrder != target.byteOrder {
		return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
	}

	relocations := make(map[uint64]Relocation, len(coreRelos))
	for _, relo := range coreRelos {
		accessorStr, err := local.strings.Lookup(relo.AccessStrOff)
		if err != nil {
			return nil, err
		}

		accessor, err := parseCoreAccessor(accessorStr)
		if err != nil {
			return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
		}

		if int(relo.TypeID) >= len(local.types) {
			return nil, fmt.Errorf("invalid type id %d", relo.TypeID)
		}

		typ := local.types[relo.TypeID]

		// The local type ID is the answer for both the current and the
		// new value: no lookup in the target is necessary.
		if relo.ReloKind == reloTypeIDLocal {
			relocations[uint64(relo.InsnOff)] = Relocation{
				uint32(typ.ID()),
				uint32(typ.ID()),
			}
			continue
		}

		// All remaining kinds find candidate types in the target by
		// name, so an anonymous local type cannot be relocated.
		named, ok := typ.(namedType)
		if !ok || named.name() == "" {
			return nil, fmt.Errorf("relocate anonymous type %s: %w", typ.String(), ErrNotSupported)
		}

		// essentialName presumably strips a libbpf-style "___flavour"
		// suffix before the name match — it is defined elsewhere in
		// this package.
		name := essentialName(named.name())
		res, err := coreCalculateRelocation(typ, target.namedTypes[name], relo.ReloKind, accessor)
		if err != nil {
			return nil, fmt.Errorf("relocate %s: %w", name, err)
		}

		relocations[uint64(relo.InsnOff)] = res
	}

	return relocations, nil
}
+
// errAmbiguousRelocation is returned when several candidate target
// types produce conflicting relocation values.
var errAmbiguousRelocation = errors.New("ambiguous relocation")

// coreCalculateRelocation evaluates one relocation kind against every
// candidate target type. All compatible candidates must agree on the
// resulting relocation, otherwise it is ambiguous. Only
// reloTypeIDTarget is currently implemented.
func coreCalculateRelocation(local Type, targets []namedType, kind coreReloKind, localAccessor coreAccessor) (Relocation, error) {
	var relos []Relocation
	var matches []Type
	for _, target := range targets {
		switch kind {
		case reloTypeIDTarget:
			// Type-based relocations always use the accessor "0".
			if localAccessor[0] != 0 {
				return Relocation{}, fmt.Errorf("%s: unexpected non-zero accessor", kind)
			}

			if compat, err := coreAreTypesCompatible(local, target); err != nil {
				return Relocation{}, fmt.Errorf("%s: %s", kind, err)
			} else if !compat {
				continue
			}

			relos = append(relos, Relocation{uint32(target.ID()), uint32(target.ID())})

		default:
			return Relocation{}, fmt.Errorf("relocation %s: %w", kind, ErrNotSupported)
		}
		matches = append(matches, target)
	}

	if len(relos) == 0 {
		// TODO: Add switch for existence checks like reloEnumvalExists here.

		// TODO: This might have to be poisoned.
		return Relocation{}, fmt.Errorf("no relocation found, tried %v", targets)
	}

	// All compatible candidates must produce the same relocation.
	relo := relos[0]
	for _, altRelo := range relos[1:] {
		if !altRelo.equal(relo) {
			return Relocation{}, fmt.Errorf("multiple types %v match: %w", matches, errAmbiguousRelocation)
		}
	}

	return relo, nil
}
+
+/* coreAccessor contains a path through a struct. It contains at least one index.
+ *
+ * The interpretation depends on the kind of the relocation. The following is
+ * taken from struct bpf_core_relo in libbpf_internal.h:
+ *
+ * - for field-based relocations, string encodes an accessed field using
+ * a sequence of field and array indices, separated by colon (:). It's
+ * conceptually very close to LLVM's getelementptr ([0]) instruction's
+ * arguments for identifying offset to a field.
+ * - for type-based relocations, strings is expected to be just "0";
+ * - for enum value-based relocations, string contains an index of enum
+ * value within its enum type;
+ *
+ * Example to provide a better feel.
+ *
+ * struct sample {
+ * int a;
+ * struct {
+ * int b[10];
+ * };
+ * };
+ *
+ * struct sample s = ...;
+ * int x = &s->a; // encoded as "0:0" (a is field #0)
+ * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
+ * // b is field #0 inside anon struct, accessing elem #5)
+ * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ */
type coreAccessor []int

// parseCoreAccessor splits a colon-separated accessor string into its
// numeric components. The string must contain at least one component.
func parseCoreAccessor(accessor string) (coreAccessor, error) {
	if accessor == "" {
		return nil, fmt.Errorf("empty accessor")
	}

	parts := strings.Split(accessor, ":")
	result := make(coreAccessor, 0, len(parts))
	for _, part := range parts {
		// 31 bits to avoid overflowing int on 32 bit platforms.
		index, err := strconv.ParseUint(part, 10, 31)
		if err != nil {
			return nil, fmt.Errorf("accessor index %q: %s", part, err)
		}
		result = append(result, int(index))
	}

	return result, nil
}
+
+/* The comment below is from bpf_core_types_are_compat in libbpf.c:
+ *
+ * Check local and target types for compatibility. This check is used for
+ * type-based CO-RE relocations and follow slightly different rules than
+ * field-based relocations. This function assumes that root types were already
+ * checked for name match. Beyond that initial root-level name check, names
+ * are completely ignored. Compatibility rules are as follows:
+ * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
+ * kind should match for local and target types (i.e., STRUCT is not
+ * compatible with UNION);
+ * - for ENUMs, the size is ignored;
+ * - for INT, size and signedness are ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - CONST/VOLATILE/RESTRICT modifiers are ignored;
+ * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
+ * - FUNC_PROTOs are compatible if they have compatible signature: same
+ * number of input args and compatible return and argument types.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ */
// coreAreTypesCompatible implements the type compatibility check for
// type-based relocations described in the libbpf comment above. It
// walks both type graphs in lock-step, queueing nested types via
// typeDeque and bounding the traversal with maxTypeDepth.
func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
	var (
		localTs, targetTs typeDeque
		l, t              = &localType, &targetType
		depth             = 0
	)

	for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
		if depth >= maxTypeDepth {
			return false, errors.New("types are nested too deep")
		}

		// Qualifiers and typedefs are transparent for compatibility.
		localType = skipQualifierAndTypedef(*l)
		targetType = skipQualifierAndTypedef(*t)

		// Kinds must match exactly, e.g. struct vs union is a mismatch.
		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
			return false, nil
		}

		switch lv := (localType).(type) {
		case *Void, *Struct, *Union, *Enum, *Fwd:
			// Nothing to do here

		case *Int:
			// Sizes and signedness are ignored, but bitfields never match.
			tv := targetType.(*Int)
			if lv.isBitfield() || tv.isBitfield() {
				return false, nil
			}

		case *Pointer, *Array:
			// Recurse into the pointed-to / element type.
			depth++
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		case *FuncProto:
			// Arity must match; return and parameter types are queued.
			tv := targetType.(*FuncProto)
			if len(lv.Params) != len(tv.Params) {
				return false, nil
			}

			depth++
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		default:
			return false, fmt.Errorf("unsupported type %T", localType)
		}
	}

	// Both queues must drain together; a leftover entry means the type
	// graphs differ in shape.
	if l != nil {
		return false, fmt.Errorf("dangling local type %T", *l)
	}

	if t != nil {
		return false, fmt.Errorf("dangling target type %T", *t)
	}

	return true, nil
}
+
+/* The comment below is from bpf_core_fields_are_compat in libbpf.c:
+ *
+ * Check two types for compatibility for the purpose of field access
+ * relocation. const/volatile/restrict and typedefs are skipped to ensure we
+ * are relocating semantically compatible entities:
+ * - any two STRUCTs/UNIONs are compatible and can be mixed;
+ * - any two FWDs are compatible, if their names match (modulo flavor suffix);
+ * - any two PTRs are always compatible;
+ * - for ENUMs, names should be the same (ignoring flavor suffix) or at
+ * least one of enums should be anonymous;
+ * - for ENUMs, check sizes, names are ignored;
+ * - for INT, size and signedness are ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - everything else shouldn't be ever a target of relocation.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ */
// coreAreMembersCompatible implements the field compatibility check
// for field-based relocations described in the libbpf comment above.
// Arrays recurse on their element type through the outer loop, which
// is bounded by maxTypeDepth.
func coreAreMembersCompatible(localType Type, targetType Type) (bool, error) {
	doNamesMatch := func(a, b string) bool {
		if a == "" || b == "" {
			// allow anonymous and named type to match
			return true
		}

		return essentialName(a) == essentialName(b)
	}

	for depth := 0; depth <= maxTypeDepth; depth++ {
		localType = skipQualifierAndTypedef(localType)
		targetType = skipQualifierAndTypedef(targetType)

		// Any two composites (struct/union) are compatible regardless
		// of exact kind, so test this before the strict kind equality.
		_, lok := localType.(composite)
		_, tok := targetType.(composite)
		if lok && tok {
			return true, nil
		}

		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
			return false, nil
		}

		switch lv := localType.(type) {
		case *Pointer:
			return true, nil

		case *Enum:
			tv := targetType.(*Enum)
			return doNamesMatch(lv.name(), tv.name()), nil

		case *Fwd:
			tv := targetType.(*Fwd)
			return doNamesMatch(lv.name(), tv.name()), nil

		case *Int:
			// Sizes and signedness are ignored; bitfields never match.
			tv := targetType.(*Int)
			return !lv.isBitfield() && !tv.isBitfield(), nil

		case *Array:
			// Compare element types on the next loop iteration.
			tv := targetType.(*Array)

			localType = lv.Type
			targetType = tv.Type

		default:
			return false, fmt.Errorf("unsupported type %T", localType)
		}
	}

	return false, errors.New("types are nested too deep")
}
+
+func skipQualifierAndTypedef(typ Type) Type {
+ result := typ
+ for depth := 0; depth <= maxTypeDepth; depth++ {
+ switch v := (result).(type) {
+ case qualifier:
+ result = v.qualify()
+ case *Typedef:
+ result = v.Type
+ default:
+ return result
+ }
+ }
+ return typ
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/doc.go b/vendor/github.com/cilium/ebpf/internal/btf/doc.go
new file mode 100644
index 000000000..ad2576cb2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/doc.go
@@ -0,0 +1,8 @@
+// Package btf handles data encoded according to the BPF Type Format.
+//
+// The canonical documentation lives in the Linux kernel repository and is
+// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
+//
+// The API is very much unstable. You should only use this via the main
+// ebpf library.
+package btf
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go
new file mode 100644
index 000000000..6a21b6bda
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go
@@ -0,0 +1,281 @@
+package btf
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+)
+
// btfExtHeader is the fixed header of a .BTF.ext section. The *Off
// fields are offsets relative to the end of the header, i.e. HdrLen
// bytes into the section.
type btfExtHeader struct {
	Magic   uint16
	Version uint8
	Flags   uint8
	HdrLen  uint32

	FuncInfoOff uint32
	FuncInfoLen uint32
	LineInfoOff uint32
	LineInfoLen uint32
}

// btfExtCoreHeader extends btfExtHeader with the location of the
// CO-RE relocation subsection. It is optional: readers must check
// HdrLen before assuming it is present.
type btfExtCoreHeader struct {
	CoreReloOff uint32
	CoreReloLen uint32
}
+
// parseExtInfos parses a .BTF.ext section: function info, line info
// and, when present, CO-RE relocations. All subsection offsets are
// taken relative to the end of the (variable length) header.
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, coreRelos map[string]bpfCoreRelos, err error) {
	var header btfExtHeader
	var coreHeader btfExtCoreHeader
	if err := binary.Read(r, bo, &header); err != nil {
		return nil, nil, nil, fmt.Errorf("can't read header: %v", err)
	}

	if header.Magic != btfMagic {
		return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
	}

	if header.Version != 1 {
		return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version)
	}

	if header.Flags != 0 {
		return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
	}

	remainder := int64(header.HdrLen) - int64(binary.Size(&header))
	if remainder < 0 {
		return nil, nil, nil, errors.New("header is too short")
	}

	// The CO-RE relocation header is optional: only read it if the
	// declared header length leaves room for it.
	coreHdrSize := int64(binary.Size(&coreHeader))
	if remainder >= coreHdrSize {
		if err := binary.Read(r, bo, &coreHeader); err != nil {
			return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err)
		}
		remainder -= coreHdrSize
	}

	// Of course, the .BTF.ext header has different semantics than the
	// .BTF ext header. We need to ignore non-null values.
	_, err = io.CopyN(ioutil.Discard, r, remainder)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("header padding: %v", err)
	}

	if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
		return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err)
	}

	buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen)))
	funcInfo, err = parseExtInfo(buf, bo, strings)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("function info: %w", err)
	}

	if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
		return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err)
	}

	buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen)))
	lineInfo, err = parseExtInfo(buf, bo, strings)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("line info: %w", err)
	}

	// Relocations are only parsed if the optional CO-RE header was
	// present and non-empty; coreRelos stays nil otherwise.
	if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 {
		if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil {
			return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err)
		}

		coreRelos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err)
		}
	}

	return funcInfo, lineInfo, coreRelos, nil
}
+
// btfExtInfoSec is the per-section header inside an ext info
// subsection: the (string table offset of the) ELF section name the
// records apply to, and the number of records that follow.
type btfExtInfoSec struct {
	SecNameOff uint32
	NumInfo    uint32
}
+
// extInfoRecord is a single func_info or line_info entry. InsnOff is
// a byte offset into the instruction stream; Opaque holds the rest of
// the record, which this package does not interpret.
type extInfoRecord struct {
	InsnOff uint64
	Opaque  []byte
}

// extInfo is the decoded contents of one ext info section.
type extInfo struct {
	recordSize uint32
	records    []extInfoRecord
}

// append merges ei and other into a new extInfo, shifting the
// instruction offsets of other's records by offset bytes. Both sides
// must share the same record size.
func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) {
	if ei.recordSize != other.recordSize {
		return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
	}

	merged := make([]extInfoRecord, len(ei.records), len(ei.records)+len(other.records))
	copy(merged, ei.records)
	for _, rec := range other.records {
		// rec is a copy, so adjusting InsnOff doesn't mutate other.
		rec.InsnOff += offset
		merged = append(merged, rec)
	}
	return extInfo{ei.recordSize, merged}, nil
}
+
// MarshalBinary encodes the records in the layout the kernel expects:
// a uint32 instruction offset followed by each record's opaque bytes.
// Returns nil for an extInfo with no records.
func (ei extInfo) MarshalBinary() ([]byte, error) {
	if len(ei.records) == 0 {
		return nil, nil
	}

	buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records)))
	for _, info := range ei.records {
		// The kernel expects offsets in number of raw bpf instructions,
		// while the ELF tracks it in bytes.
		insnOff := uint32(info.InsnOff / asm.InstructionSize)
		if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil {
			return nil, fmt.Errorf("can't write instruction offset: %v", err)
		}

		// bytes.Buffer.Write never returns an error, so it is safe to
		// ignore the result here.
		buf.Write(info.Opaque)
	}

	return buf.Bytes(), nil
}
+
+func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
+ const maxRecordSize = 256
+
+ var recordSize uint32
+ if err := binary.Read(r, bo, &recordSize); err != nil {
+ return nil, fmt.Errorf("can't read record size: %v", err)
+ }
+
+ if recordSize < 4 {
+ // Need at least insnOff
+ return nil, errors.New("record size too short")
+ }
+ if recordSize > maxRecordSize {
+ return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
+ }
+
+ result := make(map[string]extInfo)
+ for {
+ secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+
+ var records []extInfoRecord
+ for i := uint32(0); i < infoHeader.NumInfo; i++ {
+ var byteOff uint32
+ if err := binary.Read(r, bo, &byteOff); err != nil {
+ return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err)
+ }
+
+ buf := make([]byte, int(recordSize-4))
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("section %v: can't read record: %v", secName, err)
+ }
+
+ if byteOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
+ }
+
+ records = append(records, extInfoRecord{uint64(byteOff), buf})
+ }
+
+ result[secName] = extInfo{
+ recordSize,
+ records,
+ }
+ }
+}
+
// bpfCoreRelo matches `struct bpf_core_relo` from the kernel
type bpfCoreRelo struct {
	InsnOff      uint32
	TypeID       TypeID
	AccessStrOff uint32
	ReloKind     coreReloKind
}

type bpfCoreRelos []bpfCoreRelo

// append two slices of extInfoRelo to each other. The InsnOff of b are adjusted
// by offset.
func (r bpfCoreRelos) append(other bpfCoreRelos, offset uint64) bpfCoreRelos {
	result := make([]bpfCoreRelo, 0, len(r)+len(other))
	result = append(result, r...)
	for _, relo := range other {
		// relo is a copy, so adjusting InsnOff does not mutate other.
		relo.InsnOff += uint32(offset)
		result = append(result, relo)
	}
	return result
}
+
+var extInfoReloSize = binary.Size(bpfCoreRelo{})
+
+func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]bpfCoreRelos, error) {
+ var recordSize uint32
+ if err := binary.Read(r, bo, &recordSize); err != nil {
+ return nil, fmt.Errorf("read record size: %v", err)
+ }
+
+ if recordSize != uint32(extInfoReloSize) {
+ return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
+ }
+
+ result := make(map[string]bpfCoreRelos)
+ for {
+ secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+
+ var relos []bpfCoreRelo
+ for i := uint32(0); i < infoHeader.NumInfo; i++ {
+ var relo bpfCoreRelo
+ if err := binary.Read(r, bo, &relo); err != nil {
+ return nil, fmt.Errorf("section %v: read record: %v", secName, err)
+ }
+
+ if relo.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff)
+ }
+
+ relos = append(relos, relo)
+ }
+
+ result[secName] = relos
+ }
+}
+
// parseExtInfoHeader reads the per-section header that precedes each
// run of ext info records and resolves its section name. A wrapped
// io.EOF signals that the input is cleanly exhausted; callers use it
// as the loop terminator.
func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) {
	var infoHeader btfExtInfoSec
	if err := binary.Read(r, bo, &infoHeader); err != nil {
		return "", nil, fmt.Errorf("read ext info header: %w", err)
	}

	secName, err := strings.Lookup(infoHeader.SecNameOff)
	if err != nil {
		return "", nil, fmt.Errorf("get section name: %w", err)
	}

	// A section announcing zero records is malformed.
	if infoHeader.NumInfo == 0 {
		return "", nil, fmt.Errorf("section %s has zero records", secName)
	}

	return secName, &infoHeader, nil
}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go b/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go
new file mode 100644
index 000000000..37e043fd3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go
@@ -0,0 +1,49 @@
+// +build gofuzz
+
+// Use with https://github.com/dvyukov/go-fuzz
+
+package btf
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "github.com/cilium/ebpf/internal"
+)
+
// FuzzSpec feeds arbitrary bytes to the BTF parser. Return values
// follow go-fuzz conventions: -1 skips inputs too short to hold a
// header, 1 marks inputs that parsed successfully, 0 everything else.
func FuzzSpec(data []byte) int {
	if len(data) < binary.Size(btfHeader{}) {
		return -1
	}

	spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil)
	if err != nil {
		// A failed parse must not also return a spec.
		if spec != nil {
			panic("spec is not nil")
		}
		return 0
	}
	if spec == nil {
		panic("spec is nil")
	}
	return 1
}

// FuzzExtInfo feeds arbitrary bytes to the ext info parser, using a
// small fixed string table so section name lookups can succeed.
func FuzzExtInfo(data []byte) int {
	if len(data) < binary.Size(btfExtHeader{}) {
		return -1
	}

	table := stringTable("\x00foo\x00barfoo\x00")
	info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table)
	if err != nil {
		// A failed parse must not also return a result.
		if info != nil {
			panic("info is not nil")
		}
		return 0
	}
	if info == nil {
		panic("info is nil")
	}
	return 1
}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/strings.go b/vendor/github.com/cilium/ebpf/internal/btf/strings.go
new file mode 100644
index 000000000..8782643a0
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/strings.go
@@ -0,0 +1,60 @@
+package btf
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+)
+
// stringTable is the raw, NUL-separated string section of a BTF blob.
// Offset 0 always refers to the empty string.
type stringTable []byte

// readStringTable slurps an entire BTF string section and validates
// its framing: it must be non-empty, begin with a NUL (the empty
// string) and end with a NUL terminator.
func readStringTable(r io.Reader) (stringTable, error) {
	contents, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, fmt.Errorf("can't read string table: %v", err)
	}

	switch {
	case len(contents) < 1:
		return nil, errors.New("string table is empty")
	case contents[0] != '\x00':
		return nil, errors.New("first item in string table is non-empty")
	case contents[len(contents)-1] != '\x00':
		return nil, errors.New("string table isn't null terminated")
	}

	return stringTable(contents), nil
}

// Lookup returns the string starting at the given byte offset. The
// offset must be 0 or point at the first byte after a NUL terminator.
func (st stringTable) Lookup(offset uint32) (string, error) {
	// Guard against truncation when converting to int on 32 bit.
	if int64(offset) > int64(^uint(0)>>1) {
		return "", fmt.Errorf("offset %d overflows int", offset)
	}

	start := int(offset)
	if start >= len(st) {
		return "", fmt.Errorf("offset %d is out of bounds", offset)
	}

	if start > 0 && st[start-1] != '\x00' {
		return "", fmt.Errorf("offset %d isn't start of a string", offset)
	}

	tail := st[start:]
	terminator := bytes.IndexByte(tail, '\x00')
	if terminator == -1 {
		return "", fmt.Errorf("offset %d isn't null terminated", offset)
	}

	return string(tail[:terminator]), nil
}
+
+func (st stringTable) LookupName(offset uint32) (Name, error) {
+ str, err := st.Lookup(offset)
+ return Name(str), err
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/types.go b/vendor/github.com/cilium/ebpf/internal/btf/types.go
new file mode 100644
index 000000000..9e1fd8d0b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/types.go
@@ -0,0 +1,871 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "strings"
+)
+
// maxTypeDepth bounds traversals of (possibly cyclic or deeply
// nested) type graphs throughout this package.
const maxTypeDepth = 32

// TypeID identifies a type in a BTF section.
type TypeID uint32

// ID implements part of the Type interface.
func (tid TypeID) ID() TypeID {
	return tid
}

// Type represents a type described by BTF.
type Type interface {
	ID() TypeID

	String() string

	// Make a copy of the type, without copying Type members.
	copy() Type

	// Enumerate all nested Types. Repeated calls must visit nested
	// types in the same order.
	walk(*typeDeque)
}
+
// namedType is a type with a name.
//
// Most named types simply embed Name.
type namedType interface {
	Type
	name() string
}

// Name identifies a type.
//
// Anonymous types have an empty name.
type Name string

func (n Name) name() string {
	return string(n)
}

// Void is the unit type of BTF. It always has type ID 0.
type Void struct{}

func (v *Void) ID() TypeID      { return 0 }
func (v *Void) String() string  { return "void#0" }
func (v *Void) size() uint32    { return 0 }
func (v *Void) copy() Type      { return (*Void)(nil) }
func (v *Void) walk(*typeDeque) {}
+
// IntEncoding is a bitfield of flags qualifying an Int.
type IntEncoding byte

const (
	Signed IntEncoding = 1 << iota
	Char
	Bool
)

// Int is an integer of a given length.
type Int struct {
	TypeID
	Name

	// The size of the integer in bytes.
	Size     uint32
	Encoding IntEncoding
	// Offset is the starting bit offset. Currently always 0.
	// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
	Offset uint32
	Bits   byte
}

var _ namedType = (*Int)(nil)

// String renders the integer C-style, e.g. "uint32#5" or "char#3",
// with a [bits=N] suffix when a bitfield width is set.
func (i *Int) String() string {
	var s strings.Builder

	switch {
	case i.Encoding&Char != 0:
		s.WriteString("char")
	case i.Encoding&Bool != 0:
		s.WriteString("bool")
	default:
		if i.Encoding&Signed == 0 {
			s.WriteRune('u')
		}
		s.WriteString("int")
		fmt.Fprintf(&s, "%d", i.Size*8)
	}

	fmt.Fprintf(&s, "#%d", i.TypeID)

	if i.Bits > 0 {
		fmt.Fprintf(&s, "[bits=%d]", i.Bits)
	}

	return s.String()
}

func (i *Int) size() uint32    { return i.Size }
func (i *Int) walk(*typeDeque) {}
func (i *Int) copy() Type {
	cpy := *i
	return &cpy
}

// isBitfield reports whether the integer starts at a non-zero bit
// offset, i.e. is part of a bitfield.
func (i *Int) isBitfield() bool {
	return i.Offset > 0
}
+
// Pointer is a pointer to another type.
type Pointer struct {
	TypeID
	Target Type
}

func (p *Pointer) String() string {
	return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID())
}

// NOTE(review): the pointer size is hard-coded to 8 bytes; presumably
// this assumes a 64-bit kernel — confirm before reusing on 32 bit.
func (p *Pointer) size() uint32        { return 8 }
func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) }
func (p *Pointer) copy() Type {
	cpy := *p
	return &cpy
}

// Array is an array with a fixed number of elements.
type Array struct {
	TypeID
	Type   Type
	Nelems uint32
}

func (arr *Array) String() string {
	return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems)
}

func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) }
func (arr *Array) copy() Type {
	cpy := *arr
	return &cpy
}
+
// Struct is a compound type of consecutive members.
type Struct struct {
	TypeID
	Name
	// The size of the struct including padding, in bytes
	Size    uint32
	Members []Member
}

func (s *Struct) String() string {
	return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name)
}

func (s *Struct) size() uint32 { return s.Size }

// walk pushes a pointer to each member's type so traversals (and
// copyType) can rewrite them in place.
func (s *Struct) walk(tdq *typeDeque) {
	for i := range s.Members {
		tdq.push(&s.Members[i].Type)
	}
}

// copy clones the struct and its member slice; the member types
// themselves are shared with the original.
func (s *Struct) copy() Type {
	cpy := *s
	cpy.Members = make([]Member, len(s.Members))
	copy(cpy.Members, s.Members)
	return &cpy
}

func (s *Struct) members() []Member {
	return s.Members
}

// Union is a compound type where members occupy the same memory.
type Union struct {
	TypeID
	Name
	// The size of the union including padding, in bytes.
	Size    uint32
	Members []Member
}

func (u *Union) String() string {
	return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name)
}

func (u *Union) size() uint32 { return u.Size }

func (u *Union) walk(tdq *typeDeque) {
	for i := range u.Members {
		tdq.push(&u.Members[i].Type)
	}
}

func (u *Union) copy() Type {
	cpy := *u
	cpy.Members = make([]Member, len(u.Members))
	copy(cpy.Members, u.Members)
	return &cpy
}

func (u *Union) members() []Member {
	return u.Members
}

// composite is implemented by types with members: Struct and Union.
type composite interface {
	members() []Member
}

var (
	_ composite = (*Struct)(nil)
	_ composite = (*Union)(nil)
)
+
// Member is part of a Struct or Union.
//
// It is not a valid Type.
type Member struct {
	Name
	Type Type
	// Offset is the bit offset of this member
	Offset       uint32
	BitfieldSize uint32
}

// Enum lists possible values.
type Enum struct {
	TypeID
	Name
	Values []EnumValue
}

func (e *Enum) String() string {
	return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name)
}

// EnumValue is part of an Enum
//
// It is not a valid Type
type EnumValue struct {
	Name
	Value int32
}

// size is fixed at 4: enum values are 32 bit (Value is int32).
func (e *Enum) size() uint32    { return 4 }
func (e *Enum) walk(*typeDeque) {}
func (e *Enum) copy() Type {
	cpy := *e
	cpy.Values = make([]EnumValue, len(e.Values))
	copy(cpy.Values, e.Values)
	return &cpy
}
+
// FwdKind is the type of forward declaration.
type FwdKind int

// Valid types of forward declaration.
const (
	FwdStruct FwdKind = iota
	FwdUnion
)

// String returns the C keyword for the forward declaration kind, or a
// %T(%d) style fallback for values outside the defined range.
func (fk FwdKind) String() string {
	if fk == FwdStruct {
		return "struct"
	}
	if fk == FwdUnion {
		return "union"
	}
	return fmt.Sprintf("%T(%d)", fk, int(fk))
}
+
// Fwd is a forward declaration of a Type.
type Fwd struct {
	TypeID
	Name
	Kind FwdKind
}

func (f *Fwd) String() string {
	return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name)
}

func (f *Fwd) walk(*typeDeque) {}
func (f *Fwd) copy() Type {
	cpy := *f
	return &cpy
}

// Typedef is an alias of a Type.
type Typedef struct {
	TypeID
	Name
	Type Type
}

func (td *Typedef) String() string {
	return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID())
}

func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
func (td *Typedef) copy() Type {
	cpy := *td
	return &cpy
}

// Volatile is a qualifier.
type Volatile struct {
	TypeID
	Type Type
}

func (v *Volatile) String() string {
	return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID())
}

// qualify returns the qualified type, satisfying the qualifier interface.
func (v *Volatile) qualify() Type       { return v.Type }
func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
func (v *Volatile) copy() Type {
	cpy := *v
	return &cpy
}

// Const is a qualifier.
type Const struct {
	TypeID
	Type Type
}

func (c *Const) String() string {
	return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID())
}

func (c *Const) qualify() Type       { return c.Type }
func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
func (c *Const) copy() Type {
	cpy := *c
	return &cpy
}

// Restrict is a qualifier.
type Restrict struct {
	TypeID
	Type Type
}

func (r *Restrict) String() string {
	return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID())
}

func (r *Restrict) qualify() Type       { return r.Type }
func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
func (r *Restrict) copy() Type {
	cpy := *r
	return &cpy
}
+
// Func is a function definition.
type Func struct {
	TypeID
	Name
	Type Type
}

func (f *Func) String() string {
	return fmt.Sprintf("func#%d[%q proto=#%d]", f.TypeID, f.Name, f.Type.ID())
}

func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
func (f *Func) copy() Type {
	cpy := *f
	return &cpy
}

// FuncProto is a function declaration.
type FuncProto struct {
	TypeID
	Return Type
	Params []FuncParam
}

func (fp *FuncProto) String() string {
	var s strings.Builder
	fmt.Fprintf(&s, "proto#%d[", fp.TypeID)
	for _, param := range fp.Params {
		fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID())
	}
	fmt.Fprintf(&s, "return=#%d]", fp.Return.ID())
	return s.String()
}

// walk visits the return type first, then each parameter in order.
func (fp *FuncProto) walk(tdq *typeDeque) {
	tdq.push(&fp.Return)
	for i := range fp.Params {
		tdq.push(&fp.Params[i].Type)
	}
}

func (fp *FuncProto) copy() Type {
	cpy := *fp
	cpy.Params = make([]FuncParam, len(fp.Params))
	copy(cpy.Params, fp.Params)
	return &cpy
}

// FuncParam is a single parameter of a FuncProto.
//
// It is not a valid Type.
type FuncParam struct {
	Name
	Type Type
}
+
// Var is a global variable.
type Var struct {
	TypeID
	Name
	Type Type
}

func (v *Var) String() string {
	// TODO: Linkage
	return fmt.Sprintf("var#%d[%q]", v.TypeID, v.Name)
}

func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
func (v *Var) copy() Type {
	cpy := *v
	return &cpy
}

// Datasec is a global program section containing data.
type Datasec struct {
	TypeID
	Name
	Size uint32
	Vars []VarSecinfo
}

func (ds *Datasec) String() string {
	return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name)
}

func (ds *Datasec) size() uint32 { return ds.Size }

// walk pushes a pointer to each contained variable's type.
func (ds *Datasec) walk(tdq *typeDeque) {
	for i := range ds.Vars {
		tdq.push(&ds.Vars[i].Type)
	}
}

func (ds *Datasec) copy() Type {
	cpy := *ds
	cpy.Vars = make([]VarSecinfo, len(ds.Vars))
	copy(cpy.Vars, ds.Vars)
	return &cpy
}

// VarSecinfo describes variable in a Datasec
//
// It is not a valid Type.
type VarSecinfo struct {
	Type   Type
	Offset uint32
	Size   uint32
}
+
+type sizer interface {
+ size() uint32
+}
+
+var (
+ _ sizer = (*Int)(nil)
+ _ sizer = (*Pointer)(nil)
+ _ sizer = (*Struct)(nil)
+ _ sizer = (*Union)(nil)
+ _ sizer = (*Enum)(nil)
+ _ sizer = (*Datasec)(nil)
+)
+
+type qualifier interface {
+ qualify() Type
+}
+
+var (
+ _ qualifier = (*Const)(nil)
+ _ qualifier = (*Restrict)(nil)
+ _ qualifier = (*Volatile)(nil)
+)
+
// Sizeof returns the size of a type in bytes.
//
// Returns an error if the size can't be computed.
func Sizeof(typ Type) (int, error) {
	var (
		n    = int64(1) // accumulated element count from nested arrays
		elem int64      // size of the innermost sized type
	)

	// Iterate instead of recursing, bounded by maxTypeDepth, so that
	// overly deep (or malformed) type chains terminate.
	for i := 0; i < maxTypeDepth; i++ {
		switch v := typ.(type) {
		case *Array:
			// Guard the multiplication below against overflow.
			if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
				return 0, errors.New("overflow")
			}

			// Arrays may be of zero length, which allows
			// n to be zero as well.
			n *= int64(v.Nelems)
			typ = v.Type
			continue

		case sizer:
			elem = int64(v.size())

		case *Typedef:
			typ = v.Type
			continue

		case qualifier:
			typ = v.qualify()
			continue

		default:
			return 0, fmt.Errorf("unrecognized type %T", typ)
		}

		if n > 0 && elem > math.MaxInt64/n {
			return 0, errors.New("overflow")
		}

		size := n * elem
		// The result must also fit into the platform's int.
		if int64(int(size)) != size {
			return 0, errors.New("overflow")
		}

		return int(size), nil
	}

	return 0, errors.New("exceeded type depth")
}
+
// copy a Type recursively.
//
// typ may form a cycle.
func copyType(typ Type) Type {
	var (
		copies = make(map[Type]Type) // original -> copy, also used as visited set
		work   typeDeque             // pointers that still need to be redirected
	)

	for t := &typ; t != nil; t = work.pop() {
		// *t is the identity of the type.
		if cpy := copies[*t]; cpy != nil {
			// Already copied: just redirect the pointer. This is what
			// keeps cyclic graphs from recursing forever.
			*t = cpy
			continue
		}

		cpy := (*t).copy()
		copies[*t] = cpy
		*t = cpy

		// Mark any nested types for copying.
		cpy.walk(&work)
	}

	return typ
}
+
// typeDeque keeps track of pointers to types which still
// need to be visited.
//
// It is a growable ring buffer: len(types) is always a power of two
// and mask is len(types)-1, so "index & mask" wraps read and write
// positions around the buffer.
type typeDeque struct {
	types       []*Type
	read, write uint64
	mask        uint64
}

// push adds a type to the stack.
func (dq *typeDeque) push(t *Type) {
	// Fast path: there is still room in the buffer.
	if dq.write-dq.read < uint64(len(dq.types)) {
		dq.types[dq.write&dq.mask] = t
		dq.write++
		return
	}

	// Grow by doubling (starting at 8) and unwrap the ring so the
	// oldest element lands at index 0 of the new buffer.
	new := len(dq.types) * 2
	if new == 0 {
		new = 8
	}

	types := make([]*Type, new)
	pivot := dq.read & dq.mask
	n := copy(types, dq.types[pivot:])
	n += copy(types[n:], dq.types[:pivot])
	types[n] = t

	dq.types = types
	dq.mask = uint64(new) - 1
	dq.read, dq.write = 0, uint64(n+1)
}

// shift returns the first element or null.
func (dq *typeDeque) shift() *Type {
	if dq.read == dq.write {
		return nil
	}

	index := dq.read & dq.mask
	t := dq.types[index]
	dq.types[index] = nil // drop the reference so it can be collected
	dq.read++
	return t
}

// pop returns the last element or null.
func (dq *typeDeque) pop() *Type {
	if dq.read == dq.write {
		return nil
	}

	dq.write--
	index := dq.write & dq.mask
	t := dq.types[index]
	dq.types[index] = nil // drop the reference so it can be collected
	return t
}

// all returns all elements.
//
// The deque is empty after calling this method.
func (dq *typeDeque) all() []*Type {
	length := dq.write - dq.read
	types := make([]*Type, 0, length)
	for t := dq.shift(); t != nil; t = dq.shift() {
		types = append(types, t)
	}
	return types
}
+
// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
// it into a graph of Types connected via pointers.
//
// Returns a map of named types (so, where NameOff is non-zero) and a slice of types
// indexed by TypeID. Since BTF ignores compilation units, multiple types may share
// the same name. A Type may form a cyclic graph by pointing at itself.
func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]namedType, err error) {
	// fixupDef records a forward reference: typ must be pointed at the
	// inflated Type with the given id once all types exist.
	type fixupDef struct {
		id           TypeID
		expectedKind btfKind
		typ          *Type
	}

	var fixups []fixupDef
	fixup := func(id TypeID, expectedKind btfKind, typ *Type) {
		fixups = append(fixups, fixupDef{id, expectedKind, typ})
	}

	// convertMembers turns raw struct/union members into Members and
	// registers a fixup for each member's type.
	convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
		// NB: The fixup below relies on pre-allocating this array to
		// work, since otherwise append might re-allocate members.
		members := make([]Member, 0, len(raw))
		for i, btfMember := range raw {
			name, err := rawStrings.LookupName(btfMember.NameOff)
			if err != nil {
				return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
			}
			m := Member{
				Name:   name,
				Offset: btfMember.Offset,
			}
			if kindFlag {
				// With the kind flag set, Offset packs the bitfield
				// size into the upper 8 bits and the bit offset into
				// the lower 24 bits.
				m.BitfieldSize = btfMember.Offset >> 24
				m.Offset &= 0xffffff
			}
			members = append(members, m)
		}
		for i := range members {
			fixup(raw[i].Type, kindUnknown, &members[i].Type)
		}
		return members, nil
	}

	types = make([]Type, 0, len(rawTypes))
	types = append(types, (*Void)(nil))
	namedTypes = make(map[string][]namedType)

	for i, raw := range rawTypes {
		var (
			// Void is defined to always be type ID 0, and is thus
			// omitted from BTF.
			id  = TypeID(i + 1)
			typ Type
		)

		name, err := rawStrings.LookupName(raw.NameOff)
		if err != nil {
			return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err)
		}

		switch raw.Kind() {
		case kindInt:
			encoding, offset, bits := intEncoding(*raw.data.(*uint32))
			typ = &Int{id, name, raw.Size(), encoding, offset, bits}

		case kindPointer:
			ptr := &Pointer{id, nil}
			fixup(raw.Type(), kindUnknown, &ptr.Target)
			typ = ptr

		case kindArray:
			btfArr := raw.data.(*btfArray)

			// IndexType is unused according to btf.rst.
			// Don't make it available right now.
			arr := &Array{id, nil, btfArr.Nelems}
			fixup(btfArr.Type, kindUnknown, &arr.Type)
			typ = arr

		case kindStruct:
			members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
			if err != nil {
				return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
			}
			typ = &Struct{id, name, raw.Size(), members}

		case kindUnion:
			members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
			if err != nil {
				return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
			}
			typ = &Union{id, name, raw.Size(), members}

		case kindEnum:
			rawvals := raw.data.([]btfEnum)
			vals := make([]EnumValue, 0, len(rawvals))
			for i, btfVal := range rawvals {
				name, err := rawStrings.LookupName(btfVal.NameOff)
				if err != nil {
					return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err)
				}
				vals = append(vals, EnumValue{
					Name:  name,
					Value: btfVal.Val,
				})
			}
			typ = &Enum{id, name, vals}

		case kindForward:
			// The kind flag distinguishes union from struct forwards.
			if raw.KindFlag() {
				typ = &Fwd{id, name, FwdUnion}
			} else {
				typ = &Fwd{id, name, FwdStruct}
			}

		case kindTypedef:
			typedef := &Typedef{id, name, nil}
			fixup(raw.Type(), kindUnknown, &typedef.Type)
			typ = typedef

		case kindVolatile:
			volatile := &Volatile{id, nil}
			fixup(raw.Type(), kindUnknown, &volatile.Type)
			typ = volatile

		case kindConst:
			cnst := &Const{id, nil}
			fixup(raw.Type(), kindUnknown, &cnst.Type)
			typ = cnst

		case kindRestrict:
			restrict := &Restrict{id, nil}
			fixup(raw.Type(), kindUnknown, &restrict.Type)
			typ = restrict

		case kindFunc:
			// A Func must reference a FuncProto; enforced during fixup.
			fn := &Func{id, name, nil}
			fixup(raw.Type(), kindFuncProto, &fn.Type)
			typ = fn

		case kindFuncProto:
			rawparams := raw.data.([]btfParam)
			params := make([]FuncParam, 0, len(rawparams))
			for i, param := range rawparams {
				name, err := rawStrings.LookupName(param.NameOff)
				if err != nil {
					return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
				}
				params = append(params, FuncParam{
					Name: name,
				})
			}
			// Separate loop: params must not be re-allocated after
			// fixups take addresses into it.
			for i := range params {
				fixup(rawparams[i].Type, kindUnknown, &params[i].Type)
			}

			fp := &FuncProto{id, nil, params}
			fixup(raw.Type(), kindUnknown, &fp.Return)
			typ = fp

		case kindVar:
			v := &Var{id, name, nil}
			fixup(raw.Type(), kindUnknown, &v.Type)
			typ = v

		case kindDatasec:
			btfVars := raw.data.([]btfVarSecinfo)
			vars := make([]VarSecinfo, 0, len(btfVars))
			for _, btfVar := range btfVars {
				vars = append(vars, VarSecinfo{
					Offset: btfVar.Offset,
					Size:   btfVar.Size,
				})
			}
			// Datasec entries must reference Vars; enforced during fixup.
			for i := range vars {
				fixup(btfVars[i].Type, kindVar, &vars[i].Type)
			}
			typ = &Datasec{id, name, raw.SizeType, vars}

		default:
			return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
		}

		types = append(types, typ)

		if named, ok := typ.(namedType); ok {
			if name := essentialName(named.name()); name != "" {
				namedTypes[name] = append(namedTypes[name], named)
			}
		}
	}

	// Resolve all forward references now that every type exists.
	for _, fixup := range fixups {
		i := int(fixup.id)
		if i >= len(types) {
			return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
		}

		// Default void (id 0) to unknown
		rawKind := kindUnknown
		if i > 0 {
			rawKind = rawTypes[i-1].Kind()
		}

		if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected {
			return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
		}

		*fixup.typ = types[i]
	}

	return types, namedTypes, nil
}
+
// essentialName returns name without a ___ suffix.
//
// Everything from the last "___" on is stripped, unless the marker is
// at the very start of the name, in which case name is returned as-is.
func essentialName(name string) string {
	if idx := strings.LastIndex(name, "___"); idx > 0 {
		return name[:idx]
	}
	return name
}
diff --git a/vendor/github.com/cilium/ebpf/internal/cpu.go b/vendor/github.com/cilium/ebpf/internal/cpu.go
new file mode 100644
index 000000000..d3424ba43
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/cpu.go
@@ -0,0 +1,62 @@
+package internal
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strings"
+ "sync"
+)
+
// sysCPU caches the result of PossibleCPUs, so that sysfs is read and
// parsed at most once per process.
var sysCPU struct {
	once sync.Once
	err  error
	num  int
}

// PossibleCPUs returns the max number of CPUs a system may possibly have
// Logical CPU numbers must be of the form 0-n
func PossibleCPUs() (int, error) {
	sysCPU.once.Do(func() {
		sysCPU.num, sysCPU.err = parseCPUsFromFile("/sys/devices/system/cpu/possible")
	})

	return sysCPU.num, sysCPU.err
}
+
+func parseCPUsFromFile(path string) (int, error) {
+ spec, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+
+ n, err := parseCPUs(string(spec))
+ if err != nil {
+ return 0, fmt.Errorf("can't parse %s: %v", path, err)
+ }
+
+ return n, nil
+}
+
// parseCPUs parses the number of cpus from a string produced
// by bitmap_list_string() in the Linux kernel.
// Multiple ranges are rejected, since they can't be unified
// into a single number.
// This is the format of /sys/devices/system/cpu/possible, it
// is not suitable for /sys/devices/system/cpu/online, etc.
func parseCPUs(spec string) (int, error) {
	// A system with a single possible CPU reports just "0".
	if strings.Trim(spec, "\n") == "0" {
		return 1, nil
	}

	// Sscanf only succeeds on a single "first-last\n" range, which
	// also rejects comma-separated multi-range specs.
	var first, last int
	if n, err := fmt.Sscanf(spec, "%d-%d\n", &first, &last); n != 2 || err != nil {
		return 0, fmt.Errorf("invalid format: %s", spec)
	}
	if first != 0 {
		return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec)
	}

	// cpus is 0 indexed
	return last + 1, nil
}
diff --git a/vendor/github.com/cilium/ebpf/internal/elf.go b/vendor/github.com/cilium/ebpf/internal/elf.go
new file mode 100644
index 000000000..c3f9ea0f8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/elf.go
@@ -0,0 +1,52 @@
+package internal
+
+import (
+ "debug/elf"
+ "fmt"
+ "io"
+)
+
+type SafeELFFile struct {
+ *elf.File
+}
+
+// NewSafeELFFile reads an ELF safely.
+//
+// Any panic during parsing is turned into an error. This is necessary since
+// there are a bunch of unfixed bugs in debug/elf.
+//
+// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle
+func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ safe = nil
+ err = fmt.Errorf("reading ELF file panicked: %s", r)
+ }()
+
+ file, err := elf.NewFile(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SafeELFFile{file}, nil
+}
+
+// Symbols is the safe version of elf.File.Symbols.
+func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ syms = nil
+ err = fmt.Errorf("reading ELF symbols panicked: %s", r)
+ }()
+
+ syms, err = se.File.Symbols()
+ return
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/endian.go b/vendor/github.com/cilium/ebpf/internal/endian.go
new file mode 100644
index 000000000..ac8a94e51
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/endian.go
@@ -0,0 +1,24 @@
+package internal
+
+import (
+ "encoding/binary"
+ "unsafe"
+)
+
+// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
+// depending on the host's endianness.
+var NativeEndian binary.ByteOrder
+
+func init() {
+ if isBigEndian() {
+ NativeEndian = binary.BigEndian
+ } else {
+ NativeEndian = binary.LittleEndian
+ }
+}
+
+func isBigEndian() (ret bool) {
+ i := int(0x1)
+ bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i))
+ return bs[0] == 0
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go
new file mode 100644
index 000000000..b6aee81f7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/errors.go
@@ -0,0 +1,47 @@
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
// ErrorWithLog returns an error that includes logs from the
// kernel verifier.
//
// logErr should be the error returned by the syscall that generated
// the log. It is used to check for truncation of the output.
func ErrorWithLog(err error, log []byte, logErr error) error {
	// The log buffer is NUL terminated and may be padded; strip the
	// terminator and surrounding whitespace.
	logStr := strings.Trim(CString(log), "\t\r\n ")
	if errors.Is(logErr, unix.ENOSPC) {
		// The kernel ran out of log buffer space, so the log is
		// incomplete.
		logStr += " (truncated...)"
	}

	return &VerifierError{err, logStr}
}
+
+// VerifierError includes information from the eBPF verifier.
+type VerifierError struct {
+ cause error
+ log string
+}
+
+func (le *VerifierError) Error() string {
+ if le.log == "" {
+ return le.cause.Error()
+ }
+
+ return fmt.Sprintf("%s: %s", le.cause, le.log)
+}
+
// CString turns a NUL / zero terminated byte buffer into a string.
//
// Returns the empty string when no NUL byte is present.
func CString(in []byte) string {
	term := bytes.IndexByte(in, 0)
	if term < 0 {
		return ""
	}
	return string(in[:term])
}
diff --git a/vendor/github.com/cilium/ebpf/internal/fd.go b/vendor/github.com/cilium/ebpf/internal/fd.go
new file mode 100644
index 000000000..af04955bd
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/fd.go
@@ -0,0 +1,69 @@
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
// ErrClosedFd is returned when an operation is performed on an FD that
// has already been closed.
var ErrClosedFd = errors.New("use of closed file descriptor")

// FD wraps a file descriptor obtained from the kernel.
//
// A negative raw value marks the descriptor as closed.
type FD struct {
	raw int64
}

// NewFD wraps value. A finalizer closes the descriptor when the FD is
// garbage collected, unless Forget (or File) is called first.
func NewFD(value uint32) *FD {
	fd := &FD{int64(value)}
	runtime.SetFinalizer(fd, (*FD).Close)
	return fd
}

// String returns the decimal descriptor number; -1 once closed.
func (fd *FD) String() string {
	return strconv.FormatInt(fd.raw, 10)
}

// Value returns the raw descriptor, or ErrClosedFd after Close.
func (fd *FD) Value() (uint32, error) {
	if fd.raw < 0 {
		return 0, ErrClosedFd
	}

	return uint32(fd.raw), nil
}

// Close releases the descriptor. Closing an already closed FD is a
// no-op and returns nil.
func (fd *FD) Close() error {
	if fd.raw < 0 {
		return nil
	}

	// Mark as closed before the syscall so a concurrent finalizer or
	// second Close doesn't close the descriptor twice.
	value := int(fd.raw)
	fd.raw = -1

	fd.Forget()
	return unix.Close(value)
}

// Forget removes the finalizer, so the descriptor will no longer be
// closed automatically on garbage collection.
func (fd *FD) Forget() {
	runtime.SetFinalizer(fd, nil)
}

// Dup duplicates the descriptor with the close-on-exec flag set.
func (fd *FD) Dup() (*FD, error) {
	if fd.raw < 0 {
		return nil, ErrClosedFd
	}

	dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 0)
	if err != nil {
		return nil, fmt.Errorf("can't dup fd: %v", err)
	}

	return NewFD(uint32(dup)), nil
}

// File transfers ownership of the descriptor to an *os.File, removing
// this FD's finalizer.
// NOTE(review): if fd was already closed, raw is -1 here and os.NewFile
// is handed an invalid descriptor — callers should check for nil.
func (fd *FD) File(name string) *os.File {
	fd.Forget()
	return os.NewFile(uintptr(fd.raw), name)
}
diff --git a/vendor/github.com/cilium/ebpf/internal/feature.go b/vendor/github.com/cilium/ebpf/internal/feature.go
new file mode 100644
index 000000000..ec62ed39b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/feature.go
@@ -0,0 +1,138 @@
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+)
+
// ErrNotSupported indicates that a feature is not supported by the current kernel.
var ErrNotSupported = errors.New("not supported")

// UnsupportedFeatureError is returned by FeatureTest() functions.
type UnsupportedFeatureError struct {
	// The minimum Linux mainline version required for this feature.
	// Used for the error string, and for sanity checking during testing.
	MinimumVersion Version

	// The name of the feature that isn't supported.
	Name string
}

// Error includes the required kernel version when one is known.
func (ufe *UnsupportedFeatureError) Error() string {
	if ufe.MinimumVersion.Unspecified() {
		return fmt.Sprintf("%s not supported", ufe.Name)
	}
	return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion)
}

// Is indicates that UnsupportedFeatureError is ErrNotSupported.
//
// This makes errors.Is(err, ErrNotSupported) match wrapped instances.
func (ufe *UnsupportedFeatureError) Is(target error) bool {
	return target == ErrNotSupported
}
+
// featureTest caches the outcome of a FeatureTestFn.
//
// successful indicates that a definitive result (supported or not) has
// been reached, so result can be returned without re-running the probe.
type featureTest struct {
	sync.RWMutex
	successful bool
	result     error
}

// FeatureTestFn is used to determine whether the kernel supports
// a certain feature.
//
// The return values have the following semantics:
//
//   err == ErrNotSupported: the feature is not available
//   err == nil: the feature is available
//   err != nil: the test couldn't be executed
type FeatureTestFn func() error
+
// FeatureTest wraps a function so that it is run at most once.
//
// name should identify the tested feature, while version must be in the
// form Major.Minor[.Patch].
//
// Returns an error wrapping ErrNotSupported if the feature is not supported.
func FeatureTest(name, version string, fn FeatureTestFn) func() error {
	v, err := NewVersion(version)
	if err != nil {
		// An invalid version string is a programming error; report it
		// on every call instead of running the probe.
		return func() error { return err }
	}

	ft := new(featureTest)
	return func() error {
		// Double-checked locking: the read lock keeps the common
		// already-probed path from serializing callers.
		ft.RLock()
		if ft.successful {
			defer ft.RUnlock()
			return ft.result
		}
		ft.RUnlock()
		ft.Lock()
		defer ft.Unlock()
		// check one more time on the off
		// chance that two go routines
		// were able to call into the write
		// lock
		if ft.successful {
			return ft.result
		}
		err := fn()
		switch {
		case errors.Is(err, ErrNotSupported):
			// "Not supported" is a definitive result: cache it.
			ft.result = &UnsupportedFeatureError{
				MinimumVersion: v,
				Name:           name,
			}
			fallthrough

		case err == nil:
			ft.successful = true

		default:
			// We couldn't execute the feature test to a point
			// where it could make a determination.
			// Don't cache the result, just return it.
			return fmt.Errorf("detect support for %s: %w", name, err)
		}

		return ft.result
	}
}
+
// A Version in the form Major.Minor.Patch.
type Version [3]uint16

// NewVersion creates a version from a string like "Major.Minor.Patch".
//
// Patch is optional.
func NewVersion(ver string) (Version, error) {
	var major, minor, patch uint16
	// Sscanf stops at the first non-matching character, so at least
	// "Major.Minor" must parse; a missing patch defaults to zero.
	n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch)
	if n < 2 {
		return Version{}, fmt.Errorf("invalid version: %s", ver)
	}
	return Version{major, minor, patch}, nil
}

// String renders the version as "vMajor.Minor[.Patch]", omitting a
// zero patch level.
func (v Version) String() string {
	if v[2] == 0 {
		return fmt.Sprintf("v%d.%d", v[0], v[1])
	}
	return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2])
}

// Less returns true if the version is less than another version.
func (v Version) Less(other Version) bool {
	for i := range v {
		switch {
		case v[i] < other[i]:
			return true
		case v[i] > other[i]:
			return false
		}
	}
	return false
}

// Unspecified returns true if the version is all zero.
func (v Version) Unspecified() bool {
	return v == Version{}
}
diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go
new file mode 100644
index 000000000..fa7402782
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/io.go
@@ -0,0 +1,16 @@
+package internal
+
+import "errors"
+
// DiscardZeroes makes sure that all written bytes are zero
// before discarding them.
type DiscardZeroes struct{}

// Write reports the full length on success, and fails on the first
// non-zero byte.
func (DiscardZeroes) Write(p []byte) (int, error) {
	for i := range p {
		if p[i] != 0 {
			return 0, errors.New("encountered non-zero byte")
		}
	}
	return len(p), nil
}
diff --git a/vendor/github.com/cilium/ebpf/internal/ptr.go b/vendor/github.com/cilium/ebpf/internal/ptr.go
new file mode 100644
index 000000000..a7f12b2db
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/ptr.go
@@ -0,0 +1,30 @@
+package internal
+
+import "unsafe"
+
// NewPointer creates a 64-bit pointer from an unsafe Pointer.
func NewPointer(ptr unsafe.Pointer) Pointer {
	return Pointer{ptr: ptr}
}

// NewSlicePointer creates a 64-bit pointer from a byte slice.
//
// A nil or empty slice yields a NULL pointer.
func NewSlicePointer(buf []byte) Pointer {
	if len(buf) == 0 {
		return Pointer{}
	}

	return Pointer{ptr: unsafe.Pointer(&buf[0])}
}

// NewStringPointer creates a 64-bit pointer from a string.
//
// An empty string yields a NULL pointer.
func NewStringPointer(str string) Pointer {
	if str == "" {
		return Pointer{}
	}

	// The kernel expects strings to be zero terminated
	buf := make([]byte, len(str)+1)
	copy(buf, str)

	return Pointer{ptr: unsafe.Pointer(&buf[0])}
}
diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go
new file mode 100644
index 000000000..a56fbcc8e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/ptr_32_be.go
@@ -0,0 +1,14 @@
+// +build armbe mips mips64p32
+
+package internal
+
+import (
+ "unsafe"
+)
+
// Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification.
//
// On 32-bit big-endian platforms (per this file's build tags) the
// padding word comes first so the pointer occupies the low half of
// the 64-bit value.
type Pointer struct {
	pad uint32
	ptr unsafe.Pointer
}
diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go
new file mode 100644
index 000000000..be2ecfca7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/ptr_32_le.go
@@ -0,0 +1,14 @@
+// +build 386 amd64p32 arm mipsle mips64p32le
+
+package internal
+
+import (
+ "unsafe"
+)
+
// Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification.
//
// On 32-bit little-endian platforms (per this file's build tags) the
// pointer comes first, followed by a padding word to fill 64 bits.
type Pointer struct {
	ptr unsafe.Pointer
	pad uint32
}
diff --git a/vendor/github.com/cilium/ebpf/internal/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/ptr_64.go
new file mode 100644
index 000000000..69452dceb
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/ptr_64.go
@@ -0,0 +1,14 @@
+// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le
+// +build !armbe,!mips,!mips64p32
+
+package internal
+
+import (
+ "unsafe"
+)
+
// Pointer wraps an unsafe.Pointer to be 64bit to
// conform to the syscall specification.
//
// On 64-bit platforms (the complement of the 32-bit build tags) no
// padding is needed.
type Pointer struct {
	ptr unsafe.Pointer
}
diff --git a/vendor/github.com/cilium/ebpf/internal/syscall.go b/vendor/github.com/cilium/ebpf/internal/syscall.go
new file mode 100644
index 000000000..c80815131
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/syscall.go
@@ -0,0 +1,179 @@
+package internal
+
+import (
+ "fmt"
+ "path/filepath"
+ "runtime"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
//go:generate stringer -output syscall_string.go -type=BPFCmd

// BPFCmd identifies a subcommand of the bpf syscall.
type BPFCmd int

// Well known BPF commands.
//
// The iota ordering defines the numeric values passed to the kernel,
// so entries must not be reordered or removed.
const (
	BPF_MAP_CREATE BPFCmd = iota
	BPF_MAP_LOOKUP_ELEM
	BPF_MAP_UPDATE_ELEM
	BPF_MAP_DELETE_ELEM
	BPF_MAP_GET_NEXT_KEY
	BPF_PROG_LOAD
	BPF_OBJ_PIN
	BPF_OBJ_GET
	BPF_PROG_ATTACH
	BPF_PROG_DETACH
	BPF_PROG_TEST_RUN
	BPF_PROG_GET_NEXT_ID
	BPF_MAP_GET_NEXT_ID
	BPF_PROG_GET_FD_BY_ID
	BPF_MAP_GET_FD_BY_ID
	BPF_OBJ_GET_INFO_BY_FD
	BPF_PROG_QUERY
	BPF_RAW_TRACEPOINT_OPEN
	BPF_BTF_LOAD
	BPF_BTF_GET_FD_BY_ID
	BPF_TASK_FD_QUERY
	BPF_MAP_LOOKUP_AND_DELETE_ELEM
	BPF_MAP_FREEZE
	BPF_BTF_GET_NEXT_ID
	BPF_MAP_LOOKUP_BATCH
	BPF_MAP_LOOKUP_AND_DELETE_BATCH
	BPF_MAP_UPDATE_BATCH
	BPF_MAP_DELETE_BATCH
	BPF_LINK_CREATE
	BPF_LINK_UPDATE
	BPF_LINK_GET_FD_BY_ID
	BPF_LINK_GET_NEXT_ID
	BPF_ENABLE_STATS
	BPF_ITER_CREATE
)
+
// BPF wraps SYS_BPF.
//
// Any pointers contained in attr must use the Pointer type from this package.
func BPF(cmd BPFCmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
	r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
	// Keep attr (and everything it references) alive until the
	// syscall has returned.
	runtime.KeepAlive(attr)

	var err error
	if errNo != 0 {
		err = errNo
	}

	return r1, err
}
+
// BPFProgAttachAttr is the argument to BPFProgAttach, passed to the
// kernel verbatim.
type BPFProgAttachAttr struct {
	TargetFd     uint32
	AttachBpfFd  uint32
	AttachType   uint32
	AttachFlags  uint32
	ReplaceBpfFd uint32
}

// BPFProgAttach wraps BPF_PROG_ATTACH.
func BPFProgAttach(attr *BPFProgAttachAttr) error {
	_, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
	return err
}

// BPFProgDetachAttr is the argument to BPFProgDetach, passed to the
// kernel verbatim.
type BPFProgDetachAttr struct {
	TargetFd    uint32
	AttachBpfFd uint32
	AttachType  uint32
}

// BPFProgDetach wraps BPF_PROG_DETACH.
func BPFProgDetach(attr *BPFProgDetachAttr) error {
	_, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
	return err
}

// BPFEnableStatsAttr is the argument to BPFEnableStats.
type BPFEnableStatsAttr struct {
	StatsType uint32
}

// BPFEnableStats wraps BPF_ENABLE_STATS, returning the FD produced by
// the kernel.
func BPFEnableStats(attr *BPFEnableStatsAttr) (*FD, error) {
	ptr, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
	if err != nil {
		return nil, fmt.Errorf("enable stats: %w", err)
	}
	return NewFD(uint32(ptr)), nil

}
+
// bpfObjAttr is the argument to BPF_OBJ_PIN and BPF_OBJ_GET.
type bpfObjAttr struct {
	fileName  Pointer
	fd        uint32
	fileFlags uint32
}

// bpfFSType is the filesystem magic of a mounted bpf filesystem.
const bpfFSType = 0xcafe4a11

// BPFObjPin wraps BPF_OBJ_PIN.
func BPFObjPin(fileName string, fd *FD) error {
	// Pinning only works on a bpf filesystem: verify the target
	// directory before asking the kernel.
	dirName := filepath.Dir(fileName)
	var statfs unix.Statfs_t
	if err := unix.Statfs(dirName, &statfs); err != nil {
		return err
	}
	if uint64(statfs.Type) != bpfFSType {
		return fmt.Errorf("%s is not on a bpf filesystem", fileName)
	}

	value, err := fd.Value()
	if err != nil {
		return err
	}

	attr := bpfObjAttr{
		fileName: NewStringPointer(fileName),
		fd:       value,
	}
	_, err = BPF(BPF_OBJ_PIN, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
	if err != nil {
		return fmt.Errorf("pin object %s: %w", fileName, err)
	}
	return nil
}

// BPFObjGet wraps BPF_OBJ_GET.
func BPFObjGet(fileName string) (*FD, error) {
	attr := bpfObjAttr{
		fileName: NewStringPointer(fileName),
	}
	ptr, err := BPF(BPF_OBJ_GET, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
	if err != nil {
		return nil, fmt.Errorf("get object %s: %w", fileName, err)
	}
	return NewFD(uint32(ptr)), nil
}
+
// bpfObjGetInfoByFDAttr is the argument to BPF_OBJ_GET_INFO_BY_FD.
type bpfObjGetInfoByFDAttr struct {
	fd      uint32
	infoLen uint32
	info    Pointer
}

// BPFObjGetInfoByFD wraps BPF_OBJ_GET_INFO_BY_FD.
//
// Available from 4.13.
func BPFObjGetInfoByFD(fd *FD, info unsafe.Pointer, size uintptr) error {
	value, err := fd.Value()
	if err != nil {
		return err
	}

	attr := bpfObjGetInfoByFDAttr{
		fd:      value,
		infoLen: uint32(size),
		info:    NewPointer(info),
	}
	_, err = BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
	if err != nil {
		return fmt.Errorf("fd %v: %w", fd, err)
	}
	return nil
}
diff --git a/vendor/github.com/cilium/ebpf/internal/syscall_string.go b/vendor/github.com/cilium/ebpf/internal/syscall_string.go
new file mode 100644
index 000000000..85df04779
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/syscall_string.go
@@ -0,0 +1,56 @@
+// Code generated by "stringer -output syscall_string.go -type=BPFCmd"; DO NOT EDIT.
+
+package internal
+
+import "strconv"
+
// NOTE: this file is produced by stringer; regenerate via `go generate`
// rather than editing by hand.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[BPF_MAP_CREATE-0]
	_ = x[BPF_MAP_LOOKUP_ELEM-1]
	_ = x[BPF_MAP_UPDATE_ELEM-2]
	_ = x[BPF_MAP_DELETE_ELEM-3]
	_ = x[BPF_MAP_GET_NEXT_KEY-4]
	_ = x[BPF_PROG_LOAD-5]
	_ = x[BPF_OBJ_PIN-6]
	_ = x[BPF_OBJ_GET-7]
	_ = x[BPF_PROG_ATTACH-8]
	_ = x[BPF_PROG_DETACH-9]
	_ = x[BPF_PROG_TEST_RUN-10]
	_ = x[BPF_PROG_GET_NEXT_ID-11]
	_ = x[BPF_MAP_GET_NEXT_ID-12]
	_ = x[BPF_PROG_GET_FD_BY_ID-13]
	_ = x[BPF_MAP_GET_FD_BY_ID-14]
	_ = x[BPF_OBJ_GET_INFO_BY_FD-15]
	_ = x[BPF_PROG_QUERY-16]
	_ = x[BPF_RAW_TRACEPOINT_OPEN-17]
	_ = x[BPF_BTF_LOAD-18]
	_ = x[BPF_BTF_GET_FD_BY_ID-19]
	_ = x[BPF_TASK_FD_QUERY-20]
	_ = x[BPF_MAP_LOOKUP_AND_DELETE_ELEM-21]
	_ = x[BPF_MAP_FREEZE-22]
	_ = x[BPF_BTF_GET_NEXT_ID-23]
	_ = x[BPF_MAP_LOOKUP_BATCH-24]
	_ = x[BPF_MAP_LOOKUP_AND_DELETE_BATCH-25]
	_ = x[BPF_MAP_UPDATE_BATCH-26]
	_ = x[BPF_MAP_DELETE_BATCH-27]
	_ = x[BPF_LINK_CREATE-28]
	_ = x[BPF_LINK_UPDATE-29]
	_ = x[BPF_LINK_GET_FD_BY_ID-30]
	_ = x[BPF_LINK_GET_NEXT_ID-31]
	_ = x[BPF_ENABLE_STATS-32]
	_ = x[BPF_ITER_CREATE-33]
}

const _BPFCmd_name = "BPF_MAP_CREATEBPF_MAP_LOOKUP_ELEMBPF_MAP_UPDATE_ELEMBPF_MAP_DELETE_ELEMBPF_MAP_GET_NEXT_KEYBPF_PROG_LOADBPF_OBJ_PINBPF_OBJ_GETBPF_PROG_ATTACHBPF_PROG_DETACHBPF_PROG_TEST_RUNBPF_PROG_GET_NEXT_IDBPF_MAP_GET_NEXT_IDBPF_PROG_GET_FD_BY_IDBPF_MAP_GET_FD_BY_IDBPF_OBJ_GET_INFO_BY_FDBPF_PROG_QUERYBPF_RAW_TRACEPOINT_OPENBPF_BTF_LOADBPF_BTF_GET_FD_BY_IDBPF_TASK_FD_QUERYBPF_MAP_LOOKUP_AND_DELETE_ELEMBPF_MAP_FREEZEBPF_BTF_GET_NEXT_IDBPF_MAP_LOOKUP_BATCHBPF_MAP_LOOKUP_AND_DELETE_BATCHBPF_MAP_UPDATE_BATCHBPF_MAP_DELETE_BATCHBPF_LINK_CREATEBPF_LINK_UPDATEBPF_LINK_GET_FD_BY_IDBPF_LINK_GET_NEXT_IDBPF_ENABLE_STATSBPF_ITER_CREATE"

var _BPFCmd_index = [...]uint16{0, 14, 33, 52, 71, 91, 104, 115, 126, 141, 156, 173, 193, 212, 233, 253, 275, 289, 312, 324, 344, 361, 391, 405, 424, 444, 475, 495, 515, 530, 545, 566, 586, 602, 617}

// String returns the command's name, or "BPFCmd(N)" for values outside
// the generated table.
func (i BPFCmd) String() string {
	if i < 0 || i >= BPFCmd(len(_BPFCmd_index)-1) {
		return "BPFCmd(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _BPFCmd_name[_BPFCmd_index[i]:_BPFCmd_index[i+1]]
}
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
new file mode 100644
index 000000000..86d2a10f9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
@@ -0,0 +1,170 @@
+// +build linux
+
+package unix
+
+import (
+ "bytes"
+ "syscall"
+
+ linux "golang.org/x/sys/unix"
+)
+
// Re-exports of golang.org/x/sys/unix values, so the rest of the
// package can reference them through this shim and still build on
// non-Linux platforms against the stubs in types_other.go.
const (
	ENOENT  = linux.ENOENT
	EEXIST  = linux.EEXIST
	EAGAIN  = linux.EAGAIN
	ENOSPC  = linux.ENOSPC
	EINVAL  = linux.EINVAL
	EPOLLIN = linux.EPOLLIN
	EINTR   = linux.EINTR
	EPERM   = linux.EPERM
	ESRCH   = linux.ESRCH
	ENODEV  = linux.ENODEV
	// ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP
	ENOTSUPP = syscall.Errno(0x20c)

	EBADF                    = linux.EBADF
	BPF_F_NO_PREALLOC        = linux.BPF_F_NO_PREALLOC
	BPF_F_NUMA_NODE          = linux.BPF_F_NUMA_NODE
	BPF_F_RDONLY_PROG        = linux.BPF_F_RDONLY_PROG
	BPF_F_WRONLY_PROG        = linux.BPF_F_WRONLY_PROG
	BPF_OBJ_NAME_LEN         = linux.BPF_OBJ_NAME_LEN
	BPF_TAG_SIZE             = linux.BPF_TAG_SIZE
	SYS_BPF                  = linux.SYS_BPF
	F_DUPFD_CLOEXEC          = linux.F_DUPFD_CLOEXEC
	EPOLL_CTL_ADD            = linux.EPOLL_CTL_ADD
	EPOLL_CLOEXEC            = linux.EPOLL_CLOEXEC
	O_CLOEXEC                = linux.O_CLOEXEC
	O_NONBLOCK               = linux.O_NONBLOCK
	PROT_READ                = linux.PROT_READ
	PROT_WRITE               = linux.PROT_WRITE
	MAP_SHARED               = linux.MAP_SHARED
	PERF_TYPE_SOFTWARE       = linux.PERF_TYPE_SOFTWARE
	PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT
	PerfBitWatermark         = linux.PerfBitWatermark
	PERF_SAMPLE_RAW          = linux.PERF_SAMPLE_RAW
	PERF_FLAG_FD_CLOEXEC     = linux.PERF_FLAG_FD_CLOEXEC
	RLIM_INFINITY            = linux.RLIM_INFINITY
	RLIMIT_MEMLOCK           = linux.RLIMIT_MEMLOCK
	BPF_STATS_RUN_TIME       = linux.BPF_STATS_RUN_TIME
)
+
// Statfs_t is a wrapper
type Statfs_t = linux.Statfs_t

// Rlimit is a wrapper
type Rlimit = linux.Rlimit

// Setrlimit is a wrapper
func Setrlimit(resource int, rlim *Rlimit) (err error) {
	return linux.Setrlimit(resource, rlim)
}

// Syscall is a wrapper
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
	return linux.Syscall(trap, a1, a2, a3)
}

// FcntlInt is a wrapper
func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
	return linux.FcntlInt(fd, cmd, arg)
}

// Statfs is a wrapper
func Statfs(path string, buf *Statfs_t) (err error) {
	return linux.Statfs(path, buf)
}

// Close is a wrapper
func Close(fd int) (err error) {
	return linux.Close(fd)
}
+
// EpollEvent is a wrapper
type EpollEvent = linux.EpollEvent

// EpollWait is a wrapper
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
	return linux.EpollWait(epfd, events, msec)
}

// EpollCtl is a wrapper
func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
	return linux.EpollCtl(epfd, op, fd, event)
}

// Eventfd is a wrapper
func Eventfd(initval uint, flags int) (fd int, err error) {
	return linux.Eventfd(initval, flags)
}

// Write is a wrapper
func Write(fd int, p []byte) (n int, err error) {
	return linux.Write(fd, p)
}

// EpollCreate1 is a wrapper
func EpollCreate1(flag int) (fd int, err error) {
	return linux.EpollCreate1(flag)
}

// PerfEventMmapPage is a wrapper
//
// NOTE(review): unlike the other types in this file, this is a defined
// type rather than an alias (no "="). Callers must convert explicitly —
// confirm whether this is intentional.
type PerfEventMmapPage linux.PerfEventMmapPage

// SetNonblock is a wrapper
func SetNonblock(fd int, nonblocking bool) (err error) {
	return linux.SetNonblock(fd, nonblocking)
}

// Mmap is a wrapper
func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
	return linux.Mmap(fd, offset, length, prot, flags)
}

// Munmap is a wrapper
func Munmap(b []byte) (err error) {
	return linux.Munmap(b)
}

// PerfEventAttr is a wrapper
type PerfEventAttr = linux.PerfEventAttr

// PerfEventOpen is a wrapper
func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
	return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags)
}

// Utsname is a wrapper
type Utsname = linux.Utsname

// Uname is a wrapper
func Uname(buf *Utsname) (err error) {
	return linux.Uname(buf)
}

// Getpid is a wrapper
func Getpid() int {
	return linux.Getpid()
}

// Gettid is a wrapper
func Gettid() int {
	return linux.Gettid()
}

// Tgkill is a wrapper
func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
	return linux.Tgkill(tgid, tid, sig)
}
+
+func KernelRelease() (string, error) {
+ var uname Utsname
+ err := Uname(&uname)
+ if err != nil {
+ return "", err
+ }
+
+ end := bytes.IndexByte(uname.Release[:], 0)
+ release := string(uname.Release[:end])
+ return release, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
new file mode 100644
index 000000000..8c291796a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
@@ -0,0 +1,228 @@
+// +build !linux
+
+package unix
+
+import (
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
+
+const (
+ // Standard errno values, forwarded from syscall so that comparisons in
+ // platform-independent code still compile off Linux.
+ ENOENT = syscall.ENOENT
+ EEXIST = syscall.EEXIST
+ EAGAIN = syscall.EAGAIN
+ ENOSPC = syscall.ENOSPC
+ EINVAL = syscall.EINVAL
+ EINTR = syscall.EINTR
+ EPERM = syscall.EPERM
+ ESRCH = syscall.ESRCH
+ ENODEV = syscall.ENODEV
+ // EBADF is stubbed to 0 here — presumably it is only compared against
+ // on Linux-only code paths; TODO confirm.
+ EBADF = syscall.Errno(0)
+ // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUPP
+ ENOTSUPP = syscall.Errno(0x20c)
+
+ // BPF, epoll, mmap, perf and rlimit constants mirroring the Linux
+ // values (or zero placeholders) so this package compiles elsewhere.
+ BPF_F_NO_PREALLOC = 0
+ BPF_F_NUMA_NODE = 0
+ BPF_F_RDONLY_PROG = 0
+ BPF_F_WRONLY_PROG = 0
+ BPF_OBJ_NAME_LEN = 0x10
+ BPF_TAG_SIZE = 0x8
+ SYS_BPF = 321
+ F_DUPFD_CLOEXEC = 0x406
+ EPOLLIN = 0x1
+ EPOLL_CTL_ADD = 0x1
+ EPOLL_CLOEXEC = 0x80000
+ O_CLOEXEC = 0x80000
+ O_NONBLOCK = 0x800
+ PROT_READ = 0x1
+ PROT_WRITE = 0x2
+ MAP_SHARED = 0x1
+ PERF_TYPE_SOFTWARE = 0x1
+ PERF_COUNT_SW_BPF_OUTPUT = 0xa
+ PerfBitWatermark = 0x4000
+ PERF_SAMPLE_RAW = 0x400
+ PERF_FLAG_FD_CLOEXEC = 0x8
+ RLIM_INFINITY = 0x7fffffffffffffff
+ RLIMIT_MEMLOCK = 8
+ BPF_STATS_RUN_TIME = 0
+)
+
+// Statfs_t is a wrapper mirroring the Linux Statfs_t layout so that code
+// referencing it compiles on non-Linux platforms; it is never filled in,
+// since Statfs below always fails.
+type Statfs_t struct {
+ Type int64
+ Bsize int64
+ Blocks uint64
+ Bfree uint64
+ Bavail uint64
+ Files uint64
+ Ffree uint64
+ Fsid [2]int32
+ Namelen int64
+ Frsize int64
+ Flags int64
+ Spare [4]int64
+}
+
+// Rlimit is a wrapper mirroring the Linux Rlimit layout (soft and hard
+// limits) for compilation on non-Linux platforms.
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+// Setrlimit is a stub that always fails with errNonLinux.
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+ return errNonLinux
+}
+
+// Syscall is a stub that always fails. It returns Errno(1) rather than an
+// error value because its signature requires a syscall.Errno, and
+// Errno(0) would read as success; presumably 1 (EPERM on most platforms)
+// was chosen as an arbitrary non-zero value — TODO confirm.
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ return 0, 0, syscall.Errno(1)
+}
+
+// FcntlInt is a stub that always fails with errNonLinux.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return -1, errNonLinux
+}
+
+// Statfs is a stub that always fails with errNonLinux.
+func Statfs(path string, buf *Statfs_t) error {
+ return errNonLinux
+}
+
+// Close is a stub that always fails with errNonLinux.
+func Close(fd int) (err error) {
+ return errNonLinux
+}
+
+// EpollEvent is a wrapper mirroring the Linux EpollEvent layout so that
+// code referencing it compiles on non-Linux platforms.
+type EpollEvent struct {
+ Events uint32
+ Fd int32
+ Pad int32
+}
+
+// EpollWait is a stub that always fails with errNonLinux.
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ return 0, errNonLinux
+}
+
+// EpollCtl is a stub that always fails with errNonLinux.
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ return errNonLinux
+}
+
+// Eventfd is a stub that always fails with errNonLinux.
+func Eventfd(initval uint, flags int) (fd int, err error) {
+ return 0, errNonLinux
+}
+
+// Write is a stub that always fails with errNonLinux.
+func Write(fd int, p []byte) (n int, err error) {
+ return 0, errNonLinux
+}
+
+// EpollCreate1 is a stub that always fails with errNonLinux.
+func EpollCreate1(flag int) (fd int, err error) {
+ return 0, errNonLinux
+}
+
+// PerfEventMmapPage is a wrapper mirroring the field layout of the Linux
+// perf_event mmap metadata page (presumably struct perf_event_mmap_page,
+// see perf_event_open(2) — confirm against the Linux-side definition).
+// It exists only so code referencing it compiles off Linux.
+type PerfEventMmapPage struct {
+ Version uint32
+ Compat_version uint32
+ Lock uint32
+ Index uint32
+ Offset int64
+ Time_enabled uint64
+ Time_running uint64
+ Capabilities uint64
+ Pmc_width uint16
+ Time_shift uint16
+ Time_mult uint32
+ Time_offset uint64
+ Time_zero uint64
+ Size uint32
+
+ Data_head uint64
+ Data_tail uint64
+ Data_offset uint64
+ Data_size uint64
+ Aux_head uint64
+ Aux_tail uint64
+ Aux_offset uint64
+ Aux_size uint64
+}
+
+// SetNonblock is a stub that always fails with errNonLinux.
+func SetNonblock(fd int, nonblocking bool) (err error) {
+ return errNonLinux
+}
+
+// Mmap is a stub that always fails with errNonLinux.
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ // Return a nil slice rather than []byte{}: the data result is
+ // meaningless when err is non-nil, and nil is the idiomatic zero value.
+ return nil, errNonLinux
+}
+
+// Munmap is a stub that always fails with errNonLinux.
+func Munmap(b []byte) (err error) {
+ return errNonLinux
+}
+
+// PerfEventAttr is a wrapper mirroring the field layout of the Linux
+// perf_event_attr structure (presumably matching the Linux-side
+// PerfEventAttr alias — confirm against unix/types_linux.go). It exists
+// only so code referencing it compiles off Linux.
+type PerfEventAttr struct {
+ Type uint32
+ Size uint32
+ Config uint64
+ Sample uint64
+ Sample_type uint64
+ Read_format uint64
+ Bits uint64
+ Wakeup uint32
+ Bp_type uint32
+ Ext1 uint64
+ Ext2 uint64
+ Branch_sample_type uint64
+ Sample_regs_user uint64
+ Sample_stack_user uint32
+ Clockid int32
+ Sample_regs_intr uint64
+ Aux_watermark uint32
+ Sample_max_stack uint16
+}
+
+// PerfEventOpen is a stub that always fails with errNonLinux.
+func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
+ return 0, errNonLinux
+}
+
+// Utsname is a wrapper declaring only the Release field — the single
+// field this package reads (see KernelRelease).
+type Utsname struct {
+ Release [65]byte
+}
+
+// Uname is a stub that always fails with errNonLinux; buf is left
+// untouched.
+func Uname(buf *Utsname) (err error) {
+ return errNonLinux
+}
+
+// Getpid is a stub that always returns -1 on non-Linux platforms.
+func Getpid() int {
+ return -1
+}
+
+// Gettid is a stub that always returns -1 on non-Linux platforms.
+func Gettid() int {
+ return -1
+}
+
+// Tgkill is a stub that always fails with errNonLinux.
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+ return errNonLinux
+}
+
+// KernelRelease is a stub that always fails with errNonLinux, matching
+// the Linux implementation's signature.
+func KernelRelease() (string, error) {
+ return "", errNonLinux
+}