summaryrefslogtreecommitdiff
path: root/vendor/github.com/cilium/ebpf/internal/btf
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/cilium/ebpf/internal/btf')
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/btf.go791
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/btf_types.go269
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/core.go388
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/doc.go8
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/ext_info.go281
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/fuzz.go49
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/strings.go60
-rw-r--r--vendor/github.com/cilium/ebpf/internal/btf/types.go871
8 files changed, 2717 insertions, 0 deletions
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf.go b/vendor/github.com/cilium/ebpf/internal/btf/btf.go
new file mode 100644
index 000000000..1e66d9476
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/btf.go
@@ -0,0 +1,791 @@
+package btf
+
+import (
+ "bytes"
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+ "reflect"
+ "sync"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
// btfMagic identifies a BTF blob; parseBTF rejects any other value.
const btfMagic = 0xeB9F

// Errors returned by BTF functions.
var (
	ErrNotSupported   = internal.ErrNotSupported
	ErrNotFound       = errors.New("not found")
	ErrNoExtendedInfo = errors.New("no extended info")
)
+
// Spec represents decoded BTF.
type Spec struct {
	rawTypes []rawType // wire-format types; the first entry has type ID 1
	strings  stringTable
	types    []Type
	// namedTypes is keyed by essential name (flavor suffix stripped);
	// multiple flavors of the same name share one slice.
	namedTypes map[string][]namedType
	// funcInfos, lineInfos and coreRelos come from a .BTF.ext section,
	// keyed by ELF section name. Nil when no extended info was loaded.
	funcInfos map[string]extInfo
	lineInfos map[string]extInfo
	coreRelos map[string]bpfCoreRelos
	byteOrder binary.ByteOrder
}

// btfHeader mirrors struct btf_header from the kernel's BTF wire format.
// TypeOff and StringOff are relative to the end of the header (HdrLen).
type btfHeader struct {
	Magic   uint16
	Version uint8
	Flags   uint8
	HdrLen  uint32

	TypeOff   uint32
	TypeLen   uint32
	StringOff uint32
	StringLen uint32
}
+
+// LoadSpecFromReader reads BTF sections from an ELF.
+//
+// Returns a nil Spec and no error if no BTF was present.
// LoadSpecFromReader reads BTF sections from an ELF.
//
// Returns a nil Spec and no error if no BTF was present.
func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
	file, err := internal.NewSafeELFFile(rd)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	btfSection, btfExtSection, sectionSizes, err := findBtfSections(file)
	if err != nil {
		return nil, err
	}

	if btfSection == nil {
		// No .BTF section: the ELF simply carries no BTF, not an error.
		return nil, nil
	}

	symbols, err := file.Symbols()
	if err != nil {
		return nil, fmt.Errorf("can't read symbols: %v", err)
	}

	// Collect the offset of every symbol that lives in one of the data
	// sections found above. fixupDatasec (via loadNakedSpec) patches
	// these offsets into the DATASEC variable infos, which clang leaves
	// zeroed.
	variableOffsets := make(map[variable]uint32)
	for _, symbol := range symbols {
		if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
			// Ignore things like SHN_ABS
			continue
		}

		if int(symbol.Section) >= len(file.Sections) {
			return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
		}

		secName := file.Sections[symbol.Section].Name
		if _, ok := sectionSizes[secName]; !ok {
			continue
		}

		// Offsets are stored as uint32 in btfVarSecinfo, so larger
		// values cannot be represented.
		if symbol.Value > math.MaxUint32 {
			return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name)
		}

		variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
	}

	spec, err := loadNakedSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets)
	if err != nil {
		return nil, err
	}

	if btfExtSection == nil {
		// Extended info is optional; return the bare spec.
		return spec, nil
	}

	spec.funcInfos, spec.lineInfos, spec.coreRelos, err = parseExtInfos(btfExtSection.Open(), file.ByteOrder, spec.strings)
	if err != nil {
		return nil, fmt.Errorf("can't read ext info: %w", err)
	}

	return spec, nil
}
+
+func findBtfSections(file *internal.SafeELFFile) (*elf.Section, *elf.Section, map[string]uint32, error) {
+ var (
+ btfSection *elf.Section
+ btfExtSection *elf.Section
+ sectionSizes = make(map[string]uint32)
+ )
+
+ for _, sec := range file.Sections {
+ switch sec.Name {
+ case ".BTF":
+ btfSection = sec
+ case ".BTF.ext":
+ btfExtSection = sec
+ default:
+ if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
+ break
+ }
+
+ if sec.Size > math.MaxUint32 {
+ return nil, nil, nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
+ }
+
+ sectionSizes[sec.Name] = uint32(sec.Size)
+ }
+ }
+ return btfSection, btfExtSection, sectionSizes, nil
+}
+
+func loadSpecFromVmlinux(rd io.ReaderAt) (*Spec, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ btfSection, _, _, err := findBtfSections(file)
+ if err != nil {
+ return nil, fmt.Errorf(".BTF ELF section: %s", err)
+ }
+ if btfSection == nil {
+ return nil, fmt.Errorf("unable to find .BTF ELF section")
+ }
+ return loadNakedSpec(btfSection.Open(), file.ByteOrder, nil, nil)
+}
+
+func loadNakedSpec(btf io.ReadSeeker, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) {
+ rawTypes, rawStrings, err := parseBTF(btf, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets)
+ if err != nil {
+ return nil, err
+ }
+
+ types, typesByName, err := inflateRawTypes(rawTypes, rawStrings)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Spec{
+ rawTypes: rawTypes,
+ namedTypes: typesByName,
+ types: types,
+ strings: rawStrings,
+ byteOrder: bo,
+ }, nil
+}
+
// kernelBTF caches the result of loadKernelSpec, guarded by the embedded
// mutex. NOTE(review): the cached Spec is shared by all callers — it is
// presumably treated as read-only after load; verify before mutating.
var kernelBTF struct {
	sync.Mutex
	*Spec
}

// LoadKernelSpec returns the current kernel's BTF information.
//
// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns
// ErrNotSupported if BTF is not enabled.
func LoadKernelSpec() (*Spec, error) {
	kernelBTF.Lock()
	defer kernelBTF.Unlock()

	if kernelBTF.Spec != nil {
		return kernelBTF.Spec, nil
	}

	// A failed load is not cached: nil Spec triggers a retry next call.
	var err error
	kernelBTF.Spec, err = loadKernelSpec()
	return kernelBTF.Spec, err
}
+
// loadKernelSpec reads the running kernel's BTF, preferring the sysfs
// blob and falling back to vmlinux images on disk.
func loadKernelSpec() (*Spec, error) {
	release, err := unix.KernelRelease()
	if err != nil {
		return nil, fmt.Errorf("can't read kernel release number: %w", err)
	}

	// Fast path: raw BTF exposed by the kernel itself (5.4+ with
	// CONFIG_DEBUG_INFO_BTF). This is naked BTF, not an ELF.
	fh, err := os.Open("/sys/kernel/btf/vmlinux")
	if err == nil {
		defer fh.Close()

		return loadNakedSpec(fh, internal.NativeEndian, nil, nil)
	}

	// use same list of locations as libbpf
	// https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
	locations := []string{
		"/boot/vmlinux-%s",
		"/lib/modules/%s/vmlinux-%[1]s",
		"/lib/modules/%s/build/vmlinux",
		"/usr/lib/modules/%s/kernel/vmlinux",
		"/usr/lib/debug/boot/vmlinux-%s",
		"/usr/lib/debug/boot/vmlinux-%s.debug",
		"/usr/lib/debug/lib/modules/%s/vmlinux",
	}

	for _, loc := range locations {
		path := fmt.Sprintf(loc, release)

		fh, err := os.Open(path)
		if err != nil {
			continue
		}
		// The function returns on the first successful open, so at most
		// one defer is ever registered here.
		defer fh.Close()

		// On-disk images are ELF files containing a .BTF section.
		return loadSpecFromVmlinux(fh)
	}

	return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported)
}
+
// parseBTF validates the BTF header and decodes the string and type
// sections. The reader must be positioned at the start of the blob.
func parseBTF(btf io.ReadSeeker, bo binary.ByteOrder) ([]rawType, stringTable, error) {
	rawBTF, err := ioutil.ReadAll(btf)
	if err != nil {
		return nil, nil, fmt.Errorf("can't read BTF: %v", err)
	}

	rd := bytes.NewReader(rawBTF)

	var header btfHeader
	if err := binary.Read(rd, bo, &header); err != nil {
		return nil, nil, fmt.Errorf("can't read header: %v", err)
	}

	if header.Magic != btfMagic {
		return nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
	}

	if header.Version != 1 {
		return nil, nil, fmt.Errorf("unexpected version %v", header.Version)
	}

	if header.Flags != 0 {
		return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
	}

	// The on-disk header may be longer than our struct if a newer kernel
	// added fields; any extra bytes must be zero padding.
	remainder := int64(header.HdrLen) - int64(binary.Size(&header))
	if remainder < 0 {
		return nil, nil, errors.New("header is too short")
	}

	if _, err := io.CopyN(internal.DiscardZeroes{}, rd, remainder); err != nil {
		return nil, nil, fmt.Errorf("header padding: %v", err)
	}

	// Read strings before types: type decoding needs name offsets to be
	// resolvable later, and both offsets are relative to HdrLen.
	if _, err := rd.Seek(int64(header.HdrLen+header.StringOff), io.SeekStart); err != nil {
		return nil, nil, fmt.Errorf("can't seek to start of string section: %v", err)
	}

	rawStrings, err := readStringTable(io.LimitReader(rd, int64(header.StringLen)))
	if err != nil {
		return nil, nil, fmt.Errorf("can't read type names: %w", err)
	}

	if _, err := rd.Seek(int64(header.HdrLen+header.TypeOff), io.SeekStart); err != nil {
		return nil, nil, fmt.Errorf("can't seek to start of type section: %v", err)
	}

	rawTypes, err := readTypes(io.LimitReader(rd, int64(header.TypeLen)), bo)
	if err != nil {
		return nil, nil, fmt.Errorf("can't read types: %w", err)
	}

	return rawTypes, rawStrings, nil
}
+
+type variable struct {
+ section string
+ name string
+}
+
+func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
+ for i, rawType := range rawTypes {
+ if rawType.Kind() != kindDatasec {
+ continue
+ }
+
+ name, err := rawStrings.Lookup(rawType.NameOff)
+ if err != nil {
+ return err
+ }
+
+ if name == ".kconfig" || name == ".ksyms" {
+ return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
+ }
+
+ if rawTypes[i].SizeType != 0 {
+ continue
+ }
+
+ size, ok := sectionSizes[name]
+ if !ok {
+ return fmt.Errorf("data section %s: missing size", name)
+ }
+
+ rawTypes[i].SizeType = size
+
+ secinfos := rawType.data.([]btfVarSecinfo)
+ for j, secInfo := range secinfos {
+ id := int(secInfo.Type - 1)
+ if id >= len(rawTypes) {
+ return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
+ }
+
+ varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
+ if err != nil {
+ return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
+ }
+
+ offset, ok := variableOffsets[variable{name, varName}]
+ if !ok {
+ return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
+ }
+
+ secinfos[j].Offset = offset
+ }
+ }
+
+ return nil
+}
+
// marshalOpts controls Spec.marshal.
type marshalOpts struct {
	ByteOrder binary.ByteOrder
	// StripFuncLinkage rewrites all FUNC linkage to static, for kernels
	// that don't understand global linkage.
	StripFuncLinkage bool
}

// marshal serializes the Spec back into the kernel's BTF wire format:
// header, type section, string section.
func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
	var (
		buf        bytes.Buffer
		header     = new(btfHeader)
		headerLen  = binary.Size(header)
	)

	// Reserve space for the header. We have to write it last since
	// we don't know the size of the type section yet.
	_, _ = buf.Write(make([]byte, headerLen))

	// Write type section, just after the header.
	for _, raw := range s.rawTypes {
		switch {
		case opts.StripFuncLinkage && raw.Kind() == kindFunc:
			// raw is a copy, so this doesn't modify s.rawTypes.
			raw.SetLinkage(linkageStatic)
		}

		if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
			return nil, fmt.Errorf("can't marshal BTF: %w", err)
		}
	}

	typeLen := uint32(buf.Len() - headerLen)

	// Write string section after type section.
	_, _ = buf.Write(s.strings)

	// Fill out the header, and write it out.
	header = &btfHeader{
		Magic:     btfMagic,
		Version:   1,
		Flags:     0,
		HdrLen:    uint32(headerLen),
		TypeOff:   0,
		TypeLen:   typeLen,
		StringOff: typeLen,
		StringLen: uint32(len(s.strings)),
	}

	// Overwrite the reserved prefix in place; sliceWriter enforces that
	// the encoded header is exactly headerLen bytes.
	raw := buf.Bytes()
	err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
	if err != nil {
		return nil, fmt.Errorf("can't write header: %v", err)
	}

	return raw, nil
}
+
// sliceWriter is an io.Writer that accepts exactly one write filling the
// whole slice; any other size is rejected.
type sliceWriter []byte

func (sw sliceWriter) Write(p []byte) (int, error) {
	if len(sw) != len(p) {
		return 0, errors.New("size doesn't match")
	}
	n := copy(sw, p)
	return n, nil
}
+
+// Program finds the BTF for a specific section.
+//
+// Length is the number of bytes in the raw BPF instruction stream.
+//
+// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't
+// contain extended BTF info.
+func (s *Spec) Program(name string, length uint64) (*Program, error) {
+ if length == 0 {
+ return nil, errors.New("length musn't be zero")
+ }
+
+ if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil {
+ return nil, fmt.Errorf("BTF for section %s: %w", name, ErrNoExtendedInfo)
+ }
+
+ funcInfos, funcOK := s.funcInfos[name]
+ lineInfos, lineOK := s.lineInfos[name]
+ coreRelos, coreOK := s.coreRelos[name]
+
+ if !funcOK && !lineOK && !coreOK {
+ return nil, fmt.Errorf("no extended BTF info for section %s", name)
+ }
+
+ return &Program{s, length, funcInfos, lineInfos, coreRelos}, nil
+}
+
+// Datasec returns the BTF required to create maps which represent data sections.
+func (s *Spec) Datasec(name string) (*Map, error) {
+ var datasec Datasec
+ if err := s.FindType(name, &datasec); err != nil {
+ return nil, fmt.Errorf("data section %s: can't get BTF: %w", name, err)
+ }
+
+ m := NewMap(s, &Void{}, &datasec)
+ return &m, nil
+}
+
// FindType searches for a type with a specific name.
//
// hint determines the type of the returned Type.
//
// Returns an error wrapping ErrNotFound if no matching
// type exists in spec.
func (s *Spec) FindType(name string, typ Type) error {
	var (
		// wanted is the dynamic Go type of the caller's hint, e.g. *Struct.
		wanted    = reflect.TypeOf(typ)
		candidate Type
	)

	// Candidates are indexed by essential name, so flavors like
	// "foo___v2" are considered alongside "foo".
	for _, typ := range s.namedTypes[essentialName(name)] {
		if reflect.TypeOf(typ) != wanted {
			continue
		}

		// Match against the full name, not just the essential one.
		if typ.name() != name {
			continue
		}

		if candidate != nil {
			return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
		}

		candidate = typ
	}

	if candidate == nil {
		return fmt.Errorf("type %s: %w", name, ErrNotFound)
	}

	// Copy the found type into the caller's hint via reflection; copyType
	// presumably deep-copies so the Spec's own types stay untouched —
	// TODO confirm against types.go.
	value := reflect.Indirect(reflect.ValueOf(copyType(candidate)))
	reflect.Indirect(reflect.ValueOf(typ)).Set(value)
	return nil
}
+
// Handle is a reference to BTF loaded into the kernel.
type Handle struct {
	fd *internal.FD
}

// NewHandle loads BTF into the kernel.
//
// Returns ErrNotSupported if BTF is not supported.
func NewHandle(spec *Spec) (*Handle, error) {
	if err := haveBTF(); err != nil {
		return nil, err
	}

	// The kernel expects BTF in its native byte order.
	if spec.byteOrder != internal.NativeEndian {
		return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
	}

	btf, err := spec.marshal(marshalOpts{
		ByteOrder:        internal.NativeEndian,
		StripFuncLinkage: haveFuncLinkage() != nil,
	})
	if err != nil {
		return nil, fmt.Errorf("can't marshal BTF: %w", err)
	}

	if uint64(len(btf)) > math.MaxUint32 {
		return nil, errors.New("BTF exceeds the maximum size")
	}

	attr := &bpfLoadBTFAttr{
		btf:     internal.NewSlicePointer(btf),
		btfSize: uint32(len(btf)),
	}

	fd, err := bpfLoadBTF(attr)
	if err != nil {
		// Retry with a verifier log buffer attached so the returned
		// error can include the kernel's explanation.
		logBuf := make([]byte, 64*1024)
		attr.logBuf = internal.NewSlicePointer(logBuf)
		attr.btfLogSize = uint32(len(logBuf))
		attr.btfLogLevel = 1
		_, logErr := bpfLoadBTF(attr)
		return nil, internal.ErrorWithLog(err, logBuf, logErr)
	}

	return &Handle{fd}, nil
}
+
+// Close destroys the handle.
+//
+// Subsequent calls to FD will return an invalid value.
+func (h *Handle) Close() error {
+ return h.fd.Close()
+}
+
+// FD returns the file descriptor for the handle.
+func (h *Handle) FD() int {
+ value, err := h.fd.Value()
+ if err != nil {
+ return -1
+ }
+
+ return int(value)
+}
+
+// Map is the BTF for a map.
+type Map struct {
+ spec *Spec
+ key, value Type
+}
+
+// NewMap returns a new Map containing the given values.
+// The key and value arguments are initialized to Void if nil values are given.
+func NewMap(spec *Spec, key Type, value Type) Map {
+ if key == nil {
+ key = &Void{}
+ }
+ if value == nil {
+ value = &Void{}
+ }
+
+ return Map{
+ spec: spec,
+ key: key,
+ value: value,
+ }
+}
+
+// MapSpec should be a method on Map, but is a free function
+// to hide it from users of the ebpf package.
+func MapSpec(m *Map) *Spec {
+ return m.spec
+}
+
+// MapKey should be a method on Map, but is a free function
+// to hide it from users of the ebpf package.
+func MapKey(m *Map) Type {
+ return m.key
+}
+
+// MapValue should be a method on Map, but is a free function
+// to hide it from users of the ebpf package.
+func MapValue(m *Map) Type {
+ return m.value
+}
+
// Program is the BTF information for a stream of instructions.
type Program struct {
	spec *Spec
	// length is the number of bytes in the instruction stream.
	length               uint64
	funcInfos, lineInfos extInfo
	coreRelos            bpfCoreRelos
}

// ProgramSpec returns the Spec needed for loading function and line infos into the kernel.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramSpec(s *Program) *Spec {
	return s.spec
}

// ProgramAppend the information from other to the Program.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramAppend(s, other *Program) error {
	// Appended infos are offset by s.length, since other's instructions
	// follow s's in the combined stream.
	funcInfos, err := s.funcInfos.append(other.funcInfos, s.length)
	if err != nil {
		return fmt.Errorf("func infos: %w", err)
	}

	lineInfos, err := s.lineInfos.append(other.lineInfos, s.length)
	if err != nil {
		return fmt.Errorf("line infos: %w", err)
	}

	// Only mutate s after both appends succeeded, so a failure leaves
	// s unchanged.
	s.funcInfos = funcInfos
	s.lineInfos = lineInfos
	s.coreRelos = s.coreRelos.append(other.coreRelos, s.length)
	s.length += other.length
	return nil
}

// ProgramFuncInfos returns the binary form of BTF function infos.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramFuncInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
	bytes, err = s.funcInfos.MarshalBinary()
	if err != nil {
		return 0, nil, err
	}

	return s.funcInfos.recordSize, bytes, nil
}

// ProgramLineInfos returns the binary form of BTF line infos.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramLineInfos(s *Program) (recordSize uint32, bytes []byte, err error) {
	bytes, err = s.lineInfos.MarshalBinary()
	if err != nil {
		return 0, nil, err
	}

	return s.lineInfos.recordSize, bytes, nil
}

// ProgramRelocations returns the CO-RE relocations required to adjust the
// program to the target.
//
// This is a free function instead of a method to hide it from users
// of package ebpf.
func ProgramRelocations(s *Program, target *Spec) (map[uint64]Relocation, error) {
	if len(s.coreRelos) == 0 {
		return nil, nil
	}

	return coreRelocate(s.spec, target, s.coreRelos)
}
+
// bpfLoadBTFAttr mirrors the BPF_BTF_LOAD portion of union bpf_attr.
// Field order must match the kernel's layout exactly.
type bpfLoadBTFAttr struct {
	btf         internal.Pointer
	logBuf      internal.Pointer
	btfSize     uint32
	btfLogSize  uint32
	btfLogLevel uint32
}

// bpfLoadBTF wraps the BPF_BTF_LOAD syscall and returns the resulting
// BTF fd. The caller owns the fd and must close it.
func bpfLoadBTF(attr *bpfLoadBTFAttr) (*internal.FD, error) {
	fd, err := internal.BPF(internal.BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
	if err != nil {
		return nil, err
	}

	return internal.NewFD(uint32(fd)), nil
}
+
+func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
+ const minHeaderLength = 24
+
+ typesLen := uint32(binary.Size(types))
+ header := btfHeader{
+ Magic: btfMagic,
+ Version: 1,
+ HdrLen: minHeaderLength,
+ TypeOff: 0,
+ TypeLen: typesLen,
+ StringOff: typesLen,
+ StringLen: uint32(len(strings)),
+ }
+
+ buf := new(bytes.Buffer)
+ _ = binary.Write(buf, bo, &header)
+ _ = binary.Write(buf, bo, types)
+ buf.Write(strings)
+
+ return buf.Bytes()
+}
+
// haveBTF probes whether the kernel accepts BTF at all by loading a
// minimal blob containing a BTF_KIND_VAR (added ~5.1).
var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
	var (
		types struct {
			Integer btfType
			Var     btfType
			btfVar  struct{ Linkage uint32 }
		}
		// Minimal string table: offset 1 is the name "a".
		strings = []byte{0, 'a', 0}
	)

	// We use a BTF_KIND_VAR here, to make sure that
	// the kernel understands BTF at least as well as we
	// do. BTF_KIND_VAR was introduced ~5.1.
	types.Integer.SetKind(kindPointer)
	types.Var.NameOff = 1
	types.Var.SetKind(kindVar)
	types.Var.SizeType = 1

	btf := marshalBTF(&types, strings, internal.NativeEndian)

	fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
		btf:     internal.NewSlicePointer(btf),
		btfSize: uint32(len(btf)),
	})
	if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
		// Treat both EINVAL and EPERM as not supported: loading the program
		// might still succeed without BTF.
		return internal.ErrNotSupported
	}
	if err != nil {
		return err
	}

	fd.Close()
	return nil
})

// haveFuncLinkage probes whether the kernel validates FUNC linkage
// (~5.6): older kernels accept the global-linkage blob below, newer ones
// reject invalid linkage with EINVAL.
var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
	if err := haveBTF(); err != nil {
		return err
	}

	var (
		types struct {
			FuncProto btfType
			Func      btfType
		}
		strings = []byte{0, 'a', 0}
	)

	types.FuncProto.SetKind(kindFuncProto)
	types.Func.SetKind(kindFunc)
	types.Func.SizeType = 1 // aka FuncProto
	types.Func.NameOff = 1
	types.Func.SetLinkage(linkageGlobal)

	btf := marshalBTF(&types, strings, internal.NativeEndian)

	fd, err := bpfLoadBTF(&bpfLoadBTFAttr{
		btf:     internal.NewSlicePointer(btf),
		btfSize: uint32(len(btf)),
	})
	if errors.Is(err, unix.EINVAL) {
		return internal.ErrNotSupported
	}
	if err != nil {
		return err
	}

	fd.Close()
	return nil
})
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go b/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go
new file mode 100644
index 000000000..a4cde3fe8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go
@@ -0,0 +1,269 @@
+package btf
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
// btfKind describes a Type.
type btfKind uint8

// Equivalents of the BTF_KIND_* constants.
const (
	kindUnknown btfKind = iota
	kindInt
	kindPointer
	kindArray
	kindStruct
	kindUnion
	kindEnum
	kindForward
	kindTypedef
	kindVolatile
	kindConst
	kindRestrict
	// Added ~4.20
	kindFunc
	kindFuncProto
	// Added ~5.1
	kindVar
	kindDatasec
)

// btfFuncLinkage mirrors the BTF_FUNC_* linkage values stored in the
// vlen bits of a FUNC's Info field.
type btfFuncLinkage uint8

const (
	linkageStatic btfFuncLinkage = iota
	linkageGlobal
	linkageExtern
)

// Bit positions and widths within btfType.Info. Despite the name,
// btfTypeVlenMask is a bit *length* (16), not a mask value; see mask().
const (
	btfTypeKindShift     = 24
	btfTypeKindLen       = 4
	btfTypeVlenShift     = 0
	btfTypeVlenMask      = 16
	btfTypeKindFlagShift = 31
	btfTypeKindFlagMask  = 1
)
+
// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
// It is decoded directly from the wire format, so the field layout must
// not change.
type btfType struct {
	NameOff uint32
	/* "info" bits arrangement
	 * bits 0-15: vlen (e.g. # of struct's members), linkage
	 * bits 16-23: unused
	 * bits 24-27: kind (e.g. int, ptr, array...etc)
	 * bits 28-30: unused
	 * bit 31: kind_flag, currently used by
	 * struct, union and fwd
	 */
	Info uint32
	/* "size" is used by INT, ENUM, STRUCT and UNION.
	 * "size" tells the size of the type it is describing.
	 *
	 * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
	 * FUNC and FUNC_PROTO.
	 * "type" is a type_id referring to another type.
	 */
	SizeType uint32
}
+
+func (k btfKind) String() string {
+ switch k {
+ case kindUnknown:
+ return "Unknown"
+ case kindInt:
+ return "Integer"
+ case kindPointer:
+ return "Pointer"
+ case kindArray:
+ return "Array"
+ case kindStruct:
+ return "Struct"
+ case kindUnion:
+ return "Union"
+ case kindEnum:
+ return "Enumeration"
+ case kindForward:
+ return "Forward"
+ case kindTypedef:
+ return "Typedef"
+ case kindVolatile:
+ return "Volatile"
+ case kindConst:
+ return "Const"
+ case kindRestrict:
+ return "Restrict"
+ case kindFunc:
+ return "Function"
+ case kindFuncProto:
+ return "Function Proto"
+ case kindVar:
+ return "Variable"
+ case kindDatasec:
+ return "Section"
+ default:
+ return fmt.Sprintf("Unknown (%d)", k)
+ }
+}
+
// mask returns a value with the low n bits set. The parameter is named n
// rather than len to avoid shadowing the builtin.
func mask(n uint32) uint32 {
	return (1 << n) - 1
}
+
// info extracts a bitfield of the given length at the given shift from Info.
func (bt *btfType) info(len, shift uint32) uint32 {
	return (bt.Info >> shift) & mask(len)
}

// setInfo overwrites the bitfield of the given length at the given shift.
func (bt *btfType) setInfo(value, len, shift uint32) {
	bt.Info &^= mask(len) << shift
	bt.Info |= (value & mask(len)) << shift
}

func (bt *btfType) Kind() btfKind {
	return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift))
}

func (bt *btfType) SetKind(kind btfKind) {
	bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift)
}

func (bt *btfType) Vlen() int {
	return int(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}

func (bt *btfType) SetVlen(vlen int) {
	bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
}

func (bt *btfType) KindFlag() bool {
	return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
}

// Linkage and SetLinkage reuse the vlen bits: for FUNC types the kernel
// stores linkage there instead of a member count.
func (bt *btfType) Linkage() btfFuncLinkage {
	return btfFuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
}

func (bt *btfType) SetLinkage(linkage btfFuncLinkage) {
	bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
}

func (bt *btfType) Type() TypeID {
	// TODO: Panic here if wrong kind?
	return TypeID(bt.SizeType)
}

func (bt *btfType) Size() uint32 {
	// TODO: Panic here if wrong kind?
	return bt.SizeType
}
+
// rawType is a btfType plus its kind-specific trailing data (nil for
// kinds that carry none).
type rawType struct {
	btfType
	data interface{}
}

// Marshal writes the wire representation: the fixed btfType followed by
// any trailing data.
func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
	if err := binary.Write(w, bo, &rt.btfType); err != nil {
		return err
	}

	if rt.data == nil {
		return nil
	}

	return binary.Write(w, bo, rt.data)
}
+
// The structs below mirror the kind-specific trailing data of the BTF
// wire format (struct btf_array, btf_member, etc. in the kernel).

type btfArray struct {
	Type      TypeID
	IndexType TypeID
	Nelems    uint32
}

type btfMember struct {
	NameOff uint32
	Type    TypeID
	// Offset is in bits; for kind_flag structs it also encodes bitfield
	// size — TODO confirm against inflateRawTypes.
	Offset uint32
}

type btfVarSecinfo struct {
	Type   TypeID
	Offset uint32
	Size   uint32
}

type btfVariable struct {
	Linkage uint32
}

type btfEnum struct {
	NameOff uint32
	Val     int32
}

type btfParam struct {
	NameOff uint32
	Type    TypeID
}
+
+func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) {
+ var (
+ header btfType
+ types []rawType
+ )
+
+ for id := TypeID(1); ; id++ {
+ if err := binary.Read(r, bo, &header); err == io.EOF {
+ return types, nil
+ } else if err != nil {
+ return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
+ }
+
+ var data interface{}
+ switch header.Kind() {
+ case kindInt:
+ data = new(uint32)
+ case kindPointer:
+ case kindArray:
+ data = new(btfArray)
+ case kindStruct:
+ fallthrough
+ case kindUnion:
+ data = make([]btfMember, header.Vlen())
+ case kindEnum:
+ data = make([]btfEnum, header.Vlen())
+ case kindForward:
+ case kindTypedef:
+ case kindVolatile:
+ case kindConst:
+ case kindRestrict:
+ case kindFunc:
+ case kindFuncProto:
+ data = make([]btfParam, header.Vlen())
+ case kindVar:
+ data = new(btfVariable)
+ case kindDatasec:
+ data = make([]btfVarSecinfo, header.Vlen())
+ default:
+ return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
+ }
+
+ if data == nil {
+ types = append(types, rawType{header, nil})
+ continue
+ }
+
+ if err := binary.Read(r, bo, data); err != nil {
+ return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
+ }
+
+ types = append(types, rawType{header, data})
+ }
+}
+
+func intEncoding(raw uint32) (IntEncoding, uint32, byte) {
+ return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/core.go b/vendor/github.com/cilium/ebpf/internal/btf/core.go
new file mode 100644
index 000000000..52b59ed18
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/core.go
@@ -0,0 +1,388 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// Code in this file is derived from libbpf, which is available under a BSD
+// 2-Clause license.
+
// Relocation describes a CO-RE relocation: the value currently encoded
// in the instruction and the value it should be rewritten to.
type Relocation struct {
	Current uint32
	New     uint32
}

func (r Relocation) equal(other Relocation) bool {
	// Relocation contains only comparable fields.
	return r == other
}
+
// coreReloKind is the type of CO-RE relocation
type coreReloKind uint32

const (
	reloFieldByteOffset coreReloKind = iota /* field byte offset */
	reloFieldByteSize                       /* field size in bytes */
	reloFieldExists                         /* field existence in target kernel */
	reloFieldSigned                         /* field signedness (0 - unsigned, 1 - signed) */
	reloFieldLShiftU64                      /* bitfield-specific left bitshift */
	reloFieldRShiftU64                      /* bitfield-specific right bitshift */
	reloTypeIDLocal                         /* type ID in local BPF object */
	reloTypeIDTarget                        /* type ID in target kernel */
	reloTypeExists                          /* type existence in target kernel */
	reloTypeSize                            /* type size in bytes */
	reloEnumvalExists                       /* enum value existence in target kernel */
	reloEnumvalValue                        /* enum value integer value */
)

// String returns the libbpf-style short name of the relocation kind.
func (k coreReloKind) String() string {
	names := []string{
		reloFieldByteOffset: "byte_off",
		reloFieldByteSize:   "byte_sz",
		reloFieldExists:     "field_exists",
		reloFieldSigned:     "signed",
		reloFieldLShiftU64:  "lshift_u64",
		reloFieldRShiftU64:  "rshift_u64",
		reloTypeIDLocal:     "local_type_id",
		reloTypeIDTarget:    "target_type_id",
		reloTypeExists:      "type_exists",
		reloTypeSize:        "type_size",
		reloEnumvalExists:   "enumval_exists",
		reloEnumvalValue:    "enumval_value",
	}
	if int(k) < len(names) {
		return names[k]
	}
	return "unknown"
}
+
// coreRelocate computes CO-RE relocations for the given relos, resolving
// local types against target (the running kernel's Spec when target is
// nil). The result maps instruction offsets to relocations.
func coreRelocate(local, target *Spec, coreRelos bpfCoreRelos) (map[uint64]Relocation, error) {
	if target == nil {
		var err error
		target, err = loadKernelSpec()
		if err != nil {
			return nil, err
		}
	}

	if local.byteOrder != target.byteOrder {
		return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
	}

	relocations := make(map[uint64]Relocation, len(coreRelos))
	for _, relo := range coreRelos {
		accessorStr, err := local.strings.Lookup(relo.AccessStrOff)
		if err != nil {
			return nil, err
		}

		accessor, err := parseCoreAccessor(accessorStr)
		if err != nil {
			return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
		}

		if int(relo.TypeID) >= len(local.types) {
			return nil, fmt.Errorf("invalid type id %d", relo.TypeID)
		}

		typ := local.types[relo.TypeID]

		// The local type ID relocation never needs the target spec:
		// current and new value are both the local ID.
		if relo.ReloKind == reloTypeIDLocal {
			relocations[uint64(relo.InsnOff)] = Relocation{
				uint32(typ.ID()),
				uint32(typ.ID()),
			}
			continue
		}

		named, ok := typ.(namedType)
		if !ok || named.name() == "" {
			return nil, fmt.Errorf("relocate anonymous type %s: %w", typ.String(), ErrNotSupported)
		}

		// Candidates in the target share the essential name, so kernel
		// flavors of the type are all considered.
		name := essentialName(named.name())
		res, err := coreCalculateRelocation(typ, target.namedTypes[name], relo.ReloKind, accessor)
		if err != nil {
			return nil, fmt.Errorf("relocate %s: %w", name, err)
		}

		relocations[uint64(relo.InsnOff)] = res
	}

	return relocations, nil
}
+
var errAmbiguousRelocation = errors.New("ambiguous relocation")

// coreCalculateRelocation resolves one relocation of the given kind
// against all candidate target types. All matching candidates must agree
// on the result, otherwise the relocation is ambiguous. Only
// reloTypeIDTarget is implemented so far.
func coreCalculateRelocation(local Type, targets []namedType, kind coreReloKind, localAccessor coreAccessor) (Relocation, error) {
	var relos []Relocation
	var matches []Type
	for _, target := range targets {
		switch kind {
		case reloTypeIDTarget:
			// Type-based relocations encode the accessor as just "0".
			if localAccessor[0] != 0 {
				return Relocation{}, fmt.Errorf("%s: unexpected non-zero accessor", kind)
			}

			if compat, err := coreAreTypesCompatible(local, target); err != nil {
				return Relocation{}, fmt.Errorf("%s: %s", kind, err)
			} else if !compat {
				continue
			}

			relos = append(relos, Relocation{uint32(target.ID()), uint32(target.ID())})

		default:
			return Relocation{}, fmt.Errorf("relocation %s: %w", kind, ErrNotSupported)
		}
		matches = append(matches, target)
	}

	if len(relos) == 0 {
		// TODO: Add switch for existence checks like reloEnumvalExists here.

		// TODO: This might have to be poisoned.
		return Relocation{}, fmt.Errorf("no relocation found, tried %v", targets)
	}

	// All candidates must yield the same relocation.
	relo := relos[0]
	for _, altRelo := range relos[1:] {
		if !altRelo.equal(relo) {
			return Relocation{}, fmt.Errorf("multiple types %v match: %w", matches, errAmbiguousRelocation)
		}
	}

	return relo, nil
}
+
/* coreAccessor contains a path through a struct. It contains at least one index.
 *
 * The interpretation depends on the kind of the relocation. The following is
 * taken from struct bpf_core_relo in libbpf_internal.h:
 *
 * - for field-based relocations, string encodes an accessed field using
 *   a sequence of field and array indices, separated by colon (:). It's
 *   conceptually very close to LLVM's getelementptr ([0]) instruction's
 *   arguments for identifying offset to a field.
 * - for type-based relocations, strings is expected to be just "0";
 * - for enum value-based relocations, string contains an index of enum
 *   value within its enum type;
 *
 * Example to provide a better feel.
 *
 *   struct sample {
 *       int a;
 *       struct {
 *           int b[10];
 *       };
 *   };
 *
 *   struct sample s = ...;
 *   int x = &s->a;     // encoded as "0:0" (a is field #0)
 *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
 *                      // b is field #0 inside anon struct, accessing elem #5)
 *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
 */
type coreAccessor []int

// parseCoreAccessor parses a colon-separated list of unsigned decimal
// indices (e.g. "0:1:5") into a coreAccessor.
//
// Returns an error if the string is empty or any component is not an
// unsigned decimal number. The underlying strconv error is wrapped so
// callers can inspect it with errors.Is / errors.As.
func parseCoreAccessor(accessor string) (coreAccessor, error) {
	if accessor == "" {
		return nil, errors.New("empty accessor")
	}

	parts := strings.Split(accessor, ":")
	result := make(coreAccessor, 0, len(parts))
	for _, part := range parts {
		// 31 bits to avoid overflowing int on 32 bit platforms.
		index, err := strconv.ParseUint(part, 10, 31)
		if err != nil {
			return nil, fmt.Errorf("accessor index %q: %w", part, err)
		}

		result = append(result, int(index))
	}

	return result, nil
}
+
/* The comment below is from bpf_core_types_are_compat in libbpf.c:
 *
 * Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follow slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *   kind should match for local and target types (i.e., STRUCT is not
 *   compatible with UNION);
 * - for ENUMs, the size is ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
 * - FUNC_PROTOs are compatible if they have compatible signature: same
 *   number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
func coreAreTypesCompatible(localType Type, targetType Type) (bool, error) {
	var (
		localTs, targetTs typeDeque
		l, t = &localType, &targetType
		depth = 0
	)

	// Walk both type graphs in lockstep, breadth-first: each iteration
	// compares one local/target pair; cases that need to descend push
	// their children onto the deques via walk().
	for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
		if depth >= maxTypeDepth {
			return false, errors.New("types are nested too deep")
		}

		// Modifiers and typedefs are transparent for this check.
		localType = skipQualifierAndTypedef(*l)
		targetType = skipQualifierAndTypedef(*t)

		// Kinds must match exactly (e.g. Struct never matches Union).
		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
			return false, nil
		}

		switch lv := (localType).(type) {
		case *Void, *Struct, *Union, *Enum, *Fwd:
			// Nothing to do here

		case *Int:
			// Size and signedness are ignored, but bitfields never match.
			tv := targetType.(*Int)
			if lv.isBitfield() || tv.isBitfield() {
				return false, nil
			}

		case *Pointer, *Array:
			// Compare pointee / element types on later iterations.
			depth++
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		case *FuncProto:
			tv := targetType.(*FuncProto)
			if len(lv.Params) != len(tv.Params) {
				return false, nil
			}

			// Compare return and parameter types on later iterations.
			depth++
			localType.walk(&localTs)
			targetType.walk(&targetTs)

		default:
			return false, fmt.Errorf("unsupported type %T", localType)
		}
	}

	// Both deques must drain at the same time; a leftover entry means one
	// side pushed more children than the other.
	if l != nil {
		return false, fmt.Errorf("dangling local type %T", *l)
	}

	if t != nil {
		return false, fmt.Errorf("dangling target type %T", *t)
	}

	return true, nil
}
+
/* The comment below is from bpf_core_fields_are_compat in libbpf.c:
 *
 * Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 * are relocating semantically compatible entities:
 * - any two STRUCTs/UNIONs are compatible and can be mixed;
 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
 * - any two PTRs are always compatible;
 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *   least one of enums should be anonymous;
 * - for ENUMs, check sizes, names are ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - everything else shouldn't be ever a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
func coreAreMembersCompatible(localType Type, targetType Type) (bool, error) {
	doNamesMatch := func(a, b string) bool {
		if a == "" || b == "" {
			// allow anonymous and named type to match
			return true
		}

		return essentialName(a) == essentialName(b)
	}

	// Iterative descent; only the Array case loops, by replacing the
	// types with their element types.
	for depth := 0; depth <= maxTypeDepth; depth++ {
		localType = skipQualifierAndTypedef(localType)
		targetType = skipQualifierAndTypedef(targetType)

		// Structs and unions may be mixed freely, so this check comes
		// before the exact-kind comparison below.
		_, lok := localType.(composite)
		_, tok := targetType.(composite)
		if lok && tok {
			return true, nil
		}

		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
			return false, nil
		}

		switch lv := localType.(type) {
		case *Pointer:
			return true, nil

		case *Enum:
			tv := targetType.(*Enum)
			return doNamesMatch(lv.name(), tv.name()), nil

		case *Fwd:
			tv := targetType.(*Fwd)
			return doNamesMatch(lv.name(), tv.name()), nil

		case *Int:
			// Size and signedness are ignored; bitfields never match.
			tv := targetType.(*Int)
			return !lv.isBitfield() && !tv.isBitfield(), nil

		case *Array:
			// Dimensionality is ignored: compare element types on the
			// next iteration.
			tv := targetType.(*Array)

			localType = lv.Type
			targetType = tv.Type

		default:
			return false, fmt.Errorf("unsupported type %T", localType)
		}
	}

	return false, errors.New("types are nested too deep")
}
+
+func skipQualifierAndTypedef(typ Type) Type {
+ result := typ
+ for depth := 0; depth <= maxTypeDepth; depth++ {
+ switch v := (result).(type) {
+ case qualifier:
+ result = v.qualify()
+ case *Typedef:
+ result = v.Type
+ default:
+ return result
+ }
+ }
+ return typ
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/doc.go b/vendor/github.com/cilium/ebpf/internal/btf/doc.go
new file mode 100644
index 000000000..ad2576cb2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/doc.go
@@ -0,0 +1,8 @@
+// Package btf handles data encoded according to the BPF Type Format.
+//
+// The canonical documentation lives in the Linux kernel repository and is
+// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
+//
+// The API is very much unstable. You should only use this via the main
+// ebpf library.
+package btf
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go
new file mode 100644
index 000000000..6a21b6bda
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go
@@ -0,0 +1,281 @@
+package btf
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+)
+
// btfExtHeader is the fixed header at the start of a .BTF.ext section.
// Subsection offsets are relative to the end of the header, i.e. HdrLen
// bytes into the section (see the seeks in parseExtInfos).
type btfExtHeader struct {
	Magic uint16
	Version uint8
	Flags uint8
	HdrLen uint32

	FuncInfoOff uint32
	FuncInfoLen uint32
	LineInfoOff uint32
	LineInfoLen uint32
}

// btfExtCoreHeader is the optional extension of btfExtHeader describing
// the CO-RE relocation subsection. It is only present on disk when
// HdrLen is large enough to include it.
type btfExtCoreHeader struct {
	CoreReloOff uint32
	CoreReloLen uint32
}
+
// parseExtInfos parses an entire .BTF.ext section: the fixed header, the
// func_info and line_info subsections, and, if the header advertises one,
// the optional CO-RE relocation subsection.
func parseExtInfos(r io.ReadSeeker, bo binary.ByteOrder, strings stringTable) (funcInfo, lineInfo map[string]extInfo, coreRelos map[string]bpfCoreRelos, err error) {
	var header btfExtHeader
	var coreHeader btfExtCoreHeader
	if err := binary.Read(r, bo, &header); err != nil {
		return nil, nil, nil, fmt.Errorf("can't read header: %v", err)
	}

	if header.Magic != btfMagic {
		return nil, nil, nil, fmt.Errorf("incorrect magic value %v", header.Magic)
	}

	if header.Version != 1 {
		return nil, nil, nil, fmt.Errorf("unexpected version %v", header.Version)
	}

	if header.Flags != 0 {
		return nil, nil, nil, fmt.Errorf("unsupported flags %v", header.Flags)
	}

	// remainder is the part of the on-disk header beyond the fields we
	// know about. Newer writers may append fields there.
	remainder := int64(header.HdrLen) - int64(binary.Size(&header))
	if remainder < 0 {
		return nil, nil, nil, errors.New("header is too short")
	}

	// If there is room, the next two fields are the CO-RE relocation
	// subsection's offset and length.
	coreHdrSize := int64(binary.Size(&coreHeader))
	if remainder >= coreHdrSize {
		if err := binary.Read(r, bo, &coreHeader); err != nil {
			return nil, nil, nil, fmt.Errorf("can't read CO-RE relocation header: %v", err)
		}
		remainder -= coreHdrSize
	}

	// Skip any remaining header bytes we don't understand.
	_, err = io.CopyN(ioutil.Discard, r, remainder)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("header padding: %v", err)
	}

	// All subsection offsets below are relative to the end of the header.
	if _, err := r.Seek(int64(header.HdrLen+header.FuncInfoOff), io.SeekStart); err != nil {
		return nil, nil, nil, fmt.Errorf("can't seek to function info section: %v", err)
	}

	buf := bufio.NewReader(io.LimitReader(r, int64(header.FuncInfoLen)))
	funcInfo, err = parseExtInfo(buf, bo, strings)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("function info: %w", err)
	}

	if _, err := r.Seek(int64(header.HdrLen+header.LineInfoOff), io.SeekStart); err != nil {
		return nil, nil, nil, fmt.Errorf("can't seek to line info section: %v", err)
	}

	buf = bufio.NewReader(io.LimitReader(r, int64(header.LineInfoLen)))
	lineInfo, err = parseExtInfo(buf, bo, strings)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("line info: %w", err)
	}

	// The CO-RE subsection is optional: coreHeader stays zeroed when the
	// short header was in use, so coreRelos stays nil.
	if coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 {
		if _, err := r.Seek(int64(header.HdrLen+coreHeader.CoreReloOff), io.SeekStart); err != nil {
			return nil, nil, nil, fmt.Errorf("can't seek to CO-RE relocation section: %v", err)
		}

		coreRelos, err = parseExtInfoRelos(io.LimitReader(r, int64(coreHeader.CoreReloLen)), bo, strings)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("CO-RE relocation info: %w", err)
		}
	}

	return funcInfo, lineInfo, coreRelos, nil
}
+
// btfExtInfoSec is the per-section run header inside a .BTF.ext
// subsection: the section's name and the number of records that follow.
type btfExtInfoSec struct {
	SecNameOff uint32
	NumInfo    uint32
}

// extInfoRecord is a single decoded record. InsnOff is in bytes relative
// to the start of the section; the rest of the record is kept opaque.
type extInfoRecord struct {
	InsnOff uint64
	Opaque  []byte
}

// extInfo is a decoded func_info or line_info subsection for one section.
type extInfo struct {
	recordSize uint32
	records    []extInfoRecord
}

// append returns the concatenation of ei and other, with the instruction
// offsets of other's records shifted by offset bytes. Both sides must
// share the same record size.
func (ei extInfo) append(other extInfo, offset uint64) (extInfo, error) {
	if ei.recordSize != other.recordSize {
		return extInfo{}, fmt.Errorf("ext_info record size mismatch, want %d (got %d)", ei.recordSize, other.recordSize)
	}

	merged := append(make([]extInfoRecord, 0, len(ei.records)+len(other.records)), ei.records...)
	for _, rec := range other.records {
		rec.InsnOff += offset
		merged = append(merged, rec)
	}
	return extInfo{ei.recordSize, merged}, nil
}
+
// MarshalBinary encodes the records in the format the kernel expects:
// for each record a uint32 instruction offset in native endianness,
// followed by the record's opaque payload.
//
// Returns nil if there are no records.
func (ei extInfo) MarshalBinary() ([]byte, error) {
	if len(ei.records) == 0 {
		return nil, nil
	}

	buf := bytes.NewBuffer(make([]byte, 0, int(ei.recordSize)*len(ei.records)))
	for _, info := range ei.records {
		// The kernel expects offsets in number of raw bpf instructions,
		// while the ELF tracks it in bytes.
		insnOff := uint32(info.InsnOff / asm.InstructionSize)
		if err := binary.Write(buf, internal.NativeEndian, insnOff); err != nil {
			return nil, fmt.Errorf("can't write instruction offset: %v", err)
		}

		// bytes.Buffer.Write never returns an error.
		buf.Write(info.Opaque)
	}

	return buf.Bytes(), nil
}
+
+func parseExtInfo(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]extInfo, error) {
+ const maxRecordSize = 256
+
+ var recordSize uint32
+ if err := binary.Read(r, bo, &recordSize); err != nil {
+ return nil, fmt.Errorf("can't read record size: %v", err)
+ }
+
+ if recordSize < 4 {
+ // Need at least insnOff
+ return nil, errors.New("record size too short")
+ }
+ if recordSize > maxRecordSize {
+ return nil, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
+ }
+
+ result := make(map[string]extInfo)
+ for {
+ secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+
+ var records []extInfoRecord
+ for i := uint32(0); i < infoHeader.NumInfo; i++ {
+ var byteOff uint32
+ if err := binary.Read(r, bo, &byteOff); err != nil {
+ return nil, fmt.Errorf("section %v: can't read extended info offset: %v", secName, err)
+ }
+
+ buf := make([]byte, int(recordSize-4))
+ if _, err := io.ReadFull(r, buf); err != nil {
+ return nil, fmt.Errorf("section %v: can't read record: %v", secName, err)
+ }
+
+ if byteOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, byteOff)
+ }
+
+ records = append(records, extInfoRecord{uint64(byteOff), buf})
+ }
+
+ result[secName] = extInfo{
+ recordSize,
+ records,
+ }
+ }
+}
+
+// bpfCoreRelo matches `struct bpf_core_relo` from the kernel
+type bpfCoreRelo struct {
+ InsnOff uint32
+ TypeID TypeID
+ AccessStrOff uint32
+ ReloKind coreReloKind
+}
+
+type bpfCoreRelos []bpfCoreRelo
+
+// append two slices of extInfoRelo to each other. The InsnOff of b are adjusted
+// by offset.
+func (r bpfCoreRelos) append(other bpfCoreRelos, offset uint64) bpfCoreRelos {
+ result := make([]bpfCoreRelo, 0, len(r)+len(other))
+ result = append(result, r...)
+ for _, relo := range other {
+ relo.InsnOff += uint32(offset)
+ result = append(result, relo)
+ }
+ return result
+}
+
+var extInfoReloSize = binary.Size(bpfCoreRelo{})
+
+func parseExtInfoRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]bpfCoreRelos, error) {
+ var recordSize uint32
+ if err := binary.Read(r, bo, &recordSize); err != nil {
+ return nil, fmt.Errorf("read record size: %v", err)
+ }
+
+ if recordSize != uint32(extInfoReloSize) {
+ return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
+ }
+
+ result := make(map[string]bpfCoreRelos)
+ for {
+ secName, infoHeader, err := parseExtInfoHeader(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+
+ var relos []bpfCoreRelo
+ for i := uint32(0); i < infoHeader.NumInfo; i++ {
+ var relo bpfCoreRelo
+ if err := binary.Read(r, bo, &relo); err != nil {
+ return nil, fmt.Errorf("section %v: read record: %v", secName, err)
+ }
+
+ if relo.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("section %v: offset %v is not aligned with instruction size", secName, relo.InsnOff)
+ }
+
+ relos = append(relos, relo)
+ }
+
+ result[secName] = relos
+ }
+}
+
+func parseExtInfoHeader(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) {
+ var infoHeader btfExtInfoSec
+ if err := binary.Read(r, bo, &infoHeader); err != nil {
+ return "", nil, fmt.Errorf("read ext info header: %w", err)
+ }
+
+ secName, err := strings.Lookup(infoHeader.SecNameOff)
+ if err != nil {
+ return "", nil, fmt.Errorf("get section name: %w", err)
+ }
+
+ if infoHeader.NumInfo == 0 {
+ return "", nil, fmt.Errorf("section %s has zero records", secName)
+ }
+
+ return secName, &infoHeader, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go b/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go
new file mode 100644
index 000000000..37e043fd3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/fuzz.go
@@ -0,0 +1,49 @@
+// +build gofuzz
+
+// Use with https://github.com/dvyukov/go-fuzz
+
+package btf
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "github.com/cilium/ebpf/internal"
+)
+
+func FuzzSpec(data []byte) int {
+ if len(data) < binary.Size(btfHeader{}) {
+ return -1
+ }
+
+ spec, err := loadNakedSpec(bytes.NewReader(data), internal.NativeEndian, nil, nil)
+ if err != nil {
+ if spec != nil {
+ panic("spec is not nil")
+ }
+ return 0
+ }
+ if spec == nil {
+ panic("spec is nil")
+ }
+ return 1
+}
+
+func FuzzExtInfo(data []byte) int {
+ if len(data) < binary.Size(btfExtHeader{}) {
+ return -1
+ }
+
+ table := stringTable("\x00foo\x00barfoo\x00")
+ info, err := parseExtInfo(bytes.NewReader(data), internal.NativeEndian, table)
+ if err != nil {
+ if info != nil {
+ panic("info is not nil")
+ }
+ return 0
+ }
+ if info == nil {
+ panic("info is nil")
+ }
+ return 1
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/strings.go b/vendor/github.com/cilium/ebpf/internal/btf/strings.go
new file mode 100644
index 000000000..8782643a0
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/strings.go
@@ -0,0 +1,60 @@
+package btf
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+)
+
+type stringTable []byte
+
+func readStringTable(r io.Reader) (stringTable, error) {
+ contents, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, fmt.Errorf("can't read string table: %v", err)
+ }
+
+ if len(contents) < 1 {
+ return nil, errors.New("string table is empty")
+ }
+
+ if contents[0] != '\x00' {
+ return nil, errors.New("first item in string table is non-empty")
+ }
+
+ if contents[len(contents)-1] != '\x00' {
+ return nil, errors.New("string table isn't null terminated")
+ }
+
+ return stringTable(contents), nil
+}
+
+func (st stringTable) Lookup(offset uint32) (string, error) {
+ if int64(offset) > int64(^uint(0)>>1) {
+ return "", fmt.Errorf("offset %d overflows int", offset)
+ }
+
+ pos := int(offset)
+ if pos >= len(st) {
+ return "", fmt.Errorf("offset %d is out of bounds", offset)
+ }
+
+ if pos > 0 && st[pos-1] != '\x00' {
+ return "", fmt.Errorf("offset %d isn't start of a string", offset)
+ }
+
+ str := st[pos:]
+ end := bytes.IndexByte(str, '\x00')
+ if end == -1 {
+ return "", fmt.Errorf("offset %d isn't null terminated", offset)
+ }
+
+ return string(str[:end]), nil
+}
+
// LookupName is like Lookup, but returns the result as a Name.
func (st stringTable) LookupName(offset uint32) (Name, error) {
	str, err := st.Lookup(offset)
	return Name(str), err
}
diff --git a/vendor/github.com/cilium/ebpf/internal/btf/types.go b/vendor/github.com/cilium/ebpf/internal/btf/types.go
new file mode 100644
index 000000000..9e1fd8d0b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/btf/types.go
@@ -0,0 +1,871 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "strings"
+)
+
// maxTypeDepth bounds every walk over a chain of types, guarding against
// cycles and maliciously deep BTF.
const maxTypeDepth = 32

// TypeID identifies a type in a BTF section.
type TypeID uint32

// ID implements part of the Type interface.
func (tid TypeID) ID() TypeID {
	return tid
}

// Type represents a type described by BTF.
type Type interface {
	ID() TypeID

	String() string

	// Make a copy of the type, without copying Type members.
	copy() Type

	// Enumerate all nested Types. Repeated calls must visit nested
	// types in the same order.
	walk(*typeDeque)
}

// namedType is a type with a name.
//
// Most named types simply embed Name.
type namedType interface {
	Type
	name() string
}

// Name identifies a type.
//
// Anonymous types have an empty name.
type Name string

// name implements part of the namedType interface.
func (n Name) name() string {
	return string(n)
}
+
// Void is the unit type of BTF.
//
// It is always type ID 0, has no size and no nested types.
type Void struct{}

func (v *Void) ID() TypeID { return 0 }
func (v *Void) String() string { return "void#0" }
func (v *Void) size() uint32 { return 0 }
func (v *Void) copy() Type { return (*Void)(nil) }
func (v *Void) walk(*typeDeque) {}

// IntEncoding is a bitset of flags describing how the value of an Int is
// to be interpreted.
type IntEncoding byte

const (
	Signed IntEncoding = 1 << iota
	Char
	Bool
)
+
+// Int is an integer of a given length.
+type Int struct {
+ TypeID
+ Name
+
+ // The size of the integer in bytes.
+ Size uint32
+ Encoding IntEncoding
+ // Offset is the starting bit offset. Currently always 0.
+ // See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
+ Offset uint32
+ Bits byte
+}
+
+var _ namedType = (*Int)(nil)
+
+func (i *Int) String() string {
+ var s strings.Builder
+
+ switch {
+ case i.Encoding&Char != 0:
+ s.WriteString("char")
+ case i.Encoding&Bool != 0:
+ s.WriteString("bool")
+ default:
+ if i.Encoding&Signed == 0 {
+ s.WriteRune('u')
+ }
+ s.WriteString("int")
+ fmt.Fprintf(&s, "%d", i.Size*8)
+ }
+
+ fmt.Fprintf(&s, "#%d", i.TypeID)
+
+ if i.Bits > 0 {
+ fmt.Fprintf(&s, "[bits=%d]", i.Bits)
+ }
+
+ return s.String()
+}
+
+func (i *Int) size() uint32 { return i.Size }
+func (i *Int) walk(*typeDeque) {}
+func (i *Int) copy() Type {
+ cpy := *i
+ return &cpy
+}
+
+func (i *Int) isBitfield() bool {
+ return i.Offset > 0
+}
+
// Pointer is a pointer to another type.
type Pointer struct {
	TypeID
	Target Type
}

func (p *Pointer) String() string {
	return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID())
}

// size implements sizer.
// NOTE(review): hard-codes 8-byte pointers, i.e. a 64-bit target —
// confirm this package never needs to describe 32-bit BTF.
func (p *Pointer) size() uint32 { return 8 }
func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) }
func (p *Pointer) copy() Type {
	cpy := *p
	return &cpy
}

// Array is an array with a fixed number of elements.
type Array struct {
	TypeID
	Type Type
	Nelems uint32
}

func (arr *Array) String() string {
	return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems)
}

// walk pushes the element type onto the deque.
func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) }
func (arr *Array) copy() Type {
	cpy := *arr
	return &cpy
}
+
// Struct is a compound type of consecutive members.
type Struct struct {
	TypeID
	Name
	// The size of the struct including padding, in bytes
	Size uint32
	Members []Member
}

func (s *Struct) String() string {
	return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name)
}

// size implements sizer.
func (s *Struct) size() uint32 { return s.Size }

// walk pushes the type of every member onto the deque.
func (s *Struct) walk(tdq *typeDeque) {
	for i := range s.Members {
		tdq.push(&s.Members[i].Type)
	}
}

// copy returns a copy with a detached Members slice. The member Types
// themselves are not copied.
func (s *Struct) copy() Type {
	cpy := *s
	cpy.Members = make([]Member, len(s.Members))
	copy(cpy.Members, s.Members)
	return &cpy
}

// members implements composite.
func (s *Struct) members() []Member {
	return s.Members
}

// Union is a compound type where members occupy the same memory.
type Union struct {
	TypeID
	Name
	// The size of the union including padding, in bytes.
	Size uint32
	Members []Member
}

func (u *Union) String() string {
	return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name)
}

// size implements sizer.
func (u *Union) size() uint32 { return u.Size }

// walk pushes the type of every member onto the deque.
func (u *Union) walk(tdq *typeDeque) {
	for i := range u.Members {
		tdq.push(&u.Members[i].Type)
	}
}

// copy returns a copy with a detached Members slice. The member Types
// themselves are not copied.
func (u *Union) copy() Type {
	cpy := *u
	cpy.Members = make([]Member, len(u.Members))
	copy(cpy.Members, u.Members)
	return &cpy
}

// members implements composite.
func (u *Union) members() []Member {
	return u.Members
}
+
// composite is implemented by types that carry members, i.e. Struct and
// Union.
type composite interface {
	members() []Member
}

var (
	_ composite = (*Struct)(nil)
	_ composite = (*Union)(nil)
)

// Member is part of a Struct or Union.
//
// It is not a valid Type.
type Member struct {
	Name
	Type Type
	// Offset is the bit offset of this member
	Offset uint32
	// BitfieldSize is the size of the bitfield in bits; it is only set
	// when the enclosing compound type uses the kind_flag encoding.
	BitfieldSize uint32
}
+
// Enum lists possible values.
type Enum struct {
	TypeID
	Name
	Values []EnumValue
}

func (e *Enum) String() string {
	return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name)
}

// EnumValue is part of an Enum
//
// It is not a valid Type
type EnumValue struct {
	Name
	Value int32
}

// size implements sizer. Enums are always reported as 4 bytes.
func (e *Enum) size() uint32 { return 4 }
func (e *Enum) walk(*typeDeque) {}

// copy returns a copy with a detached Values slice.
func (e *Enum) copy() Type {
	cpy := *e
	cpy.Values = make([]EnumValue, len(e.Values))
	copy(cpy.Values, e.Values)
	return &cpy
}
+
+// FwdKind is the type of forward declaration.
+type FwdKind int
+
+// Valid types of forward declaration.
+const (
+ FwdStruct FwdKind = iota
+ FwdUnion
+)
+
+func (fk FwdKind) String() string {
+ switch fk {
+ case FwdStruct:
+ return "struct"
+ case FwdUnion:
+ return "union"
+ default:
+ return fmt.Sprintf("%T(%d)", fk, int(fk))
+ }
+}
+
// Fwd is a forward declaration of a Type.
type Fwd struct {
	TypeID
	Name
	Kind FwdKind
}

func (f *Fwd) String() string {
	return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name)
}

// walk is a no-op: a forward declaration has no nested types.
func (f *Fwd) walk(*typeDeque) {}
func (f *Fwd) copy() Type {
	cpy := *f
	return &cpy
}

// Typedef is an alias of a Type.
type Typedef struct {
	TypeID
	Name
	Type Type
}

func (td *Typedef) String() string {
	return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID())
}

// walk pushes the aliased type onto the deque.
func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
func (td *Typedef) copy() Type {
	cpy := *td
	return &cpy
}
+
// Volatile is a qualifier.
type Volatile struct {
	TypeID
	Type Type
}

func (v *Volatile) String() string {
	return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID())
}

// qualify implements qualifier by exposing the wrapped type.
func (v *Volatile) qualify() Type { return v.Type }
func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
func (v *Volatile) copy() Type {
	cpy := *v
	return &cpy
}

// Const is a qualifier.
type Const struct {
	TypeID
	Type Type
}

func (c *Const) String() string {
	return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID())
}

// qualify implements qualifier by exposing the wrapped type.
func (c *Const) qualify() Type { return c.Type }
func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
func (c *Const) copy() Type {
	cpy := *c
	return &cpy
}

// Restrict is a qualifier.
type Restrict struct {
	TypeID
	Type Type
}

func (r *Restrict) String() string {
	return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID())
}

// qualify implements qualifier by exposing the wrapped type.
func (r *Restrict) qualify() Type { return r.Type }
func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
func (r *Restrict) copy() Type {
	cpy := *r
	return &cpy
}
+
// Func is a function definition.
type Func struct {
	TypeID
	Name
	Type Type
}

func (f *Func) String() string {
	return fmt.Sprintf("func#%d[%q proto=#%d]", f.TypeID, f.Name, f.Type.ID())
}

// walk pushes the function's prototype onto the deque.
func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
func (f *Func) copy() Type {
	cpy := *f
	return &cpy
}

// FuncProto is a function declaration.
type FuncProto struct {
	TypeID
	Return Type
	Params []FuncParam
}

func (fp *FuncProto) String() string {
	var s strings.Builder
	fmt.Fprintf(&s, "proto#%d[", fp.TypeID)
	for _, param := range fp.Params {
		// Every parameter is followed by ", ", including the last one.
		fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID())
	}
	fmt.Fprintf(&s, "return=#%d]", fp.Return.ID())
	return s.String()
}

// walk pushes the return type and every parameter type onto the deque.
func (fp *FuncProto) walk(tdq *typeDeque) {
	tdq.push(&fp.Return)
	for i := range fp.Params {
		tdq.push(&fp.Params[i].Type)
	}
}

// copy returns a copy with a detached Params slice. The parameter Types
// themselves are not copied.
func (fp *FuncProto) copy() Type {
	cpy := *fp
	cpy.Params = make([]FuncParam, len(fp.Params))
	copy(cpy.Params, fp.Params)
	return &cpy
}

// FuncParam is a single parameter of a FuncProto.
//
// It is not a valid Type.
type FuncParam struct {
	Name
	Type Type
}
+
// Var is a global variable.
type Var struct {
	TypeID
	Name
	Type Type
}

func (v *Var) String() string {
	// TODO: Linkage
	return fmt.Sprintf("var#%d[%q]", v.TypeID, v.Name)
}

// walk pushes the variable's type onto the deque.
func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
func (v *Var) copy() Type {
	cpy := *v
	return &cpy
}

// Datasec is a global program section containing data.
type Datasec struct {
	TypeID
	Name
	Size uint32
	Vars []VarSecinfo
}

func (ds *Datasec) String() string {
	return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name)
}

// size implements sizer.
func (ds *Datasec) size() uint32 { return ds.Size }

// walk pushes the type of every contained variable onto the deque.
func (ds *Datasec) walk(tdq *typeDeque) {
	for i := range ds.Vars {
		tdq.push(&ds.Vars[i].Type)
	}
}

// copy returns a copy with a detached Vars slice. The variable Types
// themselves are not copied.
func (ds *Datasec) copy() Type {
	cpy := *ds
	cpy.Vars = make([]VarSecinfo, len(ds.Vars))
	copy(cpy.Vars, ds.Vars)
	return &cpy
}

// VarSecinfo describes variable in a Datasec
//
// It is not a valid Type.
type VarSecinfo struct {
	Type Type
	Offset uint32
	Size uint32
}
+
// sizer is implemented by types that have a known size in bytes.
type sizer interface {
	size() uint32
}

// Compile-time interface checks.
var (
	_ sizer = (*Int)(nil)
	_ sizer = (*Pointer)(nil)
	_ sizer = (*Struct)(nil)
	_ sizer = (*Union)(nil)
	_ sizer = (*Enum)(nil)
	_ sizer = (*Datasec)(nil)
)

// qualifier is implemented by type modifiers (const, volatile, restrict)
// that wrap an underlying type.
type qualifier interface {
	qualify() Type
}

// Compile-time interface checks.
var (
	_ qualifier = (*Const)(nil)
	_ qualifier = (*Restrict)(nil)
	_ qualifier = (*Volatile)(nil)
)
+
// Sizeof returns the size of a type in bytes.
//
// Returns an error if the size can't be computed.
func Sizeof(typ Type) (int, error) {
	var (
		// n accumulates the element count of nested arrays; elem holds
		// the size of the innermost sized type once it is reached.
		n = int64(1)
		elem int64
	)

	for i := 0; i < maxTypeDepth; i++ {
		switch v := typ.(type) {
		case *Array:
			// Guard n *= Nelems against int64 overflow.
			if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
				return 0, errors.New("overflow")
			}

			// Arrays may be of zero length, which allows
			// n to be zero as well.
			n *= int64(v.Nelems)
			typ = v.Type
			continue

		case sizer:
			elem = int64(v.size())

		case *Typedef:
			typ = v.Type
			continue

		case qualifier:
			typ = v.qualify()
			continue

		default:
			return 0, fmt.Errorf("unrecognized type %T", typ)
		}

		// Only the sizer case falls through to here; every other branch
		// continues the loop. Guard n * elem against int64 overflow.
		if n > 0 && elem > math.MaxInt64/n {
			return 0, errors.New("overflow")
		}

		size := n * elem
		// Reject totals that don't fit this platform's int.
		if int64(int(size)) != size {
			return 0, errors.New("overflow")
		}

		return int(size), nil
	}

	return 0, errors.New("exceeded type depth")
}
+
// copy a Type recursively.
//
// typ may form a cycle.
//
// The copy is made iteratively: copies records every type duplicated so
// far, so a type reachable more than once (including via a cycle) is
// copied exactly once and the copy preserves the original graph shape.
func copyType(typ Type) Type {
	var (
		copies = make(map[Type]Type)
		work typeDeque
	)

	for t := &typ; t != nil; t = work.pop() {
		// *t is the identity of the type.
		if cpy := copies[*t]; cpy != nil {
			// Already copied: just redirect the pointer.
			*t = cpy
			continue
		}

		cpy := (*t).copy()
		copies[*t] = cpy
		*t = cpy

		// Mark any nested types for copying.
		cpy.walk(&work)
	}

	return typ
}
+
// typeDeque keeps track of pointers to types which still
// need to be visited.
//
// It is a growable ring buffer: read and write increase monotonically,
// and mask (always one less than a power-of-two length) maps them onto
// indices of types.
type typeDeque struct {
	types []*Type
	read, write uint64
	mask uint64
}

// push adds a type to the stack.
func (dq *typeDeque) push(t *Type) {
	if dq.write-dq.read < uint64(len(dq.types)) {
		dq.types[dq.write&dq.mask] = t
		dq.write++
		return
	}

	// Buffer is full: grow to the next power of two (at least 8),
	// unwrapping the ring so the oldest element lands at index 0.
	new := len(dq.types) * 2
	if new == 0 {
		new = 8
	}

	types := make([]*Type, new)
	pivot := dq.read & dq.mask
	n := copy(types, dq.types[pivot:])
	n += copy(types[n:], dq.types[:pivot])
	types[n] = t

	dq.types = types
	dq.mask = uint64(new) - 1
	dq.read, dq.write = 0, uint64(n+1)
}

// shift returns the first element or nil, making the deque usable as a
// FIFO queue.
func (dq *typeDeque) shift() *Type {
	if dq.read == dq.write {
		return nil
	}

	index := dq.read & dq.mask
	t := dq.types[index]
	dq.types[index] = nil // drop the reference so it can be collected
	dq.read++
	return t
}

// pop returns the last element or nil, making the deque usable as a
// LIFO stack.
func (dq *typeDeque) pop() *Type {
	if dq.read == dq.write {
		return nil
	}

	dq.write--
	index := dq.write & dq.mask
	t := dq.types[index]
	dq.types[index] = nil // drop the reference so it can be collected
	return t
}

// all returns all elements.
//
// The deque is empty after calling this method.
func (dq *typeDeque) all() []*Type {
	length := dq.write - dq.read
	types := make([]*Type, 0, length)
	for t := dq.shift(); t != nil; t = dq.shift() {
		types = append(types, t)
	}
	return types
}
+
+// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
+// it into a graph of Types connected via pointers.
+//
+// Returns a map of named types (so, where NameOff is non-zero) and a slice of types
+// indexed by TypeID. Since BTF ignores compilation units, multiple types may share
+// the same name. A Type may form a cyclic graph by pointing at itself.
+func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[string][]namedType, err error) {
+ type fixupDef struct {
+ id TypeID
+ expectedKind btfKind
+ typ *Type
+ }
+
+ var fixups []fixupDef
+ fixup := func(id TypeID, expectedKind btfKind, typ *Type) {
+ fixups = append(fixups, fixupDef{id, expectedKind, typ})
+ }
+
+ convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
+ // NB: The fixup below relies on pre-allocating this array to
+ // work, since otherwise append might re-allocate members.
+ members := make([]Member, 0, len(raw))
+ for i, btfMember := range raw {
+ name, err := rawStrings.LookupName(btfMember.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
+ }
+ m := Member{
+ Name: name,
+ Offset: btfMember.Offset,
+ }
+ if kindFlag {
+ m.BitfieldSize = btfMember.Offset >> 24
+ m.Offset &= 0xffffff
+ }
+ members = append(members, m)
+ }
+ for i := range members {
+ fixup(raw[i].Type, kindUnknown, &members[i].Type)
+ }
+ return members, nil
+ }
+
+ types = make([]Type, 0, len(rawTypes))
+ types = append(types, (*Void)(nil))
+ namedTypes = make(map[string][]namedType)
+
+ for i, raw := range rawTypes {
+ var (
+ // Void is defined to always be type ID 0, and is thus
+ // omitted from BTF.
+ id = TypeID(i + 1)
+ typ Type
+ )
+
+ name, err := rawStrings.LookupName(raw.NameOff)
+ if err != nil {
+ return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err)
+ }
+
+ switch raw.Kind() {
+ case kindInt:
+ encoding, offset, bits := intEncoding(*raw.data.(*uint32))
+ typ = &Int{id, name, raw.Size(), encoding, offset, bits}
+
+ case kindPointer:
+ ptr := &Pointer{id, nil}
+ fixup(raw.Type(), kindUnknown, &ptr.Target)
+ typ = ptr
+
+ case kindArray:
+ btfArr := raw.data.(*btfArray)
+
+ // IndexType is unused according to btf.rst.
+ // Don't make it available right now.
+ arr := &Array{id, nil, btfArr.Nelems}
+ fixup(btfArr.Type, kindUnknown, &arr.Type)
+ typ = arr
+
+ case kindStruct:
+ members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
+ if err != nil {
+ return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
+ }
+ typ = &Struct{id, name, raw.Size(), members}
+
+ case kindUnion:
+ members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
+ if err != nil {
+ return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
+ }
+ typ = &Union{id, name, raw.Size(), members}
+
+ case kindEnum:
+ rawvals := raw.data.([]btfEnum)
+ vals := make([]EnumValue, 0, len(rawvals))
+ for i, btfVal := range rawvals {
+ name, err := rawStrings.LookupName(btfVal.NameOff)
+ if err != nil {
+ return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err)
+ }
+ vals = append(vals, EnumValue{
+ Name: name,
+ Value: btfVal.Val,
+ })
+ }
+ typ = &Enum{id, name, vals}
+
+ case kindForward:
+ if raw.KindFlag() {
+ typ = &Fwd{id, name, FwdUnion}
+ } else {
+ typ = &Fwd{id, name, FwdStruct}
+ }
+
+ case kindTypedef:
+ typedef := &Typedef{id, name, nil}
+ fixup(raw.Type(), kindUnknown, &typedef.Type)
+ typ = typedef
+
+ case kindVolatile:
+ volatile := &Volatile{id, nil}
+ fixup(raw.Type(), kindUnknown, &volatile.Type)
+ typ = volatile
+
+ case kindConst:
+ cnst := &Const{id, nil}
+ fixup(raw.Type(), kindUnknown, &cnst.Type)
+ typ = cnst
+
+ case kindRestrict:
+ restrict := &Restrict{id, nil}
+ fixup(raw.Type(), kindUnknown, &restrict.Type)
+ typ = restrict
+
+ case kindFunc:
+ fn := &Func{id, name, nil}
+ fixup(raw.Type(), kindFuncProto, &fn.Type)
+ typ = fn
+
+ case kindFuncProto:
+ rawparams := raw.data.([]btfParam)
+ params := make([]FuncParam, 0, len(rawparams))
+ for i, param := range rawparams {
+ name, err := rawStrings.LookupName(param.NameOff)
+ if err != nil {
+ return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
+ }
+ params = append(params, FuncParam{
+ Name: name,
+ })
+ }
+ for i := range params {
+ fixup(rawparams[i].Type, kindUnknown, &params[i].Type)
+ }
+
+ fp := &FuncProto{id, nil, params}
+ fixup(raw.Type(), kindUnknown, &fp.Return)
+ typ = fp
+
+ case kindVar:
+ v := &Var{id, name, nil}
+ fixup(raw.Type(), kindUnknown, &v.Type)
+ typ = v
+
+ case kindDatasec:
+ btfVars := raw.data.([]btfVarSecinfo)
+ vars := make([]VarSecinfo, 0, len(btfVars))
+ for _, btfVar := range btfVars {
+ vars = append(vars, VarSecinfo{
+ Offset: btfVar.Offset,
+ Size: btfVar.Size,
+ })
+ }
+ for i := range vars {
+ fixup(btfVars[i].Type, kindVar, &vars[i].Type)
+ }
+ typ = &Datasec{id, name, raw.SizeType, vars}
+
+ default:
+ return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
+ }
+
+ types = append(types, typ)
+
+ if named, ok := typ.(namedType); ok {
+ if name := essentialName(named.name()); name != "" {
+ namedTypes[name] = append(namedTypes[name], named)
+ }
+ }
+ }
+
+ for _, fixup := range fixups {
+ i := int(fixup.id)
+ if i >= len(types) {
+ return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
+ }
+
+ // Default void (id 0) to unknown
+ rawKind := kindUnknown
+ if i > 0 {
+ rawKind = rawTypes[i-1].Kind()
+ }
+
+ if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected {
+ return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind)
+ }
+
+ *fixup.typ = types[i]
+ }
+
+ return types, namedTypes, nil
+}
+
// essentialName strips a trailing "___"-style disambiguation suffix from
// name, e.g. "foo___v1" becomes "foo". A name that only starts with "___"
// is returned as-is.
func essentialName(name string) string {
	if idx := strings.LastIndex(name, "___"); idx > 0 {
		return name[:idx]
	}
	return name
}