author    dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>  2023-04-03 11:16:17 +0200
committer GitHub <noreply@github.com>  2023-04-03 11:16:17 +0200
commit    57dc742c76d7876a2457594715a7b5bc2c9a92bd (patch)
tree      76be1ec744face5bf4f617d4c9fca084707e4268 /vendor/github.com/cilium/ebpf/btf
parent    [bugfix/frontend] Preload css styles (#1638) (diff)
download  gotosocial-57dc742c76d7876a2457594715a7b5bc2c9a92bd.tar.xz
[chore]: Bump github.com/KimMachineGun/automemlimit from 0.2.4 to 0.2.5 (#1666)
Bumps [github.com/KimMachineGun/automemlimit](https://github.com/KimMachineGun/automemlimit) from 0.2.4 to 0.2.5.

- [Release notes](https://github.com/KimMachineGun/automemlimit/releases)
- [Commits](https://github.com/KimMachineGun/automemlimit/compare/v0.2.4...v0.2.5)

---
updated-dependencies:
- dependency-name: github.com/KimMachineGun/automemlimit
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Diffstat (limited to 'vendor/github.com/cilium/ebpf/btf')
-rw-r--r--  vendor/github.com/cilium/ebpf/btf/btf.go                897
-rw-r--r--  vendor/github.com/cilium/ebpf/btf/btf_types.go          343
-rw-r--r--  vendor/github.com/cilium/ebpf/btf/btf_types_string.go    44
-rw-r--r--  vendor/github.com/cilium/ebpf/btf/core.go               972
-rw-r--r--  vendor/github.com/cilium/ebpf/btf/doc.go                  5
-rw-r--r--  vendor/github.com/cilium/ebpf/btf/ext_info.go           721
-rw-r--r--  vendor/github.com/cilium/ebpf/btf/format.go             319
-rw-r--r--  vendor/github.com/cilium/ebpf/btf/handle.go             121
-rw-r--r--  vendor/github.com/cilium/ebpf/btf/strings.go            128
-rw-r--r--  vendor/github.com/cilium/ebpf/btf/types.go             1212
10 files changed, 4762 insertions, 0 deletions
diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go
new file mode 100644
index 000000000..a5969332a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/btf.go
@@ -0,0 +1,897 @@
+package btf
+
+import (
+ "bufio"
+ "bytes"
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "reflect"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+const btfMagic = 0xeB9F
+
+// Errors returned by BTF functions.
+var (
+ ErrNotSupported = internal.ErrNotSupported
+ ErrNotFound = errors.New("not found")
+ ErrNoExtendedInfo = errors.New("no extended info")
+)
+
+// ID represents the unique ID of a BTF object.
+type ID = sys.BTFID
+
+// Spec represents decoded BTF.
+type Spec struct {
+ // Data from .BTF.
+ rawTypes []rawType
+ strings *stringTable
+
+ // All types contained by the spec. For the base type, the position of
+ // a type in the slice is its ID.
+ types types
+
+ // Type IDs indexed by type.
+ typeIDs map[Type]TypeID
+
+ // Types indexed by essential name.
+ // Includes all struct flavors and types with the same name.
+ namedTypes map[essentialName][]Type
+
+ byteOrder binary.ByteOrder
+}
+
+type btfHeader struct {
+ Magic uint16
+ Version uint8
+ Flags uint8
+ HdrLen uint32
+
+ TypeOff uint32
+ TypeLen uint32
+ StringOff uint32
+ StringLen uint32
+}
+
+// typeStart returns the offset from the beginning of the .BTF section
+// to the start of its type entries.
+func (h *btfHeader) typeStart() int64 {
+ return int64(h.HdrLen + h.TypeOff)
+}
+
+// stringStart returns the offset from the beginning of the .BTF section
+// to the start of its string table.
+func (h *btfHeader) stringStart() int64 {
+ return int64(h.HdrLen + h.StringOff)
+}
+
+// LoadSpec opens file and calls LoadSpecFromReader on it.
+func LoadSpec(file string) (*Spec, error) {
+ fh, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer fh.Close()
+
+ return LoadSpecFromReader(fh)
+}
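
For illustration, a minimal sketch of the loading entry point; the object path and type name are hypothetical, and error handling is reduced to log calls:

	package main

	import (
		"log"

		"github.com/cilium/ebpf/btf"
	)

	func main() {
		// "./bpf_prog.o" is a hypothetical compiled BPF object (or raw BTF blob).
		spec, err := btf.LoadSpec("./bpf_prog.o")
		if err != nil {
			log.Fatalf("loading BTF: %v", err)
		}

		// AnyTypeByName fails if no type, or more than one, has this name.
		typ, err := spec.AnyTypeByName("my_config") // hypothetical type name
		if err != nil {
			log.Fatalf("looking up type: %v", err)
		}
		log.Printf("found %s", typ)
	}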
+
+// LoadSpecFromReader reads from an ELF or a raw BTF blob.
+//
+// Returns ErrNotFound if reading from an ELF which contains no BTF.
+func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ if bo := guessRawBTFByteOrder(rd); bo != nil {
+ // Try to parse a naked BTF blob. This will return an error if
+ // we encounter a Datasec, since we can't fix it up.
+ spec, err := loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil)
+ return spec, err
+ }
+
+ return nil, err
+ }
+
+ return loadSpecFromELF(file)
+}
+
+// LoadSpecAndExtInfosFromReader reads from an ELF.
+//
+// ExtInfos may be nil if the ELF doesn't contain section metadata.
+// Returns ErrNotFound if the ELF contains no BTF.
+func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ spec, err := loadSpecFromELF(file)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ extInfos, err := loadExtInfosFromELF(file, spec.types, spec.strings)
+ if err != nil && !errors.Is(err, ErrNotFound) {
+ return nil, nil, err
+ }
+
+ return spec, extInfos, nil
+}
+
+// variableOffsets extracts all symbol offsets from an ELF and indexes them by
+// section and variable name.
+//
+// References to variables in BTF data sections carry unsigned 32-bit offsets.
+// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
+// beyond this range. Since these symbols cannot be described by BTF info,
+// ignore them here.
+func variableOffsets(file *internal.SafeELFFile) (map[variable]uint32, error) {
+ symbols, err := file.Symbols()
+ if err != nil {
+ return nil, fmt.Errorf("can't read symbols: %v", err)
+ }
+
+ variableOffsets := make(map[variable]uint32)
+ for _, symbol := range symbols {
+ if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
+ // Ignore things like SHN_ABS
+ continue
+ }
+
+ if symbol.Value > math.MaxUint32 {
+ // VarSecinfo offset is u32, cannot reference symbols in higher regions.
+ continue
+ }
+
+ if int(symbol.Section) >= len(file.Sections) {
+ return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section)
+ }
+
+ secName := file.Sections[symbol.Section].Name
+ variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value)
+ }
+
+ return variableOffsets, nil
+}
+
+func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
+ var (
+ btfSection *elf.Section
+ sectionSizes = make(map[string]uint32)
+ )
+
+ for _, sec := range file.Sections {
+ switch sec.Name {
+ case ".BTF":
+ btfSection = sec
+ default:
+ if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
+ break
+ }
+
+ if sec.Size > math.MaxUint32 {
+ return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
+ }
+
+ sectionSizes[sec.Name] = uint32(sec.Size)
+ }
+ }
+
+ if btfSection == nil {
+ return nil, fmt.Errorf("btf: %w", ErrNotFound)
+ }
+
+ vars, err := variableOffsets(file)
+ if err != nil {
+ return nil, err
+ }
+
+ if btfSection.ReaderAt == nil {
+ return nil, fmt.Errorf("compressed BTF is not supported")
+ }
+
+ rawTypes, rawStrings, err := parseBTF(btfSection.ReaderAt, file.ByteOrder, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ err = fixupDatasec(rawTypes, rawStrings, sectionSizes, vars)
+ if err != nil {
+ return nil, err
+ }
+
+ return inflateSpec(rawTypes, rawStrings, file.ByteOrder, nil)
+}
+
+func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder,
+ baseTypes types, baseStrings *stringTable) (*Spec, error) {
+
+ rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings)
+ if err != nil {
+ return nil, err
+ }
+
+ return inflateSpec(rawTypes, rawStrings, bo, baseTypes)
+}
+
+func inflateSpec(rawTypes []rawType, rawStrings *stringTable, bo binary.ByteOrder,
+ baseTypes types) (*Spec, error) {
+
+ types, err := inflateRawTypes(rawTypes, baseTypes, rawStrings)
+ if err != nil {
+ return nil, err
+ }
+
+ typeIDs, typesByName := indexTypes(types, TypeID(len(baseTypes)))
+
+ return &Spec{
+ rawTypes: rawTypes,
+ namedTypes: typesByName,
+ typeIDs: typeIDs,
+ types: types,
+ strings: rawStrings,
+ byteOrder: bo,
+ }, nil
+}
+
+func indexTypes(types []Type, typeIDOffset TypeID) (map[Type]TypeID, map[essentialName][]Type) {
+ namedTypes := 0
+ for _, typ := range types {
+ if typ.TypeName() != "" {
+ // Do a pre-pass to figure out how big types by name has to be.
+ // Most types have unique names, so it's OK to ignore essentialName
+ // here.
+ namedTypes++
+ }
+ }
+
+ typeIDs := make(map[Type]TypeID, len(types))
+ typesByName := make(map[essentialName][]Type, namedTypes)
+
+ for i, typ := range types {
+ if name := newEssentialName(typ.TypeName()); name != "" {
+ typesByName[name] = append(typesByName[name], typ)
+ }
+ typeIDs[typ] = TypeID(i) + typeIDOffset
+ }
+
+ return typeIDs, typesByName
+}
+
+// LoadKernelSpec returns the current kernel's BTF information.
+//
+// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
+// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
+func LoadKernelSpec() (*Spec, error) {
+ fh, err := os.Open("/sys/kernel/btf/vmlinux")
+ if err == nil {
+ defer fh.Close()
+
+ return loadRawSpec(fh, internal.NativeEndian, nil, nil)
+ }
+
+ file, err := findVMLinux()
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return loadSpecFromELF(file)
+}
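
A sketch of probing the running kernel's BTF (hypothetical helper, assuming imports of errors and github.com/cilium/ebpf/btf); as the doc comment above notes, this requires a kernel that exposes BTF:

	func kernelHasType(name string) (bool, error) {
		spec, err := btf.LoadKernelSpec()
		if err != nil {
			return false, err // wraps ErrNotSupported if the kernel has no BTF
		}

		_, err = spec.AnyTypesByName(name)
		if errors.Is(err, btf.ErrNotFound) {
			return false, nil
		}
		return err == nil, err
	}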
+
+// findVMLinux scans multiple well-known paths for vmlinux kernel images.
+func findVMLinux() (*internal.SafeELFFile, error) {
+ release, err := internal.KernelRelease()
+ if err != nil {
+ return nil, err
+ }
+
+ // use same list of locations as libbpf
+ // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
+ locations := []string{
+ "/boot/vmlinux-%s",
+ "/lib/modules/%s/vmlinux-%[1]s",
+ "/lib/modules/%s/build/vmlinux",
+ "/usr/lib/modules/%s/kernel/vmlinux",
+ "/usr/lib/debug/boot/vmlinux-%s",
+ "/usr/lib/debug/boot/vmlinux-%s.debug",
+ "/usr/lib/debug/lib/modules/%s/vmlinux",
+ }
+
+ for _, loc := range locations {
+ file, err := internal.OpenSafeELFFile(fmt.Sprintf(loc, release))
+ if errors.Is(err, os.ErrNotExist) {
+ continue
+ }
+ return file, err
+ }
+
+ return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported)
+}
+
+// parseBTFHeader parses the header of the .BTF section.
+func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) {
+ var header btfHeader
+ if err := binary.Read(r, bo, &header); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ if header.Magic != btfMagic {
+ return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
+ }
+
+ if header.Version != 1 {
+ return nil, fmt.Errorf("unexpected version %v", header.Version)
+ }
+
+ if header.Flags != 0 {
+ return nil, fmt.Errorf("unsupported flags %v", header.Flags)
+ }
+
+ remainder := int64(header.HdrLen) - int64(binary.Size(&header))
+ if remainder < 0 {
+ return nil, errors.New("header length shorter than btfHeader size")
+ }
+
+ if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil {
+ return nil, fmt.Errorf("header padding: %v", err)
+ }
+
+ return &header, nil
+}
+
+func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder {
+ buf := new(bufio.Reader)
+ for _, bo := range []binary.ByteOrder{
+ binary.LittleEndian,
+ binary.BigEndian,
+ } {
+ buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64))
+ if _, err := parseBTFHeader(buf, bo); err == nil {
+ return bo
+ }
+ }
+
+ return nil
+}
+
+// parseBTF reads a .BTF section into memory and parses it into a list of
+// raw types and a string table.
+func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([]rawType, *stringTable, error) {
+ buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64)
+ header, err := parseBTFHeader(buf, bo)
+ if err != nil {
+ return nil, nil, fmt.Errorf("parsing .BTF header: %v", err)
+ }
+
+ rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)),
+ baseStrings)
+ if err != nil {
+ return nil, nil, fmt.Errorf("can't read type names: %w", err)
+ }
+
+ buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen)))
+ rawTypes, err := readTypes(buf, bo, header.TypeLen)
+ if err != nil {
+ return nil, nil, fmt.Errorf("can't read types: %w", err)
+ }
+
+ return rawTypes, rawStrings, nil
+}
+
+type variable struct {
+ section string
+ name string
+}
+
+func fixupDatasec(rawTypes []rawType, rawStrings *stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error {
+ for i, rawType := range rawTypes {
+ if rawType.Kind() != kindDatasec {
+ continue
+ }
+
+ name, err := rawStrings.Lookup(rawType.NameOff)
+ if err != nil {
+ return err
+ }
+
+ if name == ".kconfig" || name == ".ksyms" {
+ return fmt.Errorf("reference to %s: %w", name, ErrNotSupported)
+ }
+
+ if rawTypes[i].SizeType != 0 {
+ continue
+ }
+
+ size, ok := sectionSizes[name]
+ if !ok {
+ return fmt.Errorf("data section %s: missing size", name)
+ }
+
+ rawTypes[i].SizeType = size
+
+ secinfos := rawType.data.([]btfVarSecinfo)
+ for j, secInfo := range secinfos {
+ id := int(secInfo.Type - 1)
+ if id >= len(rawTypes) {
+ return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j)
+ }
+
+ varName, err := rawStrings.Lookup(rawTypes[id].NameOff)
+ if err != nil {
+ return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err)
+ }
+
+ offset, ok := variableOffsets[variable{name, varName}]
+ if !ok {
+ return fmt.Errorf("data section %s: missing offset for variable %s", name, varName)
+ }
+
+ secinfos[j].Offset = offset
+ }
+ }
+
+ return nil
+}
+
+// Copy creates a copy of Spec.
+func (s *Spec) Copy() *Spec {
+ types := copyTypes(s.types, nil)
+
+ typeIDOffset := TypeID(0)
+ if len(s.types) != 0 {
+ typeIDOffset = s.typeIDs[s.types[0]]
+ }
+ typeIDs, typesByName := indexTypes(types, typeIDOffset)
+
+ // NB: Other parts of spec are not copied since they are immutable.
+ return &Spec{
+ s.rawTypes,
+ s.strings,
+ types,
+ typeIDs,
+ typesByName,
+ s.byteOrder,
+ }
+}
+
+type marshalOpts struct {
+ ByteOrder binary.ByteOrder
+ StripFuncLinkage bool
+}
+
+func (s *Spec) marshal(opts marshalOpts) ([]byte, error) {
+ var (
+ buf bytes.Buffer
+ header = new(btfHeader)
+ headerLen = binary.Size(header)
+ )
+
+ // Reserve space for the header. We have to write it last since
+ // we don't know the size of the type section yet.
+ _, _ = buf.Write(make([]byte, headerLen))
+
+ // Write type section, just after the header.
+ for _, raw := range s.rawTypes {
+ switch {
+ case opts.StripFuncLinkage && raw.Kind() == kindFunc:
+ raw.SetLinkage(StaticFunc)
+ }
+
+ if err := raw.Marshal(&buf, opts.ByteOrder); err != nil {
+ return nil, fmt.Errorf("can't marshal BTF: %w", err)
+ }
+ }
+
+ typeLen := uint32(buf.Len() - headerLen)
+
+ // Write string section after type section.
+ stringsLen := s.strings.Length()
+ buf.Grow(stringsLen)
+ if err := s.strings.Marshal(&buf); err != nil {
+ return nil, err
+ }
+
+ // Fill out the header, and write it out.
+ header = &btfHeader{
+ Magic: btfMagic,
+ Version: 1,
+ Flags: 0,
+ HdrLen: uint32(headerLen),
+ TypeOff: 0,
+ TypeLen: typeLen,
+ StringOff: typeLen,
+ StringLen: uint32(stringsLen),
+ }
+
+ raw := buf.Bytes()
+ err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header)
+ if err != nil {
+ return nil, fmt.Errorf("can't write header: %v", err)
+ }
+
+ return raw, nil
+}
+
+type sliceWriter []byte
+
+func (sw sliceWriter) Write(p []byte) (int, error) {
+ if len(p) != len(sw) {
+ return 0, errors.New("size doesn't match")
+ }
+
+ return copy(sw, p), nil
+}
+
+// TypeByID returns the BTF Type with the given type ID.
+//
+// Returns an error wrapping ErrNotFound if a Type with the given ID
+// does not exist in the Spec.
+func (s *Spec) TypeByID(id TypeID) (Type, error) {
+ return s.types.ByID(id)
+}
+
+// TypeID returns the ID for a given Type.
+//
+// Returns an error wrapping ErrNotFound if the type isn't part of the Spec.
+func (s *Spec) TypeID(typ Type) (TypeID, error) {
+ if _, ok := typ.(*Void); ok {
+ // Equality is weird for void, since it is a zero sized type.
+ return 0, nil
+ }
+
+ id, ok := s.typeIDs[typ]
+ if !ok {
+ return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
+ }
+
+ return id, nil
+}
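
Since typeIDs is built from the same slice as types, TypeByID and TypeID round-trip; a sketch (hypothetical helper, assuming fmt and btf imports):

	func roundTrip(spec *btf.Spec, id btf.TypeID) error {
		typ, err := spec.TypeByID(id)
		if err != nil {
			return err
		}

		got, err := spec.TypeID(typ)
		if err != nil {
			return err
		}
		if got != id {
			return fmt.Errorf("round trip mismatch: %d != %d", got, id)
		}
		return nil
	}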
+
+// AnyTypesByName returns a list of BTF Types with the given name.
+//
+// If the BTF blob describes multiple compilation units like vmlinux, multiple
+// Types with the same name and kind can exist, but might not describe the same
+// data structure.
+//
+// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
+func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
+ types := s.namedTypes[newEssentialName(name)]
+ if len(types) == 0 {
+ return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
+ }
+
+ // Return a copy to prevent changes to namedTypes.
+ result := make([]Type, 0, len(types))
+ for _, t := range types {
+ // Match against the full name, not just the essential one
+ // in case the type being looked up is a struct flavor.
+ if t.TypeName() == name {
+ result = append(result, t)
+ }
+ }
+ return result, nil
+}
+
+// AnyTypeByName returns a Type with the given name.
+//
+// Returns an error if multiple types of that name exist.
+func (s *Spec) AnyTypeByName(name string) (Type, error) {
+ types, err := s.AnyTypesByName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(types) > 1 {
+ return nil, fmt.Errorf("found multiple types: %v", types)
+ }
+
+ return types[0], nil
+}
+
+// TypeByName searches for a Type with a specific name. Since multiple
+// Types with the same name can exist, the parameter typ is taken to
+// narrow down the search in case of a clash.
+//
+// typ must be a non-nil pointer to an implementation of a Type.
+// On success, the address of the found Type will be copied to typ.
+//
+// Returns an error wrapping ErrNotFound if no matching
+// Type exists in the Spec. If multiple candidates are found,
+// an error is returned.
+func (s *Spec) TypeByName(name string, typ interface{}) error {
+ typValue := reflect.ValueOf(typ)
+ if typValue.Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer", typ)
+ }
+
+ typPtr := typValue.Elem()
+ if !typPtr.CanSet() {
+ return fmt.Errorf("%T cannot be set", typ)
+ }
+
+ wanted := typPtr.Type()
+ if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) {
+ return fmt.Errorf("%T does not satisfy Type interface", typ)
+ }
+
+ types, err := s.AnyTypesByName(name)
+ if err != nil {
+ return err
+ }
+
+ var candidate Type
+ for _, typ := range types {
+ if reflect.TypeOf(typ) != wanted {
+ continue
+ }
+
+ if candidate != nil {
+ return fmt.Errorf("type %s: multiple candidates for %T", name, typ)
+ }
+
+ candidate = typ
+ }
+
+ if candidate == nil {
+ return fmt.Errorf("type %s: %w", name, ErrNotFound)
+ }
+
+ typPtr.Set(reflect.ValueOf(candidate))
+
+ return nil
+}
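
A sketch of the typed lookup: passing &s where s is a *btf.Struct restricts candidates to struct flavours of the name (the helper name is hypothetical):

	func findStruct(spec *btf.Spec, name string) (*btf.Struct, error) {
		var s *btf.Struct
		if err := spec.TypeByName(name, &s); err != nil {
			return nil, err
		}
		return s, nil
	}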
+
+// LoadSplitSpecFromReader loads split BTF from a reader.
+//
+// Types from base are used to resolve references in the split BTF.
+// The returned Spec only contains types from the split BTF, not from the base.
+func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
+ return loadRawSpec(r, internal.NativeEndian, base.types, base.strings)
+}
+
+// TypesIterator iterates over types of a given spec.
+type TypesIterator struct {
+ spec *Spec
+ index int
+ // The last visited type in the spec.
+ Type Type
+}
+
+// Iterate returns the types iterator.
+func (s *Spec) Iterate() *TypesIterator {
+ return &TypesIterator{spec: s, index: 0}
+}
+
+// Next returns true as long as there are any remaining types.
+func (iter *TypesIterator) Next() bool {
+ if len(iter.spec.types) <= iter.index {
+ return false
+ }
+
+ iter.Type = iter.spec.types[iter.index]
+ iter.index++
+ return true
+}
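
A sketch of walking every type via the iterator (hypothetical helper):

	func countStructs(spec *btf.Spec) int {
		var n int
		iter := spec.Iterate()
		for iter.Next() {
			if _, ok := iter.Type.(*btf.Struct); ok {
				n++
			}
		}
		return n
	}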
+
+// Handle is a reference to BTF loaded into the kernel.
+type Handle struct {
+ fd *sys.FD
+
+ // Size of the raw BTF in bytes.
+ size uint32
+}
+
+// NewHandle loads BTF into the kernel.
+//
+// Returns ErrNotSupported if BTF is not supported.
+func NewHandle(spec *Spec) (*Handle, error) {
+ if err := haveBTF(); err != nil {
+ return nil, err
+ }
+
+ if spec.byteOrder != internal.NativeEndian {
+ return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian)
+ }
+
+ btf, err := spec.marshal(marshalOpts{
+ ByteOrder: internal.NativeEndian,
+ StripFuncLinkage: haveFuncLinkage() != nil,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("can't marshal BTF: %w", err)
+ }
+
+ if uint64(len(btf)) > math.MaxUint32 {
+ return nil, errors.New("BTF exceeds the maximum size")
+ }
+
+ attr := &sys.BtfLoadAttr{
+ Btf: sys.NewSlicePointer(btf),
+ BtfSize: uint32(len(btf)),
+ }
+
+ fd, err := sys.BtfLoad(attr)
+ if err != nil {
+ logBuf := make([]byte, 64*1024)
+ attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
+ attr.BtfLogSize = uint32(len(logBuf))
+ attr.BtfLogLevel = 1
+ // NB: The syscall will never return ENOSPC as of 5.18-rc4.
+ _, _ = sys.BtfLoad(attr)
+ return nil, internal.ErrorWithLog(err, logBuf)
+ }
+
+ return &Handle{fd, attr.BtfSize}, nil
+}
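
A sketch of loading a Spec into the kernel and releasing it (hypothetical helper, assuming log and btf imports); this needs a 5.1+ kernel with BTF and sufficient privileges:

	func loadAndClose(spec *btf.Spec) error {
		handle, err := btf.NewHandle(spec)
		if err != nil {
			return err // wraps ErrNotSupported on kernels without BTF
		}
		defer handle.Close()

		log.Printf("loaded BTF as fd %d", handle.FD())
		return nil
	}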
+
+// NewHandleFromID returns the BTF handle for a given id.
+//
+// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
+//
+// Returns ErrNotExist if there is no BTF with the given id.
+//
+// Requires CAP_SYS_ADMIN.
+func NewHandleFromID(id ID) (*Handle, error) {
+ fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
+ Id: uint32(id),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
+ }
+
+ info, err := newHandleInfoFromFD(fd)
+ if err != nil {
+ _ = fd.Close()
+ return nil, err
+ }
+
+ return &Handle{fd, info.size}, nil
+}
+
+// Spec parses the kernel BTF into Go types.
+//
+// base is used to decode split BTF and may be nil.
+func (h *Handle) Spec(base *Spec) (*Spec, error) {
+ var btfInfo sys.BtfInfo
+ btfBuffer := make([]byte, h.size)
+ btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
+
+ if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
+ return nil, err
+ }
+
+ var baseTypes types
+ var baseStrings *stringTable
+ if base != nil {
+ baseTypes = base.types
+ baseStrings = base.strings
+ }
+
+ return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, baseTypes, baseStrings)
+}
+
+// Close destroys the handle.
+//
+// Subsequent calls to FD will return an invalid value.
+func (h *Handle) Close() error {
+ if h == nil {
+ return nil
+ }
+
+ return h.fd.Close()
+}
+
+// FD returns the file descriptor for the handle.
+func (h *Handle) FD() int {
+ return h.fd.Int()
+}
+
+// Info returns metadata about the handle.
+func (h *Handle) Info() (*HandleInfo, error) {
+ return newHandleInfoFromFD(h.fd)
+}
+
+func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte {
+ const minHeaderLength = 24
+
+ typesLen := uint32(binary.Size(types))
+ header := btfHeader{
+ Magic: btfMagic,
+ Version: 1,
+ HdrLen: minHeaderLength,
+ TypeOff: 0,
+ TypeLen: typesLen,
+ StringOff: typesLen,
+ StringLen: uint32(len(strings)),
+ }
+
+ buf := new(bytes.Buffer)
+ _ = binary.Write(buf, bo, &header)
+ _ = binary.Write(buf, bo, types)
+ buf.Write(strings)
+
+ return buf.Bytes()
+}
+
+var haveBTF = internal.FeatureTest("BTF", "5.1", func() error {
+ var (
+ types struct {
+ Integer btfType
+ Var btfType
+ btfVar struct{ Linkage uint32 }
+ }
+ strings = []byte{0, 'a', 0}
+ )
+
+ // We use a BTF_KIND_VAR here, to make sure that
+ // the kernel understands BTF at least as well as we
+ // do. BTF_KIND_VAR was introduced ~5.1.
+ types.Integer.SetKind(kindPointer)
+ types.Var.NameOff = 1
+ types.Var.SetKind(kindVar)
+ types.Var.SizeType = 1
+
+ btf := marshalBTF(&types, strings, internal.NativeEndian)
+
+ fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
+ Btf: sys.NewSlicePointer(btf),
+ BtfSize: uint32(len(btf)),
+ })
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
+ // Treat both EINVAL and EPERM as not supported: loading the program
+ // might still succeed without BTF.
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+
+ fd.Close()
+ return nil
+})
+
+var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error {
+ if err := haveBTF(); err != nil {
+ return err
+ }
+
+ var (
+ types struct {
+ FuncProto btfType
+ Func btfType
+ }
+ strings = []byte{0, 'a', 0}
+ )
+
+ types.FuncProto.SetKind(kindFuncProto)
+ types.Func.SetKind(kindFunc)
+ types.Func.SizeType = 1 // aka FuncProto
+ types.Func.NameOff = 1
+ types.Func.SetLinkage(GlobalFunc)
+
+ btf := marshalBTF(&types, strings, internal.NativeEndian)
+
+ fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
+ Btf: sys.NewSlicePointer(btf),
+ BtfSize: uint32(len(btf)),
+ })
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+
+ fd.Close()
+ return nil
+})
diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types.go b/vendor/github.com/cilium/ebpf/btf/btf_types.go
new file mode 100644
index 000000000..481018049
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/btf_types.go
@@ -0,0 +1,343 @@
+package btf
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage
+
+// btfKind describes a Type.
+type btfKind uint8
+
+// Equivalents of the BTF_KIND_* constants.
+const (
+ kindUnknown btfKind = iota
+ kindInt
+ kindPointer
+ kindArray
+ kindStruct
+ kindUnion
+ kindEnum
+ kindForward
+ kindTypedef
+ kindVolatile
+ kindConst
+ kindRestrict
+ // Added ~4.20
+ kindFunc
+ kindFuncProto
+ // Added ~5.1
+ kindVar
+ kindDatasec
+ // Added ~5.13
+ kindFloat
+)
+
+// FuncLinkage describes BTF function linkage metadata.
+type FuncLinkage int
+
+// Equivalent of enum btf_func_linkage.
+const (
+ StaticFunc FuncLinkage = iota // static
+ GlobalFunc // global
+ ExternFunc // extern
+)
+
+// VarLinkage describes BTF variable linkage metadata.
+type VarLinkage int
+
+const (
+ StaticVar VarLinkage = iota // static
+ GlobalVar // global
+ ExternVar // extern
+)
+
+const (
+ btfTypeKindShift = 24
+ btfTypeKindLen = 5
+ btfTypeVlenShift = 0
+ btfTypeVlenMask = 16
+ btfTypeKindFlagShift = 31
+ btfTypeKindFlagMask = 1
+)
+
+// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
+type btfType struct {
+ NameOff uint32
+ /* "info" bits arrangement
+ * bits 0-15: vlen (e.g. # of struct's members), linkage
+ * bits 16-23: unused
+ * bits 24-28: kind (e.g. int, ptr, array...etc)
+ * bits 29-30: unused
+ * bit 31: kind_flag, currently used by
+ * struct, union and fwd
+ */
+ Info uint32
+ /* "size" is used by INT, ENUM, STRUCT and UNION.
+ * "size" tells the size of the type it is describing.
+ *
+ * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
+ * FUNC and FUNC_PROTO.
+ * "type" is a type_id referring to another type.
+ */
+ SizeType uint32
+}
+
+func (k btfKind) String() string {
+ switch k {
+ case kindUnknown:
+ return "Unknown"
+ case kindInt:
+ return "Integer"
+ case kindPointer:
+ return "Pointer"
+ case kindArray:
+ return "Array"
+ case kindStruct:
+ return "Struct"
+ case kindUnion:
+ return "Union"
+ case kindEnum:
+ return "Enumeration"
+ case kindForward:
+ return "Forward"
+ case kindTypedef:
+ return "Typedef"
+ case kindVolatile:
+ return "Volatile"
+ case kindConst:
+ return "Const"
+ case kindRestrict:
+ return "Restrict"
+ case kindFunc:
+ return "Function"
+ case kindFuncProto:
+ return "Function Proto"
+ case kindVar:
+ return "Variable"
+ case kindDatasec:
+ return "Section"
+ case kindFloat:
+ return "Float"
+ default:
+ return fmt.Sprintf("Unknown (%d)", k)
+ }
+}
+
+func mask(len uint32) uint32 {
+ return (1 << len) - 1
+}
+
+func readBits(value, len, shift uint32) uint32 {
+ return (value >> shift) & mask(len)
+}
+
+func writeBits(value, len, shift, new uint32) uint32 {
+ value &^= mask(len) << shift
+ value |= (new & mask(len)) << shift
+ return value
+}
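
A worked example of the bit packing (in-package, since the helpers are unexported; the function name is illustrative). kindTypedef is 8, and the kind field occupies btfTypeKindLen bits at btfTypeKindShift:

	func exampleKindBits() {
		// mask(btfTypeKindLen) == 0x1f, so the kind lives in bits 24-28 of Info.
		info := writeBits(0, btfTypeKindLen, btfTypeKindShift, uint32(kindTypedef))
		fmt.Printf("%#x\n", info) // 0x8000000

		kind := btfKind(readBits(info, btfTypeKindLen, btfTypeKindShift))
		fmt.Println(kind) // Typedef
	}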
+
+func (bt *btfType) info(len, shift uint32) uint32 {
+ return readBits(bt.Info, len, shift)
+}
+
+func (bt *btfType) setInfo(value, len, shift uint32) {
+ bt.Info = writeBits(bt.Info, len, shift, value)
+}
+
+func (bt *btfType) Kind() btfKind {
+ return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift))
+}
+
+func (bt *btfType) SetKind(kind btfKind) {
+ bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift)
+}
+
+func (bt *btfType) Vlen() int {
+ return int(bt.info(btfTypeVlenMask, btfTypeVlenShift))
+}
+
+func (bt *btfType) SetVlen(vlen int) {
+ bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
+}
+
+func (bt *btfType) KindFlag() bool {
+ return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
+}
+
+func (bt *btfType) Linkage() FuncLinkage {
+ return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
+}
+
+func (bt *btfType) SetLinkage(linkage FuncLinkage) {
+ bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
+}
+
+func (bt *btfType) Type() TypeID {
+ // TODO: Panic here if wrong kind?
+ return TypeID(bt.SizeType)
+}
+
+func (bt *btfType) Size() uint32 {
+ // TODO: Panic here if wrong kind?
+ return bt.SizeType
+}
+
+func (bt *btfType) SetSize(size uint32) {
+ bt.SizeType = size
+}
+
+type rawType struct {
+ btfType
+ data interface{}
+}
+
+func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
+ if err := binary.Write(w, bo, &rt.btfType); err != nil {
+ return err
+ }
+
+ if rt.data == nil {
+ return nil
+ }
+
+ return binary.Write(w, bo, rt.data)
+}
+
+// btfInt encodes additional data for integers.
+//
+// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b
+// ? = undefined
+// e = encoding
+// o = offset (bitfields?)
+// b = bits (bitfields)
+type btfInt struct {
+ Raw uint32
+}
+
+const (
+ btfIntEncodingLen = 4
+ btfIntEncodingShift = 24
+ btfIntOffsetLen = 8
+ btfIntOffsetShift = 16
+ btfIntBitsLen = 8
+ btfIntBitsShift = 0
+)
+
+func (bi btfInt) Encoding() IntEncoding {
+ return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift))
+}
+
+func (bi *btfInt) SetEncoding(e IntEncoding) {
+ bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e))
+}
+
+func (bi btfInt) Offset() Bits {
+ return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift))
+}
+
+func (bi *btfInt) SetOffset(offset uint32) {
+ bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset)
+}
+
+func (bi btfInt) Bits() Bits {
+ return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift))
+}
+
+func (bi *btfInt) SetBits(bits byte) {
+ bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits))
+}
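
A worked example of the btfInt layout above (in-package, illustrative name): a plain signed 32-bit integer sets e = 1 in bits 24-27 and b = 32 in bits 0-7:

	func exampleBtfInt() {
		var bi btfInt
		bi.SetEncoding(Signed) // Signed is IntEncoding(1), defined in types.go
		bi.SetBits(32)
		fmt.Printf("%#x\n", bi.Raw) // 0x1000020
	}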
+
+type btfArray struct {
+ Type TypeID
+ IndexType TypeID
+ Nelems uint32
+}
+
+type btfMember struct {
+ NameOff uint32
+ Type TypeID
+ Offset uint32
+}
+
+type btfVarSecinfo struct {
+ Type TypeID
+ Offset uint32
+ Size uint32
+}
+
+type btfVariable struct {
+ Linkage uint32
+}
+
+type btfEnum struct {
+ NameOff uint32
+ Val int32
+}
+
+type btfParam struct {
+ NameOff uint32
+ Type TypeID
+}
+
+func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, error) {
+ var header btfType
+ // Because of the interleaving between types and struct members it is
+ // difficult to precompute the number of raw types this will parse.
+ // This "guess" is a good first estimate.
+ sizeOfbtfType := uintptr(binary.Size(btfType{}))
+ tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2
+ types := make([]rawType, 0, tyMaxCount)
+
+ for id := TypeID(1); ; id++ {
+ if err := binary.Read(r, bo, &header); err == io.EOF {
+ return types, nil
+ } else if err != nil {
+ return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
+ }
+
+ var data interface{}
+ switch header.Kind() {
+ case kindInt:
+ data = new(btfInt)
+ case kindPointer:
+ case kindArray:
+ data = new(btfArray)
+ case kindStruct:
+ fallthrough
+ case kindUnion:
+ data = make([]btfMember, header.Vlen())
+ case kindEnum:
+ data = make([]btfEnum, header.Vlen())
+ case kindForward:
+ case kindTypedef:
+ case kindVolatile:
+ case kindConst:
+ case kindRestrict:
+ case kindFunc:
+ case kindFuncProto:
+ data = make([]btfParam, header.Vlen())
+ case kindVar:
+ data = new(btfVariable)
+ case kindDatasec:
+ data = make([]btfVarSecinfo, header.Vlen())
+ case kindFloat:
+ default:
+ return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
+ }
+
+ if data == nil {
+ types = append(types, rawType{header, nil})
+ continue
+ }
+
+ if err := binary.Read(r, bo, data); err != nil {
+ return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
+ }
+
+ types = append(types, rawType{header, data})
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types_string.go b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
new file mode 100644
index 000000000..0e0c17d68
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
@@ -0,0 +1,44 @@
+// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT.
+
+package btf
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[StaticFunc-0]
+ _ = x[GlobalFunc-1]
+ _ = x[ExternFunc-2]
+}
+
+const _FuncLinkage_name = "staticglobalextern"
+
+var _FuncLinkage_index = [...]uint8{0, 6, 12, 18}
+
+func (i FuncLinkage) String() string {
+ if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) {
+ return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[StaticVar-0]
+ _ = x[GlobalVar-1]
+ _ = x[ExternVar-2]
+}
+
+const _VarLinkage_name = "staticglobalextern"
+
+var _VarLinkage_index = [...]uint8{0, 6, 12, 18}
+
+func (i VarLinkage) String() string {
+ if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) {
+ return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go
new file mode 100644
index 000000000..c48754809
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/core.go
@@ -0,0 +1,972 @@
+package btf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+)
+
+// Code in this file is derived from libbpf, which is available under a BSD
+// 2-Clause license.
+
+// COREFixup is the result of computing a CO-RE relocation for a target.
+type COREFixup struct {
+ kind coreKind
+ local uint32
+ target uint32
+ // True if there is no valid fixup. The instruction is replaced with an
+ // invalid dummy.
+ poison bool
+ // True if the validation of the local value should be skipped. Used by
+ // some kinds of bitfield relocations.
+ skipLocalValidation bool
+}
+
+func (f *COREFixup) equal(other COREFixup) bool {
+ return f.local == other.local && f.target == other.target
+}
+
+func (f *COREFixup) String() string {
+ if f.poison {
+ return fmt.Sprintf("%s=poison", f.kind)
+ }
+ return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target)
+}
+
+func (f *COREFixup) Apply(ins *asm.Instruction) error {
+ if f.poison {
+ const badRelo = 0xbad2310
+
+ *ins = asm.BuiltinFunc(badRelo).Call()
+ return nil
+ }
+
+ switch class := ins.OpCode.Class(); class {
+ case asm.LdXClass, asm.StClass, asm.StXClass:
+ if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset {
+ return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local)
+ }
+
+ if f.target > math.MaxInt16 {
+ return fmt.Errorf("offset %d exceeds MaxInt16", f.target)
+ }
+
+ ins.Offset = int16(f.target)
+
+ case asm.LdClass:
+ if !ins.IsConstantLoad(asm.DWord) {
+ return fmt.Errorf("not a dword-sized immediate load")
+ }
+
+ if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
+ return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
+ }
+
+ ins.Constant = int64(f.target)
+
+ case asm.ALUClass:
+ if ins.OpCode.ALUOp() == asm.Swap {
+ return fmt.Errorf("relocation against swap")
+ }
+
+ fallthrough
+
+ case asm.ALU64Class:
+ if src := ins.OpCode.Source(); src != asm.ImmSource {
+ return fmt.Errorf("invalid source %s", src)
+ }
+
+ if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
+ return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins)
+ }
+
+ if f.target > math.MaxInt32 {
+ return fmt.Errorf("immediate %d exceeds MaxInt32", f.target)
+ }
+
+ ins.Constant = int64(f.target)
+
+ default:
+ return fmt.Errorf("invalid class %s", class)
+ }
+
+ return nil
+}
+
+func (f COREFixup) isNonExistent() bool {
+ return f.kind.checksForExistence() && f.target == 0
+}
+
+// coreKind is the type of CO-RE relocation as specified in BPF source code.
+type coreKind uint32
+
+const (
+ reloFieldByteOffset coreKind = iota /* field byte offset */
+ reloFieldByteSize /* field size in bytes */
+ reloFieldExists /* field existence in target kernel */
+ reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
+ reloFieldLShiftU64 /* bitfield-specific left bitshift */
+ reloFieldRShiftU64 /* bitfield-specific right bitshift */
+ reloTypeIDLocal /* type ID in local BPF object */
+ reloTypeIDTarget /* type ID in target kernel */
+ reloTypeExists /* type existence in target kernel */
+ reloTypeSize /* type size in bytes */
+ reloEnumvalExists /* enum value existence in target kernel */
+ reloEnumvalValue /* enum value integer value */
+)
+
+func (k coreKind) checksForExistence() bool {
+ return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
+}
+
+func (k coreKind) String() string {
+ switch k {
+ case reloFieldByteOffset:
+ return "byte_off"
+ case reloFieldByteSize:
+ return "byte_sz"
+ case reloFieldExists:
+ return "field_exists"
+ case reloFieldSigned:
+ return "signed"
+ case reloFieldLShiftU64:
+ return "lshift_u64"
+ case reloFieldRShiftU64:
+ return "rshift_u64"
+ case reloTypeIDLocal:
+ return "local_type_id"
+ case reloTypeIDTarget:
+ return "target_type_id"
+ case reloTypeExists:
+ return "type_exists"
+ case reloTypeSize:
+ return "type_size"
+ case reloEnumvalExists:
+ return "enumval_exists"
+ case reloEnumvalValue:
+ return "enumval_value"
+ default:
+ return "unknown"
+ }
+}
+
+// CORERelocate calculates the difference in types between local and target.
+//
+// Returns a list of fixups which can be applied to instructions to make them
+// match the target type(s).
+//
+// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
+// for relos[i].
+func CORERelocate(local, target *Spec, relos []*CORERelocation) ([]COREFixup, error) {
+ if local.byteOrder != target.byteOrder {
+ return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder)
+ }
+
+ type reloGroup struct {
+ relos []*CORERelocation
+ // Position of each relocation in relos.
+ indices []int
+ }
+
+ // Split relocations into per Type lists.
+ relosByType := make(map[Type]*reloGroup)
+ result := make([]COREFixup, len(relos))
+ for i, relo := range relos {
+ if relo.kind == reloTypeIDLocal {
+ // Filtering out reloTypeIDLocal here makes our lives a lot easier
+ // down the line, since it doesn't have a target at all.
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+ return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+ }
+
+ id, err := local.TypeID(relo.typ)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", relo.kind, err)
+ }
+
+ result[i] = COREFixup{
+ kind: relo.kind,
+ local: uint32(id),
+ target: uint32(id),
+ }
+ continue
+ }
+
+ group, ok := relosByType[relo.typ]
+ if !ok {
+ group = &reloGroup{}
+ relosByType[relo.typ] = group
+ }
+ group.relos = append(group.relos, relo)
+ group.indices = append(group.indices, i)
+ }
+
+ for localType, group := range relosByType {
+ localTypeName := localType.TypeName()
+ if localTypeName == "" {
+ return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
+ }
+
+ targets := target.namedTypes[newEssentialName(localTypeName)]
+ fixups, err := coreCalculateFixups(local, target, localType, targets, group.relos)
+ if err != nil {
+ return nil, fmt.Errorf("relocate %s: %w", localType, err)
+ }
+
+ for j, index := range group.indices {
+ result[index] = fixups[j]
+ }
+ }
+
+ return result, nil
+}
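
A sketch of consuming the result (assuming imports of fmt, github.com/cilium/ebpf/asm and .../btf). Pairing each relocation with the instruction that carries it normally comes from ExtInfos; the parallel insns slice here is an assumption for illustration:

	func relocateAll(local, target *btf.Spec, relos []*btf.CORERelocation, insns []*asm.Instruction) error {
		fixups, err := btf.CORERelocate(local, target, relos)
		if err != nil {
			return err
		}

		// fixups[i] is the solution for relos[i], per the doc comment above.
		for i, fixup := range fixups {
			if err := fixup.Apply(insns[i]); err != nil {
				return fmt.Errorf("applying fixup %d: %w", i, err)
			}
		}
		return nil
	}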
+
+var errAmbiguousRelocation = errors.New("ambiguous relocation")
+var errImpossibleRelocation = errors.New("impossible relocation")
+
+// coreCalculateFixups calculates the fixups for the given relocations using
+// the "best" target.
+//
+// The best target is determined by scoring: the less poisoning we have to do
+// the better the target is.
+func coreCalculateFixups(localSpec, targetSpec *Spec, local Type, targets []Type, relos []*CORERelocation) ([]COREFixup, error) {
+ localID, err := localSpec.TypeID(local)
+ if err != nil {
+ return nil, fmt.Errorf("local type ID: %w", err)
+ }
+ local = Copy(local, UnderlyingType)
+
+ bestScore := len(relos)
+ var bestFixups []COREFixup
+ for i := range targets {
+ targetID, err := targetSpec.TypeID(targets[i])
+ if err != nil {
+ return nil, fmt.Errorf("target type ID: %w", err)
+ }
+ target := Copy(targets[i], UnderlyingType)
+
+ score := 0 // lower is better
+ fixups := make([]COREFixup, 0, len(relos))
+ for _, relo := range relos {
+ fixup, err := coreCalculateFixup(localSpec.byteOrder, local, localID, target, targetID, relo)
+ if err != nil {
+ return nil, fmt.Errorf("target %s: %w", target, err)
+ }
+ if fixup.poison || fixup.isNonExistent() {
+ score++
+ }
+ fixups = append(fixups, fixup)
+ }
+
+ if score > bestScore {
+ // We have a better target already, ignore this one.
+ continue
+ }
+
+ if score < bestScore {
+ // This is the best target yet, use it.
+ bestScore = score
+ bestFixups = fixups
+ continue
+ }
+
+ // Some other target has the same score as the current one. Make sure
+ // the fixups agree with each other.
+ for i, fixup := range bestFixups {
+ if !fixup.equal(fixups[i]) {
+ return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation)
+ }
+ }
+ }
+
+ if bestFixups == nil {
+ // Nothing at all matched, probably because there are no suitable
+ // targets at all.
+ //
+ // Poison everything except checksForExistence.
+ bestFixups = make([]COREFixup, len(relos))
+ for i, relo := range relos {
+ if relo.kind.checksForExistence() {
+ bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0}
+ } else {
+ bestFixups[i] = COREFixup{kind: relo.kind, poison: true}
+ }
+ }
+ }
+
+ return bestFixups, nil
+}
+
+// coreCalculateFixup calculates the fixup for a single local type, target type
+// and relocation.
+func coreCalculateFixup(byteOrder binary.ByteOrder, local Type, localID TypeID, target Type, targetID TypeID, relo *CORERelocation) (COREFixup, error) {
+ fixup := func(local, target uint32) (COREFixup, error) {
+ return COREFixup{kind: relo.kind, local: local, target: target}, nil
+ }
+ fixupWithoutValidation := func(local, target uint32) (COREFixup, error) {
+ return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil
+ }
+ poison := func() (COREFixup, error) {
+ if relo.kind.checksForExistence() {
+ return fixup(1, 0)
+ }
+ return COREFixup{kind: relo.kind, poison: true}, nil
+ }
+ zero := COREFixup{}
+
+ switch relo.kind {
+ case reloTypeIDTarget, reloTypeSize, reloTypeExists:
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+ return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+ }
+
+ err := coreAreTypesCompatible(local, target)
+ if errors.Is(err, errImpossibleRelocation) {
+ return poison()
+ }
+ if err != nil {
+ return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
+ }
+
+ switch relo.kind {
+ case reloTypeExists:
+ return fixup(1, 1)
+
+ case reloTypeIDTarget:
+ return fixup(uint32(localID), uint32(targetID))
+
+ case reloTypeSize:
+ localSize, err := Sizeof(local)
+ if err != nil {
+ return zero, err
+ }
+
+ targetSize, err := Sizeof(target)
+ if err != nil {
+ return zero, err
+ }
+
+ return fixup(uint32(localSize), uint32(targetSize))
+ }
+
+ case reloEnumvalValue, reloEnumvalExists:
+ localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
+ if errors.Is(err, errImpossibleRelocation) {
+ return poison()
+ }
+ if err != nil {
+ return zero, fmt.Errorf("relocation %s: %w", relo.kind, err)
+ }
+
+ switch relo.kind {
+ case reloEnumvalExists:
+ return fixup(1, 1)
+
+ case reloEnumvalValue:
+ return fixup(uint32(localValue.Value), uint32(targetValue.Value))
+ }
+
+ case reloFieldSigned:
+ switch local.(type) {
+ case *Enum:
+ return fixup(1, 1)
+ case *Int:
+ return fixup(
+ uint32(local.(*Int).Encoding&Signed),
+ uint32(target.(*Int).Encoding&Signed),
+ )
+ default:
+ return fixupWithoutValidation(0, 0)
+ }
+
+ case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64:
+ if _, ok := target.(*Fwd); ok {
+ // We can't relocate fields using a forward declaration, so
+ // skip it. If a non-forward declaration is present in the BTF
+ // we'll find it in one of the other iterations.
+ return poison()
+ }
+
+ localField, targetField, err := coreFindField(local, relo.accessor, target)
+ if errors.Is(err, errImpossibleRelocation) {
+ return poison()
+ }
+ if err != nil {
+ return zero, fmt.Errorf("target %s: %w", target, err)
+ }
+
+ maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
+ f.skipLocalValidation = localField.bitfieldSize > 0
+ return f, err
+ }
+
+ switch relo.kind {
+ case reloFieldExists:
+ return fixup(1, 1)
+
+ case reloFieldByteOffset:
+ return maybeSkipValidation(fixup(localField.offset, targetField.offset))
+
+ case reloFieldByteSize:
+ localSize, err := Sizeof(localField.Type)
+ if err != nil {
+ return zero, err
+ }
+
+ targetSize, err := Sizeof(targetField.Type)
+ if err != nil {
+ return zero, err
+ }
+ return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize)))
+
+ case reloFieldLShiftU64:
+ var target uint32
+ if byteOrder == binary.LittleEndian {
+ targetSize, err := targetField.sizeBits()
+ if err != nil {
+ return zero, err
+ }
+
+ target = uint32(64 - targetField.bitfieldOffset - targetSize)
+ } else {
+ loadWidth, err := Sizeof(targetField.Type)
+ if err != nil {
+ return zero, err
+ }
+
+ target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
+ }
+ return fixupWithoutValidation(0, target)
+
+ case reloFieldRShiftU64:
+ targetSize, err := targetField.sizeBits()
+ if err != nil {
+ return zero, err
+ }
+
+ return fixupWithoutValidation(0, uint32(64-targetSize))
+ }
+ }
+
+ return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported)
+}
+
+/* coreAccessor contains a path through a struct. It contains at least one index.
+ *
+ * The interpretation depends on the kind of the relocation. The following is
+ * taken from struct bpf_core_relo in libbpf_internal.h:
+ *
+ * - for field-based relocations, string encodes an accessed field using
+ * a sequence of field and array indices, separated by colon (:). It's
+ * conceptually very close to LLVM's getelementptr ([0]) instruction's
+ * arguments for identifying offset to a field.
+ * - for type-based relocations, strings is expected to be just "0";
+ * - for enum value-based relocations, string contains an index of enum
+ * value within its enum type;
+ *
+ * Example to provide a better feel.
+ *
+ * struct sample {
+ * int a;
+ * struct {
+ * int b[10];
+ * };
+ * };
+ *
+ * struct sample *s = ...;
+ * int x = &s->a; // encoded as "0:0" (a is field #0)
+ * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
+ * // b is field #0 inside anon struct, accessing elem #5)
+ * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ */
+type coreAccessor []int
+
+func parseCOREAccessor(accessor string) (coreAccessor, error) {
+ if accessor == "" {
+ return nil, fmt.Errorf("empty accessor")
+ }
+
+ parts := strings.Split(accessor, ":")
+ result := make(coreAccessor, 0, len(parts))
+ for _, part := range parts {
+ // 31 bits to avoid overflowing int on 32 bit platforms.
+ index, err := strconv.ParseUint(part, 10, 31)
+ if err != nil {
+ return nil, fmt.Errorf("accessor index %q: %s", part, err)
+ }
+
+ result = append(result, int(index))
+ }
+
+ return result, nil
+}
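
A worked example (in-package, illustrative name) using the "0:1:0:5" accessor from the comment above: base element 0, anonymous struct at member 1, field b at member 0, array element 5:

	func exampleAccessor() {
		acc, err := parseCOREAccessor("0:1:0:5")
		if err != nil {
			panic(err)
		}
		fmt.Println([]int(acc)) // [0 1 0 5]
	}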
+
+func (ca coreAccessor) String() string {
+ strs := make([]string, 0, len(ca))
+ for _, i := range ca {
+ strs = append(strs, strconv.Itoa(i))
+ }
+ return strings.Join(strs, ":")
+}
+
+func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
+ e, ok := t.(*Enum)
+ if !ok {
+ return nil, fmt.Errorf("not an enum: %s", t)
+ }
+
+ if len(ca) > 1 {
+ return nil, fmt.Errorf("invalid accessor %s for enum", ca)
+ }
+
+ i := ca[0]
+ if i >= len(e.Values) {
+ return nil, fmt.Errorf("invalid index %d for %s", i, e)
+ }
+
+ return &e.Values[i], nil
+}
+
+// coreField represents the position of a "child" of a composite type from the
+// start of that type.
+//
+// /- start of composite
+// | offset * 8 | bitfieldOffset | bitfieldSize | ... |
+// \- start of field end of field -/
+type coreField struct {
+ Type Type
+
+ // The position of the field from the start of the composite type in bytes.
+ offset uint32
+
+ // The offset of the bitfield in bits from the start of the field.
+ bitfieldOffset Bits
+
+ // The size of the bitfield in bits.
+ //
+ // Zero if the field is not a bitfield.
+ bitfieldSize Bits
+}
+
+func (cf *coreField) adjustOffsetToNthElement(n int) error {
+ size, err := Sizeof(cf.Type)
+ if err != nil {
+ return err
+ }
+
+ cf.offset += uint32(n) * uint32(size)
+ return nil
+}
+
+func (cf *coreField) adjustOffsetBits(offset Bits) error {
+ align, err := alignof(cf.Type)
+ if err != nil {
+ return err
+ }
+
+ // We can compute the load offset by:
+ // 1) converting the bit offset to bytes with a flooring division.
+ // 2) dividing and multiplying that offset by the alignment, yielding the
+ // load size aligned offset.
+ offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)
+
+ // The number of bits remaining is the bit offset less the number of bits
+ // we can "skip" with the aligned offset.
+ cf.bitfieldOffset = offset - Bits(offsetBytes*8)
+
+ // We know that cf.offset is aligned to at least align since we get it
+ // from the compiler via BTF. Adding an aligned offsetBytes preserves the
+ // alignment.
+ cf.offset += offsetBytes
+ return nil
+}
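
A worked example of the arithmetic (in-package, illustrative name): a bitfield starting at bit 44 inside a 4-byte-aligned int. The flooring division gives offsetBytes = (44/8)/4*4 = 4, leaving 44-32 = 12 bits of in-load offset:

	func exampleAdjustOffsetBits() {
		cf := coreField{Type: &Int{Size: 4}} // Int is defined in types.go
		if err := cf.adjustOffsetBits(44); err != nil {
			panic(err)
		}
		fmt.Println(cf.offset, cf.bitfieldOffset) // 4 12
	}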
+
+func (cf *coreField) sizeBits() (Bits, error) {
+ if cf.bitfieldSize > 0 {
+ return cf.bitfieldSize, nil
+ }
+
+ // Someone is trying to access a non-bitfield via a bit shift relocation.
+ // This happens when a field changes from a bitfield to a regular field
+ // between kernel versions. Synthesise the size to make the shifts work.
+ size, err := Sizeof(cf.Type)
+ if err != nil {
+ return 0, err
+ }
+ return Bits(size * 8), nil
+}
+
+// coreFindField descends into the local type using the accessor and tries to
+// find an equivalent field in target at each step.
+//
+// Returns the field and the offset of the field from the start of
+// target in bits.
+func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) {
+ local := coreField{Type: localT}
+ target := coreField{Type: targetT}
+
+ // The first index is used to offset a pointer of the base type like
+ // when accessing an array.
+ if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
+ return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
+ }
+
+ var localMaybeFlex, targetMaybeFlex bool
+ for i, acc := range localAcc[1:] {
+ switch localType := local.Type.(type) {
+ case composite:
+ // For composite types acc is used to find the field in the local type,
+ // and then we try to find a field in target with the same name.
+ localMembers := localType.members()
+ if acc >= len(localMembers) {
+ return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType)
+ }
+
+ localMember := localMembers[acc]
+ if localMember.Name == "" {
+ _, ok := localMember.Type.(composite)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
+ }
+
+ // This is an anonymous struct or union, ignore it.
+ local = coreField{
+ Type: localMember.Type,
+ offset: local.offset + localMember.Offset.Bytes(),
+ }
+ localMaybeFlex = false
+ continue
+ }
+
+ targetType, ok := target.Type.(composite)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
+ }
+
+ targetMember, last, err := coreFindMember(targetType, localMember.Name)
+ if err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ local = coreField{
+ Type: localMember.Type,
+ offset: local.offset,
+ bitfieldSize: localMember.BitfieldSize,
+ }
+ localMaybeFlex = acc == len(localMembers)-1
+
+ target = coreField{
+ Type: targetMember.Type,
+ offset: target.offset,
+ bitfieldSize: targetMember.BitfieldSize,
+ }
+ targetMaybeFlex = last
+
+ if local.bitfieldSize == 0 && target.bitfieldSize == 0 {
+ local.offset += localMember.Offset.Bytes()
+ target.offset += targetMember.Offset.Bytes()
+ break
+ }
+
+ // Either of the members is a bitfield. Make sure we're at the
+ // end of the accessor.
+ if next := i + 1; next < len(localAcc[1:]) {
+ return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield")
+ }
+
+ if err := local.adjustOffsetBits(localMember.Offset); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ if err := target.adjustOffsetBits(targetMember.Offset); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ case *Array:
+ // For arrays, acc is the index in the target.
+ targetType, ok := target.Type.(*Array)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
+ }
+
+ if localType.Nelems == 0 && !localMaybeFlex {
+ return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
+ }
+ if targetType.Nelems == 0 && !targetMaybeFlex {
+ return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
+ }
+
+ if localType.Nelems > 0 && acc >= int(localType.Nelems) {
+ return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
+ }
+ if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
+ return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
+ }
+
+ local = coreField{
+ Type: localType.Type,
+ offset: local.offset,
+ }
+ localMaybeFlex = false
+
+ if err := local.adjustOffsetToNthElement(acc); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ target = coreField{
+ Type: targetType.Type,
+ offset: target.offset,
+ }
+ targetMaybeFlex = false
+
+ if err := target.adjustOffsetToNthElement(acc); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ default:
+ return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
+ }
+
+ if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
+ return coreField{}, coreField{}, err
+ }
+ }
+
+ return local, target, nil
+}
+
+// coreFindMember finds a member in a composite type while handling anonymous
+// structs and unions.
+func coreFindMember(typ composite, name string) (Member, bool, error) {
+ if name == "" {
+ return Member{}, false, errors.New("can't search for anonymous member")
+ }
+
+ type offsetTarget struct {
+ composite
+ offset Bits
+ }
+
+ targets := []offsetTarget{{typ, 0}}
+ visited := make(map[composite]bool)
+
+ for i := 0; i < len(targets); i++ {
+ target := targets[i]
+
+ // Only visit targets once to prevent infinite recursion.
+ if visited[target] {
+ continue
+ }
+ if len(visited) >= maxTypeDepth {
+ // This check is different than libbpf, which restricts the entire
+ // path to BPF_CORE_SPEC_MAX_LEN items.
+ return Member{}, false, fmt.Errorf("type is nested too deep")
+ }
+ visited[target] = true
+
+ members := target.members()
+ for j, member := range members {
+ if member.Name == name {
+ // NB: This is safe because member is a copy.
+ member.Offset += target.offset
+ return member, j == len(members)-1, nil
+ }
+
+ // The names don't match, but this member could be an anonymous struct
+ // or union.
+ if member.Name != "" {
+ continue
+ }
+
+ comp, ok := member.Type.(composite)
+ if !ok {
+ return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
+ }
+
+ targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
+ }
+ }
+
+ return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
+}
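+
+// As an illustrative sketch (not part of the package), consider the C type
+//
+//	struct foo {
+//		int a;
+//		union {
+//			int bar;
+//		};
+//	};
+//
+// coreFindMember(foo, "bar") queues the anonymous union as an offsetTarget
+// and returns the "bar" member with its Offset made relative to struct foo.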
+
+// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
+func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
+ localValue, err := localAcc.enumValue(local)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ targetEnum, ok := target.(*Enum)
+ if !ok {
+ return nil, nil, errImpossibleRelocation
+ }
+
+ localName := newEssentialName(localValue.Name)
+ for i, targetValue := range targetEnum.Values {
+ if newEssentialName(targetValue.Name) != localName {
+ continue
+ }
+
+ return localValue, &targetEnum.Values[i], nil
+ }
+
+ return nil, nil, errImpossibleRelocation
+}
+
+/* The comment below is from bpf_core_types_are_compat in libbpf.c:
+ *
+ * Check local and target types for compatibility. This check is used for
+ * type-based CO-RE relocations and follow slightly different rules than
+ * field-based relocations. This function assumes that root types were already
+ * checked for name match. Beyond that initial root-level name check, names
+ * are completely ignored. Compatibility rules are as follows:
+ * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
+ * kind should match for local and target types (i.e., STRUCT is not
+ * compatible with UNION);
+ * - for ENUMs, the size is ignored;
+ * - for INT, size and signedness are ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - CONST/VOLATILE/RESTRICT modifiers are ignored;
+ * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
+ * - FUNC_PROTOs are compatible if they have compatible signature: same
+ * number of input args and compatible return and argument types.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errImpossibleRelocation if types are not compatible.
+ */
+func coreAreTypesCompatible(localType Type, targetType Type) error {
+ var (
+ localTs, targetTs typeDeque
+ l, t = &localType, &targetType
+ depth = 0
+ )
+
+ for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() {
+ if depth >= maxTypeDepth {
+ return errors.New("types are nested too deep")
+ }
+
+ localType = *l
+ targetType = *t
+
+ if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+ return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
+ }
+
+ switch lv := (localType).(type) {
+ case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
+ // Nothing to do here
+
+ case *Pointer, *Array:
+ depth++
+ localType.walk(&localTs)
+ targetType.walk(&targetTs)
+
+ case *FuncProto:
+ tv := targetType.(*FuncProto)
+ if len(lv.Params) != len(tv.Params) {
+ return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
+ }
+
+ depth++
+ localType.walk(&localTs)
+ targetType.walk(&targetTs)
+
+ default:
+ return fmt.Errorf("unsupported type %T", localType)
+ }
+ }
+
+ if l != nil {
+ return fmt.Errorf("dangling local type %T", *l)
+ }
+
+ if t != nil {
+ return fmt.Errorf("dangling target type %T", *t)
+ }
+
+ return nil
+}
+
+/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
+ *
+ * The comment below is from bpf_core_fields_are_compat in libbpf.c:
+ *
+ * Check two types for compatibility for the purpose of field access
+ * relocation. const/volatile/restrict and typedefs are skipped to ensure we
+ * are relocating semantically compatible entities:
+ * - any two STRUCTs/UNIONs are compatible and can be mixed;
+ * - any two FWDs are compatible, if their names match (modulo flavor suffix);
+ * - any two PTRs are always compatible;
+ * - for ENUMs, names should be the same (ignoring flavor suffix) or at
+ * least one of enums should be anonymous;
+ * - for ENUMs, check sizes, names are ignored;
+ * - for INT, size and signedness are ignored;
+ * - any two FLOATs are always compatible;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * [ NB: coreAreMembersCompatible doesn't recurse, this check is done
+ * by coreFindField. ]
+ * - everything else shouldn't be ever a target of relocation.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errImpossibleRelocation if the members are not compatible.
+ */
+func coreAreMembersCompatible(localType Type, targetType Type) error {
+ doNamesMatch := func(a, b string) error {
+ if a == "" || b == "" {
+ // allow anonymous and named type to match
+ return nil
+ }
+
+ if newEssentialName(a) == newEssentialName(b) {
+ return nil
+ }
+
+ return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
+ }
+
+ _, lok := localType.(composite)
+ _, tok := targetType.(composite)
+ if lok && tok {
+ return nil
+ }
+
+ if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+ return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
+ }
+
+ switch lv := localType.(type) {
+ case *Array, *Pointer, *Float, *Int:
+ return nil
+
+ case *Enum:
+ tv := targetType.(*Enum)
+ return doNamesMatch(lv.Name, tv.Name)
+
+ case *Fwd:
+ tv := targetType.(*Fwd)
+ return doNamesMatch(lv.Name, tv.Name)
+
+ default:
+ return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/doc.go b/vendor/github.com/cilium/ebpf/btf/doc.go
new file mode 100644
index 000000000..b1f4b1fc3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/doc.go
@@ -0,0 +1,5 @@
+// Package btf handles data encoded according to the BPF Type Format.
+//
+// The canonical documentation lives in the Linux kernel repository and is
+// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
+package btf
diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go
new file mode 100644
index 000000000..2c0e1afe2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go
@@ -0,0 +1,721 @@
+package btf
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+)
+
+// ExtInfos contains ELF section metadata.
+type ExtInfos struct {
+ // The slices are sorted by offset in ascending order.
+ funcInfos map[string][]funcInfo
+ lineInfos map[string][]lineInfo
+ relocationInfos map[string][]coreRelocationInfo
+}
+
+// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF.
+//
+// Returns an error wrapping ErrNotFound if no ext infos are present.
+func loadExtInfosFromELF(file *internal.SafeELFFile, ts types, strings *stringTable) (*ExtInfos, error) {
+ section := file.Section(".BTF.ext")
+ if section == nil {
+ return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound)
+ }
+
+ if section.ReaderAt == nil {
+ return nil, fmt.Errorf("compressed ext_info is not supported")
+ }
+
+ return loadExtInfos(section.ReaderAt, file.ByteOrder, ts, strings)
+}
+
+// loadExtInfos parses bare ext infos.
+func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, ts types, strings *stringTable) (*ExtInfos, error) {
+ // Open unbuffered section reader. binary.Read() calls io.ReadFull on
+ // the header structs, resulting in one syscall per header.
+ headerRd := io.NewSectionReader(r, 0, math.MaxInt64)
+ extHeader, err := parseBTFExtHeader(headerRd, bo)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF extension header: %w", err)
+ }
+
+ coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err)
+ }
+
+ buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen))
+ btfFuncInfos, err := parseFuncInfos(buf, bo, strings)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF function info: %w", err)
+ }
+
+ funcInfos := make(map[string][]funcInfo, len(btfFuncInfos))
+ for section, bfis := range btfFuncInfos {
+ funcInfos[section], err = newFuncInfos(bfis, ts)
+ if err != nil {
+ return nil, fmt.Errorf("section %s: func infos: %w", section, err)
+ }
+ }
+
+ buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen))
+ btfLineInfos, err := parseLineInfos(buf, bo, strings)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF line info: %w", err)
+ }
+
+ lineInfos := make(map[string][]lineInfo, len(btfLineInfos))
+ for section, blis := range btfLineInfos {
+ lineInfos[section], err = newLineInfos(blis, strings)
+ if err != nil {
+ return nil, fmt.Errorf("section %s: line infos: %w", section, err)
+ }
+ }
+
+ if coreHeader == nil || coreHeader.COREReloLen == 0 {
+ return &ExtInfos{funcInfos, lineInfos, nil}, nil
+ }
+
+ var btfCORERelos map[string][]bpfCORERelo
+ buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen))
+ btfCORERelos, err = parseCORERelos(buf, bo, strings)
+ if err != nil {
+ return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err)
+ }
+
+ coreRelos := make(map[string][]coreRelocationInfo, len(btfCORERelos))
+ for section, brs := range btfCORERelos {
+ coreRelos[section], err = newRelocationInfos(brs, ts, strings)
+ if err != nil {
+ return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err)
+ }
+ }
+
+ return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil
+}
+
+type funcInfoMeta struct{}
+type coreRelocationMeta struct{}
+
+// Assign per-section metadata from BTF to a section's instructions.
+func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
+ funcInfos := ei.funcInfos[section]
+ lineInfos := ei.lineInfos[section]
+ reloInfos := ei.relocationInfos[section]
+
+ iter := insns.Iterate()
+ for iter.Next() {
+ if len(funcInfos) > 0 && funcInfos[0].offset == iter.Offset {
+ iter.Ins.Metadata.Set(funcInfoMeta{}, funcInfos[0].fn)
+ funcInfos = funcInfos[1:]
+ }
+
+ if len(lineInfos) > 0 && lineInfos[0].offset == iter.Offset {
+ *iter.Ins = iter.Ins.WithSource(lineInfos[0].line)
+ lineInfos = lineInfos[1:]
+ }
+
+ if len(reloInfos) > 0 && reloInfos[0].offset == iter.Offset {
+ iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos[0].relo)
+ reloInfos = reloInfos[1:]
+ }
+ }
+}
+
+// MarshalExtInfos encodes function and line info embedded in insns into kernel
+// wire format.
+func MarshalExtInfos(insns asm.Instructions, typeID func(Type) (TypeID, error)) (funcInfos, lineInfos []byte, _ error) {
+ iter := insns.Iterate()
+ var fiBuf, liBuf bytes.Buffer
+ for iter.Next() {
+ if fn := FuncMetadata(iter.Ins); fn != nil {
+ fi := &funcInfo{
+ fn: fn,
+ offset: iter.Offset,
+ }
+ if err := fi.marshal(&fiBuf, typeID); err != nil {
+ return nil, nil, fmt.Errorf("write func info: %w", err)
+ }
+ }
+
+ if line, ok := iter.Ins.Source().(*Line); ok {
+ li := &lineInfo{
+ line: line,
+ offset: iter.Offset,
+ }
+ if err := li.marshal(&liBuf); err != nil {
+ return nil, nil, fmt.Errorf("write line info: %w", err)
+ }
+ }
+ }
+ return fiBuf.Bytes(), liBuf.Bytes(), nil
+}
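+
+// A minimal usage sketch, assuming a *Spec whose TypeID method resolves a
+// Type to its ID (the names here are illustrative):
+//
+//	fiBytes, liBytes, err := MarshalExtInfos(insns, spec.TypeID)
+//	if err != nil {
+//		// handle marshalling error
+//	}
+//	// fiBytes and liBytes hold func_info and line_info records in kernel
+//	// wire format.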
+
+// btfExtHeader is found at the start of the .BTF.ext section.
+type btfExtHeader struct {
+ Magic uint16
+ Version uint8
+ Flags uint8
+
+ // HdrLen is larger than the size of struct btfExtHeader when it is
+ // immediately followed by a btfExtCOREHeader.
+ HdrLen uint32
+
+ FuncInfoOff uint32
+ FuncInfoLen uint32
+ LineInfoOff uint32
+ LineInfoLen uint32
+}
+
+// parseBTFExtHeader parses the header of the .BTF.ext section.
+func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) {
+ var header btfExtHeader
+ if err := binary.Read(r, bo, &header); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ if header.Magic != btfMagic {
+ return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
+ }
+
+ if header.Version != 1 {
+ return nil, fmt.Errorf("unexpected version %v", header.Version)
+ }
+
+ if header.Flags != 0 {
+ return nil, fmt.Errorf("unsupported flags %v", header.Flags)
+ }
+
+ if int64(header.HdrLen) < int64(binary.Size(&header)) {
+ return nil, fmt.Errorf("header length shorter than btfExtHeader size")
+ }
+
+ return &header, nil
+}
+
+// funcInfoStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its func_info entries.
+func (h *btfExtHeader) funcInfoStart() int64 {
+ return int64(h.HdrLen + h.FuncInfoOff)
+}
+
+// lineInfoStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its line_info entries.
+func (h *btfExtHeader) lineInfoStart() int64 {
+ return int64(h.HdrLen + h.LineInfoOff)
+}
+
+// coreReloStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its CO-RE relocation entries.
+func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 {
+ return int64(h.HdrLen + ch.COREReloOff)
+}
+
+// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen
+// field is larger than its size.
+type btfExtCOREHeader struct {
+ COREReloOff uint32
+ COREReloLen uint32
+}
+
+// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional
+// header bytes are present, extHeader.HdrLen will be larger than the struct,
+// indicating the presence of a CO-RE extension header.
+func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) {
+ // extHeader is already a pointer; passing its address would make
+ // binary.Size return -1.
+ extHdrSize := int64(binary.Size(extHeader))
+ remainder := int64(extHeader.HdrLen) - extHdrSize
+
+ if remainder == 0 {
+ return nil, nil
+ }
+
+ var coreHeader btfExtCOREHeader
+ if err := binary.Read(r, bo, &coreHeader); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ return &coreHeader, nil
+}
+
+type btfExtInfoSec struct {
+ SecNameOff uint32
+ NumInfo uint32
+}
+
+// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext,
+// appearing within func_info and line_info sub-sections.
+// These headers appear once for each program section in the ELF and are
+// followed by one or more func/line_info records for the section.
+func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) {
+ var infoHeader btfExtInfoSec
+ if err := binary.Read(r, bo, &infoHeader); err != nil {
+ return "", nil, fmt.Errorf("read ext info header: %w", err)
+ }
+
+ secName, err := strings.Lookup(infoHeader.SecNameOff)
+ if err != nil {
+ return "", nil, fmt.Errorf("get section name: %w", err)
+ }
+ if secName == "" {
+ return "", nil, fmt.Errorf("extinfo header refers to empty section name")
+ }
+
+ if infoHeader.NumInfo == 0 {
+ return "", nil, fmt.Errorf("section %s has zero records", secName)
+ }
+
+ return secName, &infoHeader, nil
+}
+
+// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos
+// or line_infos segment that describes the length of all extInfoRecords in
+// that segment.
+func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) {
+ const maxRecordSize = 256
+
+ var recordSize uint32
+ if err := binary.Read(r, bo, &recordSize); err != nil {
+ return 0, fmt.Errorf("can't read record size: %v", err)
+ }
+
+ if recordSize < 4 {
+ // Need at least InsnOff worth of bytes per record.
+ return 0, errors.New("record size too short")
+ }
+ if recordSize > maxRecordSize {
+ return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
+ }
+
+ return recordSize, nil
+}
+
+// The size of a FuncInfo in BTF wire format.
+var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{}))
+
+type funcInfo struct {
+ fn *Func
+ offset asm.RawInstructionOffset
+}
+
+type bpfFuncInfo struct {
+ // Instruction offset of the function within an ELF section.
+ InsnOff uint32
+ TypeID TypeID
+}
+
+func newFuncInfo(fi bpfFuncInfo, ts types) (*funcInfo, error) {
+ typ, err := ts.ByID(fi.TypeID)
+ if err != nil {
+ return nil, err
+ }
+
+ fn, ok := typ.(*Func)
+ if !ok {
+ return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ)
+ }
+
+ // C doesn't have anonymous functions, but check just in case.
+ if fn.Name == "" {
+ return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID)
+ }
+
+ return &funcInfo{
+ fn,
+ asm.RawInstructionOffset(fi.InsnOff),
+ }, nil
+}
+
+func newFuncInfos(bfis []bpfFuncInfo, ts types) ([]funcInfo, error) {
+ fis := make([]funcInfo, 0, len(bfis))
+ for _, bfi := range bfis {
+ fi, err := newFuncInfo(bfi, ts)
+ if err != nil {
+ return nil, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
+ }
+ fis = append(fis, *fi)
+ }
+ sort.Slice(fis, func(i, j int) bool {
+ return fis[i].offset < fis[j].offset
+ })
+ return fis, nil
+}
+
+// marshal into the BTF wire format.
+func (fi *funcInfo) marshal(w io.Writer, typeID func(Type) (TypeID, error)) error {
+ id, err := typeID(fi.fn)
+ if err != nil {
+ return err
+ }
+ bfi := bpfFuncInfo{
+ InsnOff: uint32(fi.offset),
+ TypeID: id,
+ }
+ return binary.Write(w, internal.NativeEndian, &bfi)
+}
+
+// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
+// func infos indexed by section name.
+func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ result := make(map[string][]bpfFuncInfo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseFuncInfoRecords parses a stream of func_infos into a slice of bpfFuncInfo.
+// These records appear after a btf_ext_info_sec header in the func_info
+// sub-section of .BTF.ext.
+func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfFuncInfo, error) {
+ var out []bpfFuncInfo
+ var fi bpfFuncInfo
+
+ if exp, got := FuncInfoSize, recordSize; exp != got {
+ // BTF blob's record size is longer than we know how to parse.
+ return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got)
+ }
+
+ for i := uint32(0); i < recordNum; i++ {
+ if err := binary.Read(r, bo, &fi); err != nil {
+ return nil, fmt.Errorf("can't read function info: %v", err)
+ }
+
+ if fi.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff)
+ }
+
+ // ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+ // Convert as early as possible.
+ fi.InsnOff /= asm.InstructionSize
+
+ out = append(out, fi)
+ }
+
+ return out, nil
+}
+
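+// The size of a LineInfo in BTF wire format.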
+var LineInfoSize = uint32(binary.Size(bpfLineInfo{}))
+
+// Line represents the location and contents of a single line of source
+// code a BPF ELF was compiled from.
+type Line struct {
+ fileName string
+ line string
+ lineNumber uint32
+ lineColumn uint32
+
+ // TODO: We should get rid of the fields below, but for that we need to be
+ // able to write BTF.
+
+ fileNameOff uint32
+ lineOff uint32
+}
+
+func (li *Line) FileName() string {
+ return li.fileName
+}
+
+func (li *Line) Line() string {
+ return li.line
+}
+
+func (li *Line) LineNumber() uint32 {
+ return li.lineNumber
+}
+
+func (li *Line) LineColumn() uint32 {
+ return li.lineColumn
+}
+
+func (li *Line) String() string {
+ return li.line
+}
+
+type lineInfo struct {
+ line *Line
+ offset asm.RawInstructionOffset
+}
+
+// Constants for the format of bpfLineInfo.LineCol.
+const (
+ bpfLineShift = 10
+ bpfLineMax = (1 << (32 - bpfLineShift)) - 1
+ bpfColumnMax = (1 << bpfLineShift) - 1
+)
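+
+// For example, line 7, column 3 packs into LineCol as (7<<bpfLineShift)|3.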
+
+type bpfLineInfo struct {
+ // Instruction offset of the line within the whole instruction stream, in instructions.
+ InsnOff uint32
+ FileNameOff uint32
+ LineOff uint32
+ LineCol uint32
+}
+
+func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) {
+ line, err := strings.Lookup(li.LineOff)
+ if err != nil {
+ return nil, fmt.Errorf("lookup of line: %w", err)
+ }
+
+ fileName, err := strings.Lookup(li.FileNameOff)
+ if err != nil {
+ return nil, fmt.Errorf("lookup of filename: %w", err)
+ }
+
+ lineNumber := li.LineCol >> bpfLineShift
+ lineColumn := li.LineCol & bpfColumnMax
+
+ return &lineInfo{
+ &Line{
+ fileName,
+ line,
+ lineNumber,
+ lineColumn,
+ li.FileNameOff,
+ li.LineOff,
+ },
+ asm.RawInstructionOffset(li.InsnOff),
+ }, nil
+}
+
+func newLineInfos(blis []bpfLineInfo, strings *stringTable) ([]lineInfo, error) {
+ lis := make([]lineInfo, 0, len(blis))
+ for _, bli := range blis {
+ li, err := newLineInfo(bli, strings)
+ if err != nil {
+ return nil, fmt.Errorf("offset %d: %w", bli.InsnOff, err)
+ }
+ lis = append(lis, *li)
+ }
+ sort.Slice(lis, func(i, j int) bool {
+ return lis[i].offset < lis[j].offset
+ })
+ return lis, nil
+}
+
+// marshal writes the binary representation of the LineInfo to w.
+func (li *lineInfo) marshal(w io.Writer) error {
+ line := li.line
+ if line.lineNumber > bpfLineMax {
+ return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
+ }
+
+ if line.lineColumn > bpfColumnMax {
+ return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
+ }
+
+ bli := bpfLineInfo{
+ uint32(li.offset),
+ line.fileNameOff,
+ line.lineOff,
+ (line.lineNumber << bpfLineShift) | line.lineColumn,
+ }
+ return binary.Write(w, internal.NativeEndian, &bli)
+}
+
+// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
+// line infos indexed by section name.
+func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ result := make(map[string][]bpfLineInfo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseLineInfoRecords parses a stream of line_infos into a slice of bpfLineInfo.
+// These records appear after a btf_ext_info_sec header in the line_info
+// sub-section of .BTF.ext.
+func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfLineInfo, error) {
+ var out []bpfLineInfo
+ var li bpfLineInfo
+
+ if exp, got := uint32(binary.Size(li)), recordSize; exp != got {
+ // BTF blob's record size is longer than we know how to parse.
+ return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got)
+ }
+
+ for i := uint32(0); i < recordNum; i++ {
+ if err := binary.Read(r, bo, &li); err != nil {
+ return nil, fmt.Errorf("can't read line info: %v", err)
+ }
+
+ if li.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff)
+ }
+
+ // ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+ // Convert as early as possible.
+ li.InsnOff /= asm.InstructionSize
+
+ out = append(out, li)
+ }
+
+ return out, nil
+}
+
+// bpfCORERelo matches the kernel's struct bpf_core_relo.
+type bpfCORERelo struct {
+ InsnOff uint32
+ TypeID TypeID
+ AccessStrOff uint32
+ Kind coreKind
+}
+
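+// CORERelocation is a CO-RE relocation parsed from BTF ext infos.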
+type CORERelocation struct {
+ typ Type
+ accessor coreAccessor
+ kind coreKind
+}
+
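+// CORERelocationMetadata returns the CO-RE relocation attached to an
+// instruction's metadata, or nil if there is none.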
+func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
+ relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation)
+ return relo
+}
+
+type coreRelocationInfo struct {
+ relo *CORERelocation
+ offset asm.RawInstructionOffset
+}
+
+func newRelocationInfo(relo bpfCORERelo, ts types, strings *stringTable) (*coreRelocationInfo, error) {
+ typ, err := ts.ByID(relo.TypeID)
+ if err != nil {
+ return nil, err
+ }
+
+ accessorStr, err := strings.Lookup(relo.AccessStrOff)
+ if err != nil {
+ return nil, err
+ }
+
+ accessor, err := parseCOREAccessor(accessorStr)
+ if err != nil {
+ return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
+ }
+
+ return &coreRelocationInfo{
+ &CORERelocation{
+ typ,
+ accessor,
+ relo.Kind,
+ },
+ asm.RawInstructionOffset(relo.InsnOff),
+ }, nil
+}
+
+func newRelocationInfos(brs []bpfCORERelo, ts types, strings *stringTable) ([]coreRelocationInfo, error) {
+ rs := make([]coreRelocationInfo, 0, len(brs))
+ for _, br := range brs {
+ relo, err := newRelocationInfo(br, ts, strings)
+ if err != nil {
+ return nil, fmt.Errorf("offset %d: %w", br.InsnOff, err)
+ }
+ rs = append(rs, *relo)
+ }
+ sort.Slice(rs, func(i, j int) bool {
+ return rs[i].offset < rs[j].offset
+ })
+ return rs, nil
+}
+
+var extInfoReloSize = binary.Size(bpfCORERelo{})
+
+// parseCORERelos parses a core_relos sub-section within .BTF.ext into a map of
+// CO-RE relocations indexed by section name.
+func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ if recordSize != uint32(extInfoReloSize) {
+ return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
+ }
+
+ result := make(map[string][]bpfCORERelo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseCOREReloRecords parses a stream of CO-RE relocation entries into a
+// slice of bpfCORERelo. These records appear after a btf_ext_info_sec header in the
+// core_relos sub-section of .BTF.ext.
+func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) {
+ var out []bpfCORERelo
+
+ var relo bpfCORERelo
+ for i := uint32(0); i < recordNum; i++ {
+ if err := binary.Read(r, bo, &relo); err != nil {
+ return nil, fmt.Errorf("can't read CO-RE relocation: %v", err)
+ }
+
+ if relo.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff)
+ }
+
+ // ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+ // Convert as early as possible.
+ relo.InsnOff /= asm.InstructionSize
+
+ out = append(out, relo)
+ }
+
+ return out, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/format.go b/vendor/github.com/cilium/ebpf/btf/format.go
new file mode 100644
index 000000000..e7688a2a6
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/format.go
@@ -0,0 +1,319 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+var errNestedTooDeep = errors.New("nested too deep")
+
+// GoFormatter converts a Type to Go syntax.
+//
+// A zero GoFormatter is valid to use.
+type GoFormatter struct {
+ w strings.Builder
+
+ // Types present in this map are referred to using the given name if they
+ // are encountered when outputting another type.
+ Names map[Type]string
+
+ // Identifier is called for each field of struct-like types. By default the
+ // field name is used as is.
+ Identifier func(string) string
+
+ // EnumIdentifier is called for each element of an enum. By default the
+ // name of the enum type is concatenated with Identifier(element).
+ EnumIdentifier func(name, element string) string
+}
+
+// TypeDeclaration generates a Go type declaration for a BTF type.
+func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) {
+ gf.w.Reset()
+ if err := gf.writeTypeDecl(name, typ); err != nil {
+ return "", err
+ }
+ return gf.w.String(), nil
+}
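+
+// A minimal usage sketch (names are illustrative):
+//
+//	var gf GoFormatter
+//	decl, err := gf.TypeDeclaration("Foo", typ) // typ is any Type
+//	// decl is e.g. `type Foo struct { bar uint32; }`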
+
+func (gf *GoFormatter) identifier(s string) string {
+ if gf.Identifier != nil {
+ return gf.Identifier(s)
+ }
+
+ return s
+}
+
+func (gf *GoFormatter) enumIdentifier(name, element string) string {
+ if gf.EnumIdentifier != nil {
+ return gf.EnumIdentifier(name, element)
+ }
+
+ return name + gf.identifier(element)
+}
+
+// writeTypeDecl outputs a declaration of the given type.
+//
+// It encodes https://golang.org/ref/spec#Type_declarations:
+//
+// type foo struct { bar uint32; }
+// type bar int32
+func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error {
+ if name == "" {
+ return fmt.Errorf("need a name for type %s", typ)
+ }
+
+ switch v := skipQualifiers(typ).(type) {
+ case *Enum:
+ fmt.Fprintf(&gf.w, "type %s ", name)
+ switch v.Size {
+ case 1:
+ gf.w.WriteString("int8")
+ case 2:
+ gf.w.WriteString("int16")
+ case 4:
+ gf.w.WriteString("int32")
+ case 8:
+ gf.w.WriteString("int64")
+ default:
+ return fmt.Errorf("%s: invalid enum size %d", typ, v.Size)
+ }
+
+ if len(v.Values) == 0 {
+ return nil
+ }
+
+ gf.w.WriteString("; const ( ")
+ for _, ev := range v.Values {
+ id := gf.enumIdentifier(name, ev.Name)
+ fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value)
+ }
+ gf.w.WriteString(")")
+
+ return nil
+
+ default:
+ fmt.Fprintf(&gf.w, "type %s ", name)
+ return gf.writeTypeLit(v, 0)
+ }
+}
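+
+// For example, a 4-byte Enum with a single value A=1, declared under the
+// name "E", is emitted as:
+//
+//	type E int32; const ( EA E = 1; )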
+
+// writeType outputs the name of a named type or a literal describing the type.
+//
+// It encodes https://golang.org/ref/spec#Types.
+//
+// foo (if foo is a named type)
+// uint32
+func (gf *GoFormatter) writeType(typ Type, depth int) error {
+ typ = skipQualifiers(typ)
+
+ name := gf.Names[typ]
+ if name != "" {
+ gf.w.WriteString(name)
+ return nil
+ }
+
+ return gf.writeTypeLit(typ, depth)
+}
+
+// writeTypeLit outputs a literal describing the type.
+//
+// The function ignores named types.
+//
+// It encodes https://golang.org/ref/spec#TypeLit.
+//
+// struct { bar uint32; }
+// uint32
+func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
+ depth++
+ if depth > maxTypeDepth {
+ return errNestedTooDeep
+ }
+
+ var err error
+ switch v := skipQualifiers(typ).(type) {
+ case *Int:
+ gf.writeIntLit(v)
+
+ case *Enum:
+ gf.w.WriteString("int32")
+
+ case *Typedef:
+ err = gf.writeType(v.Type, depth)
+
+ case *Array:
+ fmt.Fprintf(&gf.w, "[%d]", v.Nelems)
+ err = gf.writeType(v.Type, depth)
+
+ case *Struct:
+ err = gf.writeStructLit(v.Size, v.Members, depth)
+
+ case *Union:
+ // Always choose the first member to represent the union in Go.
+ err = gf.writeStructLit(v.Size, v.Members[:1], depth)
+
+ case *Datasec:
+ err = gf.writeDatasecLit(v, depth)
+
+ default:
+ return fmt.Errorf("type %T: %w", v, ErrNotSupported)
+ }
+
+ if err != nil {
+ return fmt.Errorf("%s: %w", typ, err)
+ }
+
+ return nil
+}
+
+func (gf *GoFormatter) writeIntLit(i *Int) {
+ // NB: Encoding.IsChar is ignored.
+ if i.Encoding.IsBool() && i.Size == 1 {
+ gf.w.WriteString("bool")
+ return
+ }
+
+ bits := i.Size * 8
+ if i.Encoding.IsSigned() {
+ fmt.Fprintf(&gf.w, "int%d", bits)
+ } else {
+ fmt.Fprintf(&gf.w, "uint%d", bits)
+ }
+}
+
+func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error {
+ gf.w.WriteString("struct { ")
+
+ prevOffset := uint32(0)
+ skippedBitfield := false
+ for i, m := range members {
+ if m.BitfieldSize > 0 {
+ skippedBitfield = true
+ continue
+ }
+
+ offset := m.Offset.Bytes()
+ if n := offset - prevOffset; skippedBitfield && n > 0 {
+ fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n)
+ } else {
+ gf.writePadding(n)
+ }
+
+ size, err := Sizeof(m.Type)
+ if err != nil {
+ return fmt.Errorf("field %d: %w", i, err)
+ }
+ prevOffset = offset + uint32(size)
+
+ if err := gf.writeStructField(m, depth); err != nil {
+ return fmt.Errorf("field %d: %w", i, err)
+ }
+ }
+
+ gf.writePadding(size - prevOffset)
+ gf.w.WriteString("}")
+ return nil
+}
+
+func (gf *GoFormatter) writeStructField(m Member, depth int) error {
+ if m.BitfieldSize > 0 {
+ return fmt.Errorf("bitfields are not supported")
+ }
+ if m.Offset%8 != 0 {
+ return fmt.Errorf("unsupported offset %d", m.Offset)
+ }
+
+ if m.Name == "" {
+ // Special case a nested anonymous union like
+ // struct foo { union { int bar; int baz }; }
+ // by replacing the whole union with its first member.
+ union, ok := m.Type.(*Union)
+ if !ok {
+ return fmt.Errorf("anonymous fields are not supported")
+ }
+
+ if len(union.Members) == 0 {
+ return errors.New("empty anonymous union")
+ }
+
+ depth++
+ if depth > maxTypeDepth {
+ return errNestedTooDeep
+ }
+
+ m := union.Members[0]
+ size, err := Sizeof(m.Type)
+ if err != nil {
+ return err
+ }
+
+ if err := gf.writeStructField(m, depth); err != nil {
+ return err
+ }
+
+ gf.writePadding(union.Size - uint32(size))
+ return nil
+ }
+
+ fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name))
+
+ if err := gf.writeType(m.Type, depth); err != nil {
+ return err
+ }
+
+ gf.w.WriteString("; ")
+ return nil
+}
+
+func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error {
+ gf.w.WriteString("struct { ")
+
+ prevOffset := uint32(0)
+ for i, vsi := range ds.Vars {
+ v := vsi.Type.(*Var)
+ if v.Linkage != GlobalVar {
+ // Ignore static, extern, etc. for now.
+ continue
+ }
+
+ if v.Name == "" {
+ return fmt.Errorf("variable %d: empty name", i)
+ }
+
+ gf.writePadding(vsi.Offset - prevOffset)
+ prevOffset = vsi.Offset + vsi.Size
+
+ fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name))
+
+ if err := gf.writeType(v.Type, depth); err != nil {
+ return fmt.Errorf("variable %d: %w", i, err)
+ }
+
+ gf.w.WriteString("; ")
+ }
+
+ gf.writePadding(ds.Size - prevOffset)
+ gf.w.WriteString("}")
+ return nil
+}
+
+func (gf *GoFormatter) writePadding(bytes uint32) {
+ if bytes > 0 {
+ fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes)
+ }
+}
+
+func skipQualifiers(typ Type) Type {
+ result := typ
+ for depth := 0; depth <= maxTypeDepth; depth++ {
+ switch v := (result).(type) {
+ case qualifier:
+ result = v.qualify()
+ default:
+ return result
+ }
+ }
+ return &cycle{typ}
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/handle.go b/vendor/github.com/cilium/ebpf/btf/handle.go
new file mode 100644
index 000000000..128e9b35c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/handle.go
@@ -0,0 +1,121 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// HandleInfo describes a Handle.
+type HandleInfo struct {
+ // ID of this handle in the kernel. The ID is only valid as long as the
+ // associated handle is kept alive.
+ ID ID
+
+ // Name is an identifying name for the BTF, currently only used by the
+ // kernel.
+ Name string
+
+ // IsKernel is true if the BTF originated with the kernel and not
+ // userspace.
+ IsKernel bool
+
+ // Size of the raw BTF in bytes.
+ size uint32
+}
+
+func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
+ // We invoke the syscall once with empty BTF and name buffers to get size
+ // information to allocate buffers. Then we invoke it a second time with
+ // buffers to receive the data.
+ var btfInfo sys.BtfInfo
+ if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+ return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err)
+ }
+
+ if btfInfo.NameLen > 0 {
+ // NameLen doesn't account for the terminating NUL.
+ btfInfo.NameLen++
+ }
+
+ // Don't pull raw BTF by default, since it may be quite large.
+ btfSize := btfInfo.BtfSize
+ btfInfo.BtfSize = 0
+
+ nameBuffer := make([]byte, btfInfo.NameLen)
+ btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer)
+ if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+ return nil, err
+ }
+
+ return &HandleInfo{
+ ID: ID(btfInfo.Id),
+ Name: unix.ByteSliceToString(nameBuffer),
+ IsKernel: btfInfo.KernelBtf != 0,
+ size: btfSize,
+ }, nil
+}
+
+// IsVmlinux returns true if the BTF is for the kernel itself.
+func (i *HandleInfo) IsVmlinux() bool {
+ return i.IsKernel && i.Name == "vmlinux"
+}
+
+// IsModule returns true if the BTF is for a kernel module.
+func (i *HandleInfo) IsModule() bool {
+ return i.IsKernel && i.Name != "vmlinux"
+}
+
+// HandleIterator allows enumerating BTF blobs loaded into the kernel.
+type HandleIterator struct {
+ // The ID of the last retrieved handle. Only valid after a call to Next.
+ ID ID
+ err error
+}
+
+// Next retrieves a handle for the next BTF blob.
+//
+// [Handle.Close] is called if *handle is non-nil to avoid leaking fds.
+//
+// Returns true if another BTF blob was found. Call [HandleIterator.Err] after
+// the function returns false.
+func (it *HandleIterator) Next(handle **Handle) bool {
+ if *handle != nil {
+ (*handle).Close()
+ *handle = nil
+ }
+
+ id := it.ID
+ for {
+ attr := &sys.BtfGetNextIdAttr{Id: id}
+ err := sys.BtfGetNextId(attr)
+ if errors.Is(err, os.ErrNotExist) {
+ // There are no more BTF objects.
+ return false
+ } else if err != nil {
+ it.err = fmt.Errorf("get next BTF ID: %w", err)
+ return false
+ }
+
+ id = attr.NextId
+ *handle, err = NewHandleFromID(id)
+ if errors.Is(err, os.ErrNotExist) {
+ // Try again with the next ID.
+ continue
+ } else if err != nil {
+ it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err)
+ return false
+ }
+
+ it.ID = id
+ return true
+ }
+}
+
+// Err returns an error if iteration failed for some reason.
+func (it *HandleIterator) Err() error {
+ return it.err
+}
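+
+// A minimal usage sketch (error handling kept short):
+//
+//	var handle *Handle
+//	it := new(HandleIterator)
+//	for it.Next(&handle) {
+//		fmt.Printf("found BTF with ID %d\n", it.ID)
+//	}
+//	if err := it.Err(); err != nil {
+//		// iteration failed
+//	}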
diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go
new file mode 100644
index 000000000..67626e0dd
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/strings.go
@@ -0,0 +1,128 @@
+package btf
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+)
+
+type stringTable struct {
+ base *stringTable
+ offsets []uint32
+ strings []string
+}
+
+// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc.
+type sizedReader interface {
+ io.Reader
+ Size() int64
+}
+
+func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) {
+ // When parsing split BTF's string table, the first entry offset is derived
+ // from the last entry offset of the base BTF.
+ firstStringOffset := uint32(0)
+ if base != nil {
+ idx := len(base.offsets) - 1
+ firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1
+ }
+
+ // Derived from vmlinux BTF.
+ const averageStringLength = 16
+
+ n := int(r.Size() / averageStringLength)
+ offsets := make([]uint32, 0, n)
+ strings := make([]string, 0, n)
+
+ offset := firstStringOffset
+ scanner := bufio.NewScanner(r)
+ scanner.Split(splitNull)
+ for scanner.Scan() {
+ str := scanner.Text()
+ offsets = append(offsets, offset)
+ strings = append(strings, str)
+ offset += uint32(len(str)) + 1
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ if len(strings) == 0 {
+ return nil, errors.New("string table is empty")
+ }
+
+ if firstStringOffset == 0 && strings[0] != "" {
+ return nil, errors.New("first item in string table is non-empty")
+ }
+
+ return &stringTable{base, offsets, strings}, nil
+}
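+
+// For example, the raw table "\x00foo\x00bar\x00" yields offsets [0 1 5] and
+// strings ["", "foo", "bar"]: each offset points at the first byte of a
+// NUL-terminated string.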
+
+func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ i := bytes.IndexByte(data, 0)
+ if i == -1 {
+ if atEOF && len(data) > 0 {
+ return 0, nil, errors.New("string table isn't null terminated")
+ }
+ return 0, nil, nil
+ }
+
+ return i + 1, data[:i], nil
+}
+
+func (st *stringTable) Lookup(offset uint32) (string, error) {
+ if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] {
+ return st.base.lookup(offset)
+ }
+ return st.lookup(offset)
+}
+
+func (st *stringTable) lookup(offset uint32) (string, error) {
+ i := search(st.offsets, offset)
+ if i == len(st.offsets) || st.offsets[i] != offset {
+ return "", fmt.Errorf("offset %d isn't start of a string", offset)
+ }
+
+ return st.strings[i], nil
+}
+
+func (st *stringTable) Length() int {
+ last := len(st.offsets) - 1
+ return int(st.offsets[last]) + len(st.strings[last]) + 1
+}
+
+func (st *stringTable) Marshal(w io.Writer) error {
+ for _, str := range st.strings {
+ _, err := io.WriteString(w, str)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write([]byte{0})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// search is a copy of sort.Search specialised for uint32.
+//
+// Licensed under https://go.dev/LICENSE
+func search(ints []uint32, needle uint32) int {
+ // Define f(-1) == false and f(n) == true.
+ // Invariant: f(i-1) == false, f(j) == true.
+ i, j := 0, len(ints)
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if !(ints[h] >= needle) {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+ return i
+}
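+
+// For example, search([]uint32{0, 4, 10}, 4) returns 1, needle 5 returns 2,
+// and any needle greater than 10 returns len(ints).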
diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go
new file mode 100644
index 000000000..402a363c2
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/types.go
@@ -0,0 +1,1212 @@
+package btf
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+)
+
+const maxTypeDepth = 32
+
+// TypeID identifies a type in a BTF section.
+type TypeID uint32
+
+// Type represents a type described by BTF.
+type Type interface {
+ // Type can be formatted using the %s and %v verbs. %s outputs only the
+ // identity of the type, without any detail. %v outputs additional detail.
+ //
+ // Use the '+' flag to include the address of the type.
+ //
+ // Use the width to specify how many levels of detail to output, for example
+ // %1v will output detail for the root type and a short description of its
+ // children. %2v would output details of the root type and its children
+ // as well as a short description of the grandchildren.
+ fmt.Formatter
+
+ // Name of the type, empty for anonymous types and types that cannot
+ // carry a name, like Void and Pointer.
+ TypeName() string
+
+ // Make a copy of the type, without copying Type members.
+ copy() Type
+
+ // Enumerate all nested Types. Repeated calls must visit nested
+ // types in the same order.
+ walk(*typeDeque)
+}
+
+var (
+ _ Type = (*Int)(nil)
+ _ Type = (*Struct)(nil)
+ _ Type = (*Union)(nil)
+ _ Type = (*Enum)(nil)
+ _ Type = (*Fwd)(nil)
+ _ Type = (*Func)(nil)
+ _ Type = (*Typedef)(nil)
+ _ Type = (*Var)(nil)
+ _ Type = (*Datasec)(nil)
+ _ Type = (*Float)(nil)
+)
+
+// types is a list of Type.
+//
+// The order determines the ID of a type.
+type types []Type
+
+func (ts types) ByID(id TypeID) (Type, error) {
+ if int(id) >= len(ts) {
+ return nil, fmt.Errorf("type ID %d: %w", id, ErrNotFound)
+ }
+ return ts[id], nil
+}
+
+// Void is the unit type of BTF.
+type Void struct{}
+
+func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) }
+func (v *Void) TypeName() string { return "" }
+func (v *Void) size() uint32 { return 0 }
+func (v *Void) copy() Type { return (*Void)(nil) }
+func (v *Void) walk(*typeDeque) {}
+
+type IntEncoding byte
+
+const (
+ Signed IntEncoding = 1 << iota
+ Char
+ Bool
+)
+
+func (ie IntEncoding) IsSigned() bool {
+ return ie&Signed != 0
+}
+
+func (ie IntEncoding) IsChar() bool {
+ return ie&Char != 0
+}
+
+func (ie IntEncoding) IsBool() bool {
+ return ie&Bool != 0
+}
+
+func (ie IntEncoding) String() string {
+ switch {
+ case ie.IsChar() && ie.IsSigned():
+ return "char"
+ case ie.IsChar() && !ie.IsSigned():
+ return "uchar"
+ case ie.IsBool():
+ return "bool"
+ case ie.IsSigned():
+ return "signed"
+ default:
+ return "unsigned"
+ }
+}
+
+// Int is an integer of a given length.
+//
+// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
+type Int struct {
+ Name string
+
+ // The size of the integer in bytes.
+ Size uint32
+ Encoding IntEncoding
+}
+
+func (i *Int) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, i, i.Encoding, "size=", i.Size*8)
+}
+
+func (i *Int) TypeName() string { return i.Name }
+func (i *Int) size() uint32 { return i.Size }
+func (i *Int) walk(*typeDeque) {}
+func (i *Int) copy() Type {
+ cpy := *i
+ return &cpy
+}
+
+// Pointer is a pointer to another type.
+type Pointer struct {
+ Target Type
+}
+
+func (p *Pointer) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, p, "target=", p.Target)
+}
+
+func (p *Pointer) TypeName() string { return "" }
+func (p *Pointer) size() uint32 { return 8 }
+func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) }
+func (p *Pointer) copy() Type {
+ cpy := *p
+ return &cpy
+}
+
+// Array is an array with a fixed number of elements.
+type Array struct {
+ Index Type
+ Type Type
+ Nelems uint32
+}
+
+func (arr *Array) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems)
+}
+
+func (arr *Array) TypeName() string { return "" }
+
+func (arr *Array) walk(tdq *typeDeque) {
+ tdq.push(&arr.Index)
+ tdq.push(&arr.Type)
+}
+
+func (arr *Array) copy() Type {
+ cpy := *arr
+ return &cpy
+}
+
+// Struct is a compound type of consecutive members.
+type Struct struct {
+ Name string
+ // The size of the struct including padding, in bytes
+ Size uint32
+ Members []Member
+}
+
+func (s *Struct) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, s, "fields=", len(s.Members))
+}
+
+func (s *Struct) TypeName() string { return s.Name }
+
+func (s *Struct) size() uint32 { return s.Size }
+
+func (s *Struct) walk(tdq *typeDeque) {
+ for i := range s.Members {
+ tdq.push(&s.Members[i].Type)
+ }
+}
+
+func (s *Struct) copy() Type {
+ cpy := *s
+ cpy.Members = copyMembers(s.Members)
+ return &cpy
+}
+
+func (s *Struct) members() []Member {
+ return s.Members
+}
+
+// Union is a compound type where members occupy the same memory.
+type Union struct {
+ Name string
+ // The size of the union including padding, in bytes.
+ Size uint32
+ Members []Member
+}
+
+func (u *Union) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, u, "fields=", len(u.Members))
+}
+
+func (u *Union) TypeName() string { return u.Name }
+
+func (u *Union) size() uint32 { return u.Size }
+
+func (u *Union) walk(tdq *typeDeque) {
+ for i := range u.Members {
+ tdq.push(&u.Members[i].Type)
+ }
+}
+
+func (u *Union) copy() Type {
+ cpy := *u
+ cpy.Members = copyMembers(u.Members)
+ return &cpy
+}
+
+func (u *Union) members() []Member {
+ return u.Members
+}
+
+func copyMembers(orig []Member) []Member {
+ cpy := make([]Member, len(orig))
+ copy(cpy, orig)
+ return cpy
+}
+
+type composite interface {
+ members() []Member
+}
+
+var (
+ _ composite = (*Struct)(nil)
+ _ composite = (*Union)(nil)
+)
+
+// A value in bits.
+type Bits uint32
+
+// Bytes converts a bit value into bytes.
+func (b Bits) Bytes() uint32 {
+ return uint32(b / 8)
+}
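+
+// For example, Bits(20).Bytes() is 2: the conversion truncates rather than
+// rounding up.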
+
+// Member is part of a Struct or Union.
+//
+// It is not a valid Type.
+type Member struct {
+ Name string
+ Type Type
+ Offset Bits
+ BitfieldSize Bits
+}
+
+// Enum lists possible values.
+type Enum struct {
+ Name string
+ // Size of the enum value in bytes.
+ Size uint32
+ Values []EnumValue
+}
+
+func (e *Enum) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values))
+}
+
+func (e *Enum) TypeName() string { return e.Name }
+
+// EnumValue is part of an Enum.
+//
+// It is not a valid Type.
+type EnumValue struct {
+ Name string
+ Value int32
+}
+
+func (e *Enum) size() uint32 { return e.Size }
+func (e *Enum) walk(*typeDeque) {}
+func (e *Enum) copy() Type {
+ cpy := *e
+ cpy.Values = make([]EnumValue, len(e.Values))
+ copy(cpy.Values, e.Values)
+ return &cpy
+}
+
+// FwdKind is the type of forward declaration.
+type FwdKind int
+
+// Valid types of forward declaration.
+const (
+ FwdStruct FwdKind = iota
+ FwdUnion
+)
+
+func (fk FwdKind) String() string {
+ switch fk {
+ case FwdStruct:
+ return "struct"
+ case FwdUnion:
+ return "union"
+ default:
+ return fmt.Sprintf("%T(%d)", fk, int(fk))
+ }
+}
+
+// Fwd is a forward declaration of a Type.
+type Fwd struct {
+ Name string
+ Kind FwdKind
+}
+
+func (f *Fwd) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, f.Kind)
+}
+
+func (f *Fwd) TypeName() string { return f.Name }
+
+func (f *Fwd) walk(*typeDeque) {}
+func (f *Fwd) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// Typedef is an alias of a Type.
+type Typedef struct {
+ Name string
+ Type Type
+}
+
+func (td *Typedef) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, td, td.Type)
+}
+
+func (td *Typedef) TypeName() string { return td.Name }
+
+func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) }
+func (td *Typedef) copy() Type {
+ cpy := *td
+ return &cpy
+}
+
+// Volatile is a qualifier.
+type Volatile struct {
+ Type Type
+}
+
+func (v *Volatile) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, v, v.Type)
+}
+
+func (v *Volatile) TypeName() string { return "" }
+
+func (v *Volatile) qualify() Type { return v.Type }
+func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) }
+func (v *Volatile) copy() Type {
+ cpy := *v
+ return &cpy
+}
+
+// Const is a qualifier.
+type Const struct {
+ Type Type
+}
+
+func (c *Const) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, c, c.Type)
+}
+
+func (c *Const) TypeName() string { return "" }
+
+func (c *Const) qualify() Type { return c.Type }
+func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) }
+func (c *Const) copy() Type {
+ cpy := *c
+ return &cpy
+}
+
+// Restrict is a qualifier.
+type Restrict struct {
+ Type Type
+}
+
+func (r *Restrict) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, r, r.Type)
+}
+
+func (r *Restrict) TypeName() string { return "" }
+
+func (r *Restrict) qualify() Type { return r.Type }
+func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) }
+func (r *Restrict) copy() Type {
+ cpy := *r
+ return &cpy
+}
+
+// Func is a function definition.
+type Func struct {
+ Name string
+ Type Type
+ Linkage FuncLinkage
+}
+
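+// FuncMetadata returns the Func attached to an instruction's metadata, or
+// nil if there is none.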
+func FuncMetadata(ins *asm.Instruction) *Func {
+ fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func)
+ return fn
+}
+
+func (f *Func) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, f.Linkage, "proto=", f.Type)
+}
+
+func (f *Func) TypeName() string { return f.Name }
+
+func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) }
+func (f *Func) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// FuncProto is a function declaration.
+type FuncProto struct {
+ Return Type
+ Params []FuncParam
+}
+
+func (fp *FuncProto) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return)
+}
+
+func (fp *FuncProto) TypeName() string { return "" }
+
+func (fp *FuncProto) walk(tdq *typeDeque) {
+ tdq.push(&fp.Return)
+ for i := range fp.Params {
+ tdq.push(&fp.Params[i].Type)
+ }
+}
+
+func (fp *FuncProto) copy() Type {
+ cpy := *fp
+ cpy.Params = make([]FuncParam, len(fp.Params))
+ copy(cpy.Params, fp.Params)
+ return &cpy
+}
+
+type FuncParam struct {
+ Name string
+ Type Type
+}
+
+// Var is a global variable.
+type Var struct {
+ Name string
+ Type Type
+ Linkage VarLinkage
+}
+
+func (v *Var) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, v, v.Linkage)
+}
+
+func (v *Var) TypeName() string { return v.Name }
+
+func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) }
+func (v *Var) copy() Type {
+ cpy := *v
+ return &cpy
+}
+
+// Datasec is a global program section containing data.
+type Datasec struct {
+ Name string
+ Size uint32
+ Vars []VarSecinfo
+}
+
+func (ds *Datasec) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, ds)
+}
+
+func (ds *Datasec) TypeName() string { return ds.Name }
+
+func (ds *Datasec) size() uint32 { return ds.Size }
+
+func (ds *Datasec) walk(tdq *typeDeque) {
+ for i := range ds.Vars {
+ tdq.push(&ds.Vars[i].Type)
+ }
+}
+
+func (ds *Datasec) copy() Type {
+ cpy := *ds
+ cpy.Vars = make([]VarSecinfo, len(ds.Vars))
+ copy(cpy.Vars, ds.Vars)
+ return &cpy
+}
+
+// VarSecinfo describes a variable in a Datasec.
+//
+// It is not a valid Type.
+type VarSecinfo struct {
+ Type Type
+ Offset uint32
+ Size uint32
+}
+
+// Float is a float of a given length.
+type Float struct {
+ Name string
+
+ // The size of the float in bytes.
+ Size uint32
+}
+
+func (f *Float) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, "size=", f.Size*8)
+}
+
+func (f *Float) TypeName() string { return f.Name }
+func (f *Float) size() uint32 { return f.Size }
+func (f *Float) walk(*typeDeque) {}
+func (f *Float) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// cycle is a type which had to be elided since it exceeded maxTypeDepth.
+type cycle struct {
+ root Type
+}
+
+func (c *cycle) ID() TypeID { return math.MaxUint32 }
+func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) }
+func (c *cycle) TypeName() string { return "" }
+func (c *cycle) walk(*typeDeque) {}
+func (c *cycle) copy() Type {
+ cpy := *c
+ return &cpy
+}
+
+type sizer interface {
+ size() uint32
+}
+
+var (
+ _ sizer = (*Int)(nil)
+ _ sizer = (*Pointer)(nil)
+ _ sizer = (*Struct)(nil)
+ _ sizer = (*Union)(nil)
+ _ sizer = (*Enum)(nil)
+ _ sizer = (*Datasec)(nil)
+)
+
+type qualifier interface {
+ qualify() Type
+}
+
+var (
+ _ qualifier = (*Const)(nil)
+ _ qualifier = (*Restrict)(nil)
+ _ qualifier = (*Volatile)(nil)
+)
+
+// Sizeof returns the size of a type in bytes.
+//
+// Returns an error if the size can't be computed.
+func Sizeof(typ Type) (int, error) {
+ var (
+ n = int64(1)
+ elem int64
+ )
+
+ for i := 0; i < maxTypeDepth; i++ {
+ switch v := typ.(type) {
+ case *Array:
+ if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ // Arrays may be of zero length, which allows
+ // n to be zero as well.
+ n *= int64(v.Nelems)
+ typ = v.Type
+ continue
+
+ case sizer:
+ elem = int64(v.size())
+
+ case *Typedef:
+ typ = v.Type
+ continue
+
+ case qualifier:
+ typ = v.qualify()
+ continue
+
+ default:
+ return 0, fmt.Errorf("unsized type %T", typ)
+ }
+
+ if n > 0 && elem > math.MaxInt64/n {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ size := n * elem
+ if int64(int(size)) != size {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ return int(size), nil
+ }
+
+ return 0, fmt.Errorf("type %s: exceeded type depth", typ)
+}
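+
+// A short sketch of how sizes compose (all values follow the rules above):
+//
+//	u32 := &Int{Size: 4}
+//	Sizeof(u32)                          // 4
+//	Sizeof(&Array{Type: u32, Nelems: 3}) // 12
+//	Sizeof(&Typedef{Type: u32})          // 4
+//	Sizeof(&Fwd{})                       // error: unsized type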
+
+// alignof returns the alignment of a type.
+//
+// Currently only supports the subset of types necessary for bitfield relocations.
+func alignof(typ Type) (int, error) {
+ switch t := UnderlyingType(typ).(type) {
+ case *Enum:
+ return int(t.size()), nil
+ case *Int:
+ return int(t.Size), nil
+ default:
+ return 0, fmt.Errorf("can't calculate alignment of %T", t)
+ }
+}
+
+// Transformer modifies a given Type and returns the result.
+//
+// For example, UnderlyingType removes any qualifiers or typedefs from a type.
+// See the example on Copy for how to use a transform.
+type Transformer func(Type) Type
+
+// Copy a Type recursively.
+//
+// typ may form a cycle. If transform is not nil, it is called with the
+// to be copied type, and the returned value is copied instead.
+func Copy(typ Type, transform Transformer) Type {
+ copies := make(copier)
+ copies.copy(&typ, transform)
+ return typ
+}
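+
+// For example, qualifiers and typedefs can be stripped from an entire type
+// graph by using UnderlyingType as the Transformer:
+//
+//	cpy := Copy(typ, UnderlyingType)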
+
+// copy a slice of Types recursively.
+//
+// See Copy for the semantics.
+func copyTypes(types []Type, transform Transformer) []Type {
+ result := make([]Type, len(types))
+ copy(result, types)
+
+ copies := make(copier)
+ for i := range result {
+ copies.copy(&result[i], transform)
+ }
+
+ return result
+}
+
+type copier map[Type]Type
+
+func (c copier) copy(typ *Type, transform Transformer) {
+ var work typeDeque
+ for t := typ; t != nil; t = work.pop() {
+ // *t is the identity of the type.
+ if cpy := c[*t]; cpy != nil {
+ *t = cpy
+ continue
+ }
+
+ var cpy Type
+ if transform != nil {
+ cpy = transform(*t).copy()
+ } else {
+ cpy = (*t).copy()
+ }
+
+ c[*t] = cpy
+ *t = cpy
+
+ // Mark any nested types for copying.
+ cpy.walk(&work)
+ }
+}
+
+// typeDeque keeps track of pointers to types which still
+// need to be visited.
+type typeDeque struct {
+ types []*Type
+ read, write uint64
+ mask uint64
+}
+
+func (dq *typeDeque) empty() bool {
+ return dq.read == dq.write
+}
+
+// push adds a type to the stack.
+func (dq *typeDeque) push(t *Type) {
+ if dq.write-dq.read < uint64(len(dq.types)) {
+ dq.types[dq.write&dq.mask] = t
+ dq.write++
+ return
+ }
+
+ new := len(dq.types) * 2
+ if new == 0 {
+ new = 8
+ }
+
+ types := make([]*Type, new)
+ pivot := dq.read & dq.mask
+ n := copy(types, dq.types[pivot:])
+ n += copy(types[n:], dq.types[:pivot])
+ types[n] = t
+
+ dq.types = types
+ dq.mask = uint64(new) - 1
+ dq.read, dq.write = 0, uint64(n+1)
+}
+
+// shift returns the first element or nil.
+func (dq *typeDeque) shift() *Type {
+ if dq.empty() {
+ return nil
+ }
+
+ index := dq.read & dq.mask
+ t := dq.types[index]
+ dq.types[index] = nil
+ dq.read++
+ return t
+}
+
+// pop returns the last element or nil if the deque is empty.
+func (dq *typeDeque) pop() *Type {
+ if dq.empty() {
+ return nil
+ }
+
+ dq.write--
+ index := dq.write & dq.mask
+ t := dq.types[index]
+ dq.types[index] = nil
+ return t
+}
+
+// all returns all elements.
+//
+// The deque is empty after calling this method.
+func (dq *typeDeque) all() []*Type {
+ length := dq.write - dq.read
+ types := make([]*Type, 0, length)
+ for t := dq.shift(); t != nil; t = dq.shift() {
+ types = append(types, t)
+ }
+ return types
+}
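+
+// sketchTypeDeque is an illustrative, editor-added example (not upstream
+// code): the deque supports FIFO traversal via shift and LIFO traversal via
+// pop; copier.copy above uses the LIFO form.
+func sketchTypeDeque() {
+	var dq typeDeque
+	var a, b Type = &Void{}, &Int{Size: 4}
+	dq.push(&a)
+	dq.push(&b)
+	_ = dq.shift() // &a: first pushed, first out
+	_ = dq.pop()   // &b: last pushed, first out
+}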
+
+// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
+// it into a graph of Types connected via pointers.
+//
+// If baseTypes are provided, the raw types are interpreted as split BTF
+// (e.g. from a kernel module) layered on top of those base types.
+//
+// Returns a slice of types indexed by TypeID. Since BTF ignores compilation
+// units, multiple types may share the same name. A Type may form a cyclic graph
+// by pointing at itself.
+func inflateRawTypes(rawTypes []rawType, baseTypes types, rawStrings *stringTable) ([]Type, error) {
+	types := make([]Type, 0, len(rawTypes)+1) // +1 for Void, added below when inflating base BTF
+
+ typeIDOffset := TypeID(1) // Void is TypeID(0), so the rest starts from TypeID(1)
+
+ if baseTypes == nil {
+ // Void is defined to always be type ID 0, and is thus omitted from BTF.
+ types = append(types, (*Void)(nil))
+ } else {
+ // For split BTF, the next ID is max base BTF type ID + 1
+ typeIDOffset = TypeID(len(baseTypes))
+ }
+
+ type fixupDef struct {
+ id TypeID
+ typ *Type
+ }
+
+ var fixups []fixupDef
+ fixup := func(id TypeID, typ *Type) {
+ if id < TypeID(len(baseTypes)) {
+ *typ = baseTypes[id]
+ return
+ }
+
+ idx := id
+ if baseTypes != nil {
+ idx = id - TypeID(len(baseTypes))
+ }
+ if idx < TypeID(len(types)) {
+ // We've already inflated this type, fix it up immediately.
+ *typ = types[idx]
+ return
+ }
+ fixups = append(fixups, fixupDef{id, typ})
+ }
+
+ type assertion struct {
+ typ *Type
+ want reflect.Type
+ }
+
+ var assertions []assertion
+ assert := func(typ *Type, want reflect.Type) error {
+ if *typ != nil {
+ // The type has already been fixed up, check the type immediately.
+ if reflect.TypeOf(*typ) != want {
+ return fmt.Errorf("expected %s, got %T", want, *typ)
+ }
+ return nil
+ }
+ assertions = append(assertions, assertion{typ, want})
+ return nil
+ }
+
+ type bitfieldFixupDef struct {
+ id TypeID
+ m *Member
+ }
+
+ var (
+ legacyBitfields = make(map[TypeID][2]Bits) // offset, size
+ bitfieldFixups []bitfieldFixupDef
+ )
+ convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
+		// NB: The fixups below rely on this slice being pre-allocated:
+		// if append were to re-allocate members, the pointers taken to
+		// its elements would be invalidated.
+ members := make([]Member, 0, len(raw))
+ for i, btfMember := range raw {
+ name, err := rawStrings.Lookup(btfMember.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
+ }
+
+ members = append(members, Member{
+ Name: name,
+ Offset: Bits(btfMember.Offset),
+ })
+
+ m := &members[i]
+ fixup(raw[i].Type, &m.Type)
+
+ if kindFlag {
+ m.BitfieldSize = Bits(btfMember.Offset >> 24)
+ m.Offset &= 0xffffff
+				// We ignore legacy bitfield definitions if the current composite
+				// is a new-style bitfield. This is safe since, according to the
+				// spec, offset and size on the member's type must be zero if
+				// kindFlag is set.
+ continue
+ }
+
+ // This may be a legacy bitfield, try to fix it up.
+ data, ok := legacyBitfields[raw[i].Type]
+ if ok {
+ // Bingo!
+ m.Offset += data[0]
+ m.BitfieldSize = data[1]
+ continue
+ }
+
+ if m.Type != nil {
+ // We couldn't find a legacy bitfield, but we know that the member's
+ // type has already been inflated. Hence we know that it can't be
+ // a legacy bitfield and there is nothing left to do.
+ continue
+ }
+
+ // We don't have fixup data, and the type we're pointing
+ // at hasn't been inflated yet. No choice but to defer
+ // the fixup.
+ bitfieldFixups = append(bitfieldFixups, bitfieldFixupDef{
+ raw[i].Type,
+ m,
+ })
+ }
+ return members, nil
+ }
+
+ for i, raw := range rawTypes {
+ var (
+ id = typeIDOffset + TypeID(i)
+ typ Type
+ )
+
+ name, err := rawStrings.Lookup(raw.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for type id %d: %w", id, err)
+ }
+
+ switch raw.Kind() {
+ case kindInt:
+ size := raw.Size()
+ bi := raw.data.(*btfInt)
+ if bi.Offset() > 0 || bi.Bits().Bytes() != size {
+ legacyBitfields[id] = [2]Bits{bi.Offset(), bi.Bits()}
+ }
+			typ = &Int{name, size, bi.Encoding()}
+
+ case kindPointer:
+ ptr := &Pointer{nil}
+ fixup(raw.Type(), &ptr.Target)
+ typ = ptr
+
+ case kindArray:
+ btfArr := raw.data.(*btfArray)
+ arr := &Array{nil, nil, btfArr.Nelems}
+ fixup(btfArr.IndexType, &arr.Index)
+ fixup(btfArr.Type, &arr.Type)
+ typ = arr
+
+ case kindStruct:
+ members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
+ if err != nil {
+ return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
+ }
+ typ = &Struct{name, raw.Size(), members}
+
+ case kindUnion:
+ members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag())
+ if err != nil {
+ return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
+ }
+ typ = &Union{name, raw.Size(), members}
+
+ case kindEnum:
+ rawvals := raw.data.([]btfEnum)
+ vals := make([]EnumValue, 0, len(rawvals))
+ for i, btfVal := range rawvals {
+ name, err := rawStrings.Lookup(btfVal.NameOff)
+ if err != nil {
+					return nil, fmt.Errorf("get name for enum value %d: %w", i, err)
+ }
+ vals = append(vals, EnumValue{
+ Name: name,
+ Value: btfVal.Val,
+ })
+ }
+ typ = &Enum{name, raw.Size(), vals}
+
+ case kindForward:
+ if raw.KindFlag() {
+ typ = &Fwd{name, FwdUnion}
+ } else {
+ typ = &Fwd{name, FwdStruct}
+ }
+
+ case kindTypedef:
+ typedef := &Typedef{name, nil}
+ fixup(raw.Type(), &typedef.Type)
+ typ = typedef
+
+ case kindVolatile:
+ volatile := &Volatile{nil}
+ fixup(raw.Type(), &volatile.Type)
+ typ = volatile
+
+ case kindConst:
+ cnst := &Const{nil}
+ fixup(raw.Type(), &cnst.Type)
+ typ = cnst
+
+ case kindRestrict:
+ restrict := &Restrict{nil}
+ fixup(raw.Type(), &restrict.Type)
+ typ = restrict
+
+ case kindFunc:
+ fn := &Func{name, nil, raw.Linkage()}
+ fixup(raw.Type(), &fn.Type)
+ if err := assert(&fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil {
+ return nil, err
+ }
+ typ = fn
+
+ case kindFuncProto:
+ rawparams := raw.data.([]btfParam)
+ params := make([]FuncParam, 0, len(rawparams))
+ for i, param := range rawparams {
+ name, err := rawStrings.Lookup(param.NameOff)
+ if err != nil {
+					return nil, fmt.Errorf("get name for func proto parameter %d: %w", i, err)
+ }
+ params = append(params, FuncParam{
+ Name: name,
+ })
+ }
+ for i := range params {
+ fixup(rawparams[i].Type, &params[i].Type)
+ }
+
+ fp := &FuncProto{nil, params}
+ fixup(raw.Type(), &fp.Return)
+ typ = fp
+
+ case kindVar:
+ variable := raw.data.(*btfVariable)
+ v := &Var{name, nil, VarLinkage(variable.Linkage)}
+ fixup(raw.Type(), &v.Type)
+ typ = v
+
+ case kindDatasec:
+ btfVars := raw.data.([]btfVarSecinfo)
+ vars := make([]VarSecinfo, 0, len(btfVars))
+ for _, btfVar := range btfVars {
+ vars = append(vars, VarSecinfo{
+ Offset: btfVar.Offset,
+ Size: btfVar.Size,
+ })
+ }
+ for i := range vars {
+ fixup(btfVars[i].Type, &vars[i].Type)
+ if err := assert(&vars[i].Type, reflect.TypeOf((*Var)(nil))); err != nil {
+ return nil, err
+ }
+ }
+ typ = &Datasec{name, raw.SizeType, vars}
+
+ case kindFloat:
+ typ = &Float{name, raw.Size()}
+
+ default:
+ return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
+ }
+
+ types = append(types, typ)
+ }
+
+ for _, fixup := range fixups {
+ i := int(fixup.id)
+ if i >= len(types)+len(baseTypes) {
+ return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
+ }
+ if i < len(baseTypes) {
+ return nil, fmt.Errorf("fixup for base type id %d is not expected", i)
+ }
+
+ *fixup.typ = types[i-len(baseTypes)]
+ }
+
+ for _, bitfieldFixup := range bitfieldFixups {
+ if bitfieldFixup.id < TypeID(len(baseTypes)) {
+ return nil, fmt.Errorf("bitfield fixup from split to base types is not expected")
+ }
+
+ data, ok := legacyBitfields[bitfieldFixup.id]
+ if ok {
+ // This is indeed a legacy bitfield, fix it up.
+ bitfieldFixup.m.Offset += data[0]
+ bitfieldFixup.m.BitfieldSize = data[1]
+ }
+ }
+
+ for _, assertion := range assertions {
+ if reflect.TypeOf(*assertion.typ) != assertion.want {
+ return nil, fmt.Errorf("expected %s, got %T", assertion.want, *assertion.typ)
+ }
+ }
+
+ return types, nil
+}
+
+// essentialName represents the name of a BTF type stripped of any flavor
+// suffixes after a ___ delimiter.
+type essentialName string
+
+// newEssentialName returns name without a ___ suffix.
+//
+// CO-RE has the concept of 'struct flavors', which are used to deal with
+// changes in kernel data structures. Anything after three underscores
+// in a type name is ignored for the purpose of finding a candidate type
+// in the kernel's BTF.
+func newEssentialName(name string) essentialName {
+ if name == "" {
+ return ""
+ }
+ lastIdx := strings.LastIndex(name, "___")
+ if lastIdx > 0 {
+ return essentialName(name[:lastIdx])
+ }
+ return essentialName(name)
+}
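+
+// sketchEssentialName is an illustrative, editor-added example (not upstream
+// code): the hypothetical flavor name below collapses to its essential name,
+// while names without the ___ delimiter pass through unchanged.
+func sketchEssentialName() {
+	_ = newEssentialName("task_struct___v1") // "task_struct"
+	_ = newEssentialName("task_struct")      // "task_struct"
+}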
+
+// UnderlyingType skips qualifiers and Typedefs.
+func UnderlyingType(typ Type) Type {
+ result := typ
+ for depth := 0; depth <= maxTypeDepth; depth++ {
+ switch v := (result).(type) {
+ case qualifier:
+ result = v.qualify()
+ case *Typedef:
+ result = v.Type
+ default:
+ return result
+ }
+ }
+ return &cycle{typ}
+}
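+
+// sketchUnderlyingType is an illustrative, editor-added example (not
+// upstream code): a volatile const typedef of an Int resolves to the Int
+// itself, no matter how the wrappers are nested.
+func sketchUnderlyingType() {
+	i := &Int{Name: "int", Size: 4}
+	wrapped := &Volatile{Type: &Const{Type: &Typedef{Name: "myint", Type: i}}}
+	_ = UnderlyingType(wrapped) // returns i
+}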
+
+type formatState struct {
+ fmt.State
+ depth int
+}
+
+// formattableType is a subset of Type, to ease unit testing of formatType.
+type formattableType interface {
+ fmt.Formatter
+ TypeName() string
+}
+
+// formatType formats a type in a canonical form.
+//
+// Handles cyclical types by only printing cycles up to a certain depth. Elements
+// in extra are separated by spaces unless the preceding element is a string
+// ending in '='.
+func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) {
+ if verb != 'v' && verb != 's' {
+ fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb)
+ return
+ }
+
+ // This is the same as %T, but elides the package name. Assumes that
+ // formattableType is implemented by a pointer receiver.
+ goTypeName := reflect.TypeOf(t).Elem().Name()
+ _, _ = io.WriteString(f, goTypeName)
+
+ if name := t.TypeName(); name != "" {
+ // Output BTF type name if present.
+ fmt.Fprintf(f, ":%q", name)
+ }
+
+ if f.Flag('+') {
+ // Output address if requested.
+ fmt.Fprintf(f, ":%#p", t)
+ }
+
+ if verb == 's' {
+ // %s omits details.
+ return
+ }
+
+ var depth int
+ if ps, ok := f.(*formatState); ok {
+ depth = ps.depth
+ f = ps.State
+ }
+
+ maxDepth, ok := f.Width()
+ if !ok {
+ maxDepth = 0
+ }
+
+ if depth > maxDepth {
+ // We've reached the maximum depth. This avoids infinite recursion even
+ // for cyclical types.
+ return
+ }
+
+ if len(extra) == 0 {
+ return
+ }
+
+ wantSpace := false
+ _, _ = io.WriteString(f, "[")
+ for _, arg := range extra {
+ if wantSpace {
+ _, _ = io.WriteString(f, " ")
+ }
+
+ switch v := arg.(type) {
+ case string:
+ _, _ = io.WriteString(f, v)
+ wantSpace = len(v) > 0 && v[len(v)-1] != '='
+ continue
+
+ case formattableType:
+ v.Format(&formatState{f, depth + 1}, verb)
+
+ default:
+ fmt.Fprint(f, arg)
+ }
+
+ wantSpace = true
+ }
+ _, _ = io.WriteString(f, "]")
+}
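+
+// sketchFormatType is an illustrative, editor-added example (not upstream
+// code), assuming Int implements fmt.Formatter via formatType as elsewhere
+// in this file: %s prints only the kind and BTF name, %v adds details, and
+// a width caps the recursion depth for cyclic types.
+func sketchFormatType() {
+	i := &Int{Name: "int", Size: 4}
+	_ = fmt.Sprintf("%s", i)  // e.g. Int:"int"
+	_ = fmt.Sprintf("%1v", i) // details, at most one level deep
+}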