path: root/vendor/golang.org/x/tools/internal
Diffstat (limited to 'vendor/golang.org/x/tools/internal')
-rw-r--r--  vendor/golang.org/x/tools/internal/aliases/aliases.go | 10
-rw-r--r--  vendor/golang.org/x/tools/internal/aliases/aliases_go121.go | 31
-rw-r--r--  vendor/golang.org/x/tools/internal/aliases/aliases_go122.go | 57
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/bimport.go | 61
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/exportdata.go | 71
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go | 69
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/iexport.go | 284
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/iimport.go | 43
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go | 53
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go | 22
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go | 14
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/predeclared.go | 91
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/support_go118.go | 34
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/unified_no.go | 10
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go | 10
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go | 44
-rw-r--r--  vendor/golang.org/x/tools/internal/gocommand/invoke.go | 37
-rw-r--r--  vendor/golang.org/x/tools/internal/imports/fix.go | 521
-rw-r--r--  vendor/golang.org/x/tools/internal/imports/imports.go | 33
-rw-r--r--  vendor/golang.org/x/tools/internal/imports/mod.go | 17
-rw-r--r--  vendor/golang.org/x/tools/internal/imports/source.go | 63
-rw-r--r--  vendor/golang.org/x/tools/internal/imports/source_env.go | 129
-rw-r--r--  vendor/golang.org/x/tools/internal/imports/source_modindex.go | 103
-rw-r--r--  vendor/golang.org/x/tools/internal/modindex/directories.go | 135
-rw-r--r--  vendor/golang.org/x/tools/internal/modindex/index.go | 262
-rw-r--r--  vendor/golang.org/x/tools/internal/modindex/lookup.go | 145
-rw-r--r--  vendor/golang.org/x/tools/internal/modindex/modindex.go | 164
-rw-r--r--  vendor/golang.org/x/tools/internal/modindex/symbols.go | 189
-rw-r--r--  vendor/golang.org/x/tools/internal/modindex/types.go | 25
-rw-r--r--  vendor/golang.org/x/tools/internal/packagesinternal/packages.go | 2
-rw-r--r--  vendor/golang.org/x/tools/internal/pkgbits/decoder.go | 38
-rw-r--r--  vendor/golang.org/x/tools/internal/pkgbits/encoder.go | 43
-rw-r--r--  vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go | 21
-rw-r--r--  vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go | 28
-rw-r--r--  vendor/golang.org/x/tools/internal/pkgbits/support.go | 2
-rw-r--r--  vendor/golang.org/x/tools/internal/pkgbits/sync.go | 23
-rw-r--r--  vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go | 7
-rw-r--r--  vendor/golang.org/x/tools/internal/pkgbits/version.go | 85
-rw-r--r--  vendor/golang.org/x/tools/internal/stdlib/manifest.go | 111
-rw-r--r--  vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go | 137
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/common.go | 140
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/coretype.go | 150
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/free.go | 131
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/normalize.go | 218
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/termlist.go | 163
-rw-r--r--  vendor/golang.org/x/tools/internal/typeparams/typeterm.go | 169
-rw-r--r--  vendor/golang.org/x/tools/internal/typesinternal/element.go | 133
-rw-r--r--  vendor/golang.org/x/tools/internal/typesinternal/errorcode.go | 8
-rw-r--r--  vendor/golang.org/x/tools/internal/typesinternal/recv.go | 8
-rw-r--r--  vendor/golang.org/x/tools/internal/typesinternal/types.go | 71
-rw-r--r--  vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go | 282
-rw-r--r--  vendor/golang.org/x/tools/internal/versions/toolchain.go | 14
-rw-r--r--  vendor/golang.org/x/tools/internal/versions/toolchain_go119.go | 14
-rw-r--r--  vendor/golang.org/x/tools/internal/versions/toolchain_go120.go | 14
-rw-r--r--  vendor/golang.org/x/tools/internal/versions/toolchain_go121.go | 14
-rw-r--r--  vendor/golang.org/x/tools/internal/versions/types.go | 28
-rw-r--r--  vendor/golang.org/x/tools/internal/versions/types_go121.go | 30
-rw-r--r--  vendor/golang.org/x/tools/internal/versions/types_go122.go | 41
58 files changed, 3877 insertions, 975 deletions
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go
index c24c2eee4..b9425f5a2 100644
--- a/vendor/golang.org/x/tools/internal/aliases/aliases.go
+++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go
@@ -22,11 +22,17 @@ import (
// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled
// function is expensive and should be called once per task (e.g.
// package import), not once per call to NewAlias.
-func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName {
+//
+// Precondition: enabled || len(tparams)==0.
+// If materialized aliases are disabled, there must not be any type parameters.
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName {
if enabled {
tname := types.NewTypeName(pos, pkg, name, nil)
- newAlias(tname, rhs)
+ SetTypeParams(types.NewAlias(tname, rhs), tparams)
return tname
}
+ if len(tparams) > 0 {
+ panic("cannot create an alias with type parameters when gotypesalias is not enabled")
+ }
return types.NewTypeName(pos, pkg, name, rhs)
}
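
A minimal usage sketch for the widened NewAlias signature (hedged: the aliases package is internal to x/tools, so this only compiles from within that module; the package path and names below are illustrative):

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/aliases"
)

func main() {
	pkg := types.NewPackage("example.com/p", "p")
	rhs := types.NewSlice(types.Typ[types.Int])

	// Enabled is expensive; evaluate it once per task and thread it through.
	enabled := aliases.Enabled()

	// Plain (non-generic) aliases pass nil type parameters; a non-empty
	// slice with enabled == false would trip the new panic above.
	obj := aliases.NewAlias(enabled, token.NoPos, pkg, "Ints", rhs, nil)
	fmt.Println(obj.Name(), obj.Type()) // "Ints" and either p.Ints (materialized) or []int
}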
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
deleted file mode 100644
index c027b9f31..000000000
--- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.22
-// +build !go1.22
-
-package aliases
-
-import (
- "go/types"
-)
-
-// Alias is a placeholder for a go/types.Alias for <=1.21.
-// It will never be created by go/types.
-type Alias struct{}
-
-func (*Alias) String() string { panic("unreachable") }
-func (*Alias) Underlying() types.Type { panic("unreachable") }
-func (*Alias) Obj() *types.TypeName { panic("unreachable") }
-func Rhs(alias *Alias) types.Type { panic("unreachable") }
-
-// Unalias returns the type t for go <=1.21.
-func Unalias(t types.Type) types.Type { return t }
-
-func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") }
-
-// Enabled reports whether [NewAlias] should create [types.Alias] types.
-//
-// Before go1.22, this function always returns false.
-func Enabled() bool { return false }
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
index b32995484..7716a3331 100644
--- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
+++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.22
-// +build go1.22
-
package aliases
import (
@@ -14,31 +11,51 @@ import (
"go/types"
)
-// Alias is an alias of types.Alias.
-type Alias = types.Alias
-
// Rhs returns the type on the right-hand side of the alias declaration.
-func Rhs(alias *Alias) types.Type {
+func Rhs(alias *types.Alias) types.Type {
if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok {
return alias.Rhs() // go1.23+
}
// go1.22's Alias didn't have the Rhs method,
// so Unalias is the best we can do.
- return Unalias(alias)
+ return types.Unalias(alias)
+}
+
+// TypeParams returns the type parameter list of the alias.
+func TypeParams(alias *types.Alias) *types.TypeParamList {
+ if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok {
+ return alias.TypeParams() // go1.23+
+ }
+ return nil
+}
+
+// SetTypeParams sets the type parameters of the alias type.
+func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) {
+ if alias, ok := any(alias).(interface {
+ SetTypeParams(tparams []*types.TypeParam)
+ }); ok {
+ alias.SetTypeParams(tparams) // go1.23+
+ } else if len(tparams) > 0 {
+ panic("cannot set type parameters of an Alias type in go1.22")
+ }
+}
+
+// TypeArgs returns the type arguments used to instantiate the Alias type.
+func TypeArgs(alias *types.Alias) *types.TypeList {
+ if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok {
+ return alias.TypeArgs() // go1.23+
+ }
+ return nil // empty (go1.22)
}
-// Unalias is a wrapper of types.Unalias.
-func Unalias(t types.Type) types.Type { return types.Unalias(t) }
-
-// newAlias is an internal alias around types.NewAlias.
-// Direct usage is discouraged as the moment.
-// Try to use NewAlias instead.
-func newAlias(tname *types.TypeName, rhs types.Type) *Alias {
- a := types.NewAlias(tname, rhs)
- // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect.
- Unalias(a)
- return a
+// Origin returns the generic Alias type of which alias is an instance.
+// If alias is not an instance of a generic alias, Origin returns alias.
+func Origin(alias *types.Alias) *types.Alias {
+ if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok {
+ return alias.Origin() // go1.23+
+ }
+ return alias // not an instance of a generic alias (go1.22)
}
// Enabled reports whether [NewAlias] should create [types.Alias] types.
@@ -56,7 +73,7 @@ func Enabled() bool {
// many tests. Therefore any attempt to cache the result
// is just incorrect.
fset := token.NewFileSet()
- f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0)
+ f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
_, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
return enabled
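
Each accessor above relies on the same capability probe: assert the *types.Alias against an anonymous interface declaring the go1.23+ method, and fall back to a go1.22-safe answer when the method is missing. A hypothetical in-module caller (describeAlias is not part of this change):

package p

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/internal/aliases"
)

func describeAlias(a *types.Alias) string {
	rhs := aliases.Rhs(a)            // right-hand side; falls back to types.Unalias on go1.22
	n := aliases.TypeParams(a).Len() // 0 on go1.22, where generic aliases do not exist
	return fmt.Sprintf("%v has %d type parameter(s), rhs %v", aliases.Origin(a), n, rhs)
}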
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
index d98b0db2a..d79a605ed 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir {
return 0
}
}
-
-var predeclOnce sync.Once
-var predecl []types.Type // initialized lazily
-
-func predeclared() []types.Type {
- predeclOnce.Do(func() {
- // initialize lazily to be sure that all
- // elements have been initialized before
- predecl = []types.Type{ // basic types
- types.Typ[types.Bool],
- types.Typ[types.Int],
- types.Typ[types.Int8],
- types.Typ[types.Int16],
- types.Typ[types.Int32],
- types.Typ[types.Int64],
- types.Typ[types.Uint],
- types.Typ[types.Uint8],
- types.Typ[types.Uint16],
- types.Typ[types.Uint32],
- types.Typ[types.Uint64],
- types.Typ[types.Uintptr],
- types.Typ[types.Float32],
- types.Typ[types.Float64],
- types.Typ[types.Complex64],
- types.Typ[types.Complex128],
- types.Typ[types.String],
-
- // basic type aliases
- types.Universe.Lookup("byte").Type(),
- types.Universe.Lookup("rune").Type(),
-
- // error
- types.Universe.Lookup("error").Type(),
-
- // untyped types
- types.Typ[types.UntypedBool],
- types.Typ[types.UntypedInt],
- types.Typ[types.UntypedRune],
- types.Typ[types.UntypedFloat],
- types.Typ[types.UntypedComplex],
- types.Typ[types.UntypedString],
- types.Typ[types.UntypedNil],
-
- // package unsafe
- types.Typ[types.UnsafePointer],
-
- // invalid type
- types.Typ[types.Invalid], // only appears in packages with errors
-
- // used internally by gc; never used by this package or in .a files
- anyType{},
- }
- predecl = append(predecl, additionalPredeclared()...)
- })
- return predecl
-}
-
-type anyType struct{}
-
-func (t anyType) Underlying() types.Type { return t }
-func (t anyType) String() string { return "any" }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
index f6437feb1..6f5d8a213 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
@@ -39,12 +39,15 @@ func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
}
// FindExportData positions the reader r at the beginning of the
-// export data section of an underlying GC-created object/archive
+// export data section of an underlying cmd/compile created archive
// file by reading from it. The reader must be positioned at the
-// start of the file before calling this function. The hdr result
-// is the string before the export data, either "$$" or "$$B".
-// The size result is the length of the export data in bytes, or -1 if not known.
-func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
+// start of the file before calling this function.
+// The size result is the length of the export data in bytes.
+//
+// This function is needed by [gcexportdata.Read], which must
+// accept inputs produced by the last two releases of cmd/compile,
+// plus tip.
+func FindExportData(r *bufio.Reader) (size int64, err error) {
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
@@ -52,27 +55,32 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
return
}
- if string(line) == "!<arch>\n" {
- // Archive file. Scan to __.PKGDEF.
- var name string
- if name, size, err = readGopackHeader(r); err != nil {
- return
- }
+ // Is the first line an archive file signature?
+ if string(line) != "!<arch>\n" {
+ err = fmt.Errorf("not the start of an archive file (%q)", line)
+ return
+ }
- // First entry should be __.PKGDEF.
- if name != "__.PKGDEF" {
- err = fmt.Errorf("go archive is missing __.PKGDEF")
- return
- }
+ // Archive file. Scan to __.PKGDEF.
+ var name string
+ if name, size, err = readGopackHeader(r); err != nil {
+ return
+ }
+ arsize := size
- // Read first line of __.PKGDEF data, so that line
- // is once again the first line of the input.
- if line, err = r.ReadSlice('\n'); err != nil {
- err = fmt.Errorf("can't find export data (%v)", err)
- return
- }
- size -= int64(len(line))
+ // First entry should be __.PKGDEF.
+ if name != "__.PKGDEF" {
+ err = fmt.Errorf("go archive is missing __.PKGDEF")
+ return
+ }
+
+ // Read first line of __.PKGDEF data, so that line
+ // is once again the first line of the input.
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
}
+ size -= int64(len(line))
// Now at __.PKGDEF in archive or still at beginning of file.
// Either way, line should begin with "go object ".
@@ -81,8 +89,8 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
return
}
- // Skip over object header to export data.
- // Begins after first line starting with $$.
+ // Skip over object headers to get to the export data section header "$$B\n".
+ // Object headers are lines that do not start with '$'.
for line[0] != '$' {
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
@@ -90,9 +98,18 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
}
size -= int64(len(line))
}
- hdr = string(line)
+
+ // Check for the binary export data section header "$$B\n".
+ hdr := string(line)
+ if hdr != "$$B\n" {
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ return
+ }
+ // TODO(taking): Remove end-of-section marker "\n$$\n" from size.
+
if size < 0 {
- size = -1
+ err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
+ return
}
return
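
With the header-string result gone and the "$$B" check folded into FindExportData, callers only see the payload size. A hedged sketch (gcimporter is internal to x/tools; the archive path is illustrative):

package main

import (
	"bufio"
	"fmt"
	"log"
	"os"

	"golang.org/x/tools/internal/gcimporter"
)

func main() {
	f, err := os.Open("fmt.a") // any archive produced by cmd/compile
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r := bufio.NewReader(f)
	size, err := gcimporter.FindExportData(r)
	if err != nil {
		log.Fatal(err) // non-archives and unknown section headers now fail here
	}
	fmt.Printf("positioned at export data: %d bytes\n", size)
}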
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
index 39df91124..dbbca8604 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -161,6 +161,8 @@ func FindPkg(path, srcDir string) (filename, id string) {
// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
+//
+// TODO(taking): Import is only used in tests. Move to gcimporter_test.
func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
var rc io.ReadCloser
var filename, id string
@@ -210,53 +212,50 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}
defer rc.Close()
- var hdr string
var size int64
buf := bufio.NewReader(rc)
- if hdr, size, err = FindExportData(buf); err != nil {
+ if size, err = FindExportData(buf); err != nil {
return
}
- switch hdr {
- case "$$B\n":
- var data []byte
- data, err = io.ReadAll(buf)
- if err != nil {
- break
- }
+ var data []byte
+ data, err = io.ReadAll(buf)
+ if err != nil {
+ return
+ }
+ if len(data) == 0 {
+ return nil, fmt.Errorf("no data to load a package from for path %s", id)
+ }
- // TODO(gri): allow clients of go/importer to provide a FileSet.
- // Or, define a new standard go/types/gcexportdata package.
- fset := token.NewFileSet()
+ // TODO(gri): allow clients of go/importer to provide a FileSet.
+ // Or, define a new standard go/types/gcexportdata package.
+ fset := token.NewFileSet()
- // Select appropriate importer.
- if len(data) > 0 {
- switch data[0] {
- case 'v', 'c', 'd': // binary, till go1.10
- return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+ // Select appropriate importer.
+ switch data[0] {
+ case 'v', 'c', 'd':
+ // binary: emitted by cmd/compile till go1.10; obsolete.
+ return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
- case 'i': // indexed, till go1.19
- _, pkg, err := IImportData(fset, packages, data[1:], id)
- return pkg, err
+ case 'i':
+ // indexed: emitted by cmd/compile till go1.19;
+ // now used only for serializing go/types.
+ // See https://github.com/golang/go/issues/69491.
+ _, pkg, err := IImportData(fset, packages, data[1:], id)
+ return pkg, err
- case 'u': // unified, from go1.20
- _, pkg, err := UImportData(fset, packages, data[1:size], id)
- return pkg, err
-
- default:
- l := len(data)
- if l > 10 {
- l = 10
- }
- return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
- }
- }
+ case 'u':
+ // unified: emitted by cmd/compile since go1.20.
+ _, pkg, err := UImportData(fset, packages, data[1:size], id)
+ return pkg, err
default:
- err = fmt.Errorf("unknown export data header: %q", hdr)
+ l := len(data)
+ if l > 10 {
+ l = 10
+ }
+ return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
}
-
- return
}
type byPath []*types.Package
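
Since FindExportData already validated the "$$B\n" header, Import now dispatches purely on the payload's first byte. A hypothetical test-style use of the (now test-only) entry point, supplying an explicit lookup function so no GOROOT layout is assumed:

package gcimporter

import (
	"go/types"
	"io"
	"os"
	"testing"
)

func TestImportFromArchive(t *testing.T) {
	lookup := func(path string) (io.ReadCloser, error) {
		return os.Open("testdata/fmt.a") // illustrative archive built by cmd/compile
	}
	packages := make(map[string]*types.Package)
	pkg, err := Import(packages, "fmt", "", lookup)
	if err != nil {
		t.Fatal(err) // obsolete 'v'/'c'/'d' payloads report "no longer supported" here
	}
	t.Log(pkg.Path(), pkg.Name())
}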
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index deeb67f31..7dfc31a37 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -2,9 +2,227 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Indexed binary package export.
-// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
-// see that file for specification of the format.
+// Indexed package export.
+//
+// The indexed export data format is an evolution of the previous
+// binary export data format. Its chief contribution is introducing an
+// index table, which allows efficient random access of individual
+// declarations and inline function bodies. In turn, this allows
+// avoiding unnecessary work for compilation units that import large
+// packages.
+//
+//
+// The top-level data format is structured as:
+//
+// Header struct {
+// Tag byte // 'i'
+// Version uvarint
+// StringSize uvarint
+// DataSize uvarint
+// }
+//
+// Strings [StringSize]byte
+// Data [DataSize]byte
+//
+// MainIndex []struct{
+// PkgPath stringOff
+// PkgName stringOff
+// PkgHeight uvarint
+//
+// Decls []struct{
+// Name stringOff
+// Offset declOff
+// }
+// }
+//
+// Fingerprint [8]byte
+//
+// uvarint means a uint64 written out using uvarint encoding.
+//
+// []T means a uvarint followed by that many T objects. In other
+// words:
+//
+// Len uvarint
+// Elems [Len]T
+//
+// stringOff means a uvarint that indicates an offset within the
+// Strings section. At that offset is another uvarint, followed by
+// that many bytes, which form the string value.
+//
+// declOff means a uvarint that indicates an offset within the Data
+// section where the associated declaration can be found.
+//
+//
+// There are five kinds of declarations, distinguished by their first
+// byte:
+//
+// type Var struct {
+// Tag byte // 'V'
+// Pos Pos
+// Type typeOff
+// }
+//
+// type Func struct {
+// Tag byte // 'F' or 'G'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'G'
+// Signature Signature
+// }
+//
+// type Const struct {
+// Tag byte // 'C'
+// Pos Pos
+// Value Value
+// }
+//
+// type Type struct {
+// Tag byte // 'T' or 'U'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'U'
+// Underlying typeOff
+//
+// Methods []struct{ // omitted if Underlying is an interface type
+// Pos Pos
+// Name stringOff
+// Recv Param
+// Signature Signature
+// }
+// }
+//
+// type Alias struct {
+// Tag byte // 'A' or 'B'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'B'
+// Type typeOff
+// }
+//
+// // "Automatic" declaration of each typeparam
+// type TypeParam struct {
+// Tag byte // 'P'
+// Pos Pos
+// Implicit bool
+// Constraint typeOff
+// }
+//
+// typeOff means a uvarint that either indicates a predeclared type,
+// or an offset into the Data section. If the uvarint is less than
+// predeclReserved, then it indicates the index into the predeclared
+// types list (see predeclared in bexport.go for order). Otherwise,
+// subtracting predeclReserved yields the offset of a type descriptor.
+//
+// Value means a type, kind, and type-specific value. See
+// (*exportWriter).value for details.
+//
+//
+// There are twelve kinds of type descriptors, distinguished by an itag:
+//
+// type DefinedType struct {
+// Tag itag // definedType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// type PointerType struct {
+// Tag itag // pointerType
+// Elem typeOff
+// }
+//
+// type SliceType struct {
+// Tag itag // sliceType
+// Elem typeOff
+// }
+//
+// type ArrayType struct {
+// Tag itag // arrayType
+// Len uint64
+// Elem typeOff
+// }
+//
+// type ChanType struct {
+// Tag itag // chanType
+// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+// Elem typeOff
+// }
+//
+// type MapType struct {
+// Tag itag // mapType
+// Key typeOff
+// Elem typeOff
+// }
+//
+// type FuncType struct {
+// Tag itag // signatureType
+// PkgPath stringOff
+// Signature Signature
+// }
+//
+// type StructType struct {
+// Tag itag // structType
+// PkgPath stringOff
+// Fields []struct {
+// Pos Pos
+// Name stringOff
+// Type typeOff
+// Embedded bool
+// Note stringOff
+// }
+// }
+//
+// type InterfaceType struct {
+// Tag itag // interfaceType
+// PkgPath stringOff
+// Embeddeds []struct {
+// Pos Pos
+// Type typeOff
+// }
+// Methods []struct {
+// Pos Pos
+// Name stringOff
+// Signature Signature
+// }
+// }
+//
+// // Reference to a type param declaration
+// type TypeParamType struct {
+// Tag itag // typeParamType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// // Instantiation of a generic type (like List[T2] or List[int])
+// type InstanceType struct {
+// Tag itag // instanceType
+// Pos pos
+// TypeArgs []typeOff
+// BaseType typeOff
+// }
+//
+// type UnionType struct {
+// Tag itag // interfaceType
+// Terms []struct {
+// tilde bool
+// Type typeOff
+// }
+// }
+//
+//
+//
+// type Signature struct {
+// Params []Param
+// Results []Param
+// Variadic bool // omitted if Results is empty
+// }
+//
+// type Param struct {
+// Pos Pos
+// Name stringOff
+// Type typOff
+// }
+//
+//
+// Pos encodes a file:line:column triple, incorporating a simple delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
package gcimporter
@@ -24,11 +242,30 @@ import (
"golang.org/x/tools/go/types/objectpath"
"golang.org/x/tools/internal/aliases"
- "golang.org/x/tools/internal/tokeninternal"
)
// IExportShallow encodes "shallow" export data for the specified package.
//
+// For types, we use "shallow" export data. Historically, the Go
+// compiler always produced a summary of the types for a given package
+// that included types from other packages that it indirectly
+// referenced: "deep" export data. This had the advantage that the
+// compiler (and analogous tools such as gopls) need only load one
+// file per direct import. However, it meant that the files tended to
+// get larger based on the level of the package in the import
+// graph. For example, higher-level packages in the kubernetes module
+// have over 1MB of "deep" export data, even when they have almost no
+// content of their own, merely because they mention a major type that
+// references many others. In pathological cases the export data was
+// 300x larger than the source for a package due to this quadratic
+// growth.
+//
+// "Shallow" export data means that the serialized types describe only
+// a single package. If those types mention types from other packages,
+// the type checker may need to request additional packages beyond
+// just the direct imports. Type information for the entire transitive
+// closure of imports is provided (lazily) by the DAG.
+//
// No promises are made about the encoding other than that it can be decoded by
// the same version of IIExportShallow. If you plan to save export data in the
// file system, be sure to include a cryptographic digest of the executable in
@@ -51,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc)
}
// IImportShallow decodes "shallow" types.Package data encoded by
-// IExportShallow in the same executable. This function cannot import data from
-// cmd/compile or gcexportdata.Write.
+// [IExportShallow] in the same executable. This function cannot import data
+// from cmd/compile or gcexportdata.Write.
//
// The importer calls getPackages to obtain package symbols for all
// packages mentioned in the export data, including the one being
@@ -223,7 +460,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64)
// Sort the set of needed offsets. Duplicates are harmless.
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
- lines := tokeninternal.GetLines(file) // byte offset of each line start
+ lines := file.Lines() // byte offset of each line start
w.uint64(uint64(len(lines)))
// Rather than record the entire array of line start offsets,
@@ -507,13 +744,13 @@ func (p *iexporter) doDecl(obj types.Object) {
case *types.TypeName:
t := obj.Type()
- if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok {
+ if tparam, ok := types.Unalias(t).(*types.TypeParam); ok {
w.tag(typeParamTag)
w.pos(obj.Pos())
constraint := tparam.Constraint()
if p.version >= iexportVersionGo1_18 {
implicit := false
- if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil {
+ if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil {
implicit = iface.IsImplicit()
}
w.bool(implicit)
@@ -523,9 +760,22 @@ func (p *iexporter) doDecl(obj types.Object) {
}
if obj.IsAlias() {
- w.tag(aliasTag)
+ alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled
+
+ var tparams *types.TypeParamList
+ if materialized {
+ tparams = aliases.TypeParams(alias)
+ }
+ if tparams.Len() == 0 {
+ w.tag(aliasTag)
+ } else {
+ w.tag(genericAliasTag)
+ }
w.pos(obj.Pos())
- if alias, ok := t.(*aliases.Alias); ok {
+ if tparams.Len() > 0 {
+ w.tparamList(obj.Name(), tparams, obj.Pkg())
+ }
+ if materialized {
// Preserve materialized aliases,
// even of non-exported types.
t = aliases.Rhs(alias)
@@ -744,8 +994,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
}()
}
switch t := t.(type) {
- case *aliases.Alias:
- // TODO(adonovan): support parameterized aliases, following *types.Named.
+ case *types.Alias:
+ if targs := aliases.TypeArgs(t); targs.Len() > 0 {
+ w.startType(instanceType)
+ w.pos(t.Obj().Pos())
+ w.typeList(targs, pkg)
+ w.typ(aliases.Origin(t), pkg)
+ return
+ }
w.startType(aliasType)
w.qualifiedType(t.Obj())
@@ -854,7 +1110,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
for i := 0; i < n; i++ {
ft := t.EmbeddedType(i)
tPkg := pkg
- if named, _ := aliases.Unalias(ft).(*types.Named); named != nil {
+ if named, _ := types.Unalias(ft).(*types.Named); named != nil {
w.pos(named.Obj().Pos())
} else {
w.pos(token.NoPos)
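
The header layout documented at the top of this file can be walked with encoding/binary alone. A toy sketch over fabricated bytes (not the real decoder) that reads the header and resolves a stringOff of 0:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Fabricated payload: Tag 'i', Version 2, StringSize 5, DataSize 0,
	// then a Strings section holding one string ("path": uvarint length + bytes).
	payload := []byte{'i', 2, 5, 0, 4, 'p', 'a', 't', 'h'}

	r := bytes.NewReader(payload)
	tag, _ := r.ReadByte()
	version, _ := binary.ReadUvarint(r)
	stringSize, _ := binary.ReadUvarint(r)
	dataSize, _ := binary.ReadUvarint(r)

	strs := make([]byte, stringSize)
	r.Read(strs)

	// A stringOff of 0 points at offset 0 of Strings: a uvarint length
	// followed by that many bytes.
	sr := bytes.NewReader(strs)
	n, _ := binary.ReadUvarint(sr)
	name := make([]byte, n)
	sr.Read(name)

	fmt.Printf("tag=%c version=%d dataSize=%d string=%q\n", tag, version, dataSize, name)
	// tag=i version=2 dataSize=0 string="path"
}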
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index 136aa0365..e260c0e8d 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Indexed package import.
-// See cmd/compile/internal/gc/iexport.go for the export data format.
+// See iexport.go for the export data format.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
@@ -53,6 +53,7 @@ const (
iexportVersionPosCol = 1
iexportVersionGo1_18 = 2
iexportVersionGenerics = 2
+ iexportVersion = iexportVersionGenerics
iexportVersionCurrent = 2
)
@@ -540,7 +541,7 @@ func canReuse(def *types.Named, rhs types.Type) bool {
if def == nil {
return true
}
- iface, _ := aliases.Unalias(rhs).(*types.Interface)
+ iface, _ := types.Unalias(rhs).(*types.Interface)
if iface == nil {
return true
}
@@ -557,19 +558,28 @@ type importReader struct {
prevColumn int64
}
+// markBlack is redefined in iimport_go123.go, to work around golang/go#69912.
+//
+// If TypeNames are not marked black (in the sense of go/types cycle
+// detection), they may be mutated when dot-imported. Fix this by punching a
+// hole through the type, when compiling with Go 1.23. (The bug has been fixed
+// for 1.24, but the fix was not worth back-porting).
+var markBlack = func(name *types.TypeName) {}
+
func (r *importReader) obj(name string) {
tag := r.byte()
pos := r.pos()
switch tag {
- case aliasTag:
+ case aliasTag, genericAliasTag:
+ var tparams []*types.TypeParam
+ if tag == genericAliasTag {
+ tparams = r.tparamList()
+ }
typ := r.typ()
- // TODO(adonovan): support generic aliases:
- // if tag == genericAliasTag {
- // tparams := r.tparamList()
- // alias.SetTypeParams(tparams)
- // }
- r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ))
+ obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams)
+ markBlack(obj) // workaround for golang/go#69912
+ r.declare(obj)
case constTag:
typ, val := r.value()
@@ -589,6 +599,9 @@ func (r *importReader) obj(name string) {
// declaration before recursing.
obj := types.NewTypeName(pos, r.currPkg, name, nil)
named := types.NewNamed(obj, nil, nil)
+
+ markBlack(obj) // workaround for golang/go#69912
+
// Declare obj before calling r.tparamList, so the new type name is recognized
// if used in the constraint of one of its own typeparams (see #48280).
r.declare(obj)
@@ -615,7 +628,7 @@ func (r *importReader) obj(name string) {
if targs.Len() > 0 {
rparams = make([]*types.TypeParam, targs.Len())
for i := range rparams {
- rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam)
+ rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam)
}
}
msig := r.signature(recv, rparams, nil)
@@ -645,7 +658,7 @@ func (r *importReader) obj(name string) {
}
constraint := r.typ()
if implicit {
- iface, _ := aliases.Unalias(constraint).(*types.Interface)
+ iface, _ := types.Unalias(constraint).(*types.Interface)
if iface == nil {
errorf("non-interface constraint marked implicit")
}
@@ -852,7 +865,7 @@ func (r *importReader) typ() types.Type {
}
func isInterface(t types.Type) bool {
- _, ok := aliases.Unalias(t).(*types.Interface)
+ _, ok := types.Unalias(t).(*types.Interface)
return ok
}
@@ -862,7 +875,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
func (r *importReader) doType(base *types.Named) (res types.Type) {
k := r.kind()
if debug {
- r.p.trace("importing type %d (base: %s)", k, base)
+ r.p.trace("importing type %d (base: %v)", k, base)
r.p.indent++
defer func() {
r.p.indent--
@@ -959,7 +972,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) {
methods[i] = method
}
- typ := newInterface(methods, embeddeds)
+ typ := types.NewInterfaceType(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
@@ -1051,7 +1064,7 @@ func (r *importReader) tparamList() []*types.TypeParam {
for i := range xs {
// Note: the standard library importer is tolerant of nil types here,
// though would panic in SetTypeParams.
- xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam)
+ xs[i] = types.Unalias(r.typ()).(*types.TypeParam)
}
return xs
}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
new file mode 100644
index 000000000..7586bfaca
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
@@ -0,0 +1,53 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22 && !go1.24
+
+package gcimporter
+
+import (
+ "go/token"
+ "go/types"
+ "unsafe"
+)
+
+// TODO(rfindley): delete this workaround once go1.24 is assured.
+
+func init() {
+ // Update markBlack so that it correctly sets the color
+ // of imported TypeNames.
+ //
+ // See the doc comment for markBlack for details.
+
+ type color uint32
+ const (
+ white color = iota
+ black
+ grey
+ )
+ type object struct {
+ _ *types.Scope
+ _ token.Pos
+ _ *types.Package
+ _ string
+ _ types.Type
+ _ uint32
+ color_ color
+ _ token.Pos
+ }
+ type typeName struct {
+ object
+ }
+
+ // If the size of types.TypeName changes, this will fail to compile.
+ const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{}))
+ var _ [-delta * delta]int
+
+ markBlack = func(obj *types.TypeName) {
+ type uP = unsafe.Pointer
+ var ptr *typeName
+ *(*uP)(uP(&ptr)) = uP(obj)
+ ptr.color_ = black
+ }
+}
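
The blank array above is a compile-time size assertion: its length is zero only when the mirrored struct and types.TypeName have the same size, and any drift makes the length negative, which fails to compile. The same guard in isolation, with two stand-in structs so it trivially passes:

package main

import "unsafe"

type mirror struct{ x, y uint64 }
type original struct{ x, y uint64 }

const delta = int64(unsafe.Sizeof(mirror{})) - int64(unsafe.Sizeof(original{}))

var _ [-delta * delta]int // negative length (compile error) iff the sizes differ

func main() {}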
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
deleted file mode 100644
index 8b163e3d0..000000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.11
-// +build !go1.11
-
-package gcimporter
-
-import "go/types"
-
-func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
- named := make([]*types.Named, len(embeddeds))
- for i, e := range embeddeds {
- var ok bool
- named[i], ok = e.(*types.Named)
- if !ok {
- panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
- }
- }
- return types.NewInterface(methods, named)
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
deleted file mode 100644
index 49984f40f..000000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.11
-// +build go1.11
-
-package gcimporter
-
-import "go/types"
-
-func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
- return types.NewInterfaceType(methods, embeddeds)
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
new file mode 100644
index 000000000..907c8557a
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
@@ -0,0 +1,91 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import (
+ "go/types"
+ "sync"
+)
+
+// predecl is a cache for the predeclared types in types.Universe.
+//
+// Cache a distinct result based on the runtime value of any.
+// The pointer value of the any type varies based on GODEBUG settings.
+var predeclMu sync.Mutex
+var predecl map[types.Type][]types.Type
+
+func predeclared() []types.Type {
+ anyt := types.Universe.Lookup("any").Type()
+
+ predeclMu.Lock()
+ defer predeclMu.Unlock()
+
+ if pre, ok := predecl[anyt]; ok {
+ return pre
+ }
+
+ if predecl == nil {
+ predecl = make(map[types.Type][]types.Type)
+ }
+
+ decls := []types.Type{ // basic types
+ types.Typ[types.Bool],
+ types.Typ[types.Int],
+ types.Typ[types.Int8],
+ types.Typ[types.Int16],
+ types.Typ[types.Int32],
+ types.Typ[types.Int64],
+ types.Typ[types.Uint],
+ types.Typ[types.Uint8],
+ types.Typ[types.Uint16],
+ types.Typ[types.Uint32],
+ types.Typ[types.Uint64],
+ types.Typ[types.Uintptr],
+ types.Typ[types.Float32],
+ types.Typ[types.Float64],
+ types.Typ[types.Complex64],
+ types.Typ[types.Complex128],
+ types.Typ[types.String],
+
+ // basic type aliases
+ types.Universe.Lookup("byte").Type(),
+ types.Universe.Lookup("rune").Type(),
+
+ // error
+ types.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types.Typ[types.UntypedBool],
+ types.Typ[types.UntypedInt],
+ types.Typ[types.UntypedRune],
+ types.Typ[types.UntypedFloat],
+ types.Typ[types.UntypedComplex],
+ types.Typ[types.UntypedString],
+ types.Typ[types.UntypedNil],
+
+ // package unsafe
+ types.Typ[types.UnsafePointer],
+
+ // invalid type
+ types.Typ[types.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ anyType{},
+
+ // comparable
+ types.Universe.Lookup("comparable").Type(),
+
+ // any
+ anyt,
+ }
+
+ predecl[anyt] = decls
+ return decls
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types.Type { return t }
+func (t anyType) String() string { return "any" }
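
As the format comment in iexport.go notes, a typeOff smaller than predeclReserved indexes directly into this slice, so the order above is load-bearing. A hypothetical in-package check (not part of this change):

package gcimporter

import "testing"

func TestPredeclaredOrder(t *testing.T) {
	decls := predeclared()
	if decls[0].String() != "bool" || decls[1].String() != "int" {
		t.Errorf("unexpected order: %v, %v", decls[0], decls[1])
	}
}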
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
deleted file mode 100644
index 0cd3b91b6..000000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gcimporter
-
-import "go/types"
-
-const iexportVersion = iexportVersionGenerics
-
-// additionalPredeclared returns additional predeclared types in go.1.18.
-func additionalPredeclared() []types.Type {
- return []types.Type{
- // comparable
- types.Universe.Lookup("comparable").Type(),
-
- // any
- types.Universe.Lookup("any").Type(),
- }
-}
-
-// See cmd/compile/internal/types.SplitVargenSuffix.
-func splitVargenSuffix(name string) (base, suffix string) {
- i := len(name)
- for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
- i--
- }
- const dot = "·"
- if i >= len(dot) && name[i-len(dot):i] == dot {
- i -= len(dot)
- return name[:i], name[i:]
- }
- return name, ""
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
deleted file mode 100644
index 38b624cad..000000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.unified
-// +build !goexperiment.unified
-
-package gcimporter
-
-const unifiedIR = false
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
deleted file mode 100644
index b5118d0b3..000000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.unified
-// +build goexperiment.unified
-
-package gcimporter
-
-const unifiedIR = true
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index 2c0770688..1db408613 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) {
// See cmd/compile/internal/noder.derivedInfo.
type derivedInfo struct {
- idx pkgbits.Index
- needed bool
+ idx pkgbits.Index
}
// See cmd/compile/internal/noder.typeInfo.
@@ -110,13 +109,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
pkg := r.pkg()
- r.Bool() // has init
+ if r.Version().Has(pkgbits.HasInit) {
+ r.Bool()
+ }
for i, n := 0, r.Len(); i < n; i++ {
// As if r.obj(), but avoiding the Scope.Lookup call,
// to avoid eager loading of imports.
r.Sync(pkgbits.SyncObject)
- assert(!r.Bool())
+ if r.Version().Has(pkgbits.DerivedFuncInstance) {
+ assert(!r.Bool())
+ }
r.p.objIdx(r.Reloc(pkgbits.RelocObj))
assert(r.Len() == 0)
}
@@ -165,7 +168,7 @@ type readerDict struct {
// tparams is a slice of the constructed TypeParams for the element.
tparams []*types.TypeParam
- // devived is a slice of types derived from tparams, which may be
+ // derived is a slice of types derived from tparams, which may be
// instantiated while reading the current element.
derived []derivedInfo
derivedTypes []types.Type // lazily instantiated from derived
@@ -471,7 +474,9 @@ func (r *reader) param() *types.Var {
func (r *reader) obj() (types.Object, []types.Type) {
r.Sync(pkgbits.SyncObject)
- assert(!r.Bool())
+ if r.Version().Has(pkgbits.DerivedFuncInstance) {
+ assert(!r.Bool())
+ }
pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
obj := pkgScope(pkg).Lookup(name)
@@ -525,8 +530,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
case pkgbits.ObjAlias:
pos := r.pos()
+ var tparams []*types.TypeParam
+ if r.Version().Has(pkgbits.AliasTypeParamNames) {
+ tparams = r.typeParamNames()
+ }
typ := r.typ()
- declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ))
+ declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams))
case pkgbits.ObjConst:
pos := r.pos()
@@ -553,7 +562,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
// If the underlying type is an interface, we need to
// duplicate its methods so we can replace the receiver
// parameter's type (#49906).
- if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+ if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
methods := make([]*types.Func, iface.NumExplicitMethods())
for i := range methods {
fn := iface.ExplicitMethod(i)
@@ -632,7 +641,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
dict.derived = make([]derivedInfo, r.Len())
dict.derivedTypes = make([]types.Type, len(dict.derived))
for i := range dict.derived {
- dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+ dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)}
+ if r.Version().Has(pkgbits.DerivedInfoNeeded) {
+ assert(!r.Bool())
+ }
}
pr.retireReader(r)
@@ -726,3 +738,17 @@ func pkgScope(pkg *types.Package) *types.Scope {
}
return types.Universe
}
+
+// See cmd/compile/internal/types.SplitVargenSuffix.
+func splitVargenSuffix(name string) (base, suffix string) {
+ i := len(name)
+ for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+ i--
+ }
+ const dot = "·"
+ if i >= len(dot) && name[i-len(dot):i] == dot {
+ i -= len(dot)
+ return name[:i], name[i:]
+ }
+ return name, ""
+}
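
splitVargenSuffix, moved here from the deleted support_go118.go, peels the compiler's numeric "·N" vargen suffix off a local type name. A hypothetical in-package example (not part of this change):

package gcimporter

import "testing"

func TestSplitVargenSuffix(t *testing.T) {
	if base, suffix := splitVargenSuffix("Pair·3"); base != "Pair" || suffix != "·3" {
		t.Errorf("got (%q, %q), want (Pair, ·3)", base, suffix)
	}
	if base, suffix := splitVargenSuffix("Pair"); base != "Pair" || suffix != "" {
		t.Errorf("got (%q, %q), want (Pair, )", base, suffix)
	}
}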
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index af0ee6c61..e333efc87 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -16,7 +16,6 @@ import (
"os"
"os/exec"
"path/filepath"
- "reflect"
"regexp"
"runtime"
"strconv"
@@ -200,12 +199,14 @@ func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io
return
}
-func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
- log := i.Logf
- if log == nil {
- log = func(string, ...interface{}) {}
+// logf logs if i.Logf is non-nil.
+func (i *Invocation) logf(format string, args ...any) {
+ if i.Logf != nil {
+ i.Logf(format, args...)
}
+}
+func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
goArgs := []string{i.Verb}
appendModFile := func() {
@@ -248,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
cmd.Stdout = stdout
cmd.Stderr = stderr
- // cmd.WaitDelay was added only in go1.20 (see #50436).
- if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
- // https://go.dev/issue/59541: don't wait forever copying stderr
- // after the command has exited.
- // After CL 484741 we copy stdout manually, so we we'll stop reading that as
- // soon as ctx is done. However, we also don't want to wait around forever
- // for stderr. Give a much-longer-than-reasonable delay and then assume that
- // something has wedged in the kernel or runtime.
- waitDelay.Set(reflect.ValueOf(30 * time.Second))
- }
+ // https://go.dev/issue/59541: don't wait forever copying stderr
+ // after the command has exited.
+ // After CL 484741 we copy stdout manually, so we we'll stop reading that as
+ // soon as ctx is done. However, we also don't want to wait around forever
+ // for stderr. Give a much-longer-than-reasonable delay and then assume that
+ // something has wedged in the kernel or runtime.
+ cmd.WaitDelay = 30 * time.Second
// The cwd gets resolved to the real path. On Darwin, where
// /tmp is a symlink, this breaks anything that expects the
@@ -277,7 +275,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
cmd.Dir = i.WorkingDir
}
- defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
+ debugStr := cmdDebugStr(cmd)
+ i.logf("starting %v", debugStr)
+ start := time.Now()
+ defer func() {
+ i.logf("%s for %v", time.Since(start), debugStr)
+ }()
return runCmdContext(ctx, cmd)
}
@@ -514,7 +517,7 @@ func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(),
for k, v := range overlay {
// Use a unique basename for each file (001-foo.go),
// to avoid creating nested directories.
- base := fmt.Sprintf("%d-%s.go", 1+len(overlays), filepath.Base(k))
+ base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k))
filename := filepath.Join(dir, base)
err := os.WriteFile(filename, v, 0666)
if err != nil {
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index 4569313a0..5ae576977 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -90,18 +90,6 @@ type ImportFix struct {
Relevance float64 // see pkg
}
-// An ImportInfo represents a single import statement.
-type ImportInfo struct {
- ImportPath string // import path, e.g. "crypto/rand".
- Name string // import name, e.g. "crand", or "" if none.
-}
-
-// A packageInfo represents what's known about a package.
-type packageInfo struct {
- name string // real package name, if known.
- exports map[string]bool // known exports.
-}
-
// parseOtherFiles parses all the Go files in srcDir except filename, including
// test files if filename looks like a test.
//
@@ -130,7 +118,7 @@ func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename
continue
}
- f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0)
+ f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, parser.SkipObjectResolution)
if err != nil {
continue
}
@@ -161,8 +149,8 @@ func addGlobals(f *ast.File, globals map[string]bool) {
// collectReferences builds a map of selector expressions, from
// left hand side (X) to a set of right hand sides (Sel).
-func collectReferences(f *ast.File) references {
- refs := references{}
+func collectReferences(f *ast.File) References {
+ refs := References{}
var visitor visitFn
visitor = func(node ast.Node) ast.Visitor {
@@ -232,7 +220,7 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
allFound := true
for right := range syms {
- if !pkgInfo.exports[right] {
+ if !pkgInfo.Exports[right] {
allFound = false
break
}
@@ -245,11 +233,6 @@ func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo {
return nil
}
-// references is set of references found in a Go file. The first map key is the
-// left hand side of a selector expression, the second key is the right hand
-// side, and the value should always be true.
-type references map[string]map[string]bool
-
// A pass contains all the inputs and state necessary to fix a file's imports.
// It can be modified in some ways during use; see comments below.
type pass struct {
@@ -257,27 +240,29 @@ type pass struct {
fset *token.FileSet // fset used to parse f and its siblings.
f *ast.File // the file being fixed.
srcDir string // the directory containing f.
- env *ProcessEnv // the environment to use for go commands, etc.
- loadRealPackageNames bool // if true, load package names from disk rather than guessing them.
- otherFiles []*ast.File // sibling files.
+ logf func(string, ...any)
+ source Source // the environment to use for go commands, etc.
+ loadRealPackageNames bool // if true, load package names from disk rather than guessing them.
+ otherFiles []*ast.File // sibling files.
+ goroot string
// Intermediate state, generated by load.
existingImports map[string][]*ImportInfo
- allRefs references
- missingRefs references
+ allRefs References
+ missingRefs References
// Inputs to fix. These can be augmented between successive fix calls.
lastTry bool // indicates that this is the last call and fix should clean up as best it can.
candidates []*ImportInfo // candidate imports in priority order.
- knownPackages map[string]*packageInfo // information about all known packages.
+ knownPackages map[string]*PackageInfo // information about all known packages.
}
// loadPackageNames saves the package names for everything referenced by imports.
-func (p *pass) loadPackageNames(imports []*ImportInfo) error {
- if p.env.Logf != nil {
- p.env.Logf("loading package names for %v packages", len(imports))
+func (p *pass) loadPackageNames(ctx context.Context, imports []*ImportInfo) error {
+ if p.logf != nil {
+ p.logf("loading package names for %v packages", len(imports))
defer func() {
- p.env.Logf("done loading package names for %v packages", len(imports))
+ p.logf("done loading package names for %v packages", len(imports))
}()
}
var unknown []string
@@ -288,20 +273,17 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error {
unknown = append(unknown, imp.ImportPath)
}
- resolver, err := p.env.GetResolver()
- if err != nil {
- return err
- }
-
- names, err := resolver.loadPackageNames(unknown, p.srcDir)
+ names, err := p.source.LoadPackageNames(ctx, p.srcDir, unknown)
if err != nil {
return err
}
+ // TODO(rfindley): revisit this. Why do we need to store known packages with
+ // no exports? The inconsistent data is confusing.
for path, name := range names {
- p.knownPackages[path] = &packageInfo{
- name: name,
- exports: map[string]bool{},
+ p.knownPackages[path] = &PackageInfo{
+ Name: name,
+ Exports: map[string]bool{},
}
}
return nil
@@ -329,8 +311,8 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
return imp.Name
}
known := p.knownPackages[imp.ImportPath]
- if known != nil && known.name != "" {
- return withoutVersion(known.name)
+ if known != nil && known.Name != "" {
+ return withoutVersion(known.Name)
}
return ImportPathToAssumedName(imp.ImportPath)
}
@@ -338,9 +320,9 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
// load reads in everything necessary to run a pass, and reports whether the
// file already has all the imports it needs. It fills in p.missingRefs with the
// file's missing symbols, if any, or removes unused imports if not.
-func (p *pass) load() ([]*ImportFix, bool) {
- p.knownPackages = map[string]*packageInfo{}
- p.missingRefs = references{}
+func (p *pass) load(ctx context.Context) ([]*ImportFix, bool) {
+ p.knownPackages = map[string]*PackageInfo{}
+ p.missingRefs = References{}
p.existingImports = map[string][]*ImportInfo{}
// Load basic information about the file in question.
@@ -363,10 +345,10 @@ func (p *pass) load() ([]*ImportFix, bool) {
// f's imports by the identifier they introduce.
imports := collectImports(p.f)
if p.loadRealPackageNames {
- err := p.loadPackageNames(append(imports, p.candidates...))
+ err := p.loadPackageNames(ctx, append(imports, p.candidates...))
if err != nil {
- if p.env.Logf != nil {
- p.env.Logf("loading package names: %v", err)
+ if p.logf != nil {
+ p.logf("loading package names: %v", err)
}
return nil, false
}
@@ -536,9 +518,10 @@ func (p *pass) assumeSiblingImportsValid() {
// We have the stdlib in memory; no need to guess.
rights = symbolNameSet(m)
}
- p.addCandidate(imp, &packageInfo{
+ // TODO(rfindley): we should set package name here, for consistency.
+ p.addCandidate(imp, &PackageInfo{
// no name; we already know it.
- exports: rights,
+ Exports: rights,
})
}
}
@@ -547,14 +530,14 @@ func (p *pass) assumeSiblingImportsValid() {
// addCandidate adds a candidate import to p, and merges in the information
// in pkg.
-func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
+func (p *pass) addCandidate(imp *ImportInfo, pkg *PackageInfo) {
p.candidates = append(p.candidates, imp)
if existing, ok := p.knownPackages[imp.ImportPath]; ok {
- if existing.name == "" {
- existing.name = pkg.name
+ if existing.Name == "" {
+ existing.Name = pkg.Name
}
- for export := range pkg.exports {
- existing.exports[export] = true
+ for export := range pkg.Exports {
+ existing.Exports[export] = true
}
} else {
p.knownPackages[imp.ImportPath] = pkg
@@ -563,7 +546,14 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
// fixImports adds and removes imports from f so that all its references are
// satisfied and there are no unused imports.
-func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
+//
+// This is declared as a variable rather than a function so goimports can
+// easily be extended by adding a file with an init function.
+//
+// DO NOT REMOVE: used internally at Google.
+var fixImports = fixImportsDefault
+
+func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
fixes, err := getFixes(context.Background(), fset, f, filename, env)
if err != nil {
return err
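
The comment above explains why fixImports is now a variable: a separate file in this package can swap the implementation from an init function. A hedged sketch of such an extension file (hypothetical, not part of this change):

package imports

import (
	"go/ast"
	"go/token"
)

func init() {
	prev := fixImports
	fixImports = func(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
		// Site-specific preprocessing could run here before delegating.
		return prev(fset, f, filename, env)
	}
}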
@@ -575,21 +565,42 @@ func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessE
// getFixes gets the import fixes that need to be made to f in order to fix the imports.
// It does not modify the ast.
func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
+ source, err := NewProcessEnvSource(env, filename, f.Name.Name)
+ if err != nil {
+ return nil, err
+ }
+ goEnv, err := env.goEnv()
+ if err != nil {
+ return nil, err
+ }
+ return getFixesWithSource(ctx, fset, f, filename, goEnv["GOROOT"], env.logf, source)
+}
+
+func getFixesWithSource(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, goroot string, logf func(string, ...any), source Source) ([]*ImportFix, error) {
+ // This logic is defensively duplicated from getFixes.
abs, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
srcDir := filepath.Dir(abs)
- if env.Logf != nil {
- env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
+
+ if logf != nil {
+ logf("fixImports(filename=%q), srcDir=%q ...", filename, abs, srcDir)
}
// First pass: looking only at f, and using the naive algorithm to
// derive package names from import paths, see if the file is already
// complete. We can't add any imports yet, because we don't know
// if missing references are actually package vars.
- p := &pass{fset: fset, f: f, srcDir: srcDir, env: env}
- if fixes, done := p.load(); done {
+ p := &pass{
+ fset: fset,
+ f: f,
+ srcDir: srcDir,
+ logf: logf,
+ goroot: goroot,
+ source: source,
+ }
+ if fixes, done := p.load(ctx); done {
return fixes, nil
}
@@ -601,7 +612,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
// Second pass: add information from other files in the same package,
// like their package vars and imports.
p.otherFiles = otherFiles
- if fixes, done := p.load(); done {
+ if fixes, done := p.load(ctx); done {
return fixes, nil
}
@@ -614,10 +625,17 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
// Third pass: get real package names where we had previously used
// the naive algorithm.
- p = &pass{fset: fset, f: f, srcDir: srcDir, env: env}
+ p = &pass{
+ fset: fset,
+ f: f,
+ srcDir: srcDir,
+ logf: logf,
+ goroot: goroot,
+ source: p.source, // safe to reuse, as it's just a wrapper around env
+ }
p.loadRealPackageNames = true
p.otherFiles = otherFiles
- if fixes, done := p.load(); done {
+ if fixes, done := p.load(ctx); done {
return fixes, nil
}
@@ -831,7 +849,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
return true
},
dirFound: func(pkg *pkg) bool {
- return pkgIsCandidate(filename, references{searchPkg: nil}, pkg)
+ return pkgIsCandidate(filename, References{searchPkg: nil}, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
return pkg.packageName == searchPkg
@@ -1014,16 +1032,26 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) {
// already know the view type.
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
e.resolver = newGopathResolver(e)
+ e.logf("created gopath resolver")
} else if r, err := newModuleResolver(e, e.ModCache); err != nil {
e.resolverErr = err
+ e.logf("failed to create module resolver: %v", err)
} else {
e.resolver = Resolver(r)
+ e.logf("created module resolver")
}
}
return e.resolver, e.resolverErr
}
+// logf logs if e.Logf is non-nil.
+func (e *ProcessEnv) logf(format string, args ...any) {
+ if e.Logf != nil {
+ e.Logf(format, args...)
+ }
+}
+
// buildContext returns the build.Context to use for matching files.
//
// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform
@@ -1072,11 +1100,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string)
return e.GocmdRunner.Run(ctx, inv)
}
-func addStdlibCandidates(pass *pass, refs references) error {
- goenv, err := pass.env.goEnv()
- if err != nil {
- return err
- }
+func addStdlibCandidates(pass *pass, refs References) error {
localbase := func(nm string) string {
ans := path.Base(nm)
if ans[0] == 'v' {
@@ -1091,13 +1115,13 @@ func addStdlibCandidates(pass *pass, refs references) error {
}
add := func(pkg string) {
// Prevent self-imports.
- if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir {
+ if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.goroot, "src", pkg) == pass.srcDir {
return
}
exports := symbolNameSet(stdlib.PackageSymbols[pkg])
pass.addCandidate(
&ImportInfo{ImportPath: pkg},
- &packageInfo{name: localbase(pkg), exports: exports})
+ &PackageInfo{Name: localbase(pkg), Exports: exports})
}
for left := range refs {
if left == "rand" {
@@ -1127,8 +1151,8 @@ type Resolver interface {
// scan works with callback to search for packages. See scanCallback for details.
scan(ctx context.Context, callback *scanCallback) error
- // loadExports returns the set of exported symbols in the package at dir.
- // loadExports may be called concurrently.
+ // loadExports returns the package name and set of exported symbols in the
+ // package at dir. loadExports may be called concurrently.
loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
// scoreImportPath returns the relevance for an import path.
@@ -1161,101 +1185,22 @@ type scanCallback struct {
exportsLoaded func(pkg *pkg, exports []stdlib.Symbol)
}
-func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error {
+func addExternalCandidates(ctx context.Context, pass *pass, refs References, filename string) error {
ctx, done := event.Start(ctx, "imports.addExternalCandidates")
defer done()
- var mu sync.Mutex
- found := make(map[string][]pkgDistance)
- callback := &scanCallback{
- rootFound: func(gopathwalk.Root) bool {
- return true // We want everything.
- },
- dirFound: func(pkg *pkg) bool {
- return pkgIsCandidate(filename, refs, pkg)
- },
- packageNameLoaded: func(pkg *pkg) bool {
- if _, want := refs[pkg.packageName]; !want {
- return false
- }
- if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName {
- // The candidate is in the same directory and has the
- // same package name. Don't try to import ourselves.
- return false
- }
- if !canUse(filename, pkg.dir) {
- return false
- }
- mu.Lock()
- defer mu.Unlock()
- found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)})
- return false // We'll do our own loading after we sort.
- },
- }
- resolver, err := pass.env.GetResolver()
+ results, err := pass.source.ResolveReferences(ctx, filename, refs)
if err != nil {
return err
}
- if err = resolver.scan(ctx, callback); err != nil {
- return err
- }
-
- // Search for imports matching potential package references.
- type result struct {
- imp *ImportInfo
- pkg *packageInfo
- }
- results := make(chan result, len(refs))
-
- ctx, cancel := context.WithCancel(ctx)
- var wg sync.WaitGroup
- defer func() {
- cancel()
- wg.Wait()
- }()
- var (
- firstErr error
- firstErrOnce sync.Once
- )
- for pkgName, symbols := range refs {
- wg.Add(1)
- go func(pkgName string, symbols map[string]bool) {
- defer wg.Done()
-
- found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols)
-
- if err != nil {
- firstErrOnce.Do(func() {
- firstErr = err
- cancel()
- })
- return
- }
-
- if found == nil {
- return // No matching package.
- }
-
- imp := &ImportInfo{
- ImportPath: found.importPathShort,
- }
-
- pkg := &packageInfo{
- name: pkgName,
- exports: symbols,
- }
- results <- result{imp, pkg}
- }(pkgName, symbols)
- }
- go func() {
- wg.Wait()
- close(results)
- }()
- for result := range results {
+ for _, result := range results {
+ if result == nil {
+ continue
+ }
// Don't offer completions that would shadow predeclared
// names, such as github.com/coreos/etcd/error.
- if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
+ if types.Universe.Lookup(result.Package.Name) != nil { // predeclared
// Ideally we would skip this candidate only
// if the predeclared name is actually
// referenced by the file, but that's a lot
@@ -1264,9 +1209,9 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
// user before long.
continue
}
- pass.addCandidate(result.imp, result.pkg)
+ pass.addCandidate(result.Import, result.Package)
}
- return firstErr
+ return nil
}
// notIdentifier reports whether ch is an invalid identifier character.
@@ -1608,11 +1553,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
}
fullFile := filepath.Join(dir, fi.Name())
+ // Legacy ast.Object resolution is needed here.
f, err := parser.ParseFile(fset, fullFile, nil, 0)
if err != nil {
- if env.Logf != nil {
- env.Logf("error parsing %v: %v", fullFile, err)
- }
+ env.logf("error parsing %v: %v", fullFile, err)
continue
}
if f.Name.Name == "documentation" {
@@ -1648,9 +1592,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
}
sortSymbols(exports)
- if env.Logf != nil {
- env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
- }
+ env.logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
return pkgName, exports, nil
}
@@ -1660,25 +1602,39 @@ func sortSymbols(syms []stdlib.Symbol) {
})
}
-// findImport searches for a package with the given symbols.
-// If no package is found, findImport returns ("", false, nil)
-func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
+// A symbolSearcher searches for a package with a set of symbols, among a set
+// of candidates. See [symbolSearcher.search].
+//
+// The search occurs within the scope of a single file, with context captured
+// in srcDir and xtest.
+type symbolSearcher struct {
+ logf func(string, ...any)
+ srcDir string // directory containing the file
+ xtest bool // if set, the file in question is an x_test file
+ loadExports func(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
+}
+
+// search searches the provided candidates for a package containing all
+// exported symbols.
+//
+// If successful, returns the resulting package.
+func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
// Sort the candidates by their import package length,
// assuming that shorter package names are better than long
// ones. Note that this sorts by the de-vendored name, so
// there's no "penalty" for vendoring.
sort.Sort(byDistanceOrImportPathShortLength(candidates))
- if pass.env.Logf != nil {
+ if s.logf != nil {
for i, c := range candidates {
- pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
+ s.logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
}
}
- resolver, err := pass.env.GetResolver()
- if err != nil {
- return nil, err
- }
- // Collect exports for packages with matching names.
+ // Arrange rescv so that we can await results in order of relevance
+ // and exit as soon as we find the first match.
+ //
+ // Search with bounded concurrency, returning as soon as the first result
+ // among rescv is non-nil.
rescv := make([]chan *pkg, len(candidates))
for i := range candidates {
rescv[i] = make(chan *pkg, 1)
@@ -1686,6 +1642,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
const maxConcurrentPackageImport = 4
loadExportsSem := make(chan struct{}, maxConcurrentPackageImport)
+ // Ensure that all work is completed at exit.
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
defer func() {
@@ -1693,6 +1650,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
wg.Wait()
}()
+ // Start the search.
wg.Add(1)
go func() {
defer wg.Done()
@@ -1703,55 +1661,67 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
return
}
+ i := i
+ c := c
wg.Add(1)
- go func(c pkgDistance, resc chan<- *pkg) {
+ go func() {
defer func() {
<-loadExportsSem
wg.Done()
}()
-
- if pass.env.Logf != nil {
- pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
+ if s.logf != nil {
+ s.logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
}
- // If we're an x_test, load the package under test's test variant.
- includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir
- _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest)
+ pkg, err := s.searchOne(ctx, c, symbols)
if err != nil {
- if pass.env.Logf != nil {
- pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
- }
- resc <- nil
- return
- }
-
- exportsMap := make(map[string]bool, len(exports))
- for _, sym := range exports {
- exportsMap[sym.Name] = true
- }
-
- // If it doesn't have the right
- // symbols, send nil to mean no match.
- for symbol := range symbols {
- if !exportsMap[symbol] {
- resc <- nil
- return
+ if s.logf != nil && ctx.Err() == nil {
+ s.logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
}
+ pkg = nil
}
- resc <- c.pkg
- }(c, rescv[i])
+ rescv[i] <- pkg // may be nil
+ }()
}
}()
+ // Await the first (best) result.
for _, resc := range rescv {
- pkg := <-resc
- if pkg == nil {
- continue
+ select {
+ case r := <-resc:
+ if r != nil {
+ return r, nil
+ }
+ case <-ctx.Done():
+ return nil, ctx.Err()
}
- return pkg, nil
}
return nil, nil
}
+func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols map[string]bool) (*pkg, error) {
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ // If we're considering the package under test from an x_test, load the
+ // test variant.
+ includeTest := s.xtest && c.pkg.dir == s.srcDir
+ _, exports, err := s.loadExports(ctx, c.pkg, includeTest)
+ if err != nil {
+ return nil, err
+ }
+
+ exportsMap := make(map[string]bool, len(exports))
+ for _, sym := range exports {
+ exportsMap[sym.Name] = true
+ }
+ for symbol := range symbols {
+ if !exportsMap[symbol] {
+ return nil, nil // no match
+ }
+ }
+ return c.pkg, nil
+}
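The channel arrangement used by search generalizes: one buffered result channel per candidate, filled concurrently, awaited in relevance order. A distilled, hedged sketch of that pattern (not part of the package; it omits the semaphore that bounds concurrency in the real search, and try is a caller-supplied placeholder):

func firstInOrder(ctx context.Context, n int, try func(i int) *pkg) (*pkg, error) {
	rescv := make([]chan *pkg, n)
	for i := range rescv {
		rescv[i] = make(chan *pkg, 1) // buffered so senders never block
	}
	for i := 0; i < n; i++ {
		go func(i int) { rescv[i] <- try(i) }(i) // try returns nil on no match
	}
	for _, c := range rescv { // await in order; first non-nil result wins
		select {
		case r := <-c:
			if r != nil {
				return r, nil
			}
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}
	return nil, nil
}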
+
// pkgIsCandidate reports whether pkg is a candidate for satisfying a
// reference to pkgIdent in the file named by filename.
@@ -1764,65 +1734,31 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
// filename is the file being formatted.
// pkgIdent is the package being searched for, like "client" (if
// searching for "client.New")
-func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
+func pkgIsCandidate(filename string, refs References, pkg *pkg) bool {
// Check "internal" and "vendor" visibility:
if !canUse(filename, pkg.dir) {
return false
}
// Speed optimization to minimize disk I/O:
- // the last two components on disk must contain the
- // package name somewhere.
//
- // This permits mismatch naming like directory
- // "go-foo" being package "foo", or "pkg.v3" being "pkg",
- // or directory "google.golang.org/api/cloudbilling/v1"
- // being package "cloudbilling", but doesn't
- // permit a directory "foo" to be package
- // "bar", which is strongly discouraged
- // anyway. There's no reason goimports needs
- // to be slow just to accommodate that.
+ // Use the matchesPath heuristic to filter to package paths that could
+ // reasonably match a dangling reference.
+ //
+ // This permits mismatch naming like directory "go-foo" being package "foo",
+ // or "pkg.v3" being "pkg", or directory
+ // "google.golang.org/api/cloudbilling/v1" being package "cloudbilling", but
+ // doesn't permit a directory "foo" to be package "bar", which is strongly
+ // discouraged anyway. There's no reason goimports needs to be slow just to
+ // accommodate that.
for pkgIdent := range refs {
- lastTwo := lastTwoComponents(pkg.importPathShort)
- if strings.Contains(lastTwo, pkgIdent) {
+ if matchesPath(pkgIdent, pkg.importPathShort) {
return true
}
- if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
- lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
- if strings.Contains(lastTwo, pkgIdent) {
- return true
- }
- }
}
return false
}
-func hasHyphenOrUpperASCII(s string) bool {
- for i := 0; i < len(s); i++ {
- b := s[i]
- if b == '-' || ('A' <= b && b <= 'Z') {
- return true
- }
- }
- return false
-}
-
-func lowerASCIIAndRemoveHyphen(s string) (ret string) {
- buf := make([]byte, 0, len(s))
- for i := 0; i < len(s); i++ {
- b := s[i]
- switch {
- case b == '-':
- continue
- case 'A' <= b && b <= 'Z':
- buf = append(buf, b+('a'-'A'))
- default:
- buf = append(buf, b)
- }
- }
- return string(buf)
-}
-
// canUse reports whether the package in dir is usable from filename,
// respecting the Go "internal" and "vendor" visibility rules.
func canUse(filename, dir string) bool {
@@ -1863,19 +1799,84 @@ func canUse(filename, dir string) bool {
return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal")
}
-// lastTwoComponents returns at most the last two path components
-// of v, using either / or \ as the path separator.
-func lastTwoComponents(v string) string {
+// matchesPath reports whether ident may match a potential package name
+// referred to by path, using heuristics to filter out unidiomatic package
+// names.
+//
+// Specifically, it checks whether either of the last two '/'- or '\'-delimited
+// path segments matches the identifier. The segment-matching heuristic must
+// allow for various conventions around segment naming, including go-foo,
+// foo-go, and foo.v3. To handle all of these, matching considers both (1) the
+// entire segment, ignoring '-' and '.', as well as (2) the last subsegment
+// separated by '-' or '.'. So the segment foo-go matches all of the following
+// identifiers: foo, go, and foogo. All matches are case insensitive (for ASCII
+// identifiers).
+//
+// See the docstring for [pkgIsCandidate] for an explanation of how this
+// heuristic filters potential candidate packages.
+func matchesPath(ident, path string) bool {
+ // Ignore case, for ASCII.
+ lowerIfASCII := func(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+ }
+
+ // match reports whether path[start:end] matches ident, ignoring [.-].
+ match := func(start, end int) bool {
+ ii := len(ident) - 1 // current byte in ident
+ pi := end - 1 // current byte in path
+ for ; pi >= start && ii >= 0; pi-- {
+ pb := path[pi]
+ if pb == '-' || pb == '.' {
+ continue
+ }
+ pb = lowerIfASCII(pb)
+ ib := lowerIfASCII(ident[ii])
+ if pb != ib {
+ return false
+ }
+ ii--
+ }
+ return ii < 0 && pi < start // all bytes matched
+ }
+
+ // segmentEnd and subsegmentEnd hold the end points of the current segment
+ // and subsegment intervals.
+ segmentEnd := len(path)
+ subsegmentEnd := len(path)
+
+ // Count slashes; we only care about the last two segments.
nslash := 0
- for i := len(v) - 1; i >= 0; i-- {
- if v[i] == '/' || v[i] == '\\' {
+
+ for i := len(path) - 1; i >= 0; i-- {
+ switch b := path[i]; b {
+ // TODO(rfindley): we handle backslashes here only because the previous
+ // heuristic handled backslashes. This is perhaps overly defensive, but is
+ // the result of many lessons regarding Chesterton's fence and the
+ // goimports codebase.
+ //
+ // However, this function is only ever called with something called an
+ // 'importPath'. Is it possible that this is a real import path, and
+ // therefore we need only consider forward slashes?
+ case '/', '\\':
+ if match(i+1, segmentEnd) || match(i+1, subsegmentEnd) {
+ return true
+ }
nslash++
if nslash == 2 {
- return v[i:]
+ return false // did not match above
+ }
+ segmentEnd, subsegmentEnd = i, i // reset
+ case '-', '.':
+ if match(i+1, subsegmentEnd) {
+ return true
}
+ subsegmentEnd = i
}
}
- return v
+ return match(0, segmentEnd) || match(0, subsegmentEnd)
}
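As a rough in-package sketch of the behavior described above (matchesPath is unexported, so this would live in a test within this package; the import paths are illustrative only):

func TestMatchesPathSketch(t *testing.T) {
	for _, tc := range []struct {
		ident, path string
		want        bool
	}{
		{"foo", "github.com/acme/go-foo", true},   // subsegment "foo"
		{"gofoo", "github.com/acme/go-foo", true}, // whole segment, '-' ignored
		{"pkg", "example.com/pkg.v3", true},       // subsegment before ".v3"
		{"yaml", "gopkg.in/yaml.v1", true},        // '.' handling
		{"cloudbilling", "google.golang.org/api/cloudbilling/v1", true},
		{"bar", "example.com/foo", false}, // no segment matches
	} {
		if got := matchesPath(tc.ident, tc.path); got != tc.want {
			t.Errorf("matchesPath(%q, %q) = %v, want %v", tc.ident, tc.path, got, tc.want)
		}
	}
}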
type visitFn func(node ast.Node) ast.Visitor
diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go
index f83465520..2215a1288 100644
--- a/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -47,7 +47,14 @@ type Options struct {
// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env.
func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) {
fileSet := token.NewFileSet()
- file, adjust, err := parse(fileSet, filename, src, opt)
+ var parserMode parser.Mode
+ if opt.Comments {
+ parserMode |= parser.ParseComments
+ }
+ if opt.AllErrors {
+ parserMode |= parser.AllErrors
+ }
+ file, adjust, err := parse(fileSet, filename, src, parserMode, opt.Fragment)
if err != nil {
return nil, err
}
@@ -66,17 +73,19 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
-func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
+func FixImports(ctx context.Context, filename string, src []byte, goroot string, logf func(string, ...any), source Source) (fixes []*ImportFix, err error) {
ctx, done := event.Start(ctx, "imports.FixImports")
defer done()
fileSet := token.NewFileSet()
- file, _, err := parse(fileSet, filename, src, opt)
+ // TODO(rfindley): these default values for ParseComments and AllErrors were
+ // extracted from gopls, but are they even needed?
+ file, _, err := parse(fileSet, filename, src, parser.ParseComments|parser.AllErrors, true)
if err != nil {
return nil, err
}
- return getFixes(ctx, fileSet, file, filename, opt.Env)
+ return getFixesWithSource(ctx, fileSet, file, filename, goroot, logf, source)
}
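A hedged, in-package sketch of how a caller might wire the new FixImports signature with a ProcessEnvSource; ctx, filename, src, and goroot are placeholders supplied by the caller:

env := &ProcessEnv{Env: map[string]string{ /* GO env overrides, if any */ }}
source, err := NewProcessEnvSource(env, filename, "mypkg")
if err != nil {
	return err
}
fixes, err := FixImports(ctx, filename, src, goroot, env.logf, source)
if err != nil {
	return err
}
// fixes can then be applied with ApplyFixes, as before.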
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
@@ -86,7 +95,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
// Don't use parse() -- we don't care about fragments or statement lists
// here, and we need to work with unparseable files.
fileSet := token.NewFileSet()
- parserMode := parser.Mode(0)
+ parserMode := parser.SkipObjectResolution
if opt.Comments {
parserMode |= parser.ParseComments
}
@@ -114,7 +123,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
// formatted file, and returns the postprocessed result.
func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) {
mergeImports(file)
- sortImports(opt.LocalPrefix, fset.File(file.Pos()), file)
+ sortImports(opt.LocalPrefix, fset.File(file.FileStart), file)
var spacesBefore []string // import paths we need spaces before
for _, impSection := range astutil.Imports(fset, file) {
// Within each block of contiguous imports, see if any
@@ -164,13 +173,9 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori
// parse parses src, which was read from filename,
// as a Go source file or statement list.
-func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
- parserMode := parser.Mode(0)
- if opt.Comments {
- parserMode |= parser.ParseComments
- }
- if opt.AllErrors {
- parserMode |= parser.AllErrors
+func parse(fset *token.FileSet, filename string, src []byte, parserMode parser.Mode, fragment bool) (*ast.File, func(orig, src []byte) []byte, error) {
+ if parserMode&parser.SkipObjectResolution != 0 {
+ panic("legacy ast.Object resolution is required")
}
// Try as whole source file.
@@ -181,7 +186,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
// If the error is that the source file didn't begin with a
// package line and we accept fragmented input, fall through to
// try as a source fragment. Stop and return on any other error.
- if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
+ if !fragment || !strings.Contains(err.Error(), "expected 'package'") {
return nil, nil, err
}
diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go
index 82fe644a1..8555e3f83 100644
--- a/vendor/golang.org/x/tools/internal/imports/mod.go
+++ b/vendor/golang.org/x/tools/internal/imports/mod.go
@@ -245,7 +245,10 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe
// 2. Use this to separate module cache scanning from other scanning.
func gomodcacheForEnv(goenv map[string]string) string {
if gmc := goenv["GOMODCACHE"]; gmc != "" {
- return gmc
+ // golang/go#67156: ensure that the module cache is clean, since it is
+ // assumed as a prefix to directories scanned by gopathwalk, which are
+ // themselves clean.
+ return filepath.Clean(gmc)
}
gopaths := filepath.SplitList(goenv["GOPATH"])
if len(gopaths) == 0 {
@@ -265,9 +268,7 @@ func (r *ModuleResolver) initAllMods() error {
return err
}
if mod.Dir == "" {
- if r.env.Logf != nil {
- r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
- }
+ r.env.logf("module %v has not been downloaded and will be ignored", mod.Path)
// Can't do anything with a module that's not downloaded.
continue
}
@@ -742,8 +743,8 @@ func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
subdir := ""
- if dir != root.Path {
- subdir = dir[len(root.Path)+len("/"):]
+ if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) {
+ subdir = dir[len(prefix):]
}
importPath := filepath.ToSlash(subdir)
if strings.HasPrefix(importPath, "vendor/") {
@@ -766,9 +767,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
}
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
if err != nil {
- if r.env.Logf != nil {
- r.env.Logf("decoding module cache path %q: %v", subdir, err)
- }
+ r.env.logf("decoding module cache path %q: %v", subdir, err)
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
diff --git a/vendor/golang.org/x/tools/internal/imports/source.go b/vendor/golang.org/x/tools/internal/imports/source.go
new file mode 100644
index 000000000..cbe4f3c5b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/imports/source.go
@@ -0,0 +1,63 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import "context"
+
+// These types document the APIs below.
+//
+// TODO(rfindley): consider making these defined types rather than aliases.
+type (
+ ImportPath = string
+ PackageName = string
+ Symbol = string
+
+ // References is a set of references found in a Go file. The first map key is the
+ // left hand side of a selector expression, the second key is the right hand
+ // side, and the value should always be true.
+ References = map[PackageName]map[Symbol]bool
+)
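For example, a file that mentions fmt.Println and rand.Int without importing either package would produce a References value shaped like this (illustrative only):

refs := References{
	"fmt":  {"Println": true},
	"rand": {"Int": true},
}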
+
+// A Result satisfies a missing import.
+//
+// The Import field describes the missing import spec, and the Package field
+// summarizes the package exports.
+type Result struct {
+ Import *ImportInfo
+ Package *PackageInfo
+}
+
+// An ImportInfo represents a single import statement.
+type ImportInfo struct {
+ ImportPath string // import path, e.g. "crypto/rand".
+ Name string // import name, e.g. "crand", or "" if none.
+}
+
+// A PackageInfo represents what's known about a package.
+type PackageInfo struct {
+ Name string // package name in the package declaration, if known
+ Exports map[string]bool // set of names of known package-level symbols
+}
+
+// A Source provides imports to satisfy unresolved references in the file being
+// fixed.
+type Source interface {
+ // LoadPackageNames queries PackageName information for the requested import
+ // paths, when operating from the provided srcDir.
+ //
+ // TODO(rfindley): try to refactor to remove this operation.
+ LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error)
+
+ // ResolveReferences asks the Source for the best package name to satisfy
+ // each of the missing references, in the context of fixing the given
+ // filename.
+ //
+ // Returns a [Result] for each package name that provides the required
+ // symbols. A package name may be omitted from the results if no candidates
+ // satisfy all missing references for that name. It is up to each data
+ // source to select the best result for each entry in the missing map.
+ ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error)
+}
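A minimal in-package sketch of a Source implementation that never finds candidates, only to illustrate the interface shape; nullSource is a hypothetical name:

// nullSource is a hypothetical Source that never finds candidates.
type nullSource struct{}

func (nullSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
	return map[ImportPath]PackageName{}, nil // no names known
}

func (nullSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
	return nil, nil // no candidates found
}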
diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go
new file mode 100644
index 000000000..d14abaa31
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/imports/source_env.go
@@ -0,0 +1,129 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import (
+ "context"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/tools/internal/gopathwalk"
+)
+
+// ProcessEnvSource implements the [Source] interface using the legacy
+// [ProcessEnv] abstraction.
+type ProcessEnvSource struct {
+ env *ProcessEnv
+ srcDir string
+ filename string
+ pkgName string
+}
+
+// NewProcessEnvSource returns a [ProcessEnvSource] wrapping the given
+// env, to be used for fixing imports in the file with name filename in package
+// named pkgName.
+func NewProcessEnvSource(env *ProcessEnv, filename, pkgName string) (*ProcessEnvSource, error) {
+ abs, err := filepath.Abs(filename)
+ if err != nil {
+ return nil, err
+ }
+ srcDir := filepath.Dir(abs)
+ return &ProcessEnvSource{
+ env: env,
+ srcDir: srcDir,
+ filename: filename,
+ pkgName: pkgName,
+ }, nil
+}
+
+func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, unknown []string) (map[string]string, error) {
+ r, err := s.env.GetResolver()
+ if err != nil {
+ return nil, err
+ }
+ return r.loadPackageNames(unknown, srcDir)
+}
+
+func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) {
+ var mu sync.Mutex
+ found := make(map[string][]pkgDistance)
+ callback := &scanCallback{
+ rootFound: func(gopathwalk.Root) bool {
+ return true // We want everything.
+ },
+ dirFound: func(pkg *pkg) bool {
+ return pkgIsCandidate(filename, refs, pkg)
+ },
+ packageNameLoaded: func(pkg *pkg) bool {
+ if _, want := refs[pkg.packageName]; !want {
+ return false
+ }
+ if pkg.dir == s.srcDir && s.pkgName == pkg.packageName {
+ // The candidate is in the same directory and has the
+ // same package name. Don't try to import ourselves.
+ return false
+ }
+ if !canUse(filename, pkg.dir) {
+ return false
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(s.srcDir, pkg.dir)})
+ return false // We'll do our own loading after we sort.
+ },
+ }
+ resolver, err := s.env.GetResolver()
+ if err != nil {
+ return nil, err
+ }
+ if err := resolver.scan(ctx, callback); err != nil {
+ return nil, err
+ }
+
+ g, ctx := errgroup.WithContext(ctx)
+
+ searcher := symbolSearcher{
+ logf: s.env.logf,
+ srcDir: s.srcDir,
+ xtest: strings.HasSuffix(s.pkgName, "_test"),
+ loadExports: resolver.loadExports,
+ }
+
+ var resultMu sync.Mutex
+ results := make(map[string]*Result, len(refs))
+ for pkgName, symbols := range refs {
+ g.Go(func() error {
+ found, err := searcher.search(ctx, found[pkgName], pkgName, symbols)
+ if err != nil {
+ return err
+ }
+ if found == nil {
+ return nil // No matching package.
+ }
+
+ imp := &ImportInfo{
+ ImportPath: found.importPathShort,
+ }
+ pkg := &PackageInfo{
+ Name: pkgName,
+ Exports: symbols,
+ }
+ resultMu.Lock()
+ results[pkgName] = &Result{Import: imp, Package: pkg}
+ resultMu.Unlock()
+ return nil
+ })
+ }
+ if err := g.Wait(); err != nil {
+ return nil, err
+ }
+ var ans []*Result
+ for _, x := range results {
+ ans = append(ans, x)
+ }
+ return ans, nil
+}
diff --git a/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/vendor/golang.org/x/tools/internal/imports/source_modindex.go
new file mode 100644
index 000000000..05229f06c
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/imports/source_modindex.go
@@ -0,0 +1,103 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/internal/modindex"
+)
+
+// This code is here rather than in the modindex package
+// to avoid import loops
+
+// An IndexSource implements Source using modindex, so it serves candidates only from the module cache.
+//
+// this is perhaps over-engineered. A new Index is read at first use.
+// And then Update is called after every 15 minutes, and a new Index
+// is read if the index changed. It is not clear the Mutex is needed.
+type IndexSource struct {
+ modcachedir string
+ mutex sync.Mutex
+ ix *modindex.Index
+ expires time.Time
+}
+
+// NewIndexSource returns a new Source for the module cache in cachedir. Called from NewView in cache/session.go.
+func NewIndexSource(cachedir string) *IndexSource {
+ return &IndexSource{modcachedir: cachedir}
+}
+
+func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) {
+ // This is used by goimports to resolve the package names of imports of the
+ // current package, which is irrelevant for the module cache.
+ return nil, nil
+}
+
+func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) {
+ if err := s.maybeReadIndex(); err != nil {
+ return nil, err
+ }
+ var cs []modindex.Candidate
+ for pkg, nms := range missing {
+ for nm := range nms {
+ x := s.ix.Lookup(pkg, nm, false)
+ cs = append(cs, x...)
+ }
+ }
+ found := make(map[string]*Result)
+ for _, c := range cs {
+ var x *Result
+ if x = found[c.ImportPath]; x == nil {
+ x = &Result{
+ Import: &ImportInfo{
+ ImportPath: c.ImportPath,
+ Name: "",
+ },
+ Package: &PackageInfo{
+ Name: c.PkgName,
+ Exports: make(map[string]bool),
+ },
+ }
+ found[c.ImportPath] = x
+ }
+ x.Package.Exports[c.Name] = true
+ }
+ var ans []*Result
+ for _, x := range found {
+ ans = append(ans, x)
+ }
+ return ans, nil
+}
+
+func (s *IndexSource) maybeReadIndex() error {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+
+ var readIndex bool
+ if time.Now().After(s.expires) {
+ ok, err := modindex.Update(s.modcachedir)
+ if err != nil {
+ return err
+ }
+ if ok {
+ readIndex = true
+ }
+ }
+
+ if readIndex || s.ix == nil {
+ ix, err := modindex.ReadIndex(s.modcachedir)
+ if err != nil {
+ return err
+ }
+ s.ix = ix
+ // for now refresh every 15 minutes
+ s.expires = time.Now().Add(time.Minute * 15)
+ }
+
+ return nil
+}
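A hedged usage sketch, assuming gomodcache holds the GOMODCACHE directory; the IndexSource then resolves references against the module cache only:

src := NewIndexSource(gomodcache)
results, err := src.ResolveReferences(ctx, "/path/to/file.go", References{
	"yaml": {"Marshal": true},
})
if err != nil {
	return err
}
for _, r := range results {
	// e.g. r.Import.ImportPath == "gopkg.in/yaml.v1", r.Package.Name == "yaml"
	_ = r
}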
diff --git a/vendor/golang.org/x/tools/internal/modindex/directories.go b/vendor/golang.org/x/tools/internal/modindex/directories.go
new file mode 100644
index 000000000..1e1a02f23
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/directories.go
@@ -0,0 +1,135 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "regexp"
+ "slices"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/mod/semver"
+ "golang.org/x/tools/internal/gopathwalk"
+)
+
+type directory struct {
+ path Relpath
+ importPath string
+ version string // semantic version
+ syms []symbol
+}
+
+// byImportPath groups the directories by import path,
+// sorting the ones with the same import path by semantic version,
+// most recent first.
+func byImportPath(dirs []Relpath) (map[string][]*directory, error) {
+ ans := make(map[string][]*directory) // key is import path
+ for _, d := range dirs {
+ ip, sv, err := DirToImportPathVersion(d)
+ if err != nil {
+ return nil, err
+ }
+ ans[ip] = append(ans[ip], &directory{
+ path: d,
+ importPath: ip,
+ version: sv,
+ })
+ }
+ for k, v := range ans {
+ semanticSort(v)
+ ans[k] = v
+ }
+ return ans, nil
+}
+
+// sort the directories by semantic version, latest first
+func semanticSort(v []*directory) {
+ slices.SortFunc(v, func(l, r *directory) int {
+ if n := semver.Compare(l.version, r.version); n != 0 {
+ return -n // latest first
+ }
+ return strings.Compare(string(l.path), string(r.path))
+ })
+}
+
+// modCacheRegexp splits a relative path in the module cache into module, module version, and package.
+var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
+
+// DirToImportPathVersion computes import path and semantic version
+func DirToImportPathVersion(dir Relpath) (string, string, error) {
+ m := modCacheRegexp.FindStringSubmatch(string(dir))
+ // m[1] is the module path
+ // m[2] is the version major.minor.patch(-<pre-release identifier>)
+ // m[3] is the rest of the package path
+ if len(m) != 4 {
+ return "", "", fmt.Errorf("bad dir %s", dir)
+ }
+ if !semver.IsValid(m[2]) {
+ return "", "", fmt.Errorf("bad semantic version %s", m[2])
+ }
+ // ToSlash is required for Windows.
+ return filepath.ToSlash(m[1] + m[3]), m[2], nil
+}
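For instance, given a module-cache-relative directory for the yaml module used as an example elsewhere in this index code, the split would be (assuming Relpath is the string-based path type from types.go):

ip, ver, err := DirToImportPathVersion(Relpath("gopkg.in/yaml.v1@v1.0.0-20140924161607-9f9df34309c0"))
// ip  == "gopkg.in/yaml.v1"
// ver == "v1.0.0-20140924161607-9f9df34309c0"
// err == nil; a package subdirectory after the version would be appended to ip.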
+
+// a region controls what directories to look at, for
+// updating the index incrementally, and for testing that.
+// (for testing one builds an index as of A, incrementally
+// updates it to B, and compares the result to an index built
+// as of B.)
+type region struct {
+ onlyAfter, onlyBefore time.Time
+ sync.Mutex
+ ans []Relpath
+}
+
+func findDirs(root string, onlyAfter, onlyBefore time.Time) []Relpath {
+ roots := []gopathwalk.Root{{Path: root, Type: gopathwalk.RootModuleCache}}
+ // TODO(PJW): adjust concurrency
+ opts := gopathwalk.Options{ModulesEnabled: true, Concurrency: 1 /* ,Logf: log.Printf*/}
+ betw := &region{
+ onlyAfter: onlyAfter,
+ onlyBefore: onlyBefore,
+ }
+ gopathwalk.WalkSkip(roots, betw.addDir, betw.skipDir, opts)
+ return betw.ans
+}
+
+func (r *region) addDir(rt gopathwalk.Root, dir string) {
+ // do we need to check times?
+ r.Lock()
+ defer r.Unlock()
+ x := filepath.ToSlash(string(toRelpath(Abspath(rt.Path), dir)))
+ r.ans = append(r.ans, toRelpath(Abspath(rt.Path), x))
+}
+
+func (r *region) skipDir(_ gopathwalk.Root, dir string) bool {
+ // The cache directory is already ignored in gopathwalk.
+ if filepath.Base(dir) == "internal" {
+ return true
+ }
+ if strings.Contains(dir, "toolchain@") {
+ return true
+ }
+ // don't look inside @ directories that are too old
+ if strings.Contains(filepath.Base(dir), "@") {
+ st, err := os.Stat(dir)
+ if err != nil {
+ log.Printf("can't stat dir %s %v", dir, err)
+ return true
+ }
+ if st.ModTime().Before(r.onlyAfter) {
+ return true
+ }
+ if st.ModTime().After(r.onlyBefore) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/index.go b/vendor/golang.org/x/tools/internal/modindex/index.go
new file mode 100644
index 000000000..27b6dd832
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/index.go
@@ -0,0 +1,262 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "bufio"
+ "encoding/csv"
+ "errors"
+ "fmt"
+ "hash/crc64"
+ "io"
+ "io/fs"
+ "log"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+)
+
+/*
+The on-disk index is a text file.
+The first 3 lines are header information containing CurrentVersion,
+the value of GOMODCACHE, and the validity date of the index.
+(This is when the code started building the index.)
+Following the header are sections of lines, one section for each
+import path. These sections are sorted by package name.
+The first line of each section, marked by a leading :, contains
+the package name, the import path, the name of the directory relative
+to GOMODCACHE, and its semantic version.
+The rest of each section consists of one line per exported symbol.
+The lines are sorted by the symbol's name and contain the name,
+an indication of its lexical type (C, T, V, F), and if it is the
+name of a function, information about the signature.
+
+The fields in the section header lines are separated by commas, and
+in the unlikely event this would be confusing, the csv package is used
+to write (and read) them.
+
+In the lines containing exported names, C=const, V=var, T=type, F=func.
+If it is a func, the next field is the number of returned values,
+followed by pairs consisting of formal parameter names and types.
+All these fields are separated by spaces. Any spaces in a type
+(e.g., chan struct{}) are replaced by $s on the disk. The $s are
+turned back into spaces when read.
+
+Here is an index header (the comments are not part of the index):
+0 // version (of the index format)
+/usr/local/google/home/pjw/go/pkg/mod // GOMODCACHE
+2024-09-11 18:55:09 // validity date of the index
+
+Here is an index section:
+:yaml,gopkg.in/yaml.v1,gopkg.in/yaml.v1@v1.0.0-20140924161607-9f9df34309c0,v1.0.0-20140924161607-9f9df34309c0
+Getter T
+Marshal F 2 in interface{}
+Setter T
+Unmarshal F 1 in []byte out interface{}
+
+The package name is yaml, the import path is gopkg.in/yaml.v1.
+Getter and Setter are types, and Marshal and Unmarshal are functions.
+The latter returns one value and has two arguments, 'in' and 'out'
+whose types are []byte and interface{}.
+*/
+
+// CurrentVersion tells readers about the format of the index.
+const CurrentVersion int = 0
+
+// Index is returned by ReadIndex().
+type Index struct {
+ Version int
+ Cachedir Abspath // The directory containing the module cache
+ Changed time.Time // The index is up to date as of Changed
+ Entries []Entry
+}
+
+// An Entry contains information for an import path.
+type Entry struct {
+ Dir Relpath // directory in modcache
+ ImportPath string
+ PkgName string
+ Version string
+ //ModTime STime // is this useful?
+ Names []string // exported names and information
+}
+
+// ReadIndex reads the latest version of the on-disk index
+// for the module cache directory cachedir.
+// It returns (nil, nil) if there is no index, but returns
+// a non-nil error if the index exists but could not be read.
+func ReadIndex(cachedir string) (*Index, error) {
+ cachedir, err := filepath.Abs(cachedir)
+ if err != nil {
+ return nil, err
+ }
+ cd := Abspath(cachedir)
+ dir, err := IndexDir()
+ if err != nil {
+ return nil, err
+ }
+ base := indexNameBase(cd)
+ iname := filepath.Join(dir, base)
+ buf, err := os.ReadFile(iname)
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("cannot read %s: %w", iname, err)
+ }
+ fname := filepath.Join(dir, string(buf))
+ fd, err := os.Open(fname)
+ if err != nil {
+ return nil, err
+ }
+ defer fd.Close()
+ r := bufio.NewReader(fd)
+ ix, err := readIndexFrom(cd, r)
+ if err != nil {
+ return nil, err
+ }
+ return ix, nil
+}
+
+func readIndexFrom(cd Abspath, bx io.Reader) (*Index, error) {
+ b := bufio.NewScanner(bx)
+ var ans Index
+ // header
+ ok := b.Scan()
+ if !ok {
+ return nil, fmt.Errorf("unexpected scan error")
+ }
+ l := b.Text()
+ var err error
+ ans.Version, err = strconv.Atoi(l)
+ if err != nil {
+ return nil, err
+ }
+ if ans.Version != CurrentVersion {
+ return nil, fmt.Errorf("got version %d, expected %d", ans.Version, CurrentVersion)
+ }
+ if ok := b.Scan(); !ok {
+ return nil, fmt.Errorf("scanner error reading cachedir")
+ }
+ ans.Cachedir = Abspath(b.Text())
+ if ok := b.Scan(); !ok {
+ return nil, fmt.Errorf("scanner error reading index creation time")
+ }
+ // TODO(pjw): need to check that this is the expected cachedir
+ // so the tag should be passed in to this function
+ ans.Changed, err = time.ParseInLocation(time.DateTime, b.Text(), time.Local)
+ if err != nil {
+ return nil, err
+ }
+ var curEntry *Entry
+ for b.Scan() {
+ v := b.Text()
+ if v[0] == ':' {
+ if curEntry != nil {
+ ans.Entries = append(ans.Entries, *curEntry)
+ }
+ // as directories may contain commas and quotes, they need to be read as csv.
+ rdr := strings.NewReader(v[1:])
+ cs := csv.NewReader(rdr)
+ flds, err := cs.Read()
+ if err != nil {
+ return nil, err
+ }
+ if len(flds) != 4 {
+ return nil, fmt.Errorf("header contains %d fields, not 4: %q", len(v), v)
+ }
+ curEntry = &Entry{PkgName: flds[0], ImportPath: flds[1], Dir: toRelpath(cd, flds[2]), Version: flds[3]}
+ continue
+ }
+ curEntry.Names = append(curEntry.Names, v)
+ }
+ if curEntry != nil {
+ ans.Entries = append(ans.Entries, *curEntry)
+ }
+ if err := b.Err(); err != nil {
+ return nil, fmt.Errorf("scanner failed %v", err)
+ }
+ return &ans, nil
+}
+
+// write the index as a text file
+func writeIndex(cachedir Abspath, ix *Index) error {
+ dir, err := IndexDir()
+ if err != nil {
+ return err
+ }
+ ipat := fmt.Sprintf("index-%d-*", CurrentVersion)
+ fd, err := os.CreateTemp(dir, ipat)
+ if err != nil {
+ return err // can this happen?
+ }
+ defer fd.Close()
+ if err := writeIndexToFile(ix, fd); err != nil {
+ return err
+ }
+ content := fd.Name()
+ content = filepath.Base(content)
+ base := indexNameBase(cachedir)
+ nm := filepath.Join(dir, base)
+ err = os.WriteFile(nm, []byte(content), 0666)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func writeIndexToFile(x *Index, fd *os.File) error {
+ cnt := 0
+ w := bufio.NewWriter(fd)
+ fmt.Fprintf(w, "%d\n", x.Version)
+ fmt.Fprintf(w, "%s\n", x.Cachedir)
+ // round the time down
+ tm := x.Changed.Add(-time.Second / 2)
+ fmt.Fprintf(w, "%s\n", tm.Format(time.DateTime))
+ for _, e := range x.Entries {
+ if e.ImportPath == "" {
+ continue // shouldn't happen
+ }
+ // PJW: maybe always write these headers as csv?
+ if strings.ContainsAny(string(e.Dir), ",\"") {
+ log.Printf("DIR: %s", e.Dir)
+ cw := csv.NewWriter(w)
+ cw.Write([]string{":" + e.PkgName, e.ImportPath, string(e.Dir), e.Version})
+ cw.Flush()
+ } else {
+ fmt.Fprintf(w, ":%s,%s,%s,%s\n", e.PkgName, e.ImportPath, e.Dir, e.Version)
+ }
+ for _, x := range e.Names {
+ fmt.Fprintf(w, "%s\n", x)
+ cnt++
+ }
+ }
+ if err := w.Flush(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// tests can override this
+var IndexDir = indexDir
+
+// indexDir computes the directory containing the index
+func indexDir() (string, error) {
+ dir, err := os.UserCacheDir()
+ if err != nil {
+ return "", fmt.Errorf("cannot open UserCacheDir, %w", err)
+ }
+ return filepath.Join(dir, "go", "imports"), nil
+}
+
+// return the base name of the file containing the name of the current index
+func indexNameBase(cachedir Abspath) string {
+ // crc64 is a way to convert path names into 16 hex digits.
+ h := crc64.Checksum([]byte(cachedir), crc64.MakeTable(crc64.ECMA))
+ fname := fmt.Sprintf("index-name-%d-%016x", CurrentVersion, h)
+ return fname
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/lookup.go b/vendor/golang.org/x/tools/internal/modindex/lookup.go
new file mode 100644
index 000000000..29d4e3d7a
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/lookup.go
@@ -0,0 +1,145 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "slices"
+ "strconv"
+ "strings"
+)
+
+type Candidate struct {
+ PkgName string
+ Name string
+ Dir string
+ ImportPath string
+ Type LexType
+ // information for Funcs
+ Results int16 // how many results
+ Sig []Field // arg names and types
+}
+
+type Field struct {
+ Arg, Type string
+}
+
+type LexType int8
+
+const (
+ Const LexType = iota
+ Var
+ Type
+ Func
+)
+
+// Lookup finds all the symbols in the index with the given PkgName and name.
+// If prefix is true, it finds all of these with name as a prefix.
+func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate {
+ loc, ok := slices.BinarySearchFunc(ix.Entries, pkg, func(e Entry, pkg string) int {
+ return strings.Compare(e.PkgName, pkg)
+ })
+ if !ok {
+ return nil // didn't find the package
+ }
+ var ans []Candidate
+ // loc is the first entry for this package name, but there may be several
+ for i := loc; i < len(ix.Entries); i++ {
+ e := ix.Entries[i]
+ if e.PkgName != pkg {
+ break // end of sorted package names
+ }
+ nloc, ok := slices.BinarySearchFunc(e.Names, name, func(s string, name string) int {
+ if strings.HasPrefix(s, name) {
+ return 0
+ }
+ if s < name {
+ return -1
+ }
+ return 1
+ })
+ if !ok {
+ continue // didn't find the name, nor any symbols with name as a prefix
+ }
+ for j := nloc; j < len(e.Names); j++ {
+ nstr := e.Names[j]
+ // benchmarks show this makes a difference when there are a lot of possibilities
+ flds := fastSplit(nstr)
+ if !(flds[0] == name || prefix && strings.HasPrefix(flds[0], name)) {
+ // past range of matching Names
+ break
+ }
+ if len(flds) < 2 {
+ continue // should never happen
+ }
+ px := Candidate{
+ PkgName: pkg,
+ Name: flds[0],
+ Dir: string(e.Dir),
+ ImportPath: e.ImportPath,
+ Type: asLexType(flds[1][0]),
+ }
+ if flds[1] == "F" {
+ n, err := strconv.Atoi(flds[2])
+ if err != nil {
+ continue // should never happen
+ }
+ px.Results = int16(n)
+ if len(flds) >= 4 {
+ sig := strings.Split(flds[3], " ")
+ for i := 0; i < len(sig); i++ {
+ // $ cannot otherwise occur. removing the spaces
+ // almost works, but for chan struct{}, e.g.
+ sig[i] = strings.Replace(sig[i], "$", " ", -1)
+ }
+ px.Sig = toFields(sig)
+ }
+ }
+ ans = append(ans, px)
+ }
+ }
+ return ans
+}
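Sketch of a lookup against an index containing the yaml section from the example in index.go (ix assumed to come from ReadIndex):

for _, c := range ix.Lookup("yaml", "Marshal", false) {
	// c.ImportPath == "gopkg.in/yaml.v1"
	// c.Type == Func, c.Results == 2
	// c.Sig == []Field{{Arg: "in", Type: "interface{}"}}
	_ = c
}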
+
+func toFields(sig []string) []Field {
+ ans := make([]Field, len(sig)/2)
+ for i := 0; i < len(ans); i++ {
+ ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
+ }
+ return ans
+}
+
+// benchmarks show this is measurably better than strings.Split
+func fastSplit(x string) []string {
+ ans := make([]string, 0, 4)
+ nxt := 0
+ start := 0
+ for i := 0; i < len(x); i++ {
+ if x[i] != ' ' {
+ continue
+ }
+ ans = append(ans, x[start:i])
+ nxt++
+ start = i + 1
+ if nxt >= 3 {
+ break
+ }
+ }
+ ans = append(ans, x[start:])
+ return ans
+}
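fastSplit stops after the third space, so a function's signature stays joined in a single trailing field. A sketch of the behavior on a Names line from the index:

flds := fastSplit("Marshal F 2 in interface{}")
// flds == []string{"Marshal", "F", "2", "in interface{}"}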
+
+func asLexType(c byte) LexType {
+ switch c {
+ case 'C':
+ return Const
+ case 'V':
+ return Var
+ case 'T':
+ return Type
+ case 'F':
+ return Func
+ }
+ return -1
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/modindex.go b/vendor/golang.org/x/tools/internal/modindex/modindex.go
new file mode 100644
index 000000000..355a53e71
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/modindex.go
@@ -0,0 +1,164 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package modindex contains code for building and searching an index to
+// the Go module cache. The directory containing the index, returned by
+// IndexDir(), contains a file index-name-<ver> that contains the name
+// of the current index. We believe writing that short file is atomic.
+// ReadIndex reads that file to get the file name of the index.
+// WriteIndex writes an index with a unique name and then
+// writes that name into a new version of index-name-<ver>.
+// (<ver> stands for the CurrentVersion of the index format.)
+package modindex
+
+import (
+ "path/filepath"
+ "slices"
+ "strings"
+ "time"
+
+ "golang.org/x/mod/semver"
+)
+
+// Create always creates a new index for the go module cache that is in cachedir.
+func Create(cachedir string) error {
+ _, err := indexModCache(cachedir, true)
+ return err
+}
+
+// Update the index for the go module cache that is in cachedir,
+// If there is no existing index it will build one.
+// If there are changed directories since the last index, it will
+// write a new one and return true. Otherwise it returns false.
+func Update(cachedir string) (bool, error) {
+ return indexModCache(cachedir, false)
+}
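A hedged sketch of the expected lifecycle, with gomodcache standing in for the GOMODCACHE directory:

changed, err := Update(gomodcache) // builds the index if none exists yet
if err != nil {
	return err
}
ix, err := ReadIndex(gomodcache) // returns (nil, nil) if there is no index
if err != nil {
	return err
}
_ = changed
_ = ix // query with ix.Lookup(pkgName, name, prefix)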
+
+// indexModCache writes an index current as of when it is called.
+// If clear is true the index is constructed from all of GOMODCACHE
+// otherwise the index is constructed from the last previous index
+// and the updates to the cache. It returns true if it wrote an index,
+// false otherwise.
+func indexModCache(cachedir string, clear bool) (bool, error) {
+ cachedir, err := filepath.Abs(cachedir)
+ if err != nil {
+ return false, err
+ }
+ cd := Abspath(cachedir)
+ future := time.Now().Add(24 * time.Hour) // safely in the future
+ ok, err := modindexTimed(future, cd, clear)
+ if err != nil {
+ return false, err
+ }
+ return ok, nil
+}
+
+// modindexTimed writes an index current as of onlyBefore.
+// If clear is true the index is constructed from all of GOMODCACHE
+// otherwise the index is constructed from the last previous index
+// and all the updates to the cache before onlyBefore.
+// It returns true if it wrote a new index, false if it wrote nothing.
+func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
+ var curIndex *Index
+ if !clear {
+ var err error
+ curIndex, err = ReadIndex(string(cachedir))
+ if clear && err != nil {
+ return false, err
+ }
+ // TODO(pjw): check that most of those directories still exist
+ }
+ cfg := &work{
+ onlyBefore: onlyBefore,
+ oldIndex: curIndex,
+ cacheDir: cachedir,
+ }
+ if curIndex != nil {
+ cfg.onlyAfter = curIndex.Changed
+ }
+ if err := cfg.buildIndex(); err != nil {
+ return false, err
+ }
+ if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
+ // no changes from existing curIndex, don't write a new index
+ return false, nil
+ }
+ if err := cfg.writeIndex(); err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+type work struct {
+ onlyBefore time.Time // do not use directories later than this
+ onlyAfter time.Time // only interested in directories after this
+ // directories from before onlyAfter come from oldIndex
+ oldIndex *Index
+ newIndex *Index
+ cacheDir Abspath
+}
+
+func (w *work) buildIndex() error {
+ // The effective date of the new index should be at least
+ // slightly earlier than when the directories are scanned
+ // so set it now.
+ w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
+ dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
+ if len(dirs) == 0 {
+ return nil
+ }
+ newdirs, err := byImportPath(dirs)
+ if err != nil {
+ return err
+ }
+ // for each import path it might occur only in newdirs,
+ // only in w.oldIndex, or in both.
+ // If it occurs in both, use the semantically later one
+ if w.oldIndex != nil {
+ for _, e := range w.oldIndex.Entries {
+ found, ok := newdirs[e.ImportPath]
+ if !ok {
+ w.newIndex.Entries = append(w.newIndex.Entries, e)
+ continue // use this one, there is no new one
+ }
+ if semver.Compare(found[0].version, e.Version) > 0 {
+ // use the new one
+ } else {
+ // use the old one, forget the new one
+ w.newIndex.Entries = append(w.newIndex.Entries, e)
+ delete(newdirs, e.ImportPath)
+ }
+ }
+ }
+ // get symbol information for all the new directories
+ getSymbols(w.cacheDir, newdirs)
+ // assemble the new index entries
+ for k, v := range newdirs {
+ d := v[0]
+ pkg, names := processSyms(d.syms)
+ if pkg == "" {
+ continue // PJW: does this ever happen?
+ }
+ entry := Entry{
+ PkgName: pkg,
+ Dir: d.path,
+ ImportPath: k,
+ Version: d.version,
+ Names: names,
+ }
+ w.newIndex.Entries = append(w.newIndex.Entries, entry)
+ }
+ // sort the entries in the new index
+ slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
+ if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
+ return n
+ }
+ return strings.Compare(l.ImportPath, r.ImportPath)
+ })
+ return nil
+}
+
+func (w *work) writeIndex() error {
+ return writeIndex(w.cacheDir, w.newIndex)
+}
diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
new file mode 100644
index 000000000..2e285ed99
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -0,0 +1,189 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "golang.org/x/sync/errgroup"
+)
+
+// The name of a symbol contains information about the symbol:
+// <name> T for types
+// <name> C for consts
+// <name> V for vars
+// and for funcs: <name> F <num of return values> (<arg-name> <arg-type>)*
+// any spaces in <arg-type> are replaced by $s so that the fields
+// of the name are space separated
+type symbol struct {
+ pkg string // name of the symbol's package
+ name string // declared name
+ kind string // T, C, V, or F
+ sig string // signature information, for F
+}
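As a worked example of this encoding (matching the Unmarshal line in the sample index section in index.go), a declaration like func Unmarshal(in []byte, out interface{}) error would be recorded as:

sym := symbol{
	pkg:  "yaml",
	name: "Unmarshal",
	kind: "F",
	sig:  "1 in []byte out interface{}", // one result, then (name, type) pairs
}
// processSyms later joins these into the Names line
//   "Unmarshal F 1 in []byte out interface{}"
// with any space inside a type (e.g. chan struct{}) stored as '$'.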
+
+// find the symbols for the best directories
+func getSymbols(cd Abspath, dirs map[string][]*directory) {
+ var g errgroup.Group
+ g.SetLimit(-1) // maybe throttle this some day
+ for _, vv := range dirs {
+ // throttling some day?
+ d := vv[0]
+ g.Go(func() error {
+ thedir := filepath.Join(string(cd), string(d.path))
+ mode := parser.SkipObjectResolution
+
+ fi, err := os.ReadDir(thedir)
+ if err != nil {
+ return nil // log this someday?
+ }
+ for _, fx := range fi {
+ if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
+ continue
+ }
+ fname := filepath.Join(thedir, fx.Name())
+ tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
+ if err != nil {
+ continue // ignore errors, someday log them?
+ }
+ d.syms = append(d.syms, getFileExports(tr)...)
+ }
+ return nil
+ })
+ }
+ g.Wait()
+}
+
+func getFileExports(f *ast.File) []symbol {
+ pkg := f.Name.Name
+ if pkg == "main" {
+ return nil
+ }
+ var ans []symbol
+ // should we look for //go:build ignore?
+ for _, decl := range f.Decls {
+ switch decl := decl.(type) {
+ case *ast.FuncDecl:
+ if decl.Recv != nil {
+ // ignore methods, as we are completing package selections
+ continue
+ }
+ name := decl.Name.Name
+ dtype := decl.Type
+ // not looking at dtype.TypeParams. That is, treating
+ // generic functions just like non-generic ones.
+ sig := dtype.Params
+ kind := "F"
+ result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
+ for _, x := range sig.List {
+ // This code creates a string representing the type.
+ // TODO(pjw): it may be fragile:
+ // 1. x.Type could be nil, perhaps in ill-formed code
+ // 2. ExprString might someday change incompatibly to
+ // include struct tags, which can be arbitrary strings
+ if x.Type == nil {
+ // Can this happen without a parse error? (Files with parse
+ // errors are ignored in getSymbols)
+ continue // maybe report this someday
+ }
+ tp := types.ExprString(x.Type)
+ if len(tp) == 0 {
+ // Can this happen?
+ continue // maybe report this someday
+ }
+ // This is only safe if ExprString never returns anything with a $
+ // The only place a $ can occur seems to be in a struct tag, which
+ // can be an arbitrary string literal, and ExprString does not presently
+ // print struct tags. So for this to happen the type of a formal parameter
+ // has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
+ // would have to show the struct tag. Even testing for this case seems
+ // a waste of effort, but let's not ignore such pathologies
+ if strings.Contains(tp, "$") {
+ continue
+ }
+ tp = strings.Replace(tp, " ", "$", -1)
+ if len(x.Names) == 0 {
+ result = append(result, "_")
+ result = append(result, tp)
+ } else {
+ for _, y := range x.Names {
+ result = append(result, y.Name)
+ result = append(result, tp)
+ }
+ }
+ }
+ sigs := strings.Join(result, " ")
+ if s := newsym(pkg, name, kind, sigs); s != nil {
+ ans = append(ans, *s)
+ }
+ case *ast.GenDecl:
+ switch decl.Tok {
+ case token.CONST, token.VAR:
+ tp := "V"
+ if decl.Tok == token.CONST {
+ tp = "C"
+ }
+ for _, sp := range decl.Specs {
+ for _, x := range sp.(*ast.ValueSpec).Names {
+ if s := newsym(pkg, x.Name, tp, ""); s != nil {
+ ans = append(ans, *s)
+ }
+ }
+ }
+ case token.TYPE:
+ for _, sp := range decl.Specs {
+ if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, "T", ""); s != nil {
+ ans = append(ans, *s)
+ }
+ }
+ }
+ }
+ }
+ return ans
+}
+
+func newsym(pkg, name, kind, sig string) *symbol {
+ if len(name) == 0 || !ast.IsExported(name) {
+ return nil
+ }
+ sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
+ return &sym
+}
+
+// return the package name and the encoded strings for the symbols.
+// If there are multiple packages, choose one arbitrarily.
+// The returned slice is sorted lexicographically by symbol name.
+func processSyms(syms []symbol) (string, []string) {
+ if len(syms) == 0 {
+ return "", nil
+ }
+ slices.SortFunc(syms, func(l, r symbol) int {
+ return strings.Compare(l.name, r.name)
+ })
+ pkg := syms[0].pkg
+ var names []string
+ for _, s := range syms {
+ var nx string
+ if s.pkg == pkg {
+ if s.sig != "" {
+ nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
+ } else {
+ nx = fmt.Sprintf("%s %s", s.name, s.kind)
+ }
+ names = append(names, nx)
+ } else {
+ continue // PJW: do we want to keep track of these?
+ }
+ }
+ return pkg, names
+}
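To make the encoding described at the top of this file concrete, here is a hedged, self-contained sketch that reproduces the func encoding for a tiny source file. It is not the vendored code, just the same idea applied with the public go/ast and go/types helpers; consts and types would simply be encoded as "Limit C" and "Buffer T".

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"strings"
)

const src = `package demo

const Limit = 10

type Buffer struct{}

func Send(ch chan int, n int) error { return nil }
`

func main() {
	f, err := parser.ParseFile(token.NewFileSet(), "demo.go", src, parser.SkipObjectResolution)
	if err != nil {
		panic(err)
	}
	for _, decl := range f.Decls {
		fn, ok := decl.(*ast.FuncDecl)
		if !ok || fn.Recv != nil || !ast.IsExported(fn.Name.Name) {
			continue // only exported package-level funcs
		}
		parts := []string{fn.Name.Name, "F", fmt.Sprint(fn.Type.Results.NumFields())}
		for _, field := range fn.Type.Params.List {
			// spaces in the printed type are replaced by '$' so the fields stay space-separated
			tp := strings.ReplaceAll(types.ExprString(field.Type), " ", "$")
			for _, name := range field.Names {
				parts = append(parts, name.Name, tp)
			}
		}
		fmt.Println(strings.Join(parts, " ")) // Send F 1 ch chan$int n int
	}
}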
diff --git a/vendor/golang.org/x/tools/internal/modindex/types.go b/vendor/golang.org/x/tools/internal/modindex/types.go
new file mode 100644
index 000000000..ece448863
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/modindex/types.go
@@ -0,0 +1,25 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+ "strings"
+)
+
+// some special types to avoid confusion
+
+// distinguish various types of directory names. It's easy to get confused.
+type Abspath string // absolute paths
+type Relpath string // paths with GOMODCACHE prefix removed
+
+func toRelpath(cachedir Abspath, s string) Relpath {
+ if strings.HasPrefix(s, string(cachedir)) {
+ if s == string(cachedir) {
+ return Relpath("")
+ }
+ return Relpath(s[len(cachedir)+1:])
+ }
+ return Relpath(s)
+}
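A tiny runnable sketch of the normalization toRelpath performs; rel is a local stand-in for the unexported helper and the cache path is made up.

package main

import (
	"fmt"
	"strings"
)

// rel strips the GOMODCACHE prefix (and its trailing separator) when present.
func rel(cachedir, s string) string {
	if strings.HasPrefix(s, cachedir) {
		if s == cachedir {
			return ""
		}
		return s[len(cachedir)+1:]
	}
	return s
}

func main() {
	const cache = "/home/user/go/pkg/mod" // hypothetical GOMODCACHE
	fmt.Println(rel(cache, cache+"/example.com/a@v1.2.0")) // example.com/a@v1.2.0
	fmt.Println(rel(cache, "example.com/a@v1.2.0"))        // unchanged: already relative
	fmt.Println(rel(cache, cache) == "")                   // true
}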
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 44719de17..66e69b438 100644
--- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,7 +5,6 @@
// Package packagesinternal exposes internal-only fields from go/packages.
package packagesinternal
-var GetForTest = func(p interface{}) string { return "" }
var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
type PackageError struct {
@@ -16,7 +15,6 @@ type PackageError struct {
var TypecheckCgo int
var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
-var ForTest int // must be set as a LoadMode to call GetForTest
var SetModFlag = func(config interface{}, value string) {}
var SetModFile = func(config interface{}, value string) {}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
index 2acd85851..f6cb37c5c 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
@@ -21,10 +21,7 @@ import (
// export data.
type PkgDecoder struct {
// version is the file format version.
- version uint32
-
- // aliases determines whether types.Aliases should be created
- aliases bool
+ version Version
// sync indicates whether the file uses sync markers.
sync bool
@@ -71,12 +68,9 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
// IR export data from input. pkgPath is the package path for the
// compilation unit that produced the export data.
-//
-// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
func NewPkgDecoder(pkgPath, input string) PkgDecoder {
pr := PkgDecoder{
pkgPath: pkgPath,
- //aliases: aliases.Enabled(),
}
// TODO(mdempsky): Implement direct indexing of input string to
@@ -84,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
r := strings.NewReader(input)
- assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
+ var ver uint32
+ assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
+ pr.version = Version(ver)
- switch pr.version {
- default:
- panic(fmt.Errorf("unsupported version: %v", pr.version))
- case 0:
- // no flags
- case 1:
+ if pr.version >= numVersions {
+ panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
+ }
+
+ if pr.version.Has(Flags) {
var flags uint32
assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
pr.sync = flags&flagSyncMarkers != 0
@@ -106,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
assert(err == nil)
pr.elemData = input[pos:]
- assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ const fingerprintSize = 8
+ assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
return pr
}
@@ -140,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
absIdx += int(pr.elemEndsEnds[k-1])
}
if absIdx >= int(pr.elemEndsEnds[k]) {
- errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
}
return absIdx
}
@@ -197,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
Idx: idx,
}
- // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
- r.Data = *strings.NewReader(pr.DataIdx(k, idx))
-
+ r.Data.Reset(pr.DataIdx(k, idx))
r.Sync(SyncRelocs)
r.Relocs = make([]RelocEnt, r.Len())
for i := range r.Relocs {
@@ -248,7 +243,7 @@ type Decoder struct {
func (r *Decoder) checkErr(err error) {
if err != nil {
- errorf("unexpected decoding error: %w", err)
+ panicf("unexpected decoding error: %w", err)
}
}
@@ -519,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
return path, name, tag
}
+
+// Version reports the version of the bitstream.
+func (w *Decoder) Version() Version { return w.common.version }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
index 6482617a4..c17a12399 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
@@ -12,18 +12,15 @@ import (
"io"
"math/big"
"runtime"
+ "strings"
)
-// currentVersion is the current version number.
-//
-// - v0: initial prototype
-//
-// - v1: adds the flags uint32 word
-const currentVersion uint32 = 1
-
// A PkgEncoder provides methods for encoding a package's Unified IR
// export data.
type PkgEncoder struct {
+ // version of the bitstream.
+ version Version
+
// elems holds the bitstream for previously encoded elements.
elems [numRelocs][]string
@@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
// export data files, but can help diagnosing desync errors in
// higher-level Unified IR reader/writer code. If syncFrames is
// negative, then sync markers are omitted entirely.
-func NewPkgEncoder(syncFrames int) PkgEncoder {
+func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
return PkgEncoder{
+ version: version,
stringsIdx: make(map[string]Index),
syncFrames: syncFrames,
}
@@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
assert(binary.Write(out, binary.LittleEndian, x) == nil)
}
- writeUint32(currentVersion)
+ writeUint32(uint32(pw.version))
- var flags uint32
- if pw.SyncMarkers() {
- flags |= flagSyncMarkers
+ if pw.version.Has(Flags) {
+ var flags uint32
+ if pw.SyncMarkers() {
+ flags |= flagSyncMarkers
+ }
+ writeUint32(flags)
}
- writeUint32(flags)
// Write elemEndsEnds.
var sum uint32
@@ -159,7 +159,7 @@ type Encoder struct {
// Flush finalizes the element's bitstream and returns its Index.
func (w *Encoder) Flush() Index {
- var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+ var sb strings.Builder
// Backup the data so we write the relocations at the front.
var tmp bytes.Buffer
@@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index {
func (w *Encoder) checkErr(err error) {
if err != nil {
- errorf("unexpected encoding error: %v", err)
+ panicf("unexpected encoding error: %v", err)
}
}
@@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) {
// section (if not already present), and then writing a relocation
// into the element bitstream.
func (w *Encoder) String(s string) {
+ w.StringRef(w.p.StringIdx(s))
+}
+
+// StringRef writes a reference to the given index, which must be a
+// previously encoded string value.
+func (w *Encoder) StringRef(idx Index) {
w.Sync(SyncString)
- w.Reloc(RelocString, w.p.StringIdx(s))
+ w.Reloc(RelocString, idx)
}
// Strings encodes and writes a variable-length slice of strings into
@@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) {
func (w *Encoder) scalar(val constant.Value) {
switch v := constant.Val(val).(type) {
default:
- errorf("unhandled %v (%v)", val, val.Kind())
+ panicf("unhandled %v (%v)", val, val.Kind())
case bool:
w.Code(ValBool)
w.Bool(v)
@@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) {
b := v.Append(nil, 'p', -1)
w.String(string(b)) // TODO: More efficient encoding.
}
+
+// Version reports the version of the bitstream.
+func (w *Encoder) Version() Version { return w.p.version }
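A rough sketch of the new version-aware constructor in use; it assumes compilation inside golang.org/x/tools (pkgbits is an internal package) and touches only APIs visible in this diff. A V2 bitstream writes the version word and then a flags word, because V2 has the Flags field; a V0 encoder would omit the flags word.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/tools/internal/pkgbits"
)

func main() {
	pw := pkgbits.NewPkgEncoder(pkgbits.V2, -1) // V2 bitstream, sync markers disabled
	fmt.Println(pw.SyncMarkers())               // false

	var buf bytes.Buffer
	fingerprint := pw.DumpTo(&buf) // header: uint32 version, uint32 flags (V2 >= V1), section ends, ...
	fmt.Printf("%d bytes, fingerprint %x\n", buf.Len(), fingerprint)
}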
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
deleted file mode 100644
index 5294f6a63..000000000
--- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.7
-// +build !go1.7
-
-// TODO(mdempsky): Remove after #44505 is resolved
-
-package pkgbits
-
-import "runtime"
-
-func walkFrames(pcs []uintptr, visit frameVisitor) {
- for _, pc := range pcs {
- fn := runtime.FuncForPC(pc)
- file, line := fn.FileLine(pc)
-
- visit(file, line, fn.Name(), pc-fn.Entry())
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
deleted file mode 100644
index 2324ae7ad..000000000
--- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.7
-// +build go1.7
-
-package pkgbits
-
-import "runtime"
-
-// walkFrames calls visit for each call frame represented by pcs.
-//
-// pcs should be a slice of PCs, as returned by runtime.Callers.
-func walkFrames(pcs []uintptr, visit frameVisitor) {
- if len(pcs) == 0 {
- return
- }
-
- frames := runtime.CallersFrames(pcs)
- for {
- frame, more := frames.Next()
- visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
- if !more {
- return
- }
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go
index ad26d3b28..50534a295 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/support.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go
@@ -12,6 +12,6 @@ func assert(b bool) {
}
}
-func errorf(format string, args ...interface{}) {
+func panicf(format string, args ...any) {
panic(fmt.Errorf(format, args...))
}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
index 5bd51ef71..1520b73af 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
@@ -6,6 +6,7 @@ package pkgbits
import (
"fmt"
+ "runtime"
"strings"
)
@@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string {
type frameVisitor func(file string, line int, name string, offset uintptr)
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ if len(pcs) == 0 {
+ return
+ }
+
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+ if !more {
+ return
+ }
+ }
+}
+
// SyncMarker is an enum type that represents markers that may be
// written to export data to ensure the reader and writer stay
// synchronized.
@@ -110,4 +129,8 @@ const (
SyncStmtsEnd
SyncLabel
SyncOptLabel
+
+ SyncMultiExpr
+ SyncRType
+ SyncConvRTTI
)
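For context, the calling pattern walkFrames expects, sketched as a standalone program; the capture depth and helper names are illustrative, not part of pkgbits.

package main

import (
	"fmt"
	"runtime"
)

func report(file string, line int, name string, offset uintptr) {
	fmt.Printf("%s:%d %s +0x%x\n", file, line, name, offset)
}

func walk(pcs []uintptr, visit func(string, int, string, uintptr)) {
	if len(pcs) == 0 {
		return
	}
	frames := runtime.CallersFrames(pcs)
	for {
		frame, more := frames.Next()
		visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
		if !more {
			return
		}
	}
}

func main() {
	pcs := make([]uintptr, 8)
	n := runtime.Callers(1, pcs) // 1 skips runtime.Callers itself
	walk(pcs[:n], report)
}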
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
index 4a5b0ca5f..582ad56d3 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
@@ -74,11 +74,14 @@ func _() {
_ = x[SyncStmtsEnd-64]
_ = x[SyncLabel-65]
_ = x[SyncOptLabel-66]
+ _ = x[SyncMultiExpr-67]
+ _ = x[SyncRType-68]
+ _ = x[SyncConvRTTI-69]
}
-const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
-var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
func (i SyncMarker) String() string {
i -= 1
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go
new file mode 100644
index 000000000..53af9df22
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go
@@ -0,0 +1,85 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// Version indicates a version of a unified IR bitstream.
+// Each Version indicates the addition, removal, or change of
+// data in the bitstream.
+//
+// These are serialized to disk and the interpretation remains fixed.
+type Version uint32
+
+const (
+ // V0: initial prototype.
+ //
+ // All data that is not assigned a Field is in version V0
+ // and has not been deprecated.
+ V0 Version = iota
+
+ // V1: adds the Flags uint32 word
+ V1
+
+ // V2: removes unused legacy fields and supports type parameters for aliases.
+ // - remove the legacy "has init" bool from the public root
+ // - remove obj's "derived func instance" bool
+ // - add a TypeParamNames field to ObjAlias
+ // - remove derived info "needed" bool
+ V2
+
+ numVersions = iota
+)
+
+// Field denotes a unit of data in the serialized unified IR bitstream.
+// It is conceptually like a field in a structure.
+//
+// We only really need Fields when the data may or may not be present
+// in a stream based on the Version of the bitstream.
+//
+// Unlike much of pkgbits, Fields are not serialized and
+// can change values as needed.
+type Field int
+
+const (
+ // Flags in a uint32 in the header of a bitstream
+ // that is used to indicate whether optional features are enabled.
+ Flags Field = iota
+
+ // Deprecated: HasInit was a bool indicating whether a package
+ // has any init functions.
+ HasInit
+
+ // Deprecated: DerivedFuncInstance was a bool indicating
+ // whether an object was a function instance.
+ DerivedFuncInstance
+
+ // ObjAlias has a list of TypeParamNames.
+ AliasTypeParamNames
+
+ // Deprecated: DerivedInfoNeeded was a bool indicating
+ // whether a type was a derived type.
+ DerivedInfoNeeded
+
+ numFields = iota
+)
+
+// introduced is the version a field was added.
+var introduced = [numFields]Version{
+ Flags: V1,
+ AliasTypeParamNames: V2,
+}
+
+// removed is the version a field was removed in, or 0 for fields
+// that have not yet been deprecated.
+// (So removed[f]-1 is the last version it is included in.)
+var removed = [numFields]Version{
+ HasInit: V2,
+ DerivedFuncInstance: V2,
+ DerivedInfoNeeded: V2,
+}
+
+// Has reports whether field f is present in a bitstream at version v.
+func (v Version) Has(f Field) bool {
+ return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
+}
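A short sketch of how Has combines the introduced/removed tables above (importable only within x/tools, since pkgbits is internal): a field is present from the version that introduced it up to, but not including, the version that removed it.

package main

import (
	"fmt"

	"golang.org/x/tools/internal/pkgbits"
)

func main() {
	for _, v := range []pkgbits.Version{pkgbits.V0, pkgbits.V1, pkgbits.V2} {
		fmt.Println(v,
			v.Has(pkgbits.Flags),               // false true true  (added in V1)
			v.Has(pkgbits.HasInit),             // true  true false (removed in V2)
			v.Has(pkgbits.AliasTypeParamNames), // false false true (added in V2)
		)
	}
}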
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index fd6892075..cdaac9ab3 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -23,6 +23,7 @@ var PackageSymbols = map[string][]Symbol{
{"ErrWriteAfterClose", Var, 0},
{"ErrWriteTooLong", Var, 0},
{"FileInfoHeader", Func, 1},
+ {"FileInfoNames", Type, 23},
{"Format", Type, 10},
{"FormatGNU", Const, 10},
{"FormatPAX", Const, 10},
@@ -820,6 +821,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*ConnectionState).ExportKeyingMaterial", Method, 11},
{"(*Dialer).Dial", Method, 15},
{"(*Dialer).DialContext", Method, 15},
+ {"(*ECHRejectionError).Error", Method, 23},
{"(*QUICConn).Close", Method, 21},
{"(*QUICConn).ConnectionState", Method, 21},
{"(*QUICConn).HandleData", Method, 21},
@@ -827,6 +829,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*QUICConn).SendSessionTicket", Method, 21},
{"(*QUICConn).SetTransportParameters", Method, 21},
{"(*QUICConn).Start", Method, 21},
+ {"(*QUICConn).StoreSession", Method, 23},
{"(*SessionState).Bytes", Method, 21},
{"(AlertError).Error", Method, 21},
{"(ClientAuthType).String", Method, 15},
@@ -877,6 +880,8 @@ var PackageSymbols = map[string][]Symbol{
{"Config.ClientSessionCache", Field, 3},
{"Config.CurvePreferences", Field, 3},
{"Config.DynamicRecordSizingDisabled", Field, 7},
+ {"Config.EncryptedClientHelloConfigList", Field, 23},
+ {"Config.EncryptedClientHelloRejectionVerify", Field, 23},
{"Config.GetCertificate", Field, 4},
{"Config.GetClientCertificate", Field, 8},
{"Config.GetConfigForClient", Field, 8},
@@ -902,6 +907,7 @@ var PackageSymbols = map[string][]Symbol{
{"ConnectionState", Type, 0},
{"ConnectionState.CipherSuite", Field, 0},
{"ConnectionState.DidResume", Field, 1},
+ {"ConnectionState.ECHAccepted", Field, 23},
{"ConnectionState.HandshakeComplete", Field, 0},
{"ConnectionState.NegotiatedProtocol", Field, 0},
{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0},
@@ -925,6 +931,8 @@ var PackageSymbols = map[string][]Symbol{
{"ECDSAWithP384AndSHA384", Const, 8},
{"ECDSAWithP521AndSHA512", Const, 8},
{"ECDSAWithSHA1", Const, 10},
+ {"ECHRejectionError", Type, 23},
+ {"ECHRejectionError.RetryConfigList", Field, 23},
{"Ed25519", Const, 13},
{"InsecureCipherSuites", Func, 14},
{"Listen", Func, 0},
@@ -943,6 +951,7 @@ var PackageSymbols = map[string][]Symbol{
{"ParseSessionState", Func, 21},
{"QUICClient", Func, 21},
{"QUICConfig", Type, 21},
+ {"QUICConfig.EnableSessionEvents", Field, 23},
{"QUICConfig.TLSConfig", Field, 21},
{"QUICConn", Type, 21},
{"QUICEncryptionLevel", Type, 21},
@@ -954,16 +963,20 @@ var PackageSymbols = map[string][]Symbol{
{"QUICEvent.Data", Field, 21},
{"QUICEvent.Kind", Field, 21},
{"QUICEvent.Level", Field, 21},
+ {"QUICEvent.SessionState", Field, 23},
{"QUICEvent.Suite", Field, 21},
{"QUICEventKind", Type, 21},
{"QUICHandshakeDone", Const, 21},
{"QUICNoEvent", Const, 21},
{"QUICRejectedEarlyData", Const, 21},
+ {"QUICResumeSession", Const, 23},
{"QUICServer", Func, 21},
{"QUICSessionTicketOptions", Type, 21},
{"QUICSessionTicketOptions.EarlyData", Field, 21},
+ {"QUICSessionTicketOptions.Extra", Field, 23},
{"QUICSetReadSecret", Const, 21},
{"QUICSetWriteSecret", Const, 21},
+ {"QUICStoreSession", Const, 23},
{"QUICTransportParameters", Const, 21},
{"QUICTransportParametersRequired", Const, 21},
{"QUICWriteData", Const, 21},
@@ -1036,6 +1049,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*Certificate).Verify", Method, 0},
{"(*Certificate).VerifyHostname", Method, 0},
{"(*CertificateRequest).CheckSignature", Method, 5},
+ {"(*OID).UnmarshalBinary", Method, 23},
+ {"(*OID).UnmarshalText", Method, 23},
{"(*RevocationList).CheckSignatureFrom", Method, 19},
{"(CertificateInvalidError).Error", Method, 0},
{"(ConstraintViolationError).Error", Method, 0},
@@ -1043,6 +1058,8 @@ var PackageSymbols = map[string][]Symbol{
{"(InsecureAlgorithmError).Error", Method, 6},
{"(OID).Equal", Method, 22},
{"(OID).EqualASN1OID", Method, 22},
+ {"(OID).MarshalBinary", Method, 23},
+ {"(OID).MarshalText", Method, 23},
{"(OID).String", Method, 22},
{"(PublicKeyAlgorithm).String", Method, 10},
{"(SignatureAlgorithm).String", Method, 6},
@@ -1196,6 +1213,7 @@ var PackageSymbols = map[string][]Symbol{
{"ParseCertificates", Func, 0},
{"ParseDERCRL", Func, 0},
{"ParseECPrivateKey", Func, 1},
+ {"ParseOID", Func, 23},
{"ParsePKCS1PrivateKey", Func, 0},
{"ParsePKCS1PublicKey", Func, 10},
{"ParsePKCS8PrivateKey", Func, 0},
@@ -2541,6 +2559,7 @@ var PackageSymbols = map[string][]Symbol{
{"PT_NOTE", Const, 0},
{"PT_NULL", Const, 0},
{"PT_OPENBSD_BOOTDATA", Const, 16},
+ {"PT_OPENBSD_NOBTCFI", Const, 23},
{"PT_OPENBSD_RANDOMIZE", Const, 16},
{"PT_OPENBSD_WXNEEDED", Const, 16},
{"PT_PAX_FLAGS", Const, 16},
@@ -3620,13 +3639,16 @@ var PackageSymbols = map[string][]Symbol{
{"STT_COMMON", Const, 0},
{"STT_FILE", Const, 0},
{"STT_FUNC", Const, 0},
+ {"STT_GNU_IFUNC", Const, 23},
{"STT_HIOS", Const, 0},
{"STT_HIPROC", Const, 0},
{"STT_LOOS", Const, 0},
{"STT_LOPROC", Const, 0},
{"STT_NOTYPE", Const, 0},
{"STT_OBJECT", Const, 0},
+ {"STT_RELC", Const, 23},
{"STT_SECTION", Const, 0},
+ {"STT_SRELC", Const, 23},
{"STT_TLS", Const, 0},
{"STV_DEFAULT", Const, 0},
{"STV_HIDDEN", Const, 0},
@@ -4544,11 +4566,14 @@ var PackageSymbols = map[string][]Symbol{
{"URLEncoding", Var, 0},
},
"encoding/binary": {
+ {"Append", Func, 23},
{"AppendByteOrder", Type, 19},
{"AppendUvarint", Func, 19},
{"AppendVarint", Func, 19},
{"BigEndian", Var, 0},
{"ByteOrder", Type, 0},
+ {"Decode", Func, 23},
+ {"Encode", Func, 23},
{"LittleEndian", Var, 0},
{"MaxVarintLen16", Const, 0},
{"MaxVarintLen32", Const, 0},
@@ -5308,6 +5333,7 @@ var PackageSymbols = map[string][]Symbol{
{"ParenExpr.Rparen", Field, 0},
{"ParenExpr.X", Field, 0},
{"Pkg", Const, 0},
+ {"Preorder", Func, 23},
{"Print", Func, 0},
{"RECV", Const, 0},
{"RangeStmt", Type, 0},
@@ -5898,7 +5924,12 @@ var PackageSymbols = map[string][]Symbol{
},
"go/types": {
{"(*Alias).Obj", Method, 22},
+ {"(*Alias).Origin", Method, 23},
+ {"(*Alias).Rhs", Method, 23},
+ {"(*Alias).SetTypeParams", Method, 23},
{"(*Alias).String", Method, 22},
+ {"(*Alias).TypeArgs", Method, 23},
+ {"(*Alias).TypeParams", Method, 23},
{"(*Alias).Underlying", Method, 22},
{"(*ArgumentError).Error", Method, 18},
{"(*ArgumentError).Unwrap", Method, 18},
@@ -5943,6 +5974,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Func).Pkg", Method, 5},
{"(*Func).Pos", Method, 5},
{"(*Func).Scope", Method, 5},
+ {"(*Func).Signature", Method, 23},
{"(*Func).String", Method, 5},
{"(*Func).Type", Method, 5},
{"(*Info).ObjectOf", Method, 5},
@@ -6992,6 +7024,12 @@ var PackageSymbols = map[string][]Symbol{
{"TempFile", Func, 0},
{"WriteFile", Func, 0},
},
+ "iter": {
+ {"Pull", Func, 23},
+ {"Pull2", Func, 23},
+ {"Seq", Type, 23},
+ {"Seq2", Type, 23},
+ },
"log": {
{"(*Logger).Fatal", Method, 0},
{"(*Logger).Fatalf", Method, 0},
@@ -7222,11 +7260,16 @@ var PackageSymbols = map[string][]Symbol{
{"Writer", Type, 0},
},
"maps": {
+ {"All", Func, 23},
{"Clone", Func, 21},
+ {"Collect", Func, 23},
{"Copy", Func, 21},
{"DeleteFunc", Func, 21},
{"Equal", Func, 21},
{"EqualFunc", Func, 21},
+ {"Insert", Func, 23},
+ {"Keys", Func, 23},
+ {"Values", Func, 23},
},
"math": {
{"Abs", Func, 0},
@@ -7617,6 +7660,7 @@ var PackageSymbols = map[string][]Symbol{
},
"math/rand/v2": {
{"(*ChaCha8).MarshalBinary", Method, 22},
+ {"(*ChaCha8).Read", Method, 23},
{"(*ChaCha8).Seed", Method, 22},
{"(*ChaCha8).Uint64", Method, 22},
{"(*ChaCha8).UnmarshalBinary", Method, 22},
@@ -7636,6 +7680,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Rand).NormFloat64", Method, 22},
{"(*Rand).Perm", Method, 22},
{"(*Rand).Shuffle", Method, 22},
+ {"(*Rand).Uint", Method, 23},
{"(*Rand).Uint32", Method, 22},
{"(*Rand).Uint32N", Method, 22},
{"(*Rand).Uint64", Method, 22},
@@ -7663,6 +7708,7 @@ var PackageSymbols = map[string][]Symbol{
{"Rand", Type, 22},
{"Shuffle", Func, 22},
{"Source", Type, 22},
+ {"Uint", Func, 23},
{"Uint32", Func, 22},
{"Uint32N", Func, 22},
{"Uint64", Func, 22},
@@ -7743,6 +7789,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*DNSError).Error", Method, 0},
{"(*DNSError).Temporary", Method, 0},
{"(*DNSError).Timeout", Method, 0},
+ {"(*DNSError).Unwrap", Method, 23},
{"(*Dialer).Dial", Method, 1},
{"(*Dialer).DialContext", Method, 7},
{"(*Dialer).MultipathTCP", Method, 21},
@@ -7809,6 +7856,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*TCPConn).RemoteAddr", Method, 0},
{"(*TCPConn).SetDeadline", Method, 0},
{"(*TCPConn).SetKeepAlive", Method, 0},
+ {"(*TCPConn).SetKeepAliveConfig", Method, 23},
{"(*TCPConn).SetKeepAlivePeriod", Method, 2},
{"(*TCPConn).SetLinger", Method, 0},
{"(*TCPConn).SetNoDelay", Method, 0},
@@ -7922,6 +7970,7 @@ var PackageSymbols = map[string][]Symbol{
{"DNSError.IsTimeout", Field, 0},
{"DNSError.Name", Field, 0},
{"DNSError.Server", Field, 0},
+ {"DNSError.UnwrapErr", Field, 23},
{"DefaultResolver", Var, 8},
{"Dial", Func, 0},
{"DialIP", Func, 0},
@@ -7937,6 +7986,7 @@ var PackageSymbols = map[string][]Symbol{
{"Dialer.DualStack", Field, 2},
{"Dialer.FallbackDelay", Field, 5},
{"Dialer.KeepAlive", Field, 3},
+ {"Dialer.KeepAliveConfig", Field, 23},
{"Dialer.LocalAddr", Field, 1},
{"Dialer.Resolver", Field, 8},
{"Dialer.Timeout", Field, 1},
@@ -7989,10 +8039,16 @@ var PackageSymbols = map[string][]Symbol{
{"Interfaces", Func, 0},
{"InvalidAddrError", Type, 0},
{"JoinHostPort", Func, 0},
+ {"KeepAliveConfig", Type, 23},
+ {"KeepAliveConfig.Count", Field, 23},
+ {"KeepAliveConfig.Enable", Field, 23},
+ {"KeepAliveConfig.Idle", Field, 23},
+ {"KeepAliveConfig.Interval", Field, 23},
{"Listen", Func, 0},
{"ListenConfig", Type, 11},
{"ListenConfig.Control", Field, 11},
{"ListenConfig.KeepAlive", Field, 13},
+ {"ListenConfig.KeepAliveConfig", Field, 23},
{"ListenIP", Func, 0},
{"ListenMulticastUDP", Func, 0},
{"ListenPacket", Func, 0},
@@ -8081,6 +8137,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Request).Context", Method, 7},
{"(*Request).Cookie", Method, 0},
{"(*Request).Cookies", Method, 0},
+ {"(*Request).CookiesNamed", Method, 23},
{"(*Request).FormFile", Method, 0},
{"(*Request).FormValue", Method, 0},
{"(*Request).MultipartReader", Method, 0},
@@ -8148,7 +8205,9 @@ var PackageSymbols = map[string][]Symbol{
{"Cookie.HttpOnly", Field, 0},
{"Cookie.MaxAge", Field, 0},
{"Cookie.Name", Field, 0},
+ {"Cookie.Partitioned", Field, 23},
{"Cookie.Path", Field, 0},
+ {"Cookie.Quoted", Field, 23},
{"Cookie.Raw", Field, 0},
{"Cookie.RawExpires", Field, 0},
{"Cookie.SameSite", Field, 11},
@@ -8225,7 +8284,9 @@ var PackageSymbols = map[string][]Symbol{
{"NoBody", Var, 8},
{"NotFound", Func, 0},
{"NotFoundHandler", Func, 0},
+ {"ParseCookie", Func, 23},
{"ParseHTTPVersion", Func, 0},
+ {"ParseSetCookie", Func, 23},
{"ParseTime", Func, 1},
{"Post", Func, 0},
{"PostForm", Func, 0},
@@ -8252,6 +8313,7 @@ var PackageSymbols = map[string][]Symbol{
{"Request.Host", Field, 0},
{"Request.Method", Field, 0},
{"Request.MultipartForm", Field, 0},
+ {"Request.Pattern", Field, 23},
{"Request.PostForm", Field, 1},
{"Request.Proto", Field, 0},
{"Request.ProtoMajor", Field, 0},
@@ -8453,6 +8515,7 @@ var PackageSymbols = map[string][]Symbol{
{"DefaultRemoteAddr", Const, 0},
{"NewRecorder", Func, 0},
{"NewRequest", Func, 7},
+ {"NewRequestWithContext", Func, 23},
{"NewServer", Func, 0},
{"NewTLSServer", Func, 0},
{"NewUnstartedServer", Func, 0},
@@ -8917,6 +8980,7 @@ var PackageSymbols = map[string][]Symbol{
{"Chown", Func, 0},
{"Chtimes", Func, 0},
{"Clearenv", Func, 0},
+ {"CopyFS", Func, 23},
{"Create", Func, 0},
{"CreateTemp", Func, 16},
{"DevNull", Const, 0},
@@ -9150,6 +9214,7 @@ var PackageSymbols = map[string][]Symbol{
{"IsLocal", Func, 20},
{"Join", Func, 0},
{"ListSeparator", Const, 0},
+ {"Localize", Func, 23},
{"Match", Func, 0},
{"Rel", Func, 0},
{"Separator", Const, 0},
@@ -9232,6 +9297,8 @@ var PackageSymbols = map[string][]Symbol{
{"(Value).Pointer", Method, 0},
{"(Value).Recv", Method, 0},
{"(Value).Send", Method, 0},
+ {"(Value).Seq", Method, 23},
+ {"(Value).Seq2", Method, 23},
{"(Value).Set", Method, 0},
{"(Value).SetBool", Method, 0},
{"(Value).SetBytes", Method, 0},
@@ -9314,6 +9381,7 @@ var PackageSymbols = map[string][]Symbol{
{"SelectSend", Const, 1},
{"SendDir", Const, 0},
{"Slice", Const, 0},
+ {"SliceAt", Func, 23},
{"SliceHeader", Type, 0},
{"SliceHeader.Cap", Field, 0},
{"SliceHeader.Data", Field, 0},
@@ -9655,6 +9723,7 @@ var PackageSymbols = map[string][]Symbol{
{"BuildSetting", Type, 18},
{"BuildSetting.Key", Field, 18},
{"BuildSetting.Value", Field, 18},
+ {"CrashOptions", Type, 23},
{"FreeOSMemory", Func, 1},
{"GCStats", Type, 1},
{"GCStats.LastGC", Field, 1},
@@ -9672,6 +9741,7 @@ var PackageSymbols = map[string][]Symbol{
{"PrintStack", Func, 0},
{"ReadBuildInfo", Func, 12},
{"ReadGCStats", Func, 1},
+ {"SetCrashOutput", Func, 23},
{"SetGCPercent", Func, 1},
{"SetMaxStack", Func, 2},
{"SetMaxThreads", Func, 2},
@@ -9742,10 +9812,15 @@ var PackageSymbols = map[string][]Symbol{
{"WithRegion", Func, 11},
},
"slices": {
+ {"All", Func, 23},
+ {"AppendSeq", Func, 23},
+ {"Backward", Func, 23},
{"BinarySearch", Func, 21},
{"BinarySearchFunc", Func, 21},
+ {"Chunk", Func, 23},
{"Clip", Func, 21},
{"Clone", Func, 21},
+ {"Collect", Func, 23},
{"Compact", Func, 21},
{"CompactFunc", Func, 21},
{"Compare", Func, 21},
@@ -9767,11 +9842,16 @@ var PackageSymbols = map[string][]Symbol{
{"MaxFunc", Func, 21},
{"Min", Func, 21},
{"MinFunc", Func, 21},
+ {"Repeat", Func, 23},
{"Replace", Func, 21},
{"Reverse", Func, 21},
{"Sort", Func, 21},
{"SortFunc", Func, 21},
{"SortStableFunc", Func, 21},
+ {"Sorted", Func, 23},
+ {"SortedFunc", Func, 23},
+ {"SortedStableFunc", Func, 23},
+ {"Values", Func, 23},
},
"sort": {
{"(Float64Slice).Len", Method, 0},
@@ -9936,10 +10016,14 @@ var PackageSymbols = map[string][]Symbol{
{"TrimSpace", Func, 0},
{"TrimSuffix", Func, 1},
},
+ "structs": {
+ {"HostLayout", Type, 23},
+ },
"sync": {
{"(*Cond).Broadcast", Method, 0},
{"(*Cond).Signal", Method, 0},
{"(*Cond).Wait", Method, 0},
+ {"(*Map).Clear", Method, 23},
{"(*Map).CompareAndDelete", Method, 20},
{"(*Map).CompareAndSwap", Method, 20},
{"(*Map).Delete", Method, 9},
@@ -9986,13 +10070,17 @@ var PackageSymbols = map[string][]Symbol{
{"(*Bool).Store", Method, 19},
{"(*Bool).Swap", Method, 19},
{"(*Int32).Add", Method, 19},
+ {"(*Int32).And", Method, 23},
{"(*Int32).CompareAndSwap", Method, 19},
{"(*Int32).Load", Method, 19},
+ {"(*Int32).Or", Method, 23},
{"(*Int32).Store", Method, 19},
{"(*Int32).Swap", Method, 19},
{"(*Int64).Add", Method, 19},
+ {"(*Int64).And", Method, 23},
{"(*Int64).CompareAndSwap", Method, 19},
{"(*Int64).Load", Method, 19},
+ {"(*Int64).Or", Method, 23},
{"(*Int64).Store", Method, 19},
{"(*Int64).Swap", Method, 19},
{"(*Pointer).CompareAndSwap", Method, 19},
@@ -10000,18 +10088,24 @@ var PackageSymbols = map[string][]Symbol{
{"(*Pointer).Store", Method, 19},
{"(*Pointer).Swap", Method, 19},
{"(*Uint32).Add", Method, 19},
+ {"(*Uint32).And", Method, 23},
{"(*Uint32).CompareAndSwap", Method, 19},
{"(*Uint32).Load", Method, 19},
+ {"(*Uint32).Or", Method, 23},
{"(*Uint32).Store", Method, 19},
{"(*Uint32).Swap", Method, 19},
{"(*Uint64).Add", Method, 19},
+ {"(*Uint64).And", Method, 23},
{"(*Uint64).CompareAndSwap", Method, 19},
{"(*Uint64).Load", Method, 19},
+ {"(*Uint64).Or", Method, 23},
{"(*Uint64).Store", Method, 19},
{"(*Uint64).Swap", Method, 19},
{"(*Uintptr).Add", Method, 19},
+ {"(*Uintptr).And", Method, 23},
{"(*Uintptr).CompareAndSwap", Method, 19},
{"(*Uintptr).Load", Method, 19},
+ {"(*Uintptr).Or", Method, 23},
{"(*Uintptr).Store", Method, 19},
{"(*Uintptr).Swap", Method, 19},
{"(*Value).CompareAndSwap", Method, 17},
@@ -10023,6 +10117,11 @@ var PackageSymbols = map[string][]Symbol{
{"AddUint32", Func, 0},
{"AddUint64", Func, 0},
{"AddUintptr", Func, 0},
+ {"AndInt32", Func, 23},
+ {"AndInt64", Func, 23},
+ {"AndUint32", Func, 23},
+ {"AndUint64", Func, 23},
+ {"AndUintptr", Func, 23},
{"Bool", Type, 19},
{"CompareAndSwapInt32", Func, 0},
{"CompareAndSwapInt64", Func, 0},
@@ -10038,6 +10137,11 @@ var PackageSymbols = map[string][]Symbol{
{"LoadUint32", Func, 0},
{"LoadUint64", Func, 0},
{"LoadUintptr", Func, 0},
+ {"OrInt32", Func, 23},
+ {"OrInt64", Func, 23},
+ {"OrUint32", Func, 23},
+ {"OrUint64", Func, 23},
+ {"OrUintptr", Func, 23},
{"Pointer", Type, 19},
{"StoreInt32", Func, 0},
{"StoreInt64", Func, 0},
@@ -16200,6 +16304,7 @@ var PackageSymbols = map[string][]Symbol{
{"WSAEACCES", Const, 2},
{"WSAECONNABORTED", Const, 9},
{"WSAECONNRESET", Const, 3},
+ {"WSAENOPROTOOPT", Const, 23},
{"WSAEnumProtocols", Func, 2},
{"WSAID_CONNECTEX", Var, 1},
{"WSAIoctl", Func, 0},
@@ -17284,6 +17389,7 @@ var PackageSymbols = map[string][]Symbol{
{"Encode", Func, 0},
{"EncodeRune", Func, 0},
{"IsSurrogate", Func, 0},
+ {"RuneLen", Func, 23},
},
"unicode/utf8": {
{"AppendRune", Func, 18},
@@ -17306,6 +17412,11 @@ var PackageSymbols = map[string][]Symbol{
{"ValidRune", Func, 1},
{"ValidString", Func, 0},
},
+ "unique": {
+ {"(Handle).Value", Method, 23},
+ {"Handle", Type, 23},
+ {"Make", Func, 23},
+ },
"unsafe": {
{"Add", Func, 0},
{"Alignof", Func, 0},
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
deleted file mode 100644
index ff9437a36..000000000
--- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// package tokeninternal provides access to some internal features of the token
-// package.
-package tokeninternal
-
-import (
- "fmt"
- "go/token"
- "sort"
- "sync"
- "unsafe"
-)
-
-// GetLines returns the table of line-start offsets from a token.File.
-func GetLines(file *token.File) []int {
- // token.File has a Lines method on Go 1.21 and later.
- if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
- return file.Lines()
- }
-
- // This declaration must match that of token.File.
- // This creates a risk of dependency skew.
- // For now we check that the size of the two
- // declarations is the same, on the (fragile) assumption
- // that future changes would add fields.
- type tokenFile119 struct {
- _ string
- _ int
- _ int
- mu sync.Mutex // we're not complete monsters
- lines []int
- _ []struct{}
- }
-
- if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) {
- panic("unexpected token.File size")
- }
- var ptr *tokenFile119
- type uP = unsafe.Pointer
- *(*uP)(uP(&ptr)) = uP(file)
- ptr.mu.Lock()
- defer ptr.mu.Unlock()
- return ptr.lines
-}
-
-// AddExistingFiles adds the specified files to the FileSet if they
-// are not already present. It panics if any pair of files in the
-// resulting FileSet would overlap.
-func AddExistingFiles(fset *token.FileSet, files []*token.File) {
- // Punch through the FileSet encapsulation.
- type tokenFileSet struct {
- // This type remained essentially consistent from go1.16 to go1.21.
- mutex sync.RWMutex
- base int
- files []*token.File
- _ *token.File // changed to atomic.Pointer[token.File] in go1.19
- }
-
- // If the size of token.FileSet changes, this will fail to compile.
- const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
- var _ [-delta * delta]int
-
- type uP = unsafe.Pointer
- var ptr *tokenFileSet
- *(*uP)(uP(&ptr)) = uP(fset)
- ptr.mutex.Lock()
- defer ptr.mutex.Unlock()
-
- // Merge and sort.
- newFiles := append(ptr.files, files...)
- sort.Slice(newFiles, func(i, j int) bool {
- return newFiles[i].Base() < newFiles[j].Base()
- })
-
- // Reject overlapping files.
- // Discard adjacent identical files.
- out := newFiles[:0]
- for i, file := range newFiles {
- if i > 0 {
- prev := newFiles[i-1]
- if file == prev {
- continue
- }
- if prev.Base()+prev.Size()+1 > file.Base() {
- panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
- prev.Name(), prev.Base(), prev.Base()+prev.Size(),
- file.Name(), file.Base(), file.Base()+file.Size()))
- }
- }
- out = append(out, file)
- }
- newFiles = out
-
- ptr.files = newFiles
-
- // Advance FileSet.Base().
- if len(newFiles) > 0 {
- last := newFiles[len(newFiles)-1]
- newBase := last.Base() + last.Size() + 1
- if ptr.base < newBase {
- ptr.base = newBase
- }
- }
-}
-
-// FileSetFor returns a new FileSet containing a sequence of new Files with
-// the same base, size, and line as the input files, for use in APIs that
-// require a FileSet.
-//
-// Precondition: the input files must be non-overlapping, and sorted in order
-// of their Base.
-func FileSetFor(files ...*token.File) *token.FileSet {
- fset := token.NewFileSet()
- for _, f := range files {
- f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
- lines := GetLines(f)
- f2.SetLines(lines)
- }
- return fset
-}
-
-// CloneFileSet creates a new FileSet holding all files in fset. It does not
-// create copies of the token.Files in fset: they are added to the resulting
-// FileSet unmodified.
-func CloneFileSet(fset *token.FileSet) *token.FileSet {
- var files []*token.File
- fset.Iterate(func(f *token.File) bool {
- files = append(files, f)
- return true
- })
- newFileSet := token.NewFileSet()
- AddExistingFiles(newFileSet, files)
- return newFileSet
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
new file mode 100644
index 000000000..0b84acc5c
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -0,0 +1,140 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams contains common utilities for writing tools that
+// interact with generic Go code, as introduced with Go 1.18. It
+// supplements the standard library APIs. Notably, the StructuralTerms
+// API computes a minimal representation of the structural
+// restrictions on a type parameter.
+//
+// An external version of these APIs is available in the
+// golang.org/x/exp/typeparams module.
+package typeparams
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// UnpackIndexExpr extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting indices slice will contain exactly one
+// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
+// number of index expressions.
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+ switch e := n.(type) {
+ case *ast.IndexExpr:
+ return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+ case *ast.IndexListExpr:
+ return e.X, e.Lbrack, e.Indices, e.Rbrack
+ }
+ return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+ switch len(indices) {
+ case 0:
+ panic("empty indices")
+ case 1:
+ return &ast.IndexExpr{
+ X: x,
+ Lbrack: lbrack,
+ Index: indices[0],
+ Rbrack: rbrack,
+ }
+ default:
+ return &ast.IndexListExpr{
+ X: x,
+ Lbrack: lbrack,
+ Indices: indices,
+ Rbrack: rbrack,
+ }
+ }
+}
+
+// IsTypeParam reports whether t is a type parameter (or an alias of one).
+func IsTypeParam(t types.Type) bool {
+ _, ok := types.Unalias(t).(*types.TypeParam)
+ return ok
+}
+
+// GenericAssignableTo is a generalization of types.AssignableTo that
+// implements the following rule for uninstantiated generic types:
+//
+// If V and T are generic named types, then V is considered assignable to T if,
+// for every possible instantiation of V[A_1, ..., A_N], the instantiation
+// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
+//
+// If T has structural constraints, they must be satisfied by V.
+//
+// For example, consider the following type declarations:
+//
+// type Interface[T any] interface {
+// Accept(T)
+// }
+//
+// type Container[T any] struct {
+// Element T
+// }
+//
+// func (c Container[T]) Accept(t T) { c.Element = t }
+//
+// In this case, GenericAssignableTo reports that instantiations of Container
+// are assignable to the corresponding instantiation of Interface.
+func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
+ V = types.Unalias(V)
+ T = types.Unalias(T)
+
+ // If V and T are not both named, or do not have matching non-empty type
+ // parameter lists, fall back on types.AssignableTo.
+
+ VN, Vnamed := V.(*types.Named)
+ TN, Tnamed := T.(*types.Named)
+ if !Vnamed || !Tnamed {
+ return types.AssignableTo(V, T)
+ }
+
+ vtparams := VN.TypeParams()
+ ttparams := TN.TypeParams()
+ if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 {
+ return types.AssignableTo(V, T)
+ }
+
+ // V and T have the same (non-zero) number of type params. Instantiate both
+ // with the type parameters of V. This must always succeed for V, and will
+ // succeed for T if and only if the type set of each type parameter of V is a
+ // subset of the type set of the corresponding type parameter of T, meaning
+ // that every instantiation of V corresponds to a valid instantiation of T.
+
+ // Minor optimization: ensure we share a context across the two
+ // instantiations below.
+ if ctxt == nil {
+ ctxt = types.NewContext()
+ }
+
+ var targs []types.Type
+ for i := 0; i < vtparams.Len(); i++ {
+ targs = append(targs, vtparams.At(i))
+ }
+
+ vinst, err := types.Instantiate(ctxt, V, targs, true)
+ if err != nil {
+ panic("type parameters should satisfy their own constraints")
+ }
+
+ tinst, err := types.Instantiate(ctxt, T, targs, true)
+ if err != nil {
+ return false
+ }
+
+ return types.AssignableTo(vinst, tinst)
+}
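The Interface/Container example from the GenericAssignableTo doc comment, run end to end as a hedged sketch (it compiles only inside golang.org/x/tools, because typeparams is an internal package):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams"
)

const src = `package p

type Interface[T any] interface{ Accept(T) }

type Container[T any] struct{ Element T }

func (c Container[T]) Accept(t T) { c.Element = t }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	V := pkg.Scope().Lookup("Container").Type() // uninstantiated generic named type
	T := pkg.Scope().Lookup("Interface").Type()
	fmt.Println(typeparams.GenericAssignableTo(nil, V, T)) // true
}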
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
new file mode 100644
index 000000000..6e83c6fb1
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -0,0 +1,150 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "fmt"
+ "go/types"
+)
+
+// CoreType returns the core type of T or nil if T does not have a core type.
+//
+// See https://go.dev/ref/spec#Core_types for the definition of a core type.
+func CoreType(T types.Type) types.Type {
+ U := T.Underlying()
+ if _, ok := U.(*types.Interface); !ok {
+ return U // for non-interface types, the core type is the underlying type
+ }
+
+ terms, err := NormalTerms(U)
+ if len(terms) == 0 || err != nil {
+ // len(terms) -> empty type set of interface.
+ // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
+ return nil // no core type.
+ }
+
+ U = terms[0].Type().Underlying()
+ var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
+ for identical = 1; identical < len(terms); identical++ {
+ if !types.Identical(U, terms[identical].Type().Underlying()) {
+ break
+ }
+ }
+
+ if identical == len(terms) {
+ // https://go.dev/ref/spec#Core_types
+ // "There is a single type U which is the underlying type of all types in the type set of T"
+ return U
+ }
+ ch, ok := U.(*types.Chan)
+ if !ok {
+ return nil // no core type as identical < len(terms) and U is not a channel.
+ }
+ // https://go.dev/ref/spec#Core_types
+ // "the type chan E if T contains only bidirectional channels, or the type chan<- E or
+ // <-chan E depending on the direction of the directional channels present."
+ for chans := identical; chans < len(terms); chans++ {
+ curr, ok := terms[chans].Type().Underlying().(*types.Chan)
+ if !ok {
+ return nil
+ }
+ if !types.Identical(ch.Elem(), curr.Elem()) {
+ return nil // channel elements are not identical.
+ }
+ if ch.Dir() == types.SendRecv {
+ // ch is bidirectional. We can safely always use curr's direction.
+ ch = curr
+ } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
+ // ch and curr are not bidirectional and not the same direction.
+ return nil
+ }
+ }
+ return ch
+}
+
+// NormalTerms returns a slice of terms representing the normalized structural
+// type restrictions of a type, if any.
+//
+// For all types other than *types.TypeParam, *types.Interface, and
+// *types.Union, this is just a single term with Tilde() == false and
+// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
+// below.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration type
+// T[P interface{~int; m()}] int the structural restriction of the type
+// parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// NormalTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, NormalTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the type is
+// invalid, exceeds complexity bounds, or has an empty type set. In the latter
+// case, NormalTerms returns ErrEmptyTypeSet.
+//
+// NormalTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func NormalTerms(typ types.Type) ([]*types.Term, error) {
+ switch typ := typ.Underlying().(type) {
+ case *types.TypeParam:
+ return StructuralTerms(typ)
+ case *types.Union:
+ return UnionTermSet(typ)
+ case *types.Interface:
+ return InterfaceTermSet(typ)
+ default:
+ return []*types.Term{types.NewTerm(false, typ)}, nil
+ }
+}
+
+// Deref returns the type of the variable pointed to by t,
+// if t's core type is a pointer; otherwise it returns t.
+//
+// Do not assume that Deref(T)==T implies T is not a pointer:
+// consider "type T *T", for example.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func Deref(t types.Type) types.Type {
+ if ptr, ok := CoreType(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ return t
+}
+
+// MustDeref returns the type of the variable pointed to by t.
+// It panics if t's core type is not a pointer.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func MustDeref(t types.Type) types.Type {
+ if ptr, ok := CoreType(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ panic(fmt.Sprintf("%v is not a pointer", t))
+}
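A sketch of CoreType on three small constraint interfaces (again assuming compilation inside x/tools): a single tilde term yields its underlying type, mixed channel directions still reduce to a directional channel, and unrelated terms have no core type.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams"
)

const src = `package p

type Bytes interface{ ~[]byte }

type Chans interface{ chan int | <-chan int }

type Mixed interface{ ~[]byte | ~string }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	for _, name := range []string{"Bytes", "Chans", "Mixed"} {
		t := pkg.Scope().Lookup(name).Type()
		fmt.Printf("%s: %v\n", name, typeparams.CoreType(t))
	}
	// Bytes: []byte
	// Chans: <-chan int
	// Mixed: <nil>
}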
diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go
new file mode 100644
index 000000000..0ade5c294
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/free.go
@@ -0,0 +1,131 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/aliases"
+)
+
+// Free is a memoization of the set of free type parameters within a
+// type. It makes a sequence of calls to [Free.Has] for overlapping
+// types more efficient. The zero value is ready for use.
+//
+// NOTE: Adapted from go/types/infer.go. If it is later exported, factor.
+type Free struct {
+ seen map[types.Type]bool
+}
+
+// Has reports whether the specified type has a free type parameter.
+func (w *Free) Has(typ types.Type) (res bool) {
+ // detect cycles
+ if x, ok := w.seen[typ]; ok {
+ return x
+ }
+ if w.seen == nil {
+ w.seen = make(map[types.Type]bool)
+ }
+ w.seen[typ] = false
+ defer func() {
+ w.seen[typ] = res
+ }()
+
+ switch t := typ.(type) {
+ case nil, *types.Basic: // TODO(gri) should nil be handled here?
+ break
+
+ case *types.Alias:
+ if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() {
+ return true // This is an uninstantiated Alias.
+ }
+ // The expansion of an alias can have free type parameters,
+ // whether or not the alias itself has type parameters:
+ //
+ // func _[K comparable]() {
+ // type Set = map[K]bool // free(Set) = {K}
+ // type MapTo[V] = map[K]V // free(MapTo[foo]) = {K}
+ // }
+ //
+ // So, we must Unalias.
+ return w.Has(types.Unalias(t))
+
+ case *types.Array:
+ return w.Has(t.Elem())
+
+ case *types.Slice:
+ return w.Has(t.Elem())
+
+ case *types.Struct:
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ if w.Has(t.Field(i).Type()) {
+ return true
+ }
+ }
+
+ case *types.Pointer:
+ return w.Has(t.Elem())
+
+ case *types.Tuple:
+ n := t.Len()
+ for i := 0; i < n; i++ {
+ if w.Has(t.At(i).Type()) {
+ return true
+ }
+ }
+
+ case *types.Signature:
+ // t.tparams may not be nil if we are looking at a signature
+ // of a generic function type (or an interface method) that is
+ // part of the type we're testing. We don't care about these type
+ // parameters.
+ // Similarly, the receiver of a method may declare (rather than
+ // use) type parameters, we don't care about those either.
+ // Thus, we only need to look at the input and result parameters.
+ return w.Has(t.Params()) || w.Has(t.Results())
+
+ case *types.Interface:
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ if w.Has(t.Method(i).Type()) {
+ return true
+ }
+ }
+ terms, err := InterfaceTermSet(t)
+ if err != nil {
+ return false // ill typed
+ }
+ for _, term := range terms {
+ if w.Has(term.Type()) {
+ return true
+ }
+ }
+
+ case *types.Map:
+ return w.Has(t.Key()) || w.Has(t.Elem())
+
+ case *types.Chan:
+ return w.Has(t.Elem())
+
+ case *types.Named:
+ args := t.TypeArgs()
+ if params := t.TypeParams(); params.Len() > args.Len() {
+ return true // this is an uninstantiated named type.
+ }
+ for i, n := 0, args.Len(); i < n; i++ {
+ if w.Has(args.At(i)) {
+ return true
+ }
+ }
+ return w.Has(t.Underlying()) // recurse for types local to parameterized functions
+
+ case *types.TypeParam:
+ return true
+
+ default:
+ panic(t) // unreachable
+ }
+
+ return false
+}
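A small sketch of Free.Has (assumed to be built inside x/tools, since typeparams is internal): any type that mentions a type parameter, directly or through a composite, reports true; fully concrete types report false.

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams"
)

func main() {
	anyType := types.Universe.Lookup("any").Type()
	T := types.NewTypeParam(types.NewTypeName(token.NoPos, nil, "T", nil), anyType)

	var free typeparams.Free // zero value is ready for use; results are memoized
	fmt.Println(free.Has(types.NewSlice(T)))                        // true:  []T
	fmt.Println(free.Has(types.NewMap(types.Typ[types.String], T))) // true:  map[string]T
	fmt.Println(free.Has(types.NewSlice(types.Typ[types.Int])))     // false: []int
}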
diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
new file mode 100644
index 000000000..93c80fdc9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -0,0 +1,218 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "errors"
+ "fmt"
+ "go/types"
+ "os"
+ "strings"
+)
+
+//go:generate go run copytermlist.go
+
+const debug = false
+
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// StructuralTerms returns a slice of terms representing the normalized
+// structural type restrictions of a type parameter, if any.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+//
+// type T[P interface{~int; m()}] int
+//
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// StructuralTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as an exercise for the reader to verify that, modulo
+// sorting, there is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, StructuralTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the
+// constraint interface is invalid, exceeds complexity bounds, or has an empty
+// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
+//
+// StructuralTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
+ constraint := tparam.Constraint()
+ if constraint == nil {
+ return nil, fmt.Errorf("%s has nil constraint", tparam)
+ }
+ iface, _ := constraint.Underlying().(*types.Interface)
+ if iface == nil {
+ return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+ }
+ return InterfaceTermSet(iface)
+}
+
+// InterfaceTermSet computes the normalized terms for a constraint interface,
+// returning an error if the term set cannot be computed or is empty. In the
+// latter case, the error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
+ return computeTermSet(iface)
+}
+
+// UnionTermSet computes the normalized terms for a union, returning an error
+// if the term set cannot be computed or is empty. In the latter case, the
+// error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func UnionTermSet(union *types.Union) ([]*types.Term, error) {
+ return computeTermSet(union)
+}
+
+func computeTermSet(typ types.Type) ([]*types.Term, error) {
+ tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+ if err != nil {
+ return nil, err
+ }
+ if tset.terms.isEmpty() {
+ return nil, ErrEmptyTypeSet
+ }
+ if tset.terms.isAll() {
+ return nil, nil
+ }
+ var terms []*types.Term
+ for _, term := range tset.terms {
+ terms = append(terms, types.NewTerm(term.tilde, term.typ))
+ }
+ return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+ complete bool
+ terms termlist
+}
+
+func indentf(depth int, format string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
+}
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+ if t == nil {
+ panic("nil type")
+ }
+
+ if debug {
+ indentf(depth, "%s", t.String())
+ defer func() {
+ if err != nil {
+ indentf(depth, "=> %s", err)
+ } else {
+ indentf(depth, "=> %s", res.terms.String())
+ }
+ }()
+ }
+
+ const maxTermCount = 100
+ if tset, ok := seen[t]; ok {
+ if !tset.complete {
+ return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+ }
+ return tset, nil
+ }
+
+ // Mark the current type as seen to avoid infinite recursion.
+ tset := new(termSet)
+ defer func() {
+ tset.complete = true
+ }()
+ seen[t] = tset
+
+ switch u := t.Underlying().(type) {
+ case *types.Interface:
+ // The term set of an interface is the intersection of the term sets of its
+ // embedded types.
+ tset.terms = allTermlist
+ for i := 0; i < u.NumEmbeddeds(); i++ {
+ embedded := u.EmbeddedType(i)
+ if _, ok := embedded.Underlying().(*types.TypeParam); ok {
+ return nil, fmt.Errorf("invalid embedded type %T", embedded)
+ }
+ tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ tset.terms = tset.terms.intersect(tset2.terms)
+ }
+ case *types.Union:
+ // The term set of a union is the union of term sets of its terms.
+ tset.terms = nil
+ for i := 0; i < u.Len(); i++ {
+ t := u.Term(i)
+ var terms termlist
+ switch t.Type().Underlying().(type) {
+ case *types.Interface:
+ tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ terms = tset2.terms
+ case *types.TypeParam, *types.Union:
+				// A stand-alone type parameter or union is not permitted as a
+				// union term.
+ return nil, fmt.Errorf("invalid union term %T", t)
+ default:
+ if t.Type() == types.Typ[types.Invalid] {
+ continue
+ }
+ terms = termlist{{t.Tilde(), t.Type()}}
+ }
+ tset.terms = tset.terms.union(terms)
+ if len(tset.terms) > maxTermCount {
+ return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+ }
+ }
+ case *types.TypeParam:
+ panic("unreachable")
+ default:
+ // For all other types, the term set is just a single non-tilde term
+ // holding the type itself.
+ if u != types.Typ[types.Invalid] {
+ tset.terms = termlist{{false, t}}
+ }
+ }
+ return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+ return t.Underlying()
+}
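
A hedged sketch of driving StructuralTerms from a type-checked constraint: a small package declares a constraint C and a generic type T[P C], and the normalized terms of P are logged. The example source and test name are illustrative only.

package typeparams

import (
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"testing"
)

// TestStructuralTermsSketch normalizes the constraint of P in
//
//	type T[P interface{ ~string | ~int }] int
func TestStructuralTermsSketch(t *testing.T) {
	const src = `package p
type C interface{ ~string | ~int }
type T[P C] int
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		t.Fatal(err)
	}
	var conf types.Config
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		t.Fatal(err)
	}
	// Look up T, take its first type parameter, and normalize its constraint.
	named := pkg.Scope().Lookup("T").Type().(*types.Named)
	terms, err := StructuralTerms(named.TypeParams().At(0))
	if err != nil {
		t.Fatal(err)
	}
	for _, term := range terms {
		t.Logf("term: tilde=%v type=%s", term.Tilde(), term.Type())
	}
}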
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 000000000..cbd12f801
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,163 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+ "bytes"
+ "go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 βˆͺ t2 βˆͺ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "βˆ…"
+ }
+ var buf bytes.Buffer
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(" | ")
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓀 term, the entire list is 𝓀.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓀 term, the entire list is 𝓀.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓀 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
+
+// union returns the union xl βˆͺ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y βŠ† xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl βŠ† yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+			return false // x is not a subset of yl
+ }
+ }
+ return true
+}
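
A brief in-package sketch of the termlist operations above: string βˆͺ ~string normalizes to ~string, and intersecting with the empty list yields the empty list.

package typeparams

import (
	"go/types"
	"testing"
)

func TestTermlistSketch(t *testing.T) {
	str := types.Typ[types.String]
	xl := termlist{{false, str}, {true, str}} // string | ~string

	// string is absorbed by ~string during normalization.
	if got := xl.norm().String(); got != "~string" {
		t.Errorf("norm() = %s, want ~string", got)
	}
	// Intersection with βˆ… is βˆ….
	if !xl.intersect(nil).isEmpty() {
		t.Error("expected xl ∩ βˆ… to be empty")
	}
}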
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 000000000..7350bb702
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,169 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+// βˆ…: (*term)(nil) == βˆ… // set of no types (empty set)
+// 𝓀: &term{} == 𝓀 // set of all types (𝓀niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+type term struct {
+ tilde bool // valid if typ != nil
+ typ types.Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "βˆ…"
+ case x.typ == nil:
+ return "𝓀"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x βˆͺ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // βˆ… βˆͺ βˆ… == βˆ…
+ case x == nil:
+ return y, nil // βˆ… βˆͺ y == y
+ case y == nil:
+ return x, nil // x βˆͺ βˆ… == x
+ case x.typ == nil:
+ return x, nil // 𝓀 βˆͺ y == 𝓀
+ case y.typ == nil:
+ return y, nil // x βˆͺ 𝓀 == 𝓀
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ if x.disjoint(y) {
+ return x, y // x βˆͺ y == (x, y) if x ∩ y == βˆ…
+ }
+ // x.typ == y.typ
+
+ // ~t βˆͺ ~t == ~t
+ // ~t βˆͺ T == ~t
+ // T βˆͺ ~t == ~t
+ // T βˆͺ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+		return nil // βˆ… ∩ y == βˆ… and x ∩ βˆ… == βˆ…
+ case x.typ == nil:
+ return y // 𝓀 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓀 == x
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == βˆ… if x ∩ y == βˆ…
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ βˆ… == false
+ case x.typ == nil:
+ return true // t ∈ 𝓀 == true
+ }
+ // βˆ… βŠ‚ x βŠ‚ 𝓀
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x βŠ† y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // βˆ… βŠ† y == true
+ case y == nil:
+ return false // x βŠ† βˆ… == false since x != βˆ…
+ case y.typ == nil:
+ return true // x βŠ† 𝓀 == true
+ case x.typ == nil:
+ return false // 𝓀 βŠ† y == false since y != 𝓀
+ }
+ // βˆ… βŠ‚ x, y βŠ‚ 𝓀
+
+ if x.disjoint(y) {
+ return false // x βŠ† y == false if x ∩ y == βˆ…
+ }
+ // x.typ == y.typ
+
+ // ~t βŠ† ~t == true
+ // ~t βŠ† T == false
+ // T βŠ† ~t == true
+ // T βŠ† T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == βˆ….
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ if debug && (x.typ == nil || y.typ == nil) {
+ panic("invalid argument(s)")
+ }
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !types.Identical(ux, uy)
+}
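
An in-package sketch of the term algebra: with string (the set {string}) and ~string (every type whose underlying type is string), the subset, intersection, and union rules above give the results checked below.

package typeparams

import (
	"go/types"
	"testing"
)

func TestTypeTermSketch(t *testing.T) {
	str := types.Typ[types.String]
	plain := &term{false, str} // string
	tilde := &term{true, str}  // ~string

	if !plain.subsetOf(tilde) {
		t.Error("expected string βŠ† ~string")
	}
	if got := tilde.intersect(plain); !got.equal(plain) {
		t.Errorf("~string ∩ string = %s, want string", got)
	}
	if got, rest := plain.union(tilde); rest != nil || !got.equal(tilde) {
		t.Errorf("string βˆͺ ~string = (%s, %s), want (~string, βˆ…)", got, rest)
	}
}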
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go
new file mode 100644
index 000000000..4957f0216
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go
@@ -0,0 +1,133 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "fmt"
+ "go/types"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+// ForEachElement calls f for type T and each type reachable from it
+// through reflection. It does this by recursively stripping off
+// type constructors; in addition, for each named type N, the type *N
+// is added to the result as it may have additional methods.
+//
+// The caller must provide an initially empty set used to de-duplicate
+// identical types, potentially across multiple calls to ForEachElement.
+// (Its final value holds all the elements seen, matching the arguments
+// passed to f.)
+//
+// TODO(adonovan): share/harmonize with go/callgraph/rta.
+func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) {
+ var visit func(T types.Type, skip bool)
+ visit = func(T types.Type, skip bool) {
+ if !skip {
+ if seen, _ := rtypes.Set(T, true).(bool); seen {
+ return // de-dup
+ }
+
+ f(T) // notify caller of new element type
+ }
+
+ // Recursion over signatures of each method.
+ tmset := msets.MethodSet(T)
+ for i := 0; i < tmset.Len(); i++ {
+ sig := tmset.At(i).Type().(*types.Signature)
+ // It is tempting to call visit(sig, false)
+ // but, as noted in golang.org/cl/65450043,
+ // the Signature.Recv field is ignored by
+ // types.Identical and typeutil.Map, which
+ // is confusing at best.
+ //
+ // More importantly, the true signature rtype
+ // reachable from a method using reflection
+ // has no receiver but an extra ordinary parameter.
+ // For the Read method of io.Reader we want:
+ // func(Reader, []byte) (int, error)
+ // but here sig is:
+ // func([]byte) (int, error)
+ // with .Recv = Reader (though it is hard to
+ // notice because it doesn't affect Signature.String
+ // or types.Identical).
+ //
+ // TODO(adonovan): construct and visit the correct
+ // non-method signature with an extra parameter
+ // (though since unnamed func types have no methods
+ // there is essentially no actual demand for this).
+ //
+ // TODO(adonovan): document whether or not it is
+ // safe to skip non-exported methods (as RTA does).
+ visit(sig.Params(), true) // skip the Tuple
+ visit(sig.Results(), true) // skip the Tuple
+ }
+
+ switch T := T.(type) {
+ case *types.Alias:
+ visit(types.Unalias(T), skip) // emulates the pre-Alias behavior
+
+ case *types.Basic:
+ // nop
+
+ case *types.Interface:
+ // nop---handled by recursion over method set.
+
+ case *types.Pointer:
+ visit(T.Elem(), false)
+
+ case *types.Slice:
+ visit(T.Elem(), false)
+
+ case *types.Chan:
+ visit(T.Elem(), false)
+
+ case *types.Map:
+ visit(T.Key(), false)
+ visit(T.Elem(), false)
+
+ case *types.Signature:
+ if T.Recv() != nil {
+ panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv()))
+ }
+ visit(T.Params(), true) // skip the Tuple
+ visit(T.Results(), true) // skip the Tuple
+
+ case *types.Named:
+ // A pointer-to-named type can be derived from a named
+ // type via reflection. It may have methods too.
+ visit(types.NewPointer(T), false)
+
+ // Consider 'type T struct{S}' where S has methods.
+ // Reflection provides no way to get from T to struct{S},
+ // only to S, so the method set of struct{S} is unwanted,
+ // so set 'skip' flag during recursion.
+ visit(T.Underlying(), true) // skip the unnamed type
+
+ case *types.Array:
+ visit(T.Elem(), false)
+
+ case *types.Struct:
+ for i, n := 0, T.NumFields(); i < n; i++ {
+ // TODO(adonovan): document whether or not
+ // it is safe to skip non-exported fields.
+ visit(T.Field(i).Type(), false)
+ }
+
+ case *types.Tuple:
+ for i, n := 0, T.Len(); i < n; i++ {
+ visit(T.At(i).Type(), false)
+ }
+
+ case *types.TypeParam, *types.Union:
+			// ForEachElement must not be called on parameterized types.
+ panic(T)
+
+ default:
+ panic(T)
+ }
+ }
+ visit(T, false)
+}
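
A minimal sketch of calling ForEachElement with the public typeutil helpers, using a small named struct type built directly with go/types; the callback receives S, *S, []int, and int (the unnamed struct{ F []int } itself is visited with the skip flag set). The package path and type names are illustrative.

package typesinternal

import (
	"go/token"
	"go/types"
	"testing"

	"golang.org/x/tools/go/types/typeutil"
)

func TestForEachElementSketch(t *testing.T) {
	pkg := types.NewPackage("example.com/p", "p")

	// type S struct{ F []int }
	field := types.NewField(token.NoPos, pkg, "F", types.NewSlice(types.Typ[types.Int]), false)
	obj := types.NewTypeName(token.NoPos, pkg, "S", nil)
	named := types.NewNamed(obj, types.NewStruct([]*types.Var{field}, nil), nil)

	var (
		rtypes typeutil.Map            // de-duplicates reachable types
		msets  typeutil.MethodSetCache // memoizes method sets
	)
	ForEachElement(&rtypes, &msets, named, func(T types.Type) {
		t.Logf("reachable: %s", T)
	})
}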
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
index 834e05381..131caab28 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -838,7 +838,7 @@ const (
// InvalidCap occurs when an argument to the cap built-in function is not of
// supported type.
//
- // See https://golang.org/ref/spec#Lengthand_capacity for information on
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
@@ -859,7 +859,7 @@ const (
// InvalidCopy occurs when the arguments are not of slice type or do not
// have compatible type.
//
- // See https://golang.org/ref/spec#Appendingand_copying_slices for more
+ // See https://golang.org/ref/spec#Appending_and_copying_slices for more
// information on the type requirements for the copy built-in.
//
// Example:
@@ -897,7 +897,7 @@ const (
// InvalidLen occurs when an argument to the len built-in function is not of
// supported type.
//
- // See https://golang.org/ref/spec#Lengthand_capacity for information on
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
@@ -914,7 +914,7 @@ const (
// InvalidMake occurs when make is called with an unsupported type argument.
//
- // See https://golang.org/ref/spec#Makingslices_maps_and_channels for
+ // See https://golang.org/ref/spec#Making_slices_maps_and_channels for
// information on the types that may be created using make.
//
// Example:
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
index fea7c8b75..ba6f4f4eb 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
@@ -6,8 +6,6 @@ package typesinternal
import (
"go/types"
-
- "golang.org/x/tools/internal/aliases"
)
// ReceiverNamed returns the named type (if any) associated with the
@@ -15,11 +13,11 @@ import (
// It also reports whether a Pointer was present.
func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
t := recv.Type()
- if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
+ if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
isPtr = true
t = ptr.Elem()
}
- named, _ = aliases.Unalias(t).(*types.Named)
+ named, _ = types.Unalias(t).(*types.Named)
return
}
@@ -36,7 +34,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
// indirection from the type, regardless of named types (analogous to
// a LOAD instruction).
func Unpointer(t types.Type) types.Type {
- if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
+ if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
return ptr.Elem()
}
return t
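
A small in-package sketch of the two helpers changed above: ReceiverNamed peels an optional pointer from a receiver variable and reports the named type, and Unpointer removes one level of pointer. The package path and type name are illustrative.

package typesinternal

import (
	"go/token"
	"go/types"
	"testing"
)

func TestReceiverNamedSketch(t *testing.T) {
	pkg := types.NewPackage("example.com/p", "p")
	obj := types.NewTypeName(token.NoPos, pkg, "T", nil)
	named := types.NewNamed(obj, types.Typ[types.Int], nil)

	recv := types.NewVar(token.NoPos, pkg, "r", types.NewPointer(named))
	if isPtr, got := ReceiverNamed(recv); !isPtr || got != named {
		t.Errorf("ReceiverNamed(*T) = (%v, %v), want (true, T)", isPtr, got)
	}
	if got := Unpointer(types.NewPointer(named)); got != named {
		t.Errorf("Unpointer(*T) = %v, want T", got)
	}
}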
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index 7c77c2fbc..df3ea5212 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -11,6 +11,8 @@ import (
"go/types"
"reflect"
"unsafe"
+
+ "golang.org/x/tools/internal/aliases"
)
func SetUsesCgo(conf *types.Config) bool {
@@ -48,3 +50,72 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
}
return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
}
+
+// NameRelativeTo returns a types.Qualifier that qualifies members of
+// all packages other than pkg, using only the package name.
+// (By contrast, [types.RelativeTo] uses the complete package path,
+// which is often excessive.)
+//
+// If pkg is nil, it is equivalent to [*types.Package.Name].
+func NameRelativeTo(pkg *types.Package) types.Qualifier {
+ return func(other *types.Package) string {
+ if pkg != nil && pkg == other {
+ return "" // same package; unqualified
+ }
+ return other.Name()
+ }
+}
+
+// A NamedOrAlias is a [types.Type] that is named (as
+// defined by the spec) and capable of bearing type parameters: it
+// abstracts aliases ([types.Alias]) and defined types
+// ([types.Named]).
+//
+// Every type declared by an explicit "type" declaration is a
+// NamedOrAlias. (Built-in type symbols may additionally
+// have type [types.Basic], which is not a NamedOrAlias,
+// though the spec regards them as "named".)
+//
+// NamedOrAlias cannot expose the Origin method, because
+// [types.Alias.Origin] and [types.Named.Origin] have different
+// (covariant) result types; use [Origin] instead.
+type NamedOrAlias interface {
+ types.Type
+ Obj() *types.TypeName
+}
+
+// TypeParams is a light shim around t.TypeParams().
+// (go/types.Alias).TypeParams requires >= 1.23.
+func TypeParams(t NamedOrAlias) *types.TypeParamList {
+ switch t := t.(type) {
+ case *types.Alias:
+ return aliases.TypeParams(t)
+ case *types.Named:
+ return t.TypeParams()
+ }
+ return nil
+}
+
+// TypeArgs is a light shim around t.TypeArgs().
+// (go/types.Alias).TypeArgs requires >= 1.23.
+func TypeArgs(t NamedOrAlias) *types.TypeList {
+ switch t := t.(type) {
+ case *types.Alias:
+ return aliases.TypeArgs(t)
+ case *types.Named:
+ return t.TypeArgs()
+ }
+ return nil
+}
+
+// Origin returns the generic type of the Named or Alias type t if it
+// is instantiated, otherwise it returns t.
+func Origin(t NamedOrAlias) NamedOrAlias {
+ switch t := t.(type) {
+ case *types.Alias:
+ return aliases.Origin(t)
+ case *types.Named:
+ return t.Origin()
+ }
+ return t
+}
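
A short sketch of NameRelativeTo used as a types.Qualifier: types from other packages are qualified by package name only, and types from pkg itself are left unqualified. The synthetic packages below are illustrative.

package typesinternal

import (
	"go/token"
	"go/types"
	"testing"
)

func TestNameRelativeToSketch(t *testing.T) {
	bytesPkg := types.NewPackage("bytes", "bytes")
	obj := types.NewTypeName(token.NoPos, bytesPkg, "Buffer", nil)
	buf := types.NewNamed(obj, types.NewStruct(nil, nil), nil)

	local := types.NewPackage("example.com/p", "p")
	if got := types.TypeString(buf, NameRelativeTo(local)); got != "bytes.Buffer" {
		t.Errorf("got %q, want bytes.Buffer", got)
	}
	if got := types.TypeString(buf, NameRelativeTo(bytesPkg)); got != "Buffer" {
		t.Errorf("got %q, want Buffer", got)
	}
}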
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
new file mode 100644
index 000000000..106698064
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -0,0 +1,282 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "strconv"
+ "strings"
+)
+
+// ZeroString returns the string representation of the "zero" value of the type t.
+// This string can be used on the right-hand side of an assignment where the
+// left-hand side has that explicit type.
+// Exception: This does not apply to tuples. Their string representation is
+// informational only and cannot be used in an assignment.
+// When assigning to a wider type (such as 'any'), it's the caller's
+// responsibility to handle any necessary type conversions.
+// See [ZeroExpr] for a variant that returns an [ast.Expr].
+func ZeroString(t types.Type, qf types.Qualifier) string {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return "false"
+ case t.Info()&types.IsNumeric != 0:
+ return "0"
+ case t.Info()&types.IsString != 0:
+ return `""`
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return "nil"
+ default:
+ panic(fmt.Sprint("ZeroString for unexpected type:", t))
+ }
+
+ case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
+ return "nil"
+
+ case *types.Named, *types.Alias:
+ switch under := t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return types.TypeString(t, qf) + "{}"
+ default:
+ return ZeroString(under, qf)
+ }
+
+ case *types.Array, *types.Struct:
+ return types.TypeString(t, qf) + "{}"
+
+ case *types.TypeParam:
+ // Assumes func new is not shadowed.
+ return "*new(" + types.TypeString(t, qf) + ")"
+
+ case *types.Tuple:
+ // Tuples are not normal values.
+		// We currently format this as "(t[0], ..., t[n])". Could be something else.
+ components := make([]string, t.Len())
+ for i := 0; i < t.Len(); i++ {
+ components[i] = ZeroString(t.At(i).Type(), qf)
+ }
+ return "(" + strings.Join(components, ", ") + ")"
+
+ case *types.Union:
+ // Variables of these types cannot be created, so it makes
+ // no sense to ask for their zero value.
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ default:
+ panic(t) // unreachable.
+ }
+}
+
+// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t.
+// ZeroExpr is defined for types that are suitable for variables.
+// It may panic for other types such as Tuple or Union.
+// See [ZeroString] for a variant that returns a string.
+func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+ switch t := typ.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return &ast.Ident{Name: "false"}
+ case t.Info()&types.IsNumeric != 0:
+ return &ast.BasicLit{Kind: token.INT, Value: "0"}
+ case t.Info()&types.IsString != 0:
+ return &ast.BasicLit{Kind: token.STRING, Value: `""`}
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return ast.NewIdent("nil")
+ default:
+ panic(fmt.Sprint("ZeroExpr for unexpected type:", t))
+ }
+
+ case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
+ return ast.NewIdent("nil")
+
+ case *types.Named, *types.Alias:
+ switch under := t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return &ast.CompositeLit{
+ Type: TypeExpr(f, pkg, typ),
+ }
+ default:
+ return ZeroExpr(f, pkg, under)
+ }
+
+ case *types.Array, *types.Struct:
+ return &ast.CompositeLit{
+ Type: TypeExpr(f, pkg, typ),
+ }
+
+ case *types.TypeParam:
+ return &ast.StarExpr{ // *new(T)
+ X: &ast.CallExpr{
+ // Assumes func new is not shadowed.
+ Fun: ast.NewIdent("new"),
+ Args: []ast.Expr{
+ ast.NewIdent(t.Obj().Name()),
+ },
+ },
+ }
+
+ case *types.Tuple:
+		// Unlike ZeroString, there is no ast.Expr that can express a tuple as
+		// "(t[0], ..., t[n])".
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ case *types.Union:
+ // Variables of these types cannot be created, so it makes
+ // no sense to ask for their zero value.
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ default:
+ panic(t) // unreachable.
+ }
+}
+
+// IsZeroExpr uses simple syntactic heuristics to report whether expr
+// is an obvious zero value, such as 0, "", nil, or false.
+// It cannot do better without type information.
+func IsZeroExpr(expr ast.Expr) bool {
+ switch e := expr.(type) {
+ case *ast.BasicLit:
+ return e.Value == "0" || e.Value == `""`
+ case *ast.Ident:
+ return e.Name == "nil" || e.Name == "false"
+ default:
+ return false
+ }
+}
+
+// TypeExpr returns syntax for the specified type. References to named types
+// from packages other than pkg are qualified by an appropriate package name, as
+// defined by the import environment of file.
+// It may panic for types such as Tuple or Union.
+func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+ switch t := typ.(type) {
+ case *types.Basic:
+ switch t.Kind() {
+ case types.UnsafePointer:
+ // TODO(hxjiang): replace the implementation with types.Qualifier.
+ return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
+ default:
+ return ast.NewIdent(t.Name())
+ }
+
+ case *types.Pointer:
+ return &ast.UnaryExpr{
+ Op: token.MUL,
+ X: TypeExpr(f, pkg, t.Elem()),
+ }
+
+ case *types.Array:
+ return &ast.ArrayType{
+ Len: &ast.BasicLit{
+ Kind: token.INT,
+ Value: fmt.Sprintf("%d", t.Len()),
+ },
+ Elt: TypeExpr(f, pkg, t.Elem()),
+ }
+
+ case *types.Slice:
+ return &ast.ArrayType{
+ Elt: TypeExpr(f, pkg, t.Elem()),
+ }
+
+ case *types.Map:
+ return &ast.MapType{
+ Key: TypeExpr(f, pkg, t.Key()),
+ Value: TypeExpr(f, pkg, t.Elem()),
+ }
+
+ case *types.Chan:
+ dir := ast.ChanDir(t.Dir())
+ if t.Dir() == types.SendRecv {
+ dir = ast.SEND | ast.RECV
+ }
+ return &ast.ChanType{
+ Dir: dir,
+ Value: TypeExpr(f, pkg, t.Elem()),
+ }
+
+ case *types.Signature:
+ var params []*ast.Field
+ for i := 0; i < t.Params().Len(); i++ {
+ params = append(params, &ast.Field{
+ Type: TypeExpr(f, pkg, t.Params().At(i).Type()),
+ Names: []*ast.Ident{
+ {
+ Name: t.Params().At(i).Name(),
+ },
+ },
+ })
+ }
+ if t.Variadic() {
+ last := params[len(params)-1]
+ last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
+ }
+ var returns []*ast.Field
+ for i := 0; i < t.Results().Len(); i++ {
+ returns = append(returns, &ast.Field{
+ Type: TypeExpr(f, pkg, t.Results().At(i).Type()),
+ })
+ }
+ return &ast.FuncType{
+ Params: &ast.FieldList{
+ List: params,
+ },
+ Results: &ast.FieldList{
+ List: returns,
+ },
+ }
+
+ case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam}
+ switch t.Obj().Pkg() {
+ case pkg, nil:
+ return ast.NewIdent(t.Obj().Name())
+ }
+ pkgName := t.Obj().Pkg().Name()
+
+ // TODO(hxjiang): replace the implementation with types.Qualifier.
+ // If the file already imports the package under another name, use that.
+ for _, cand := range f.Imports {
+ if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() {
+ if cand.Name != nil && cand.Name.Name != "" {
+ pkgName = cand.Name.Name
+ }
+ }
+ }
+ if pkgName == "." {
+ return ast.NewIdent(t.Obj().Name())
+ }
+ return &ast.SelectorExpr{
+ X: ast.NewIdent(pkgName),
+ Sel: ast.NewIdent(t.Obj().Name()),
+ }
+
+ case *types.Struct:
+ return ast.NewIdent(t.String())
+
+ case *types.Interface:
+ return ast.NewIdent(t.String())
+
+ case *types.Union:
+ // TODO(hxjiang): handle the union through syntax (~A | ... | ~Z).
+ // Remove nil check when calling typesinternal.TypeExpr.
+ return nil
+
+ case *types.Tuple:
+ panic("invalid input type types.Tuple")
+
+ default:
+ panic("unreachable")
+ }
+}
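
A brief sketch of ZeroString on a few representative types; the Point type and package path are illustrative, and NameRelativeTo (from types.go above) supplies the qualifier.

package typesinternal

import (
	"go/token"
	"go/types"
	"testing"
)

func TestZeroStringSketch(t *testing.T) {
	pkg := types.NewPackage("example.com/p", "p")
	obj := types.NewTypeName(token.NoPos, pkg, "Point", nil)
	point := types.NewNamed(obj, types.NewStruct(nil, nil), nil)

	qf := NameRelativeTo(pkg)
	for _, tc := range []struct {
		typ  types.Type
		want string
	}{
		{types.Typ[types.Int], "0"},
		{types.Typ[types.String], `""`},
		{types.NewSlice(types.Typ[types.Byte]), "nil"},
		{point, "Point{}"}, // named struct: composite literal
	} {
		if got := ZeroString(tc.typ, qf); got != tc.want {
			t.Errorf("ZeroString(%s) = %s, want %s", tc.typ, got, tc.want)
		}
	}
}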
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go
deleted file mode 100644
index 377bf7a53..000000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package versions
-
-// toolchain is maximum version (<1.22) that the go toolchain used
-// to build the current tool is known to support.
-//
-// When a tool is built with >=1.22, the value of toolchain is unused.
-//
-// x/tools does not support building with go <1.18. So we take this
-// as the minimum possible maximum.
-var toolchain string = Go1_18
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
deleted file mode 100644
index f65beed9d..000000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.19
-// +build go1.19
-
-package versions
-
-func init() {
- if Compare(toolchain, Go1_19) < 0 {
- toolchain = Go1_19
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
deleted file mode 100644
index 1a9efa126..000000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.20
-// +build go1.20
-
-package versions
-
-func init() {
- if Compare(toolchain, Go1_20) < 0 {
- toolchain = Go1_20
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
deleted file mode 100644
index b7ef216df..000000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.21
-// +build go1.21
-
-package versions
-
-func init() {
- if Compare(toolchain, Go1_21) < 0 {
- toolchain = Go1_21
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
index 562eef21f..0fc10ce4e 100644
--- a/vendor/golang.org/x/tools/internal/versions/types.go
+++ b/vendor/golang.org/x/tools/internal/versions/types.go
@@ -5,15 +5,29 @@
package versions
import (
+ "go/ast"
"go/types"
)
-// GoVersion returns the Go version of the type package.
-// It returns zero if no version can be determined.
-func GoVersion(pkg *types.Package) string {
- // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
- if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
- return pkg.GoVersion()
+// FileVersion returns a file's Go version.
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+ // In tools built with Go >= 1.22, the Go version of a file
+	// follows a cascade of sources:
+ // 1) types.Info.FileVersion, which follows the cascade:
+ // 1.a) file version (ast.File.GoVersion),
+ // 1.b) the package version (types.Config.GoVersion), or
+ // 2) is some unknown Future version.
+ //
+ // File versions require a valid package version to be provided to types
+ // in Config.GoVersion. Config.GoVersion is either from the package's module
+ // or the toolchain (go run). This value should be provided by go/packages
+ // or unitchecker.Config.GoVersion.
+ if v := info.FileVersions[file]; IsValid(v) {
+ return v
}
- return ""
+ // Note: we could instead return runtime.Version() [if valid].
+ // This would act as a max version on what a tool can support.
+ return Future
}
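
A sketch of the new FileVersion in use: the type checker fills in Info.FileVersions when Config.GoVersion is set, and FileVersion then reports that value (falling back to Future when nothing valid is recorded). The assumed behavior is that a file without its own //go: directive inherits the configured package version.

package versions

import (
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"testing"
)

func TestFileVersionSketch(t *testing.T) {
	const src = "package p\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		t.Fatal(err)
	}
	// FileVersions must be non-nil for the checker to record versions.
	info := &types.Info{FileVersions: make(map[*ast.File]string)}
	conf := types.Config{GoVersion: "go1.21"}
	if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
		t.Fatal(err)
	}
	if v := FileVersion(info, f); v != "go1.21" {
		t.Errorf("FileVersion = %q, want %q", v, "go1.21")
	}
}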
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
deleted file mode 100644
index b4345d334..000000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go121.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.22
-// +build !go1.22
-
-package versions
-
-import (
- "go/ast"
- "go/types"
-)
-
-// FileVersion returns a language version (<=1.21) derived from runtime.Version()
-// or an unknown future version.
-func FileVersion(info *types.Info, file *ast.File) string {
- // In x/tools built with Go <= 1.21, we do not have Info.FileVersions
- // available. We use a go version derived from the toolchain used to
- // compile the tool by default.
- // This will be <= go1.21. We take this as the maximum version that
- // this tool can support.
- //
- // There are no features currently in x/tools that need to tell fine grained
- // differences for versions <1.22.
- return toolchain
-}
-
-// InitFileVersions is a noop when compiled with this Go version.
-func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
deleted file mode 100644
index aac5db62c..000000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go122.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.22
-// +build go1.22
-
-package versions
-
-import (
- "go/ast"
- "go/types"
-)
-
-// FileVersion returns a file's Go version.
-// The reported version is an unknown Future version if a
-// version cannot be determined.
-func FileVersion(info *types.Info, file *ast.File) string {
- // In tools built with Go >= 1.22, the Go version of a file
- // follow a cascades of sources:
- // 1) types.Info.FileVersion, which follows the cascade:
- // 1.a) file version (ast.File.GoVersion),
- // 1.b) the package version (types.Config.GoVersion), or
- // 2) is some unknown Future version.
- //
- // File versions require a valid package version to be provided to types
- // in Config.GoVersion. Config.GoVersion is either from the package's module
- // or the toolchain (go run). This value should be provided by go/packages
- // or unitchecker.Config.GoVersion.
- if v := info.FileVersions[file]; IsValid(v) {
- return v
- }
- // Note: we could instead return runtime.Version() [if valid].
- // This would act as a max version on what a tool can support.
- return Future
-}
-
-// InitFileVersions initializes info to record Go versions for Go files.
-func InitFileVersions(info *types.Info) {
- info.FileVersions = make(map[*ast.File]string)
-}