Diffstat (limited to 'vendor')
29 files changed, 1628 insertions, 1957 deletions
| diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 06bea9fab..95bc08d5c 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -81,6 +81,7 @@ A not-so-up-to-date-list-that-may-be-actually-current:  * https://addr.tools/  * https://dnscheck.tools/  * https://github.com/egbakou/domainverifier +* https://github.com/semihalev/sdns  Send pull request if you want to be listed here. diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index c1558b79c..6d7e17605 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -5,6 +5,7 @@ import (  	"net"  	"strconv"  	"strings" +	"unicode"  )  const hexDigit = "0123456789abcdef" @@ -330,8 +331,18 @@ func Fqdn(s string) string {  // CanonicalName returns the domain name in canonical form. A name in canonical  // form is lowercase and fully qualified. See Section 6.2 in RFC 4034. +// According to the RFC all uppercase US-ASCII letters in the owner name of the +// RR areeplaced by the corresponding lowercase US-ASCII letters.  func CanonicalName(s string) string { -	return strings.ToLower(Fqdn(s)) +	var result strings.Builder +	for _, ch := range s { +		if unicode.IsUpper(ch) && (ch >= 0x00 && ch <= 0x7F) { +			result.WriteRune(unicode.ToLower(ch)) +		} else { +			result.WriteRune(ch) +		} +	} +	return Fqdn(result.String())  }  // Copied from the official Go code. diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index d5049a4f9..b05cf14e9 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -896,23 +896,38 @@ func (dns *Msg) String() string {  		return "<nil> MsgHdr"  	}  	s := dns.MsgHdr.String() + " " -	s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " -	s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " -	s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " -	s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" +	if dns.MsgHdr.Opcode == OpcodeUpdate { +		s += "ZONE: " + strconv.Itoa(len(dns.Question)) + ", " +		s += "PREREQ: " + strconv.Itoa(len(dns.Answer)) + ", " +		s += "UPDATE: " + strconv.Itoa(len(dns.Ns)) + ", " +		s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" +	} else { +		s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " +		s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " +		s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " +		s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" +	}  	opt := dns.IsEdns0()  	if opt != nil {  		// OPT PSEUDOSECTION  		s += opt.String() + "\n"  	}  	if len(dns.Question) > 0 { -		s += "\n;; QUESTION SECTION:\n" +		if dns.MsgHdr.Opcode == OpcodeUpdate { +			s += "\n;; ZONE SECTION:\n" +		} else { +			s += "\n;; QUESTION SECTION:\n" +		}  		for _, r := range dns.Question {  			s += r.String() + "\n"  		}  	}  	if len(dns.Answer) > 0 { -		s += "\n;; ANSWER SECTION:\n" +		if dns.MsgHdr.Opcode == OpcodeUpdate { +			s += "\n;; PREREQUISITE SECTION:\n" +		} else { +			s += "\n;; ANSWER SECTION:\n" +		}  		for _, r := range dns.Answer {  			if r != nil {  				s += r.String() + "\n" @@ -920,7 +935,11 @@ func (dns *Msg) String() string {  		}  	}  	if len(dns.Ns) > 0 { -		s += "\n;; AUTHORITY SECTION:\n" +		if dns.MsgHdr.Opcode == OpcodeUpdate { +			s += "\n;; UPDATE SECTION:\n" +		} else { +			s += "\n;; AUTHORITY SECTION:\n" +		}  		for _, r := range dns.Ns {  			if r != nil {  				s += r.String() + "\n" diff --git 
a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index 03afeccda..c9a03dec6 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -236,6 +236,9 @@ var CertTypeToString = map[uint16]string{  	CertOID:     "OID",  } +// Prefix for IPv4 encoded as IPv6 address +const ipv4InIPv6Prefix = "::ffff:" +  //go:generate go run types_generate.go  // Question holds a DNS question. Usually there is just one. While the @@ -751,6 +754,11 @@ func (rr *AAAA) String() string {  	if rr.AAAA == nil {  		return rr.Hdr.String()  	} + +	if rr.AAAA.To4() != nil { +		return rr.Hdr.String() + ipv4InIPv6Prefix + rr.AAAA.String() +	} +  	return rr.Hdr.String() + rr.AAAA.String()  } @@ -1517,7 +1525,7 @@ func (a *APLPrefix) str() string {  	case net.IPv6len:  		// add prefix for IPv4-mapped IPv6  		if v4 := a.Network.IP.To4(); v4 != nil { -			sb.WriteString("::ffff:") +			sb.WriteString(ipv4InIPv6Prefix)  		}  		sb.WriteString(a.Network.IP.String())  	} diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index 5891044a3..a09113662 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns  import "fmt"  // Version is current version of this library. -var Version = v{1, 1, 55} +var Version = v{1, 1, 56}  // v holds the version of this library.  type v struct { diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index a30a22bf2..9a2dfd33a 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -140,7 +140,7 @@ func Compare(v, w string) int {  // Max canonicalizes its arguments and then returns the version string  // that compares greater.  // -// Deprecated: use Compare instead. In most cases, returning a canonicalized +// Deprecated: use [Compare] instead. In most cases, returning a canonicalized  // version is not expected or desired.  func Max(v, w string) string {  	v = Canonical(v) @@ -151,7 +151,7 @@ func Max(v, w string) string {  	return w  } -// ByVersion implements sort.Interface for sorting semantic version strings. +// ByVersion implements [sort.Interface] for sorting semantic version strings.  type ByVersion []string  func (vs ByVersion) Len() int      { return len(vs) } @@ -164,7 +164,7 @@ func (vs ByVersion) Less(i, j int) bool {  	return vs[i] < vs[j]  } -// Sort sorts a list of semantic version strings using ByVersion. +// Sort sorts a list of semantic version strings using [ByVersion].  func Sort(list []string) {  	sort.Sort(ByVersion(list))  } diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 165ede0f8..03543bd4b 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,  	// (from "version"). Select appropriate importer.  	
if len(data) > 0 {  		switch data[0] { -		case 'i': -			_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) -			return pkg, err +		case 'v', 'c', 'd': // binary, till go1.10 +			return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) -		case 'v', 'c', 'd': -			_, pkg, err := gcimporter.BImportData(fset, imports, data, path) +		case 'i': // indexed, till go1.19 +			_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)  			return pkg, err -		case 'u': +		case 'u': // unified, from go1.20  			_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)  			return pkg, err diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index 18a002f82..0454cdd78 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -8,7 +8,6 @@ package packagesdriver  import (  	"context"  	"fmt" -	"go/types"  	"strings"  	"golang.org/x/tools/internal/gocommand" @@ -16,7 +15,7 @@ import (  var debug = false -func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { +func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {  	inv.Verb = "list"  	inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}  	stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) @@ -29,21 +28,21 @@ func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *  			inv.Args = []string{"GOARCH"}  			envout, enverr := gocmdRunner.Run(ctx, inv)  			if enverr != nil { -				return nil, enverr +				return "", "", enverr  			}  			goarch = strings.TrimSpace(envout.String())  			compiler = "gc"  		} else { -			return nil, friendlyErr +			return "", "", friendlyErr  		}  	} else {  		fields := strings.Fields(stdout.String())  		if len(fields) < 2 { -			return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>", +			return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",  				stdout.String(), stderr.String())  		}  		goarch = fields[0]  		compiler = fields[1]  	} -	return types.SizesFor(compiler, goarch), nil +	return compiler, goarch, nil  } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 6bb7168d2..b5de9cf9f 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -9,7 +9,6 @@ import (  	"context"  	"encoding/json"  	"fmt" -	"go/types"  	"io/ioutil"  	"log"  	"os" @@ -153,10 +152,10 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {  	if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {  		sizeswg.Add(1)  		go func() { -			var sizes types.Sizes -			sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) -			// types.SizesFor always returns nil or a *types.StdSizes. 
-			response.dr.Sizes, _ = sizes.(*types.StdSizes) +			compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) +			sizeserr = err +			response.dr.Compiler = compiler +			response.dr.Arch = arch  			sizeswg.Done()  		}()  	} @@ -625,7 +624,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse  		}  		if pkg.PkgPath == "unsafe" { -			pkg.GoFiles = nil // ignore fake unsafe.go file +			pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) +		} else if len(pkg.CompiledGoFiles) == 0 { +			// Work around for pre-go.1.11 versions of go list. +			// TODO(matloob): they should be handled by the fallback. +			// Can we delete this? +			pkg.CompiledGoFiles = pkg.GoFiles  		}  		// Assume go list emits only absolute paths for Dir. @@ -663,16 +667,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse  			response.Roots = append(response.Roots, pkg.ID)  		} -		// Work around for pre-go.1.11 versions of go list. -		// TODO(matloob): they should be handled by the fallback. -		// Can we delete this? -		if len(pkg.CompiledGoFiles) == 0 { -			pkg.CompiledGoFiles = pkg.GoFiles -		} -  		// Temporary work-around for golang/go#39986. Parse filenames out of  		// error messages. This happens if there are unrecoverable syntax  		// errors in the source, so we can't match on a specific error message. +		// +		// TODO(rfindley): remove this heuristic, in favor of considering +		// InvalidGoFiles from the list driver.  		if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) {  			addFilenameFromPos := func(pos string) bool {  				split := strings.Split(pos, ":") @@ -891,6 +891,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string {  		// probably because you'd just get the TestMain.  		fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)),  	} + +	// golang/go#60456: with go1.21 and later, go list serves pgo variants, which +	// can be costly to compute and may result in redundant processing for the +	// caller. Disable these variants. If someone wants to add e.g. a NeedPGO +	// mode flag, that should be a separate proposal. +	if goVersion >= 21 { +		fullargs = append(fullargs, "-pgo=off") +	} +  	fullargs = append(fullargs, cfg.BuildFlags...)  	fullargs = append(fullargs, "--")  	fullargs = append(fullargs, words...) diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0f1505b80..124a6fe14 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -220,8 +220,10 @@ type driverResponse struct {  	// lists of multiple drivers, go/packages will fall back to the next driver.  	NotHandled bool -	// Sizes, if not nil, is the types.Sizes to use when type checking. -	Sizes *types.StdSizes +	// Compiler and Arch are the arguments pass of types.SizesFor +	// to get a types.Sizes to use when type checking. +	Compiler string +	Arch     string  	// Roots is the set of package IDs that make up the root packages.  	
// We have to encode this separately because when we encode a single package @@ -262,7 +264,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) {  	if err != nil {  		return nil, err  	} -	l.sizes = response.Sizes +	l.sizes = types.SizesFor(response.Compiler, response.Arch)  	return l.refine(response)  } @@ -308,6 +310,9 @@ type Package struct {  	TypeErrors []types.Error  	// GoFiles lists the absolute file paths of the package's Go source files. +	// It may include files that should not be compiled, for example because +	// they contain non-matching build tags, are documentary pseudo-files such as +	// unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing.  	GoFiles []string  	// CompiledGoFiles lists the absolute file paths of the package's source @@ -627,7 +632,7 @@ func newLoader(cfg *Config) *loader {  	return ld  } -// refine connects the supplied packages into a graph and then adds type and +// refine connects the supplied packages into a graph and then adds type  // and syntax information as requested by the LoadMode.  func (ld *loader) refine(response *driverResponse) ([]*Package, error) {  	roots := response.Roots @@ -1040,6 +1045,9 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {  		Error: appendError,  		Sizes: ld.sizes,  	} +	if lpkg.Module != nil && lpkg.Module.GoVersion != "" { +		typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) +	}  	if (ld.Mode & typecheckCgo) != 0 {  		if !typesinternal.SetUsesCgo(tc) {  			appendError(Error{ diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 000000000..fa5834baf --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,827 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// +//	type A struct{ X int } +//	type B A +// +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. +package objectpath + +import ( +	"fmt" +	"go/types" +	"sort" +	"strconv" +	"strings" +	_ "unsafe" + +	"golang.org/x/tools/internal/typeparams" +	"golang.org/x/tools/internal/typesinternal" +) + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. 
+// We classify these operators by their type: +// +//	PO package->object	Package.Scope.Lookup +//	OT  object->type 	Object.Type +//	TT    type->type 	Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +//	TO   type->object	Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +//	objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +//   - The only PO operator is Package.Scope.Lookup, which requires an identifier. +//   - The only OT operator is Object.Type, +//     which we encode as '.' because dot cannot appear in an identifier. +//   - The TT operators are encoded as [EKPRUTC]; +//     one of these (TypeParam) requires an integer operand, +//     which is encoded as a string of decimal digits. +//   - The TO operators are encoded as [AFMO]; +//     three of these (At,Field,Method) require an integer operand, +//     which is encoded as a string of decimal digits. +//     These indices are stable across different representations +//     of the same package, even source and export data. +//     The indices used are implementation specific and may not correspond to +//     the argument to the go/types function. +// +// In the example below, +// +//	package p +// +//	type T interface { +//		f() (a string, b struct{ X int }) +//	} +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +//	p.Lookup("T")					T +//	.Type().Underlying().Method(0).			f +//	.Type().Results().At(1)				b +//	.Type().Field(0)					X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +const ( +	// object->type operators +	opType = '.' // .Type()		  (Object) + +	// type->type operators +	opElem       = 'E' // .Elem()		        (Pointer, Slice, Array, Chan, Map) +	opKey        = 'K' // .Key()		        (Map) +	opParams     = 'P' // .Params()		      (Signature) +	opResults    = 'R' // .Results()	      (Signature) +	opUnderlying = 'U' // .Underlying()	    (Named) +	opTypeParam  = 'T' // .TypeParams.At(i) (Named, Signature) +	opConstraint = 'C' // .Constraint()     (TypeParam) + +	// type->object operators +	opAt     = 'A' // .At(i)		 (Tuple) +	opField  = 'F' // .Field(i)	 (Struct) +	opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) +	opObj    = 'O' // .Obj()		 (Named, TypeParam) +) + +// For is equivalent to new(Encoder).For(obj). +// +// It may be more efficient to reuse a single Encoder across several calls. +func For(obj types.Object) (Path, error) { +	return new(Encoder).For(obj) +} + +// An Encoder amortizes the cost of encoding the paths of multiple objects. +// The zero value of an Encoder is ready to use. +type Encoder struct { +	scopeMemo         map[*types.Scope][]types.Object // memoization of scopeObjects +	namedMethodsMemo  map[*types.Named][]*types.Func  // memoization of namedMethods() +	skipMethodSorting bool +} + +// Expose back doors so that gopls can avoid method sorting, which can dominate +// analysis on certain repositories. +// +// TODO(golang/go#61443): remove this. +func init() { +	typesinternal.SkipEncoderMethodSorting = func(enc interface{}) { +		enc.(*Encoder).skipMethodSorting = true +	} +	typesinternal.ObjectpathObject = object +} + +// For returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. 
+// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +//	package p +// +//	type T interface { +//		f() (a string, b struct{ X int }) +//	} +// +// For(X) would return a path that denotes the following sequence of operations: +// +//	p.Scope().Lookup("T")				(TypeName T) +//	.Type().Underlying().Method(0).			(method Func f) +//	.Type().Results().At(1)				(field Var b) +//	.Type().Field(0)					(field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func (enc *Encoder) For(obj types.Object) (Path, error) { +	pkg := obj.Pkg() + +	// This table lists the cases of interest. +	// +	// Object				Action +	// ------                               ------ +	// nil					reject +	// builtin				reject +	// pkgname				reject +	// label				reject +	// var +	//    package-level			accept +	//    func param/result			accept +	//    local				reject +	//    struct field			accept +	// const +	//    package-level			accept +	//    local				reject +	// func +	//    package-level			accept +	//    init functions			reject +	//    concrete method			accept +	//    interface method			accept +	// type +	//    package-level			accept +	//    local				reject +	// +	// The only accessible package-level objects are members of pkg itself. +	// +	// The cases are handled in four steps: +	// +	// 1. reject nil and builtin +	// 2. accept package-level objects +	// 3. reject obviously invalid objects +	// 4. search the API for the path to the param/result/field/method. + +	// 1. reference to nil or builtin? +	if pkg == nil { +		return "", fmt.Errorf("predeclared %s has no path", obj) +	} +	scope := pkg.Scope() + +	// 2. package-level object? +	if scope.Lookup(obj.Name()) == obj { +		// Only exported objects (and non-exported types) have a path. +		// Non-exported types may be referenced by other objects. +		if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { +			return "", fmt.Errorf("no path for non-exported %v", obj) +		} +		return Path(obj.Name()), nil +	} + +	// 3. Not a package-level object. +	//    Reject obviously non-viable cases. +	switch obj := obj.(type) { +	case *types.TypeName: +		if _, ok := obj.Type().(*typeparams.TypeParam); !ok { +			// With the exception of type parameters, only package-level type names +			// have a path. 
+			return "", fmt.Errorf("no path for %v", obj) +		} +	case *types.Const, // Only package-level constants have a path. +		*types.Label,   // Labels are function-local. +		*types.PkgName: // PkgNames are file-local. +		return "", fmt.Errorf("no path for %v", obj) + +	case *types.Var: +		// Could be: +		// - a field (obj.IsField()) +		// - a func parameter or result +		// - a local var. +		// Sadly there is no way to distinguish +		// a param/result from a local +		// so we must proceed to the find. + +	case *types.Func: +		// A func, if not package-level, must be a method. +		if recv := obj.Type().(*types.Signature).Recv(); recv == nil { +			return "", fmt.Errorf("func is not a method: %v", obj) +		} + +		if path, ok := enc.concreteMethod(obj); ok { +			// Fast path for concrete methods that avoids looping over scope. +			return path, nil +		} + +	default: +		panic(obj) +	} + +	// 4. Search the API for the path to the var (field/param/result) or method. + +	// First inspect package-level named types. +	// In the presence of path aliases, these give +	// the best paths because non-types may +	// refer to types, but not the reverse. +	empty := make([]byte, 0, 48) // initial space +	objs := enc.scopeObjects(scope) +	for _, o := range objs { +		tname, ok := o.(*types.TypeName) +		if !ok { +			continue // handle non-types in second pass +		} + +		path := append(empty, o.Name()...) +		path = append(path, opType) + +		T := o.Type() + +		if tname.IsAlias() { +			// type alias +			if r := find(obj, T, path, nil); r != nil { +				return Path(r), nil +			} +		} else { +			if named, _ := T.(*types.Named); named != nil { +				if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { +					// generic named type +					return Path(r), nil +				} +			} +			// defined (named) type +			if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { +				return Path(r), nil +			} +		} +	} + +	// Then inspect everything else: +	// non-types, and declared methods of defined types. +	for _, o := range objs { +		path := append(empty, o.Name()...) +		if _, ok := o.(*types.TypeName); !ok { +			if o.Exported() { +				// exported non-type (const, var, func) +				if r := find(obj, o.Type(), append(path, opType), nil); r != nil { +					return Path(r), nil +				} +			} +			continue +		} + +		// Inspect declared methods of defined types. +		if T, ok := o.Type().(*types.Named); ok { +			path = append(path, opType) +			if !enc.skipMethodSorting { +				// Note that method index here is always with respect +				// to canonical ordering of methods, regardless of how +				// they appear in the underlying type. +				for i, m := range enc.namedMethods(T) { +					path2 := appendOpArg(path, opMethod, i) +					if m == obj { +						return Path(path2), nil // found declared method +					} +					if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { +						return Path(r), nil +					} +				} +			} else { +				// This branch must match the logic in the branch above, using go/types +				// APIs without sorting. 
+				for i := 0; i < T.NumMethods(); i++ { +					m := T.Method(i) +					path2 := appendOpArg(path, opMethod, i) +					if m == obj { +						return Path(path2), nil // found declared method +					} +					if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { +						return Path(r), nil +					} +				} +			} +		} +	} + +	return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { +	path = append(path, op) +	path = strconv.AppendInt(path, int64(arg), 10) +	return path +} + +// concreteMethod returns the path for meth, which must have a non-nil receiver. +// The second return value indicates success and may be false if the method is +// an interface method or if it is an instantiated method. +// +// This function is just an optimization that avoids the general scope walking +// approach. You are expected to fall back to the general approach if this +// function fails. +func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { +	// Concrete methods can only be declared on package-scoped named types. For +	// that reason we can skip the expensive walk over the package scope: the +	// path will always be package -> named type -> method. We can trivially get +	// the type name from the receiver, and only have to look over the type's +	// methods to find the method index. +	// +	// Methods on generic types require special consideration, however. Consider +	// the following package: +	// +	// 	L1: type S[T any] struct{} +	// 	L2: func (recv S[A]) Foo() { recv.Bar() } +	// 	L3: func (recv S[B]) Bar() { } +	// 	L4: type Alias = S[int] +	// 	L5: func _[T any]() { var s S[int]; s.Foo() } +	// +	// The receivers of methods on generic types are instantiations. L2 and L3 +	// instantiate S with the type-parameters A and B, which are scoped to the +	// respective methods. L4 and L5 each instantiate S with int. Each of these +	// instantiations has its own method set, full of methods (and thus objects) +	// with receivers whose types are the respective instantiations. In other +	// words, we have +	// +	// S[A].Foo, S[A].Bar +	// S[B].Foo, S[B].Bar +	// S[int].Foo, S[int].Bar +	// +	// We may thus be trying to produce object paths for any of these objects. +	// +	// S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo +	// and S.Bar, which are the paths that this function naturally produces. +	// +	// S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that +	// don't correspond to the origin methods. For S[int], this is significant. +	// The most precise object path for S[int].Foo, for example, is Alias.Foo, +	// not S.Foo. Our function, however, would produce S.Foo, which would +	// resolve to a different object. +	// +	// For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are +	// still the correct paths, since only the origin methods have meaningful +	// paths. But this is likely only true for trivial cases and has edge cases. +	// Since this function is only an optimization, we err on the side of giving +	// up, deferring to the slower but definitely correct algorithm. Most users +	// of objectpath will only be giving us origin methods, anyway, as referring +	// to instantiated methods is usually not useful. 
+ +	if typeparams.OriginMethod(meth) != meth { +		return "", false +	} + +	recvT := meth.Type().(*types.Signature).Recv().Type() +	if ptr, ok := recvT.(*types.Pointer); ok { +		recvT = ptr.Elem() +	} + +	named, ok := recvT.(*types.Named) +	if !ok { +		return "", false +	} + +	if types.IsInterface(named) { +		// Named interfaces don't have to be package-scoped +		// +		// TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface +		// methods, too, I think. +		return "", false +	} + +	// Preallocate space for the name, opType, opMethod, and some digits. +	name := named.Obj().Name() +	path := make([]byte, 0, len(name)+8) +	path = append(path, name...) +	path = append(path, opType) + +	if !enc.skipMethodSorting { +		for i, m := range enc.namedMethods(named) { +			if m == meth { +				path = appendOpArg(path, opMethod, i) +				return Path(path), true +			} +		} +	} else { +		// This branch must match the logic of the branch above, using go/types +		// APIs without sorting. +		for i := 0; i < named.NumMethods(); i++ { +			m := named.Method(i) +			if m == meth { +				path = appendOpArg(path, opMethod, i) +				return Path(path), true +			} +		} +	} + +	// Due to golang/go#59944, go/types fails to associate the receiver with +	// certain methods on cgo types. +	// +	// TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go +	// versions gopls supports. +	return "", false +	// panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) +} + +// find finds obj within type T, returning the path to it, or nil if not found. +// +// The seen map is used to short circuit cycles through type parameters. If +// nil, it will be allocated as necessary. +func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { +	switch T := T.(type) { +	case *types.Basic, *types.Named: +		// Named types belonging to pkg were handled already, +		// so T must belong to another package. No path. 
+		return nil +	case *types.Pointer: +		return find(obj, T.Elem(), append(path, opElem), seen) +	case *types.Slice: +		return find(obj, T.Elem(), append(path, opElem), seen) +	case *types.Array: +		return find(obj, T.Elem(), append(path, opElem), seen) +	case *types.Chan: +		return find(obj, T.Elem(), append(path, opElem), seen) +	case *types.Map: +		if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { +			return r +		} +		return find(obj, T.Elem(), append(path, opElem), seen) +	case *types.Signature: +		if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { +			return r +		} +		if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { +			return r +		} +		return find(obj, T.Results(), append(path, opResults), seen) +	case *types.Struct: +		for i := 0; i < T.NumFields(); i++ { +			fld := T.Field(i) +			path2 := appendOpArg(path, opField, i) +			if fld == obj { +				return path2 // found field var +			} +			if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { +				return r +			} +		} +		return nil +	case *types.Tuple: +		for i := 0; i < T.Len(); i++ { +			v := T.At(i) +			path2 := appendOpArg(path, opAt, i) +			if v == obj { +				return path2 // found param/result var +			} +			if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { +				return r +			} +		} +		return nil +	case *types.Interface: +		for i := 0; i < T.NumMethods(); i++ { +			m := T.Method(i) +			path2 := appendOpArg(path, opMethod, i) +			if m == obj { +				return path2 // found interface method +			} +			if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { +				return r +			} +		} +		return nil +	case *typeparams.TypeParam: +		name := T.Obj() +		if name == obj { +			return append(path, opObj) +		} +		if seen[name] { +			return nil +		} +		if seen == nil { +			seen = make(map[*types.TypeName]bool) +		} +		seen[name] = true +		if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { +			return r +		} +		return nil +	} +	panic(T) +} + +func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +	for i := 0; i < list.Len(); i++ { +		tparam := list.At(i) +		path2 := appendOpArg(path, opTypeParam, i) +		if r := find(obj, tparam, path2, seen); r != nil { +			return r +		} +	} +	return nil +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { +	return object(pkg, string(p), false) +} + +// Note: the skipMethodSorting parameter must match the value of +// Encoder.skipMethodSorting used during encoding. +func object(pkg *types.Package, pathstr string, skipMethodSorting bool) (types.Object, error) { +	if pathstr == "" { +		return nil, fmt.Errorf("empty path") +	} + +	var pkgobj, suffix string +	if dot := strings.IndexByte(pathstr, opType); dot < 0 { +		pkgobj = pathstr +	} else { +		pkgobj = pathstr[:dot] +		suffix = pathstr[dot:] // suffix starts with "." 
+	} + +	obj := pkg.Scope().Lookup(pkgobj) +	if obj == nil { +		return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) +	} + +	// abstraction of *types.{Pointer,Slice,Array,Chan,Map} +	type hasElem interface { +		Elem() types.Type +	} +	// abstraction of *types.{Named,Signature} +	type hasTypeParams interface { +		TypeParams() *typeparams.TypeParamList +	} +	// abstraction of *types.{Named,TypeParam} +	type hasObj interface { +		Obj() *types.TypeName +	} + +	// The loop state is the pair (t, obj), +	// exactly one of which is non-nil, initially obj. +	// All suffixes start with '.' (the only object->type operation), +	// followed by optional type->type operations, +	// then a type->object operation. +	// The cycle then repeats. +	var t types.Type +	for suffix != "" { +		code := suffix[0] +		suffix = suffix[1:] + +		// Codes [AFM] have an integer operand. +		var index int +		switch code { +		case opAt, opField, opMethod, opTypeParam: +			rest := strings.TrimLeft(suffix, "0123456789") +			numerals := suffix[:len(suffix)-len(rest)] +			suffix = rest +			i, err := strconv.Atoi(numerals) +			if err != nil { +				return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) +			} +			index = int(i) +		case opObj: +			// no operand +		default: +			// The suffix must end with a type->object operation. +			if suffix == "" { +				return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) +			} +		} + +		if code == opType { +			if t != nil { +				return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) +			} +			t = obj.Type() +			obj = nil +			continue +		} + +		if t == nil { +			return nil, fmt.Errorf("invalid path: code %q in object context", code) +		} + +		// Inv: t != nil, obj == nil + +		switch code { +		case opElem: +			hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map +			if !ok { +				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) +			} +			t = hasElem.Elem() + +		case opKey: +			mapType, ok := t.(*types.Map) +			if !ok { +				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) +			} +			t = mapType.Key() + +		case opParams: +			sig, ok := t.(*types.Signature) +			if !ok { +				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) +			} +			t = sig.Params() + +		case opResults: +			sig, ok := t.(*types.Signature) +			if !ok { +				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) +			} +			t = sig.Results() + +		case opUnderlying: +			named, ok := t.(*types.Named) +			if !ok { +				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) +			} +			t = named.Underlying() + +		case opTypeParam: +			hasTypeParams, ok := t.(hasTypeParams) // Named, Signature +			if !ok { +				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) +			} +			tparams := hasTypeParams.TypeParams() +			if n := tparams.Len(); index >= n { +				return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) +			} +			t = tparams.At(index) + +		case opConstraint: +			tparam, ok := t.(*typeparams.TypeParam) +			if !ok { +				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) +			} +			t = tparam.Constraint() + +		case opAt: +			tuple, ok := t.(*types.Tuple) +			if !ok { +				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) +			} +			
if n := tuple.Len(); index >= n { +				return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) +			} +			obj = tuple.At(index) +			t = nil + +		case opField: +			structType, ok := t.(*types.Struct) +			if !ok { +				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) +			} +			if n := structType.NumFields(); index >= n { +				return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) +			} +			obj = structType.Field(index) +			t = nil + +		case opMethod: +			switch t := t.(type) { +			case *types.Interface: +				if index >= t.NumMethods() { +					return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) +				} +				obj = t.Method(index) // Id-ordered + +			case *types.Named: +				if index >= t.NumMethods() { +					return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) +				} +				if skipMethodSorting { +					obj = t.Method(index) +				} else { +					methods := namedMethods(t) // (unmemoized) +					obj = methods[index]       // Id-ordered +				} + +			default: +				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) +			} +			t = nil + +		case opObj: +			hasObj, ok := t.(hasObj) +			if !ok { +				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) +			} +			obj = hasObj.Obj() +			t = nil + +		default: +			return nil, fmt.Errorf("invalid path: unknown code %q", code) +		} +	} + +	if obj.Pkg() != pkg { +		return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) +	} + +	return obj, nil // success +} + +// namedMethods returns the methods of a Named type in ascending Id order. +func namedMethods(named *types.Named) []*types.Func { +	methods := make([]*types.Func, named.NumMethods()) +	for i := range methods { +		methods[i] = named.Method(i) +	} +	sort.Slice(methods, func(i, j int) bool { +		return methods[i].Id() < methods[j].Id() +	}) +	return methods +} + +// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. +func (enc *Encoder) namedMethods(named *types.Named) []*types.Func { +	m := enc.namedMethodsMemo +	if m == nil { +		m = make(map[*types.Named][]*types.Func) +		enc.namedMethodsMemo = m +	} +	methods, ok := m[named] +	if !ok { +		methods = namedMethods(named) // allocates and sorts +		m[named] = methods +	} +	return methods +} + +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { +	m := enc.scopeMemo +	if m == nil { +		m = make(map[*types.Scope][]types.Object) +		enc.scopeMemo = m +	} +	objs, ok := m[scope] +	if !ok { +		names := scope.Names() // allocates and sorts +		objs = make([]types.Object, len(names)) +		for i, name := range names { +			objs[i] = scope.Lookup(name) +		} +		m[scope] = objs +	} +	return objs +} diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go new file mode 100644 index 000000000..581b26c20 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go @@ -0,0 +1,59 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag provides the labels used for telemetry throughout gopls. 
+package tag + +import ( +	"golang.org/x/tools/internal/event/keys" +) + +var ( +	// create the label keys we use +	Method        = keys.NewString("method", "") +	StatusCode    = keys.NewString("status.code", "") +	StatusMessage = keys.NewString("status.message", "") +	RPCID         = keys.NewString("id", "") +	RPCDirection  = keys.NewString("direction", "") +	File          = keys.NewString("file", "") +	Directory     = keys.New("directory", "") +	URI           = keys.New("URI", "") +	Package       = keys.NewString("package", "") // sorted comma-separated list of Package IDs +	PackagePath   = keys.NewString("package_path", "") +	Query         = keys.New("query", "") +	Snapshot      = keys.NewUInt64("snapshot", "") +	Operation     = keys.NewString("operation", "") + +	Position     = keys.New("position", "") +	Category     = keys.NewString("category", "") +	PackageCount = keys.NewInt("packages", "") +	Files        = keys.New("files", "") +	Port         = keys.NewInt("port", "") +	Type         = keys.New("type", "") +	HoverKind    = keys.NewString("hoverkind", "") + +	NewServer = keys.NewString("new_server", "A new server was added") +	EndServer = keys.NewString("end_server", "A server was shut down") + +	ServerID     = keys.NewString("server", "The server ID an event is related to") +	Logfile      = keys.NewString("logfile", "") +	DebugAddress = keys.NewString("debug_address", "") +	GoplsPath    = keys.NewString("gopls_path", "") +	ClientID     = keys.NewString("client_id", "") + +	Level = keys.NewInt("level", "The logging level") +) + +var ( +	// create the stats we measure +	Started       = keys.NewInt64("started", "Count of started RPCs.") +	ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.")            //, unit.Bytes) +	SentBytes     = keys.NewInt64("sent_bytes", "Bytes sent.")                    //, unit.Bytes) +	Latency       = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) +) + +const ( +	Inbound  = "in" +	Outbound = "out" +) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go deleted file mode 100644 index 30582ed6d..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( -	"bytes" -	"encoding/binary" -	"fmt" -	"go/constant" -	"go/token" -	"go/types" -	"math" -	"math/big" -	"sort" -	"strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// Current export format version. 
Increase with each format change. -// -// Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. -// -//	6: package height (CL 105038) -- NOT IMPLEMENTED HERE -//	5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE -//	4: type name objects support type aliases, uses aliasTag -//	3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -//	2: removed unused bool in ODCL export (compiler only) -//	1: header format change (more regular), export package for _ struct fields -//	0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. -// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with with the textual export format either). -// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { -	fset *token.FileSet -	out  bytes.Buffer - -	// object -> index maps, indexed in order of serialization -	strIndex map[string]int -	pkgIndex map[*types.Package]int -	typIndex map[types.Type]int - -	// position encoding -	posInfoFormat bool -	prevFile      string -	prevLine      int - -	// debugging support -	written int // bytes written -	indent  int // for trace -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { -	return internalError(fmt.Sprintf(format, args...)) -} - -// BExportData returns binary export data for pkg. -// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { -	if !debug { -		defer func() { -			if e := recover(); e != nil { -				if ierr, ok := e.(internalError); ok { -					err = ierr -					return -				} -				// Not an internal error; panic again. -				panic(e) -			} -		}() -	} - -	p := exporter{ -		fset:          fset, -		strIndex:      map[string]int{"": 0}, // empty string is mapped to 0 -		pkgIndex:      make(map[*types.Package]int), -		typIndex:      make(map[types.Type]int), -		posInfoFormat: true, // TODO(gri) might become a flag, eventually -	} - -	// write version info -	// The version string must start with "version %d" where %d is the version -	// number. Additional debugging information may follow after a blank; that -	// text is ignored by the importer. 
-	p.rawStringln(fmt.Sprintf("version %d", exportVersion)) -	var debug string -	if debugFormat { -		debug = "debug" -	} -	p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly -	p.bool(trackAllTypes) -	p.bool(p.posInfoFormat) - -	// --- generic export data --- - -	// populate type map with predeclared "known" types -	for index, typ := range predeclared() { -		p.typIndex[typ] = index -	} -	if len(p.typIndex) != len(predeclared()) { -		return nil, internalError("duplicate entries in type map?") -	} - -	// write package data -	p.pkg(pkg, true) -	if trace { -		p.tracef("\n") -	} - -	// write objects -	objcount := 0 -	scope := pkg.Scope() -	for _, name := range scope.Names() { -		if !token.IsExported(name) { -			continue -		} -		if trace { -			p.tracef("\n") -		} -		p.obj(scope.Lookup(name)) -		objcount++ -	} - -	// indicate end of list -	if trace { -		p.tracef("\n") -	} -	p.tag(endTag) - -	// for self-verification only (redundant) -	p.int(objcount) - -	if trace { -		p.tracef("\n") -	} - -	// --- end of export data --- - -	return p.out.Bytes(), nil -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { -	if pkg == nil { -		panic(internalError("unexpected nil pkg")) -	} - -	// if we saw the package before, write its index (>= 0) -	if i, ok := p.pkgIndex[pkg]; ok { -		p.index('P', i) -		return -	} - -	// otherwise, remember the package, write the package tag (< 0) and package data -	if trace { -		p.tracef("P%d = { ", len(p.pkgIndex)) -		defer p.tracef("} ") -	} -	p.pkgIndex[pkg] = len(p.pkgIndex) - -	p.tag(packageTag) -	p.string(pkg.Name()) -	if emptypath { -		p.string("") -	} else { -		p.string(pkg.Path()) -	} -} - -func (p *exporter) obj(obj types.Object) { -	switch obj := obj.(type) { -	case *types.Const: -		p.tag(constTag) -		p.pos(obj) -		p.qualifiedName(obj) -		p.typ(obj.Type()) -		p.value(obj.Val()) - -	case *types.TypeName: -		if obj.IsAlias() { -			p.tag(aliasTag) -			p.pos(obj) -			p.qualifiedName(obj) -		} else { -			p.tag(typeTag) -		} -		p.typ(obj.Type()) - -	case *types.Var: -		p.tag(varTag) -		p.pos(obj) -		p.qualifiedName(obj) -		p.typ(obj.Type()) - -	case *types.Func: -		p.tag(funcTag) -		p.pos(obj) -		p.qualifiedName(obj) -		sig := obj.Type().(*types.Signature) -		p.paramList(sig.Params(), sig.Variadic()) -		p.paramList(sig.Results(), false) - -	default: -		panic(internalErrorf("unexpected object %v (%T)", obj, obj)) -	} -} - -func (p *exporter) pos(obj types.Object) { -	if !p.posInfoFormat { -		return -	} - -	file, line := p.fileLine(obj) -	if file == p.prevFile { -		// common case: write line delta -		// delta == 0 means different file or no line change -		delta := line - p.prevLine -		p.int(delta) -		if delta == 0 { -			p.int(-1) // -1 means no file change -		} -	} else { -		// different file -		p.int(0) -		// Encode filename as length of common prefix with previous -		// filename, followed by (possibly empty) suffix. Filenames -		// frequently share path prefixes, so this can save a lot -		// of space and make export data size less dependent on file -		// path length. The suffix is unlikely to be empty because -		// file names tend to end in ".go". 
-		n := commonPrefixLen(p.prevFile, file) -		p.int(n)           // n >= 0 -		p.string(file[n:]) // write suffix only -		p.prevFile = file -		p.int(line) -	} -	p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { -	if p.fset != nil { -		pos := p.fset.Position(obj.Pos()) -		file = pos.Filename -		line = pos.Line -	} -	return -} - -func commonPrefixLen(a, b string) int { -	if len(a) > len(b) { -		a, b = b, a -	} -	// len(a) <= len(b) -	i := 0 -	for i < len(a) && a[i] == b[i] { -		i++ -	} -	return i -} - -func (p *exporter) qualifiedName(obj types.Object) { -	p.string(obj.Name()) -	p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { -	if t == nil { -		panic(internalError("nil type")) -	} - -	// Possible optimization: Anonymous pointer types *T where -	// T is a named type are common. We could canonicalize all -	// such types *T to a single type PT = *T. This would lead -	// to at most one *T entry in typIndex, and all future *T's -	// would be encoded as the respective index directly. Would -	// save 1 byte (pointerTag) per *T and reduce the typIndex -	// size (at the cost of a canonicalization map). We can do -	// this later, without encoding format change. - -	// if we saw the type before, write its index (>= 0) -	if i, ok := p.typIndex[t]; ok { -		p.index('T', i) -		return -	} - -	// otherwise, remember the type, write the type tag (< 0) and type data -	if trackAllTypes { -		if trace { -			p.tracef("T%d = {>\n", len(p.typIndex)) -			defer p.tracef("<\n} ") -		} -		p.typIndex[t] = len(p.typIndex) -	} - -	switch t := t.(type) { -	case *types.Named: -		if !trackAllTypes { -			// if we don't track all types, track named types now -			p.typIndex[t] = len(p.typIndex) -		} - -		p.tag(namedTag) -		p.pos(t.Obj()) -		p.qualifiedName(t.Obj()) -		p.typ(t.Underlying()) -		if !types.IsInterface(t) { -			p.assocMethods(t) -		} - -	case *types.Array: -		p.tag(arrayTag) -		p.int64(t.Len()) -		p.typ(t.Elem()) - -	case *types.Slice: -		p.tag(sliceTag) -		p.typ(t.Elem()) - -	case *dddSlice: -		p.tag(dddTag) -		p.typ(t.elem) - -	case *types.Struct: -		p.tag(structTag) -		p.fieldList(t) - -	case *types.Pointer: -		p.tag(pointerTag) -		p.typ(t.Elem()) - -	case *types.Signature: -		p.tag(signatureTag) -		p.paramList(t.Params(), t.Variadic()) -		p.paramList(t.Results(), false) - -	case *types.Interface: -		p.tag(interfaceTag) -		p.iface(t) - -	case *types.Map: -		p.tag(mapTag) -		p.typ(t.Key()) -		p.typ(t.Elem()) - -	case *types.Chan: -		p.tag(chanTag) -		p.int(int(3 - t.Dir())) // hack -		p.typ(t.Elem()) - -	default: -		panic(internalErrorf("unexpected type %T: %s", t, t)) -	} -} - -func (p *exporter) assocMethods(named *types.Named) { -	// Sort methods (for determinism). 
-	var methods []*types.Func -	for i := 0; i < named.NumMethods(); i++ { -		methods = append(methods, named.Method(i)) -	} -	sort.Sort(methodsByName(methods)) - -	p.int(len(methods)) - -	if trace && methods != nil { -		p.tracef("associated methods {>\n") -	} - -	for i, m := range methods { -		if trace && i > 0 { -			p.tracef("\n") -		} - -		p.pos(m) -		name := m.Name() -		p.string(name) -		if !exported(name) { -			p.pkg(m.Pkg(), false) -		} - -		sig := m.Type().(*types.Signature) -		p.paramList(types.NewTuple(sig.Recv()), false) -		p.paramList(sig.Params(), sig.Variadic()) -		p.paramList(sig.Results(), false) -		p.int(0) // dummy value for go:nointerface pragma - ignored by importer -	} - -	if trace && methods != nil { -		p.tracef("<\n} ") -	} -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int           { return len(x) } -func (x methodsByName) Swap(i, j int)      { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { -	if trace && t.NumFields() > 0 { -		p.tracef("fields {>\n") -		defer p.tracef("<\n} ") -	} - -	p.int(t.NumFields()) -	for i := 0; i < t.NumFields(); i++ { -		if trace && i > 0 { -			p.tracef("\n") -		} -		p.field(t.Field(i)) -		p.string(t.Tag(i)) -	} -} - -func (p *exporter) field(f *types.Var) { -	if !f.IsField() { -		panic(internalError("field expected")) -	} - -	p.pos(f) -	p.fieldName(f) -	p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { -	// TODO(gri): enable importer to load embedded interfaces, -	// then emit Embeddeds and ExplicitMethods separately here. -	p.int(0) - -	n := t.NumMethods() -	if trace && n > 0 { -		p.tracef("methods {>\n") -		defer p.tracef("<\n} ") -	} -	p.int(n) -	for i := 0; i < n; i++ { -		if trace && i > 0 { -			p.tracef("\n") -		} -		p.method(t.Method(i)) -	} -} - -func (p *exporter) method(m *types.Func) { -	sig := m.Type().(*types.Signature) -	if sig.Recv() == nil { -		panic(internalError("method expected")) -	} - -	p.pos(m) -	p.string(m.Name()) -	if m.Name() != "_" && !token.IsExported(m.Name()) { -		p.pkg(m.Pkg(), false) -	} - -	// interface method; no need to encode receiver. -	p.paramList(sig.Params(), sig.Variadic()) -	p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { -	name := f.Name() - -	if f.Anonymous() { -		// anonymous field - we distinguish between 3 cases: -		// 1) field name matches base type name and is exported -		// 2) field name matches base type name and is not exported -		// 3) field name doesn't match base type name (alias name) -		bname := basetypeName(f.Type()) -		if name == bname { -			if token.IsExported(name) { -				name = "" // 1) we don't need to know the field name or package -			} else { -				name = "?" // 2) use unexported name "?" 
to force package export -			} -		} else { -			// 3) indicate alias and export name as is -			// (this requires an extra "@" but this is a rare case) -			p.string("@") -		} -	} - -	p.string(name) -	if name != "" && !token.IsExported(name) { -		p.pkg(f.Pkg(), false) -	} -} - -func basetypeName(typ types.Type) string { -	switch typ := deref(typ).(type) { -	case *types.Basic: -		return typ.Name() -	case *types.Named: -		return typ.Obj().Name() -	default: -		return "" // unnamed type -	} -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { -	// use negative length to indicate unnamed parameters -	// (look at the first parameter only since either all -	// names are present or all are absent) -	n := params.Len() -	if n > 0 && params.At(0).Name() == "" { -		n = -n -	} -	p.int(n) -	for i := 0; i < params.Len(); i++ { -		q := params.At(i) -		t := q.Type() -		if variadic && i == params.Len()-1 { -			t = &dddSlice{t.(*types.Slice).Elem()} -		} -		p.typ(t) -		if n > 0 { -			name := q.Name() -			p.string(name) -			if name != "_" { -				p.pkg(q.Pkg(), false) -			} -		} -		p.string("") // no compiler-specific info -	} -} - -func (p *exporter) value(x constant.Value) { -	if trace { -		p.tracef("= ") -	} - -	switch x.Kind() { -	case constant.Bool: -		tag := falseTag -		if constant.BoolVal(x) { -			tag = trueTag -		} -		p.tag(tag) - -	case constant.Int: -		if v, exact := constant.Int64Val(x); exact { -			// common case: x fits into an int64 - use compact encoding -			p.tag(int64Tag) -			p.int64(v) -			return -		} -		// uncommon case: large x - use float encoding -		// (powers of 2 will be encoded efficiently with exponent) -		p.tag(floatTag) -		p.float(constant.ToFloat(x)) - -	case constant.Float: -		p.tag(floatTag) -		p.float(x) - -	case constant.Complex: -		p.tag(complexTag) -		p.float(constant.Real(x)) -		p.float(constant.Imag(x)) - -	case constant.String: -		p.tag(stringTag) -		p.string(constant.StringVal(x)) - -	case constant.Unknown: -		// package contains type errors -		p.tag(unknownTag) - -	default: -		panic(internalErrorf("unexpected value %v (%T)", x, x)) -	} -} - -func (p *exporter) float(x constant.Value) { -	if x.Kind() != constant.Float { -		panic(internalErrorf("unexpected constant %v, want float", x)) -	} -	// extract sign (there is no -0) -	sign := constant.Sign(x) -	if sign == 0 { -		// x == 0 -		p.int(0) -		return -	} -	// x != 0 - -	var f big.Float -	if v, exact := constant.Float64Val(x); exact { -		// float64 -		f.SetFloat64(v) -	} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { -		// TODO(gri): add big.Rat accessor to constant.Value. -		r := valueToRat(num) -		f.SetRat(r.Quo(r, valueToRat(denom))) -	} else { -		// Value too large to represent as a fraction => inaccessible. -		// TODO(gri): add big.Float accessor to constant.Value. -		f.SetFloat64(math.MaxFloat64) // FIXME -	} - -	// extract exponent such that 0.5 <= m < 1.0 -	var m big.Float -	exp := f.MantExp(&m) - -	// extract mantissa as *big.Int -	// - set exponent large enough so mant satisfies mant.IsInt() -	// - get *big.Int from mant -	m.SetMantExp(&m, int(m.MinPrec())) -	mant, acc := m.Int(nil) -	if acc != big.Exact { -		panic(internalError("internal error")) -	} - -	p.int(sign) -	p.int(exp) -	p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { -	// Convert little-endian to big-endian. -	// I can't believe this is necessary. 
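The float encoder above stores a constant as a sign, a base-2 exponent, and the big-endian bytes of an integer mantissa, so that the value equals mant * 2**(exp - bitlen(mant)). A rough round-trip sketch of that decomposition with math/big; encodeFloat and decodeFloat are illustrative names, and an ordinary *big.Float stands in for constant.Value.

package main

import (
	"fmt"
	"math/big"
)

// encodeFloat splits f into (sign, exp, mantissa bytes), mirroring the removed
// exporter: exp is chosen so that f = m * 2**exp with 0.5 <= |m| < 1, and m is
// then scaled up to an exact integer whose bytes are emitted.
func encodeFloat(f *big.Float) (sign, exp int, mant []byte) {
	sign = f.Sign()
	if sign == 0 {
		return 0, 0, nil
	}
	var m big.Float
	exp = f.MantExp(&m)                // f = m * 2**exp
	m.SetMantExp(&m, int(m.MinPrec())) // scale m up to an integer
	i, _ := m.Int(nil)                 // exact by construction
	return sign, exp, i.Bytes()
}

// decodeFloat reverses encodeFloat: the integer mantissa is shifted back down
// by its bit length, which is what the importer's exponent adjustment loop
// computes byte by byte.
func decodeFloat(sign, exp int, mant []byte) *big.Float {
	f := new(big.Float)
	if sign == 0 {
		return f
	}
	i := new(big.Int).SetBytes(mant)
	f.SetInt(i)
	f.SetMantExp(f, exp-i.BitLen())
	if sign < 0 {
		f.Neg(f)
	}
	return f
}

func main() {
	x := big.NewFloat(-6.25)
	s, e, m := encodeFloat(x)
	fmt.Println(x, decodeFloat(s, e, m)) // -6.25 -6.25
}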
-	bytes := constant.Bytes(x) -	for i := 0; i < len(bytes)/2; i++ { -		bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] -	} -	return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { -	if trace { -		p.tracef("[") -		defer p.tracef("= %v] ", b) -	} - -	x := 0 -	if b { -		x = 1 -	} -	p.int(x) -	return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { -	if index < 0 { -		panic(internalError("invalid index < 0")) -	} -	if debugFormat { -		p.marker('t') -	} -	if trace { -		p.tracef("%c%d ", marker, index) -	} -	p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { -	if tag >= 0 { -		panic(internalError("invalid tag >= 0")) -	} -	if debugFormat { -		p.marker('t') -	} -	if trace { -		p.tracef("%s ", tagString[-tag]) -	} -	p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { -	p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { -	if debugFormat { -		p.marker('i') -	} -	if trace { -		p.tracef("%d ", x) -	} -	p.rawInt64(x) -} - -func (p *exporter) string(s string) { -	if debugFormat { -		p.marker('s') -	} -	if trace { -		p.tracef("%q ", s) -	} -	// if we saw the string before, write its index (>= 0) -	// (the empty string is mapped to 0) -	if i, ok := p.strIndex[s]; ok { -		p.rawInt64(int64(i)) -		return -	} -	// otherwise, remember string and write its negative length and bytes -	p.strIndex[s] = len(p.strIndex) -	p.rawInt64(-int64(len(s))) -	for i := 0; i < len(s); i++ { -		p.rawByte(s[i]) -	} -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { -	p.rawByte(m) -	// Enable this for help tracking down the location -	// of an incorrect marker when running in debugFormat. -	if false && trace { -		p.tracef("#%d ", p.written) -	} -	p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { -	var tmp [binary.MaxVarintLen64]byte -	n := binary.PutVarint(tmp[:], x) -	for i := 0; i < n; i++ { -		p.rawByte(tmp[i]) -	} -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { -	for i := 0; i < len(s); i++ { -		p.rawByte(s[i]) -	} -	p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. -// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -//	'$'  => '|' 'S' -//	'|'  => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { -	switch b { -	case '$': -		// write '$' as '|' 'S' -		b = 'S' -		fallthrough -	case '|': -		// write '|' as '|' '|' -		p.out.WriteByte('|') -		p.written++ -	} -	p.out.WriteByte(b) -	p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. 
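rawByte above escapes the output so that the two-byte sequence "$$" can never occur inside the payload and can therefore safely terminate the export data; the matching reader further down in bimport.go reverses the mapping. A small stand-alone sketch of that escaping, with escape and unescape as illustrative names.

package main

import "fmt"

// escape rewrites '$' as "|S" and '|' as "||", so the escaped stream never
// contains '$' and "$$" can be appended as an unambiguous end marker.
func escape(data []byte) []byte {
	var out []byte
	for _, b := range data {
		switch b {
		case '$':
			out = append(out, '|', 'S')
		case '|':
			out = append(out, '|', '|')
		default:
			out = append(out, b)
		}
	}
	return out
}

// unescape reverses escape, mirroring the reader's rawByte.
func unescape(data []byte) []byte {
	var out []byte
	for i := 0; i < len(data); i++ {
		b := data[i]
		if b == '|' {
			i++
			if data[i] == 'S' {
				b = '$'
			} // "||" decodes back to '|'
		}
		out = append(out, b)
	}
	return out
}

func main() {
	msg := []byte("cost: $5 | qty: 2")
	enc := escape(msg)
	fmt.Printf("%s\n%s\n", enc, unescape(enc)) // round-trips; enc contains no '$'
}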
-func (p *exporter) tracef(format string, args ...interface{}) { -	if strings.ContainsAny(format, "<>\n") { -		var buf bytes.Buffer -		for i := 0; i < len(format); i++ { -			// no need to deal with runes -			ch := format[i] -			switch ch { -			case '>': -				p.indent++ -				continue -			case '<': -				p.indent-- -				continue -			} -			buf.WriteByte(ch) -			if ch == '\n' { -				for j := p.indent; j > 0; j-- { -					buf.WriteString(".  ") -				} -			} -		} -		format = buf.String() -	} -	fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ -	// Packages -	-packageTag: "package", - -	// Types -	-namedTag:     "named type", -	-arrayTag:     "array", -	-sliceTag:     "slice", -	-dddTag:       "ddd", -	-structTag:    "struct", -	-pointerTag:   "pointer", -	-signatureTag: "signature", -	-interfaceTag: "interface", -	-mapTag:       "map", -	-chanTag:      "chan", - -	// Values -	-falseTag:    "false", -	-trueTag:     "true", -	-int64Tag:    "int64", -	-floatTag:    "float", -	-fractionTag: "fraction", -	-complexTag:  "complex", -	-stringTag:   "string", -	-unknownTag:  "unknown", - -	// Type aliases -	-aliasTag: "alias", -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index b85de0147..d98b0db2a 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -2,340 +2,24 @@  // Use of this source code is governed by a BSD-style  // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go.  package gcimporter  import ( -	"encoding/binary"  	"fmt" -	"go/constant"  	"go/token"  	"go/types" -	"sort" -	"strconv" -	"strings"  	"sync" -	"unicode" -	"unicode/utf8"  ) -type importer struct { -	imports    map[string]*types.Package -	data       []byte -	importpath string -	buf        []byte // for reading strings -	version    int    // export format version - -	// object lists -	strList       []string           // in order of appearance -	pathList      []string           // in order of appearance -	pkgList       []*types.Package   // in order of appearance -	typList       []types.Type       // in order of appearance -	interfaceList []*types.Interface // for delayed completion only -	trackAllTypes bool - -	// position encoding -	posInfoFormat bool -	prevFile      string -	prevLine      int -	fake          fakeFileSet - -	// debugging support -	debugFormat bool -	read        int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { -	// catch panics and return them as errors -	const currentVersion = 6 -	version := -1 // unknown version -	defer func() { -		if e := recover(); e != nil { -			// Return a (possibly nil or incomplete) package unchanged (see #16088). 
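The decode helpers in this package report problems by calling errorf, which panics, and the exported entry points such as BImportData above (and iimportCommon later in this diff) recover those panics and turn them into errors whose wording depends on how far version detection got. A condensed sketch of that pattern under those assumptions; importData is a made-up entry point, not part of the package.

package main

import "fmt"

// errorf reports an internal decoding problem by panicking; the entry point
// below converts the panic back into an ordinary error.
func errorf(format string, args ...interface{}) {
	panic(fmt.Sprintf(format, args...))
}

// importData is a toy entry point: it recovers from errorf panics and picks
// the error message based on the export data version it managed to read.
func importData(data []byte) (err error) {
	const currentVersion = 6
	version := -1
	defer func() {
		if e := recover(); e != nil {
			if version > currentVersion {
				err = fmt.Errorf("export data is newer version (%v) - update tool", e)
			} else {
				err = fmt.Errorf("possibly corrupt or mismatched export data: %v", e)
			}
		}
	}()

	if len(data) == 0 {
		errorf("empty export data")
	}
	version = int(data[0])
	// ... real decoding would continue here ...
	return nil
}

func main() {
	fmt.Println(importData(nil)) // possibly corrupt or mismatched export data: empty export data
}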
-			if version > currentVersion { -				err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) -			} else { -				err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) -			} -		} -	}() - -	p := importer{ -		imports:    imports, -		data:       data, -		importpath: path, -		version:    version, -		strList:    []string{""}, // empty string is mapped to 0 -		pathList:   []string{""}, // empty string is mapped to 0 -		fake: fakeFileSet{ -			fset:  fset, -			files: make(map[string]*fileInfo), -		}, -	} -	defer p.fake.setLines() // set lines for files in fset - -	// read version info -	var versionstr string -	if b := p.rawByte(); b == 'c' || b == 'd' { -		// Go1.7 encoding; first byte encodes low-level -		// encoding format (compact vs debug). -		// For backward-compatibility only (avoid problems with -		// old installed packages). Newly compiled packages use -		// the extensible format string. -		// TODO(gri) Remove this support eventually; after Go1.8. -		if b == 'd' { -			p.debugFormat = true -		} -		p.trackAllTypes = p.rawByte() == 'a' -		p.posInfoFormat = p.int() != 0 -		versionstr = p.string() -		if versionstr == "v1" { -			version = 0 -		} -	} else { -		// Go1.8 extensible encoding -		// read version string and extract version number (ignore anything after the version number) -		versionstr = p.rawStringln(b) -		if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { -			if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { -				version = v -			} -		} -	} -	p.version = version - -	// read version specific flags - extend as necessary -	switch p.version { -	// case currentVersion: -	// 	... -	//	fallthrough -	case currentVersion, 5, 4, 3, 2, 1: -		p.debugFormat = p.rawStringln(p.rawByte()) == "debug" -		p.trackAllTypes = p.int() != 0 -		p.posInfoFormat = p.int() != 0 -	case 0: -		// Go1.7 encoding format - nothing to do here -	default: -		errorf("unknown bexport format version %d (%q)", p.version, versionstr) -	} - -	// --- generic export data --- - -	// populate typList with predeclared "known" types -	p.typList = append(p.typList, predeclared()...) - -	// read package data -	pkg = p.pkg() - -	// read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) -	objcount := 0 -	for { -		tag := p.tagOrIndex() -		if tag == endTag { -			break -		} -		p.obj(tag) -		objcount++ -	} - -	// self-verification -	if count := p.int(); count != objcount { -		errorf("got %d objects; want %d", objcount, count) -	} - -	// ignore compiler-specific import data - -	// complete interfaces -	// TODO(gri) re-investigate if we still need to do this in a delayed fashion -	for _, typ := range p.interfaceList { -		typ.Complete() -	} - -	// record all referenced packages as imports -	list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
-	sort.Sort(byPath(list)) -	pkg.SetImports(list) - -	// package was imported completely and without errors -	pkg.MarkComplete() - -	return p.read, pkg, nil -} -  func errorf(format string, args ...interface{}) {  	panic(fmt.Sprintf(format, args...))  } -func (p *importer) pkg() *types.Package { -	// if the package was seen before, i is its index (>= 0) -	i := p.tagOrIndex() -	if i >= 0 { -		return p.pkgList[i] -	} - -	// otherwise, i is the package tag (< 0) -	if i != packageTag { -		errorf("unexpected package tag %d version %d", i, p.version) -	} - -	// read package data -	name := p.string() -	var path string -	if p.version >= 5 { -		path = p.path() -	} else { -		path = p.string() -	} -	if p.version >= 6 { -		p.int() // package height; unused by go/types -	} - -	// we should never see an empty package name -	if name == "" { -		errorf("empty package name in import") -	} - -	// an empty path denotes the package we are currently importing; -	// it must be the first package we see -	if (path == "") != (len(p.pkgList) == 0) { -		errorf("package path %q for pkg index %d", path, len(p.pkgList)) -	} - -	// if the package was imported before, use that one; otherwise create a new one -	if path == "" { -		path = p.importpath -	} -	pkg := p.imports[path] -	if pkg == nil { -		pkg = types.NewPackage(path, name) -		p.imports[path] = pkg -	} else if pkg.Name() != name { -		errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) -	} -	p.pkgList = append(p.pkgList, pkg) - -	return pkg -} - -// objTag returns the tag value for each object kind. -func objTag(obj types.Object) int { -	switch obj.(type) { -	case *types.Const: -		return constTag -	case *types.TypeName: -		return typeTag -	case *types.Var: -		return varTag -	case *types.Func: -		return funcTag -	default: -		errorf("unexpected object: %v (%T)", obj, obj) // panics -		panic("unreachable") -	} -} - -func sameObj(a, b types.Object) bool { -	// Because unnamed types are not canonicalized, we cannot simply compare types for -	// (pointer) identity. -	// Ideally we'd check equality of constant values as well, but this is good enough. -	return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { -	pkg := obj.Pkg() -	if alt := pkg.Scope().Insert(obj); alt != nil { -		// This can only trigger if we import a (non-type) object a second time. -		// Excluding type aliases, this cannot happen because 1) we only import a package -		// once; and b) we ignore compiler-specific export data which may contain -		// functions whose inlined function bodies refer to other functions that -		// were already imported. -		// However, type aliases require reexporting the original type, so we need -		// to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, -		// method importer.obj, switch case importing functions). -		// TODO(gri) review/update this comment once the gc compiler handles type aliases. 
-		if !sameObj(obj, alt) { -			errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) -		} -	} -} - -func (p *importer) obj(tag int) { -	switch tag { -	case constTag: -		pos := p.pos() -		pkg, name := p.qualifiedName() -		typ := p.typ(nil, nil) -		val := p.value() -		p.declare(types.NewConst(pos, pkg, name, typ, val)) - -	case aliasTag: -		// TODO(gri) verify type alias hookup is correct -		pos := p.pos() -		pkg, name := p.qualifiedName() -		typ := p.typ(nil, nil) -		p.declare(types.NewTypeName(pos, pkg, name, typ)) - -	case typeTag: -		p.typ(nil, nil) - -	case varTag: -		pos := p.pos() -		pkg, name := p.qualifiedName() -		typ := p.typ(nil, nil) -		p.declare(types.NewVar(pos, pkg, name, typ)) - -	case funcTag: -		pos := p.pos() -		pkg, name := p.qualifiedName() -		params, isddd := p.paramList() -		result, _ := p.paramList() -		sig := types.NewSignature(nil, params, result, isddd) -		p.declare(types.NewFunc(pos, pkg, name, sig)) - -	default: -		errorf("unexpected object tag %d", tag) -	} -} -  const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go -func (p *importer) pos() token.Pos { -	if !p.posInfoFormat { -		return token.NoPos -	} - -	file := p.prevFile -	line := p.prevLine -	delta := p.int() -	line += delta -	if p.version >= 5 { -		if delta == deltaNewFile { -			if n := p.int(); n >= 0 { -				// file changed -				file = p.path() -				line = n -			} -		} -	} else { -		if delta == 0 { -			if n := p.int(); n >= 0 { -				// file changed -				file = p.prevFile[:n] + p.string() -				line = p.int() -			} -		} -	} -	p.prevFile = file -	p.prevLine = line - -	return p.fake.pos(file, line, 0) -} -  // Synthesize a token.Pos  type fakeFileSet struct {  	fset  *token.FileSet @@ -389,205 +73,6 @@ var (  	fakeLinesOnce sync.Once  ) -func (p *importer) qualifiedName() (pkg *types.Package, name string) { -	name = p.string() -	pkg = p.pkg() -	return -} - -func (p *importer) record(t types.Type) { -	p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. -type dddSlice struct { -	elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string         { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. -// -// A non-nil tname is used as the "owner" of the result type; i.e., -// the result type is the underlying type of tname. tname is used -// to give interface methods a named receiver type where possible. 
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { -	// if the type was seen before, i is its index (>= 0) -	i := p.tagOrIndex() -	if i >= 0 { -		return p.typList[i] -	} - -	// otherwise, i is the type tag (< 0) -	switch i { -	case namedTag: -		// read type object -		pos := p.pos() -		parent, name := p.qualifiedName() -		scope := parent.Scope() -		obj := scope.Lookup(name) - -		// if the object doesn't exist yet, create and insert it -		if obj == nil { -			obj = types.NewTypeName(pos, parent, name, nil) -			scope.Insert(obj) -		} - -		if _, ok := obj.(*types.TypeName); !ok { -			errorf("pkg = %s, name = %s => %s", parent, name, obj) -		} - -		// associate new named type with obj if it doesn't exist yet -		t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - -		// but record the existing type, if any -		tname := obj.Type().(*types.Named) // tname is either t0 or the existing type -		p.record(tname) - -		// read underlying type -		t0.SetUnderlying(p.typ(parent, t0)) - -		// interfaces don't have associated methods -		if types.IsInterface(t0) { -			return tname -		} - -		// read associated methods -		for i := p.int(); i > 0; i-- { -			// TODO(gri) replace this with something closer to fieldName -			pos := p.pos() -			name := p.string() -			if !exported(name) { -				p.pkg() -			} - -			recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? -			params, isddd := p.paramList() -			result, _ := p.paramList() -			p.int() // go:nointerface pragma - discarded - -			sig := types.NewSignature(recv.At(0), params, result, isddd) -			t0.AddMethod(types.NewFunc(pos, parent, name, sig)) -		} - -		return tname - -	case arrayTag: -		t := new(types.Array) -		if p.trackAllTypes { -			p.record(t) -		} - -		n := p.int64() -		*t = *types.NewArray(p.typ(parent, nil), n) -		return t - -	case sliceTag: -		t := new(types.Slice) -		if p.trackAllTypes { -			p.record(t) -		} - -		*t = *types.NewSlice(p.typ(parent, nil)) -		return t - -	case dddTag: -		t := new(dddSlice) -		if p.trackAllTypes { -			p.record(t) -		} - -		t.elem = p.typ(parent, nil) -		return t - -	case structTag: -		t := new(types.Struct) -		if p.trackAllTypes { -			p.record(t) -		} - -		*t = *types.NewStruct(p.fieldList(parent)) -		return t - -	case pointerTag: -		t := new(types.Pointer) -		if p.trackAllTypes { -			p.record(t) -		} - -		*t = *types.NewPointer(p.typ(parent, nil)) -		return t - -	case signatureTag: -		t := new(types.Signature) -		if p.trackAllTypes { -			p.record(t) -		} - -		params, isddd := p.paramList() -		result, _ := p.paramList() -		*t = *types.NewSignature(nil, params, result, isddd) -		return t - -	case interfaceTag: -		// Create a dummy entry in the type list. This is safe because we -		// cannot expect the interface type to appear in a cycle, as any -		// such cycle must contain a named type which would have been -		// first defined earlier. -		// TODO(gri) Is this still true now that we have type aliases? -		// See issue #23225. 
-		n := len(p.typList) -		if p.trackAllTypes { -			p.record(nil) -		} - -		var embeddeds []types.Type -		for n := p.int(); n > 0; n-- { -			p.pos() -			embeddeds = append(embeddeds, p.typ(parent, nil)) -		} - -		t := newInterface(p.methodList(parent, tname), embeddeds) -		p.interfaceList = append(p.interfaceList, t) -		if p.trackAllTypes { -			p.typList[n] = t -		} -		return t - -	case mapTag: -		t := new(types.Map) -		if p.trackAllTypes { -			p.record(t) -		} - -		key := p.typ(parent, nil) -		val := p.typ(parent, nil) -		*t = *types.NewMap(key, val) -		return t - -	case chanTag: -		t := new(types.Chan) -		if p.trackAllTypes { -			p.record(t) -		} - -		dir := chanDir(p.int()) -		val := p.typ(parent, nil) -		*t = *types.NewChan(dir, val) -		return t - -	default: -		errorf("unexpected type tag %d", i) // panics -		panic("unreachable") -	} -} -  func chanDir(d int) types.ChanDir {  	// tag values must match the constants in cmd/compile/internal/gc/go.go  	switch d { @@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir {  	}  } -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { -	if n := p.int(); n > 0 { -		fields = make([]*types.Var, n) -		tags = make([]string, n) -		for i := range fields { -			fields[i], tags[i] = p.field(parent) -		} -	} -	return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { -	pos := p.pos() -	pkg, name, alias := p.fieldName(parent) -	typ := p.typ(parent, nil) -	tag := p.string() - -	anonymous := false -	if name == "" { -		// anonymous field - typ must be T or *T and T must be a type name -		switch typ := deref(typ).(type) { -		case *types.Basic: // basic types are named types -			pkg = nil // // objects defined in Universe scope have no package -			name = typ.Name() -		case *types.Named: -			name = typ.Obj().Name() -		default: -			errorf("named base type expected") -		} -		anonymous = true -	} else if alias { -		// anonymous field: we have an explicit name because it's an alias -		anonymous = true -	} - -	return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { -	if n := p.int(); n > 0 { -		methods = make([]*types.Func, n) -		for i := range methods { -			methods[i] = p.method(parent, baseType) -		} -	} -	return -} - -func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { -	pos := p.pos() -	pkg, name, _ := p.fieldName(parent) -	// If we don't have a baseType, use a nil receiver. -	// A receiver using the actual interface type (which -	// we don't know yet) will be filled in when we call -	// types.Interface.Complete. 
-	var recv *types.Var -	if baseType != nil { -		recv = types.NewVar(token.NoPos, parent, "", baseType) -	} -	params, isddd := p.paramList() -	result, _ := p.paramList() -	sig := types.NewSignature(recv, params, result, isddd) -	return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { -	name = p.string() -	pkg = parent -	if pkg == nil { -		// use the imported package instead -		pkg = p.pkgList[0] -	} -	if p.version == 0 && name == "_" { -		// version 0 didn't export a package for _ fields -		return -	} -	switch name { -	case "": -		// 1) field name matches base type name and is exported: nothing to do -	case "?": -		// 2) field name matches base type name and is not exported: need package -		name = "" -		pkg = p.pkg() -	case "@": -		// 3) field name doesn't match type name (alias) -		name = p.string() -		alias = true -		fallthrough -	default: -		if !exported(name) { -			pkg = p.pkg() -		} -	} -	return -} - -func (p *importer) paramList() (*types.Tuple, bool) { -	n := p.int() -	if n == 0 { -		return nil, false -	} -	// negative length indicates unnamed parameters -	named := true -	if n < 0 { -		n = -n -		named = false -	} -	// n > 0 -	params := make([]*types.Var, n) -	isddd := false -	for i := range params { -		params[i], isddd = p.param(named) -	} -	return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { -	t := p.typ(nil, nil) -	td, isddd := t.(*dddSlice) -	if isddd { -		t = types.NewSlice(td.elem) -	} - -	var pkg *types.Package -	var name string -	if named { -		name = p.string() -		if name == "" { -			errorf("expected named parameter") -		} -		if name != "_" { -			pkg = p.pkg() -		} -		if i := strings.Index(name, "·"); i > 0 { -			name = name[:i] // cut off gc-specific parameter numbering -		} -	} - -	// read and discard compiler-specific info -	p.string() - -	return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { -	ch, _ := utf8.DecodeRuneInString(name) -	return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { -	switch tag := p.tagOrIndex(); tag { -	case falseTag: -		return constant.MakeBool(false) -	case trueTag: -		return constant.MakeBool(true) -	case int64Tag: -		return constant.MakeInt64(p.int64()) -	case floatTag: -		return p.float() -	case complexTag: -		re := p.float() -		im := p.float() -		return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) -	case stringTag: -		return constant.MakeString(p.string()) -	case unknownTag: -		return constant.MakeUnknown() -	default: -		errorf("unexpected value tag %d", tag) // panics -		panic("unreachable") -	} -} - -func (p *importer) float() constant.Value { -	sign := p.int() -	if sign == 0 { -		return constant.MakeInt64(0) -	} - -	exp := p.int() -	mant := []byte(p.string()) // big endian - -	// remove leading 0's if any -	for len(mant) > 0 && mant[0] == 0 { -		mant = mant[1:] -	} - -	// convert to little endian -	// TODO(gri) go/constant should have a more direct conversion function -	//           (e.g., once it supports a big.Float based implementation) -	for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { -		mant[i], mant[j] = mant[j], mant[i] -	} - -	// adjust exponent (constant.MakeFromBytes creates an integer value, -	// but mant represents the mantissa bits such that 0.5 <= mant < 1.0) -	exp -= len(mant) << 3 -	if len(mant) > 0 { -		for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { -			exp++ -		} -	} - -	x := 
constant.MakeFromBytes(mant) -	switch { -	case exp < 0: -		d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) -		x = constant.BinaryOp(x, token.QUO, d) -	case exp > 0: -		x = constant.Shift(x, token.SHL, uint(exp)) -	} - -	if sign < 0 { -		x = constant.UnaryOp(token.SUB, x, 0) -	} -	return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { -	if p.debugFormat { -		p.marker('t') -	} - -	return int(p.rawInt64()) -} - -func (p *importer) int() int { -	x := p.int64() -	if int64(int(x)) != x { -		errorf("exported integer too large") -	} -	return int(x) -} - -func (p *importer) int64() int64 { -	if p.debugFormat { -		p.marker('i') -	} - -	return p.rawInt64() -} - -func (p *importer) path() string { -	if p.debugFormat { -		p.marker('p') -	} -	// if the path was seen before, i is its index (>= 0) -	// (the empty string is at index 0) -	i := p.rawInt64() -	if i >= 0 { -		return p.pathList[i] -	} -	// otherwise, i is the negative path length (< 0) -	a := make([]string, -i) -	for n := range a { -		a[n] = p.string() -	} -	s := strings.Join(a, "/") -	p.pathList = append(p.pathList, s) -	return s -} - -func (p *importer) string() string { -	if p.debugFormat { -		p.marker('s') -	} -	// if the string was seen before, i is its index (>= 0) -	// (the empty string is at index 0) -	i := p.rawInt64() -	if i >= 0 { -		return p.strList[i] -	} -	// otherwise, i is the negative string length (< 0) -	if n := int(-i); n <= cap(p.buf) { -		p.buf = p.buf[:n] -	} else { -		p.buf = make([]byte, n) -	} -	for i := range p.buf { -		p.buf[i] = p.rawByte() -	} -	s := string(p.buf) -	p.strList = append(p.strList, s) -	return s -} - -func (p *importer) marker(want byte) { -	if got := p.rawByte(); got != want { -		errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) -	} - -	pos := p.read -	if n := int(p.rawInt64()); n != pos { -		errorf("incorrect position: got %d; want %d", n, pos) -	} -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { -	i, err := binary.ReadVarint(p) -	if err != nil { -		errorf("read error: %v", err) -	} -	return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { -	p.buf = p.buf[:0] -	for b != '\n' { -		p.buf = append(p.buf, b) -		b = p.rawByte() -	} -	return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { -	return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { -	b := p.data[0] -	r := 1 -	if b == '|' { -		b = p.data[1] -		r = 2 -		switch b { -		case 'S': -			b = '$' -		case '|': -			// nothing to do -		default: -			errorf("unexpected escape sequence in export data") -		} -	} -	p.data = p.data[r:] -	p.read += r -	return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
-const ( -	// Objects -	packageTag = -(iota + 1) -	constTag -	typeTag -	varTag -	funcTag -	endTag - -	// Types -	namedTag -	arrayTag -	sliceTag -	dddTag -	structTag -	pointerTag -	signatureTag -	interfaceTag -	mapTag -	chanTag - -	// Values -	falseTag -	trueTag -	int64Tag -	floatTag -	fractionTag // not used by gc -	complexTag -	stringTag -	nilTag     // only used by gc (appears in exported inlined function bodies) -	unknownTag // not used by gc (only appears in packages with errors) - -	// Type aliases -	aliasTag -) -  var predeclOnce sync.Once  var predecl []types.Type // initialized lazily diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 0372fb3a6..b1223713b 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -7,6 +7,18 @@  // Package gcimporter provides various functions for reading  // gc-generated object files that can be used to implement the  // Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package.  package gcimporter // import "golang.org/x/tools/internal/gcimporter"  import ( @@ -218,20 +230,17 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func  		// Or, define a new standard go/types/gcexportdata package.  		fset := token.NewFileSet() -		// The indexed export format starts with an 'i'; the older -		// binary export format starts with a 'c', 'd', or 'v' -		// (from "version"). Select appropriate importer. +		// Select appropriate importer.  		if len(data) > 0 {  			switch data[0] { -			case 'i': -				_, pkg, err := IImportData(fset, packages, data[1:], id) -				return pkg, err +			case 'v', 'c', 'd': // binary, till go1.10 +				return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) -			case 'v', 'c', 'd': -				_, pkg, err := BImportData(fset, packages, data, id) +			case 'i': // indexed, till go1.19 +				_, pkg, err := IImportData(fset, packages, data[1:], id)  				return pkg, err -			case 'u': +			case 'u': // unified, from go1.20  				_, pkg, err := UImportData(fset, packages, data[1:size], id)  				return pkg, err diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index ba53cdcdd..6103dd710 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -22,17 +22,23 @@ import (  	"strconv"  	"strings" +	"golang.org/x/tools/go/types/objectpath"  	"golang.org/x/tools/internal/tokeninternal"  	"golang.org/x/tools/internal/typeparams"  )  // IExportShallow encodes "shallow" export data for the specified package.  // -// No promises are made about the encoding other than that it can be -// decoded by the same version of IIExportShallow. 
If you plan to save -// export data in the file system, be sure to include a cryptographic -// digest of the executable in the key to avoid version skew. -func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { +// No promises are made about the encoding other than that it can be decoded by +// the same version of IIExportShallow. If you plan to save export data in the +// file system, be sure to include a cryptographic digest of the executable in +// the key to avoid version skew. +// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during export. +// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) {  	// In principle this operation can only fail if out.Write fails,  	// but that's impossible for bytes.Buffer---and as a matter of  	// fact iexportCommon doesn't even check for I/O errors. @@ -44,22 +50,30 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) {  	return out.Bytes(), err  } -// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow -// in the same executable. This function cannot import data from +// IImportShallow decodes "shallow" types.Package data encoded by +// IExportShallow in the same executable. This function cannot import data from  // cmd/compile or gcexportdata.Write. -func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) { +// +// The importer calls getPackages to obtain package symbols for all +// packages mentioned in the export data, including the one being +// decoded. +// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during import. +// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) {  	const bundle = false -	pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert) +	const shallow = true +	pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf)  	if err != nil {  		return nil, err  	}  	return pkgs[0], nil  } -// InsertType is the type of a function that creates a types.TypeName -// object for a named type and inserts it into the scope of the -// specified Package. -type InsertType = func(pkg *types.Package, name string) +// ReportFunc is the type of a function used to report formatted bugs. +type ReportFunc = func(string, ...interface{})  // Current bundled export format version. Increase with each format change.  
// 0: initial implementation @@ -313,8 +327,9 @@ type iexporter struct {  	out     *bytes.Buffer  	version int -	shallow  bool           // don't put types from other packages in the index -	localpkg *types.Package // (nil in bundle mode) +	shallow    bool                // don't put types from other packages in the index +	objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated +	localpkg   *types.Package      // (nil in bundle mode)  	// allPkgs tracks all packages that have been referenced by  	// the export data, so we can ensure to include them in the @@ -354,6 +369,17 @@ func (p *iexporter) trace(format string, args ...interface{}) {  	fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)  } +// objectpathEncoder returns the lazily allocated objectpath.Encoder to use +// when encoding objects in other packages during shallow export. +// +// Using a shared Encoder amortizes some of cost of objectpath search. +func (p *iexporter) objectpathEncoder() *objectpath.Encoder { +	if p.objEncoder == nil { +		p.objEncoder = new(objectpath.Encoder) +	} +	return p.objEncoder +} +  // stringOff returns the offset of s within the string section.  // If not already present, it's added to the end.  func (p *iexporter) stringOff(s string) uint64 { @@ -413,7 +439,6 @@ type exportWriter struct {  	p *iexporter  	data       intWriter -	currPkg    *types.Package  	prevFile   string  	prevLine   int64  	prevColumn int64 @@ -436,7 +461,6 @@ func (p *iexporter) doDecl(obj types.Object) {  		}()  	}  	w := p.newWriter() -	w.setPkg(obj.Pkg(), false)  	switch obj := obj.(type) {  	case *types.Var: @@ -673,6 +697,9 @@ func (w *exportWriter) qualifiedType(obj *types.TypeName) {  	w.pkg(obj.Pkg())  } +// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass +// it in explicitly into signatures and structs that may use it for +// constructing fields.  func (w *exportWriter) typ(t types.Type, pkg *types.Package) {  	w.data.uint64(w.p.typOff(t, pkg))  } @@ -764,30 +791,53 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {  	case *types.Signature:  		w.startType(signatureType) -		w.setPkg(pkg, true) +		w.pkg(pkg)  		w.signature(t)  	case *types.Struct:  		w.startType(structType)  		n := t.NumFields() +		// Even for struct{} we must emit some qualifying package, because that's +		// what the compiler does, and thus that's what the importer expects. +		fieldPkg := pkg  		if n > 0 { -			w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects -		} else { -			w.setPkg(pkg, true) +			fieldPkg = t.Field(0).Pkg() +		} +		if fieldPkg == nil { +			// TODO(rfindley): improve this very hacky logic. +			// +			// The importer expects a package to be set for all struct types, even +			// those with no fields. A better encoding might be to set NumFields +			// before pkg. setPkg panics with a nil package, which may be possible +			// to reach with invalid packages (and perhaps valid packages, too?), so +			// (arbitrarily) set the localpkg if available. +			// +			// Alternatively, we may be able to simply guarantee that pkg != nil, by +			// reconsidering the encoding of constant values. 
+			if w.p.shallow { +				fieldPkg = w.p.localpkg +			} else { +				panic(internalErrorf("no package to set for empty struct")) +			}  		} +		w.pkg(fieldPkg)  		w.uint64(uint64(n)) +  		for i := 0; i < n; i++ {  			f := t.Field(i) +			if w.p.shallow { +				w.objectPath(f) +			}  			w.pos(f.Pos())  			w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg -			w.typ(f.Type(), pkg) +			w.typ(f.Type(), fieldPkg)  			w.bool(f.Anonymous())  			w.string(t.Tag(i)) // note (or tag)  		}  	case *types.Interface:  		w.startType(interfaceType) -		w.setPkg(pkg, true) +		w.pkg(pkg)  		n := t.NumEmbeddeds()  		w.uint64(uint64(n)) @@ -802,10 +852,16 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {  			w.typ(ft, tPkg)  		} +		// See comment for struct fields. In shallow mode we change the encoding +		// for interface methods that are promoted from other packages. +  		n = t.NumExplicitMethods()  		w.uint64(uint64(n))  		for i := 0; i < n; i++ {  			m := t.ExplicitMethod(i) +			if w.p.shallow { +				w.objectPath(m) +			}  			w.pos(m.Pos())  			w.string(m.Name())  			sig, _ := m.Type().(*types.Signature) @@ -827,12 +883,61 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {  	}  } -func (w *exportWriter) setPkg(pkg *types.Package, write bool) { -	if write { -		w.pkg(pkg) +// objectPath writes the package and objectPath to use to look up obj in a +// different package, when encoding in "shallow" mode. +// +// When doing a shallow import, the importer creates only the local package, +// and requests package symbols for dependencies from the client. +// However, certain types defined in the local package may hold objects defined +// (perhaps deeply) within another package. +// +// For example, consider the following: +// +//	package a +//	func F() chan * map[string] struct { X int } +// +//	package b +//	import "a" +//	var B = a.F() +// +// In this example, the type of b.B holds fields defined in package a. +// In order to have the correct canonical objects for the field defined in the +// type of B, they are encoded as objectPaths and later looked up in the +// importer. The same problem applies to interface methods. +func (w *exportWriter) objectPath(obj types.Object) { +	if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { +		// obj.Pkg() may be nil for the builtin error.Error. +		// In this case, or if obj is declared in the local package, no need to +		// encode. +		w.string("") +		return  	} - -	w.currPkg = pkg +	objectPath, err := w.p.objectpathEncoder().For(obj) +	if err != nil { +		// Fall back to the empty string, which will cause the importer to create a +		// new object, which matches earlier behavior. Creating a new object is +		// sufficient for many purposes (such as type checking), but causes certain +		// references algorithms to fail (golang/go#60819). However, we didn't +		// notice this problem during months of gopls@v0.12.0 testing. +		// +		// TODO(golang/go#61674): this workaround is insufficient, as in the case +		// where the field forwarded from an instantiated type that may not appear +		// in the export data of the original package: +		// +		//  // package a +		//  type A[P any] struct{ F P } +		// +		//  // package b +		//  type B a.A[int] +		// +		// We need to update references algorithms not to depend on this +		// de-duplication, at which point we may want to simply remove the +		// workaround here. 
+		w.string("") +		return +	} +	w.string(string(objectPath)) +	w.pkg(obj.Pkg())  }  func (w *exportWriter) signature(sig *types.Signature) { @@ -913,6 +1018,17 @@ func (w *exportWriter) value(typ types.Type, v constant.Value) {  		w.int64(int64(v.Kind()))  	} +	if v.Kind() == constant.Unknown { +		// golang/go#60605: treat unknown constant values as if they have invalid type +		// +		// This loses some fidelity over the package type-checked from source, but that +		// is acceptable. +		// +		// TODO(rfindley): we should switch on the recorded constant kind rather +		// than the constant type +		return +	} +  	switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {  	case types.IsBoolean:  		w.bool(constant.BoolVal(v)) @@ -969,6 +1085,16 @@ func constantToFloat(x constant.Value) *big.Float {  	return &f  } +func valueToRat(x constant.Value) *big.Rat { +	// Convert little-endian to big-endian. +	// I can't believe this is necessary. +	bytes := constant.Bytes(x) +	for i := 0; i < len(bytes)/2; i++ { +		bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] +	} +	return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} +  // mpint exports a multi-precision integer.  //  // For unsigned types, small values are written out as a single @@ -1178,3 +1304,19 @@ func (q *objQueue) popHead() types.Object {  	q.head++  	return obj  } + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +// TODO(adonovan): make this call panic, so that it's symmetric with errorf. +// Otherwise it's easy to forget to do anything with the error. +// +// TODO(adonovan): also, consider switching the names "errorf" and +// "internalErrorf" as the former is used for bugs, whose cause is +// internal inconsistency, whereas the latter is used for ordinary +// situations like bad input, whose cause is external. +func internalErrorf(format string, args ...interface{}) error { +	return internalError(fmt.Sprintf(format, args...)) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 448f903e8..8e64cf644 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -21,6 +21,7 @@ import (  	"sort"  	"strings" +	"golang.org/x/tools/go/types/objectpath"  	"golang.org/x/tools/internal/typeparams"  ) @@ -85,7 +86,7 @@ const (  // If the export data version is not recognized or the format is otherwise  // compromised, an error is returned.  func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { -	pkgs, err := iimportCommon(fset, imports, data, false, path, nil) +	pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil)  	if err != nil {  		return 0, nil, err  	} @@ -94,10 +95,49 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []  // IImportBundle imports a set of packages from the serialized package bundle.  
func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { -	return iimportCommon(fset, imports, data, true, "", nil) +	return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil)  } -func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { +// A GetPackagesFunc function obtains the non-nil symbols for a set of +// packages, creating and recursively importing them as needed. An +// implementation should store each package symbol is in the Pkg +// field of the items array. +// +// Any error causes importing to fail. This can be used to quickly read +// the import manifest of an export data file without fully decoding it. +type GetPackagesFunc = func(items []GetPackagesItem) error + +// A GetPackagesItem is a request from the importer for the package +// symbol of the specified name and path. +type GetPackagesItem struct { +	Name, Path string +	Pkg        *types.Package // to be filled in by GetPackagesFunc call + +	// private importer state +	pathOffset uint64 +	nameIndex  map[string]uint64 +} + +// GetPackagesFromMap returns a GetPackagesFunc that retrieves +// packages from the given map of package path to package. +// +// The returned function may mutate m: each requested package that is not +// found is created with types.NewPackage and inserted into m. +func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { +	return func(items []GetPackagesItem) error { +		for i, item := range items { +			pkg, ok := m[item.Path] +			if !ok { +				pkg = types.NewPackage(item.Path, item.Name) +				m[item.Path] = pkg +			} +			items[i].Pkg = pkg +		} +		return nil +	} +} + +func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) {  	const currentVersion = iexportVersionCurrent  	version := int64(-1)  	if !debug { @@ -108,7 +148,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data  				} else if version > currentVersion {  					err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)  				} else { -					err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) +					err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e)  				}  			}  		}() @@ -117,11 +157,8 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data  	r := &intReader{bytes.NewReader(data), path}  	if bundle { -		bundleVersion := r.uint64() -		switch bundleVersion { -		case bundleVersion: -		default: -			errorf("unknown bundle format version %d", bundleVersion) +		if v := r.uint64(); v != bundleVersion { +			errorf("unknown bundle format version %d", v)  		}  	} @@ -139,7 +176,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data  	sLen := int64(r.uint64())  	var fLen int64  	var fileOffset []uint64 -	if insert != nil { +	if shallow {  		// Shallow mode uses a different position encoding.  		
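iimportCommon now asks the caller for package symbols through a single getPackages callback rather than reading from a map one path at a time, which lets a client resolve, or even load in parallel, every listed dependency before returning. A rough sketch of a client-side implementation backed by a cache; the types are re-declared locally under lowercase names only so the sketch compiles on its own, since the real GetPackagesFunc and GetPackagesItem declared above live in an internal package.

package main

import (
	"fmt"
	"go/types"
)

// getPackagesItem and getPackagesFunc mirror the GetPackagesItem and
// GetPackagesFunc declarations above, minus the private importer state.
type getPackagesItem struct {
	Name, Path string
	Pkg        *types.Package
}

type getPackagesFunc = func(items []getPackagesItem) error

// cachedGetPackages returns a callback that fills each requested item from a
// client-owned cache, creating any package it has not seen yet, much like
// GetPackagesFromMap, and rejecting name conflicts up front.
func cachedGetPackages(cache map[string]*types.Package) getPackagesFunc {
	return func(items []getPackagesItem) error {
		for i := range items {
			pkg, ok := cache[items[i].Path]
			if !ok {
				pkg = types.NewPackage(items[i].Path, items[i].Name)
				cache[items[i].Path] = pkg
			}
			if pkg.Name() != items[i].Name {
				return fmt.Errorf("conflicting names %s and %s for %q",
					pkg.Name(), items[i].Name, items[i].Path)
			}
			items[i].Pkg = pkg
		}
		return nil
	}
}

func main() {
	get := cachedGetPackages(map[string]*types.Package{})
	items := []getPackagesItem{{Name: "fmt", Path: "fmt"}}
	if err := get(items); err != nil {
		panic(err)
	}
	fmt.Println(items[0].Pkg.Path(), items[0].Pkg.Name()) // fmt fmt
}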
fLen = int64(r.uint64())  		fileOffset = make([]uint64, r.uint64()) @@ -158,7 +195,8 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data  	p := iimporter{  		version: int(version),  		ipath:   path, -		insert:  insert, +		shallow: shallow, +		reportf: reportf,  		stringData:  stringData,  		stringCache: make(map[uint64]string), @@ -185,8 +223,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data  		p.typCache[uint64(i)] = pt  	} -	pkgList := make([]*types.Package, r.uint64()) -	for i := range pkgList { +	// Gather the relevant packages from the manifest. +	items := make([]GetPackagesItem, r.uint64()) +	for i := range items {  		pkgPathOff := r.uint64()  		pkgPath := p.stringAt(pkgPathOff)  		pkgName := p.stringAt(r.uint64()) @@ -195,30 +234,42 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data  		if pkgPath == "" {  			pkgPath = path  		} -		pkg := imports[pkgPath] -		if pkg == nil { -			pkg = types.NewPackage(pkgPath, pkgName) -			imports[pkgPath] = pkg -		} else if pkg.Name() != pkgName { -			errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) -		} -		if i == 0 && !bundle { -			p.localpkg = pkg -		} - -		p.pkgCache[pkgPathOff] = pkg +		items[i].Name = pkgName +		items[i].Path = pkgPath +		items[i].pathOffset = pkgPathOff  		// Read index for package.  		nameIndex := make(map[string]uint64)  		nSyms := r.uint64() -		// In shallow mode we don't expect an index for other packages. -		assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil) +		// In shallow mode, only the current package (i=0) has an index. +		assert(!(shallow && i > 0 && nSyms != 0))  		for ; nSyms > 0; nSyms-- {  			name := p.stringAt(r.uint64())  			nameIndex[name] = r.uint64()  		} -		p.pkgIndex[pkg] = nameIndex +		items[i].nameIndex = nameIndex +	} + +	// Request packages all at once from the client, +	// enabling a parallel implementation. +	if err := getPackages(items); err != nil { +		return nil, err // don't wrap this error +	} + +	// Check the results and complete the index. +	pkgList := make([]*types.Package, len(items)) +	for i, item := range items { +		pkg := item.Pkg +		if pkg == nil { +			errorf("internal error: getPackages returned nil package for %q", item.Path) +		} else if pkg.Path() != item.Path { +			errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) +		} else if pkg.Name() != item.Name { +			errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) +		} +		p.pkgCache[item.pathOffset] = pkg +		p.pkgIndex[pkg] = item.nameIndex  		pkgList[i] = pkg  	} @@ -277,6 +328,13 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data  		typ.Complete()  	} +	// Workaround for golang/go#61561. See the doc for instanceList for details. 
+	for _, typ := range p.instanceList { +		if iface, _ := typ.Underlying().(*types.Interface); iface != nil { +			iface.Complete() +		} +	} +  	return pkgs, nil  } @@ -289,8 +347,8 @@ type iimporter struct {  	version int  	ipath   string -	localpkg *types.Package -	insert   func(pkg *types.Package, name string) // "shallow" mode only +	shallow bool +	reportf ReportFunc // if non-nil, used to report bugs  	stringData  []byte  	stringCache map[uint64]string @@ -307,6 +365,12 @@ type iimporter struct {  	fake          fakeFileSet  	interfaceList []*types.Interface +	// Workaround for the go/types bug golang/go#61561: instances produced during +	// instantiation may contain incomplete interfaces. Here we only complete the +	// underlying type of the instance, which is the most common case but doesn't +	// handle parameterized interface literals defined deeper in the type. +	instanceList []types.Type // instances for later completion (see golang/go#61561) +  	// Arguments for calls to SetConstraint that are deferred due to recursive types  	later []setConstraintArgs @@ -338,13 +402,9 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) {  	off, ok := p.pkgIndex[pkg][name]  	if !ok { -		// In "shallow" mode, call back to the application to -		// find the object and insert it into the package scope. -		if p.insert != nil { -			assert(pkg != p.localpkg) -			p.insert(pkg, name) // "can't fail" -			return -		} +		// In deep mode, the index should be complete. In shallow +		// mode, we should have already recursively loaded necessary +		// dependencies so the above Lookup succeeds.  		errorf("%v.%v not in index", pkg, name)  	} @@ -711,7 +771,8 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) {  }  func (r *importReader) pos() token.Pos { -	if r.p.insert != nil { // shallow mode +	if r.p.shallow { +		// precise offsets are encoded only in shallow mode  		return r.posv2()  	}  	if r.p.version >= iexportVersionPosCol { @@ -812,13 +873,28 @@ func (r *importReader) doType(base *types.Named) (res types.Type) {  		fields := make([]*types.Var, r.uint64())  		tags := make([]string, len(fields))  		for i := range fields { +			var field *types.Var +			if r.p.shallow { +				field, _ = r.objectPathObject().(*types.Var) +			} +  			fpos := r.pos()  			fname := r.ident()  			ftyp := r.typ()  			emb := r.bool()  			tag := r.string() -			fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) +			// Either this is not a shallow import, the field is local, or the +			// encoded objectPath failed to produce an object (a bug). +			// +			// Even in this last, buggy case, fall back on creating a new field. As +			// discussed in iexport.go, this is not correct, but mostly works and is +			// preferable to failing (for now at least). 
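The struct case here collects the decoded fields and their tags side by side and then combines them with types.NewStruct, as the code just below this loop does. A minimal go/types sketch of that final step; the field names and tags are made up.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/p", "p")

	// Build the fields and tags in parallel slices, then combine them,
	// mirroring the struct case of doType.
	fields := []*types.Var{
		types.NewField(token.NoPos, pkg, "Name", types.Typ[types.String], false),
		types.NewField(token.NoPos, pkg, "ID", types.Typ[types.Int], false),
	}
	tags := []string{`json:"name"`, `json:"id"`}

	st := types.NewStruct(fields, tags)
	for i := 0; i < st.NumFields(); i++ {
		fmt.Println(st.Field(i).Name(), st.Field(i).Type(), st.Tag(i))
	}
}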
+			if field == nil { +				field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) +			} + +			fields[i] = field  			tags[i] = tag  		}  		return types.NewStruct(fields, tags) @@ -834,6 +910,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) {  		methods := make([]*types.Func, r.uint64())  		for i := range methods { +			var method *types.Func +			if r.p.shallow { +				method, _ = r.objectPathObject().(*types.Func) +			} +  			mpos := r.pos()  			mname := r.ident() @@ -843,9 +924,12 @@ func (r *importReader) doType(base *types.Named) (res types.Type) {  			if base != nil {  				recv = types.NewVar(token.NoPos, r.currPkg, "", base)  			} -  			msig := r.signature(recv, nil, nil) -			methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + +			if method == nil { +				method = types.NewFunc(mpos, r.currPkg, mname, msig) +			} +			methods[i] = method  		}  		typ := newInterface(methods, embeddeds) @@ -883,6 +967,9 @@ func (r *importReader) doType(base *types.Named) (res types.Type) {  		// we must always use the methods of the base (orig) type.  		// TODO provide a non-nil *Environment  		t, _ := typeparams.Instantiate(nil, baseType, targs, false) + +		// Workaround for golang/go#61561. See the doc for instanceList for details. +		r.p.instanceList = append(r.p.instanceList, t)  		return t  	case unionType: @@ -901,6 +988,26 @@ func (r *importReader) kind() itag {  	return itag(r.uint64())  } +// objectPathObject is the inverse of exportWriter.objectPath. +// +// In shallow mode, certain fields and methods may need to be looked up in an +// imported package. See the doc for exportWriter.objectPath for a full +// explanation. +func (r *importReader) objectPathObject() types.Object { +	objPath := objectpath.Path(r.string()) +	if objPath == "" { +		return nil +	} +	pkg := r.pkg() +	obj, err := objectpath.Object(pkg, objPath) +	if err != nil { +		if r.p.reportf != nil { +			r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) +		} +	} +	return obj +} +  func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature {  	params := r.paramList()  	results := r.paramList() diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index b285a11ce..b977435f6 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -10,8 +10,10 @@  package gcimporter  import ( +	"fmt"  	"go/token"  	"go/types" +	"sort"  	"strings"  	"golang.org/x/tools/internal/pkgbits" @@ -62,6 +64,14 @@ type typeInfo struct {  }  func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { +	if !debug { +		defer func() { +			if x := recover(); x != nil { +				err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) +			} +		}() +	} +  	s := string(data)  	s = s[:strings.LastIndex(s, "\n$$\n")]  	input := pkgbits.NewPkgDecoder(path, s) @@ -121,6 +131,16 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st  		iface.Complete()  	} +	// Imports() of pkg are all of the transitive packages that were loaded. 
+	var imps []*types.Package +	for _, imp := range pr.pkgs { +		if imp != nil && imp != pkg { +			imps = append(imps, imp) +		} +	} +	sort.Sort(byPath(imps)) +	pkg.SetImports(imps) +  	pkg.MarkComplete()  	return pkg  } @@ -260,39 +280,9 @@ func (r *reader) doPkg() *types.Package {  	pkg := types.NewPackage(path, name)  	r.p.imports[path] = pkg -	imports := make([]*types.Package, r.Len()) -	for i := range imports { -		imports[i] = r.pkg() -	} -	pkg.SetImports(flattenImports(imports)) -  	return pkg  } -// flattenImports returns the transitive closure of all imported -// packages rooted from pkgs. -func flattenImports(pkgs []*types.Package) []*types.Package { -	var res []*types.Package -	seen := make(map[*types.Package]struct{}) -	for _, pkg := range pkgs { -		if _, ok := seen[pkg]; ok { -			continue -		} -		seen[pkg] = struct{}{} -		res = append(res, pkg) - -		// pkg.Imports() is already flattened. -		for _, pkg := range pkg.Imports() { -			if _, ok := seen[pkg]; ok { -				continue -			} -			seen[pkg] = struct{}{} -			res = append(res, pkg) -		} -	} -	return res -} -  // @@@ Types  func (r *reader) typ() types.Type { diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index d50551693..53cf66da0 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -8,10 +8,12 @@ package gocommand  import (  	"bytes"  	"context" +	"errors"  	"fmt"  	"io"  	"log"  	"os" +	"reflect"  	"regexp"  	"runtime"  	"strconv" @@ -22,6 +24,9 @@ import (  	exec "golang.org/x/sys/execabs"  	"golang.org/x/tools/internal/event" +	"golang.org/x/tools/internal/event/keys" +	"golang.org/x/tools/internal/event/label" +	"golang.org/x/tools/internal/event/tag"  )  // An Runner will run go command invocations and serialize @@ -51,9 +56,19 @@ func (runner *Runner) initialize() {  // 1.14: go: updating go.mod: existing contents have changed since last read  var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) +// verb is an event label for the go command verb. +var verb = keys.NewString("verb", "go command verb") + +func invLabels(inv Invocation) []label.Label { +	return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)} +} +  // Run is a convenience wrapper around RunRaw.  // It returns only stdout and a "friendly" error.  func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { +	ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) +	defer done() +  	stdout, _, friendly, _ := runner.RunRaw(ctx, inv)  	return stdout, friendly  } @@ -61,6 +76,9 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e  // RunPiped runs the invocation serially, always waiting for any concurrent  // invocations to complete first.  func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { +	ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) +	defer done() +  	_, err := runner.runPiped(ctx, inv, stdout, stderr)  	return err  } @@ -68,6 +86,8 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde  // RunRaw runs the invocation, serializing requests only if they fight over  // go.mod changes.  func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { +	ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) 
+	defer done()
 	// Make sure the runner is always initialized.
 	runner.initialize()
 
@@ -215,6 +235,18 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 	cmd := exec.Command("go", goArgs...)
 	cmd.Stdout = stdout
 	cmd.Stderr = stderr
+
+	// cmd.WaitDelay was added only in go1.20 (see #50436).
+	if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
+		// https://go.dev/issue/59541: don't wait forever copying stderr
+		// after the command has exited.
+		// After CL 484741 we copy stdout manually, so we'll stop reading that as
+		// soon as ctx is done. However, we also don't want to wait around forever
+		// for stderr. Give a much-longer-than-reasonable delay and then assume that
+		// something has wedged in the kernel or runtime.
+		waitDelay.Set(reflect.ValueOf(30 * time.Second))
+	}
+
 	// On darwin the cwd gets resolved to the real path, which breaks anything that
 	// expects the working directory to keep the original path, including the
 	// go command when dealing with modules.
@@ -229,6 +261,7 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 		cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
 		cmd.Dir = i.WorkingDir
 	}
+
 	defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
 
 	return runCmdContext(ctx, cmd)
@@ -242,10 +275,85 @@ var DebugHangingGoCommands = false
 
 // runCmdContext is like exec.CommandContext except it sends os.Interrupt
 // before os.Kill.
-func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
-	if err := cmd.Start(); err != nil {
+func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
+	// If cmd.Stdout is not an *os.File, the exec package will create a pipe and
+	// copy it to the Writer in a goroutine until the process has finished and
+	// either the pipe reaches EOF or the command's WaitDelay expires.
+	//
+	// However, the output from 'go list' can be quite large, and we don't want to
+	// keep reading (and allocating buffers) if we've already decided we don't
+	// care about the output. We don't want to wait for the process to finish, and
+	// we don't want to wait for the WaitDelay to expire either.
+	//
+	// Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
+	// it with a pipe (which is an *os.File), which we can close in order to stop
+	// copying output as soon as we realize we don't care about it.
+	var stdoutW *os.File
+	if cmd.Stdout != nil {
+		if _, ok := cmd.Stdout.(*os.File); !ok {
+			var stdoutR *os.File
+			stdoutR, stdoutW, err = os.Pipe()
+			if err != nil {
+				return err
+			}
+			prevStdout := cmd.Stdout
+			cmd.Stdout = stdoutW
+
+			stdoutErr := make(chan error, 1)
+			go func() {
+				_, err := io.Copy(prevStdout, stdoutR)
+				if err != nil {
+					err = fmt.Errorf("copying stdout: %w", err)
+				}
+				stdoutErr <- err
+			}()
+			defer func() {
+				// We started a goroutine to copy a stdout pipe.
+				// Wait for it to finish, or terminate it if need be.
+				var err2 error
+				select {
+				case err2 = <-stdoutErr:
+					stdoutR.Close()
+				case <-ctx.Done():
+					stdoutR.Close()
+					// Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
+					// should cause the Read call in io.Copy to unblock and return
+					// immediately, but we still need to receive from stdoutErr to confirm
+					// that it has happened.
+					<-stdoutErr +					err2 = ctx.Err() +				} +				if err == nil { +					err = err2 +				} +			}() + +			// Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the +			// same writer, and have a type that can be compared with ==, at most +			// one goroutine at a time will call Write.” +			// +			// Since we're starting a goroutine that writes to cmd.Stdout, we must +			// also update cmd.Stderr so that it still holds. +			func() { +				defer func() { recover() }() +				if cmd.Stderr == prevStdout { +					cmd.Stderr = cmd.Stdout +				} +			}() +		} +	} + +	err = cmd.Start() +	if stdoutW != nil { +		// The child process has inherited the pipe file, +		// so close the copy held in this process. +		stdoutW.Close() +		stdoutW = nil +	} +	if err != nil {  		return err  	} +  	resChan := make(chan error, 1)  	go func() {  		resChan <- cmd.Wait() @@ -253,11 +361,14 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {  	// If we're interested in debugging hanging Go commands, stop waiting after a  	// minute and panic with interesting information. -	if DebugHangingGoCommands { +	debug := DebugHangingGoCommands +	if debug { +		timer := time.NewTimer(1 * time.Minute) +		defer timer.Stop()  		select {  		case err := <-resChan:  			return err -		case <-time.After(1 * time.Minute): +		case <-timer.C:  			HandleHangingGoCommand(cmd.Process)  		case <-ctx.Done():  		} @@ -270,30 +381,25 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {  	}  	// Cancelled. Interrupt and see if it ends voluntarily. -	cmd.Process.Signal(os.Interrupt) -	select { -	case err := <-resChan: -		return err -	case <-time.After(time.Second): +	if err := cmd.Process.Signal(os.Interrupt); err == nil { +		// (We used to wait only 1s but this proved +		// fragile on loaded builder machines.) +		timer := time.NewTimer(5 * time.Second) +		defer timer.Stop() +		select { +		case err := <-resChan: +			return err +		case <-timer.C: +		}  	}  	// Didn't shut down in response to interrupt. Kill it hard.  	// TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT  	// on certain platforms, such as unix. -	if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { -		// Don't panic here as this reliably fails on windows with EINVAL. +	if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {  		log.Printf("error killing the Go command: %v", err)  	} -	// See above: don't wait indefinitely if we're debugging hanging Go commands. -	if DebugHangingGoCommands { -		select { -		case err := <-resChan: -			return err -		case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill -			HandleHangingGoCommand(cmd.Process) -		} -	}  	return <-resChan  } diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 307a76d47..446c5846a 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -23,21 +23,11 @@ import (  func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {  	inv.Verb = "list"  	inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} -	inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") -	// Unset any unneeded flags, and remove them from BuildFlags, if they're -	// present. -	inv.ModFile = "" +	inv.BuildFlags = nil // This is not a build command.  	
inv.ModFlag = "" -	var buildFlags []string -	for _, flag := range inv.BuildFlags { -		// Flags can be prefixed by one or two dashes. -		f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") -		if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { -			continue -		} -		buildFlags = append(buildFlags, flag) -	} -	inv.BuildFlags = buildFlags +	inv.ModFile = "" +	inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") +  	stdoutBytes, err := r.Run(ctx, inv)  	if err != nil {  		return 0, err diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go index a3fb2d4f2..7e638ec24 100644 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -7,7 +7,9 @@  package tokeninternal  import ( +	"fmt"  	"go/token" +	"sort"  	"sync"  	"unsafe"  ) @@ -57,3 +59,93 @@ func GetLines(file *token.File) []int {  		panic("unexpected token.File size")  	}  } + +// AddExistingFiles adds the specified files to the FileSet if they +// are not already present. It panics if any pair of files in the +// resulting FileSet would overlap. +func AddExistingFiles(fset *token.FileSet, files []*token.File) { +	// Punch through the FileSet encapsulation. +	type tokenFileSet struct { +		// This type remained essentially consistent from go1.16 to go1.21. +		mutex sync.RWMutex +		base  int +		files []*token.File +		_     *token.File // changed to atomic.Pointer[token.File] in go1.19 +	} + +	// If the size of token.FileSet changes, this will fail to compile. +	const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) +	var _ [-delta * delta]int + +	type uP = unsafe.Pointer +	var ptr *tokenFileSet +	*(*uP)(uP(&ptr)) = uP(fset) +	ptr.mutex.Lock() +	defer ptr.mutex.Unlock() + +	// Merge and sort. +	newFiles := append(ptr.files, files...) +	sort.Slice(newFiles, func(i, j int) bool { +		return newFiles[i].Base() < newFiles[j].Base() +	}) + +	// Reject overlapping files. +	// Discard adjacent identical files. +	out := newFiles[:0] +	for i, file := range newFiles { +		if i > 0 { +			prev := newFiles[i-1] +			if file == prev { +				continue +			} +			if prev.Base()+prev.Size()+1 > file.Base() { +				panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", +					prev.Name(), prev.Base(), prev.Base()+prev.Size(), +					file.Name(), file.Base(), file.Base()+file.Size())) +			} +		} +		out = append(out, file) +	} +	newFiles = out + +	ptr.files = newFiles + +	// Advance FileSet.Base(). +	if len(newFiles) > 0 { +		last := newFiles[len(newFiles)-1] +		newBase := last.Base() + last.Size() + 1 +		if ptr.base < newBase { +			ptr.base = newBase +		} +	} +} + +// FileSetFor returns a new FileSet containing a sequence of new Files with +// the same base, size, and line as the input files, for use in APIs that +// require a FileSet. +// +// Precondition: the input files must be non-overlapping, and sorted in order +// of their Base. +func FileSetFor(files ...*token.File) *token.FileSet { +	fset := token.NewFileSet() +	for _, f := range files { +		f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) +		lines := GetLines(f) +		f2.SetLines(lines) +	} +	return fset +} + +// CloneFileSet creates a new FileSet holding all files in fset. It does not +// create copies of the token.Files in fset: they are added to the resulting +// FileSet unmodified. 
+func CloneFileSet(fset *token.FileSet) *token.FileSet { +	var files []*token.File +	fset.Iterate(func(f *token.File) bool { +		files = append(files, f) +		return true +	}) +	newFileSet := token.NewFileSet() +	AddExistingFiles(newFileSet, files) +	return newFileSet +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 25a1426d3..d0d0649fe 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -23,6 +23,7 @@  package typeparams  import ( +	"fmt"  	"go/ast"  	"go/token"  	"go/types" @@ -87,7 +88,6 @@ func IsTypeParam(t types.Type) bool {  func OriginMethod(fn *types.Func) *types.Func {  	recv := fn.Type().(*types.Signature).Recv()  	if recv == nil { -  		return fn  	}  	base := recv.Type() @@ -106,6 +106,31 @@ func OriginMethod(fn *types.Func) *types.Func {  	}  	orig := NamedTypeOrigin(named)  	gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) + +	// This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: +	// 	package p +	//      type T *int +	//      func (*T) f() {} +	// LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}. +	// Here we make them consistent by force. +	// (The go/types bug is general, but this workaround is reached only +	// for generic T thanks to the early return above.) +	if gfn == nil { +		mset := types.NewMethodSet(types.NewPointer(orig)) +		for i := 0; i < mset.Len(); i++ { +			m := mset.At(i) +			if m.Obj().Id() == fn.Id() { +				gfn = m.Obj() +				break +			} +		} +	} + +	// In golang/go#61196, we observe another crash, this time inexplicable. +	if gfn == nil { +		panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods())) +	} +  	return gfn.(*types.Func)  } diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 993135ec9..71248209e 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -81,13 +81,13 @@ func CoreType(T types.Type) types.Type {  // restrictions may be arbitrarily complex. 
For example, consider the  // following:  // -//  type A interface{ ~string|~[]byte } +//	type A interface{ ~string|~[]byte }  // -//  type B interface{ int|string } +//	type B interface{ int|string }  // -//  type C interface { ~string|~int } +//	type C interface { ~string|~int }  // -//  type T[P interface{ A|B; C }] int +//	type T[P interface{ A|B; C }] int  //  // In this example, the structural type restriction of P is ~string|int: A|B  // expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go index 933106a23..cbd12f801 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -30,7 +30,7 @@ func (xl termlist) String() string {  	var buf bytes.Buffer  	for i, x := range xl {  		if i > 0 { -			buf.WriteString(" ∪ ") +			buf.WriteString(" | ")  		}  		buf.WriteString(x.String())  	} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go index b4788978f..7ed86e171 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go @@ -129,7 +129,7 @@ func NamedTypeArgs(*types.Named) *TypeList {  }  // NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) types.Type { +func NamedTypeOrigin(named *types.Named) *types.Named {  	return named  } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go index 114a36b86..cf301af1d 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go @@ -103,7 +103,7 @@ func NamedTypeArgs(named *types.Named) *TypeList {  }  // NamedTypeOrigin returns named.Orig(). 
-func NamedTypeOrigin(named *types.Named) types.Type { +func NamedTypeOrigin(named *types.Named) *types.Named {  	return named.Origin()  } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go index 7ddee28d9..7350bb702 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -10,11 +10,10 @@ import "go/types"  // A term describes elementary type sets:  // -//   ∅:  (*term)(nil)     == ∅                      // set of no types (empty set) -//   𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse) -//   T:  &term{false, T}  == {T}                    // set of type T -//  ~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t -// +//	 ∅:  (*term)(nil)     == ∅                      // set of no types (empty set) +//	 𝓤:  &term{}          == 𝓤                      // set of all types (𝓤niverse) +//	 T:  &term{false, T}  == {T}                    // set of type T +//	~t:  &term{true, t}   == {t' | under(t') == t}  // set of types with underlying type t  type term struct {  	tilde bool // valid if typ != nil  	typ   types.Type diff --git a/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go b/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go new file mode 100644 index 000000000..5e96e8955 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go @@ -0,0 +1,24 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import "go/types" + +// This file contains back doors that allow gopls to avoid method sorting when +// using the objectpath package. +// +// This is performance-critical in certain repositories, but changing the +// behavior of the objectpath package is still being discussed in +// golang/go#61443. If we decide to remove the sorting in objectpath we can +// simply delete these back doors. Otherwise, we should add a new API to +// objectpath that allows controlling the sorting. + +// SkipEncoderMethodSorting marks enc (which must be an *objectpath.Encoder) as +// not requiring sorted methods. +var SkipEncoderMethodSorting func(enc interface{}) + +// ObjectpathObject is like objectpath.Object, but allows suppressing method +// sorting. 
+var ObjectpathObject func(pkg *types.Package, p string, skipMethodSorting bool) (types.Object, error) diff --git a/vendor/modules.txt b/vendor/modules.txt index 3b4f81548..9b236c249 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -384,7 +384,7 @@ github.com/mattn/go-isatty  ## explicit; go 1.19  github.com/microcosm-cc/bluemonday  github.com/microcosm-cc/bluemonday/css -# github.com/miekg/dns v1.1.55 +# github.com/miekg/dns v1.1.56  ## explicit; go 1.19  github.com/miekg/dns  # github.com/minio/md5-simd v1.1.2 @@ -849,7 +849,7 @@ golang.org/x/image/tiff/lzw  golang.org/x/image/vp8  golang.org/x/image/vp8l  golang.org/x/image/webp -# golang.org/x/mod v0.10.0 +# golang.org/x/mod v0.12.0  ## explicit; go 1.17  golang.org/x/mod/semver  # golang.org/x/net v0.15.0 @@ -897,15 +897,17 @@ golang.org/x/text/transform  golang.org/x/text/unicode/bidi  golang.org/x/text/unicode/norm  golang.org/x/text/width -# golang.org/x/tools v0.6.0 +# golang.org/x/tools v0.13.0  ## explicit; go 1.18  golang.org/x/tools/go/gcexportdata  golang.org/x/tools/go/internal/packagesdriver  golang.org/x/tools/go/packages +golang.org/x/tools/go/types/objectpath  golang.org/x/tools/internal/event  golang.org/x/tools/internal/event/core  golang.org/x/tools/internal/event/keys  golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/event/tag  golang.org/x/tools/internal/gcimporter  golang.org/x/tools/internal/gocommand  golang.org/x/tools/internal/packagesinternal | 
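Note on the shallow-import hunks in gcimporter/iimport.go and the typesinternal/objectpath.go back doors above: in shallow mode, struct fields and interface methods are re-resolved by a stable, position-independent path into the already-imported package (golang.org/x/tools/go/types/objectpath) rather than recreated with types.NewField/types.NewFunc, which is what objectPathObject does. A minimal, self-contained sketch of that encode/decode round trip follows; the import path "example.com/p" and type S are invented for illustration, and the real importer additionally falls back to creating a fresh object when the lookup fails.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

func main() {
	// Type-check a tiny package; any package with an exported struct field would do.
	const src = `package p; type S struct{ F int }`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("example.com/p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}

	// Encode the field as a package-relative path, as the shallow exporter does.
	field := pkg.Scope().Lookup("S").Type().Underlying().(*types.Struct).Field(0)
	path, err := objectpath.For(field)
	if err != nil {
		panic(err)
	}

	// Resolve the path again, which is roughly what objectPathObject does on import.
	obj, err := objectpath.Object(pkg, path)
	if err != nil {
		panic(err)
	}
	fmt.Printf("path=%q resolves to %v (same object: %v)\n", path, obj, obj == field)
}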

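Note on the GoVersion hunk in gocommand/version.go above: the double copy via append(append([]string{}, inv.Env...), "GO111MODULE=off") is replaced by a full (three-index) slice expression, inv.Env[:len(inv.Env):len(inv.Env)]. Capping the capacity at the length forces append to allocate a fresh backing array, so the caller's environment slice is never written through. A small illustration, with invented variable names and values:

package main

import "fmt"

func main() {
	// env stands in for inv.Env: a slice whose backing array belongs to the caller.
	env := make([]string, 2, 4)
	env[0], env[1] = "GOFLAGS=-mod=readonly", "CGO_ENABLED=0"

	// A plain append may write into env's spare capacity, clobbering data the
	// caller could later expose by appending to the same slice.
	shared := append(env, "SHARED=1")

	// The full slice expression caps capacity at the length, so append must
	// allocate a new backing array and copy; the caller's array is untouched.
	private := append(env[:len(env):len(env)], "GO111MODULE=off")

	fmt.Println(shared[2], private[2]) // SHARED=1 GO111MODULE=off
}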