Diffstat (limited to 'vendor/golang.org/x')
-rw-r--r--  vendor/golang.org/x/crypto/curve25519/curve25519.go  11
-rw-r--r--  vendor/golang.org/x/crypto/ed25519/ed25519.go  11
-rw-r--r--  vendor/golang.org/x/crypto/ssh/cipher.go  62
-rw-r--r--  vendor/golang.org/x/crypto/ssh/client_auth.go  24
-rw-r--r--  vendor/golang.org/x/crypto/ssh/common.go  83
-rw-r--r--  vendor/golang.org/x/crypto/ssh/doc.go  10
-rw-r--r--  vendor/golang.org/x/crypto/ssh/handshake.go  9
-rw-r--r--  vendor/golang.org/x/crypto/ssh/kex.go  24
-rw-r--r--  vendor/golang.org/x/crypto/ssh/keys.go  69
-rw-r--r--  vendor/golang.org/x/crypto/ssh/mac.go  42
-rw-r--r--  vendor/golang.org/x/crypto/ssh/server.go  36
-rw-r--r--  vendor/golang.org/x/crypto/ssh/transport.go  4
-rw-r--r--  vendor/golang.org/x/net/html/escape.go  2
-rw-r--r--  vendor/golang.org/x/net/html/parse.go  57
-rw-r--r--  vendor/golang.org/x/net/html/render.go  2
-rw-r--r--  vendor/golang.org/x/net/http2/config.go  17
-rw-r--r--  vendor/golang.org/x/net/http2/config_go125.go  15
-rw-r--r--  vendor/golang.org/x/net/http2/config_go126.go  15
-rw-r--r--  vendor/golang.org/x/net/http2/frame.go  25
-rw-r--r--  vendor/golang.org/x/net/http2/http2.go  1
-rw-r--r--  vendor/golang.org/x/net/http2/server.go  61
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go  6
-rw-r--r--  vendor/golang.org/x/net/http2/writesched.go  2
-rw-r--r--  vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go (renamed from vendor/golang.org/x/net/http2/writesched_priority.go)  104
-rw-r--r--  vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go  209
-rw-r--r--  vendor/golang.org/x/net/http2/writesched_roundrobin.go  2
-rw-r--r--  vendor/golang.org/x/net/internal/httpcommon/request.go  4
-rw-r--r--  vendor/golang.org/x/oauth2/oauth2.go  3
-rw-r--r--  vendor/golang.org/x/sys/unix/affinity_linux.go  9
-rw-r--r--  vendor/golang.org/x/sys/unix/fdset.go  4
-rw-r--r--  vendor/golang.org/x/sys/unix/ifreq_linux.go  4
-rw-r--r--  vendor/golang.org/x/sys/unix/mkall.sh  1
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_linux.go  4
-rw-r--r--  vendor/golang.org/x/sys/unix/syscall_netbsd.go  17
-rw-r--r--  vendor/golang.org/x/sys/windows/syscall_windows.go  2
-rw-r--r--  vendor/golang.org/x/sys/windows/types_windows.go  16
-rw-r--r--  vendor/golang.org/x/sys/windows/zsyscall_windows.go  18
-rw-r--r--  vendor/golang.org/x/text/unicode/bidi/core.go  11
-rw-r--r--  vendor/golang.org/x/tools/go/ast/astutil/imports.go  67
-rw-r--r--  vendor/golang.org/x/tools/go/ast/edge/edge.go  295
-rw-r--r--  vendor/golang.org/x/tools/go/ast/inspector/cursor.go  502
-rw-r--r--  vendor/golang.org/x/tools/go/ast/inspector/inspector.go  311
-rw-r--r--  vendor/golang.org/x/tools/go/ast/inspector/iter.go  85
-rw-r--r--  vendor/golang.org/x/tools/go/ast/inspector/typeof.go  227
-rw-r--r--  vendor/golang.org/x/tools/go/ast/inspector/walk.go  341
-rw-r--r--  vendor/golang.org/x/tools/go/packages/golist.go  6
-rw-r--r--  vendor/golang.org/x/tools/go/packages/visit.go  85
-rw-r--r--  vendor/golang.org/x/tools/go/types/objectpath/objectpath.go  5
-rw-r--r--  vendor/golang.org/x/tools/go/types/typeutil/map.go  19
-rw-r--r--  vendor/golang.org/x/tools/imports/forward.go  6
-rw-r--r--  vendor/golang.org/x/tools/internal/event/core/event.go  5
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/iexport.go  1
-rw-r--r--  vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go  53
-rw-r--r--  vendor/golang.org/x/tools/internal/imports/fix.go  6
-rw-r--r--  vendor/golang.org/x/tools/internal/modindex/symbols.go  3
-rw-r--r--  vendor/golang.org/x/tools/internal/stdlib/deps.go  596
-rw-r--r--  vendor/golang.org/x/tools/internal/stdlib/manifest.go  58
-rw-r--r--  vendor/golang.org/x/tools/internal/typesinternal/fx.go  49
-rw-r--r--  vendor/golang.org/x/tools/internal/typesinternal/isnamed.go  71
-rw-r--r--  vendor/golang.org/x/tools/internal/typesinternal/qualifier.go  8
-rw-r--r--  vendor/golang.org/x/tools/internal/typesinternal/types.go  48
-rw-r--r--  vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go  17
62 files changed, 3157 insertions, 703 deletions
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
index 8ff087df4..048faef3a 100644
--- a/vendor/golang.org/x/crypto/curve25519/curve25519.go
+++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go
@@ -3,11 +3,14 @@
// license that can be found in the LICENSE file.
// Package curve25519 provides an implementation of the X25519 function, which
-// performs scalar multiplication on the elliptic curve known as Curve25519.
-// See RFC 7748.
+// performs scalar multiplication on the elliptic curve known as Curve25519
+// according to [RFC 7748].
//
-// This package is a wrapper for the X25519 implementation
-// in the crypto/ecdh package.
+// The curve25519 package is a wrapper for the X25519 implementation in the
+// crypto/ecdh package. It is [frozen] and is not accepting new features.
+//
+// [RFC 7748]: https://datatracker.ietf.org/doc/html/rfc7748
+// [frozen]: https://go.dev/wiki/Frozen
package curve25519
import "crypto/ecdh"
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
index 59b3a95a7..df453dcce 100644
--- a/vendor/golang.org/x/crypto/ed25519/ed25519.go
+++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go
@@ -2,16 +2,19 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package ed25519 implements the Ed25519 signature algorithm. See
-// https://ed25519.cr.yp.to/.
+// Package ed25519 implements the Ed25519 signature algorithm.
//
// These functions are also compatible with the “Ed25519” function defined in
-// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
+// [RFC 8032]. However, unlike RFC 8032's formulation, this package's private key
// representation includes a public key suffix to make multiple signing
// operations with the same key more efficient. This package refers to the RFC
// 8032 private key as the “seed”.
//
-// This package is a wrapper around the standard library crypto/ed25519 package.
+// The ed25519 package is a wrapper for the Ed25519 implementation in the
+// crypto/ed25519 package. It is [frozen] and is not accepting new features.
+//
+// [RFC 8032]: https://datatracker.ietf.org/doc/html/rfc8032
+// [frozen]: https://go.dev/wiki/Frozen
package ed25519
import (
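
The updated ed25519 doc comment notes that the private key embeds a public key suffix and that the RFC 8032 private key is exposed as the seed. A short, illustrative sketch of that relationship (not part of this change):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	// The private key is seed || public key (64 bytes), so repeated Sign
	// calls avoid recomputing the public half; Seed() recovers the 32-byte
	// RFC 8032 private key, from which the full key can be rebuilt.
	seed := priv.Seed()
	rebuilt := ed25519.NewKeyFromSeed(seed)

	msg := []byte("hello")
	sig := ed25519.Sign(rebuilt, msg)
	fmt.Println(len(priv), len(seed), ed25519.Verify(pub, msg, sig)) // 64 32 true
}
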
diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
index 6a5b582aa..7554ed57a 100644
--- a/vendor/golang.org/x/crypto/ssh/cipher.go
+++ b/vendor/golang.org/x/crypto/ssh/cipher.go
@@ -8,6 +8,7 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
+ "crypto/fips140"
"crypto/rc4"
"crypto/subtle"
"encoding/binary"
@@ -15,6 +16,7 @@ import (
"fmt"
"hash"
"io"
+ "slices"
"golang.org/x/crypto/chacha20"
"golang.org/x/crypto/internal/poly1305"
@@ -93,41 +95,41 @@ func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream,
}
// cipherModes documents properties of supported ciphers. Ciphers not included
-// are not supported and will not be negotiated, even if explicitly requested in
-// ClientConfig.Crypto.Ciphers.
-var cipherModes = map[string]*cipherMode{
- // Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms
- // are defined in the order specified in the RFC.
- CipherAES128CTR: {16, aes.BlockSize, streamCipherMode(0, newAESCTR)},
- CipherAES192CTR: {24, aes.BlockSize, streamCipherMode(0, newAESCTR)},
- CipherAES256CTR: {32, aes.BlockSize, streamCipherMode(0, newAESCTR)},
-
- // Ciphers from RFC 4345, which introduces security-improved arcfour ciphers.
- // They are defined in the order specified in the RFC.
- InsecureCipherRC4128: {16, 0, streamCipherMode(1536, newRC4)},
- InsecureCipherRC4256: {32, 0, streamCipherMode(1536, newRC4)},
-
- // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
- // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
- // RC4) has problems with weak keys, and should be used with caution."
- // RFC 4345 introduces improved versions of Arcfour.
- InsecureCipherRC4: {16, 0, streamCipherMode(0, newRC4)},
-
- // AEAD ciphers
- CipherAES128GCM: {16, 12, newGCMCipher},
- CipherAES256GCM: {32, 12, newGCMCipher},
- CipherChaCha20Poly1305: {64, 0, newChaCha20Cipher},
-
+// are not supported and will not be negotiated, even if explicitly configured.
+// When FIPS mode is enabled, only FIPS-approved algorithms are included.
+var cipherModes = map[string]*cipherMode{}
+
+func init() {
+ cipherModes[CipherAES128CTR] = &cipherMode{16, aes.BlockSize, streamCipherMode(0, newAESCTR)}
+ cipherModes[CipherAES192CTR] = &cipherMode{24, aes.BlockSize, streamCipherMode(0, newAESCTR)}
+ cipherModes[CipherAES256CTR] = &cipherMode{32, aes.BlockSize, streamCipherMode(0, newAESCTR)}
+ // Use of GCM with arbitrary IVs is not allowed in FIPS 140-only mode,
+ // we'll wire it up to NewGCMForSSH in Go 1.26.
+ //
+ // For now it means we'll work with fips140=on but not fips140=only.
+ cipherModes[CipherAES128GCM] = &cipherMode{16, 12, newGCMCipher}
+ cipherModes[CipherAES256GCM] = &cipherMode{32, 12, newGCMCipher}
+
+ if fips140.Enabled() {
+ defaultCiphers = slices.DeleteFunc(defaultCiphers, func(algo string) bool {
+ _, ok := cipherModes[algo]
+ return !ok
+ })
+ return
+ }
+
+ cipherModes[CipherChaCha20Poly1305] = &cipherMode{64, 0, newChaCha20Cipher}
+ // Insecure ciphers not included in the default configuration.
+ cipherModes[InsecureCipherRC4128] = &cipherMode{16, 0, streamCipherMode(1536, newRC4)}
+ cipherModes[InsecureCipherRC4256] = &cipherMode{32, 0, streamCipherMode(1536, newRC4)}
+ cipherModes[InsecureCipherRC4] = &cipherMode{16, 0, streamCipherMode(0, newRC4)}
// CBC mode is insecure and so is not included in the default config.
// (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely
// needed, it's possible to specify a custom Config to enable it.
// You should expect that an active attacker can recover plaintext if
// you do.
- InsecureCipherAES128CBC: {16, aes.BlockSize, newAESCBCCipher},
-
- // 3des-cbc is insecure and is not included in the default
- // config.
- InsecureCipherTripleDESCBC: {24, des.BlockSize, newTripleDESCBCCipher},
+ cipherModes[InsecureCipherAES128CBC] = &cipherMode{16, aes.BlockSize, newAESCBCCipher}
+ cipherModes[InsecureCipherTripleDESCBC] = &cipherMode{24, des.BlockSize, newTripleDESCBCCipher}
}
// prefixLen is the length of the packet prefix that contains the packet length
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
index c12818fdc..3127e4990 100644
--- a/vendor/golang.org/x/crypto/ssh/client_auth.go
+++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
+ "slices"
"strings"
)
@@ -83,7 +84,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
// success
return nil
} else if ok == authFailure {
- if m := auth.method(); !contains(tried, m) {
+ if m := auth.method(); !slices.Contains(tried, m) {
tried = append(tried, m)
}
}
@@ -97,7 +98,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
findNext:
for _, a := range config.Auth {
candidateMethod := a.method()
- if contains(tried, candidateMethod) {
+ if slices.Contains(tried, candidateMethod) {
continue
}
for _, meth := range methods {
@@ -117,15 +118,6 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried)
}
-func contains(list []string, e string) bool {
- for _, s := range list {
- if s == e {
- return true
- }
- }
- return false
-}
-
// An AuthMethod represents an instance of an RFC 4252 authentication method.
type AuthMethod interface {
// auth authenticates user over transport t.
@@ -255,7 +247,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
// Fallback to use if there is no "server-sig-algs" extension or a
// common algorithm cannot be found. We use the public key format if the
// MultiAlgorithmSigner supports it, otherwise we return an error.
- if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) {
+ if !slices.Contains(as.Algorithms(), underlyingAlgo(keyFormat)) {
return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v",
underlyingAlgo(keyFormat), keyFormat, as.Algorithms())
}
@@ -284,7 +276,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
// Filter algorithms based on those supported by MultiAlgorithmSigner.
var keyAlgos []string
for _, algo := range algorithmsForKeyFormat(keyFormat) {
- if contains(as.Algorithms(), underlyingAlgo(algo)) {
+ if slices.Contains(as.Algorithms(), underlyingAlgo(algo)) {
keyAlgos = append(keyAlgos, algo)
}
}
@@ -334,7 +326,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
// the key try to use the obtained algorithm as if "server-sig-algs" had
// not been implemented if supported from the algorithm signer.
if !ok && idx < origSignersLen && isRSACert(algo) && algo != CertAlgoRSAv01 {
- if contains(as.Algorithms(), KeyAlgoRSA) {
+ if slices.Contains(as.Algorithms(), KeyAlgoRSA) {
// We retry using the compat algorithm after all signers have
// been tried normally.
signers = append(signers, &multiAlgorithmSigner{
@@ -385,7 +377,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
// contain the "publickey" method, do not attempt to authenticate with any
// other keys. According to RFC 4252 Section 7, the latter can occur when
// additional authentication methods are required.
- if success == authSuccess || !contains(methods, cb.method()) {
+ if success == authSuccess || !slices.Contains(methods, cb.method()) {
return success, methods, err
}
}
@@ -434,7 +426,7 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
// servers send the key type instead. OpenSSH allows any algorithm
// that matches the public key, so we do the same.
// https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709
- if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) {
+ if !slices.Contains(algorithmsForKeyFormat(key.Type()), msg.Algo) {
return false, nil
}
if !bytes.Equal(msg.PubKey, pubKey) {
diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
index 8bfad16c4..2e44e9c9e 100644
--- a/vendor/golang.org/x/crypto/ssh/common.go
+++ b/vendor/golang.org/x/crypto/ssh/common.go
@@ -6,6 +6,7 @@ package ssh
import (
"crypto"
+ "crypto/fips140"
"crypto/rand"
"fmt"
"io"
@@ -256,6 +257,40 @@ type Algorithms struct {
PublicKeyAuths []string
}
+func init() {
+ if fips140.Enabled() {
+ defaultHostKeyAlgos = slices.DeleteFunc(defaultHostKeyAlgos, func(algo string) bool {
+ _, err := hashFunc(underlyingAlgo(algo))
+ return err != nil
+ })
+ defaultPubKeyAuthAlgos = slices.DeleteFunc(defaultPubKeyAuthAlgos, func(algo string) bool {
+ _, err := hashFunc(underlyingAlgo(algo))
+ return err != nil
+ })
+ }
+}
+
+func hashFunc(format string) (crypto.Hash, error) {
+ switch format {
+ case KeyAlgoRSASHA256, KeyAlgoECDSA256, KeyAlgoSKED25519, KeyAlgoSKECDSA256:
+ return crypto.SHA256, nil
+ case KeyAlgoECDSA384:
+ return crypto.SHA384, nil
+ case KeyAlgoRSASHA512, KeyAlgoECDSA521:
+ return crypto.SHA512, nil
+ case KeyAlgoED25519:
+ // KeyAlgoED25519 doesn't pre-hash.
+ return 0, nil
+ case KeyAlgoRSA, InsecureKeyAlgoDSA:
+ if fips140.Enabled() {
+ return 0, fmt.Errorf("ssh: hash algorithm for format %q not allowed in FIPS 140 mode", format)
+ }
+ return crypto.SHA1, nil
+ default:
+ return 0, fmt.Errorf("ssh: hash algorithm for format %q not mapped", format)
+ }
+}
+
// SupportedAlgorithms returns algorithms currently implemented by this package,
// excluding those with security issues, which are returned by
// InsecureAlgorithms. The algorithms listed here are in preference order.
@@ -283,21 +318,6 @@ func InsecureAlgorithms() Algorithms {
var supportedCompressions = []string{compressionNone}
-// hashFuncs keeps the mapping of supported signature algorithms to their
-// respective hashes needed for signing and verification.
-var hashFuncs = map[string]crypto.Hash{
- KeyAlgoRSA: crypto.SHA1,
- KeyAlgoRSASHA256: crypto.SHA256,
- KeyAlgoRSASHA512: crypto.SHA512,
- InsecureKeyAlgoDSA: crypto.SHA1,
- KeyAlgoECDSA256: crypto.SHA256,
- KeyAlgoECDSA384: crypto.SHA384,
- KeyAlgoECDSA521: crypto.SHA512,
- // KeyAlgoED25519 doesn't pre-hash.
- KeyAlgoSKECDSA256: crypto.SHA256,
- KeyAlgoSKED25519: crypto.SHA256,
-}
-
// algorithmsForKeyFormat returns the supported signature algorithms for a given
// public key format (PublicKey.Type), in order of preference. See RFC 8332,
// Section 2. See also the note in sendKexInit on backwards compatibility.
@@ -312,11 +332,40 @@ func algorithmsForKeyFormat(keyFormat string) []string {
}
}
+// keyFormatForAlgorithm returns the key format corresponding to the given
+// signature algorithm. It returns an empty string if the signature algorithm is
+// invalid or unsupported.
+func keyFormatForAlgorithm(sigAlgo string) string {
+ switch sigAlgo {
+ case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512:
+ return KeyAlgoRSA
+ case CertAlgoRSAv01, CertAlgoRSASHA256v01, CertAlgoRSASHA512v01:
+ return CertAlgoRSAv01
+ case KeyAlgoED25519,
+ KeyAlgoSKED25519,
+ KeyAlgoSKECDSA256,
+ KeyAlgoECDSA256,
+ KeyAlgoECDSA384,
+ KeyAlgoECDSA521,
+ InsecureKeyAlgoDSA,
+ InsecureCertAlgoDSAv01,
+ CertAlgoECDSA256v01,
+ CertAlgoECDSA384v01,
+ CertAlgoECDSA521v01,
+ CertAlgoSKECDSA256v01,
+ CertAlgoED25519v01,
+ CertAlgoSKED25519v01:
+ return sigAlgo
+ default:
+ return ""
+ }
+}
+
// isRSA returns whether algo is a supported RSA algorithm, including certificate
// algorithms.
func isRSA(algo string) bool {
algos := algorithmsForKeyFormat(KeyAlgoRSA)
- return contains(algos, underlyingAlgo(algo))
+ return slices.Contains(algos, underlyingAlgo(algo))
}
func isRSACert(algo string) bool {
@@ -515,7 +564,7 @@ func (c *Config) SetDefaults() {
if kexAlgoMap[k] != nil {
// Ignore the KEX if we have no kexAlgoMap definition.
kexs = append(kexs, k)
- if k == KeyExchangeCurve25519 && !contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) {
+ if k == KeyExchangeCurve25519 && !slices.Contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) {
kexs = append(kexs, keyExchangeCurve25519LibSSH)
}
}
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
index 04ccce346..5b4de9eff 100644
--- a/vendor/golang.org/x/crypto/ssh/doc.go
+++ b/vendor/golang.org/x/crypto/ssh/doc.go
@@ -17,8 +17,18 @@ References:
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
[SSH-CERTS]: https://datatracker.ietf.org/doc/html/draft-miller-ssh-cert-01
+ [FIPS 140-3 mode]: https://go.dev/doc/security/fips140
This package does not fall under the stability promise of the Go language itself,
so its API may be changed when pressing needs arise.
+
+# FIPS 140-3 mode
+
+When the program is in [FIPS 140-3 mode], this package behaves as if only SP
+800-140C and SP 800-140D approved cipher suites, signature algorithms,
+certificate public key types and sizes, and key exchange and derivation
+algorithms were implemented. Others are silently ignored and not negotiated, or
+rejected. This set may depend on the algorithms supported by the FIPS 140-3 Go
+Cryptographic Module selected with GOFIPS140, and may change across Go versions.
*/
package ssh
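
A rough usage sketch of the FIPS 140-3 behaviour documented above, assuming the program is built with a FIPS 140-3 Go Cryptographic Module and run with FIPS mode enabled (not part of this change):

package main

import (
	"crypto/fips140"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	fmt.Println("FIPS 140-3 mode:", fips140.Enabled())

	cfg := &ssh.ClientConfig{
		Config: ssh.Config{
			// chacha20-poly1305@openssh.com is requested explicitly here, but
			// per the doc comment above it is simply not negotiated when FIPS
			// mode is enabled; aes256-gcm@openssh.com remains available.
			Ciphers: []string{ssh.CipherChaCha20Poly1305, ssh.CipherAES256GCM},
		},
		User:            "demo",
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	_ = cfg // pass to ssh.Dial as usual
}
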
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
index a90bfe331..4be3cbb6d 100644
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -10,6 +10,7 @@ import (
"io"
"log"
"net"
+ "slices"
"strings"
"sync"
)
@@ -527,7 +528,7 @@ func (t *handshakeTransport) sendKexInit() error {
switch s := k.(type) {
case MultiAlgorithmSigner:
for _, algo := range algorithmsForKeyFormat(keyFormat) {
- if contains(s.Algorithms(), underlyingAlgo(algo)) {
+ if slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo)
}
}
@@ -679,7 +680,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
return err
}
- if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) {
+ if t.sessionID == nil && ((isClient && slices.Contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && slices.Contains(clientInit.KexAlgos, kexStrictClient))) {
t.strictMode = true
if err := t.conn.setStrictMode(); err != nil {
return err
@@ -736,7 +737,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
// On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO
// message with the server-sig-algs extension if the client supports it. See
// RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9.
- if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") {
+ if !isClient && firstKeyExchange && slices.Contains(clientInit.KexAlgos, "ext-info-c") {
supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",")
extInfo := &extInfoMsg{
NumExtensions: 2,
@@ -790,7 +791,7 @@ func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, a
func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
for _, k := range hostKeys {
if s, ok := k.(MultiAlgorithmSigner); ok {
- if !contains(s.Algorithms(), underlyingAlgo(algo)) {
+ if !slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
continue
}
}
diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go
index 78aaf0310..5f7fdd851 100644
--- a/vendor/golang.org/x/crypto/ssh/kex.go
+++ b/vendor/golang.org/x/crypto/ssh/kex.go
@@ -8,12 +8,14 @@ import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
+ "crypto/fips140"
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
+ "slices"
"golang.org/x/crypto/curve25519"
)
@@ -395,9 +397,27 @@ func ecHash(curve elliptic.Curve) crypto.Hash {
return crypto.SHA512
}
+// kexAlgoMap defines the supported KEXs. KEXs not included are not supported
+// and will not be negotiated, even if explicitly configured. When FIPS mode is
+// enabled, only FIPS-approved algorithms are included.
var kexAlgoMap = map[string]kexAlgorithm{}
func init() {
+ // mlkem768x25519-sha256 we'll work with fips140=on but not fips140=only
+ // until Go 1.26.
+ kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{}
+ kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()}
+ kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()}
+ kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()}
+
+ if fips140.Enabled() {
+ defaultKexAlgos = slices.DeleteFunc(defaultKexAlgos, func(algo string) bool {
+ _, ok := kexAlgoMap[algo]
+ return !ok
+ })
+ return
+ }
+
p, _ := new(big.Int).SetString(oakleyGroup2, 16)
kexAlgoMap[InsecureKeyExchangeDH1SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
@@ -431,14 +451,10 @@ func init() {
hashFunc: crypto.SHA512,
}
- kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()}
- kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()}
- kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()}
kexAlgoMap[KeyExchangeCurve25519] = &curve25519sha256{}
kexAlgoMap[keyExchangeCurve25519LibSSH] = &curve25519sha256{}
kexAlgoMap[InsecureKeyExchangeDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1}
kexAlgoMap[KeyExchangeDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256}
- kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{}
}
// curve25519sha256 implements the curve25519-sha256 (formerly known as
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index a28c0de50..a035956fc 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -27,6 +27,7 @@ import (
"fmt"
"io"
"math/big"
+ "slices"
"strings"
"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
@@ -89,6 +90,11 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err
}
return cert, nil, nil
}
+ if keyFormat := keyFormatForAlgorithm(algo); keyFormat != "" {
+ return nil, nil, fmt.Errorf("ssh: signature algorithm %q isn't a key format; key is malformed and should be re-encoded with type %q",
+ algo, keyFormat)
+ }
+
return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo)
}
@@ -191,9 +197,10 @@ func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey
return "", nil, nil, "", nil, io.EOF
}
-// ParseAuthorizedKey parses a public key from an authorized_keys
-// file used in OpenSSH according to the sshd(8) manual page.
+// ParseAuthorizedKey parses a public key from an authorized_keys file used in
+// OpenSSH according to the sshd(8) manual page. Invalid lines are ignored.
func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
+ var lastErr error
for len(in) > 0 {
end := bytes.IndexByte(in, '\n')
if end != -1 {
@@ -222,6 +229,8 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
return out, comment, options, rest, nil
+ } else {
+ lastErr = err
}
// No key type recognised. Maybe there's an options field at
@@ -264,12 +273,18 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
options = candidateOptions
return out, comment, options, rest, nil
+ } else {
+ lastErr = err
}
in = rest
continue
}
+ if lastErr != nil {
+ return nil, "", nil, nil, fmt.Errorf("ssh: no key found; last parsing error for ignored line: %w", lastErr)
+ }
+
return nil, "", nil, nil, errors.New("ssh: no key found")
}
@@ -395,11 +410,11 @@ func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (Multi
}
for _, algo := range algorithms {
- if !contains(supportedAlgos, algo) {
+ if !slices.Contains(supportedAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q",
algo, signer.PublicKey().Type())
}
- if !contains(signerAlgos, algo) {
+ if !slices.Contains(signerAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo)
}
}
@@ -486,10 +501,13 @@ func (r *rsaPublicKey) Marshal() []byte {
func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
supportedAlgos := algorithmsForKeyFormat(r.Type())
- if !contains(supportedAlgos, sig.Format) {
+ if !slices.Contains(supportedAlgos, sig.Format) {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
}
- hash := hashFuncs[sig.Format]
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@@ -606,7 +624,11 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@@ -651,7 +673,11 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
}
- h := hashFuncs[k.PublicKey().Type()].New()
+ hash, err := hashFunc(k.PublicKey().Type())
+ if err != nil {
+ return nil, err
+ }
+ h := hash.New()
h.Write(data)
digest := h.Sum(nil)
r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
@@ -801,8 +827,11 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
-
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@@ -905,8 +934,11 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
-
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@@ -1009,7 +1041,11 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("invalid size %d for Ed25519 public key", l)
}
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@@ -1112,11 +1148,14 @@ func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
algorithm = s.pubKey.Type()
}
- if !contains(s.Algorithms(), algorithm) {
+ if !slices.Contains(s.Algorithms(), algorithm) {
return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type())
}
- hashFunc := hashFuncs[algorithm]
+ hashFunc, err := hashFunc(algorithm)
+ if err != nil {
+ return nil, err
+ }
var digest []byte
if hashFunc != 0 {
h := hashFunc.New()
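
A small, illustrative sketch of the new ParseAuthorizedKey error behaviour; the input line is hypothetical and not part of this change:

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	// The base64 key blob on this line is truncated, so no key can be parsed.
	// With the change above, the returned error now wraps the last per-line
	// parse failure instead of only reporting "ssh: no key found".
	bad := []byte("ssh-ed25519 AAAAC3Nza user@example")
	if _, _, _, _, err := ssh.ParseAuthorizedKey(bad); err != nil {
		fmt.Println("parse failed:", err)
	}
}
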
diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go
index de2639d57..87d626fbb 100644
--- a/vendor/golang.org/x/crypto/ssh/mac.go
+++ b/vendor/golang.org/x/crypto/ssh/mac.go
@@ -7,11 +7,13 @@ package ssh
// Message authentication support
import (
+ "crypto/fips140"
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"hash"
+ "slices"
)
type macMode struct {
@@ -46,23 +48,37 @@ func (t truncatingMAC) Size() int {
func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
-var macModes = map[string]*macMode{
- HMACSHA512ETM: {64, true, func(key []byte) hash.Hash {
+// macModes defines the supported MACs. MACs not included are not supported
+// and will not be negotiated, even if explicitly configured. When FIPS mode is
+// enabled, only FIPS-approved algorithms are included.
+var macModes = map[string]*macMode{}
+
+func init() {
+ macModes[HMACSHA512ETM] = &macMode{64, true, func(key []byte) hash.Hash {
return hmac.New(sha512.New, key)
- }},
- HMACSHA256ETM: {32, true, func(key []byte) hash.Hash {
+ }}
+ macModes[HMACSHA256ETM] = &macMode{32, true, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key)
- }},
- HMACSHA512: {64, false, func(key []byte) hash.Hash {
+ }}
+ macModes[HMACSHA512] = &macMode{64, false, func(key []byte) hash.Hash {
return hmac.New(sha512.New, key)
- }},
- HMACSHA256: {32, false, func(key []byte) hash.Hash {
+ }}
+ macModes[HMACSHA256] = &macMode{32, false, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key)
- }},
- HMACSHA1: {20, false, func(key []byte) hash.Hash {
+ }}
+
+ if fips140.Enabled() {
+ defaultMACs = slices.DeleteFunc(defaultMACs, func(algo string) bool {
+ _, ok := macModes[algo]
+ return !ok
+ })
+ return
+ }
+
+ macModes[HMACSHA1] = &macMode{20, false, func(key []byte) hash.Hash {
return hmac.New(sha1.New, key)
- }},
- InsecureHMACSHA196: {20, false, func(key []byte) hash.Hash {
+ }}
+ macModes[InsecureHMACSHA196] = &macMode{20, false, func(key []byte) hash.Hash {
return truncatingMAC{12, hmac.New(sha1.New, key)}
- }},
+ }}
}
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index 98679ba5b..064dcbaf5 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -10,6 +10,7 @@ import (
"fmt"
"io"
"net"
+ "slices"
"strings"
)
@@ -43,6 +44,9 @@ type Permissions struct {
// pass data from the authentication callbacks to the server
// application layer.
Extensions map[string]string
+
+ // ExtraData allows to store user defined data.
+ ExtraData map[any]any
}
type GSSAPIWithMICConfig struct {
@@ -126,6 +130,21 @@ type ServerConfig struct {
// Permissions.Extensions entry.
PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
+ // VerifiedPublicKeyCallback, if non-nil, is called after a client
+ // successfully confirms having control over a key that was previously
+ // approved by PublicKeyCallback. The permissions object passed to the
+ // callback is the one returned by PublicKeyCallback for the given public
+ // key and its ownership is transferred to the callback. The returned
+ // Permissions object can be the same object, optionally modified, or a
+ // completely new object. If VerifiedPublicKeyCallback is non-nil,
+ // PublicKeyCallback is not allowed to return a PartialSuccessError, which
+ // can instead be returned by VerifiedPublicKeyCallback.
+ //
+ // VerifiedPublicKeyCallback does not affect which authentication methods
+ // are included in the list of methods that can be attempted by the client.
+ VerifiedPublicKeyCallback func(conn ConnMetadata, key PublicKey, permissions *Permissions,
+ signatureAlgorithm string) (*Permissions, error)
+
// KeyboardInteractiveCallback, if non-nil, is called when
// keyboard-interactive authentication is selected (RFC
// 4256). The client object's Challenge function should be
@@ -246,7 +265,7 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha
fullConf.PublicKeyAuthAlgorithms = defaultPubKeyAuthAlgos
} else {
for _, algo := range fullConf.PublicKeyAuthAlgorithms {
- if !contains(SupportedAlgorithms().PublicKeyAuths, algo) && !contains(InsecureAlgorithms().PublicKeyAuths, algo) {
+ if !slices.Contains(SupportedAlgorithms().PublicKeyAuths, algo) && !slices.Contains(InsecureAlgorithms().PublicKeyAuths, algo) {
c.Close()
return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo)
}
@@ -631,7 +650,7 @@ userAuthLoop:
return nil, parseError(msgUserAuthRequest)
}
algo := string(algoBytes)
- if !contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
+ if !slices.Contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
break
}
@@ -652,6 +671,9 @@ userAuthLoop:
candidate.pubKeyData = pubKeyData
candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey)
_, isPartialSuccessError := candidate.result.(*PartialSuccessError)
+ if isPartialSuccessError && config.VerifiedPublicKeyCallback != nil {
+ return nil, errors.New("ssh: invalid library usage: PublicKeyCallback must not return partial success when VerifiedPublicKeyCallback is defined")
+ }
if (candidate.result == nil || isPartialSuccessError) &&
candidate.perms != nil &&
@@ -695,7 +717,7 @@ userAuthLoop:
// ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public
// key type. The algorithm and public key type must be
// consistent: both must be certificate algorithms, or neither.
- if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) {
+ if !slices.Contains(algorithmsForKeyFormat(pubKey.Type()), algo) {
authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q",
pubKey.Type(), algo)
break
@@ -705,7 +727,7 @@ userAuthLoop:
// algorithm name that corresponds to algo with
// sig.Format. This is usually the same, but
// for certs, the names differ.
- if !contains(config.PublicKeyAuthAlgorithms, sig.Format) {
+ if !slices.Contains(config.PublicKeyAuthAlgorithms, sig.Format) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break
}
@@ -722,6 +744,12 @@ userAuthLoop:
authErr = candidate.result
perms = candidate.perms
+ if authErr == nil && config.VerifiedPublicKeyCallback != nil {
+ // Only call VerifiedPublicKeyCallback after the key has been accepted
+ // and successfully verified. If authErr is non-nil, the key is not
+ // considered verified and the callback must not run.
+ perms, authErr = config.VerifiedPublicKeyCallback(s, pubKey, perms, algo)
+ }
}
case "gssapi-with-mic":
if authConfig.GSSAPIWithMICConfig == nil {
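
A minimal sketch of wiring up the new VerifiedPublicKeyCallback and Permissions.ExtraData added above; authorizedKeysMap is an assumed application-side lookup and not part of this package:

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// authorizedKeysMap is an assumed application-side allow list, keyed by the
// marshaled public key.
var authorizedKeysMap = map[string]bool{}

func newServerConfig() *ssh.ServerConfig {
	return &ssh.ServerConfig{
		// PublicKeyCallback only decides whether a key would be acceptable;
		// at this point the client has not yet proven possession of the key.
		PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
			if !authorizedKeysMap[string(key.Marshal())] {
				return nil, fmt.Errorf("unknown public key for %q", conn.User())
			}
			return &ssh.Permissions{
				Extensions: map[string]string{"pubkey-fp": ssh.FingerprintSHA256(key)},
			}, nil
		},
		// VerifiedPublicKeyCallback runs only after the signature has been
		// verified, so it is the place to record which key and signature
		// algorithm actually authenticated the connection.
		VerifiedPublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey, perms *ssh.Permissions, sigAlgo string) (*ssh.Permissions, error) {
			if perms.ExtraData == nil {
				perms.ExtraData = map[any]any{}
			}
			perms.ExtraData["signature-algorithm"] = sigAlgo
			return perms, nil
		},
	}
}

func main() { _ = newServerConfig() }
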
diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go
index 663619845..fa3dd6a42 100644
--- a/vendor/golang.org/x/crypto/ssh/transport.go
+++ b/vendor/golang.org/x/crypto/ssh/transport.go
@@ -8,6 +8,7 @@ import (
"bufio"
"bytes"
"errors"
+ "fmt"
"io"
"log"
)
@@ -254,6 +255,9 @@ var (
// (to setup server->client keys) or clientKeys (for client->server keys).
func newPacketCipher(d direction, algs DirectionAlgorithms, kex *kexResult) (packetCipher, error) {
cipherMode := cipherModes[algs.Cipher]
+ if cipherMode == nil {
+ return nil, fmt.Errorf("ssh: unsupported cipher %v", algs.Cipher)
+ }
iv := make([]byte, cipherMode.ivSize)
key := make([]byte, cipherMode.keySize)
diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go
index 04c6bec21..12f227370 100644
--- a/vendor/golang.org/x/net/html/escape.go
+++ b/vendor/golang.org/x/net/html/escape.go
@@ -299,7 +299,7 @@ func escape(w writer, s string) error {
case '\r':
esc = "&#13;"
default:
- panic("unrecognized escape character")
+ panic("html: unrecognized escape character")
}
s = s[i+1:]
if _, err := w.WriteString(esc); err != nil {
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
index 518ee4c94..88fc0056a 100644
--- a/vendor/golang.org/x/net/html/parse.go
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -136,7 +136,7 @@ func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
return -1
}
default:
- panic("unreachable")
+ panic(fmt.Sprintf("html: internal error: indexOfElementInScope unknown scope: %d", s))
}
}
switch s {
@@ -179,7 +179,7 @@ func (p *parser) clearStackToContext(s scope) {
return
}
default:
- panic("unreachable")
+ panic(fmt.Sprintf("html: internal error: clearStackToContext unknown scope: %d", s))
}
}
}
@@ -231,7 +231,14 @@ func (p *parser) addChild(n *Node) {
}
if n.Type == ElementNode {
- p.oe = append(p.oe, n)
+ p.insertOpenElement(n)
+ }
+}
+
+func (p *parser) insertOpenElement(n *Node) {
+ p.oe = append(p.oe, n)
+ if len(p.oe) > 512 {
+ panic("html: open stack of elements exceeds 512 nodes")
}
}
@@ -810,7 +817,7 @@ func afterHeadIM(p *parser) bool {
p.im = inFramesetIM
return true
case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
- p.oe = append(p.oe, p.head)
+ p.insertOpenElement(p.head)
defer p.oe.remove(p.head)
return inHeadIM(p)
case a.Head:
@@ -1678,7 +1685,7 @@ func inTableBodyIM(p *parser) bool {
return inTableIM(p)
}
-// Section 12.2.6.4.14.
+// Section 13.2.6.4.14.
func inRowIM(p *parser) bool {
switch p.tok.Type {
case StartTagToken:
@@ -1690,7 +1697,9 @@ func inRowIM(p *parser) bool {
p.im = inCellIM
return true
case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr:
- if p.popUntil(tableScope, a.Tr) {
+ if p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
p.im = inTableBodyIM
return false
}
@@ -1700,22 +1709,28 @@ func inRowIM(p *parser) bool {
case EndTagToken:
switch p.tok.DataAtom {
case a.Tr:
- if p.popUntil(tableScope, a.Tr) {
+ if p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
p.im = inTableBodyIM
return true
}
// Ignore the token.
return true
case a.Table:
- if p.popUntil(tableScope, a.Tr) {
+ if p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
p.im = inTableBodyIM
return false
}
// Ignore the token.
return true
case a.Tbody, a.Tfoot, a.Thead:
- if p.elementInScope(tableScope, p.tok.DataAtom) {
- p.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String())
+ if p.elementInScope(tableScope, p.tok.DataAtom) && p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
+ p.im = inTableBodyIM
return false
}
// Ignore the token.
@@ -2222,16 +2237,20 @@ func parseForeignContent(p *parser) bool {
p.acknowledgeSelfClosingTag()
}
case EndTagToken:
+ if strings.EqualFold(p.oe[len(p.oe)-1].Data, p.tok.Data) {
+ p.oe = p.oe[:len(p.oe)-1]
+ return true
+ }
for i := len(p.oe) - 1; i >= 0; i-- {
- if p.oe[i].Namespace == "" {
- return p.im(p)
- }
if strings.EqualFold(p.oe[i].Data, p.tok.Data) {
p.oe = p.oe[:i]
+ return true
+ }
+ if i > 0 && p.oe[i-1].Namespace == "" {
break
}
}
- return true
+ return p.im(p)
default:
// Ignore the token.
}
@@ -2312,9 +2331,13 @@ func (p *parser) parseCurrentToken() {
}
}
-func (p *parser) parse() error {
+func (p *parser) parse() (err error) {
+ defer func() {
+ if panicErr := recover(); panicErr != nil {
+ err = fmt.Errorf("%s", panicErr)
+ }
+ }()
// Iterate until EOF. Any other error will cause an early return.
- var err error
for err != io.EOF {
// CDATA sections are allowed only in foreign content.
n := p.oe.top()
@@ -2343,6 +2366,8 @@ func (p *parser) parse() error {
// <tag>s. Conversely, explicit <tag>s in r's data can be silently dropped,
// with no corresponding node in the resulting tree.
//
+// Parse will reject HTML that is nested deeper than 512 elements.
+//
// The input is assumed to be UTF-8 encoded.
func Parse(r io.Reader) (*Node, error) {
return ParseWithOptions(r)
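
An illustrative sketch of the new nesting limit in html.Parse (not part of this change):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// 1000 unclosed <div> tags push more than 512 nodes onto the open-element
	// stack, so Parse now returns an error instead of building the full tree.
	deep := strings.Repeat("<div>", 1000)
	if _, err := html.Parse(strings.NewReader(deep)); err != nil {
		fmt.Println("rejected:", err)
	}

	// Ordinary documents are unaffected.
	doc, err := html.Parse(strings.NewReader("<p>hello</p>"))
	fmt.Println(doc != nil, err)
}
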
diff --git a/vendor/golang.org/x/net/html/render.go b/vendor/golang.org/x/net/html/render.go
index e8c123345..0157d89e1 100644
--- a/vendor/golang.org/x/net/html/render.go
+++ b/vendor/golang.org/x/net/html/render.go
@@ -184,7 +184,7 @@ func render1(w writer, n *Node) error {
return err
}
- // Add initial newline where there is danger of a newline beging ignored.
+ // Add initial newline where there is danger of a newline being ignored.
if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
switch n.Data {
case "pre", "listing", "textarea":
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
index 02fe0c2d4..8a7a89d01 100644
--- a/vendor/golang.org/x/net/http2/config.go
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -27,6 +27,7 @@ import (
// - If the resulting value is zero or out of range, use a default.
type http2Config struct {
MaxConcurrentStreams uint32
+ StrictMaxConcurrentRequests bool
MaxDecoderHeaderTableSize uint32
MaxEncoderHeaderTableSize uint32
MaxReadFrameSize uint32
@@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{
- MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
- MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
- MaxReadFrameSize: h2.MaxReadFrameSize,
- SendPingTimeout: h2.ReadIdleTimeout,
- PingTimeout: h2.PingTimeout,
- WriteByteTimeout: h2.WriteByteTimeout,
+ StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams,
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
}
// Unlike most config fields, where out-of-range values revert to the default,
@@ -128,6 +130,9 @@ func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
if h2.MaxConcurrentStreams != 0 {
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
}
+ if http2ConfigStrictMaxConcurrentRequests(h2) {
+ conf.StrictMaxConcurrentRequests = true
+ }
if h2.MaxEncoderHeaderTableSize != 0 {
conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
}
diff --git a/vendor/golang.org/x/net/http2/config_go125.go b/vendor/golang.org/x/net/http2/config_go125.go
new file mode 100644
index 000000000..b4373fe33
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go125.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.26
+
+package http2
+
+import (
+ "net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+ return false
+}
diff --git a/vendor/golang.org/x/net/http2/config_go126.go b/vendor/golang.org/x/net/http2/config_go126.go
new file mode 100644
index 000000000..6b071c149
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go126.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.26
+
+package http2
+
+import (
+ "net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+ return h2.StrictMaxConcurrentRequests
+}
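
The plumbing above feeds the Transport's existing StrictMaxConcurrentStreams option (and, on Go 1.26, the corresponding net/http HTTP2Config field) into the per-connection configuration. A minimal usage sketch of the Transport-level knob (not part of this change):

package main

import (
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	t := &http2.Transport{
		// With strict mode enabled, the client queues new requests on the
		// existing connection once the server's SETTINGS_MAX_CONCURRENT_STREAMS
		// limit is reached, instead of reporting the connection as unusable
		// and triggering additional TCP connections.
		StrictMaxConcurrentStreams: true,
	}
	client := &http.Client{Transport: t}
	_ = client // use client.Get(...) etc. as usual
}
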
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index db3264da8..93bcaab03 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -347,7 +347,7 @@ func (fr *Framer) maxHeaderListSize() uint32 {
func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
// Write the FrameHeader.
f.wbuf = append(f.wbuf[:0],
- 0, // 3 bytes of length, filled in in endWrite
+ 0, // 3 bytes of length, filled in endWrite
0,
0,
byte(ftype),
@@ -1152,6 +1152,15 @@ type PriorityFrame struct {
PriorityParam
}
+var defaultRFC9218Priority = PriorityParam{
+ incremental: 0,
+ urgency: 3,
+}
+
+// Note that HTTP/2 has had two different prioritization schemes, and
+// PriorityParam struct below is a superset of both schemes. The exported
+// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
+
// PriorityParam are the stream prioritzation parameters.
type PriorityParam struct {
// StreamDep is a 31-bit stream identifier for the
@@ -1167,6 +1176,20 @@ type PriorityParam struct {
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
+
+ // "The urgency (u) parameter value is Integer (see Section 3.3.1 of
+ // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
+ // priority. The default is 3."
+ urgency uint8
+
+ // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
+ // [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
+ // incrementally, i.e., provide some meaningful output as chunks of the
+ // response arrive."
+ //
+ // We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
+ // avoid unnecessary type conversions and because either type takes 1 byte.
+ incremental uint8
}
func (p PriorityParam) IsZero() bool {
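
The unexported urgency and incremental fields added above carry the RFC 9218 "u" and "i" parameters. The following standalone sketch only illustrates that wire format; it is not the parser used by this package:

package main

import (
	"fmt"
	"strings"
)

// parsePriority is a simplified, illustrative reading of an RFC 9218 Priority
// field value such as "u=1, i".
func parsePriority(v string) (urgency uint8, incremental bool) {
	urgency = 3 // RFC 9218 default, matching defaultRFC9218Priority above
	for _, part := range strings.Split(v, ",") {
		part = strings.TrimSpace(part)
		switch {
		case strings.HasPrefix(part, "u="):
			if n := part[len("u="):]; len(n) == 1 && n[0] >= '0' && n[0] <= '7' {
				urgency = n[0] - '0'
			}
		case part == "i", part == "i=?1":
			incremental = true
		}
	}
	return urgency, incremental
}

func main() {
	u, i := parsePriority("u=1, i")
	fmt.Println(u, i) // 1 true
}
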
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 6878f8ecc..105fe12fe 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -34,7 +34,6 @@ var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
- inTests bool
// Enabling extended CONNECT by causes browsers to attempt to use
// WebSockets-over-HTTP/2. This results in problems when the server's websocket
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 64085f6e1..bdc5520eb 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -181,6 +181,10 @@ type Server struct {
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
+
+ // Pool of error channels. This is per-Server rather than global
+ // because channels can't be reused across synctest bubbles.
+ errChanPool sync.Pool
}
func (s *serverInternalState) registerConn(sc *serverConn) {
@@ -212,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() {
s.mu.Unlock()
}
+// Global error channel pool used for uninitialized Servers.
+// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
+var errChanPool = sync.Pool{
+ New: func() any { return make(chan error, 1) },
+}
+
+func (s *serverInternalState) getErrChan() chan error {
+ if s == nil {
+ return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
+ }
+ return s.errChanPool.Get().(chan error)
+}
+
+func (s *serverInternalState) putErrChan(ch chan error) {
+ if s == nil {
+ errChanPool.Put(ch) // Server used without calling ConfigureServer
+ return
+ }
+ s.errChanPool.Put(ch)
+}
+
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@@ -224,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if conf == nil {
conf = new(Server)
}
- conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+ conf.state = &serverInternalState{
+ activeConns: make(map[*serverConn]struct{}),
+ errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
+ }
if h1, h2 := s, conf; h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
@@ -1124,25 +1152,6 @@ func (sc *serverConn) readPreface() error {
}
}
-var errChanPool = sync.Pool{
- New: func() interface{} { return make(chan error, 1) },
-}
-
-func getErrChan() chan error {
- if inTests {
- // Channels cannot be reused across synctest tests.
- return make(chan error, 1)
- } else {
- return errChanPool.Get().(chan error)
- }
-}
-
-func putErrChan(ch chan error) {
- if !inTests {
- errChanPool.Put(ch)
- }
-}
-
var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) },
}
@@ -1150,7 +1159,7 @@ var writeDataPool = sync.Pool{
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
- ch := getErrChan()
+ ch := sc.srv.state.getErrChan()
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1182,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
return errStreamClosed
}
}
- putErrChan(ch)
+ sc.srv.state.putErrChan(ch)
if frameWriteDone {
writeDataPool.Put(writeArg)
}
@@ -2436,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// waiting for this frame to be written, so an http.Flush mid-handler
// writes out the correct value of keys, before a handler later potentially
// mutates it.
- errc = getErrChan()
+ errc = sc.srv.state.getErrChan()
}
if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
@@ -2448,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
if errc != nil {
select {
case err := <-errc:
- putErrChan(errc)
+ sc.srv.state.putErrChan(errc)
return err
case <-sc.doneServing:
return errClientDisconnected
@@ -3129,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
method: opts.Method,
url: u,
header: cloneHeader(opts.Header),
- done: getErrChan(),
+ done: sc.srv.state.getErrChan(),
}
select {
@@ -3146,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
case <-st.cw:
return errStreamClosed
case err := <-msg.done:
- putErrChan(msg.done)
+ sc.srv.state.putErrChan(msg.done)
return err
}
}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 35e390251..be759b606 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -355,6 +355,7 @@ type ClientConn struct {
readIdleTimeout time.Duration
pingTimeout time.Duration
extendedConnectAllowed bool
+ strictMaxConcurrentStreams bool
// rstStreamPingsBlocked works around an unfortunate gRPC behavior.
// gRPC strictly limits the number of PING frames that it will receive.
@@ -784,7 +785,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialWindowSize: 65535, // spec default
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
- peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests,
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
seenSettingsChan: make(chan struct{}),
@@ -1018,7 +1020,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
return
}
var maxConcurrentOkay bool
- if cc.t.StrictMaxConcurrentStreams {
+ if cc.strictMaxConcurrentStreams {
// We'll tell the caller we can take a new request to
// prevent the caller from dialing a new TCP
// connection, but then we'll block later before
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index cc893adc2..4d3890f99 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -42,6 +42,8 @@ type OpenStreamOptions struct {
// PusherID is zero if the stream was initiated by the client. Otherwise,
// PusherID names the stream that pushed the newly opened stream.
PusherID uint32
+ // priority is used to set the priority of the newly opened stream.
+ priority PriorityParam
}
// FrameWriteRequest is a request to write a frame.
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
index f6783339d..6d24d6a1b 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
@@ -11,7 +11,7 @@ import (
)
// RFC 7540, Section 5.3.5: the default weight is 16.
-const priorityDefaultWeight = 15 // 16 = 15 + 1
+const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type PriorityWriteSchedulerConfig struct {
@@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
}
}
- ws := &priorityWriteScheduler{
- nodes: make(map[uint32]*priorityNode),
+ ws := &priorityWriteSchedulerRFC7540{
+ nodes: make(map[uint32]*priorityNodeRFC7540),
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
@@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
return ws
}
-type priorityNodeState int
+type priorityNodeStateRFC7540 int
const (
- priorityNodeOpen priorityNodeState = iota
- priorityNodeClosed
- priorityNodeIdle
+ priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
+ priorityNodeClosedRFC7540
+ priorityNodeIdleRFC7540
)
-// priorityNode is a node in an HTTP/2 priority tree.
+// priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3.
-type priorityNode struct {
- q writeQueue // queue of pending frames to write
- id uint32 // id of the stream, or 0 for the root of the tree
- weight uint8 // the actual weight is weight+1, so the value is in [1,256]
- state priorityNodeState // open | closed | idle
- bytes int64 // number of bytes written by this node, or 0 if closed
- subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+type priorityNodeRFC7540 struct {
+ q writeQueue // queue of pending frames to write
+ id uint32 // id of the stream, or 0 for the root of the tree
+ weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+ state priorityNodeStateRFC7540 // open | closed | idle
+ bytes int64 // number of bytes written by this node, or 0 if closed
+ subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
// These links form the priority tree.
- parent *priorityNode
- kids *priorityNode // start of the kids list
- prev, next *priorityNode // doubly-linked list of siblings
+ parent *priorityNodeRFC7540
+ kids *priorityNodeRFC7540 // start of the kids list
+ prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
}
-func (n *priorityNode) setParent(parent *priorityNode) {
+func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
if n == parent {
panic("setParent to self")
}
@@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) {
}
}
-func (n *priorityNode) addBytes(b int64) {
+func (n *priorityNodeRFC7540) addBytes(b int64) {
n.bytes += b
for ; n != nil; n = n.parent {
n.subtreeBytes += b
@@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) {
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
-func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
if !n.q.empty() && f(n, openParent) {
return true
}
@@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
// Don't consider the root "open" when updating openParent since
// we can't send data frames on the root stream (only control frames).
if n.id != 0 {
- openParent = openParent || (n.state == priorityNodeOpen)
+ openParent = openParent || (n.state == priorityNodeOpenRFC7540)
}
// Common case: only one kid or all kids have the same weight.
@@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
*tmp = append(*tmp, n.kids)
n.kids.setParent(nil)
}
- sort.Sort(sortPriorityNodeSiblings(*tmp))
+ sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
for i := len(*tmp) - 1; i >= 0; i-- {
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
}
@@ -207,11 +207,11 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
return false
}
-type sortPriorityNodeSiblings []*priorityNode
+type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
-func (z sortPriorityNodeSiblings) Len() int { return len(z) }
-func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
-func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) }
+func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
// Prefer the subtree that has sent fewer bytes relative to its weight.
// See sections 5.3.2 and 5.3.4.
wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
@@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool {
return bi/bk <= wi/wk
}
-type priorityWriteScheduler struct {
+type priorityWriteSchedulerRFC7540 struct {
// root is the root of the priority tree, where root.id = 0.
// The root queues control frames that are not associated with any stream.
- root priorityNode
+ root priorityNodeRFC7540
// nodes maps stream ids to priority tree nodes.
- nodes map[uint32]*priorityNode
+ nodes map[uint32]*priorityNodeRFC7540
// maxID is the maximum stream id in nodes.
maxID uint32
@@ -239,7 +239,7 @@ type priorityWriteScheduler struct {
// lists of nodes that have been closed or are idle, but are kept in
// the tree for improved prioritization. When the lengths exceed either
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
- closedNodes, idleNodes []*priorityNode
+ closedNodes, idleNodes []*priorityNodeRFC7540
// From the config.
maxClosedNodesInTree int
@@ -248,19 +248,19 @@ type priorityWriteScheduler struct {
enableWriteThrottle bool
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
- tmp []*priorityNode
+ tmp []*priorityNodeRFC7540
// pool of empty queues for reuse.
queuePool writeQueuePool
}
-func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
// The stream may be currently idle but cannot be opened or closed.
if curr := ws.nodes[streamID]; curr != nil {
- if curr.state != priorityNodeIdle {
+ if curr.state != priorityNodeIdleRFC7540 {
panic(fmt.Sprintf("stream %d already opened", streamID))
}
- curr.state = priorityNodeOpen
+ curr.state = priorityNodeOpenRFC7540
return
}
@@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
if parent == nil {
parent = &ws.root
}
- n := &priorityNode{
+ n := &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
- weight: priorityDefaultWeight,
- state: priorityNodeOpen,
+ weight: priorityDefaultWeightRFC7540,
+ state: priorityNodeOpenRFC7540,
}
n.setParent(parent)
ws.nodes[streamID] = n
@@ -285,19 +285,19 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
}
}
-func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
if streamID == 0 {
panic("violation of WriteScheduler interface: cannot close stream 0")
}
if ws.nodes[streamID] == nil {
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
}
- if ws.nodes[streamID].state != priorityNodeOpen {
+ if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
}
n := ws.nodes[streamID]
- n.state = priorityNodeClosed
+ n.state = priorityNodeClosedRFC7540
n.addBytes(-n.bytes)
q := n.q
@@ -310,7 +310,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
}
}
-func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
if streamID == 0 {
panic("adjustPriority on root")
}
@@ -324,11 +324,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
return
}
ws.maxID = streamID
- n = &priorityNode{
+ n = &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
- weight: priorityDefaultWeight,
- state: priorityNodeIdle,
+ weight: priorityDefaultWeightRFC7540,
+ state: priorityNodeIdleRFC7540,
}
n.setParent(&ws.root)
ws.nodes[streamID] = n
@@ -340,7 +340,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
parent := ws.nodes[priority.StreamDep]
if parent == nil {
n.setParent(&ws.root)
- n.weight = priorityDefaultWeight
+ n.weight = priorityDefaultWeightRFC7540
return
}
@@ -381,8 +381,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
n.weight = priority.Weight
}
-func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
- var n *priorityNode
+func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
+ var n *priorityNodeRFC7540
if wr.isControl() {
n = &ws.root
} else {
@@ -401,8 +401,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
n.q.push(wr)
}
-func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
- ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
+ ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
limit := int32(math.MaxInt32)
if openParent {
limit = ws.writeThrottleLimit
@@ -428,7 +428,7 @@ func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
return wr, ok
}
-func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
if maxSize == 0 {
return
}
@@ -442,7 +442,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
*list = append(*list, n)
}
-func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
for n.kids != nil {
n.kids.setParent(n.parent)
}
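Not part of the patch: a minimal standalone sketch of the weighted-fairness comparison in Less above, using hypothetical streams and byte counts. A sibling subtree sorts earlier when it has sent fewer bytes relative to its weight.

package main

import "fmt"

// sortsFirst mirrors the final comparison in
// sortPriorityNodeSiblingsRFC7540.Less: compare bytes-per-weight ratios.
func sortsFirst(wi, bi, wk, bk float64) bool {
	return bi/bk <= wi/wk
}

func main() {
	// Stream i: effective weight 16, 100 bytes sent in its subtree.
	// Stream k: effective weight 8, 100 bytes sent in its subtree.
	// Stream i has sent fewer bytes per unit of weight, so it sorts first.
	fmt.Println(sortsFirst(16, 100, 8, 100)) // true
}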
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
new file mode 100644
index 000000000..9b5b8808e
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
@@ -0,0 +1,209 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+)
+
+type streamMetadata struct {
+ location *writeQueue
+ priority PriorityParam
+}
+
+type priorityWriteSchedulerRFC9218 struct {
+ // control contains control frames (SETTINGS, PING, etc.).
+ control writeQueue
+
+ // heads contain the head of a circular list of streams.
+ // We put these heads within a nested array that represents urgency and
+ // incremental, as defined in
+ // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
+ // 8 represents u=0 up to u=7, and 2 represents i=false and i=true.
+ heads [8][2]*writeQueue
+
+ // streams contains a mapping between each stream ID and their metadata, so
+ // we can quickly locate them when needing to, for example, adjust their
+ // priority.
+ streams map[uint32]streamMetadata
+
+ // queuePool are empty queues for reuse.
+ queuePool writeQueuePool
+
+ // prioritizeIncremental is used to determine whether we should prioritize
+ // incremental streams or not, when urgency is the same in a given Pop()
+ // call.
+ prioritizeIncremental bool
+}
+
+func newPriorityWriteSchedulerRFC9128() WriteScheduler {
+ ws := &priorityWriteSchedulerRFC9218{
+ streams: make(map[uint32]streamMetadata),
+ }
+ return ws
+}
+
+func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
+ if ws.streams[streamID].location != nil {
+ panic(fmt.Errorf("stream %d already opened", streamID))
+ }
+ q := ws.queuePool.get()
+ ws.streams[streamID] = streamMetadata{
+ location: q,
+ priority: opt.priority,
+ }
+
+ u, i := opt.priority.urgency, opt.priority.incremental
+ if ws.heads[u][i] == nil {
+ ws.heads[u][i] = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.heads[u][i].prev
+ q.next = ws.heads[u][i]
+ q.prev.next = q
+ q.next.prev = q
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
+ metadata := ws.streams[streamID]
+ q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+ if q == nil {
+ return
+ }
+ if q.next == q {
+ // This was the only open stream.
+ ws.heads[u][i] = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.heads[u][i] == q {
+ ws.heads[u][i] = q.next
+ }
+ }
+ delete(ws.streams, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
+ metadata := ws.streams[streamID]
+ q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+ if q == nil {
+ return
+ }
+
+ // Remove stream from current location.
+ if q.next == q {
+ // This was the only open stream.
+ ws.heads[u][i] = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.heads[u][i] == q {
+ ws.heads[u][i] = q.next
+ }
+ }
+
+ // Insert stream to the new queue.
+ u, i = priority.urgency, priority.incremental
+ if ws.heads[u][i] == nil {
+ ws.heads[u][i] = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.heads[u][i].prev
+ q.next = ws.heads[u][i]
+ q.prev.next = q
+ q.next.prev = q
+ }
+
+ // Update the metadata.
+ ws.streams[streamID] = streamMetadata{
+ location: q,
+ priority: priority,
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
+ if wr.isControl() {
+ ws.control.push(wr)
+ return
+ }
+ q := ws.streams[wr.StreamID()].location
+ if q == nil {
+ // This is a closed stream.
+ // wr should not be a HEADERS or DATA frame.
+ // We push the request onto the control queue.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ ws.control.push(wr)
+ return
+ }
+ q.push(wr)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.control.empty() {
+ return ws.control.shift(), true
+ }
+
+ // On the next Pop(), we want to prioritize incremental if we prioritized
+ // non-incremental request of the same urgency this time. Vice-versa.
+ // i.e. when there are incremental and non-incremental requests at the same
+ // priority, we give 50% of our bandwidth to the incremental ones in
+ // aggregate and 50% to the first non-incremental one (since
+ // non-incremental streams do not use round-robin writes).
+ ws.prioritizeIncremental = !ws.prioritizeIncremental
+
+ // Always prioritize lowest u (i.e. highest urgency level).
+ for u := range ws.heads {
+ for i := range ws.heads[u] {
+ // When we want to prioritize incremental, we try to pop i=true
+ // first before i=false when u is the same.
+ if ws.prioritizeIncremental {
+ i = (i + 1) % 2
+ }
+ q := ws.heads[u][i]
+ if q == nil {
+ continue
+ }
+ for {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ if i == 1 {
+ // For incremental streams, we update head to q.next so
+ // we can round-robin between multiple streams that can
+ // immediately benefit from partial writes.
+ ws.heads[u][i] = q.next
+ } else {
+ // For non-incremental streams, we try to finish one to
+ // completion rather than doing round-robin. However,
+ // we update head here so that if q.consume() is !ok
+ // (e.g. the stream has no more frame to consume), head
+ // is updated to the next q that has frames to consume
+ // on future iterations. This way, we do not prioritize
+ // writing to unavailable stream on next Pop() calls,
+ // preventing head-of-line blocking.
+ ws.heads[u][i] = q
+ }
+ return wr, true
+ }
+ q = q.next
+ if q == ws.heads[u][i] {
+ break
+ }
+ }
+
+ }
+ }
+ return FrameWriteRequest{}, false
+}
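Not part of the patch: a standalone sketch (hypothetical type and helper names) of how the scheduler above indexes its ring heads by the two RFC 9218 priority parameters.

package main

import "fmt"

// prio models the two RFC 9218 parameters tracked per stream.
type prio struct {
	urgency     int  // 0 (most urgent) .. 7 (least urgent)
	incremental bool // true if partial responses are useful to the client
}

// headIndex returns the indices into the scheduler's heads[8][2] array.
func headIndex(p prio) (u, i int) {
	u = p.urgency
	if p.incremental {
		i = 1
	}
	return u, i
}

func main() {
	fmt.Println(headIndex(prio{urgency: 3, incremental: true})) // 3 1
	// Pop() scans u from 0 to 7 and, within one urgency level, alternates
	// which of i=0/i=1 it tries first on successive calls
	// (the prioritizeIncremental toggle above).
}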
diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
index 54fe86322..737cff9ec 100644
--- a/vendor/golang.org/x/net/http2/writesched_roundrobin.go
+++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
@@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct {
}
// newRoundRobinWriteScheduler constructs a new write scheduler.
-// The round robin scheduler priorizes control frames
+// The round robin scheduler prioritizes control frames
// like SETTINGS and PING over DATA frames.
// When there are no control frames to send, it performs a round-robin
// selection from the ready streams.
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
index 4b7055317..1e10f89eb 100644
--- a/vendor/golang.org/x/net/internal/httpcommon/request.go
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -51,7 +51,7 @@ type EncodeHeadersParam struct {
DefaultUserAgent string
}
-// EncodeHeadersParam is the result of EncodeHeaders.
+// EncodeHeadersResult is the result of EncodeHeaders.
type EncodeHeadersResult struct {
HasBody bool
HasTrailers bool
@@ -399,7 +399,7 @@ type ServerRequestResult struct {
// If the request should be rejected, this is a short string suitable for passing
// to the http2 package's CountError function.
- // It might be a bit odd to return errors this way rather than returing an error,
+ // It might be a bit odd to return errors this way rather than returning an error,
// but this ensures we don't forget to include a CountError reason.
InvalidReason string
}
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
index de34feb84..3e3b63069 100644
--- a/vendor/golang.org/x/oauth2/oauth2.go
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -9,7 +9,6 @@
package oauth2 // import "golang.org/x/oauth2"
import (
- "bytes"
"context"
"errors"
"net/http"
@@ -158,7 +157,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and
// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches)
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
- var buf bytes.Buffer
+ var buf strings.Builder
buf.WriteString(c.Endpoint.AuthURL)
v := url.Values{
"response_type": {"code"},
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
index 3c7a6d6e2..3ea470387 100644
--- a/vendor/golang.org/x/sys/unix/affinity_linux.go
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -41,6 +41,15 @@ func (s *CPUSet) Zero() {
clear(s[:])
}
+// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity]
+// will silently ignore any invalid CPU bits in [CPUSet] so this is an
+// efficient way of resetting the CPU affinity of a process.
+func (s *CPUSet) Fill() {
+ for i := range s {
+ s[i] = ^cpuMask(0)
+ }
+}
+
func cpuBitsIndex(cpu int) int {
return cpu / _NCPUBITS
}
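Not part of the patch: a usage sketch (Linux only) of the new Fill method, resetting the calling thread's CPU affinity to all CPUs via the existing SchedSetaffinity wrapper.

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var set unix.CPUSet
	set.Fill() // set every possible CPU bit; the kernel ignores invalid ones
	// A pid of 0 means the calling thread.
	if err := unix.SchedSetaffinity(0, &set); err != nil {
		log.Fatal(err)
	}
}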
diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go
index 9e83d18cd..62ed12645 100644
--- a/vendor/golang.org/x/sys/unix/fdset.go
+++ b/vendor/golang.org/x/sys/unix/fdset.go
@@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool {
// Zero clears the set fds.
func (fds *FdSet) Zero() {
- for i := range fds.Bits {
- fds.Bits[i] = 0
- }
+ clear(fds.Bits[:])
}
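Not part of the patch: a standalone sketch of the Go 1.21 clear builtin that this hunk (and the ifreq and PPPoE hunks below) now uses. For a slice, clear zeroes every element in the given range.

package main

import "fmt"

func main() {
	fds := [4]int64{1, 2, 3, 4}
	clear(fds[:]) // zero the whole backing array through a slice
	buf := []byte("abcdef")
	clear(buf[2:4]) // zero only a sub-range, as in the PPPoE hunk below
	fmt.Println(fds, buf) // [0 0 0 0] [97 98 0 0 101 102]
}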
diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go
index 848840ae4..309f5a2b0 100644
--- a/vendor/golang.org/x/sys/unix/ifreq_linux.go
+++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) {
// clear zeroes the ifreq's union field to prevent trailing garbage data from
// being sent to the kernel if an ifreq is reused.
func (ifr *Ifreq) clear() {
- for i := range ifr.raw.Ifru {
- ifr.raw.Ifru[i] = 0
- }
+ clear(ifr.raw.Ifru[:])
}
// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index e6f31d374..d0ed61191 100644
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -49,6 +49,7 @@ esac
if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run)
+ set -e
$cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
exit
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 4958a6570..9439af961 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) {
// one. The kernel expects SID to be in network byte order.
binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
copy(sa.raw[8:14], sa.Remote)
- for i := 14; i < 14+IFNAMSIZ; i++ {
- sa.raw[i] = 0
- }
+ clear(sa.raw[14 : 14+IFNAMSIZ])
copy(sa.raw[14:], sa.Dev)
return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 88162099a..34a467697 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
return Statvfs1(path, buf, ST_WAIT)
}
+func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) {
+ var (
+ _p0 unsafe.Pointer
+ bufsize uintptr
+ )
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
/*
* Exposed directly
*/
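Not part of the patch: a usage sketch (NetBSD only) of the common two-call pattern for the new Getvfsstat wrapper; per getvfsstat(2), a null buffer makes the syscall report the number of mounted file systems.

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// First call: nil buffer, just get the count.
	n, err := unix.Getvfsstat(nil, unix.ST_WAIT)
	if err != nil {
		log.Fatal(err)
	}
	// Second call: fill a slice sized to the reported count.
	buf := make([]unix.Statvfs_t, n)
	if _, err := unix.Getvfsstat(buf, unix.ST_WAIT); err != nil {
		log.Fatal(err)
	}
	fmt.Println("mounted file systems:", len(buf))
}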
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 640f6b153..bd5133730 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
+//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents
+//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer
//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
//sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 993a2297d..358be3c7f 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -65,6 +65,22 @@ var signals = [...]string{
15: "terminated",
}
+// File flags for [os.OpenFile]. The O_ prefix is used to indicate
+// that these flags are specific to the OpenFile function.
+const (
+ O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL
+ O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT
+ O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE
+ O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS
+ O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS
+ O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE
+ O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN
+ O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS
+ O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING
+ O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED
+ O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH
+)
+
const (
FILE_READ_DATA = 0x00000001
FILE_READ_ATTRIBUTES = 0x00000080
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 641a5f4b7..426151a01 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -238,6 +238,7 @@ var (
procFindResourceW = modkernel32.NewProc("FindResourceW")
procFindVolumeClose = modkernel32.NewProc("FindVolumeClose")
procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose")
+ procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer")
procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers")
procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile")
procFormatMessageW = modkernel32.NewProc("FormatMessageW")
@@ -284,6 +285,7 @@ var (
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId")
+ procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents")
procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult")
procGetPriorityClass = modkernel32.NewProc("GetPriorityClass")
procGetProcAddress = modkernel32.NewProc("GetProcAddress")
@@ -2111,6 +2113,14 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
return
}
+func FlushConsoleInputBuffer(console Handle) (err error) {
+ r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func FlushFileBuffers(handle Handle) (err error) {
r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle))
if r1 == 0 {
@@ -2481,6 +2491,14 @@ func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err erro
return
}
+func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) {
var _p0 uint32
if wait {
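Not part of the patch: a usage sketch (Windows only) that drains pending console input with the two newly exposed wrappers.

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	h, err := windows.GetStdHandle(windows.STD_INPUT_HANDLE)
	if err != nil {
		log.Fatal(err)
	}
	var pending uint32
	if err := windows.GetNumberOfConsoleInputEvents(h, &pending); err != nil {
		log.Fatal(err)
	}
	if pending > 0 {
		if err := windows.FlushConsoleInputBuffer(h); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("flushed", pending, "pending console events")
}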
diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go
index 9d2ae547b..fb8273236 100644
--- a/vendor/golang.org/x/text/unicode/bidi/core.go
+++ b/vendor/golang.org/x/text/unicode/bidi/core.go
@@ -427,13 +427,6 @@ type isolatingRunSequence struct {
func (i *isolatingRunSequence) Len() int { return len(i.indexes) }
-func maxLevel(a, b level) level {
- if a > b {
- return a
- }
- return b
-}
-
// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types,
// either L or R, for each isolating run sequence.
func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
@@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
indexes: indexes,
types: types,
level: level,
- sos: typeForLevel(maxLevel(prevLevel, level)),
- eos: typeForLevel(maxLevel(succLevel, level)),
+ sos: typeForLevel(max(prevLevel, level)),
+ eos: typeForLevel(max(succLevel, level)),
}
}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
index 5e5601aa4..5bacc0fa4 100644
--- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go
+++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go
@@ -209,48 +209,46 @@ func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool)
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
// If there are duplicate import declarations, all matching ones are deleted.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
- var delspecs []*ast.ImportSpec
- var delcomments []*ast.CommentGroup
+ var (
+ delspecs = make(map[*ast.ImportSpec]bool)
+ delcomments = make(map[*ast.CommentGroup]bool)
+ )
// Find the import nodes that import path, if any.
for i := 0; i < len(f.Decls); i++ {
- decl := f.Decls[i]
- gen, ok := decl.(*ast.GenDecl)
+ gen, ok := f.Decls[i].(*ast.GenDecl)
if !ok || gen.Tok != token.IMPORT {
continue
}
for j := 0; j < len(gen.Specs); j++ {
- spec := gen.Specs[j]
- impspec := spec.(*ast.ImportSpec)
+ impspec := gen.Specs[j].(*ast.ImportSpec)
if importName(impspec) != name || importPath(impspec) != path {
continue
}
// We found an import spec that imports path.
// Delete it.
- delspecs = append(delspecs, impspec)
+ delspecs[impspec] = true
deleted = true
- copy(gen.Specs[j:], gen.Specs[j+1:])
- gen.Specs = gen.Specs[:len(gen.Specs)-1]
+ gen.Specs = slices.Delete(gen.Specs, j, j+1)
// If this was the last import spec in this decl,
// delete the decl, too.
if len(gen.Specs) == 0 {
- copy(f.Decls[i:], f.Decls[i+1:])
- f.Decls = f.Decls[:len(f.Decls)-1]
+ f.Decls = slices.Delete(f.Decls, i, i+1)
i--
break
} else if len(gen.Specs) == 1 {
if impspec.Doc != nil {
- delcomments = append(delcomments, impspec.Doc)
+ delcomments[impspec.Doc] = true
}
if impspec.Comment != nil {
- delcomments = append(delcomments, impspec.Comment)
+ delcomments[impspec.Comment] = true
}
for _, cg := range f.Comments {
// Found comment on the same line as the import spec.
if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line {
- delcomments = append(delcomments, cg)
+ delcomments[cg] = true
break
}
}
@@ -294,38 +292,21 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del
}
// Delete imports from f.Imports.
- for i := 0; i < len(f.Imports); i++ {
- imp := f.Imports[i]
- for j, del := range delspecs {
- if imp == del {
- copy(f.Imports[i:], f.Imports[i+1:])
- f.Imports = f.Imports[:len(f.Imports)-1]
- copy(delspecs[j:], delspecs[j+1:])
- delspecs = delspecs[:len(delspecs)-1]
- i--
- break
- }
- }
+ before := len(f.Imports)
+ f.Imports = slices.DeleteFunc(f.Imports, func(imp *ast.ImportSpec) bool {
+ _, ok := delspecs[imp]
+ return ok
+ })
+ if len(f.Imports)+len(delspecs) != before {
+ // This can happen when the AST is invalid (i.e. imports differ between f.Decls and f.Imports).
+ panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
}
// Delete comments from f.Comments.
- for i := 0; i < len(f.Comments); i++ {
- cg := f.Comments[i]
- for j, del := range delcomments {
- if cg == del {
- copy(f.Comments[i:], f.Comments[i+1:])
- f.Comments = f.Comments[:len(f.Comments)-1]
- copy(delcomments[j:], delcomments[j+1:])
- delcomments = delcomments[:len(delcomments)-1]
- i--
- break
- }
- }
- }
-
- if len(delspecs) > 0 {
- panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
- }
+ f.Comments = slices.DeleteFunc(f.Comments, func(cg *ast.CommentGroup) bool {
+ _, ok := delcomments[cg]
+ return ok
+ })
return
}
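Not part of the patch: a usage sketch of the exported astutil.DeleteNamedImport API whose internals change above. It removes the unused "fmt" import from both f.Decls and f.Imports of a hypothetical source file.

package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

const src = `package p

import (
	"fmt"
	"os"
)

var _ = os.Args
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// An empty name matches an unnamed (default) import of the path.
	astutil.DeleteNamedImport(fset, f, "", "fmt")
	printer.Fprint(os.Stdout, fset, f) // prints the file with only "os" imported
}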
diff --git a/vendor/golang.org/x/tools/go/ast/edge/edge.go b/vendor/golang.org/x/tools/go/ast/edge/edge.go
new file mode 100644
index 000000000..4f6ccfd6e
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/edge/edge.go
@@ -0,0 +1,295 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edge defines identifiers for each field of an ast.Node
+// struct type that refers to another Node.
+package edge
+
+import (
+ "fmt"
+ "go/ast"
+ "reflect"
+)
+
+// A Kind describes a field of an ast.Node struct.
+type Kind uint8
+
+// String returns a description of the edge kind.
+func (k Kind) String() string {
+ if k == Invalid {
+ return "<invalid>"
+ }
+ info := fieldInfos[k]
+ return fmt.Sprintf("%v.%s", info.nodeType.Elem().Name(), info.name)
+}
+
+// NodeType returns the pointer-to-struct type of the ast.Node implementation.
+func (k Kind) NodeType() reflect.Type { return fieldInfos[k].nodeType }
+
+// FieldName returns the name of the field.
+func (k Kind) FieldName() string { return fieldInfos[k].name }
+
+// FieldType returns the declared type of the field.
+func (k Kind) FieldType() reflect.Type { return fieldInfos[k].fieldType }
+
+// Get returns the direct child of n identified by (k, idx).
+// n's type must match k.NodeType().
+// idx must be a valid slice index, or -1 for a non-slice.
+func (k Kind) Get(n ast.Node, idx int) ast.Node {
+ if k.NodeType() != reflect.TypeOf(n) {
+ panic(fmt.Sprintf("%v.Get(%T): invalid node type", k, n))
+ }
+ v := reflect.ValueOf(n).Elem().Field(fieldInfos[k].index)
+ if idx != -1 {
+ v = v.Index(idx) // asserts valid index
+ } else {
+ // (The type assertion below asserts that v is not a slice.)
+ }
+ return v.Interface().(ast.Node) // may be nil
+}
+
+const (
+ Invalid Kind = iota // for nodes at the root of the traversal
+
+ // Kinds are sorted alphabetically.
+ // Numbering is not stable.
+ // Each is named Type_Field, where Type is the
+ // ast.Node struct type and Field is the name of the field
+
+ ArrayType_Elt
+ ArrayType_Len
+ AssignStmt_Lhs
+ AssignStmt_Rhs
+ BinaryExpr_X
+ BinaryExpr_Y
+ BlockStmt_List
+ BranchStmt_Label
+ CallExpr_Args
+ CallExpr_Fun
+ CaseClause_Body
+ CaseClause_List
+ ChanType_Value
+ CommClause_Body
+ CommClause_Comm
+ CommentGroup_List
+ CompositeLit_Elts
+ CompositeLit_Type
+ DeclStmt_Decl
+ DeferStmt_Call
+ Ellipsis_Elt
+ ExprStmt_X
+ FieldList_List
+ Field_Comment
+ Field_Doc
+ Field_Names
+ Field_Tag
+ Field_Type
+ File_Decls
+ File_Doc
+ File_Name
+ ForStmt_Body
+ ForStmt_Cond
+ ForStmt_Init
+ ForStmt_Post
+ FuncDecl_Body
+ FuncDecl_Doc
+ FuncDecl_Name
+ FuncDecl_Recv
+ FuncDecl_Type
+ FuncLit_Body
+ FuncLit_Type
+ FuncType_Params
+ FuncType_Results
+ FuncType_TypeParams
+ GenDecl_Doc
+ GenDecl_Specs
+ GoStmt_Call
+ IfStmt_Body
+ IfStmt_Cond
+ IfStmt_Else
+ IfStmt_Init
+ ImportSpec_Comment
+ ImportSpec_Doc
+ ImportSpec_Name
+ ImportSpec_Path
+ IncDecStmt_X
+ IndexExpr_Index
+ IndexExpr_X
+ IndexListExpr_Indices
+ IndexListExpr_X
+ InterfaceType_Methods
+ KeyValueExpr_Key
+ KeyValueExpr_Value
+ LabeledStmt_Label
+ LabeledStmt_Stmt
+ MapType_Key
+ MapType_Value
+ ParenExpr_X
+ RangeStmt_Body
+ RangeStmt_Key
+ RangeStmt_Value
+ RangeStmt_X
+ ReturnStmt_Results
+ SelectStmt_Body
+ SelectorExpr_Sel
+ SelectorExpr_X
+ SendStmt_Chan
+ SendStmt_Value
+ SliceExpr_High
+ SliceExpr_Low
+ SliceExpr_Max
+ SliceExpr_X
+ StarExpr_X
+ StructType_Fields
+ SwitchStmt_Body
+ SwitchStmt_Init
+ SwitchStmt_Tag
+ TypeAssertExpr_Type
+ TypeAssertExpr_X
+ TypeSpec_Comment
+ TypeSpec_Doc
+ TypeSpec_Name
+ TypeSpec_Type
+ TypeSpec_TypeParams
+ TypeSwitchStmt_Assign
+ TypeSwitchStmt_Body
+ TypeSwitchStmt_Init
+ UnaryExpr_X
+ ValueSpec_Comment
+ ValueSpec_Doc
+ ValueSpec_Names
+ ValueSpec_Type
+ ValueSpec_Values
+
+ maxKind
+)
+
+// Assert that the encoding fits in 7 bits,
+// as the inspector relies on this.
+// (We are currently at 104.)
+var _ = [1 << 7]struct{}{}[maxKind]
+
+type fieldInfo struct {
+ nodeType reflect.Type // pointer-to-struct type of ast.Node implementation
+ name string
+ index int
+ fieldType reflect.Type
+}
+
+func info[N ast.Node](fieldName string) fieldInfo {
+ nodePtrType := reflect.TypeFor[N]()
+ f, ok := nodePtrType.Elem().FieldByName(fieldName)
+ if !ok {
+ panic(fieldName)
+ }
+ return fieldInfo{nodePtrType, fieldName, f.Index[0], f.Type}
+}
+
+var fieldInfos = [...]fieldInfo{
+ Invalid: {},
+ ArrayType_Elt: info[*ast.ArrayType]("Elt"),
+ ArrayType_Len: info[*ast.ArrayType]("Len"),
+ AssignStmt_Lhs: info[*ast.AssignStmt]("Lhs"),
+ AssignStmt_Rhs: info[*ast.AssignStmt]("Rhs"),
+ BinaryExpr_X: info[*ast.BinaryExpr]("X"),
+ BinaryExpr_Y: info[*ast.BinaryExpr]("Y"),
+ BlockStmt_List: info[*ast.BlockStmt]("List"),
+ BranchStmt_Label: info[*ast.BranchStmt]("Label"),
+ CallExpr_Args: info[*ast.CallExpr]("Args"),
+ CallExpr_Fun: info[*ast.CallExpr]("Fun"),
+ CaseClause_Body: info[*ast.CaseClause]("Body"),
+ CaseClause_List: info[*ast.CaseClause]("List"),
+ ChanType_Value: info[*ast.ChanType]("Value"),
+ CommClause_Body: info[*ast.CommClause]("Body"),
+ CommClause_Comm: info[*ast.CommClause]("Comm"),
+ CommentGroup_List: info[*ast.CommentGroup]("List"),
+ CompositeLit_Elts: info[*ast.CompositeLit]("Elts"),
+ CompositeLit_Type: info[*ast.CompositeLit]("Type"),
+ DeclStmt_Decl: info[*ast.DeclStmt]("Decl"),
+ DeferStmt_Call: info[*ast.DeferStmt]("Call"),
+ Ellipsis_Elt: info[*ast.Ellipsis]("Elt"),
+ ExprStmt_X: info[*ast.ExprStmt]("X"),
+ FieldList_List: info[*ast.FieldList]("List"),
+ Field_Comment: info[*ast.Field]("Comment"),
+ Field_Doc: info[*ast.Field]("Doc"),
+ Field_Names: info[*ast.Field]("Names"),
+ Field_Tag: info[*ast.Field]("Tag"),
+ Field_Type: info[*ast.Field]("Type"),
+ File_Decls: info[*ast.File]("Decls"),
+ File_Doc: info[*ast.File]("Doc"),
+ File_Name: info[*ast.File]("Name"),
+ ForStmt_Body: info[*ast.ForStmt]("Body"),
+ ForStmt_Cond: info[*ast.ForStmt]("Cond"),
+ ForStmt_Init: info[*ast.ForStmt]("Init"),
+ ForStmt_Post: info[*ast.ForStmt]("Post"),
+ FuncDecl_Body: info[*ast.FuncDecl]("Body"),
+ FuncDecl_Doc: info[*ast.FuncDecl]("Doc"),
+ FuncDecl_Name: info[*ast.FuncDecl]("Name"),
+ FuncDecl_Recv: info[*ast.FuncDecl]("Recv"),
+ FuncDecl_Type: info[*ast.FuncDecl]("Type"),
+ FuncLit_Body: info[*ast.FuncLit]("Body"),
+ FuncLit_Type: info[*ast.FuncLit]("Type"),
+ FuncType_Params: info[*ast.FuncType]("Params"),
+ FuncType_Results: info[*ast.FuncType]("Results"),
+ FuncType_TypeParams: info[*ast.FuncType]("TypeParams"),
+ GenDecl_Doc: info[*ast.GenDecl]("Doc"),
+ GenDecl_Specs: info[*ast.GenDecl]("Specs"),
+ GoStmt_Call: info[*ast.GoStmt]("Call"),
+ IfStmt_Body: info[*ast.IfStmt]("Body"),
+ IfStmt_Cond: info[*ast.IfStmt]("Cond"),
+ IfStmt_Else: info[*ast.IfStmt]("Else"),
+ IfStmt_Init: info[*ast.IfStmt]("Init"),
+ ImportSpec_Comment: info[*ast.ImportSpec]("Comment"),
+ ImportSpec_Doc: info[*ast.ImportSpec]("Doc"),
+ ImportSpec_Name: info[*ast.ImportSpec]("Name"),
+ ImportSpec_Path: info[*ast.ImportSpec]("Path"),
+ IncDecStmt_X: info[*ast.IncDecStmt]("X"),
+ IndexExpr_Index: info[*ast.IndexExpr]("Index"),
+ IndexExpr_X: info[*ast.IndexExpr]("X"),
+ IndexListExpr_Indices: info[*ast.IndexListExpr]("Indices"),
+ IndexListExpr_X: info[*ast.IndexListExpr]("X"),
+ InterfaceType_Methods: info[*ast.InterfaceType]("Methods"),
+ KeyValueExpr_Key: info[*ast.KeyValueExpr]("Key"),
+ KeyValueExpr_Value: info[*ast.KeyValueExpr]("Value"),
+ LabeledStmt_Label: info[*ast.LabeledStmt]("Label"),
+ LabeledStmt_Stmt: info[*ast.LabeledStmt]("Stmt"),
+ MapType_Key: info[*ast.MapType]("Key"),
+ MapType_Value: info[*ast.MapType]("Value"),
+ ParenExpr_X: info[*ast.ParenExpr]("X"),
+ RangeStmt_Body: info[*ast.RangeStmt]("Body"),
+ RangeStmt_Key: info[*ast.RangeStmt]("Key"),
+ RangeStmt_Value: info[*ast.RangeStmt]("Value"),
+ RangeStmt_X: info[*ast.RangeStmt]("X"),
+ ReturnStmt_Results: info[*ast.ReturnStmt]("Results"),
+ SelectStmt_Body: info[*ast.SelectStmt]("Body"),
+ SelectorExpr_Sel: info[*ast.SelectorExpr]("Sel"),
+ SelectorExpr_X: info[*ast.SelectorExpr]("X"),
+ SendStmt_Chan: info[*ast.SendStmt]("Chan"),
+ SendStmt_Value: info[*ast.SendStmt]("Value"),
+ SliceExpr_High: info[*ast.SliceExpr]("High"),
+ SliceExpr_Low: info[*ast.SliceExpr]("Low"),
+ SliceExpr_Max: info[*ast.SliceExpr]("Max"),
+ SliceExpr_X: info[*ast.SliceExpr]("X"),
+ StarExpr_X: info[*ast.StarExpr]("X"),
+ StructType_Fields: info[*ast.StructType]("Fields"),
+ SwitchStmt_Body: info[*ast.SwitchStmt]("Body"),
+ SwitchStmt_Init: info[*ast.SwitchStmt]("Init"),
+ SwitchStmt_Tag: info[*ast.SwitchStmt]("Tag"),
+ TypeAssertExpr_Type: info[*ast.TypeAssertExpr]("Type"),
+ TypeAssertExpr_X: info[*ast.TypeAssertExpr]("X"),
+ TypeSpec_Comment: info[*ast.TypeSpec]("Comment"),
+ TypeSpec_Doc: info[*ast.TypeSpec]("Doc"),
+ TypeSpec_Name: info[*ast.TypeSpec]("Name"),
+ TypeSpec_Type: info[*ast.TypeSpec]("Type"),
+ TypeSpec_TypeParams: info[*ast.TypeSpec]("TypeParams"),
+ TypeSwitchStmt_Assign: info[*ast.TypeSwitchStmt]("Assign"),
+ TypeSwitchStmt_Body: info[*ast.TypeSwitchStmt]("Body"),
+ TypeSwitchStmt_Init: info[*ast.TypeSwitchStmt]("Init"),
+ UnaryExpr_X: info[*ast.UnaryExpr]("X"),
+ ValueSpec_Comment: info[*ast.ValueSpec]("Comment"),
+ ValueSpec_Doc: info[*ast.ValueSpec]("Doc"),
+ ValueSpec_Names: info[*ast.ValueSpec]("Names"),
+ ValueSpec_Type: info[*ast.ValueSpec]("Type"),
+ ValueSpec_Values: info[*ast.ValueSpec]("Values"),
+}
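Not part of the patch: a usage sketch of the new edge package, resolving the children of a call expression in a hypothetical source snippet the way the Kind.Get documentation above describes.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/edge"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p\nvar _ = f(x, y)\n", 0)
	if err != nil {
		panic(err)
	}
	var call *ast.CallExpr
	ast.Inspect(f, func(n ast.Node) bool {
		if c, ok := n.(*ast.CallExpr); ok {
			call = c
		}
		return true
	})
	// CallExpr_Fun is a single-node field, so the index is -1;
	// CallExpr_Args is a slice, so indices 0 and 1 select x and y.
	fmt.Println(edge.CallExpr_Fun.Get(call, -1)) // f
	fmt.Println(edge.CallExpr_Args.Get(call, 1)) // y
}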
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
new file mode 100644
index 000000000..7e72d3c28
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
@@ -0,0 +1,502 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inspector
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "iter"
+ "reflect"
+
+ "golang.org/x/tools/go/ast/edge"
+)
+
+// A Cursor represents an [ast.Node]. It is immutable.
+//
+// Two Cursors compare equal if they represent the same node.
+//
+// Call [Inspector.Root] to obtain a valid cursor for the virtual root
+// node of the traversal.
+//
+// Use the following methods to navigate efficiently around the tree:
+// - for ancestors, use [Cursor.Parent] and [Cursor.Enclosing];
+// - for children, use [Cursor.Child], [Cursor.Children],
+// [Cursor.FirstChild], and [Cursor.LastChild];
+// - for siblings, use [Cursor.PrevSibling] and [Cursor.NextSibling];
+// - for descendants, use [Cursor.FindByPos], [Cursor.FindNode],
+// [Cursor.Inspect], and [Cursor.Preorder].
+//
+// Use the [Cursor.ChildAt] and [Cursor.ParentEdge] methods for
+// information about the edges in a tree: which field (and slice
+// element) of the parent node holds the child.
+type Cursor struct {
+ in *Inspector
+ index int32 // index of push node; -1 for virtual root node
+}
+
+// Root returns a cursor for the virtual root node,
+// whose children are the files provided to [New].
+//
+// Its [Cursor.Node] method returns nil.
+func (in *Inspector) Root() Cursor {
+ return Cursor{in, -1}
+}
+
+// At returns the cursor at the specified index in the traversal,
+// which must have been obtained from [Cursor.Index] on a Cursor
+// belonging to the same Inspector (see [Cursor.Inspector]).
+func (in *Inspector) At(index int32) Cursor {
+ if index < 0 {
+ panic("negative index")
+ }
+ if int(index) >= len(in.events) {
+ panic("index out of range for this inspector")
+ }
+ if in.events[index].index < index {
+ panic("invalid index") // (a push, not a pop)
+ }
+ return Cursor{in, index}
+}
+
+// Inspector returns the cursor's Inspector.
+func (c Cursor) Inspector() *Inspector { return c.in }
+
+// Index returns the index of this cursor position within the package.
+//
+// Clients should not assume anything about the numeric Index value
+// except that it increases monotonically throughout the traversal.
+// It is provided for use with [At].
+//
+// Index must not be called on the Root node.
+func (c Cursor) Index() int32 {
+ if c.index < 0 {
+ panic("Index called on Root node")
+ }
+ return c.index
+}
+
+// Node returns the node at the current cursor position,
+// or nil for the cursor returned by [Inspector.Root].
+func (c Cursor) Node() ast.Node {
+ if c.index < 0 {
+ return nil
+ }
+ return c.in.events[c.index].node
+}
+
+// String returns information about the cursor's node, if any.
+func (c Cursor) String() string {
+ if c.in == nil {
+ return "(invalid)"
+ }
+ if c.index < 0 {
+ return "(root)"
+ }
+ return reflect.TypeOf(c.Node()).String()
+}
+
+// indices returns the [start, end) half-open interval of event indices.
+func (c Cursor) indices() (int32, int32) {
+ if c.index < 0 {
+ return 0, int32(len(c.in.events)) // root: all events
+ } else {
+ return c.index, c.in.events[c.index].index + 1 // just one subtree
+ }
+}
+
+// Preorder returns an iterator over the nodes of the subtree
+// represented by c in depth-first order. Each node in the sequence is
+// represented by a Cursor that allows access to the Node, but may
+// also be used to start a new traversal, or to obtain the stack of
+// nodes enclosing the cursor.
+//
+// The traversal sequence is determined by [ast.Inspect]. The types
+// argument, if non-empty, enables type-based filtering of events. The
+// function f is called only for nodes whose type matches an
+// element of the types slice.
+//
+// If you need control over descent into subtrees,
+// or need both pre- and post-order notifications, use [Cursor.Inspect].
+func (c Cursor) Preorder(types ...ast.Node) iter.Seq[Cursor] {
+ mask := maskOf(types)
+
+ return func(yield func(Cursor) bool) {
+ events := c.in.events
+
+ for i, limit := c.indices(); i < limit; {
+ ev := events[i]
+ if ev.index > i { // push?
+ if ev.typ&mask != 0 && !yield(Cursor{c.in, i}) {
+ break
+ }
+ pop := ev.index
+ if events[pop].typ&mask == 0 {
+ // Subtree does not contain types: skip.
+ i = pop + 1
+ continue
+ }
+ }
+ i++
+ }
+ }
+}
+
+// Inspect visits the nodes of the subtree represented by c in
+// depth-first order. It calls f(n) for each node n before it
+// visits n's children. If f returns true, Inspect invokes f
+// recursively for each of the non-nil children of the node.
+//
+// Each node is represented by a Cursor that allows access to the
+// Node, but may also be used to start a new traversal, or to obtain
+// the stack of nodes enclosing the cursor.
+//
+// The complete traversal sequence is determined by [ast.Inspect].
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+func (c Cursor) Inspect(types []ast.Node, f func(c Cursor) (descend bool)) {
+ mask := maskOf(types)
+ events := c.in.events
+ for i, limit := c.indices(); i < limit; {
+ ev := events[i]
+ if ev.index > i {
+ // push
+ pop := ev.index
+ if ev.typ&mask != 0 && !f(Cursor{c.in, i}) ||
+ events[pop].typ&mask == 0 {
+ // The user opted not to descend, or the
+ // subtree does not contain types:
+ // skip past the pop.
+ i = pop + 1
+ continue
+ }
+ }
+ i++
+ }
+}
+
+// Enclosing returns an iterator over the nodes enclosing the
+// current node, starting with the Cursor itself.
+//
+// Enclosing must not be called on the Root node (whose [Cursor.Node] returns nil).
+//
+// The types argument, if non-empty, enables type-based filtering of
+// events: the sequence includes only enclosing nodes whose type
+// matches an element of the types slice.
+func (c Cursor) Enclosing(types ...ast.Node) iter.Seq[Cursor] {
+ if c.index < 0 {
+ panic("Cursor.Enclosing called on Root node")
+ }
+
+ mask := maskOf(types)
+
+ return func(yield func(Cursor) bool) {
+ events := c.in.events
+ for i := c.index; i >= 0; i = events[i].parent {
+ if events[i].typ&mask != 0 && !yield(Cursor{c.in, i}) {
+ break
+ }
+ }
+ }
+}
+
+// Parent returns the parent of the current node.
+//
+// Parent must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) Parent() Cursor {
+ if c.index < 0 {
+ panic("Cursor.Parent called on Root node")
+ }
+
+ return Cursor{c.in, c.in.events[c.index].parent}
+}
+
+// ParentEdge returns the identity of the field in the parent node
+// that holds this cursor's node, and if it is a list, the index within it.
+//
+// For example, f(x, y) is a CallExpr whose three children are Idents.
+// f has edge kind [edge.CallExpr_Fun] and index -1.
+// x and y have kind [edge.CallExpr_Args] and indices 0 and 1, respectively.
+//
+// If called on a child of the Root node, it returns ([edge.Invalid], -1).
+//
+// ParentEdge must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) ParentEdge() (edge.Kind, int) {
+ if c.index < 0 {
+ panic("Cursor.ParentEdge called on Root node")
+ }
+ events := c.in.events
+ pop := events[c.index].index
+ return unpackEdgeKindAndIndex(events[pop].parent)
+}
+
+// ChildAt returns the cursor for the child of the
+// current node identified by its edge and index.
+// The index must be -1 if the edge.Kind is not a slice.
+// The indicated child node must exist.
+//
+// ChildAt must not be called on the Root node (whose [Cursor.Node] returns nil).
+//
+// Invariant: c.Parent().ChildAt(c.ParentEdge()) == c.
+func (c Cursor) ChildAt(k edge.Kind, idx int) Cursor {
+ target := packEdgeKindAndIndex(k, idx)
+
+ // Unfortunately there's no shortcut to looping.
+ events := c.in.events
+ i := c.index + 1
+ for {
+ pop := events[i].index
+ if pop < i {
+ break
+ }
+ if events[pop].parent == target {
+ return Cursor{c.in, i}
+ }
+ i = pop + 1
+ }
+ panic(fmt.Sprintf("ChildAt(%v, %d): no such child of %v", k, idx, c))
+}
+
+// Child returns the cursor for n, which must be a direct child of c's Node.
+//
+// Child must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) Child(n ast.Node) Cursor {
+ if c.index < 0 {
+ panic("Cursor.Child called on Root node")
+ }
+
+ if false {
+ // reference implementation
+ for child := range c.Children() {
+ if child.Node() == n {
+ return child
+ }
+ }
+
+ } else {
+ // optimized implementation
+ events := c.in.events
+ for i := c.index + 1; events[i].index > i; i = events[i].index + 1 {
+ if events[i].node == n {
+ return Cursor{c.in, i}
+ }
+ }
+ }
+ panic(fmt.Sprintf("Child(%T): not a child of %v", n, c))
+}
+
+// NextSibling returns the cursor for the next sibling node in the same list
+// (for example, of files, decls, specs, statements, fields, or expressions) as
+// the current node. It returns (zero, false) if the node is the last node in
+// the list, or is not part of a list.
+//
+// NextSibling must not be called on the Root node.
+//
+// See note at [Cursor.Children].
+func (c Cursor) NextSibling() (Cursor, bool) {
+ if c.index < 0 {
+ panic("Cursor.NextSibling called on Root node")
+ }
+
+ events := c.in.events
+ i := events[c.index].index + 1 // after corresponding pop
+ if i < int32(len(events)) {
+ if events[i].index > i { // push?
+ return Cursor{c.in, i}, true
+ }
+ }
+ return Cursor{}, false
+}
+
+// PrevSibling returns the cursor for the previous sibling node in the
+// same list (for example, of files, decls, specs, statements, fields,
+// or expressions) as the current node. It returns zero if the node is
+// the first node in the list, or is not part of a list.
+//
+// It must not be called on the Root node.
+//
+// See note at [Cursor.Children].
+func (c Cursor) PrevSibling() (Cursor, bool) {
+ if c.index < 0 {
+ panic("Cursor.PrevSibling called on Root node")
+ }
+
+ events := c.in.events
+ i := c.index - 1
+ if i >= 0 {
+ if j := events[i].index; j < i { // pop?
+ return Cursor{c.in, j}, true
+ }
+ }
+ return Cursor{}, false
+}
+
+// FirstChild returns the first direct child of the current node,
+// or zero if it has no children.
+func (c Cursor) FirstChild() (Cursor, bool) {
+ events := c.in.events
+ i := c.index + 1 // i=0 if c is root
+ if i < int32(len(events)) && events[i].index > i { // push?
+ return Cursor{c.in, i}, true
+ }
+ return Cursor{}, false
+}
+
+// LastChild returns the last direct child of the current node,
+// or zero if it has no children.
+func (c Cursor) LastChild() (Cursor, bool) {
+ events := c.in.events
+ if c.index < 0 { // root?
+ if len(events) > 0 {
+ // return push of final event (a pop)
+ return Cursor{c.in, events[len(events)-1].index}, true
+ }
+ } else {
+ j := events[c.index].index - 1 // before corresponding pop
+ // Inv: j == c.index if c has no children
+ // or j is last child's pop.
+ if j > c.index { // c has children
+ return Cursor{c.in, events[j].index}, true
+ }
+ }
+ return Cursor{}, false
+}
+
+// Children returns an iterator over the direct children of the
+// current node, if any.
+//
+// When using Children, NextChild, and PrevChild, bear in mind that a
+// Node's children may come from different fields, some of which may
+// be lists of nodes without a distinguished intervening container
+// such as [ast.BlockStmt].
+//
+// For example, [ast.CaseClause] has a field List of expressions and a
+// field Body of statements, so the children of a CaseClause are a mix
+// of expressions and statements. Other nodes that have "uncontained"
+// list fields include:
+//
+// - [ast.ValueSpec] (Names, Values)
+// - [ast.CompositeLit] (Type, Elts)
+// - [ast.IndexListExpr] (X, Indices)
+// - [ast.CallExpr] (Fun, Args)
+// - [ast.AssignStmt] (Lhs, Rhs)
+//
+// So, do not assume that the previous sibling of an ast.Stmt is also
+// an ast.Stmt, or if it is, that they are executed sequentially,
+// unless you have established that, say, its parent is a BlockStmt
+// or its [Cursor.ParentEdge] is [edge.BlockStmt_List].
+// For example, given "for S1; ; S2 {}", the predecessor of S2 is S1,
+// even though they are not executed in sequence.
+func (c Cursor) Children() iter.Seq[Cursor] {
+ return func(yield func(Cursor) bool) {
+ c, ok := c.FirstChild()
+ for ok && yield(c) {
+ c, ok = c.NextSibling()
+ }
+ }
+}
+
+// Contains reports whether c contains or is equal to c2.
+//
+// Both Cursors must belong to the same [Inspector];
+// neither may be its Root node.
+func (c Cursor) Contains(c2 Cursor) bool {
+ if c.in != c2.in {
+ panic("different inspectors")
+ }
+ events := c.in.events
+ return c.index <= c2.index && events[c2.index].index <= events[c.index].index
+}
+
+// FindNode returns the cursor for node n if it belongs to the subtree
+// rooted at c. It returns zero if n is not found.
+func (c Cursor) FindNode(n ast.Node) (Cursor, bool) {
+
+ // FindNode is equivalent to this code,
+ // but more convenient and 15-20% faster:
+ if false {
+ for candidate := range c.Preorder(n) {
+ if candidate.Node() == n {
+ return candidate, true
+ }
+ }
+ return Cursor{}, false
+ }
+
+ // TODO(adonovan): opt: should we assume Node.Pos is accurate
+ // and combine type-based filtering with position filtering
+ // like FindByPos?
+
+ mask := maskOf([]ast.Node{n})
+ events := c.in.events
+
+ for i, limit := c.indices(); i < limit; i++ {
+ ev := events[i]
+ if ev.index > i { // push?
+ if ev.typ&mask != 0 && ev.node == n {
+ return Cursor{c.in, i}, true
+ }
+ pop := ev.index
+ if events[pop].typ&mask == 0 {
+ // Subtree does not contain type of n: skip.
+ i = pop
+ }
+ }
+ }
+ return Cursor{}, false
+}
+
+// FindByPos returns the cursor for the innermost node n in the tree
+// rooted at c such that n.Pos() <= start && end <= n.End().
+// (For an *ast.File, it uses the bounds n.FileStart-n.FileEnd.)
+//
+// It returns zero if none is found.
+// Precondition: start <= end.
+//
+// See also [astutil.PathEnclosingInterval], which
+// tolerates adjoining whitespace.
+func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) {
+ if end < start {
+ panic("end < start")
+ }
+ events := c.in.events
+
+ // This algorithm could be implemented using c.Inspect,
+ // but it is about 2.5x slower.
+
+ best := int32(-1) // push index of latest (=innermost) node containing range
+ for i, limit := c.indices(); i < limit; i++ {
+ ev := events[i]
+ if ev.index > i { // push?
+ n := ev.node
+ var nodeEnd token.Pos
+ if file, ok := n.(*ast.File); ok {
+ nodeEnd = file.FileEnd
+ // Note: files may be out of Pos order.
+ if file.FileStart > start {
+ i = ev.index // disjoint, after; skip to next file
+ continue
+ }
+ } else {
+ nodeEnd = n.End()
+ if n.Pos() > start {
+ break // disjoint, after; stop
+ }
+ }
+ // Inv: node.{Pos,FileStart} <= start
+ if end <= nodeEnd {
+ // node fully contains target range
+ best = i
+ } else if nodeEnd < start {
+ i = ev.index // disjoint, before; skip forward
+ }
+ }
+ }
+ if best >= 0 {
+ return Cursor{c.in, best}, true
+ }
+ return Cursor{}, false
+}
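Not part of the patch: a usage sketch that builds an Inspector and walks a hypothetical file with the new Cursor API, filtering for call expressions as described in the Cursor and Preorder documentation above (requires a Go release with range-over-function iterators).

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p\nfunc g() { h(1); h(2) }\n", 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})
	// Preorder with a type filter: yield a Cursor for each *ast.CallExpr.
	for c := range in.Root().Preorder((*ast.CallExpr)(nil)) {
		call := c.Node().(*ast.CallExpr)
		// A Cursor can also navigate upward, e.g. to its parent node.
		fmt.Println(fset.Position(call.Pos()), "parent:", c.Parent().String())
	}
}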
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
new file mode 100644
index 000000000..a703cdfcf
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
@@ -0,0 +1,311 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package inspector provides helper functions for traversal over the
+// syntax trees of a package, including node filtering by type, and
+// materialization of the traversal stack.
+//
+// During construction, the inspector does a complete traversal and
+// builds a list of push/pop events and their node type. Subsequent
+// method calls that request a traversal scan this list, rather than walk
+// the AST, and perform type filtering using efficient bit sets.
+// This representation is sometimes called a "balanced parenthesis tree."
+//
+// Experiments suggest the inspector's traversals are about 2.5x faster
+// than [ast.Inspect], but it may take around 5 traversals for this
+// benefit to amortize the inspector's construction cost.
+// If efficiency is the primary concern, do not use Inspector for
+// one-off traversals.
+//
+// The [Cursor] type provides a more flexible API for efficient
+// navigation of syntax trees in all four "cardinal directions". For
+// example, traversals may be nested, so you can find each node of
+// type A and then search within it for nodes of type B. Or you can
+// traverse from a node to its immediate neighbors: its parent, its
+// previous and next sibling, or its first and last child. We
+// recommend using methods of Cursor in preference to Inspector where
+// possible.
+package inspector
+
+// There are four orthogonal features in a traversal:
+// 1 type filtering
+// 2 pruning
+// 3 postorder calls to f
+// 4 stack
+// Rather than offer all of them in the API,
+// only a few combinations are exposed:
+// - Preorder is the fastest and has fewest features,
+// but is the most commonly needed traversal.
+// - Nodes and WithStack both provide pruning and postorder calls,
+// even though few clients need it, because supporting two versions
+// is not justified.
+// More combinations could be supported by expressing them as
+// wrappers around a more generic traversal, but this was measured
+// and found to degrade performance significantly (30%).
+
+import (
+ "go/ast"
+
+ "golang.org/x/tools/go/ast/edge"
+)
+
+// An Inspector provides methods for inspecting
+// (traversing) the syntax trees of a package.
+type Inspector struct {
+ events []event
+}
+
+func packEdgeKindAndIndex(ek edge.Kind, index int) int32 {
+ return int32(uint32(index+1)<<7 | uint32(ek))
+}
+
+// unpackEdgeKindAndIndex unpacks the edge kind and edge index (within
+// an []ast.Node slice) from the parent field of a pop event.
+func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) {
+ // The "parent" field of a pop node holds the
+ // edge Kind in the lower 7 bits and the index+1
+ // in the upper 25.
+ return edge.Kind(x & 0x7f), int(x>>7) - 1
+}
+
+// New returns an Inspector for the specified syntax trees.
+func New(files []*ast.File) *Inspector {
+ return &Inspector{traverse(files)}
+}
+
+// An event represents a push or a pop
+// of an ast.Node during a traversal.
+type event struct {
+ node ast.Node
+ typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
+ index int32 // index of corresponding push or pop event
+ parent int32 // index of parent's push node (push nodes only), or packed edge kind/index (pop nodes only)
+}
+
+// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
+// Type can be recovered from the sole bit in typ.
+// [Tried this, wasn't faster. --adonovan]
+
+// Preorder visits all the nodes of the files supplied to New in
+// depth-first order. It calls f(n) for each node n before it visits
+// n's children.
+//
+// The complete traversal sequence is determined by [ast.Inspect].
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+//
+// The [Cursor.Preorder] method provides a richer alternative interface.
+// Example:
+//
+// for c := range in.Root().Preorder(types) { ... }
+func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
+ // Because it avoids postorder calls to f, and the pruning
+ // check, Preorder is almost twice as fast as Nodes. The two
+ // features seem to contribute similar slowdowns (~1.4x each).
+
+ // This function is equivalent to the PreorderSeq call below,
+ // but to avoid the additional dynamic call (which adds 13-35%
+ // to the benchmarks), we expand it out.
+ //
+ // in.PreorderSeq(types...)(func(n ast.Node) bool {
+ // f(n)
+ // return true
+ // })
+
+ mask := maskOf(types)
+ for i := int32(0); i < int32(len(in.events)); {
+ ev := in.events[i]
+ if ev.index > i {
+ // push
+ if ev.typ&mask != 0 {
+ f(ev.node)
+ }
+ pop := ev.index
+ if in.events[pop].typ&mask == 0 {
+ // Subtrees do not contain types: skip them and pop.
+ i = pop + 1
+ continue
+ }
+ }
+ i++
+ }
+}
+
+// Nodes visits the nodes of the files supplied to New in depth-first
+// order. It calls f(n, true) for each node n before it visits n's
+// children. If f returns true, Nodes invokes f recursively for each
+// of the non-nil children of the node, followed by a call of
+// f(n, false).
+//
+// The complete traversal sequence is determined by [ast.Inspect].
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+//
+// The [Cursor.Inspect] method provides a richer alternative interface.
+// Example:
+//
+// in.Root().Inspect(types, func(c Cursor) bool {
+// ...
+// return true
+// })
+func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
+ mask := maskOf(types)
+ for i := int32(0); i < int32(len(in.events)); {
+ ev := in.events[i]
+ if ev.index > i {
+ // push
+ pop := ev.index
+ if ev.typ&mask != 0 {
+ if !f(ev.node, true) {
+ i = pop + 1 // jump to corresponding pop + 1
+ continue
+ }
+ }
+ if in.events[pop].typ&mask == 0 {
+ // Subtrees do not contain types: skip them.
+ i = pop
+ continue
+ }
+ } else {
+ // pop
+ push := ev.index
+ if in.events[push].typ&mask != 0 {
+ f(ev.node, false)
+ }
+ }
+ i++
+ }
+}
+
+// WithStack visits nodes in a similar manner to Nodes, but it
+// supplies each call to f an additional argument, the current
+// traversal stack. The stack's first element is the outermost node,
+// an *ast.File; its last is the innermost, n.
+//
+// The [Cursor.Inspect] method provides a richer alternative interface.
+// Example:
+//
+// in.Root().Inspect(types, func(c Cursor) bool {
+// stack := slices.Collect(c.Enclosing())
+// ...
+// return true
+// })
+func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
+ mask := maskOf(types)
+ var stack []ast.Node
+ for i := int32(0); i < int32(len(in.events)); {
+ ev := in.events[i]
+ if ev.index > i {
+ // push
+ pop := ev.index
+ stack = append(stack, ev.node)
+ if ev.typ&mask != 0 {
+ if !f(ev.node, true, stack) {
+ i = pop + 1
+ stack = stack[:len(stack)-1]
+ continue
+ }
+ }
+ if in.events[pop].typ&mask == 0 {
+ // Subtrees do not contain types: skip them.
+ i = pop
+ continue
+ }
+ } else {
+ // pop
+ push := ev.index
+ if in.events[push].typ&mask != 0 {
+ f(ev.node, false, stack)
+ }
+ stack = stack[:len(stack)-1]
+ }
+ i++
+ }
+}
+
+// traverse builds the table of events representing a traversal.
+func traverse(files []*ast.File) []event {
+ // Preallocate approximate number of events
+ // based on source file extent of the declarations.
+ // (We use End-Pos not FileStart-FileEnd to neglect
+ // the effect of long doc comments.)
+ // This makes traverse faster by 4x (!).
+ var extent int
+ for _, f := range files {
+ extent += int(f.End() - f.Pos())
+ }
+ // This estimate is based on the net/http package.
+ capacity := min(extent*33/100, 1e6) // impose some reasonable maximum (1M)
+
+ v := &visitor{
+ events: make([]event, 0, capacity),
+ stack: []item{{index: -1}}, // include an extra event so file nodes have a parent
+ }
+ for _, file := range files {
+ walk(v, edge.Invalid, -1, file)
+ }
+ return v.events
+}
+
+type visitor struct {
+ events []event
+ stack []item
+}
+
+type item struct {
+ index int32 // index of current node's push event
+ parentIndex int32 // index of parent node's push event
+ typAccum uint64 // accumulated type bits of current node's descendants
+ edgeKindAndIndex int32 // edge.Kind and index, bit packed
+}
+
+func (v *visitor) push(ek edge.Kind, eindex int, node ast.Node) {
+ var (
+ index = int32(len(v.events))
+ parentIndex = v.stack[len(v.stack)-1].index
+ )
+ v.events = append(v.events, event{
+ node: node,
+ parent: parentIndex,
+ typ: typeOf(node),
+ index: 0, // (pop index is set later by visitor.pop)
+ })
+ v.stack = append(v.stack, item{
+ index: index,
+ parentIndex: parentIndex,
+ edgeKindAndIndex: packEdgeKindAndIndex(ek, eindex),
+ })
+
+ // 2B nodes ought to be enough for anyone!
+ if int32(len(v.events)) < 0 {
+ panic("event index exceeded int32")
+ }
+
+ // 32M elements in an []ast.Node ought to be enough for anyone!
+ if ek2, eindex2 := unpackEdgeKindAndIndex(packEdgeKindAndIndex(ek, eindex)); ek2 != ek || eindex2 != eindex {
+ panic("Node slice index exceeded uint25")
+ }
+}
+
+func (v *visitor) pop(node ast.Node) {
+ top := len(v.stack) - 1
+ current := v.stack[top]
+
+ push := &v.events[current.index]
+ parent := &v.stack[top-1]
+
+ push.index = int32(len(v.events)) // make push event refer to pop
+ parent.typAccum |= current.typAccum | push.typ // accumulate type bits into parent
+
+ v.stack = v.stack[:top]
+
+ v.events = append(v.events, event{
+ node: node,
+ typ: current.typAccum,
+ index: current.index,
+ parent: current.edgeKindAndIndex, // see [unpackEdgeKindAndIndex]
+ })
+}
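As a rough sketch of how a client typically drives the Preorder/Nodes/WithStack API introduced above (the parsed source and file name below are illustrative, not part of the change):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	// Illustrative input; any set of parsed files works.
	f, err := parser.ParseFile(fset, "p.go", `package p; func f() { g(1); g(2) }`, 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})

	// Preorder with type filtering: the callback runs only for *ast.CallExpr nodes.
	in.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
		fmt.Println("call at", fset.Position(n.Pos()))
	})

	// WithStack additionally supplies the enclosing-node stack;
	// stack[0] is the *ast.File and stack[len(stack)-1] is n itself.
	in.WithStack([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node, push bool, stack []ast.Node) bool {
		if push {
			fmt.Println("call depth:", len(stack))
		}
		return true
	})
}

The construction cost is paid once in inspector.New; both traversals above only scan the prebuilt event list.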
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go
new file mode 100644
index 000000000..c576dc70a
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go
@@ -0,0 +1,85 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.23
+
+package inspector
+
+import (
+ "go/ast"
+ "iter"
+)
+
+// PreorderSeq returns an iterator that visits all the
+// nodes of the files supplied to New in depth-first order.
+// It visits each node n before n's children.
+// The complete traversal sequence is determined by ast.Inspect.
+//
+// The types argument, if non-empty, enables type-based
+// filtering of events: only nodes whose type matches an
+// element of the types slice are included in the sequence.
+func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] {
+
+ // This implementation is identical to Preorder,
+ // except that it supports breaking out of the loop.
+
+ return func(yield func(ast.Node) bool) {
+ mask := maskOf(types)
+ for i := int32(0); i < int32(len(in.events)); {
+ ev := in.events[i]
+ if ev.index > i {
+ // push
+ if ev.typ&mask != 0 {
+ if !yield(ev.node) {
+ break
+ }
+ }
+ pop := ev.index
+ if in.events[pop].typ&mask == 0 {
+ // Subtrees do not contain types: skip them and pop.
+ i = pop + 1
+ continue
+ }
+ }
+ i++
+ }
+ }
+}
+
+// All[N] returns an iterator over all the nodes of type N.
+// N must be a pointer-to-struct type that implements ast.Node.
+//
+// Example:
+//
+// for call := range All[*ast.CallExpr](in) { ... }
+func All[N interface {
+ *S
+ ast.Node
+}, S any](in *Inspector) iter.Seq[N] {
+
+ // To avoid additional dynamic call overheads,
+ // we duplicate rather than call the logic of PreorderSeq.
+
+ mask := typeOf((N)(nil))
+ return func(yield func(N) bool) {
+ for i := int32(0); i < int32(len(in.events)); {
+ ev := in.events[i]
+ if ev.index > i {
+ // push
+ if ev.typ&mask != 0 {
+ if !yield(ev.node.(N)) {
+ break
+ }
+ }
+ pop := ev.index
+ if in.events[pop].typ&mask == 0 {
+ // Subtrees do not contain types: skip them and pop.
+ i = pop + 1
+ continue
+ }
+ }
+ i++
+ }
+ }
+}
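The go1.23 iterator forms above compose with range-over-func loops, including early exit via break; a minimal sketch under the same illustrative-parse assumption:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", `package p; var x = len("hi")`, 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})

	// All[N] yields every node of one concrete type, already asserted to N.
	for call := range inspector.All[*ast.CallExpr](in) {
		fmt.Println("call with", len(call.Args), "arg(s)")
	}

	// PreorderSeq is a range-over-func sequence, so break stops the scan early.
	for n := range in.PreorderSeq((*ast.Ident)(nil)) {
		fmt.Println("first ident:", n.(*ast.Ident).Name)
		break
	}
}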
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
new file mode 100644
index 000000000..9852331a3
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -0,0 +1,227 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inspector
+
+// This file defines func typeOf(ast.Node) uint64.
+//
+// The initial map-based implementation was too slow;
+// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196
+
+import (
+ "go/ast"
+ "math"
+)
+
+const (
+ nArrayType = iota
+ nAssignStmt
+ nBadDecl
+ nBadExpr
+ nBadStmt
+ nBasicLit
+ nBinaryExpr
+ nBlockStmt
+ nBranchStmt
+ nCallExpr
+ nCaseClause
+ nChanType
+ nCommClause
+ nComment
+ nCommentGroup
+ nCompositeLit
+ nDeclStmt
+ nDeferStmt
+ nEllipsis
+ nEmptyStmt
+ nExprStmt
+ nField
+ nFieldList
+ nFile
+ nForStmt
+ nFuncDecl
+ nFuncLit
+ nFuncType
+ nGenDecl
+ nGoStmt
+ nIdent
+ nIfStmt
+ nImportSpec
+ nIncDecStmt
+ nIndexExpr
+ nIndexListExpr
+ nInterfaceType
+ nKeyValueExpr
+ nLabeledStmt
+ nMapType
+ nPackage
+ nParenExpr
+ nRangeStmt
+ nReturnStmt
+ nSelectStmt
+ nSelectorExpr
+ nSendStmt
+ nSliceExpr
+ nStarExpr
+ nStructType
+ nSwitchStmt
+ nTypeAssertExpr
+ nTypeSpec
+ nTypeSwitchStmt
+ nUnaryExpr
+ nValueSpec
+)
+
+// typeOf returns a distinct single-bit value that represents the type of n.
+//
+// Various implementations were benchmarked with BenchmarkNewInspector:
+//
+// GOGC=off
+// - type switch                                  4.9-5.5ms   2.1ms
+// - binary search over a sorted list of types    5.5-5.9ms   2.5ms
+// - linear scan, frequency-ordered list          5.9-6.1ms   2.7ms
+// - linear scan, unordered list                  6.4ms       2.7ms
+// - hash table                                   6.5ms       3.1ms
+//
+// A perfect hash seemed like overkill.
+//
+// The compiler's switch statement is the clear winner
+// as it produces a binary tree in code,
+// with constant conditions and good branch prediction.
+// (Sadly it is the most verbose in source code.)
+// Binary search suffered from poor branch prediction.
+func typeOf(n ast.Node) uint64 {
+ // Fast path: nearly half of all nodes are identifiers.
+ if _, ok := n.(*ast.Ident); ok {
+ return 1 << nIdent
+ }
+
+ // These cases include all nodes encountered by ast.Inspect.
+ switch n.(type) {
+ case *ast.ArrayType:
+ return 1 << nArrayType
+ case *ast.AssignStmt:
+ return 1 << nAssignStmt
+ case *ast.BadDecl:
+ return 1 << nBadDecl
+ case *ast.BadExpr:
+ return 1 << nBadExpr
+ case *ast.BadStmt:
+ return 1 << nBadStmt
+ case *ast.BasicLit:
+ return 1 << nBasicLit
+ case *ast.BinaryExpr:
+ return 1 << nBinaryExpr
+ case *ast.BlockStmt:
+ return 1 << nBlockStmt
+ case *ast.BranchStmt:
+ return 1 << nBranchStmt
+ case *ast.CallExpr:
+ return 1 << nCallExpr
+ case *ast.CaseClause:
+ return 1 << nCaseClause
+ case *ast.ChanType:
+ return 1 << nChanType
+ case *ast.CommClause:
+ return 1 << nCommClause
+ case *ast.Comment:
+ return 1 << nComment
+ case *ast.CommentGroup:
+ return 1 << nCommentGroup
+ case *ast.CompositeLit:
+ return 1 << nCompositeLit
+ case *ast.DeclStmt:
+ return 1 << nDeclStmt
+ case *ast.DeferStmt:
+ return 1 << nDeferStmt
+ case *ast.Ellipsis:
+ return 1 << nEllipsis
+ case *ast.EmptyStmt:
+ return 1 << nEmptyStmt
+ case *ast.ExprStmt:
+ return 1 << nExprStmt
+ case *ast.Field:
+ return 1 << nField
+ case *ast.FieldList:
+ return 1 << nFieldList
+ case *ast.File:
+ return 1 << nFile
+ case *ast.ForStmt:
+ return 1 << nForStmt
+ case *ast.FuncDecl:
+ return 1 << nFuncDecl
+ case *ast.FuncLit:
+ return 1 << nFuncLit
+ case *ast.FuncType:
+ return 1 << nFuncType
+ case *ast.GenDecl:
+ return 1 << nGenDecl
+ case *ast.GoStmt:
+ return 1 << nGoStmt
+ case *ast.Ident:
+ return 1 << nIdent
+ case *ast.IfStmt:
+ return 1 << nIfStmt
+ case *ast.ImportSpec:
+ return 1 << nImportSpec
+ case *ast.IncDecStmt:
+ return 1 << nIncDecStmt
+ case *ast.IndexExpr:
+ return 1 << nIndexExpr
+ case *ast.IndexListExpr:
+ return 1 << nIndexListExpr
+ case *ast.InterfaceType:
+ return 1 << nInterfaceType
+ case *ast.KeyValueExpr:
+ return 1 << nKeyValueExpr
+ case *ast.LabeledStmt:
+ return 1 << nLabeledStmt
+ case *ast.MapType:
+ return 1 << nMapType
+ case *ast.Package:
+ return 1 << nPackage
+ case *ast.ParenExpr:
+ return 1 << nParenExpr
+ case *ast.RangeStmt:
+ return 1 << nRangeStmt
+ case *ast.ReturnStmt:
+ return 1 << nReturnStmt
+ case *ast.SelectStmt:
+ return 1 << nSelectStmt
+ case *ast.SelectorExpr:
+ return 1 << nSelectorExpr
+ case *ast.SendStmt:
+ return 1 << nSendStmt
+ case *ast.SliceExpr:
+ return 1 << nSliceExpr
+ case *ast.StarExpr:
+ return 1 << nStarExpr
+ case *ast.StructType:
+ return 1 << nStructType
+ case *ast.SwitchStmt:
+ return 1 << nSwitchStmt
+ case *ast.TypeAssertExpr:
+ return 1 << nTypeAssertExpr
+ case *ast.TypeSpec:
+ return 1 << nTypeSpec
+ case *ast.TypeSwitchStmt:
+ return 1 << nTypeSwitchStmt
+ case *ast.UnaryExpr:
+ return 1 << nUnaryExpr
+ case *ast.ValueSpec:
+ return 1 << nValueSpec
+ }
+ return 0
+}
+
+func maskOf(nodes []ast.Node) uint64 {
+ if len(nodes) == 0 {
+ return math.MaxUint64 // match all node types
+ }
+ var mask uint64
+ for _, n := range nodes {
+ mask |= typeOf(n)
+ }
+ return mask
+}
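typeOf assigns each node kind a distinct bit and maskOf unions the requested bits, so filtering an event is a single AND. A self-contained sketch of that encoding, using made-up kind constants rather than the package's internal ones:

package main

import "fmt"

// Illustrative kind constants; the real package assigns one bit per ast node type.
const (
	kIdent = iota
	kCallExpr
	kFuncDecl
)

// bit returns the single-bit value for a kind, mirroring typeOf.
func bit(kind int) uint64 { return 1 << kind }

// maskOf unions the requested kinds; an empty request matches every kind,
// mirroring the math.MaxUint64 case above.
func maskOf(kinds ...int) uint64 {
	if len(kinds) == 0 {
		return ^uint64(0)
	}
	var m uint64
	for _, k := range kinds {
		m |= bit(k)
	}
	return m
}

func main() {
	mask := maskOf(kCallExpr, kFuncDecl)
	for _, k := range []int{kIdent, kCallExpr, kFuncDecl} {
		// One AND decides whether an event passes the filter.
		fmt.Printf("kind %d matches: %v\n", k, bit(k)&mask != 0)
	}
}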
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/vendor/golang.org/x/tools/go/ast/inspector/walk.go
new file mode 100644
index 000000000..5f1c93c8a
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/walk.go
@@ -0,0 +1,341 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inspector
+
+// This file is a fork of ast.Inspect to reduce unnecessary dynamic
+// calls and to gather edge information.
+//
+// Consistency with the original is ensured by TestInspectAllNodes.
+
+import (
+ "fmt"
+ "go/ast"
+
+ "golang.org/x/tools/go/ast/edge"
+)
+
+func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) {
+ for i, node := range list {
+ walk(v, ek, i, node)
+ }
+}
+
+func walk(v *visitor, ek edge.Kind, index int, node ast.Node) {
+ v.push(ek, index, node)
+
+ // walk children
+ // (the order of the cases matches the order
+ // of the corresponding node types in ast.go)
+ switch n := node.(type) {
+ // Comments and fields
+ case *ast.Comment:
+ // nothing to do
+
+ case *ast.CommentGroup:
+ walkList(v, edge.CommentGroup_List, n.List)
+
+ case *ast.Field:
+ if n.Doc != nil {
+ walk(v, edge.Field_Doc, -1, n.Doc)
+ }
+ walkList(v, edge.Field_Names, n.Names)
+ if n.Type != nil {
+ walk(v, edge.Field_Type, -1, n.Type)
+ }
+ if n.Tag != nil {
+ walk(v, edge.Field_Tag, -1, n.Tag)
+ }
+ if n.Comment != nil {
+ walk(v, edge.Field_Comment, -1, n.Comment)
+ }
+
+ case *ast.FieldList:
+ walkList(v, edge.FieldList_List, n.List)
+
+ // Expressions
+ case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
+ // nothing to do
+
+ case *ast.Ellipsis:
+ if n.Elt != nil {
+ walk(v, edge.Ellipsis_Elt, -1, n.Elt)
+ }
+
+ case *ast.FuncLit:
+ walk(v, edge.FuncLit_Type, -1, n.Type)
+ walk(v, edge.FuncLit_Body, -1, n.Body)
+
+ case *ast.CompositeLit:
+ if n.Type != nil {
+ walk(v, edge.CompositeLit_Type, -1, n.Type)
+ }
+ walkList(v, edge.CompositeLit_Elts, n.Elts)
+
+ case *ast.ParenExpr:
+ walk(v, edge.ParenExpr_X, -1, n.X)
+
+ case *ast.SelectorExpr:
+ walk(v, edge.SelectorExpr_X, -1, n.X)
+ walk(v, edge.SelectorExpr_Sel, -1, n.Sel)
+
+ case *ast.IndexExpr:
+ walk(v, edge.IndexExpr_X, -1, n.X)
+ walk(v, edge.IndexExpr_Index, -1, n.Index)
+
+ case *ast.IndexListExpr:
+ walk(v, edge.IndexListExpr_X, -1, n.X)
+ walkList(v, edge.IndexListExpr_Indices, n.Indices)
+
+ case *ast.SliceExpr:
+ walk(v, edge.SliceExpr_X, -1, n.X)
+ if n.Low != nil {
+ walk(v, edge.SliceExpr_Low, -1, n.Low)
+ }
+ if n.High != nil {
+ walk(v, edge.SliceExpr_High, -1, n.High)
+ }
+ if n.Max != nil {
+ walk(v, edge.SliceExpr_Max, -1, n.Max)
+ }
+
+ case *ast.TypeAssertExpr:
+ walk(v, edge.TypeAssertExpr_X, -1, n.X)
+ if n.Type != nil {
+ walk(v, edge.TypeAssertExpr_Type, -1, n.Type)
+ }
+
+ case *ast.CallExpr:
+ walk(v, edge.CallExpr_Fun, -1, n.Fun)
+ walkList(v, edge.CallExpr_Args, n.Args)
+
+ case *ast.StarExpr:
+ walk(v, edge.StarExpr_X, -1, n.X)
+
+ case *ast.UnaryExpr:
+ walk(v, edge.UnaryExpr_X, -1, n.X)
+
+ case *ast.BinaryExpr:
+ walk(v, edge.BinaryExpr_X, -1, n.X)
+ walk(v, edge.BinaryExpr_Y, -1, n.Y)
+
+ case *ast.KeyValueExpr:
+ walk(v, edge.KeyValueExpr_Key, -1, n.Key)
+ walk(v, edge.KeyValueExpr_Value, -1, n.Value)
+
+ // Types
+ case *ast.ArrayType:
+ if n.Len != nil {
+ walk(v, edge.ArrayType_Len, -1, n.Len)
+ }
+ walk(v, edge.ArrayType_Elt, -1, n.Elt)
+
+ case *ast.StructType:
+ walk(v, edge.StructType_Fields, -1, n.Fields)
+
+ case *ast.FuncType:
+ if n.TypeParams != nil {
+ walk(v, edge.FuncType_TypeParams, -1, n.TypeParams)
+ }
+ if n.Params != nil {
+ walk(v, edge.FuncType_Params, -1, n.Params)
+ }
+ if n.Results != nil {
+ walk(v, edge.FuncType_Results, -1, n.Results)
+ }
+
+ case *ast.InterfaceType:
+ walk(v, edge.InterfaceType_Methods, -1, n.Methods)
+
+ case *ast.MapType:
+ walk(v, edge.MapType_Key, -1, n.Key)
+ walk(v, edge.MapType_Value, -1, n.Value)
+
+ case *ast.ChanType:
+ walk(v, edge.ChanType_Value, -1, n.Value)
+
+ // Statements
+ case *ast.BadStmt:
+ // nothing to do
+
+ case *ast.DeclStmt:
+ walk(v, edge.DeclStmt_Decl, -1, n.Decl)
+
+ case *ast.EmptyStmt:
+ // nothing to do
+
+ case *ast.LabeledStmt:
+ walk(v, edge.LabeledStmt_Label, -1, n.Label)
+ walk(v, edge.LabeledStmt_Stmt, -1, n.Stmt)
+
+ case *ast.ExprStmt:
+ walk(v, edge.ExprStmt_X, -1, n.X)
+
+ case *ast.SendStmt:
+ walk(v, edge.SendStmt_Chan, -1, n.Chan)
+ walk(v, edge.SendStmt_Value, -1, n.Value)
+
+ case *ast.IncDecStmt:
+ walk(v, edge.IncDecStmt_X, -1, n.X)
+
+ case *ast.AssignStmt:
+ walkList(v, edge.AssignStmt_Lhs, n.Lhs)
+ walkList(v, edge.AssignStmt_Rhs, n.Rhs)
+
+ case *ast.GoStmt:
+ walk(v, edge.GoStmt_Call, -1, n.Call)
+
+ case *ast.DeferStmt:
+ walk(v, edge.DeferStmt_Call, -1, n.Call)
+
+ case *ast.ReturnStmt:
+ walkList(v, edge.ReturnStmt_Results, n.Results)
+
+ case *ast.BranchStmt:
+ if n.Label != nil {
+ walk(v, edge.BranchStmt_Label, -1, n.Label)
+ }
+
+ case *ast.BlockStmt:
+ walkList(v, edge.BlockStmt_List, n.List)
+
+ case *ast.IfStmt:
+ if n.Init != nil {
+ walk(v, edge.IfStmt_Init, -1, n.Init)
+ }
+ walk(v, edge.IfStmt_Cond, -1, n.Cond)
+ walk(v, edge.IfStmt_Body, -1, n.Body)
+ if n.Else != nil {
+ walk(v, edge.IfStmt_Else, -1, n.Else)
+ }
+
+ case *ast.CaseClause:
+ walkList(v, edge.CaseClause_List, n.List)
+ walkList(v, edge.CaseClause_Body, n.Body)
+
+ case *ast.SwitchStmt:
+ if n.Init != nil {
+ walk(v, edge.SwitchStmt_Init, -1, n.Init)
+ }
+ if n.Tag != nil {
+ walk(v, edge.SwitchStmt_Tag, -1, n.Tag)
+ }
+ walk(v, edge.SwitchStmt_Body, -1, n.Body)
+
+ case *ast.TypeSwitchStmt:
+ if n.Init != nil {
+ walk(v, edge.TypeSwitchStmt_Init, -1, n.Init)
+ }
+ walk(v, edge.TypeSwitchStmt_Assign, -1, n.Assign)
+ walk(v, edge.TypeSwitchStmt_Body, -1, n.Body)
+
+ case *ast.CommClause:
+ if n.Comm != nil {
+ walk(v, edge.CommClause_Comm, -1, n.Comm)
+ }
+ walkList(v, edge.CommClause_Body, n.Body)
+
+ case *ast.SelectStmt:
+ walk(v, edge.SelectStmt_Body, -1, n.Body)
+
+ case *ast.ForStmt:
+ if n.Init != nil {
+ walk(v, edge.ForStmt_Init, -1, n.Init)
+ }
+ if n.Cond != nil {
+ walk(v, edge.ForStmt_Cond, -1, n.Cond)
+ }
+ if n.Post != nil {
+ walk(v, edge.ForStmt_Post, -1, n.Post)
+ }
+ walk(v, edge.ForStmt_Body, -1, n.Body)
+
+ case *ast.RangeStmt:
+ if n.Key != nil {
+ walk(v, edge.RangeStmt_Key, -1, n.Key)
+ }
+ if n.Value != nil {
+ walk(v, edge.RangeStmt_Value, -1, n.Value)
+ }
+ walk(v, edge.RangeStmt_X, -1, n.X)
+ walk(v, edge.RangeStmt_Body, -1, n.Body)
+
+ // Declarations
+ case *ast.ImportSpec:
+ if n.Doc != nil {
+ walk(v, edge.ImportSpec_Doc, -1, n.Doc)
+ }
+ if n.Name != nil {
+ walk(v, edge.ImportSpec_Name, -1, n.Name)
+ }
+ walk(v, edge.ImportSpec_Path, -1, n.Path)
+ if n.Comment != nil {
+ walk(v, edge.ImportSpec_Comment, -1, n.Comment)
+ }
+
+ case *ast.ValueSpec:
+ if n.Doc != nil {
+ walk(v, edge.ValueSpec_Doc, -1, n.Doc)
+ }
+ walkList(v, edge.ValueSpec_Names, n.Names)
+ if n.Type != nil {
+ walk(v, edge.ValueSpec_Type, -1, n.Type)
+ }
+ walkList(v, edge.ValueSpec_Values, n.Values)
+ if n.Comment != nil {
+ walk(v, edge.ValueSpec_Comment, -1, n.Comment)
+ }
+
+ case *ast.TypeSpec:
+ if n.Doc != nil {
+ walk(v, edge.TypeSpec_Doc, -1, n.Doc)
+ }
+ walk(v, edge.TypeSpec_Name, -1, n.Name)
+ if n.TypeParams != nil {
+ walk(v, edge.TypeSpec_TypeParams, -1, n.TypeParams)
+ }
+ walk(v, edge.TypeSpec_Type, -1, n.Type)
+ if n.Comment != nil {
+ walk(v, edge.TypeSpec_Comment, -1, n.Comment)
+ }
+
+ case *ast.BadDecl:
+ // nothing to do
+
+ case *ast.GenDecl:
+ if n.Doc != nil {
+ walk(v, edge.GenDecl_Doc, -1, n.Doc)
+ }
+ walkList(v, edge.GenDecl_Specs, n.Specs)
+
+ case *ast.FuncDecl:
+ if n.Doc != nil {
+ walk(v, edge.FuncDecl_Doc, -1, n.Doc)
+ }
+ if n.Recv != nil {
+ walk(v, edge.FuncDecl_Recv, -1, n.Recv)
+ }
+ walk(v, edge.FuncDecl_Name, -1, n.Name)
+ walk(v, edge.FuncDecl_Type, -1, n.Type)
+ if n.Body != nil {
+ walk(v, edge.FuncDecl_Body, -1, n.Body)
+ }
+
+ case *ast.File:
+ if n.Doc != nil {
+ walk(v, edge.File_Doc, -1, n.Doc)
+ }
+ walk(v, edge.File_Name, -1, n.Name)
+ walkList(v, edge.File_Decls, n.Decls)
+ // don't walk n.Comments - they have been
+ // visited already through the individual
+ // nodes
+
+ default:
+ // (includes *ast.Package)
+ panic(fmt.Sprintf("Walk: unexpected node type %T", n))
+ }
+
+ v.pop(node)
+}
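The fork's push/pop order is meant to match ast.Inspect (the comment above defers to TestInspectAllNodes). A hand check of the same property, sketched here rather than taken from the vendored test, is to compare the preorder sequences produced by both traversals:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", `package p; func f(x int) int { return x + 1 }`, 0)
	if err != nil {
		panic(err)
	}

	// Preorder sequence produced by the forked walk (via the event list).
	var got []ast.Node
	inspector.New([]*ast.File{f}).Preorder(nil, func(n ast.Node) {
		got = append(got, n)
	})

	// Preorder sequence produced by the original ast.Inspect.
	var want []ast.Node
	ast.Inspect(f, func(n ast.Node) bool {
		if n != nil {
			want = append(want, n)
		}
		return true
	})

	fmt.Println("same node count:", len(got) == len(want))
}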
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index 89f89dd2d..680a70ca8 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -364,12 +364,6 @@ type jsonPackage struct {
DepsErrors []*packagesinternal.PackageError
}
-type jsonPackageError struct {
- ImportStack []string
- Pos string
- Err string
-}
-
func otherFiles(p *jsonPackage) [][]string {
return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
}
diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go
index df14ffd94..af6a60d75 100644
--- a/vendor/golang.org/x/tools/go/packages/visit.go
+++ b/vendor/golang.org/x/tools/go/packages/visit.go
@@ -5,9 +5,11 @@
package packages
import (
+ "cmp"
"fmt"
+ "iter"
"os"
- "sort"
+ "slices"
)
// Visit visits all the packages in the import graph whose roots are
@@ -16,6 +18,20 @@ import (
// package's dependencies have been visited (postorder).
// The boolean result of pre(pkg) determines whether
// the imports of package pkg are visited.
+//
+// Example:
+//
+// pkgs, err := Load(...)
+// if err != nil { ... }
+// Visit(pkgs, nil, func(pkg *Package) {
+// log.Println(pkg)
+// })
+//
+// In most cases, it is more convenient to use [Postorder]:
+//
+// for pkg := range Postorder(pkgs) {
+// log.Println(pkg)
+// }
func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
seen := make(map[*Package]bool)
var visit func(*Package)
@@ -24,13 +40,8 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
seen[pkg] = true
if pre == nil || pre(pkg) {
- paths := make([]string, 0, len(pkg.Imports))
- for path := range pkg.Imports {
- paths = append(paths, path)
- }
- sort.Strings(paths) // Imports is a map, this makes visit stable
- for _, path := range paths {
- visit(pkg.Imports[path])
+ for _, imp := range sorted(pkg.Imports) { // for determinism
+ visit(imp)
}
}
@@ -50,7 +61,7 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
func PrintErrors(pkgs []*Package) int {
var n int
errModules := make(map[*Module]bool)
- Visit(pkgs, nil, func(pkg *Package) {
+ for pkg := range Postorder(pkgs) {
for _, err := range pkg.Errors {
fmt.Fprintln(os.Stderr, err)
n++
@@ -63,6 +74,60 @@ func PrintErrors(pkgs []*Package) int {
fmt.Fprintln(os.Stderr, mod.Error.Err)
n++
}
- })
+ }
return n
}
+
+// Postorder returns an iterator over the packages in
+// the import graph whose roots are pkgs.
+// Packages are enumerated in dependencies-first order.
+func Postorder(pkgs []*Package) iter.Seq[*Package] {
+ return func(yield func(*Package) bool) {
+ seen := make(map[*Package]bool)
+ var visit func(*Package) bool
+ visit = func(pkg *Package) bool {
+ if !seen[pkg] {
+ seen[pkg] = true
+ for _, imp := range sorted(pkg.Imports) { // for determinism
+ if !visit(imp) {
+ return false
+ }
+ }
+ if !yield(pkg) {
+ return false
+ }
+ }
+ return true
+ }
+ for _, pkg := range pkgs {
+ if !visit(pkg) {
+ break
+ }
+ }
+ }
+}
+
+// -- copied from golang.org/x/tools/gopls/internal/util/moremaps --
+
+// sorted returns an iterator over the entries of m in key order.
+func sorted[M ~map[K]V, K cmp.Ordered, V any](m M) iter.Seq2[K, V] {
+ // TODO(adonovan): use maps.Sorted if proposal #68598 is accepted.
+ return func(yield func(K, V) bool) {
+ keys := keySlice(m)
+ slices.Sort(keys)
+ for _, k := range keys {
+ if !yield(k, m[k]) {
+ break
+ }
+ }
+ }
+}
+
+// keySlice returns the keys of the map m, like slices.Collect(maps.Keys(m)).
+func keySlice[M ~map[K]V, K comparable, V any](m M) []K {
+ r := make([]K, 0, len(m))
+ for k := range m {
+ r = append(r, k)
+ }
+ return r
+}
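Postorder returns an iter.Seq, so a plain range loop enumerates dependencies before their importers and a break stops the walk early. A minimal sketch, assuming the packages come from a prior packages.Load (the pattern and mode below are illustrative):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Illustrative load; the mode and pattern are not part of this change.
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	// Dependencies-first enumeration over the import graph.
	for pkg := range packages.Postorder(pkgs) {
		fmt.Println(pkg.ID)
	}
}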
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
index d3c2913be..6c0c74968 100644
--- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -698,7 +698,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
} else if false && aliases.Enabled() {
// The Enabled check is too expensive, so for now we
// simply assume that aliases are not enabled.
- // TODO(adonovan): replace with "if true {" when go1.24 is assured.
+ //
+ // Now that go1.24 is assured, we should be able to
+ // replace this with "if true {", but it causes tests
+ // to fail. TODO(adonovan): investigate.
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t)
}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go
index b6d542c64..f035a0b6b 100644
--- a/vendor/golang.org/x/tools/go/types/typeutil/map.go
+++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go
@@ -11,7 +11,6 @@ import (
"fmt"
"go/types"
"hash/maphash"
- "unsafe"
"golang.org/x/tools/internal/typeparams"
)
@@ -380,22 +379,8 @@ var theSeed = maphash.MakeSeed()
func (hasher) hashTypeName(tname *types.TypeName) uint32 {
// Since types.Identical uses == to compare TypeNames,
// the Hash function uses maphash.Comparable.
- // TODO(adonovan): or will, when it becomes available in go1.24.
- // In the meantime we use the pointer's numeric value.
- //
- // hash := maphash.Comparable(theSeed, tname)
- //
- // (Another approach would be to hash the name and package
- // path, and whether or not it is a package-level typename. It
- // is rare for a package to define multiple local types with
- // the same name.)
- ptr := uintptr(unsafe.Pointer(tname))
- if unsafe.Sizeof(ptr) == 8 {
- hash := uint64(ptr)
- return uint32(hash ^ (hash >> 32))
- } else {
- return uint32(ptr)
- }
+ hash := maphash.Comparable(theSeed, tname)
+ return uint32(hash ^ (hash >> 32))
}
// shallowHash computes a hash of t without looking at any of its
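The new hashTypeName body relies on hash/maphash.Comparable, the go1.24 primitive the removed TODO was waiting for: equal comparable values hash identically under the same seed, matching the ==-based identity of TypeNames. A minimal sketch of that primitive in isolation (the key type here is illustrative):

package main

import (
	"fmt"
	"hash/maphash"
)

func main() {
	seed := maphash.MakeSeed()
	type key struct{ pkg, name string } // illustrative comparable type
	k := key{"go/types", "TypeName"}

	// Same seed + equal values => equal hashes.
	h1 := maphash.Comparable(seed, k)
	h2 := maphash.Comparable(seed, k)
	fmt.Println(h1 == h2)                // true
	fmt.Println(uint32(h1 ^ (h1 >> 32))) // folded to 32 bits as in hashTypeName
}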
diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go
index cb6db8893..22ae77772 100644
--- a/vendor/golang.org/x/tools/imports/forward.go
+++ b/vendor/golang.org/x/tools/imports/forward.go
@@ -69,9 +69,3 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) {
}
return intimp.Process(filename, src, intopt)
}
-
-// VendorlessPath returns the devendorized version of the import path ipath.
-// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b".
-func VendorlessPath(ipath string) string {
- return intimp.VendorlessPath(ipath)
-}
diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go
index a6cf0e64a..ade5d1e79 100644
--- a/vendor/golang.org/x/tools/internal/event/core/event.go
+++ b/vendor/golang.org/x/tools/internal/event/core/event.go
@@ -28,11 +28,6 @@ type Event struct {
dynamic []label.Label // dynamically sized storage for remaining labels
}
-// eventLabelMap implements label.Map for a the labels of an Event.
-type eventLabelMap struct {
- event Event
-}
-
func (ev Event) At() time.Time { return ev.at }
func (ev Event) Format(f fmt.State, r rune) {
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index 780873e3a..4a4357d2b 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -569,7 +569,6 @@ func (p *iexporter) exportName(obj types.Object) (res string) {
type iexporter struct {
fset *token.FileSet
- out *bytes.Buffer
version int
shallow bool // don't put types from other packages in the index
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
deleted file mode 100644
index 7586bfaca..000000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.22 && !go1.24
-
-package gcimporter
-
-import (
- "go/token"
- "go/types"
- "unsafe"
-)
-
-// TODO(rfindley): delete this workaround once go1.24 is assured.
-
-func init() {
- // Update markBlack so that it correctly sets the color
- // of imported TypeNames.
- //
- // See the doc comment for markBlack for details.
-
- type color uint32
- const (
- white color = iota
- black
- grey
- )
- type object struct {
- _ *types.Scope
- _ token.Pos
- _ *types.Package
- _ string
- _ types.Type
- _ uint32
- color_ color
- _ token.Pos
- }
- type typeName struct {
- object
- }
-
- // If the size of types.TypeName changes, this will fail to compile.
- const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{}))
- var _ [-delta * delta]int
-
- markBlack = func(obj *types.TypeName) {
- type uP = unsafe.Pointer
- var ptr *typeName
- *(*uP)(uP(&ptr)) = uP(obj)
- ptr.color_ = black
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index 50b6ca51a..1b4dc0cb5 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -16,6 +16,7 @@ import (
"go/types"
"io/fs"
"io/ioutil"
+ "maps"
"os"
"path"
"path/filepath"
@@ -27,8 +28,6 @@ import (
"unicode"
"unicode/utf8"
- "maps"
-
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/gocommand"
@@ -43,7 +42,7 @@ var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){
if localPrefix == "" {
return
}
- for _, p := range strings.Split(localPrefix, ",") {
+ for p := range strings.SplitSeq(localPrefix, ",") {
if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath {
return 3, true
}
@@ -1251,7 +1250,6 @@ func ImportPathToAssumedName(importPath string) string {
// gopathResolver implements resolver for GOPATH workspaces.
type gopathResolver struct {
env *ProcessEnv
- walked bool
cache *DirInfoCache
scanSema chan struct{} // scanSema prevents concurrent scans.
}
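This SplitSeq rewrite, like the matching one in modindex/symbols.go below, swaps the allocated []string of strings.Split for the lazy iterator added in go1.24. A small sketch (the localPrefix value is illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	const localPrefix = "example.com/a,example.com/b/" // illustrative value

	// SplitSeq yields the pieces lazily, without building an intermediate slice.
	for p := range strings.SplitSeq(localPrefix, ",") {
		fmt.Println(strings.TrimSuffix(p, "/"))
	}
}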
diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
index fe24db9b1..8e9702d84 100644
--- a/vendor/golang.org/x/tools/internal/modindex/symbols.go
+++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -206,8 +206,7 @@ func isDeprecated(doc *ast.CommentGroup) bool {
// go.dev/wiki/Deprecated Paragraph starting 'Deprecated:'
// This code fails for /* Deprecated: */, but it's the code from
// gopls/internal/analysis/deprecated
- lines := strings.Split(doc.Text(), "\n\n")
- for _, line := range lines {
+ for line := range strings.SplitSeq(doc.Text(), "\n\n") {
if strings.HasPrefix(line, "Deprecated:") {
return true
}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go
index 77cf8d218..96ad6c582 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/deps.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go
@@ -12,348 +12,354 @@ type pkginfo struct {
}
var deps = [...]pkginfo{
- {"archive/tar", "\x03j\x03E5\x01\v\x01#\x01\x01\x02\x05\n\x02\x01\x02\x02\v"},
- {"archive/zip", "\x02\x04`\a\x16\x0205\x01+\x05\x01\x11\x03\x02\r\x04"},
- {"bufio", "\x03j}F\x13"},
- {"bytes", "m+R\x03\fH\x02\x02"},
+ {"archive/tar", "\x03k\x03E;\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"},
+ {"archive/zip", "\x02\x04a\a\x03\x12\x021;\x01+\x05\x01\x0f\x03\x02\x0e\x04"},
+ {"bufio", "\x03k\x83\x01D\x14"},
+ {"bytes", "n*Y\x03\fG\x02\x02"},
{"cmp", ""},
- {"compress/bzip2", "\x02\x02\xe6\x01C"},
- {"compress/flate", "\x02k\x03z\r\x025\x01\x03"},
- {"compress/gzip", "\x02\x04`\a\x03\x15eU"},
- {"compress/lzw", "\x02k\x03z"},
- {"compress/zlib", "\x02\x04`\a\x03\x13\x01f"},
- {"container/heap", "\xae\x02"},
+ {"compress/bzip2", "\x02\x02\xed\x01A"},
+ {"compress/flate", "\x02l\x03\x80\x01\f\x033\x01\x03"},
+ {"compress/gzip", "\x02\x04a\a\x03\x14lT"},
+ {"compress/lzw", "\x02l\x03\x80\x01"},
+ {"compress/zlib", "\x02\x04a\a\x03\x12\x01m"},
+ {"container/heap", "\xb3\x02"},
{"container/list", ""},
{"container/ring", ""},
- {"context", "m\\i\x01\f"},
- {"crypto", "\x83\x01gE"},
- {"crypto/aes", "\x10\n\a\x8e\x02"},
- {"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1c,Q"},
- {"crypto/des", "\x10\x13\x1d-,\x96\x01\x03"},
- {"crypto/dsa", "@\x04)}\x0e"},
- {"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1c}"},
- {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1c}\x0e\x04L\x01"},
- {"crypto/ed25519", "\x0e\x1c\x16\n\a\x1c}E"},
- {"crypto/elliptic", "0=}\x0e:"},
- {"crypto/fips140", " \x05\x90\x01"},
- {"crypto/hkdf", "-\x12\x01-\x16"},
- {"crypto/hmac", "\x1a\x14\x11\x01\x112"},
+ {"context", "n\\m\x01\r"},
+ {"crypto", "\x83\x01nC"},
+ {"crypto/aes", "\x10\n\a\x93\x02"},
+ {"crypto/cipher", "\x03\x1e\x01\x01\x1e\x11\x1c+X"},
+ {"crypto/des", "\x10\x13\x1e-+\x9b\x01\x03"},
+ {"crypto/dsa", "A\x04)\x83\x01\r"},
+ {"crypto/ecdh", "\x03\v\f\x0e\x04\x15\x04\r\x1c\x83\x01"},
+ {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\a\v\x05\x01\x04\f\x01\x1c\x83\x01\r\x05K\x01"},
+ {"crypto/ed25519", "\x0e\x1c\x11\x06\n\a\x1c\x83\x01C"},
+ {"crypto/elliptic", "0>\x83\x01\r9"},
+ {"crypto/fips140", " \x05"},
+ {"crypto/hkdf", "-\x13\x01-\x15"},
+ {"crypto/hmac", "\x1a\x14\x12\x01\x111"},
{"crypto/internal/boring", "\x0e\x02\rf"},
- {"crypto/internal/boring/bbig", "\x1a\xde\x01M"},
- {"crypto/internal/boring/bcache", "\xb3\x02\x12"},
+ {"crypto/internal/boring/bbig", "\x1a\xe4\x01M"},
+ {"crypto/internal/boring/bcache", "\xb8\x02\x13"},
{"crypto/internal/boring/sig", ""},
- {"crypto/internal/cryptotest", "\x03\r\n)\x0e\x19\x06\x13\x12#\a\t\x11\x11\x11\x1b\x01\f\r\x05\n"},
- {"crypto/internal/entropy", "E"},
- {"crypto/internal/fips140", ">/}9\r\x15"},
- {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05*\x8c\x016"},
- {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06*\x8a\x01"},
- {"crypto/internal/fips140/alias", "\xc5\x02"},
- {"crypto/internal/fips140/bigmod", "%\x17\x01\x06*\x8c\x01"},
- {"crypto/internal/fips140/check", " \x0e\x06\b\x02\xac\x01["},
- {"crypto/internal/fips140/check/checktest", "%\xfe\x01\""},
- {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01(}\x0f9"},
- {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f1}\x0f9"},
- {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x067}H"},
- {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v7\xc2\x01\x03"},
- {"crypto/internal/fips140/edwards25519", "%\a\f\x041\x8c\x019"},
- {"crypto/internal/fips140/edwards25519/field", "%\x13\x041\x8c\x01"},
- {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x069"},
- {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x017"},
- {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x041"},
- {"crypto/internal/fips140/nistec", "%\f\a\x041\x8c\x01*\x0f\x13"},
- {"crypto/internal/fips140/nistec/fiat", "%\x135\x8c\x01"},
- {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x069"},
- {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x025}H"},
- {"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06*\x8c\x01"},
- {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x010\x8c\x01L"},
- {"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06*\x8c\x01"},
- {"crypto/internal/fips140/ssh", " \x05"},
- {"crypto/internal/fips140/subtle", "#"},
- {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x027"},
- {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b1"},
+ {"crypto/internal/cryptotest", "\x03\r\n\x06$\x0e\x19\x06\x12\x12 \x04\a\t\x16\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"},
+ {"crypto/internal/entropy", "F"},
+ {"crypto/internal/fips140", "?/\x15\xa7\x01\v\x16"},
+ {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x05\x01\x01\x05*\x92\x014"},
+ {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x05\x01\x06*\x8f\x01"},
+ {"crypto/internal/fips140/alias", "\xcb\x02"},
+ {"crypto/internal/fips140/bigmod", "%\x18\x01\x06*\x92\x01"},
+ {"crypto/internal/fips140/check", " \x0e\x06\t\x02\xb2\x01Z"},
+ {"crypto/internal/fips140/check/checktest", "%\x85\x02!"},
+ {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x05\b\x01(\x83\x01\x0f7"},
+ {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\r1\x83\x01\x0f7"},
+ {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068\x15nF"},
+ {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc6\x01\x03"},
+ {"crypto/internal/fips140/edwards25519", "%\a\f\x051\x92\x017"},
+ {"crypto/internal/fips140/edwards25519/field", "%\x13\x051\x92\x01"},
+ {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:\x15"},
+ {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018\x15"},
+ {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x051"},
+ {"crypto/internal/fips140/nistec", "%\f\a\x051\x92\x01*\r\x14"},
+ {"crypto/internal/fips140/nistec/fiat", "%\x136\x92\x01"},
+ {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:\x15"},
+ {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026\x15nF"},
+ {"crypto/internal/fips140/sha256", "\x03\x1d\x1d\x01\x06*\x15}"},
+ {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x05\x010\x92\x01K"},
+ {"crypto/internal/fips140/sha512", "\x03\x1d\x1d\x01\x06*\x15}"},
+ {"crypto/internal/fips140/ssh", "%^"},
+ {"crypto/internal/fips140/subtle", "#\x1a\xc3\x01"},
+ {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028\x15"},
+ {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\t1\x15"},
+ {"crypto/internal/fips140cache", "\xaa\x02\r&"},
{"crypto/internal/fips140deps", ""},
{"crypto/internal/fips140deps/byteorder", "\x99\x01"},
- {"crypto/internal/fips140deps/cpu", "\xad\x01\a"},
- {"crypto/internal/fips140deps/godebug", "\xb5\x01"},
- {"crypto/internal/fips140hash", "5\x1a4\xc2\x01"},
- {"crypto/internal/fips140only", "'\r\x01\x01M25"},
+ {"crypto/internal/fips140deps/cpu", "\xae\x01\a"},
+ {"crypto/internal/fips140deps/godebug", "\xb6\x01"},
+ {"crypto/internal/fips140hash", "5\x1b3\xc8\x01"},
+ {"crypto/internal/fips140only", "'\r\x01\x01M3;"},
{"crypto/internal/fips140test", ""},
- {"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d#,`N"},
- {"crypto/internal/impl", "\xb0\x02"},
- {"crypto/internal/randutil", "\xea\x01\x12"},
- {"crypto/internal/sysrand", "mi!\x1f\r\x0f\x01\x01\v\x06"},
- {"crypto/internal/sysrand/internal/seccomp", "m"},
- {"crypto/md5", "\x0e2-\x16\x16`"},
+ {"crypto/internal/hpke", "\x0e\x01\x01\x03\x053#+gM"},
+ {"crypto/internal/impl", "\xb5\x02"},
+ {"crypto/internal/randutil", "\xf1\x01\x12"},
+ {"crypto/internal/sysrand", "nn! \r\r\x01\x01\f\x06"},
+ {"crypto/internal/sysrand/internal/seccomp", "n"},
+ {"crypto/md5", "\x0e3-\x15\x16g"},
{"crypto/mlkem", "/"},
- {"crypto/pbkdf2", "2\r\x01-\x16"},
- {"crypto/rand", "\x1a\x06\a\x19\x04\x01(}\x0eM"},
- {"crypto/rc4", "#\x1d-\xc2\x01"},
- {"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1c\x03\x1325\r\x01"},
- {"crypto/sha1", "\x0e\f&-\x16\x16\x14L"},
+ {"crypto/pbkdf2", "2\x0e\x01-\x15"},
+ {"crypto/rand", "\x1a\x06\a\x1a\x04\x01(\x83\x01\rM"},
+ {"crypto/rc4", "#\x1e-\xc6\x01"},
+ {"crypto/rsa", "\x0e\f\x01\t\x0f\r\x01\x04\x06\a\x1c\x03\x123;\f\x01"},
+ {"crypto/sha1", "\x0e\f'\x03*\x15\x16\x15R"},
{"crypto/sha256", "\x0e\f\x1aO"},
- {"crypto/sha3", "\x0e'N\xc2\x01"},
+ {"crypto/sha3", "\x0e'N\xc8\x01"},
{"crypto/sha512", "\x0e\f\x1cM"},
- {"crypto/subtle", "8\x96\x01U"},
- {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x13\x16\x14\b5\x16\x16\r\n\x01\x01\x01\x02\x01\f\x06\x02\x01"},
- {"crypto/tls/internal/fips140tls", " \x93\x02"},
- {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x032\x01\x02\t\x01\x01\x01\a\x10\x05\x01\x06\x02\x05\f\x01\x02\r\x02\x01\x01\x02\x03\x01"},
- {"crypto/x509/pkix", "c\x06\a\x88\x01G"},
- {"database/sql", "\x03\nJ\x16\x03z\f\x06\"\x05\n\x02\x03\x01\f\x02\x02\x02"},
- {"database/sql/driver", "\r`\x03\xae\x01\x11\x10"},
- {"debug/buildinfo", "\x03W\x02\x01\x01\b\a\x03`\x18\x02\x01+\x0f "},
- {"debug/dwarf", "\x03c\a\x03z1\x13\x01\x01"},
- {"debug/elf", "\x03\x06P\r\a\x03`\x19\x01,\x19\x01\x15"},
- {"debug/gosym", "\x03c\n\xbe\x01\x01\x01\x02"},
- {"debug/macho", "\x03\x06P\r\n`\x1a,\x19\x01"},
- {"debug/pe", "\x03\x06P\r\a\x03`\x1a,\x19\x01\x15"},
- {"debug/plan9obj", "f\a\x03`\x1a,"},
- {"embed", "m+:\x18\x01T"},
+ {"crypto/subtle", "8\x9b\x01W"},
+ {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\a\x01\r\n\x01\t\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b;\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"},
+ {"crypto/tls/internal/fips140tls", "\x17\xa1\x02"},
+ {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x012\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x038\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\n\x01\x02\x0e\x02\x01\x01\x02\x03\x01"},
+ {"crypto/x509/pkix", "d\x06\a\x8d\x01G"},
+ {"database/sql", "\x03\nK\x16\x03\x80\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"},
+ {"database/sql/driver", "\ra\x03\xb4\x01\x0f\x11"},
+ {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03e\x19\x02\x01+\x0f\x1f"},
+ {"debug/dwarf", "\x03d\a\x03\x80\x011\x11\x01\x01"},
+ {"debug/elf", "\x03\x06Q\r\a\x03e\x1a\x01,\x17\x01\x16"},
+ {"debug/gosym", "\x03d\n\xc2\x01\x01\x01\x02"},
+ {"debug/macho", "\x03\x06Q\r\ne\x1b,\x17\x01"},
+ {"debug/pe", "\x03\x06Q\r\a\x03e\x1b,\x17\x01\x16"},
+ {"debug/plan9obj", "g\a\x03e\x1b,"},
+ {"embed", "n*@\x19\x01S"},
{"embed/internal/embedtest", ""},
{"encoding", ""},
- {"encoding/ascii85", "\xea\x01E"},
- {"encoding/asn1", "\x03j\x03\x87\x01\x01&\x0f\x02\x01\x0f\x03\x01"},
- {"encoding/base32", "\xea\x01C\x02"},
- {"encoding/base64", "\x99\x01QC\x02"},
- {"encoding/binary", "m}\r'\x0f\x05"},
- {"encoding/csv", "\x02\x01j\x03zF\x11\x02"},
- {"encoding/gob", "\x02_\x05\a\x03`\x1a\f\x01\x02\x1d\b\x14\x01\x0e\x02"},
- {"encoding/hex", "m\x03zC\x03"},
- {"encoding/json", "\x03\x01]\x04\b\x03z\r'\x0f\x02\x01\x02\x0f\x01\x01\x02"},
- {"encoding/pem", "\x03b\b}C\x03"},
- {"encoding/xml", "\x02\x01^\f\x03z4\x05\f\x01\x02\x0f\x02"},
- {"errors", "\xc9\x01|"},
- {"expvar", "jK9\t\n\x15\r\n\x02\x03\x01\x10"},
- {"flag", "a\f\x03z,\b\x05\n\x02\x01\x0f"},
- {"fmt", "mE8\r\x1f\b\x0f\x02\x03\x11"},
- {"go/ast", "\x03\x01l\x0f\x01j\x03)\b\x0f\x02\x01"},
- {"go/ast/internal/tests", ""},
- {"go/build", "\x02\x01j\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x12\x01+\x01\x04\x01\a\n\x02\x01\x11\x02\x02"},
- {"go/build/constraint", "m\xc2\x01\x01\x11\x02"},
- {"go/constant", "p\x10w\x01\x016\x01\x02\x11"},
- {"go/doc", "\x04l\x01\x06\t=-1\x12\x02\x01\x11\x02"},
- {"go/doc/comment", "\x03m\xbd\x01\x01\x01\x01\x11\x02"},
- {"go/format", "\x03m\x01\f\x01\x02jF"},
- {"go/importer", "s\a\x01\x01\x04\x01i9"},
- {"go/internal/gccgoimporter", "\x02\x01W\x13\x03\x05\v\x01g\x02,\x01\x05\x13\x01\v\b"},
- {"go/internal/gcimporter", "\x02n\x10\x01/\x05\x0e',\x17\x03\x02"},
- {"go/internal/srcimporter", "p\x01\x02\n\x03\x01i,\x01\x05\x14\x02\x13"},
- {"go/parser", "\x03j\x03\x01\x03\v\x01j\x01+\x06\x14"},
- {"go/printer", "p\x01\x03\x03\tj\r\x1f\x17\x02\x01\x02\n\x05\x02"},
- {"go/scanner", "\x03m\x10j2\x12\x01\x12\x02"},
- {"go/token", "\x04l\xbd\x01\x02\x03\x01\x0e\x02"},
- {"go/types", "\x03\x01\x06c\x03\x01\x04\b\x03\x02\x15\x1e\x06+\x04\x03\n%\a\n\x01\x01\x01\x02\x01\x0e\x02\x02"},
- {"go/version", "\xba\x01v"},
- {"hash", "\xea\x01"},
- {"hash/adler32", "m\x16\x16"},
- {"hash/crc32", "m\x16\x16\x14\x85\x01\x01\x12"},
- {"hash/crc64", "m\x16\x16\x99\x01"},
- {"hash/fnv", "m\x16\x16`"},
- {"hash/maphash", "\x94\x01\x05\x1b\x03@N"},
- {"html", "\xb0\x02\x02\x11"},
- {"html/template", "\x03g\x06\x19,5\x01\v \x05\x01\x02\x03\x0e\x01\x02\v\x01\x03\x02"},
- {"image", "\x02k\x1f^\x0f6\x03\x01"},
+ {"encoding/ascii85", "\xf1\x01C"},
+ {"encoding/asn1", "\x03k\x03\x8c\x01\x01'\r\x02\x01\x10\x03\x01"},
+ {"encoding/base32", "\xf1\x01A\x02"},
+ {"encoding/base64", "\x99\x01XA\x02"},
+ {"encoding/binary", "n\x83\x01\f(\r\x05"},
+ {"encoding/csv", "\x02\x01k\x03\x80\x01D\x12\x02"},
+ {"encoding/gob", "\x02`\x05\a\x03e\x1b\v\x01\x03\x1d\b\x12\x01\x0f\x02"},
+ {"encoding/hex", "n\x03\x80\x01A\x03"},
+ {"encoding/json", "\x03\x01^\x04\b\x03\x80\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"},
+ {"encoding/pem", "\x03c\b\x83\x01A\x03"},
+ {"encoding/xml", "\x02\x01_\f\x03\x80\x014\x05\n\x01\x02\x10\x02"},
+ {"errors", "\xca\x01\x81\x01"},
+ {"expvar", "kK?\b\v\x15\r\b\x02\x03\x01\x11"},
+ {"flag", "b\f\x03\x80\x01,\b\x05\b\x02\x01\x10"},
+ {"fmt", "nE>\f \b\r\x02\x03\x12"},
+ {"go/ast", "\x03\x01m\x0e\x01q\x03)\b\r\x02\x01"},
+ {"go/build", "\x02\x01k\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\t\x19\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"},
+ {"go/build/constraint", "n\xc6\x01\x01\x12\x02"},
+ {"go/constant", "q\x0f}\x01\x024\x01\x02\x12"},
+ {"go/doc", "\x04m\x01\x05\t>31\x10\x02\x01\x12\x02"},
+ {"go/doc/comment", "\x03n\xc1\x01\x01\x01\x01\x12\x02"},
+ {"go/format", "\x03n\x01\v\x01\x02qD"},
+ {"go/importer", "s\a\x01\x01\x04\x01p9"},
+ {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x04\v\x01n\x02,\x01\x05\x11\x01\f\b"},
+ {"go/internal/gcimporter", "\x02o\x0f\x010\x05\x0e-,\x15\x03\x02"},
+ {"go/internal/srcimporter", "q\x01\x01\n\x03\x01p,\x01\x05\x12\x02\x14"},
+ {"go/parser", "\x03k\x03\x01\x02\v\x01q\x01+\x06\x12"},
+ {"go/printer", "q\x01\x02\x03\tq\f \x15\x02\x01\x02\v\x05\x02"},
+ {"go/scanner", "\x03n\x0fq2\x10\x01\x13\x02"},
+ {"go/token", "\x04m\x83\x01>\x02\x03\x01\x0f\x02"},
+ {"go/types", "\x03\x01\x06d\x03\x01\x03\b\x03\x02\x15\x1f\x061\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"},
+ {"go/version", "\xbb\x01z"},
+ {"hash", "\xf1\x01"},
+ {"hash/adler32", "n\x15\x16"},
+ {"hash/crc32", "n\x15\x16\x15\x89\x01\x01\x13"},
+ {"hash/crc64", "n\x15\x16\x9e\x01"},
+ {"hash/fnv", "n\x15\x16g"},
+ {"hash/maphash", "\x83\x01\x11!\x03\x93\x01"},
+ {"html", "\xb5\x02\x02\x12"},
+ {"html/template", "\x03h\x06\x18-;\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"},
+ {"image", "\x02l\x1ee\x0f4\x03\x01"},
{"image/color", ""},
{"image/color/palette", "\x8c\x01"},
{"image/draw", "\x8b\x01\x01\x04"},
- {"image/gif", "\x02\x01\x05e\x03\x1b\x01\x01\x01\vQ"},
+ {"image/gif", "\x02\x01\x05f\x03\x1a\x01\x01\x01\vX"},
{"image/internal/imageutil", "\x8b\x01"},
- {"image/jpeg", "\x02k\x1e\x01\x04Z"},
- {"image/png", "\x02\a]\n\x13\x02\x06\x01^E"},
- {"index/suffixarray", "\x03c\a}\r*\f\x01"},
- {"internal/abi", "\xb4\x01\x91\x01"},
- {"internal/asan", "\xc5\x02"},
- {"internal/bisect", "\xa3\x02\x0f\x01"},
- {"internal/buildcfg", "pG_\x06\x02\x05\f\x01"},
- {"internal/bytealg", "\xad\x01\x98\x01"},
+ {"image/jpeg", "\x02l\x1d\x01\x04a"},
+ {"image/png", "\x02\a^\n\x12\x02\x06\x01eC"},
+ {"index/suffixarray", "\x03d\a\x83\x01\f+\n\x01"},
+ {"internal/abi", "\xb5\x01\x96\x01"},
+ {"internal/asan", "\xcb\x02"},
+ {"internal/bisect", "\xaa\x02\r\x01"},
+ {"internal/buildcfg", "qGe\x06\x02\x05\n\x01"},
+ {"internal/bytealg", "\xae\x01\x9d\x01"},
{"internal/byteorder", ""},
{"internal/cfg", ""},
- {"internal/chacha8rand", "\x99\x01\x1b\x91\x01"},
+ {"internal/cgrouptest", "q[Q\x06\x0f\x02\x01\x04\x01"},
+ {"internal/chacha8rand", "\x99\x01\x15\a\x96\x01"},
{"internal/copyright", ""},
{"internal/coverage", ""},
{"internal/coverage/calloc", ""},
- {"internal/coverage/cfile", "j\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x01\x1f,\x06\a\f\x01\x03\f\x06"},
- {"internal/coverage/cformat", "\x04l-\x04I\f7\x01\x02\f"},
- {"internal/coverage/cmerge", "p-Z"},
- {"internal/coverage/decodecounter", "f\n-\v\x02@,\x19\x16"},
- {"internal/coverage/decodemeta", "\x02d\n\x17\x16\v\x02@,"},
- {"internal/coverage/encodecounter", "\x02d\n-\f\x01\x02>\f \x17"},
- {"internal/coverage/encodemeta", "\x02\x01c\n\x13\x04\x16\r\x02>,/"},
- {"internal/coverage/pods", "\x04l-y\x06\x05\f\x02\x01"},
- {"internal/coverage/rtcov", "\xc5\x02"},
- {"internal/coverage/slicereader", "f\nz["},
- {"internal/coverage/slicewriter", "pz"},
- {"internal/coverage/stringtab", "p8\x04>"},
+ {"internal/coverage/cfile", "k\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x02$,\x06\a\n\x01\x03\r\x06"},
+ {"internal/coverage/cformat", "\x04m-\x04O\v6\x01\x02\r"},
+ {"internal/coverage/cmerge", "q-_"},
+ {"internal/coverage/decodecounter", "g\n-\v\x02F,\x17\x17"},
+ {"internal/coverage/decodemeta", "\x02e\n\x16\x17\v\x02F,"},
+ {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02D\v!\x15"},
+ {"internal/coverage/encodemeta", "\x02\x01d\n\x12\x04\x17\r\x02D,."},
+ {"internal/coverage/pods", "\x04m-\x7f\x06\x05\n\x02\x01"},
+ {"internal/coverage/rtcov", "\xcb\x02"},
+ {"internal/coverage/slicereader", "g\n\x80\x01Z"},
+ {"internal/coverage/slicewriter", "q\x80\x01"},
+ {"internal/coverage/stringtab", "q8\x04D"},
{"internal/coverage/test", ""},
{"internal/coverage/uleb128", ""},
- {"internal/cpu", "\xc5\x02"},
- {"internal/dag", "\x04l\xbd\x01\x03"},
- {"internal/diff", "\x03m\xbe\x01\x02"},
- {"internal/exportdata", "\x02\x01j\x03\x03]\x1a,\x01\x05\x13\x01\x02"},
- {"internal/filepathlite", "m+:\x19B"},
- {"internal/fmtsort", "\x04\x9a\x02\x0f"},
- {"internal/fuzz", "\x03\nA\x18\x04\x03\x03\x01\f\x0355\r\x02\x1d\x01\x05\x02\x05\f\x01\x02\x01\x01\v\x04\x02"},
+ {"internal/cpu", "\xcb\x02"},
+ {"internal/dag", "\x04m\xc1\x01\x03"},
+ {"internal/diff", "\x03n\xc2\x01\x02"},
+ {"internal/exportdata", "\x02\x01k\x03\x02c\x1b,\x01\x05\x11\x01\x02"},
+ {"internal/filepathlite", "n*@\x1a@"},
+ {"internal/fmtsort", "\x04\xa1\x02\r"},
+ {"internal/fuzz", "\x03\nB\x18\x04\x03\x03\x01\v\x036;\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"},
{"internal/goarch", ""},
- {"internal/godebug", "\x96\x01 |\x01\x12"},
+ {"internal/godebug", "\x96\x01!\x80\x01\x01\x13"},
{"internal/godebugs", ""},
{"internal/goexperiment", ""},
{"internal/goos", ""},
- {"internal/goroot", "\x96\x02\x01\x05\x14\x02"},
+ {"internal/goroot", "\x9d\x02\x01\x05\x12\x02"},
{"internal/gover", "\x04"},
{"internal/goversion", ""},
{"internal/itoa", ""},
- {"internal/lazyregexp", "\x96\x02\v\x0f\x02"},
- {"internal/lazytemplate", "\xea\x01,\x1a\x02\v"},
- {"internal/msan", "\xc5\x02"},
+ {"internal/lazyregexp", "\x9d\x02\v\r\x02"},
+ {"internal/lazytemplate", "\xf1\x01,\x18\x02\f"},
+ {"internal/msan", "\xcb\x02"},
{"internal/nettrace", ""},
- {"internal/obscuretestdata", "e\x85\x01,"},
- {"internal/oserror", "m"},
- {"internal/pkgbits", "\x03K\x18\a\x03\x05\vj\x0e\x1e\r\f\x01"},
+ {"internal/obscuretestdata", "f\x8b\x01,"},
+ {"internal/oserror", "n"},
+ {"internal/pkgbits", "\x03L\x18\a\x03\x04\vq\r\x1f\r\n\x01"},
{"internal/platform", ""},
- {"internal/poll", "mO\x1a\x149\x0f\x01\x01\v\x06"},
- {"internal/profile", "\x03\x04f\x03z7\r\x01\x01\x0f"},
+ {"internal/poll", "nO\x1f\x159\r\x01\x01\f\x06"},
+ {"internal/profile", "\x03\x04g\x03\x80\x017\v\x01\x01\x10"},
{"internal/profilerecord", ""},
- {"internal/race", "\x94\x01\xb1\x01"},
- {"internal/reflectlite", "\x94\x01 3<\""},
- {"internal/runtime/atomic", "\xc5\x02"},
- {"internal/runtime/exithook", "\xca\x01{"},
- {"internal/runtime/maps", "\x94\x01\x01\x1f\v\t\x05\x01w"},
- {"internal/runtime/math", "\xb4\x01"},
- {"internal/runtime/sys", "\xb4\x01\x04"},
- {"internal/runtime/syscall", "\xc5\x02"},
- {"internal/saferio", "\xea\x01["},
- {"internal/singleflight", "\xb2\x02"},
- {"internal/stringslite", "\x98\x01\xad\x01"},
- {"internal/sync", "\x94\x01 \x14k\x12"},
- {"internal/synctest", "\xc5\x02"},
- {"internal/syscall/execenv", "\xb4\x02"},
- {"internal/syscall/unix", "\xa3\x02\x10\x01\x11"},
- {"internal/sysinfo", "\x02\x01\xaa\x01=,\x1a\x02"},
+ {"internal/race", "\x94\x01\xb7\x01"},
+ {"internal/reflectlite", "\x94\x01!9<!"},
+ {"internal/runtime/atomic", "\xb5\x01\x96\x01"},
+ {"internal/runtime/cgroup", "\x98\x01:\x02w"},
+ {"internal/runtime/exithook", "\xcb\x01\x80\x01"},
+ {"internal/runtime/gc", "\xb5\x01"},
+ {"internal/runtime/maps", "\x94\x01\x01 \v\t\a\x03x"},
+ {"internal/runtime/math", "\xb5\x01"},
+ {"internal/runtime/startlinetest", ""},
+ {"internal/runtime/strconv", "\xd0\x01"},
+ {"internal/runtime/sys", "\xb5\x01\x04"},
+ {"internal/runtime/syscall", "\xb5\x01\x96\x01"},
+ {"internal/runtime/wasitest", ""},
+ {"internal/saferio", "\xf1\x01Z"},
+ {"internal/singleflight", "\xb7\x02"},
+ {"internal/stringslite", "\x98\x01\xb3\x01"},
+ {"internal/sync", "\x94\x01!\x14o\x13"},
+ {"internal/synctest", "\x94\x01\xb7\x01"},
+ {"internal/syscall/execenv", "\xb9\x02"},
+ {"internal/syscall/unix", "\xaa\x02\x0e\x01\x12"},
+ {"internal/sysinfo", "\x02\x01\xab\x01C,\x18\x02"},
{"internal/syslist", ""},
- {"internal/testenv", "\x03\n`\x02\x01*\x1a\x10'+\x01\x05\a\f\x01\x02\x02\x01\n"},
- {"internal/testlog", "\xb2\x02\x01\x12"},
- {"internal/testpty", "m\x03\xa6\x01"},
- {"internal/trace", "\x02\x01\x01\x06\\\a\x03n\x03\x03\x06\x03\n6\x01\x02\x0f\x06"},
- {"internal/trace/internal/testgen", "\x03c\nl\x03\x02\x03\x011\v\x0f"},
- {"internal/trace/internal/tracev1", "\x03\x01b\a\x03t\x06\r6\x01"},
- {"internal/trace/raw", "\x02d\nq\x03\x06E\x01\x11"},
- {"internal/trace/testtrace", "\x02\x01j\x03l\x03\x06\x057\f\x02\x01"},
+ {"internal/testenv", "\x03\na\x02\x01)\x1b\x10-+\x01\x05\a\n\x01\x02\x02\x01\v"},
+ {"internal/testhash", "\x03\x80\x01n\x118\v"},
+ {"internal/testlog", "\xb7\x02\x01\x13"},
+ {"internal/testpty", "n\x03\xac\x01"},
+ {"internal/trace", "\x02\x01\x01\x06]\a\x03t\x03\x03\x06\x03\t5\x01\x01\x01\x10\x06"},
+ {"internal/trace/internal/testgen", "\x03d\nr\x03\x02\x03\x011\v\r\x10"},
+ {"internal/trace/internal/tracev1", "\x03\x01c\a\x03z\x06\f5\x01"},
+ {"internal/trace/raw", "\x02e\nw\x03\x06C\x01\x12"},
+ {"internal/trace/testtrace", "\x02\x01k\x03r\x03\x05\x01\x057\n\x02\x01"},
{"internal/trace/tracev2", ""},
- {"internal/trace/traceviewer", "\x02]\v\x06\x1a<\x16\a\a\x04\t\n\x15\x01\x05\a\f\x01\x02\r"},
+ {"internal/trace/traceviewer", "\x02^\v\x06\x19=\x1c\a\a\x04\b\v\x15\x01\x05\a\n\x01\x02\x0e"},
{"internal/trace/traceviewer/format", ""},
- {"internal/trace/version", "pq\t"},
- {"internal/txtar", "\x03m\xa6\x01\x1a"},
- {"internal/types/errors", "\xaf\x02"},
- {"internal/unsafeheader", "\xc5\x02"},
- {"internal/xcoff", "Y\r\a\x03`\x1a,\x19\x01"},
- {"internal/zstd", "f\a\x03z\x0f"},
- {"io", "m\xc5\x01"},
- {"io/fs", "m+*(1\x12\x12\x04"},
- {"io/ioutil", "\xea\x01\x01+\x17\x03"},
- {"iter", "\xc8\x01[\""},
- {"log", "pz\x05'\r\x0f\x01\f"},
+ {"internal/trace/version", "qw\t"},
+ {"internal/txtar", "\x03n\xac\x01\x18"},
+ {"internal/types/errors", "\xb4\x02"},
+ {"internal/unsafeheader", "\xcb\x02"},
+ {"internal/xcoff", "Z\r\a\x03e\x1b,\x17\x01"},
+ {"internal/zstd", "g\a\x03\x80\x01\x0f"},
+ {"io", "n\xc9\x01"},
+ {"io/fs", "n*+.1\x10\x13\x04"},
+ {"io/ioutil", "\xf1\x01\x01+\x15\x03"},
+ {"iter", "\xc9\x01a!"},
+ {"log", "q\x80\x01\x05'\r\r\x01\r"},
{"log/internal", ""},
- {"log/slog", "\x03\nT\t\x03\x03z\x04\x01\x02\x02\x04'\x05\n\x02\x01\x02\x01\f\x02\x02\x02"},
+ {"log/slog", "\x03\nU\t\x03\x03\x80\x01\x04\x01\x02\x02\x03(\x05\b\x02\x01\x02\x01\r\x02\x02\x02"},
{"log/slog/internal", ""},
- {"log/slog/internal/benchmarks", "\r`\x03z\x06\x03<\x10"},
- {"log/slog/internal/buffer", "\xb2\x02"},
- {"log/slog/internal/slogtest", "\xf0\x01"},
- {"log/syslog", "m\x03~\x12\x16\x1a\x02\r"},
- {"maps", "\xed\x01X"},
- {"math", "\xad\x01LL"},
- {"math/big", "\x03j\x03)\x14=\r\x02\x024\x01\x02\x13"},
- {"math/bits", "\xc5\x02"},
- {"math/cmplx", "\xf7\x01\x02"},
- {"math/rand", "\xb5\x01B;\x01\x12"},
- {"math/rand/v2", "m,\x02\\\x02L"},
- {"mime", "\x02\x01b\b\x03z\f \x17\x03\x02\x0f\x02"},
- {"mime/multipart", "\x02\x01G#\x03E5\f\x01\x06\x02\x15\x02\x06\x11\x02\x01\x15"},
- {"mime/quotedprintable", "\x02\x01mz"},
- {"net", "\x04\t`+\x1d\a\x04\x05\f\x01\x04\x14\x01%\x06\r\n\x05\x01\x01\v\x06\a"},
- {"net/http", "\x02\x01\x04\x04\x02=\b\x13\x01\a\x03E5\x01\x03\b\x01\x02\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\n\x01\x01\x01\x02\x01\x01\v\x02\x02\x02\b\x01\x01\x01"},
- {"net/http/cgi", "\x02P\x1b\x03z\x04\b\n\x01\x13\x01\x01\x01\x04\x01\x05\x02\n\x02\x01\x0f\x0e"},
- {"net/http/cookiejar", "\x04i\x03\x90\x01\x01\b\f\x18\x03\x02\r\x04"},
- {"net/http/fcgi", "\x02\x01\nY\a\x03z\x16\x01\x01\x14\x1a\x02\r"},
- {"net/http/httptest", "\x02\x01\nE\x02\x1b\x01z\x04\x12\x01\n\t\x02\x19\x01\x02\r\x0e"},
- {"net/http/httptrace", "\rEn@\x14\n!"},
- {"net/http/httputil", "\x02\x01\n`\x03z\x04\x0f\x03\x01\x05\x02\x01\v\x01\x1b\x02\r\x0e"},
- {"net/http/internal", "\x02\x01j\x03z"},
- {"net/http/internal/ascii", "\xb0\x02\x11"},
- {"net/http/internal/httpcommon", "\r`\x03\x96\x01\x0e\x01\x19\x01\x01\x02\x1b\x02"},
- {"net/http/internal/testcert", "\xb0\x02"},
- {"net/http/pprof", "\x02\x01\nc\x19,\x11$\x04\x13\x14\x01\r\x06\x03\x01\x02\x01\x0f"},
+ {"log/slog/internal/benchmarks", "\ra\x03\x80\x01\x06\x03:\x11"},
+ {"log/slog/internal/buffer", "\xb7\x02"},
+ {"log/syslog", "n\x03\x84\x01\x12\x16\x18\x02\x0e"},
+ {"maps", "\xf4\x01W"},
+ {"math", "\xae\x01RK"},
+ {"math/big", "\x03k\x03(\x15C\f\x03\x020\x02\x01\x02\x14"},
+ {"math/big/internal/asmgen", "\x03\x01m\x8f\x012\x03"},
+ {"math/bits", "\xcb\x02"},
+ {"math/cmplx", "\xfd\x01\x03"},
+ {"math/rand", "\xb6\x01G:\x01\x13"},
+ {"math/rand/v2", "n+\x03a\x03K"},
+ {"mime", "\x02\x01c\b\x03\x80\x01\v!\x15\x03\x02\x10\x02"},
+ {"mime/multipart", "\x02\x01H#\x03E;\v\x01\a\x02\x15\x02\x06\x0f\x02\x01\x16"},
+ {"mime/quotedprintable", "\x02\x01n\x80\x01"},
+ {"net", "\x04\ta*\x1e\a\x04\x05\x11\x01\x04\x15\x01%\x06\r\b\x05\x01\x01\f\x06\a"},
+ {"net/http", "\x02\x01\x04\x04\x02>\b\x13\x01\a\x03E;\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"},
+ {"net/http/cgi", "\x02Q\x1b\x03\x80\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"},
+ {"net/http/cookiejar", "\x04j\x03\x96\x01\x01\b\f\x16\x03\x02\x0e\x04"},
+ {"net/http/fcgi", "\x02\x01\nZ\a\x03\x80\x01\x16\x01\x01\x14\x18\x02\x0e"},
+ {"net/http/httptest", "\x02\x01\nF\x02\x1b\x01\x80\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"},
+ {"net/http/httptrace", "\rFnF\x14\n "},
+ {"net/http/httputil", "\x02\x01\na\x03\x80\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"},
+ {"net/http/internal", "\x02\x01k\x03\x80\x01"},
+ {"net/http/internal/ascii", "\xb5\x02\x12"},
+ {"net/http/internal/httpcommon", "\ra\x03\x9c\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"},
+ {"net/http/internal/testcert", "\xb5\x02"},
+ {"net/http/pprof", "\x02\x01\nd\x18-\x11*\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"},
{"net/internal/cgotest", ""},
- {"net/internal/socktest", "p\xc2\x01\x02"},
- {"net/mail", "\x02k\x03z\x04\x0f\x03\x14\x1c\x02\r\x04"},
- {"net/netip", "\x04i+\x01#;\x026\x15"},
- {"net/rpc", "\x02f\x05\x03\x10\n`\x04\x12\x01\x1d\x0f\x03\x02"},
- {"net/rpc/jsonrpc", "j\x03\x03z\x16\x11!"},
- {"net/smtp", "\x19.\v\x13\b\x03z\x16\x14\x1c"},
- {"net/textproto", "\x02\x01j\x03z\r\t/\x01\x02\x13"},
- {"net/url", "m\x03\x86\x01%\x12\x02\x01\x15"},
- {"os", "m+\x01\x18\x03\b\t\r\x03\x01\x04\x10\x018\n\x05\x01\x01\v\x06"},
- {"os/exec", "\x03\n`H \x01\x14\x01+\x06\a\f\x01\x04\v"},
- {"os/exec/internal/fdtest", "\xb4\x02"},
- {"os/signal", "\r\x89\x02\x17\x05\x02"},
- {"os/user", "\x02\x01j\x03z,\r\f\x01\x02"},
- {"path", "m+\xab\x01"},
- {"path/filepath", "m+\x19:+\r\n\x03\x04\x0f"},
- {"plugin", "m"},
- {"reflect", "m'\x04\x1c\b\f\x04\x02\x19\x10,\f\x03\x0f\x02\x02"},
+ {"net/internal/socktest", "q\xc6\x01\x02"},
+ {"net/mail", "\x02l\x03\x80\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"},
+ {"net/netip", "\x04j*\x01$@\x034\x16"},
+ {"net/rpc", "\x02g\x05\x03\x0f\ng\x04\x12\x01\x1d\r\x03\x02"},
+ {"net/rpc/jsonrpc", "k\x03\x03\x80\x01\x16\x11\x1f"},
+ {"net/smtp", "\x19/\v\x13\b\x03\x80\x01\x16\x14\x1a"},
+ {"net/textproto", "\x02\x01k\x03\x80\x01\f\n-\x01\x02\x14"},
+ {"net/url", "n\x03\x8b\x01&\x10\x02\x01\x16"},
+ {"os", "n*\x01\x19\x03\b\t\x12\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"},
+ {"os/exec", "\x03\naH%\x01\x15\x01+\x06\a\n\x01\x04\f"},
+ {"os/exec/internal/fdtest", "\xb9\x02"},
+ {"os/signal", "\r\x90\x02\x15\x05\x02"},
+ {"os/user", "\x02\x01k\x03\x80\x01,\r\n\x01\x02"},
+ {"path", "n*\xb1\x01"},
+ {"path/filepath", "n*\x1a@+\r\b\x03\x04\x10"},
+ {"plugin", "n"},
+ {"reflect", "n&\x04\x1d\b\f\x06\x04\x1b\x06\t-\n\x03\x10\x02\x02"},
{"reflect/internal/example1", ""},
{"reflect/internal/example2", ""},
- {"regexp", "\x03\xe7\x018\v\x02\x01\x02\x0f\x02"},
- {"regexp/syntax", "\xad\x02\x01\x01\x01\x11\x02"},
- {"runtime", "\x94\x01\x04\x01\x02\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x03\x0fd"},
- {"runtime/coverage", "\x9f\x01K"},
- {"runtime/debug", "pUQ\r\n\x02\x01\x0f\x06"},
- {"runtime/internal/startlinetest", ""},
- {"runtime/internal/wasitest", ""},
- {"runtime/metrics", "\xb6\x01A,\""},
- {"runtime/pprof", "\x02\x01\x01\x03\x06Y\a\x03$3#\r\x1f\r\n\x01\x01\x01\x02\x02\b\x03\x06"},
- {"runtime/race", "\xab\x02"},
+ {"regexp", "\x03\xee\x018\t\x02\x01\x02\x10\x02"},
+ {"regexp/syntax", "\xb2\x02\x01\x01\x01\x02\x10\x02"},
+ {"runtime", "\x94\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x02\x01\x01\x04\x10c"},
+ {"runtime/coverage", "\xa0\x01Q"},
+ {"runtime/debug", "qUW\r\b\x02\x01\x10\x06"},
+ {"runtime/metrics", "\xb7\x01F-!"},
+ {"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03#4)\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"},
+ {"runtime/race", "\xb0\x02"},
{"runtime/race/internal/amd64v1", ""},
- {"runtime/trace", "\rcz9\x0f\x01\x12"},
- {"slices", "\x04\xe9\x01\fL"},
- {"sort", "\xc9\x0104"},
- {"strconv", "m+:%\x02J"},
- {"strings", "m'\x04:\x18\x03\f9\x0f\x02\x02"},
+ {"runtime/trace", "\ra\x03w\t9\b\x05\x01\r\x06"},
+ {"slices", "\x04\xf0\x01\fK"},
+ {"sort", "\xca\x0162"},
+ {"strconv", "n*@%\x03I"},
+ {"strings", "n&\x04@\x19\x03\f7\x10\x02\x02"},
{"structs", ""},
- {"sync", "\xc8\x01\vP\x10\x12"},
- {"sync/atomic", "\xc5\x02"},
- {"syscall", "m(\x03\x01\x1b\b\x03\x03\x06\aT\n\x05\x01\x12"},
- {"testing", "\x03\n`\x02\x01X\x0f\x13\r\x04\x1b\x06\x02\x05\x02\a\x01\x02\x01\x02\x01\f\x02\x02\x02"},
- {"testing/fstest", "m\x03z\x01\v%\x12\x03\b\a"},
- {"testing/internal/testdeps", "\x02\v\xa6\x01'\x10,\x03\x05\x03\b\a\x02\r"},
- {"testing/iotest", "\x03j\x03z\x04"},
- {"testing/quick", "o\x01\x87\x01\x04#\x12\x0f"},
- {"testing/slogtest", "\r`\x03\x80\x01.\x05\x12\n"},
- {"text/scanner", "\x03mz,+\x02"},
- {"text/tabwriter", "pzY"},
- {"text/template", "m\x03B8\x01\v\x1f\x01\x05\x01\x02\x05\r\x02\f\x03\x02"},
- {"text/template/parse", "\x03m\xb3\x01\f\x01\x11\x02"},
- {"time", "m+\x1d\x1d'*\x0f\x02\x11"},
- {"time/tzdata", "m\xc7\x01\x11"},
+ {"sync", "\xc9\x01\x10\x01P\x0e\x13"},
+ {"sync/atomic", "\xcb\x02"},
+ {"syscall", "n'\x03\x01\x1c\b\x03\x03\x06\vV\b\x05\x01\x13"},
+ {"testing", "\x03\na\x02\x01X\x14\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x02\x02"},
+ {"testing/fstest", "n\x03\x80\x01\x01\n&\x10\x03\b\b"},
+ {"testing/internal/testdeps", "\x02\v\xa7\x01-\x10,\x03\x05\x03\x06\a\x02\x0e"},
+ {"testing/iotest", "\x03k\x03\x80\x01\x04"},
+ {"testing/quick", "p\x01\x8c\x01\x05#\x10\x10"},
+ {"testing/slogtest", "\ra\x03\x86\x01.\x05\x10\v"},
+ {"testing/synctest", "\xda\x01`\x11"},
+ {"text/scanner", "\x03n\x80\x01,*\x02"},
+ {"text/tabwriter", "q\x80\x01X"},
+ {"text/template", "n\x03B>\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"},
+ {"text/template/parse", "\x03n\xb9\x01\n\x01\x12\x02"},
+ {"time", "n*\x1e\"(*\r\x02\x12"},
+ {"time/tzdata", "n\xcb\x01\x12"},
{"unicode", ""},
{"unicode/utf16", ""},
{"unicode/utf8", ""},
- {"unique", "\x94\x01>\x01P\x0f\x13\x12"},
+ {"unique", "\x94\x01!#\x01Q\r\x01\x13\x12"},
{"unsafe", ""},
- {"vendor/golang.org/x/crypto/chacha20", "\x10V\a\x8c\x01*'"},
- {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10V\a\xd9\x01\x04\x01\a"},
- {"vendor/golang.org/x/crypto/cryptobyte", "c\n\x03\x88\x01&!\n"},
+ {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x92\x01*&"},
+ {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xde\x01\x04\x01\a"},
+ {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x8d\x01' \n"},
{"vendor/golang.org/x/crypto/cryptobyte/asn1", ""},
- {"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"},
- {"vendor/golang.org/x/crypto/internal/poly1305", "Q\x15\x93\x01"},
- {"vendor/golang.org/x/net/dns/dnsmessage", "m"},
- {"vendor/golang.org/x/net/http/httpguts", "\x80\x02\x14\x1c\x13\r"},
- {"vendor/golang.org/x/net/http/httpproxy", "m\x03\x90\x01\x15\x01\x1a\x13\r"},
- {"vendor/golang.org/x/net/http2/hpack", "\x03j\x03zH"},
- {"vendor/golang.org/x/net/idna", "p\x87\x019\x13\x10\x02\x01"},
- {"vendor/golang.org/x/net/nettest", "\x03c\a\x03z\x11\x05\x16\x01\f\f\x01\x02\x02\x01\n"},
- {"vendor/golang.org/x/sys/cpu", "\x96\x02\r\f\x01\x15"},
- {"vendor/golang.org/x/text/secure/bidirule", "m\xd6\x01\x11\x01"},
- {"vendor/golang.org/x/text/transform", "\x03j}Y"},
- {"vendor/golang.org/x/text/unicode/bidi", "\x03\be~@\x15"},
- {"vendor/golang.org/x/text/unicode/norm", "f\nzH\x11\x11"},
- {"weak", "\x94\x01\x8f\x01\""},
+ {"vendor/golang.org/x/crypto/internal/alias", "\xcb\x02"},
+ {"vendor/golang.org/x/crypto/internal/poly1305", "R\x15\x99\x01"},
+ {"vendor/golang.org/x/net/dns/dnsmessage", "n"},
+ {"vendor/golang.org/x/net/http/httpguts", "\x87\x02\x14\x1a\x14\r"},
+ {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x96\x01\x10\x05\x01\x18\x14\r"},
+ {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03\x80\x01F"},
+ {"vendor/golang.org/x/net/idna", "q\x8c\x018\x14\x10\x02\x01"},
+ {"vendor/golang.org/x/net/nettest", "\x03d\a\x03\x80\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"},
+ {"vendor/golang.org/x/sys/cpu", "\x9d\x02\r\n\x01\x16"},
+ {"vendor/golang.org/x/text/secure/bidirule", "n\xdb\x01\x11\x01"},
+ {"vendor/golang.org/x/text/transform", "\x03k\x83\x01X"},
+ {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf\x84\x01>\x16"},
+ {"vendor/golang.org/x/text/unicode/norm", "g\n\x80\x01F\x12\x11"},
+ {"weak", "\x94\x01\x96\x01!"},
}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index 64f0326b6..c1faa50d3 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -502,6 +502,7 @@ var PackageSymbols = map[string][]Symbol{
{"MD4", Const, 0, ""},
{"MD5", Const, 0, ""},
{"MD5SHA1", Const, 0, ""},
+ {"MessageSigner", Type, 25, ""},
{"PrivateKey", Type, 0, ""},
{"PublicKey", Type, 2, ""},
{"RIPEMD160", Const, 0, ""},
@@ -517,6 +518,7 @@ var PackageSymbols = map[string][]Symbol{
{"SHA512", Const, 0, ""},
{"SHA512_224", Const, 5, ""},
{"SHA512_256", Const, 5, ""},
+ {"SignMessage", Func, 25, "func(signer Signer, rand io.Reader, msg []byte, opts SignerOpts) (signature []byte, err error)"},
{"Signer", Type, 4, ""},
{"SignerOpts", Type, 4, ""},
},
@@ -600,10 +602,12 @@ var PackageSymbols = map[string][]Symbol{
{"X25519", Func, 20, "func() Curve"},
},
"crypto/ecdsa": {
+ {"(*PrivateKey).Bytes", Method, 25, ""},
{"(*PrivateKey).ECDH", Method, 20, ""},
{"(*PrivateKey).Equal", Method, 15, ""},
{"(*PrivateKey).Public", Method, 4, ""},
{"(*PrivateKey).Sign", Method, 4, ""},
+ {"(*PublicKey).Bytes", Method, 25, ""},
{"(*PublicKey).ECDH", Method, 20, ""},
{"(*PublicKey).Equal", Method, 15, ""},
{"(PrivateKey).Add", Method, 0, ""},
@@ -619,6 +623,8 @@ var PackageSymbols = map[string][]Symbol{
{"(PublicKey).ScalarBaseMult", Method, 0, ""},
{"(PublicKey).ScalarMult", Method, 0, ""},
{"GenerateKey", Func, 0, "func(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)"},
+ {"ParseRawPrivateKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PrivateKey, error)"},
+ {"ParseUncompressedPublicKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PublicKey, error)"},
{"PrivateKey", Type, 0, ""},
{"PrivateKey.D", Field, 0, ""},
{"PrivateKey.PublicKey", Field, 0, ""},
@@ -815,6 +821,7 @@ var PackageSymbols = map[string][]Symbol{
"crypto/sha3": {
{"(*SHA3).AppendBinary", Method, 24, ""},
{"(*SHA3).BlockSize", Method, 24, ""},
+ {"(*SHA3).Clone", Method, 25, ""},
{"(*SHA3).MarshalBinary", Method, 24, ""},
{"(*SHA3).Reset", Method, 24, ""},
{"(*SHA3).Size", Method, 24, ""},
@@ -967,6 +974,7 @@ var PackageSymbols = map[string][]Symbol{
{"Config.GetCertificate", Field, 4, ""},
{"Config.GetClientCertificate", Field, 8, ""},
{"Config.GetConfigForClient", Field, 8, ""},
+ {"Config.GetEncryptedClientHelloKeys", Field, 25, ""},
{"Config.InsecureSkipVerify", Field, 0, ""},
{"Config.KeyLogWriter", Field, 8, ""},
{"Config.MaxVersion", Field, 2, ""},
@@ -5463,6 +5471,7 @@ var PackageSymbols = map[string][]Symbol{
{"ParenExpr.X", Field, 0, ""},
{"Pkg", Const, 0, ""},
{"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"},
+ {"PreorderStack", Func, 25, "func(root Node, stack []Node, f func(n Node, stack []Node) bool)"},
{"Print", Func, 0, "func(fset *token.FileSet, x any) error"},
{"RECV", Const, 0, ""},
{"RangeStmt", Type, 0, ""},
@@ -5933,6 +5942,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*File).SetLines", Method, 0, ""},
{"(*File).SetLinesForContent", Method, 0, ""},
{"(*File).Size", Method, 0, ""},
+ {"(*FileSet).AddExistingFiles", Method, 25, ""},
{"(*FileSet).AddFile", Method, 0, ""},
{"(*FileSet).Base", Method, 0, ""},
{"(*FileSet).File", Method, 0, ""},
@@ -6382,7 +6392,7 @@ var PackageSymbols = map[string][]Symbol{
{"Label", Type, 5, ""},
{"LocalVar", Const, 25, ""},
{"LookupFieldOrMethod", Func, 5, "func(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool)"},
- {"LookupSelection", Func, 25, ""},
+ {"LookupSelection", Func, 25, "func(T Type, addressable bool, pkg *Package, name string) (Selection, bool)"},
{"Map", Type, 5, ""},
{"MethodExpr", Const, 5, ""},
{"MethodSet", Type, 5, ""},
@@ -6490,9 +6500,11 @@ var PackageSymbols = map[string][]Symbol{
{"Lang", Func, 22, "func(x string) string"},
},
"hash": {
+ {"Cloner", Type, 25, ""},
{"Hash", Type, 0, ""},
{"Hash32", Type, 0, ""},
{"Hash64", Type, 0, ""},
+ {"XOF", Type, 25, ""},
},
"hash/adler32": {
{"Checksum", Func, 0, "func(data []byte) uint32"},
@@ -6533,6 +6545,7 @@ var PackageSymbols = map[string][]Symbol{
},
"hash/maphash": {
{"(*Hash).BlockSize", Method, 14, ""},
+ {"(*Hash).Clone", Method, 25, ""},
{"(*Hash).Reset", Method, 14, ""},
{"(*Hash).Seed", Method, 14, ""},
{"(*Hash).SetSeed", Method, 14, ""},
@@ -7133,7 +7146,7 @@ var PackageSymbols = map[string][]Symbol{
{"FormatFileInfo", Func, 21, "func(info FileInfo) string"},
{"Glob", Func, 16, "func(fsys FS, pattern string) (matches []string, err error)"},
{"GlobFS", Type, 16, ""},
- {"Lstat", Func, 25, ""},
+ {"Lstat", Func, 25, "func(fsys FS, name string) (FileInfo, error)"},
{"ModeAppend", Const, 16, ""},
{"ModeCharDevice", Const, 16, ""},
{"ModeDevice", Const, 16, ""},
@@ -7158,7 +7171,7 @@ var PackageSymbols = map[string][]Symbol{
{"ReadDirFile", Type, 16, ""},
{"ReadFile", Func, 16, "func(fsys FS, name string) ([]byte, error)"},
{"ReadFileFS", Type, 16, ""},
- {"ReadLink", Func, 25, ""},
+ {"ReadLink", Func, 25, "func(fsys FS, name string) (string, error)"},
{"ReadLinkFS", Type, 25, ""},
{"SkipAll", Var, 20, ""},
{"SkipDir", Var, 16, ""},
@@ -7275,6 +7288,7 @@ var PackageSymbols = map[string][]Symbol{
{"(Record).Attrs", Method, 21, ""},
{"(Record).Clone", Method, 21, ""},
{"(Record).NumAttrs", Method, 21, ""},
+ {"(Record).Source", Method, 25, ""},
{"(Value).Any", Method, 21, ""},
{"(Value).Bool", Method, 21, ""},
{"(Value).Duration", Method, 21, ""},
@@ -7306,6 +7320,7 @@ var PackageSymbols = map[string][]Symbol{
{"Float64", Func, 21, "func(key string, v float64) Attr"},
{"Float64Value", Func, 21, "func(v float64) Value"},
{"Group", Func, 21, "func(key string, args ...any) Attr"},
+ {"GroupAttrs", Func, 25, "func(key string, attrs ...Attr) Attr"},
{"GroupValue", Func, 21, "func(as ...Attr) Value"},
{"Handler", Type, 21, ""},
{"HandlerOptions", Type, 21, ""},
@@ -7916,7 +7931,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Writer).WriteField", Method, 0, ""},
{"ErrMessageTooLarge", Var, 9, ""},
{"File", Type, 0, ""},
- {"FileContentDisposition", Func, 25, ""},
+ {"FileContentDisposition", Func, 25, "func(fieldname string, filename string) string"},
{"FileHeader", Type, 0, ""},
{"FileHeader.Filename", Field, 0, ""},
{"FileHeader.Header", Field, 0, ""},
@@ -8294,6 +8309,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*Client).PostForm", Method, 0, ""},
{"(*Cookie).String", Method, 0, ""},
{"(*Cookie).Valid", Method, 18, ""},
+ {"(*CrossOriginProtection).AddInsecureBypassPattern", Method, 25, ""},
+ {"(*CrossOriginProtection).AddTrustedOrigin", Method, 25, ""},
+ {"(*CrossOriginProtection).Check", Method, 25, ""},
+ {"(*CrossOriginProtection).Handler", Method, 25, ""},
+ {"(*CrossOriginProtection).SetDenyHandler", Method, 25, ""},
{"(*MaxBytesError).Error", Method, 19, ""},
{"(*ProtocolError).Error", Method, 0, ""},
{"(*ProtocolError).Is", Method, 21, ""},
@@ -8388,6 +8408,7 @@ var PackageSymbols = map[string][]Symbol{
{"Cookie.Unparsed", Field, 0, ""},
{"Cookie.Value", Field, 0, ""},
{"CookieJar", Type, 0, ""},
+ {"CrossOriginProtection", Type, 25, ""},
{"DefaultClient", Var, 0, ""},
{"DefaultMaxHeaderBytes", Const, 0, ""},
{"DefaultMaxIdleConnsPerHost", Const, 0, ""},
@@ -8460,6 +8481,7 @@ var PackageSymbols = map[string][]Symbol{
{"MethodPost", Const, 6, ""},
{"MethodPut", Const, 6, ""},
{"MethodTrace", Const, 6, ""},
+ {"NewCrossOriginProtection", Func, 25, "func() *CrossOriginProtection"},
{"NewFileTransport", Func, 0, "func(fs FileSystem) RoundTripper"},
{"NewFileTransportFS", Func, 22, "func(fsys fs.FS) RoundTripper"},
{"NewRequest", Func, 0, "func(method string, url string, body io.Reader) (*Request, error)"},
@@ -9174,15 +9196,19 @@ var PackageSymbols = map[string][]Symbol{
{"(*Root).Link", Method, 25, ""},
{"(*Root).Lstat", Method, 24, ""},
{"(*Root).Mkdir", Method, 24, ""},
+ {"(*Root).MkdirAll", Method, 25, ""},
{"(*Root).Name", Method, 24, ""},
{"(*Root).Open", Method, 24, ""},
{"(*Root).OpenFile", Method, 24, ""},
{"(*Root).OpenRoot", Method, 24, ""},
+ {"(*Root).ReadFile", Method, 25, ""},
{"(*Root).Readlink", Method, 25, ""},
{"(*Root).Remove", Method, 24, ""},
+ {"(*Root).RemoveAll", Method, 25, ""},
{"(*Root).Rename", Method, 25, ""},
{"(*Root).Stat", Method, 24, ""},
{"(*Root).Symlink", Method, 25, ""},
+ {"(*Root).WriteFile", Method, 25, ""},
{"(*SyscallError).Error", Method, 0, ""},
{"(*SyscallError).Timeout", Method, 10, ""},
{"(*SyscallError).Unwrap", Method, 13, ""},
@@ -9623,6 +9649,7 @@ var PackageSymbols = map[string][]Symbol{
{"StructTag", Type, 0, ""},
{"Swapper", Func, 8, "func(slice any) func(i int, j int)"},
{"Type", Type, 0, ""},
+ {"TypeAssert", Func, 25, "func[T any](v Value) (T, bool)"},
{"TypeFor", Func, 22, "func[T any]() Type"},
{"TypeOf", Func, 0, "func(i any) Type"},
{"Uint", Const, 0, ""},
@@ -9909,6 +9936,7 @@ var PackageSymbols = map[string][]Symbol{
{"SetBlockProfileRate", Func, 1, "func(rate int)"},
{"SetCPUProfileRate", Func, 0, "func(hz int)"},
{"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"},
+ {"SetDefaultGOMAXPROCS", Func, 25, "func()"},
{"SetFinalizer", Func, 0, "func(obj any, finalizer any)"},
{"SetMutexProfileFraction", Func, 8, "func(rate int) int"},
{"Stack", Func, 0, "func(buf []byte, all bool) int"},
@@ -10021,11 +10049,20 @@ var PackageSymbols = map[string][]Symbol{
{"WriteHeapProfile", Func, 0, "func(w io.Writer) error"},
},
"runtime/trace": {
+ {"(*FlightRecorder).Enabled", Method, 25, ""},
+ {"(*FlightRecorder).Start", Method, 25, ""},
+ {"(*FlightRecorder).Stop", Method, 25, ""},
+ {"(*FlightRecorder).WriteTo", Method, 25, ""},
{"(*Region).End", Method, 11, ""},
{"(*Task).End", Method, 11, ""},
+ {"FlightRecorder", Type, 25, ""},
+ {"FlightRecorderConfig", Type, 25, ""},
+ {"FlightRecorderConfig.MaxBytes", Field, 25, ""},
+ {"FlightRecorderConfig.MinAge", Field, 25, ""},
{"IsEnabled", Func, 11, "func() bool"},
{"Log", Func, 11, "func(ctx context.Context, category string, message string)"},
{"Logf", Func, 11, "func(ctx context.Context, category string, format string, args ...any)"},
+ {"NewFlightRecorder", Func, 25, "func(cfg FlightRecorderConfig) *FlightRecorder"},
{"NewTask", Func, 11, "func(pctx context.Context, taskType string) (ctx context.Context, task *Task)"},
{"Region", Type, 11, ""},
{"Start", Func, 5, "func(w io.Writer) error"},
@@ -16642,6 +16679,7 @@ var PackageSymbols = map[string][]Symbol{
{"ValueOf", Func, 0, ""},
},
"testing": {
+ {"(*B).Attr", Method, 25, ""},
{"(*B).Chdir", Method, 24, ""},
{"(*B).Cleanup", Method, 14, ""},
{"(*B).Context", Method, 24, ""},
@@ -16658,6 +16696,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*B).Logf", Method, 0, ""},
{"(*B).Loop", Method, 24, ""},
{"(*B).Name", Method, 8, ""},
+ {"(*B).Output", Method, 25, ""},
{"(*B).ReportAllocs", Method, 1, ""},
{"(*B).ReportMetric", Method, 13, ""},
{"(*B).ResetTimer", Method, 0, ""},
@@ -16674,6 +16713,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*B).StopTimer", Method, 0, ""},
{"(*B).TempDir", Method, 15, ""},
{"(*F).Add", Method, 18, ""},
+ {"(*F).Attr", Method, 25, ""},
{"(*F).Chdir", Method, 24, ""},
{"(*F).Cleanup", Method, 18, ""},
{"(*F).Context", Method, 24, ""},
@@ -16689,6 +16729,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*F).Log", Method, 18, ""},
{"(*F).Logf", Method, 18, ""},
{"(*F).Name", Method, 18, ""},
+ {"(*F).Output", Method, 25, ""},
{"(*F).Setenv", Method, 18, ""},
{"(*F).Skip", Method, 18, ""},
{"(*F).SkipNow", Method, 18, ""},
@@ -16697,6 +16738,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*F).TempDir", Method, 18, ""},
{"(*M).Run", Method, 4, ""},
{"(*PB).Next", Method, 3, ""},
+ {"(*T).Attr", Method, 25, ""},
{"(*T).Chdir", Method, 24, ""},
{"(*T).Cleanup", Method, 14, ""},
{"(*T).Context", Method, 24, ""},
@@ -16712,6 +16754,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*T).Log", Method, 0, ""},
{"(*T).Logf", Method, 0, ""},
{"(*T).Name", Method, 8, ""},
+ {"(*T).Output", Method, 25, ""},
{"(*T).Parallel", Method, 0, ""},
{"(*T).Run", Method, 7, ""},
{"(*T).Setenv", Method, 17, ""},
@@ -16834,6 +16877,10 @@ var PackageSymbols = map[string][]Symbol{
{"Run", Func, 22, "func(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any)"},
{"TestHandler", Func, 21, "func(h slog.Handler, results func() []map[string]any) error"},
},
+ "testing/synctest": {
+ {"Test", Func, 25, "func(t *testing.T, f func(*testing.T))"},
+ {"Wait", Func, 25, "func()"},
+ },
"text/scanner": {
{"(*Position).IsValid", Method, 0, ""},
{"(*Scanner).Init", Method, 0, ""},
@@ -17347,6 +17394,7 @@ var PackageSymbols = map[string][]Symbol{
{"CaseRange.Lo", Field, 0, ""},
{"CaseRanges", Var, 0, ""},
{"Categories", Var, 0, ""},
+ {"CategoryAliases", Var, 25, ""},
{"Caucasian_Albanian", Var, 4, ""},
{"Cc", Var, 0, ""},
{"Cf", Var, 0, ""},
@@ -17354,6 +17402,7 @@ var PackageSymbols = map[string][]Symbol{
{"Cham", Var, 0, ""},
{"Cherokee", Var, 0, ""},
{"Chorasmian", Var, 16, ""},
+ {"Cn", Var, 25, ""},
{"Co", Var, 0, ""},
{"Common", Var, 0, ""},
{"Coptic", Var, 0, ""},
@@ -17432,6 +17481,7 @@ var PackageSymbols = map[string][]Symbol{
{"Khojki", Var, 4, ""},
{"Khudawadi", Var, 4, ""},
{"L", Var, 0, ""},
+ {"LC", Var, 25, ""},
{"Lao", Var, 0, ""},
{"Latin", Var, 0, ""},
{"Lepcha", Var, 0, ""},
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/vendor/golang.org/x/tools/internal/typesinternal/fx.go
new file mode 100644
index 000000000..93acff217
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/fx.go
@@ -0,0 +1,49 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// NoEffects reports whether the expression has no side effects, i.e., it
+// does not modify the memory state. This function is conservative: it may
+// return false even when the expression has no effect.
+func NoEffects(info *types.Info, expr ast.Expr) bool {
+ noEffects := true
+ ast.Inspect(expr, func(n ast.Node) bool {
+ switch v := n.(type) {
+ case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr,
+ *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr,
+ *ast.StarExpr, *ast.CompositeLit, *ast.ArrayType, *ast.StructType,
+ *ast.MapType, *ast.InterfaceType, *ast.KeyValueExpr:
+ // No effect
+ case *ast.UnaryExpr:
+ // Channel receive <-ch has effects
+ if v.Op == token.ARROW {
+ noEffects = false
+ }
+ case *ast.CallExpr:
+ // Type conversion has no effects
+ if !info.Types[v.Fun].IsType() {
+ // TODO(adonovan): Add a case for built-in functions without side
+ // effects (by using callsPureBuiltin from tools/internal/refactor/inline)
+
+ noEffects = false
+ }
+ case *ast.FuncLit:
+ // A FuncLit has no effects, but do not descend into it.
+ return false
+ default:
+ // All other expressions have effects
+ noEffects = false
+ }
+
+ return noEffects
+ })
+ return noEffects
+}
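
To make the semantics above concrete, here is a minimal usage sketch for NoEffects. It is written as if it were an extra in-package file beside fx.go (the package is internal, so it cannot be imported from outside x/tools); the demo function name and the sample source are illustrative assumptions, not part of this change.

package typesinternal

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

// demoNoEffects type-checks a tiny file and reports, for each element of
// the composite literal, whether NoEffects considers it safe to delete.
func demoNoEffects() {
	const src = `package p

var x int

func f() int { x++; return x }

var _ = []int{x + 1, f()}`

	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{Types: map[ast.Expr]types.TypeAndValue{}}
	var conf types.Config // no Importer needed: src has no imports
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if lit, ok := n.(*ast.CompositeLit); ok {
			for _, elt := range lit.Elts {
				// x + 1 has no effects; f() mutates x, so it does.
				fmt.Printf("%-8s NoEffects=%v\n", types.ExprString(elt), NoEffects(info, elt))
			}
		}
		return true
	})
}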
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
new file mode 100644
index 000000000..f2affec4f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
@@ -0,0 +1,71 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/types"
+ "slices"
+)
+
+// IsTypeNamed reports whether t is (or is an alias for) a
+// package-level defined type with the given package path and one of
+// the given names. It returns false if t is nil.
+//
+// This function avoids allocating the concatenation of "pkg.Name",
+// which is important for the performance of syntax matching.
+func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool {
+ if named, ok := types.Unalias(t).(*types.Named); ok {
+ tname := named.Obj()
+ return tname != nil &&
+ IsPackageLevel(tname) &&
+ tname.Pkg().Path() == pkgPath &&
+ slices.Contains(names, tname.Name())
+ }
+ return false
+}
+
+// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a
+// package-level defined type with the given package path and one of the given
+// names. It returns false if t is not a pointer type.
+func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool {
+ r := Unpointer(t)
+ if r == t {
+ return false
+ }
+ return IsTypeNamed(r, pkgPath, names...)
+}
+
+// IsFunctionNamed reports whether obj is a package-level function
+// defined in the given package and has one of the given names.
+// It returns false if obj is nil.
+//
+// This function avoids allocating the concatenation of "pkg.Name",
+// which is important for the performance of syntax matching.
+func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool {
+ f, ok := obj.(*types.Func)
+ return ok &&
+ IsPackageLevel(obj) &&
+ f.Pkg().Path() == pkgPath &&
+ f.Type().(*types.Signature).Recv() == nil &&
+ slices.Contains(names, f.Name())
+}
+
+// IsMethodNamed reports whether obj is a method defined on a
+// package-level type with the given package and type name, and has
+// one of the given names. It returns false if obj is nil.
+//
+// This function avoids allocating the concatenation of "pkg.TypeName.Name",
+// which is important for the performance of syntax matching.
+func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool {
+ if fn, ok := obj.(*types.Func); ok {
+ if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
+ _, T := ReceiverNamed(recv)
+ return T != nil &&
+ IsTypeNamed(T, pkgPath, typeName) &&
+ slices.Contains(names, fn.Name())
+ }
+ }
+ return false
+}
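
A small usage sketch for the predicates above, again written as a hypothetical in-package file; the demo function name, the sample program, and the chosen symbols (fmt.Sprintf, bytes.Buffer.WriteString) are assumptions for illustration only.

package typesinternal

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

// demoIsNamed type-checks a small file and uses the predicates above to
// recognize well-known symbols without allocating "pkg.Name" strings.
func demoIsNamed() {
	const src = `package p

import (
	"bytes"
	"fmt"
)

func g(b *bytes.Buffer) string { b.WriteString("hi"); return fmt.Sprintf("%v", b) }`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{Uses: map[*ast.Ident]types.Object{}}
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
		panic(err)
	}
	for id, obj := range info.Uses {
		switch {
		case IsFunctionNamed(obj, "fmt", "Sprintf", "Errorf"):
			fmt.Printf("%s: call to a fmt formatting function\n", fset.Position(id.Pos()))
		case IsMethodNamed(obj, "bytes", "Buffer", "Write", "WriteString"):
			fmt.Printf("%s: write to a bytes.Buffer\n", fset.Position(id.Pos()))
		}
	}
}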
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
index b64f714eb..64f47919f 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
@@ -15,6 +15,14 @@ import (
// file.
// If the same package is imported multiple times, the last appearance is
// recorded.
+//
+// TODO(adonovan): this function ignores the effect of shadowing. It
+// should accept a [token.Pos] and a [types.Info] and compute only the
+// set of imports that are not shadowed at that point, analogous to
+// [analysisinternal.AddImport]. It could also compute (as a side
+// effect) the set of additional imports required to ensure that there
+// is an accessible import for each necessary package, making it
+// converge even more closely with AddImport.
func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
// Construct mapping of import paths to their defined names.
// It is only necessary to look at renaming imports.
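
A brief sketch of how FileQualifier is typically combined with types.TypeString, under the same in-package assumption; the expected output is inferred from the doc comment above (renaming imports take precedence over package names), so treat it as illustrative rather than authoritative.

package typesinternal

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

// demoFileQualifier formats a type the way the file spells it,
// honoring the file's renaming import of "io".
func demoFileQualifier() {
	const src = `package p

import myio "io"

var r myio.Reader`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	qual := FileQualifier(f, pkg)
	r := pkg.Scope().Lookup("r")
	fmt.Println(types.TypeString(r.Type(), qual)) // expected: "myio.Reader"
}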
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index a5cd7e8db..fef74a785 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -2,8 +2,20 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package typesinternal provides access to internal go/types APIs that are not
-// yet exported.
+// Package typesinternal provides helpful operators for dealing with
+// go/types:
+//
+// - operators for querying typed syntax trees (e.g. [Imports], [IsFunctionNamed]);
+// - functions for converting types to strings or syntax (e.g. [TypeExpr], [FileQualifier]);
+// - helpers for working with the [go/types] API (e.g. [NewTypesInfo]);
+// - access to internal go/types APIs that are not yet
+// exported (e.g. [SetUsesCgo], [ErrorCodeStartEnd], [VarKind]); and
+// - common algorithms related to types (e.g. [TooNewStdSymbols]).
+//
+// See also:
+// - [golang.org/x/tools/internal/astutil], for operations on untyped syntax;
+// - [golang.org/x/tools/internal/analysisinternal], for helpers for analyzers;
+// - [golang.org/x/tools/internal/refactor], for operators to compute text edits.
package typesinternal
import (
@@ -13,6 +25,7 @@ import (
"reflect"
"unsafe"
+ "golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/internal/aliases"
)
@@ -60,6 +73,9 @@ func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, o
// which is often excessive.)
//
// If pkg is nil, it is equivalent to [*types.Package.Name].
+//
+// TODO(adonovan): all uses of this with TypeString should be
+// eliminated when https://go.dev/issues/75604 is resolved.
func NameRelativeTo(pkg *types.Package) types.Qualifier {
return func(other *types.Package) string {
if pkg != nil && pkg == other {
@@ -153,3 +169,31 @@ func NewTypesInfo() *types.Info {
FileVersions: map[*ast.File]string{},
}
}
+
+// EnclosingScope returns the innermost block logically enclosing the cursor.
+func EnclosingScope(info *types.Info, cur inspector.Cursor) *types.Scope {
+ for cur := range cur.Enclosing() {
+ n := cur.Node()
+ // A function's Scope is associated with its FuncType.
+ switch f := n.(type) {
+ case *ast.FuncDecl:
+ n = f.Type
+ case *ast.FuncLit:
+ n = f.Type
+ }
+ if b := info.Scopes[n]; b != nil {
+ return b
+ }
+ }
+ panic("no Scope for *ast.File")
+}
+
+// Imports reports whether path is imported by pkg.
+func Imports(pkg *types.Package, path string) bool {
+ for _, imp := range pkg.Imports() {
+ if imp.Path() == path {
+ return true
+ }
+ }
+ return false
+}
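
And a short sketch of the early-exit pattern that Imports enables, again as a hypothetical in-package demo; note that, as the implementation shows, only the package's direct imports are consulted.

package typesinternal

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

// demoImports skips analysis of a package that does not directly import
// the API of interest.
func demoImports() {
	const src = `package p

import _ "encoding/json"`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(Imports(pkg, "encoding/json")) // true: direct import
	fmt.Println(Imports(pkg, "net/http"))      // false: transitive deps are not listed
}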
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
index d272949c1..453bba2ad 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -204,23 +204,12 @@ func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
}
}
-// IsZeroExpr uses simple syntactic heuristics to report whether expr
-// is a obvious zero value, such as 0, "", nil, or false.
-// It cannot do better without type information.
-func IsZeroExpr(expr ast.Expr) bool {
- switch e := expr.(type) {
- case *ast.BasicLit:
- return e.Value == "0" || e.Value == `""`
- case *ast.Ident:
- return e.Name == "nil" || e.Name == "false"
- default:
- return false
- }
-}
-
// TypeExpr returns syntax for the specified type. References to named types
// are qualified by an appropriate (optional) qualifier function.
// It may panic for types such as Tuple or Union.
+//
+// See also https://go.dev/issues/75604, which will provide a robust
+// Type-to-valid-Go-syntax formatter.
func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
switch t := t.(type) {
case *types.Basic: